id
stringlengths 1
265
| text
stringlengths 6
5.19M
| dataset_id
stringclasses 7
values |
---|---|---|
1612889 | def url():
import urllib.request
try:
site = urllib.request.urlopen('http://pudim.com.br/')
except:
print('\33[1:31mFalha!\33[m\nImpossível de acessar o site no momento!')
else:
print('\33[1:32mSucesso!\33[m\nSite funcionando normalmente!') | StarcoderdataPython |
3210732 | def persistence(n):
nums = [int(x) for x in str(n)]
count = 0
while len(nums) != 1:
n = 1
for i in nums:
n*= i
nums = [int(x) for x in str(n)]
count+=1
return count | StarcoderdataPython |
126045 | <gh_stars>0
""" -----------------------------------------------------
# TECHNOGIX
# -------------------------------------------------------
# Copyright (c) [2022] Technogix SARL
# All rights reserved
# -------------------------------------------------------
# Keywords to manage dynamodb tasks
# -------------------------------------------------------
# <NAME>, @06 october 2021
# Latest revision: 05 october 2021
# --------------------------------------------------- """
# System includes
from sys import path as syspath
from os import path
from json import loads
# Local include
syspath.append(path.normpath(path.join(path.dirname(__file__), './')))
from tool import Tool
class DynamoDBTools(Tool) :
""" Class providing tools to check AWS codecommit compliance """
def __init__(self):
""" Constructor """
super().__init__()
self.m_services.append('dynamodb')
def create_item(self, table, item) :
""" Create an item in a dynamodb table
---
table (str) : Dynamodb table to update
item (str) : Item to add in dynamodb
"""
if self.m_is_active['dynamodb'] :
json_item = loads(item)
self.m_clients['dynamodb'].put_item(TableName=table, Item=json_item)
def remove_item(self, table, item) :
""" Remove an item from a dynamodb table if it exists
---
table (str) : Dynamodb table to update
item (str) : Item to remove from dynamodb
"""
if self.m_is_active['dynamodb'] :
json_item = loads(item)
self.m_clients['dynamodb'].delete_item(TableName=table, Key=json_item)
def item_exists(self, table, item) :
""" Test if an item in a dynamodb table
---
table (str) : Dynamodb table to analyze
item (str) : Item to look for in dynamodb
"""
result = False
if self.m_is_active['dynamodb'] :
json_item = loads(item)
response = self.m_clients['dynamodb'].get_item(TableName=table, Key=json_item)
if 'Item' in response : result = True
return result
def list_tables(self) :
""" List all tables in account """
result = []
if self.m_is_active['dynamodb'] :
paginator = self.m_clients['dynamodb'].get_paginator('list_tables')
response_iterator = paginator.paginate()
for response in response_iterator :
for table in response['TableNames'] :
description = self.m_clients['dynamodb'].describe_table(TableName = table)
description['Table']['Tags'] = []
tags = self.m_clients['dynamodb'].get_paginator('list_tags_of_resource')
tag_iterator = tags.paginate(ResourceArn=description['Table']['TableArn'])
for tag in tag_iterator :
description['Table']['Tags'] = description['Table']['Tags'] + tag['Tags']
result.append(description['Table'])
return result
| StarcoderdataPython |
76906 | import importlib
module = importlib.import_module("07_the_sum_of_its_parts")
find_steps_order = module.find_steps_order
parse_instructions = module.parse_instructions
instructions = [
"Step C must be finished before step A can begin.",
"Step C must be finished before step F can begin.",
"Step A must be finished before step B can begin.",
"Step A must be finished before step D can begin.",
"Step B must be finished before step E can begin.",
"Step D must be finished before step E can begin.",
"Step F must be finished before step E can begin."
]
def test_part1():
order = find_steps_order(instructions, base_working_time=0)
assert order == (21, "CABDFE")
def test_part2():
order = find_steps_order(instructions, workers_count=2, base_working_time=0)
assert order == (15, "CABFDE")
def test_parse_instructions():
steps = parse_instructions(instructions)
assert steps["A"].name == "A"
assert steps["A"].after_names == ["C"]
assert steps["A"].before_names == ["B", "D"]
assert steps["B"].name == "B"
assert steps["B"].after_names == ["A"]
assert steps["B"].before_names == ["E"]
assert steps["C"].name == "C"
assert steps["C"].after_names == []
assert steps["C"].before_names == ["A", "F"]
assert steps["D"].name == "D"
assert steps["D"].after_names == ["A"]
assert steps["D"].before_names == ["E"]
assert steps["E"].name == "E"
assert steps["E"].after_names == ["B", "D", "F"]
assert steps["E"].before_names == []
| StarcoderdataPython |
1737516 | """
Copyright 2021 Nirlep_5252_
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import discord
import asyncio
from discord.ext import commands
from discord.utils import escape_markdown
from humanfriendly import format_timespan
from typing import Optional, Union
from cogs_hidden.automod import (
am_add_badwords, am_disable_modules, am_enable_a_module,
am_enable_module_dropdown, am_remove_badwords, am_whitelist_func, link_add_to_whitelist,
link_remove_from_whitelist, show_automod_config, view_badword_list,
view_whitelisted_links_list
)
from utils.bot import EpicBot
from utils.message import wait_for_msg
from utils.time import convert
from utils.random import gen_random_string
from utils.embed import success_embed, error_embed
from utils.converters import AddRemoveConverter, AutomodModule, Lower, Url
from utils.ui import Confirm
from config import (
BADGE_EMOJIS, EMOJIS, RED_COLOR
)
# warning: the code down below is absolute trash
# i can improve this by a lot, but i just dont have the time for that
# but im sure u do ;)
class AntiAltsSelectionView(discord.ui.View):
def __init__(self, context):
super().__init__(timeout=None)
self.level = 0
self.context = context
self.cancelled = False
@discord.ui.select(placeholder="Please select a level.", options=[
discord.SelectOption(
label="Level 01",
description="Restrict the suspect from sending messages.",
value='1', emoji='🔹'
),
discord.SelectOption(
label="Level 02",
description="Kick the suspect from the server.",
value='2', emoji='💎'
),
discord.SelectOption(
label="Level 03",
description="Ban the suspect from the server.",
value='3', emoji='<a:diamond:862594390256910367>'
),
])
async def callback(self, select: discord.ui.Select, interaction: discord.Interaction):
if interaction.user != self.context.author:
return await interaction.response.send_message("You cannot interact in someone else's interaction.", ephemeral=True)
self.level = int(select.values[0])
await interaction.response.send_message(f"Alt protection Level **{select.values[0]}** has been selected. Please click the `Next` button to continue.", ephemeral=True)
@discord.ui.button(label='Cancel', style=discord.ButtonStyle.red)
async def cancel(self, b: discord.ui.Button, i: discord.Interaction):
if i.user != self.context.author:
return await i.response.send_message("You cannot interact in someone else's interaction.", ephemeral=True)
self.cancelled = True
self.stop()
@discord.ui.button(label='Next', style=discord.ButtonStyle.green)
async def next(self, button: discord.ui.Button, interaction: discord.Interaction):
if interaction.user != self.context.author:
return await interaction.response.send_message("You cannot interact in someone else's interaction.", ephemeral=True)
if self.level == 0:
return await interaction.response.send_message("Please select a level first!", ephemeral=True)
self.stop()
class mod(commands.Cog, description="Keep your server safe! 🛠️"):
def __init__(self, client: EpicBot):
self.client = client
@commands.group(name='automod', aliases=['am'], help="Configure automod for your server!")
@commands.cooldown(3, 10, commands.BucketType.user)
async def _automod(self, ctx: commands.Context):
if ctx.invoked_subcommand is None:
return await ctx.send_help(ctx.command)
@_automod.command(name='show', help='Get the current automod configuration.')
@commands.cooldown(1, 10, commands.BucketType.user)
async def am_show(self, ctx: commands.Context):
embed, view = await show_automod_config(ctx)
await ctx.reply(embed=embed, view=view)
@_automod.command(name='enable', help="Enable a module for your automod!")
@commands.has_permissions(administrator=True)
@commands.bot_has_permissions(administrator=True)
@commands.cooldown(2, 20, commands.BucketType.user)
async def automod_enable_module(self, ctx: commands.Context, module: Optional[AutomodModule] = None):
if module is not None:
await am_enable_a_module(ctx, module)
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Enabled!",
f"The automod module `{module}` has been enabled!"
))
else:
embed, view = await am_enable_module_dropdown(ctx)
await ctx.reply(embed=embed, view=view)
@_automod.command(name='disable', help="Disable a module for your automod!")
@commands.has_permissions(administrator=True)
@commands.bot_has_permissions(administrator=True)
@commands.cooldown(2, 20, commands.BucketType.user)
async def automod_disable_module(self, ctx: commands.Context, modules: commands.Greedy[AutomodModule] = None):
if not modules:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please specify a module to disable!\nCorrect Usage: `{ctx.clean_prefix}automod disable <module>`",
))
await am_disable_modules(ctx, *modules)
await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Module Disabled!",
f"Module{'' if len(modules) == 1 else 's'}: {', '.join(['`' + module + '`' for module in modules])} {'has' if len(modules) == 1 else 'have'} now been disabled.",
))
@_automod.command(name="logchannel", help="Set the automod log channel.", aliases=['logs', 'log'])
@commands.has_permissions(administrator=True)
@commands.bot_has_permissions(administrator=True)
@commands.cooldown(2, 20, commands.BucketType.user)
async def automod_set_log_channel(self, ctx: commands.Context, channel: Union[discord.TextChannel, Lower] = None):
p = ctx.clean_prefix
if channel is None:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please mention a channel.\nCorrect Usage: `{p}automod logchannel #channel`\nTo remove it: `{p}automod logchannel disable`"
))
g = await self.client.get_guild_config(ctx.guild.id)
am = g['automod']
if isinstance(channel, discord.TextChannel):
am['log_channel'] = channel.id
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Log Channel Set!",
f"The automod log channel has been set to {channel.mention}",
))
elif channel in ['disable', 'remove']:
am['log_channel'] = None
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Log Channel Removed!",
"Automod logs have now been turned off."
))
else:
raise commands.ChannelNotFound(channel)
@_automod.group(name='badwords', aliases=['badword'], help="Configure the `banned_words` automod module.", invoke_without_command=True)
@commands.has_permissions(administrator=True)
@commands.bot_has_permissions(administrator=True)
@commands.cooldown(1, 10, commands.BucketType.user)
async def automod_badword(self, ctx: commands.Context):
if ctx.invoked_subcommand is None:
return await ctx.send_help(ctx.command)
@automod_badword.command(name='add', help="Add a bad word to the list!")
@commands.has_permissions(administrator=True)
@commands.bot_has_permissions(administrator=True)
@commands.cooldown(2, 20, commands.BucketType.user)
async def am_badword_add(self, ctx: commands.Context, words: commands.Greedy[Lower] = None):
if words is None:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please provide a word to add.\nCorrect Usage: `{ctx.clean_prefix}automod badword add <word> ...`\n\nNote: You can type multiple words seperated with a space to add more than one words."
))
added, already_exist = await am_add_badwords(ctx, *words)
if len(added) == 0:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Already exists!",
f"The word{'' if len(already_exist) == 1 else 's'} {', '.join(already_exist)} are already added."
))
await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Word{'' if len(added) == 1 else 's'} added!",
f"The word{'' if len(added) == 1 else 's'}: {', '.join(['`' + word + '`' for word in added])} {'has' if len(added) == 1 else 'have'} been added.\nYou can use `{ctx.clean_prefix}automod badwords show` to get the list."
))
@automod_badword.command(name='remove', help="Remove a bad word from the list!")
@commands.has_permissions(administrator=True)
@commands.bot_has_permissions(administrator=True)
@commands.cooldown(2, 20, commands.BucketType.user)
async def am_badword_remove(self, ctx: commands.Context, words: commands.Greedy[Lower] = None):
if words is None:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please provide a word to remove.\nCorrect Usage: `{ctx.clean_prefix}automod badword remove <word> ...`\n\nNote: You can type multiple words seperated with a space to remove more than one words."
))
removed, not_exist = await am_remove_badwords(ctx, *words)
if len(removed) == 0:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Not found!",
f"The word{'' if len(not_exist) == 1 else 's'} {', '.join(not_exist)} {'was' if len(not_exist) == 1 else 'were'} not found.\nPlease use `{ctx.clean_prefix}automod badword show` to get the whole list."
))
await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Word{'' if len(removed) == 1 else 's'} removed!",
f"The word{'' if len(removed) == 1 else 's'}: {', '.join(['`' + word + '`' for word in removed])} {'has' if len(removed) == 1 else 'have'} been removed.\nYou can use `{ctx.clean_prefix}automod badwords show` to get the list."
))
@automod_badword.command(name='list', aliases=['show', 'l'], help="View the list of bad words!")
@commands.has_permissions(administrator=True)
@commands.bot_has_permissions(administrator=True)
@commands.cooldown(2, 20, commands.BucketType.user)
async def am_badword_list(self, ctx: commands.Context):
embed, view = await view_badword_list(ctx)
try:
await ctx.author.send(embed=embed, view=view)
await ctx.message.add_reaction('👌')
except discord.Forbidden:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(f"{EMOJIS['tick_no']}I was unable to DM you the list, please enable your DMs.")
@_automod.group(name='links', aliases=['link'], help="Configure the `links` automod module for your server.", invoke_without_command=True)
@commands.has_permissions(administrator=True)
@commands.bot_has_permissions(administrator=True)
@commands.cooldown(1, 10, commands.BucketType.user)
async def automod_links(self, ctx: commands.Context):
if ctx.invoked_subcommand is None:
return await ctx.send_help(ctx.command)
@automod_links.command(name='add', help="Add a link to the whitelist links!")
@commands.has_permissions(administrator=True)
@commands.bot_has_permissions(administrator=True)
@commands.cooldown(1, 10, commands.BucketType.user)
async def add_whitelist_link(self, ctx: commands.Context, url: Url = None):
prefix = ctx.clean_prefix
if url is None:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please provide a link to whitelist.\nCorrect Usage: `{prefix}automod links add <link>`\nExample: `{prefix}automod links add https://example.com`"
))
final = await link_add_to_whitelist(ctx, url)
if final:
await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Link added!",
f"The link: `{url}` has been added to the whitelist."
))
else:
ctx.command.reset_cooldown(ctx)
await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Link already there!",
f"This link: `{url}` is already there in the whitelist.\nPlease use `{prefix}automod links show` to view all the whitelisted links."
))
@automod_links.command(name='remove', help="Remove a link from the whitelisted links!")
@commands.has_permissions(administrator=True)
@commands.bot_has_permissions(administrator=True)
@commands.cooldown(1, 10, commands.BucketType.user)
async def remove_whitelist_links(self, ctx: commands.Context, url: Url = None):
prefix = ctx.clean_prefix
if url is None:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please provide a link to unwhitelist.\nCorrect Usage: `{prefix}automod links remove <link>`\nExample: `{prefix}automod links remove https://example.com`"
))
final = await link_remove_from_whitelist(ctx, url)
if final:
await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Link removed!",
f"The link: `{url}` has been removed from the whitelist."
))
else:
ctx.command.reset_cooldown(ctx)
await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Link not found!",
f"This link: `{url}` is not in the whitelist.\nPlease use `{prefix}automod links show` to view all the whitelisted links."
))
@automod_links.command(name='list', aliases=['show'], help="See a list of whitelisted links!")
@commands.has_permissions(administrator=True)
@commands.bot_has_permissions(administrator=True)
@commands.cooldown(1, 10, commands.BucketType.user)
async def view_whitelist_links(self, ctx: commands.Context):
embed, view = await view_whitelisted_links_list(ctx)
try:
await ctx.author.send(embed=embed, view=view)
await ctx.message.add_reaction('👌')
except discord.Forbidden:
await ctx.reply(f"{EMOJIS['tick_no']}I was unable to DM you the list, please enable your DMs.")
@_automod.command(name='whitelist', help="Whitelist roles/channels!")
@commands.has_permissions(administrator=True)
@commands.bot_has_permissions(administrator=True)
@commands.cooldown(2, 20, commands.BucketType.user)
async def am_whitelist_stuff(self, ctx: commands.Context, choice: Optional[AddRemoveConverter] = None, setting: Optional[Union[discord.TextChannel, discord.Role]] = None):
p = ctx.clean_prefix
correct_usage = f"{p}automod whitelist add/remove @role/#channel"
example = f"{p}automod whitelist add @boosters"
you_idiot = error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Correct Usage: `{correct_usage}`\nExample: `{example}`"
)
if choice is None or setting is None:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=you_idiot)
final = await am_whitelist_func(ctx, choice, setting)
if final:
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Success!",
f"Users {'with' if isinstance(setting, discord.Role) else 'in'} {setting.mention} will {'no longer' if choice else 'now'} trigger automod."
))
else:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Failure!",
f"{setting.mention} is {'already' if choice else 'not'} a whitelisted {'role' if isinstance(setting, discord.Role) else 'channel'}."
))
@commands.command(help="Configure Mikuni antialts system.", aliases=['antiraid', 'antialt'])
@commands.cooldown(3, 30, commands.BucketType.user)
@commands.bot_has_guild_permissions(administrator=True)
@commands.has_permissions(administrator=True)
async def antialts(self, ctx, config=None, setting: Union[discord.TextChannel, discord.Role, int, str] = None):
# yes i know this is messy
# and i dont care
prefix = ctx.clean_prefix
g = await self.client.get_guild_config(ctx.guild.id)
aa = g['antialts']
enabled = False if not aa else True
info_embed = success_embed(
f"{BADGE_EMOJIS['bot_mod']} Alt protection",
f"""
Alt protection is current **{'Enabled' if enabled else 'Disabled'}**.
**Level:** `{'0' if not enabled else aa['level']}`
**Log channel:** {'None' if not enabled else '<#'+str(aa['log_channel'])+'>'}
**Minimum account age:** {'None' if not enabled else format_timespan(aa['min_account_age']*24*60*60)}
**Restricted Role:** {'None' if not enabled else '<@&'+str(aa['restricted_role'])+'>'}
"""
).add_field(
name="🔹 Level 01",
value="The bot will restrict the suspect from sending messages in the server and log their info.",
inline=True
).add_field(
name="💎 Level 02",
value="The bot will kick the suspect and log their info, they will be banned if they try to join again.",
inline=True
).add_field(
name="<a:diamond:862594390256910367> Level 03",
value="The bot will ban the suspect and log their info.",
inline=True
).add_field(
name="Commands:",
value=f"""
- `{prefix}antialt enable/disable` - To enable/disable alt protection.
- `{prefix}antialt minage <time>` - To set the minimum age.
- `{prefix}antialt level <number>` - To change the protection level.
- `{prefix}antialt channel #channel` - To change the log channel.
- `{prefix}antialt role @role` - To change the restricted role.
- `{prefix}kickalts` - Kicks all the users with the restricted role.
- `{prefix}banalts` - Bans all the users with the restricted role.
- `{prefix}grantaccess` - Grants server access to a restricted user.
"""
)
if config is None:
return await ctx.reply(embed=info_embed)
if config.lower() == 'enable':
if enabled:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Already enabled.", "Alt protection is already enabled."))
log_channel = None
min_account_age = None
restricted_role = None
view = AntiAltsSelectionView(context=ctx)
msg = await ctx.reply(f"""
**Antialts setup**
- {EMOJIS['idle']} Level.
- {EMOJIS['dnd']} Log channel.
- {EMOJIS['dnd']} Minimum account age.
- {EMOJIS['dnd']} Restricted role.
Please select a protection level.
""", view=view)
await view.wait()
if view.cancelled:
return await msg.edit(
content="",
embed=discord.Embed(title=f"{EMOJIS['tick_no']} Cancelled", color=RED_COLOR),
view=None
)
await msg.edit(f"""
**Antialts setup**
- {EMOJIS['online']} Level: `{view.level}`
- {EMOJIS['idle']} Log channel.
- {EMOJIS['dnd']} Minimum account age.
- {EMOJIS['dnd']} Restricted role.
Please enter a log channel.
Type `create` to automatically create a channel.
Type `cancel` to cancel the command.
""", view=None)
m = await wait_for_msg(ctx, 60, msg)
if m == 'pain':
return
if m.content.lower() == 'create':
overwrites = {
ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False),
ctx.guild.me: discord.PermissionOverwrite(read_messages=True)
}
created_channel = await ctx.guild.create_text_channel('alt-logs', overwrites=overwrites)
log_channel = created_channel.id
else:
try:
lul_channel = await commands.TextChannelConverter().convert(ctx=ctx, argument=m.content)
log_channel = lul_channel.id
except commands.ChannelNotFound:
return await msg.reply(content="", embed=error_embed(
f"{EMOJIS['tick_no']} Not found!",
f"I wasn't able to find the channel {m.content}, please try again."
), view=None)
await msg.edit(f"""
**Antialts setup**
- {EMOJIS['online']} Level: `{view.level}`
- {EMOJIS['online']} Log channel: <#{log_channel}>
- {EMOJIS['idle']} Minimum account age.
- {EMOJIS['dnd']} Restricted role.
Please enter the minimum account age requirement (in days).
Type `none` to have the default value (7 days).
Type `cancel` to cancel the setup.
""", view=None)
m = await wait_for_msg(ctx, 60, msg)
if m == 'pain':
return
try:
if m.content.lower() != 'none':
temp_acc_age = int(m.content)
if temp_acc_age <= 0:
return await msg.edit(content="", embed=error_embed(
f"{EMOJIS['tick_no']} Positive values only!",
"Account age can only be a positive number."
))
min_account_age = temp_acc_age
else:
min_account_age = 7
except Exception:
return await msg.edit(content="", embed=error_embed(
f"{EMOJIS['tick_no']} Integer values only!",
"Please enter an integer next time."
))
await msg.edit(f"""
**Antialts setup**
- {EMOJIS['online']} Level: `{view.level}`
- {EMOJIS['online']} Log channel: <#{log_channel}>
- {EMOJIS['online']} Minimum account age: {min_account_age} days.
- {EMOJIS['idle']} Restricted role.
Please enter a restricted role.
Type `create` to create one automatically.
Type `cancel` to cancel the setup.
""")
m = await wait_for_msg(ctx, 60, msg)
if m == 'pain':
return
if m.content.lower() != 'create':
try:
r_role = await commands.RoleConverter().convert(ctx=ctx, argument=m.content)
except Exception:
return await msg.edit(content="", embed=error_embed(
f"{EMOJIS['tick_no']} Not found!",
f"I wasn't able to find the role {m.content}\nPlease re-run the command."
))
restricted_role = r_role.id
else:
await msg.edit(f"Creating the role, this may take a while... {EMOJIS['loading']}")
r_role = await ctx.guild.create_role(name="Restricted", color=0x818386)
for channel in ctx.guild.channels:
try:
await channel.set_permissions(
r_role,
speak=False,
send_messages=False,
add_reactions=False
)
except Exception as e:
print(e)
restricted_role = r_role.id
await msg.edit(f"""
**Setup complete**
Here are you settings:
- {EMOJIS['online']} Level: `{view.level}`
- {EMOJIS['online']} Log channel: <#{log_channel}>
- {EMOJIS['online']} Minimum account age: {min_account_age} days.
- {EMOJIS['online']} Restricted role: <@&{restricted_role}>
""")
g.update({"antialts": {
"level": int(view.level),
"log_channel": log_channel,
"min_account_age": min_account_age,
"restricted_role": restricted_role
}})
return
if config.lower() == 'disable':
if not enabled:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Already disabled.", "Alt protection is already disabled."))
g.update({"antialts": False})
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Disabled",
"Alt protection has now been disabled."
))
if config.lower() == 'minage':
if not enabled:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Not enabled.", f"Please enable alt protection system first.\nUsage: `{prefix}antialts enable`"))
if config is None:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Invalid Usage", f"Please use `{prefix}antialts minage <number>`"))
if not isinstance(setting, int):
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Intergers only!", "The minimum age number should be an integer only!"))
if setting <= 0:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Positive integers only!", "The minimum account age number can only be positive."))
aa.update({"min_account_age": setting})
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Updated!",
f"The minimum account age has been updated to `{setting}` day(s)."
))
if config.lower() == 'level':
if not enabled:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Not enabled.", f"Please enable alt protection system first.\nUsage: `{prefix}antialts enable`"))
if config is None:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Invalid Usage", f"Please use `{prefix}antialts level <number>`"))
if not isinstance(setting, int):
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Intergers only!", "The level number should be an integer between 1 and 3 only!"))
if not 1 <= setting <= 3:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Invalid level value!", "The level number should be between and 1 and 3 only!"))
aa.update({"level": setting})
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Updated!",
f"The alt protection level has been updated to level `{setting}`"
))
if config.lower() == 'channel':
if not enabled:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Not enabled.", f"Please enable alt protection system first.\nUsage: `{prefix}antialts enable`"))
if config is None:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Invalid Usage", f"Please use `{prefix}antialts channel #channel`"))
if not isinstance(setting, discord.TextChannel):
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Not found!", f"I wasn't able to find channel {setting}, please try again."))
aa.update({"log_channel": setting.id})
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Updated!",
f"The log channel has been updated to {setting.mention}"
))
if config.lower() == 'role':
if not enabled:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Not enabled.", f"Please enable alt protection system first.\nUsage: `{prefix}antialts enable`"))
if config is None:
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Invalid Usage", f"Please use `{prefix}antialts role @role`"))
if not isinstance(setting, discord.Role):
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Not found!", f"I wasn't able to find the role {setting}, please try again."))
aa.update({"restricted_role": setting.id})
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Updated!",
f"The restricted role has been updated to {setting.mention}"
))
else:
return await ctx.reply(embed=info_embed)
@commands.command(help="Kick all resitricted users by alt protection system.", aliases=['kickrestricted'])
@commands.has_permissions(kick_members=True)
@commands.bot_has_permissions(kick_members=True)
@commands.cooldown(3, 30, commands.BucketType.user)
async def kickalts(self, ctx: commands.Context):
prefix = ctx.clean_prefix
g = await self.client.get_guild_config(ctx.guild.id)
aa = g['antialts']
enabled = False if not aa else True
if not enabled:
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Alt protection not enabled!",
f"You can only use this command if alt protection is enabled.\nPlease use `{prefix}antialts enable` to enable it."
))
role = ctx.guild.get_role(aa['restricted_role'])
if role is None:
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Restricted role not found!",
"Looks like the restricted role has been deleted."
))
m = await ctx.reply(f"Working on it... {EMOJIS['loading']}")
kids = role.members
if len(kids) == 0:
return await m.edit(content="", embed=error_embed(
f"{EMOJIS['tick_no']} No restricted users found!",
f"There are no users having the role {role.mention} in this server."
))
kicked_count = 0
for kid in kids:
try:
await kid.kick(reason=f"Action done by user: {ctx.author} ({ctx.author.id})")
kicked_count += 1
await asyncio.sleep(0.5)
except Exception:
pass
await m.edit(f"I have kicked `{kicked_count}` restricted users out of `{len(kids)}`.")
log_channel = self.client.get_channel(aa['log_channel'])
if log_channel is None:
return
await log_channel.send(embed=success_embed(
"Alts kicked!",
f"**{kicked_count}** alts have been kicked by {ctx.author.mention}"
).set_author(name=ctx.author, icon_url=ctx.author.display_avatar.url
).set_footer(text=f"ID: {ctx.author.id}"))
@commands.command(help="Ban all resitricted users by alt protection system.", aliases=['banrestricted'])
@commands.has_permissions(ban_members=True)
@commands.bot_has_permissions(ban_members=True)
@commands.cooldown(3, 30, commands.BucketType.user)
async def banalts(self, ctx: commands.Context):
prefix = ctx.clean_prefix
g = await self.client.get_guild_config(ctx.guild.id)
aa = g['antialts']
enabled = False if not aa else True
if not enabled:
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Alt protection not enabled!",
f"You can only use this command if alt protection is enabled.\nPlease use `{prefix}antialts enable` to enable it."
))
role = ctx.guild.get_role(aa['restricted_role'])
if role is None:
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Restricted role not found!",
"Looks like the restricted role has been deleted."
))
m = await ctx.reply(f"Working on it... {EMOJIS['loading']}")
kids = role.members
if len(kids) == 0:
return await m.edit(content="", embed=error_embed(
f"{EMOJIS['tick_no']} No restricted users found!",
f"There are no users having the role {role.mention} in this server."
))
banned_count = 0
for kid in kids:
try:
await kid.ban(reason=f"Action done by user: {ctx.author} ({ctx.author.id})")
banned_count += 1
await asyncio.sleep(0.5)
except Exception:
pass
await m.edit(f"I have banned `{banned_count}` restricted users out of `{len(kids)}`.")
log_channel = self.client.get_channel(aa['log_channel'])
if log_channel is None:
return
await log_channel.send(embed=success_embed(
"Alts banned!",
f"**{banned_count}** alts have been banned by {ctx.author.mention}"
).set_author(name=ctx.author, icon_url=ctx.author.display_avatar.url
).set_footer(text=f"ID: {ctx.author.id}"))
@commands.command(help="Give server access to a user who is restricted.", aliases=['giveaccess', 'unrestrict'])
@commands.cooldown(3, 30, commands.BucketType.user)
@commands.bot_has_guild_permissions(manage_roles=True, ban_members=True)
@commands.has_permissions(manage_roles=True)
async def grantaccess(self, ctx, user: Union[discord.Member, discord.User, str] = None):
prefix = ctx.clean_prefix
g = await self.client.get_guild_config(ctx.guild.id)
aa = g['antialts']
enabled = False if not aa else True
if not enabled:
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Not enabled!",
"You need to enable alt protection in order to use this command."
))
role = ctx.guild.get_role(aa['restricted_role'])
if user is None:
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Please mention a user.",
f"Correct usage: `{prefix}grantaccess @user/userid`"
))
if isinstance(user, discord.Member):
if role not in user.roles:
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Access already granted!",
"It looks like this user already has access to the server."
))
await user.remove_roles(role, reason=f"Access granted by {ctx.author} ({ctx.author.id})")
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Access granted!",
f"Access to the server has been granted to the user {user.mention}"
))
elif isinstance(user, discord.User):
bans = await ctx.guild.bans()
for b in bans:
if b.user == user:
await ctx.guild.unban(b.user, f"Access granted by {ctx.author} ({ctx.author.id})")
return await ctx.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Access granted!",
f"Access to the server has been granted to the user {user.mention}"
))
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Access already granted!",
"It looks like this user already has access to the server."
))
raise commands.UserNotFound(user)
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.has_permissions(manage_roles=True)
@commands.bot_has_guild_permissions(manage_roles=True, manage_channels=True)
@commands.command(help="Mute someone.")
async def mute(self, ctx: commands.Context, user: discord.Member = None, *, reason="No Reason Provided"):
prefix = ctx.clean_prefix
if user is None:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please mention a user.\nCorrect Usage: `{prefix}mute @user [reason]`\nExample: `{prefix}mute @egirl spamming`"
))
if user == ctx.author:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Bruh!", "You can't mute yourself!"))
if user == self.client.user:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(":(", "Why u do this to me?! *cries*"))
if user.bot:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Bruh!", "You can't mute bots!"))
if int(user.top_role.position) >= int(ctx.author.top_role.position) and ctx.author.id != ctx.guild.owner_id:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} No!",
f"You cannot mute **{escape_markdown(str(user))}** because they are a mod/admin."
))
muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
if muted_role is None:
muted_role = await ctx.guild.create_role(name="Muted", color=0x818386)
for channel in ctx.guild.channels:
try:
await channel.set_permissions(
muted_role,
speak=False,
send_messages=False,
add_reactions=False
)
except Exception as e:
print(e)
try:
if muted_role in user.roles:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Bruh!", f"**{escape_markdown(str(user))}** is already muted!"))
await user.add_roles(muted_role, reason=f"{ctx.author} ({ctx.author.id}): {reason}")
except Exception:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Missing Permissions!",
f"The {muted_role.mention} role is higher than my top role.\nPlease give me a higher role to fix this issue."
))
await ctx.reply(embed=success_embed(
f"{EMOJIS['muted']} Done!",
f"I have muted **{escape_markdown(str(user))}**."
))
try:
await user.send(embed=success_embed(
f"{EMOJIS['muted']} You have been muted!",
f"You were muted in **{escape_markdown(str(ctx.guild))}**."
).add_field(name="Reason:", value=reason, inline=False))
except Exception:
pass
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.has_permissions(manage_roles=True)
@commands.bot_has_guild_permissions(manage_roles=True, manage_channels=True)
@commands.command(help="Unmute a muted user.")
async def unmute(self, ctx: commands.Context, user: discord.Member = None):
prefix = ctx.clean_prefix
if user is None:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please mention a user.\nCorrect Usage: `{prefix}unmute @user [reason]`\nExample: `{prefix}unmute @egirl`"
))
if user == ctx.author:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Bruh!", "You can't unmute yourself!"))
if user == self.client.user:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed("Bruh", "You can't use this command on me!"))
if user.bot:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Bruh!", "You can't unmute bots!"))
muted_role = discord.utils.get(ctx.guild.roles, name="Muted")
if muted_role is None:
muted_role = await ctx.guild.create_role(name="Muted", color=0x818386)
for channel in ctx.guild.channels:
try:
await channel.set_permissions(
muted_role,
speak=False,
send_messages=False,
add_reactions=False
)
except Exception as e:
print(e)
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Bruh!", f"**{escape_markdown(str(user))}** is not muted!"))
try:
if muted_role not in user.roles:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(f"{EMOJIS['tick_no']} Bruh!", f"**{escape_markdown(str(user))}** is not muted!"))
await user.remove_roles(muted_role, reason=f"Unmuted by: {ctx.author} ({ctx.author.id})")
except Exception:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Missing Permissions!",
f"The {muted_role.mention} role is higher than my top role.\nPlease give me a higher role to fix this issue."
))
await ctx.reply(embed=success_embed(
f"{EMOJIS['unmuted']} Done!",
f"I have unmuted **{escape_markdown(str(user))}**."
))
try:
await user.send(embed=success_embed(
f"{EMOJIS['unmuted']} You have been unmuted!",
f"You were unmuted in **{escape_markdown(str(ctx.guild))}**."
))
except Exception:
pass
@commands.cooldown(3, 30, commands.BucketType.user)
@commands.has_permissions(manage_channels=True, manage_roles=True)
@commands.bot_has_permissions(manage_channels=True, manage_roles=True)
@commands.command(help="Lock a channel.")
async def lock(self, ctx: commands.Context, channel: discord.TextChannel = None):
if channel is None:
channel = ctx.channel
if ctx.guild.default_role not in channel.overwrites:
overwrites = {ctx.guild.default_role: discord.PermissionOverwrite(send_messages=False)}
await channel.edit(overwrites=overwrites)
await ctx.send(f"{EMOJIS['tick_yes']} {channel.mention} has now been locked.")
elif channel.overwrites[ctx.guild.default_role].send_messages or channel.overwrites[ctx.guild.default_role].send_messages is None:
overwrites = channel.overwrites[ctx.guild.default_role]
overwrites.send_messages = False
await channel.set_permissions(ctx.guild.default_role, overwrite=overwrites)
await ctx.send(f"{EMOJIS['tick_yes']} {channel.mention} has now been locked.")
else:
await ctx.send(f"{EMOJIS['tick_no']} {channel.mention} is already locked ._.")
@commands.command(help="Lock the whole server.")
@commands.has_permissions(manage_guild=True, manage_channels=True, manage_roles=True)
@commands.bot_has_permissions(manage_guild=True, manage_channels=True, manage_roles=True)
@commands.cooldown(1, 60, commands.BucketType.guild)
async def lockdown(self, ctx: commands.Context):
v = Confirm(ctx, 60)
m = await ctx.reply("Are you sure you want to lock the whole server?", view=v)
await v.wait()
if not v.value:
return
await m.delete()
async with ctx.typing():
i = 0
for channel in ctx.guild.channels:
if isinstance(channel, discord.TextChannel):
await ctx.invoke(self.client.get_command('lock'), channel=channel)
await asyncio.sleep(0.5)
i += 1
await ctx.send(f"**{EMOJIS['tick_yes']} {i} channels have been locked.**")
@commands.cooldown(3, 30, commands.BucketType.user)
@commands.has_permissions(manage_channels=True, manage_roles=True)
@commands.bot_has_permissions(manage_channels=True, manage_roles=True)
@commands.command(help="Unlock a channel.")
async def unlock(self, ctx: commands.Context, channel: discord.TextChannel = None):
if channel is None:
channel = ctx.channel
if not channel.overwrites[ctx.guild.default_role].send_messages:
overwrites = channel.overwrites[ctx.guild.default_role]
overwrites.send_messages = None
await channel.set_permissions(ctx.guild.default_role, overwrite=overwrites)
await ctx.send(f"{EMOJIS['tick_yes']} {channel.mention} has now been unlocked.")
else:
await ctx.send(f"{EMOJIS['tick_no']} {channel.mention} is already unlocked ._.")
@commands.command(help="Lock the whole server.")
@commands.has_permissions(manage_guild=True, manage_channels=True, manage_roles=True)
@commands.bot_has_permissions(manage_guild=True, manage_channels=True, manage_roles=True)
@commands.cooldown(1, 60, commands.BucketType.guild)
async def unlockdown(self, ctx: commands.Context):
v = Confirm(ctx, 60)
m = await ctx.reply("Are you sure you want to unlock the whole server?", view=v)
await v.wait()
if not v.value:
return
await m.delete()
async with ctx.typing():
i = 0
for channel in ctx.guild.channels:
if isinstance(channel, discord.TextChannel):
await ctx.invoke(self.client.get_command('unlock'), channel=channel)
await asyncio.sleep(0.5)
i += 1
await ctx.send(f"**{EMOJIS['tick_yes']} {i} channels have been unlocked.**")
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.has_permissions(kick_members=True)
@commands.bot_has_guild_permissions(kick_members=True, embed_links=True)
@commands.command(help="Kick someone from your server!")
async def kick(self, ctx: commands.Context, user: discord.Member = None, *, reason='No Reason Provided'):
PREFIX = ctx.clean_prefix
if user is None:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please enter a user to kick.\nCorrect Usage: `{PREFIX}kick @user [reason]`"
))
if user == ctx.author:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply("Don't kick yourself :(")
if user == self.client.user:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply("Bruh why u wanna kick me :(")
if int(user.top_role.position) >= int(ctx.author.top_role.position) and ctx.author.id != ctx.guild.owner_id:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} No!",
f"You cannot kick **{escape_markdown(str(user))}** because they are a mod/admin."
))
try:
await user.kick(reason=f"{ctx.author} - {ctx.author.id}: {reason}")
try:
await user.send(embed=discord.Embed(
title="You have been kicked!",
description=f"You were kicked from the server: **{ctx.guild}**",
color=RED_COLOR
).add_field(name="Moderator", value=f" {ctx.author.mention} - {escape_markdown(str(ctx.author))}", inline=False
).add_field(name="Reason", value=reason, inline=False))
except Exception:
pass
await ctx.message.reply(embed=success_embed(f"{EMOJIS['tick_yes']} User Kicked!", f"**{escape_markdown(str(user))}** has been kicked!"))
except Exception:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed("Error!", f"I cannot kick **{escape_markdown(str(user))}** because they are a mod/admin."))
@commands.cooldown(1, 30, commands.BucketType.user)
@commands.has_permissions(ban_members=True)
@commands.bot_has_guild_permissions(ban_members=True, embed_links=True)
@commands.command(help="Ban multiple people from your server!")
async def massban(self, ctx: commands.Context, users: commands.Greedy[Union[discord.Member, discord.User, int, str]] = None, *, reason='No Reason Provided'):
if not users:
ctx.command.reset_cooldown(ctx)
return await ctx.reply("Please provide some users to ban!")
for user in users:
await ctx.invoke(self.client.get_command('ban'), user=user, reason=reason)
@commands.cooldown(1, 30, commands.BucketType.user)
@commands.has_permissions(kick_members=True)
@commands.bot_has_guild_permissions(ban_members=True, embed_links=True)
@commands.command(help="Kick multiple people from your server!")
async def masskick(self, ctx: commands.Context, users: commands.Greedy[discord.Member] = None, *, reason='No Reason Provided'):
if not users:
ctx.command.reset_cooldown(ctx)
return await ctx.reply("Please provide some users to kick!")
for user in users:
await ctx.invoke(self.client.get_command('kick'), user=user, reason=reason)
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.has_permissions(ban_members=True)
@commands.bot_has_guild_permissions(ban_members=True, embed_links=True)
@commands.command(help="Ban someone from your server!", aliases=['hackban'])
async def ban(self, ctx: commands.Context, user: Union[discord.Member, discord.User, int, str] = None, *, reason="No Reason Provided"):
PREFIX = ctx.clean_prefix
if user is None:
ctx.command.reset_cooldown(ctx)
return await ctx.send(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please enter a user to ban.\nCorrect Usage: `{PREFIX}ban @user [reason]`"
))
if isinstance(user, str):
raise commands.UserNotFound(user)
if isinstance(user, int):
try:
await ctx.guild.ban(discord.Object(id=user), reason=f"{ctx.author} - {ctx.author.id}: {reason}")
await ctx.send(embed=success_embed(f"{EMOJIS['tick_yes']}", "They have been banned."))
except Exception as e:
ctx.command.reset_cooldown(ctx)
return await ctx.send(embed=error_embed(f"{EMOJIS['tick_no']} Unable to ban", e))
if user == ctx.author:
ctx.command.reset_cooldown(ctx)
return await ctx.send("Don't ban yourself :(")
if user == self.client.user:
ctx.command.reset_cooldown(ctx)
return await ctx.send("Bruh why u wanna ban me :(")
if isinstance(user, discord.Member):
if int(user.top_role.position) >= int(ctx.author.top_role.position) and ctx.author.id != ctx.guild.owner_id:
ctx.command.reset_cooldown(ctx)
return await ctx.send(embed=error_embed(
f"{EMOJIS['tick_no']} No!",
f"You cannot ban **{escape_markdown(str(user))}** because they are a mod/admin."
))
try:
await ctx.guild.ban(user, reason=f"{ctx.author} - {ctx.author.id}: {reason}")
try:
await user.send(embed=discord.Embed(
title="You have been banned!",
description=f"You were banned from the server: **{ctx.guild}**",
color=RED_COLOR
).add_field(name="Moderator", value=f"{ctx.author.mention} - {escape_markdown(str(ctx.author))}", inline=False
).add_field(name="Reason", value=reason, inline=False))
except Exception:
pass
await ctx.send(embed=success_embed(f"{EMOJIS['tick_yes']} User Banned!", f"**{escape_markdown(str(user))}** has been banned!"))
except Exception:
ctx.command.reset_cooldown(ctx)
return await ctx.send(embed=error_embed("Error!", f"I cannot ban **{escape_markdown(str(user))}** because they are a mod/admin."))
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.has_permissions(ban_members=True)
@commands.bot_has_guild_permissions(ban_members=True, embed_links=True)
@commands.command(help="Unban a user from your server.")
async def unban(self, ctx: commands.Context, *, user=None):
PREFIX = ctx.clean_prefix
if user is None:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage",
f"Please enter a member to unban.\nCorrect Usage: `{PREFIX}unban <user>`\nExample: `{PREFIX}unban egirl#1234`"
))
banned_users = await ctx.guild.bans()
try:
user_id = int(user)
except Exception:
user_id = None
if user_id is not None:
for ban_entry in banned_users:
user_uwu = ban_entry.user
if user_id == user_uwu.id:
await ctx.guild.unban(user_uwu, reason=f"Command used by: {ctx.author} ({ctx.author.id})")
return await ctx.message.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Member Unbanned!",
f"**{escape_markdown(str(user_uwu))}** has been unbanned."
))
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Not found!",
f"Either member with ID: **{user_id}** doesn't exist OR they are not banned."
))
elif user_id is None:
if '#' not in user:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage",
"Please provide a proper user to unban.\nExample of a proper user: `abcd#1234`"
))
member_name, member_discriminator = user.split('#')
for ban_entry in banned_users:
user_uwu = ban_entry.user
print(user_uwu)
print(user)
if (user_uwu.name, user_uwu.discriminator) == (member_name, member_discriminator):
await ctx.guild.unban(user_uwu, reason=f"Command used by: {ctx.author} ({ctx.author.id})")
return await ctx.message.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Member Unbanned!",
f"**{escape_markdown(str(user_uwu))}** hass been unbanned."
))
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Not found!",
f"User **{escape_markdown(str(user))}** doesn't exist OR they aren't banned."
))
@commands.cooldown(1, 5, commands.BucketType.user)
@commands.has_permissions(manage_messages=True)
@commands.bot_has_guild_permissions(manage_messages=True, embed_links=True)
@commands.command(aliases=['clear'], help="Purge a channel.")
async def purge(self, ctx: commands.Context, amount='10', user: discord.Member = None):
PREFIX = ctx.clean_prefix
try:
amount = int(amount)
except Exception:
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please enter an integer as the amount.\n\nCorrect Usage: `{PREFIX}purge <amount>` OR `{PREFIX}purge <amount> @user`\nExample: `{PREFIX}purge 10` OR `{PREFIX}purge 10 @egirl`"
))
if amount > 500:
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Error!",
"The amount cannot be greater than **500**"
))
if user is None:
await ctx.message.delete()
deleted = await ctx.channel.purge(limit=(amount))
uwu = []
owo = []
for msg in deleted:
uwu.append(msg.author.id)
if msg.author.id not in owo:
owo.append(msg.author.id)
hee = ""
for e in owo:
hee += f"<@{e}> - **{uwu.count(e)}** messages\n"
return await ctx.send(embed=success_embed(
f"{EMOJIS['tick_yes']} Channel Purged!",
f"**{amount+1}** message(s) deleted!\n\n{hee}"
), delete_after=5)
elif user is not None:
def check(e):
return e.author == user
await ctx.message.delete()
await ctx.channel.purge(limit=amount, check=check)
return await ctx.send(embed=success_embed(
f"{EMOJIS['tick_yes']} Channel Purged!",
f"**{amount}** message(s) deleted from {user.mention}"
), delete_after=5)
@commands.cooldown(1, 15, commands.BucketType.user)
@commands.has_guild_permissions(manage_channels=True)
@commands.bot_has_guild_permissions(manage_channels=True)
@commands.command(help="Change the slowmode of a channel.")
async def slowmode(self, ctx: commands.Context, amount=None):
PREFIX = ctx.clean_prefix
if amount is None:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please enter an amount.\nCorrect Usage: `{PREFIX}slowmode <amount>`\nExample: `{PREFIX}slowmode 5s`"
))
try:
amount = int(amount)
if amount < 0:
ctx.command.reset_cooldown(ctx)
return await ctx.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
"The slowmode value can't be negative."
))
converted_time = [amount, amount, 'second(s)']
except Exception:
converted_time = convert(amount)
if converted_time == -1:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please enter a proper unit of time (s/m/h/d).\nExample: `{PREFIX}slowmode 10s` OR `{PREFIX}slowmode 1h`"
))
if converted_time == -2:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please enter a proper integer for time.\nExample: `{PREFIX}slowmode 10s`"
))
if converted_time == -3:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please enter a positive value of time next time.\nExample: `{PREFIX}slowmode 10s`"
))
if converted_time[0] > 21600:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Too high!",
"The maximum slowmode can only be **6 hours** (21600 seconds)."
))
await ctx.channel.edit(slowmode_delay=converted_time[0])
await ctx.message.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Slowmode Changed!",
f"The slowmode has now been set to **{converted_time[1]} {converted_time[2]}**"
))
@commands.cooldown(2, 15, commands.BucketType.user)
@commands.has_guild_permissions(kick_members=True)
@commands.command(help="Warn a user.")
async def warn(self, ctx: commands.Context, user: discord.Member = None, *, reason='No Reason Provided'):
custom_prefix = ctx.clean_prefix
if user is None:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please mention a user next time.\nExample: `{custom_prefix}warn @egirl spamming`"
))
if user == ctx.author:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Bruh!",
"You cannot warn yourself."
))
if user == self.client.user:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Bruh!",
"You cannot warn me."
))
if user.bot:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Bruh!",
"You cannot warn bots."
))
if int(ctx.author.top_role.position) <= int(user.top_role.position):
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Error!",
"You cannot warn a mod/admin."
))
if len(reason) > 500:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Too long!",
"The reason provided was too long, please try again."
))
random_generated_id = gen_random_string(20)
await self.client.warnings.insert_one({
"_id": random_generated_id,
"user_id": user.id,
"guild_id": ctx.guild.id,
"moderator": ctx.author.id,
"reason": reason
})
try:
await user.send(embed=error_embed(
"You have been warned!",
f"You were warned from the server: **{escape_markdown(str(ctx.guild))}**"
).add_field(name="Moderator:", value=f"{ctx.author.mention} - {escape_markdown(str(ctx.author))}", inline=False
).add_field(name="Reason:", value=reason, inline=False
).set_footer(text=f"Warn ID: {random_generated_id}"))
except Exception as e:
print(e)
await ctx.message.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} User Warned!",
f"**{escape_markdown(str(user))}** has been warned!"
).set_footer(text=f"Warn ID: {random_generated_id}"))
@commands.cooldown(2, 10, commands.BucketType.user)
@commands.has_guild_permissions(kick_members=True)
@commands.command(aliases=['removewarn', 'deletewarn', 'removewarning', 'deletewarning', 'delwarning'], help="Delete a warning.")
async def delwarn(self, ctx: commands.Context, warn_id=None):
prefix = ctx.clean_prefix
if warn_id is None:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Invalid Usage!",
f"Please enter a warn ID.\nExample: `{prefix}delwarn N3vE4g0nN4g1V3y0UUp`"
))
ah_yes = await self.client.warnings.find_one({
"_id": warn_id,
"guild_id": ctx.guild.id
})
if ah_yes is None:
ctx.command.reset_cooldown(ctx)
return await ctx.message.reply(embed=error_embed(
f"{EMOJIS['tick_no']} Not found!",
"The provided warning ID is Invalid.\nPlease enter a valid warning ID."
))
await self.client.warnings.delete_one({"_id": warn_id})
return await ctx.message.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Warning Removed!",
f"The warning: `{warn_id}` was deleted!"
).add_field(
name="Some info on the warning:",
value=f"""
```yaml
User Warned: {self.client.get_user(ah_yes['user_id'])}
Moderator: {self.client.get_user(ah_yes['moderator'])}
Reason: {ah_yes['reason']}
```
""",
inline=False
))
@commands.cooldown(1, 15, commands.BucketType.user)
@commands.has_guild_permissions(kick_members=True)
@commands.command(aliases=['warnings'], help="Check warnings of a user!")
async def warns(self, ctx, user: discord.Member = None):
if user is None:
user = ctx.author
ah_yes = self.client.warnings.find({
"user_id": user.id,
"guild_id": ctx.guild.id
})
uwu = await ah_yes.to_list(length=None)
if len(uwu) == 0:
return await ctx.message.reply(embed=success_embed(
f"{EMOJIS['tick_yes']} Warnings!",
f"**{escape_markdown(str(user))}** has no warnings."
))
uwu_embed = success_embed(
f"{EMOJIS['tick_yes']} Warnings",
f"Warnings of **{escape_markdown(str(user))}**."
)
if len(uwu) <= 25:
for e in uwu:
uwu_embed.add_field(
name=f"Warning ID: `{e['_id']}`",
value=f"""
```yaml
Moderator: {self.client.get_user(e['moderator'])}
Reason: {e['reason']}
```
""",
inline=False
)
if len(uwu) > 25:
i = 0
for e in uwu:
if i == 25:
break
uwu_embed.add_field(
name=f"Warning ID: `{e['_id']}`",
value=f"""
```yaml
Moderator: {self.client.get_user(e['moderator'])}
Reason: {e['reason']}
```
""",
inline=False
)
i += 1
return await ctx.message.reply(embed=uwu_embed)
def setup(client):
client.add_cog(mod(client))
| StarcoderdataPython |
27919 | from rest_framework import serializers
from . import models
class ShelterSerializer(serializers.ModelSerializer):
class Meta:
model = models.Shelter
fields = ('name',
'location')
class DogSerializer(serializers.ModelSerializer):
class Meta:
model = models.Dog
fields = ('shelter',
'name',
'description',
'intake_date')
class ErrorSerializer(serializers.Serializer):
error_message = serializers.CharField(max_length=200)
| StarcoderdataPython |
4810617 | <reponame>jstraub/tdp
import numpy as np
import matplotlib.pyplot as plt
def GetEmptyFig():
fig=plt.figure(frameon=False)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
return fig
#I = np.resize(np.arange(9),(100,100))
#fig = GetEmptyFig()
#plt.imshow(I,
# interpolation="nearest", cmap=plt.get_cmap("Set1"))
#plt.savefig("../shaders/labels/rnd.png",figure=fig)
#plt.show()
I = np.random.rand(101,101)
for i in range(1,101):
for j in range(1,101):
while np.abs(I[i,j] - I[i-1,j]) < 0.1 or np.abs(I[i,j] - I[i,j-1]) < 0.1:
I[i,j] = np.random.rand(1)
I = I[1::,1::]
fig = GetEmptyFig()
plt.imshow(I,
interpolation="nearest", cmap=plt.get_cmap("jet"))
plt.savefig("../shaders/labels/rnd.png",figure=fig)
plt.show()
| StarcoderdataPython |
16366 | <reponame>vragonx/DiscordStatusChanger
from colorama import Fore, init, Style
import requests
import random
import ctypes
import time
import os
ctypes.windll.kernel32.SetConsoleTitleW('Discord Status Changer')
init(convert=True, autoreset=True)
SuccessCounter = 0
ErrorCounter = 0
os.system('cls')
print(Fore.RED + '\n[' + Fore.WHITE + Style.BRIGHT + '0' + Style.RESET_ALL + Fore.RED + '] ' + Fore.WHITE + Style.BRIGHT + 'Discord Status Changer by vragon')
print(Fore.GREEN + '\n[' + Fore.WHITE + Style.BRIGHT + '1' + Style.RESET_ALL + Fore.GREEN + '] ' + Fore.WHITE + Style.BRIGHT + 'Text')
print(Fore.GREEN + '[' + Fore.WHITE + Style.BRIGHT + '2' + Style.RESET_ALL + Fore.GREEN + '] ' + Fore.WHITE + Style.BRIGHT + 'Text including emoji')
try:
option = int(input(Fore.GREEN + '\n> ' + Fore.WHITE + Style.BRIGHT))
except ValueError as e:
print(' ')
print(Fore.RED + '[ERROR] ' + Fore.WHITE + Style.BRIGHT + str(e))
input()
quit()
if option == 1:
os.system('cls')
print(Fore.WHITE + Style.BRIGHT + '\nToken:')
token = str(input(Fore.GREEN + '> ' + Fore.WHITE + Style.BRIGHT))
print(' ')
def ChangeStatus():
global SuccessCounter
global ErrorCounter
try:
session = requests.Session()
headers = {
'authorization': token,
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/0.0.306 Chrome/78.0.3904.130 Electron/7.1.11 Safari/537.36',
'content-type': 'application/json'
}
text = random.choice(['Text1', 'Text2', 'Text3'])
data = '{"custom_status":{"text":"' + text + '"}}'
r = session.patch('https://discordapp.com/api/v6/users/@me/settings', headers=headers, data=data)
if '"custom_status": {"text": "' in r.text:
print(Fore.GREEN + '[SUCCESS] ' + Fore.WHITE + Style.BRIGHT + 'Status changed: ' + str(text))
SuccessCounter += 1
ctypes.windll.kernel32.SetConsoleTitleW('Discord Status Changer | Success: ' + str(SuccessCounter) + ' | Errors: ' + str(ErrorCounter))
else:
print(r.text)
except:
pass
time.sleep(1)
while True:
ChangeStatus()
elif option == 2:
os.system('cls')
print(Fore.WHITE + Style.BRIGHT + '\nToken:')
token = str(input(Fore.GREEN + '> ' + Fore.WHITE + Style.BRIGHT))
print(Fore.WHITE + Style.BRIGHT + '\nEmoji name:')
EmojiName = str(input(Fore.GREEN + '> ' + Fore.WHITE + Style.BRIGHT))
print(Fore.WHITE + Style.BRIGHT + '\nEmoji ID:')
try:
EmojiID = int(input(Fore.GREEN + '> ' + Fore.WHITE + Style.BRIGHT))
except ValueError as e:
print(' ')
print(Fore.RED + '[ERROR] ' + Fore.WHITE + Style.BRIGHT + str(e))
input()
quit()
print(' ')
def ChangeStatus():
global SuccessCounter
global ErrorCounter
try:
session = requests.Session()
headers = {
'authorization': token,
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/0.0.306 Chrome/78.0.3904.130 Electron/7.1.11 Safari/537.36',
'content-type': 'application/json'
}
text = random.choice(['Text1', 'Text2', 'Text3'])
data = '{"custom_status":{"text":"' + text + '","emoji_id":"' + str(EmojiID) + '","emoji_name":"' + str(EmojiName) + '"}}'
r = session.patch('https://discordapp.com/api/v6/users/@me/settings', headers=headers, data=data)
if 'custom_status' in r.text:
print(Fore.GREEN + '[SUCCESS] ' + Fore.WHITE + Style.BRIGHT + 'Status changed: ' + str(text))
SuccessCounter += 1
ctypes.windll.kernel32.SetConsoleTitleW('Discord Status Changer | Success: ' + str(SuccessCounter) + ' | Errors: ' + str(ErrorCounter))
else:
print(r.text)
except:
pass
time.sleep(1)
while True:
ChangeStatus()
| StarcoderdataPython |
4829321 | """
This module provides common classes to document APIs.
The purpose of these classes is to provide an abstraction layer on top of a specific
set of rules to document APIs. For example, it should be possible to generate both
OpenAPI Documentation v2 and v3 (currently only v3 is supported) from these types, and
potentially in the future v4, if it will be so different from v3.
"""
import json
from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from http import HTTPStatus
from typing import (
Any,
Callable,
Dict,
Generic,
List,
Mapping,
Optional,
Type,
TypeVar,
Union,
)
from blacksheep.messages import Request
from blacksheep.server.application import Application
from blacksheep.server.authorization import allow_anonymous
from blacksheep.server.files.static import get_response_for_static_content
from blacksheep.server.responses import FriendlyEncoder
from blacksheep.server.routing import Route, Router
from openapidocs.common import Format, OpenAPIRoot, Serializer
from .ui import SwaggerUIProvider, UIOptions, UIProvider
T = TypeVar("T")
class ParameterSource(Enum):
QUERY = "query"
HEADER = "header"
PATH = "path"
COOKIE = "cookie"
@dataclass
class RequestBodyInfo:
description: Optional[str] = None
examples: Optional[Dict[str, Any]] = None
@dataclass
class ParameterExample:
value: Any
name: Optional[str] = None
summary: Optional[str] = None
description: Optional[str] = None
@dataclass
class ParameterInfo:
description: str
value_type: Optional[Type] = None
source: Optional[ParameterSource] = None
required: Optional[bool] = None
deprecated: Optional[bool] = None
allow_empty_value: Optional[bool] = None
example: Optional[Any] = None
examples: Optional[Dict[str, ParameterExample]] = None
@dataclass
class ResponseExample:
value: Any
name: Optional[str] = None
summary: Optional[str] = None
description: Optional[str] = None
@dataclass
class ContentInfo:
type: Type[Any]
examples: Optional[List[Union[ResponseExample, Any]]] = None
content_type: str = "application/json"
@dataclass
class HeaderInfo:
type: Type
description: Optional[str] = None
example: Any = None
@dataclass
class ResponseInfo:
description: str
headers: Optional[Dict[str, HeaderInfo]] = None
content: Optional[List[ContentInfo]] = None
ResponseStatusType = Union[int, str, HTTPStatus]
def response_status_to_str(value: ResponseStatusType) -> str:
if isinstance(value, HTTPStatus):
return str(value.value) # type: ignore
if isinstance(value, str):
return value
return str(value)
@dataclass
class EndpointDocs:
summary: Optional[str] = None
description: Optional[str] = None
tags: Optional[List[str]] = None
parameters: Optional[Mapping[str, ParameterInfo]] = None
request_body: Optional[RequestBodyInfo] = None
responses: Optional[Dict[ResponseStatusType, Union[str, ResponseInfo]]] = None
ignored: Optional[bool] = None
deprecated: Optional[bool] = None
on_created: Optional[Callable[[Any, Any], None]] = None
OpenAPIRootType = TypeVar("OpenAPIRootType", bound=OpenAPIRoot)
class OpenAPIEndpointException(Exception):
pass
class APIDocsHandler(Generic[OpenAPIRootType], ABC):
"""
Provides methods to handle the documentation for an API.
"""
def __init__(
self,
*,
ui_path: str = "/docs",
json_spec_path: str = "/openapi.json",
yaml_spec_path: str = "/openapi.yaml",
preferred_format: Format = Format.JSON,
anonymous_access: bool = True,
) -> None:
self._handlers_docs: Dict[Any, EndpointDocs] = {}
self.use_docstrings: bool = True
self.include: Optional[Callable[[str, Route], bool]] = None
self.json_spec_path = json_spec_path
self.yaml_spec_path = yaml_spec_path
self._json_docs: bytes = b""
self._yaml_docs: bytes = b""
self.preferred_format = preferred_format
self.anonymous_access = anonymous_access
self.ui_providers: List[UIProvider] = [SwaggerUIProvider(ui_path)]
def __call__(
self,
doc: Optional[EndpointDocs] = None,
*,
summary: Optional[str] = None,
description: Optional[str] = None,
tags: Optional[List[str]] = None,
parameters: Optional[Mapping[str, ParameterInfo]] = None,
request_body: Optional[RequestBodyInfo] = None,
responses: Optional[Dict[ResponseStatusType, Union[str, ResponseInfo]]] = None,
ignored: Optional[bool] = None,
deprecated: Optional[bool] = None,
on_created: Optional[Callable[[Any, Any], None]] = None,
) -> Any:
def decorator(fn):
if doc:
self._handlers_docs[fn] = doc
return fn
self._handlers_docs[fn] = EndpointDocs(
summary=summary,
description=description,
tags=tags,
request_body=request_body,
responses=responses,
parameters=parameters,
ignored=ignored,
deprecated=deprecated,
on_created=on_created,
)
return fn
return decorator
def get_handler_docs(self, obj: Any) -> Optional[EndpointDocs]:
return self._handlers_docs.get(obj)
def get_handler_docs_or_set(self, obj: Any) -> EndpointDocs:
if obj in self._handlers_docs:
return self._handlers_docs[obj]
docs = EndpointDocs()
self._handlers_docs[obj] = docs
return docs
def get_summary(self, handler: Any) -> Optional[str]:
docs = self.get_handler_docs(handler)
return docs.summary if docs else None
def get_description(self, handler: Any) -> Optional[str]:
docs = self.get_handler_docs(handler)
return docs.description if docs else None
def ignore(self, value: bool = True):
"""Excludes a request handler from API documentation."""
def decorator(fn):
self.get_handler_docs_or_set(fn).ignored = value
return fn
return decorator
def deprecated(self):
def decorator(fn):
self.get_handler_docs_or_set(fn).deprecated = True
return fn
return decorator
def summary(self, text: str):
"""Assigns a summary to a request handler."""
def decorator(fn):
self.get_handler_docs_or_set(fn).summary = text
return fn
return decorator
def tags(self, *tags: str):
"""Assigns tags to a request handler."""
def decorator(fn):
self.get_handler_docs_or_set(fn).tags = list(tags)
return fn
return decorator
def _get_request_handler(self, route: Route) -> Any:
if hasattr(route.handler, "root_fn"):
return route.handler.root_fn
# this happens rarely, when an app doesn't apply any middleware and
# any normalization
return route.handler # pragma: no cover
def get_handler_tags(self, handler: Any) -> Optional[List[str]]:
docs = self.get_handler_docs(handler)
if docs and docs.tags:
return docs.tags
if hasattr(handler, "controller_type"):
# default to controller's class name for the tags
return [handler.controller_type.class_name().title()]
return None
def is_deprecated(self, handler: Any) -> Optional[bool]:
docs = self.get_handler_docs(handler)
return docs.deprecated if docs else None
def router_to_paths_dict(
self, router: Router, mapper: Callable[[Route], T]
) -> Dict[str, Dict[str, T]]:
routes_dictionary: Dict[str, Dict[str, T]] = {}
for method, routes in router.routes.items():
for route in routes:
key = route.mustache_pattern
if self.include and not self.include(key, route):
continue
handler = self._get_request_handler(route)
docs = self.get_handler_docs(handler)
if docs and docs.ignored:
continue
if key not in routes_dictionary:
if "*" in key:
# ignore catch-all routes from api docs
continue
routes_dictionary[key] = {}
routes_dictionary[key][method.decode("utf8").lower()] = mapper(route)
return routes_dictionary
def get_spec_path(self) -> str:
if self.preferred_format == Format.JSON:
return self.json_spec_path
if self.preferred_format == Format.YAML:
return self.yaml_spec_path
raise OpenAPIEndpointException(
f"Unhandled preferred format {self.preferred_format}"
)
def register_docs_handler(self, app: Application) -> None:
current_time = datetime.utcnow().timestamp()
@self.ignore()
@allow_anonymous(self.anonymous_access)
@app.route(self.json_spec_path, methods=["GET", "HEAD"])
def get_open_api_json(request: Request):
return get_response_for_static_content(
request,
b"application/json",
self._json_docs,
current_time,
cache_time=1,
)
@self.ignore()
@allow_anonymous(self.anonymous_access)
@app.route(self.yaml_spec_path, methods=["GET", "HEAD"])
def get_open_api_yaml(request: Request):
return get_response_for_static_content(
request, b"text/yaml", self._yaml_docs, current_time, cache_time=1
)
def normalize_example(self, value: Any) -> Any:
"""
This method is used to ensure that YAML representations of objects look
exactly the same as JSON representations.
"""
return json.loads(json.dumps(value, cls=FriendlyEncoder))
@abstractmethod
def generate_documentation(self, app: Application) -> OpenAPIRootType:
"""Produces the object that describes the API."""
def on_docs_generated(self, docs: OpenAPIRootType) -> None:
"""
Extensibility point. Override this method to modify an OpenAPI object
before it is serialized to JSON and YAML format.
"""
def get_ui_page_title(self) -> str:
return "API Docs" # pragma: no cover
async def build_docs(self, app: Application) -> None:
docs = self.generate_documentation(app)
self.on_docs_generated(docs)
serializer = Serializer()
ui_options = UIOptions(
spec_url=self.get_spec_path(), page_title=self.get_ui_page_title()
)
for ui_provider in self.ui_providers:
ui_provider.build_ui(ui_options)
self._json_docs = serializer.to_json(docs).encode("utf8")
self._yaml_docs = serializer.to_yaml(docs).encode("utf8")
def bind_app(self, app: Application) -> None:
if app.started:
raise TypeError(
"The application is already started. "
"Use this method before starting the application."
)
for ui_provider in self.ui_providers:
ui_handler = ui_provider.get_ui_handler()
ui_handler = self.ignore()(ui_handler)
ui_handler = allow_anonymous(self.anonymous_access)(ui_handler)
app.router.add_get(ui_provider.ui_path, ui_handler)
self.register_docs_handler(app)
app.after_start += self.build_docs
| StarcoderdataPython |
4831241 | <gh_stars>1-10
class ScriptPlatformScriptGen:
def __init__(self):
pass
def GenerateScriptStart(self):
Script = "var filePath = activeDocument.fullName.path;\r\n"
#Script += "var newDoc = app.activeDocument.duplicate();\r\n"
#Script += "app.activeDocument = newDoc;\r\n"
return Script
def GenerateCardEnd(self, sheet, row):
Script = "var pngExportOpts = new ExportOptionsPNG24();\r\n"
Script += "\r\n"
Script += 'var savePath = File(filePath + "/' + str(sheet.cell_value(row,0)).replace(".","") + '.png");' + "\r\n"
Script += "pngExportOpts.antiAliasing = true;\r\n"
Script += "pngExportOpts.artBoardClipping = true;\r\n"
Script += "pngExportOpts.horizontalScale = 100.0;\r\n"
Script += "pngExportOpts.transparency = true;\r\n"
Script += "pngExportOpts.verticalScale = 100.0;\r\n"
Script += "app.activeDocument.exportFile( savePath, ExportType.PNG24, pngExportOpts );\r\n"
return Script
def GenerateScriptEnd(self):
return "app.activeDocument.close(SaveOptions.DONOTSAVECHANGES);\r\n"
| StarcoderdataPython |
1691604 | <filename>board.py
"""Board module"""
import copy
import math
import random
import string
def create_solution_board(width=6, height=6):
"""Randomly generates a new board
with width by height size
"""
if type(width) != int or type(height) != int:
raise TypeError('Arguments must be int type')
boxes = width * height
if boxes % 2 != 0:
raise ValueError('Number of boxes is not multiple of two')
numbers = list(range(1, boxes // 2 + 1))
numbers = numbers + numbers
random.shuffle(numbers)
board = []
for index in range(height):
board.append([])
for _ in range(width):
random_number = numbers.pop()
board[index].append(random_number)
board[index] = board[index]
return board
def start_board(solution_board):
"""Generates an empty board from a solution board"""
empty_board = copy.deepcopy(solution_board)
for row in empty_board:
for index in range(len(row)):
row[index] = '-'
return empty_board
def format_board(board):
"""Returns a printable board"""
fboard = ''
width = len(board[0])
height = len(board)
cards = (width * height) // 2
letters = string.ascii_uppercase[:width]
if width > len(letters):
raise ValueError('Board width is greater than ' + len(letters))
number_space = int(math.log10(height)) + 2
number_format = '{{:^{}}}'.format(number_space)
max_digits = int(math.log10(cards)) + 2
box_format = '|{{:^{}}}'.format(max_digits)
letter_row = ' ' * number_space
for letter in letters:
letter_row += box_format.format(letter)
letter_row += '\n'
for row in range(height):
fboard += number_format.format(row + 1)
for number in board[row]:
fboard += box_format.format(number)
fboard += '\n'
return letter_row + fboard
def get_x_coordinate(x, board):
"""Returns input as index in x coordinate"""
width = len(board[0])
letters = string.ascii_lowercase[:width]
index = letters.find(x.strip().lower())
if index >= 0:
return index
else:
return -1
def get_y_coordinate(y, board):
"""Returns input as index in y coordinate"""
try:
y = int(y)
except ValueError:
return -1
height = len(board)
y -= 1
if y >= 0 and y < height:
return y
else:
return -1
| StarcoderdataPython |
3359210 | <filename>migrations/211-recategorize-canned-responses.py
"""
All the forum canned responses are stored in KB articles. There is a
category for them now. Luckily they follow a simple pattern of slugs, so
they are easy to find.
"""
from django.conf import settings
from django.db.models import Q
from kitsune.wiki.models import Document
from kitsune.wiki.config import CANNED_RESPONSES_CATEGORY
canned = Q(slug__startswith='forum-response-')
canned |= Q(slug='common-forum-responses')
canned &= Q(locale=settings.WIKI_DEFAULT_LANGUAGE)
Document.objects.filter(canned).update(category=CANNED_RESPONSES_CATEGORY)
| StarcoderdataPython |
1776966 | <filename>speechrec_test.py
#!/usr/bin/env python
import speech_recognition as sr
rec = sr.Recognizer()
mic = sr.Microphone()
while True:
with mic as source:
#ノイズ対策
rec.adjust_for_ambient_noise(source)
audio = rec.listen(source)
try:
print(rec.recognize_google(audio, language='ja-JP'))
except sr.UnknownValueError:
print("音声を認識できません")
except sr.RequestError as e:
print("Could not request results from Google Speech Recognition service; {0}".format(e))
| StarcoderdataPython |
3320768 | import numpy as np
class SGD:
def __init__(self, learning_rate, exponential_weight):
self.learning_rate = learning_rate
self.exponential_weight = 1.
if exponential_weight != None:
self.moving_average = None
self.exponential_weight = exponential_weight
def __call__(self, weights, derivatives):
if self.exponential_weight < 1:
if self.moving_average is None:
self.moving_average = np.zeros_like(weights)
self.moving_average = self.exponential_weight * self.moving_average + (1. - self.exponential_weight) * derivatives
derivatives = self.moving_average
return weights - self.learning_rate * derivatives | StarcoderdataPython |
1667085 | <reponame>treilly94/mango-exercise
from numpy import random
class DistributionGenerator:
"""
Generate random data within a given distribution
"""
def __init__(self, num_samples, mean=0, sd=1, interval=1, n=1, p=0.5):
"""
:param num_samples: Int The number of samples to be returned
:param mean: The Mean value for a normal distribution
:param sd: The Standard Deviation for a normal distribution
:param interval: The Interval for a poisson distribution
:param n: The number of samples for a binomial distribution
:param p: The probability for a binomial distribution
"""
self.num_samples = num_samples
self.mean = mean
self.sd = sd
self.interval = interval
self.n = n
self.p = p
self.data = None
def normal(self):
""" Produce an array of values in a normal distribution
:return: ndarray
"""
self.data = random.normal(size=self.num_samples, loc=self.mean, scale=self.sd)
return self.data
def poisson(self):
""" Produce an array of values in a poisson distribution
:return: ndarray
"""
self.data = random.poisson(size=self.num_samples, lam=self.interval)
return self.data
def binomial(self):
""" Produce an array of values in a binomial distribution
:return: ndarray
"""
self.data = random.binomial(size=self.num_samples, n=self.n, p=self.p)
return self.data
def summarise(self):
""" Print summary statistics about the last dataset created
:return:
"""
if self.data is None:
print("There is no current data")
else:
print("Min: " + str(self.data.min()))
print("Max: " + str(self.data.max()))
print("Mean: " + str(self.data.mean()))
print("Standard deviation: " + str(self.data.std()))
| StarcoderdataPython |
65350 | <gh_stars>0
app_name = "users"
urlpatterns = [
]
| StarcoderdataPython |
1724453 | <reponame>ThBlitz/Self-Driving-car<filename>tfBlitz.py
import tensorflow as tf
import os
import glob
import numpy as np
from random import shuffle
import datetime
import time
from contextlib import redirect_stdout
from tensorflow import keras
# use single folder directory for a single data type
# only one tfinfo.txt file is created for a single data type in a single folder directory
# tfinfo.txt contains valuable information of the data type used for unpacking .tfrecord files
# NEVER DELETE tfinfo.txt else data won't be unpacked
# In case if tfinfo.txt was deleted enter data type of each element into the below list
# format 'nth-element_shape_dtype'
tffile=[]
tweaksenabled=[]
parse_parms={}
# Eg: for 2 elements X , Y of shapes (800,600), (4,) and dtypes uint8, int32
# THEN tffile=['0_(800,600)_uint8','1_(4,)_int32']
# Eg: for n elements A,B,C...etc of shapes (xxx,yyy),(xx,yy),(aaa,fff)...etc and dtypes float32,int64,int32... etc
# THEN tffile=['0_(xxx,yyy)_float32','1_(xx,yy)_int64','2_(aaa,fff)_int32', . . . etc upto nth element]
# Incase if you dont know how to find dtype or shape use print(X.dtype) , print(X.shape) for nparrays
# tweaks={
# 'SHUFFLE_BUFFER_SIZE':1000,
# 'BATCH_SIZE':40,
# 'PREFETCH_NUM_OF_BATCHES':2 -->> prefetchs 2 batches into GPU memory which is 40x2 elements
# }
def tfwrite(filepath,data,firstloop):
tfrecordwriter = tf.io.TFRecordWriter(f'{filepath}.tfrecord')
info=[]
for i in data:
features = {}
count = 0
for array in i:
shape = array.shape
dtype = array.dtype
if firstloop == True:
info.append(f'{count}_{shape}_{dtype}')
features.update({f'array{count}': _bytes_feature(array.tostring())})
count = count + 1
example = tf.train.Example(features=tf.train.Features(feature=features))
tfrecordwriter.write(example.SerializeToString())
if firstloop == True:
with open(f'{os.path.dirname(filepath)}\\tfDATAinfo.txt', 'w') as f:
for lines in info:
f.write(lines + '\n')
firstloop=False
tfrecordwriter.close()
return
def listdevices(deviceslist):
info=[]
for device in deviceslist:
info.append(tf.config.experimental.list_physical_devices(device))
print(info)
return info
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def parsefunction(example_proto):
print('==============================================================================')
print('----------> >> EXAMPLE DATA FORMAT USED << <----------')
print(example_proto)
print('')
features={}
for line in tffile:
info=line.split('_')
features.update({'array'+info[0]:tf.io.FixedLenFeature([],tf.string)})
parsed_features = tf.io.parse_single_example(example_proto, features)
elements=[]
print('----------> >> EXPECTED DATA TO BE FED INTO THE PIPELINE << <----------')
for line in tffile:
shapelist = []
info=line.split('_')
print(info)
array=parsed_features['array'+info[0]]
shape=eval(info[1])
for i in shape:
shapelist.append(i)
# shapelist.insert(0,-1)
shapelist.append(1)
shape=tuple(shapelist)
dtype=info[2]
if dtype=='uint8':
array=tf.io.decode_raw(array,tf.uint8)
elif dtype=='int32':
array=tf.io.decode_raw(array,tf.int32)
elif dtype=='int64':
array=tf.io.decode_raw(array,tf.int64)
elif dtype=='float32':
array=tf.io.decode_raw(array,tf.float32)
elif dtype=='float64':
array=tf.io.decode_raw(array,tf.float64)
elif dtype=='complex64':
array=tf.io.decode_raw(array,tf.complex64)
elif dtype=='complex128':
array=tf.io.decode_raw(array,tf.complex128)
else:
print(f'UN-KNOWN DTYPE {dtype} FOUND')
print('UPDATE THIS DTYPE IN tfBlitz.py under def parsefunction(example_proto) UNDER DTYPE FUNCTIONS')
array=tf.reshape(array,shape=shape)
#######################################################################################################
########################## TF.FLOAT32 IS BETTER THAN TF.FLOAT16 #######################################
if parse_parms['normalize']==True:
if dtype=='uint8':
array=tf.cast(tf.divide(array,255),tf.float32)
elif dtype=='int32':
array=tf.cast(tf.divide(array,1),tf.float32)
else:
print(f'UN-KNOWN DTYPE -- {dtype} -- FOUND')
print('UPDATE THIS DTYPE IN tfBlitz.py under def parsefunction(example_proto) UNDER NORMALIZATION FUNCTIONS')
#######################################################################################################
elements.append(array)
returns=[]
x=parse_parms['X']
y=parse_parms['Y']
print('')
print('----------> >> DATA TO STACK << <----------')
print(f'INPUT OR X = STACK DATA INDEXES {x}')
print(f'OUTPUT OR Y = STACK DATA INDEXES {y}')
print('')
if x==(None) and y==(None):
for i in elements:
returns.append(i)
elif isinstance(x,int) and isinstance(y,int):
returns.append(elements[x])
returns.append(elements[y])
elif isinstance(x,int) and len(y)>1:
returns.append(elements[x])
stack=[]
for i in y:
stack.append(elements[i])
returns.append(tf.stack(stack))
elif len(x)>1 and isinstance(y,int):
stack=[]
for i in x:
stack.append(elements[i])
returns.append(tf.stack(stack))
returns.append(elements[y])
elif len(x)>1 and len(y)>1:
stack=[]
for i in x:
stack.append(elements[i])
returns.append(tf.stack(stack))
stack=[]
for i in y:
stack.append(elements[i])
returns.append(tf.stack(stack))
else:
print('ERROR AT STACK FUNCTIONS IN TFBLITZ.PY DEF PARSE FUNCTION')
# output=tf.stack([elements[1],elements[2]])
# output=elements[1]
# returns.append(elements[0])
# returns.append(output)
print('----------> >> FINAL DATA TO COME OUT OF THE PIPELINE << <----------')
for i in returns:
print(i)
print('')
returns=tuple(returns)
tffile.clear()
return returns
def generatetfrecords(datafilepath,tfrecordsfilepath):
firstloop = True
paths=glob.glob(f'{datafilepath}\\*.npy')
try:
with open(f'{tfrecordsfilepath}\\tfPATHinfo.txt', 'r') as f:
for line in f:
stripped_line = line.strip()
try:
paths.remove(stripped_line)
except:
pass
except:
pass
filecount=len(paths)
print('FILES_LEFT : '+ str(filecount))
for path in paths:
dataname = os.path.basename(path)
dataname=dataname[:-4]
data = np.load(path, allow_pickle=True)
filepath=f'{tfrecordsfilepath}\\{dataname}'
tfwrite(filepath,data,firstloop)
if firstloop == True:
firstloop = False
with open(f'{tfrecordsfilepath}\\tfPATHinfo.txt', 'a') as f:
f.write(path + '\n')
filecount=filecount-1
print(str(filecount)+' FILES_LEFT ')
return
def dataset(tfrecordsfilepath,tweaks=None,normalize=False,stack_X=(None),stack_Y=(None)):
parse_parms.update({'normalize':normalize})
parse_parms.update({'X':stack_X})
parse_parms.update({'Y':stack_Y})
if type(tfrecordsfilepath) is str:
tfpaths=glob.glob(tfrecordsfilepath+'\\*.tfrecord')
txtpath = tfrecordsfilepath + '\\tfDATAinfo.txt'
elif type(tfrecordsfilepath) is list:
tfpaths=tfrecordsfilepath
filepath=os.path.dirname(tfpaths[0])
txtpath=filepath + '\\tfDATAinfo.txt'
else:
print('TF-FILE-PATHS-ERROR')
txtpath=None
tfpaths=None
with open(txtpath, "r") as a_file:
for line in a_file:
stripped_line = line.strip()
tffile.append(stripped_line)
mytffiles = tf.data.Dataset.list_files(tfpaths)
dataset = tf.data.TFRecordDataset(mytffiles)
if tweaks!=None:
dataset = dataset.shuffle(tweaks['SHUFFLE_BUFFER_SIZE'],
reshuffle_each_iteration=True)
dataset = dataset.map(parsefunction)
if tweaks!=None:
dataset=dataset.batch(tweaks['BATCH_SIZE'],drop_remainder=True)
dataset=dataset.prefetch(tweaks['PREFETCH_NUM_OF_BATCHES'])
return dataset
def setmemorygrowth(bool=True):
gpus=tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, bool)
except RuntimeError as e:
print(e)
def setmemorylimit(memory=6144):
gpus=tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_virtual_device_configuration(gpu, [
tf.config.experimental.VirtualDeviceConfiguration(memory_limit=memory)])
except RuntimeError as e:
print(e)
def train_test_datasets(tfrecordsfilepath,tweaks=None,normalize=False,validation_files_split=0.05,stack_X=(0),stack_Y=(1)):
tfpaths=glob.glob(f'{tfrecordsfilepath}\\*.tfrecord')
shuffle(tfpaths)
shuffle(tfpaths)
numoffiles=len(tfpaths)
splitindex=int(numoffiles*validation_files_split)
val_tffiles=tfpaths[:splitindex]
train_tffiles=tfpaths[splitindex:]
train_dataset=dataset(train_tffiles,tweaks,normalize,stack_X,stack_Y)
test_dataset=dataset(val_tffiles,tweaks,normalize,stack_X,stack_Y)
return train_dataset,test_dataset
def getdateandtime(timestamp_in_sec=None):
if timestamp_in_sec==None:
timestamp_in_sec=time.time()
dateandtime = str(datetime.datetime.fromtimestamp(timestamp_in_sec))
dateandtime = list(dateandtime)
dateandtime[10] = '_'
dateandtime[13] = '-'
dateandtime[16] = '-'
dateandtime[19] = '_'
string = ''
for i in dateandtime:
string = string + i
return string
def getpaths(from_directory,to_directory,filetype0='npy',filetype1='tfrecord'):
frompaths=glob.glob(f'{from_directory}\\*.{filetype0}')
topaths=glob.glob(f'{to_directory}\\*.{filetype1}')
from_names=[]
to_names=[]
for path in frompaths:
name=os.path.basename(path)
name,filetype=name.split('.')
from_names.append(name)
for path in topaths:
name = os.path.basename(path)
name, filetype = name.split('.')
to_names.append(name)
for name in to_names:
try:
from_names.remove(name)
except:
pass
from_paths=[]
for name in from_names:
from_paths.append(f'{from_directory}\\{name}.{filetype0}')
return from_paths
def Save(model,filepath,additional_info=None):
model.save(
filepath, overwrite=True, include_optimizer=True, save_format=None,
signatures=None, options=None
)
with open(f'{filepath}\\modelsummary.txt', 'w') as f:
with redirect_stdout(f):
model.summary()
if additional_info!=None:
f.write(str(additional_info))
return
| StarcoderdataPython |
56525 | import torch
import numpy as np
from torch import nn
from torch import optim
from torch.utils.data import TensorDataset, DataLoader
from forPython.datasets.uci import load_mhealth
from forPython.models.torch.cnn import SimpleCNN
from forPython.utility.trainer import TorchSimpleTrainer
np.random.seed(0)
torch.random.manual_seed(0)
(x_train, y_train), (x_test, y_test) = load_mhealth()
y_train -= 1
y_test -= 1
n_timesteps, n_features, n_outputs = x_train.shape[1], x_train.shape[2], 12
batch_size, epochs = 32, 10
x_train = torch.tensor(x_train).float()
x_test = torch.tensor(x_test).float()
y_train = torch.tensor(y_train[:, 0]).long()
y_test = torch.tensor(y_test[:, 0]).long()
mid_size = 128 * 62
model = SimpleCNN(n_features, mid_size, n_outputs)
loss_func = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters())
train_ds = TensorDataset(x_train, y_train)
test_ds = TensorDataset(x_test, y_test)
train_loader = DataLoader(train_ds, batch_size, False)
test_loader = DataLoader(test_ds, batch_size, False)
clf = TorchSimpleTrainer(model, loss_func, optimizer)
clf.fit(train_loader, epochs)
clf.evaluate(test_loader)
| StarcoderdataPython |
3330513 | <filename>3rd_party_libs/transnetv1/post_process.py
import os
import gc
import sys
from sys import getsizeof
import math
import time
seed_int = 5
from numpy.random import seed as np_seed
np_seed(seed_int)
from tensorflow import set_random_seed as tf_set_random_seed
tf_set_random_seed(seed_int)
import numpy as np
import keras
from keras import layers
from keras import activations
from keras import models
from keras import optimizers
from keras_applications import imagenet_utils
from keras_applications.imagenet_utils import decode_predictions
from keras_applications.imagenet_utils import _obtain_input_shape
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
import cv2
from scipy.signal import argrelextrema
from scipy.signal import find_peaks
def get_fn(s):
a,b = os.path.split(s)
c,d = os.path.splitext(b)
return c
def trunc(x, trunc=4):
dem = float(10**trunc)
for i in range(len(x)):
if trunc>0:
x[i] = float(int(x[i]*dem))/dem
return x
def mov_avg(x, window=3):
l = len(x)
y = np.zeros((l,1), dtype=float)
for i, x in enumerate(x,0):
mov_ave_val = 0
low_limit = int((window-1)/2)
high_limit = l-int((window-1)/2)
if i<low_limit:
count = 0
for j in range(0, low_limit):
count += 1
mov_ave_val += x[j]
mov_ave_val /= float(count)
elif i>=high_limit:
count = 0
for j in range(high_limit, l):
count += 1
mov_ave_val += x[j]
mov_ave_val /= float(count)
else:
for j in range(i-int((window-1)/2), i+int((window-1)/2)+1):
mov_ave_val += x[j]
mov_ave_val /= window
y[i] = mov_ave_val
return y
def smooth(x, window=3):
w=np.ones(window,'d')
y=np.convolve(w/w.sum(), x, mode='same')
return y
def find_extremas(x, order=3):
lmin = argrelextrema(x, np.less, order=order)[0] # local minima
lmax = argrelextrema(x, np.greater, order=order)[0] # local maxima
lmin = []
min = 100000.0
min_pos = -1
for j in range(0,lmax[0]):
if x[j]<min:
min = x[j]
min_pos = j
lmin.append(min_pos)
for i in range(len(lmax)-1):
min = 100000.0
min_pos = -1
# find the minimum between this and the next maxima
for j in range(lmax[i]+1, lmax[i+1]):
if x[j]<min:
min = x[j]
min_pos = j
lmin.append(min_pos)
lmin = np.array(lmin)
lmin = lmin + 1
lmax = lmax + 1
return lmin, lmax
def process_sd_x(x, window=3, order=3, verbose=False):
l = len(x)
x_smoothed = smooth(x, window=window)
mins, maxs = find_extremas(x_smoothed, order=order)
if verbose:
print('mins::', len(mins), '-', mins)
print('maxs::', len(maxs), '-', maxs)
y = np.zeros(l, dtype=float)
for k in range(1,len(maxs)):
y[maxs[k]] = abs(x_smoothed[maxs[k]]-x_smoothed[mins[k-1]]) + abs(x_smoothed[maxs[k]]-x_smoothed[mins[k]])
if y[maxs[k]]>1.0:
y[maxs[k]]=1.0
maxs_t = np.zeros(l, dtype=float)
for k in maxs:
maxs_t[k]=x_smoothed[k]
mins_t = np.zeros(l, dtype=float)
for k in mins:
mins_t[k]=x_smoothed[k]
return y, x_smoothed, mins_t, maxs_t
def trans_to_boundaries(y, t=0.40):
bounds = []
prev = 0
for i in range(len(y)):
if y[i]>=t:
bounds.append([prev+1,i])
prev=i
bounds.append([prev+1, len(y)])
return bounds
def trans_to_list(y, t=0.40):
l = []
for i in range(len(y)):
if y[i]>=t:
l.append(i)
prev=i
l.append(len(y))
return l
if __name__ == "__main__":
epsilon = 1e-7
frames_to_process = 10
batch_size=10
print_more_info = False
print(' Scanning test files...')
root_dir = os.path.join('test', 'video_rai')
video_files = []
frameCounts = []
lengths = []
total_length = 0
for root, directories, filenames in os.walk(root_dir):
for filename in filenames:
if filename.endswith('mp4'):
video_files.append(os.path.join(root,filename))
cap = cv2.VideoCapture(video_files[-1])
frameCounts.append(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)))
fps = cap.get(cv2.CAP_PROP_FPS)
lengths.append(frameCounts[-1]/fps)
total_length += lengths[-1]
print(video_files[-1],'-',frameCounts[-1])
print('\n Scanning model files...')
models_dir = os.path.join('model', 'snapshots')
models = []
for m in os.listdir(models_dir):
if m.endswith('.hdf5'):
models.append(os.path.join(models_dir,m))
print(models[-1])
print('\n Predicting...')
for mi,m in enumerate(models):
model_name = m.split('fsd_')[1]
model_name = m.split('.hdf5')[0]
if model_name[model_name.find('cm=')+3]=='c':
class_mode = 'categorical'
loss = 'categorical_crossentropy'
else:
class_mode = 'binary'
loss = 'binary_crossentropy'
lr = float(model_name[model_name.find('ls=')+3:model_name.find('-sp=')])
samplewise_pre = bool(int(model_name[model_name.find('sp=')+3]))
featurewise_pre = bool(int(model_name[model_name.find('fp=')+3]))
if model_name[model_name.find('opt=')+4:model_name.find('opt=')+7]=='sgd':
opt = optimizers.SGD(lr=lr,
momentum=0.0,
decay=0.0,
nesterov=False)
else:
opt = optimizers.Adam(lr=lr,
beta_1=0.9,
beta_2=0.999,
epsilon=None,
decay=0.0,
amsgrad=False)
epoch = int(model_name[model_name.find('_e')+2:])
print('%2d/%2d model... (cm=%s, s_pre=%d, f_pre=%d, lr=%.5f, epoch=%d)' % (mi+1, len(models), class_mode, samplewise_pre, featurewise_pre, lr, epoch))
fsd = load_model(m)
fsd.compile(optimizer=opt, loss=loss, metrics=['accuracy'])
row_axis = 0
col_axis = 1
channel_axis = 2
pred_t = 0
for i,vid in enumerate(video_files):
c = -1
start_time = time.time()
cap = cv2.VideoCapture(vid)
if print_more_info:
print('%d\%d %s (%d), ' % (i+1, len(video_files), get_fn(vid), frameCounts[i]), end='')
f = [None] * frameCounts[i]
for j in range(frameCounts[i]):
ret, frame = cap.read()
x = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
x = cv2.resize(x, (64,64)).astype(np.float)
x /= 255
if samplewise_pre:
mean = np.mean(x, axis=(channel_axis, row_axis, col_axis))
std = np.std(x, axis=(channel_axis, row_axis, col_axis))
x -= mean
x /= (std + epsilon)
f[j] = x
y_pred = [0] * frameCounts[i]
for j in range(0,frameCounts[i]-frames_to_process,batch_size):
Xb = np.zeros((batch_size, frames_to_process, 64, 64, 3), dtype=float)
for b in range(batch_size):
if print_more_info:
print('batch:', (b+j), 'inds: ', end ='')
for t in range(frames_to_process):
if print_more_info:
print('%5d ' % (j+b+t), end='')
if (j+b+t)<frameCounts[i]:
Xb[b,t,:,:,:] = f[j+b+t]
else:
Xb[b,t,:,:,:] = Xb[-1,-1,:,:,:]
if print_more_info:
print('')
if print_more_info:
print('input shape=', Xb.shape)
pred = fsd.predict(Xb, batch_size=batch_size)
pred = np.squeeze(pred)
if class_mode=='categorical':
pred = pred[:,1]
if print_more_info:
print('output shape=', pred.shape)
y_pred[j:j+batch_size] = pred
with open(os.path.join(root_dir, get_fn(vid)+'_pred-'+'cm=%s,s_pre=%d,e=%d'%(class_mode, samplewise_pre, epoch)+'.txt'), 'w') as f:
for j in range(frameCounts[i]):
f.write('%.3f\n'%y_pred[j])
| StarcoderdataPython |
3234053 | import pandas as pd
from leaderboard.constants import scoreWeights
from leaderboard.constants import contribTypes
def get_final_score_table(
intermediate_score_df: pd.DataFrame, user_list: list
) -> pd.DataFrame:
""" Returns final score table dataframe
Args:
df: pandas DataFrame - Intermediate Score Table containing contribution counts of all sub types for given users
"""
intermediate_score_df.set_index("user_name", inplace=True)
final_score_table = pd.DataFrame(
columns=[
"User Name",
contribTypes.T1,
contribTypes.T2,
contribTypes.T3,
contribTypes.T4,
"Total Score",
]
)
for user_name in user_list:
t1_score, t2_score, t3_score, t4_score, total_score = (
0,
0,
0,
0,
0,
)
try:
user_row = intermediate_score_df.loc[user_name]
t1_score = (
user_row.t1s1 * scoreWeights.T1S1 + user_row.t1s2 * scoreWeights.T1S2
)
t2_score = (
user_row.t2s1 * scoreWeights.T2S1
+ user_row.t2s2 * scoreWeights.T2S2
+ user_row.t2s3 * scoreWeights.T2S3
+ user_row.t2s4 * scoreWeights.T2S4
)
t3_score = (
user_row.t3s1 * scoreWeights.T3S1 + user_row.t3s2 * scoreWeights.T3S2
)
t4_score = (
user_row.t4s1 * scoreWeights.T4S1
+ user_row.t4s2 * scoreWeights.T4S2
+ user_row.t4s3 * scoreWeights.T4S3
+ user_row.t4s4 * scoreWeights.T4S4
)
total_score = t1_score + t2_score + t3_score + t4_score
final_score_table = final_score_table.append(
{
"User Name": user_name,
contribTypes.T1: t1_score,
contribTypes.T2: t2_score,
contribTypes.T3: t3_score,
contribTypes.T4: t4_score,
"Total Score": total_score,
},
ignore_index=True,
)
except KeyError:
final_score_table = final_score_table.append(
{
"User Name": user_name,
contribTypes.T1: t1_score,
contribTypes.T2: t2_score,
contribTypes.T3: t3_score,
contribTypes.T4: t4_score,
"Total Score": total_score,
},
ignore_index=True,
)
return final_score_table.sort_values(
by=["Total Score", "User Name"], ascending=[False, True]
)
| StarcoderdataPython |
45619 | <filename>apps/scraper/bing_api.py
#script to scraper bing api
#include libs
import sys
sys.path.insert(0, '..')
from include import *
def generate_scraping_job(query, scraper):
query_string = query[1]
query_id = query[4]
study_id = query[0]
search_engine = scraper
result_pages = 20
number_multi = 50
check_jobs = Scrapers.getScrapingJobs(query_id, study_id, search_engine)
if not check_jobs:
for r in range(result_pages):
start = r * number_multi
print(start)
try:
Scrapers.insertScrapingJobs(query_id, study_id, query_string, search_engine, start, date.today())
print('Scraper Job: '+query_string+' SE:'+search_engine+' start:'+str(start)+' created')
except:
break;
def scrape_query(query, scraper):
today = date.today()
jobs = Scrapers.getScrapingJobsByQueryProgressSE(query, 0, scraper)
subscription_key = "<KEY>"
assert subscription_key
search_url = "https://api.bing.microsoft.com/v7.0/search"
for job in jobs:
search_engine = job[3]
search_query = job[2]
start = job[4]
query_id = job[0]
study_id = job[1]
job_id = job[7]
progress = 2
Scrapers.updateScrapingJobQuerySearchEngine(query_id, search_engine, progress)
sleeper = random.randint(3,10)
time.sleep(sleeper)
#headers = {"Ocp-Apim-Subscription-Key": subscription_key, "X-Search-ClientIP":"192.168.3.11"}
headers = {"Ocp-Apim-Subscription-Key": subscription_key}
params = {"q": search_query, "textDecorations": True, "textFormat": "HTML", "count": 50, "offset": start, "responseFilter": "Webpages"}
try:
response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
web_results = search_results['webPages']
except:
Helpers.saveLog("../../logs/"+str(study_id)+"_"+search_query+".log", 'Error Scraping Job', 1)
Scrapers.updateScrapingJobQuerySearchEngine(query_id, search_engine, -1)
Results.deleteResultsNoScrapers(query_id, search_engine)
exit()
results = []
for w in web_results['value']:
results.append(w['url'])
if results:
results_check = results[-1]
check_url = Results.getURL(query_id, study_id, results_check, search_engine)
if check_url:
Scrapers.updateScrapingJobQuerySearchEngine(query_id, search_engine, 1)
Helpers.saveLog("../../logs/"+str(study_id)+"_"+search_query+".log", 'Max Results', 1)
exit()
else:
Scrapers.updateScrapingJob(job_id, 1)
Helpers.saveLog("../../logs/"+str(study_id)+"_"+search_query+".log", 'Start Scraping Results', 1)
results_position = 1
for result in results:
url = result
check_url = Results.getURL(query_id, study_id, url, search_engine)
if (not check_url):
url_meta = Results.getResultMeta(url, str(study_id), search_engine, str(query_id))
hash = url_meta[0]
ip = url_meta[1]
main = url_meta[2]
main_hash = Helpers.computeMD5hash(main+str(study_id)+search_engine+str(query_id))
contact_url = "0"
Helpers.saveLog("../../logs/"+str(study_id)+"_"+search_query+".log", url, 1)
contact_hash = "0"
contact_url = "0"
last_position = Results.getLastPosition(query_id, study_id, search_engine, today)
if last_position:
results_position = last_position[0][0] + 1
if Results.getPosition(query_id, study_id, search_engine, results_position):
results_position = results_position + 1
Results.insertResult(query_id, study_id, job_id, 0, ip, hash, main_hash, contact_hash, search_engine, url, main, contact_url, today, datetime.now(), 1, results_position)
check_sources = Results.getSource(hash)
if not check_sources:
Results.insertSource(hash, None, None, None, today, 0)
Helpers.saveLog("../../logs/"+str(study_id)+"_"+search_query+".log", 'Insert Result', 1)
studies = Studies.getStudiesScraper()
for s in studies:
if "Bing_API" in s[-1]:
scraper = "Bing_API"
studies_id = s[-3]
queries = Queries.getQueriesStudy(studies_id)
for q in queries:
query_id = q[-2]
job = 0
check_jobs = Scrapers.getScrapingJobsBySE(query_id, scraper)
count_jobs = check_jobs[0][0]
if count_jobs == 0:
job = 1
if job == 1:
generate_scraping_job(q, scraper)
open_queries = Queries.getOpenQueriesStudybySE(studies_id, scraper)
if open_queries:
random.shuffle(open_queries)
o = open_queries[0]
if o:
check_progress = Scrapers.getScrapingJobsByQueryProgressSE(o, 2, scraper)
if not check_progress:
print(o)
scrape_query(o, scraper)
| StarcoderdataPython |
43283 | '''
ExperimentClient tests.
'''
import os
import unittest
import pandas as pd
import time
from mljar.client.project import ProjectClient
from mljar.client.dataset import DatasetClient
from mljar.client.experiment import ExperimentClient
from .project_based_test import ProjectBasedTest, get_postfix
class ExperimentClientTest(ProjectBasedTest):
def setUp(self):
proj_title = 'Test project-01'+get_postfix()
proj_task = 'bin_class'
self.expt_title = 'Test experiment-01'
self.validation_kfolds = 5
self.validation_shuffle = True
self.validation_stratify = True
self.validation_train_split = None
self.algorithms = ['xgb']
self.metric = 'logloss'
self.tuning_mode = 'Normal'
self.time_constraint = 1
self.create_enseble = False
# setup project
self.project_client = ProjectClient()
self.project = self.project_client.create_project(title = proj_title, task = proj_task)
# add training data
df = pd.read_csv('tests/data/test_1.csv')
cols = ['sepal length', 'sepal width', 'petal length', 'petal width']
target = 'class'
dc = DatasetClient(self.project.hid)
self.dataset = dc.add_dataset_if_not_exists(df[cols], df[target])
def tearDown(self):
# wait before clean, to have time to initialize models
time.sleep(60)
# clean
self.project_client.delete_project(self.project.hid)
def test_create_with_kfold_cv(self):
#Create experiment test with k-fold CV.
# add experiment
ec = ExperimentClient(self.project.hid)
self.assertNotEqual(ec, None)
# there should be none experiments
experiments = ec.get_experiments()
self.assertEqual(experiments, [])
# create new experiment
experiment = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, self.validation_train_split,
self.algorithms, self.metric,
self.tuning_mode, self.time_constraint, self.create_enseble)
self.assertNotEqual(experiment, None)
self.assertEqual(experiment.title, self.expt_title)
self.assertEqual(experiment.validation_scheme, "5-fold CV, Shuffle, Stratify")
self.assertEqual(experiment.metric, self.metric)
# get all experiments, should be only one
experiments = ec.get_experiments()
self.assertEqual(len(experiments), 1)
# get experiment by hid, there should be the same
experiment_2 = ec.get_experiment(experiment.hid)
self.assertEqual(experiment_2.hid, experiment.hid)
self.assertEqual(experiment_2.title, experiment.title)
self.assertEqual(experiment_2.metric, experiment.metric)
self.assertEqual(experiment_2.validation_scheme, experiment.validation_scheme)
self.assertTrue(experiment.equal(experiment_2))
# test __str__ method
self.assertTrue('id' in str(experiment_2))
self.assertTrue('title' in str(experiment_2))
self.assertTrue('metric' in str(experiment_2))
self.assertTrue('validation' in str(experiment_2))
def test_create_with_train_split(self):
#Create experiment with validation by train split.
# add experiment
ec = ExperimentClient(self.project.hid)
self.assertNotEqual(ec, None)
# there should be none experiments
experiments = ec.get_experiments()
self.assertEqual(experiments, [])
# create new experiment
experiment = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, 0.72,
self.algorithms, self.metric,
self.tuning_mode, self.time_constraint, self.create_enseble)
self.assertNotEqual(experiment, None)
self.assertEqual(experiment.title, self.expt_title)
self.assertEqual(experiment.validation_scheme, "Split 72/28, Shuffle, Stratify")
def test_create_with_validation_dataset(self):
#Create experiment with validation with dataset.
# add vald dataset
cols = ['sepal length', 'sepal width', 'petal length', 'petal width']
target = 'class'
df = pd.read_csv('tests/data/test_1_vald.csv')
dc = DatasetClient(self.project.hid)
vald_dataset = dc.add_dataset_if_not_exists(df[cols], df[target])
# add experiment
ec = ExperimentClient(self.project.hid)
self.assertNotEqual(ec, None)
# there should be none experiments
experiments = ec.get_experiments()
self.assertEqual(experiments, [])
# create new experiment
experiment = ec.add_experiment_if_not_exists(self.dataset, vald_dataset, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, 0.72,
self.algorithms, self.metric,
self.tuning_mode, self.time_constraint, self.create_enseble)
self.assertNotEqual(experiment, None)
self.assertEqual(experiment.title, self.expt_title)
self.assertEqual(experiment.validation_scheme, "With dataset")
def test_create_if_exists(self):
#Create experiment after experiment is already in project.
# add experiment
ec = ExperimentClient(self.project.hid)
self.assertNotEqual(ec, None)
# there should be none experiments
experiments = ec.get_experiments()
self.assertEqual(experiments, [])
# create new experiment
experiment = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, self.validation_train_split,
self.algorithms, self.metric,
self.tuning_mode, self.time_constraint, self.create_enseble)
self.assertNotEqual(experiment, None)
# get all experiments, should be only one
experiments = ec.get_experiments()
self.assertEqual(len(experiments), 1)
# try to create the same experiment
experiment_2 = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, self.validation_train_split,
self.algorithms, self.metric,
self.tuning_mode, self.time_constraint, self.create_enseble)
self.assertNotEqual(experiment, None)
# get all experiments, should be only one
experiments = ec.get_experiments()
self.assertEqual(len(experiments), 1)
# both should be the same
self.assertEqual(experiment_2.hid, experiment.hid)
self.assertEqual(experiment_2.title, experiment.title)
self.assertEqual(experiment_2.metric, experiment.metric)
self.assertEqual(experiment_2.validation_scheme, experiment.validation_scheme)
self.assertTrue(experiment.equal(experiment_2))
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
51401 | <reponame>UW-OCP/Collocation-Solver-CUDA
import numpy as np
import scipy.linalg
class collocation_node:
"""
Node class for collocation solver, save the required DAE variables
at each time node
"""
'''
Input: size_y - size of the ODE variables
size_z - size of the DAE variables
size_p - size of the parameters
m: stages of the lobatto coefficients
Construct the empty collocation node with all the
fields set to zero.
'''
def __init__(self, size_y, size_z, size_p, m):
self.size_y = size_y
self.size_z = size_z
self.size_p = size_p
self.m = m
self.delta_t = 0
self.tspan = []
self.y = np.zeros((size_y), dtype = np.float64)
self.z = np.zeros((size_z), dtype = np.float64)
self.y_dot = np.zeros((size_y, m), dtype = np.float64)
self.z_tilda = np.zeros((size_z, m), dtype = np.float64)
self.p = np.zeros((size_p), dtype = np.float64)
self.y_tilda = np.zeros((size_y, m), dtype = np.float64)
self.f_a = np.zeros(((size_y + size_z) * m), dtype = np.float64)
self.f_b = np.zeros((size_y), dtype = np.float64)
self.f_N = []
self.J = np.zeros(((size_y + size_z) * m, size_y), dtype = np.float64)
self.W = np.zeros(((size_y + size_z) * m, (size_y + size_z) * m), dtype = np.float64)
self.V = np.zeros(((size_y + size_z) * m, size_p), dtype = np.float64)
self.V_N = []
self.D = np.zeros((size_y, (size_y + size_z) * m), dtype = np.float64)
self.B = np.zeros((size_y + size_p, size_y), dtype = np.float64)
self.A = np.zeros((size_y, size_y), dtype = np.float64)
self.C = np.zeros((size_y, size_y), dtype = np.float64)
self.H = np.zeros((size_y, size_p), dtype=np.float64)
self.H_N = []
self.b = np.zeros((size_y), dtype=np.float64)
self.b_N = []
self.delta_y = np.zeros(size_y, dtype=np.float64)
self.delta_k = np.zeros(((size_y + size_z) * m), dtype = np.float64)
self.delta_p = np.zeros(size_p, dtype=np.float64)
'''
self.C_tilda = np.zeros((size_y, size_y), dtype = np.float64)
self.G_tilda = np.zeros((size_y, size_y), dtype = np.float64)
self.H_tilda = np.zeros((size_y, size_p), dtype = np.float64)
self.b_tilda = np.zeros((size_y), dtype = np.float64)
self.R = np.zeros()
self.E = np.zeros()
self.G = np.zeros()
self.K = np.zeros()
self.d = np.zeros()
self.Rp = np.zeros()
self.dp = np.zeros()
self.delta_p = np.zeros()
self.delta_y = np.zeros()
self.delta_k = np.zeros()
'''
'''
Input: y - a size_y vector of the value of the ODE variables
Set the ODE variable y.
'''
def set_y(self, y):
for i in range(self.size_y):
self.y[i] = y[i]
'''
Input: z - a size_z vector of the value of the DAE variables
Set the DAE variable z.
'''
def set_z(self, z):
for i in range(self.size_z):
self.z[i] = z[i]
'''
Input: p - a size_p x 1 vector of the value of the parameter variables
Set the parameter variable p.
'''
def set_p(self, p):
for i in range(self.size_p):
self.p[i] = p[i]
'''
Input: delta_t - a double representing the interval of the time
span between the current node and the next node
Set the time interval delta_t.
'''
def set_delta_t(self, delta_t):
self.delta_t = delta_t
'''
Input: tspan representing the time span between the current node and the next node
Set the time interval tspan.
'''
def set_tspan(self, tspan):
for i in range(tspan.shape[0]):
self.tspan.append(tspan[i])
'''
Input:
y_dot : value of the derivative of the ODE variable y
j : index of the collocation point
Set the y_dot at the jth collocation point
'''
def set_y_dot(self, y_dot, j):
for i in range(self.size_y):
self.y_dot[i][j] = y_dot[i]
'''
Input:
z_tilda : value of the DAE variable z
j : index of the collocation point
Set the z_tilda at the jth collocation point
'''
def set_z_tilda(self, z_tilda, j):
for i in range(self.size_z):
self.z_tilda[i][j] = z_tilda[i]
'''
Input:
y_tilda : value of the ODE variable y
j : index of the collocation point
Set the y_tilda at the jth collocation point
'''
def set_y_tilda(self, y_tilda, j):
for i in range(self.size_y):
self.y_tilda[i][j] = y_tilda[i]
'''
Input:
f_a : value of the residual of all the collocation points of the time interval
Set the residual f_a of the time interval
'''
def set_f_a(self, f_a):
for i in range((self.size_y + self.size_z) * self.m):
self.f_a[i] = f_a[i]
'''
Input:
f_b : value of the residual of at the node
Set the residual f_b of the node
'''
def set_f_b(self, f_b):
for i in range(self.size_y):
self.f_b[i] = f_b[i]
# self.f_N = np.zeros((size_y + size_p), dtype = np.float64)
def set_f_N(self, f_N):
for i in range(self.size_y + self.size_p):
self.f_N.append(f_N[i])
def get_y(self):
y = np.zeros((self.size_y), dtype = np.float64)
for i in range(self.size_y):
y[i] = self.y[i]
return y
def get_y_tilda(self, j):
y = np.zeros((self.size_y), dtype = np.float64)
for i in range(self.size_y):
y[i] = self.y_tilda[i][j]
return y
def get_z_tilda(self, j):
z = np.zeros((self.size_z), dtype = np.float64)
for i in range(self.size_z):
z[i] = self.z_tilda[i][j]
return z
def set_B(self, B):
for i in range(self.size_y + self.size_p):
for j in range(self.size_y):
self.B[i][j] = B[i][j]
# self.VN = np.zeros((size_y + size_p, size_p), dtype = np.float64)
def set_VN(self, VN):
for i in range(self.size_y + self.size_p):
V_row = []
for j in range(self.size_p):
V_row.append(VN[i][j])
self.V_N.append(V_row)
# self.HN = np.zeros((size_y + size_p, size_p), dtype = np.float64)
def set_HN(self, VN):
for i in range(self.size_y + self.size_p):
H_row = []
for j in range(self.size_p):
H_row.append(VN[i][j])
self.H_N.append(H_row)
# self.b_N = np.zeros((size_y + size_p), dtype = np.float64)
def set_bN(self, f_b):
for i in range(self.size_y + self.size_p):
self.b_N.append(-f_b[i])
def set_delta_y(self, delta_y):
for i in range(self.size_y):
self.delta_y[i] = delta_y[i]
def set_delta_k(self, delta_k):
for i in range((self.size_y + self.size_z) * self.m):
self.delta_k[i] = delta_k[i]
# j_col : jth collocation node
def set_Jacobian(self, a, b, Dh, Dg, j_col):
'''
hy = np.zeros((self.size_y, self.size_y), dtype = np.float64)
hz = np.zeros((self.size_y, self.size_z), dtype = np.float64)
hp = np.zeros((self.size_y, self.size_p), dtype = np.float64)
gy = np.zeros((self.size_z, self.size_y), dtype = np.float64)
gz = np.zeros((self.size_z, self.size_z), dtype = np.float64)
gp = np.zeros((self.size_z, self.size_p), dtype = np.float64)
'''
hy = Dh[0 : , 0 : self.size_y]
hz = Dh[0 : , self.size_y : self.size_y + self.size_z]
hp = Dh[0 : , self.size_y + self.size_z : ]
gy = Dg[0 : , 0 : self.size_y]
gz = Dg[0 : , self.size_y : self.size_y + self.size_z]
gp = Dg[0 : , self.size_y + self.size_z : ]
'''
for i in range(self.size_y):
for j in range(self.size_y):
hy[i][j] = Dh[i][j]
for j in range(self.size_z):
hz[i][j] = Dh[i][j + self.size_y]
for j in range(self.size_p):
hp[i][j] = Dh[i][j + (self.size_y + self.size_z)]
for i in range(self.size_z):
for j in range(self.size_y):
gy[i][j] = Dg[i][j]
for j in range(self.size_z):
gz[i][j] = Dg[i][j + self.size_y]
for j in range(self.size_p):
gp[i][j] = Dg[i][j + (self.size_y + self.size_z)]
'''
start_row_index_h = j_col * (self.size_y + self.size_z)
self.J[start_row_index_h : start_row_index_h + self.size_y, 0 : self.size_y] = hy
self.V[start_row_index_h : start_row_index_h + self.size_y, 0 : self.size_p] = hp
'''
for i in range(self.size_y):
for j in range(self.size_y):
self.J[start_row_index_h + i][j] = hy[i][j]
for j in range(self.size_p):
self.V[start_row_index_h + i][j] = hp[i][j]
'''
start_row_index_g = start_row_index_h + self.size_y
self.J[start_row_index_g : start_row_index_g + self.size_z, 0 : self.size_y] = gy
self.V[start_row_index_g : start_row_index_g + self.size_z, 0 : self.size_p] = gp
'''
for i in range(self.size_z):
for j in range(self.size_y):
self.J[start_row_index_g + i][j] = gy[i][j]
for j in range(self.size_p):
self.V[start_row_index_g + i][j] = gp[i][j]
'''
self.D[0 : self.size_y, j_col * (self.size_y + self.size_z) : j_col * (self.size_y + self.size_z) + self.size_y] = self.delta_t * b * np.eye(self.size_y, dtype = np.float64)
'''
for i in range(self.size_y):
self.D[i][i + j_col * (self.size_y + self.size_z)] = self.delta_t * b * 1
'''
# for each row block j_col
# loop through all the column block
for i in range(self.m):
start_row_index = j_col * (self.size_y + self.size_z)
w_tmp = np.zeros(((self.size_y + self.size_z), (self.size_y + self.size_z)), dtype = np.float64)
if i == j_col:
start_col_index = i * (self.size_y + self.size_z)
identity = np.eye(self.size_y, dtype = np.float64)
w_tmp[0 : self.size_y, 0 : self.size_y] = -identity + self.delta_t * a[j_col, j_col] * hy
w_tmp[0 : self.size_y, self.size_y : ] = hz
w_tmp[self.size_y : , 0 : self.size_y] = self.delta_t * a[j_col, j_col] * gy
w_tmp[self.size_y : , self.size_y : ] = gz
self.W[start_row_index : start_row_index + (self.size_y + self.size_z), start_col_index : start_col_index + (self.size_y + self.size_z)] = w_tmp
'''
for j in range(self.size_y):
for k in range(self.size_y):
w_tmp[j][k] = -identity[j][k] + self.delta_t * a[j_col][j_col] * hy[j][k]
for k in range(self.size_z):
w_tmp[j][k + self.size_y] = hz[j][k]
for j in range(self.size_z):
for k in range(self.size_y):
w_tmp[j + self.size_y][k] = self.delta_t * a[j_col][j_col] * gy[j][k]
for k in range(self.size_z):
w_tmp[j + self.size_y][k + self.size_y] = gz[j][k]
for j in range(self.size_y + self.size_z):
for k in range(self.size_y + self.size_z):
self.W[start_row_index + j][start_col_index + k] = w_tmp[j][k]
'''
else:
start_col_index = i * (self.size_y + self.size_z)
w_tmp[0 : self.size_y, 0 : self.size_y] = self.delta_t * a[j_col, i] * hy
w_tmp[0 : self.size_y, self.size_y : ] = np.zeros((self.size_y, self.size_z), dtype = np.float64)
w_tmp[self.size_y : , 0 : self.size_y] = self.delta_t * a[j_col, i] * gy
w_tmp[self.size_y : , self.size_y : ] = np.zeros((self.size_z, self.size_z), dtype = np.float64)
self.W[start_row_index : start_row_index + (self.size_y + self.size_z), start_col_index : start_col_index + (self.size_y + self.size_z)] = w_tmp
'''
for j in range(self.size_y):
for k in range(self.size_y):
w_tmp[j][k] = self.delta_t * a[j_col][i] * hy[j][k]
for k in range(self.size_z):
w_tmp[j][k + self.size_y] = 0
for j in range(self.size_z):
for k in range(self.size_y):
w_tmp[j + self.size_y][k] = self.delta_t * a[j_col][i] * gy[j][k]
for k in range(self.size_z):
w_tmp[j + self.size_y][k + self.size_y] = 0
for j in range(self.size_y + self.size_z):
for k in range(self.size_y + self.size_z):
self.W[start_row_index + j][start_col_index + k] = w_tmp[j][k]
'''
def update_Jacobian(self):
# W_inv = np.linalg.inv(self.W)
# identity = np.eye(self.size_y, dtype=np.float64)
# D_W_inv = np.dot(self.D, W_inv)
# self.A = -identity + np.dot(D_W_inv, self.J)
# self.C = np.eye(self.size_y)
# self.H = np.dot(D_W_inv, self.V)
# self.b = -self.f_b - np.dot(D_W_inv, self.f_a)
identity = np.eye(self.size_y, dtype=np.float64)
# P * W = L * U
P, L, U = scipy.linalg.lu(self.W)
# A = -I + D * W^(-1) * J
# X = np.dot(P, self.J)
X = np.linalg.solve(P, self.J)
L_inv_J = np.linalg.solve(L, X)
W_inv_J = np.linalg.solve(U, L_inv_J)
D_W_inv_J = np.dot(self.D, W_inv_J)
self.A = -identity + D_W_inv_J
# H = D * W^(-1) * V
X = np.linalg.solve(P, self.V)
L_inv_V = np.linalg.solve(L, X)
W_inv_V = np.linalg.solve(U, L_inv_V)
self.H = np.dot(self.D, W_inv_V)
# C = I
self.C = identity
# b = -f_b - D * W^(-1) * f_a
X = np.linalg.solve(P, self.f_a)
L_inv_f_a = np.linalg.solve(L, X)
W_inv_f_a = np.linalg.solve(U, L_inv_f_a)
D_W_inv_f_a = np.dot(self.D, W_inv_f_a)
self.b = -self.f_b - D_W_inv_f_a
| StarcoderdataPython |
3206639 | #On the name of ALLAH and may the blessing and peace of Allah
#be upon the Messenger of <NAME> <NAME>.
#Author : <NAME>
#Date : 15/06/10
#version :2.6
from string import *
from itertools import *
"""
My program uses special functions to test, count, and extract vowels and consonants.
However,the string_check function uses isinstance() to check an objects type
and isinstance(obj, str) will be True only if obj is a str, and the
vowel_or_Consonant is a boolean function that accept a text(string)as
an argument and which return either True or False and which you can call in
the program at any time to test every letter within a string to
determine whether it is a vowel or consonant.
Though, Vowel and Consonant functions return the count of each vowel and consonant.
and Anagrams_search function return a set of every possible combination,thus,
every possible Anagram.
"""
def my_try(arg =''):
""" Raises an error exception if a letter is not in the alphabet or if the letter is not a space character ."""
for item in arg:
if item not in 'abcdefghijklmnopqrstuvwxyz ':
raise TypeError,\
"\n<Every letter within the text should be in the alphabet. \n"
def string_check(function):
"""
A function which uses isinstance to determine whether an object is a string.
"""
def wrapper(character):
# string_check raises assertionError
# if the character is not a string
assert isinstance(character, str),\
"Please enter a string and not %s" % (character)
return function(character)
return wrapper
def Vowel_or_Consonant(char = ''):
"""
A boolean function, which return either True or False
"""
# Determine whether each letter in the text is a vowel or a
# consonant. if it is a vowel, set test to True, otherwise, set test to false.
for i in char:
if str(i)in 'aeiouy':
test = True
else :
test = False
# Return the value of the test variable
return test
@string_check
def Vowel(text = ''):
"""
A function which return a set of vowels and the total
number of each vowel in the text.
"""
#empty string
string_A = ''
for item in lower(text):
if Vowel_or_Consonant(str(item)):
string_A += item
# sort a string_A
char_A = sorted(string_A)
# vowels' counts
return "\n<The vowels are : %s \n" % \
[(arg, len(list(karg))) for arg, karg in groupby(char_A)]
@string_check
def Consonant(text = ''):
"""
A function which return a set of consonants and the total
number of each consonant in the text.
"""
string_B = ''
string_C = ''
for arg in lower(text):
if not Vowel_or_Consonant(str(arg)) and str(arg) in 'bcdfghjklmnpqrstvwxz':
string_B += arg
elif not Vowel_or_Consonant(str(arg)) and str(arg) not in 'bcdfghjklmnpqrstvwxz':
string_C += arg
# sort a string_B
char_B = sorted(string_B)
char_C = sorted(string_C)
# consonants and others characters' Counts
return "<The consonants are :%s \n\n<And the others characters are : %s\n" % \
([(arg, len(list(karg))) for arg, karg in groupby(char_B)],\
[(arg, len(list(karg))) for arg, karg in groupby(char_C)])
def Anagrams_search(phrase = ''):
"""
A function which return a set of every combination possible and for
every word within a text.
"""
#empty list
mylist = []
try:
my_try(lower(phrase))
for word in list(split(phrase)):
#every possible combination for each word within the text
split_list = [arg for arg in permutations(lower(word),len(word))]
for item in split_list:
split_list = join(item,'')
#append mylist
mylist.append(split_list)
# a list of every possible combination including anagrams
return "<The list of every possible combination and anagrams : %s" % \
mylist
#The program raise TypeError if input is not in the alphabet
except TypeError,exception :
print exception
if __name__ == "__main__":
vowels = Vowel('Fouad Teniou')
print vowels
consonants = Consonant('Fouad Teniou')
print consonants
anagrams = Anagrams_search('Ten iou')
print anagrams
anagrams1 = Anagrams_search('Ten i7u')
print anagrams1
#######################################################################
#python "C:\PythonPrograms\Anagrams-vowels-consonants.py"
#<The vowels are : [('a', 1), ('e', 1), ('i', 1), ('o', 2), ('u', 2)]
#<The consonants are :[('d', 1), ('f', 1), ('n', 1), ('t', 1)]
#<And the others characters are : [(' ', 1)].
#<The list of every possible combination and anagrams :
#['ten', 'tne', 'etn', 'ent', 'nte', 'net', 'iou', 'iuo', 'oiu', 'oui', 'uio', 'uoi']
#<Every letter within the text should be in the alphabet.
#######################################################################
#VERSION PYTHON 3.2
#from itertools import *
#def my_try(arg =''):
# """ Raises NegativeNumberError if number less than 0, and
# raises ZeroNumberException if number is equal to 0."""
# for item in arg:
# if item not in 'abcdefghijklmnopqrstuvwxyz ':
# raise TypeError("\n<Every letter within the text should be in the alphabet #\n")
#
#def string_check(function):
# """
# A function which uses isinstance to determine whether an object is a string.
# """
#
# def wrapper(character):
# # string_check raises assertionError
# # if the character is not a string
# assert isinstance(character, str),\
# "Please enter a string and not %s" % (character)
# return function(character)
# return wrapper
#
#def Vowel_or_Consonant(char = ''):
# """
# A boolean function, which return either True or False
# """
#
# # Determine whether each letter in the text is a vowel or a
# # consonant. if it is a vowel, set test to True, otherwise, set test to false.
# for i in char:
# if str(i)in 'aeiouy':
# test = True
# else :
# test = False
# # Return the value of the test variable
# return test
#
#@string_check
#def Vowel(text = ''):
# """
# A function which return a set of vowels and the total
# number of each vowel in the text.
# """
#
# #empty string
# string_A = ''
# for item in str.lower(text):
# if Vowel_or_Consonant(str(item)):
# string_A += item
#
# # sort a string_A
# char_A = sorted(string_A)
#
# # vowels' counts
# return "\n<The vowels are : %s \n" % \
# [(arg, len(list(karg))) for arg, karg in groupby(char_A)]#
#
#@string_check
#def Consonant(text = ''):
# """
# A function which return a set of consonants and the total
# number of each consonant in the text.
# """
#
# string_B = ''
# string_C = ''
# for arg in str.lower(text):
# if not Vowel_or_Consonant(str(arg)) and str(arg) in 'bcdfghjklmnpqrstvwxz':
# string_B += arg
# elif not Vowel_or_Consonant(str(arg)) and str(arg) not in 'bcdfghjklmnpqrstvwxz':
# string_C += arg
# # sort a string_B
# char_B = sorted(string_B)
# char_C = sorted(string_C)
# # consonants and others characters' Counts
# return "<The consonants are :%s \n\n<And the others characters are : %s\n" % \
# ([(arg, len(list(karg))) for arg, karg in groupby(char_B)],\
# [(arg, len(list(karg))) for arg, karg in groupby(char_C)])
#
#def Anagrams_search(phrase = ''):
# """
# A function which return set of every combination possible and for
# every word within a text.
# """
# #empty list
# mylist = []
# try:
# my_try(str.lower(phrase))
#
# for word in list(str.split(phrase)):
# #every possible combination for each word within the text
# split_list = [arg for arg in permutations(str.lower(word),len(word))]
#
# for item in split_list:
#
#
# split_list = ''.join(item)
# #append mylist
#
# mylist.append(split_list)
# # a list of every possible combination including anagrams
# return "<The list of every possible combination and anagrams : %s" % \
# mylist
# #The program raise TypeError if input is not in the alphabet
# except TypeError as exception :
# print(exception)
#
| StarcoderdataPython |
3258556 | <filename>tests/test_quickumls-service.py
'''
Example of json TCP communication, make sure you have app/quickumls-service.py running on port 9999
'''
import json
import socket
def recvall(sock):
BUFF_SIZE = 2048
data = b''
while True:
part = sock.recv(BUFF_SIZE)
data += part
if len(part) < BUFF_SIZE:
break
return data
TCP_IP = '127.0.0.1'
TCP_PORT = 9999
BUFFER_SIZE = 2048
with open('resources/ignored/report.txt', 'r') as reportfile:
MESSAGE = reportfile.read()
data = {}
data['text'] = MESSAGE
json_data = json.dumps(data)
json_dataCRLF = json_data+'\r\n'
print("send " + json_data)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((TCP_IP, TCP_PORT))
s.sendall(json_dataCRLF.encode('utf-8'))
data_response = recvall(s)
s.close()
data_response_obj = {}
data_response_obj['response'] = json.loads(data_response.decode("utf-8"))
print("received data:", data_response)
with open('resources/ignored/data_result.txt', 'w') as outfile:
json.dump(data_response_obj, outfile) | StarcoderdataPython |
3212414 | <filename>brincadeiras/func.py
import config.janela as cjane
def como_jogar(titulo, conteudo):
"""Cria a tela de "como jogar" para um jogo, recebendo o título e as instruções daquele jogo."""
janela = cjane.Janela()
janela.muda_linha(1, titulo.upper())
janela.muda_linha(3, ' - INSTRUÇÕES:', alin='ljust')
for i in range(5, 22):
try:
linha = conteudo[i - 5]
janela.muda_linha(i, linha, alin='ljust')
except IndexError:
pass
print(janela)
input('(Aperte ENTER para jogar!)')
| StarcoderdataPython |
1796468 | <reponame>NathanKr/docker-compose-python-mysql-playground
from datetime import datetime
import mysql.connector
import time
import os
now = datetime.now()
print(f' ########### hello python start -{now}-')
mydb = None
while True:
try:
mydb = mysql.connector.connect(
# host is db container name !!!
host= 'docker-compose-python-mysql-playground_dc_db_service_1',
user = "root",
password=<PASSWORD>('<PASSWORD>'),
database = os.getenv('MYSQL_DATABASE')
)
print('connected to the db')
break
except Exception as e:
print(e)
time.sleep(2)
print(f'mydb : {mydb}')
mycursor = mydb.cursor()
mycursor.execute("SELECT * FROM employees")
myresult = mycursor.fetchall()
for x in myresult:
print(x)
print(f' ########### hello python end -{now}-')
| StarcoderdataPython |
121989 | import sqlite3
conn = sqlite3.connect('rpg_db.sqlite3')
curs = conn.cursor()
count_characters = 'SELECT COUNT(*) FROM charactercreator_character;'
print(curs.execute(count_characters).fetchall() [0][0])
query = '''SELECT character_id, COUNT(distinct item_id)
FROM charactercreator_character_inventory
GROUP BY character_id
LIMIT 20'''
result = curs.execute(query).fetchall()
print('\n Each character has the following number of items:')
for each in result:
print(f'Character {each[0]} has {each[1]} item(s).')
# Alternative character count
char = curs.execute('SELECT * FROM charactercreator_character').fetchall()
print('# of characters',len(char))
# Subclasses counts
mage = curs.execute('SELECT * FROM charactercreator_mage').fetchall()
thief = curs.execute('SELECT * FROM charactercreator_thief').fetchall()
cleric = curs.execute('SELECT * FROM charactercreator_cleric').fetchall()
fighter = curs.execute('SELECT * FROM charactercreator_fighter').fetchall()
print('# of Mages: ',len(mage))
print('# of Thieves: ',len(thief))
print('# of Clerics: ',len(cleric))
print('# of Fighters: ',len(fighter))
# items in armory_item
item = curs.execute('SELECT * FROM armory_item').fetchall()
print('# of items: ', len(item))
# weapons in armory_weapon
weapon = curs.execute('SELECT * FROM armory_weapon').fetchall()
print('# of weapons: ',len(weapon))
# items for a character (alternative)
char_inv = curs.execute('''SELECT character_id, count(item_id)
FROM charactercreator_character_inventory
GROUP BY character_id''').fetchall()
# weapon ownership by a character
char_w = curs.execute('''SELECT character_id, count(item_id) as weapons
FROM charactercreator_character_inventory as cci, armory_weapon as w
WHERE cci.item_id = w.item_ptr_id
GROUP BY character_id''').fetchall()
print('\n Each character has the following number of weapons:')
for each in char_w[:20]:
print(f'Character {each[0]} has {each[1]} weapon(s).')
# average items and weapons
ave_item = curs.execute('''SELECT avg(items)
FROM (SELECT count(item_id) as items
FROM charactercreator_character_inventory as c
GROUP BY character_id )''').fetchall()[0][0]
ave_weapon = curs.execute('''SELECT avg(weapons)
FROM (SELECT count(item_id) as weapons
FROM charactercreator_character_inventory as c, armory_weapon as w
WHERE c.item_id = w.item_ptr_id
GROUP BY character_id )''').fetchall()[0][0]
print('Average items per character: ', round(ave_item, 2))
print('Average weapons per character: ', round(ave_weapon, 2)) | StarcoderdataPython |
4804476 | from braces.views import LoginRequiredMixin
from django.conf import settings
from model_controller.utils import EXCLUDE_MODEL_CONTROLLER_FIELDS
class ExtendedLoginRequiredMixin(LoginRequiredMixin):
login_url = settings.LOGIN_URL
class ModelControllerAdminMixin(object):
exclude = EXCLUDE_MODEL_CONTROLLER_FIELDS
def save_model(self, request, obj, form, change):
if obj.pk is None:
obj.created_user = request.user
obj.updated_user = request.user
obj.save()
| StarcoderdataPython |
146045 | <reponame>NovaSBE-DSKC/retention-evaluation
def calc_percentage_at_k(test, pred, k):
# sort scores, ascending
pred, test = zip(*sorted(zip(pred, test)))
pred, test = list(pred), list(test)
pred.reverse()
test.reverse()
# calculates number of values to consider
n_percentage = round(len(pred) * k / 100)
# check if predicted is equal to true value, and count it
# set true and false positive counters
tp, fp = 0, 0
for i in range(n_percentage):
# true positive
if test[i] == 1:
tp += 1
precision_at_k = tp / n_percentage
return round(precision_at_k * 100, 2) | StarcoderdataPython |
1799662 | <filename>models/loss/__init__.py
from .loss_functions import listMLE, pointwise_ranking_loss, listwise_ranking_loss
| StarcoderdataPython |
1781245 | <reponame>interactiveinstitute/watthappened<gh_stars>0
from ektime import *
class Value(object):
def __init__(self, *args):
self.set(*args)
def set(self):
raise NotImplementedError
def current(self):
return self.predicted_at(Time.from_s(time.time()))
def predicted_at(self, time):
raise NotImplementedError
def predicted_time_when(self, value):
raise NotImplementedError
class PiecewiseLinearValue(Value):
def set(self, dF=0, dt=1, t1=1, F0=0):
'''
dF is the difference in the primitive of the value
dt is the time window duration in which the value was measured, in s
t1 is the end of the time window, as a Time object
F0 is the value of the primitive at t1-dt, or 0
e.g. a plugwise measurement:
energy E(t0, t1) = dF[Ws] with dt = t1-t0[s] and F0 = E(0, t0)
given by F0 = E(0, t0), F1 = E(0, t1) = F0+dF, dt=t1-t0
power P(t1) = dF[Ws]/1[s] = dF[W] over interval [t1-dt, t1) with dt=1
given by dF, dt=1, t1=now
e.g. if we express presence as 1p or 0p, and presence time as 'pseconds':
presence q(t1) = dF[ps]/1[s] = dF[p] in {0,1} over interval " with "
prestime Q(t0, t1) = dF[ps] with dt = t1-t0[s] and F0 = Q(0, t0)
dF and F0 may be any vector value of the same type.
'''
if not self.zero:
self.zero = dF * 0
self.dF = dF
self.dt = dt
self.t1 = t1.as_s()
self.t0 = self.t1 - dt
if F0 is 0:
self.F0 = self.zero
else:
self.F0 = F0
self.dFdt = dF * (1. / dt)
def predicted_at(self, time):
t = time.as_s()
assert t > self.t0
return self.F0 + self.dFdt * (t - self.t0)
def predicted_time_until(self, value):
F1 = self.F0 + self.dF
value += self.F0
if ((value > F1 and self.dFdt > self.zero) or
(value < F1 and self.dFdt < self.zero)):
seconds = (value - self.F0) / self.dFdt - self.dt
elif value == self.F0:
seconds = 0
else:
seconds = float('inf')
return seconds
class EnergyValue(PiecewiseLinearValue):
unit = 'Ws'
derivative = 'W'
zero = 0
@classmethod
def from_power(cls, P, t=None):
return cls(P, 1, t)
@classmethod
def from_energy(cls, t1, E1, t0=0, E0=0):
return cls(E1 - E0, t1.as_s() - t0.as_s(), t1, E0)
def as_W(self):
return self.dFdt
def as_Wh(self):
return self.dF / 3600
def current_as_Wh(self):
return self.current()
# TODO(sander) / 3600?
def __repr__(self):
return '%.1f Ws over %.1f s' % (self.dF, self.dt)
class ValueType(object):
UNKNOWN = 0
POWER = 1
ENERGY = 2
PRESENCE = 3
PRESENCE_TIME = 4
class PresenceTimeValue(PiecewiseLinearValue):
unit = 'presence s'
derivative = 'presence'
zero = 0
if __name__ == '__main__':
print 'Tests (read the code):'
print
# PlugWise example input
during_last_second = 0.042 # Wh
t = Time()
val = EnergyValue.from_power(during_last_second * 3600, t)
print 'PlugWise reported %.1f W at %s.' % (val.as_W(), t)
energy = val.predicted_at(Time.from_s(t.as_s() + 24 * 3600)) / 3600 / 1000
print 'It this keeps going on for a day, that is %.1f kWh.' % energy
print
# Two (time [s], energy [Ws]) tuples
datapoint1 = (Time.from_s(1370369092), 16.3 * 3600 * 1000)
datapoint2 = (Time.from_s(1370382710), 22.1 * 3600 * 1000)
val = EnergyValue.from_energy(*(datapoint2 + datapoint1))
print '%(P).1f W * %(t).1f h = %(E).1f Wh' % {
'P': val.as_W(),
't': val.dt / 3600,
'E': val.as_Wh()
}
kWh = 10.
until = val.predicted_time_until(kWh * 3600 * 1000)
print 'With this pace, you will reach 10 kWh after %.1f h.' % (until / 3600)
sleep = 2
first = val.current_as_Wh()
time.sleep(sleep)
second = val.current_as_Wh()
difference = (second - first) / 3600
print 'During the last %d seconds, %.1f Wh was added.' % (sleep, difference)
| StarcoderdataPython |
3293995 | # Maze in a 60x60 grid
# Nodes: 620
# Edges: 654
adjList = [
[23, 1],
[45, 2, 0],
[27, 1],
[28, 4],
[31, 3],
[6],
[14, 5],
[17, 8],
[7],
[19],
[60, 11],
[41, 12, 10],
[29, 11],
[22],
[54, 6, 15],
[14],
[34, 17],
[35, 7, 16],
[38, 19],
[39, 9, 18],
[40, 21],
[59, 20],
[30, 13, 23],
[0, 22],
[49, 25],
[46, 24],
[27],
[2, 28, 26],
[3, 27],
[12],
[22],
[4, 32],
[53, 31],
[55, 34],
[16, 33],
[57, 17, 36],
[58, 35],
[82, 38],
[18, 37],
[19, 40],
[20, 39],
[11, 42],
[61, 41],
[64, 44],
[65, 45, 43],
[1, 44],
[25, 47],
[67, 46],
[93, 49],
[24, 48],
[74, 51],
[75, 52, 50],
[76, 51],
[77, 32, 54],
[14, 53],
[33, 56],
[68, 55],
[69, 35, 58],
[36, 57],
[21, 60],
[10, 59],
[42, 62],
[90, 61],
[91, 64],
[43, 63],
[44],
[95, 67],
[47, 66],
[80, 56, 69],
[97, 57, 68],
[83],
[72],
[85, 71],
[88],
[50, 75],
[100, 51, 74],
[52, 77],
[101, 53, 78, 76],
[77],
[103, 80],
[68, 79],
[99, 82],
[37, 81],
[104, 70, 84],
[105, 83],
[106, 72, 86],
[118, 85],
[119, 88],
[73, 87],
[109, 90],
[62, 89],
[111, 63, 92],
[112, 91],
[48, 94],
[113, 93],
[66, 96],
[122, 95],
[69, 98],
[115, 99, 97],
[116, 81, 98],
[75],
[77],
[124, 103],
[79, 102],
[117, 83, 105],
[84, 106, 104],
[85, 105],
[150, 108],
[128, 107],
[89, 110],
[152, 109],
[136, 91, 112],
[92, 113, 111],
[120, 94, 112],
[126],
[98, 116],
[99, 117, 115],
[132, 104, 116],
[86, 119],
[87, 118],
[113],
[140, 122],
[96, 121],
[143, 124],
[102, 123],
[146, 126],
[114, 127, 125],
[126],
[108, 129],
[151, 128],
[168, 131],
[154, 130],
[155, 117, 133],
[156, 132],
[157, 135],
[187, 134],
[159, 111, 137],
[136],
[161, 139],
[172, 138],
[121, 141],
[173, 140],
[162, 143],
[123, 142],
[164, 145],
[166, 146, 144],
[153, 125, 145],
[185, 148],
[167, 147],
[169, 150],
[107, 149],
[129, 152],
[158, 110, 151],
[146],
[131, 155],
[132, 154],
[133, 157],
[134, 156],
[152],
[136],
[197, 161],
[138, 160],
[142, 163],
[179, 164, 162],
[144, 163],
[182, 166],
[145, 165],
[148, 168],
[130, 167],
[149, 170],
[177, 171, 169],
[189, 170],
[139, 173],
[141, 172],
[175],
[195, 176, 174],
[226, 175],
[170],
[199, 179],
[163, 180, 178],
[200, 179],
[201, 182],
[165, 183, 181],
[182],
[203, 185],
[147, 184],
[204],
[135, 188],
[206, 187],
[171, 190],
[210, 189],
[217, 192],
[234, 191],
[213, 194],
[223, 195, 193],
[175, 194],
[216, 197],
[160, 196],
[235, 199],
[178, 198],
[180, 201],
[181, 200],
[220, 203],
[184, 202],
[245, 186, 205],
[214, 206, 204],
[188, 207, 205],
[228, 208, 206],
[229, 207],
[230, 210],
[190, 209],
[233],
[272, 213],
[193, 212],
[205],
[246, 216],
[196, 217, 215],
[191, 216],
[236, 219],
[239, 218],
[202, 221],
[241, 220],
[242, 223],
[194, 222],
[249],
[263, 226],
[176, 225],
[252, 228],
[207, 229, 227],
[208, 228],
[209, 231],
[279, 230],
[258, 233],
[211, 232],
[192, 235],
[198, 236, 234],
[255, 218, 235],
[238],
[256, 239, 237],
[219, 238],
[270, 241],
[221, 240],
[222, 243],
[248, 242],
[264, 245],
[204, 244],
[348, 215, 247],
[246],
[261, 243, 249],
[262, 224, 248],
[273, 251],
[274, 252, 250],
[227, 251],
[276, 254],
[278, 253],
[236],
[238, 257],
[283, 258, 256],
[232, 257],
[296, 260],
[285, 261, 259],
[248, 260],
[286, 249, 263],
[225, 264, 262],
[244, 263],
[301, 266],
[307, 265],
[291, 268],
[303, 267],
[284, 270],
[240, 269],
[294, 272],
[212, 271],
[250, 274],
[316, 251, 273],
[317, 276],
[253, 277, 275],
[298, 278, 276],
[254, 279, 277],
[368, 231, 278],
[327, 281],
[300, 280],
[304, 283],
[257, 284, 282],
[292, 269, 283],
[314, 260, 286],
[262, 285],
[306, 288],
[297, 289, 287],
[315, 288],
[308, 291],
[267, 290],
[284, 293],
[310, 292],
[271],
[312, 296],
[259, 295],
[288],
[277, 299],
[319, 298],
[281, 301],
[265, 300],
[320, 303],
[268, 302],
[282],
[324, 306],
[287, 305],
[266, 308],
[290, 307],
[310],
[293, 309],
[333, 312],
[295, 311],
[335, 314],
[322, 285, 313],
[289, 316],
[274, 315],
[275],
[339, 319],
[299, 318],
[302, 321],
[340, 320],
[314, 323],
[356, 324, 322],
[305, 323],
[326],
[336, 325],
[280, 328],
[359, 327],
[330],
[350, 331, 329],
[330],
[353, 333],
[311, 334, 332],
[355, 333],
[313],
[345, 326, 337],
[336],
[347, 339],
[318, 338],
[321, 341],
[361, 342, 340],
[341],
[364, 344],
[387, 345, 343],
[365, 336, 344],
[389, 347],
[399, 338, 346],
[246, 349],
[378, 348],
[330, 351],
[370, 352, 350],
[371, 351],
[332, 354],
[375, 353],
[334, 356],
[323, 357, 355],
[363, 356],
[367],
[328, 360],
[391, 359],
[341, 362],
[380, 361],
[385, 357, 364],
[343, 363],
[345, 366],
[388, 365],
[430, 358, 368],
[279, 367],
[392, 370],
[351, 371, 369],
[394, 352, 370],
[395],
[396, 374],
[397, 375, 373],
[354, 374],
[400],
[420, 378],
[349, 377],
[404, 380],
[362, 381, 379],
[380],
[407, 383],
[408, 382],
[409, 385],
[363, 384],
[419, 387],
[344, 386],
[366, 389],
[346, 388],
[402],
[360, 392],
[369, 391],
[414, 394],
[371, 395, 393],
[424, 372, 394],
[373, 397],
[374, 398, 396],
[416, 397],
[429, 347, 400],
[452, 376, 399],
[431],
[390, 403],
[423, 402],
[379, 405],
[447, 404],
[437, 407],
[382, 406],
[383, 409],
[417, 384, 408],
[426, 411],
[427, 412, 410],
[428, 411],
[445, 414],
[393, 413],
[434, 416],
[398, 415],
[409, 418],
[440, 419, 417],
[386, 418],
[377, 421],
[466, 420],
[505, 423],
[403, 422],
[433, 395, 425],
[446, 424],
[443, 410, 427],
[411, 426],
[412, 429],
[399, 428],
[367, 431],
[471, 401, 430],
[454, 433],
[474, 424, 432],
[415, 435],
[469, 434],
[456, 437],
[406, 436],
[439],
[449, 440, 438],
[418, 441, 439],
[461, 440],
[443],
[482, 426, 442],
[490, 445],
[413, 444],
[425],
[405, 448],
[468, 447],
[439],
[484, 451],
[485, 450],
[487, 400, 453],
[470, 452],
[473, 432, 455],
[467, 454],
[436, 457],
[477, 458, 456],
[498, 457],
[479, 460],
[507, 459],
[441, 462],
[481, 461],
[511, 464],
[463],
[488, 466],
[421, 465],
[455],
[448, 469],
[435, 468],
[453, 471],
[431, 470],
[491, 473],
[527, 454, 472],
[433, 475],
[493, 476, 474],
[494, 475],
[457],
[500, 479],
[459, 478],
[481],
[462, 482, 480],
[443, 481],
[518, 484],
[450, 485, 483],
[451, 486, 484],
[502, 487, 485],
[452, 486],
[465, 489],
[504, 488],
[444, 491],
[472, 490],
[529, 493],
[475, 492],
[476, 495],
[512, 494],
[513, 497],
[514, 496],
[458, 499],
[515, 500, 498],
[478, 499],
[502],
[520, 486, 503, 501],
[522, 502],
[489, 505],
[422, 506, 504],
[525, 505],
[460, 508],
[536, 507],
[537, 510],
[517, 511, 509],
[463, 510],
[495, 513],
[496, 514, 512],
[533, 497, 513],
[499, 516],
[576, 515],
[510],
[483, 519],
[541, 518],
[502, 521],
[543, 520],
[503],
[546, 524],
[523],
[506, 526],
[548, 525],
[473, 528],
[549, 529, 527],
[492, 528],
[531],
[551, 532, 530],
[552, 531],
[514, 534],
[557, 533],
[559, 536],
[508, 535],
[509, 538],
[582, 537],
[562, 540],
[563, 539],
[519, 542],
[565, 541],
[566, 521, 544],
[568, 543],
[600, 546],
[523, 547, 545],
[569, 546],
[526, 549],
[528, 550, 548],
[578, 549],
[531],
[532, 553],
[575, 552],
[570],
[579, 556],
[592, 555],
[534, 558],
[581, 557],
[535, 560],
[599, 559],
[583, 562],
[584, 539, 561],
[540, 564],
[586, 563],
[542, 566],
[543, 567, 565],
[588, 568, 566],
[589, 544, 567],
[547, 570],
[590, 554, 569],
[591, 572],
[610, 571],
[613, 574],
[595, 575, 573],
[553, 574],
[516, 577],
[576],
[550, 579],
[555, 578],
[616, 581],
[558, 580],
[604, 538, 583],
[605, 561, 584, 582],
[562, 585, 583],
[606, 584],
[564, 587],
[608, 586],
[567, 589],
[609, 568, 588],
[570, 591],
[602, 571, 590],
[611, 556, 593],
[612, 592],
[614, 595],
[574, 594],
[617, 597],
[618, 596],
[619, 599],
[560, 598],
[545, 601],
[600],
[591],
[604],
[582, 605, 603],
[583, 606, 604],
[585, 607, 605],
[606],
[587, 609],
[589, 608],
[572, 611],
[592, 612, 610],
[593, 613, 611],
[573, 612],
[594, 615],
[614],
[580, 617],
[596, 616],
[597, 619],
[598, 618]]
# x coord, y coord
nodeData = [
(5, 1),
(7, 1),
(17, 1),
(19, 1),
(21, 1),
(24, 1),
(25, 1),
(33, 1),
(38, 1),
(44, 1),
(52, 1),
(55, 1),
(60, 1),
(2, 2),
(25, 2),
(26, 2),
(29, 2),
(33, 2),
(41, 2),
(44, 2),
(47, 2),
(50, 2),
(2, 3),
(5, 3),
(10, 3),
(12, 3),
(16, 3),
(17, 3),
(19, 3),
(60, 3),
(2, 4),
(21, 4),
(23, 4),
(27, 4),
(29, 4),
(33, 4),
(36, 4),
(39, 4),
(41, 4),
(44, 4),
(47, 4),
(55, 4),
(57, 4),
(4, 5),
(6, 5),
(7, 5),
(12, 5),
(14, 5),
(8, 6),
(10, 6),
(17, 6),
(19, 6),
(21, 6),
(23, 6),
(25, 6),
(27, 6),
(31, 6),
(33, 6),
(36, 6),
(50, 6),
(52, 6),
(57, 6),
(60, 6),
(2, 7),
(4, 7),
(6, 7),
(12, 7),
(14, 7),
(31, 7),
(33, 7),
(42, 7),
(46, 7),
(47, 7),
(54, 7),
(17, 8),
(19, 8),
(21, 8),
(23, 8),
(25, 8),
(28, 8),
(31, 8),
(37, 8),
(39, 8),
(42, 8),
(44, 8),
(47, 8),
(49, 8),
(52, 8),
(54, 8),
(58, 8),
(60, 8),
(2, 9),
(4, 9),
(8, 9),
(10, 9),
(12, 9),
(16, 9),
(33, 9),
(35, 9),
(37, 9),
(19, 10),
(23, 10),
(25, 10),
(28, 10),
(42, 10),
(44, 10),
(47, 10),
(54, 10),
(56, 10),
(58, 10),
(60, 10),
(2, 11),
(4, 11),
(10, 11),
(31, 11),
(35, 11),
(37, 11),
(42, 11),
(49, 11),
(52, 11),
(10, 12),
(13, 12),
(16, 12),
(21, 12),
(25, 12),
(29, 12),
(31, 12),
(32, 12),
(56, 12),
(58, 12),
(37, 13),
(39, 13),
(42, 13),
(44, 13),
(46, 13),
(49, 13),
(2, 14),
(3, 14),
(6, 14),
(9, 14),
(13, 14),
(16, 14),
(18, 14),
(21, 14),
(23, 14),
(27, 14),
(29, 14),
(32, 14),
(35, 14),
(51, 14),
(54, 14),
(58, 14),
(60, 14),
(29, 15),
(39, 15),
(42, 15),
(44, 15),
(46, 15),
(60, 15),
(2, 16),
(4, 16),
(6, 16),
(18, 16),
(19, 16),
(23, 16),
(25, 16),
(27, 16),
(35, 16),
(37, 16),
(51, 16),
(54, 16),
(57, 16),
(9, 17),
(16, 17),
(40, 17),
(42, 17),
(44, 17),
(54, 17),
(17, 18),
(19, 18),
(21, 18),
(23, 18),
(25, 18),
(27, 18),
(30, 18),
(32, 18),
(47, 18),
(49, 18),
(51, 18),
(57, 18),
(59, 18),
(6, 19),
(9, 19),
(34, 19),
(38, 19),
(42, 19),
(2, 20),
(4, 20),
(11, 20),
(17, 20),
(21, 20),
(23, 20),
(28, 20),
(30, 20),
(47, 20),
(49, 20),
(51, 20),
(52, 20),
(55, 20),
(57, 20),
(59, 20),
(25, 21),
(32, 21),
(34, 21),
(49, 21),
(1, 22),
(2, 22),
(6, 22),
(13, 22),
(21, 22),
(28, 22),
(30, 22),
(35, 22),
(38, 22),
(40, 22),
(42, 22),
(44, 22),
(51, 22),
(52, 22),
(55, 22),
(57, 22),
(59, 22),
(23, 23),
(25, 23),
(9, 24),
(11, 24),
(13, 24),
(16, 24),
(19, 24),
(21, 24),
(27, 24),
(30, 24),
(35, 24),
(38, 24),
(44, 24),
(47, 24),
(1, 25),
(7, 25),
(38, 25),
(40, 25),
(48, 25),
(50, 25),
(51, 25),
(53, 25),
(57, 25),
(13, 26),
(19, 26),
(22, 26),
(23, 26),
(35, 26),
(37, 26),
(38, 26),
(40, 26),
(42, 26),
(44, 26),
(8, 27),
(10, 27),
(14, 27),
(17, 27),
(24, 27),
(27, 27),
(29, 27),
(32, 27),
(48, 27),
(50, 27),
(52, 27),
(53, 27),
(55, 27),
(57, 27),
(59, 27),
(3, 28),
(6, 28),
(20, 28),
(22, 28),
(24, 28),
(37, 28),
(40, 28),
(42, 28),
(44, 28),
(47, 28),
(12, 29),
(14, 29),
(24, 29),
(26, 29),
(29, 29),
(33, 29),
(35, 29),
(44, 29),
(55, 29),
(57, 29),
(6, 30),
(8, 30),
(15, 30),
(17, 30),
(20, 30),
(40, 30),
(42, 30),
(10, 31),
(12, 31),
(23, 31),
(26, 31),
(28, 31),
(33, 31),
(35, 31),
(37, 31),
(47, 31),
(50, 31),
(52, 31),
(55, 31),
(57, 31),
(15, 32),
(18, 32),
(37, 32),
(38, 32),
(40, 32),
(44, 32),
(46, 32),
(3, 33),
(5, 33),
(8, 33),
(11, 33),
(12, 33),
(27, 33),
(28, 33),
(31, 33),
(35, 33),
(46, 33),
(47, 33),
(53, 33),
(55, 33),
(18, 34),
(20, 34),
(24, 34),
(41, 34),
(44, 34),
(46, 34),
(50, 34),
(53, 34),
(1, 35),
(3, 35),
(11, 35),
(13, 35),
(15, 35),
(27, 35),
(29, 35),
(31, 35),
(38, 35),
(39, 35),
(57, 35),
(5, 36),
(8, 36),
(20, 36),
(22, 36),
(39, 36),
(41, 36),
(46, 36),
(48, 36),
(57, 36),
(59, 36),
(11, 37),
(13, 37),
(15, 37),
(17, 37),
(25, 37),
(27, 37),
(29, 37),
(55, 37),
(1, 38),
(3, 38),
(19, 38),
(22, 38),
(23, 38),
(32, 38),
(34, 38),
(36, 38),
(39, 38),
(42, 38),
(44, 38),
(48, 38),
(50, 38),
(5, 39),
(8, 39),
(11, 39),
(13, 39),
(15, 39),
(17, 39),
(25, 39),
(27, 39),
(28, 39),
(53, 39),
(55, 39),
(59, 39),
(5, 40),
(7, 40),
(19, 40),
(21, 40),
(30, 40),
(32, 40),
(34, 40),
(36, 40),
(45, 40),
(47, 40),
(49, 40),
(10, 41),
(13, 41),
(24, 41),
(28, 41),
(36, 41),
(37, 41),
(42, 41),
(1, 42),
(3, 42),
(5, 42),
(7, 42),
(17, 42),
(19, 42),
(45, 42),
(47, 42),
(49, 42),
(53, 42),
(57, 42),
(59, 42),
(12, 43),
(17, 43),
(24, 43),
(26, 43),
(28, 43),
(30, 43),
(33, 43),
(34, 43),
(37, 43),
(41, 43),
(44, 43),
(45, 43),
(8, 44),
(10, 44),
(19, 44),
(21, 44),
(23, 44),
(34, 44),
(51, 44),
(53, 44),
(55, 44),
(57, 44),
(12, 45),
(14, 45),
(28, 45),
(29, 45),
(31, 45),
(36, 45),
(38, 45),
(41, 45),
(43, 45),
(47, 45),
(48, 45),
(1, 46),
(3, 46),
(14, 46),
(23, 46),
(26, 46),
(57, 46),
(59, 46),
(10, 47),
(12, 47),
(17, 47),
(19, 47),
(21, 47),
(29, 47),
(34, 47),
(36, 47),
(41, 47),
(43, 47),
(45, 47),
(49, 47),
(51, 47),
(53, 47),
(54, 47),
(55, 47),
(1, 48),
(3, 48),
(8, 49),
(10, 49),
(15, 49),
(19, 49),
(21, 49),
(25, 49),
(27, 49),
(29, 49),
(31, 49),
(32, 49),
(34, 49),
(53, 49),
(54, 49),
(60, 49),
(3, 50),
(5, 50),
(7, 50),
(38, 50),
(40, 50),
(42, 50),
(45, 50),
(47, 50),
(25, 51),
(27, 51),
(29, 51),
(32, 51),
(35, 51),
(45, 51),
(49, 51),
(52, 51),
(54, 51),
(56, 51),
(60, 51),
(2, 52),
(4, 52),
(7, 52),
(9, 52),
(12, 52),
(13, 52),
(15, 52),
(18, 52),
(21, 52),
(23, 52),
(29, 52),
(31, 52),
(38, 52),
(40, 52),
(42, 53),
(44, 53),
(48, 53),
(50, 53),
(52, 53),
(54, 53),
(56, 53),
(59, 53),
(1, 54),
(2, 54),
(4, 54),
(9, 54),
(13, 54),
(14, 54),
(21, 54),
(23, 54),
(29, 54),
(7, 55),
(16, 55),
(19, 55),
(31, 55),
(33, 55),
(38, 55),
(41, 55),
(46, 55),
(48, 55),
(50, 55),
(52, 55),
(54, 55),
(56, 55),
(57, 55),
(59, 55),
(4, 56),
(7, 56),
(9, 56),
(12, 56),
(24, 56),
(28, 56),
(29, 56),
(35, 56),
(36, 56),
(14, 57),
(16, 57),
(31, 57),
(33, 57),
(44, 57),
(46, 57),
(48, 57),
(50, 57),
(52, 57),
(54, 57),
(57, 57),
(59, 57),
(7, 58),
(9, 58),
(19, 58),
(22, 58),
(26, 58),
(28, 58),
(34, 58),
(36, 58),
(38, 58),
(41, 58),
(1, 59),
(5, 59),
(9, 59),
(43, 59),
(44, 59),
(46, 59),
(50, 59),
(51, 59),
(54, 59),
(59, 59),
(12, 60),
(19, 60),
(22, 60),
(24, 60),
(26, 60),
(27, 60),
(31, 60),
(34, 60),
(36, 60),
(38, 60)]
| StarcoderdataPython |
169947 | <reponame>peter88213/PyWriter
"""Provide a generic class for csv file import.
Other csv file representations inherit from this class.
Copyright (c) 2021 <NAME>
For further information see https://github.com/peter88213/PyWriter
Published under the MIT License (https://opensource.org/licenses/mit-license.php)
"""
import os
import csv
from pywriter.model.novel import Novel
class CsvFile(Novel):
"""csv file representation.
- Records are separated by line breaks.
- Data fields are delimited by the _SEPARATOR character.
"""
EXTENSION = '.csv'
# overwrites Novel.EXTENSION
_SEPARATOR = ','
# delimits data fields within a record.
rowTitles = []
def read(self):
"""Parse the csv file located at filePath, fetching the rows.
Check the number of fields in each row.
Return a message beginning with SUCCESS or ERROR.
Override the superclass method.
"""
self.rows = []
cellsPerRow = len(self.rowTitles)
try:
with open(self.filePath, newline='', encoding='utf-8') as f:
reader = csv.reader(f, delimiter=self._SEPARATOR)
for row in reader:
# Each row read from the csv file is returned
# as a list of strings
if len(row) != cellsPerRow:
return 'ERROR: Wrong csv structure.'
self.rows.append(row)
except(FileNotFoundError):
return 'ERROR: "' + os.path.normpath(self.filePath) + '" not found.'
except:
return 'ERROR: Can not parse "' + os.path.normpath(self.filePath) + '".'
return 'SUCCESS'
def get_list(self, text):
"""Split a sequence of comma separated strings into a list of strings.
Remove leading and trailing spaces, if any.
"""
elements = []
tempList = text.split(',')
for element in tempList:
elements.append(element.lstrip().rstrip())
return elements
| StarcoderdataPython |
198153 | <reponame>pauloubuntu/ocr-processing-service
__author__ = 'paulo.rodenas'
from datetime import datetime
class Date(object):
BRAZILIAN_FORMAT = '%d/%m/%Y'
@staticmethod
def parse(date_str, date_format=BRAZILIAN_FORMAT):
try:
return datetime.strptime(date_str, date_format)
except ValueError:
return None
@staticmethod
def format(date, date_format=BRAZILIAN_FORMAT):
try:
return datetime.strftime(date, date_format)
except ValueError:
return None | StarcoderdataPython |
3292417 | <gh_stars>0
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from structuredDataCrawler.items import PipeSeedItem
from structuredDataCrawler.items import SeedItem
from collections import defaultdict
from pymongo import Connection
import pymongo
import MySQLdb as mdb
import json
import ConfigParser
from scrapy import log
from spider_utilities import domain_extractor, tld_extractor
class CrawlSeedPipeline(object):
def process_item(self, item, spider):
# we make the assumption that we have only one url in the list of start_urls due to the usage of our spiders
# for iurl in spider.start_urls:
# log.msg("In CrawlSeedPipeline pipeline for: "+iurl,log.INFO)
new_item = PipeSeedItem()
new_item['main_url'] = item['main_url']
new_item['seed_url'] = item['seed_url']
new_item['seed_response_url'] = item['seed_response_url']
new_item['RDFa_namespaces'] = sorted(set(item['RDFa_namespaces']))
new_item['microformats'] = sorted(set(item['microformats']))
new_item['microformats2'] = sorted(set(item['microformats2']))
new_item['rss'] = sorted(set(item['rss']))
new_item['atom'] = sorted(set(item['atom']))
new_item['sitemap'] = item['sitemap']
new_item['headRDFLinks'] = sorted(set(item['headRDFLinks']))
new_item['metatags'] = sorted(set(item['metatags']))
new_item['RDFa_typeOf'] = sorted(set(item['RDFa_typeOf']))
# We remove the duplicates
external_links = sorted(set(item['all_external_links']))
new_item['microdata_types'] = sorted(set(item['microdata_types']))
new_item['iframes'] = sorted(set(item['iframes']))
new_item['CMS_type'] = item['CMS_type']
new_item['server_type'] = item['server_type']
# Populate the new item
new_item['schema_types'] = []
new_item['twitter'] = []
new_item['facebook'] = []
new_item['gplus'] = []
new_item['tripadvisor'] = []
new_item['review_widgets_links'] = []
new_item['other_external_links'] = []
for itype in new_item['microdata_types']:
if "schema.org" in itype:
new_item['schema_types'].append(itype)
# Cluster the extracted links in categories
for ilink in external_links:
if "plus.google.com" in ilink:
new_item['gplus'].append(ilink)
elif "facebook.com" in ilink:
new_item['facebook'].append(ilink)
elif "twitter.com" in ilink:
new_item['twitter'].append(ilink)
elif ("tripadvisor" in ilink) or ("trustyou" in ilink) or ("hotelnavigators" in ilink) or (
"customer-alliance" in ilink):
new_item['review_widgets_links'].append(ilink)
if ("tripadvisor" in ilink):
new_item['tripadvisor'].append(ilink)
else:
new_item['other_external_links'].append(ilink)
return new_item
class AggregationPipeline(object):
# This pipeline aggregates the scraped data per seed.
def __init__(self):
# we save the scraped items per seed in a list
self.dictSeed_items = defaultdict(list)
self.file_aggr = open('items_aggr.json', 'a')
def process_item(self, item, spider):
h_url = item['main_url']
# add the scraped item to the respective seed
if not self.dictSeed_items[h_url]:
self.dictSeed_items[h_url] = item
else:
aggr_item = self.dictSeed_items[h_url]
for ilink in item['RDFa_namespaces']:
if not ilink in aggr_item['RDFa_namespaces']:
aggr_item['RDFa_namespaces'].append(ilink)
for ilink in item['microformats']:
if not ilink in aggr_item['microformats']:
aggr_item['microformats'].append(ilink)
for ilink in item['microformats2']:
if not ilink in aggr_item['microformats2']:
aggr_item['microformats2'].append(ilink)
for ilink in item['rss']:
if not ilink in aggr_item['rss']:
aggr_item['rss'].append(ilink)
for ilink in item['microdata_types']:
if not ilink in aggr_item['microdata_types']:
aggr_item['microdata_types'].append(ilink)
for ilink in item['iframes']:
if not ilink in aggr_item['iframes']:
aggr_item['iframes'].append(ilink)
for ilink in item['CMS_type']:
if not ilink in aggr_item['CMS_type']:
aggr_item['CMS_type'].append(ilink)
for ilink in item['server_type']:
if not ilink in aggr_item['server_type']:
aggr_item['server_type'].append(ilink)
for ilink in item['schema_types']:
if not ilink in aggr_item['schema_types']:
aggr_item['schema_types'].append(ilink)
for ilink in item['twitter']:
if not ilink in aggr_item['twitter']:
aggr_item['twitter'].append(ilink)
for ilink in item['facebook']:
if not ilink in aggr_item['facebook']:
aggr_item['facebook'].append(ilink)
for ilink in item['gplus']:
if not ilink in aggr_item['gplus']:
aggr_item['gplus'].append(ilink)
for ilink in item['tripadvisor']:
if not ilink in aggr_item['tripadvisor']:
aggr_item['tripadvisor'].append(ilink)
for ilink in item['review_widgets_links']:
if not ilink in aggr_item['review_widgets_links']:
aggr_item['review_widgets_links'].append(ilink)
for ilink in item['other_external_links']:
if not ilink in aggr_item['other_external_links']:
aggr_item['other_external_links'].append(ilink)
self.dictSeed_items[h_url] = aggr_item
return item
def close_spider(self, spider):
for k, v in self.dictSeed_items.iteritems():
line = json.dumps(dict(v)) + "\n"
self.file_aggr.write(line)
self.file_aggr.close()
class MongoDBItemsPipeline(object):
# This pipeline inserts the items in MongoDB.
def __init__(self):
# We save the number of scraped items per seed
self.dictSeed = defaultdict(int)
def process_item(self, item, spider):
# log.msg("In MongoDBItemsPipeline pipeline for: "+spider.start_urls[0]+", "+item['main_url'],log.INFO)
h_url = item['main_url']
h_response = item['seed_response_url']
h_response_domain = domain_extractor(h_response)
if (h_response_domain in h_url):
# increase the number of scraped items for the respective seed
if not self.dictSeed[h_url]:
self.dictSeed[h_url] = 1
else:
self.dictSeed[h_url] += 1
line = json.dumps(dict(item))
# Save the scrapped items in MongoDB
# Loading the MongoDB credentials from the settings file
Config = ConfigParser.ConfigParser()
Config.read("..//settings.cfg")
db_hostname = Config.get("MongoDB", "db_hostname")
db_port = Config.get("MongoDB", "db_port")
db_database = Config.get("MongoDB", "db_database")
db_collection = Config.get("MongoDB", "db_collection_items")
conn = Connection(db_hostname, int(db_port))
db = conn[db_database]
coll = db[db_collection]
coll.insert(json.loads(line))
return item
else:
raise DropItem("The spider brought irrelevant item: " + h_response_domain + " for seed:" + h_url)
class JsonWriterPipeline(object):
def __init__(self):
self.file = open('items.json', 'a')
def process_item(self, item, spider):
line = json.dumps(dict(item)) + "\n"
self.file.write(line)
return item
| StarcoderdataPython |
1664770 | <reponame>adolphus-c/AI_LAB<gh_stars>0
import re
def getAttributes(string):
expr = '\([^)]+\)'
matches = re.findall(expr, string)
return [m for m in str(matches) if m.isalpha()]
def getPredicates(string):
expr = '[a-z~]+\([A-Za-z,]+\)'
return re.findall(expr, string)
def DeMorgan(sentence):
string = ''.join(list(sentence).copy())
string = string.replace('~~','')
flag = '[' in string
string = string.replace('~[','')
string = string.strip(']')
for predicate in getPredicates(string):
string = string.replace(predicate, f'~{predicate}')
s = list(string)
for i, c in enumerate(string):
if c == 'V':
s[i] = '^'
elif c == '^':
s[i] = 'V'
string = ''.join(s)
string = string.replace('~~','')
return f'[{string}]' if flag else string
def Skolemization(sentence):
SKOLEM_CONSTANTS = [f'{chr(c)}' for c in range(ord('A'), ord('Z')+1)]
statement = ''.join(list(sentence).copy())
matches = re.findall('[∀∃].', statement)
for match in matches[::-1]:
statement = statement.replace(match, '')
statements = re.findall('\[\[[^]]+\]]', statement)
for s in statements:
statement = statement.replace(s, s[1:-1])
for predicate in getPredicates(statement):
attributes = getAttributes(predicate)
if ''.join(attributes).islower():
statement = statement.replace(match[1],SKOLEM_CONSTANTS.pop(0))
else:
aL = [a for a in attributes if a.islower()]
aU = [a for a in attributes if not a.islower()][0]
statement = statement.replace(aU, f'{SKOLEM_CONSTANTS.pop(0)}({aL[0] if len(aL) else match[1]})')
return statement
def fol_to_cnf(fol):
statement = fol.replace("<=>", "_")
while '_' in statement:
i = statement.index('_')
new_statement = '[' + statement[:i] + '=>' + statement[i+1:] + ']^['+ statement[i+1:] + '=>' + statement[:i] + ']'
statement = new_statement
statement = statement.replace("=>", "-")
expr = '\[([^]]+)\]'
statements = re.findall(expr, statement)
for i, s in enumerate(statements):
if '[' in s and ']' not in s:
statements[i] += ']'
for s in statements:
statement = statement.replace(s, fol_to_cnf(s))
while '-' in statement:
i = statement.index('-')
br = statement.index('[') if '[' in statement else 0
new_statement = '~' + statement[br:i] + 'V' + statement[i+1:]
statement = statement[:br] + new_statement if br > 0 else new_statement
while '~∀' in statement:
i = statement.index('~∀')
statement = list(statement)
statement[i], statement[i+1], statement[i+2] = '∃', statement[i+2], '~'
statement = ''.join(statement)
while '~∃' in statement:
i = statement.index('~∃')
s = list(statement)
s[i], s[i+1], s[i+2] = '∀', s[i+2], '~'
statement = ''.join(s)
statement = statement.replace('~[∀','[~∀')
statement = statement.replace('~[∃','[~∃')
expr = '(~[∀V∃].)'
statements = re.findall(expr, statement)
for s in statements:
statement = statement.replace(s, fol_to_cnf(s))
expr = '~\[[^]]+\]'
statements = re.findall(expr, statement)
for s in statements:
statement = statement.replace(s, DeMorgan(s))
return statement
def main():
print("Enter FOL:")
fol = input()
print("The CNF form of the given FOL is: ")
print(Skolemization(fol_to_cnf(fol)))
main()
# Test 1
# Enter FOL:
# ∀x food(x) => likes(John, x)
# The CNF form of the given FOL is:
# ~ food(A) V likes(John, A)
# Test 2
# Enter FOL:
# ∀x[∃z[loves(x,z)]]
# The CNF form of the given FOL is:
# [loves(x,B(x))] | StarcoderdataPython |
4811884 | <gh_stars>0
# -*- coding: utf-8 -*-
'''Test cases for Sequenceops module.'''
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import unittest
from functools import reduce
from future.builtins import (ascii, filter, hex, map, oct, zip, range)
from intro_py import util
from intro_py.practice import sequenceops as seqops
LST, REVLST = list(range(0, 5)), list(range(4, -1, -1))
def setUpModule():
'''Set up (module-level) test fixtures, if any.'''
print('Setup module: {0}'.format(__name__))
def tearDownModule():
'''Tear down (module-level) test fixtures, if any.'''
print('Teardown module: {0}'.format(__name__))
class TestSequenceops(unittest.TestCase):
'''Tests for Sequenceops module.'''
@classmethod
def setUpClass(cls):
'''Set up (class-level) test fixtures, if any.'''
print('Setup class: {0}'.format(cls.__name__))
@classmethod
def tearDownClass(cls):
'''Tear down (class-level) test fixtures, if any.'''
print('\nTeardown class: {0}'.format(cls.__name__))
def setUp(self):
'''Set up test fixtures, if any.'''
print('Setup method: {0}'.format(self._testMethodName))
def tearDown(self):
'''Tear down test fixtures, if any.'''
print('Teardown method: {0}'.format(self.id().split('.')[-1]))
def test_tabulate(self):
from itertools import count
def proc_id(el): return el
def proc1(el): return 32 // (2 ** el)
for func, cnt in [(proc_id, 5), (proc1, 5)]:
ans_gen = map(func, count())
ans = [next(ans_gen) for i in range(cnt)]
for fn1 in [seqops.tabulate_i, seqops.tabulate_r, seqops.tabulate_lp]:
self.assertEqual(ans, fn1(func, cnt))
def test_length(self):
for xss in [list(range(len1)) for len1 in [0, 3, 5, 7]]:
ans = len(xss)
for fn1 in [seqops.length_i, seqops.length_r, seqops.length_lp]:
self.assertEqual(ans, fn1(xss))
def test_nth(self):
for xss in [LST, REVLST]:
ans = xss[3]
for fn1 in [seqops.nth_i, seqops.nth_r, seqops.nth_lp]:
self.assertEqual(ans, fn1(3, xss))
def test_index(self):
for xss in [LST, REVLST]:
ans = xss.index(3)
for fn1 in [seqops.index_i, seqops.index_r, seqops.index_lp]:
self.assertEqual(ans, fn1(3, xss))
self.assertEqual(-1, fn1(-20, xss))
def test_find(self):
for xss in [LST, REVLST]:
ans = 3
for fn1 in [seqops.find_i, seqops.find_r, seqops.find_lp]:
self.assertEqual(ans, fn1(3, xss))
def test_min_max(self):
for xss in [LST, REVLST]:
ans_min, ans_max = min(xss), max(xss)
for (fn_min, fn_max) in [(seqops.min_i, seqops.max_i),
(seqops.min_r, seqops.max_r), (seqops.min_lp, seqops.max_lp)]:
self.assertEqual(ans_min, fn_min(xss))
self.assertEqual(ans_max, fn_max(xss))
def test_reverse(self):
for xss in [LST, REVLST]:
ans = list(reversed(xss[:]))
for fn1 in [seqops.reverse_i, seqops.reverse_r, seqops.reverse_lp]:
self.assertEqual(ans, fn1(xss))
def test_reverse_mut(self):
for xss in [LST, REVLST]:
exp_lst = xss[:]
exp_lst.reverse()
for fn1 in [seqops.reverse_mut_i, seqops.reverse_mut_lp]:
act_lst = xss[:]
fn1(act_lst)
self.assertEqual(exp_lst, act_lst)
def test_copy_of(self):
for xss in [LST, REVLST]:
ans = xss[:]
for fn1 in [seqops.copy_of_i, seqops.copy_of_r, seqops.copy_of_lp]:
self.assertEqual(ans, fn1(xss))
def test_take_drop(self):
for xss in [LST, REVLST]:
ans_take = reduce(lambda a, i_e: a + [i_e[1]] if 3 > i_e[0] else a,
enumerate(xss), [])
ans_drop = reduce(lambda a, i_e: a + [i_e[1]] if 3 <= i_e[0] else a,
enumerate(xss), [])
for (fn_take, fn_drop) in [(seqops.take_i, seqops.drop_i),
(seqops.take_lp, seqops.drop_lp)]:
self.assertEqual(ans_take, fn_take(3, xss))
self.assertEqual(ans_drop, fn_drop(3, xss))
for fn1 in [seqops.split_at_i, seqops.split_at_lp]:
res = fn1(3, xss)
self.assertEqual(ans_take, res[0])
self.assertEqual(ans_drop, res[1])
def test_any_all(self):
def pred1(el): return 0 == el % 2
def pred2(el): return [] != el
for (pred, xss) in [(pred1, [1, 2, 3]), (pred2, [[1, 2], [], [3, 4]]),
(pred1, [6, 2, 4]), (pred2, [[1, 2], [5], [3, 4]])]:
ans_any = reduce(lambda a, e: a or pred(e), xss, False)
ans_all = reduce(lambda a, e: a and pred(e), xss, True)
for (fn_any, fn_all) in [(seqops.any_i, seqops.all_i),
(seqops.any_r, seqops.all_r), (seqops.any_lp, seqops.all_lp)]:
self.assertEqual(ans_any, fn_any(pred, xss))
self.assertEqual(ans_all, fn_all(pred, xss))
def test_map(self):
def proc(el): return el + 2
for xss in [LST, REVLST]:
ans = list(map(proc, xss))
for fn1 in [seqops.map_i, seqops.map_r, seqops.map_lp]:
self.assertEqual(ans, fn1(proc, xss))
def test_foreach(self):
def proc(el): print('{0} '.format(el))
for xss in [LST, REVLST]:
ans = None
for fn1 in [seqops.foreach_i, seqops.foreach_r, seqops.foreach_lp]:
self.assertEqual(ans, fn1(proc, xss))
def test_filter_remove(self):
def pred1(el): return 0 == el % 2
for xss in [LST, REVLST]:
ans_filter = reduce(lambda a, e: a + [e] if pred1(e) else a,
xss, [])
ans_remove = reduce(lambda a, e: a + [e] if not pred1(e) else a,
xss, [])
for (fn_f, fn_r) in [(seqops.filter_i, seqops.remove_i),
(seqops.filter_r, seqops.remove_r),
(seqops.filter_lp, seqops.remove_lp)]:
self.assertEqual(ans_filter, fn_f(pred1, xss))
self.assertEqual(ans_remove, fn_r(pred1, xss))
for fn1 in [seqops.partition_i, seqops.partition_r,
seqops.partition_lp]:
res = fn1(pred1, xss)
self.assertEqual(ans_filter, res[0])
self.assertEqual(ans_remove, res[1])
def test_fold_left(self):
def corp1(a, e): return a + e
def corp2(a, e): return a - e
for xss in [LST, REVLST]:
ans1 = reduce(corp1, xss, 0)
ans2 = reduce(corp2, xss, 0)
for fn1 in [seqops.fold_left_i, seqops.fold_left_r,
seqops.fold_left_lp]:
self.assertEqual(ans1, fn1(corp1, 0, xss))
self.assertEqual(ans2, fn1(corp2, 0, xss))
def test_fold_right(self):
def proc1(e, a): return e + a
def proc2(e, a): return e - a
for xss in [LST, REVLST]:
ans1 = reduce(lambda a, e: proc1(e, a), reversed(xss), 0)
ans2 = reduce(lambda a, e: proc2(e, a), reversed(xss), 0)
for fn1 in [seqops.fold_right_i, seqops.fold_right_r,
seqops.fold_right_lp]:
self.assertEqual(ans1, fn1(proc1, xss, 0))
self.assertEqual(ans2, fn1(proc2, xss, 0))
def test_unfold_right(self):
def func1(fst_snd):
(fst, snd) = fst_snd
if 0 == snd: return None
return (fst, (fst + 1, snd - fst))
def func2(fst_snd):
(fst, snd) = fst_snd
if 0 == snd: return None
return (fst, (fst + 1, fst - snd))
for fn1 in [seqops.unfold_right_i, seqops.unfold_right_lp]:
self.assertEqual([4, 3, 2, 1, 0], fn1(func1, (0, 10)))
self.assertEqual([4, 3, 2, 1, 0], fn1(func2, (0, 2)))
def test_unfold_left(self):
def func1(fst_snd):
(fst, snd) = fst_snd
if 0 == snd: return None
return (fst, (fst + 1, snd - fst))
def func2(fst_snd):
(fst, snd) = fst_snd
if 0 == snd: return None
return (fst, (fst + 1, fst + snd))
for fn1 in [seqops.unfold_left_r, seqops.unfold_left_lp]:
self.assertEqual([0, 1, 2, 3, 4], fn1(func1, (0, 10)))
self.assertEqual([0, 1, 2, 3, 4], fn1(func2, (0, -10)))
def test_is_ordered(self):
for xss in [LST, REVLST, ['a', 'b', 'b', 'c'], ['c', 'b', 'a', 'a']]:
ansrev = reduce(lambda a, i: a and xss[i] >= xss[i + 1],
range(len(xss) - 1), True)
ans = reduce(lambda a, i: a and xss[i] <= xss[i + 1],
range(len(xss) - 1), True)
for fn1 in [seqops.is_ordered_i, seqops.is_ordered_r,
seqops.is_ordered_lp]:
self.assertEqual(ansrev, fn1(xss, reverse=True))
self.assertEqual(ans, fn1(xss))
def test_sort(self):
for xss in [['a', 'd', 'b', 'c'], ['c', 'b', 'a', 'e'], LST, REVLST]:
ansrev, ans = sorted(xss, reverse=True), sorted(xss)
for fn1 in [seqops.quick_sort]:
lst_rev, lst = xss[:], xss[:]
fn1(lst_rev, 0, len(xss) - 1, reverse=True)
fn1(lst, 0, len(xss) - 1)
self.assertEqual(ansrev, lst_rev)
self.assertEqual(ans, lst)
for fn_verify in [seqops.is_ordered_i, seqops.is_ordered_r,
seqops.is_ordered_lp]:
self.assertTrue(fn_verify(lst_rev, reverse=True))
self.assertTrue(fn_verify(lst))
def test_append(self):
lst2 = [9, 9, 9, 9]
lst, revlst = seqops.copy_of_i(LST), seqops.copy_of_i(REVLST)
for xss in [lst, revlst]:
ans = (xss + lst2)
for fn1 in [seqops.append_i, seqops.append_r, seqops.append_lp]:
self.assertEqual(ans, fn1(xss, lst2))
def test_interleave(self):
lst2 = [9, 9, 9, 9]
for fn1 in [seqops.interleave_i, seqops.interleave_r,
seqops.interleave_lp]:
self.assertEqual([0, 9, 1, 9, 2, 9, 3, 9, 4], fn1(LST, lst2))
def test_map2(self):
def proc(e1, e2): return e1 + e2 + 2
for xss in [LST, REVLST]:
ans = reduce(lambda a, i: a + [proc(xss[i], xss[i])],
range(len(xss)), [])
for fn1 in [seqops.map2_i, seqops.map2_r, seqops.map2_lp]:
self.assertEqual(ans, fn1(proc, xss, xss))
def test_zip(self):
lst1, lst2 = [0, 1, 2], [20, 30, 40]
ans = list(zip(lst1, lst2))
for fn1 in [seqops.zip_i, seqops.zip_r, seqops.zip_lp]:
self.assertEqual(ans, fn1(lst1, lst2))
def test_unzip(self):
lst = [(0, 20), (1, 30)]
ans = list(zip(*lst))
for fn1 in [seqops.unzip_i]:
self.assertEqual(ans, fn1(lst))
def test_concat(self):
nlst1, nlst2 = [[0, 1, 2], [20, 30]], [[[0, 1]], [], [[20, 30]]]
for nlst in [nlst1, nlst2]:
ans = reduce(lambda a, e: a + e, nlst, [])
for fn1 in [seqops.concat_i, seqops.concat_r, seqops.concat_lp]:
self.assertEqual(ans, fn1(nlst))
| StarcoderdataPython |
1799756 | <gh_stars>1000+
# coding=utf-8
from .test_default_mirror import TestDefaultMirror
from .test_httpbin import TestHttpbin
from .test_verification import TestVerification, TestVerificationSingleAnswer
from .test_cache_system import TestCacheSystem
from .test_cdn import TestCDN
from .test_redirection import TestRedirection
from .test_functions import TestFunctions
from .test_custom_response_text_rewrite import TestCustomResponseRewriter
from .test_developer_functions import TestDeveloperFunctions
from .test_non_standard_port import TestNonStandardPort
from .test_regex import TestRegex
from .test_connection_pool import TestConnectionPool
from .test_custom_content_injection import TestContentInjection
| StarcoderdataPython |
3213957 | import logging
import snowflake
import snowflake.connector.errors
from ubiops_connector import InputConnector, ConnectorError, RecoverableConnectorError, get_variable, retry
logger = logging.getLogger('Snowflake Connector')
class Deployment(InputConnector):
"""
Snowflake connector
"""
def __init__(self, base_directory, context):
"""
:param str base_directory: absolute path to the directory where the deployment file is located
:param dict context: a dictionary containing details of the deployment that might be useful in your code
"""
InputConnector.__init__(self, base_directory, context)
self.connection = None
self.database = None
self.schema = None
def connect(self):
"""
Connect to a Snowflake database based on the environment variables specified.
"""
self.connection = None
self.database = get_variable('DATABASE')
self.schema = get_variable('SCHEMA', 'public')
try:
self.connection = snowflake.connector.connect(
user=get_variable('USERNAME'),
password=get_variable('PASSWORD'),
account=get_variable('ACCOUNT'),
database=self.database
)
except snowflake.connector.errors.DatabaseError as e:
raise RecoverableConnectorError(f"Failed to connect to database: {e}")
except Exception as e:
raise RecoverableConnectorError(f'Unknown error occurred when connecting to database {e}')
@retry(attempts=3)
def retrieve(self):
"""
Retrieve data from Snowflake by executing the string query.
:return dict|list: dictionary with the values expected as output of the deployment, or a list of those
dictionaries
"""
# Including the connect in the retrieve method such that it can benefit from retrying
if not self.connection:
self.connect()
try:
logger.info("Retrieving data")
cursor = self.connection.cursor(snowflake.connector.DictCursor)
# Use the schema that is specified by the user
cursor.execute(f"USE SCHEMA {self.database}.{self.schema}")
cursor.execute(self.get_query())
result = cursor.fetchall()
logger.info(f"Retrieved {len(result)} rows")
# Convert all database columns to lower case
result = [{k.lower(): v for k, v in x.items()} for x in result]
# Return the result, assuming that the column names returned from the query are matching the deployment
# output fields
return result
except snowflake.connector.errors.ProgrammingError as e:
raise ConnectorError(f"Invalid query or schema error: {e}")
except snowflake.connector.errors.DatabaseError as e:
raise ConnectorError(f"Error while fetching data: {e}")
def stop(self):
"""
Close connection to the database if the connection has been initialised
"""
try:
self.connection.close()
except Exception:
pass
self.connection = None
@staticmethod
def get_query():
"""
Gets query string, either from the environment variable or, if that is not provided, the hardcoded query
:return str: the query string that will run when a request is made
"""
# You can hard code a query here, that will be used if the QUERY environment variable is not set.
default_query = "SELECT * FROM product WHERE price < 2.5;"
env_query = get_variable('QUERY', default_query)
if env_query and len(env_query) > 0:
return env_query
return default_query
| StarcoderdataPython |
162396 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 1998-2005 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
import pythia.pyre.geometry.solids
from .AbstractNode import AbstractNode
class Block(AbstractNode):
tag = "block"
def notify(self, parent):
block = pythia.pyre.geometry.solids.block(diagonal=self._diagonal)
parent.onBlock(block)
return
def __init__(self, document, attributes):
AbstractNode.__init__(self, attributes)
self._diagonal = self._parse(attributes["diagonal"])
return
# version
__id__ = "$Id: Block.py,v 1.1.1.1 2005/03/08 16:13:45 aivazis Exp $"
# End of file
| StarcoderdataPython |
3365550 | import os
import time
from collections import defaultdict
from itertools import combinations, permutations
from lib.report.report_tools import global_track_dt
from lib.filters.structural_QC import identify_intronic_transcripts
from lib.parsing.gtf_object_tools import create_gtf_object, write_gtf
from lib.tools.other_tools import get_transcript_length, get_longest_transcript
from lib.tools.other_tools import group_transcripts_by_overlap, group_transcripts_across_strands
def identify_unstranded_transcripts(gtf_obj):
print(time.asctime(), f"Identifying unstranded transcripts")
invalid_strand_trans = set()
for trans_id, trans_strand in gtf_obj.trans_sense_dt.items():
if trans_strand not in {"+", "-"}:
invalid_strand_trans.add(trans_id)
return invalid_strand_trans
def identify_monoexon_gene_transcripts(gtf_obj, trans_group):
overlapping_transcripts = group_transcripts_by_overlap(gtf_obj, trans_group)
single_monoexonic = set()
for overlap_group in overlapping_transcripts:
# Only transcript in group
if len(overlap_group) == 1:
trans_id = overlap_group[0]
trans_exons = gtf_obj.trans_exons_dt[trans_id]
# If it is mono-exonic
if len(trans_exons) == 1:
single_monoexonic.add(trans_id)
return single_monoexonic
def identify_monoexonic_antisense(gtf_obj, to_ignore=None):
if to_ignore is None:
to_ignore = set()
print(time.asctime(), f"Identifying mono-exonic antisense transcripts")
# print(time.asctime(), f"Grouping chromosomes/scaffolds")
# Group related strands; the strands can be "+", "-", and "." (unknown)
grouped_chrom_dt = defaultdict(list)
for chrom, trans_list in gtf_obj.chrom_trans_dt.items():
if not chrom[-1] in {"+", "-", "."}:
print(f'WARNING: Chromosome/Scaffold "{chrom}" does not finish with strand tag. Please check.')
# Remove strand tag
chrom_key = chrom[:-1]
grouped_chrom_dt[chrom_key].append(chrom)
# An antisense monoexon can be either completly nested withing the gene in the other strand, or just partially overlap it
strict_nested = False
# Relate gene coordinates to transcript IDs to track against which models is the selected transcripts an antisense
gene_coord_trans_dt, antisense_relation_dt = [defaultdict(set) for _ in range(2)]
# print(time.asctime(), f"Analyzing overlap of Mono-exonic Genes across strands")
monoexonic_antisense_set = set()
for chrom_key, chrom_groups in grouped_chrom_dt.items():
# combinations() is faster but it doesn't allow to reveal all the possible relationship of antisense transcripts
for chrom_A, chrom_B in permutations(chrom_groups, 2):
strand_A_trans = gtf_obj.chrom_trans_dt[chrom_A]
strand_B_trans = gtf_obj.chrom_trans_dt[chrom_B]
strand_A_trans = set(t_id for t_id in strand_A_trans if t_id not in to_ignore)
strand_B_trans = set(t_id for t_id in strand_B_trans if t_id not in to_ignore)
if not strand_A_trans or not strand_B_trans:
continue
# print(time.asctime(), f'Processing scaffolds: {chrom_A} / {chrom_B}')
# Get information from the 1st chromosome
strand_A_monoexonics = identify_monoexon_gene_transcripts(gtf_obj, strand_A_trans)
strand_A_gene_coordinates = set()
for gene_id in gtf_obj.chrom_gene_dt[chrom_A]:
gene_c = gtf_obj.gene_coords_dt[gene_id]
strand_A_gene_coordinates.add(gene_c)
gene_transcripts = gtf_obj.gene_trans_dt[gene_id]
gene_coord_trans_dt[tuple(gene_c)].update(gene_transcripts)
strand_A_gene_coordinates = sorted(strand_A_gene_coordinates)
# Get information from the 2nd chromosome
strand_B_monoexonics = identify_monoexon_gene_transcripts(gtf_obj, strand_B_trans)
strand_B_gene_coordinates = set()
for gene_id in gtf_obj.chrom_gene_dt[chrom_B]:
gene_c = gtf_obj.gene_coords_dt[gene_id]
strand_B_gene_coordinates.add(gene_c)
gene_transcripts = gtf_obj.gene_trans_dt[gene_id]
gene_coord_trans_dt[tuple(gene_c)].update(gene_transcripts)
strand_B_gene_coordinates = sorted(strand_B_gene_coordinates)
# Identify anti-sense transcripts in the 1st scaffold
for mono_trans in strand_A_monoexonics:
mono_c = gtf_obj.trans_exons_dt[mono_trans][0]
for gene_c in strand_B_gene_coordinates:
# Important! This assumes gene coordinates are already sorted (gene_coord[0] < gene_coord[1])
if strict_nested:
if gene_c[0] <= mono_c[0] <= gene_c[1] and gene_c[0] <= mono_c[1] <= gene_c[1]:
monoexonic_antisense_set.add(mono_trans)
# Check the transcripts the model is antisense to
antisense_to = gene_coord_trans_dt[gene_c]
antisense_relation_dt[mono_trans].update(antisense_to)
# If the other model is also a monoexonic gene, then flag both as potential "antisense"
if len(antisense_to) == 1:
antisense_to = sorted(antisense_to)[0]
if len(gtf_obj.trans_exons_dt[antisense_to]) == 1:
monoexonic_antisense_set.add(antisense_to)
antisense_relation_dt[antisense_to].add(mono_trans)
if mono_c[0] > gene_c[0]:
# Using continue instead of break to capture all transcripts that are antisense with it
continue
else:
# Check for partial overlaps instead
if gene_c[0] <= mono_c[0] <= gene_c[1] or gene_c[0] <= mono_c[1] <= gene_c[1]:
monoexonic_antisense_set.add(mono_trans)
# Check the transcripts the model is antisense to
antisense_to = gene_coord_trans_dt[gene_c]
antisense_relation_dt[mono_trans].update(antisense_to)
# If the other model is also a monoexonic gene, then flag both as potential "antisense"
if len(antisense_to) == 1:
antisense_to = sorted(antisense_to)[0]
if len(gtf_obj.trans_exons_dt[antisense_to]) == 1:
monoexonic_antisense_set.add(antisense_to)
antisense_relation_dt[antisense_to].add(mono_trans)
if mono_c[0] > gene_c[0]:
continue
# Identify anti-sense transcripts in the 2nd scaffold
for mono_trans in strand_B_monoexonics:
mono_c = gtf_obj.trans_exons_dt[mono_trans][0]
for gene_c in strand_A_gene_coordinates:
# Important! This assumes gene coordinates are already sorted (gene_coord[0] < gene_coord[1])
if strict_nested:
if gene_c[0] <= mono_c[0] <= gene_c[1] and gene_c[0] <= mono_c[1] <= gene_c[1]:
monoexonic_antisense_set.add(mono_trans)
# Check the transcripts the model is antisense to
antisense_to = gene_coord_trans_dt[gene_c]
antisense_relation_dt[mono_trans].update(antisense_to)
# If the other model is also a monoexonic gene, then flag both as potential "antisense"
if len(antisense_to) == 1:
antisense_to = sorted(antisense_to)[0]
if len(gtf_obj.trans_exons_dt[antisense_to]) == 1:
monoexonic_antisense_set.add(antisense_to)
antisense_relation_dt[antisense_to].add(mono_trans)
if mono_c[0] > gene_c[0]:
continue
# break
else:
# Check for partial overlaps instead
if gene_c[0] <= mono_c[0] <= gene_c[1] or gene_c[0] <= mono_c[1] <= gene_c[1]:
monoexonic_antisense_set.add(mono_trans)
# Check the transcripts the model is antisense to
antisense_to = gene_coord_trans_dt[gene_c]
antisense_relation_dt[mono_trans].update(antisense_to)
# If the other model is also a monoexonic gene, then flag both as potential "antisense"
if len(antisense_to) == 1:
antisense_to = sorted(antisense_to)[0]
if len(gtf_obj.trans_exons_dt[antisense_to]) == 1:
monoexonic_antisense_set.add(antisense_to)
antisense_relation_dt[antisense_to].add(mono_trans)
if mono_c[0] > gene_c[0]:
continue
# break
return monoexonic_antisense_set
def identify_antisense_fragments(gtf_obj, to_analyze=None, len_th=0.5):
print(time.asctime(), f"Identifying antisense fragments")
assert 0.0 <= len_th <= 1.0
antisense_fragments, monoexon_fragments, intronic = [set() for _ in range(3)]
if not to_analyze:
to_analyze = set(gtf_obj.trans_exons_dt.keys())
loci_group_dt = group_transcripts_across_strands(gtf_obj, to_analyze=to_analyze)
# This dictionary is useful to write a table to explore the antisense fragments lengths
len_ratio_dt = {}
for gene_loci, overlap_group in loci_group_dt.items():
longest_trans = get_longest_transcript(gtf_obj, overlap_group)
longest_trans_strand = gtf_obj.trans_sense_dt[longest_trans]
longest_len = get_transcript_length(gtf_obj, longest_trans)
for t_id in sorted(overlap_group):
# Analyze only potential antisense fragments in the overlap group, they are by definition monoexonic
if len(gtf_obj.trans_exons_dt[t_id]) == 1:
t_strand = gtf_obj.trans_sense_dt[t_id]
t_len = get_transcript_length(gtf_obj, t_id)
t_len_ratio = t_len / longest_len
data = (t_len_ratio, t_len, longest_len)
len_ratio_dt[t_id] = data
# The comparison must be <= to remove those cases were the length is the same
if t_len_ratio <= len_th and t_id != longest_trans:
if t_strand != longest_trans_strand:
antisense_fragments.add(t_id)
else:
monoexon_fragments.add(t_id)
# Track intronic transcripts in case the user wants to keep them
group_intronic = identify_intronic_transcripts(gtf_obj, overlap_group)
intronic = intronic | group_intronic
return antisense_fragments, monoexon_fragments, intronic, len_ratio_dt
def identify_unknown_chromosome_models(gtf_obj, genome_fa):
print(time.asctime(), f'Identifying transcripts with unknown scaffold IDs', flush=True)
print(time.asctime(), f'Extracting scaffold IDs from genome FASTA')
genome_chrom = set()
with open(genome_fa) as fh:
for row in fh:
if row.startswith(">"):
# This allows to, most likely, get only the Chrom ID without any other information
chrom_id = row.strip("\n").strip(">").replace("\t", "#").replace(" ", "#").split("#")[0]
genome_chrom.add(chrom_id)
unknown_chrom_transcripts = set()
for chrom_id, chrom_trans in gtf_obj.chrom_trans_dt.items():
# Important: GTF_OBJ Chrom IDs contain the strand information in the last position(Ex: chr01+)
if chrom_id[:-1] not in genome_chrom:
unknown_chrom_transcripts.update(chrom_trans)
return unknown_chrom_transcripts
def remove_ambiguous_location_models(gtf_file, paths_dt, outname, to_add=None, to_keep=None, antisense_len=0, genome_fa=None):
# print(time.asctime(), f'Analyzing transcripts models across strands for file: {gtf_file}', flush=True)
if to_add is None:
to_add = set()
if to_keep is None:
to_keep = set()
gtf_obj = create_gtf_object(gtf_file)
if genome_fa:
unknown_chrom_transcripts = identify_unknown_chromosome_models(gtf_obj, genome_fa)
else:
unknown_chrom_transcripts = set()
if 'unstranded' not in to_add:
unstranded_transcripts = identify_unstranded_transcripts(gtf_obj)
else:
print(time.asctime(), 'Skipping identification of unstranded transcripts')
unstranded_transcripts = set()
monoexonic_antisense = identify_monoexonic_antisense(gtf_obj, to_ignore=unstranded_transcripts)
antisense_fragments, monoexon_fragments, intronic, _ = identify_antisense_fragments(gtf_obj, monoexonic_antisense, antisense_len)
if 'intronic' in to_add:
monoexon_fragments = monoexon_fragments - intronic
# Identify accepted transcripts
tot_trans = set(gtf_obj.trans_exons_dt.keys())
to_remove = unstranded_transcripts | antisense_fragments | monoexon_fragments | unknown_chrom_transcripts
to_accept = tot_trans - to_remove
outfile = write_gtf(gtf_obj, to_accept, paths_dt['inter'], f"{outname}_valid_loc.gtf")
if 'additional' in to_keep:
_ = write_gtf(gtf_obj, monoexonic_antisense, paths_dt['inter'], f"{outname}_antisense_monoexonic.gtf")
if 'removed' in to_keep:
if monoexonic_antisense:
_ = write_gtf(gtf_obj, antisense_fragments, paths_dt['removed'], f"{outname}_fragments_antisense.gtf")
_ = write_gtf(gtf_obj, monoexon_fragments, paths_dt['removed'], f"{outname}_fragments_monoexon.gtf")
if unstranded_transcripts:
_ = write_gtf(gtf_obj, unstranded_transcripts, paths_dt['removed'], f"{outname}_unstranded.gtf")
if unknown_chrom_transcripts:
_ = write_gtf(gtf_obj, unknown_chrom_transcripts, paths_dt['removed'], f"{outname}_unknown_chromosome.gtf")
# Track numbers of accepted / removed
global_track_dt[f"Removed#{outname}_fragments_antisense.gtf"] = len(antisense_fragments)
global_track_dt[f"Removed#{outname}_fragments_monoexon.gtf"] = len(monoexon_fragments)
global_track_dt[f"Removed#{outname}_unstranded.gtf"] = len(unstranded_transcripts)
global_track_dt[f"Removed#{outname}_unknown_chromosome.gtf"] = len(unknown_chrom_transcripts)
global_track_dt[f"Accepted#{outname}_valid_loc.gtf"] = len(to_accept)
return outfile
| StarcoderdataPython |
138358 | <reponame>kmarcini/Project-Euler-Python
###########################
#
# #718 Unreachable Numbers - Project Euler
# https://projecteuler.net/problem=718
#
# Code by <NAME>
#
###########################
| StarcoderdataPython |
97205 | <filename>apps/shop/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-11 11:07
from __future__ import unicode_literals
import apps.shop.utils
import autoslug.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200)),
('slug', autoslug.fields.AutoSlugField(always_update=True, editable=False, populate_from='name', slugify=apps.shop.utils.slugify_, unique=True)),
('image', models.ImageField(blank=True, upload_to=apps.shop.utils.category_img_path, verbose_name='Зображення')),
('parent_category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='shop.Category')),
],
options={
'verbose_name': 'Категорія',
'ordering': ['name'],
'verbose_name_plural': 'Категорії',
},
),
migrations.CreateModel(
name='Manufacturer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200, verbose_name='Виробник')),
('slug', autoslug.fields.AutoSlugField(always_update=True, editable=False, populate_from='name', slugify=apps.shop.utils.slugify_, unique=True)),
('image', models.ImageField(blank=True, upload_to=apps.shop.utils.manufacturer_img_path, verbose_name='Зображення')),
],
options={
'verbose_name': 'Виробник',
'ordering': ['name'],
'verbose_name_plural': 'Виробники',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200, verbose_name='Назва')),
('model_name', models.CharField(blank=True, max_length=200, verbose_name='Модель')),
('slug', autoslug.fields.AutoSlugField(always_update=True, editable=False, populate_from=apps.shop.utils.base_for_product_slug, slugify=apps.shop.utils.slugify_, unique=True)),
('main_image', models.ImageField(blank=True, upload_to=apps.shop.utils.product_main_img_path, verbose_name='Зображення')),
('description', models.TextField(blank=True, verbose_name='Опис')),
('price', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Ціна')),
('stock', models.PositiveIntegerField(verbose_name='На складі')),
('available', models.BooleanField(default=True, verbose_name='Доступний')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_products', to='shop.Category', verbose_name='Категорія')),
('manufacturer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='manufacturer_products', to='shop.Manufacturer', verbose_name='Виробник')),
],
options={
'verbose_name': 'Товар',
'ordering': ['name'],
'verbose_name_plural': 'Товари',
},
),
migrations.CreateModel(
name='ProductImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, upload_to=apps.shop.utils.product_img_path, verbose_name='Зображення')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='shop.Product')),
],
),
]
| StarcoderdataPython |
3280010 | <gh_stars>10-100
###################################################
# header_troops.py
# This file contains declarations for troops
# DO NOT EDIT THIS FILE!
###################################################
from header_common import *
# Troop flags
tf_male = 0
tf_female = 1
tf_undead = 2
troop_type_mask = 0x0000000f
tf_hero = 0x00000010
tf_inactive = 0x00000020
tf_unkillable = 0x00000040
tf_allways_fall_dead = 0x00000080
tf_no_capture_alive = 0x00000100
tf_mounted = 0x00000400 # Troop's movement speed on map is determined by riding skill.
tf_is_merchant = 0x00001000 # When set, troop does not equip stuff he owns.
tf_randomize_face = 0x00008000 # Randomize face at the beginning of the game.
tf_guarantee_boots = 0x00100000
tf_guarantee_armor = 0x00200000
tf_guarantee_helmet = 0x00400000
tf_guarantee_gloves = 0x00800000
tf_guarantee_horse = 0x01000000
tf_guarantee_shield = 0x02000000
tf_guarantee_ranged = 0x04000000
tf_guarantee_polearm = 0x08000000
tf_unmoveable_in_party_window = 0x10000000
# Character attributes
ca_strength = 0
ca_agility = 1
ca_intelligence = 2
ca_charisma = 3
wpt_one_handed_weapon = 0
wpt_two_handed_weapon = 1
wpt_polearm = 2
wpt_archery = 3
wpt_crossbow = 4
wpt_throwing = 5
wpt_firearm = 6
# personality modifiers, courage 8 means neutral:
courage_4 = 0x0004
courage_5 = 0x0005
courage_6 = 0x0006
courage_7 = 0x0007
courage_8 = 0x0008
courage_9 = 0x0009
courage_10 = 0x000A
courage_11 = 0x000B
courage_12 = 0x000C
courage_13 = 0x000D
courage_14 = 0x000E
courage_15 = 0x000F
aggresiveness_1 = 0x0010
aggresiveness_2 = 0x0020
aggresiveness_3 = 0x0030
aggresiveness_4 = 0x0040
aggresiveness_5 = 0x0050
aggresiveness_6 = 0x0060
aggresiveness_7 = 0x0070
aggresiveness_8 = 0x0080
aggresiveness_9 = 0x0090
aggresiveness_10 = 0x00A0
aggresiveness_11 = 0x00B0
aggresiveness_12 = 0x00C0
aggresiveness_13 = 0x00D0
aggresiveness_14 = 0x00E0
aggresiveness_15 = 0x00F0
is_bandit = 0x0100
tsf_site_id_mask = 0x0000ffff
tsf_entry_mask = 0x00ff0000
tsf_entry_bits = 16
def entry(n):
return (((n) << tsf_entry_bits) & tsf_entry_mask)
str_3 = bignum | 0x00000003
str_4 = bignum | 0x00000004
str_5 = bignum | 0x00000005
str_6 = bignum | 0x00000006
str_7 = bignum | 0x00000007
str_8 = bignum | 0x00000008
str_9 = bignum | 0x00000009
str_10 = bignum | 0x0000000a
str_11 = bignum | 0x0000000b
str_12 = bignum | 0x0000000c
str_13 = bignum | 0x0000000d
str_14 = bignum | 0x0000000e
str_15 = bignum | 0x0000000f
str_16 = bignum | 0x00000010
str_17 = bignum | 0x00000011
str_18 = bignum | 0x00000012
str_19 = bignum | 0x00000013
str_20 = bignum | 0x00000014
str_21 = bignum | 0x00000015
str_22 = bignum | 0x00000016
str_23 = bignum | 0x00000017
str_24 = bignum | 0x00000018
str_25 = bignum | 0x00000019
str_26 = bignum | 0x0000001a
str_27 = bignum | 0x0000001b
str_28 = bignum | 0x0000001c
str_29 = bignum | 0x0000001d
str_30 = bignum | 0x0000001e
agi_3 = bignum | 0x00000300
agi_4 = bignum | 0x00000400
agi_5 = bignum | 0x00000500
agi_6 = bignum | 0x00000600
agi_7 = bignum | 0x00000700
agi_8 = bignum | 0x00000800
agi_9 = bignum | 0x00000900
agi_10 = bignum | 0x00000a00
agi_11 = bignum | 0x00000b00
agi_12 = bignum | 0x00000c00
agi_13 = bignum | 0x00000d00
agi_14 = bignum | 0x00000e00
agi_15 = bignum | 0x00000f00
agi_16 = bignum | 0x00001000
agi_17 = bignum | 0x00001100
agi_18 = bignum | 0x00001200
agi_19 = bignum | 0x00001300
agi_20 = bignum | 0x00001400
agi_21 = bignum | 0x00001500
agi_22 = bignum | 0x00001600
agi_23 = bignum | 0x00001700
agi_24 = bignum | 0x00001800
agi_25 = bignum | 0x00001900
agi_26 = bignum | 0x00001a00
agi_27 = bignum | 0x00001b00
agi_28 = bignum | 0x00001c00
agi_29 = bignum | 0x00001d00
agi_30 = bignum | 0x00001e00
int_3 = bignum | 0x00030000
int_4 = bignum | 0x00040000
int_5 = bignum | 0x00050000
int_6 = bignum | 0x00060000
int_7 = bignum | 0x00070000
int_8 = bignum | 0x00080000
int_9 = bignum | 0x00090000
int_10 = bignum | 0x000a0000
int_11 = bignum | 0x000b0000
int_12 = bignum | 0x000c0000
int_13 = bignum | 0x000d0000
int_14 = bignum | 0x000e0000
int_15 = bignum | 0x000f0000
int_16 = bignum | 0x00100000
int_17 = bignum | 0x00110000
int_18 = bignum | 0x00120000
int_19 = bignum | 0x00130000
int_20 = bignum | 0x00140000
int_21 = bignum | 0x00150000
int_22 = bignum | 0x00160000
int_23 = bignum | 0x00170000
int_24 = bignum | 0x00180000
int_25 = bignum | 0x00190000
int_26 = bignum | 0x001a0000
int_27 = bignum | 0x001b0000
int_28 = bignum | 0x001c0000
int_29 = bignum | 0x001d0000
int_30 = bignum | 0x001e0000
cha_3 = bignum | 0x03000000
cha_4 = bignum | 0x04000000
cha_5 = bignum | 0x05000000
cha_6 = bignum | 0x06000000
cha_7 = bignum | 0x07000000
cha_8 = bignum | 0x08000000
cha_9 = bignum | 0x09000000
cha_10 = bignum | 0x0a000000
cha_11 = bignum | 0x0b000000
cha_12 = bignum | 0x0c000000
cha_13 = bignum | 0x0d000000
cha_14 = bignum | 0x0e000000
cha_15 = bignum | 0x0f000000
cha_16 = bignum | 0x10000000
cha_17 = bignum | 0x11000000
cha_18 = bignum | 0x12000000
cha_19 = bignum | 0x13000000
cha_20 = bignum | 0x14000000
cha_21 = bignum | 0x15000000
cha_22 = bignum | 0x16000000
cha_23 = bignum | 0x17000000
cha_24 = bignum | 0x18000000
cha_25 = bignum | 0x19000000
cha_26 = bignum | 0x1a000000
cha_27 = bignum | 0x1b000000
cha_28 = bignum | 0x1c000000
cha_29 = bignum | 0x1d000000
cha_30 = bignum | 0x1e000000
level_mask = 0x000000FF
level_bits = 32
def level(v):
if (v > level_mask):
v = level_mask
return (bignum|v) << level_bits
def_attrib = str_5 | agi_5 | int_4 | cha_4
# Weapon proficiencies:
one_handed_bits = 0
two_handed_bits = 10
polearm_bits = 20
archery_bits = 30
crossbow_bits = 40
throwing_bits = 50
firearm_bits = 60
num_weapon_proficiencies = 7
def wp_one_handed(x):
return (((bignum | x) & 0x3FF) << one_handed_bits)
def wp_two_handed(x):
return (((bignum | x) & 0x3FF) << two_handed_bits)
def wp_polearm(x):
return (((bignum | x) & 0x3FF) << polearm_bits)
def wp_archery(x):
return (((bignum | x) & 0x3FF) << archery_bits)
def wp_crossbow(x):
return (((bignum | x) & 0x3FF) << crossbow_bits)
def wp_throwing(x):
return (((bignum | x) & 0x3FF) << throwing_bits)
def wp_firearm(x):
return (((bignum | x) & 0x3FF) << firearm_bits)
def find_troop(troops,troop_id):
result = -1
num_troops = len(troops)
i_troop = 0
while (i_troop < num_troops) and (result == -1):
troop = troops[i_troop]
if (troop[0] == troop_id):
result = i_troop
else:
i_troop += 1
return result
def upgrade(troops,troop1_id,troop2_id):
troop1_no = find_troop(troops,troop1_id)
troop2_no = find_troop(troops,troop2_id)
if (troop1_no == -1):
print "Error with upgrade def: Unable to find troop1-id: " + troop1_id
elif (troop2_no == -1):
print "Error with upgrade def: Unable to find troop2-id: " + troop2_id
else:
cur_troop = troops[troop1_no]
cur_troop_length = len(cur_troop)
if cur_troop_length == 11:
cur_troop[11:11] = [0, 0, 0, troop2_no, 0]
elif cur_troop_length == 12:
cur_troop[12:12] = [0, 0, troop2_no, 0]
elif cur_troop_length == 13:
cur_troop[13:13] = [0, troop2_no, 0]
else:
cur_troop[14:14] = [troop2_no, 0]
def upgrade2(troops,troop1_id,troop2_id,troop3_id):
troop1_no = find_troop(troops,troop1_id)
troop2_no = find_troop(troops,troop2_id)
troop3_no = find_troop(troops,troop3_id)
if (troop1_no == -1):
print "Error with upgrade2 def: Unable to find troop1-id: " + troop1_id
elif (troop2_no == -1):
print "Error with upgrade2 def: Unable to find troop2-id: " + troop2_id
elif (troop3_no == -1):
print "Error with upgrade2 def: Unable to find troop3-id: " + troop3_id
else:
cur_troop = troops[troop1_no]
cur_troop_length = len(cur_troop)
if cur_troop_length == 11:
cur_troop[11:11] = [0, 0, 0, troop2_no, troop3_no]
elif cur_troop_length == 12:
cur_troop[12:12] = [0, 0, troop2_no, troop3_no]
elif cur_troop_length == 13:
cur_troop[13:13] = [0, troop2_no, troop3_no]
else:
cur_troop[14:14] = [troop2_no, troop3_no]
| StarcoderdataPython |
3294289 | # coding=utf-8
# author: al0ne
# https://github.com/al0ne
import re
import base64
import urllib3
import glob
import itertools
import concurrent.futures
import logging
import ssl
import chardet
import socket
import OpenSSL
import requests
import random
from urllib import parse
from bs4 import BeautifulSoup
from lib.verify import verify_ext
from lib.sqldb import Sqldb
from lib.settings import *
from lib.cli_output import *
from lib.Requests import Requests
from plugins.ActiveReconnaissance.robots import robots
class Cartesian(object):
def __init__(self):
self._data_list = []
# 添加生成笛卡尔积的数据列表
def add_data(self, data=[]):
self._data_list.append(data)
# 计算笛卡尔积
def build(self):
urls = []
for item in itertools.product(*self._data_list):
urls.append(item[0] + item[1])
return urls
class DirScan():
def __init__(self, dbname, apps, host):
self.notstr = ''
self.apps = apps
self.notlen = ''
self.goto = ''
self.host = host
self.title = ''
self.dbname = dbname
self.outjson = []
self.req = Requests()
def get_urls(self, domain):
wordlist = []
robot = robots(domain)
domain = domain.replace('http://', '').replace('https://', '').rstrip('/')
domain2 = re.sub(r'\.', '_', domain)
domain3 = domain.strip('www.')
ext = verify_ext(self.apps)
ext = list(map(lambda x: '.' + x, ext))
path = []
for txt in glob.glob(r'data/path/*.txt'):
with open(txt, 'r', encoding='utf-8') as f:
for i in f.readlines():
path.append(i.strip())
leaks = Cartesian()
leaks.add_data([
'/www',
'/1',
'/2016',
'/2017',
'/2018',
'/2019',
'/wwwroot',
'/backup',
'/index',
'/web',
'/test',
'/tmp',
'/default',
'/temp',
'/website',
'/upload',
'/bin',
'/bbs',
'/www1',
'/www2',
'/log',
'/extra',
'/file',
'/qq',
'/up',
'/config',
'/' + domain,
'/userlist',
'/dev',
'/a',
'/123',
'/sysadmin',
'/localhost',
'/111',
'/access',
'/old',
'/i',
'/vip',
'/index.php',
'/global',
'/key',
'/webroot',
'/out',
'/server',
])
leaks.add_data([
'.tar.gz', '.zip', '.rar', '.sql', '.7z', '.bak', '.tar', '.txt', '.tgz', '.swp', '~', '.old', '.tar.bz2',
'.data', '.csv'
])
path.extend(leaks.build())
index = Cartesian()
index.add_data([
'/1', '/l', '/info', '/index', '/admin', '/login', '/qq', '/q', '/search', '/install', '/default', '/cmd',
'/upload', '/test', '/manage', '/loading', '/left', '/zzzz', '/welcome', '/ma', '/66'
])
index.add_data(ext)
path.extend(index.build())
path.extend(wordlist)
if robot:
path.extend(robot)
return list(set(path))
def _verify(self, url, code, contype, length, goto, text, title):
# 验证404页面
try:
result = True
if code in BLOCK_CODE:
result = False
if contype in BLOCK_CONTYPE:
result = False
if length == self.notlen:
result = False
# 调转等于404页面的调转时
if goto == self.goto:
result = False
# url在跳转路径中
if (url in goto) or (goto in url):
result = False
if url.strip('/') == self.goto or url.strip('/') == goto:
result = False
for i in PAGE_404:
if i in text:
result = False
break
if title == self.title and title != 'None':
result = False
# 有些302跳转会在location里出现error或者404等关键字
if re.search(r'forbidden|error|404', goto):
result = False
# 文件内容类型对不上的情况
if re.search(r'\.bak$|\.zip$|\.rar$|\.7z$|\.old$|\.htaccess$|\.csv$|\.txt$|\.sql$|\.tar$|\.tar.gz$',
url) and contype == 'html':
result = False
return result
except:
return False
def parse_html(self, text):
result = []
soup = BeautifulSoup(text, 'html.parser')
for i in soup.find_all(['a', 'img', 'script']):
if i.get('src'):
result.append(i.get('src'))
if i.get('href'):
result.append(i.get('href'))
return result
def check404(self, url):
# 访问一个随机的页面记录404页面的长度与内容
key = str(random.random() * 100)
random_url = base64.b64encode(key.encode('utf-8'))
url = url + '/' + random_url.decode('utf-8') + '.html'
try:
self.notstr = '404page'
r = self.req.get(url)
if r.status_code == '200':
coding = chardet.detect(r.content[:10000]).get('encoding')
if coding:
text = r.content[:20000].decode(coding)
self.notstr = self.parse_html(text)
self.notlen = r.headers.get('Content-Length')
if not self.notlen:
self.notlen = len(r.content)
if r.is_redirect:
self.goto = r.headers['Location']
except (requests.exceptions.ConnectTimeout, requests.exceptions.ReadTimeout, requests.exceptions.Timeout,
requests.exceptions.SSLError, requests.exceptions.ConnectionError, ssl.SSLError, AttributeError,
ConnectionRefusedError, socket.timeout, urllib3.exceptions.ReadTimeoutError,
urllib3.exceptions.ProtocolError, OpenSSL.SSL.WantReadError):
pass
except UnboundLocalError:
pass
except Exception as e:
logging.exception(e)
def scan(self, host):
try:
r = self.req.scan(host)
if r.is_redirect:
goto = r.headers.get('Location')
else:
goto = 'test'
if r.headers.get('Content-Type'):
contype = re.sub(r'\w+/', '', str(r.headers.get('Content-Type')))
contype = re.sub(r';.*', '', contype)
else:
contype = 'None'
rsp_len = r.headers.get('Content-Length')
# 判断是不是网页或者文本,如果是其他文件coding将置为空
ishtml = False
if contype == 'html':
ishtml = True
content = r.raw.read()
else:
content = r.raw.read(25000)
if ishtml:
coding = chardet.detect(content).get('encoding')
if coding:
text = content.decode(coding)
title = re.search('(?<=<title>).*(?=</title>)', text)
else:
text = 'Other'
title = None
else:
text = 'Other'
title = None
if not rsp_len:
rsp_len = len(content)
urlresult = parse.urlparse(host)
if self._verify(urlresult.path, r.status_code, contype, rsp_len, goto, text, title):
result = 0
if ishtml:
pagemd5 = self.parse_html(text)
if pagemd5 == self.notstr:
result = 1
if result < 0.5:
if title is None:
title = 'None'
else:
title = title.group()
title = re.sub(r'\n|\t', '', title)
console('URLS', urlresult.netloc, urlresult.path + '\n')
data = {
urlresult.netloc: {
"rsp_code": r.status_code,
"rsp_len": rsp_len,
"title": title,
"contype": contype,
"url": urlresult.path
}
}
self.outjson.append(data)
r.close()
except (requests.exceptions.ConnectTimeout, requests.exceptions.ReadTimeout, requests.exceptions.Timeout,
requests.exceptions.SSLError, requests.exceptions.ConnectionError, ssl.SSLError, AttributeError,
ConnectionRefusedError, socket.timeout, urllib3.exceptions.ReadTimeoutError,
urllib3.exceptions.ProtocolError, OpenSSL.SSL.WantReadError):
pass
except (UnboundLocalError, AttributeError):
pass
except Exception as e:
logging.exception(host)
logging.exception(e)
try:
r.close()
except:
pass
return 'OK'
def save(self, urls):
Sqldb(self.dbname).get_urls(urls)
def run(self, task):
try:
with concurrent.futures.ThreadPoolExecutor(max_workers=THREADS) as executor:
futures = [executor.submit(self.scan, i) for i in task]
for future in concurrent.futures.as_completed(futures, timeout=3):
future.result()
except (EOFError, concurrent.futures._base.TimeoutError):
pass
# 创建启动任务
def pool(self):
host = self.host.strip('/')
self.check404(host)
task = []
urls = self.get_urls(host)
random.shuffle(urls)
for url in urls:
task.append(host + url)
self.run(task)
# 保存结果
self.save(self.outjson)
if __name__ == "__main__":
start_time = time.time()
DirScan('result', ['php'], 'http://127.0.0.1').pool()
end_time = time.time()
print('\nrunning {0:.3f} seconds...'.format(end_time - start_time))
| StarcoderdataPython |
3314010 | import sys
from draw import *
import random
import json
# GAME RULES
# 1)Any live cell with fewer than two live neighbours dies, as if by underpopulation.
# Default is UNDER_THRESH = 2
# 2)Any live cells that does not die due to underpopulation or overpopulation remains alive.
# 3)Any live cell with more than three live neighbours dies, as if by overpopulation.
# Default is OVER_THRESH = 3
# 4)Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.
# Default is REPR_THRESH = 3
# GAME VARIABLES
UNDER_THRESH = 2
OVER_THRESH = 3
REPR_THRESH = 3
# CAN BE CHANGED BASED ON PC \/ \/
GRID_SIZE = [100, 100]
FPS = 30
# CAN BE CHANGED BASED ON PC /\ /\
SIZE = 12 # square size
CONST_GRID_OFFSET = [0, 0] # move entire grid by
CONST_EDGE_IGNORE = 1 # make outer squares on grid unusable, to enclose chain reaction
CONTINUE = False # False means that game is paused
PRESET_DROPDOWN = False
SETTING_DROPDOWN = False
SCROLL = 0 # used for preset dropdown scrolling. +ive value will move the text upwards
# RGB color codes
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GREY = (30, 30, 40)
LIGHT_GREY = (206, 206, 206)
BLUE = (160, 240, 255)
GREEN = (127, 255, 212)
clock = pygame.time.Clock() # initialize clock
pygame.init() # initialize pygame
font_style = pygame.font.SysFont('cambria', 15)
font_style_med = pygame.font.SysFont('cambria', 18)
font_style_big = pygame.font.SysFont('cambria', 25)
pygame.display.set_icon(programIcon)
pygame.display.set_caption('Game of Life')
# load map from GameMap.txt
# splits the grid of 1's and 0's into into 2D list
def load_text_file(path):
f = open(path + '.txt', 'r')
data = f.read()
f.close()
data = data.split('\n')
game_map = []
for row in data:
game_map.append(row.split(' '))
return game_map
def import_pattern(json_path):
with open(json_path, 'r') as fh:
s = fh.read()
json_patterns = json.loads(s)
return json_patterns
# makes a grid of Squares. If 1 occurs, make the Square alive, else it is not alive
# marks outer Squares, so that a Square won't 'scan' neighbors outside of grid
def make_grid(text_map):
len_y = len(text_map)
len_x = len(text_map[0])
edge = False
alive = False
map = []
global SIZE
SIZE = int(WINDOW_SIZE[0] / len_x)
off_x = int((WINDOW_SIZE[0] - len_x * SIZE) / 2)
off_y = int((WINDOW_SIZE[1] - len_y * SIZE) / 2)
global CONST_GRID_OFFSET # how much to displace the grid
CONST_GRID_OFFSET = [off_x, off_y]
for y in range(len_y):
temp = []
x = 0
for _ in range(len_x):
if y < CONST_EDGE_IGNORE or y > len_y - CONST_EDGE_IGNORE - 1:
edge = True
if x < CONST_EDGE_IGNORE or x > len_x - CONST_EDGE_IGNORE - 1:
edge = True
if text_map[y][x] == '1':
alive = True
temp.append(
Square(x * SIZE + CONST_GRID_OFFSET[0], y * SIZE + CONST_GRID_OFFSET[1], edge, alive))
x += 1
edge = False
alive = False
map.append(temp)
y += 1
return map
# does not use text file
# the grid rows and cols can be changed easier with parameters, len_x and len_y
# all square will start off not alive
def make_custom_grid(grid_size):
edge = False
map = []
global SIZE
SIZE = int(WINDOW_SIZE[0] / (grid_size[0] / 1.3))
off_x = int((WINDOW_SIZE[0] - grid_size[0] * SIZE) / 2)
off_y = int((WINDOW_SIZE[1] - grid_size[1] * SIZE) / 2)
global CONST_GRID_OFFSET # how much to displace the grid
CONST_GRID_OFFSET = [off_x, off_y]
for y in range(grid_size[1]):
temp = []
x = 0
for _ in range(grid_size[0]):
if y < CONST_EDGE_IGNORE or y > grid_size[1] - CONST_EDGE_IGNORE - 1:
edge = True
if x < CONST_EDGE_IGNORE or x > grid_size[0] - CONST_EDGE_IGNORE - 1:
edge = True
# marks outer Squares, so that a Square won't 'scan' neighbors outside of grid
temp.append(
Square(x * SIZE + CONST_GRID_OFFSET[0], y * SIZE + CONST_GRID_OFFSET[1], edge, False))
x += 1
edge = False
map.append(temp)
y += 1
return map
# draws grid of squares
# if cursor is on a square, light up the square, but it is not 'alive'
def draw_grid(grid_squares):
for row in grid_squares:
for square in row:
if square.alive:
# makes the square that has been alive for longer, more blue. the max square.time is 200
# so darkest color a square can be is (55, 55, 255)
pygame.draw.rect(display, (255 - square.time, 255 - square.time, 255), square.rect)
else:
pygame.draw.rect(display, BLACK, square.rect)
if square.touch_cursor:
pygame.draw.rect(display, WHITE, square.rect)
pygame.draw.rect(display, GREY, square.rect, 1) # black border around each square
# marks the square which the cursor is on
# make square 'alive' if click is True
def cursor_on_square(grid_squares, cursor_loc, click, buttons_rect):
for row in grid_squares:
for square in row:
if pygame.Rect.collidepoint(square.rect, cursor_loc) \
and not pygame.Rect.collidepoint(buttons_rect, cursor_loc):
square.touch_cursor = True
if click:
square.alive = not square.alive
else:
square.touch_cursor = False
# changes the size of squares for zoom effect
# center square is the focal point of zoom in/out
# if pause/play button clicked, the game stops executing the die_alive_method.
def zoom_button_click(grid_squares, in_button_rect, out_button_rect, cursor_loc, click):
# zoom in = 1
# zoom out = -1
# nothing, zoom = 0
if pygame.Rect.collidepoint(in_button_rect, cursor_loc) and click:
zoom = 1
elif pygame.Rect.collidepoint(out_button_rect, cursor_loc) and click:
zoom = -1
else:
zoom = 0
len_y = len(grid_squares)
len_x = len(grid_squares[0])
center = [int(len_y / 2), int(len_x / 2)]
min_size = int(WINDOW_SIZE[0] / len_x)
max_size = 100
# when zooming out
if zoom == 1 and max_size > grid_squares[0][0].rect.width:
for y in range(len_y):
for x in range(len_x):
# squares that are further out from center square move more than squares closer to center
grid_squares[y][x].rect.y += -(center[0] - y + 1) * zoom
grid_squares[y][x].rect.x += -(center[1] - x + 1) * zoom
grid_squares[y][x].rect.width += 1 * zoom
grid_squares[y][x].rect.height += 1 * zoom
in_screen = False
bigger_rect = 24
screen_rect = pygame.Rect(-bigger_rect, -bigger_rect, WINDOW_SIZE[0] + bigger_rect, WINDOW_SIZE[1] + bigger_rect)
# uses screen_rect, size of the window, used too see if being zoomed out of screen
# if any side of grid moves past this rect, then stop zooming out.
if grid_squares[0][len_x - 1].rect.right > screen_rect.right and grid_squares[0][0].rect.left < screen_rect.left \
and grid_squares[len_y - 1][0].rect.bottom > screen_rect.bottom and grid_squares[0][0].rect.top < \
screen_rect.top:
in_screen = True
# when zooming out
if zoom == -1 and grid_squares[0][0].rect.width > min_size and in_screen:
for y in range(len_y):
for x in range(len_x):
# squares that are further out from center square move more than squares closer to center
grid_squares[y][x].rect.y += -(center[0] - y + 1) * zoom
grid_squares[y][x].rect.x += -(center[1] - x + 1) * zoom
grid_squares[y][x].rect.width += 1 * zoom
grid_squares[y][x].rect.height += 1 * zoom
return grid_squares
def move_button_click(grid_squares, right_button_rect, left_button_rect, up_button_rect, down_button_rect, cursor_loc,
click):
screen_rect = pygame.Rect(0, 0, WINDOW_SIZE[0], WINDOW_SIZE[1])
speed = 3
len_x = len(grid_squares[0])
len_y = len(grid_squares)
if click:
if pygame.Rect.collidepoint(right_button_rect, cursor_loc):
direction = [-speed, 0]
elif pygame.Rect.collidepoint(left_button_rect, cursor_loc):
direction = [speed, 0]
elif pygame.Rect.collidepoint(up_button_rect, cursor_loc):
direction = [0, speed]
elif pygame.Rect.collidepoint(down_button_rect, cursor_loc):
direction = [0, -speed]
else:
direction = [0, 0]
# movement in x dir
if direction[0] != 0:
# moves squares left, making the screen look like its moving right
if direction[0] < 0:
if grid_squares[0][len_x - 1].rect.right > screen_rect.right:
for row in grid_squares:
for square in row:
square.rect.x += direction[0]
# moves squares right, making the screen look like its moving left
else:
if grid_squares[0][0].rect.left < screen_rect.left:
for row in grid_squares:
for square in row:
square.rect.x += direction[0]
# movement in y dir
else:
# moves squares left, making the screen look like its moving right
if direction[1] < 0:
if grid_squares[len_y - 1][0].rect.bottom > screen_rect.bottom:
for row in grid_squares:
for square in row:
square.rect.y += direction[1]
# moves squares right, making the screen look like its moving left
else:
if grid_squares[0][0].rect.top < screen_rect.top:
for row in grid_squares:
for square in row:
square.rect.y += direction[1]
return grid_squares
# clears all squares
# makes all squares either alive of dead, using the random module
def other_buttons(grid_squares, cursor_loc, click, rect_pause, rect_shuffle, rect_clear, rect_preset, rect_settings):
len_y = len(grid_squares)
len_x = len(grid_squares[0])
global PRESET_DROPDOWN
global SETTING_DROPDOWN
global CONTINUE
# shuffle feature
if pygame.Rect.collidepoint(rect_shuffle, cursor_loc) and click:
for y in range(len_y):
for x in range(len_x):
edge = False
if y < CONST_EDGE_IGNORE or y > len_y - CONST_EDGE_IGNORE - 1:
edge = True
if x < CONST_EDGE_IGNORE or x > len_x - CONST_EDGE_IGNORE - 1:
edge = True
if not edge:
if random.randint(0, 9) < 5:
grid_squares[y][x].alive = True
else:
grid_squares[y][x].alive = False
# starts and pause feature
if pygame.Rect.collidepoint(rect_pause, cursor_loc) and click:
CONTINUE = not CONTINUE
# clear feature
if pygame.Rect.collidepoint(rect_clear, cursor_loc) and click:
for row in grid_squares:
for square in row:
square.alive = False
# preset feature
#
if pygame.Rect.collidepoint(rect_preset, cursor_loc) and click:
PRESET_DROPDOWN = not PRESET_DROPDOWN
SETTING_DROPDOWN = False
if pygame.Rect.collidepoint(rect_settings, cursor_loc) and click:
SETTING_DROPDOWN = not SETTING_DROPDOWN
PRESET_DROPDOWN = False
return grid_squares
def draw_dropdown(preset_dropdown, setting_dropdown, cursor_loc, click, preset_json, grid_squares):
preset_loc = [570, 50]
setting_loc = [570, 410]
global UNDER_THRESH
global OVER_THRESH
global REPR_THRESH
if preset_dropdown:
pygame.draw.rect(display, BLACK, (preset_loc[0], preset_loc[1], 155, 540))
# 18 boxes in preset dropdown
for i in range(0, 18):
pygame.draw.rect(display, WHITE, (preset_loc[0], preset_loc[1] + 30 * i, 155, 30), 2)
text = preset_json[i + SCROLL]["title"]
label = font_style.render(text, True, WHITE)
display.blit(label, (preset_loc[0] + 5, preset_loc[1] + 5 + 30 * i))
# every loop is each box on screen
if pygame.Rect.collidepoint(pygame.Rect(preset_loc[0], preset_loc[1] + 30 * i, 155, 30), cursor_loc) \
and click:
pattern = preset_json[i + SCROLL]["life"] # the pattern chosen by user
len_x = len(grid_squares[0])
len_y = len(grid_squares)
if len(pattern) > len_y or len(pattern[0]) > len_x:
print("preset_json pattern is too big")
else:
# clears grid
for row in grid_squares:
for square in row:
square.alive = False
# location of where pattern should be placed
pattern_loc = [int((len_x - len(pattern[0])) / 2), int((len_y - len(pattern)) / 2)]
# draws pattern onto grid
for y in range(len(pattern)):
for x in range(len(pattern[0])):
if pattern[y][x] == 1:
grid_squares[pattern_loc[1] + y][pattern_loc[0] + x].alive = True
elif setting_dropdown:
pygame.draw.rect(display, BLACK, (setting_loc[0], setting_loc[1], 155, 150))
# 3 boxes in settings dropdown
for i in range(3):
# draws all the boxes, texts and triangles for the settings dropdown
draw_shapes_texts_setting(i, setting_loc, font_style_med, font_style_big, UNDER_THRESH, OVER_THRESH,
REPR_THRESH)
if pygame.Rect.collidepoint(pygame.Rect((setting_loc[0] + 110, setting_loc[1] + 60 * i, 45, 15)),
cursor_loc) and click:
# 8 is the limit because there are only 8 squares around a single square
if i == 0:
UNDER_THRESH += 1
if UNDER_THRESH > 8:
UNDER_THRESH = 8
if UNDER_THRESH > OVER_THRESH:
UNDER_THRESH -= 1
elif i == 1:
OVER_THRESH += 1
if OVER_THRESH > 8:
OVER_THRESH = 8
elif i == 2:
REPR_THRESH += 1
if REPR_THRESH > OVER_THRESH:
REPR_THRESH -= 1
elif pygame.Rect.collidepoint(pygame.Rect((setting_loc[0] + 110, setting_loc[1] + 45 + 60 * i, 45, 15)),
cursor_loc) and click:
if i == 0:
UNDER_THRESH -= 1
if UNDER_THRESH < 1:
UNDER_THRESH = 1
elif i == 1:
OVER_THRESH -= 1
if OVER_THRESH < 1:
OVER_THRESH = 1
if OVER_THRESH < UNDER_THRESH:
OVER_THRESH += 1
elif i == 2:
REPR_THRESH -= 1
if REPR_THRESH < UNDER_THRESH:
REPR_THRESH += 1
# REPR_THRESH has to be in between UNDER_THRESH and OVER_THRESH inclusively,
# otherwise squares will die before new ones get created.
if REPR_THRESH > OVER_THRESH:
REPR_THRESH = OVER_THRESH
if REPR_THRESH < UNDER_THRESH:
REPR_THRESH = UNDER_THRESH
return grid_squares
# each square on grid is made of Square class
class Square:
def __init__(self, x, y, edge_square, alive):
self.x = x # x the pixel cords on the window
self.y = y # y the pixel cords on the window
self.loc = [int((x - CONST_GRID_OFFSET[0]) / SIZE), int((y - CONST_GRID_OFFSET[1]) / SIZE)]
self.alive = alive # is the number of 'alive' squares around the self square
self.rect = pygame.Rect(x, y, SIZE, SIZE)
self.touch_cursor = False
self.neighbors = 0
self.grid_edge = edge_square # indicates whether the square is on the edge of grid
self.time = 0 # is the number of frames the square has been alive without dying. Used for color and max is 200
self.prev_alive = False # tells you if square was alive previous frame
# counts the number of 'alive' squares around self square
def tot_neighbors(self, grid_squares):
tot = 0
for y, x in ((-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)):
if not grid_squares[self.loc[1]][self.loc[0]].grid_edge:
if grid_squares[self.loc[1] + y][self.loc[0] + x].alive:
tot += 1
self.neighbors = tot
# checks if square is on the edge of grid, applies rules of game, increments self.time up to a maximum
def die_alive_method(self):
if not self.grid_edge:
if self.alive:
self.prev_alive = True
else:
self.prev_alive = False
# implements the rules of the game. Can be changed by changing THRESHOLDS
if self.neighbors == REPR_THRESH:
self.alive = True
if self.neighbors < UNDER_THRESH:
self.alive = False
elif self.neighbors > OVER_THRESH:
self.alive = False
# self.time used for color of square, so limit is 200 to not make square too dark blue
if self.prev_alive and self.alive:
if self.time < 200:
self.time += 2
else:
self.time = 0
patterns = import_pattern("../json/all_patterns.json")
text_output = load_text_file("../res/GameMap")
# COMMENT OUT ONE OF THESE \/ \/ \/
# grid = make_grid(text_output) # To draw pattern in text file. Found in res/GameMap.txt
grid = make_custom_grid(GRID_SIZE) # To draw in game. Change GRID_SIZE to change grid size. Larger grid, slower game
# COMMENT OUT ONE OF THESE /\ /\ /\
long_click = False
while True: # Main game loop
display.fill(BLACK) # makes screen black
mx, my = pygame.mouse.get_pos() # gets cursor co-ords
loc = [mx, my]
single_click = False
for event in pygame.event.get(): # event loop
if event.type == pygame.QUIT: # checks if window is closed
pygame.quit() # stops pygame
sys.exit() # stops script
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1: # left click
long_click = True
single_click = True
if event.type == pygame.MOUSEBUTTONUP:
if event.button == 1: # left click
long_click = False
if event.type == pygame.MOUSEWHEEL:
SCROLL += - event.y
if SCROLL < 0:
SCROLL = 0
elif SCROLL > len(patterns) - 18:
SCROLL = len(patterns) - 18
# updates the # of neighbors around square
for row_of_squares in grid:
for single_square in row_of_squares:
single_square.tot_neighbors(grid)
# 'kills' or 'spawn' squares depending of the number of neighbors
if CONTINUE:
for row_of_squares in grid:
for single_square in row_of_squares:
single_square.die_alive_method()
# implements on screen features
zoom_button_click(grid, zoom_in_rect, zoom_out_rect, loc, long_click)
move_button_click(grid, right_rect, left_rect, up_rect, down_rect, loc, long_click)
other_buttons(grid, loc, single_click, play_pause_rect, shuffle_rect, clear_rect, preset_rect, settings_rect)
cursor_on_square(grid, loc, single_click, buttons_area_rect)
draw_grid(grid)
draw_tools(CONTINUE, font_style)
draw_dropdown(PRESET_DROPDOWN, SETTING_DROPDOWN, loc, single_click, patterns, grid)
pygame.display.update() # update display
screen.blit(display, (0, 0))
clock.tick(FPS) # set frame rate
| StarcoderdataPython |
3367586 | #!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Re-runs the ChromeDriver's client-side commands, given a log file.
Takes a ChromeDriver log file that was created with the --replayable=true
command-line flag for the ChromeDriver binary (or with the same flag for
the run_py_tests.py).
To replay a log file, just run this script with the log file specified
in the --input-log-path flag. Alternatively, construct a CommandSequence
instance and iterate over it to access the logged commands one-by-one.
Notice that for the iteration approach, you must call
CommandSequence.ingestRealResponse with each response.
Implementation:
The CommandSequence class is the core of the implementation here. At a
basic level, it opens the given log file, looks for the next command and
response pair, and returns them (along with their parameters/payload) on
NextCommand, next, or __iter__.
To get effective replay, there are a few deviations from simply verbatim
repeating the logged commands and parameters:
1. Session, window, and element IDs in the log are identified with the
corresponding ID in the new session and substituted in each command
returned.
2. When a response is an error, we need to infer other parts of the
original response that would have been returned along with the
error.
3. If GetSessions is called while there are multiple sessions open,
the log will show more calls than actually occurred (one per open
session, even if it was only called once), so we absorb all of
these calls back into one.
"""
import collections
import json
import optparse
import os
import re
import sys
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
_PARENT_DIR = os.path.join(_THIS_DIR, os.pardir)
_CLIENT_DIR = os.path.join(_PARENT_DIR, "client")
_SERVER_DIR = os.path.join(_PARENT_DIR, "server")
# pylint: disable=g-import-not-at-top
sys.path.insert(1, _CLIENT_DIR)
import command_executor
sys.path.remove(_CLIENT_DIR)
sys.path.insert(1, _SERVER_DIR)
import server
sys.path.remove(_SERVER_DIR)
sys.path.insert(1, _PARENT_DIR)
import util
sys.path.remove(_PARENT_DIR)
# pylint: enable=g-import-not-at-top
class Method(object):
GET = "GET"
POST = "POST"
DELETE = "DELETE"
# TODO(crbug/chromedriver/2511) there should be a single source of truth for
# this data throughout chromedriver code (see e.g. http_handler.cc)
_COMMANDS = {
"AcceptAlert": (Method.POST, "/session/:sessionId/alert/accept"),
"AddCookie": (Method.POST, "/session/:sessionId/cookie"),
"ClearElement": (Method.POST, "/session/:sessionId/element/:id/clear"),
"ClearLocalStorage": (Method.DELETE, "/session/:sessionId/local_storage"),
"ClearSessionStorage":
(Method.DELETE, "/session/:sessionId/session_storage"),
"Click": (Method.POST, "/session/:sessionId/click"),
"ClickElement": (Method.POST, "/session/:sessionId/element/:id/click"),
"CloseWindow": (Method.DELETE, "/session/:sessionId/window"),
"DeleteAllCookies": (Method.DELETE, "/session/:sessionId/cookie"),
"DeleteCookie": (Method.DELETE, "/session/:sessionId/cookie/:name"),
"DeleteNetworkConditions":
(Method.DELETE, "/session/:sessionId/chromium/network_conditions"),
"DismissAlert": command_executor.Command.DISMISS_ALERT,
"DoubleClick": (Method.POST, "/session/:sessionId/doubleclick"),
"ElementScreenshot":
(Method.GET, "/session/:sessionId/element/:id/screenshot"),
"ExecuteAsyncScript": command_executor.Command.EXECUTE_ASYNC_SCRIPT,
"ExecuteCDP": (Method.POST, "/session/:sessionId/goog/cdp/execute"),
"ExecuteScript": (Method.POST, "/session/:sessionId/execute/sync"),
"FindChildElement":
(Method.POST, "/session/:sessionId/element/:id/element"),
"FindChildElements":
(Method.POST, "/session/:sessionId/element/:id/elements"),
"FindElement": (Method.POST, "/session/:sessionId/element"),
"FindElements": (Method.POST, "/session/:sessionId/elements"),
"Freeze": (Method.POST, "/session/:sessionId/goog/page/freeze"),
"FullscreenWindow": (Method.POST, "/session/:sessionId/window/fullscreen"),
"GetActiveElement": command_executor.Command.GET_ACTIVE_ELEMENT,
"GetAlertMessage": (Method.GET, "/session/:sessionId/alert_text"),
"GetCookies": (Method.GET, "/session/:sessionId/cookie"),
"GetElementAttribute":
(Method.GET, "/session/:sessionId/element/:id/attribute/:name"),
"GetElementProperty":
(Method.GET, "/session/:sessionId/element/:id/property/:name"),
"GetElementCSSProperty":
(Method.GET, "/session/:sessionId/element/:id/css/:propertyName"),
"GetElementLocation":
(Method.GET, "/session/:sessionId/element/:id/location"),
"GetElementLocationInView":
(Method.GET, "/session/:sessionId/element/:id/location_in_view"),
"GetElementRect": (Method.GET, "/session/:sessionId/element/:id/rect"),
"GetElementSize": (Method.GET, "/session/:sessionId/element/:id/size"),
"GetElementTagName": (Method.GET, "/session/:sessionId/element/:id/name"),
"GetElementText": (Method.GET, "/session/:sessionId/element/:id/text"),
"GetElementValue": (Method.GET, "/session/:sessionId/element/:id/value"),
"GetGeolocation": (Method.GET, "/session/:sessionId/location"),
"GetLocalStorageItem":
(Method.GET, "/session/:sessionId/local_storage/key/:key"),
"GetLocalStorageKeys":
(Method.GET, "/session/:sessionId/local_storage"),
"GetLocalStorageSize":
(Method.GET, "/session/:sessionId/local_storage/size"),
"GetLog": (Method.POST, "/session/:sessionId/se/log"),
"GetLogTypes": (Method.GET, "/session/:sessionId/se/log/types"),
"GetNamedCookie": (Method.GET, "/session/:sessionId/cookie/:name"),
"GetNetworkConditions":
(Method.GET, "/session/:sessionId/chromium/network_conditions"),
"GetNetworkConnection":
(Method.GET, "/session/:sessionId/network_connection"),
"GetSessionCapabilities": (Method.GET, "/session/:sessionId"),
"GetSessionStorageItem":
(Method.GET, "/session/:sessionId/session_storage/key/:key"),
"GetSessionStorageKeys":
(Method.GET, "/session/:sessionId/session_storage"),
"GetSessionStorageSize":
(Method.GET, "/session/:sessionId/session_storage/size"),
"GetSessions": (Method.GET, "/sessions"),
"GetSource": (Method.GET, "/session/:sessionId/source"),
"GetStatus": (Method.GET, "status"),
"GetTimeouts": (Method.GET, "/session/:sessionId/timeouts"),
"GetTitle": (Method.GET, "/session/:sessionId/title"),
"GetUrl": (Method.GET, "/session/:sessionId/url"),
"GetWindow": command_executor.Command.GET_CURRENT_WINDOW_HANDLE,
"GetWindowPosition":
(Method.GET, "/session/:sessionId/window/:windowHandle/position"),
"GetWindowRect":
(Method.GET, "/session/:sessionId/window/rect"),
"GetWindowSize":
(Method.GET, "/session/:sessionId/window/:windowHandle/size"),
"GetWindows": command_executor.Command.GET_WINDOW_HANDLES,
"GoBack": (Method.POST, "/session/:sessionId/back"),
"GoForward": (Method.POST, "/session/:sessionId/forward"),
"HeapSnapshot": (Method.GET, "/session/:sessionId/chromium/heap_snapshot"),
"InitSession": (Method.POST, "/session"),
"IsAlertOpen": (Method.GET, "/session/:sessionId/alert"),
"IsElementDisplayed":
(Method.GET, "/session/:sessionId/element/:id/displayed"),
"IsElementEnabled": (Method.GET, "/session/:sessionId/element/:id/enabled"),
"IsElementEqual":
(Method.GET, "/session/:sessionId/element/:id/equals/:other"),
"IsElementSelected":
(Method.GET, "/session/:sessionId/element/:id/selected"),
"IsLoading": (Method.GET, "/session/:sessionId/is_loading"),
"LaunchApp": (Method.POST, "/session/:sessionId/chromium/launch_app"),
"MaximizeWindow": (Method.POST, "/session/:sessionId/window/maximize"),
"MinimizeWindow": (Method.POST, "/session/:sessionId/window/minimize"),
"MouseDown": (Method.POST, "/session/:sessionId/buttondown"),
"MouseMove": (Method.POST, "/session/:sessionId/moveto"),
"MouseUp": (Method.POST, "/session/:sessionId/buttonup"),
"Navigate": (Method.POST, "/session/:sessionId/url"),
"PerformActions": (Method.POST, "/session/:sessionId/actions"),
"Quit": (Method.DELETE, "/session/:sessionId"),
"Refresh": (Method.POST, "/session/:sessionId/refresh"),
"ReleaseActions": (Method.DELETE, "/session/:sessionId/actions"),
"RemoveLocalStorageItem":
(Method.DELETE, "/session/:sessionId/local_storage/key/:key"),
"RemoveSessionStorageItem":
(Method.DELETE, "/session/:sessionId/session_storage/key/:key"),
"Resume": (Method.POST, "/session/:sessionId/goog/page/resume"),
"Screenshot": (Method.GET, "/session/:sessionId/screenshot"),
"SendCommand": (Method.POST, "/session/:sessionId/chromium/send_command"),
"SendCommandAndGetResult":
(Method.POST, "/session/:sessionId/chromium/send_command_and_get_result"),
"SendCommandFromWebSocket":
(Method.POST, "session/:sessionId/chromium/send_command_from_websocket"),
"SetAlertPrompt": command_executor.Command.SET_ALERT_VALUE,
"SetGeolocation": (Method.POST, "/session/:sessionId/location"),
"SetImplicitWait":
(Method.POST, "/session/:sessionId/timeouts/implicit_wait"),
"SetLocalStorageKeys": (Method.POST, "/session/:sessionId/local_storage"),
"SetNetworkConditions":
(Method.POST, "/session/:sessionId/chromium/network_conditions"),
"SetNetworkConnection":
(Method.POST, "/session/:sessionId/network_connection"),
"SetScriptTimeout":
(Method.POST, "/session/:sessionId/timeouts/async_script"),
"SetSessionStorageItem":
(Method.POST, "/session/:sessionId/session_storage"),
"SetTimeouts": (Method.POST, "/session/:sessionId/timeouts"),
"SetWindowPosition":
(Method.POST, "/session/:sessionId/window/:windowHandle/position"),
"SetWindowRect": (Method.POST, "/session/:sessionId/window/rect"),
"SetWindowSize":
(Method.POST, "/session/:sessionId/window/:windowHandle/size"),
"SubmitElement": (Method.POST, "/session/:sessionId/element/:id/submit"),
"SwitchToFrame": (Method.POST, "/session/:sessionId/frame"),
"SwitchToParentFrame": (Method.POST, "/session/:sessionId/frame/parent"),
"SwitchToWindow": (Method.POST, "/session/:sessionId/window"),
"Tap": (Method.POST, "/session/:sessionId/touch/click"),
"TouchDoubleTap": (Method.POST, "/session/:sessionId/touch/doubleclick"),
"TouchDown": (Method.POST, "/session/:sessionId/touch/down"),
"TouchFlick": (Method.POST, "/session/:sessionId/touch/flick"),
"TouchLongPress": (Method.POST, "/session/:sessionId/touch/longclick"),
"TouchMove": (Method.POST, "/session/:sessionId/touch/move"),
"TouchScroll": (Method.POST, "/session/:sessionId/touch/scroll"),
"TouchUp": (Method.POST, "/session/:sessionId/touch/up"),
"Type": (Method.POST, "/session/:sessionId/keys"),
"TypeElement": (Method.POST, "/session/:sessionId/element/:id/value"),
"UploadFile": (Method.POST, "/session/:sessionId/file")
}
MULTI_SESSION_COMMANDS = ["GetSessions"]
class ReplayException(Exception):
"""Thrown for irrecoverable problems in parsing the log file."""
def _CountChar(line, opening_char, closing_char):
"""Count (number of opening_char) - (number of closing_char) in |line|.
Used to check for the end of JSON parameters. Ignores characters inside of
non-escaped quotes.
Args:
line: line to count characters in
opening_char: "+1" character, { or [
closing_char: "-1" character, ] or }
Returns:
(number of opening_char) - (number of closing_char)
"""
in_quote = False
total = 0
for i, c in enumerate(line):
if not in_quote and c is opening_char:
total += 1
if not in_quote and c is closing_char:
total -= 1
if c == '"' and (i == 0 or line[i-1] != "\\"):
in_quote = not in_quote
return total
def _GetCommandName(header_line):
"""Return the command name from the logged header line."""
return header_line.split()[3]
def _GetEntryType(header_line):
return header_line.split()[2]
def _GetSessionId(header_line):
"""Return the session ID from the logged header line."""
return header_line.split()[1][1:-1]
# TODO(cwinstanley): Might just want to literally dump these to strings and
# search using regexes. All the ids have distinctive formats
# and this would allow getting even ids returned from scripts.
# TODO(cwinstanley): W3C element compliance
def _GetAnyElementIds(payload):
"""Looks for any element, session, or window IDs, and returns them.
Payload should be passed as a dict or list.
Args:
payload: payload to check for IDs, as a python list or dict.
Returns:
list of ID strings, in order, in this payload
"""
element_tag="element-6066-11e4-a52e-4f735466cecf"
if isinstance(payload, dict):
if element_tag in payload:
return [payload[element_tag]]
elif isinstance(payload, list):
elements = [item[element_tag] for item in payload if element_tag in item]
windows = [item for item in payload if "CDwindow" in item]
if not elements and not windows:
return None
return elements + windows
return None
def _ReplaceWindowAndElementIds(payload, id_map):
"""Replace the window, session, and element IDs in |payload| using |id_map|.
Checks |payload| for window, element, and session IDs that are in |id_map|,
and replaces them.
Args:
payload: payload in which to replace IDs. This is edited in-place.
id_map: mapping from old to new IDs that should be replaced.
"""
if isinstance(payload, dict):
for key, value in payload.items():
if isinstance(value, basestring) and value in id_map:
payload[key] = id_map[value]
else:
_ReplaceWindowAndElementIds(payload[key], id_map)
elif isinstance(payload, list):
for i, value in enumerate(payload):
if isinstance(value, basestring) and value in id_map:
payload[i] = id_map[value]
else:
_ReplaceWindowAndElementIds(payload[i], id_map)
def _ReplaceUrl(payload, base_url):
"""Swap out the base URL (starting with protocol) in this payload.
Useful when switching ports or URLs.
Args:
payload: payload in which to do the url replacement
base_url: url to replace any applicable urls in |payload| with.
"""
if base_url and "url" in payload:
payload["url"] = re.sub(r"^https?://((?!/).)*/",
base_url + "/", payload["url"])
def _ReplaceBinary(payload, binary):
"""Replace the binary path in |payload| with the one in |binary|.
If |binary| exists but there is no binary in |payload|, it is added at the
appropriate location. Operates in-place.
Args:
payload: InitSession payload as a dictionary to replace binary in
binary: new binary to replace in payload. If binary is not truthy, but
there is a binary path in |payload|, we remove the binary path, which will
trigger ChromeDriver's mechanism for locating the Chrome binary.
"""
if ("desiredCapabilities" in payload
and "goog:chromeOptions" in payload["desiredCapabilities"]):
if binary:
(payload["desiredCapabilities"]["goog:chromeOptions"]
["binary"]) = binary
elif "binary" in payload["desiredCapabilities"]["goog:chromeOptions"]:
del payload["desiredCapabilities"]["goog:chromeOptions"]["binary"]
elif binary:
if "desiredCapabilities" not in payload:
payload["desiredCapabilities"] = {
"goog:chromeOptions": {
"binary": binary
}
}
elif "goog:chromeOptions" not in payload["desiredCapabilities"]:
payload["desiredCapabilities"]["goog:chromeOptions"] = {
"binary": binary
}
def _ReplaceSessionId(payload, id_map):
"""Update session IDs in this payload to match the current session.
Operates in-place.
Args:
payload: payload in which to replace session IDs.
id_map: mapping from logged IDs to IDs in the current session
"""
if "sessionId" in payload and payload["sessionId"] in id_map:
payload["sessionId"] = id_map[payload["sessionId"]]
class _Payload(object):
"""Object containing a payload, which usually belongs to a LogEntry."""
def __init__(self, payload_string):
"""Initialize the payload object.
Parses the payload, represented as a string, into a Python object.
Payloads appear in the log as a multi-line (usually) JSON string starting
on the header line, like the following, where the payload starts after the
word InitSession:
[1532467931.153][INFO]: [<session_id>] COMMAND InitSession {
"desiredCapabilities": {
"goog:chromeOptions": {
"args": [ "no-sandbox", "disable-gpu" ],
"binary": "<binary_path>"
}
}
}
Payloads can also be "singular" entries, like "1", "false", be an error
string (signified by the payload starting with "ERROR") or be totally
nonexistent for a given command.
Args:
payload_string: payload represented as a string.
"""
self.is_empty = not payload_string
self.is_error = not self.is_empty and payload_string[:5] == "ERROR"
if self.is_error or self.is_empty:
self.payload_raw = payload_string
else:
self.payload_raw = json.loads(payload_string)
def AddSessionId(self, session_id):
"""Adds a session ID into this payload.
Args:
session_id: session ID to add.
"""
self.payload_raw["sessionId"] = session_id
def SubstituteIds(self, id_map, binary, base_url="", init_session=False):
"""Replace old IDs in the given payload with ones for the current session.
Args:
id_map: mapping from logged IDs to current-session ones
binary: binary to add into this command, if |init_session| is True
base_url: base url to replace in the payload for navigation commands
init_session: whether this payload belongs to an InitSession command.
"""
if self.is_error or self.is_empty:
return
_ReplaceWindowAndElementIds(self.payload_raw, id_map)
_ReplaceSessionId(self.payload_raw, id_map)
if init_session:
_ReplaceBinary(self.payload_raw, binary)
_ReplaceUrl(self.payload_raw, base_url)
def GetAnyElementIds(self):
return _GetAnyElementIds(self.payload_raw)
class _GetSessionsResponseEntry(object):
"""Special LogEntry object for GetSessions commands.
We need a separate class for GetSessions because we need to manually build
the payload from separate log entries in CommandSequence._HandleGetSessions.
This means that we cannot use the payload object that we use for other
commands. There is also no canonical session ID for GetSessions.
"""
def __init__(self, payload):
"""Initialize the _GetSessionsResponseEntry.
Args:
payload: python dict of the payload for this GetSessions response
"""
self._payload = payload
self.name = "GetSessions"
self.session_id = ""
def GetPayloadPrimitive(self):
"""Get the payload for this entry."""
return self._payload
class LogEntry(object):
"""A helper class that can store a command or a response.
Public attributes:
name: name of the command, like InitSession.
session_id: session ID for this command, let as "" for GetSessions.
payload: parameters for a command or the payload returned with a response.
"""
_COMMAND = "COMMAND"
_RESPONSE = "RESPONSE"
def __init__(self, header_line, payload_string):
"""Initialize the LogEntry.
Args:
header_line: the line from the log that has the header of this entry.
This also sometimes has part or all of the payload in it.
Header lines look like the following:
[1532467931.153][INFO]: [<session_id>] <COMMAND or RESPONSE> <command>
payload_string: string representing the payload (usually a JSON dict, but
occasionally a string, bool, or int).
"""
self.name = _GetCommandName(header_line)
self._type = _GetEntryType(header_line)
self.session_id = _GetSessionId(header_line)
self.payload = _Payload(payload_string)
def IsResponse(self):
"""Returns whether this instance is a response."""
return self._type == self._RESPONSE
def IsCommand(self):
"""Returns whether this instance is a command."""
return self._type == self._COMMAND
def UpdatePayloadForReplaySession(self,
id_map=None,
binary="",
base_url=None):
"""Processes IDs in the payload to match the current session.
This replaces old window, element, and session IDs in the payload to match
the ones in the current session as defined in |id_map|. It also replaces
the binary and the url if appropriate.
Args:
id_map:
dict matching element, session, and window IDs in the logged session
with the ones from the current (replaying) session.
binary:
Chrome binary to replace if this is an InitSession call. The binary
will be removed if this is not set. This will cause ChromeDriver to
use it's own algorithm to find an appropriate Chrome binary.
base_url:
Url to replace the ones in the log with in Navigate commands.
"""
self.payload.AddSessionId(self.session_id)
self.payload.SubstituteIds(
id_map, binary, base_url, self.name == "InitSession")
def GetPayloadPrimitive(self):
"""Returns the payload associated with this LogEntry as a primitive."""
return self.payload.payload_raw
class _ParserWithUndo(object):
def __init__(self, log_file):
"""Wrapper around _Parser that implements a UndoGetNext function.
Args:
log_file: file that we wish to open as the log. This should be a
Python file object, or something else with readline capability.
"""
self._parser = _Parser(log_file)
self._saved_log_entry = None
def GetNext(self):
"""Get the next client command or response in the log.
Returns:
LogEntry object representing the next command or response in the log.
"""
if self._saved_log_entry is not None:
log_entry = self._saved_log_entry
self._saved_log_entry = None
return log_entry
return self._parser.GetNext()
def UndoGetNext(self, log_entry):
"""Undo the most recent GetNext call that returned |log_entry|.
Simulates going backwards in the log file by storing |log_entry| and
returning that on the next GetNext call.
Args:
entry: the returned entry from the GetNext that we wish to "undo"
Raises:
ReplayException: if this is called multiple times in a row, which will
cause the object to lose the previously undone entry.
"""
if self._saved_log_entry is not None:
raise RuntimeError('Cannot undo multiple times in a row.')
self._saved_log_entry = log_entry
class _Parser(object):
"""Class responsible for parsing (and not interpreting) the log file."""
# Matches headers for client commands/responses only (not DevTools events)
_CLIENT_PREAMBLE_REGEX = re.compile(
r"^\[[0-9]{10}\.[0-9]{3}\]\[INFO\]: \[[a-f0-9]*\]")
# Matches headers for client commands/responses when readable-timestamp
#option is selected. Depending on OS, final component may be 3 or 6 digits
_CLIENT_PREAMBLE_REGEX_READABLE = re.compile(
r"^\[[0-9]{2}-[0-9]{2}-[0-9]{4} "
"[0-9]{2}:[0-9]{2}:[0-9]{2}.([0-9]{3}){1,2}\]\[INFO\]: \[[a-f0-9]*\]")
def __init__(self, log_file):
"""Initialize the _Parser instance.
Args:
log_file: file that we wish to open as the log. This should be a
Python file object, or something else with readline capability.
"""
self._log_file = log_file
def GetNext(self):
"""Get the next client command or response in the log.
Returns:
LogEntry object representing the next command or response in the log.
Returns None if at the end of the log
"""
header = self._GetNextClientHeaderLine()
if not header:
return None
payload_string = self._GetPayloadString(header)
return LogEntry(header, payload_string)
def _GetNextClientHeaderLine(self):
"""Get the next line that is a command or response for the client.
Returns:
String containing the header of the next client command/response, or
an empty string if we're at the end of the log file.
"""
while True:
next_line = self._log_file.readline()
if not next_line: # empty string indicates end of the log file.
return None
if re.match(self._CLIENT_PREAMBLE_REGEX, next_line):
return next_line
if re.match(self._CLIENT_PREAMBLE_REGEX_READABLE, next_line):
#Readable timestamp contains a space between date and time,
#which breaks other parsing of the header. Replace with underscore
next_line = next_line.replace(" ", "_", 1)
return next_line
def _GetPayloadString(self, header_line):
"""Gets the payload for the current command in self._logfile.
Parses the given header line, along with any additional lines as
applicable, to get a complete JSON payload object from the current
command in the log file. Note that the payload can be JSON, and error
(just a string), or something else like an int or a boolean.
Args:
header_line: the first line of this command
Raises:
ReplayException: if the JSON appears to be incomplete in the log
Returns:
payload of the command as a string
"""
min_header = 5
header_segments = header_line.split()
if len(header_segments) < min_header:
return None
payload = " ".join(header_segments[min_header-1:])
opening_char = header_segments[min_header-1]
if opening_char == "{":
closing_char = "}"
elif opening_char == "[":
closing_char = "]"
else:
return payload # payload is singular, like "1", "false", or an error
opening_char_count = (payload.count(opening_char)
- payload.count(closing_char))
while opening_char_count > 0:
next_line = self._log_file.readline()
if not next_line:
# It'd be quite surprising that the log is truncated in the middle of
# a JSON; far more likely that the parsing failed for some reason.
raise ReplayException(
"Reached end of file without reaching end of JSON payload")
payload += next_line
opening_char_count += _CountChar(next_line, opening_char,
closing_char)
return payload
class CommandSequence(object):
"""Interface to the sequence of commands in a log file."""
def __init__(self, log_path="", base_url=None, chrome_binary=None):
"""Initialize the CommandSequence.
Args:
log_path: file to read logs from (usually opened with with)
base_url: url to replace the base of logged urls with, if
applicable. Replaces port number as well.
chrome_binary: use this Chrome binary instead of the one in the log,
if not None.
"""
self._base_url = base_url
self._binary = chrome_binary
self._id_map = {}
self._parser = _ParserWithUndo(log_path)
self._staged_logged_ids = None
self._staged_logged_session_id = None
self._last_response = None
def NextCommand(self, previous_response):
"""Get the next command in the log file.
Gets start of next command, returning the command and response,
ready to be executed directly in the new session.
Args:
previous_response: the response payload from running the previous command
outputted by this function; None if this is the first command, or
element, session, and window ID substitution is not desired (i.e.
use the logged IDs). This provides the IDs that are then mapped
back onto the ones in the log to formulate future commands correctly.
Raises:
ReplayException: there is a problem with the log making it not
parseable.
Returns:
None if there are no remaining logs.
Otherwise, |command|, a LogEntry object with the following fields:
name: command name (e.g. InitSession)
type: either LogEntry.COMMAND or LogEntry.RESPONSE
payload: parameters passed with the command
session_id: intended session ID for the command, or "" if the
command is GetSessions.
"""
if previous_response:
self._IngestRealResponse(previous_response)
command = self._parser.GetNext()
if not command: # Reached end of log file
return None
if not command.IsCommand():
raise ReplayException("Command and Response unexpectedly out of order.")
if command.name == "GetSessions":
return self._HandleGetSessions(command)
command.UpdatePayloadForReplaySession(
self._id_map, self._binary, self._base_url)
response = self._parser.GetNext()
if not response:
return command
if not response.IsResponse():
raise ReplayException("Command and Response unexpectedly out of order.")
self._IngestLoggedResponse(response)
return command
def _IngestRealResponse(self, response):
"""Process the actual response from the previously issued command.
Ingests the given response that came from calling the last command on
the running ChromeDriver replay instance. This is the step where the
session and element IDs are matched between |response| and the logged
response.
Args:
response: Python dict of the real response to be analyzed for IDs.
"""
if "value" in response and self._staged_logged_ids:
real_ids = _GetAnyElementIds(response["value"])
if real_ids and self._staged_logged_ids:
for id_old, id_new in zip(self._staged_logged_ids, real_ids):
self._id_map[id_old] = id_new
self._staged_logged_ids = None
# In W3C format, the http response is a single key dict,
# where the value is None, a single value, or another dictionary
# sessionId is contained in the nested dictionary
if (self._staged_logged_session_id
and "value" in response and response["value"]
and isinstance(response["value"], dict)
and "sessionId" in response["value"]):
self._id_map[self._staged_logged_session_id] = (
response["value"]["sessionId"])
self._staged_logged_session_id = None
def _IngestLoggedResponse(self, response):
"""Reads the response at the current position in the log file.
Also matches IDs between the logged and new sessions.
Args:
response: the response from the log (from _parser.GetNext)
"""
self._last_response = response # store for testing purposes
self._staged_logged_ids = response.payload.GetAnyElementIds()
if response.name == "InitSession":
self._staged_logged_session_id = response.session_id
def _HandleGetSessions(self, first_command):
"""Special case handler for the GetSessions command.
Since it is dispatched to each session thread, GetSessions doesn't guarantee
command-response-command-response ordering in the log. This happens with
getSessions, which is broadcast to and logged by each of the active sessions
in the ChromeDriver instance. This simply consumes all the necessary logs
resulting from that command until it reaches the next command in the log.
This results in one returned |overall_response|, which is a list of the
responses from each GetSessions sub-call. This is not the same as what is
in the log file, but it is what ChromeDriver returns in real life.
Args:
first_command: The first GetSessions command from the log
Returns:
first_command: the command that triggered all of the calls absorbed by
this function
"""
command_response_pairs = collections.defaultdict(dict)
command_response_pairs[first_command.session_id] = (
{"command": first_command})
while True:
next_entry = self._parser.GetNext()
if not next_entry:
self._parser.UndoGetNext(next_entry)
break
if next_entry.IsResponse():
command_response_pairs[next_entry.session_id]["response"] = next_entry
elif next_entry.IsCommand():
if (next_entry.name != first_command.name
or next_entry.session_id in command_response_pairs):
self._parser.UndoGetNext(next_entry)
break
command_response_pairs[next_entry.session_id]["command"] = next_entry
response = [
{u"id": key, u"capabilities": val["response"].GetPayloadPrimitive()}
for key, val in command_response_pairs.items()
]
self._last_response = _GetSessionsResponseEntry(response)
return first_command
class Replayer(object):
"""Replays the commands in the log file, using CommandSequence internally.
This class provides the command-line functionality for this file.
"""
def __init__(self, logfile, server, chrome_binary, base_url=None):
"""Initialize the Replayer instance.
Args:
logfile: log file handle object to replay from.
options: command-line options; see below. Needs at least
options.chromedriver for the ChromeDriver binary.
base_url: string, base of the url to replace in the logged urls (useful
for when ports change). If any value is passed here, it overrides any
base url passed in options.
"""
# TODO(cwinstanley) Add Android support and perhaps support for other
# chromedriver command line options.
self.executor = command_executor.CommandExecutor(server.GetUrl())
self.command_sequence = CommandSequence(logfile, base_url=base_url,
chrome_binary=chrome_binary)
def Run(self):
"""Runs the replay."""
real_response = None
while True:
command = self.command_sequence.NextCommand(real_response)
if not command:
break
real_response = self.executor.Execute(_COMMANDS[command.name],
command.GetPayloadPrimitive())
def StartChromeDriverServer(chromedriver_binary,
output_log_path,
devtools_replay_path="",
replayable=False,
additional_args=None):
chromedriver = util.GetAbsolutePathOfUserPath(chromedriver_binary)
if (not os.path.exists(chromedriver) and
util.GetPlatformName() == "win" and
not chromedriver.lower().endswith(".exe")):
chromedriver = chromedriver + ".exe"
if output_log_path:
output_log_path = util.GetAbsolutePathOfUserPath(output_log_path)
chromedriver_server = server.Server(chromedriver_binary,
log_path=output_log_path,
devtools_replay_path=devtools_replay_path,
replayable=replayable,
additional_args=additional_args)
return chromedriver_server
def _CommandLineError(parser, message):
parser.error(message + '\nPlease run "%s --help" for help' % __file__)
def _GetCommandLineOptions():
"""Get, parse, and error check command line options for this file."""
usage = "usage: %prog <chromedriver binary> <input log path> [options]"
parser = optparse.OptionParser(usage=usage)
parser.add_option(
"", "--output-log-path",
help="Output verbose server logs to this file")
parser.add_option(
"", "--chrome", help="Path to a build of the chrome binary. If not\n"
"specified, uses ChromeDriver's own algorithm to find Chrome.")
parser.add_option(
"", "--base-url", help="Base url to replace logged urls (in "
"navigate, getUrl, and similar commands/responses).")
parser.add_option(
"", "--devtools-replay", help="Replay DevTools actions in addition\n"
"to client-side actions")
parser.add_option(
"", "--replayable", help="Generate logs that do not have truncated\n"
"strings so that they can be replayed again.")
parser.add_option(
'', '--additional-args', action='append',
help='Additional arguments to add on ChromeDriver command line')
options, args = parser.parse_args()
if len(args) < 2:
_CommandLineError(parser,
'ChromeDriver binary and/or input log path missing.')
if len(args) > 2:
_CommandLineError(parser, 'Too many command line arguments.')
if not os.path.exists(args[0]):
_CommandLineError(parser, 'Path given for chromedriver is invalid.')
if options.chrome and not os.path.exists(options.chrome):
_CommandLineError(parser, 'Path given by --chrome is invalid.')
if options.replayable and not options.output_log_path:
_CommandLineError(
parser, 'Replayable log option needs --output-log-path specified.')
return options, args
def main():
options, args = _GetCommandLineOptions()
devtools_replay_path = args[1] if options.devtools_replay else None
server = StartChromeDriverServer(args[0], options.output_log_path,
devtools_replay_path, options.replayable, options.additional_args)
input_log_path = util.GetAbsolutePathOfUserPath(args[1])
chrome_binary = (util.GetAbsolutePathOfUserPath(options.chrome)
if options.chrome else None)
with open(input_log_path) as logfile:
Replayer(logfile, server, chrome_binary, options.base_url).Run()
server.Kill()
if __name__ == "__main__":
main()
| StarcoderdataPython |
1700867 | # -*- coding: utf-8 -*-
import sys
import random
from collections import Counter
import numpy as np
import cmath
from qlazy.error import *
from qlazy.config import *
from qlazy.util import *
from qulacs import QuantumState
from qulacs import QuantumCircuit
from qulacs.gate import Identity, X, Y, Z
from qulacs.gate import H, S, Sdag, T, Tdag, sqrtX, sqrtXdag, sqrtY, sqrtYdag
from qulacs.gate import CNOT, CZ, SWAP
from qulacs.gate import RX, RY, RZ
from qulacs.gate import U1, U2, U3
from qulacs.gate import Measurement
from qulacs.gate import DenseMatrix
from qulacs.gate import to_matrix_gate
GateFunctionName = {
# 1-qubit, 0-parameter gate
PAULI_X: 'X',
PAULI_Y: 'Y',
PAULI_Z: 'Z',
ROOT_PAULI_X:'sqrtX',
ROOT_PAULI_X_:'sqrtXdag',
HADAMARD:'H',
PHASE_SHIFT_S:'S',
PHASE_SHIFT_S_:'Sdag',
PHASE_SHIFT_T:'T',
PHASE_SHIFT_T_:'Tdag',
IDENTITY:'Identity',
# 1-qubit, 1-parameter gate
ROTATION_X:'RX',
ROTATION_Y:'RY',
ROTATION_Z:'RZ',
PHASE_SHIFT:'__get_P',
ROTATION_U1:'U1',
# 1-qubit, 2-parameter gate
ROTATION_U2:'U2',
# 1-qubit, 3-parameter gate
ROTATION_U3:'U3',
# 2-qubit, 0-parameters gate
CONTROLLED_X:'CNOT',
CONTROLLED_Y:'__get_CY',
CONTROLLED_Z:'CZ',
CONTROLLED_XR:'__get_CXR',
CONTROLLED_XR_:'__get_CXR_dg',
CONTROLLED_H:'__get_CH',
CONTROLLED_S:'__get_CS',
CONTROLLED_S_:'__get_CS_dg',
CONTROLLED_T:'__get_CT',
CONTROLLED_T_:'__get_CT_dg',
CONTROLLED_P:'__get_CP',
SWAP_QUBITS:'SWAP',
# 2-qubit, 1-parameters gate
CONTROLLED_RX:'__get_CRX',
CONTROLLED_RY:'__get_CRY',
CONTROLLED_RZ:'__get_CRZ',
CONTROLLED_U1:'__get_CU1',
# 2-qubit, 2-parameters gate
CONTROLLED_U2:'__get_CU2',
# 2-qubit, 3-parameters gate
CONTROLLED_U3:'__get_CU3',
}
def init(qubit_num=0, backend=None):
qstate = QuantumState(qubit_num)
return qstate
def run(qubit_num=0, cmem_num=0, qstate=None, qcirc=[], cmem=[], shots=1, backend=None):
# number of measurement (measurement_cnt)
# and its position of last measurement (end_of_measurements)
measurement_cnt = 0
end_of_measurements = -1
for j, c in enumerate(qcirc):
if c['kind'] == MEASURE:
measurement_cnt += 1
end_of_measurements = j
# qcirc have only one measurement at the end, or not
if measurement_cnt == 1 and end_of_measurements == len(qcirc) - 1:
only_one_measurement_end = True
else:
only_one_measurement_end = False
# run the quantum circuit
freq = Counter()
for cnt in range(shots):
for i, c in enumerate(qcirc):
if c['kind'] == MEASURE:
md = __qulacs_measure(qstate, qubit_num, qid=c['qid'], shots=1)
if c['cid'] != None:
for k,mval in enumerate(list(md['last'])):
cmem[c['cid'][k]] = int(mval)
if end_of_measurements == i:
freq += md['frequency']
else:
if c['ctrl'] == None or cmem[c['ctrl']] == 1:
__qulacs_operate_qgate(qstate, qubit_num, kind=c['kind'], qid=c['qid'],
phase=c['phase'], phase1=c['phase1'], phase2=c['phase2'])
# qcirc have only one measurement
if only_one_measurement_end == True and i == len(qcirc) - 2:
md = __qulacs_measure(qstate, qubit_num, qid=qcirc[-1]['qid'], shots=shots)
freq = md['frequency']
if qcirc[-1]['cid'] != None:
for k,mval in enumerate(list(md['last'])):
cmem[qcirc[-1]['cid'][k]] = int(mval)
break
if only_one_measurement_end == True and i == len(qcirc) - 2:
break
# reset classical memory and qubits, if not end of the shots
if cnt < shots-1:
cmem = [0] * len(cmem)
qstate.set_zero_state()
# if end_of_measurements > 0:
if measurement_cnt > 0:
measured_qid = qcirc[end_of_measurements]['qid']
result = {'measured_qid': measured_qid, 'frequency': freq}
else:
result = None
return result
def reset(qstate=None, backend=None):
if qstate != None:
qstate.set_zero_state()
# return True
def free(qstate=None, backend=None):
if qstate != None:
del qstate
def __is_supported_qgate(kind):
if kind in GateFunctionName.keys():
return True
else:
return False
# not supported as pre-defined gates
def __get_P(q0, phase):
exp = cmath.exp(1.j * phase)
gate = DenseMatrix(q0, [[1., 0.], [0., exp]])
return gate
def __get_CY(q0, q1):
gate = to_matrix_gate(Y(q1))
gate.add_control_qubit(q0, 1)
return gate
def __get_CXR(q0, q1):
gate = to_matrix_gate(sqrtX(q1))
gate.add_control_qubit(q0, 1)
return gate
def __get_CXR_dg(q0, q1):
gate = to_matrix_gate(sqrtXdag(q1))
gate.add_control_qubit(q0, 1)
return gate
def __get_CH(q0, q1):
gate = to_matrix_gate(H(q1))
gate.add_control_qubit(q0, 1)
return gate
def __get_CS(q0, q1):
gate = to_matrix_gate(S(q1))
gate.add_control_qubit(q0, 1)
return gate
def __get_CS_dg(q0, q1):
gate = to_matrix_gate(Sdag(q1))
gate.add_control_qubit(q0, 1)
return gate
def __get_CT(q0, q1):
gate = to_matrix_gate(T(q1))
gate.add_control_qubit(q0, 1)
return gate
def __get_CT_dg(q0, q1):
gate = to_matrix_gate(Tdag(q1))
gate.add_control_qubit(q0, 1)
return gate
def __get_CP(q0, q1, phase):
exp = cmath.exp(1.j * phase)
gate = DenseMatrix(q1, [[1., 0.], [0., exp]])
gate.add_control_qubit(q0, 1)
return gate
def __get_CRX(q0, q1, phase):
gate = to_matrix_gate(RX(q1, phase))
gate.add_control_qubit(q0, 1)
return gate
def __get_CRY(q0, q1, phase):
gate = to_matrix_gate(RY(q1, phase))
gate.add_control_qubit(q0, 1)
return gate
def __get_CRZ(q0, q1, phase):
gate = to_matrix_gate(RZ(q1, phase))
gate.add_control_qubit(q0, 1)
return gate
def __get_CU1(q0, q1, phase):
gate = to_matrix_gate(U1(q1, phase))
gate.add_control_qubit(q0, 1)
return gate
def __get_CU2(q0, q1, phase, phase1):
gate = to_matrix_gate(U2(q1, phase, phase1))
gate.add_control_qubit(q0, 1)
return gate
def __get_CU3(q0, q1, phase, phase1, phase2):
gate = to_matrix_gate(U3(q1, phase, phase1, phase2))
gate.add_control_qubit(q0, 1)
return gate
def __qulacs_operate_qgate(qstate, qubit_num, kind, qid, phase, phase1, phase2):
if __is_supported_qgate(kind) == False:
raise ValueError("not supported quantum gate")
circ = QuantumCircuit(qubit_num)
term_num = get_qgate_qubit_num(kind)
para_num = get_qgate_param_num(kind)
gate_function_name = GateFunctionName[kind]
phase = phase * np.pi
phase1 = phase1 * np.pi
phase2 = phase2 * np.pi
# the sign-definition of rotation gate on qulacs
if (kind == ROTATION_X or kind == ROTATION_Y or kind == ROTATION_Z or
kind == CONTROLLED_RX or kind == CONTROLLED_RY or kind == CONTROLLED_RZ):
phase = -phase
# the argument-order-definition of U2 gate on qulacs
elif kind == ROTATION_U2 or kind == CONTROLLED_U2:
phase1, phase = phase, phase1
# the argument-order-definition of U3 gate on qulacs
elif kind == ROTATION_U3 or kind == CONTROLLED_U3:
phase2, phase = phase, phase2
if term_num == 1 and para_num == 0:
circ.add_gate(eval(gate_function_name)(qid[0]))
elif term_num == 1 and para_num == 1:
circ.add_gate(eval(gate_function_name)(qid[0], phase))
elif term_num == 1 and para_num == 2:
circ.add_gate(eval(gate_function_name)(qid[0], phase, phase1))
elif term_num == 1 and para_num == 3:
circ.add_gate(eval(gate_function_name)(qid[0], phase, phase1, phase2))
elif term_num == 2 and para_num == 0:
circ.add_gate(eval(gate_function_name)(qid[0], qid[1]))
elif term_num == 2 and para_num == 1:
circ.add_gate(eval(gate_function_name)(qid[0], qid[1], phase))
elif term_num == 2 and para_num == 2:
circ.add_gate(eval(gate_function_name)(qid[0], qid[1], phase, phase1))
elif term_num == 2 and para_num == 3:
circ.add_gate(eval(gate_function_name)(qid[0], qid[1], phase, phase1, phase2))
else:
raise ValueError("not supported terminal or parameter-number")
circ.update_quantum_state(qstate)
def __qulacs_measure(qstate, qubit_num, qid, shots=1):
# error check
# qubit_num = qstate.get_qubit_count()
if max(qid) >= qubit_num:
raise ValueError
# list of binary vectors for len(qid) bit integers
qid_sorted = sorted(qid)
mbits_list = []
for i in range(2**len(qid)):
# ex)
# qid = [5,0,2] -> qid_sorted = [0,2,5]
# i = (0,1,2), idx = (2,0,1)
# bits = [q0,q1,q2] -> mbits = [q1,q2,q0]
bits = list(map(int, list(format(i, '0{}b'.format(len(qid))))))
mbits = [0] * len(qid)
for i, q in enumerate(qid):
idx = qid_sorted.index(q)
mbits[idx] = bits[i]
mbits_list.append(mbits)
# list of probabilities
prob_list = []
prob = 0.0
for mbits in mbits_list:
args = [2] * qubit_num
for j, q in enumerate(qid):
args[q] = mbits[j]
prob += qstate.get_marginal_probability(args)
prob_list.append(prob)
if prob_list[-1] != 1.0:
prob_list[-1] = 1.0
# frequency
mval_data = []
if shots > 1:
for i in range(shots - 1):
rand = random.random()
for mbits, prob in zip(mbits_list, prob_list):
mval = ''.join(map(str, mbits))
if rand <= prob:
mval_data.append(mval)
break
# last quantum state
circ = QuantumCircuit(qubit_num)
for i, q in enumerate(qid):
circ.add_gate(Measurement(q, i))
circ.update_quantum_state(qstate)
last = ''.join(map(str, [qstate.get_classical_value(i) for i in range(len(qid))]))
mval_data.append(last)
frequency = Counter(mval_data)
measured_data = {'measured_qid': qid, 'frequency': frequency, 'last': last}
return measured_data
| StarcoderdataPython |
3202277 | <reponame>anilpai/leetcode
# w = [1, 2, 4, 2, 5]
# v = [5, 3, 5, 3, 2]
# C = 10
'''
'w' is the weights of items.
'v' is the values of items.
'''
w = [1, 3, 4, 5]
v = [1, 4, 5, 7]
# Naive version.
print("### Naive solution. ###")
def KS(n, C):
if n==0 or C==0:
res = 0
elif w[n] > C:
res = KS(n-1, C)
else:
tmp1 = KS(n-1,C)
tmp2 = v[n] + KS(n-1, C-w[n])
res = max(tmp1, tmp2)
return res
C = 7
print(KS(len(w)-1, C))
'''
Dynamic Programming based solution.
Source: GeeksforGeeks.com
'''
print("### Dynamic Programming based solution. ###")
def knapSack(W, wt, val, n):
K = [[0 for x in range(W + 1)] for x in range(n + 1)]
# Build table K[][] in bottom up manner
for i in range(n + 1):
for w in range(W + 1):
if i == 0 or w == 0:
K[i][w] = 0
elif wt[i - 1] <= w:
K[i][w] = max(val[i - 1] + K[i - 1][w - wt[i - 1]], K[i - 1][w])
else:
K[i][w] = K[i - 1][w]
return K[n][W]
# Driver program to test above function
val = [60, 100, 120]
wt = [10, 20, 30]
W = 50
n = len(val)
print(knapSack(W, wt, val, n)) | StarcoderdataPython |
1720154 | <reponame>nervmaster/djangoproj
from django.db import models
from decimal import Decimal
# Create your models here.
class Ingredient(models.Model):
MEASURE_CHOICES = (
('L', 'Liter'),
('G', 'Gram')
)
CURRENCY_CHOICES = (
('USD', 'US Dollars'),
('EUR', 'EURO'),
('BRL', 'BRAZILIAN REAL')
)
name = models.CharField(max_length=250)
article_number = models.CharField(max_length=250) #See bar code pattern
measure = models.DecimalField(max_digits=19, decimal_places=3)
unit_of_measure = models.CharField(max_length=1, choices=MEASURE_CHOICES)
measure_scale_factor = models.DecimalField(max_digits=12, decimal_places=6)
cost = models.DecimalField(max_digits = 19, decimal_places=3)
currency = models.CharField(max_length=3, choices=CURRENCY_CHOICES)
def __str__(self):
output = 'name: {} ({}) '.format(self.name, self.id)
output += 'article_number: {} '.format(self.article_number)
output += 'measure: {} '.format(self.measure)
output += 'unit_of_measure: {} '.format(self.unit_of_measure)
output += 'measure_scale_factor: {} '.format(self.measure_scale_factor)
output += 'cost: {} '.format(self.measure_scale_factor)
output += 'currency: {} '.format(self.currency)
return output
def get_base_cost(self):
return self.cost / (self.measure * self.measure_scale_factor)
class Recipe(models.Model):
name = models.CharField(max_length=250)
def __str__(self):
return 'name: {} ({})'.format(self.name, self.id)
class RecipeIngredientQuantity(models.Model):
recipe = models.ForeignKey(Recipe, on_delete=models.CASCADE)
ingredient = models.ForeignKey(Ingredient, on_delete=models.CASCADE)
quantity = models.DecimalField(max_digits = 19, decimal_places=3)
scale_factor = models.DecimalField(max_digits=12, decimal_places=6)
class Meta:
unique_together = ('recipe', 'ingredient')
def __str__(self):
output = 'recipe: {} ({}) '.format(self.recipe.name, self.recipe.id)
output += 'ingredient: {} ({}) '.format(self.ingredient.name, self.ingredient.id)
output += 'quantity: {} '.format(self.quantity)
output += 'scale_factor: {}'.format(self.scale_factor)
return output
def get_ingredient_cost(self):
return self.quantity * self.ingredient.get_base_cost() * self.scale_factor | StarcoderdataPython |
1611497 | from functools import wraps
from .checker import (
satisfies_independent_axiom,
satisfies_dependent_axiom,
satisfies_bases_axiom,
satisfies_circuits_axiom,
satisfies_rank_function_axiom,
satisfies_nulity_function_axiom,
satisfies_closure_axiom,
satisfies_open_sets_axiom,
satisfies_hyperplanes_axiom,
satisfies_spanning_sets_axiom,
)
from .exception import MatroidAxiomError
from .types import MatroidAxiom
def validate_matroid_axiom(func):
@wraps(func)
def __wrapper(*args, **kwargs):
maybe_matroid, axiom = args[0], args[1]
if any([
axiom is MatroidAxiom.INDEPENDENT_SETS and not satisfies_independent_axiom(maybe_matroid),
axiom is MatroidAxiom.DEPENDENT_SETS and not satisfies_dependent_axiom(maybe_matroid),
axiom is MatroidAxiom.BASES and not satisfies_bases_axiom(maybe_matroid),
axiom is MatroidAxiom.CIRCUITS and not satisfies_circuits_axiom(maybe_matroid),
axiom is MatroidAxiom.RANK_FUNCTION and not satisfies_rank_function_axiom(maybe_matroid),
axiom is MatroidAxiom.NULITY_FUNCTION and not satisfies_nulity_function_axiom(maybe_matroid),
axiom is MatroidAxiom.CLOSURE_FUNCTION and not satisfies_closure_axiom(maybe_matroid),
axiom is MatroidAxiom.OPEN_SETS and not satisfies_open_sets_axiom(maybe_matroid),
axiom is MatroidAxiom.HYPERPLANES and not satisfies_hyperplanes_axiom(maybe_matroid),
axiom is MatroidAxiom.SPANNING_SETS and not satisfies_spanning_sets_axiom(maybe_matroid),
]):
raise MatroidAxiomError(f"The given family doesn't satisfy {axiom.value}!")
return func(*args, **kwargs)
return __wrapper
def validate_independent_sets(func):
@wraps(func)
def __wrapper(*args, **kwargs):
if not satisfies_independent_axiom(args[0]):
raise MatroidAxiomError("The given family doesn't satisfy the axiom of Independent Sets!")
return func(*args, **kwargs)
return __wrapper
def validate_dependent_sets(func):
@wraps(func)
def __wrapper(*args, **kwargs):
if not satisfies_dependent_axiom(args[0]):
raise MatroidAxiomError("The given family doesn't satisfy the axiom of Dependent Sets!")
return func(*args, **kwargs)
return __wrapper
def validate_bases(func):
@wraps(func)
def __wrapper(*args, **kwargs):
if not satisfies_bases_axiom(args[0]):
raise MatroidAxiomError("The given family doesn't satisfy the axiom of Bases!")
return func(*args, **kwargs)
return __wrapper
def validate_circuits(func):
@wraps(func)
def __wrapper(*args, **kwargs):
if not satisfies_circuits_axiom(args[0]):
raise MatroidAxiomError("The given family doesn't satisfy the axiom of Circuits!")
return func(*args, **kwargs)
return __wrapper
def validate_rank_function(func):
@wraps(func)
def __wrapper(*args, **kwargs):
if not satisfies_rank_function_axiom(args[0]):
raise MatroidAxiomError("The given family doesn't satisfy the axiom of Rank Function!")
return func(*args, **kwargs)
return __wrapper
def validate_nulity_function(func):
@wraps(func)
def __wrapper(*args, **kwargs):
if not satisfies_nulity_function_axiom(args[0]):
raise MatroidAxiomError("The given family doesn't satisfy the axiom of Nulity Function!")
return func(*args, **kwargs)
return __wrapper
def validate_closure_function(func):
@wraps(func)
def __wrapper(*args, **kwargs):
if not satisfies_closure_axiom(args[0]):
raise MatroidAxiomError("The given family doesn't satisfy the axiom of Closure Function!")
return func(*args, **kwargs)
return __wrapper
def validate_open_sets(func):
@wraps(func)
def __wrapper(*args, **kwargs):
if not satisfies_open_sets_axiom(args[0]):
raise MatroidAxiomError("The given family doesn't satisfy the axiom of Open Sets!")
return func(*args, **kwargs)
return __wrapper
def validate_hyperplanes(func):
@wraps(func)
def __wrapper(*args, **kwargs):
if not satisfies_hyperplanes_axiom(args[0]):
raise MatroidAxiomError("The given family doesn't satisfy the axiom of Hyperplanes!")
return func(*args, **kwargs)
return __wrapper
def validate_spanning_sets(func):
@wraps(func)
def __wrapper(*args, **kwargs):
if not satisfies_spanning_sets_axiom(args[0]):
raise MatroidAxiomError("The given family doesn't satisfy the axiom of Spanning Sets!")
return func(*args, **kwargs)
return __wrapper | StarcoderdataPython |
4806175 | <reponame>Siddharthgolecha/UDMIS
from inspect import CO_VARKEYWORDS
import sys
import pennylane as qml
from pennylane import numpy as np
def hamiltonian_coeffs_and_obs(graph):
"""Creates an ordered list of coefficients and observables used to construct
the UDMIS Hamiltonian.
Args:
- graph (list((float, float))): A list of x,y coordinates. e.g. graph = [(1.0, 1.1), (4.5, 3.1)]
Returns:
- coeffs (list): List of coefficients for elementary parts of the UDMIS Hamiltonian
- obs (list(qml.ops)): List of qml.ops
"""
num_vertices = len(graph)
E, num_edges = edges(graph)
u = 1.35
obs = []
coeffs = []
global COORDS
COORDS = []
for i in range(num_vertices):
coeffs.append(-1)
for i in range(num_edges):
coeffs.append(u)
Z = np.array([[1,0],[0,-1]])
I = np.array([[1,0],[0,1]])
for i in range(num_vertices):
obs.append(qml.Hermitian((Z+I)/2,wires=i))
for i in range(len(E)):
for j in range(len(E[i])):
if E[i][j]:
COORDS.append(i)
obs.append(qml.Hermitian((Z+I)/2,wires=i)@qml.Hermitian((Z+I)/2,wires=j))
return coeffs, obs
def edges(graph):
"""Creates a matrix of bools that are interpreted as the existence/non-existence (True/False)
of edges between vertices (i,j).
Args:
- graph (list((float, float))): A list of x,y coordinates. e.g. graph = [(1.0, 1.1), (4.5, 3.1)]
Returns:
- num_edges (int): The total number of edges in the graph
- E (np.ndarray): A Matrix of edges
"""
num_vertices = len(graph)
E = np.zeros((num_vertices, num_vertices), dtype=bool)
for vertex_i in range(num_vertices - 1):
xi, yi = graph[vertex_i] # coordinates
for vertex_j in range(vertex_i + 1, num_vertices):
xj, yj = graph[vertex_j] # coordinates
dij = np.sqrt((xi - xj) ** 2 + (yi - yj) ** 2)
E[vertex_i, vertex_j] = 1 if dij <= 1.0 else 0
return E, np.sum(E, axis=(0, 1))
def variational_circuit(params, num_vertices):
"""A variational circuit.
Args:
- params (np.ndarray): your variational parameters
- num_vertices (int): The number of vertices in the graph. Also used for number of wires.
"""
for i in range(len(COORDS)):
qml.RY(params[i],wires=COORDS[i])
def train_circuit(num_vertices, H):
"""Trains a quantum circuit to learn the ground state of the UDMIS Hamiltonian.
Args:
- num_vertices (int): The number of vertices/wires in the graph
- H (qml.Hamiltonian): The result of qml.Hamiltonian(coeffs, obs)
Returns:
- E / num_vertices (float): The ground state energy density.
"""
dev = qml.device("default.qubit", wires=num_vertices)
@qml.qnode(dev)
def cost(params):
"""The energy expectation value of a Hamiltonian"""
variational_circuit(params, num_vertices)
return qml.expval(H)
epochs = 100
params = np.array([np.random.random()*np.pi for i in range(len(COORDS))],
requires_grad=True)
opt = qml.AdamOptimizer(stepsize=0.8)
E = 0
for i in range(epochs):
params, E = opt.step_and_cost(cost, params)
return E / float(num_vertices)
if __name__ == "__main__":
inputs = np.array(sys.stdin.read().split(","), dtype=float, requires_grad=False)
num_vertices = int(len(inputs) / 2)
x = inputs[:num_vertices]
y = inputs[num_vertices:]
graph = []
for n in range(num_vertices):
graph.append((x[n].item(), y[n].item()))
coeffs, obs = hamiltonian_coeffs_and_obs(graph)
H = qml.Hamiltonian(coeffs, obs)
energy_density = train_circuit(num_vertices, H)
print(f"{energy_density:.6f}")
| StarcoderdataPython |
1761329 | import numpy
from chainer import distributions
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
'shape': [(2, 3), ()],
'is_variable': [True, False],
'sample_shape': [(3, 2), ()],
}))
@testing.fix_random()
@testing.with_requires('scipy')
class TestLogNormal(testing.distribution_unittest):
scipy_onebyone = True
def setUp_configure(self):
from scipy import stats
self.dist = distributions.LogNormal
self.scipy_dist = stats.lognorm
self.test_targets = set([
"batch_shape", "entropy", "event_shape", "log_prob", "mean",
"sample", "support", "variance"])
mu = utils.force_array(
numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32))
sigma = utils.force_array(numpy.exp(numpy.random.uniform(
-1, 0, self.shape)).astype(numpy.float32))
self.params = {"mu": mu, "sigma": sigma}
self.scipy_params = {"s": sigma, "scale": numpy.exp(mu)}
self.support = 'positive'
def sample_for_test(self):
smp = numpy.random.lognormal(
size=self.sample_shape + self.shape).astype(numpy.float32)
return smp
testing.run_module(__name__, __file__)
| StarcoderdataPython |
100803 | <gh_stars>1-10
import endpoints
from api_user import UserAPI
from api_posts import PostsAPI
from api_comments import ReactionAPI
from api_image import ImageAPI
APPLICATION = endpoints.api_server([PostsAPI, ReactionAPI, UserAPI, ImageAPI]) | StarcoderdataPython |
1702397 | <gh_stars>1-10
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.distributions as dist
import matplotlib.pyplot as plt
from src.params_to_flat import params_to_flat
p = 2
n = 10
od = 1
dist_w = dist.Laplace(0, 0.1)
dist_o = dist.Exponential(1)
# number of samples
N = 10 ** 3
L = 100
# create parameters
xi = nn.Parameter(torch.zeros(n, p), requires_grad=True)
old_ig = None
T = 1.0
# network
vmarg_l_1 = nn.Linear(p * n, 64)
vmarg_l_l2 = nn.Linear(64, 64)
vmarg_l_mu = nn.Linear(64, od)
vmarg_l_s = nn.Linear(64, int(od * (od + 1) / 2))
vmarg_nn_params = list(vmarg_l_1.parameters()) + list(vmarg_l_l2.parameters()) + list(vmarg_l_mu.parameters()) + list(vmarg_l_s.parameters())
# network
vpost_l_1 = nn.Linear((p + 1) * n, 64)
vpost_l_l2 = nn.Linear(64, 64)
vpost_l_mu = nn.Linear(64, p)
vpost_l_s = nn.Linear(64, int(p * (p + 1) / 2))
vpost_nn_params = list(vpost_l_1.parameters()) + list(vpost_l_l2.parameters()) + list(vpost_l_mu.parameters()) + list(vpost_l_s.parameters())
def calc_eig_vnmc(xi):
# structure
nxi = torch.tanh(xi)
w = dist_w.rsample([p, N])
o = dist_o.rsample([N])
prior_mu = nxi @ w
# obtain observations
dist_y = dist.Normal(prior_mu, o)
y = dist_y.rsample()
# build network
reshaped_xi = nxi.reshape([1, -1]).repeat([N, 1])
reshaped_y = y.T
comb = torch.cat([reshaped_xi, reshaped_y], dim=1)
ac1 = torch.tanh(vpost_l_1(comb))
ac2 = torch.tanh(vpost_l_l2(ac1))
mu = vpost_l_mu(ac2)
s = vpost_l_s(ac2)
# build dist
SigmaR = torch.zeros([N, p, p])
SigmaR[:, range(p), range(p)] = F.softplus(s[:, range(p)]) + 1e-6
iv = od
for ip in range(1, p):
l = p - 1
SigmaR[:, ip, :ip - 1] = s[:, iv:iv + l]
iv += l
p_dist_th = dist.MultivariateNormal(mu, scale_tril=SigmaR)
th_posterior = p_dist_th.rsample([L])
post_lp = p_dist_th.log_prob(th_posterior)
prior_lp = dist_w.log_prob(th_posterior).sum(-1)
# structure
prior_mu_po = torch.einsum('kd,abd->abk', nxi, th_posterior)
prior_o_po = dist_o.rsample([L, N])
# obtain observations
dist_y_po = dist.Normal(prior_mu_po, prior_o_po.unsqueeze(-1).repeat([1, 1, n]))
ll_lp = dist_y_po.log_prob(y.T).sum(-1)
log_dist = (prior_lp + ll_lp) / T - post_lp
rv = torch.logsumexp(log_dist, dim=0) - np.log(L+1)
lv = dist_y.log_prob(y).sum(0)
eig = torch.mean(lv - rv)
cur_eig = eig.detach().numpy()
g1 = torch.autograd.grad(eig, xi, retain_graph=True)[0]
g2 = torch.autograd.grad(eig, vpost_nn_params)
return cur_eig, g1, g2
opt_xi = optim.Adam([xi])
opt_nn = optim.Adam(vpost_nn_params)
vpost_opt_nn = optim.Adam(vpost_nn_params)
best_ig = None
for it in range(100):
opt_xi.zero_grad()
opt_nn.zero_grad()
fr = True
cur_ig, xi_grad, th_grad = calc_eig_vnmc(xi)
print(cur_ig)
xi.grad = -xi_grad
for pa, v in zip(vpost_nn_params, th_grad):
pa.grad = -v.detach()
opt_xi.step()
opt_nn.step()
des = torch.tanh(xi).detach().numpy()
plt.figure()
plt.scatter(des[:, 0], des[:, 1])
plt.xlim([np.min(des[:, 0]) - 0.005, np.max(des[:, 0]) + 0.005])
plt.ylim([np.min(des[:, 1]) - 0.005, np.max(des[:, 1]) + 0.005])
plt.show()
| StarcoderdataPython |
3332306 | # write a program that adds the digits in a 2 digit number.
# the user inputs a two digit number
number = input("Type a two digit number: \n")
# assign a variable to each number
string_first_number = number[0]
string_second_number = number[1]
# convert from string to integer
first_number = int(string_first_number)
second_number = int(string_second_number)
# prints the sum and gives the ouput
print("Adding the two digits, gives a total of:", first_number + second_number) | StarcoderdataPython |
3313517 | """
Creating cover file for configuring registration image pairs
The paths and all other constants are set to run on CMP grid for ANHIR dataset
Copyright (C) 2016-2018 <NAME> <<EMAIL>>
"""
import os
import glob
import logging
from functools import partial
import pandas as pd
DATASET_IMAGES = '/datagrid/Medical/dataset_ANHIR/images'
DATASET_LANDMARKS = '/datagrid/Medical/dataset_ANHIR/landmarks_all'
DATASET_COVERS = '/datagrid/Medical/dataset_ANHIR/images'
NAME_COVER_FILE = 'dataset_%s.csv'
GENERATED_SCALES = (5, 10, 25, 50, 100)
NAME_DIR_SCALE = 'scale-%ipc'
DATASET_TISSUE_SCALE = {
'kidney_*': 5,
'lesions_[1,3]': 10,
'lesions_2': 5,
'lung-lobes_*': 10,
'mammary-gland_*': 5,
'COAD_*': 5,
}
DATASET_TISSUE_PARTIAL = ('kidney_*', 'COAD_*')
STEP_PARTIAL = 3
DATASET_SCALES = {
'small': 0,
'medium': 2,
# 'large': 3,
}
IMAGE_EXTENSIONS = ('.png', '.jpg', '.jpeg')
COLUMNS_EMPTY = (
'Warped target landmarks',
'Warped source landmarks',
'Execution time [minutes]'
)
def get_relative_paths(paths, path_base):
paths_r = [p.replace(path_base, '')[1:] for p in sorted(paths)]
return paths_r
def list_landmarks_images(path_tissue, sc, path_landmarks, path_images):
path_ = os.path.join(path_tissue, NAME_DIR_SCALE % sc, '*.csv')
rp_lnds = get_relative_paths(glob.glob(path_), path_landmarks)
paths_imgs = []
for rp_lnd in rp_lnds:
p_imgs = glob.glob(os.path.join(path_images, os.path.splitext(rp_lnd)[0] + '.*'))
p_imgs = [p for p in p_imgs if os.path.splitext(p)[-1] in IMAGE_EXTENSIONS]
if not len(p_imgs):
logging.error('missing image for "%s"', rp_lnd)
return [], []
paths_imgs.append(sorted(p_imgs)[0])
rp_imgs = get_relative_paths(paths_imgs, path_images)
return rp_lnds, rp_imgs
def get_pairing(count, step=None):
idxs = list(range(count))
priv = idxs[::step]
# prune image on diagonal and missing both landmarks (targer and source)
pairs = [(i, j) for i in idxs for j in idxs
if i != j and (i not in priv or j in priv)]
# prune symmetric image pairs
pairs = [(i, j) for k, (i, j) in enumerate(pairs)
if (j, i) not in pairs[:k]]
return pairs
def generate_reg_pairs(rp_imgs, rp_lnds, pairs):
reg_pairs = []
for i, j in pairs:
reg_pairs.append({
'Target image': rp_imgs[i],
'Source image': rp_imgs[j],
'Target landmarks': rp_lnds[i],
'Source landmarks': rp_lnds[j],
})
return reg_pairs
def create_dataset_cover(dataset, path_images, path_landmarks, path_out,
step_landmarks, tissue_partial):
name, scale_step = dataset
reg_pairs = []
for tissue in sorted(DATASET_TISSUE_SCALE):
sc = DATASET_TISSUE_SCALE[tissue]
sc = GENERATED_SCALES[min(GENERATED_SCALES.index(sc) + scale_step,
len(GENERATED_SCALES))]
paths_tissue = [p for p in glob.glob(os.path.join(path_landmarks, tissue))
if os.path.isdir(p)]
for p_tissue in sorted(paths_tissue):
rp_lnds, rp_imgs = list_landmarks_images(p_tissue, sc,
path_landmarks, path_images)
assert len(rp_lnds) == len(rp_imgs), \
'the list of landmarks and images does not match'
step_landmarks = step_landmarks if tissue in tissue_partial else None
pairs = get_pairing(len(rp_lnds), step_landmarks)
reg_pairs += generate_reg_pairs(rp_imgs, rp_lnds, pairs)
df_cover = pd.DataFrame(reg_pairs)
for col in COLUMNS_EMPTY:
df_cover[col] = None
path_csv = os.path.join(path_out, NAME_COVER_FILE % name)
logging.info('exporting CSV: %s', path_csv)
df_cover.to_csv(path_csv)
def main(path_images, path_landmarks, path_out, step_lnds, datasets,
tissue_partial):
_create_cover = partial(create_dataset_cover,
path_images=path_images,
path_landmarks=path_landmarks,
path_out=path_out,
step_landmarks=step_lnds,
tissue_partial=tissue_partial)
for name in datasets:
scale_step = datasets[name]
_create_cover((name, scale_step))
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
logging.info('running...')
main(path_images=DATASET_IMAGES, path_landmarks=DATASET_LANDMARKS,
path_out=DATASET_COVERS, step_lnds=STEP_PARTIAL,
datasets=DATASET_SCALES, tissue_partial=DATASET_TISSUE_PARTIAL)
logging.info('Done :]')
| StarcoderdataPython |
4810871 | <reponame>NHSDigital/list-reconciliation
from validate_and_forward.lambda_handler import ValidateAndForward
app = ValidateAndForward()
def lambda_handler(event, context):
return app.main(event, context)
| StarcoderdataPython |
1776879 | import os
import time
def log(filename, text):
"""
Writes text to file in logs/mainnet/filename and adds a timestamp
:param filename: filename
:param text: text
:return: None
"""
path = "logs/"
if not os.path.isdir("logs/"):
os.makedirs("logs/")
if not os.path.isdir("logs/"):
os.makedirs("logs/")
f = open(path+filename, "a")
f.write(time.strftime('[%Y-%m-%d %H:%M:%S]:', time.localtime(time.time()))+str(text)+"\n")
f.flush()
f.close()
def log_and_print(filename, text):
"""
Writes text to file in logs/mainnet/filename, adds a timestamp and prints the same to the console
:param filename: filename
:param text: text
:return: None
"""
log(filename, text)
print(time.strftime('[%Y-%m-%d %H:%M:%S]:', time.localtime(time.time()))+str(text))
| StarcoderdataPython |
1666129 | """Kata url: https://www.codewars.com/kata/570a6a46455d08ff8d001002."""
def no_boring_zeros(n: int) -> int:
if n == 0:
return 0
while not n % 10:
n //= 10
return n
| StarcoderdataPython |
1738964 | <gh_stars>0
# -*- coding: utf-8 -*-
import grok
import z3c.flashmessage.interfaces
from grokui.base.layout import GrokUIView
from grokui.admin.interfaces import ISecurityNotifier
from grokui.admin.utilities import getVersion
from grokui.admin.security import SecurityNotifier
from ZODB.interfaces import IDatabase
from ZODB.FileStorage.FileStorage import FileStorageError
from zope.size import byteDisplay
from zope.site.interfaces import IRootFolder
from zope.applicationcontrol.interfaces import IServerControl, IRuntimeInfo
from zope.applicationcontrol.applicationcontrol import applicationController
from zope.component import getUtility, queryUtility, getUtilitiesFor
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('grokui')
class Server(GrokUIView):
"""Zope3 management screen.
"""
grok.title('Server Control')
grok.require('grok.ManageApplications')
_fields = (
"ZopeVersion",
"PythonVersion",
"PythonPath",
"SystemPlatform",
"PreferredEncoding",
"FileSystemEncoding",
"CommandLine",
"ProcessId",
"DeveloperMode",
)
_unavailable = _("Unavailable")
@property
def grok_version(self):
return getVersion('grok')
@property
def grokuiadmin_version(self):
return getVersion('grokui.admin')
def root_url(self, name=None):
obj = self.context
while obj is not None:
if IRootFolder.providedBy(obj):
return self.url(obj, name)
obj = obj.__parent__
raise ValueError("No application nor root element found.")
@property
def security_notifier_url(self):
"""Get the URL to look up for security warnings.
"""
return self.security_notifier.lookup_url
@property
def security_notifier(self):
"""Get a local security notifier.
The security notifier is installed as a local utility by an
event handler in the security module.
"""
site = grok.getSite()
site_manager = site.getSiteManager()
return site_manager.queryUtility(ISecurityNotifier, default=None)
@property
def secnotes_enabled(self):
if self.security_notifier is None:
return False
return self.security_notifier.enabled
@property
def secnotes_message(self):
if self.security_notifier is None:
return u'Security notifier is not installed.'
return self.security_notifier.getNotification()
@property
def server_control(self):
return queryUtility(IServerControl)
@property
def runtime_info(self):
try:
ri = IRuntimeInfo(applicationController)
except TypeError:
formatted = dict.fromkeys(self._fields, self._unavailable)
formatted["Uptime"] = self._unavailable
else:
formatted = self._getInfo(ri)
return formatted
def _getInfo(self, ri):
formatted = {}
for name in self._fields:
try:
value = getattr(ri, "get" + name)()
except ValueError:
value = self._unavailable
formatted[name] = value
formatted["Uptime"] = self._getUptime(ri)
return formatted
def _getUptime(self, ri):
# make a unix "uptime" uptime format
uptime = int(ri.getUptime())
minutes, seconds = divmod(uptime, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
return _('${days} day(s) ${hours}:${minutes}:${seconds}',
mapping={'days': '%d' % days,
'hours': '%02d' % hours,
'minutes': '%02d' % minutes,
'seconds': '%02d' % seconds})
@property
def current_message(self):
source = getUtility(
z3c.flashmessage.interfaces.IMessageSource, name='admin')
messages = list(source.list())
if messages:
return messages[0]
def updateSecurityNotifier(self, setsecnotes=None, setsecnotesource=None,
secnotesource=None):
if self.security_notifier is None:
site = grok.getSite()
site_manager = site.getSiteManager()
if 'grokadmin_security' not in site_manager:
site_manager['grokadmin_security'] = SecurityNotifier()
utility = site_manager['grokadmin_security']
site_manager.registerUtility(
utility, ISecurityNotifier, name=u'')
if setsecnotesource is not None:
self.security_notifier.setLookupURL(secnotesource)
if setsecnotes is not None:
if self.security_notifier.enabled is True:
self.security_notifier.disable()
else:
self.security_notifier.enable()
if self.secnotes_enabled is False:
return
return
def update(self, time=None, restart=None, shutdown=None,
setsecnotes=None, secnotesource=None, setsecnotesource=None,
admin_message=None, submitted=False, dbName="", pack=None,
days=0):
# Packing control
if pack is not None:
return self.pack(dbName, days)
# Security notification control
self.updateSecurityNotifier(setsecnotes, setsecnotesource,
secnotesource)
if not submitted:
return
# Admin message control
source = getUtility(
z3c.flashmessage.interfaces.IMessageSource, name='admin')
if admin_message is not None:
source.send(admin_message)
elif getattr(source, 'current_message', False):
source.delete(source.current_message)
# Restart control
if time is not None:
try:
time = int(time)
except BaseException:
time = 0
else:
time = 0
if restart is not None:
self.server_control.restart(time)
elif shutdown is not None:
self.server_control.shutdown(time)
self.redirect(self.url())
@property
def databases(self):
res = []
for name, db in getUtilitiesFor(IDatabase):
d = dict(
dbName=db.getName(),
utilName=str(name),
size=self._getSize(db))
res.append(d)
return res
def _getSize(self, db):
"""Get the database size in a human readable format.
"""
size = db.getSize()
if not isinstance(size, (int, float)):
return str(size)
return byteDisplay(size)
def pack(self, dbName, days):
try:
days = int(days)
except ValueError:
self.flash('Error: Invalid Number')
return
db = getUtility(IDatabase, name=dbName)
print("DB: ", db, days)
db.pack(days=days)
return
try:
db.pack(days=days)
self.flash('ZODB `%s` successfully packed.' % (dbName))
except FileStorageError as err:
self.flash('ERROR packing ZODB `%s`: %s' % (dbName, err))
| StarcoderdataPython |
171091 | # -*- coding: utf-8 -*-
# script.module.python.koding.aio
# Python Koding AIO (c) by whufclee (<EMAIL>)
# Python Koding AIO is licensed under a
# Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License.
# You should have received a copy of the license along with this
# work. If not, see http://creativecommons.org/licenses/by-nc-nd/4.0.
# IMPORTANT: If you choose to use the special noobsandnerds features which hook into their server
# please make sure you give approptiate credit in your add-on description (noobsandnerds.com)
#
# Please make sure you've read and understood the license, this code can NOT be used commercially
# and it can NOT be modified and redistributed. If you're found to be in breach of this license
# then any affected add-ons will be blacklisted and will not be able to work on the same system
# as any other add-ons which use this code. Thank you for your cooperation.
import os
import sys
import xbmc
import xbmcgui
from systemtools import Last_Error
dialog = xbmcgui.Dialog()
koding_path = xbmc.translatePath("special://home/addons/script.module.python.koding.aio")
#----------------------------------------------------------------
# TUTORIAL #
def Browse_To_Folder(header='Select the folder you want to use', path = 'special://home'):
"""
Browse to a folder and return the path
CODE: koding.Browse_To_Folder([header, path])
AVAILABLE PARAMS:
header - As the name suggests this is a string to be used for the header/title
of the window. The default is "Select the folder you want to use".
path - Optionally you can add a default path for the browse start folder.
The default start position is the Kodi HOME folder.
EXAMPLE CODE:
folder = koding.Browse_To_Folder('Choose a folder you want to use')
dialog.ok('FOLDER DETAILS','Folder path: [COLOR=dodgerblue]%s[/COLOR]'%folder)
~"""
text = dialog.browse(type=3, heading=header, shares='files', useThumbs=False, treatAsFolder=False, defaultt=path)
return text
#----------------------------------------------------------------
# TUTORIAL #
def Browse_To_File(header='Select the file you want to use', path='special://home/addons/', extension='', browse_in_archives=False):
"""
This will allow the user to browse to a specific file and return the path.
IMPORTANT: Do not confuse this with the Browse_To_Folder function
CODE: koding.Browse_To_File([header, path, extension, browse_in_archives])
AVAILABLE PARAMS:
header - As the name suggests this is a string to be used for the header/title
of the window. The default is "Select the file you want to use".
path - Optionally you can add a default path for the browse start folder.
The default start position is the Kodi HOME folder.
extension - Optionally set extensions to filter by, let's say you only wanted
zip and txt files to show you would send through '.zip|.txt'
browse_in_archives - Set to true if you want to be able to browse inside zips and
other archive files. By default this is set to False.
EXAMPLE CODE:
dialog.ok('[COLOR gold]BROWSE TO FILE 1[/COLOR]','We will now browse to your addons folder with browse_in_archives set to [COLOR dodgerblue]False[/COLOR]. Try clicking on a zip file if you can find one (check packages folder).')
folder = koding.Browse_To_File(header='Choose a file you want to use', path='special://home/addons')
dialog.ok('FOLDER DETAILS','File path: [COLOR=dodgerblue]%s[/COLOR]'%folder)
dialog.ok('[COLOR gold]BROWSE TO FILE 2[/COLOR]','We will now browse to your addons folder with browse_in_archives set to [COLOR dodgerblue]True[/COLOR]. Try clicking on a zip file if you can find one (check packages folder).')
folder = koding.Browse_To_File(header='Choose a file you want to use', path='special://home/addons', browse_in_archives=True)
dialog.ok('FOLDER DETAILS','File path: [COLOR=dodgerblue]%s[/COLOR]'%folder)
~"""
if not path.endswith(os.sep):
path += os.sep
try:
text = dialog.browse(type=1, heading=header, shares='myprograms', mask=extension, useThumbs=False, treatAsFolder=browse_in_archives, defaultt=path)
except:
text = dialog.browse(type=1, heading=header, s_shares='myprograms', mask=extension, useThumbs=False,
treatAsFolder=browse_in_archives, defaultt=path)
return text
#----------------------------------------------------------------
# TUTORIAL #
def Countdown(title='COUNTDOWN STARTED', message='A quick simple countdown example.', update_msg='Please wait, %s seconds remaining.', wait_time=10, allow_cancel=True, cancel_msg='[COLOR=gold]Sorry, this process cannot be cancelled[/COLOR]'):
"""
Bring up a countdown timer and return true if waited or false if cancelled.
CODE: Countdown(title, message, update_msg, wait_time, allow_cancel, cancel_msg):
AVAILABLE PARAMS:
title - The header string in the dialog window, the default is:
'COUNTDOWN STARTED'
message - A short line of info which will show on the first line
of the dialog window just below the title. Default is:
'A quick simple countdown example.'
update_msg - The message you want to update during the countdown.
This must contain a %s which will be replaced by the current amount
of seconds that have passed. The default is:
'Please wait, %s seconds remaining.'
wait_time - This is the amount of seconds you want the countdown to
run for. The default is 10.
allow_cancel - By default this is set to true and the user can cancel
which will result in False being returned. If this is set to True
they will be unable to cancel.
cancel_msg - If allow_cancel is set to False you can add a custom
message when the user tries to cancel. The default string is:
'[COLOR=gold]Sorry, this process cannot be cancelled[/COLOR]'
EXAMPLE CODE:
dialog.ok('COUNTDOWN EXAMPLE', 'Press OK to bring up a countdown timer', '', 'Try cancelling the process.')
my_return = koding.Countdown(title='COUNTDOWN EXAMPLE', message='Quick simple countdown message (cancel enabled).', update_msg='%s seconds remaining', wait_time=5)
if my_return:
dialog.ok('SUCCESS!','Congratulations you actually waited through the countdown timer without cancelling!')
else:
dialog.ok('BORED MUCH?','What happened, did you get bored waiting?', '', '[COLOR=dodgerblue]Let\'s set off another countdown you CANNOT cancel...[/COLOR]')
koding.Countdown(title='COUNTDOWN EXAMPLE', message='Quick simple countdown message (cancel disabled).', update_msg='%s seconds remaining', wait_time=5, allow_cancel=False, cancel_msg='[COLOR=gold]Sorry, this process cannot be cancelled[/COLOR]')
~"""
dp = xbmcgui.DialogProgress()
current = 0
increment = 100 / wait_time
cancelled = False
dp.create(title)
while current <= wait_time:
if (dp.iscanceled()):
if allow_cancel:
cancelled = True
break
else:
dp.create(title,cancel_msg)
if current != 0:
xbmc.sleep(1000)
remaining = wait_time - current
if remaining == 0:
percent = 100
else:
percent = increment * current
remaining_display = update_msg % remaining
dp.update(percent, message, remaining_display)
current += 1
if cancelled == True:
return False
else:
return True
#----------------------------------------------------------------
# TUTORIAL #
def Custom_Dialog(pos='center', dialog='Text', size='700x500', button_width=200,\
header='Disclaimer', main_content='Add some text here', buttons=['Decline','Agree'],\
header_color='gold', text_color='white', background='000000', transparency=100,\
highlight_color='gold', button_color_focused='4e91cf', button_trans_focused=100,\
button_color_nonfocused='586381', button_trans_nonfocused=50):
"""
A fully customisable dialog where you can have as many buttons as you want.
Similar behaviour to the standard Kodi yesno dialog but this allows as many buttons
as you want, as much text as you want (with a slider) as well as fully configurable
sizing and positioning.
CODE: Custom_Dialog([pos, dialog, size, button_width, header, main_content, buttons,\
header_color, text_color, background, transparency, highlight_color, button_color_focused,\
button_trans_focused, button_color_nonfocused, button_trans_nonfocused])
AVAILABLE PARAMS:
pos - This is the co-ordinates of where on the screen you want the
dialog to appear. This needs to be sent through as a string so for
example if you want the dialog top left corner to be 20px in and
10px down you would use pos='20x10'. By default this is set to 'center'
which will center the dialog on the screen.
dialog - By default this is set to 'Text'. Currently that is the
only custom dialog available but there are plans to improve upon this
and allow for image and even video dialogs.
size - Sent through as a string this is the dimensions you want the
dialog to be, by default it's set to '700x500' but you can set to any
size you want using that same format. Setting to 'fullscreen' will
use 1280x720 (fullscreen).
button_width - This is sent through as an integer and is the width you
want your buttons to be. By default this is set to 200 which is quite large
but looks quite nice if using only 2 or 3 buttons.
header - Sent through as a string this is the header shown in the dialog.
The default is 'Disclaimer'.
header_color - Set the text colour, by default it's 'gold'
text_color - Set the text colour, by default it's 'white'
main_content - This is sent through as a string and is the main message text
you want to show in your dialog. When the ability to add videos, images etc.
is added there may well be new options added to this param but it will remain
backwards compatible.
buttons - Sent through as a list (tuple) this is a list of all your buttons.
Make sure you do not duplicate any names otherwise it will throw off the
formatting of the dialog and you'll get false positives with the results.
background - Optionally set the background colour (hex colour codes required).
The default is '000000' (black).
transparency - Set the percentage of transparency as an integer. By default
it's set to 100 which is a solid colour.
highlight_color - Set the highlighted text colour, by default it's 'gold'
button_color_focused - Using the same format as background you can set the
colour to use for a button when it's focused.
button_trans_focused - Using the same format as transparency you can set the
transparency amount to use on the button when in focus.
button_color_nonfocused - Using the same format as background you can set the
colour to use for buttons when they are not in focus.
button_trans_nonfocused - Using the same format as transparency you can set the
transparency amount to use on the buttons when not in focus.
EXAMPLE CODE:
main_text = 'This is my main text.\n\nYou can add anything you want in here and the slider will allow you to see all the contents.\n\nThis example shows using a blue background colour and a transparency of 90%.\n\nWe have also changed the highlighted_color to yellow.'
my_buttons = ['button 1', 'button 2', 'button 3']
my_choice = koding.Custom_Dialog(main_content=main_text,pos='center',buttons=my_buttons,background='213749',transparency=90,highlight_color='yellow')
dialog.ok('CUSTOM DIALOG 1','You selected option %s'%my_choice,'The value of this is: [COLOR=dodgerblue]%s[/COLOR]'%my_buttons[my_choice])
main_text = 'This is example 2 with no fancy colours, just a fullscreen and a working scrollbar.\n\nYou\'ll notice there are also a few more buttons on this one.\n\nline 1\nline 2\nline 3\nline 4\nline 5\nline 6\nline 7\nline 8\nline 9\nline 10\nline 11\nline 12\nline 13\nline 14\nline 15\nline 16\nline 17\nline 18\nline 19\nline 20\n\nYou get the idea we\'ll stop there!'
my_buttons = ['button 1', 'button 2', 'button 3','button 4', 'button 5', 'button 6','button 7', 'button 8', 'button 9','button 10', 'button 11', 'button 12', 'button 13','button 14', 'button 15', 'button 16','button 17', 'button 18', 'button 19','button 20']
my_choice = koding.Custom_Dialog(main_content=main_text,pos='center',size='fullscreen',buttons=my_buttons)
dialog.ok('CUSTOM DIALOG 2','You selected option %s'%my_choice,'The value of this is: [COLOR=dodgerblue]%s[/COLOR]'%my_buttons[my_choice])
~"""
skin_path = os.path.join(koding_path,"resources","skins","Default","720p")
ACTION = -1
# Convert the transparency percentage to hex
transparency = float(transparency) / 100 * 255
transparency = hex(int(transparency)).split('x')[1]
button_trans_focused = float(button_trans_focused) / 100 * 255
button_trans_focused = hex(int(button_trans_focused)).split('x')[1]
button_trans_nonfocused = float(button_trans_nonfocused) / 100 * 255
button_trans_nonfocused = hex(int(button_trans_nonfocused)).split('x')[1]
# Work out the dialog dimensions
if size == 'fullscreen':
dialog_width = '1280'
dialog_height = '720'
else:
dialog_width, dialog_height = size.split('x')
button_count = len(buttons)
buttons_per_row = (int(dialog_width)-25) / (button_width+25)
if buttons_per_row > button_count:
buttons_per_row = button_count
# work out the number of rows, round up if a float
button_rows = int(button_count/buttons_per_row) + (button_count % buttons_per_row > 0)
# Work out the positioning of the dialog
if pos == 'center':
posx = str( (1280 - int(dialog_width)) / 2)
posy = str( (720 - int(dialog_height)) / 2)
else:
posx, posy = pos.split(',')
# Work out the text area size
text_width = str( int(dialog_width)-80 )
text_height = str( (int(dialog_height)-(50*(button_rows+1)))-70 )
scroll_pos = str( int(text_width)+32 )
button_max = int(dialog_height)-30
# Work out the button positions
if dialog == 'Text':
button_spacing = ( int(dialog_width)-(buttons_per_row*button_width) ) / (buttons_per_row+1)
buttons_dict = {}
counter = 1
row = 1
# Create a dictionary of button positioning
for button in buttons:
if counter > buttons_per_row:
counter = 1
row += 1
# If starting a new line reset the values
if counter > buttons_per_row or counter == 1:
current_pos = button_spacing
counter += 1
else:
current_pos = current_pos+button_width+button_spacing
counter += 1
buttons_dict[button] = [str(current_pos),row]
# Set the dialog template name and new temporary "live" XML
dialog_type = dialog.capitalize()+'.xml'
dialog_new = 'temp.xml'
dialog_path = os.path.join(skin_path,dialog_type)
temp_path = os.path.join(skin_path,dialog_new)
button_num = 100
counter = 1
buttons_code = ''
for button in buttons:
if buttons_dict[button][1] == 1:
onup = 99
else:
onup = button_num-buttons_per_row
# If button is on the last row we set down to scrollbar
if buttons_dict[button][1] == button_rows:
ondown = 99
# Otherwise set down to the item on row below
elif buttons_dict[button][1] != button_rows:
ondown = button_num+buttons_per_row
# Set the vertical position (y) of the buttons
button_y = str( int(text_height)+(buttons_dict[button][1]*50)+40 )
if ( int(text_height) < 200 ) or ( int(button_y) > button_max ):
if size != 'fullscreen':
xbmcgui.Dialog().ok('WE NEED A BIGGER WINDOW!','The amount of buttons sent through do not fit in this window. Either make the button width smaller or make a bigger window')
else:
xbmcgui.Dialog().ok('SMALLER BUTTONS NEEDED!','The amount of buttons sent through do not fit in this window. Either send through less buttons or decrease their width using the button_width param.')
return
button_x = str( buttons_dict[button][0] )
buttons_code += '\
<control type="button" id="%s">\n\
<posx>%s</posx>\n\
<posy>%s</posy>\n\
<width>%s</width>\n\
<height>40</height>\n\
<label>%s</label>\n\
<texturefocus colordiffuse="%s%s">DialogBack.png</texturefocus>\n\
<texturenofocus colordiffuse="%s%s">DialogBack.png</texturenofocus>\n\
<font>font12_title</font>\n\
<textcolor>%s</textcolor>\n\
<focusedcolor>%s</focusedcolor>\n\
<align>center</align>\n\
<onleft>%s</onleft>\n\
<onright>%s</onright>\n\
<onup>%s</onup>\n\
<ondown>%s</ondown>\n\
</control>\n' % (button_num, button_x, button_y, button_width, buttons[counter-1],\
button_trans_focused, button_color_focused, button_trans_nonfocused,\
button_color_nonfocused, text_color, highlight_color, button_num-1,\
button_num+1, onup, ondown)
button_num += 1
counter += 1
# Grab contents of the template and replace with our new values
with open(dialog_path, 'r') as content_file:
content = content_file.read()
content = content.replace('dialog_width',dialog_width)\
.replace('dialog_height',dialog_height)\
.replace('text_width',text_width)\
.replace('text_height',text_height)\
.replace('pos_x',posx)\
.replace('pos_y',posy)\
.replace('PK_Transparency',transparency)\
.replace('PK_Color',background)\
.replace('PK_Text_Color',text_color)\
.replace('PK_Header_Color',header_color)\
.replace('<!-- buttons -->',buttons_code)
# Create the new temp "live" XML
myfile = open(temp_path,'w')
myfile.write(content)
myfile.close()
d=MyDisclaimer(dialog_new,koding_path,header=header,main_content=main_content)
d.doModal()
ACTION = d.ACTION
del d
return ACTION
class MyDisclaimer(xbmcgui.WindowXMLDialog):
def __init__(self,*args,**kwargs):
self.header=kwargs['header']
self.main_content=kwargs['main_content']
self.WINDOW=xbmcgui.Window( 10000 )
self.WINDOW.setProperty( 'PK_Header' , self.header )
self.WINDOW.setProperty( 'PK_Main_Text' , self.main_content )
self.ACTION=-1
def onClick( self, controlID ):
if controlID>=100:
self.ACTION=(controlID-100)
self.close()
elif controlID==12:
self.close()
def onAction( self, action ):
if action in [ 5, 6, 7, 9, 10, 92, 117 ] or action.getButtonCode() in [275,257,261]:
self.close()
#----------------------------------------------------------------
# TUTORIAL #
def Keyboard(heading='',default='',hidden=False,return_false=False,autoclose=False,kb_type='alphanum'):
"""
Show an on-screen keyboard and return the string
CODE: koding.Keyboard([default, heading, hidden, return_false, autoclose, kb_type])
AVAILABLE PARAMS:
heading - Optionally enter a heading for the text box.
default - This is optional, if set this will act as the default text shown in the text box
hidden - Boolean, if set to True the text will appear as hidden (starred out)
return_false - By default this is set to False and when escaping out of the keyboard
the default text is returned (or an empty string if not set). If set to True then
you'll receive a return of False.
autoclose - By default this is set to False but if you want the keyboard to auto-close
after a period of time you can send through an integer. The value sent through needs to
be milliseconds, so for example if you want it to close after 3 seconds you'd send through
3000. The autoclose function only works with standard alphanumeric keyboard types.
kb_type - This is the type of keyboard you want to show, by default it's set to alphanum.
A list of available values are listed below:
'alphanum' - A standard on-screen keyboard containing alphanumeric characters.
'numeric' - An on-screen numerical pad.
'date' - An on-screen numerical pad formatted only for a date.
'time' - An on-screen numerical pad formatted only for a time.
'ipaddress' - An on-screen numerical pad formatted only for an IP Address.
'password' - A standard keyboard but returns value as md5 hash. When typing
the text is starred out, once you've entered the password you'll get another
keyboard pop up asking you to verify. If the 2 match then your md5 has is returned.
EXAMPLE CODE:
mytext = koding.Keyboard(heading='Type in the text you want returned',default='test text')
dialog.ok('TEXT RETURNED','You typed in:', '', '[COLOR=dodgerblue]%s[/COLOR]'%mytext)
dialog.ok('AUTOCLOSE ENABLED','This following example we\'ve set the autoclose to 3000. That\'s milliseconds which converts to 3 seconds.')
mytext = koding.Keyboard(heading='Type in the text you want returned',default='this will close in 3s',autoclose=3000)
dialog.ok('TEXT RETURNED','You typed in:', '', '[COLOR=dodgerblue]%s[/COLOR]'%mytext)
mytext = koding.Keyboard(heading='Enter a number',kb_type='numeric')
dialog.ok('NUMBER RETURNED','You typed in:', '', '[COLOR=dodgerblue]%s[/COLOR]'%mytext)
dialog.ok('RETURN FALSE ENABLED','All of the following examples have "return_false" enabled. This means if you escape out of the keyboard the return will be False.')
mytext = koding.Keyboard(heading='Enter a date',return_false=True,kb_type='date')
dialog.ok('DATE RETURNED','You typed in:', '', '[COLOR=dodgerblue]%s[/COLOR]'%mytext)
mytext = koding.Keyboard(heading='Enter a time',return_false=True,kb_type='time')
dialog.ok('TIME RETURNED','You typed in:', '', '[COLOR=dodgerblue]%s[/COLOR]'%mytext)
mytext = koding.Keyboard(heading='IP Address',return_false=True,kb_type='ipaddress',autoclose=5)
dialog.ok('IP RETURNED','You typed in:', '', '[COLOR=dodgerblue]%s[/COLOR]'%mytext)
mytext = koding.Keyboard(heading='Password',kb_type='password')
dialog.ok('MD5 RETURN','The md5 for this password is:', '', '[COLOR=dodgerblue]%s[/COLOR]'%mytext)
~"""
from vartools import Decode_String
kb_type = eval( 'xbmcgui.INPUT_%s'%kb_type.upper() )
if hidden:
hidden = eval( 'xbmcgui.%s_HIDE_INPUT'%kb_type.upper() )
keyboard = dialog.input(heading,default,kb_type,hidden,autoclose)
if keyboard != '':
return keyboard
elif not return_false:
return Decode_String(default)
else:
return False
#----------------------------------------------------------------
# TUTORIAL #
def Notify(title, message, duration=2000, icon='special://home/addons/script.module.python.koding.aio/resources/update.png'):
"""
Show a short notification for x amount of seconds
CODE: koding.Notify(title, message, [duration, icon])
AVAILABLE PARAMS:
(*) title - A short title to show on top line of notification
(*) message - A short message to show on the bottom line of notification
duration - An integer in milliseconds, the default to show the notification for is 2000
icon - The icon to show in notification bar, default is the update icon from this module.
EXAMPLE CODE:
koding.Notify(title='TEST NOTIFICATION', message='This is a quick 5 second test', duration=5000)
~"""
xbmc.executebuiltin('XBMC.Notification(%s, %s, %s, %s)' % (title , message , duration, icon))
#----------------------------------------------------------------
# TUTORIAL #
def OK_Dialog(title,message):
"""
This will bring up a short text message in a dialog.ok window.
CODE: OK_Dialog(title,message)
AVAILABLE PARAMS:
(*) title - This is title which appears in the header of the window.
(*) message - This is the main text you want to appear.
EXAMPLE CODE:
koding.OK_Dialog(title='TEST DIALOG',message='This is a test dialog ok box. Click OK to quit.')
~"""
dialog.ok(title,message)
#----------------------------------------------------------------
# TUTORIAL #
def Select_Dialog(title,options,key=True):
"""
This will bring up a selection of options to choose from. The options are
sent through as a list and only one can be selected - this is not a multi-select dialog.
CODE: Select_Dialog(title,options,[key])
AVAILABLE PARAMS:
(*) title - This is title which appears in the header of the window.
(*) options - This is a list of the options you want the user to be able to choose from.
key - By default this is set to True so you'll get a return of the item number. For example
if the user picks "option 2" and that is the second item in the list you'll receive a return of
1 (0 would be the first item in list and 1 is the second). If set to False you'll recieve a return
of the actual string associated with that key, in this example the return would be "option 2".
EXAMPLE CODE:
my_options = ['Option 1','Option 2','Option 3','Option 4','Option 5']
mychoice = koding.Select_Dialog(title='TEST DIALOG',options=my_options,key=False)
koding.OK_Dialog(title='SELECTED ITEM',message='You selected: [COLOR=dodgerblue]%s[/COLOR]\nNow let\'s try again - this time we will return a key...'%mychoice)
mychoice = koding.Select_Dialog(title='TEST DIALOG',options=my_options,key=True)
koding.OK_Dialog(title='SELECTED ITEM',message='The item you selected was position number [COLOR=dodgerblue]%s[/COLOR] in the list'%mychoice)
~"""
mychoice = dialog.select(title,options)
if key:
return mychoice
else:
return options[mychoice]
#----------------------------------------------------------------
# TUTORIAL #
def Show_Busy(status=True, sleep=0):
"""
This will show/hide a "working" symbol.
CODE: Show_Busy([status, sleep])
AVAILABLE PARAMS:
status - This optional, by default it's True which means the "working"
symbol appears. False will disable.
sleep - If set the busy symbol will appear for <sleep> amount of
milliseconds and then disappear.
EXAMPLE CODE:
dialog.ok('BUSY SYMBOL','Press OK to show a busy dialog which restricts any user interaction. We have added a sleep of 5 seconds at which point it will disable.')
koding.Show_Busy(sleep=5000)
dialog.ok('BUSY SYMBOL','We will now do the same but with slightly different code')
koding.Show_Busy(status=True)
xbmc.sleep(5000)
koding.Show_Busy(status=False)
~"""
if status:
xbmc.executebuiltin("ActivateWindow(busydialog)")
if sleep:
xbmc.sleep(sleep)
xbmc.executebuiltin("Dialog.Close(busydialog)")
else:
xbmc.executebuiltin("Dialog.Close(busydialog)")
#----------------------------------------------------------------
# TUTORIAL #
def Text_Box(header, message):
"""
This will allow you to open a blank window and fill it with some text.
CODE: koding.Text_Box(header, message)
AVAILABLE PARAMS:
(*) header - As the name suggests this is a string to be used for the header/title of the window
(*) message - Yes you've probably already gussed it, this is the main message text
EXAMPLE CODE:
koding.Text_Box('TEST HEADER','Just some random text... Use kodi tags for new lines, colours etc.')
~"""
xbmc.executebuiltin("ActivateWindow(10147)")
controller = xbmcgui.Window(10147)
xbmc.sleep(500)
controller.getControl(1).setLabel(header)
controller.getControl(5).setText(message)
#----------------------------------------------------------------
# TUTORIAL #
def Reset_Percent(property='update_percent_',window_id=10000):
"""
If using the Update_Progress function for setting percentages in skinning then this
will allow you to reset all the percent properties (1-100)
CODE: Reset_Percent([property,window_id])
AVAILABLE PARAMS:
property - the property name you want reset, this will reset all properties starting
with this string from 1-100. For example if you use the default 'update_percent_' this
will loop through and reset update_percent_1, update_percent_2 etc. all the way through
to update_percent_100.
window_id - By default this is set to 10000 but you can send any id through you want.
kwargs - Send through any other params and the respective property will be set.colours etc.')
~"""
counter = 0
while counter <= 100:
xbmcgui.Window(10000).clearProperty('update_percent_%s'%counter)
counter +=1
#----------------------------------------------------------------
# TUTORIAL #
def Update_Progress(total_items,current_item,**kwargs):
"""
This function is designed for skinners but can be used for general Python too. It will
work out the current percentage of items that have been processed and update the
"update_percent" property accordingly (1-100). You can also send through any properties
you want updated and it will loop through updating them with the relevant values.
To send through properties just send through the property name as the param and assign to a value.
Example: Update_Progress( total_items=100,current_item=56, {"myproperty1":"test1","myproperty2":"test2"} )
CODE: Update_Progress(total_items,current_item,[kwargs])
AVAILABLE PARAMS:
(*) total_items - Total amount of items in your list you're processing
(*) current_item - Current item number that's been processed.
kwargs - Send through any other params and the respective property will be set.colours etc.
~"""
Reset_Percent()
for item in kwargs:
if item.endswith('color'):
value = '0xFF'+kwargs[item]
else:
value = kwargs[item]
if value == 'false' or value == '' and not item.endswith('color'):
xbmcgui.Window(10000).clearProperty(item)
elif value:
xbmcgui.Window(10000).setProperty(item, value)
percent = 100*(current_item/(total_items*1.0))
newpercent=int(percent)
if (newpercent % 1 == 0) and (newpercent <=100):
xbmcgui.Window(10000).setProperty('update_percent',str(newpercent))
xbmcgui.Window(10000).setProperty('update_percent_%s'%newpercent,'true')
if newpercent == 100:
xbmc.executebuiltin('Action(firstpage)')
#-----------------------------------------------------------------------------
# TUTORIAL #
def Update_Screen(disable_quit=False, auto_close=True):
"""
This will create a full screen overlay showing progress of updates. You'll need to
use this in conjunction with the Update_Progress function.
CODE: Update_Screen([disable_quit, auto_close))
AVAILABLE PARAMS:
disable_quit - By default this is set to False and pressing the parent directory
button (generally esc) will allow you to close the window. Setting this to True
will mean it's not possible to close the window manually.
auto_close - By default this is set to true and when the percentage hits 100
the window will close. If you intend on then sending through some more commands
you might want to consider leaving this window open in which case you'd set this
to false. Bare in mind if you go this route the window will stay active until
you send through the kill command which is: xbmc.executebuiltin('Action(firstpage)')
EXAMPLE CODE:
mykwargs = {
"update_header" : "Downloading latest updates",\
"update_main_text" : "Your device is now downloading all the latest updates.\nThis shouldn\'t take too long, "\
"depending on your internet speed this could take anything from 2 to 10 minutes.\n\n"\
"Once downloaded the system will start to install the updates.",\
"update_bar_color" : "4e91cf",\
"update_icon" : "special://home/addons/script.module.python.koding.aio/resources/skins/Default/media/update.png",\
"update_spinner" : "true"}
Update_Screen()
counter = 1
while counter <= 60:
xbmc.sleep(300)
Update_Progress(total_items=60,current_item=counter,**mykwargs)
if counter == 30:
mykwargs = {
"update_header" : "Halfway there!",\
"update_main_text" : "We just updated the properties to show how you can change things on the fly "\
"simply by sending through some different properties. Both the icon and the "\
"background images you see here are being pulled from online.",\
"update_header_color" : "4e91cf",\
"update_percent_color" : "4e91cf",\
"update_bar_color" : "4e91cf",\
"update_background" : "http://www.planwallpaper.com/static/images/518164-backgrounds.jpg",\
"update_icon" : "http://totalrevolution.tv/img/tr_small_black_bg.jpg",\
"update_spinner" : "false"}
counter += 1
~"""
import threading
update_screen_thread = threading.Thread(target=Show_Update_Screen, args=[disable_quit, auto_close])
update_screen_thread.start()
xbmc.sleep(2000)
def Show_Update_Screen(disable_quit=False,auto_close=True):
xbmcgui.Window(10000).clearProperty('update_icon')
xbmcgui.Window(10000).clearProperty('update_percent')
xbmcgui.Window(10000).clearProperty('update_spinner')
xbmcgui.Window(10000).clearProperty('update_header')
xbmcgui.Window(10000).clearProperty('update_main_text')
xbmcgui.Window(10000).setProperty('update_background','whitebg.jpg')
xbmcgui.Window(10000).setProperty('update_percent_color','0xFF000000')
xbmcgui.Window(10000).setProperty('update_bar_color','0xFF000000')
xbmcgui.Window(10000).setProperty('update_main_color','0xFF000000')
xbmcgui.Window(10000).setProperty('update_header_color','0xFF000000')
# Set a property so we can determine if update screen is active
xbmcgui.Window(10000).setProperty('update_screen','active')
d=MyUpdateScreen('Loading.xml',koding_path,disable_quit=disable_quit,auto_close=auto_close)
d.doModal()
del d
xbmcgui.Window(10000).clearProperty('update_screen')
class MyUpdateScreen(xbmcgui.WindowXMLDialog):
def __init__(self,*args,**kwargs):
self.disable_quit=kwargs['disable_quit']
self.auto_close=kwargs['auto_close']
self.WINDOW=xbmcgui.Window( 10000 )
def onAction( self, action ):
if action in [10,7]:
if self.disable_quit:
xbmc.log("ESC and HOME Disabled",2)
else:
self.close()
if action==159 and self.auto_close:
self.close()
#----------------------------------------------------------------
# TUTORIAL #
def YesNo_Dialog(title,message,yes=None,no=None):
"""
This will bring up a short text message in a dialog.yesno window. This will
return True or False
CODE: YesNo_Dialog(title,message,[yeslabel,nolabel])
AVAILABLE PARAMS:
(*) title - This is title which appears in the header of the window.
(*) message - This is the main text you want to appear.
yes - Optionally change the default "YES" to a custom string
no - Optionally change the default "NO" to a custom string
EXAMPLE CODE:
mychoice = koding.YesNo_Dialog(title='TEST DIALOG',message='This is a yes/no dialog with custom labels.\nDo you want to see an example of a standard yes/no.',yes='Go on then',no='Nooooo!')
if mychoice:
koding.YesNo_Dialog(title='STANDARD DIALOG',message='This is an example of a standard one without sending custom yes/no params through.')
~"""
choice = dialog.yesno(title,message,yeslabel=yes,nolabel=no)
return choice
#----------------------------------------------------------------
| StarcoderdataPython |
3314203 | <reponame>Lewkow/astro-outlier<gh_stars>0
class IOHandler:
def __init__(self):
self.data_fid_npy = "../data/KeplerSampleWErr.npy"
self.data_fid_csv = "../data/kepDenselcvs.csv"
def import_numpy_fid(self, numpy_fid):
data_array = np.load(self.data_fid, allow_pickle=True)
return data_array
def read_data(data_fid):
if 'csv' in self.data_fid:
return pd.read_csv(self.data_fid)
else:
print('whomp whomp')
| StarcoderdataPython |
3373040 | <gh_stars>0
import math
class Position(object):
def __init__(self, position=None, xpos=None, ypos=None):
self.__x = 0
self.__y = 0
if position is not None:
self.__x = position.x
self.__y = position.y
else:
if xpos is not None:
self.__x = xpos
if ypos is not None:
self.__y = ypos
# getters & setters
@property
def x(self):
return self.__x
@x.setter
def x(self, value):
self.__x = value
@property
def y(self):
return self.__y
@y.setter
def y(self, value):
self.__y = value
def distance(self, position):
if position:
return math.sqrt((self.x - position.x) * (self.x - position.x) + (self.y - position.y) * (self.y - position.y))
return -1
def __eq__(self, other):
return (self.x == other.x) and (self.y == other.y)
def __str__(self):
return "({0}, {1})".format(self.x, self.y)
| StarcoderdataPython |
93709 | __author__ = '<NAME> <<EMAIL>>'
__date__ = ' 16 December 2017'
__copyright__ = 'Copyright (c) 2017 <NAME>'
import dg
import pandas as pd
from copy import deepcopy
from dg.utils import bar
from dg import persistence
from dg.config import Config
from dg.enums import Mode, Dataset
def train_model(model, train_set, eval_set, model_dir=None, save=False,
silent=False):
"""Train a single model and save it
Args:
model (dg.Model): Model to train
train_set (str): Dataset to train on
eval_set (str): Dataset to use for evaluation during training
model_dir (str): Path to the directory where the model should be saved
save (bool): Save the model
silent (bool): Don't print details to standard out.
"""
if not silent:
print('Training:', model.id)
model.fit_dataset(train_set, eval_set)
if save:
if not silent:
print('Saving:', model.id)
persistence.save(model, model_dir)
def train(models, train_set, eval_set=None, silent=False):
"""Train all model for production and save them
Args:
models (list of str): Model names. Pass if you want to train a just a
set particular models
train_set (dg.enums.Dataset): Dataset to train on
eval_set (dg.enums.Dataset): Dataset to use for evaluation during
training.
silent (bool): Don't print details to standard out.
"""
config = Config()
model_dir = config.get_model_dir()
if not silent:
print('Model dir: ', model_dir)
bar(silent=silent)
for model_id in models:
model = config.models[model_id].set_params(
**config.get_params(model_id)
)
datasets = config.get_datasets(model.id)
train_set = (
datasets[train_set.value] if isinstance(train_set, Dataset)
else train_set
)
eval_set = (
datasets[eval_set.value] if isinstance(eval_set, Dataset)
else eval_set
)
train_model(model, train_set=train_set, eval_set=eval_set,
model_dir=model_dir, save=True, silent=silent)
bar(silent=silent)
def print_metrics(metrics):
"""Pretty print the metrics"""
if metrics[Mode.TRAIN.value]:
print('Train:')
for key, value in metrics[Mode.TRAIN.value].items():
print(f'\t{key}:\t{value:.4f}')
if metrics[Mode.EVAL.value]:
print('Eval:')
for key, value in metrics[Mode.EVAL.value].items():
print(f'\t{key}:\t{value:.4f}')
if metrics[Mode.TEST.value]:
print('Test:')
for key, value in metrics[Mode.TEST.value].items():
print(f'\t{key}:\t{value:.4f}')
def metrics_to_dict(model, metrics):
d = {'model': model.id}
for ds in metrics:
if metrics[ds] is not None:
for metric in metrics[ds]:
d[f'{ds}-{metric}'] = metrics[ds][metric]
return d
def columns():
config = Config()
cols = ['model']
for ds in Dataset.for_eval():
metrics = config.get('metrics.all', None)
if metrics is None:
cols.append(f'{ds.value}-score')
else:
for metric in metrics:
cols.append(f'{ds.value}-{metric}')
return cols
def evaluate_model(model, datasets, silent=False):
"""Evaluate a single model
Args:
model (dg.Model): Model to evaluate
datasets (list of dg.enums.Dataset): List of datasets used for
evaluation.
silent (bool): Don't print details to standard out.
Returns:
dict: Evaluation metrics
"""
config = Config()
metrics = config.get('metrics.all', None)
if not silent:
print('Evaluating:', model.id)
db = persistence.Database()
old_metrics = db.get(model)
new_metrics = deepcopy(old_metrics)
model_datasets = config.get_datasets(model.id)
for ds in datasets:
if (
new_metrics.get(ds.value, None) is None and
model_datasets[ds.value] is not None
):
score = model.score_dataset(model_datasets[ds.value],
metrics=metrics)
new_metrics[ds.value] = (
score if isinstance(score, dict) else {'score': score}
)
if old_metrics != new_metrics:
db.add(model, new_metrics)
if not silent:
print_metrics(new_metrics)
return metrics_to_dict(model, new_metrics)
def evaluate(models, datasets, silent=False):
"""Evaluate all models and print out the metrics for evaluation.
Evaluation is using the production model.
Args:
models (list of str): Model names. Pass if you want to evaluate just a
set of particular models.
datasets (list of dg.enums.Dataset): List of datasets used for
evaluation.
silent (bool): Don't print details to standard out.
"""
config = Config()
all_metrics = []
bar(silent=silent)
for name in models:
model = persistence.load(config.models[name])
all_metrics.append(evaluate_model(model, datasets, silent=silent))
bar(silent=silent)
df = pd.DataFrame(all_metrics, columns=columns())
return df
def train_and_evaluate(models, datasets, silent=False):
"""Train end evaluate models and print out the metrics for evaluation
Args:
models (list of str): Model names. Pass if you want to train/evaluate
just a set of particular models
datasets (list of dg.enums.Dataset): List of datasets used for
evaluation.
silent (bool): Don't print details to standard out.
"""
config = dg.Config()
all_metrics = []
bar(silent=silent)
for model_id in models:
model = config.models[model_id].set_params(
**config.get_params(model_id)
)
dss = config.get_datasets(model.id)
train_model(model, train_set=dss[Dataset.TRAIN.value],
eval_set=dss[Dataset.EVAL.value], save=False,
silent=silent)
all_metrics.append(evaluate_model(model, datasets, silent=silent))
bar(silent=silent)
df = pd.DataFrame(all_metrics, columns=columns())
return df
| StarcoderdataPython |
3223629 | # Databricks notebook source
# MAGIC %md
# MAGIC # What's in this exercise?
# MAGIC
# MAGIC 1) Read raw data, augment with derived attributes, augment with reference data & persist<BR>
# MAGIC 2) Create external unmanaged Hive tables<BR>
# MAGIC 3) Create statistics for tables
# COMMAND ----------
from pyspark.sql.functions import *
from pyspark.sql.types import StructType, StructField, StringType, IntegerType,LongType,FloatType,DoubleType, TimestampType
# COMMAND ----------
# MAGIC %md
# MAGIC ### 1. Execute notebook with common/reusable functions
# COMMAND ----------
# MAGIC %run "../01-General/2-CommonFunctions"
# COMMAND ----------
# MAGIC %md
# MAGIC ### 2. Read raw, augment, persist as parquet
# COMMAND ----------
curatedDF = sql("""
select distinct t.taxi_type,
t.vendor_id as vendor_id,
t.pickup_datetime,
t.dropoff_datetime,
t.store_and_fwd_flag,
t.rate_code_id,
t.pickup_location_id,
t.dropoff_location_id,
t.pickup_longitude,
t.pickup_latitude,
t.dropoff_longitude,
t.dropoff_latitude,
t.passenger_count,
t.trip_distance,
t.fare_amount,
t.extra,
t.mta_tax,
t.tip_amount,
t.tolls_amount,
t.improvement_surcharge,
t.total_amount,
t.payment_type,
t.trip_year,
t.trip_month,
v.abbreviation as vendor_abbreviation,
v.description as vendor_description,
tm.month_name_short,
tm.month_name_full,
pt.description as payment_type_description,
rc.description as rate_code_description,
tzpu.borough as pickup_borough,
tzpu.zone as pickup_zone,
tzpu.service_zone as pickup_service_zone,
tzdo.borough as dropoff_borough,
tzdo.zone as dropoff_zone,
tzdo.service_zone as dropoff_service_zone,
year(t.pickup_datetime) as pickup_year,
month(t.pickup_datetime) as pickup_month,
day(t.pickup_datetime) as pickup_day,
hour(t.pickup_datetime) as pickup_hour,
minute(t.pickup_datetime) as pickup_minute,
second(t.pickup_datetime) as pickup_second,
date(t.pickup_datetime) as pickup_date,
year(t.dropoff_datetime) as dropoff_year,
month(t.dropoff_datetime) as dropoff_month,
day(t.dropoff_datetime) as dropoff_day,
hour(t.dropoff_datetime) as dropoff_hour,
minute(t.dropoff_datetime) as dropoff_minute,
second(t.dropoff_datetime) as dropoff_second,
date(t.dropoff_datetime) as dropoff_date
from
taxi_db.yellow_taxi_trips_raw t
left outer join taxi_db.vendor_lookup v
on (t.vendor_id = case when t.trip_year < "2015" then v.abbreviation else v.vendor_id end)
left outer join taxi_db.trip_month_lookup tm
on (t.trip_month = tm.trip_month)
left outer join taxi_db.payment_type_lookup pt
on (t.payment_type = case when t.trip_year < "2015" then pt.abbreviation else pt.payment_type end)
left outer join taxi_db.rate_code_lookup rc
on (t.rate_code_id = rc.rate_code_id)
left outer join taxi_db.taxi_zone_lookup tzpu
on (t.pickup_location_id = tzpu.location_id)
left outer join taxi_db.taxi_zone_lookup tzdo
on (t.dropoff_location_id = tzdo.location_id)
""")
curatedDFConformed = (curatedDF.withColumn("temp_vendor_id", col("vendor_id").cast("integer")).drop("vendor_id").withColumnRenamed("temp_vendor_id", "vendor_id").withColumn("temp_payment_type", col("payment_type").cast("integer")).drop("payment_type").withColumnRenamed("temp_payment_type", "payment_type"))
#Save as parquet, partition by year and month
#curatedDFConformed.coalesce(15).write.partitionBy("trip_year", "trip_month").parquet(destDataDirRoot)
# COMMAND ----------
#Destination directory
destDataDirRoot = "/mnt/workshop/curated/nyctaxi/transactions/yellow-taxi"
#Delete any residual data from prior executions for an idempotent run
dbutils.fs.rm(destDataDirRoot,recurse=True)
# COMMAND ----------
#Save as Delta, partition by year and month
curatedDFConformed.coalesce(10).write.format("delta").mode("append").partitionBy("trip_year","trip_month").save(destDataDirRoot)
# COMMAND ----------
# MAGIC %md
# MAGIC ### 3. Define external table
# COMMAND ----------
# MAGIC %sql
# MAGIC USE taxi_db;
# MAGIC DROP TABLE IF EXISTS yellow_taxi_trips_curated;
# MAGIC CREATE TABLE yellow_taxi_trips_curated
# MAGIC USING DELTA
# MAGIC LOCATION '/mnt/workshop/curated/nyctaxi/transactions/yellow-taxi';
# COMMAND ----------
# MAGIC %md
# MAGIC ### 4. Explore
# COMMAND ----------
# MAGIC %sql
# MAGIC select count(*) as trip_count from taxi_db.yellow_taxi_trips_curated
# COMMAND ----------
# MAGIC %sql
# MAGIC select trip_year,trip_month, count(*) as trip_count from taxi_db.yellow_taxi_trips_curated group by trip_year,trip_month | StarcoderdataPython |
198469 | import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
def cross_entropy_2d(predict, target):
"""
Args:
predict:(n, c, h, w)
target:(n, h, w)
"""
assert not target.requires_grad
assert predict.dim() == 4
assert target.dim() == 3
assert predict.size(0) == target.size(0), f"{predict.size(0)} vs {target.size(0)}"
assert predict.size(2) == target.size(1), f"{predict.size(2)} vs {target.size(1)}"
assert predict.size(3) == target.size(2), f"{predict.size(3)} vs {target.size(3)}"
n, c, h, w = predict.size()
target_mask = (target >= 0) * (target < 200)
target = target[target_mask]
if not target.data.dim():
return Variable(torch.zeros(1))
predict = predict.transpose(1, 2).transpose(2, 3).contiguous()
predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)
loss = F.cross_entropy(predict, target, size_average=True)
return loss
def entropy_loss(v):
"""
Entropy loss for probabilistic prediction vectors
input: batch_size x channels x h x w
output: batch_size x 1 x h x w
"""
assert v.dim() == 4
n, c, h, w = v.size()
return -torch.sum(torch.mul(v, torch.log2(v + 1e-30))) / (n * h * w * np.log2(c))
| StarcoderdataPython |
49315 | <gh_stars>10-100
#!/usr/bin/python
## @package catalog-manager
# Provides general functions to parse SQ3 files
# and generate SQL code.
#
# Can manage both 3D and MATERIALS (with TEXTURES).
import csv
import fnmatch
import getopt
import logging
import os
import platform
import random
import re
import shutil
import string
import sys
import time
## Main logger
#
# There are two loggers:
# 1. A file logger starting a WARNING level
# 2. A console logger starting a DEUBG level
logger = logging.getLogger('catalog-manager')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('catalog-manager.log')
fh.setLevel(logging.WARNING)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
## Deprecated
KIND_FURNITURE = 1
KIND_MATERIAL = 2
KIND_FRAME = 3
## Catalogs
CATALOGS = {
'Generique': 1,
'Fly': 2,
'Castorama': 3,
'Made': 4,
'Decoondemand': 5
}
CATALOG_GENERIQUE = 1
## Function for making unique non-existent file name
# with saving source file extension.
#
# Credit goes to <NAME>:
# http://code.activestate.com/recipes/577200-make-unique-file-name/
def add_unique_postfix(fn):
path, name = os.path.split(fn)
name, ext = os.path.splitext(name)
make_fn = lambda i: os.path.join(path, '%s_%d%s' % (name, i, ext))
for i in xrange(1, sys.maxint):
uni_fn = make_fn(i)
if not os.path.exists(uni_fn):
return uni_fn
return None
## Increase occurence of a key in a dictionary.
def increment_occurence(d, k):
if k not in d:
d[k] = 1
else:
d[k] += 1
## Parse the CSV file used to keep track of ids in a catalogue.
#
# The ids are used in order to avoid duplicates and make proper
# copies of SQ3 for SQCM.
def parse_csv(filename):
files = {}
try:
read = csv.reader(open(filename, "rb"))
except IOError:
return files
ids_occ = {}
logger.info("parsing: %s", filename)
for r in read:
iden = r[0]
increment_occurence(ids_occ, iden)
if iden in files:
pass
else:
files[iden] = {
'path': ''
}
for k, v in [(k, v) for k, v in ids_occ.items() if v > 1]:
logger.warning('%s found %d times in csv' % (k, v))
return files
## Parse a 'materials' file (usually Style.xls)
#
# Fills in a dictionary where the key is the id of the material.
# The value is another dictionary containing 'cat_name_fr' and 'texture'.
# Ex:
# {
# 'sketch_in_the_grass_06': {
# 'cat_name_fr': 'Papier peint a motifs',
# 'texture': 'in_the_grass_06'
# }
# }
def parse_material_xls(xls, textures):
import xlrd
logger.info("parsing xml file: %s" % xls)
try:
book = xlrd.open_workbook(xls, formatting_info=True)
except IOError:
logger.error("unable to open: %s" % xls)
sys.exit(2)
materials = {}
for i in range(book.nsheets):
sheet = book.sheet_by_index(i)
# Invalid sheet
if sheet.nrows < 5 or sheet.ncols < 17:
continue
for row in range(4, sheet.nrows):
ide = unicode(sheet.cell(row, 0).value).strip()
lib = string.capwords(unicode(sheet.cell(row, 3).value))
typ = unicode(sheet.cell(row, 5).value)
cat = unicode(sheet.cell(row, 6).value)
tex = unicode(sheet.cell(row, 15).value).strip()
tep = "" # Texture path
if not ide:
continue
logger.debug("material: %s - texture: %s" % (ide, tex))
if len(typ):
typ = typ[0].upper() + typ[1:]
if tex:
if tex not in textures:
logger.error("unable to find texture: %s for: %s" %
(tex, ide))
continue
else:
tep = textures[tex]['path']
if ide in materials:
logger.error("duplicate key: %s" % ide)
continue
buf = {
'cat_name_fr': lib if lib != '' else None,
'texture': tep
}
materials[ide] = buf
return materials
## Find all textures (usually jpg files) in CG/TEXTURES/
#
# Fills in a dictionary of the basename (without the extension) with a path.
# Ex:
# {
# 'in_the_grass_06': {
# 'path': './CG/TEXTURES/garden/in_the_grass_06.jpg'
# }
# }
def find_textures(directory, extensions=["jpg"]):
logger.info('looking for textures in %s' % os.path.abspath(directory))
textures = {}
for root, dirnames, filenames in os.walk(directory):
for f in filenames:
n, e = os.path.splitext(f)
if e[1:].lower() in extensions:
path = os.path.join(root, f)
if n in textures:
logger.error("texture: %s already found here: %s" %
(path, textures[n]['path']))
sys.exit(2)
else:
textures[n] = {
'path': path
}
return textures
## Find geometry files (usually sq3 files) in CG/3D/CATALOG
#
# Fills in a dictionary based on catalog_geometryid.
# Ex:
# {
# 'generic_archmodels_05': {
# 'sq3': 'archmodels_05.SQ3',
# 'path': './CG/3D/GENERIQUE/.../Beds/archmodels_05/archmodels05.SQ3',
# 'type': 'Beds',
# }
# }
def find_geometry(directory, catalog, extension="sq3",
previous_files={}, only_new=True):
logger.info('looking for files in %s' % os.path.abspath(directory))
catalog = catalog.lower()
ids = {}
old = {}
ids_occ = {}
ids_rem = previous_files.copy() # this dict should be empty at the end
sep = os.sep if os.sep != '\\' else '\\\\'
for root, dirnames, filenames in os.walk(directory):
for f in filenames:
n, e = os.path.splitext(f)
if e[1:].lower() == extension:
tmp, bas = os.path.split(root)
ide = '%s_%s' % (catalog, bas)
tmp2, typ = os.path.split(tmp)
increment_occurence(ids_occ, ide)
new = {
'sq3': f,
'path': '%s%s%s' % (root, os.sep, f),
'type': typ,
}
if ide in previous_files:
# Remove key
try:
ids_rem.pop(ide)
except:
pass
if only_new:
continue
else:
old[ide] = new
else:
ids[ide] = new
if len(ids_rem):
for k, v in ids_rem.items():
logger.error('id: %s was removed be careful' % k)
sys.exit(2)
for k, v in [(k, v) for k, v in ids_occ.items() if v > 1]:
logger.warning('id: %s found %d times' % (k, v))
if k in ids:
ids.pop(k)
return ids, old
## Load a Dictionary containing unique names for geometry.
def load_names():
dictionary = os.path.join(os.curdir, "NAMES", "Dictionary.txt")
names = open(dictionary, "rt")
return [ l.strip() for l in names.readlines() ]
## Save the dictionary, removing the names that were used
# in the process of generating the CSV file.
def save_names(names):
dictionary = os.path.join(os.curdir, "NAMES", "Dictionary.txt")
new_names = open(dictionary, "wt")
new_names.writelines([ '%s\r\n' % n for n in names ])
## Generate CSV files.
#
# This function will generate 3 CSV files:
# One for general geometries to keep track of all ids.
# One for new goemetries added to make it easier for import in Excel.
# It's naming convention is: catalog_geometry_X.csv where X is unique.
# One for new categories added when new geometry is found.
def generate_csv(files, output_name, random_names=True):
names = load_names()
pattern = os.sep if os.sep != '\\' else '\\\\'
lines = []
categories = set()
xl_path = os.path.join(os.curdir, "EXCEL")
for k, v in files.items():
if len(names) == 0:
logger.error("no more names in dictionary, please insert new ones")
sys.exit(2)
r = random.randint(0, len(names) - 1)
f = v['path']
t = v['type']
splits = re.split(pattern, f)
splits = splits[3:] # Remove './CG/3D/GENERIQUE/'
cat = os.sep.join(splits[0:-2])
if random_names:
nam = names.pop(r)
else:
nam = ""
if cat not in categories:
categories.add(cat)
line = [k, k + ".SQ3", v['sq3'], nam, t] # [ID, File.sq3, Type]
lines.append(line)
lines_s = sorted(lines, key=lambda x: x[0])
categories_s = sorted(categories)
save_names(names)
geometry_name = '%s_geometry.csv' % output_name.lower()
filename = os.path.join(xl_path, geometry_name)
logger.info("updating: %s" % filename)
output = open(filename, mode='ab')
writer = csv.writer(output)
for l in lines_s:
writer.writerow(l)
filename = os.path.join(xl_path, '%s_geometry.csv' % output_name.lower())
geometry_unique = add_unique_postfix(filename)
logger.info("generating: %s" % geometry_unique)
output = open(geometry_unique, mode='wb')
writer = csv.writer(output)
for l in lines_s:
writer.writerow(l)
filename = os.path.join(xl_path, '%s_category.csv' % output_name.lower())
category_name = add_unique_postfix(filename)
logger.info("generating: %s" % category_name)
catego = open(category_name, 'wb')
writer = csv.writer(catego)
for c in categories_s:
splits = re.split(pattern, c)
writer.writerow(splits)
## Retrieve metadata of a given filename.
def get_file_metadata(filename):
stat_info = os.stat(filename)
return stat_info
## Find out if a file needs to be updated.
#
# If origin is newer than copy, this function will return true.
# Otherwise it will return false.
def need_update(origin, copy):
ori_info = get_file_metadata(origin)
cpy_info = get_file_metadata(copy)
return cpy_info.st_mtime < ori_info.st_mtime
## Copy a file from 'fr' to 'to' if it needs an update.
def copy_file(ide, fr, to):
try:
if os.path.exists(to):
if need_update(fr, to):
logger.warning("updating file: %s" % to)
shutil.copy(fr, to)
else:
shutil.copy(fr, to)
except:
logger.error("unable to copy: %s for id: %s" % (fr, ide))
## Flaten all textures from a material catalog for easier SQCM management.
def tex_to_sqcm(materials, catalog):
path_out = os.path.join(os.curdir, "CG", "MATERIALS")
path_out = os.path.join(path_out, "%s_SQCM" % catalog.split("_")[0])
logger.info("generating sqcm tree to: %s_SQCM" % path_out)
if not os.path.exists(path_out):
logger.info("creating directory: %s" % path_out)
os.makedirs(path_out)
for k, v in materials.items():
texture = v['texture']
if not texture:
continue
filename = os.path.basename(texture)
logger.debug("checking to copy: %s" % filename)
tex_sqcm = os.path.join(path_out, filename)
# Update texture if needed
copy_file(k, texture, tex_sqcm)
## Flaten all geometries from a 3D catalog for easier SQCM management.
#
# It will also look for thumbnails and copy them if needed.
def sq3_to_sqcm(ids, catalog):
logger.info("generating sqcm tree to: %s_SQCM" % catalog)
pattern = os.sep if os.sep != '\\' else '\\\\'
for k, v in ids.items():
sq3 = v['path']
path, filename = os.path.split(sq3)
spl = re.split(pattern, sq3)
out = spl[3] + "_SQCM"
las = spl[-2]
typ = v['type']
thu = os.path.join(path, "%s_v77.jpg" % las)
big = os.path.join(path, "%s_v0001.jpg" % las)
pat = os.path.join(os.curdir, "CG", "3D", out)
if not os.path.exists(pat):
logger.info("creating directory: %s" % pat)
os.makedirs(pat)
sq3_sqcm = os.path.join(pat, "%s.SQ3" % k)
thu_sqcm = os.path.join(pat, "%s_v77.jpg" % k)
big_sqcm = os.path.join(pat, "%s_v512.jpg" % k)
# Update geometry and thumbnails if needed
copy_file(k, sq3, sq3_sqcm)
copy_file(k, thu, thu_sqcm)
copy_file(k, big, big_sqcm)
## Generate SQL based on a Schema file and Database.xls
def generate_sql(host, user, passw, db):
import MySQLdb as mysql
import xlrd
con = None
cur = None
try:
con = mysql.connect(host, user, passw , db,
use_unicode=True, charset="utf8")
cur = con.cursor()
sql = os.path.join('SQL', 'schema.sql')
# Insert SQL Schema
for l in open(sql, 'rt'):
cur.execute(l)
xls = os.path.join("EXCEL", "Database.xls")
book = xlrd.open_workbook(xls, formatting_info=True)
for i in range(book.nsheets):
sheet = book.sheet_by_index(i)
logger.info("processing stylesheet: %s" % sheet.name)
if sheet.name == "Category":
for row in range(4, sheet.nrows):
cate_par_id = cate_cur_id = None
for col in range(1, sheet.ncols):
cate_par_id = cate_cur_id
cat = unicode(sheet.cell(row, col).value).strip()
if not cat:
continue
if col == 1:
cat = cat.capitalize()
cur.execute("SELECT id FROM nehome_catalog \
WHERE name=%s", cat)
data = cur.fetchone()
if not data:
if cat not in CATALOGS:
logger.error("unkown catalog: %s" % cat)
logger.info("update dictionary CATALOGS")
sys.exit(2)
id_cat = CATALOGS[cat]
cur.execute("INSERT INTO nehome_catalog \
SET id=%s, name=%s", (id_cat, cat))
cata_cur_id = id_cat
logger.debug("created catalogue: %s" % cat)
else:
cata_cur_id = int(data[0])
logger.debug("catalog id: %d" % cata_cur_id)
# Inserting new category if needed
cur.execute("SELECT id, id_catalog, name_en, name_fr \
FROM nehome_category \
WHERE name_en=%s AND id_catalog=%s",
(cat, cata_cur_id))
data = cur.fetchone()
if not data:
cur.execute("INSERT INTO nehome_category \
SET name_en=%s, id_catalog=%s",
(cat, cata_cur_id))
cur.execute("SELECT LAST_INSERT_ID()")
cate_cur_id = int(cur.fetchone()[0])
logger.debug("created category: %s" % cat)
else:
cate_cur_id = int(data[0])
# Inserting new tree: parent -> child if needed
if cate_par_id:
# Can occur when two same categories
# follow each other
if cate_par_id == cate_cur_id:
logger.warning("category: %s is looping" % cat)
continue
cur.execute("SELECT * FROM nehome_cat_arbo \
WHERE id_cat_parent=%s AND \
id_cat_child=%s",
(cate_par_id, cate_cur_id))
data = cur.fetchone()
if not data:
cur.execute("INSERT INTO nehome_cat_arbo \
SET id_cat_parent=%s, \
id_cat_child=%s",
(cate_par_id, cate_cur_id))
logger.debug("created arbo: %d -> %d" %
(cate_par_id, cate_cur_id))
elif sheet.name == "Geometry":
cur.execute("INSERT INTO nehome_kind SET \
id=%s, name_en=%s, name_fr=%s",
(1, "Furniture", "Meubles"))
for row in range(4, sheet.nrows):
iden = unicode(sheet.cell(row, 1).value).strip()
geom = unicode(sheet.cell(row, 2).value).strip()
fsq3 = unicode(sheet.cell(row, 3).value).strip()
name = unicode(sheet.cell(row, 4).value).strip()
cate = unicode(sheet.cell(row, 5).value).strip()
defr = unicode(sheet.cell(row, 7).value).strip()
deen = unicode(sheet.cell(row, 8).value).strip()
urlv = unicode(sheet.cell(row, 9).value).strip()
cata = iden.split("_")[0].capitalize()
typc = ('%s_%s' % (cata, cate.replace(" ", "_"))).lower()
id_cata = CATALOGS[cata]
logger.debug('geometry: %s - %s - %s - %s' %
(iden, name, cate, cata))
# Find corresponding catalogue
cur.execute("SELECT id FROM nehome_catalog \
WHERE name=%s", cata)
data = cur.fetchone()
if not data:
logger.error("unable to find catalog: %s" % cata)
#sys.exit(2)
continue
id_cata = int(data[0])
# Find type if exists
cur.execute("SELECT id, name FROM nehome_type \
WHERE name=%s", typc)
data = cur.fetchone()
if not data:
# Find category from name and catalog
cur.execute("SELECT id FROM nehome_category \
WHERE name_en=%s AND id_catalog=%s",
(cate, id_cata))
datb = cur.fetchone()
if not datb:
logger.error("missing category: %s for: %s (%s)" %
(cate, iden, cata))
#sys.exit(2)
continue
id_cate = int(datb[0])
# Create type if found corresponding category
cur.execute("INSERT INTO nehome_type SET name=%s",
typc)
cur.execute("SELECT LAST_INSERT_ID()")
id_type = int(cur.fetchone()[0])
cur.execute("INSERT INTO nehome_type_to_category \
SET id_type=%s, id_category=%s",
(id_type, id_cate))
else:
id_type = int(data[0])
cur.execute("INSERT INTO nehome_object \
SET id=%s, name_en=%s, name_fr=%s, \
desc_en=%s, desc_fr=%s, url=%s, \
sq3_sqcm=%s, sq3_origin=%s, \
id_type=%s, id_catalog=%s, id_kind=%s", (
iden, name, name, deen, defr, urlv,
geom, fsq3, id_type, id_cata, 1))
# Insertion of objects is over
# Now it's time to insert more type_to_categories
cur.execute(" \
SELECT id, id_catalog, name_en \
FROM nehome_category c \
WHERE c.id_catalog=%s \
ORDER BY c.name_en", CATALOGS['Generique'])
data = cur.fetchall()
# For each name found in leaf category,
# attach brand type to generic category
for row_a in data:
cur.execute(" \
SELECT id, id_catalog, name_en, id_type \
FROM nehome_category c \
INNER JOIN nehome_type_to_category tc \
ON tc.id_category=c.id \
WHERE c.name_en=%s AND c.id_catalog>%s \
GROUP BY id",
(row_a[2], CATALOGS['Generique']))
datb = cur.fetchall()
for row_b in datb:
cur.execute(" \
INSERT INTO nehome_type_to_category \
SET id_type=%s, id_category=%s",
(row_b[3], row_a[0]))
elif sheet.name == "Label":
for row in range(4, sheet.nrows):
cate = unicode(sheet.cell(row, 1).value).strip()
cate_en = unicode(sheet.cell(row, 2).value).strip()
cate_fr = unicode(sheet.cell(row, 3).value).strip()
#logger.debug('label: %s - %s - %s' %
# (cate, cate_en, cate_fr))
cur.execute("SELECT id FROM nehome_category \
WHERE name_en=%s", cate)
data = cur.fetchall()
if not data:
#logger.info("category: %s does not exist" % cate)
continue
for d in data:
cur.execute("UPDATE nehome_category \
SET name_en=%s, name_fr=%s \
WHERE id=%s",
(cate_en, cate_fr, int(d[0])))
# Checking missing translations
cur.execute(" \
SELECT c.id, c.name_en FROM nehome_category c \
INNER JOIN nehome_type_to_category tc \
ON c.id=tc.id_category \
INNER JOIN nehome_type t ON t.id=tc.id_type \
WHERE name_fr IS NULL \
GROUP BY name_en")
data = cur.fetchall()
for row in data:
logger.warning("missing translation for category: %s",
row[1])
else:
logger.warning("unkown sheet name: %s" % sheet.name)
# Update name_fr for Brands
cur.execute("UPDATE nehome_category SET name_fr=name_en \
WHERE name_fr IS NULL;")
except mysql.Error, e:
logger.error('mysql error: (%d - %s)' % (e.args[0], e.args[1]))
con.rollback()
except IOError, e:
logger.error('IOError: %s' % str(e))
con.commit()
## Import all geometries from a catalog.
#
# This is a 4-step process:
# 1. Parse a persistent CSV file to grab previous ids.
# 2. Find new geometry files.
# 3. Flaten those files to a directory for SQCM.
# 4. Generate the corresponding CSV files for import in Database.xls
def import_catalog(catalog):
logger.info("importing 3d catalogue: %s" % catalog)
filename = os.path.join(os.curdir, "EXCEL",
'%s_geometry.csv' % catalog.lower())
ids_prev = parse_csv(filename)
catalog_path = os.path.join(os.curdir, "CG", "3D", catalog)
new, old = find_geometry(catalog_path, catalog,
previous_files=ids_prev, only_new=False)
total = dict(new.items() + old.items())
logger.info('found %d SQ3 files (%d new, %d old)' %
(len(total), len(new), len(old)))
if len(total):
sq3_to_sqcm(total, catalog)
if catalog == "GENERIQUE":
random_names = True
else:
random_names = False
generate_csv(new, catalog, random_names)
## Import Styles.xls from a material catalog.
#
# This is a 3-step process:
# 1. Look for all textures.
# 2. Parse Styles.xls to look for materials and grab their textures.
# 3. Copy the textures to a flat directory for SQCM.
# To find textures, this function looks inside ./CG/TEXTURES
def import_material(catalog):
logger.info("importing material from catalog: %s" % catalog)
path_mat = os.path.join(os.curdir, "CG", "MATERIALS", catalog)
path_tex = os.path.join(os.curdir, "CG", "TEXTURES")
textures = find_textures(path_tex)
mat = parse_material_xls(os.path.join(path_mat, "Styles.xls"), textures)
tex_to_sqcm(mat, catalog)
## Print usage of the package.
def usage():
basename = os.path.basename(sys.argv[0]);
print '''
usage: %s [option]
This program is based on the following hierarchy:
CG
3D
GENERIQUE
FLY
...
MATERIALS
GENERIQUE
...
It will, depending on the following options, generate the corresponding
_SQCM flat folders to upload to SQCM later in the process
Options:
--catalog
Import specified 3D catalog
--material
Import specified MATERIAL using MATERIAL/Style.xls
--generate-sql
Generates SQL from SQL/Database.xls
''' % basename
## Entry point.
#
# Deals with options and redirects to proper function.
def main():
try:
opts, argv = getopt.getopt(sys.argv[1:], "h", [
"help", "catalog=", "skip-csv", "generate-sql", "material="
])
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
system = platform.system()
db = False
xls = None
catalog = ""
material = ""
csv = False
reorder = False
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("--skip-csv"):
pass
elif o in ("--catalog"):
catalog = a
elif o in ("--material"):
material = a
elif o in ("--generate-sql"):
try:
import xlrd
except ImportError:
logger.error('cannot import xlrd python module')
logger.warning('cannot parse database file')
sys.exit(2)
try:
import MySQLdb
db = True
except ImportError:
logger.error('cannot import mysql python module')
logger.warning('unable to generate database file')
sys.exit(2)
else:
assert False, "unhandled option"
ids_prev = {}
ids = {}
files = []
if db:
generate_sql('localhost', 'sq', 'squareclock', 'hbm')
elif catalog:
import_catalog(catalog)
elif material:
import_material(material)
else:
logger.error("you must specify a catalog or generate-sql")
usage()
sys.exit(2)
if __name__ == "__main__":
main()
| StarcoderdataPython |
1649453 | <reponame>dblueai/dblue-stats
class Constants:
# Data types
DATA_TYPE_INTEGER = "integer"
DATA_TYPE_NUMBER = "number"
DATA_TYPE_STRING = "string"
DATA_TYPE_BOOLEAN = "boolean"
# Field types
FIELD_TYPE_CATEGORICAL = "categorical"
FIELD_TYPE_NUMERICAL = "numerical"
| StarcoderdataPython |
3312926 | <reponame>rwiuff/QuantumTransport
from matplotlib import pyplot as plt # Pyplot for nice graphs
# from matplotlib.gridspec import GridSpec
from progress.bar import Bar
import numpy as np # NumPy
from Functions import Import, NPGElectrode
from Functions import EnergyRecursion, Transmission, PeriodicHamiltonian
import sys
from fractions import Fraction
from matplotlib.ticker import FormatStrFormatter
np.set_printoptions(threshold=sys.maxsize)
nx = 1
ny = 2
contactrep = 1
shiftx = 2.46
ev1 = -1.5
ev2 = 1.5
numkP = 3
k1 = 0
k2 = np.pi
En = np.linspace(ev1 / 2.7, ev2 / 2.7, 200)
En = np.delete(En, np.where(En == 1))
En = np.delete(En, np.where(En == -1))
eta = 1e-6j
kP = np.linspace(k1, k2, numkP)
xyz, UX, UY, filename, dgeom, cellsize = Import(nx, contactrep)
RestL, L, R, C, RestR = NPGElectrode(xyz, dgeom, cellsize, nx)
TT = np.zeros((kP.shape[0], En.shape[0]))
GG = np.zeros((kP.shape[0], En.shape[0]), dtype=complex)
q = 0
for i in kP:
print('----------------------------------------------------------------------')
print('Calculating for k-point: {}'.format(i))
Ham = PeriodicHamiltonian(xyz, UY, i) # Get Hamiltonian for specific k-point
HL = Ham[L] # Left contact Hamiltonian is in a matrix with indicies specified by "L"
HL = HL[:, L]
HR = Ham[R] # Right contact Hamiltonian is in a matrix with indicies specified by "R"
HR = HR[:, R]
VL = Ham[L] # Hop elements from the left are in a matrix with indices "L,RestL"
VL = VL[:, RestL]
VR = Ham[RestR] # Hop elements to the right are in a matrix with indices "RestR,R"
VR = VR[:, R]
# gs = GridSpec(2, 2, width_ratios=[1, 2])
# a = plt.figure(figsize=(7, 4))
# ax1 = plt.subplot(gs[:, 1])
# plt.imshow(Ham.real)
# ax2 = plt.subplot(gs[0, 0])
# plt.imshow(HL.real)
# ax3 = plt.subplot(gs[1, 0])
# plt.imshow(HR.real)
# a.show()
# b = plt.figure(figsize=(7, 4))
# plt.subplot(121)
# plt.imshow(VL.real)
# plt.subplot(122)
# plt.imshow(VR.real)
# b.show()
# input('Press any key to continue')
GD, GammaL, GammaR = EnergyRecursion(Ham, HL, HR, VL, VR, En, eta)
G = np.zeros((En.shape[0]), dtype=complex)
bar = Bar('Retrieving Greens function ', max=En.shape[0])
for i in range(En.shape[0]):
G[i] = GD["GD{:d}".format(i)].diagonal()[0]
bar.next()
bar.finish()
T = Transmission(GammaL=GammaL, GammaR=GammaR, GD=GD, En=En)
GG[q, :] = G
TT[q, :] = T.real
q = q + 1
X = En * 2.7
Y0 = 0
numplot = numkP + 1
if numplot % 2 == 0:
if numplot % 3 == 0:
ncol = 3
else:
ncol = 2
else:
if numplot % 3 == 0:
ncol = 3
nrow = numplot / ncol
numplot = int(numplot)
nrow = int(nrow)
ncol = int(ncol)
q = int(0)
numplot = int(numplot)
axnames = ''
print('Plotting Greens functions')
for i in range(numplot):
a = 'ax{},'.format(i + 1)
axnames = axnames + a
fig, (axnames) = plt.subplots(nrow, ncol, sharex=True, figsize=(20, 20))
for i in range(nrow):
for j in range(ncol):
if q + 1 == numplot:
G = np.average(GG, axis=0)
Y = G
Y1 = Y.real
Y2 = Y.imag
fig.axes[numplot - 1].plot(X, Y1, label='Greens function')
fig.axes[numplot - 1].fill_between(X, 0, Y2,
color='orange',
alpha=0.8, label='LDOS')
fig.axes[numplot - 1].grid(which='both', axis='both')
fig.axes[numplot - 1].legend(loc="upper right")
fig.axes[numplot - 1].set_title('Average over k-points')
fig.axes[numplot - 1].yaxis.set_major_formatter(
FormatStrFormatter('%.2f'))
else:
Y = GG[q, :]
Y1 = Y.real
Y2 = Y.imag
fig.axes[q].plot(X, Y1, label='Greens function')
fig.axes[q].fill_between(
X, 0, Y2, color='orange', alpha=0.8, label='LDOS')
fig.axes[q].grid(which='both', axis='both')
fig.axes[q].legend(loc="upper right")
frac = Fraction(kP[q] * (1 / np.pi))
pi = r'$\ \pi$'
fig.axes[q].set_title('{}'.format(frac) + pi)
fig.axes[q].yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
q = q + int(1)
fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False,
bottom=False, left=False, right=False)
plt.xlabel('Energy E arb. unit')
plt.ylabel('Re[G(E)]/Im[G(E)]', labelpad=15)
plt.show()
for i in range(numplot - 1):
fig = plt.figure()
Y = GG[i, :]
Y1 = Y.real
Y2 = Y.imag
plt.plot(X, Y1, label='Greens function')
plt.fill_between(
X, 0, Y2, color='orange', alpha=0.8, label='LDOS')
plt.legend(loc="upper right")
plt.grid(which='both', axis='both')
plt.xlabel('Energy E arb. unit')
plt.ylabel('Re[G(E)]/Im[G(E)]', labelpad=15)
plt.xlim(ev1, ev2)
plt.show()
fig = plt.figure()
Y = np.average(GG, axis=0)
Y1 = Y.real
Y2 = Y.imag
plt.plot(X, Y1, label='Greens function')
plt.fill_between(
X, 0, Y2, color='orange', alpha=0.8, label='LDOS')
plt.legend(loc="upper right")
plt.grid(which='both', axis='both')
plt.xlabel('Energy E arb. unit')
plt.ylabel('Re[G(E)]/Im[G(E)]', labelpad=15)
plt.xlim(ev1, ev2)
plt.show()
q = int(0)
axnames = ''
print('Plotting Transmission')
for i in range(numplot):
a = 'ax{},'.format(i + 1)
axnames = axnames + a
fig, (axnames) = plt.subplots(nrow, ncol, sharex=True, figsize=(20, 20))
for i in range(nrow):
for j in range(ncol):
if q + 1 == numplot:
T = np.average(TT, axis=0)
Y = T.real
fig.axes[numplot - 1].plot(X, Y)
fig.axes[numplot - 1].grid(which='both', axis='both')
fig.axes[numplot - 1].set_title('Average over k-points')
fig.axes[numplot - 1].yaxis.set_major_formatter(
FormatStrFormatter('%.2f'))
else:
T = TT[q]
Y = T.real
fig.axes[q].plot(X, Y)
fig.axes[q].grid(which='both', axis='both')
frac = Fraction(kP[q] * (1 / np.pi))
pi = r'$\ \pi$'
fig.axes[q].set_title('{}'.format(frac) + pi)
fig.axes[q].yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
q = q + int(1)
fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False,
bottom=False, left=False, right=False)
plt.xlabel('E[eV]')
plt.ylabel('T(E)', labelpad=15)
plt.show()
for i in range(numplot - 1):
fig, ax = plt.subplots()
T = TT[i]
Y = T.real
plt.plot(X, Y)
plt.grid(which='both', axis='both')
plt.xlabel('E[eV]')
plt.ylabel('T(E)', labelpad=15)
plt.xlim(ev1, ev2)
plt.ylim(0, np.max(Y) + 0.25)
ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
plt.show()
fig, ax = plt.subplots()
T = np.average(TT, axis=0)
Y = T.real
plt.plot(X, Y)
plt.grid(which='both', axis='both')
plt.xlabel('E[eV]')
plt.ylabel('T(E)', labelpad=15)
plt.xlim(ev1, ev2)
plt.ylim(0, np.max(Y) + 0.25)
ax.yaxis.set_major_formatter(FormatStrFormatter('%.2f'))
plt.show()
input("Press any key to quit")
quit()
| StarcoderdataPython |
1625884 | M, H = [int(n) for n in input().split()]
print('No' if H%M else 'Yes')
| StarcoderdataPython |
3397283 | import logging
import random
from typing import Any, Dict, List, Optional, Union
from pydantic import create_model, BaseModel
from .base import BaseSchema, ProviderNotSetException
logger = logging.getLogger()
class PropertyNames(BaseModel):
pattern: Optional[str] = None
PropertyDependency = Dict[str, List[str]]
SchemaDependency = Dict[str, "Object"]
class Object(BaseSchema):
properties: Dict[str, BaseSchema] = None
additionalProperties: Optional[Union[bool, BaseSchema]] = None
required: Optional[List[str]] = None
propertyNames: Optional[PropertyNames] = None
minProperties: Optional[int] = None
maxProperties: Optional[int] = None
dependencies: Optional[Union[PropertyDependency, SchemaDependency]] = None
patternProperties: Optional[Dict[str, BaseSchema]] = None
def from_dict(d):
return Object(**d)
def should_keep(self, property_name: str) -> bool:
if isinstance(self.required, list) and property_name in self.required:
return True
return random.uniform(0, 1) < 0.5
def generate(self, context: Dict[str, Any]) -> Optional[Dict[str, Any]]:
try:
return super().generate(context)
except ProviderNotSetException:
return {o.name: o.generate(context) for o in self.properties if self.should_keep(o.name)}
def model(self, context: Dict[str, Any]):
self.generate(context)
name = self._get_unique_name(context)
_type = create_model(name, **{o.name: o.model(context) for o in self.properties})
context["__internal__"][_type.__name__] = _type
return self.to_pydantic(context, _type)
Object.update_forward_refs()
| StarcoderdataPython |
1789960 | <reponame>fyumoto/RBMs
import json
import os
# load the configuration file with the backend specification
filedir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(filedir,"config.json"), "r") as infile:
config = json.load(infile)
# read the processor type
PROCESSOR = config['processor']
BACKEND = config['backend']
# check validity
assert PROCESSOR in ["cpu", "gpu"], "processor must by cpu or gpu"
assert BACKEND in ["python", "pytorch"], "backend must be python or pytorch"
if PROCESSOR == "gpu":
assert BACKEND == "pytorch", "must specify pytorch backend to use gpu"
def test_has_cuda():
try:
import torch
except ImportError:
assert False, "must have pytorch installed to use pytorch backend"
try:
torch.cuda.FloatTensor()
except Exception:
assert False, "must have cuda enabled pytorch to use gpu"
test_has_cuda()
print("Running paysage with the {} backend on the {}".format(BACKEND, PROCESSOR))
| StarcoderdataPython |
48860 | <filename>bloodbank_rl/tianshou_utils/policies.py
from tianshou.policy import A2CPolicy, PPOPolicy
from typing import Any, Dict, List, Optional, Type
from tianshou.data import Batch
# Normal key structure for output doesn't work with MLFlow logging nicely
# loss used as a key and also parent
class A2CPolicyforMLFlow(A2CPolicy):
def learn(
self, batch: Batch, batch_size: int, repeat: int, **kwargs: Any
) -> Dict[str, List[float]]:
loss_dict = super().learn(batch, batch_size, repeat, **kwargs)
output_loss_dict = {}
output_loss_dict["loss"] = loss_dict["loss"]
output_loss_dict["loss_component/actor"] = loss_dict["loss/actor"]
output_loss_dict["loss_component/vf"] = loss_dict["loss/vf"]
output_loss_dict["loss_component/ent"] = loss_dict["loss/ent"]
return output_loss_dict
class PPOPolicyforMLFlow(PPOPolicy):
def learn(
self, batch: Batch, batch_size: int, repeat: int, **kwargs: Any
) -> Dict[str, List[float]]:
loss_dict = super().learn(batch, batch_size, repeat, **kwargs)
output_loss_dict = {}
output_loss_dict["loss"] = loss_dict["loss"]
output_loss_dict["loss_component/clip"] = loss_dict["loss/clip"]
output_loss_dict["loss_component/vf"] = loss_dict["loss/vf"]
output_loss_dict["loss_component/ent"] = loss_dict["loss/ent"]
return output_loss_dict
| StarcoderdataPython |
1687548 | import json
from datetime import datetime, timezone
from typing import Dict, Any, NamedTuple, Optional
from uuid import UUID
import bach
from bach import DataFrame
from sql_models.constants import DBDialect
from sql_models.util import is_postgres, is_bigquery
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from tests.functional.bach.test_data_and_utils import run_query
from tests.unit.bach.util import get_pandas_df
from modelhub import ModelHub
from tests_modelhub.data_and_utils.data_json_real import TEST_DATA_JSON_REAL, JSON_COLUMNS_REAL
from tests_modelhub.data_and_utils.data_objectiv import TEST_DATA_OBJECTIV
class DBParams(NamedTuple):
url: str
credentials: Optional[str]
table_name: str
def _convert_moment_to_utc_time(moment: str) -> int:
dt = datetime.fromisoformat(moment)
dt = dt.replace(tzinfo=timezone.utc)
return int(dt.timestamp() * 1e3)
def get_df_with_json_data_real(db_params: DBParams) -> DataFrame:
engine = create_engine_from_db_params(db_params)
df = DataFrame.from_pandas(
engine=engine,
df=get_pandas_df(dataset=TEST_DATA_JSON_REAL, columns=JSON_COLUMNS_REAL),
convert_objects=True,
)
df['global_contexts'] = df.global_contexts.astype('json')
df['location_stack'] = df.location_stack.astype('json')
return df
def get_objectiv_dataframe_test(db_params=None, time_aggregation=None):
if not db_params:
# by default use PG (this should be removed after modelhub is able to work with all bach engines)
import os
db_url = os.environ.get('OBJ_DB_PG_TEST_URL', 'postgresql://objectiv:@localhost:5432/objectiv')
credentials = None
table_name = 'objectiv_data'
else:
db_url = db_params.url
credentials = db_params.credentials
table_name = db_params.table_name
kwargs = {}
if time_aggregation:
kwargs = {'time_aggregation': time_aggregation}
modelhub = ModelHub(**kwargs)
return modelhub.get_objectiv_dataframe(
db_url=db_url,
table_name=table_name,
bq_credentials_path=credentials,
), modelhub
def get_parsed_objectiv_data(engine):
parsed_data = []
for event_data in TEST_DATA_OBJECTIV:
event_id, day, moment, cookie_id, value = event_data
value = json.loads(value)
# BQ uses time from taxonomy json for getting moment and day
# therefore time value MUST be the same as moment
if is_bigquery(engine):
value['time'] = _convert_moment_to_utc_time(moment)
parsed_data.append(
{
'event_id': UUID(event_id),
'day': datetime.strptime(day, '%Y-%m-%d').date(),
'moment': datetime.fromisoformat(moment),
'cookie_id': UUID(cookie_id),
'value': value
}
)
return parsed_data
def create_engine_from_db_params(db_params: DBParams) -> Engine:
if db_params.credentials:
engine = create_engine(url=db_params.url, credentials_path=db_params.credentials)
else:
engine = create_engine(url=db_params.url)
return engine
def setup_db(engine: Engine, table_name: str):
columns = {
'event_id': bach.SeriesUuid.supported_db_dtype[DBDialect.POSTGRES],
'day': bach.SeriesDate.supported_db_dtype[DBDialect.POSTGRES],
'moment': bach.SeriesTimestamp.supported_db_dtype[DBDialect.POSTGRES],
'cookie_id': bach.SeriesUuid.supported_db_dtype[DBDialect.POSTGRES],
'value': bach.SeriesJson.supported_db_dtype[DBDialect.POSTGRES],
}
_prep_db_table(engine, table_name=table_name, columns=columns)
_insert_records_in_db(engine, table_name=table_name, columns=columns)
def _prep_db_table(engine, table_name: str, columns: Dict[str, Any]):
if is_postgres(engine):
column_stmt = ','.join(f'{col_name} {db_type}' for col_name, db_type in columns.items())
sql = f"""
drop table if exists {table_name};
create table {table_name} ({column_stmt});
alter table {table_name}
owner to objectiv
"""
else:
raise Exception()
run_query(engine, sql)
def _insert_records_in_db(engine, table_name: str, columns: Dict[str, Any]):
from tests_modelhub.data_and_utils.data_objectiv import TEST_DATA_OBJECTIV
column_stmt = ','.join(columns.keys())
records = []
if is_postgres(engine):
for record in TEST_DATA_OBJECTIV:
formatted_values = [f"'{record[col_index]}'" for col_index, _ in enumerate(columns)]
records.append(f"({','.join(formatted_values)})")
else:
raise Exception()
values_stmt = ','.join(records)
sql = f'insert into {table_name} ({column_stmt}) values {values_stmt}'
run_query(engine, sql)
| StarcoderdataPython |
4823195 | #
# Project SmartDjango REST
# Copyright (c) <NAME> 2021
# This software is licensed under MIT license
#
import logging
from rest_framework import viewsets, permissions
from rest_framework.decorators import action
from rest_framework.response import Response
from smartdjangorest.dao import get_book_by_author
from smartdjangorest.models import Book
from smartdjangorest.serializers import BookSerializer
logger = logging.getLogger(__name__)
class BookViewSet(viewsets.ModelViewSet):
"""
API endpoints that allows books to be viewed or edited.
"""
queryset = Book.objects.all().order_by('title')
permission_classes = [permissions.IsAuthenticated]
serializer_class = BookSerializer
@action(methods=['get'], detail=False, url_path='by_author/(?P<author>[^/.]+)')
def get_by_author(self, request, author):
"""
API endpoint to get books by author
"""
logger.info('Looking books with author = ' + author)
filtered_books = get_book_by_author(author)
logger.info('Found books: ' + str(filtered_books))
serializer = BookSerializer(filtered_books, many=True)
return Response(serializer.data)
| StarcoderdataPython |
71823 | from easydict import EasyDict
from typing import Optional, List
import copy
final_eval_reward_wrapper = EasyDict(type='final_eval_reward')
def get_default_wrappers(env_wrapper_name: str, env_id: Optional[str] = None) -> List[dict]:
if env_wrapper_name == 'mujoco_default':
return [
EasyDict(type='delay_reward', kwargs=dict(delay_reward_step=3)),
copy.deepcopy(final_eval_reward_wrapper),
]
elif env_wrapper_name == 'atari_default':
wrapper_list = []
wrapper_list.append(EasyDict(type='noop_reset', kwargs=dict(noop_max=30)))
wrapper_list.append(EasyDict(type='max_and_skip', kwargs=dict(skip=4)))
wrapper_list.append(EasyDict(type='episodic_life'))
if env_id is not None:
if 'Pong' in env_id or 'Qbert' in env_id or 'SpaceInvader' in env_id or 'Montezuma' in env_id:
wrapper_list.append(EasyDict(type='fire_reset'))
wrapper_list.append(EasyDict(type='warp_frame'))
wrapper_list.append(EasyDict(type='scaled_float_frame'))
wrapper_list.append(EasyDict(type='clip_reward'))
wrapper_list.append(EasyDict(type='frame_stack', kwargs=dict(n_frames=4)))
wrapper_list.append(copy.deepcopy(final_eval_reward_wrapper))
return wrapper_list
elif env_wrapper_name == 'gym_hybrid_default':
return [
EasyDict(type='gym_hybrid_dict_action'),
copy.deepcopy(final_eval_reward_wrapper),
]
elif env_wrapper_name == 'default':
return [copy.deepcopy(final_eval_reward_wrapper)]
else:
raise NotImplementedError()
| StarcoderdataPython |
4830702 | <reponame>slomrafgrav/MMdnn
from mmdnn.conversion.pytorch.pytorch_parser import PytorchParser
parser = PytorchParser('/media/slomkarafa/HDD0/Projects/android/py_to_tf/model.pth',[3,224,224])
# parser = PytorchParser(dens,(3,224,224))
parser.run('out/sen') | StarcoderdataPython |
3372818 | __source__ = 'https://leetcode.com/problems/construct-quad-tree/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 427. Construct Quad Tree
#
# We want to use quad trees to store an N x N boolean grid.
# Each cell in the grid can only be true or false.
# The root node represents the whole grid.
# For each node, it will be subdivided into four children nodes
# until the values in the region it represents are all the same.
#
# Each node has another two boolean attributes : isLeaf and val.
# isLeaf is true if and only if the node is a leaf node.
# The val attribute for a leaf node contains the value of the region it represents.
#
# Your task is to use a quad tree to represent a given grid.
# The following example may help you understand the problem better:
#
# Given the 8 x 8 grid below, we want to construct the corresponding quad tree:
#
# It can be divided according to the definition above:
#
# The corresponding quad tree should be as following,
# where each node is represented as a (isLeaf, val) pair.
#
# For the non-leaf nodes, val can be arbitrary, so it is represented as *.
# Note:
#
# N is less than 1000 and guaranteened to be a power of 2.
# If you want to know more about the quad tree, you can refer to its wiki.
#
import unittest
# Definition for a QuadTree node.
class Node(object):
def __init__(self, val, isLeaf, topLeft, topRight, bottomLeft, bottomRight):
self.val = val
self.isLeaf = isLeaf
self.topLeft = topLeft
self.topRight = topRight
self.bottomLeft = bottomLeft
self.bottomRight = bottomRight
# 156ms 97.42%
class Solution(object):
def construct(self, grid):
"""
:type grid: List[List[int]]
:rtype: Node
"""
if not grid:
return None
if self.isLeaf(grid):
return Node(grid[0][0] == 1, True, None, None, None, None)
n = len(grid)
return Node('*',
False,
self.construct([row[:n/2] for row in grid[:n/2]]),
self.construct([row[n/2:] for row in grid[:n/2]]),
self.construct([row[:n/2] for row in grid[n/2:]]),
self.construct([row[n/2:] for row in grid[n/2:]])
)
def isLeaf(self, grid):
vals = set()
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
vals.add(grid[i][j])
if len(vals) > 1:
return False
return True
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
/*
// Definition for a QuadTree node.
class Node {
public boolean val;
public boolean isLeaf;
public Node topLeft;
public Node topRight;
public Node bottomLeft;
public Node bottomRight;
public Node() {}
public Node(boolean _val,boolean _isLeaf,Node _topLeft,Node _topRight,Node _bottomLeft,Node _bottomRight) {
val = _val;
isLeaf = _isLeaf;
topLeft = _topLeft;
topRight = _topRight;
bottomLeft = _bottomLeft;
bottomRight = _bottomRight;
}
};
*/
# 2ms 100%
class Solution {
public Node construct(int[][] grid) {
return helper(grid, 0, 0, grid.length);
}
private Node helper(int[][] grid, int i, int j, int len) {
if (len == 1) return new Node(grid[i][j] == 1, true, null, null, null, null);
int next_len = len >> 1;
Node tl = helper(grid, i, j, next_len);
Node tr = helper(grid, i, j + next_len, next_len);
Node bl = helper(grid, i + next_len, j, next_len);
Node br = helper(grid, i + next_len, j + next_len, next_len);
if (tl.isLeaf && tr.isLeaf && bl.isLeaf && br.isLeaf
&& (tl.val && tr.val && bl.val && br.val ||
!tl.val && !tr.val && !bl.val && !br.val)) return new Node(tl.val, true, null, null, null, null);
else {
return new Node(false, false, tl, tr, bl, br);
}
}
}
# 2ms 100%
class Solution {
public Node construct(int[][] grid) {
return construct(0, grid.length - 1, 0, grid.length - 1, grid);
}
Node construct(int r1, int r2, int c1, int c2, int[][] grid) {
if (r1 > r2 || c1 > c2) return null;
boolean isLeaf = true;
int val = grid[r1][c1];
for (int i = r1; i <= r2; i++) {
for (int j = c1; j <= c2; j++) {
if (grid[i][j] != val) {
isLeaf = false;
break;
}
}
}
if (isLeaf) {
return new Node(val == 1, true, null, null, null, null);
}
int rowMid = (r1 + r2) / 2;
int colMid = (c1 + c2) / 2;
return new Node(false, false,
construct(r1, rowMid, c1, colMid, grid),
construct(r1, rowMid, colMid + 1, c2, grid),
construct(rowMid + 1, r2, c1, colMid, grid),
construct(rowMid + 1, r2, colMid + 1, c2, grid));
}
}
''' | StarcoderdataPython |
123701 | <gh_stars>0
import threading
import time
from datetime import datetime
from sensor.sensor import Sensor, SensorException
from logger.logger import SensorDataLogger
class DataCollector(threading.Thread):
def __init__(self):
super().__init__()
self.sensors = {}
self.loggers = []
self.stop_flag = False
def add_sensor(self, sensor, interval=10):
if not isinstance(sensor, Sensor):
raise TypeError('Object should be an instance of type Sensor')
self.sensors[sensor.SENSOR_NAME] = {'sensor': sensor, 'interval': int(interval), 'last_update': None}
def add_logger(self, logger):
if not isinstance(logger, SensorDataLogger):
raise TypeError('Object should be an instance of type SensorDataLoggers')
self.loggers.append(logger)
def notify_loggers(self, sensor, sample):
for logger in self.loggers:
logger.log(sensor, sample)
def run(self):
while not self.stop_flag:
for sensor, value in self.sensors.items():
last_update = value['last_update']
now = datetime.now()
if not last_update or (now - last_update).seconds >= value['interval']:
value['last_update'] = now
try:
sample = value['sensor'].sample
self.notify_loggers(sensor, sample)
except SensorException as e:
print(e)
time.sleep(1)
def stop(self):
if self.is_alive():
self.stop_flag = True
self.join()
self.stop_flag = False
| StarcoderdataPython |
1653005 | <filename>backend/coreapp/migrations/0001_initial.py
# Generated by Django 3.2.4 on 2021-08-26 10:41
import django.db.models.deletion
from django.db import migrations, models
from ..models.scratch import gen_scratch_id
class Migration(migrations.Migration):
initial = True
dependencies = [] # type: ignore
operations = [
migrations.CreateModel(
name="Asm",
fields=[
(
"hash",
models.CharField(max_length=64, primary_key=True, serialize=False),
),
("data", models.TextField()),
],
),
migrations.CreateModel(
name="Assembly",
fields=[
(
"hash",
models.CharField(max_length=64, primary_key=True, serialize=False),
),
("time", models.DateTimeField(auto_now_add=True)),
("arch", models.CharField(max_length=100)),
("as_opts", models.TextField(blank=True, max_length=1000, null=True)),
("elf_object", models.BinaryField(blank=True)),
(
"source_asm",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="coreapp.asm"
),
),
],
),
migrations.CreateModel(
name="Compilation",
fields=[
(
"hash",
models.CharField(max_length=64, primary_key=True, serialize=False),
),
("time", models.DateTimeField(auto_now_add=True)),
("compiler", models.CharField(max_length=100)),
("cc_opts", models.TextField(blank=True, max_length=1000, null=True)),
("source_code", models.TextField()),
("context", models.TextField(blank=True)),
("elf_object", models.BinaryField(blank=True)),
("stderr", models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name="Profile",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
],
),
migrations.CreateModel(
name="Scratch",
fields=[
(
"slug",
models.SlugField(
default=gen_scratch_id, primary_key=True, serialize=False
),
),
("creation_time", models.DateTimeField(auto_now_add=True)),
("last_updated", models.DateTimeField(auto_now=True)),
("compiler", models.CharField(blank=True, max_length=100)),
("cc_opts", models.TextField(blank=True, max_length=1000, null=True)),
("source_code", models.TextField(blank=True)),
("context", models.TextField(blank=True)),
("original_context", models.TextField(blank=True)),
(
"owner",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="coreapp.profile",
),
),
(
"parent",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="coreapp.scratch",
),
),
(
"target_assembly",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="coreapp.assembly",
),
),
],
),
]
| StarcoderdataPython |
1741125 | <reponame>crf1111/Bio-Informatics-Learning<filename>Bioinformatics-Armory/src/Protein_Translation.py
from Bio.Seq import translate
# List of all possible NCBI table codes
NCBI_LIST = [1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14, 15]
def translate_dna(dna, ncbi):
return translate(dna, stop_symbol = "", table = ncbi)
def translate_index(dna, protein):
for i in NCBI_LIST:
if translate_dna(dna, i) == protein:
return i
if __name__ == '__main__':
dna, protein = open('data/data.txt').read().strip().split('\n')
print translate_index(dna, protein) | StarcoderdataPython |
150767 | <gh_stars>1000+
def test_eval_mode(wdriver):
assert wdriver.execute_script("return window.rp.pyEval('1+1')") == 2
def test_exec_mode(wdriver):
assert wdriver.execute_script("return window.rp.pyExec('1+1')") is None
def test_exec_single_mode(wdriver):
assert wdriver.execute_script("return window.rp.pyExecSingle('1+1')") == 2
stdout = wdriver.execute_script(
"""
let output = "";
save_output = function(text) {{
output += text
}};
window.rp.pyExecSingle('1+1\\n2+2',{stdout: save_output});
return output;
"""
)
assert stdout == "2\n4\n"
| StarcoderdataPython |
12967 | <reponame>PhillSimonds/capirca
"""Google Cloud Hierarchical Firewall Generator.
Hierarchical Firewalls (HF) are represented in a SecurityPolicy GCP resouce.
"""
import copy
import re
from typing import Dict, Any
from absl import logging
from capirca.lib import gcp
from capirca.lib import nacaddr
class ExceededCostError(gcp.Error):
"""Raised when the total cost of a policy is above the maximum."""
class DifferentPolicyNameError(gcp.Error):
"""Raised when headers in the same policy have a different policy name."""
class ApiVersionSyntaxMap:
"""Defines the syntax changes between different API versions.
http://cloud/compute/docs/reference/rest/v1/firewallPolicies/addRule
http://cloud/compute/docs/reference/rest/beta/organizationSecurityPolicies/addRule
"""
SYNTAX_MAP = {
'beta': {
'display_name': 'displayName',
'dest_ip_range': 'destIpRanges',
'src_ip_range': 'srcIpRanges',
'layer_4_config': 'layer4Configs'
},
'ga': {
'display_name': 'shortName',
'dest_ip_range': 'destIpRanges',
'src_ip_range': 'srcIpRanges',
'layer_4_config': 'layer4Configs'
}
}
class Term(gcp.Term):
"""Used to create an individual term."""
ACTION_MAP = {'accept': 'allow', 'next': 'goto_next'}
_MAX_TERM_COMMENT_LENGTH = 64
_TARGET_RESOURCE_FORMAT = 'https://www.googleapis.com/compute/v1/projects/{}/global/networks/{}'
_TERM_ADDRESS_LIMIT = 256
_TERM_TARGET_RESOURCES_LIMIT = 256
_TERM_DESTINATION_PORTS_LIMIT = 256
def __init__(self,
term,
address_family='inet',
policy_inet_version='inet',
api_version='beta'):
super().__init__(term)
self.address_family = address_family
self.term = term
self.skip = False
self._ValidateTerm()
self.api_version = api_version
# This is to handle mixed, where the policy_inet_version is mixed,
# but the term inet version is either inet/inet6.
# This is only useful for term name and priority.
self.policy_inet_version = policy_inet_version
def _ValidateTerm(self):
if self.term.destination_tag or self.term.source_tag:
raise gcp.TermError('Hierarchical Firewall does not support tags')
if len(self.term.target_resources) > self._TERM_TARGET_RESOURCES_LIMIT:
raise gcp.TermError(
'Term: %s target_resources field contains %s resources. It should not contain more than "%s".'
% (self.term.name, str(len(
self.term.target_resources)), self._TERM_TARGET_RESOURCES_LIMIT))
for proj, vpc in self.term.target_resources:
if not gcp.IsProjectIDValid(proj):
raise gcp.TermError(
'Project ID "%s" must be 6 to 30 lowercase letters, digits, or hyphens.'
' It must start with a letter. Trailing hyphens are prohibited.' %
proj)
if not gcp.IsVPCNameValid(vpc):
raise gcp.TermError('VPC name "%s" must start with a lowercase letter '
'followed by up to 62 lowercase letters, numbers, '
'or hyphens, and cannot end with a hyphen.' % vpc)
if self.term.source_port:
raise gcp.TermError('Hierarchical firewall does not support source port '
'restrictions.')
if self.term.option:
raise gcp.TermError('Hierarchical firewall does not support the '
'TCP_ESTABLISHED option.')
if len(self.term.destination_port) > self._TERM_DESTINATION_PORTS_LIMIT:
raise gcp.TermError(
'Term: %s destination_port field contains %s ports. It should not contain more than "%s".'
% (self.term.name, str(len(
self.term.destination_port)), self._TERM_DESTINATION_PORTS_LIMIT))
# Since policy_inet_version is used to handle 'mixed'.
# We should error out if the individual term's inet version (address_family)
# is anything other than inet/inet6, since this should never happen
# naturally. Something has gone horribly wrong if you encounter this error.
if self.address_family == 'mixed':
raise gcp.TermError(
'Hierarchical firewall rule has incorrect inet_version for rule: %s' %
self.term.name)
def ConvertToDict(self, priority_index):
"""Converts term to dict representation of SecurityPolicy.Rule JSON format.
Takes all of the attributes associated with a term (match, action, etc) and
converts them into a dictionary which most closely represents
the SecurityPolicy.Rule JSON format.
Args:
priority_index: An integer priority value assigned to the term.
Returns:
A dict term.
"""
if self.skip:
return {}
rules = []
# Identify if this is inet6 processing for a term under a mixed policy.
mixed_policy_inet6_term = False
if self.policy_inet_version == 'mixed' and self.address_family == 'inet6':
mixed_policy_inet6_term = True
term_dict = {
'action': self.ACTION_MAP.get(self.term.action[0], self.term.action[0]),
'direction': self.term.direction,
'priority': priority_index
}
# Get the correct syntax for API versions.
src_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[
self.api_version]['src_ip_range']
dest_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[
self.api_version]['dest_ip_range']
layer_4_config = ApiVersionSyntaxMap.SYNTAX_MAP[
self.api_version]['layer_4_config']
target_resources = []
for proj, vpc in self.term.target_resources:
target_resources.append(self._TARGET_RESOURCE_FORMAT.format(proj, vpc))
if target_resources: # Only set when non-empty.
term_dict['targetResources'] = target_resources
term_dict['enableLogging'] = self._GetLoggingSetting()
# This combo provides ability to identify the rule.
term_name = self.term.name
if mixed_policy_inet6_term:
term_name = gcp.GetIpv6TermName(term_name)
raw_description = term_name + ': ' + ' '.join(self.term.comment)
term_dict['description'] = gcp.TruncateString(raw_description,
self._MAX_TERM_COMMENT_LENGTH)
filtered_protocols = []
for proto in self.term.protocol:
# ICMP filtering by inet_version
# Since each term has inet_version, 'mixed' is correctly processed here.
if proto == 'icmp' and self.address_family == 'inet6':
logging.warning(
'WARNING: Term %s is being rendered for inet6, ICMP '
'protocol will not be rendered.', self.term.name)
continue
if proto == 'icmpv6' and self.address_family == 'inet':
logging.warning(
'WARNING: Term %s is being rendered for inet, ICMPv6 '
'protocol will not be rendered.', self.term.name)
continue
if proto == 'igmp' and self.address_family == 'inet6':
logging.warning(
'WARNING: Term %s is being rendered for inet6, IGMP '
'protocol will not be rendered.', self.term.name)
continue
filtered_protocols.append(proto)
# If there is no protocol left after ICMP/IGMP filtering, drop this term.
# But only do this for terms that originally had protocols.
# Otherwise you end up dropping the default-deny.
if self.term.protocol and not filtered_protocols:
return {}
protocols_and_ports = []
if not self.term.protocol:
# Empty protocol list means any protocol, but any protocol in HF is
# represented as "all"
protocols_and_ports = [{'ipProtocol': 'all'}]
else:
for proto in filtered_protocols:
# If the protocol name is not supported, use the protocol number.
if proto not in self._ALLOW_PROTO_NAME:
proto = str(self.PROTO_MAP[proto])
logging.info('INFO: Term %s is being rendered using protocol number',
self.term.name)
proto_ports = {'ipProtocol': proto}
if self.term.destination_port:
ports = self._GetPorts()
if ports: # Only set when non-empty.
proto_ports['ports'] = ports
protocols_and_ports.append(proto_ports)
if self.api_version == 'ga':
term_dict['match'] = {layer_4_config: protocols_and_ports}
else:
term_dict['match'] = {'config': {layer_4_config: protocols_and_ports}}
# match needs a field called versionedExpr with value FIREWALL
# See documentation:
# https://cloud.google.com/compute/docs/reference/rest/beta/organizationSecurityPolicies/addRule
term_dict['match']['versionedExpr'] = 'FIREWALL'
ip_version = self.AF_MAP[self.address_family]
if ip_version == 4:
any_ip = [nacaddr.IP('0.0.0.0/0')]
else:
any_ip = [nacaddr.IPv6('::/0')]
if self.term.direction == 'EGRESS':
daddrs = self.term.GetAddressOfVersion('destination_address', ip_version)
# If the address got filtered out and is empty due to address family, we
# don't render the term. At this point of term processing, the direction
# has already been validated, so we can just log and return empty rule.
if self.term.destination_address and not daddrs:
logging.warning(
'WARNING: Term %s is not being rendered for %s, '
'because there are no addresses of that family.', self.term.name,
self.address_family)
return []
# This should only happen if there were no addresses set originally.
if not daddrs:
daddrs = any_ip
destination_address_chunks = [
daddrs[x:x + self._TERM_ADDRESS_LIMIT]
for x in range(0, len(daddrs), self._TERM_ADDRESS_LIMIT)
]
for daddr_chunk in destination_address_chunks:
rule = copy.deepcopy(term_dict)
if self.api_version == 'ga':
rule['match'][dest_ip_range] = [
daddr.with_prefixlen for daddr in daddr_chunk
]
else:
rule['match']['config'][dest_ip_range] = [
daddr.with_prefixlen for daddr in daddr_chunk
]
rule['priority'] = priority_index
rules.append(rule)
priority_index += 1
else:
saddrs = self.term.GetAddressOfVersion('source_address', ip_version)
# If the address got filtered out and is empty due to address family, we
# don't render the term. At this point of term processing, the direction
# has already been validated, so we can just log and return empty rule.
if self.term.source_address and not saddrs:
logging.warning(
'WARNING: Term %s is not being rendered for %s, '
'because there are no addresses of that family.', self.term.name,
self.address_family)
return []
# This should only happen if there were no addresses set originally.
if not saddrs:
saddrs = any_ip
source_address_chunks = [
saddrs[x:x + self._TERM_ADDRESS_LIMIT]
for x in range(0, len(saddrs), self._TERM_ADDRESS_LIMIT)
]
for saddr_chunk in source_address_chunks:
rule = copy.deepcopy(term_dict)
if self.api_version == 'ga':
rule['match'][src_ip_range] = [
saddr.with_prefixlen for saddr in saddr_chunk
]
else:
rule['match']['config'][src_ip_range] = [
saddr.with_prefixlen for saddr in saddr_chunk
]
rule['priority'] = priority_index
rules.append(rule)
priority_index += 1
return rules
def __str__(self):
return ''
class HierarchicalFirewall(gcp.GCP):
"""A GCP Hierarchical Firewall policy."""
SUFFIX = '.gcphf'
_ANY_IP = {
'inet': nacaddr.IP('0.0.0.0/0'),
'inet6': nacaddr.IP('::/0'),
}
_PLATFORM = 'gcp_hf'
_SUPPORTED_AF = frozenset(['inet', 'inet6', 'mixed'])
# Beta is the default API version. GA supports IPv6 (inet6/mixed).
_SUPPORTED_API_VERSION = frozenset(['beta', 'ga'])
_DEFAULT_MAXIMUM_COST = 100
def _BuildTokens(self):
"""Build supported tokens for platform.
Returns:
Tuple containing both supported tokens and sub tokens.
"""
supported_tokens, _ = super()._BuildTokens()
supported_tokens |= {
'destination_tag', 'expiration', 'source_tag', 'translated',
'target_resources', 'logging'
}
supported_tokens -= {
'destination_address_exclude', 'expiration', 'icmp_type',
'source_address_exclude', 'verbatim'
}
supported_sub_tokens = {'action': {'accept', 'deny', 'next'}}
return supported_tokens, supported_sub_tokens
def _TranslatePolicy(self, pol, exp_info):
"""Translates a Capirca policy into a HF-specific data structure.
Takes in a POL file, parses each term and populates the policy
dict. Each term in this list is a dictionary formatted according to
HF's rule API specification. Additionally, checks for its quota.
Args:
pol: A Policy() object representing a given POL file.
exp_info: An int that specifies number of weeks until policy expiry.
Raises:
ExceededCostError: Raised when the cost of a policy exceeds the default
maximum cost.
HeaderError: Raised when the header cannot be parsed or a header option is
invalid.
DifferentPolicyNameError: Raised when a header policy name differs from
other in the same policy.
"""
self.policies = []
policy = {
'rules': [],
'type': 'FIREWALL'
}
is_policy_modified = False
counter = 1
total_cost = 0
for header, terms in pol.filters:
if self._PLATFORM not in header.platforms:
continue
filter_options = header.FilterOptions(self._PLATFORM)
is_policy_modified = True
# Get term direction if set.
direction = 'INGRESS'
for i in self._GOOD_DIRECTION:
if i in filter_options:
direction = i
filter_options.remove(i)
# Get the address family if set.
address_family = 'inet'
for i in self._SUPPORTED_AF:
if i in filter_options:
address_family = i
filter_options.remove(i)
# Get the compute API version if set.
api_version = 'beta'
for i in self._SUPPORTED_API_VERSION:
if i in filter_options:
api_version = i
filter_options.remove(i)
break
# Find the default maximum cost of a policy, an integer, if specified.
max_cost = self._DEFAULT_MAXIMUM_COST
for opt in filter_options:
try:
max_cost = int(opt)
filter_options.remove(opt)
break
except ValueError:
continue
if max_cost > 65536:
raise gcp.HeaderError(
'Default maximum cost cannot be higher than 65536')
display_name = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['display_name']
# Get policy name and validate it to meet displayName requirements.
policy_name = header.FilterName(self._PLATFORM)
if not policy_name:
raise gcp.HeaderError(
'Policy name was not specified in header')
filter_options.remove(policy_name)
if len(policy_name) > 63:
raise gcp.HeaderError(
'Policy name "%s" is too long; the maximum number of characters '
'allowed is 63' % (policy_name))
if not bool(re.match('^[a-z]([-a-z0-9]*[a-z0-9])?$', policy_name)):
raise gcp.HeaderError(
'Invalid string for displayName, "%s"; the first character must be '
'a lowercase letter, and all following characters must be a dash, '
'lowercase letter, or digit, except the last character, which '
'cannot be a dash.' % (policy_name))
if display_name in policy and policy[display_name] != policy_name:
raise DifferentPolicyNameError(
'Policy names that are from the same policy are expected to be '
'equal, but %s is different to %s' %
(policy[display_name], policy_name))
policy[display_name] = policy_name
# If there are remaining options, they are unknown/unsupported options.
if filter_options:
raise gcp.HeaderError(
'Unsupported or unknown filter options %s in policy %s ' %
(str(filter_options), policy_name))
# Handle mixed for each indvidual term as inet and inet6.
# inet/inet6 are treated the same.
term_address_families = []
if address_family == 'mixed':
term_address_families = ['inet', 'inet6']
else:
term_address_families = [address_family]
for term in terms:
if term.stateless_reply:
continue
if gcp.IsDefaultDeny(term):
if direction == 'EGRESS':
if address_family != 'mixed':
# Default deny also gets processed as part of terms processing.
# The name and priority get updated there.
term.destination_address = [self._ANY_IP[address_family]]
else:
term.destination_address = [
self._ANY_IP['inet'], self._ANY_IP['inet6']
]
else:
if address_family != 'mixed':
term.source_address = [self._ANY_IP[address_family]]
else:
term.source_address = [
self._ANY_IP['inet'], self._ANY_IP['inet6']
]
term.name = self.FixTermLength(term.name)
term.direction = direction
# Only generate the term if it's for the appropriate platform
if term.platform:
if self._PLATFORM not in term.platform:
continue
if term.platform_exclude:
if self._PLATFORM in term.platform_exclude:
continue
for term_af in term_address_families:
rules = Term(
term,
address_family=term_af,
policy_inet_version=address_family,
api_version=api_version).ConvertToDict(priority_index=counter)
if not rules:
continue
for dict_term in rules:
total_cost += GetRuleTupleCount(dict_term, api_version)
if total_cost > max_cost:
raise ExceededCostError(
'Policy cost (%d) for %s reached the '
'maximum (%d)' %
(total_cost, policy[display_name], max_cost))
policy['rules'].append(dict_term)
counter += len(rules)
self.policies.append(policy)
# Do not render an empty rules if no policies have been evaluated.
if not is_policy_modified:
self.policies = []
if total_cost > 0:
logging.info('Policy %s quota cost: %d',
policy[display_name], total_cost)
def GetRuleTupleCount(dict_term: Dict[str, Any], api_version):
"""Calculate the tuple count of a rule in its dictionary form.
Quota is charged based on how complex the rules are rather than simply
limiting the number of rules.
The cost of a rule is the number of distinct protocol:port combinations plus
the number of IP addresses plus the number of targets.
Note: The goal of this function is not to determine if a rule is valid, but
to calculate its tuple count regardless of correctness.
Args:
dict_term: A dict object.
api_version: A string indicating the api version.
Returns:
int: The tuple count of the rule.
"""
layer4_count = 0
layer_4_config = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['layer_4_config']
dest_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['dest_ip_range']
src_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['src_ip_range']
targets_count = len(dict_term.get('targetResources', []))
if api_version == 'ga':
config = dict_term.get('match', {})
else:
config = dict_term.get('match', {}).get('config', {})
addresses_count = len(
config.get(dest_ip_range, []) + config.get(src_ip_range, []))
for l4config in config.get(layer_4_config, []):
for _ in l4config.get('ports', []):
layer4_count += 1
if l4config.get('ipProtocol'):
layer4_count += +1
return addresses_count + layer4_count + targets_count
| StarcoderdataPython |
3351162 | <filename>src/pymordemos/elliptic_oned.py
#!/usr/bin/env python
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from typer import Argument, Option, run
from pymor.analyticalproblems.domaindescriptions import LineDomain
from pymor.analyticalproblems.elliptic import StationaryProblem
from pymor.analyticalproblems.functions import ExpressionFunction, ConstantFunction, LincombFunction
from pymor.discretizers.builtin import discretize_stationary_cg, discretize_stationary_fv
from pymor.parameters.functionals import ProjectionParameterFunctional
def main(
problem_number: int = Argument(..., min=0, max=1, help='Selects the problem to solve [0 or 1].'),
n: int = Argument(..., help='Grid interval count.'),
fv: bool = Option(False, help='Use finite volume discretization instead of finite elements.'),
):
"""Solves the Poisson equation in 1D using pyMOR's builtin discreization toolkit."""
rhss = [ExpressionFunction('ones(x.shape[:-1]) * 10', 1, ()),
ExpressionFunction('(x - 0.5)**2 * 1000', 1, ())]
rhs = rhss[problem_number]
d0 = ExpressionFunction('1 - x', 1, ())
d1 = ExpressionFunction('x', 1, ())
f0 = ProjectionParameterFunctional('diffusionl')
f1 = 1.
problem = StationaryProblem(
domain=LineDomain(),
rhs=rhs,
diffusion=LincombFunction([d0, d1], [f0, f1]),
dirichlet_data=ConstantFunction(value=0, dim_domain=1),
name='1DProblem'
)
parameter_space = problem.parameters.space(0.1, 1)
print('Discretize ...')
discretizer = discretize_stationary_fv if fv else discretize_stationary_cg
m, data = discretizer(problem, diameter=1 / n)
print(data['grid'])
print()
print('Solve ...')
U = m.solution_space.empty()
for mu in parameter_space.sample_uniformly(10):
U.append(m.solve(mu))
m.visualize(U, title='Solution for diffusionl in [0.1, 1]')
if __name__ == '__main__':
run(main)
| StarcoderdataPython |
1688499 | from numpy import fabs, arange
class Integration:
EPS = 1e-6
area_domain = list()
area_codomain = list()
@staticmethod
def rectangles(func, a, b, iterations=180):
delta_x = (b - a) / iterations
x = [a + 0.5 * delta_x]
y = [func(x[0])]
current_area = 0.0
for iteration in range(iterations):
prev_area = current_area
current_area += func(x[iteration]) * delta_x
if fabs(current_area - prev_area) < Integration.EPS:
break
x.append(x[iteration] + delta_x)
y.append(func(x[iteration+1]))
# plt.plot(x, y, 'bs', color='red', label='Rectangles')
return current_area
@staticmethod
def trapezoids(func, a, b, iterations=150):
delta_x = (b - a) / iterations
x = [a + 0.5 * delta_x]
y = [func(x[0])]
current_area = 0.0
for iteration in range(iterations):
current_area += func(x[iteration])
x.append(x[iteration] + delta_x)
y.append(func(x[iteration + 1]))
# plt.plot(x, y, 'g^', color='yellow', label='Rectangles')
return delta_x * ((func(a) + func(b)) / 2 + current_area)
@staticmethod
def simpson(func, a, b, iterations=50):
delta_x = (b - a) / iterations
Integration.area_domain.extend(arange(a, b, delta_x))
x = a
current_area = func(a) + func(b)
for iteration in range(iterations):
x += delta_x
if iteration % 2:
current_area += 2.0 * func(x)
else:
current_area += 4.0 * func(x)
Integration.area_codomain.append((delta_x / 3) * current_area)
return (delta_x/3.0)*current_area
| StarcoderdataPython |
98758 | import numpy as np
from scipy import signal
def random_spikes(size):
"""
Generate zeros and ones in an array of size=size.
probabilities = [probability 0 will appear, probability 1 will appear]
"""
spikes = np.random.choice(2, size, p=[0.99, 0.01])
# Get rid of spikes that are on top of each other
for i, s in enumerate(spikes):
if i < len(spikes) - 1 and (spikes[i] == 1 and spikes[i + 1] == 1):
spikes[i] = 0
return spikes
| StarcoderdataPython |
1794016 |
'''
import Monsoon.HVPM as HVPM
import Monsoon.sampleEngine as sampleEngine
import Monsoon.Operations as op
import pyqtgraph as pg
getted_num = 0
usb_data = []
Mon = HVPM.Monsoon()
Mon.setup_usb()
Mon.setVout(4.0)
engine = sampleEngine.SampleEngine(Mon)
engine.enableChannel(sampleEngine.channels.USBCurrent)
Mon.setUSBPassthroughMode(op.USB_Passthrough.On)
engine.disableCSVOutput()#enableCSVOutput('esssss.txt')
engine.ConsoleOutput(True)
numSamples = sampleEngine.triggers.SAMPLECOUNT_INFINITE
engine.startSampling(numSamples) ##start sampling
''' | StarcoderdataPython |
1665729 | <filename>sphinxcontrib/varlinks.py
# -*- coding: utf-8 -*-
"""
Variable hyperlinks
~~~~~~~~~~~~~~~~~~~
Extension that adds support for substitutions in hyperlinks.
Substitutions are supported in both the link's label
and target.
Eg. `Download |subproject| <http://example.com/|subproject|/>`_.
:copyright: Copyright 2017 by <NAME>.
:license: BSD, see LICENSE for details.
"""
import re
from docutils import nodes
from docutils.transforms import Transform
__all__ = ['setup']
__version__ = '0.1.4'
class LinkSubstitutionTransform(Transform):
subst_pattern = r'\|([^|]+)\|'
def _maybe_hyperlink(self, node):
return isinstance(node, (nodes.reference, nodes.target))
class LinkSubstitutionPhase1(LinkSubstitutionTransform):
# This transformation is applied very early.
# At a minimum, it must be run before substitutions are applied.
default_priority = 10
def apply(self):
"""Create substitution nodes for hyperlinks"""
# In this phase, we look for hyperlinks (references nodes)
# that contain substitutions (of the form "|foo|").
# We then add actual "substitution"s nodes to those references,
# so that they can be replaced by the substitution processor.
subst_re = re.compile(self.subst_pattern)
for link in self.document.traverse(self._maybe_hyperlink):
if 'refuri' not in link:
continue
# Note: "target" nodes do not have a "name" attribute.
if '|' not in link['refuri'] and '|' not in link.get('name', ''):
continue
# This list acts as a cache so that only one substitution node
# is added as a child for each substitution name.
substitutions = []
matches = subst_re.findall(link['refuri']) + \
subst_re.findall(link.get('name', ''))
for subref_text in matches:
if subref_text in substitutions:
continue
substitutions.append(subref_text)
subref_node = nodes.substitution_reference(subref_text)
link.append(subref_node)
self.document.note_substitution_ref(subref_node, subref_text)
# Build a map of substitutions names to child indices
# (minus one since the actual link label is in link[0]).
link['varlinks'] = \
dict(zip(substitutions, range(len(substitutions))))
class LinkSubstitutionPhase2(LinkSubstitutionTransform):
# Apply this transformation right after substitutions have been applied.
default_priority = 221
def _replace(self, mapping, sub, offset):
def inner(match):
# The match's group is the full substitution name (eg. "|foo|").
name = match.group()
# "sub" contains the actual replacement nodes,
# but for hyperlink references, the first element
# is actually the link's label, so we need to offset that.
res = sub[mapping[name[1:-1]] + offset]
# Lastly, we do not want the full node,
# only its text representation.
return res.astext()
return inner
def apply(self):
"""Replace substitutions in hyperlinks with their contents"""
# In this phase, we replace the substitutions in hyperlinks
# with the contents of the sub-nodes introduced during phase 1.
# We also remove those temporary nodes from the tree.
subst_re = re.compile(self.subst_pattern)
# Apply the substitutions to hyperlink references.
for link in self.document.traverse(nodes.reference):
substitutions = link.get('varlinks')
if not substitutions:
continue
replacer = self._replace(substitutions, link.children, 1)
link['refuri'] = subst_re.sub(replacer, link['refuri'])
content = subst_re.sub(replacer, link[0])
# Cleanup the temporary nodes and recreate the node's content.
link.clear()
del link['varlinks']
link.append(nodes.Text(content))
# Do the same with hyperlink targets.
for link in self.document.traverse(nodes.target):
substitutions = link.get('varlinks')
if not substitutions:
continue
replacer = self._replace(substitutions, link.children, 0)
link['refuri'] = subst_re.sub(replacer, link['refuri'])
# Cleanup the temporary nodes.
link.clear()
del link['varlinks']
def setup(app):
app.add_transform(LinkSubstitutionPhase1)
app.add_transform(LinkSubstitutionPhase2)
| StarcoderdataPython |
1688088 | <gh_stars>1-10
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsProcessingException,
QgsProcessingAlgorithm,
QgsProcessingParameterNumber,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterRasterDestination,
QgsProcessingParameterEnum,
QgsProcessingParameterBoolean)
from qgis import processing
import numpy as np
import rvt.default
import rvt.vis
class RVTHillshade(QgsProcessingAlgorithm):
"""
RVT Hillshade.
"""
# processing function parameters
INPUT = 'INPUT'
VE_FACTOR = 'VE_FACTOR'
SUN_AZIMUTH = 'SUN_AZIMUTH'
SUN_ELEVATION = 'SUN_ELEVATION'
SAVE_AS_8BIT = "SAVE_AS_8BIT"
OUTPUT = 'OUTPUT'
def tr(self, string):
"""
Returns a translatable string with the self.tr() function.
"""
return QCoreApplication.translate('Processing', string)
def createInstance(self):
return RVTHillshade()
def name(self):
"""
Returns the algorithm name, used for identifying the algorithm. This
string should be fixed for the algorithm, and must not be localised.
The name should be unique within each provider. Names should contain
lowercase alphanumeric characters only and no spaces or other
formatting characters.
"""
return 'rvt_hillshade'
def displayName(self):
"""
Returns the translated algorithm name, which should be used for any
user-visible display of the algorithm name.
"""
return self.tr('RVT Hillshade')
def shortHelpString(self):
"""
Returns a localised short helper string for the algorithm. This string
should provide a basic description about what the algorithm does and the
parameters and outputs associated with it..
"""
return self.tr("Relief visualization toolbox, Hillshade. Calculates hillshade.")
def initAlgorithm(self, config=None):
"""
Here we define the inputs and output of the algorithm, along
with some other properties.
"""
self.addParameter(
QgsProcessingParameterRasterLayer(
self.INPUT,
self.tr('Input DEM raster layer'),
[QgsProcessing.TypeRaster]
)
)
self.addParameter(
QgsProcessingParameterNumber(
name="VE_FACTOR",
description="Vertical exaggeration factor",
type=QgsProcessingParameterNumber.Double,
defaultValue=1,
minValue=-1000,
maxValue=1000
)
)
self.addParameter(
QgsProcessingParameterNumber(
name="SUN_AZIMUTH",
description="Solar azimuth angle (clockwise from North) in degrees",
type=QgsProcessingParameterNumber.Double,
defaultValue=315,
minValue=0,
maxValue=360
)
)
self.addParameter(
QgsProcessingParameterNumber(
name="SUN_ELEVATION",
description="Solar vertical angle (above the horizon) in degrees",
type=QgsProcessingParameterNumber.Double,
defaultValue=35,
minValue=0,
maxValue=90
)
)
self.addParameter(
QgsProcessingParameterBoolean(
name="SAVE_AS_8BIT",
description="Save as 8bit raster",
defaultValue=False
)
)
self.addParameter(
QgsProcessingParameterRasterDestination(
self.OUTPUT,
self.tr('Output visualization raster layer')
)
)
def processAlgorithm(self, parameters, context, feedback):
"""
Here is where the processing itself takes place.
"""
dem_layer = self.parameterAsRasterLayer(
parameters,
self.INPUT,
context
)
ve_factor = float(self.parameterAsDouble(
parameters,
self.VE_FACTOR,
context
))
sun_azimuth = float(self.parameterAsDouble(
parameters,
self.SUN_AZIMUTH,
context
))
sun_elevation = float(self.parameterAsDouble(
parameters,
self.SUN_ELEVATION,
context
))
save_8bit = bool(self.parameterAsBool(
parameters,
self.SAVE_AS_8BIT,
context
))
visualization_path = (self.parameterAsOutputLayer(
parameters,
self.OUTPUT,
context,
))
dem_path = str(dem_layer.source())
dict_arr_dem = rvt.default.get_raster_arr(dem_path)
resolution = dict_arr_dem["resolution"] # (x_res, y_res)
dem_arr = dict_arr_dem["array"]
no_data = dict_arr_dem["no_data"]
visualization_arr = rvt.vis.hillshade(dem=dem_arr, resolution_x=resolution[0], resolution_y=resolution[1],
sun_azimuth=sun_azimuth, sun_elevation=sun_elevation, ve_factor=ve_factor,
no_data=no_data)
if not save_8bit:
rvt.default.save_raster(src_raster_path=dem_path, out_raster_path=visualization_path,
out_raster_arr=visualization_arr, e_type=6, no_data=np.nan)
else:
visualization_8bit_arr = rvt.default.DefaultValues().float_to_8bit(float_arr=visualization_arr,
vis="hillshade")
rvt.default.save_raster(src_raster_path=dem_path, out_raster_path=visualization_path,
out_raster_arr=visualization_8bit_arr, e_type=1, no_data=np.nan)
result = {self.OUTPUT: visualization_path}
return result
| StarcoderdataPython |
68942 | <reponame>yay4ya/catflap<gh_stars>0
from catflap.data import Message
from catflap.proxies.proxy import Proxy
@Proxy.register("stdout")
class StdoutProxy(Proxy):
def post(self, message: Message) -> None:
output = f"{message.datetime} [{message.author.name}] - {message.message}"
print(output)
def post_system_message(self, text: str) -> None:
output = f"[ SYSTEM MESSAGE ] {text}"
print(output)
| StarcoderdataPython |
1759622 | <filename>modoboa/transport/factories.py
"""Transport factories."""
import factory
from . import models
class TransportFactory(factory.django.DjangoModelFactory):
"""Factory for Transport."""
class Meta:
model = models.Transport
django_get_or_create = ("pattern", )
pattern = factory.Sequence(lambda n: "transport{}".format(n))
service = "relay"
next_hop = "[external.host.tld]:25"
| StarcoderdataPython |
3252358 | import hashlib
import uuid
def get_uuid():
"""
生成UUID
:return: UUID
"""
uuid_str = str(uuid.uuid4())
md5 = hashlib.md5()
md5.update(uuid_str.encode('utf-8'))
return md5.hexdigest()
| StarcoderdataPython |
3285035 | import lyricsgenius
from discord.ext import commands
import discord
import traceback
from EZPaginator import Paginator
genius = lyricsgenius.Genius('')
class Lyric(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(aliases=["가사", "ly", "ㅣㅛ"])
async def lyric(self, ctx , * , song_name="Counting Stars"):
try:
song = genius.search_song(song_name)
print(song.lyrics)
except:
embed = discord.Embed(
title = f"{song_name} 의 가사",
description = f"에러: {traceback.format_exc()}",
colour = discord.Colour.red()
)
await ctx.send(embed=embed)
else:
embed = discord.Embed(title = f"{song_name} 의 가사", colour = discord.Colour.green())
try:
embed.description = song.lyrics
await ctx.send(embed=embed)
except:
embed.description = song.lyrics[:1900]
embed2 = discord.Embed(
title = f"{song_name} 의 가사",
description = song.lyrics[1900:],
colour = discord.Colour.green()
)
msg = await ctx.send(embed=embed)
embeds = [embed, embed2]
page = Paginator(self.bot, msg, embeds=embeds)
await page.start()
def setup(bot):
bot.add_cog(Lyric(bot))
| StarcoderdataPython |
1665219 | <gh_stars>0
#!/usr/bin/env python
# class Parent(object):
#
# def __call__(self, name):
# print("hello world, ", name)
#
#
# class Person(Parent):
#
# def __call__(self, someinfo):
# super(Person, self).__call__(someinfo) # znaczy ze bierze pochodna od Parent czyli tutaj Person
#
# p = Person()
# p("info")
#
# eqiwalent to :
class Parent(object):
def __call__(self, name):
print("hello world, ", name)
def someMethod(self):
print("Method in Parent")
class Person(Parent):
def __call__(self, someinfo):
super().__call__(someinfo) # znaczy ze bierze pochodna od Parent czyli tutaj Person
p = Person()
p("info")
class Man(Person):
def __call__(self, Name):
super(Person, self).__call__(Name) # referujemy do ojca Person czyli do Parent: spodziewam sie hello word
g = Man()
g('Przemek')
g.someMethod() | StarcoderdataPython |
1693218 | # %%
from copy import deepcopy
import enum
from re import T
from numpy.core.defchararray import mod
from numpy.core.function_base import linspace
from scipy.ndimage.measurements import label
from scipy.sparse import data
from qutip.visualization import plot_fock_distribution
from qutip.states import coherent
import hbar_compiler
import hbar_processor
import hbar_simulation_class
import hbar_fitting
import numpy as np
import matplotlib.pyplot as plt
from importlib import reload
from qutip.qip.circuit import Measurement, QubitCircuit
import qutip as qt
from qutip import basis
%matplotlib qt
#%%
reload(hbar_compiler)
reload(hbar_processor)
reload(hbar_simulation_class)
reload(hbar_fitting)
#qubit dimission, we only consider g and e states here
qubit_dim=2
#phonon dimission
phonon_dim=20
#how many phonon modes we consider here
phonon_num=2
#the frequency difference between qubit and phonon (qubit minus phonon)
qubit_freq=5970.04
phonon_freq=5974.11577
interaction_1_freq=5972.2
interaction_3_freq=5972.95
qubit_phonon_detuning=qubit_freq-phonon_freq
#dimission of the system, qubit dimission + phonons dimission
dims=[qubit_dim]+[phonon_dim]+[2]
#T1 T2 is the average number of the two interaction point
t1=[(13.1+9.7)/2]+[81]*(phonon_num)
t2=[(9.8+10.1)/2]+[134]*(phonon_num)
#pi time list for different fock state
pi_time_list=[0.9616123677058709,
0.679329038657111,
0.5548147810734809,
0.48027408123596266]
pi_time_list=[0.899+0.08,0.623+0.08,0.525+0.08]
#set up the processor and compiler,qb5d97 is the qubit we play around
#Omega=50 because we use width of pi pulse as 20ns
qb_processor=hbar_processor.HBAR_processor((phonon_num+1),t1,t2,dims, Omega=50/(2*np.pi),g=[0.26,0.099],\
rest_place=qubit_phonon_detuning,FSR=1.1)
qb_compiler = hbar_compiler.HBAR_Compiler(qb_processor.num_qubits,\
qb_processor.params, qb_processor.pulse_dict)
qb_simulation=hbar_simulation_class.Simulation(qb_processor,qb_compiler)
qb_simulation.swap_time_list=pi_time_list
#%%
qb_simulation.phonon_rabi_measurement(1.1)
# %%
'''
qubit T1 measurement to test code
'''
qb_simulation.qubit_T1_measurement()
# %%
'''
For higher order phonon rabi
'''
pi_time_list=[]
for n in range(4):
qb_simulation.t_list=np.linspace(0.01,10,100)
qb_simulation.generate_fock_state(n)
qb_simulation.phonon_rabi_measurement()
pi_time_list.append(qb_simulation.fit_result[-1]['swap_time'])
qb_simulation.swap_time_list=pi_time_list
# %%
#first,let's calibrate the artificial detuning
qb_simulation.t_list=np.linspace(0.01,10,101)
qb_simulation.ideal_phonon_fock(0)
qb_simulation.qubit_ramsey_measurement(artificial_detuning=interaction_1_freq-qubit_freq+0.5,
starkshift_amp=interaction_1_freq-phonon_freq,if_fit=True)
extra_detuning=qb_simulation.fit_result[-1]['delta']-0.5
#%%
#test parity measurement,with generated phonon fock
y_2d_list=[]
for i in range(5):
qb_simulation.generate_fock_state(i)
qb_simulation.qubit_ramsey_measurement(artificial_detuning=interaction_1_freq-qubit_freq-extra_detuning,
starkshift_amp=interaction_1_freq-phonon_freq,if_fit=False)
y_2d_list.append(qb_simulation.y_array)
figure, ax = plt.subplots(figsize=(8,6))
for i in range(5):
ax.plot(qb_simulation.t_list,y_2d_list[i],label='Fock{}'.format(i))
plt.legend()
# %%
#plot the sum parity,find the parity measurement time
sum=np.zeros(len(qb_simulation.t_list))
figure, ax = plt.subplots(figsize=(8,6))
for i,y in enumerate(y_2d_list):
sum=sum+(-1)**(i)*(y-0.5)*2
ax.plot(qb_simulation.t_list,sum)
t_parity=qb_simulation.t_list[np.argsort(sum)[-1]]
ax.plot([t_parity,t_parity],[np.min(sum),np.max(sum)])
ax.set_title('best parity measurement time is {} us'.format(t_parity))
plt.show()
# %%
'''
calculate the driving amplitude in the real measurement
drive amplitude in the experiment is 0.06
for qubit operation, pi_amp=0.64, width of the pi pulse is 20us
'''
Omega_drive=50/(2*np.pi)/2.49986*np.pi/2/0.64*0.06
#%%
'''
probe phonon, because qubit and phonon dispersive coupling, phonon frequency changed
'''
param_probe={'Omega':0.1,
'sigma': 0.5,
'duration':5,
'amplitude_starkshift':0}
qb_simulation.detuning_list=np.linspace(-qubit_phonon_detuning-0.05,-qubit_phonon_detuning+0.1,20)
qb_simulation.spec_measurement(param_probe,readout_type='read phonon')
#the phonon frequency move to 4.0915MHz.
#%%
'''
try find a good driving amplitude
'''
param_drive={'Omega':0.52,
'sigma':0.5,
'duration':12,
'rotate_direction':np.pi,
'detuning':-qubit_phonon_detuning
}
qb_simulation.generate_coherent_state(param_drive)
qb_simulation.fit_wigner()
starkshift_param={'detuning':interaction_1_freq-phonon_freq,
'duration':7}
abs(qb_simulation.alpha)
#%%
'''
see the linear relation between drive amplitude and generated alpha
'''
drive_amp_list=np.linspace(0.01,0.5,10)
alpha_list=[]
for drive_amp in drive_amp_list:
param_drive={'Omega':drive_amp,
'sigma':0.5,
'duration':12,
'rotate_direction':np.pi,
'detuning':-qubit_phonon_detuning
}
qb_simulation.generate_coherent_state(param_drive)
qb_simulation.fit_wigner()
alpha_list.append( abs(qb_simulation.alpha))
fig, ax=plt.subplots(figsize=(8,6))
ax.plot(drive_amp_list,alpha_list)
ax.set_xlabel('drive amp')
ax.set_ylabel('fitted alpha')
plt.legend()
fig.show()
#%%
'''
test wigner 1D measurement
'''
calibration_phase=0.131
qb_simulation.calibration_phase=calibration_phase
qb_simulation.generate_fock_state(0)
qb_simulation.wigner_measurement_1D(param_drive,starkshift_param,steps=9,
phase_calibration=False,if_echo=True,first_pulse_phases=[0,np.pi/2,np.pi,np.pi/2*3])
# np.save('simulated_data//cut_fock1.npy',qb_simulation.y_array)
#%%
'''
load measurement data,
plot measurement, simulated and ideal together
'''
x_simulated=np.load('simulated_data//axis_v3.npy')
y_simulated=np.load('simulated_data//fock_{}_wigner_v3.npy'.format(2))[20]
x_ideal=np.linspace(-2,2,40)
y_ideal=qt.wigner(qt.fock(10,0),x_ideal,[0])[0]*np.pi/2
x_measurement=np.linspace(-0.06,0.06,40)*32.1/0.9
y_measurement=np.load('wigner_data//fock_{}_measured_data.npy'.format(2))[20]/4
fig, ax=plt.subplots(figsize=(8,6))
ax.plot(x_ideal,y_ideal,label='ideal')
ax.plot(x_simulated,y_simulated,label='simulated')
ax.plot(x_measurement,y_measurement,label='measurement')
plt.legend()
fig.show()
# %%
'''
calibrate the relation between phase of second half pi pulse and time
'''
phase_list=[]
time_list=[7,7.05,7.1]
for time in time_list:
starkshift_param={'detuning':interaction_1_freq-phonon_freq,
'duration':time}
qb_simulation.wigner_measurement_1D(param_drive,starkshift_param,steps=30,
phase_calibration=True,if_echo=True)
phase_list.append(qb_simulation.fit_result[-1]['phi'])
phase_fit=np.polyfit(time_list,phase_list,1)
#%%
phase_fit
#%%
'''
generate the list of phase for second half pi pulse
'''
# phase_fit=[ 12.14661223, -84.90036848]
phase_fit=[12.15574521,-84.91190764]
duration_list=np.linspace(6,8,50)
calibration_phase_list=phase_fit[0]*duration_list+phase_fit[1]
# %%
#calibrate the wigner background with time
param_drive={'Omega':0.7,
'sigma':0.5,
'duration':12,
'rotate_direction':0,
'detuning':-qubit_phonon_detuning
}
qb_simulation.generate_fock_state(0)
qb_simulation.wigner_measurement_time_calibrate(param_drive,duration_list,
interaction_1_freq-phonon_freq,
calibration_phases=calibration_phase_list,
if_echo=True,
first_pulse_phases=[0,np.pi/2,np.pi,np.pi/2*3])
#%%
fig, ax=plt.subplots(figsize=(8,6))
ax.plot(qb_simulation.x_array,y_container[0])
ax.plot([duration_list[0],duration_list[-1]],[0.5,0.5])
fig.show()
#%%
y_container=[]
for i,duration in enumerate(duration_list):
qb_simulation.calibration_phase=calibration_phase_list[i]
qb_simulation.generate_fock_state(0)
starkshift_param={'detuning':interaction_1_freq-phonon_freq,
'duration':duration}
qb_simulation.wigner_measurement_1D(param_drive,starkshift_param,steps=9,
phase_calibration=False,if_echo=True,first_pulse_phases=[0,np.pi/2,np.pi,np.pi/2*3])
y_container.append(qb_simulation.y_array)
#%%
zz_data=np.array(y_container)
def axis_for_mesh(axis):
begin=axis[0]
end=axis[-1]
length=len(axis)
step=axis[1]-axis[0]
begin=begin-step/2
end=end+step/2
length=length+1
return np.linspace(begin,end,length)
xx,yy=np.meshgrid(axis_for_mesh(qb_simulation.x_array),axis_for_mesh(duration_list))
fig, ax1, = plt.subplots(1, 1, figsize=(6,6))
im = ax1.pcolormesh(yy,xx, zz_data, cmap='seismic')
fig.colorbar(im)
fig.legend()
fig.show()
# %%
#plot 2D wigner
wigner_data_list=[]
phase_fit=[ 12.14661223, -84.90036848]
parity_time_list=[7.075]
fock_number_list=[2]
for i,fock_number in enumerate(fock_number_list):
print(param_drive)
starkshift_param={'detuning':interaction_1_freq-phonon_freq,
'duration':parity_time_list[i]}
calibration_phase=phase_fit[0]*starkshift_param['duration']+phase_fit[1]
qb_simulation.calibration_phase=calibration_phase
# qb_simulation.generate_fock_state(fock_number,0.85)
qb_simulation.generate_fock_state(fock_number,direction_phase=1.32)
wigner_data=qb_simulation.wigner_measurement_2D(param_drive,starkshift_param,steps=15,
if_echo=True,first_pulse_phases=[0,np.pi/2,np.pi,np.pi/2*3])
wigner_data_list.append(qb_simulation.y_array)
# np.save('simulated_data//fock_{}_wigner_v9.npy'.format(fock_number),wigner_data)
# %%
#phase calibration
phase_fit=[ 12.14661223, -84.90036848]
data_list=[]
phase_list=np.linspace(0,np.pi/2,10)
starkshift_param={'detuning':interaction_1_freq-phonon_freq,
'duration':7.075}
calibration_phase=1.03
qb_simulation.calibration_phase=calibration_phase
for phase in phase_list:
qb_simulation.generate_fock_state(2,phase)
qb_simulation.wigner_measurement_1D(param_drive,starkshift_param,steps=20,
phase_calibration=False,if_echo=True,first_pulse_phases=[0,np.pi/2,np.pi,np.pi/2*3])
data_list.append(qb_simulation.y_array)
fig, ax=plt.subplots(figsize=(8,6))
for i,phase in enumerate(phase_list):
ax.plot(qb_simulation.x_array,data_list[i],label=phase)
plt.legend()
fig.show()
| StarcoderdataPython |
3349416 | from nltk.corpus import wordnet
from nltk.tokenize import word_tokenize
from random import randint
import nltk.data
class Replacer(object):
def __init__(self, text):
self.text = text
self.output = ""
def tokenize(self):
# Load the pretrained neural net
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# Get the list of words from the entire text
words = word_tokenize(self.text)
# Identify the parts of speech
tagged = nltk.pos_tag(words)
#print tagged
for i in range(0,len(words)):
replacements = []
# Only replace nouns with nouns, vowels with vowels etc.
for syn in wordnet.synsets(words[i]):
# Do not attempt to replace proper nouns or determiners
if tagged[i][1] == 'NNP' or tagged[i][1] == 'DT':
break
# The tokenizer returns strings like NNP, VBP etc
# but the wordnet synonyms has tags like .n.
# So we extract the first character from NNP ie n
# then we check if the dictionary word has a .n. or not
word_type = tagged[i][1][0].lower()
if syn.name().find("."+word_type+"."):
# extract the word only
r = syn.name()[0:syn.name().find(".")]
replacements.append(r)
if len(replacements) > 0:
# Choose a random replacement
replacement = replacements[randint(0,len(replacements)-1)]
self.output = self.output + " " + replacement
else:
# If no replacement could be found, then just use the
# original word
self.output = self.output + " " + words[i]
def get_output(self):
#print the result
return (self.output)
'''
r = Replacer("Your desire is inside spiritual fulfillment")
r.tokenize()
print r.get_output()
''' | StarcoderdataPython |
3387510 | <gh_stars>1-10
# pyjam build file. See https://github.com/kaspar030/pyjam for info.
default.CFLAGS = "-O3 -DUHCP_SYSTEM_LINUX -DUHCP_SERVER"
Main("uhcpd")
| StarcoderdataPython |
1762600 | from django.contrib.auth import login, authenticate, logout
from django.shortcuts import render
from django.views import View
from carts.utils import merge_cart_cookie_to_redis
from goods.models import SKU
from meiduo_mall.utils.views import LoginRequiredMixin
from .models import User, Address
from django.http import JsonResponse
import json, re
from django_redis import get_redis_connection
from celery_tasks.email.tasks import send_verify_email
class SaveHistoryView(View):
def post(self, request):
'''保存用户浏览记录'''
# 1.接收json参数
dict = json.loads(request.body.decode())
sku_id = dict.get('sku_id')
# 2.检验sku_id是否正确
try:
SKU.objects.get(id=sku_id)
except Exception as e:
return JsonResponse({'code':400,
'errmsg':'sku_id验证失败'})
# 3.链接redis, 获取链接对象
redis_conn = get_redis_connection('history')
pl = redis_conn.pipeline()
user_id = request.user.id
# 4.删除redis中的sku_id数据
pl.lrem('history_%s' % user_id, 0, sku_id)
# 5.增加sku_id数据
pl.lpush('history_%s' % user_id, sku_id)
# 6.保留5个商品数据
pl.ltrim('history_%s' % user_id, 0, 4)
pl.execute()
# 7.返回结果
return JsonResponse({'code': 0,
'errmsg': 'ok'})
class ChangePasswordView(View):
def put(self, request):
'''修改密码'''
# 1.接收json的参数
dict = json.loads(request.body.decode())
old_password = dict.get('old_password')
new_password = dict.get('new_password')
new_password2 = dict.get('new_password2')
# 2.整体检验, 查看三个参数是否齐全
if not all([old_password, new_password, new_password2]):
return JsonResponse({'code':400,
'errmsg':'缺少必传参数'})
# 3.验证老密码是否正确, 获取结果
result = request.user.check_password(old_password)
# 4.判断结果是否存在, 如果不存在, 返回
if not result:
return JsonResponse({'code': 400,
'errmsg': '老密码错误'})
# 5.检验新密码是否满足格式要求
if not re.match(r'^[0-9A-Za-z]{8,20}$', new_password):
return JsonResponse({'code': 400,
'errmsg': '新密码不满足格式要求'})
# 6.检验两个新密码是否一致
if new_password != <PASSWORD>:
return JsonResponse({'code': 400,
'errmsg': '两个密码不一致'})
# 7.设置数据库中的新密码
try:
request.user.set_password(<PASSWORD>)
# 8.保存
request.user.save()
except Exception as e:
return JsonResponse({'code': 400,
'errmsg': '保存密码失败'})
# 10.清除状态: session + cookie
logout(request)
response = JsonResponse({'code': 0,
'errmsg': '保存密码成功'})
response.delete_cookie('username')
# 9.返回结果json
return response
class UpdateTitleAddressView(View):
def put(self, request, address_id):
'''修改地址标题'''
# 1.接收json参数
dict = json.loads(request.body.decode())
title = dict.get('title')
# 2.根据address_id获取对应的对象
try:
address = Address.objects.get(id=address_id)
# 3.把该对象的地址改为前端传入的地址
address.title = title
# 4.保存结果
address.save()
except Exception as e:
return JsonResponse({'code':400,
'errmsg':'更新地址标题出错'})
# 5.返回
return JsonResponse({'code': 0,
'errmsg': 'ok'})
class ChangeDefaultAddressView(View):
def put(self, request, address_id):
'''设置默认地址'''
try:
# 1.从mysql中根据address_id获取对象
# address = Address.objects.get(id=address_id)
# 2.把该对象赋给user中的default_address字段
# request.user.default_address = address
request.user.default_address_id = address_id
# 3.保存
request.user.save()
except Exception as e:
return JsonResponse({'code':400,
'errmsg':'设置默认地址数据库出错'})
# 4.返回响应
return JsonResponse({'code':0,
'errmsg':'ok'})
class UpdateDestroyAddressView(View):
def put(self, request, address_id):
'''更新某一个指定的地址'''
# 1.接收参数(json)
dict = json.loads(request.body.decode())
receiver = dict.get('receiver')
province_id = dict.get('province_id')
city_id = dict.get('city_id')
district_id = dict.get('district_id')
place = dict.get('place')
mobile = dict.get('mobile')
# 底下的两个字段可以为空:
phone = dict.get('tel')
email = dict.get('email')
# 2.检验参数(整体 + 局部)
if not all([receiver, province_id, city_id, district_id, place, mobile]):
return JsonResponse({'code': 400,
'errmsg': '必传参数有为空的'})
if not re.match(r'^1[3-9]\d{9}$', mobile):
return JsonResponse({'code': 400,
'errmsg': 'mobile格式不正确'})
if phone:
if not re.match(r'^(0[0-9]{2,3}-)?([2-9][0-9]{6,7})+(-[0-9]{1,4})?$', phone):
return JsonResponse({'code': 400,
'errmsg': 'tel格式不正确'})
if email:
if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
return JsonResponse({'code': 400,
'errmsg': 'email格式不正确'})
# 3.更新数据库中某一个地址记录 (先查询, 再更新)
try:
Address.objects.filter(id=address_id).update(
user=request.user,
province_id=province_id,
city_id=city_id,
district_id=district_id,
title=receiver,
receiver=receiver,
place=place,
mobile=mobile,
phone=phone,
email=email
)
address = Address.objects.get(id=address_id)
except Exception as e:
return JsonResponse({'code': 400,
'errmsg': '更新失败'})
# 4.拼接json参数, 返回
dict = {
'id': address.id,
'receiver': address.receiver,
'province': address.province.name,
'city': address.city.name,
'district': address.district.name,
'place': address.place,
'mobile': address.mobile,
'tel': address.phone,
'email': address.email,
'title':address.title
}
return JsonResponse({'code': 0,
'errmsg': 'ok',
'address': dict})
def delete(self, request, address_id):
'''删除对应的地址'''
# 1.通过address_id拿取数据库中对应的地址对象
try:
address = Address.objects.get(id=address_id)
# 2.把该地址对象的is_deleted改为true
address.is_deleted = True
# 3.保存
address.save()
except Exception as e:
return JsonResponse({'code':400,
'errmsg':'修改数据库失败'})
# 4.返回结果
return JsonResponse({'code':0,
'errmsg':'ok'})
class AddressView(View):
def get(self, request):
'''返回地址信息'''
# 1.从mysql中获取当前用户没有删除的地址
try:
addresses = Address.objects.filter(user=request.user,
is_delete=False)
list = []
# 2.遍历所有地址, 获取每一个地址
for address in addresses:
# 3.把每一个地址信息 ---> {} ---> []
dict = {
'id': address.id,
'receiver': address.receiver,
'province': address.province.name,
'city': address.city.name,
'district': address.district.name,
'province_id': address.province.id,
'city_id': address.city.id,
'district_id': address.district.id,
'place': address.place,
'mobile': address.mobile,
'tel': address.phone,
'email': address.email,
'title':address.title
}
default_address = request.user.default_address
if default_address.id == address.id:
# address就是默认地址:
list.insert(0, dict)
else:
# address不是默认地址:
list.append(dict)
except Exception as e:
return JsonResponse({'code':400,
'errmsg':"获取地址失败"})
# 4.获取默认地址的id
default_id = request.user.default_address_id
# 5.拼接参数返回
return JsonResponse({'code':0,
'errmsg':'ok',
'addresses':list,
'default_address_id':default_id})
class CreateAddressView(View):
def post(self, request):
'''新增地址'''
# 1.查询数据库中该用户下的所有没有删除的地址个数
try:
count = Address.objects.filter(user=request.user,
is_delete=False).count()
except Exception as e:
return JsonResponse({'code':400,
'errmsg':'获取数据库中用户的地址个数出错'})
# 2.判断该地址个数是否大于等于20, 如果满足, 返回
if count >= 20:
return JsonResponse({'code': 400,
'errmsg': '地址超过20个'})
# 3.如果不满足----> 新增
# 4.接收参数 json
dict = json.loads(request.body.decode())
receiver = dict.get('receiver')
province_id = dict.get('province_id')
city_id = dict.get('city_id')
district_id = dict.get('district_id')
place = dict.get('place')
mobile = dict.get('mobile')
# 底下的两个字段可以为空:
tel = dict.get('tel')
email = dict.get('email')
# 5.校验参数(整体 + 局部)
if not all([receiver, province_id, city_id, district_id, place, mobile]):
return JsonResponse({'code': 400,
'errmsg': '必传参数有为空的'})
if not re.match(r'', mobile):
return JsonResponse({'code': 400,
'errmsg': 'mobile格式不正确'})
if tel:
if not re.match(r'', tel):
return JsonResponse({'code': 400,
'errmsg': 'tel格式不正确'})
if email:
if not re.match(r'', email):
return JsonResponse({'code': 400,
'errmsg': 'email格式不正确'})
# 6.往mysql中增加地址
try:
address = Address.objects.create(
user = request.user,
province_id = province_id,
city_id = city_id,
district_id = district_id,
title = receiver,
receiver = receiver,
place = place,
mobile = mobile,
phone = tel,
email = email
)
# 7.判断是否有默认地址, 如果没有(第一次增加) 把第一个地址设为默认地址
if not request.user.default_address:
request.user.default_address = address
request.user.save()
except Exception as e:
return JsonResponse({'code': 400,
'errmsg': '增加数据失败'})
# 8.拼接参数, 返回
dict = {
'id': address.id,
'receiver': address.receiver,
'province': address.province.name,
'city': address.city.name,
'district': address.district.name,
'place': address.place,
'mobile': address.mobile,
'tel': address.phone,
'email': address.email,
}
return JsonResponse({'code':0,
'errmsg':'ok',
'address':dict})
class VerifyEmailView(View):
def put(self, request):
'''验证邮箱'''
# 1.接收参数
token = request.GET.get('token')
# 2.检验参数
if not token:
return JsonResponse({'code':400,
'errmsg':'缺少token参数'})
# 3.解密 token ---> user
user = User.check_verify_url_token(token)
# 4.判断 user 是否存在
if user is None:
return JsonResponse({'code': 400,
'errmsg': 'token有问题'})
# 5.更改user的 email_active 为 True
try:
user.email_active = True
user.save()
except Exception as e:
return JsonResponse({'code': 400,
'errmsg': '修改数据库报错'})
# 6.返回json结果
return JsonResponse({'code': 0,
'errmsg': 'ok'})
class EmailView(View):
def put(self, request):
'''修改用户的email'''
# 1.接收参数
dict = json.loads(request.body.decode())
email = dict.get('email')
# 2.校验参数
if not email:
return JsonResponse({'code':400,
'errmsg':'缺少email参数'})
if not re.match(r'^[a-z0-9][\w\.\-]*@[a-z0-9\-]+(\.[a-z]{2,5}){1,2}$', email):
return JsonResponse({'code': 400,
'errmsg': 'email格式不正确'})
try:
# 3.把传入的email修改到数据库
request.user.email = email
# 4.保存
request.user.save()
except Exception as e:
return JsonResponse({'code': 400,
'errmsg': '数据库修改失败'})
verify_url = request.user.generate_verify_url()
# 补充: 给当前邮箱发送验证邮件:
send_verify_email.delay(email, verify_url)
# 5.返回json结果
return JsonResponse({'code': 0,
'errmsg': 'ok'})
class UserInfoView(LoginRequiredMixin, View):
def get(self, request):
'''get请求'''
# 1. 获取用户
user = request.user
# 2. 拼接用户数据
dict = {
'username':user.username,
'mobile':user.mobile,
'email':user.email,
'email_active': user.email_active
}
# 3. 拼接json字符串, 返回
return JsonResponse({'code':0,
'errmsg':'ok',
'info_data':dict})
class LogoutView(View):
def delete(self, request):
'''取消登录功能'''
# 1.删除session
logout(request)
response = JsonResponse({'code':0,
'errmsg':'ok'})
# 2.清除cookie
response.delete_cookie('username')
# 3.返回
return response
class LoginView(View):
def post(self, request):
'''登录的接口'''
# 1.接收参数(username, password, remembered)
dict = json.loads(request.body.decode())
username = dict.get('username')
password = dict.get('password')
remembered = dict.get('remembered')
# 2.检验
if not all([username, password]):
return JsonResponse({'code':400,
'errmsg':'缺少必传参数'})
# 3.认证是否能够登录
user = authenticate(username=username,
password=password)
# 3.1判断用户是否存在
if user is None:
return JsonResponse({'code': 400,
'errmsg': '输入的用户名或密码错误'})
# 4.状态保持
login(request,user)
# 5.判断用户是否勾选了记住状态
if remembered is True:
# 6.如果勾选--->保存两周免登陆
request.session.set_expiry(None)
else:
# 7.如果没有勾选---> 浏览器关闭, sessionid立刻过期
request.session.set_expiry(0)
# 8.返回状态
# return JsonResponse({'code': 0,
# 'errmsg': 'ok'})
response = JsonResponse({'code': 0,
'errmsg': 'ok'})
response.set_cookie('username', user.username, max_age=3600 * 24 * 14)
# 补充内容:购物车的合并
response = merge_cart_cookie_to_redis(request, response)
return response
class RegisterView(View):
def post(self, request):
'''注册功能接口'''
# 1.接收参数
dict = json.loads(request.body.decode())
username = dict.get('username')
password = dict.get('password')
password2 = dict.get('<PASSWORD>')
mobile = dict.get('mobile')
allow = dict.get('allow')
sms_code_client = dict.get('sms_code')
# 2.校验参数(整体)
if not all([username, password, <PASSWORD>, mobile, sms_code_client, allow]):
return JsonResponse({'code':400,
'errmsg':'缺少必传参数'})
# 3.单个检验: username
if not re.match(r'^[a-zA-Z0-9_-]{5,20}$', username):
return JsonResponse({'code': 400,
'errmsg': 'username格式不正确'})
# 4.password
if not re.match(r'^[a-zA-Z0-9]{8,20}$', password):
return JsonResponse({'code': 400,
'errmsg': 'password格式不正确'})
# 5.password 和 <PASSWORD>
if password != <PASSWORD>:
return JsonResponse({'code': 400,
'errmsg': '两次输入密码不一致'})
# 6.mobile
if not re.match(r'^1[3-9]\d{9}$', mobile):
return JsonResponse({'code': 400,
'errmsg': 'mobile格式不正确'})
# 7.allow
if allow != True:
return JsonResponse({'code': 400,
'errmsg': '请勾选协议'})
# 8.链接redis, 获取链接对象
redis_conn = get_redis_connection('verify_code')
# 9.利用练级对象, 获取服务端的短信验证码
sms_code_server = redis_conn.get('sms_%s' % mobile)
# 10.判断服务端的短信验证码是否存在
if not sms_code_server:
return JsonResponse({'code': 400,
'errmsg': '短信验证码过期'})
# 11.对比前后端的短信验证码
if sms_code_client != sms_code_server.decode():
return JsonResponse({'code': 400,
'errmsg': '输入的短信验证码有误'})
# 12.把username, password mobile保存到User
try:
user = User.objects.create_user(username=username,
password=password,
mobile=mobile)
except Exception as e:
return JsonResponse({'code': 400,
'errmsg': '保存到数据库出错'})
# 状态保持:
login(request, user)
# 13.返回结果(json)
# return JsonResponse({'code': 0,
# 'errmsg': 'ok'})
response = JsonResponse({'code': 0,
'errmsg': 'ok'})
response.set_cookie('username', user.username, max_age=3600 * 24 * 14)
# 补充内容:购物车的合并
response = merge_cart_cookie_to_redis(request, response)
return response
class MobileCountView(View):
def get(self, request, mobile):
'''判断手机号是否重复注册'''
# 1.查询mobile在mysql中的个数
try:
count = User.objects.filter(mobile=mobile).count()
except Exception as e:
return JsonResponse({'code':400,
'errmsg':'查询数据库出错'})
# 2.返回结果(json)
return JsonResponse({'code':0,
'errmsg':'ok',
'count':count})
class UsernameCountView(View):
def get(self, request, username):
'''判断用户名是否重复'''
# 1.查询username在数据库中的个数
try:
count = User.objects.filter(username=username).count()
except Exception as e:
return JsonResponse({'code':400,
'errmsg':'访问数据库失败'})
# 2.返回结果(json) ---> code & errmsg & count
return JsonResponse({'code': 0,
'errmsg': 'ok',
'count': count})
| StarcoderdataPython |
Subsets and Splits