id (stringlengths 1–265) | text (stringlengths 6–5.19M) | dataset_id (stringclasses, 7 values)
---|---|---|
3377378 | <gh_stars>1-10
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_image_classification.ipynb (unless otherwise specified).
__all__ = ['data']
# Cell
from fastai.vision.all import *
# Cell
data = DataBlock(blocks = (ImageBlock, CategoryBlock),
get_items = get_image_files,
get_y = parent_label,
splitter = GrandparentSplitter(valid_name='val'),
item_tfms = RandomResizedCrop(128, min_scale=0.35),
batch_tfms = Normalize.from_stats(*imagenet_stats)
) | StarcoderdataPython |
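A minimal usage sketch for the `data` DataBlock above. The dataset and batch size are assumptions (any folder tree laid out as `.../train|val/<label>/<image>` matches `GrandparentSplitter(valid_name='val')` and `parent_label`):

```python
from fastai.vision.all import untar_data, URLs

path = untar_data(URLs.IMAGENETTE_160)   # ships with train/ and val/ grandparent folders
dls = data.dataloaders(path, bs=64)      # `data` is the DataBlock defined above
dls.show_batch(max_n=9)
```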
1625880 | <reponame>Heyjoy/P3<filename>datafield.py<gh_stars>0
import math
# model hyperparameters
TrainTestSplitSize = 0.2
N_EPOCH = 20
Verbose = 1
BatchSize = 64
zeroSteeringCount = 3
#GaussianNoiseStddev = 1
# Image processing tuning parameters
IMGPath = '../data/IMG/'
CSVPath = '../data/driving_log.csv'
ImgShape = [160, 320, 3]
ResizedShape = [64, 64, 3]
cropBottom = math.floor(ImgShape[0]/6) #
cropTop = cropBottom * 2
AngleOffset = 0.25 # offset for left and right camera
## Image flip random
FilpProb = 0.5
## Brightness random
RandomBrightOffset = 0.25
## translate Image method parameter
x_trRange = int(ImgShape[1]/10) # 320 = 6.4*50
y_trRange = int(ImgShape[0]/10) # 160 = 6.4 *25
trShiftAngle = 0.4
| StarcoderdataPython |
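The constants above describe a crop-and-resize pipeline for the driving-log images referenced in `CSVPath`. A rough sketch of how they might be applied is below; the use of OpenCV and this exact function are assumptions, since the sample only contains configuration values:

```python
import cv2
import numpy as np

def preprocess(img: np.ndarray) -> np.ndarray:
    # ImgShape is [160, 320, 3]: drop cropTop (52) rows of sky and cropBottom (26) rows of hood
    h = img.shape[0]
    cropped = img[cropTop:h - cropBottom, :, :]
    return cv2.resize(cropped, (ResizedShape[1], ResizedShape[0]))  # cv2 takes (width, height)
```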
45432 | <reponame>pep7/GorillaBot
# Copyright (c) 2013-2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import logging
import message
from plugins.util import admin, command, humanize_list
from queue import Empty
@command("admincommandlist")
def admincommands(m):
"""Provide a list of admin-only commands."""
#- !admincommands
#-
#- ```irc
#- < GorillaWarfare> !admincommands
#- < GorillaBot> My available admin commands are join, part, quit, setcommand,
#- and unset. See http://molly.github.io/GorillaBot for documentation.
#- ```
#-
#- Say the available admin-only commands. This does not display command aliases.
commands = [key for key in m.bot.admin_commands.keys() if not m.bot.admin_commands[key][1]]
commands.sort()
if len(commands) == 0:
m.bot.private_message(m.location, "I have no available admin commands. See "
"http://molly.github.io/GorillaBot for documentation.")
elif len(commands) == 1:
m.bot.private_message(m.location, "My available admin command is {0}. See "
"http://molly.github.io/GorillaBot for "
"documentation.".format(commands[0]))
else:
m.bot.private_message(m.location, "My available admin commands are {0}. See "
"http://molly.github.io/GorillaBot for "
"documentation.".format(
humanize_list(commands)))
@command("admins", "botops", "oplist")
def adminlist(m):
"""Provide a list of current bot admins."""
#- !adminlist
#-
#- ```irc
#- < GorillaWarfare> !adminlist
#- < GorillaBot> My bot admin is GorillaWarfare.
#- ```
#-
#- Say the current bot operators.
ops = list(m.bot.configuration["botops"].keys())
if ops:
if len(ops) == 1:
m.bot.private_message(m.location, "My bot admin is " + ops[0] + ".")
else:
m.bot.private_message(m.location, "My bot admins are " + humanize_list(ops))
else:
nick = m.bot.configuration["nick"]
m.bot.private_message(m.location, "{0} has no master. {0} is a free bot.".format(nick))
@command("pingall", "highlightall")
def attention(m):
"""Ping everyone currently joined to the channel. Be careful to only turn this on if you trust
those in the channel not to abuse it."""
#- !attention
#-
#- ```irc
#- < GorillaWarfare> !attention
#- < GorillaBot> user1, user2, user3: GorillaWarfare wants your attention
#- ```
#-
#- Ping all of the users in the channel.
#-
#- #### Settings
#- `on` - Anyone can use this command. Be sure you trust everyone in the channel not to abuse
#- it.
#- `admin` - Only bot admins can use this command.
logger = logging.getLogger("GorillaBot")
attention_setting = m.bot.get_setting('attention', m.location)
if attention_setting == 'admin':
if not m.bot.is_admin(m.sender):
m.bot.private_message(m.location, "Please ask a bot operator to perform this action for"
" you.")
return
elif attention_setting != 'on':
m.bot.private_message(m.location, "Command not enabled.")
return
# Okay, we're authorized to do this.
m.bot.response_lock.acquire()
ignored_messages = []
m.bot.send("NAMES {}".format(m.location))
while True:
try:
msg = m.bot.message_q.get(True, 120)
except Empty:
logger.error("No response from server when trying to get nicks. Shutting down.")
m.bot.shutdown.set()
return
if isinstance(msg, message.Numeric):
if msg.number == '353':
nicks = msg.body.split()
nicks = nicks[2:]
nicks[0] = nicks[0][1:]
sender = m.bot.parse_hostmask(m.sender)["nick"]
try:
nicks.remove(sender)
nicks.remove(m.bot.configuration["nick"])
except ValueError:
pass
m.bot.private_message(m.location, "{0}: {1} wants your attention"
.format(", ".join(nicks), sender))
break
ignored_messages.append(msg)
for msg in ignored_messages:
m.bot.message_q.put(msg)
m.bot.response_lock.release()
@command("commandlist", "help")
def commands(m):
"""Provide a list of commands available to all users."""
#- !commands
#-
#- ```irc
#- < GorillaWarfare> !commands
#- < GorillaBot> My available commands are admincommands, adminlist, commands, hug,
#- link, spotify, and xkcd. See http://molly.github.io/GorillaBot
#- for documentation.
#- ```
#-
#- Say the available all-user commands. This does not display command aliases.
commands = [key for key in m.bot.commands.keys() if not m.bot.commands[key][1]]
commands.sort()
if len(commands) == 0:
m.bot.private_message(m.location, "I have no available commands. See "
"http://molly.github.io/GorillaBot for documentation.")
elif len(commands) == 1:
m.bot.private_message(m.location, "My available command is {0}. See "
"http://molly.github.io/GorillaBot for "
"documentation.".format(commands[0]))
else:
m.bot.private_message(m.location, "My available commands are {0}. See "
"http://molly.github.io/GorillaBot for "
"documentation.".format(
humanize_list(commands))) | StarcoderdataPython |
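The `@command(...)` decorators above come from `plugins.util`, which is not part of this sample. A minimal sketch of the registry pattern they appear to implement (the dictionary and internals here are guesses, not GorillaBot's actual code):

```python
COMMANDS = {}

def command(*aliases):
    """Register a handler under its own name plus any aliases."""
    def register(func):
        for name in (func.__name__,) + aliases:
            COMMANDS[name] = func
        return func
    return register

@command("commandlist", "help")
def commands(m):
    return "My available commands are ..."

assert COMMANDS["help"] is COMMANDS["commandlist"] is COMMANDS["commands"]
```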
1649956 | <reponame>jdmartinez36/azure-batch-cli-extensions
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from enum import Enum
# These properties are reserved for application template use
# and may not be used on jobs using an application template
PROPS_RESERVED_FOR_TEMPLATES = {
'jobManagerTask',
'jobPreparationTask',
'jobReleaseTask',
#'commonEnvironmentSettings',
'usesTaskDependencies',
'onAllTasksComplete',
'onTaskFailure',
'taskFactory'}
PROPS_PERMITTED_ON_TEMPLATES = PROPS_RESERVED_FOR_TEMPLATES.union({
'templateMetadata',
'parameters',
'metadata'})
ATTRS_RESERVED_FOR_TEMPLATES = {
'job_manager_task',
'job_preparation_task',
'job_release_task',
#'common_environment_settings',
'uses_task_dependencies',
'on_all_tasks_complete',
'on_task_failure',
'task_factory'}
# These properties are reserved for job use
# and may not be used on an application template
PROPS_RESERVED_FOR_JOBS = {
'id',
'displayName',
'priority',
'constraints',
'poolInfo',
'applicationTemplateInfo'}
# Properties on a repeatTask object that should be
# applied to each expanded task.
PROPS_ON_REPEAT_TASK = {
'displayName',
'containerSettings',
'resourceFiles',
'environmentSettings',
'constraints',
'userIdentity',
'exitConditions',
'clientExtensions',
'outputFiles',
'packageReferences'}
PROPS_ON_COLLECTION_TASK = PROPS_ON_REPEAT_TASK.union({
'multiInstanceSettings',
'dependsOn'})
# Dates used as cutoffs for different SDK extension versions
class KnownTemplateVersion(Enum):
Dec2018 = "2018-12-01"
| StarcoderdataPython |
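One plausible way these sets are consumed is to reject jobs that set template-reserved properties directly; a small sketch under that assumption (the function name is hypothetical, not the extension's actual code):

```python
def find_disallowed_props(job_json: dict) -> set:
    """Return properties that only an application template may set."""
    return PROPS_RESERVED_FOR_TEMPLATES & set(job_json)

assert find_disallowed_props({"id": "job-1", "priority": 100}) == set()
assert find_disallowed_props({"id": "job-1", "taskFactory": {}}) == {"taskFactory"}
```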
128922 | <reponame>Tiago-S-Ribeiro/Python-Pro-Bootcamp<filename>100_days_of_code/Intermediate+/day_37/main.py
import requests
import datetime as dt
from data import USER, TOKEN, G_ID, PXL_ENDPOINT, NEW_PIXEL_ENDPOINT, GRAPH_ENDPOINT
headers = {
"X-USER-TOKEN": TOKEN
}
today = dt.datetime.now()
#------------------- Create a new user using POST -------------------
new_graph_params = {
"token": TOKEN,
"username": USER,
"agreeTermsOfService": "yes",
"notMinor": "yes"
}
response = requests.post(url=PXL_ENDPOINT, json=new_graph_params)
print(response.text)
#---------------- Create a new Pixela graph using POST ----------------
graph_config = {
"id": G_ID,
"name": "Reading Graph",
"unit": "pages",
"type": "int",
"color": "momiji"
}
response = requests.post(url=GRAPH_ENDPOINT, json=graph_config, headers=headers)
print(response.text)
#-------------------- Create a new pixel using POST --------------------
pixel_params = {
"date": today.strftime("%Y%m%d"),
"quantity": input("How many pages did you read today? ")
}
response = requests.post(url=NEW_PIXEL_ENDPOINT, json=pixel_params, headers=headers)
print(response.text)
#----------------------- Update a pixel using PUT -----------------------
put = {
"quantity": "14"
}
date = dt.datetime(year=2022, month=1, day=10)
update_endpoint = f"{NEW_PIXEL_ENDPOINT}/{date.strftime('%Y%m%d')}"
response = requests.put(url=update_endpoint, json=put, headers=headers)
print(response.text)
#---------------------------- Delete a pixel ----------------------------
response = requests.delete(url=update_endpoint, headers=headers)
print(response.text) | StarcoderdataPython |
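The constants imported from `data` are not included in this sample; based on the public Pixela API they would look roughly like the following (the literal values are placeholders):

```python
USER = "your-username"
TOKEN = "your-secret-token"
G_ID = "reading-graph"

PXL_ENDPOINT = "https://pixe.la/v1/users"
GRAPH_ENDPOINT = f"{PXL_ENDPOINT}/{USER}/graphs"
NEW_PIXEL_ENDPOINT = f"{GRAPH_ENDPOINT}/{G_ID}"
```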
3251212 | from .directory import DirectoryClient
from .organization import OrganizationClient
from .service import ServiceClient
| StarcoderdataPython |
1768653 | import discord
import secrets
import itertools
import random
import re
import os
from keepAlive import keep_alive
import requests
import json
client = discord.Client()
prefix = '&'
diceTypes = [4,6,8,10,12,20,100]
dnd5e_races = ["DragonBorn", "Dwarf", "Elf", "Gnome", "Half-Elf", "Halfling", "Half-Orc", "Human", "Tiefling", "Orc of Exandria", "Leonin", "Satyr", "Aarakocra", "Genasi", "Goliath", "Aasimar", "Bugbear", "Firbolg", "Goblin", "Hobgoblin", "Kenku", "Kobold", "Lizardfolk", "Orc", "Tabaxi", "Triton", "Yuan-ti Pureblood", "Feral Tiefling", "Tortle", "Changeling", "Kalashtar", "Orc of Eberron", "Shifter", "Warforged", "Gith", "Centaur", "Loxodon", "Minotaur", "Simic Hybrid", "Vedalken", "Verdan", "Locathah", "Grung"]
dnd5e_races_phb = ["DragonBorn", "Dwarf", "Elf", "Gnome", "Half-Elf", "Halfling", "Half-Orc", "Human", "Tiefling"]
dnd5e_classes = ["Barbarian", "Bard", "Cleric", "Druid", "Fighter", "Monk", "Paladin", "Ranger", "Rogue", "Sorcerer", "Warlock", "Wizard", "Artificer", "Blood Hunter"]
dnd5e_classes_phb = ["Barbarian", "Bard", "Cleric", "Druid", "Fighter", "Monk", "Paladin", "Ranger", "Rogue", "Sorcerer", "Warlock", "Wizard"]
def searchCondition(query):
response = requests.get('https://www.dnd5eapi.co/api/conditions/'+query)
json_data = json.loads(response.text)
name = json_data['name']
desc = ''
for i in json_data['desc']:
desc = desc + i+"\n"
return (name,desc)
def conditionList():
response = requests.get('https://www.dnd5eapi.co/api/conditions')
json_data = json.loads(response.text)
cond = ''
for i in json_data['results']:
cond = cond + i['index']+", "
return cond[:-2]
def searchAbility(query):
response = requests.get('https://www.dnd5eapi.co/api/ability-scores/'+query)
json_data = json.loads(response.text)
name = json_data['name']
desc = ''
for i in json_data['desc']:
desc = desc + i+"\n"
skills = ''
for i in json_data['skills']:
skills = skills + i['name'] + ", "
return (name,desc,skills[:-2])
def abilityList():
response = requests.get('https://www.dnd5eapi.co/api/ability-scores')
json_data = json.loads(response.text)
cond = ''
for i in json_data['results']:
cond = cond + i['index']+", "
return cond[:-2]
def skillList():
response = requests.get('https://www.dnd5eapi.co/api/skills')
json_data = json.loads(response.text)
cond = ''
for i in json_data['results']:
cond = cond + i['index']+", "
return cond[:-2]
def searchSkill(query):
response = requests.get('https://www.dnd5eapi.co/api/skills/'+query)
json_data = json.loads(response.text)
name = json_data['name']
desc = ''
for i in json_data['desc']:
desc = desc + i+"\n"
abi = json_data['ability_score']['index']
return (name,desc,abi)
def damageList():
response = requests.get('https://www.dnd5eapi.co/api/damage-types')
json_data = json.loads(response.text)
damage = ''
for i in json_data['results']:
damage = damage + i['index']+", "
return damage[:-2]
def searchDamage(query):
response = requests.get('https://www.dnd5eapi.co/api/damage-types/'+query)
json_data = json.loads(response.text)
name = json_data['name']
desc = ''
for i in json_data['desc']:
desc = desc + i+"\n"
return (name,desc)
def helpList():
string = '**Praise Asmodeus**'+'\n'+'Bot prefix: '+ prefix + '\n' + 'Rolling Dice: &[#dice]d[Type], ex: &8d6' + '\n' + 'Random Race(w/Expansions): &randrace' + '\n' + 'Random Race(PHB): &randracephb'+ '\n' + 'Random Class(w/Expansions): &randclass' + '\n' + 'Random Class(PHB): &randclassphb' + '\n' + 'Random Ability Scores: &randas'+ '\n' + 'Roll d20 with advantage: &adv' + '\n' + 'Roll d20 with disadvantage: &ddv' + '\n' + 'Roll 1d20: &r' + '\n' + 'Generate Random Character(w/Expansions): &randchar' + '\n' + 'Generate Random Character(PHB): &randcharphb' + '\n' + 'Ability Scores List: &abi' + '\n' + 'Ability Scores Descriptions: &[ability], ex:&dex' + '\n' + 'Conditions List: &cond' + '\n' + 'Conditions Description: &[condition], ex: &exhaustion' + '\n' + 'Skills List: &skills' + '\n' + 'Skills Description: &[skill], ex:&animal-handling' + '\n' + 'Damage Types: &damage' + '\n' + 'Damage Types Description: &[type], ex: &thunder'
return string
def diceRoll(message):
split = re.split('&|d',message)
number = int(split[1])
dice = int(split[2])
string = ''
result = 0
if dice in diceTypes:
if number == 1:
rand = random.randrange(1, dice+1)
result = rand
string = string + str(rand)
else:
for i in itertools.repeat(None, number):
rand = random.randrange(1, dice+1)
result = result + rand
string = string + str(rand) + ', '
else:
string = 'Invalid'
result = dice
return (string.rstrip(', '), result)  # trim the trailing ", " only from multi-roll lists; single rolls and 'Invalid' stay intact
def randAS():
string = ''
ability = 0
total = 0
for i in itertools.repeat(None, 6):
one = random.randrange(1, 7)
two = random.randrange(1, 7)
three = random.randrange(1, 7)
four = random.randrange(1, 7)
list = [one, two, three, four]
list2 = '('
lowest = min(list)
ability = sum(list) - lowest
total = total + ability
counter = 0
for i in list:
counter = counter + 1
if i != lowest and counter == 4:
list2 = list2 + ' '+ str(i) + ' )'
if i != lowest and counter != 4:
list2 = list2 + ' '+str(i) + ' ,'
if i == lowest and counter == 4:
list2 = list2 + ' '+'~~'+str(i)+'~~' + ' )'
lowest = 0
if i == lowest and counter != 4:
list2 = list2 + ' '+'~~'+str(i)+'~~' + ' ,'
lowest = 0
string = string + list2 + ' = '+'**'+str(ability)+'**'+ "\n"
return string + 'Total: ' + '**'+str(total)+'**'
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
if message.author == client.user:
return
if re.fullmatch(prefix+r'\d*d\d*',message.content):
(string,result) = diceRoll(message.content)
if string == 'Invalid':
await message.channel.send(message.author.mention +"\n"+'Invalid dice format: d'+str(result))
else:
await message.channel.send( message.author.mention +"\n"+ '**Rolls:** '+ string +"\n"+ '**Total:** '+ str(result) )
if re.fullmatch(prefix+r'randrace',message.content):
racechoice = secrets.choice(dnd5e_races)
await message.channel.send(message.author.mention +"\n"+racechoice)
if re.fullmatch(prefix+r'randracephb',message.content):
classchoice = secrets.choice(dnd5e_races_phb)
await message.channel.send(message.author.mention +"\n"+classchoice)
if re.fullmatch(prefix+r'randclass',message.content):
racechoice = secrets.choice(dnd5e_classes)
await message.channel.send(message.author.mention +"\n"+racechoice)
if re.fullmatch(prefix+r'randclassphb',message.content):
classchoice = secrets.choice(dnd5e_classes_phb)
await message.channel.send(message.author.mention +"\n"+classchoice)
if re.fullmatch(prefix+r'randas',message.content):
await message.channel.send(message.author.mention +"\n"+randAS())
if re.fullmatch(prefix+r'adv',message.content):
rand = random.randrange(1, 21)
rand2 = random.randrange(1, 21)
if rand > rand2:
rand = '**'+str(rand)+'**'
rand2 = str(rand2)
else:
rand = str(rand)
rand2 = '**'+str(rand2)+'**'
await message.channel.send(message.author.mention +"\n"+'**Advantage Rolls:** '+ rand+ ', ' + rand2 )
if re.fullmatch(prefix+r'ddv',message.content):
rand = random.randrange(1, 21)
rand2 = random.randrange(1, 21)
if rand < rand2:
rand = '**'+str(rand)+'**'
rand2 = str(rand2)
else:
rand = str(rand)
rand2 = '**'+str(rand2)+'**'
await message.channel.send(message.author.mention +"\n"+'**Disadvantage Rolls:** '+ rand+ ', ' + rand2 )
if re.fullmatch(prefix+r'r',message.content):
rand = random.randrange(1, 21)
await message.channel.send(message.author.mention +"\n"+'**Roll:** ' + str(rand))
if re.fullmatch(prefix+r'randchar',message.content):
racechoice = secrets.choice(dnd5e_races)
classchoice = secrets.choice(dnd5e_classes)
await message.channel.send(message.author.mention +"\n" +'**Race:** '+"\n"+racechoice+"\n"+'**Class:** '+classchoice + "\n" +'**Ability Scores:** ' +"\n" +randAS())
if re.fullmatch(prefix+r'randcharphb',message.content):
racechoice = secrets.choice(dnd5e_races_phb)
classchoice = secrets.choice(dnd5e_classes_phb)
await message.channel.send(message.author.mention +"\n" +'**Race:** '+"\n"+racechoice+"\n"+'**Class:** '+classchoice + "\n" +'**Ability Scores:** ' +"\n" +randAS())
if re.fullmatch(r'&blinded|&charmed|&deafened|&exhaustion|&frightened|&grappled|&incapacitated|&invisible|¶lyzed|&petrified|&poisoned|&restrained|&stunned|&unconscious',message.content):
(name,desc)=searchCondition(message.content[1:])
await message.channel.send(message.author.mention +"\n" +'**Name:** '+name+"\n"+'**Desc:** '+desc)
if re.fullmatch(r'&str|&con|&dex|&wis|&cha|&int',message.content):
(name,desc,skills)=searchAbility(message.content[1:])
await message.channel.send(message.author.mention +"\n" +'**Name:** '+name+"\n"+'**Desc:** '+desc+"\n"+'**Skills:** '+skills)
if re.fullmatch(prefix+r'cond',message.content):
cond = conditionList()
await message.channel.send(message.author.mention +"\n" +'**Conditions:** '+cond)
if re.fullmatch(prefix+r'abi',message.content):
abi = abilityList()
await message.channel.send(message.author.mention +"\n" +'**Ability Scores:** '+abi)
if re.fullmatch(prefix+r'skills',message.content):
skill = skillList()
await message.channel.send(message.author.mention +"\n" +'**Skills:** '+skill)
if re.fullmatch(r'&acrobatics|&animal-handling|&arcana|&athletics|&deception|&history|&insight|&intimidation|&investigation|&medicine|&nature|&perception|&performance|&persuasion|&religion|&sleight-of-hand|&stealth|&survival',message.content):
(name,desc,abi)=searchSkill(message.content[1:])
await message.channel.send(message.author.mention +"\n" +'**Name:** '+name+"\n"+'**Desc:** '+desc+"\n"+'**Ability Mod:** '+abi)
if re.fullmatch(prefix+r'damage',message.content):
damage = damageList()
await message.channel.send(message.author.mention +"\n" +'**Damage Types:** '+damage)
if re.fullmatch(r'&acid|&bludgeoning|&cold|&fire|&force|&lightning|&necrotic|&piercing|&poison|&psychic|&radiant|&slashing|&thunder',message.content):
(name,desc)=searchDamage(message.content[1:])
await message.channel.send(message.author.mention +"\n" +'**Damage Type:** '+name+"\n"+'**Desc:** '+desc)
if re.fullmatch(prefix+r'help',message.content):
await message.channel.send(message.author.mention +"\n" + helpList())
keep_alive()
client.run(os.getenv('TOKEN'))
| StarcoderdataPython |
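For reference, the `&NdM` parser defined above behaves like this (roll values vary with the RNG):

```python
rolls, total = diceRoll('&3d6')     # e.g. ('2, 5, 4', 11)
invalid, sides = diceRoll('&2d7')   # 7 is not in diceTypes
print(invalid, sides)               # Invalid 7
```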
152736 | '''
defines all the sources necessary for building cgui.pyd
'''
import os
BUILD_BUDDYLIST_GUI = False
thisdir = os.path.dirname(os.path.abspath(__file__))
sources = '''
src/ctextutil.cpp
src/SplitImage4.cpp
src/ScrollWindow.cpp
src/skinvlist.cpp
src/pyutils.cpp
src/cwindowfx.cpp
src/SkinSplitter.cpp
src/alphaborder.cpp
src/skin/skinobjects.cpp
src/skin/SkinBitmap.cpp
src/LoginWindow.cpp
src/DragMixin.cpp
src/MiscUI.cpp
src/SelectionEvent.cpp
src/InputBox.cpp
src/ExpandoTextCtrl.cpp
src/ExpandEvent.cpp
src/GettextPython.cpp
'''.split()
include_dirs = '''
src
src/skin
src/Animation
src/Animation/Platform
src/Animation/Platform/wx
src/BuddyList
'''.split()
boost_env_dir = os.getenv('BOOST_DIR')
if boost_env_dir is not None:
include_dirs.append(boost_env_dir)
# rtf
rtf_files = \
'''
DebugUtil.cpp
HTMLEncoder.cpp
MSIMEncoder.cpp
MSNEncoder.cpp
RTFToX.cpp
StyleDesc.cpp
StringUtil.cpp
XHTMLEncoder.cpp
YahooEncoder.cpp
'''.split()
sources.extend('src/RTFToX/%s' % s for s in rtf_files)
include_dirs.append('src/RTFToX')
import sys
if sys.platform == 'win32':
sources.extend('''
src/alphaborder_win.cpp
src/win/PlatformMessagesWin.cpp
src/win/WindowSnapperWin.cpp
src/WindowSnapper.cpp
src/win/FullscreenWin.cpp
src/win/WinUtils.cpp
src/win/WinTaskbar.cpp
src/win/WinJumpList.cpp
src/win/RichEditUtils.cpp
src/TransparentFrame.cpp
src/Statistics.cpp
src/IconUtils.cpp
'''.split())
include_dirs.extend([
'src/win',
])
if BUILD_BUDDYLIST_GUI:
sources.extend('''
src/TreeList.cpp
src/BuddyList.cpp
'''.split())
| StarcoderdataPython |
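These `sources` and `include_dirs` lists are presumably consumed by a build script elsewhere in the repository; a minimal sketch of how they could feed a setuptools `Extension` (an assumption, not the project's actual setup code):

```python
from setuptools import Extension

cgui_ext = Extension(
    "cgui",
    sources=sources,
    include_dirs=include_dirs,
    language="c++",
)
# setup(..., ext_modules=[cgui_ext]) would then build the cgui extension module
```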
1691462 | from http.server import HTTPServer, SimpleHTTPRequestHandler
class MyHTTPRequestHandler(SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.end_headers()
self.wfile.write(b"hi there")
if __name__ == '__main__':
server_address = ('127.0.0.1', 8000)
httpd = HTTPServer(server_address, MyHTTPRequestHandler)
httpd.serve_forever()
| StarcoderdataPython |
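With the server above running, any GET request returns the fixed body, e.g.:

```python
from urllib.request import urlopen

print(urlopen("http://127.0.0.1:8000/").read())   # b'hi there'
```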
1684312 | #!/usr/bin/env python
"""
TAP protocol client library.
Copyright (c) 2010 <NAME> <<EMAIL>>
"""
import socket
import string
import random
import struct
import asyncore
import mc_bin_server
import mc_bin_client
from memcacheConstants import REQ_MAGIC_BYTE, RES_MAGIC_BYTE
from memcacheConstants import REQ_PKT_FMT, RES_PKT_FMT, MIN_RECV_PACKET
from memcacheConstants import SET_PKT_FMT, DEL_PKT_FMT, INCRDECR_RES_FMT
import memcacheConstants
class TapConnection(mc_bin_server.MemcachedBinaryChannel):
def __init__(self, server, port, callback, clientId=None, opts={}, user=None, pswd=None):
mc_bin_server.MemcachedBinaryChannel.__init__(self, None, None,
self._createTapCall(clientId,
opts))
self.server = server
self.port = port
self.callback = callback
self.identifier = (server, port)
self.user = user
self.pswd = pswd
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((server, port))
def create_socket(self, family, type):
if not self.user:
mc_bin_server.MemcachedBinaryChannel.create_socket(self, family, type)
return
self.family_and_type = family, type
self.mc = mc_bin_client.MemcachedClient(self.server, self.port)
self.mc.sasl_auth_plain(self.user, self.pswd or "")
sock = self.mc.s
sock.setblocking(0)
self.set_socket(sock)
def _createTapCall(self, key=None, opts={}):
# Client identifier
if not key:
key = "".join(random.sample(string.letters, 16))
dtype=0
opaque=0
cas=0
extraHeader, val = self._encodeOpts(opts)
msg=struct.pack(REQ_PKT_FMT, REQ_MAGIC_BYTE,
memcacheConstants.CMD_TAP_CONNECT,
len(key), len(extraHeader), dtype, 0,
len(key) + len(extraHeader) + len(val),
opaque, cas)
return msg + extraHeader + key + val
def _encodeOpts(self, opts):
header = 0
val = []
for op in sorted(opts.keys()):
header |= op
if op in memcacheConstants.TAP_FLAG_TYPES:
val.append(struct.pack(memcacheConstants.TAP_FLAG_TYPES[op],
opts[op]))
elif op == memcacheConstants.TAP_FLAG_LIST_VBUCKETS:
val.append(self._encodeVBucketList(opts[op]))
else:
val.append(opts[op])
return struct.pack(">I", header), ''.join(val)
def _encodeVBucketList(self, vbl):
l = list(vbl) # in case it's a generator
vals = [struct.pack("!H", len(l))]
for v in vbl:
vals.append(struct.pack("!H", v))
return ''.join(vals)
def processCommand(self, cmd, klen, vb, extralen, cas, data):
extra = data[0:extralen]
key = data[extralen:(extralen+klen)]
val = data[(extralen+klen):]
return self.callback(self.identifier, cmd, extra, key, vb, val, cas)
def handle_connect(self):
pass
def handle_close(self):
self.close()
class TapClient(object):
def __init__(self, servers, callback, opts={}, user=None, pswd=None):
for t in servers:
tc = TapConnection(t.host, t.port, callback, t.id, opts, user, pswd)
class TapDescriptor(object):
port = 11211
id = None
def __init__(self, s):
self.host = s
if ':' in s:
self.host, self.port = s.split(':', 1)
self.port = int(self.port)
if '@' in self.host:
self.id, self.host = self.host.split('@', 1)
def __repr__(self):
return "<TapDescriptor %s@%s:%d>" % (self.id or "(anon)", self.host, self.port)
| StarcoderdataPython |
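A rough usage sketch for the classes above; the host, port and server string are placeholders, and the callback signature mirrors `processCommand`:

```python
def on_tap(identifier, cmd, extra, key, vb, val, cas):
    print(identifier, cmd, key, len(val))

servers = [TapDescriptor("admin@127.0.0.1:11210")]   # parsed as "<id>@<host>:<port>"
TapClient(servers, on_tap)
asyncore.loop()   # drives the TapConnection channels until they close
```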
3281548 | <gh_stars>1000+
#!/usr/bin/python3
"""
[[https://bluemaestro.com/products/product-details/bluetooth-environmental-monitor-and-logger][Bluemaestro]] temperature/humidity/pressure monitor
"""
# todo most of it belongs to DAL... but considering so few people use it I didn't bother for now
from datetime import datetime, timedelta
from pathlib import Path
import re
import sqlite3
from typing import Iterable, Sequence, Set, Optional
from my.core import get_files, LazyLogger, dataclass, Res
from my.core.sqlite import sqlite_connect_immutable
from my.config import bluemaestro as config
# todo control level via env variable?
# i.e. HPI_LOGGING_MY_BLUEMAESTRO_LEVEL=debug
logger = LazyLogger(__name__, level='debug')
def inputs() -> Sequence[Path]:
return get_files(config.export_path)
Celsius = float
Percent = float
mBar = float
@dataclass
class Measurement:
dt: datetime # todo aware/naive
temp : Celsius
humidity: Percent
pressure: mBar
dewpoint: Celsius
# fixme: later, rely on the timezone provider
# NOTE: the timezone should be set with respect to the export date!!!
import pytz # type: ignore
tz = pytz.timezone('Europe/London')
# TODO when I change tz, check the diff
def is_bad_table(name: str) -> bool:
# todo hmm would be nice to have a hook that can patch any module up to
delegate = getattr(config, 'is_bad_table', None)
return False if delegate is None else delegate(name)
from my.core.cachew import cache_dir
from my.core.common import mcachew
@mcachew(depends_on=lambda: inputs(), cache_path=cache_dir('bluemaestro'))
def measurements() -> Iterable[Res[Measurement]]:
# todo ideally this would be via arguments... but needs to be lazy
dbs = inputs()
last: Optional[datetime] = None
# tables are immutable, so can save on processing..
processed_tables: Set[str] = set()
for f in dbs:
logger.debug('processing %s', f)
tot = 0
new = 0
# todo assert increasing timestamp?
with sqlite_connect_immutable(f) as db:
db_dt: Optional[datetime] = None
try:
datas = db.execute(f'SELECT "{f.name}" as name, Time, Temperature, Humidity, Pressure, Dewpoint FROM data ORDER BY log_index')
oldfmt = True
db_dts = list(db.execute('SELECT last_download FROM info'))[0][0]
if db_dts == 'N/A':
# ??? happens for 20180923-20180928
continue
if db_dts.endswith(':'):
db_dts += '00' # wtf.. happens on some day
db_dt = tz.localize(datetime.strptime(db_dts, '%Y-%m-%d %H:%M:%S'))
except sqlite3.OperationalError:
# Right, this looks really bad.
# The device doesn't have internal time & what it does is:
# 1. every X seconds, record a datapoint, store it in the internal memory
# 2. on sync, take the phone's datetime ('now') and then ASSIGN the timestamps to the collected data
# as now, now - X, now - 2X, etc
#
# that basically means that for example, hourly timestamps are completely useless? because their error is about 1h
# yep, confirmed on some historic exports. seriously, what the fuck???
#
# The device _does_ have an internal clock, but it's basically set to 0 every time you update settings
# So, e.g. if, say, at 17:15 you set the interval to 3600, the 'real' timestamps would be
# 17:15, 18:15, 19:15, etc
# But depending on when you export, you might get
# 17:35, 18:35, 19:35; or 17:55, 18:55, 19:55, etc
# basically all you guaranteed is that the 'correct' interval is within the frequency
# it doesn't seem to keep the reference time in the database
#
# UPD: fucking hell, so you can set the reference date in the settings (calcReferenceUnix field in meta db)
# but it's not set by default.
log_tables = [c[0] for c in db.execute('SELECT name FROM sqlite_sequence WHERE name LIKE "%_log"')]
log_tables = [t for t in log_tables if t not in processed_tables]
processed_tables |= set(log_tables)
# todo use later?
frequencies = [list(db.execute(f'SELECT interval from {t.replace("_log", "_meta")}'))[0][0] for t in log_tables]
# todo could just filter out the older datapoints?? dunno.
# eh. a bit horrible, but seems the easiest way to do it?
# note: for some reason everything in the new table multiplied by 10
query = ' UNION '.join(
f'SELECT "{t}" AS name, unix, tempReadings / 10.0, humiReadings / 10.0, pressReadings / 10.0, dewpReadings / 10.0 FROM {t}'
for t in log_tables
)
if len(log_tables) > 0: # ugh. otherwise end up with syntax error..
query = f'SELECT * FROM ({query}) ORDER BY name, unix'
datas = db.execute(query)
oldfmt = False
db_dt = None
for i, (name, tsc, temp, hum, pres, dewp) in enumerate(datas):
if is_bad_table(name):
continue
# note: bluemaestro keeps local datetime
if oldfmt:
tss = tsc.replace('Juli', 'Jul').replace('Aug.', 'Aug')
dt = datetime.strptime(tss, '%Y-%b-%d %H:%M')
dt = tz.localize(dt)
assert db_dt is not None
else:
# todo cache?
m = re.search(r'_(\d+)_', name)
assert m is not None
export_ts = int(m.group(1))
db_dt = datetime.fromtimestamp(export_ts / 1000, tz=tz)
dt = datetime.fromtimestamp(tsc / 1000, tz=tz)
## sanity checks (todo make defensive/configurable?)
# not sure how that happens.. but basically they'd better be excluded
lower = timedelta(days=6000 / 24) # ugh some time ago I only did it once in an hour.. in theory can detect from meta?
upper = timedelta(days=10) # kinda arbitrary
if not (db_dt - lower < dt < db_dt + timedelta(days=10)):
# todo could be more defenive??
yield RuntimeError('timestamp too far out', f, name, db_dt, dt)
continue
assert -60 <= temp <= 60, (f, dt, temp)
##
tot += 1
if last is not None and last >= dt:
continue
# todo for performance, pass 'last' to sqlite instead?
last = dt
new += 1
p = Measurement(
dt=dt,
temp=temp,
pressure=pres,
humidity=hum,
dewpoint=dewp,
)
yield p
logger.debug('%s: new %d/%d', f, new, tot)
# logger.info('total items: %d', len(merged))
# for k, v in merged.items():
# # TODO shit. quite a few of them have varying values... how is that freaking possible????
# # most of them are within 0.5 degree though... so just ignore?
# if isinstance(v, set) and len(v) > 1:
# print(k, v)
# for k, v in merged.items():
# yield Point(dt=k, temp=v) # meh?
from my.core import stat, Stats
def stats() -> Stats:
return stat(measurements)
from my.core.pandas import DataFrameT, as_dataframe
def dataframe() -> DataFrameT:
"""
%matplotlib gtk
from my.bluemaestro import dataframe
dataframe().plot()
"""
df = as_dataframe(measurements(), schema=Measurement)
# todo not sure how it would handle mixed timezones??
# todo hmm, not sure about setting the index
return df.set_index('dt')
def fill_influxdb() -> None:
from my.core import influxdb
influxdb.fill(measurements(), measurement=__name__)
def check() -> None:
temps = list(measurements())
latest = temps[:-2]
from my.core.error import unwrap
prev = unwrap(latest[-2]).dt
last = unwrap(latest[-1]).dt
# todo stat should expose a dataclass?
# TODO ugh. might need to warn about points past 'now'??
# the default shouldn't allow points in the future...
#
# TODO also needs to be filtered out on processing, should be rejected on the basis of export date?
POINTS_STORED = 6000 # on device?
FREQ_SEC = 60
SECS_STORED = POINTS_STORED * FREQ_SEC
HOURS_STORED = POINTS_STORED / (60 * 60 / FREQ_SEC) # around 4 days
NOW = datetime.now()
assert NOW - last < timedelta(hours=HOURS_STORED / 2), f'old backup! {last}'
assert last - prev < timedelta(minutes=3), f'bad interval! {last - prev}'
single = (last - prev).seconds
| StarcoderdataPython |
68495 | import datetime
from ..dojo_test_case import DojoTestCase
from dojo.models import Test
from dojo.tools.acunetix.parser import AcunetixParser
class TestAcunetixParser(DojoTestCase):
def test_parse_file_with_one_finding(self):
testfile = open("unittests/scans/acunetix/one_finding.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(1, len(findings))
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("Medium", finding.severity)
self.assertEqual(352, finding.cwe)
self.assertEqual(datetime.date(2018, 9, 24), finding.date)
self.assertIsNotNone(finding.description)
self.assertGreater(len(finding.description), 0)
self.assertFalse(finding.false_p)
self.assertEqual("Vijay Test Imapact", finding.impact)
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
self.assertEqual(1, len(finding.unsaved_endpoints))
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertEqual('https', endpoint.protocol)
self.assertEqual(443, endpoint.port)
self.assertEqual('vijaytest.com', endpoint.host)
self.assertEqual('some/path', endpoint.path)
def test_parse_file_with_multiple_finding(self):
testfile = open("unittests/scans/acunetix/many_findings.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(4, len(findings))
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("Medium", finding.severity)
self.assertEqual(datetime.date(2020, 2, 27), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertEqual("A single machine can take down another machine's web server with minimal bandwidth and side effects on unrelated services and ports.", finding.impact)
# check that this finding have references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('www.itsecgames.com', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
with self.subTest(i=1):
finding = findings[1]
self.assertEqual("Possible virtual host found", finding.title)
self.assertEqual("Low", finding.severity)
self.assertEqual(200, finding.cwe)
self.assertEqual(datetime.date(2020, 2, 27), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertEqual("Possible sensitive information disclosure.", finding.impact)
# check that this finding have references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('www.itsecgames.com', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
with self.subTest(i=2):
finding = findings[2]
self.assertEqual("Unencrypted connection (verified)", finding.title)
self.assertEqual("Low", finding.severity)
self.assertEqual(310, finding.cwe)
self.assertEqual(datetime.date(2020, 2, 27), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:N", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertEqual("Possible information disclosure.", finding.impact)
# check that this finding have no references
self.assertIsNone(finding.references)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('www.itsec<EMAIL>', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
def test_parse_file_with_example_com(self):
testfile = open("unittests/scans/acunetix/XML_http_example_co_id_.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(7, len(findings))
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("HTML form without CSRF protection", finding.title)
self.assertEqual("Medium", finding.severity)
self.assertEqual(datetime.date(2020, 4, 28), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:R/S:U/C:N/I:L/A:N", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertIn("An attacker could use CSRF to trick a victim into accessing a website hosted by the attacker,", finding.impact)
# aggregated
self.assertEqual(3, finding.nb_occurences)
# check that this finding have references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(3, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('example.co.id', endpoint.host)
self.assertEqual('h/search', endpoint.path)
endpoint = finding.unsaved_endpoints[1]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('example.co.id', endpoint.host)
self.assertEqual('m/zmain', endpoint.path)
# check req/resp
self.assertEqual(3, len(finding.unsaved_req_resp))
for req_resp in finding.unsaved_req_resp:
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
with self.subTest(i=6):
finding = findings[6]
self.assertEqual("Content Security Policy (CSP) not implemented", finding.title)
self.assertEqual("Info", finding.severity)
self.assertEqual(datetime.date(2020, 4, 28), finding.date)
self.assertIsNotNone(finding.description)
self.assertFalse(finding.false_p)
self.assertIn("CSP can be used to prevent and/or mitigate attacks that involve content/code injection,", finding.impact)
# check that this finding have references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('example.co.id', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
| StarcoderdataPython |
1625657 | # This module is used to map the old Python 2 names to the new names used in
# Python 3 for the pickle module. This needed to make pickle streams
# generated with Python 2 loadable by Python 3.
# This is a copy of lib2to3.fixes.fix_imports.MAPPING. We cannot import
# lib2to3 and use the mapping defined there, because lib2to3 uses pickle.
# Thus, this could cause the module to be imported recursively.
IMPORT_MAPPING = {
'__builtin__' : 'builtins',
'copy_reg': 'copyreg',
'Queue': 'queue',
'SocketServer': 'socketserver',
'ConfigParser': 'configparser',
'repr': 'reprlib',
'tkFileDialog': 'tkinter.filedialog',
'tkSimpleDialog': 'tkinter.simpledialog',
'tkColorChooser': 'tkinter.colorchooser',
'tkCommonDialog': 'tkinter.commondialog',
'Dialog': 'tkinter.dialog',
'Tkdnd': 'tkinter.dnd',
'tkFont': 'tkinter.font',
'tkMessageBox': 'tkinter.messagebox',
'ScrolledText': 'tkinter.scrolledtext',
'Tkconstants': 'tkinter.constants',
'Tix': 'tkinter.tix',
'ttk': 'tkinter.ttk',
'Tkinter': 'tkinter',
'markupbase': '_markupbase',
'_winreg': 'winreg',
'thread': '_thread',
'dummy_thread': '_dummy_thread',
'dbhash': 'dbm.bsd',
'dumbdbm': 'dbm.dumb',
'dbm': 'dbm.ndbm',
'gdbm': 'dbm.gnu',
'xmlrpclib': 'xmlrpc.client',
'SimpleXMLRPCServer': 'xmlrpc.server',
'httplib': 'http.client',
'htmlentitydefs' : 'html.entities',
'HTMLParser' : 'html.parser',
'Cookie': 'http.cookies',
'cookielib': 'http.cookiejar',
'BaseHTTPServer': 'http.server',
'test.test_support': 'test.support',
'commands': 'subprocess',
'urlparse' : 'urllib.parse',
'robotparser' : 'urllib.robotparser',
'urllib2': 'urllib.request',
'anydbm': 'dbm',
'_abcoll' : 'collections.abc',
}
# This contains rename rules that are easy to handle. We ignore the more
# complex stuff (e.g. mapping the names in the urllib and types modules).
# These rules should be run before import names are fixed.
NAME_MAPPING = {
('__builtin__', 'xrange'): ('builtins', 'range'),
('__builtin__', 'reduce'): ('functools', 'reduce'),
('__builtin__', 'intern'): ('sys', 'intern'),
('__builtin__', 'unichr'): ('builtins', 'chr'),
('__builtin__', 'unicode'): ('builtins', 'str'),
('__builtin__', 'long'): ('builtins', 'int'),
('itertools', 'izip'): ('builtins', 'zip'),
('itertools', 'imap'): ('builtins', 'map'),
('itertools', 'ifilter'): ('builtins', 'filter'),
('itertools', 'ifilterfalse'): ('itertools', 'filterfalse'),
('itertools', 'izip_longest'): ('itertools', 'zip_longest'),
('UserDict', 'IterableUserDict'): ('collections', 'UserDict'),
('UserList', 'UserList'): ('collections', 'UserList'),
('UserString', 'UserString'): ('collections', 'UserString'),
('whichdb', 'whichdb'): ('dbm', 'whichdb'),
('_socket', 'fromfd'): ('socket', 'fromfd'),
('_multiprocessing', 'Connection'): ('multiprocessing.connection', 'Connection'),
('multiprocessing.process', 'Process'): ('multiprocessing.context', 'Process'),
('multiprocessing.forking', 'Popen'): ('multiprocessing.popen_fork', 'Popen'),
('urllib', 'ContentTooShortError'): ('urllib.error', 'ContentTooShortError'),
('urllib', 'getproxies'): ('urllib.request', 'getproxies'),
('urllib', 'pathname2url'): ('urllib.request', 'pathname2url'),
('urllib', 'quote_plus'): ('urllib.parse', 'quote_plus'),
('urllib', 'quote'): ('urllib.parse', 'quote'),
('urllib', 'unquote_plus'): ('urllib.parse', 'unquote_plus'),
('urllib', 'unquote'): ('urllib.parse', 'unquote'),
('urllib', 'url2pathname'): ('urllib.request', 'url2pathname'),
('urllib', 'urlcleanup'): ('urllib.request', 'urlcleanup'),
('urllib', 'urlencode'): ('urllib.parse', 'urlencode'),
('urllib', 'urlopen'): ('urllib.request', 'urlopen'),
('urllib', 'urlretrieve'): ('urllib.request', 'urlretrieve'),
('urllib2', 'HTTPError'): ('urllib.error', 'HTTPError'),
('urllib2', 'URLError'): ('urllib.error', 'URLError'),
}
PYTHON2_EXCEPTIONS = (
"ArithmeticError",
"AssertionError",
"AttributeError",
"BaseException",
"BufferError",
"BytesWarning",
"DeprecationWarning",
"EOFError",
"EnvironmentError",
"Exception",
"FloatingPointError",
"FutureWarning",
"GeneratorExit",
"IOError",
"ImportError",
"ImportWarning",
"IndentationError",
"IndexError",
"KeyError",
"KeyboardInterrupt",
"LookupError",
"MemoryError",
"NameError",
"NotImplementedError",
"OSError",
"OverflowError",
"PendingDeprecationWarning",
"ReferenceError",
"RuntimeError",
"RuntimeWarning",
# StandardError is gone in Python 3, so we map it to Exception
"StopIteration",
"SyntaxError",
"SyntaxWarning",
"SystemError",
"SystemExit",
"TabError",
"TargetScopeError",
"TypeError",
"UnboundLocalError",
"UnicodeDecodeError",
"UnicodeEncodeError",
"UnicodeError",
"UnicodeTranslateError",
"UnicodeWarning",
"UserWarning",
"ValueError",
"Warning",
"ZeroDivisionError",
)
try:
TaskletExit
except NameError:
pass
else:
PYTHON2_EXCEPTIONS += ("TaskletExit",)
try:
WindowsError
except NameError:
pass
else:
PYTHON2_EXCEPTIONS += ("WindowsError",)
for excname in PYTHON2_EXCEPTIONS:
NAME_MAPPING[("exceptions", excname)] = ("builtins", excname)
MULTIPROCESSING_EXCEPTIONS = (
'AuthenticationError',
'BufferTooShort',
'ProcessError',
'TimeoutError',
)
for excname in MULTIPROCESSING_EXCEPTIONS:
NAME_MAPPING[("multiprocessing", excname)] = ("multiprocessing.context", excname)
# Same, but for 3.x to 2.x
REVERSE_IMPORT_MAPPING = dict((v, k) for (k, v) in IMPORT_MAPPING.items())
assert len(REVERSE_IMPORT_MAPPING) == len(IMPORT_MAPPING)
REVERSE_NAME_MAPPING = dict((v, k) for (k, v) in NAME_MAPPING.items())
assert len(REVERSE_NAME_MAPPING) == len(NAME_MAPPING)
# Non-mutual mappings.
IMPORT_MAPPING.update({
'cPickle': 'pickle',
'_elementtree': 'xml.etree.ElementTree',
'FileDialog': 'tkinter.filedialog',
'SimpleDialog': 'tkinter.simpledialog',
'DocXMLRPCServer': 'xmlrpc.server',
'SimpleHTTPServer': 'http.server',
'CGIHTTPServer': 'http.server',
# For compatibility with broken pickles saved in old Python 3 versions
'UserDict': 'collections',
'UserList': 'collections',
'UserString': 'collections',
'whichdb': 'dbm',
'StringIO': 'io',
'cStringIO': 'io',
})
REVERSE_IMPORT_MAPPING.update({
'_bz2': 'bz2',
'_dbm': 'dbm',
'_functools': 'functools',
'_gdbm': 'gdbm',
'_pickle': 'pickle',
})
NAME_MAPPING.update({
('__builtin__', 'basestring'): ('builtins', 'str'),
('exceptions', 'StandardError'): ('builtins', 'Exception'),
('UserDict', 'UserDict'): ('collections', 'UserDict'),
('socket', '_socketobject'): ('socket', 'SocketType'),
})
REVERSE_NAME_MAPPING.update({
('_functools', 'reduce'): ('__builtin__', 'reduce'),
('tkinter.filedialog', 'FileDialog'): ('FileDialog', 'FileDialog'),
('tkinter.filedialog', 'LoadFileDialog'): ('FileDialog', 'LoadFileDialog'),
('tkinter.filedialog', 'SaveFileDialog'): ('FileDialog', 'SaveFileDialog'),
('tkinter.simpledialog', 'SimpleDialog'): ('SimpleDialog', 'SimpleDialog'),
('xmlrpc.server', 'ServerHTMLDoc'): ('DocXMLRPCServer', 'ServerHTMLDoc'),
('xmlrpc.server', 'XMLRPCDocGenerator'):
('DocXMLRPCServer', 'XMLRPCDocGenerator'),
('xmlrpc.server', 'DocXMLRPCRequestHandler'):
('DocXMLRPCServer', 'DocXMLRPCRequestHandler'),
('xmlrpc.server', 'DocXMLRPCServer'):
('DocXMLRPCServer', 'DocXMLRPCServer'),
('xmlrpc.server', 'DocCGIXMLRPCRequestHandler'):
('DocXMLRPCServer', 'DocCGIXMLRPCRequestHandler'),
('http.server', 'SimpleHTTPRequestHandler'):
('SimpleHTTPServer', 'SimpleHTTPRequestHandler'),
('http.server', 'CGIHTTPRequestHandler'):
('CGIHTTPServer', 'CGIHTTPRequestHandler'),
('_socket', 'socket'): ('socket', '_socketobject'),
})
PYTHON3_OSERROR_EXCEPTIONS = (
'BrokenPipeError',
'ChildProcessError',
'ConnectionAbortedError',
'ConnectionError',
'ConnectionRefusedError',
'ConnectionResetError',
'FileExistsError',
'FileNotFoundError',
'InterruptedError',
'IsADirectoryError',
'NotADirectoryError',
'PermissionError',
'ProcessLookupError',
'TimeoutError',
)
for excname in PYTHON3_OSERROR_EXCEPTIONS:
REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'OSError')
PYTHON3_IMPORTERROR_EXCEPTIONS = (
'ModuleNotFoundError',
)
for excname in PYTHON3_IMPORTERROR_EXCEPTIONS:
REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'ImportError')
| StarcoderdataPython |
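For illustration, this is roughly how pickle's 2-to-3 compatibility lookup applies these tables when loading old protocol ≤ 2 streams with `fix_imports=True` (a simplified sketch, not the actual Unpickler code):

```python
def map_python2_global(module, name):
    if (module, name) in NAME_MAPPING:
        module, name = NAME_MAPPING[(module, name)]
    elif module in IMPORT_MAPPING:
        module = IMPORT_MAPPING[module]
    return module, name

assert map_python2_global("__builtin__", "unicode") == ("builtins", "str")
assert map_python2_global("cPickle", "Pickler") == ("pickle", "Pickler")
assert map_python2_global("exceptions", "StandardError") == ("builtins", "Exception")
```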
4813743 | <reponame>Shea192/pytorch-lightning<gh_stars>1-10
import torch
from pytorch_lightning import Trainer
from tests.base import EvalModelTemplate
import tests.base.utils as tutils
def test_training_epoch_end_metrics_collection(tmpdir):
""" Test that progress bar metrics also get collected at the end of an epoch. """
num_epochs = 3
class CurrentModel(EvalModelTemplate):
def training_step(self, *args, **kwargs):
output = super().training_step(*args, **kwargs)
output['progress_bar'].update({'step_metric': torch.tensor(-1)})
output['progress_bar'].update({'shared_metric': 100})
return output
def training_epoch_end(self, outputs):
epoch = self.current_epoch
# both scalar tensors and Python numbers are accepted
return {
'progress_bar': {
f'epoch_metric_{epoch}': torch.tensor(epoch), # add a new metric key every epoch
'shared_metric': 111,
}
}
model = CurrentModel(tutils.get_default_hparams())
trainer = Trainer(
max_epochs=num_epochs,
default_root_dir=tmpdir,
overfit_pct=0.1,
)
result = trainer.fit(model)
assert result == 1
metrics = trainer.progress_bar_dict
# metrics added in training step should be unchanged by epoch end method
assert metrics['step_metric'] == -1
# a metric shared in both methods gets overwritten by epoch_end
assert metrics['shared_metric'] == 111
# metrics are kept after each epoch
for i in range(num_epochs):
assert metrics[f'epoch_metric_{i}'] == i
| StarcoderdataPython |
25897 | <reponame>nvuillam/checkov<filename>tests/cloudformation/graph_builder/test_local_graph.py
import os
from unittest import TestCase
from checkov.cloudformation.graph_builder.graph_components.block_types import BlockType
from checkov.cloudformation.graph_builder.local_graph import CloudformationLocalGraph
from checkov.cloudformation.parser import parse
TEST_DIRNAME = os.path.dirname(os.path.realpath(__file__))
class TestLocalGraph(TestCase):
def test_build_graph_with_single_resource(self):
relative_file_path = "../checks/resource/aws/example_APIGatewayXray/APIGatewayXray-PASSED.yaml"
definitions = {}
file = os.path.realpath(os.path.join(TEST_DIRNAME, relative_file_path))
(definitions[relative_file_path], definitions_raw) = parse(file)
local_graph = CloudformationLocalGraph(definitions)
local_graph.build_graph(render_variables=False)
self.assertEqual(1, len(local_graph.vertices))
resource_vertex = local_graph.vertices[0]
self.assertEqual("AWS::ApiGateway::Stage.MyStage", resource_vertex.name)
self.assertEqual("AWS::ApiGateway::Stage.MyStage", resource_vertex.id)
self.assertEqual(BlockType.RESOURCE, resource_vertex.block_type)
self.assertEqual("CloudFormation", resource_vertex.source)
self.assertDictEqual(definitions[relative_file_path]["Resources"]["MyStage"]["Properties"], resource_vertex.attributes)
| StarcoderdataPython |
3293553 | # -*- coding: utf-8 -*-
from argh.decorators import arg
from lain_sdk.util import warn, info
from lain_cli.utils import get_version_lists, lain_yaml, check_phase, ClusterConfig
@arg('phase', help="lain cluster phase id, can be added by lain config save")
@arg('-r', '--registry', help='registry url')
def appversion(phase, registry=None):
"""
Show available app versions in remote registry of lain
"""
check_phase(phase)
params = dict(name=phase)
if registry is not None:
params['registry'] = registry
cluster_config = ClusterConfig(**params)
yml = lain_yaml(ignore_prepare=True)
version_list = get_version_lists(phase, yml.appname, ClusterConfig=cluster_config)
print_available_version(version_list)
def print_available_version(version_list):
if len(version_list) == 0:
warn("No available release versions.")
else:
info("Below are the available versions: ")
for version in version_list:
print(version)
| StarcoderdataPython |
3285051 | #!/usr/bin/env python
import sys
#from heapq import heappush, heappop, heapify
from random import randint, choice, seed
try:
import numpy
#import scipy.sparse.linalg as la
except ImportError:
print("numpy not found")
if sys.version_info.major>=3:
long = int
from bruhat.util import write
class Point(object):
"""
A Point is a vertex in a Graph (an undirected graph).
Each Point has a "desc", this is any distinguishing
characteristic (colour/type, etc.)
as respected by isomorphisms of Graph's.
The "desc" can be any string.
"""
def __init__(self, desc, idx, nbd=None, colour="", **kw):
self.desc = desc
self._colour = colour
self._desc = {} # cache get_desc
self.idx = idx
if nbd is None:
nbd = []
self.nbd = nbd
self.__dict__.update(kw)
def __str__(self):
return "Point(desc=%r, idx=%s, nbd=%s)"%(
self.desc, self.idx, [p.idx for p in self.nbd])
__repr__ = __str__
def get_colour(self):
return self._colour
def set_colour(self, colour):
self._desc = {} # clear cache
self._colour = colour
colour = property(get_colour, set_colour)
def get_desc(self, depth=1, source=None):
assert self.nbd is not None
assert depth>=0
assert depth<=1
#_desc = self._desc.get(depth)
#if _desc:
# return _desc
desc = self.desc+str(self._colour)
if depth==0:
#self._desc = desc
return desc
if source is None:
source = []
else:
assert self not in source
descs = [a.get_desc(depth-1, source+[self]) for a in self.nbd if a not in source]
descs.sort()
desc = "%s[%s]"%(desc, ' '.join(descs))
#self._desc = desc
return desc
#def __str__(self):
# return "Point(%s: %s)"%(self.desc, descs)
class Graph(object):
"""
Undirected graph.
"""
def __init__(self, points=[], **attrs):
self.__dict__.update(attrs)
self.descs = {} # cache, map point -> desc
self.deps = None # map point -> list of points
self.attrs = dict(attrs)
self.points = list(points)
for i, point in enumerate(points):
assert point.idx == i
def add(self, desc='', **kw):
"add a Point"
assert not self.descs
assert self.deps is None
i = len(self.points)
point = Point(desc, i, **kw)
self.points.append(point)
return point
def add_directed(self, pi, pj, desc='directed'):
"encode a directed edge using a path with two extra (coloured) Point's"
pa = self.add("%s_a"%desc)
pb = self.add("%s_b"%desc)
self.join(pi, pa)
self.join(pa, pb)
self.join(pb, pj)
def __str__(self):
return "Graph(%s)"%(self.points,)
def __len__(self):
return len(self.points)
def __getitem__(self, idx):
return self.points[idx]
def join(self, pi, pj):
points = self.points
if type(pi) in [int, long]:
pi = points[pi]
if type(pj) in [int, long]:
pj = points[pj]
if pi not in pj.nbd:
pj.nbd.append(pi)
if pj not in pi.nbd:
pi.nbd.append(pj)
@classmethod
def build(cls, Gx):
m, n = Gx.shape
points = []
for i in range(m):
g = Gx[i]
assert g.sum()==4
weights = []
for j in numpy.where(g)[0]:
weights.append(Gx[:, j].sum())
weights.sort()
desc = ''.join(str(w) for w in weights)
a = Point(desc, i)
points.append(a)
#print [a.desc for a in points]
for i in range(m):
g = Gx[i]
a = points[i]
for j in numpy.where(g)[0]:
for i1 in numpy.where(Gx[:, j])[0]:
if i1 != i:
a.nbd.append(points[i1])
return cls(points, m=m, n=n)
def map(self, fn):
points = [None]*len(self)
for p in self.points:
p = Point(p.desc, fn[p.idx])
points[p.idx] = p
for p in self.points:
for p1 in p.nbd:
points[fn[p.idx]].nbd.append(points[fn[p1.idx]]) # whoops.. tricky
return self.__class__(points, **self.attrs)
def get_desc(self, depth=1):
return [v.get_desc(depth) for v in self.points]
def get_stats(self, depth=1):
stats = {}
for point in self:
desc = point.get_desc(depth)
stats[desc] = stats.get(desc, 0) + 1
return stats
# ---------- HOTSPOT ----------------------------->
def get_orbits(self, depth=1):
orbits = {}
assert depth==1
if self.deps is None:
deps = {}
for p in self.points:
deps[p] = [p]+p.nbd # 1-neighbours
self.deps = deps
descs = self.descs
for p in self.points:
desc = descs.get(p)
if desc is None:
desc = p.get_desc(depth)
descs[p] = desc
orbit = orbits.setdefault(desc, [])
orbit.append(p)
return orbits # map desc -> list of points
def set_colour(self, p, colour=''):
if colour:
assert p.colour==''
else:
assert p.colour
p.colour = colour
for p in self.deps[p]:
self.descs[p] = None # clear cache
Bag = Graph # backwards compat
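# Minimal illustration (not from the original module) of how descriptors and
# orbits behave: vertices with identical depth-1 descriptors fall in the same
# orbit.  The 3-vertex path below is an assumed toy example.
def _demo_orbits():
    g = Graph()
    a = g.add("v")
    b = g.add("v")
    c = g.add("v")
    g.join(a, b)
    g.join(b, c)
    # endpoints a, c share the descriptor "v[v]"; the middle vertex b gets "v[v v]"
    return g.get_orbits(depth=1)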
class Tanner(Graph):
# This is the Tanner graph
@classmethod
def build(cls, Gx, Gz=None):
if Gz is not None:
return cls.build2(Gx, Gz)
m, n = Gx.shape
checks = [Point('c', i) for i in range(m)]
bits = [Point('b', i+m) for i in range(n)]
for i in range(m):
for j in range(n):
if Gx[i, j]==0:
continue
checks[i].nbd.append(bits[j])
bits[j].nbd.append(checks[i])
return cls(checks+bits, m=m, n=n)
@classmethod
def build2(cls, Gx, Gz):
# This is the Tanner graph
mx, n = Gx.shape
mz, n = Gz.shape
xchecks = [Point('x', i, row=i) for i in range(mx)]
zchecks = [Point('z', i+mx, row=i) for i in range(mz)]
bits = [Point('b', i+mx+mz, row=i) for i in range(n)]
for i in range(mx):
for j in range(n):
if Gx[i, j]==0:
continue
xchecks[i].nbd.append(bits[j])
bits[j].nbd.append(xchecks[i])
for i in range(mz):
for j in range(n):
if Gz[i, j]==0:
continue
zchecks[i].nbd.append(bits[j])
bits[j].nbd.append(zchecks[i])
return cls(xchecks+zchecks+bits, mx=mx, mz=mz, n=n)
def shortstr(self):
m, n = self.m, self.n
rows = []
for i in range(m): # checks
row = ['.']*n
p = self.points[i]
for p1 in p.nbd:
row[p1.idx-m] = '1'
row = ''.join(row)
rows.append(row)
return '\n'.join(rows)
def from_sparse_ham(n, H):
points = []
for i in range(n):
p = Point('(%s)'%H[i, i], i)
points.append(p)
for i, j in H.keys():
if i!=j:
points[i].nbd.append(points[j])
graph = Graph(points)
return graph
def from_ham(H, syndromes=None):
if syndromes is not None:
return from_ham_syndromes(H, syndromes) # <------ return
n = len(H)
points = []
for i in range(n):
p = Point('(%s)'%H[i, i], i)
points.append(p)
for i in range(n):
for j in range(n):
if i==j:
continue
if H[i, j]:
points[i].nbd.append(points[j])
graph = Graph(points)
return graph
def from_ham_syndromes(H, syndromes):
n = len(H) # dimension of state space
assert len(syndromes)==n # one syndrome for each basis vector
m = len(syndromes[0]) # each syndrome has m check values
points = []
for i in range(n):
p = Point('(%s)'%H[i, i], i)
points.append(p)
checks = []
for i in range(m):
c = Point('c', n+i)
checks.append(c)
for i in range(n):
for j in range(n):
if i==j:
continue
if H[i, j]:
points[i].nbd.append(points[j])
for j in range(m):
if syndromes[i][j]:
points[i].nbd.append(checks[j])
checks[j].nbd.append(points[i])
graph = Graph(points+checks)
return graph
def get_perm(m, n, fn):
U = numpy.zeros((m, m), dtype=int)
for i in range(m):
j = fn[i]
U[i, j] = 1
V = numpy.zeros((n, n), dtype=int)
for i in range(n):
j = fn[i+m]-m
V[j, i] = 1
return U, V
def search_recursive(graph0, graph1, fn=None, depth=1):
assert depth>0
if fn is None:
fn = {}
if len(graph0)!=len(graph1):
return
assert graph0 is not graph1
orbits0 = graph0.get_orbits(depth)
orbits1 = graph1.get_orbits(depth)
if len(orbits0) != len(orbits1):
return
keys0 = list(orbits0.keys())
keys1 = list(orbits1.keys())
keys0.sort()
keys1.sort()
if keys0 != keys1:
return
idx = len(fn)
# choose any uncoloured graph0 point
p = graph0.points[idx]
assert p.colour == ''
key = p.get_desc(depth)
orbit = orbits1[key]
#p.colour = str(idx)
graph0.set_colour(p, str(idx))
# go through each candidate in graph1
for p1 in orbit:
assert p1.colour == ''
#p1.colour = str(idx)
graph1.set_colour(p1, str(idx))
assert fn.get(idx) is None
fn[idx] = p1.idx
if len(fn) == len(graph0):
yield dict(fn)
else:
for _fn in search_recursive(graph0, graph1, fn, depth):
yield _fn
del fn[idx]
assert len(fn) == idx
#p1.colour = ''
graph1.set_colour(p1)
#p.colour = ''
graph0.set_colour(p, '')
class Backtrack(Exception):
pass
class State(object):
def __init__(self, graph0, graph1, depth=1):
orbits0 = graph0.get_orbits(depth) # map: desc -> list of points
orbits1 = graph1.get_orbits(depth) # map: desc -> list of points
if len(orbits0) != len(orbits1):
raise Backtrack() # <-------------- raise
keys0 = list(orbits0.keys())
keys1 = list(orbits1.keys())
keys0.sort()
keys1.sort()
if keys0 != keys1:
raise Backtrack() # <-------------- raise
self.graphs = graph0, graph1
self.orbitss = orbits0, orbits1
self.keyss = keys0, keys1
self.idx0 = None
self.depth = depth
def choose(self, idx0):
assert self.idx0 is None
assert idx0 is not None
graph0, graph1 = self.graphs
p0 = graph0.points[idx0]
assert p0.colour == ''
key0 = p0.get_desc(self.depth)
self.orbit1 = self.orbitss[1][key0]
assert self.orbit1 # otherwise: wtf?
self.idx0 = idx0 # source index: this is constant
self.idx1 = 0 # search target index
self.p0 = p0
self.p1 = None
    def choose_best(self):
        # Unused alternative to choose(): pick the first point from the smallest orbit.
        orbits0 = self.orbitss[0]
        items = sorted(orbits0.items(), key=lambda item: len(item[1]))
        p = items[0][1][0] # first guy in smallest orbit
        self.choose(p.idx)
        return p.idx
def do(self, fn):
graph0, graph1 = self.graphs
# make assignment: idx0 -> idx1
p0 = self.p0
#assert p0.colour == ''
#p0.colour = str(self.idx0)
graph0.set_colour(p0, str(self.idx0))
p1 = self.orbit1[self.idx1]
#assert p1.colour == ''
#p1.colour = str(self.idx0)
graph1.set_colour(p1, str(self.idx0))
assert fn.get(self.idx0) is None
fn[self.idx0] = p1.idx
assert self.p1 is None
self.p1 = p1
def undo(self, fn):
graph0, graph1 = self.graphs
# undo assignment
del fn[self.idx0]
assert self.p1 is not None
p0 = self.p0
p1 = self.p1
assert p1.colour==str(self.idx0)
assert p0.colour==str(self.idx0)
#p0.colour = ''
#p1.colour = ''
graph0.set_colour(p0)
graph1.set_colour(p1)
self.p1 = None
def next(self):
assert self.p1 is None
self.idx1 += 1
if self.idx1 >= len(self.orbit1):
raise Backtrack() # <-------------- raise
def search(graph0, graph1, depth=1, fn=None, verbose=False):
# return dict: int --> int
assert graph0 is not graph1
if len(graph0) != len(graph1):
return
# doesn't help any:
#if graph0.get_stats() != graph1.get_stats():
# return
if fn is None:
fn = {}
remain = range(len(graph0))
orbits = graph0.get_orbits(depth)
graph1.get_orbits()
keys = list(orbits.keys())
keys.sort(key = lambda key : len(orbits[key]))
remain = []
for key in keys:
for p in orbits[key]:
if p.idx not in fn:
remain.append(p.idx)
#for idx in fn.keys():
# remain.remove(idx)
remain.sort()
for idx in fn:
graph0.set_colour(graph0[idx], str(idx))
graph1.set_colour(graph1[fn[idx]], str(idx))
try:
state = State(graph0, graph1, depth)
except Backtrack:
return
idx = remain.pop(0)
state.choose(idx)
#idx = remain.pop(randint(0, len(remain)-1))
#state.choose(idx)
#idx = state.choose_best()
#remain.remove(idx)
stack = [state]
while stack:
if verbose:
print( "SEARCH", len(stack))
for idx in remain:
assert fn.get(idx) is None
assert len(remain)+len(fn)+1==len(graph0)
state = stack[-1]
state.do(fn)
assert len(remain)+len(fn)==len(graph0)
if verbose:
print( fn)
if len(fn) == len(graph0):
if verbose:
print( "FOUND")
yield dict(fn)
else:
# try to add another state
try:
_state = State(graph0, graph1, depth)
#idx = remain.pop(randint(0, len(remain)-1))
idx = remain.pop(0)
_state.choose(idx)
#idx = _state.choose_best()
#remain.remove(idx)
stack.append(_state)
if verbose:
print( "PUSH")
continue
except Backtrack:
if verbose:
print( "BACK")
# the above do() doesn't work
pass
# next
while stack:
state = stack[-1]
if verbose:
print( "UNDO")
assert len(remain)+len(fn)==len(graph0)
state.undo(fn)
assert len(remain)+len(fn)+1==len(graph0)
try:
if verbose:
print( "NEXT")
state.next()
break # ok, finished backtracking
except Backtrack:
if verbose:
print( "POP")
state = stack.pop() # discard this guy
#remain.append(state.idx0)
remain.insert(0, state.idx0)
def all_autos(Gx):
#Gx = parse(gcolor_gauge)
m, n = Gx.shape
graph0 = Tanner.build(Gx)
graph1 = Tanner.build(Gx)
for fn in search(graph0, graph1):
U, V = get_perm(m, n, fn)
yield U, V
def peterson_graph():
inside = [Point('', i) for i in range(5)]
outside = [Point('', i+5) for i in range(5)]
graph = Graph(inside+outside)
for i in range(5):
graph.join(i, (i+2)%5)
graph.join(i, (i+3)%5)
graph.join(i, i+5)
if i<4:
graph.join(i+5, i+6)
else:
graph.join(i+5, i+1)
return graph
def cyclic_graph():
n = 5
points = [Point('', i) for i in range(n)]
graph = Graph(points)
# for i in range(n):
# points[i].nbd.append(points[(i+1)%n])
# points[(i+1)%n].nbd.append(points[i])
for i in range(n):
graph.add_directed(points[i], points[(i+1)%n])
return graph
gcolor_gauge = """
1111...........
11..11.........
1.1.1.1........
..11..11.......
.1.1.1.1.......
....1111.......
11......11.....
1.1.....1.1....
........1111...
..11......11...
.1.1.....1.1...
1...1...1...1..
........11..11.
.1...1...1...1.
....11......11.
........1.1.1.1
..1...1...1...1
....1.1.....1.1
"""
gcolor_stab = """
11111111.......
1111....1111...
11..11..11..11.
1.1.1.1.1.1.1.1
"""
cube_ham = """
6111....
14..11..
1.4.1.1.
1..4.11.
.11.2..1
.1.1.2.1
..11..21
....1110
"""
def parse(s):
s = s.replace('.', '0')
lines = s.split()
lines = [l.strip() for l in lines if l.strip()]
rows = [list(int(c) for c in l) for l in lines]
if rows:
n = len(rows[0])
for row in rows:
assert len(row)==n, "rows have varying lengths"
a = numpy.array(rows, dtype=numpy.int32)
return a
def test():
# Find rotation symmetry of the code. It's S_4 with order 24.
Gx = parse(gcolor_gauge)
m, n = Gx.shape
graph0 = Tanner.build(Gx)
graph1 = Tanner.build(Gx)
#global search
#search = search_recursive
count = 0
for fn in search(graph0, graph1):
#print "iso", fn
graph = graph0.map(fn)
#print graph.shortstr()
U, V = get_perm(m, n, fn)
Gx1 = numpy.dot(U, numpy.dot(Gx, V))
assert numpy.abs(Gx-Gx1).sum()==0
count += 1
#print "count:", count
assert count == 24
# S_3 symmetry of cubical hamiltonian
depth = 1
H = parse(cube_ham)
graph0 = from_ham(H)
graph1 = from_ham(H)
count = 0
for fn in search(graph0, graph1, depth=depth):
count += 1
assert count == 6
graph0 = peterson_graph()
graph1 = peterson_graph()
assert len(list(search(graph0, graph1, depth=1))) == 120
# directed graph
graph0 = cyclic_graph()
graph1 = cyclic_graph()
assert len(list(search(graph0, graph1))) == 5
#for f in (search(graph0, graph1)):
# print(f)
from bruhat.argv import argv
if __name__ == "__main__":
if argv.profile:
import cProfile as profile
profile.run("test()")
else:
test()
print( "OK")
| StarcoderdataPython |
89200 | <reponame>subaruclover/apis-fixed<filename>plots_fixed.py<gh_stars>0
"""
Show the results of different acc with plots
input data files: .csv files with one week data from sample data
House ID: E001 ~ E004
created by Qiong
TODO: plot the sample data (from apis-emulator/data/input/Sample) and compare it with our acc data (try: compare p2)
"""
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
sns.set(style="white")
import pandas as pd
import os
import global_var as gl
inputFile = "sample_acc_60.csv"
inputData = pd.read_csv(inputFile)
memory = inputData.to_numpy()
# calculate the coefficient w.r.t gl.acc
filename = os.path.splitext(inputFile)[0]
check_acc = filename.split("_")[2]
coeff = int(60 / gl.acc)
if int(check_acc) == gl.acc:
print("acc correctly received")
# PLOT Houses data
    # Rows in the log are interleaved by house: every 4th row belongs to E001;
    # offsets 1, 2, 3 select E002, E003 and E004.
    rows_e001 = list(range(0, 10000, 4))
rows_e002 = [x + 1 for x in rows_e001]
rows_e003 = [x + 2 for x in rows_e001]
rows_e004 = [x + 3 for x in rows_e001]
pvc_e001 = memory[rows_e001, 0]
load_e001 = memory[rows_e001, 1]
p2_e001 = memory[rows_e001, 2]
rsoc_e001 = memory[rows_e001, 3]
pvc_e002 = memory[rows_e002, 0]
load_e002 = memory[rows_e002, 1]
p2_e002 = memory[rows_e002, 2]
rsoc_e002 = memory[rows_e002, 3]
pvc_e003 = memory[rows_e003, 0]
load_e003 = memory[rows_e003, 1]
p2_e003 = memory[rows_e003, 2]
rsoc_e003 = memory[rows_e003, 3]
pvc_e004 = memory[rows_e004, 0]
load_e004 = memory[rows_e004, 1]
p2_e004 = memory[rows_e004, 2]
rsoc_e004 = memory[rows_e004, 3]
"""
Plot data
"""
# fig, axs = plt.subplots(2, 2, figsize=(12, 12))
fig, (ax0, ax1, ax2, ax3) = plt.subplots(4, 1, figsize=(12, 12))
ax0_2 = ax0.twinx()
ax1_2 = ax1.twinx()
ax2_2 = ax2.twinx()
ax3_2 = ax3.twinx()
fig.suptitle("The default scenario, E001-E004, acc=%i" % gl.acc)
pvc_e001_plot = ax0.plot(pvc_e001[:24 * 7 * coeff], 'm*-', label="PV E001")
load_e001_plot = ax0.plot(load_e001[:24 * 7 * coeff], 'y--', label="Load E001")
p2_e001_plot = ax0.plot(p2_e001[:24 * 7 * coeff], 'b', label="p2 E001")
rsoc_e001_plot = ax0_2.plot(rsoc_e001[:24 * 7 * coeff], 'g', label="RSOC E001")
# ticks = np.arange(0, 24*7*coeff, 24*coeff)
ax0_ticks = ax0.set_xticks(np.linspace(0, 24*7*coeff, 8, endpoint=True))
hours = np.round(np.linspace(0, 24*7*coeff, 8, endpoint=True) / coeff).astype(int)
label = []
for i in range(len(hours)):
label.append(str(hours[i])) # ['0', '24', '48', '72', '96', '120', '144', '168']
ax0_labels = ax0.set_xticklabels(label)
# ax0.set_xlabel("Hour")
ax0.set_ylabel("Power (W)")
ax0_2.set_ylabel(" % ")
plots_e001 = pvc_e001_plot + load_e001_plot + p2_e001_plot + rsoc_e001_plot
labels_e001 = [plot.get_label() for plot in plots_e001]
ax0.legend(plots_e001, labels_e001, loc='upper left')
pvc_e002_plot = ax1.plot(pvc_e002[:24 * 7 * coeff], 'm*-', label="PV E002")
load_e002_plot = ax1.plot(load_e002[:24 * 7 * coeff], 'y--', label="Load E002")
p2_e002_plot = ax1.plot(p2_e002[:24 * 7 * coeff], 'b', label="p2 E002")
rsoc_e002_plot = ax1_2.plot(rsoc_e002[:24 * 7 * coeff], 'g', label="RSOC E002")
ax1_ticks = ax1.set_xticks(np.linspace(0, 24*7*coeff, 8, endpoint=True))
ax1_labels = ax1.set_xticklabels(label)
# ax1.set_xlabel("Hour")
ax1.set_ylabel("Power (W)")
ax1_2.set_ylabel(" % ")
plots_e002 = pvc_e002_plot + load_e002_plot + p2_e002_plot + rsoc_e002_plot
labels_e002 = [plot.get_label() for plot in plots_e002]
ax1.legend(plots_e002, labels_e002, loc='upper left')
pvc_e003_plot = ax2.plot(pvc_e003[:24 * 7 * coeff], 'm*-', label="PV E003")
load_e003_plot = ax2.plot(load_e003[:24 * 7 * coeff], 'y--', label="Load E003")
p2_e003_plot = ax2.plot(p2_e003[:24 * 7 * coeff], 'b', label="p2 E003")
rsoc_e003_plot = ax2_2.plot(rsoc_e003[:24 * 7 * coeff], 'g', label="RSOC E003")
ax2_ticks = ax2.set_xticks(np.linspace(0, 24*7*coeff, 8, endpoint=True))
ax2_labels = ax2.set_xticklabels(label)
# ax2.set_xlabel("Hour")
ax2.set_ylabel("Power (W)")
ax2_2.set_ylabel(" % ")
plots_e003 = pvc_e003_plot + load_e003_plot + p2_e003_plot + rsoc_e003_plot
labels_e003 = [plot.get_label() for plot in plots_e003]
ax2.legend(plots_e003, labels_e003, loc='upper left')
pvc_e004_plot = ax3.plot(pvc_e004[:24 * 7 * coeff], 'm*-', label="PV E004")
load_e004_plot = ax3.plot(load_e004[:24 * 7 * coeff], 'y--', label="Load E004")
p2_e004_plot = ax3.plot(p2_e004[:24 * 7 * coeff], 'b', label="p2 E004")
rsoc_e004_plot = ax3_2.plot(rsoc_e004[:24 * 7 * coeff], 'g', label="RSOC E004")
ax3_ticks = ax3.set_xticks(np.linspace(0, 24*7*coeff, 8, endpoint=True))
ax3_labels = ax3.set_xticklabels(label)
ax3.set_xlabel("Hour")
ax3.set_ylabel("Power (W)")
ax3_2.set_ylabel(" % ")
plots_e004 = pvc_e004_plot + load_e004_plot + p2_e004_plot + rsoc_e004_plot
labels_e004 = [plot.get_label() for plot in plots_e004]
ax3.legend(plots_e004, labels_e004, loc='upper left')
plt.show()
else:
print("check acc value and try again")
# Compare MSE (or sth. likely) to check different acc
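# Sketch for the comparison flagged above (assumes a second log such as
# "sample_acc_30.csv" with the same column layout; the file names, house offset
# and p2 column index are placeholders).  Both p2 series are downsampled to an
# hourly grid before computing the MSE.
def compare_p2_mse(file_a="sample_acc_60.csv", acc_a=60,
                   file_b="sample_acc_30.csv", acc_b=30, house_offset=0):
    a = pd.read_csv(file_a).to_numpy()
    b = pd.read_csv(file_b).to_numpy()
    p2_a = a[house_offset::4, 2][::60 // acc_a]   # hourly p2 for one house
    p2_b = b[house_offset::4, 2][::60 // acc_b]
    n = min(len(p2_a), len(p2_b))
    return np.mean((p2_a[:n] - p2_b[:n]) ** 2)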
| StarcoderdataPython |
1622599 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import glob, csv, librosa, os, subprocess, time
import numpy as np
import pandas as pd
import data_vn
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
__author__ = '<EMAIL>'
# data path
data_path = "asset/data/"
#
# process Vivos corpus
#
def process_vivos(csv_file, category):
parent_path = data_path + 'vivos/'
labels, wave_files = [], []
# create csv writer
writer = csv.writer(csv_file, delimiter=',')
# read label-info
content_filename = parent_path + category + '/prompts.txt'
label_info = pd.read_table(content_filename, usecols=['ID'], index_col=False, delim_whitespace=True)
# print(label_info) # testpoint: label_info
# read file IDs
# file_ids = []
# for uid in label_info.ID.values:
# print(uid) # testpoint: uid
# folder_path, filename = uid.split("_")
# for d in [parent_path + category + '/waves/%s' % folder_path]:
# print(d) # testpoint: folder_path
# a = glob.glob(d + '*.txt')
# print(a)
# b = sorted(glob.glob(d + '*.txt'))
# print(b)
# for f in sorted(glob.glob(d + '*.txt')):
# # print(f[-12:-4])
# file_ids.extend([f[-12:-4]])
# # print(file_ids)
file_ids = label_info.ID
# print(file_ids) # testpoint: file_ID
# preprocess
content_ = open(content_filename, 'r')
title_content = content_.readline()
# print(title_content) # Result: 'ID\t\tContent\n'
for i, f in enumerate(file_ids):
# wave file name
wave_file = parent_path + category + '/waves/%s/' % f[0:10] + f + '.wav'
# print(wave_file)
fn = wave_file.split('/')[-1]
# print(fn)
target_filename = 'asset/data/preprocess_vn/mfcc/' + fn + '.npy'
# print(target_filename)
if os.path.exists(target_filename):
continue
print("Vivos corpus preprocessing (%d/%d) - ['%s']" % (i, len(file_ids), wave_file))
# load wave file
wave, sr = librosa.load(wave_file, sr=16000, mono=True) # default: sr=22050Hz
# re-sample (48K --> 16K)
# wave = wave[::3]
# get mfcc feature
mfcc = librosa.feature.mfcc(wave, sr=16000)
# get label index
curr_content = content_.readline()
curr_content = curr_content[(len(fn)-3):(len(curr_content))]
print(curr_content)
label = data_vn.str2index(curr_content)
# save result (exclude small mfcc data to prevent CTC loss)
if len(label) < mfcc.shape[1]:
# save meta info
writer.writerow([fn] + label)
# save mfcc
np.save(target_filename, mfcc, allow_pickle=False)
# check saved features
print(data_vn.index2str(label), '\n')
# delay for observation and analysis
# time.sleep(10)
#
# Create directories
#
if not os.path.exists('asset/data/preprocess_vn'):
os.makedirs('asset/data/preprocess_vn')
if not os.path.exists('asset/data/preprocess_vn/meta'):
os.makedirs('asset/data/preprocess_vn/meta')
if not os.path.exists('asset/data/preprocess_vn/mfcc'):
os.makedirs('asset/data/preprocess_vn/mfcc')
#
# Run pre-processing for training
#
# Vivos corpus for training
csv_file_train = open('asset/data/preprocess_vn/meta/train.csv', 'w')
process_vivos(csv_file_train, 'train')
csv_file_train.close()
#
# Run pre-processing for testing
#
# Vivos corpus for test
csv_file_test = open('asset/data/preprocess_vn/meta/test.csv', 'w')
process_vivos(csv_file_test, 'test')
csv_file_test.close()
| StarcoderdataPython |
164741 | from functools import partial
from . import utils
import numpy as np
import jax.numpy as jnp
import jax.random as random
from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian
import jax  # jax.value_and_grad is used in the update functions below
#class Lattice(seed, cell_params, sim_params,
def random_c0(subkeys, odds_c, n):
"""Make random initial conditions given odds ratio of cell types."""
n_ctypes = len(odds_c)
n_c = (n * odds_c / odds_c.sum()).astype(int)
n_c = n_c.at[0].add(n - n_c.sum())
c0 = jnp.repeat(jnp.arange(n_ctypes), n_c)
nmap = np.ndim(subkeys) - 1
fun = lambda sk: random.permutation(sk, c0)
for _ in range(nmap):
fun = vmap(fun)
return n_c, fun(subkeys)
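# Small usage sketch (illustrative values): draw 8 independent random initial
# conditions for a 64-cell system with two cell types at 50:50 odds.
def _demo_random_c0():
    key = random.PRNGKey(0)
    subkeys = random.split(key, 8)          # one subkey per replicate
    n_c, c0 = random_c0(subkeys, jnp.array([1.0, 1.0]), 64)
    return n_c, c0                          # c0 has shape (8, 64)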
@jit
def dE_swap(ij, c, W, AL):
"""
Energy differential after swapping cells i and j.
Depends only on i, j, and their neighbors
"""
new_c = c.at[ij].set(c[ij[::-1]])
E_local = -W[ c[ij, None], c[AL[ij]]].sum()
E_local_swap = -W[new_c[ij, None], new_c[AL[ij]]].sum()
return E_local_swap - E_local
@jit
def quadratic_form(a, G):
"""Quadratic form of column vector `a` induced by matrix `G`"""
return a.T @ G @ a
@jit
def P_swap(dE, beta):
"""
Probability of a swap between cells i and j. Symmetric w.r.t. i and j.
"""
# Glauber dynamics probability
# return 1 / (1 + jnp.exp(beta * dE))
# Metropolis acceptance probability
return jnp.minimum(1., jnp.exp(-beta * dE))
@jit
def swap_ij(c, ij):
"""Swaps cells i and j in the cell type state vector `c`. """
cji = c[ij][::-1]
return c.at[ij].set(cji)
@jit
def accept_swap(c, P, ij):
"""
Returns cell state and log-probability after swapping i <--> j
"""
    return swap_ij(c, ij), jnp.log(P)
@jit
def reject_swap(c, P, ij):
"""
Returns cell state and log-probability after rejecting i <--> j
"""
return c, jnp.log(1 - P)
@jit
def make_swap(c, P, ij, accept):
"""
Returns cell state vector and log-probability of event
after an accepted/rejected swap of cells `i` and `j`.
"""
return lax.cond(accept, accept_swap, reject_swap, c, P, ij)
@jit
def get_random_pair(key, AL):
"""Returns indices of a pair of adjacent cells"""
i, Aj = random.randint(
key=key, shape=(2,), minval=jnp.array([0, 0]), maxval=jnp.array(AL.shape)
)
j = AL[i, Aj]
return jnp.array([i, j])
@jit
def take_MC_step(key, c, beta, W, AL, n):
"""
Randomly selects a swap between adjacent cells and accepts/rejects.
Acceptance is based on Metropolis algorithm.
"""
key, sk1, sk2 = random.split(key, 3)
# Pick random interface and acceptance threshold
ij = get_random_pair(sk1, AL)
thresh = random.uniform(key=sk2)
# Take a Metropolis step
dE = dE_swap(ij, c, W, AL)
P = P_swap(dE, beta)
accept = P > thresh
    new_c, _log_P = make_swap(c, P, ij, accept)
expected_dE = P * dE
return key, new_c, expected_dE
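# Usage sketch (toy geometry, not from the original code): one Metropolis step
# on a 6-cell ring with two cell types and an identity affinity matrix W.
# Each row of the adjacency list AL holds a cell's neighbours.
def _demo_take_MC_step():
    AL = jnp.array([[1, 5], [2, 0], [3, 1], [4, 2], [5, 3], [0, 4]])  # 6-cycle
    c = jnp.array([0, 0, 0, 1, 1, 1])
    W = jnp.eye(2)
    key = random.PRNGKey(0)
    key, new_c, expected_dE = take_MC_step(key, c, 1.0, W, AL, 6)
    return new_c, expected_dE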
@jit
def propose_swap(key, c, beta, W, AL):
"""
"""
ij = get_random_pair(key, AL)
c_swap = swap_ij(c, ij)
dE = dE_swap(ij, c, W, AL)
P = P_swap(dE, beta)
return ij, c_swap, dE, P
@jit
def local_alignment(c, A, k, I, O):
s = I[c] @ O
s_swap = I[c_swap] @ O
m_diff_nb = (A_k * diff_nb) @ s / n_diff_nb
@jit
def local_alignment_change(ij, c, c_swap, AL, k, I, O):
A_k = get_knn_adjacency_matrix(AL, k)
# cells that are neighbors (within k radii) of
# `i` but not `j` and vice-versa - i.e. different neighbors
diff_nb = jnp.expand_dims(jnp.logical_xor(*A_k[ij]), 1)
n_diff_nb = 4 * k + 2
s = I[c] @ O
s_swap = I[c_swap] @ O
m_diff_nb = (A_k * diff_nb) @ s / n_diff_nb
m_diff_nb_swap = (A_k * diff_nb) @ s_swap / n_diff_nb
return ((m_diff_nb_swap ** 2) - (m_diff_nb ** 2)).sum()
mapped_local_alignment_change = vmap(
local_alignment_change, in_axes=(None, None, None, None, 0, None, None)
)
#@jit
def take_MC_step2(args, step):
"""
Randomly selects a swap between adjacent cells and accepts/rejects.
Acceptance is based on Metropolis algorithm.
"""
key, c_t, beta_t, W, AL, *align_args = args
c = c_t[step]
beta = beta_t[step]
new_key, sk1, sk2 = random.split(key, 3)
# Propose a random swap
ij, c_swap, dE, P = propose_swap(sk1, c, beta, W, AL)
expected_d_eta = P * mapped_local_alignment_change(
ij, c, c_swap, AL, *align_args
).mean()
# Accept/reject
thresh = random.uniform(key=sk2)
do_swap = P > thresh
new_c = lax.cond(do_swap, lambda: c_swap, lambda: c)
return (
new_key, c_t.at[step + 1].set(new_c), beta_t, W, AL, *align_args
), expected_d_eta
@partial(jit, static_argnums=(2, 3, 4))
def simulate(theta, args, nsweeps, n, n_ctypes):
key, c, t, _, *more_args = args
beta_t = jnp.power(10., -utils.map_linear(t, theta[0], theta[1]))
W = jnp.eye(n_ctypes) * theta[2]
new_args, expected_d_etas = lax.scan(
take_MC_step2,
(key, c, beta_t, W, *more_args),
jnp.repeat(jnp.arange(nsweeps), n),
)
return new_args, expected_d_etas
@partial(jit, static_argnums=(2, 3, 4))
def simulate_loss(theta, args, nsweeps, n, n_ctypes):
return simulate(theta, args, nsweeps, n, n_ctypes)[1].mean()
@partial(jit, static_argnums=(2, 3, 4))
def update(theta, args, nsweeps, n, n_ctypes, lr):
    """Performs one gradient-descent update step on theta."""
    # Compute the loss and its gradient through the differentiable simulation
    loss, grads = jax.value_and_grad(simulate_loss)(theta, args, nsweeps, n, n_ctypes)
    new_theta = theta - grads * lr
    return new_theta, loss, grads
@partial(jit, static_argnums=3)
def update_toy(T, key, l, nt, lr_toy):
"""Performs one update step on T."""
# Compute the gradients on replicates
loss, grads = jax.value_and_grad(
simulate_loss,
)(T, key, l, nt)
new_T = T - grads * lr_toy
return new_T, loss, grads
@jit
def MC_iteration(step, args):
key, c, *extra = args
key, c, expected_dE = take_MC_step(*args)
return key, c, *extra
@jit
def MC_sweep(key, c, beta, W, AL, n):
args = (key, c, beta, W, AL, n)
return lax.fori_loop(0, n, MC_iteration, args)
@jit
def n_cmatch_t(c_t, AL):
"""Returns number of homotypic interfaces at each time-point."""
return cmatch_t(c_t, c_t[:, AL]).sum(axis=(1, 2)) // 2
@jit
def get_E_cell(c, W):
return W[c[:, None], c[AL]].mean(axis=1)
#### sorting metrics
def get_identity(n_ctypes):
"""Returns the (n_ctypes, n_ctypes) identity matrix."""
return jnp.eye(n_ctypes, dtype=int)
def get_difference_matrix(n_ctypes):
"""
Returns a (n_ctypes, n_ctypes - 1) matrix `O` with -1 on the principal
diagonal and 1 elsewhere. `O @ u` thus computes a difference on the
components of `u`.
"""
return 1 - 2 * jnp.eye(n_ctypes, n_ctypes - 1, dtype=int)
@jit
def get_num_neighbors(k):
return 1 + 3 * k * (k + 1)
@jit
def pow_matrix(A, k):
return lax.fori_loop(1, k, lambda i, M: jnp.matmul(M, A), A)
@jit
def get_knn_adjacency_matrix(AL, k):
n, nnb = AL.shape
diag_true = jnp.diag(jnp.ones(n, dtype=bool))
A = adjacency_matrix_from_adjacency_list(AL, dtype=bool)
A = A | diag_true
A = pow_matrix(A, k)
return A
equal_vec_scalar = vmap(lambda a, b: a == b, (0, None))
equal_outer_1d_1d = vmap(equal_vec_scalar, (None, 0))
equal_outer_1d_2d = vmap(equal_outer_1d_1d, (None, 0))
equal_outer_2d_1d = vmap(equal_outer_1d_1d, (0, None))
mult_vec_scalar = vmap(lambda a, b: a * b, (0, None))
mult_outer_1d_1d = vmap(mult_vec_scalar, (None, 0))
mult_outer_1d_2d = vmap(mult_outer_1d_1d, (None, 0))
mult_outer_2d_1d = vmap(mult_outer_1d_1d, (0, None))
@jit
def local_spin(c, AL, k):
"""
"""
A_k = get_knn_adjacency_matrix(AL, k)
nnb = get_num_neighbors(k)
s_i = jnp.array([-1, 1])[c]
return A_k @ s_i / nnb
@jit
def knn_alignment_per_cell(c, AL, k, I, O):
"""
Return alignment of cell types `c` in local neighborhoods.
`c` is the cell type vector of shape `(n,)` with dtype `int`
`A` is the `(n, n)`cell-cell adjacency matrix (can be Boolean)
`I` is the `(n_ctypes, n_ctypes)` identity matrix, where `n_ctypes`
is the number of cell types in the tissue.
`O` is the `(n_ctypes, n_ctypes - 1)` difference matrix with `-1` on
the principal diagonal and `1` elsewhere. `I[c] @ O` converts cell
types (non-negative `int`) to spins (difference vectors). The sum
of spin vector components lies in [-1, 1].
`nnb` is the number of neighbors in the (regular) lattice within
distance `k`.
"""
A_k = get_knn_adjacency_matrix(AL, k)
nnb = get_num_neighbors(k)
s_i = I[c] @ O
m_i = A_k @ s_i / nnb
return 1 - (m_i ** 2).mean(axis=1)
@jit
def knn_alignment_tissue(c, AL, k, I, O):
"""
Return mean alignment of cell types in a tissue by averaging
over neighborhoods. This is equivalent to
`knn_alignment_per_cell(*args).mean()`
`c` is the cell type vector of shape `(n,)` with dtype `int`
`A` is the `(n, n)`cell-cell adjacency matrix (can be Boolean)
`I` is the `(n_ctypes, n_ctypes)` identity matrix, where `n_ctypes`
is the number of cell types in the tissue.
`O` is the `(n_ctypes, n_ctypes - 1)` difference matrix with `-1` on
the principal diagonal and `1` elsewhere. `I[c] @ O` converts cell
types (non-negative `int`) to spins (difference vectors). The sum
of spin vector components lies in [-1, 1].
`nnb` is the number of neighbors in the (regular) lattice within
distance `k`.
"""
A_k = get_knn_adjacency_matrix(AL, k)
nnb = get_num_neighbors(k)
s_i = I[c] @ O
m_i = A_k @ s_i / nnb
return 1 - (m_i ** 2).mean()
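# Usage sketch tying the alignment helpers together (toy arguments): I and O are
# the identity / difference matrices described above, AL is any adjacency list,
# e.g. the periodic hexagonal one constructed further below.
def _demo_knn_alignment(AL, c, n_ctypes=2, k=1):
    I = get_identity(n_ctypes)
    O = get_difference_matrix(n_ctypes)
    per_cell = knn_alignment_per_cell(c, AL, k, I, O)   # shape (n,)
    tissue = knn_alignment_tissue(c, AL, k, I, O)       # scalar, equals per_cell.mean()
    return per_cell, tissue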
#### Graph
def adjacency_matrix_from_adjacency_list(AL, dtype=bool):
"""
Returns adjacency matrix for a nnb-regular graph given the adjacency list.
"""
n, nnb = AL.shape
A = jnp.zeros((n, n), dtype=dtype)
return A.at[jnp.repeat(jnp.arange(n), nnb), AL.flatten()].set(1)
def get_adjacency_matrix_periodic(rows, cols=0):
"""Construct adjacency matrix for a periodic hexagonal
lattice of dimensions rows x cols."""
    AL = get_adjacency_list_periodic(rows, cols)
return adjacency_matrix_from_adjacency_list(AL)
def get_adjacency_list_periodic(rows, cols=0):
"""Construct adjacency matrix for a periodic hexagonal
lattice of dimensions rows x cols."""
# Assume square if not specified
if cols == 0:
cols = rows
n = rows * cols
row, col = np.meshgrid(np.arange(rows), np.arange(cols))
row = row.flatten()
col = col.flatten()
# Get row of adjacent cells
dr = np.array([0, 1, 1, 0, -1, -1])
AL_row = np.add.outer(row, dr) % rows
# Get column of adjacent cells, accounting for staggering
dc1 = np.array([1, 0, -1, -1, -1, 0])
dc2 = np.array([1, 1, 0, -1, 0, 1])
AL_col = np.add.outer(col, dc1)
AL_col[1::2] += dc2 - dc1
AL_col = AL_col % cols
return rows * AL_col + AL_row
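# Usage sketch (assumed 4x4 size): the periodic hexagonal lattice as an adjacency
# list (one row of 6 neighbours per cell) and as a dense boolean adjacency matrix.
def _demo_periodic_lattice(rows=4, cols=4):
    AL = get_adjacency_list_periodic(rows, cols)      # shape (rows*cols, 6)
    A = adjacency_matrix_from_adjacency_list(AL)      # shape (rows*cols, rows*cols)
    return AL, A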
def hex_grid(rows, cols=0, r=1., sigma=0, **kwargs):
"""
Returns XY coordinates of a regular 2D hexagonal grid
(rows x cols) with edge length r. Points are optionally
passed through a Gaussian filter with std. dev. = sigma * r.
"""
print("Deprecated: please use `cx.geom.hex_grid")
# Check if square grid
if cols == 0:
cols = rows
# Populate grid
x_coords = np.linspace(-r * (cols - 1) / 2, r * (cols - 1) / 2, cols)
y_coords = np.linspace(-np.sqrt(3) * r * (rows - 1) / 4, np.sqrt(3) * r * (rows - 1) / 4, rows)
X = []
for i, x in enumerate(x_coords):
for j, y in enumerate(y_coords):
X.append(np.array([x + (j % 2) * r / 2, y]))
X = np.array(X)
# Apply Gaussian filter if specified
if sigma != 0:
X = np.array([np.random.normal(loc=x, scale=sigma*r) for x in X])
return X
def get_outer_idx(rows, cols):
"""Returns the indices of cells on the border of the lattice grid"""
print("Deprecated: please use `cx.geom.get_outer_idx")
return np.array([
rows * c + r
for c in range(cols)
for r in range(rows)
if ((r in (0, rows - 1)) or (c in (0, cols - 1)))
])
| StarcoderdataPython |
3248149 | from flask import current_app, request, make_response, jsonify, abort
from api.blueprint import api
from core import crossdomain
from flask_security import auth_token_required, roles_required
from flask_login import current_user
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from api.views.all_views import api_version
from api.correction import correction_api
@api.route('/<api_version>/corrections/<int:correction_id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='authentication-token')
@auth_token_required
@api_version
def get_correction(api_version, correction_id):
correction = correction_api.find_correction_by_id(correction_id)
if(correction):
return jsonify( correction.to_dict() )
else:
abort(404)
@api.route('/<api_version>/corrections', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='authentication-token')
@auth_token_required
@api_version
def add_correction(api_version):
json = request.get_json()
    schema = {
        "type": "object",
        "properties": {
            "content": {"type": "string"},
            "file_id": {"type": "number"},
            "format": {"type": "string"}
        },
        "required": ["content", "file_id", "format"]
    }
try:
validate(json, schema)
except ValidationError as ve:
return make_response(jsonify( { 'error': ve.message } ), 400)
correction = correction_api.add_correction(json['content'], json['file_id'], json['format'])
return jsonify( correction.to_dict() )
@api.route('/<api_version>/corrections/<int:correction_id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='authentication-token')
@auth_token_required
@api_version
def update_correction(api_version, correction_id):
json = request.get_json()
    schema = {
        "type": "object",
        "properties": {
            "content": {"type": "string"},
            "format": {"type": "string"}
        },
        "required": ["content", "format"]
    }
try:
validate(json, schema)
except ValidationError as ve:
return make_response(jsonify( { 'error': ve.message } ), 400)
correction = correction_api.find_correction_by_id(correction_id)
if(correction):
correction = correction_api.update_correction_content(correction, content=json['content'])
return jsonify( correction.to_dict() )
else:
abort(404)
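# Example requests against the endpoints above (the version prefix, ids, tokens
# and payload values are illustrative; actual values depend on the deployment):
#   GET  /<api_version>/corrections/42  with header "authentication-token: <token>"
#   POST /<api_version>/corrections     with JSON body {"content": "...", "file_id": 1, "format": "markdown"}
#   PUT  /<api_version>/corrections/42  with JSON body {"content": "...", "format": "markdown"}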
| StarcoderdataPython |
1745205 | <reponame>WeiwenXu21/FRU<gh_stars>10-100
import math
import tensorflow as tf
from tensorflow.python.util import nest
import collections
import pdb
_FRUStateTuple = collections.namedtuple("FRUStateTuple", ("state", "t"))
class FRUStateTuple(_FRUStateTuple):
"""Tuple used by FRU Cells for `state_size`, `zero_state`, and output state.
Stores two elements: `(state, t)`, in that order. Where `state` is the hidden state
and `t` is the time step.
"""
__slots__ = ()
@property
def dtype(self):
(state, t) = self
if state.dtype != t.dtype:
raise TypeError("Inconsistent internal state: %s vs %s" %
(str(state.dtype), str(t.dtype)))
return state.dtype
class FRUCell(tf.contrib.rnn.RNNCell):
"""Implements a simple distribution based recurrent unit that keeps moving
averages of the mean map embeddings of features of inputs.
"""
"""
num_stats: phi size
freqs: array of w
freqs_mask: mask value when frequency is not equal to zero
output_dims: output size
recur_dims: r size
seq_len: length of sequence
"""
def __init__(self, num_stats, freqs, freqs_mask, output_dims, recur_dims, seq_len,
summarize=True, linear_out=False,
include_input=False, activation=tf.nn.relu):
self._num_stats = num_stats
self._output_dims = output_dims
self._recur_dims = recur_dims
self._freqs_array = freqs
self._nfreqs = len(freqs)
self._freqs_mask_array = [0.0 if w == 0 and len(freqs) > 1 else freqs_mask for w in freqs]
print "frequency_mask = ", self._freqs_mask_array
self._summarize = summarize
self._linear_out = linear_out
self._activation = activation
self._include_input = include_input
# as tensorflow does not feed current time step to __call__
# I have to manually record it
self._seq_len = seq_len
self.W = []
self.b = []
"""
nfreqs*num_stats
"""
@property
def state_size(self):
return FRUStateTuple(int(self._nfreqs * self._num_stats), 1)
@property
def output_size(self):
return self._output_dims
def __call__(self, inputs, state_tuple, scope=None):
"""
recur*: r
state*: mu, state_tuple includes (state, t)
stats*: phi
freq*: frequency vector
"""
state, cur_time_step = state_tuple
with tf.variable_scope(scope or type(self).__name__):
self._freqs = tf.reshape(tf.get_variable("frequency", initializer=self._freqs_array, trainable=False), [1, -1, 1])
self._phases = tf.reshape(tf.get_variable("phase", [self._nfreqs], initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32), trainable=True), [1, -1, 1])
self._freqs_mask = tf.reshape(tf.get_variable("frequency_mask", initializer=self._freqs_mask_array, trainable=False), [1, -1, 1])
# Make statistics on input.
if self._recur_dims > 0:
"""
r_t = f(W^r mu_{t-1} + b^r)
"""
recur_output = self._activation(_linear(
state, self._recur_dims, True, scope='recur_feats'
), name='recur_feats_act')
"""
phi_t = W^phi r_t + W^x x_t + b^phi
"""
stats = self._activation(_linear(
[inputs, recur_output], self._num_stats, True, scope='stats',
), name='stats_act')
else:
stats = self._activation(_linear(
inputs, self._num_stats, True, scope='stats'
), name='stats_act')
# Compute moving averages of statistics for the state.
with tf.variable_scope('out_state'):
state_tensor = tf.reshape(
state, [-1, self._nfreqs, self._num_stats], 'state_tensor'
)
stats_tensor = tf.reshape(
stats, [-1, 1, self._num_stats], 'stats_tensor'
)
#cur_time_step = tf.Print(cur_time_step, [cur_time_step], message="cur_time_step = ")
"""
mu_t = mask*mu_{t-1} + cos(2*pi*w*t/T + 2*pi*phase)*phi_t
"""
out_state = tf.reshape(self._freqs_mask*state_tensor +
1.0/self._seq_len*tf.cos(2.0*math.pi/self._seq_len*tf.reshape(cur_time_step, shape=[-1, 1, 1])*self._freqs + 2.0*math.pi*self._phases)*stats_tensor,
[-1, self.state_size.state], 'out_state')
# Compute the output.
if self._include_input:
output_vars = [out_state, inputs]
else:
output_vars = out_state
"""
o_t = W^o mu_t + b^o
"""
output = _linear(
output_vars, self._output_dims, True, scope='output'
)
if not self._linear_out:
output = self._activation(output, name='output_act')
# update time step
out_state_tuple = (out_state, cur_time_step+1)
# Retrieve RNN Variables
if not self.W:
with tf.variable_scope('recur_feats', reuse=True):
self.W.append(tf.get_variable('Matrix'))
self.b.append(tf.get_variable('Bias'))
with tf.variable_scope('stats', reuse=True):
self.W.append(tf.get_variable('Matrix'))
self.b.append(tf.get_variable('Bias'))
with tf.variable_scope('output', reuse=True):
self.W.append(tf.get_variable('Matrix'))
self.b.append(tf.get_variable('Bias'))
print("W = ", self.W)
print("b = ", self.b)
"""
o_t and mu_t
"""
return (output, out_state_tuple)
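# Minimal single-step usage sketch.  The hyper-parameters, frequencies and shapes
# below are illustrative assumptions, not values taken from the original experiments.
def _fru_demo(batch_size=4, input_dim=8, seq_len=20):
    import numpy as np
    x_t = tf.placeholder(tf.float32, [batch_size, input_dim])
    cell = FRUCell(num_stats=10, freqs=np.array([0.0, 1.0, 2.0], dtype=np.float32),
                   freqs_mask=1.0, output_dims=16, recur_dims=8, seq_len=seq_len)
    state = FRUStateTuple(state=tf.zeros([batch_size, cell.state_size.state]),
                          t=tf.zeros([batch_size, 1]))
    # one recurrence step: returns o_t and the updated (mu_t, t+1) state tuple
    output, next_state = cell(x_t, state)
    return output, next_state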
# No longer publicly expose function in tensorflow.
def _linear(args, output_size, bias, bias_start=0.0, scope=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
scope: VariableScope for the created subgraph; defaults to "Linear".
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError(
"Linear is expecting 2D arguments: %s" %
str(shapes))
if not shape[1]:
raise ValueError(
"Linear expects shape[1] of arguments: %s" %
str(shapes))
else:
total_arg_size += shape[1]
dtype = [a.dtype for a in args][0]
# Now the computation.
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable(
"Matrix", [total_arg_size, output_size], initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=dtype), dtype=dtype)
if len(args) == 1:
res = tf.matmul(args[0], matrix)
else:
res = tf.matmul(tf.concat(args, 1), matrix)
if not bias:
return res
bias_term = tf.get_variable(
"Bias", [output_size],
dtype=dtype,
initializer=tf.constant_initializer(bias_start, dtype=dtype)
)
return res + bias_term
| StarcoderdataPython |
1633058 | <gh_stars>10-100
################################################
## Written by <NAME>
################################################
from utilities import *
from Co8 import *
# Define dictionaries of ordinary gems and jewelry in the game.
# Format is key : [value in gp, [list of proto numbers]]
gem_table = {
1: [10, [12042, 12044]],
2: [50, [12041, 12042]],
3: [100, [12035, 12040]],
4: [500, [12034, 12039]],
5: [1000, [12010, 12038]],
6: [5000, [12036, 12037]]
}
jewelry_table = {
1: [50, [6180, 6190]],
2: [100, [6181, 6185]],
3: [200, [6157]],
4: [250, [6182, 6194]],
5: [500, [6186, 6191]],
6: [750, [6183, 6193]],
7: [1000, [6184, 6192]],
8: [2500, [6187, 6197]],
9: [5000, [6188, 6195]],
10: [7500, [6189, 6196]]
}
def RespawnInventory(attachee, num = 0):
# Removes all attachee's inventory, and respawns it friom the InvenSource.mes line number specified by 'num'.
# If num is not given in the function call, the function will attempt to use the default InvenSource.mes line number for the attachee, if one is defined.
# If no InvenSource.mes line number is defined, the function will terminate.
# Example call 1: RespawnInventory(attachee, 1) will create Burne's inventory(per line number 1 in InvenSource.mes) in attachee's inventory.
# Example call 2: RespawnInventory(attachee) will attempt to create the attachee's pre-defined inventory (per InvenSource.mes).
# If the attachee has no Inventory Source defined, the function will terminate.
if num == 0:
if attachee.type == obj_t_container:
num = attachee.obj_get_int( obj_f_container_inventory_source)
elif attachee.type == obj_t_npc:
num = attachee.obj_get_int(obj_f_critter_inventory_source)
else:
print attachee, 'is not a valid type'
return
if num == 0:
print attachee, 'has no inventory source defined'
print 'Please specify an inventory to respawn'
return
ClearInv(attachee)
CreateInv(attachee, num)
return
def ClearInv(attachee):
# Removes all inventory from attachee.
for num in range(4000, 13000):
item = attachee.item_find_by_proto(num)
while (item != OBJ_HANDLE_NULL):
item.destroy()
item = attachee.item_find_by_proto(num)
return
def CreateInv(attachee, num):
# Creates inventory from the structured list created by GetInv from the InvenSource.mes line number 'num'.
inv = GetInv(num)
for i in range(len(inv)):
if not (type(inv[i][0]) is str):
if type(inv[i][1]) is int:
if inv[i][0] <= 100:
chance = inv[i][0]
if chance >= game.random_range(1,100):
create_item_in_inventory(inv[i][1], attachee)
else:
money = create_item_in_inventory(inv[i][0], attachee)
money.obj_set_int(obj_f_money_quantity, inv[i][1])
else:
if inv[i][0] == 100:
n = game.random_range(0, len(inv[i][1]) - 1)
create_item_in_inventory(inv[i][1][n], attachee)
elif inv[i][0] >= 7000 and inv[i][0] <= 7003:
money = create_item_in_inventory(inv[i][0], attachee)
money.obj_set_int(obj_f_money_quantity, game.random_range(inv[i][1][0], inv[i][1][1]))
else:
gjlist = CalcGJ(inv[i][0], inv[i][1])
if gjlist != []:
for k in range(len(gjlist)):
create_item_in_inventory(gjlist[k], attachee)
return
def GetInv(num, filename = 'data\\rules\\InvenSource.mes'):
# Reads InvenSource.mes, finds the line numbered 'num', and creates a structured list of the entries in that line.
InvDict = readMes(filename) #readMes is in Co8.py
InvLine = InvDict[num][0]
InvLine = InvLine.split(':')
InvLine.remove(InvLine[0])
InvLine[0] = InvLine[0].strip()
n = InvLine[0].find('_num')
if n != -1:
n = n + 7
InvLine[0] = InvLine[0][n:]
inv = InvLine[0]
inv = inv.split(' ')
for i in range(len(inv)):
if inv[i].find('(') == -1:
inv[i] = inv[i].split(',')
for j in range(len(inv[i])):
if inv[i][j] == 'copper':
inv[i][j] = 7000
elif inv[i][j] == 'silver':
inv[i][j] = 7001
elif inv[i][j] == 'gold':
inv[i][j] = 7002
elif inv[i][j] == 'platinum':
inv[i][j] = 7003
elif type(inv[i][j]) is str and inv[i][j].find('-') != -1:
inv[i][j] = inv[i][j].split('-')
for k in range(len(inv[i][j])):
inv[i][j][k] = ConvertToInt(inv[i][j][k])
if type(inv[i][j]) is str:
inv[i][j] = ConvertToInt(inv[i][j])
else:
temp1 = inv[i]
temp1 = str(temp1)
temp1 = temp1[1:-1]
temp1 = temp1.split(',')
for n in range(len(temp1)):
temp1[n] = ConvertToInt(temp1[n])
temp2 = [100, temp1]
inv[i] = temp2
return inv
def ConvertToInt( string ):
if type(string) is str:
try:
string = int(string)
except:
if not (string == 'gems' or string == 'jewelry'):
print 'WARNING: NON-INTEGER FOUND'
print 'Non-integer found is', string
else:
print 'WARNING: NON-STRING FOUND'
print 'Non-string found is', string
return string
def CalcGJ(string, value):
gjlist = []
if string == 'gems':
table = gem_table
elif string == 'jewelry':
table = jewelry_table
else:
return gjlist
if not (type(value) is int):
value = ConvertToInt(value)
if not (type(value) is int):
return gjlist
n = len(table)
while value >= table[1][0]:
if table[n][0] <= value:
gjlist.append(table[n][1][game.random_range(0, len(table[n][1]) - 1)])
value = value - table[n][0]
else:
n = n - 1
return gjlist
| StarcoderdataPython |
1648823 | # slope
"""
x_0:
    initial position
dt:
    time step (float)
iteration:
    number of integration steps (integer)
a:
    acceleration factor, 0 < a < 1
t:
    total elapsed time, dt * iteration
"""
# class slope_field(object):
# """docstring for slope_field"""
# def __init__(self, arg):
# super(slope_field, self).__init__()
# self.arg = arg
def slope(x_0, dt, iteration, a):
x = np.array([x_0])
t = np.array([0])
for i in range(iteration):
x_i = x[i]
t_i = t[i]
x_i = x_i + x_dot(x_i) * a
t_i = t_i + dt
x = np.append(x, np.array([x_i]))
t = np.append(t, np.array([t_i]))
    return x, t
 | StarcoderdataPython
1646651 | <filename>sdk/python/pulumi_azure/cognitive/outputs.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'AccountNetworkAcls',
]
@pulumi.output_type
class AccountNetworkAcls(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "defaultAction":
suggest = "default_action"
elif key == "ipRules":
suggest = "ip_rules"
elif key == "virtualNetworkSubnetIds":
suggest = "virtual_network_subnet_ids"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AccountNetworkAcls. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccountNetworkAcls.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccountNetworkAcls.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
default_action: str,
ip_rules: Optional[Sequence[str]] = None,
virtual_network_subnet_ids: Optional[Sequence[str]] = None):
"""
:param str default_action: The Default Action to use when no rules match from `ip_rules` / `virtual_network_subnet_ids`. Possible values are `Allow` and `Deny`.
:param Sequence[str] ip_rules: One or more IP Addresses, or CIDR Blocks which should be able to access the Cognitive Account.
:param Sequence[str] virtual_network_subnet_ids: One or more Subnet ID's which should be able to access this Cognitive Account.
"""
pulumi.set(__self__, "default_action", default_action)
if ip_rules is not None:
pulumi.set(__self__, "ip_rules", ip_rules)
if virtual_network_subnet_ids is not None:
pulumi.set(__self__, "virtual_network_subnet_ids", virtual_network_subnet_ids)
@property
@pulumi.getter(name="defaultAction")
def default_action(self) -> str:
"""
The Default Action to use when no rules match from `ip_rules` / `virtual_network_subnet_ids`. Possible values are `Allow` and `Deny`.
"""
return pulumi.get(self, "default_action")
@property
@pulumi.getter(name="ipRules")
def ip_rules(self) -> Optional[Sequence[str]]:
"""
One or more IP Addresses, or CIDR Blocks which should be able to access the Cognitive Account.
"""
return pulumi.get(self, "ip_rules")
@property
@pulumi.getter(name="virtualNetworkSubnetIds")
def virtual_network_subnet_ids(self) -> Optional[Sequence[str]]:
"""
One or more Subnet ID's which should be able to access this Cognitive Account.
"""
return pulumi.get(self, "virtual_network_subnet_ids")
| StarcoderdataPython |
4814134 | from django.apps import AppConfig
class ChatUsersConfig(AppConfig):
name = 'chat_users'
| StarcoderdataPython |
1761695 | from __future__ import print_function
import sys
import numpy as np
import matplotlib.pyplot as plt
def main(argv):
np.random.seed(2)
numPoints = 1001
xs = np.linspace(-5, 5, numPoints)
probs = generateData(numPoints)
convProbs = convolveProbs(probs)
print(np.sum(convProbs))
plt.imshow(convProbs, interpolation = 'none')
plt.show()
def generateData(numPoints):
probs = list()
for i in range(4):
p1 = np.random.rand(numPoints)
p1 /= np.sum(p1)
probs.append(p1)
return probs
def convolveProbs(probs):
numPoints = len(probs[0])
convProbs = np.diag(probs[0])
for p1 in probs[1:]:
convProbsCopy = np.copy(convProbs)
convProbs = np.zeros((numPoints, numPoints))
rowCumSums = np.zeros((numPoints, numPoints))
for j in range(numPoints):
rowCumSums[:j, j] = np.cumsum(convProbsCopy[1:j+1, j][::-1])[::-1]
for i in range(numPoints):
convProbs[i, i:] += convProbsCopy[i, i:]*np.cumsum(p1[i:])
convProbs[i, i:] += rowCumSums[i, i:]*p1[i]
convProbs[i, i+1:] += np.cumsum(convProbsCopy[i, i:-1])*p1[i+1:]
return convProbs
if __name__ == "__main__":
main(sys.argv[1:])
| StarcoderdataPython |
1701222 | import os
os.system('docker login -u "dashy2004" -p "12345678qwerty123" repo.treescale.com')
os.system('docker build -t games-day .')
os.system('docker tag games-day repo.treescale.com/dashy2004/games-day:latest')
os.system('docker push repo.treescale.com/dashy2004/games-day:latest')
print('The build passed yay!')
 | StarcoderdataPython
1759169 | import os
import sys
from alembic import command
from alembic import config
directory = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, directory)
import settings
import data.db_session as db_session
directory = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../..'))
sys.path.insert(0, directory)
from migrations import utils as migrations_utils
def run():
alembic_cfg = config.Config(settings.ALEMBIC_INI)
if not migrations_utils.is_current_rev_is_latest():
command.upgrade(alembic_cfg, 'head')
def setup_db():
db_session.init_sql(settings.DB_CONNECTION)
if __name__ == '__main__':
run()
| StarcoderdataPython |
1666648 | <filename>setup.py
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(
name='diffenv',
version='0.2.9',
author='<NAME>, <NAME>',
author_email='<EMAIL>, <EMAIL>',
url='http://github.com/error-central/diffenv',
description='Compare development environments',
long_description=readme(),
long_description_content_type='text/markdown',
include_package_data=True,
scripts=['bin/diffenv'],
license='MIT',
packages=['diffenv'],
install_requires=[
'colorama',
'requests',
'ruamel.yaml',
'gitpython',
'psutil',
'importlib_metadata',
],
zip_safe=False,
)
| StarcoderdataPython |
4833512 | <gh_stars>0
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import numpy as np
from skimage import io, transform
from datawriter import FolderWriter, ICDAR2015Writer
from synthgen import RendererV3
import random
# Define some configuration variables:
NUM_IMG = -1 # no. of images to use for generation (-1 to use all available):
# SECS_PER_IMG = 5 #max time per image in seconds
SECS_PER_IMG = None # max time per image in seconds
# INSTANCE_PER_IMAGE = 900 # no. of times to use the same image
INSTANCE_PER_IMAGE = 5 # no. of times to use the same image
# path to the data-file, containing image, depth and segmentation:
SEED = 2001
def main(bg_dir: Path, depth_dir: Path, seg_dir: Path, font_dir: Path,
         text_path: Path, model_dir: Path, output_dir: Path, total_samples, viz):
writer = ICDAR2015Writer(output_dir, total_samples)
writer.open()
random.seed(SEED)
np.random.seed(SEED)
color_model_path = model_dir / 'colors_new.cp'
font_model_path = model_dir / 'font_px2pt.pkl'
RV3 = RendererV3(color_model_path, font_dir, text_path, font_model_path, max_time=SECS_PER_IMG)
for i, image_path in enumerate(bg_dir.iterdir()):
image_name = image_path.stem
print('Processing', image_path)
depth_path = depth_dir / (image_name + '.npz')
if not depth_path.exists():
print(depth_path, 'does not exist. Skip')
continue
seg_path = seg_dir / (image_name + '.npz')
if not seg_path.exists():
print(seg_path, 'does not exist. Skip')
continue
img = io.imread(str(image_path))
with np.load(depth_path) as data:
depth = data['depth']
depth = (depth - depth.min()) / (depth.max() - depth.min())
depth = 1 - depth
depth = depth * 255
with np.load(seg_path) as data:
seg = data['seg']
area = data['area']
label = data['label']
# try:
res = RV3.render_text(img, depth, seg, area, label,
ninstance=INSTANCE_PER_IMAGE, viz=viz)
# except Exception as e:
# print(f'[ERROR] {image_path}: {e}')
# print(res)
if len(res) > 0:
writer.write(res)
# visualize the output:
if viz:
plt.show(block=True)
if 'q' == input('Continue? (q to quit)'):
break
writer.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Generate Synthetic Scene-Text Images')
parser.add_argument('data_dir', type=Path)
parser.add_argument('--bg_dir', type=Path, default=None)
parser.add_argument('--depth_dir', type=Path, default=None)
parser.add_argument('--seg_dir', type=Path, default=None)
parser.add_argument('--font_dir', type=Path, default=None)
parser.add_argument('--text_path', type=Path, default=None)
parser.add_argument('--model_dir', type=Path, default=None)
parser.add_argument('--viz', action='store_true', dest='viz',
default=False, help='flag for turning on visualizations')
parser.add_argument('--output_dir', default='outputs', type=Path,
help='path to store generated results')
parser.add_argument('--total_samples', default=10000,
help='Total number of samples to generate')
args = parser.parse_args()
bg_dir = args.bg_dir or Path(args.data_dir) / 'bg'
depth_dir = args.depth_dir or Path(args.data_dir) / 'depths'
seg_dir = args.seg_dir or Path(args.data_dir) / 'segs'
font_dir = args.font_dir or Path(args.data_dir) / 'fonts'
text_path = args.text_path or Path(args.data_dir) / 'text.txt'
model_dir = args.model_dir or Path(args.data_dir) / 'models'
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
    main(bg_dir, depth_dir, seg_dir, font_dir, text_path, model_dir, output_dir, args.total_samples, args.viz)
cv2.destroyAllWindows()
| StarcoderdataPython |
47229 | import os
from flask import Flask
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
class EnvironmentVariablesEndpoint(Resource):
def get(self):
return [(key, os.environ[key]) for key in os.environ.keys()]
api.add_resource(EnvironmentVariablesEndpoint, '/')
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=8000)
| StarcoderdataPython |
37424 | import math
import pathlib
import sys
import torch
import torch.nn as nn
PROJECT_DIR = pathlib.Path(__file__).absolute().parent.parent.parent # main directory, the parent of src
if str(PROJECT_DIR) not in sys.path:
sys.path.append(str(PROJECT_DIR))
from src.model.ConvLayer import ConvLayer
from src.model.PrimaryCaps import PrimaryCaps
from src.model.DigitCaps import DigitCaps
from src.model.Decoder import Decoder
INPUT_WIDTH = 28
NUM_CONV_IN_CHANNELS = 1
CONV_KERNEL = 9
CONV_STRIDE = 1
NUM_CONV_OUT_CHANNELS = 256
NUM_PRIMARY_CHANNELS = 32
PRIMARY_CAPS_DIM = 8
PRIMARY_KERNEL = 9
PRIMARY_STRIDE = 2
DIGIT_CAPS_DIM = 16
NUM_CLASSES = 10
REGULARIZATION_SCALE = 0.0005
ITER = 3
DEC1_DIM = 512
DEC2_DIM = 1024
CUDA_ENABLED = True
SMALL_DECODER = False
DEVICE = 'cuda:0'
CONV_SHARED_WEIGHTS = 0 # disabled
PRIMARY_SHARED_WEIGHTS = 0 # disabled
DIGIT_SHARED_WEIGHTS = 0 # disabled
CONV_SHARED_BIAS = CONV_SHARED_WEIGHTS # to have coherency as default
SQUASH_APPROX = False
class Net(nn.Module):
def __init__(self,
input_wh=INPUT_WIDTH,
num_conv_in_channels=NUM_CONV_IN_CHANNELS,
conv_kernel=CONV_KERNEL,
conv_stride=CONV_STRIDE,
num_conv_out_channels=NUM_CONV_OUT_CHANNELS,
num_primary_channels=NUM_PRIMARY_CHANNELS,
primary_caps_dim=PRIMARY_CAPS_DIM,
primary_kernel=PRIMARY_KERNEL,
primary_stride=PRIMARY_STRIDE,
digit_caps_dim=DIGIT_CAPS_DIM,
num_classes=NUM_CLASSES,
regularization_scale=REGULARIZATION_SCALE,
iter=ITER,
dec1_dim=DEC1_DIM,
dec2_dim=DEC2_DIM,
cuda_enabled=CUDA_ENABLED,
small_decoder=SMALL_DECODER,
device=DEVICE,
conv_shared_weights=CONV_SHARED_WEIGHTS,
primary_shared_weights=PRIMARY_SHARED_WEIGHTS,
digit_shared_weights=DIGIT_SHARED_WEIGHTS,
conv_shared_bias=CONV_SHARED_BIAS,
squash_approx=SQUASH_APPROX):
super(Net, self).__init__()
self.cuda_enabled = cuda_enabled
if cuda_enabled:
self.device = torch.device(device)
else:
self.device = torch.device('cpu')
self.regularization_scale = regularization_scale
conv_dimension = math.floor(
(input_wh-conv_kernel+conv_stride)/conv_stride)
primary_dimension = math.floor(
(conv_dimension-primary_kernel+primary_stride)/primary_stride)
self.conv = ConvLayer(in_channels=num_conv_in_channels,
out_channels=num_conv_out_channels,
kernel_size=conv_kernel,
stride=conv_stride,
cuda_enabled=cuda_enabled,
device=device,
shared_weights=conv_shared_weights,
shared_bias=conv_shared_bias)
self.primary = PrimaryCaps(in_channels=num_conv_out_channels,
out_channels=num_primary_channels,
out_caps_dim=primary_caps_dim,
kernel_size=primary_kernel,
stride=primary_stride,
cuda_enabled=cuda_enabled,
device=device,
shared_weights=primary_shared_weights,
squash_approx=squash_approx)
self.digit = DigitCaps(in_dim=num_primary_channels*primary_dimension*primary_dimension,
out_dim=num_classes,
in_caps_dim=primary_caps_dim,
out_caps_dim=digit_caps_dim,
iter=iter,
cuda_enabled=cuda_enabled,
device=device,
shared_weights=digit_shared_weights,
squash_approx=squash_approx)
decoder_in_dim = digit_caps_dim if small_decoder else num_classes * digit_caps_dim
self.decoder = Decoder(in_dim=decoder_in_dim,
l1_dim=dec1_dim,
l2_dim=dec2_dim,
out_dim=input_wh*input_wh,
device=device,
small_decoder=small_decoder)
def forward(self, x, labels, is_training=True):
out_conv = self.conv(x)
out_primary = self.primary(out_conv)
out_digit = self.digit(out_primary)
reconstruction = self.decoder(out_digit, labels, is_training)
return out_digit, reconstruction
| StarcoderdataPython |
3333357 | <filename>instascrape/structures.py
import os
import sys
import json
import logging
import traceback
from typing import *
from io import BytesIO
from collections import namedtuple, OrderedDict
import requests
from instascrape.constants import *
from instascrape.exceptions import *
from instascrape.group import *
from instascrape.utils import get_username_from_userid, set_mtime, get_biggest_media, verify_file, to_datetime
__all__ = ("Post", "IGTV", "Profile", "Hashtag", "Explore")
logger = logging.getLogger("instascrape")
CommentItem = namedtuple("CommentItem", "author text created_time")
class DataGetterMixin:
@property
def raw_data(self) -> dict:
if self._full_data is None:
self._obtain_full_data()
return self._full_data
def _find_or_get(self, *keys: str, data: dict = None, i: int = None):
i = 0 if i is None else i
key = keys[i]
if data is not None:
if key in data:
return data[key]
else:
# get full data & find in it
self._obtain_full_data()
d = self._full_data[keys[0]]
for k in keys[1:]:
d = d[k] # raises KeyError
return d
else:
# [1] find in initial data
if key in self._init_data:
d = self._init_data[key]
# [2] find in full data (if not None)
elif self._full_data is not None and key in self._full_data:
d = self._full_data[key]
else:
# get full data & find in it
self._obtain_full_data()
d = self._full_data[key] # raises KeyError
i += 1
return self._find_or_get(*keys, data=d, i=i) if len(keys) > 1 else d
class AsDictMixin:
info_vars = ()
def as_dict(self, *, extra: bool = False) -> OrderedDict:
"""Returns all 'info_vars' as an 'OrderedDict'.
Arguments:
extra: Add extra data to the dictionary if True.
"""
assert len(self.info_vars) > 0, "'AsDictMixin' should not be used in this class if 'info_vars' is intended to be empty"
dictionary = OrderedDict({"_struct": self.__class__.__name__} if extra else {})
for attr in self.info_vars:
dictionary[attr] = getattr(self, attr)
return dictionary
class MediaItem(AsDictMixin):
"""Represents a media item (image or video)."""
info_vars = ("typename", "src", "width", "height", "is_video")
@classmethod
def compose_items(cls, data: dict) -> List["MediaItem"]:
"""Composes 'MediaItem' objects by extracting from 'data'."""
def make(node: dict) -> "MediaItem":
typename = node["__typename"]
if typename == "GraphImage":
item = get_biggest_media(node["display_resources"])
elif typename == "GraphVideo":
item = {"src": node["video_url"]}
return cls(typename, item.get("src"), item.get("config_width"), item.get("config_height"))
typename = data["__typename"]
if typename in ("GraphImage", "GraphVideo"):
items = [make(data)]
elif typename == "GraphSidecar":
items = []
data = data["edge_sidecar_to_children"]["edges"]
for node in data:
items.append(make(node["node"]))
else:
raise AssertionError("unrecognized typename: '{}'".format(typename))
return items
def __init__(self, typename: str, src: str, width: int, height: int):
self.typename = typename
self.src = src
self.width = width
self.height = height
def __repr__(self) -> str:
return "MediaItem(typename='{}', src='{}', width={}, height={})".format(self.typename, self.src, self.width, self.height)
def __eq__(self, other) -> bool:
return isinstance(other, MediaItem) and self.src == other.src
def __hash__(self) -> int:
return hash(self.src)
@property
def is_video(self) -> bool:
"""Returns True if this media is a video."""
        return self.typename == "GraphVideo"
def download(self, dest: str, filename: str, *, write: bool = True, verify: bool = True) -> Optional[str]:
"""Download this media item to a file.
Arguments:
dest: Path to the destination directory.
filename: Name of the file without extension.
write: Write file to disk if True, write to memory otherwise (for testing and debugging).
verify: Verify file integrity if True, check the size of file in bytes otherwise.
Returns:
            The path to the downloaded file if the download succeeded, None if an intact copy already exists and the download was skipped.
"""
try:
f = None
logger.debug("Downloading file {0} -> {1}".format(self.src, dest))
r = requests.get(self.src, stream=True, timeout=30)
# get info of the file
mime = r.headers["Content-Type"]
bytesize = int(r.headers["Content-Length"])
size = int(bytesize / 1024)
if mime == "video/mp4":
ext = ".mp4"
elif mime == "image/jpeg":
ext = ".jpg"
else:
raise DownloadError("Unsupported MIME type: {0}".format(mime), self.src)
finish_filename = filename + ext
finish_path = os.path.join(dest, finish_filename)
part_filename = filename + ext + ".part"
part_path = os.path.join(dest, part_filename)
# skip if the file is existing and intact
if os.path.isfile(finish_path):
# verify file integrity using md5
if verify and verify_file(r.content, finish_path):
logger.debug("~> [{0}] {1} [skip] (already downloaded)".format(mime, finish_filename))
return None
                # verify the file by checking its size in bytes
if os.stat(finish_path).st_size == bytesize:
logger.debug("~> [{0}] {1} [skip] (already downloaded)".format(mime, finish_filename))
return None
# write to file
f = open(part_path, "wb+") if write else BytesIO()
for chunk in r.iter_content(1024):
if chunk:
f.write(chunk)
logger.debug("=> [{0}] {1} [{2}x{3}] ({4} kB)".format(mime, finish_filename, self.width or "?", self.height or "?", size))
except Exception as e:
raise DownloadError(str(e), self.src) from e
else:
# rename .part file to its real extension
if f:
f.close()
os.rename(part_path, finish_path)
return finish_path
finally:
if f and not f.closed:
f.close()
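# Added usage note (not part of the original module): a 'MediaItem' can be downloaded
# directly once a media URL is known, e.g. one obtained from Post.media_items().
# The URL below is a placeholder, not a real Instagram CDN address.
#
#   item = MediaItem("GraphImage", "https://example.com/photo.jpg", 1080, 1080)
#   item.download(".", "photo", verify=False)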
class ReelItem(MediaItem):
"""Represents a media item (image or video) of a reel."""
info_vars = ("typename", "src", "width", "height", "is_video", "id", "owner_username", "owner_id", "owner_profile_picture_url", "created_time", "expire_time", "cta_url")
@classmethod
def compose_items(cls, data: dict) -> List["ReelItem"]:
"""Composes 'ReelItem' objects by extracting from 'data'."""
def make(node: dict) -> "ReelItem":
typename = node["__typename"]
if typename == "GraphStoryImage":
item = get_biggest_media(node["display_resources"])
elif typename == "GraphStoryVideo":
item = get_biggest_media(node["video_resources"])
return cls(typename, item.get("src"), item.get("config_width"), item.get("config_height"), node)
items = []
data = data["items"]
for node in data:
items.append(make(node))
return items
def __init__(self, typename: str, src: str, width: int, height: int, data: dict):
super().__init__(typename, src, width, height)
self.data = data
def __repr__(self) -> str:
return "ReelItem(typename='{}', src='{}', width={}, height={})".format(self.typename, self.src, self.width, self.height)
def __eq__(self, other) -> bool:
return isinstance(other, ReelItem) and self.src == other.src and self.id == other.id
def __hash__(self) -> int:
return hash(self.id)
@property
def is_video(self) -> bool:
"""Returns True if this media item is a video."""
return self.typename == "GraphStoryVideo"
@property
def id(self) -> str:
"""Returns the ID of this reel item."""
return self.data["id"]
@property
def owner_username(self) -> str:
"""Returns the owner's username of this reel item."""
return self.data["owner"]["username"]
@property
def owner_id(self) -> str:
"""Returns the owner's ID of this reel item."""
return self.data["owner"]["id"]
@property
def owner_profile_picture_url(self) -> str:
"""Returns the URL of the owner's profile picture of this reel item."""
return self.data["owner"]["profile_pic_url"]
def owner_profile_picture(self) -> MediaItem:
"""Returns a 'MediaItem' that represents the owner's profile picture of this reel item."""
return MediaItem("GraphImage", self.owner_profile_picture_url, 150, 150)
@property
def created_time(self) -> int:
"""Returns the created time (timestamp) of this reel item."""
return int(self.data["taken_at_timestamp"])
@property
def expire_time(self) -> int:
"""Returns the expire time in timestamp of this reel item."""
return int(self.data["expiring_at_timestamp"])
@property
def cta_url(self) -> Optional[str]:
"""Returns the 'swipe up for more' URL of this reel item."""
return self.data["story_cta_url"]
class Post(AsDictMixin, DataGetterMixin):
"""Represents a Post entity."""
info_vars = ("shortcode", "url", "typename", "id", "owner_username", "owner_id", "owner_profile_picture_url",
"created_time", "caption", "media_count", "likes_count", "comments_count")
@classmethod
def from_shortcode(cls, insta, shortcode: str):
"""Returns a 'Post' instance by shortcode."""
post = cls(insta, {"shortcode": shortcode})
post._obtain_full_data()
return post
def __init__(self, insta, data: dict):
self._insta = insta
self._init_data = data
self._full_data = None
self.shortcode = data["shortcode"]
def _obtain_full_data(self):
if self._full_data is None:
logger.debug("Fetching initial json data of Post(shortcode='{}')...".format(self.shortcode))
self._full_data = self._insta._fetch_json_data(POST_URL.format(shortcode=self.shortcode))["shortcode_media"]
def __repr__(self) -> str:
return "Post(shortcode='{0}', typename='{1}')".format(self.shortcode, self.typename)
def __eq__(self, other) -> bool:
return isinstance(other, Post) and self.shortcode == other.shortcode and self.id == other.id
def __hash__(self) -> int:
return hash(self.shortcode)
def __len__(self) -> int:
return self.media_count
def __getitem__(self, index: int) -> MediaItem:
return self.media_items()[index]
def __iter__(self) -> MediaItem:
for media in self.media_items():
yield media
@property
def url(self) -> str:
"""Returns the URL of this post."""
return "https://instagram.com/p/" + self.shortcode
@property
def typename(self) -> str:
"""Returns the typename of this post (one of 'GraphImage', 'GraphVideo', 'GraphSidecar')."""
return self._find_or_get("__typename")
@property
def id(self) -> str:
"""Returns the ID of this post."""
return self._find_or_get("id")
@property
def owner_username(self) -> str:
"""Returns the owner's username this post."""
return self._find_or_get("owner")["username"]
@property
def owner_id(self) -> str:
"""Returns the owner's ID of this post."""
return self._find_or_get("owner")["id"]
@property
def owner_profile_picture_url(self) -> str:
"""Returns the URL of the owner's profile picture of this post."""
return self._find_or_get("owner", "profile_pic_url")
def owner_profile_picture(self) -> MediaItem:
"""Returns a 'MediaItem' object of the owner's profile picture of this post."""
return MediaItem("GraphImage", self.owner_profile_picture_url, 150, 150)
@property
def created_time(self) -> int:
"""Returns the created_time (timestamp) of this post."""
return int(self._find_or_get("taken_at_timestamp"))
@property
def caption(self) -> str:
"""Returns the caption of this post."""
edges = self._find_or_get("edge_media_to_caption", "edges")
if not edges:
return ""
return edges[0]["node"]["text"]
@property
def likes_count(self) -> int:
"""Returns the amount of likes of this post."""
return self._find_or_get("edge_media_preview_like")["count"]
@property
def comments_count(self) -> int:
"""Returns the amount of comments of this post."""
try:
return self._find_or_get("edge_media_preview_comment")["count"]
except KeyError:
# fallback
return self._find_or_get("edge_media_to_parent_comment")["count"]
@property
def media_count(self) -> int:
"""Returns the amount of media items in this post."""
return len(self.media_items())
def media_items(self) -> List[MediaItem]:
"""Returns a list of 'MediaItem' of this post."""
self._obtain_full_data()
return MediaItem.compose_items(self._full_data)
def likes(self) -> Group:
"""Retrieves likes of this post in the form of usernames.
Returns:
A 'Group' object that yields 'Profile' objects.
"""
logger.info("Retrieving likes of :{0}".format(self.shortcode))
variables = {"shortcode": self.shortcode}
nodes = self._insta._graphql_query_edges(QUERYHASH_LIKES, variables, "shortcode_media", "edge_liked_by")
return Group(next(nodes), (Profile(self._insta, node) for node in nodes))
def comments(self):
"""Retrieves likes of this post in the form of usernames.
Returns:
- An integer that idicates the estimated amount of items.
- A generator that yields 'CommentItem' -> namedtuple(author, text, created_time).
"""
logger.info("Retrieving comments of :{0}".format(self.shortcode))
variables = {"shortcode": self.shortcode}
nodes = self._insta._graphql_query_edges(QUERYHASH_COMMENTS, variables, "shortcode_media", "edge_media_to_comment")
return next(nodes), (CommentItem(node["owner"]["username"], node["text"], node["created_at"]) for node in nodes)
def download(self, dest: str = None, *, write: bool = True, verify: bool = True,
on_item_start: Callable = None, on_item_finish: Callable = None, on_item_error: Callable = None):
"""Download all media items of this post.
Arguments:
dest: Path to the destination directory.
write: Write file to disk if True, write to memory otherwise.
verify: Verify file integrity if True, check the size of file in bytes otherwise. See 'MediaItem.download()'.
on_item_start: A callable (Post, int, MediaItem). Called on start of each item.
on_item_finish: A callable (Post, int, MediaItem, str). Called on finish of each item.
on_item_error: A callable (Post, int, MediaItem, Exception). Called on error of each item.
"""
dest = os.path.abspath(dest or "./")
media_items = self.media_items()
multi = self.media_count > 1
subdest = os.path.join(dest, self.shortcode) if multi else None
if subdest and not os.path.isdir(subdest):
os.mkdir(subdest)
logger.debug("Downloading {0} ({1} media) [{2}]...".format(repr(self), len(media_items), self.typename))
logger.debug("Dest: " + dest)
for i, item in enumerate(media_items):
if on_item_start is not None:
on_item_start(self, i, item)
try:
filename = str(i) if multi else self.shortcode
file_path = item.download(subdest or dest, filename, write=write, verify=verify)
if file_path is not None:
set_mtime(file_path, self.created_time)
if on_item_finish is not None:
on_item_finish(self, i, item, file_path)
except Exception as e:
# NOTE: if the Post has multiple media items to download, the occurrence of exception will NOT interrupt
# the whole download of the post, unless user reraises the exception in 'on_item_error()'.
exc_type, exc_value, tb = sys.exc_info()
logger.error("{}: {}".format(exc_type.__name__, exc_value))
logger.debug("".join(traceback.format_tb(tb)))
if on_item_error is not None:
on_item_error(self, i, item, e)
continue
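# Added usage sketch (not part of the original module): downloading every media item of a
# post. It assumes `insta` is an already-constructed client instance from this package's
# client module (not defined in this file); the shortcode below is a placeholder.
#
#   post = Post.from_shortcode(insta, "B0000000000")
#   post.download("./downloads",
#                 on_item_finish=lambda post, i, item, path: print(i, "->", path))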
class IGTV(Post):
"""Represents an IGTV Post entity."""
info_vars = ("shortcode", "url", "typename", "id", "owner_username", "owner_id", "owner_profile_picture_url",
"created_time", "caption", "media_count", "likes_count", "comments_count", "title", "duration")
def __init__(self, insta, data: dict):
# In fact, the URL of a IGTV Post is 'instagram.com/tv/{shortcode}'
# but I found out that using 'instagram.com/p/{shortcode}' is just the same, since it is also considered as a Post
super().__init__(insta, data)
def __repr__(self) -> str:
return "IGTV(title='{0}', shortcode='{1}')".format(self.title, self.shortcode)
@property
def title(self) -> str:
"""Returns the title of this IGTV post."""
return self._find_or_get("title")
@property
def duration(self) -> float:
"""Returns the video duration of this IGTV post."""
return float(self._find_or_get("video_duration"))
@property
def view_count(self) -> int:
"""Returns the video view count of this IGTV post."""
return self._find_or_get("video_view_count")
class Story(AsDictMixin):
"""Represents a Story entity."""
info_vars = ("typename", "id", "reel_count")
def __init__(self, data: dict):
self.data = data
    def __repr__(self):
        # concrete subclasses (UserStory, HashtagStory, Highlight) provide their own __repr__
        raise NotImplementedError
def __eq__(self, other) -> bool:
return isinstance(other, Story) and self.id == other.id
def __hash__(self) -> int:
return hash(self.id)
def __len__(self) -> int:
return self.reel_count
def __getitem__(self, index: int) -> ReelItem:
return self.reel_items()[index]
def __iter__(self) -> ReelItem:
for reel in self.reel_items():
yield reel
@property
def typename(self) -> str:
"""Returns the typename of this story."""
return self.data["__typename"]
@property
def id(self) -> str:
"""Returns the ID of this story."""
return self.data["id"]
@property
def reel_count(self) -> int:
"""Returns the amount of reel items in this story."""
return len(self.reel_items())
def reel_items(self) -> List[ReelItem]:
"""Returns a list of reel items of this story."""
return ReelItem.compose_items(self.data)
def download(self, dest: str = None, *, write: bool = True, verify: bool = True,
on_item_start: Callable = None, on_item_finish: Callable = None, on_item_error: Callable = None):
"""Download all reel items of this story.
Arguments:
dest: Path to the destination directory.
write: Write file to disk if True, write to memory otherwise.
verify: Verify file integrity if True, check the size of file in bytes otherwise. See 'MediaItem.download()'.
on_item_start: A callable (Story, int, ReelItem). Called on start of each item.
on_item_finish: A callable (Story, int, ReelItem, str). Called on finish of each item.
on_item_error: A callable (Story, int, ReelItem, Exception). Called on error of each item.
"""
dest = os.path.abspath(dest or "./")
reel_items = self.reel_items()
logger.debug("Downloading {0} ({1} media) [{2}]...".format(repr(self), len(reel_items), self.typename))
logger.debug("Dest: " + dest)
for i, item in enumerate(reel_items):
if on_item_start is not None:
on_item_start(self, i, item)
try:
filename = to_datetime(item.created_time)
file_path = item.download(dest, filename, write=write, verify=verify)
if file_path is not None:
set_mtime(file_path, item.created_time)
if on_item_finish is not None:
on_item_finish(self, i, item, file_path)
except Exception as e:
# NOTE: if the Story has multiple reel items to download, the occurrence of exception will NOT interrupt
# the whole download of the story, unless user reraises the exception in 'on_item_error()'.
exc_type, exc_value, tb = sys.exc_info()
logger.error("{}: {}".format(exc_type.__name__, exc_value))
logger.debug("".join(traceback.format_tb(tb)))
if on_item_error is not None:
on_item_error(self, i, item, e)
continue
class UserStory(Story):
"""Represents a Story entity that belongs to a Profile."""
info_vars = ("typename", "id", "latest_reel_media", "reel_count", "owner_username", "owner_id", "owner_profile_picture_url", "seen_time")
def __init__(self, data: dict):
super().__init__(data)
def __repr__(self) -> str:
return "UserStory(owner_username='{0}', typename='{1}')".format(self.owner_username, self.typename)
@property
def latest_reel_media(self) -> int:
"""Returns the created time of the latest reel media (timestamp) of this story."""
return int(self.data["latest_reel_media"])
@property
def owner_username(self) -> str:
"""Returns the owner's username of this story."""
return self.data["owner"]["username"]
@property
def owner_id(self) -> str:
"""Returns the owner's ID of this story."""
return self.data["owner"]["id"]
@property
def owner_profile_picture_url(self) -> str:
"""Returns the URL of the owner's profile picture of this story."""
return self.data["owner"]["profile_pic_url"]
def owner_profile_picture(self) -> MediaItem:
"""Returns a 'MediaItem' object of the owner's profile picture of this story."""
return MediaItem("GraphImage", self.data["owner"]["profile_pic_url"], 150, 150)
@property
def seen_time(self) -> Optional[int]:
"""Returns the seen time (timestamp) of this story if it has been seen, None otherwise."""
if self.data["seen"]:
return int(self.data["seen"])
class HashtagStory(Story):
"""Represents a Story entity that belongs to a Hashtag."""
info_vars = ("typename", "id", "latest_reel_media", "reel_count", "tagname")
def __init__(self, data: dict):
super().__init__(data)
def __repr__(self) -> str:
return "HashtagStory(tagname='{0}', typename='{1}')".format(self.tagname, self.typename)
@property
def latest_reel_media(self) -> int:
"""Returns the created time of the latest reel media (timestamp) of this story."""
return int(self.data["latest_reel_media"])
@property
def tagname(self) -> str:
"""Returns the hashtag's tag name of this story."""
return self.data["owner"]["name"]
class Highlight(Story):
"""Represents a Highlight entity."""
info_vars = ("typename", "id", "title", "cover_media_thumbnail", "owner_username", "owner_id", "owner_profile_picture_url", "reel_count")
def __init__(self, data: dict):
super().__init__(data)
def __repr__(self) -> str:
return "Highlight(title='{}')".format(self.title)
@property
def title(self) -> str:
"""Returns the title of this highlight."""
return self.data["title"]
@property
def cover_media_thumbnail(self) -> str:
"""Returns the URL of the cover thumbnail of this highlight."""
return self.data["cover_media"]["thumbnail_src"]
@property
def owner_username(self) -> str:
"""Returns the owner's username of this highlight."""
return self.data["owner"]["username"]
@property
def owner_id(self) -> str:
"""Returns the owner's ID of this highlight."""
return self.data["owner"]["id"]
@property
def owner_profile_picture_url(self) -> str:
"""Returns the URL of the owner's profile picture of this highlight."""
return self.data["owner"]["profile_pic_url"]
def owner_profile_picture(self) -> MediaItem:
"""Returns a 'MediaItem' object of the owner's profile picture of this highlight."""
return MediaItem("GraphImage", self.data["owner"]["profile_pic_url"], 150, 150)
class Profile(AsDictMixin, DataGetterMixin):
"""Represents a user Profile entity."""
info_vars = ("username", "url", "id", "fullname", "biography", "website", "followers_count", "followings_count",
"mutual_followers_count", "is_verified", "is_private", "profile_picture_url")
@classmethod
def from_id(cls, insta, id: str):
"""Returns a Post instance from user ID.
* This takes one more step to obtain the username of the user.
"""
username = get_username_from_userid(id)
return cls.from_username(insta, username)
@classmethod
def from_username(cls, insta, username: str):
"""Returns a Post instance from username."""
profile = cls(insta, {"username": username})
profile._obtain_full_data()
return profile
def __init__(self, insta, data: dict):
self._insta = insta
self._init_data = data
self._full_data = None
self.username = data["username"]
def _obtain_full_data(self):
if self._full_data is None:
logger.debug("Obtaining full data of Profile(username='{}')".format(self.username))
self._full_data = self._insta._fetch_json_data(PROFILE_URL.format(username=self.username))["user"]
def __repr__(self):
return "Profile(username='{0}', id='{1}')".format(self.username, self.id)
def __eq__(self, other):
return isinstance(other, Profile) and self.username == other.username and self.id == other.id
def __hash__(self) -> int:
return hash(self.id)
@property
def url(self) -> str:
"""Returns the URL of this profile."""
return "https://instagram.com/" + self.username
@property
def id(self) -> str:
"""Returns the ID (user ID) of this profile."""
return self._find_or_get("id")
@property
def fullname(self) -> str:
"""Returns the fullname of this profile."""
return self._find_or_get("full_name")
@property
def biography(self) -> str:
"""Returns the biography of this profile."""
return self._find_or_get("biography")
@property
def website(self) -> Optional[str]:
"""Returns the website of this profile if applicable, None otherwise."""
return self._find_or_get("external_url")
@property
def followers_count(self) -> int:
"""Returns the amount of followers this profile has."""
return self._find_or_get("edge_followed_by")["count"]
@property
def followings_count(self) -> int:
"""Returns the amount of users this profile is following."""
return self._find_or_get("edge_follow")["count"]
@property
def mutual_followers_count(self) -> int:
"""Returns the amount of mutual followers of this profile."""
return self._find_or_get("edge_mutual_followed_by")["count"]
@property
def is_verified(self) -> bool:
"""Returns True if this profile is verified, False otherwise"""
return self._find_or_get("is_verified")
@property
def is_private(self) -> bool:
"""Returns True if this profile is private, False otherwise"""
return self._find_or_get("is_private")
@property
def profile_picture_url(self) -> str:
"""Retunrs the URL of the profile picture of this profile."""
return self._find_or_get("profile_pic_url_hd")
def profile_picture(self) -> MediaItem:
"""Retunrs a 'MediaItem' of the profile picture of this profile."""
return MediaItem("GraphImage", self.profile_picture_url, 320, 320)
def timeline_posts(self) -> PostGroup:
"""Retrieves timeline posts of this profile.
Returns:
A 'PostGroup' object.
"""
self._obtain_full_data()
logger.info("Retrieving timeline posts of @{0}".format(self.username))
variables = {"id": self.id}
nodes = self._insta._graphql_query_edges(QUERYHASH_TIMELINE, variables, "user", "edge_owner_to_timeline_media", self._full_data)
return Group.of_posts(next(nodes), (Post(self._insta, node) for node in nodes))
def saved_posts(self) -> PostGroup:
"""Retrieves saved posts of this profile.
* Requires authentication.
Returns:
A 'PostGroup' object.
"""
if not self._insta.authenticated:
raise AuthenticationRequired()
self._obtain_full_data()
logger.info("Retrieving saved posts of @{0}".format(self.username))
variables = {"id": self.id}
nodes = self._insta._graphql_query_edges(QUERYHASH_SAVED, variables, "user", "edge_saved_media", self._full_data)
return Group.of_posts(next(nodes), (Post(self._insta, node) for node in nodes))
def tagged_posts(self) -> PostGroup:
"""Retrieves tagged posts of this profile.
Returns:
A 'PostGroup' object.
"""
logger.info("Retrieving tagged posts of @{0}".format(self.username))
variables = {"id": self.id}
nodes = self._insta._graphql_query_edges(QUERYHASH_TAGGED, variables, "user", "edge_user_to_photos_of_you")
return Group.of_posts(next(nodes), (Post(self._insta, node) for node in nodes))
def igtv_posts(self) -> PostGroup:
"""Retrieves IGTV posts of this profile.
Returns:
A 'PostGroup' object.
"""
self._obtain_full_data()
logger.info("Retrieving IGTV video posts of @{0}".format(self.username))
variables = {"id": self.id}
nodes = self._insta._graphql_query_edges(QUERYHASH_IGTV, variables, "user", "edge_felix_video_timeline", self._full_data)
return Group.of_posts(next(nodes), (IGTV(self._insta, node) for node in nodes))
def followers(self) -> Group:
"""Retrieves followers of this profile.
* Requires authentication.
Returns:
A 'Group' object that yields 'Profile' instances.
"""
if not self._insta.authenticated:
raise AuthenticationRequired()
logger.info("Retrieving followers of @{0}".format(self.username))
variables = {"id": self.id}
nodes = self._insta._graphql_query_edges(QUERYHASH_FOLLOWERS, variables, "user", "edge_followed_by")
return Group(next(nodes), (Profile(self._insta, node) for node in nodes))
def followings(self) -> Group:
"""Retrieves profiles that this profile is following.
* Requires authentication.
Returns:
A 'Group' object that yields 'Profile' instances.
"""
if not self._insta.authenticated:
raise AuthenticationRequired()
logger.info("Retrieving followings of @{0}".format(self.username))
variables = {"id": self.id}
nodes = self._insta._graphql_query_edges(QUERYHASH_FOLLOWINGS, variables, "user", "edge_follow")
return Group(next(nodes), (Profile(self._insta, node) for node in nodes))
def highlights(self) -> List[Highlight]:
"""Retrieves highlights of this profile.
* Requires authentication.
Returns:
A list of 'Highlight' objects.
"""
if not self._insta.authenticated:
raise AuthenticationRequired()
logger.info("Retrieving story highlights of @{0}".format(self.username))
# [1] retrieve all available highlights of this user
variables = {"user_id": self.id, "include_chaining": False, "include_reel": False,
"include_suggested_users": False, "include_logged_out_extras": False, "include_highlight_reels": True}
data = self._insta._graphql_query(QUERYHASH_HIGHLIGHTS, variables)["user"]["edge_highlight_reels"]
nodes = [edge["node"] for edge in data["edges"]]
if not nodes:
logger.warning("No visible highlight is found for this profile.")
return []
# [2] do GraphQL query to get the reel items data of all highlights at once
logger.debug("Fetching json data of highlights of @{} ...".format(self.username))
variables = {"highlight_reel_ids": [str(node["id"]) for node in nodes], "precomposed_overlay": False, "show_story_viewer_list": False}
url = QUERY_URL.format(QUERYHASH_REELITEMS, json.dumps(variables))
data = self._insta._fetch_json_data(url)["reels_media"]
hs = []
for d in data:
for node in nodes:
if node["id"] == d["id"]:
d.update(node)
break
else:
continue
# produce 'Highlight' object
hs.append(Highlight(d))
return hs
def story(self) -> Optional[UserStory]:
"""Retrieves the currently visible story of this profile.
* Requires authentication.
Returns:
A 'UserStory' object if applicable, None otherwise.
"""
if not self._insta.authenticated:
raise AuthenticationRequired()
logger.info("Retrieving story of @{0}".format(self.username))
variables = {"reel_ids": [self.id], "precomposed_overlay": False, "show_story_viewer_list": False}
data = self._insta._graphql_query(QUERYHASH_REELITEMS, variables)["reels_media"]
if not data:
logger.warning("No visible story is available now for this profile.")
return
return UserStory(data[0])
class Hashtag(DataGetterMixin):
"""Represents a Hashtag entity."""
@classmethod
def from_tagname(cls, insta, tagname: str):
"""Returns a Hashtag instance from tag name."""
hashtag = cls(insta, {"name": tagname})
hashtag._obtain_full_data()
return hashtag
def __init__(self, insta, data: dict):
self._insta = insta
self._init_data = data
self._full_data = None
self.tagname = data["name"]
def _obtain_full_data(self):
if self._full_data is None:
logger.debug("Obtaining full data of Hashtag(tagname='{}')".format(self.tagname))
self._full_data = self._insta._fetch_json_data(HASHTAG_URL.format(tagname=self.tagname))["hashtag"]
def __repr__(self):
return "Hashtag(tagname='{0}')".format(self.tagname)
def __eq__(self, other):
return isinstance(other, Hashtag) and self.tagname == other.tagname and self.id == other.id
def __hash__(self) -> int:
return hash(self.tagname)
@property
def id(self) -> str:
"""Returns the ID of this hashtag."""
return self._find_or_get("id")
@property
def profile_picture_url(self) -> str:
"""Returns the URl of the profile picture of this hashtag."""
return self._find_or_get("profile_pic_url")
def profile_picture(self) -> MediaItem:
"""Returns a 'MediaItem' of the profile picture of this hashtag."""
return MediaItem("GraphImage", self.profile_picture_url, 320, 320)
def top_posts(self) -> PostGroup:
"""Retrieves top posts if this hashtag.
* Only 9 posts at most.
Returns:
A 'PostGroup' object.
"""
self._obtain_full_data()
logger.info("Retrieving top posts of #{0}".format(self.tagname))
nodes = self._insta._graphql_query_edges("", {}, "hashtag", "edge_hashtag_to_top_posts", self._full_data)
return Group.of_posts(next(nodes), (Post(self._insta, node) for node in nodes))
def recent_posts(self) -> PostGroup:
"""Retrieves most recent posts if this hashtag.
Returns:
A 'PostGroup' object.
"""
logger.info("Retrieving recent posts of #{0}".format(self.tagname))
variables = {"tag_name": self.tagname}
nodes = self._insta._graphql_query_edges(QUERYHASH_HASHTAG, variables, "hashtag", "edge_hashtag_to_media")
return Group.of_posts(next(nodes), (Post(self._insta, node) for node in nodes))
def story(self) -> Optional[HashtagStory]:
"""Retrieves the current visible Story of this hashtag.
* Requires authentication.
Returns:
A 'HashtagStory' object.
"""
if not self._insta.authenticated:
raise AuthenticationRequired()
logger.info("Retrieving story of #{0}".format(self.tagname))
variables = {"tag_names": [self.tagname], "precomposed_overlay": False, "show_story_viewer_list": False}
data = self._insta._graphql_query(QUERYHASH_REELITEMS, variables)["reels_media"]
if not data:
logger.warning("No visible story is avaliable now for this hashtag.")
return
return HashtagStory(data[0])
class Explore:
"""Represents the Explore entity in the discover section."""
def __init__(self, insta):
self._insta = insta
def __repr__(self):
return "Explore()"
def posts(self) -> PostGroup:
"""Retrieves posts of explore.
* Requires authentication.
Returns:
A 'PostGroup' object.
"""
if not self._insta.authenticated:
raise AuthenticationRequired()
logger.info("Retrieving explore posts...")
nodes = self._insta._graphql_query_edges(QUERYHASH_EXPLORE, {}, "user", "edge_web_discover_media")
return Group.of_posts(next(nodes), (Post(self._insta, node) for node in nodes))
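# Added usage sketch (not part of the original module): fetching a profile and saving its
# profile picture, using only APIs defined above. `insta` is assumed to be a client
# instance from this package's client module (not defined in this file).
#
#   profile = Profile.from_username(insta, "instagram")
#   print(profile.fullname, profile.followers_count)
#   profile.profile_picture().download(".", profile.username, verify=False)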
| StarcoderdataPython |