id (string, lengths 1–265) | text (string, lengths 6–5.19M) | dataset_id (string, 7 classes)
---|---|---|
3377378 | <gh_stars>1-10
# AUTOGENERATED! DO NOT EDIT! File to edit: 01_image_classification.ipynb (unless otherwise specified).
__all__ = ['data']
# Cell
from fastai.vision.all import *
# Cell
data = DataBlock(blocks = (ImageBlock, CategoryBlock),
get_items = get_image_files,
get_y = parent_label,
splitter = GrandparentSplitter(valid_name='val'),
item_tfms = RandomResizedCrop(128, min_scale=0.35),
batch_tfms = Normalize.from_stats(*imagenet_stats)
) | StarcoderdataPython |
1625880 | <reponame>Heyjoy/P3<filename>datafield.py<gh_stars>0
import math
# model hyperparameters
TrainTestSplitSize = 0.2
N_EPOCH = 20
Verbose = 1
BatchSize = 64
zeroSteeringCount = 3
#GaussianNoiseStddev = 1
# Image processing tuning parameters
IMGPath = '../data/IMG/'
CSVPath = '../data/driving_log.csv'
ImgShape = [160, 320, 3]
ResizedShape = [64, 64, 3]
cropBottom = math.floor(ImgShape[0]/6) #
cropTop = cropBottom * 2
AngleOffset = 0.25 # offset for left and right camera
## Random image flip
FlipProb = 0.5
## Brightness random
RandomBrightOffset = 0.25
## translate Image method parameter
x_trRange = int(ImgShape[1]/10) # 320/10 = 32
y_trRange = int(ImgShape[0]/10) # 160/10 = 16
trShiftAngle = 0.4
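# --- Added illustrative sketch (not part of the original project): one plausible
# way the crop/resize constants above could be consumed. Assumes OpenCV; the
# helper name `preprocess` is hypothetical.
# import cv2
# def preprocess(img):
#     cropped = img[cropTop:ImgShape[0] - cropBottom, :, :]  # trim sky and car hood
#     return cv2.resize(cropped, (ResizedShape[1], ResizedShape[0]))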
| StarcoderdataPython |
45432 | <reponame>pep7/GorillaBot
# Copyright (c) 2013-2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import logging
import message
from plugins.util import admin, command, humanize_list
from queue import Empty
@command("admincommandlist")
def admincommands(m):
"""Provide a list of admin-only commands."""
#- !admincommands
#-
#- ```irc
#- < GorillaWarfare> !admincommands
#- < GorillaBot> My available admin commands are join, part, quit, setcommand,
#- and unset. See http://molly.github.io/GorillaBot for documentation.
#- ```
#-
#- Say the available admin-only commands. This does not display command aliases.
commands = [key for key in m.bot.admin_commands.keys() if not m.bot.admin_commands[key][1]]
commands.sort()
if len(commands) == 0:
m.bot.private_message(m.location, "I have no available admin commands. See "
"http://molly.github.io/GorillaBot for documentation.")
elif len(commands) == 1:
m.bot.private_message(m.location, "My available admin command is {0}. See "
"http://molly.github.io/GorillaBot for "
"documentation.".format(commands[0]))
else:
m.bot.private_message(m.location, "My available admin commands are {0}. See "
"http://molly.github.io/GorillaBot for "
"documentation.".format(
humanize_list(commands)))
@command("admins", "botops", "oplist")
def adminlist(m):
"""Provide a list of current bot admins."""
#- !adminlist
#-
#- ```irc
#- < GorillaWarfare> !adminlist
#- < GorillaBot> My bot admin is GorillaWarfare.
#- ```
#-
#- Say the current bot operators.
ops = list(m.bot.configuration["botops"].keys())
if ops:
if len(ops) == 1:
m.bot.private_message(m.location, "My bot admin is " + ops[0] + ".")
else:
m.bot.private_message(m.location, "My bot admins are " + humanize_list(ops))
else:
nick = m.bot.configuration["nick"]
m.bot.private_message(m.location, "{0} has no master. {0} is a free bot.".format(nick))
@command("pingall", "highlightall")
def attention(m):
"""Ping everyone currently joined to the channel. Be careful to only turn this on if you trust
those in the channel not to abuse it."""
#- !attention
#-
#- ```irc
#- < GorillaWarfare> !attention
#- < GorillaBot> user1, user2, user3: GorillaWarfare wants your attention
#- ```
#-
#- Ping all of the users in the channel.
#-
#- #### Settings
#- `on` - Anyone can use this command. Be sure you trust everyone in the channel not to abuse
#- it.
#- `admin` - Only bot admins can use this command.
logger = logging.getLogger("GorillaBot")
attention_setting = m.bot.get_setting('attention', m.location)
if attention_setting == 'admin':
if not m.bot.is_admin(m.sender):
m.bot.private_message(m.location, "Please ask a bot operator to perform this action for"
" you.")
return
elif attention_setting != 'on':
m.bot.private_message(m.location, "Command not enabled.")
return
# Okay, we're authorized to do this.
m.bot.response_lock.acquire()
ignored_messages = []
m.bot.send("NAMES {}".format(m.location))
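# Illustrative note (added): the RPL_NAMES (353) body handled below looks roughly
# like "= #channel :nick1 nick2 nick3", hence the code splits it, drops the first
# two tokens, and strips the leading ':' from the first nick.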
while True:
try:
msg = m.bot.message_q.get(True, 120)
except Empty:
logger.error("No response from server when trying to get nicks. Shutting down.")
m.bot.shutdown.set()
return
if isinstance(msg, message.Numeric):
if msg.number == '353':
nicks = msg.body.split()
nicks = nicks[2:]
nicks[0] = nicks[0][1:]
sender = m.bot.parse_hostmask(m.sender)["nick"]
try:
nicks.remove(sender)
nicks.remove(m.bot.configuration["nick"])
except ValueError:
pass
m.bot.private_message(m.location, "{0}: {1} wants your attention"
.format(", ".join(nicks), sender))
break
ignored_messages.append(msg)
for msg in ignored_messages:
m.bot.message_q.put(msg)
m.bot.response_lock.release()
@command("commandlist", "help")
def commands(m):
"""Provide a list of commands available to all users."""
#- !commands
#-
#- ```irc
#- < GorillaWarfare> !commands
#- < GorillaBot> My available commands are admincommands, adminlist, commands, hug,
#- link, spotify, and xkcd. See http://molly.github.io/GorillaBot
#- for documentation.
#- ```
#-
#- Say the available all-user commands. This does not display command aliases.
commands = [key for key in m.bot.commands.keys() if not m.bot.commands[key][1]]
commands.sort()
if len(commands) == 0:
m.bot.private_message(m.location, "I have no available commands. See "
"http://molly.github.io/GorillaBot for documentation.")
elif len(commands) == 1:
m.bot.private_message(m.location, "My available command is {0}. See "
"http://molly.github.io/GorillaBot for "
"documentation.".format(commands[0]))
else:
m.bot.private_message(m.location, "My available commands are {0}. See "
"http://molly.github.io/GorillaBot for "
"documentation.".format(
humanize_list(commands))) | StarcoderdataPython |
1649956 | <reponame>jdmartinez36/azure-batch-cli-extensions
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from enum import Enum
# These properties are reserved for application template use
# and may not be used on jobs using an application template
PROPS_RESERVED_FOR_TEMPLATES = {
'jobManagerTask',
'jobPreparationTask',
'jobReleaseTask',
#'commonEnvironmentSettings',
'usesTaskDependencies',
'onAllTasksComplete',
'onTaskFailure',
'taskFactory'}
PROPS_PERMITTED_ON_TEMPLATES = PROPS_RESERVED_FOR_TEMPLATES.union({
'templateMetadata',
'parameters',
'metadata'})
ATTRS_RESERVED_FOR_TEMPLATES = {
'job_manager_task',
'job_preparation_task',
'job_release_task',
#'common_environment_settings',
'uses_task_dependencies',
'on_all_tasks_complete',
'on_task_failure',
'task_factory'}
# These properties are reserved for job use
# and may not be used on an application template
PROPS_RESERVED_FOR_JOBS = {
'id',
'displayName',
'priority',
'constraints',
'poolInfo',
'applicationTemplateInfo'}
# Properties on a repeatTask object that should be
# applied to each expanded task.
PROPS_ON_REPEAT_TASK = {
'displayName',
'containerSettings',
'resourceFiles',
'environmentSettings',
'constraints',
'userIdentity',
'exitConditions',
'clientExtensions',
'outputFiles',
'packageReferences'}
PROPS_ON_COLLECTION_TASK = PROPS_ON_REPEAT_TASK.union({
'multiInstanceSettings',
'dependsOn'})
# Dates used as cutoffs for different SDK extension versions
class KnownTemplateVersion(Enum):
Dec2018 = "2018-12-01"
| StarcoderdataPython |
128922 | <reponame>Tiago-S-Ribeiro/Python-Pro-Bootcamp<filename>100_days_of_code/Intermediate+/day_37/main.py
import requests
import datetime as dt
from data import USER, TOKEN, G_ID, PXL_ENDPOINT, NEW_PIXEL_ENDPOINT, GRAPH_ENDPOINT
headers = {
"X-USER-TOKEN": TOKEN
}
today = dt.datetime.now()
#------------------- Create a new user using POST -------------------
new_graph_params = {
"token": TOKEN,
"username": USER,
"agreeTermsOfService": "yes",
"notMinor": "yes"
}
response = requests.post(url=PXL_ENDPOINT, json=new_graph_params)
print(response.text)
#---------------- Create a new Pixela graph using POST ----------------
graph_config = {
"id": G_ID,
"name": "Reading Graph",
"unit": "pages",
"type": "int",
"color": "momiji"
}
response = requests.post(url=GRAPH_ENDPOINT, json=graph_config, headers=headers)
print(response.text)
#-------------------- Create a new pixel using POST --------------------
pixel_params = {
"date": today.strftime("%Y%m%d"),
"quantity": input("How many pages did you read today? ")
}
response = requests.post(url=NEW_PIXEL_ENDPOINT, json=pixel_params, headers=headers)
print(response.text)
#----------------------- Update a pixel using PUT -----------------------
put = {
"quantity": "14"
}
date = dt.datetime(year=2022, month=1, day=10)
update_endpoint = f"{NEW_PIXEL_ENDPOINT}/{date.strftime('%Y%m%d')}"
response = requests.put(url=update_endpoint, json=put, headers=headers)
print(response.text)
#---------------------------- Delete a pixel ----------------------------
response = requests.delete(url=update_endpoint, headers=headers)
print(response.text) | StarcoderdataPython |
3251212 | from .directory import DirectoryClient
from .organization import OrganizationClient
from .service import ServiceClient
| StarcoderdataPython |
1768653 | import discord
import secrets
import itertools
import random
import re
import os
from keepAlive import keep_alive
import requests
import json
client = discord.Client()
prefix = '&'
diceTypes = [4,6,8,10,12,20,100]
dnd5e_races = ["Dragonborn", "Dwarf", "Elf", "Gnome", "Half-Elf", "Halfling", "Half-Orc", "Human", "Tiefling", "Orc of Exandria", "Leonin", "Satyr", "Aarakocra", "Genasi", "Goliath", "Aasimar", "Bugbear", "Firbolg", "Goblin", "Hobgoblin", "Kenku", "Kobold", "Lizardfolk", "Orc", "Tabaxi", "Triton", "Yuan-ti Pureblood", "Feral Tiefling", "Tortle", "Changeling", "Kalashtar", "Orc of Eberron", "Shifter", "Warforged", "Gith", "Centaur", "Loxodon", "Minotaur", "Simic Hybrid", "Vedalken", "Verdan", "Locathah", "Grung"]
dnd5e_races_phb = ["Dragonborn", "Dwarf", "Elf", "Gnome", "Half-Elf", "Halfling", "Half-Orc", "Human", "Tiefling"]
dnd5e_classes = ["Barbarian", "Bard", "Cleric", "Druid", "Fighter", "Monk", "Paladin", "Ranger", "Rogue", "Sorcerer", "Warlock", "Wizard", "Artificer", "Blood Hunter"]
dnd5e_classes_phb = ["Barbarian", "Bard", "Cleric", "Druid", "Fighter", "Monk", "Paladin", "Ranger", "Rogue", "Sorcerer", "Warlock", "Wizard"]
def searchCondition(query):
response = requests.get('https://www.dnd5eapi.co/api/conditions/'+query)
json_data = json.loads(response.text)
name = json_data['name']
desc = ''
for i in json_data['desc']:
desc = desc + i+"\n"
return (name,desc)
def conditionList():
response = requests.get('https://www.dnd5eapi.co/api/conditions')
json_data = json.loads(response.text)
cond = ''
for i in json_data['results']:
cond = cond + i['index']+", "
return cond[:-2]
def searchAbility(query):
response = requests.get('https://www.dnd5eapi.co/api/ability-scores/'+query)
json_data = json.loads(response.text)
name = json_data['name']
desc = ''
for i in json_data['desc']:
desc = desc + i+"\n"
skills = ''
for i in json_data['skills']:
skills = skills + i['name'] + ", "
return (name,desc,skills[:-2])
def abilityList():
response = requests.get('https://www.dnd5eapi.co/api/ability-scores')
json_data = json.loads(response.text)
cond = ''
for i in json_data['results']:
cond = cond + i['index']+", "
return cond[:-2]
def skillList():
response = requests.get('https://www.dnd5eapi.co/api/skills')
json_data = json.loads(response.text)
cond = ''
for i in json_data['results']:
cond = cond + i['index']+", "
return cond[:-2]
def searchSkill(query):
response = requests.get('https://www.dnd5eapi.co/api/skills/'+query)
json_data = json.loads(response.text)
name = json_data['name']
desc = ''
for i in json_data['desc']:
desc = desc + i+"\n"
abi = json_data['ability_score']['index']
return (name,desc,abi)
def damageList():
response = requests.get('https://www.dnd5eapi.co/api/damage-types')
json_data = json.loads(response.text)
damage = ''
for i in json_data['results']:
damage = damage + i['index']+", "
return damage[:-2]
def searchDamage(query):
response = requests.get('https://www.dnd5eapi.co/api/damage-types/'+query)
json_data = json.loads(response.text)
name = json_data['name']
desc = ''
for i in json_data['desc']:
desc = desc + i+"\n"
return (name,desc)
def helpList():
string = '**Praise Asmodeus**'+'\n'+'Bot prefix: '+ prefix + '\n' + 'Rolling Dice: &[#dice]d[Type], ex: &8d6' + '\n' + 'Random Race(w/Expansions): &randrace' + '\n' + 'Random Race(PHB): &randracephb'+ '\n' + 'Random Class(w/Expansions): &randclass' + '\n' + 'Random Class(PHB): &randclassphb' + '\n' + 'Random Ability Scores: &randas'+ '\n' + 'Roll d20 with advantage: &adv' + '\n' + 'Roll d20 with disadvantage: &ddv' + '\n' + 'Roll 1d20: &r' + '\n' + 'Generate Random Character(w/Expansions): &randchar' + '\n' + 'Generate Random Character(PHB): &randcharphb' + '\n' + 'Ability Scores List: &abi' + '\n' + 'Ability Scores Descriptions: &[ability], ex:&dex' + '\n' + 'Conditions List: &cond' + '\n' + 'Conditions Description: &[condition], ex: &exhaustion' + '\n' + 'Skills List: &skills' + '\n' + 'Skills Description: &[skill], ex:&animal-handling' + '\n' + 'Damage Types: &damage' + '\n' + 'Damage Types Description: &[type], ex: &thunder'
return string
def diceRoll(message):
split = re.split('&|d',message)
number = int(split[1])
dice = int(split[2])
string = ''
result = 0
if dice in diceTypes:
if number == 1:
rand = random.randrange(1, dice+1)
result = rand
string = string + str(rand)
else:
for i in itertools.repeat(None, number):
rand = random.randrange(1, dice+1)
result = result + rand
string = string + str(rand) + ', '
else:
string = 'Invalid'
result = dice
return (string[:-2],result)
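# Illustrative example (added): for '&3d6', re.split('&|d', ...) gives
# ['', '3', '6'], so three d6 are rolled and diceRoll might return
# ('4, 2, 6', 12) -- the individual rolls are random.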
def randAS():
string = ''
ability = 0
total = 0
for i in itertools.repeat(None, 6):
one = random.randrange(1, 7)
two = random.randrange(1, 7)
three = random.randrange(1, 7)
four = random.randrange(1, 7)
list = [one, two, three, four]
list2 = '('
lowest = min(list)
ability = sum(list) - lowest
total = total + ability
counter = 0
for i in list:
counter = counter + 1
if i != lowest and counter == 4:
list2 = list2 + ' '+ str(i) + ' )'
if i != lowest and counter != 4:
list2 = list2 + ' '+str(i) + ' ,'
if i == lowest and counter == 4:
list2 = list2 + ' '+'~~'+str(i)+'~~' + ' )'
lowest = 0
if i == lowest and counter != 4:
list2 = list2 + ' '+'~~'+str(i)+'~~' + ' ,'
lowest = 0
string = string + list2 + ' = '+'**'+str(ability)+'**'+ "\n"
return string + 'Total: ' + '**'+str(total)+'**'
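# Note (added): randAS() implements the classic "4d6 drop lowest" ability-score
# roll; a single output line looks like "( 4 , ~~2~~ , 5 , 6 ) = **15**".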
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
if message.author == client.user:
return
if re.fullmatch(prefix+r'\d*d\d*',message.content):
(string,result) = diceRoll(message.content)
if string == 'Invalid':
await message.channel.send(message.author.mention +"\n"+'Invalid dice format: d'+str(result))
else:
await message.channel.send( message.author.mention +"\n"+ '**Rolls:** '+ string +"\n"+ '**Total:** '+ str(result) )
if re.fullmatch(prefix+r'randrace',message.content):
racechoice = secrets.choice(dnd5e_races)
await message.channel.send(message.author.mention +"\n"+racechoice)
if re.fullmatch(prefix+r'randracephb',message.content):
classchoice = secrets.choice(dnd5e_races_phb)
await message.channel.send(message.author.mention +"\n"+classchoice)
if re.fullmatch(prefix+r'randclass',message.content):
racechoice = secrets.choice(dnd5e_classes)
await message.channel.send(message.author.mention +"\n"+racechoice)
if re.fullmatch(prefix+r'randclassphb',message.content):
classchoice = secrets.choice(dnd5e_classes_phb)
await message.channel.send(message.author.mention +"\n"+classchoice)
if re.fullmatch(prefix+r'randas',message.content):
await message.channel.send(message.author.mention +"\n"+randAS())
if re.fullmatch(prefix+r'adv',message.content):
rand = random.randrange(1, 21)
rand2 = random.randrange(1, 21)
if rand > rand2:
rand = '**'+str(rand)+'**'
rand2 = str(rand2)
else:
rand = str(rand)
rand2 = '**'+str(rand2)+'**'
await message.channel.send(message.author.mention +"\n"+'**Advantage Rolls:** '+ rand+ ', ' + rand2 )
if re.fullmatch(prefix+r'ddv',message.content):
rand = random.randrange(1, 21)
rand2 = random.randrange(1, 21)
if rand < rand2:
rand = '**'+str(rand)+'**'
rand2 = str(rand2)
else:
rand = str(rand)
rand2 = '**'+str(rand2)+'**'
await message.channel.send(message.author.mention +"\n"+'**Disadvantage Rolls:** '+ rand+ ', ' + rand2 )
if re.fullmatch(prefix+r'r',message.content):
rand = random.randrange(1, 21)
await message.channel.send(message.author.mention +"\n"+'**Roll:** ' + str(rand))
if re.fullmatch(prefix+r'randchar',message.content):
racechoice = secrets.choice(dnd5e_races)
classchoice = secrets.choice(dnd5e_classes)
await message.channel.send(message.author.mention +"\n" +'**Race:** '+"\n"+racechoice+"\n"+'**Class:** '+classchoice + "\n" +'**Ability Scores:** ' +"\n" +randAS())
if re.fullmatch(prefix+r'randcharphb',message.content):
racechoice = secrets.choice(dnd5e_races_phb)
classchoice = secrets.choice(dnd5e_classes_phb)
await message.channel.send(message.author.mention +"\n" +'**Race:** '+"\n"+racechoice+"\n"+'**Class:** '+classchoice + "\n" +'**Ability Scores:** ' +"\n" +randAS())
if re.fullmatch(r'&blinded|&charmed|&deafened|&exhaustion|&frightened|&grappled|&incapacitated|&invisible|&paralyzed|&petrified|&poisoned|&restrained|&stunned|&unconscious',message.content):
(name,desc)=searchCondition(message.content[1:])
await message.channel.send(message.author.mention +"\n" +'**Name:** '+name+"\n"+'**Desc:** '+desc)
if re.fullmatch(r'&str|&con|&dex|&wis|&cha|&int',message.content):
(name,desc,skills)=searchAbility(message.content[1:])
await message.channel.send(message.author.mention +"\n" +'**Name:** '+name+"\n"+'**Desc:** '+desc+"\n"+'**Skills:** '+skills)
if re.fullmatch(prefix+r'cond',message.content):
cond = conditionList()
await message.channel.send(message.author.mention +"\n" +'**Conditions:** '+cond)
if re.fullmatch(prefix+r'abi',message.content):
abi = abilityList()
await message.channel.send(message.author.mention +"\n" +'**Ability Scores:** '+abi)
if re.fullmatch(prefix+r'skills',message.content):
skill = skillList()
await message.channel.send(message.author.mention +"\n" +'**Skills:** '+skill)
if re.fullmatch(r'&acrobatics|&animal-handling|&arcana|&athletics|&deception|&history|&insight|&intimidation|&investigation|&medicine|&nature|&perception|&performance|&persuasion|&religion|&sleight-of-hand|&stealth|&survival',message.content):
(name,desc,abi)=searchSkill(message.content[1:])
await message.channel.send(message.author.mention +"\n" +'**Name:** '+name+"\n"+'**Desc:** '+desc+"\n"+'**Ability Mod:** '+abi)
if re.fullmatch(prefix+r'damage',message.content):
damage = damageList()
await message.channel.send(message.author.mention +"\n" +'**Damage Types:** '+damage)
if re.fullmatch(r'&acid|&bludgeoning|&cold|&fire|&force|&lightning|&necrotic|&piercing|&poison|&psychic|&radiant|&slashing|&thunder',message.content):
(name,desc)=searchDamage(message.content[1:])
await message.channel.send(message.author.mention +"\n" +'**Damage Type:** '+name+"\n"+'**Desc:** '+desc)
if re.fullmatch(prefix+r'help',message.content):
await message.channel.send(message.author.mention +"\n" + helpList())
keep_alive()
client.run(os.getenv('TOKEN'))
| StarcoderdataPython |
152736 | '''
defines all the sources necessary for building cgui.pyd
'''
import os
BUILD_BUDDYLIST_GUI = False
thisdir = os.path.dirname(os.path.abspath(__file__))
sources = '''
src/ctextutil.cpp
src/SplitImage4.cpp
src/ScrollWindow.cpp
src/skinvlist.cpp
src/pyutils.cpp
src/cwindowfx.cpp
src/SkinSplitter.cpp
src/alphaborder.cpp
src/skin/skinobjects.cpp
src/skin/SkinBitmap.cpp
src/LoginWindow.cpp
src/DragMixin.cpp
src/MiscUI.cpp
src/SelectionEvent.cpp
src/InputBox.cpp
src/ExpandoTextCtrl.cpp
src/ExpandEvent.cpp
src/GettextPython.cpp
'''.split()
include_dirs = '''
src
src/skin
src/Animation
src/Animation/Platform
src/Animation/Platform/wx
src/BuddyList
'''.split()
boost_env_dir = os.getenv('BOOST_DIR')
if boost_env_dir is not None:
include_dirs.append(boost_env_dir)
# rtf
rtf_files = \
'''
DebugUtil.cpp
HTMLEncoder.cpp
MSIMEncoder.cpp
MSNEncoder.cpp
RTFToX.cpp
StyleDesc.cpp
StringUtil.cpp
XHTMLEncoder.cpp
YahooEncoder.cpp
'''.split()
sources.extend('src/RTFToX/%s' % s for s in rtf_files)
include_dirs.append('src/RTFToX')
import sys
if sys.platform == 'win32':
sources.extend('''
src/alphaborder_win.cpp
src/win/PlatformMessagesWin.cpp
src/win/WindowSnapperWin.cpp
src/WindowSnapper.cpp
src/win/FullscreenWin.cpp
src/win/WinUtils.cpp
src/win/WinTaskbar.cpp
src/win/WinJumpList.cpp
src/win/RichEditUtils.cpp
src/TransparentFrame.cpp
src/Statistics.cpp
src/IconUtils.cpp
'''.split())
include_dirs.extend([
'src/win',
])
if BUILD_BUDDYLIST_GUI:
sources.extend('''
src/TreeList.cpp
src/BuddyList.cpp
'''.split())
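# Added illustrative sketch: these lists would typically feed a distutils/setuptools
# Extension when building cgui.pyd; the extension name 'cgui' is inferred from the
# module docstring above.
# from setuptools import Extension
# ext_cgui = Extension('cgui', sources=sources, include_dirs=include_dirs)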
| StarcoderdataPython |
1691462 | from http.server import HTTPServer, SimpleHTTPRequestHandler
class MyHTTPRequestHandler(SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.end_headers()
self.wfile.write(b"hi there")
if __name__ == '__main__':
server_address = ('127.0.0.1', 8000)
httpd = HTTPServer(server_address, MyHTTPRequestHandler)
httpd.serve_forever()
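# Quick check (added, illustrative): with the server running, any GET such as
#   curl http://127.0.0.1:8000/
# should return "hi there".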
| StarcoderdataPython |
1684312 | #!/usr/bin/env python
"""
TAP protocol client library.
Copyright (c) 2010 <NAME> <<EMAIL>>
"""
import socket
import string
import random
import struct
import asyncore
import mc_bin_server
import mc_bin_client
from memcacheConstants import REQ_MAGIC_BYTE, RES_MAGIC_BYTE
from memcacheConstants import REQ_PKT_FMT, RES_PKT_FMT, MIN_RECV_PACKET
from memcacheConstants import SET_PKT_FMT, DEL_PKT_FMT, INCRDECR_RES_FMT
import memcacheConstants
class TapConnection(mc_bin_server.MemcachedBinaryChannel):
def __init__(self, server, port, callback, clientId=None, opts={}, user=None, pswd=None):
mc_bin_server.MemcachedBinaryChannel.__init__(self, None, None,
self._createTapCall(clientId,
opts))
self.server = server
self.port = port
self.callback = callback
self.identifier = (server, port)
self.user = user
self.pswd = pswd
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.connect((server, port))
def create_socket(self, family, type):
if not self.user:
mc_bin_server.MemcachedBinaryChannel.create_socket(self, family, type)
return
self.family_and_type = family, type
self.mc = mc_bin_client.MemcachedClient(self.server, self.port)
self.mc.sasl_auth_plain(self.user, self.pswd or "")
sock = self.mc.s
sock.setblocking(0)
self.set_socket(sock)
def _createTapCall(self, key=None, opts={}):
# Client identifier
if not key:
key = "".join(random.sample(string.letters, 16))
dtype=0
opaque=0
cas=0
extraHeader, val = self._encodeOpts(opts)
msg=struct.pack(REQ_PKT_FMT, REQ_MAGIC_BYTE,
memcacheConstants.CMD_TAP_CONNECT,
len(key), len(extraHeader), dtype, 0,
len(key) + len(extraHeader) + len(val),
opaque, cas)
return msg + extraHeader + key + val
def _encodeOpts(self, opts):
header = 0
val = []
for op in sorted(opts.keys()):
header |= op
if op in memcacheConstants.TAP_FLAG_TYPES:
val.append(struct.pack(memcacheConstants.TAP_FLAG_TYPES[op],
opts[op]))
elif op == memcacheConstants.TAP_FLAG_LIST_VBUCKETS:
val.append(self._encodeVBucketList(opts[op]))
else:
val.append(opts[op])
return struct.pack(">I", header), ''.join(val)
def _encodeVBucketList(self, vbl):
l = list(vbl) # in case it's a generator
vals = [struct.pack("!H", len(l))]
for v in vbl:
vals.append(struct.pack("!H", v))
return ''.join(vals)
def processCommand(self, cmd, klen, vb, extralen, cas, data):
extra = data[0:extralen]
key = data[extralen:(extralen+klen)]
val = data[(extralen+klen):]
return self.callback(self.identifier, cmd, extra, key, vb, val, cas)
def handle_connect(self):
pass
def handle_close(self):
self.close()
class TapClient(object):
def __init__(self, servers, callback, opts={}, user=None, pswd=None):
for t in servers:
tc = TapConnection(t.host, t.port, callback, t.id, opts, user, pswd)
class TapDescriptor(object):
port = 11211
id = None
def __init__(self, s):
self.host = s
if ':' in s:
self.host, self.port = s.split(':', 1)
self.port = int(self.port)
if '@' in self.host:
self.id, self.host = self.host.split('@', 1)
def __repr__(self):
return "<TapDescriptor %s@%s:%d>" % (self.id or "(anon)", self.host, self.port)
| StarcoderdataPython |
3281548 | <gh_stars>1000+
#!/usr/bin/python3
"""
[[https://bluemaestro.com/products/product-details/bluetooth-environmental-monitor-and-logger][Bluemaestro]] temperature/humidity/pressure monitor
"""
# todo most of it belongs to DAL... but considering so few people use it I didn't bother for now
from datetime import datetime, timedelta
from pathlib import Path
import re
import sqlite3
from typing import Iterable, Sequence, Set, Optional
from my.core import get_files, LazyLogger, dataclass, Res
from my.core.sqlite import sqlite_connect_immutable
from my.config import bluemaestro as config
# todo control level via env variable?
# i.e. HPI_LOGGING_MY_BLUEMAESTRO_LEVEL=debug
logger = LazyLogger(__name__, level='debug')
def inputs() -> Sequence[Path]:
return get_files(config.export_path)
Celsius = float
Percent = float
mBar = float
@dataclass
class Measurement:
dt: datetime # todo aware/naive
temp : Celsius
humidity: Percent
pressure: mBar
dewpoint: Celsius
# fixme: later, rely on the timezone provider
# NOTE: the timezone should be set with respect to the export date!!!
import pytz # type: ignore
tz = pytz.timezone('Europe/London')
# TODO when I change tz, check the diff
def is_bad_table(name: str) -> bool:
# todo hmm would be nice to have a hook that can patch any module up to
delegate = getattr(config, 'is_bad_table', None)
return False if delegate is None else delegate(name)
from my.core.cachew import cache_dir
from my.core.common import mcachew
@mcachew(depends_on=lambda: inputs(), cache_path=cache_dir('bluemaestro'))
def measurements() -> Iterable[Res[Measurement]]:
# todo ideally this would be via arguments... but needs to be lazy
dbs = inputs()
last: Optional[datetime] = None
# tables are immutable, so can save on processing..
processed_tables: Set[str] = set()
for f in dbs:
logger.debug('processing %s', f)
tot = 0
new = 0
# todo assert increasing timestamp?
with sqlite_connect_immutable(f) as db:
db_dt: Optional[datetime] = None
try:
datas = db.execute(f'SELECT "{f.name}" as name, Time, Temperature, Humidity, Pressure, Dewpoint FROM data ORDER BY log_index')
oldfmt = True
db_dts = list(db.execute('SELECT last_download FROM info'))[0][0]
if db_dts == 'N/A':
# ??? happens for 20180923-20180928
continue
if db_dts.endswith(':'):
db_dts += '00' # wtf.. happens on some day
db_dt = tz.localize(datetime.strptime(db_dts, '%Y-%m-%d %H:%M:%S'))
except sqlite3.OperationalError:
# Right, this looks really bad.
# The device doesn't have internal time & what it does is:
# 1. every X seconds, record a datapoint, store it in the internal memory
# 2. on sync, take the phone's datetime ('now') and then ASSIGN the timestamps to the collected data
# as now, now - X, now - 2X, etc
#
# that basically means that for example, hourly timestamps are completely useless? because their error is about 1h
# yep, confirmed on some historic exports. seriously, what the fuck???
#
# The device _does_ have an internal clock, but it's basically set to 0 every time you update settings
# So, e.g. if, say, at 17:15 you set the interval to 3600, the 'real' timestamps would be
# 17:15, 18:15, 19:15, etc
# But depending on when you export, you might get
# 17:35, 18:35, 19:35; or 17:55, 18:55, 19:55, etc
# basically all you guaranteed is that the 'correct' interval is within the frequency
# it doesn't seem to keep the reference time in the database
#
# UPD: fucking hell, so you can set the reference date in the settings (calcReferenceUnix field in meta db)
# but it's not set by default.
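# Worked example (added for clarity): with interval X = 3600s and three stored
# points, a sync at phone-time 19:55 stamps them 19:55, 18:55, 17:55 -- so each
# timestamp can be off by up to a full interval from the real sample time.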
log_tables = [c[0] for c in db.execute('SELECT name FROM sqlite_sequence WHERE name LIKE "%_log"')]
log_tables = [t for t in log_tables if t not in processed_tables]
processed_tables |= set(log_tables)
# todo use later?
frequencies = [list(db.execute(f'SELECT interval from {t.replace("_log", "_meta")}'))[0][0] for t in log_tables]
# todo could just filter out the older datapoints?? dunno.
# eh. a bit horrible, but seems the easiest way to do it?
# note: for some reason everything in the new table multiplied by 10
query = ' UNION '.join(
f'SELECT "{t}" AS name, unix, tempReadings / 10.0, humiReadings / 10.0, pressReadings / 10.0, dewpReadings / 10.0 FROM {t}'
for t in log_tables
)
if len(log_tables) > 0: # ugh. otherwise end up with syntax error..
query = f'SELECT * FROM ({query}) ORDER BY name, unix'
datas = db.execute(query)
oldfmt = False
db_dt = None
for i, (name, tsc, temp, hum, pres, dewp) in enumerate(datas):
if is_bad_table(name):
continue
# note: bluemaestro keeps local datetime
if oldfmt:
tss = tsc.replace('Juli', 'Jul').replace('Aug.', 'Aug')
dt = datetime.strptime(tss, '%Y-%b-%d %H:%M')
dt = tz.localize(dt)
assert db_dt is not None
else:
# todo cache?
m = re.search(r'_(\d+)_', name)
assert m is not None
export_ts = int(m.group(1))
db_dt = datetime.fromtimestamp(export_ts / 1000, tz=tz)
dt = datetime.fromtimestamp(tsc / 1000, tz=tz)
## sanity checks (todo make defensive/configurable?)
# not sure how that happens.. but basically they'd better be excluded
lower = timedelta(days=6000 / 24) # ugh some time ago I only did it once in an hour.. in theory can detect from meta?
upper = timedelta(days=10) # kinda arbitrary
if not (db_dt - lower < dt < db_dt + upper):
# todo could be more defensive??
yield RuntimeError('timestamp too far out', f, name, db_dt, dt)
continue
assert -60 <= temp <= 60, (f, dt, temp)
##
tot += 1
if last is not None and last >= dt:
continue
# todo for performance, pass 'last' to sqlite instead?
last = dt
new += 1
p = Measurement(
dt=dt,
temp=temp,
pressure=pres,
humidity=hum,
dewpoint=dewp,
)
yield p
logger.debug('%s: new %d/%d', f, new, tot)
# logger.info('total items: %d', len(merged))
# for k, v in merged.items():
# # TODO shit. quite a few of them have varying values... how is that freaking possible????
# # most of them are within 0.5 degree though... so just ignore?
# if isinstance(v, set) and len(v) > 1:
# print(k, v)
# for k, v in merged.items():
# yield Point(dt=k, temp=v) # meh?
from my.core import stat, Stats
def stats() -> Stats:
return stat(measurements)
from my.core.pandas import DataFrameT, as_dataframe
def dataframe() -> DataFrameT:
"""
%matplotlib gtk
from my.bluemaestro import dataframe
dataframe().plot()
"""
df = as_dataframe(measurements(), schema=Measurement)
# todo not sure how it would handle mixed timezones??
# todo hmm, not sure about setting the index
return df.set_index('dt')
def fill_influxdb() -> None:
from my.core import influxdb
influxdb.fill(measurements(), measurement=__name__)
def check() -> None:
temps = list(measurements())
latest = temps[-2:]  # the two most recent measurements
from my.core.error import unwrap
prev = unwrap(latest[-2]).dt
last = unwrap(latest[-1]).dt
# todo stat should expose a dataclass?
# TODO ugh. might need to warn about points past 'now'??
# the default shouldn't allow points in the future...
#
# TODO also needs to be filtered out on processing, should be rejected on the basis of export date?
POINTS_STORED = 6000 # on device?
FREQ_SEC = 60
SECS_STORED = POINTS_STORED * FREQ_SEC
HOURS_STORED = POINTS_STORED / (60 * 60 / FREQ_SEC) # around 4 days
NOW = datetime.now()
assert NOW - last < timedelta(hours=HOURS_STORED / 2), f'old backup! {last}'
assert last - prev < timedelta(minutes=3), f'bad interval! {last - prev}'
single = (last - prev).seconds
| StarcoderdataPython |
68495 | import datetime
from ..dojo_test_case import DojoTestCase
from dojo.models import Test
from dojo.tools.acunetix.parser import AcunetixParser
class TestAcunetixParser(DojoTestCase):
def test_parse_file_with_one_finding(self):
testfile = open("unittests/scans/acunetix/one_finding.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(1, len(findings))
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("Medium", finding.severity)
self.assertEqual(352, finding.cwe)
self.assertEqual(datetime.date(2018, 9, 24), finding.date)
self.assertIsNotNone(finding.description)
self.assertGreater(len(finding.description), 0)
self.assertFalse(finding.false_p)
self.assertEqual("Vijay Test Imapact", finding.impact)
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
self.assertEqual(1, len(finding.unsaved_endpoints))
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertEqual('https', endpoint.protocol)
self.assertEqual(443, endpoint.port)
self.assertEqual('vijaytest.com', endpoint.host)
self.assertEqual('some/path', endpoint.path)
def test_parse_file_with_multiple_finding(self):
testfile = open("unittests/scans/acunetix/many_findings.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(4, len(findings))
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("Medium", finding.severity)
self.assertEqual(datetime.date(2020, 2, 27), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertEqual("A single machine can take down another machine's web server with minimal bandwidth and side effects on unrelated services and ports.", finding.impact)
# check that this finding have references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('www.itsecgames.com', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
with self.subTest(i=1):
finding = findings[1]
self.assertEqual("Possible virtual host found", finding.title)
self.assertEqual("Low", finding.severity)
self.assertEqual(200, finding.cwe)
self.assertEqual(datetime.date(2020, 2, 27), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:N/A:N", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertEqual("Possible sensitive information disclosure.", finding.impact)
# check that this finding have references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('www.itsecgames.com', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
with self.subTest(i=2):
finding = findings[2]
self.assertEqual("Unencrypted connection (verified)", finding.title)
self.assertEqual("Low", finding.severity)
self.assertEqual(310, finding.cwe)
self.assertEqual(datetime.date(2020, 2, 27), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:N", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertEqual("Possible information disclosure.", finding.impact)
# check that this finding have no references
self.assertIsNone(finding.references)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('www.itsecgames.com', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
def test_parse_file_with_example_com(self):
testfile = open("unittests/scans/acunetix/XML_http_example_co_id_.xml")
parser = AcunetixParser()
findings = parser.get_findings(testfile, Test())
for finding in findings:
for endpoint in finding.unsaved_endpoints:
endpoint.clean()
self.assertEqual(7, len(findings))
with self.subTest(i=0):
finding = findings[0]
self.assertEqual("HTML form without CSRF protection", finding.title)
self.assertEqual("Medium", finding.severity)
self.assertEqual(datetime.date(2020, 4, 28), finding.date)
self.assertIsNotNone(finding.description)
self.assertEqual("CVSS:3.0/AV:N/AC:L/PR:N/UI:R/S:U/C:N/I:L/A:N", finding.cvssv3)
self.assertFalse(finding.false_p)
self.assertIn("An attacker could use CSRF to trick a victim into accessing a website hosted by the attacker,", finding.impact)
# aggregated
self.assertEqual(3, finding.nb_occurences)
# check that this finding have references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(3, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('example.co.id', endpoint.host)
self.assertEqual('h/search', endpoint.path)
endpoint = finding.unsaved_endpoints[1]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('example.co.id', endpoint.host)
self.assertEqual('m/zmain', endpoint.path)
# check req/resp
self.assertEqual(3, len(finding.unsaved_req_resp))
for req_resp in finding.unsaved_req_resp:
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
with self.subTest(i=6):
finding = findings[6]
self.assertEqual("Content Security Policy (CSP) not implemented", finding.title)
self.assertEqual("Info", finding.severity)
self.assertEqual(datetime.date(2020, 4, 28), finding.date)
self.assertIsNotNone(finding.description)
self.assertFalse(finding.false_p)
self.assertIn("CSP can be used to prevent and/or mitigate attacks that involve content/code injection,", finding.impact)
# check that this finding have references
self.assertIsNotNone(finding.references)
self.assertGreater(len(finding.references), 0)
# check endpoints
self.assertEqual(1, len(finding.unsaved_endpoints))
endpoint = finding.unsaved_endpoints[0]
self.assertIsNone(endpoint.protocol)
self.assertIsNone(endpoint.port)
self.assertEqual('example.co.id', endpoint.host)
self.assertIsNone(endpoint.path)
# check req/resp
self.assertEqual(1, len(finding.unsaved_req_resp))
req_resp = finding.unsaved_req_resp[0]
self.assertIn('req', req_resp)
self.assertIsNotNone(req_resp['req'])
self.assertIsInstance(req_resp['req'], str)
self.assertIn('resp', req_resp)
self.assertIsNotNone(req_resp['resp'])
self.assertIsInstance(req_resp['resp'], str)
| StarcoderdataPython |
1625657 | # This module is used to map the old Python 2 names to the new names used in
# Python 3 for the pickle module. This needed to make pickle streams
# generated with Python 2 loadable by Python 3.
# This is a copy of lib2to3.fixes.fix_imports.MAPPING. We cannot import
# lib2to3 and use the mapping defined there, because lib2to3 uses pickle.
# Thus, this could cause the module to be imported recursively.
IMPORT_MAPPING = {
'__builtin__' : 'builtins',
'copy_reg': 'copyreg',
'Queue': 'queue',
'SocketServer': 'socketserver',
'ConfigParser': 'configparser',
'repr': 'reprlib',
'tkFileDialog': 'tkinter.filedialog',
'tkSimpleDialog': 'tkinter.simpledialog',
'tkColorChooser': 'tkinter.colorchooser',
'tkCommonDialog': 'tkinter.commondialog',
'Dialog': 'tkinter.dialog',
'Tkdnd': 'tkinter.dnd',
'tkFont': 'tkinter.font',
'tkMessageBox': 'tkinter.messagebox',
'ScrolledText': 'tkinter.scrolledtext',
'Tkconstants': 'tkinter.constants',
'Tix': 'tkinter.tix',
'ttk': 'tkinter.ttk',
'Tkinter': 'tkinter',
'markupbase': '_markupbase',
'_winreg': 'winreg',
'thread': '_thread',
'dummy_thread': '_dummy_thread',
'dbhash': 'dbm.bsd',
'dumbdbm': 'dbm.dumb',
'dbm': 'dbm.ndbm',
'gdbm': 'dbm.gnu',
'xmlrpclib': 'xmlrpc.client',
'SimpleXMLRPCServer': 'xmlrpc.server',
'httplib': 'http.client',
'htmlentitydefs' : 'html.entities',
'HTMLParser' : 'html.parser',
'Cookie': 'http.cookies',
'cookielib': 'http.cookiejar',
'BaseHTTPServer': 'http.server',
'test.test_support': 'test.support',
'commands': 'subprocess',
'urlparse' : 'urllib.parse',
'robotparser' : 'urllib.robotparser',
'urllib2': 'urllib.request',
'anydbm': 'dbm',
'_abcoll' : 'collections.abc',
}
# This contains rename rules that are easy to handle. We ignore the more
# complex stuff (e.g. mapping the names in the urllib and types modules).
# These rules should be run before import names are fixed.
NAME_MAPPING = {
('__builtin__', 'xrange'): ('builtins', 'range'),
('__builtin__', 'reduce'): ('functools', 'reduce'),
('__builtin__', 'intern'): ('sys', 'intern'),
('__builtin__', 'unichr'): ('builtins', 'chr'),
('__builtin__', 'unicode'): ('builtins', 'str'),
('__builtin__', 'long'): ('builtins', 'int'),
('itertools', 'izip'): ('builtins', 'zip'),
('itertools', 'imap'): ('builtins', 'map'),
('itertools', 'ifilter'): ('builtins', 'filter'),
('itertools', 'ifilterfalse'): ('itertools', 'filterfalse'),
('itertools', 'izip_longest'): ('itertools', 'zip_longest'),
('UserDict', 'IterableUserDict'): ('collections', 'UserDict'),
('UserList', 'UserList'): ('collections', 'UserList'),
('UserString', 'UserString'): ('collections', 'UserString'),
('whichdb', 'whichdb'): ('dbm', 'whichdb'),
('_socket', 'fromfd'): ('socket', 'fromfd'),
('_multiprocessing', 'Connection'): ('multiprocessing.connection', 'Connection'),
('multiprocessing.process', 'Process'): ('multiprocessing.context', 'Process'),
('multiprocessing.forking', 'Popen'): ('multiprocessing.popen_fork', 'Popen'),
('urllib', 'ContentTooShortError'): ('urllib.error', 'ContentTooShortError'),
('urllib', 'getproxies'): ('urllib.request', 'getproxies'),
('urllib', 'pathname2url'): ('urllib.request', 'pathname2url'),
('urllib', 'quote_plus'): ('urllib.parse', 'quote_plus'),
('urllib', 'quote'): ('urllib.parse', 'quote'),
('urllib', 'unquote_plus'): ('urllib.parse', 'unquote_plus'),
('urllib', 'unquote'): ('urllib.parse', 'unquote'),
('urllib', 'url2pathname'): ('urllib.request', 'url2pathname'),
('urllib', 'urlcleanup'): ('urllib.request', 'urlcleanup'),
('urllib', 'urlencode'): ('urllib.parse', 'urlencode'),
('urllib', 'urlopen'): ('urllib.request', 'urlopen'),
('urllib', 'urlretrieve'): ('urllib.request', 'urlretrieve'),
('urllib2', 'HTTPError'): ('urllib.error', 'HTTPError'),
('urllib2', 'URLError'): ('urllib.error', 'URLError'),
}
PYTHON2_EXCEPTIONS = (
"ArithmeticError",
"AssertionError",
"AttributeError",
"BaseException",
"BufferError",
"BytesWarning",
"DeprecationWarning",
"EOFError",
"EnvironmentError",
"Exception",
"FloatingPointError",
"FutureWarning",
"GeneratorExit",
"IOError",
"ImportError",
"ImportWarning",
"IndentationError",
"IndexError",
"KeyError",
"KeyboardInterrupt",
"LookupError",
"MemoryError",
"NameError",
"NotImplementedError",
"OSError",
"OverflowError",
"PendingDeprecationWarning",
"ReferenceError",
"RuntimeError",
"RuntimeWarning",
# StandardError is gone in Python 3, so we map it to Exception
"StopIteration",
"SyntaxError",
"SyntaxWarning",
"SystemError",
"SystemExit",
"TabError",
"TargetScopeError",
"TypeError",
"UnboundLocalError",
"UnicodeDecodeError",
"UnicodeEncodeError",
"UnicodeError",
"UnicodeTranslateError",
"UnicodeWarning",
"UserWarning",
"ValueError",
"Warning",
"ZeroDivisionError",
)
try:
TaskletExit
except NameError:
pass
else:
PYTHON2_EXCEPTIONS += ("TaskletExit",)
try:
WindowsError
except NameError:
pass
else:
PYTHON2_EXCEPTIONS += ("WindowsError",)
for excname in PYTHON2_EXCEPTIONS:
NAME_MAPPING[("exceptions", excname)] = ("builtins", excname)
MULTIPROCESSING_EXCEPTIONS = (
'AuthenticationError',
'BufferTooShort',
'ProcessError',
'TimeoutError',
)
for excname in MULTIPROCESSING_EXCEPTIONS:
NAME_MAPPING[("multiprocessing", excname)] = ("multiprocessing.context", excname)
# Same, but for 3.x to 2.x
REVERSE_IMPORT_MAPPING = dict((v, k) for (k, v) in IMPORT_MAPPING.items())
assert len(REVERSE_IMPORT_MAPPING) == len(IMPORT_MAPPING)
REVERSE_NAME_MAPPING = dict((v, k) for (k, v) in NAME_MAPPING.items())
assert len(REVERSE_NAME_MAPPING) == len(NAME_MAPPING)
# Non-mutual mappings.
IMPORT_MAPPING.update({
'cPickle': 'pickle',
'_elementtree': 'xml.etree.ElementTree',
'FileDialog': 'tkinter.filedialog',
'SimpleDialog': 'tkinter.simpledialog',
'DocXMLRPCServer': 'xmlrpc.server',
'SimpleHTTPServer': 'http.server',
'CGIHTTPServer': 'http.server',
# For compatibility with broken pickles saved in old Python 3 versions
'UserDict': 'collections',
'UserList': 'collections',
'UserString': 'collections',
'whichdb': 'dbm',
'StringIO': 'io',
'cStringIO': 'io',
})
REVERSE_IMPORT_MAPPING.update({
'_bz2': 'bz2',
'_dbm': 'dbm',
'_functools': 'functools',
'_gdbm': 'gdbm',
'_pickle': 'pickle',
})
NAME_MAPPING.update({
('__builtin__', 'basestring'): ('builtins', 'str'),
('exceptions', 'StandardError'): ('builtins', 'Exception'),
('UserDict', 'UserDict'): ('collections', 'UserDict'),
('socket', '_socketobject'): ('socket', 'SocketType'),
})
REVERSE_NAME_MAPPING.update({
('_functools', 'reduce'): ('__builtin__', 'reduce'),
('tkinter.filedialog', 'FileDialog'): ('FileDialog', 'FileDialog'),
('tkinter.filedialog', 'LoadFileDialog'): ('FileDialog', 'LoadFileDialog'),
('tkinter.filedialog', 'SaveFileDialog'): ('FileDialog', 'SaveFileDialog'),
('tkinter.simpledialog', 'SimpleDialog'): ('SimpleDialog', 'SimpleDialog'),
('xmlrpc.server', 'ServerHTMLDoc'): ('DocXMLRPCServer', 'ServerHTMLDoc'),
('xmlrpc.server', 'XMLRPCDocGenerator'):
('DocXMLRPCServer', 'XMLRPCDocGenerator'),
('xmlrpc.server', 'DocXMLRPCRequestHandler'):
('DocXMLRPCServer', 'DocXMLRPCRequestHandler'),
('xmlrpc.server', 'DocXMLRPCServer'):
('DocXMLRPCServer', 'DocXMLRPCServer'),
('xmlrpc.server', 'DocCGIXMLRPCRequestHandler'):
('DocXMLRPCServer', 'DocCGIXMLRPCRequestHandler'),
('http.server', 'SimpleHTTPRequestHandler'):
('SimpleHTTPServer', 'SimpleHTTPRequestHandler'),
('http.server', 'CGIHTTPRequestHandler'):
('CGIHTTPServer', 'CGIHTTPRequestHandler'),
('_socket', 'socket'): ('socket', '_socketobject'),
})
PYTHON3_OSERROR_EXCEPTIONS = (
'BrokenPipeError',
'ChildProcessError',
'ConnectionAbortedError',
'ConnectionError',
'ConnectionRefusedError',
'ConnectionResetError',
'FileExistsError',
'FileNotFoundError',
'InterruptedError',
'IsADirectoryError',
'NotADirectoryError',
'PermissionError',
'ProcessLookupError',
'TimeoutError',
)
for excname in PYTHON3_OSERROR_EXCEPTIONS:
REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'OSError')
PYTHON3_IMPORTERROR_EXCEPTIONS = (
'ModuleNotFoundError',
)
for excname in PYTHON3_IMPORTERROR_EXCEPTIONS:
REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'ImportError')
| StarcoderdataPython |
4813743 | <reponame>Shea192/pytorch-lightning<gh_stars>1-10
import torch
from pytorch_lightning import Trainer
from tests.base import EvalModelTemplate
import tests.base.utils as tutils
def test_training_epoch_end_metrics_collection(tmpdir):
""" Test that progress bar metrics also get collected at the end of an epoch. """
num_epochs = 3
class CurrentModel(EvalModelTemplate):
def training_step(self, *args, **kwargs):
output = super().training_step(*args, **kwargs)
output['progress_bar'].update({'step_metric': torch.tensor(-1)})
output['progress_bar'].update({'shared_metric': 100})
return output
def training_epoch_end(self, outputs):
epoch = self.current_epoch
# both scalar tensors and Python numbers are accepted
return {
'progress_bar': {
f'epoch_metric_{epoch}': torch.tensor(epoch), # add a new metric key every epoch
'shared_metric': 111,
}
}
model = CurrentModel(tutils.get_default_hparams())
trainer = Trainer(
max_epochs=num_epochs,
default_root_dir=tmpdir,
overfit_pct=0.1,
)
result = trainer.fit(model)
assert result == 1
metrics = trainer.progress_bar_dict
# metrics added in training step should be unchanged by epoch end method
assert metrics['step_metric'] == -1
# a metric shared in both methods gets overwritten by epoch_end
assert metrics['shared_metric'] == 111
# metrics are kept after each epoch
for i in range(num_epochs):
assert metrics[f'epoch_metric_{i}'] == i
| StarcoderdataPython |
25897 | <reponame>nvuillam/checkov<filename>tests/cloudformation/graph_builder/test_local_graph.py
import os
from unittest import TestCase
from checkov.cloudformation.graph_builder.graph_components.block_types import BlockType
from checkov.cloudformation.graph_builder.local_graph import CloudformationLocalGraph
from checkov.cloudformation.parser import parse
TEST_DIRNAME = os.path.dirname(os.path.realpath(__file__))
class TestLocalGraph(TestCase):
def test_build_graph_with_single_resource(self):
relative_file_path = "../checks/resource/aws/example_APIGatewayXray/APIGatewayXray-PASSED.yaml"
definitions = {}
file = os.path.realpath(os.path.join(TEST_DIRNAME, relative_file_path))
(definitions[relative_file_path], definitions_raw) = parse(file)
local_graph = CloudformationLocalGraph(definitions)
local_graph.build_graph(render_variables=False)
self.assertEqual(1, len(local_graph.vertices))
resource_vertex = local_graph.vertices[0]
self.assertEqual("AWS::ApiGateway::Stage.MyStage", resource_vertex.name)
self.assertEqual("AWS::ApiGateway::Stage.MyStage", resource_vertex.id)
self.assertEqual(BlockType.RESOURCE, resource_vertex.block_type)
self.assertEqual("CloudFormation", resource_vertex.source)
self.assertDictEqual(definitions[relative_file_path]["Resources"]["MyStage"]["Properties"], resource_vertex.attributes)
| StarcoderdataPython |
3293553 | # -*- coding: utf-8 -*-
from argh.decorators import arg
from lain_sdk.util import warn, info
from lain_cli.utils import get_version_lists, lain_yaml, check_phase, ClusterConfig
@arg('phase', help="lain cluster phase id, can be added by lain config save")
@arg('-r', '--registry', help='registry url')
def appversion(phase, registry=None):
"""
Show available app versions in remote registry of lain
"""
check_phase(phase)
params = dict(name=phase)
if registry is not None:
params['registry'] = registry
cluster_config = ClusterConfig(**params)
yml = lain_yaml(ignore_prepare=True)
version_list = get_version_lists(phase, yml.appname, ClusterConfig=cluster_config)
print_available_version(version_list)
def print_available_version(version_list):
if len(version_list) == 0:
warn("No available release versions.")
else:
info("Below are the available versions: ")
for version in version_list:
print(version)
| StarcoderdataPython |
3285051 | #!/usr/bin/env python
import sys
#from heapq import heappush, heappop, heapify
from random import randint, choice, seed
try:
import numpy
#import scipy.sparse.linalg as la
except ImportError:
print("numpy not found")
if sys.version_info.major>=3:
long = int
from bruhat.util import write
class Point(object):
"""
A Point is a vertex in a Graph (an undirected graph).
Each Point has a "desc", this is any distinguishing
characteristic (colour/type, etc.)
as respected by isomorphisms of Graph's.
The "desc" can be any string.
"""
def __init__(self, desc, idx, nbd=None, colour="", **kw):
self.desc = desc
self._colour = colour
self._desc = {} # cache get_desc
self.idx = idx
if nbd is None:
nbd = []
self.nbd = nbd
self.__dict__.update(kw)
def __str__(self):
return "Point(desc=%r, idx=%s, nbd=%s)"%(
self.desc, self.idx, [p.idx for p in self.nbd])
__repr__ = __str__
def get_colour(self):
return self._colour
def set_colour(self, colour):
self._desc = {} # clear cache
self._colour = colour
colour = property(get_colour, set_colour)
def get_desc(self, depth=1, source=None):
assert self.nbd is not None
assert depth>=0
assert depth<=1
#_desc = self._desc.get(depth)
#if _desc:
# return _desc
desc = self.desc+str(self._colour)
if depth==0:
#self._desc = desc
return desc
if source is None:
source = []
else:
assert self not in source
descs = [a.get_desc(depth-1, source+[self]) for a in self.nbd if a not in source]
descs.sort()
desc = "%s[%s]"%(desc, ' '.join(descs))
#self._desc = desc
return desc
#def __str__(self):
# return "Point(%s: %s)"%(self.desc, descs)
class Graph(object):
"""
Undirected graph.
"""
def __init__(self, points=[], **attrs):
self.__dict__.update(attrs)
self.descs = {} # cache, map point -> desc
self.deps = None # map point -> list of points
self.attrs = dict(attrs)
self.points = list(points)
for i, point in enumerate(points):
assert point.idx == i
def add(self, desc='', **kw):
"add a Point"
assert not self.descs
assert self.deps is None
i = len(self.points)
point = Point(desc, i, **kw)
self.points.append(point)
return point
def add_directed(self, pi, pj, desc='directed'):
"encode a directed edge using a path with two extra (coloured) Point's"
pa = self.add("%s_a"%desc)
pb = self.add("%s_b"%desc)
self.join(pi, pa)
self.join(pa, pb)
self.join(pb, pj)
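# Note (added): a directed edge i -> j is thus encoded as the undirected path
# pi -- "directed_a" -- "directed_b" -- pj; the asymmetric midpoint labels let
# isomorphisms recover the edge's orientation.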
def __str__(self):
return "Graph(%s)"%(self.points,)
def __len__(self):
return len(self.points)
def __getitem__(self, idx):
return self.points[idx]
def join(self, pi, pj):
points = self.points
if type(pi) in [int, long]:
pi = points[pi]
if type(pj) in [int, long]:
pj = points[pj]
if pi not in pj.nbd:
pj.nbd.append(pi)
if pj not in pi.nbd:
pi.nbd.append(pj)
@classmethod
def build(cls, Gx):
m, n = Gx.shape
points = []
for i in range(m):
g = Gx[i]
assert g.sum()==4
weights = []
for j in numpy.where(g)[0]:
weights.append(Gx[:, j].sum())
weights.sort()
desc = ''.join(str(w) for w in weights)
a = Point(desc, i)
points.append(a)
#print [a.desc for a in points]
for i in range(m):
g = Gx[i]
a = points[i]
for j in numpy.where(g)[0]:
for i1 in numpy.where(Gx[:, j])[0]:
if i1 != i:
a.nbd.append(points[i1])
return cls(points, m=m, n=n)
def map(self, fn):
points = [None]*len(self)
for p in self.points:
p = Point(p.desc, fn[p.idx])
points[p.idx] = p
for p in self.points:
for p1 in p.nbd:
points[fn[p.idx]].nbd.append(points[fn[p1.idx]]) # whoops.. tricky
return self.__class__(points, **self.attrs)
def get_desc(self, depth=1):
return [v.get_desc(depth) for v in self.points]
def get_stats(self, depth=1):
stats = {}
for point in self:
desc = point.get_desc(depth)
stats[desc] = stats.get(desc, 0) + 1
return stats
# ---------- HOTSPOT ----------------------------->
def get_orbits(self, depth=1):
orbits = {}
assert depth==1
if self.deps is None:
deps = {}
for p in self.points:
deps[p] = [p]+p.nbd # 1-neighbours
self.deps = deps
descs = self.descs
for p in self.points:
desc = descs.get(p)
if desc is None:
desc = p.get_desc(depth)
descs[p] = desc
orbit = orbits.setdefault(desc, [])
orbit.append(p)
return orbits # map desc -> list of points
def set_colour(self, p, colour=''):
if colour:
assert p.colour==''
else:
assert p.colour
p.colour = colour
for p in self.deps[p]:
self.descs[p] = None # clear cache
Bag = Graph # backwards compat
class Tanner(Graph):
# This is the Tanner graph
@classmethod
def build(cls, Gx, Gz=None):
if Gz is not None:
return cls.build2(Gx, Gz)
m, n = Gx.shape
checks = [Point('c', i) for i in range(m)]
bits = [Point('b', i+m) for i in range(n)]
for i in range(m):
for j in range(n):
if Gx[i, j]==0:
continue
checks[i].nbd.append(bits[j])
bits[j].nbd.append(checks[i])
return cls(checks+bits, m=m, n=n)
@classmethod
def build2(cls, Gx, Gz):
# This is the Tanner graph
mx, n = Gx.shape
mz, n = Gz.shape
xchecks = [Point('x', i, row=i) for i in range(mx)]
zchecks = [Point('z', i+mx, row=i) for i in range(mz)]
bits = [Point('b', i+mx+mz, row=i) for i in range(n)]
for i in range(mx):
for j in range(n):
if Gx[i, j]==0:
continue
xchecks[i].nbd.append(bits[j])
bits[j].nbd.append(xchecks[i])
for i in range(mz):
for j in range(n):
if Gz[i, j]==0:
continue
zchecks[i].nbd.append(bits[j])
bits[j].nbd.append(zchecks[i])
return cls(xchecks+zchecks+bits, mx=mx, mz=mz, n=n)
def shortstr(self):
m, n = self.m, self.n
rows = []
for i in range(m): # checks
row = ['.']*n
p = self.points[i]
for p1 in p.nbd:
row[p1.idx-m] = '1'
row = ''.join(row)
rows.append(row)
return '\n'.join(rows)
def from_sparse_ham(n, H):
points = []
for i in range(n):
p = Point('(%s)'%H[i, i], i)
points.append(p)
for i, j in H.keys():
if i!=j:
points[i].nbd.append(points[j])
graph = Graph(points)
return graph
def from_ham(H, syndromes=None):
if syndromes is not None:
return from_ham_syndromes(H, syndromes) # <------ return
n = len(H)
points = []
for i in range(n):
p = Point('(%s)'%H[i, i], i)
points.append(p)
for i in range(n):
for j in range(n):
if i==j:
continue
if H[i, j]:
points[i].nbd.append(points[j])
graph = Graph(points)
return graph
def from_ham_syndromes(H, syndromes):
n = len(H) # dimension of state space
assert len(syndromes)==n # one syndrome for each basis vector
m = len(syndromes[0]) # each syndrome has m check values
points = []
for i in range(n):
p = Point('(%s)'%H[i, i], i)
points.append(p)
checks = []
for i in range(m):
c = Point('c', n+i)
checks.append(c)
for i in range(n):
for j in range(n):
if i==j:
continue
if H[i, j]:
points[i].nbd.append(points[j])
for j in range(m):
if syndromes[i][j]:
points[i].nbd.append(checks[j])
checks[j].nbd.append(points[i])
graph = Graph(points+checks)
return graph
def get_perm(m, n, fn):
U = numpy.zeros((m, m), dtype=int)
for i in range(m):
j = fn[i]
U[i, j] = 1
V = numpy.zeros((n, n), dtype=int)
for i in range(n):
j = fn[i+m]-m
V[j, i] = 1
return U, V
def search_recursive(graph0, graph1, fn=None, depth=1):
assert depth>0
if fn is None:
fn = {}
if len(graph0)!=len(graph1):
return
assert graph0 is not graph1
orbits0 = graph0.get_orbits(depth)
orbits1 = graph1.get_orbits(depth)
if len(orbits0) != len(orbits1):
return
keys0 = list(orbits0.keys())
keys1 = list(orbits1.keys())
keys0.sort()
keys1.sort()
if keys0 != keys1:
return
idx = len(fn)
# choose any uncoloured graph0 point
p = graph0.points[idx]
assert p.colour == ''
key = p.get_desc(depth)
orbit = orbits1[key]
#p.colour = str(idx)
graph0.set_colour(p, str(idx))
# go through each candidate in graph1
for p1 in orbit:
assert p1.colour == ''
#p1.colour = str(idx)
graph1.set_colour(p1, str(idx))
assert fn.get(idx) is None
fn[idx] = p1.idx
if len(fn) == len(graph0):
yield dict(fn)
else:
for _fn in search_recursive(graph0, graph1, fn, depth):
yield _fn
del fn[idx]
assert len(fn) == idx
#p1.colour = ''
graph1.set_colour(p1)
#p.colour = ''
graph0.set_colour(p, '')
class Backtrack(Exception):
pass
class State(object):
def __init__(self, graph0, graph1, depth=1):
orbits0 = graph0.get_orbits(depth) # map: desc -> list of points
orbits1 = graph1.get_orbits(depth) # map: desc -> list of points
if len(orbits0) != len(orbits1):
raise Backtrack() # <-------------- raise
keys0 = list(orbits0.keys())
keys1 = list(orbits1.keys())
keys0.sort()
keys1.sort()
if keys0 != keys1:
raise Backtrack() # <-------------- raise
self.graphs = graph0, graph1
self.orbitss = orbits0, orbits1
self.keyss = keys0, keys1
self.idx0 = None
self.depth = depth
def choose(self, idx0):
assert self.idx0 is None
assert idx0 is not None
graph0, graph1 = self.graphs
p0 = graph0.points[idx0]
assert p0.colour == ''
key0 = p0.get_desc(self.depth)
self.orbit1 = self.orbitss[1][key0]
assert self.orbit1 # otherwise: wtf?
self.idx0 = idx0 # source index: this is constant
self.idx1 = 0 # search target index
self.p0 = p0
self.p1 = None
def choose_best(self):
        orbits0 = self.orbitss[0]
        items = list(orbits0.items())
items.sort(key = lambda item : len(item[1]))
p = items[0][1][0] # first guy in smallest orbit
self.choose(p.idx)
return p.idx
def do(self, fn):
graph0, graph1 = self.graphs
# make assignment: idx0 -> idx1
p0 = self.p0
#assert p0.colour == ''
#p0.colour = str(self.idx0)
graph0.set_colour(p0, str(self.idx0))
p1 = self.orbit1[self.idx1]
#assert p1.colour == ''
#p1.colour = str(self.idx0)
graph1.set_colour(p1, str(self.idx0))
assert fn.get(self.idx0) is None
fn[self.idx0] = p1.idx
assert self.p1 is None
self.p1 = p1
def undo(self, fn):
graph0, graph1 = self.graphs
# undo assignment
del fn[self.idx0]
assert self.p1 is not None
p0 = self.p0
p1 = self.p1
assert p1.colour==str(self.idx0)
assert p0.colour==str(self.idx0)
#p0.colour = ''
#p1.colour = ''
graph0.set_colour(p0)
graph1.set_colour(p1)
self.p1 = None
def next(self):
assert self.p1 is None
self.idx1 += 1
if self.idx1 >= len(self.orbit1):
raise Backtrack() # <-------------- raise
def search(graph0, graph1, depth=1, fn=None, verbose=False):
# return dict: int --> int
assert graph0 is not graph1
if len(graph0) != len(graph1):
return
# doesn't help any:
#if graph0.get_stats() != graph1.get_stats():
# return
if fn is None:
fn = {}
orbits = graph0.get_orbits(depth)
graph1.get_orbits()
keys = list(orbits.keys())
keys.sort(key = lambda key : len(orbits[key]))
remain = []
for key in keys:
for p in orbits[key]:
if p.idx not in fn:
remain.append(p.idx)
#for idx in fn.keys():
# remain.remove(idx)
remain.sort()
for idx in fn:
graph0.set_colour(graph0[idx], str(idx))
graph1.set_colour(graph1[fn[idx]], str(idx))
try:
state = State(graph0, graph1, depth)
except Backtrack:
return
idx = remain.pop(0)
state.choose(idx)
#idx = remain.pop(randint(0, len(remain)-1))
#state.choose(idx)
#idx = state.choose_best()
#remain.remove(idx)
stack = [state]
while stack:
if verbose:
print( "SEARCH", len(stack))
for idx in remain:
assert fn.get(idx) is None
assert len(remain)+len(fn)+1==len(graph0)
state = stack[-1]
state.do(fn)
assert len(remain)+len(fn)==len(graph0)
if verbose:
print( fn)
if len(fn) == len(graph0):
if verbose:
print( "FOUND")
yield dict(fn)
else:
# try to add another state
try:
_state = State(graph0, graph1, depth)
#idx = remain.pop(randint(0, len(remain)-1))
idx = remain.pop(0)
_state.choose(idx)
#idx = _state.choose_best()
#remain.remove(idx)
stack.append(_state)
if verbose:
print( "PUSH")
continue
except Backtrack:
if verbose:
print( "BACK")
# the above do() doesn't work
pass
# next
while stack:
state = stack[-1]
if verbose:
print( "UNDO")
assert len(remain)+len(fn)==len(graph0)
state.undo(fn)
assert len(remain)+len(fn)+1==len(graph0)
try:
if verbose:
print( "NEXT")
state.next()
break # ok, finished backtracking
except Backtrack:
if verbose:
print( "POP")
state = stack.pop() # discard this guy
#remain.append(state.idx0)
remain.insert(0, state.idx0)
def all_autos(Gx):
#Gx = parse(gcolor_gauge)
m, n = Gx.shape
graph0 = Tanner.build(Gx)
graph1 = Tanner.build(Gx)
for fn in search(graph0, graph1):
U, V = get_perm(m, n, fn)
yield U, V
def peterson_graph():
inside = [Point('', i) for i in range(5)]
outside = [Point('', i+5) for i in range(5)]
graph = Graph(inside+outside)
for i in range(5):
graph.join(i, (i+2)%5)
graph.join(i, (i+3)%5)
graph.join(i, i+5)
if i<4:
graph.join(i+5, i+6)
else:
graph.join(i+5, i+1)
return graph
def cyclic_graph():
n = 5
points = [Point('', i) for i in range(n)]
graph = Graph(points)
# for i in range(n):
# points[i].nbd.append(points[(i+1)%n])
# points[(i+1)%n].nbd.append(points[i])
for i in range(n):
graph.add_directed(points[i], points[(i+1)%n])
return graph
gcolor_gauge = """
1111...........
11..11.........
1.1.1.1........
..11..11.......
.1.1.1.1.......
....1111.......
11......11.....
1.1.....1.1....
........1111...
..11......11...
.1.1.....1.1...
1...1...1...1..
........11..11.
.1...1...1...1.
....11......11.
........1.1.1.1
..1...1...1...1
....1.1.....1.1
"""
gcolor_stab = """
11111111.......
1111....1111...
11..11..11..11.
1.1.1.1.1.1.1.1
"""
cube_ham = """
6111....
14..11..
1.4.1.1.
1..4.11.
.11.2..1
.1.1.2.1
..11..21
....1110
"""
def parse(s):
s = s.replace('.', '0')
lines = s.split()
lines = [l.strip() for l in lines if l.strip()]
rows = [list(int(c) for c in l) for l in lines]
if rows:
n = len(rows[0])
for row in rows:
assert len(row)==n, "rows have varying lengths"
a = numpy.array(rows, dtype=numpy.int32)
return a
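# Added illustrative check (not in the original): parse() maps an ASCII pattern
# onto a dense 0/1 integer array.
def _example_parse():
    a = parse("11.\n.11")
    assert a.shape == (2, 3)
    assert a.tolist() == [[1, 1, 0], [0, 1, 1]]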
def test():
# Find rotation symmetry of the code. It's S_4 with order 24.
Gx = parse(gcolor_gauge)
m, n = Gx.shape
graph0 = Tanner.build(Gx)
graph1 = Tanner.build(Gx)
#global search
#search = search_recursive
count = 0
for fn in search(graph0, graph1):
#print "iso", fn
graph = graph0.map(fn)
#print graph.shortstr()
U, V = get_perm(m, n, fn)
Gx1 = numpy.dot(U, numpy.dot(Gx, V))
assert numpy.abs(Gx-Gx1).sum()==0
count += 1
#print "count:", count
assert count == 24
# S_3 symmetry of cubical hamiltonian
depth = 1
H = parse(cube_ham)
graph0 = from_ham(H)
graph1 = from_ham(H)
count = 0
for fn in search(graph0, graph1, depth=depth):
count += 1
assert count == 6
graph0 = peterson_graph()
graph1 = peterson_graph()
assert len(list(search(graph0, graph1, depth=1))) == 120
# directed graph
graph0 = cyclic_graph()
graph1 = cyclic_graph()
assert len(list(search(graph0, graph1))) == 5
#for f in (search(graph0, graph1)):
# print(f)
from bruhat.argv import argv
if __name__ == "__main__":
if argv.profile:
import cProfile as profile
profile.run("test()")
else:
test()
print( "OK")
| StarcoderdataPython |
89200 | <reponame>subaruclover/apis-fixed<filename>plots_fixed.py<gh_stars>0
"""
Show the results of different acc with plots
input data files: .csv files with one week data from sample data
House ID: E001 ~ E004
created by Qiong
TODO: plot the sample data (from apis-emulator/data/input/Sample) and compare it with our acc data (try: compare p2)
"""
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
sns.set(style="white")
import pandas as pd
import os
import global_var as gl
inputFile = "sample_acc_60.csv"
inputData = pd.read_csv(inputFile)
memory = inputData.to_numpy()
# calculate the coefficient w.r.t gl.acc
filename = os.path.splitext(inputFile)[0]
check_acc = filename.split("_")[2]
coeff = int(60 / gl.acc)
if int(check_acc) == gl.acc:
print("acc correctly received")
# PLOT Houses data
rows_e001 = list(range(0, 10000, 4))
rows_e002 = [x + 1 for x in rows_e001]
rows_e003 = [x + 2 for x in rows_e001]
rows_e004 = [x + 3 for x in rows_e001]
pvc_e001 = memory[rows_e001, 0]
load_e001 = memory[rows_e001, 1]
p2_e001 = memory[rows_e001, 2]
rsoc_e001 = memory[rows_e001, 3]
pvc_e002 = memory[rows_e002, 0]
load_e002 = memory[rows_e002, 1]
p2_e002 = memory[rows_e002, 2]
rsoc_e002 = memory[rows_e002, 3]
pvc_e003 = memory[rows_e003, 0]
load_e003 = memory[rows_e003, 1]
p2_e003 = memory[rows_e003, 2]
rsoc_e003 = memory[rows_e003, 3]
pvc_e004 = memory[rows_e004, 0]
load_e004 = memory[rows_e004, 1]
p2_e004 = memory[rows_e004, 2]
rsoc_e004 = memory[rows_e004, 3]
"""
Plot data
"""
# fig, axs = plt.subplots(2, 2, figsize=(12, 12))
fig, (ax0, ax1, ax2, ax3) = plt.subplots(4, 1, figsize=(12, 12))
ax0_2 = ax0.twinx()
ax1_2 = ax1.twinx()
ax2_2 = ax2.twinx()
ax3_2 = ax3.twinx()
fig.suptitle("The default scenario, E001-E004, acc=%i" % gl.acc)
pvc_e001_plot = ax0.plot(pvc_e001[:24 * 7 * coeff], 'm*-', label="PV E001")
load_e001_plot = ax0.plot(load_e001[:24 * 7 * coeff], 'y--', label="Load E001")
p2_e001_plot = ax0.plot(p2_e001[:24 * 7 * coeff], 'b', label="p2 E001")
rsoc_e001_plot = ax0_2.plot(rsoc_e001[:24 * 7 * coeff], 'g', label="RSOC E001")
# ticks = np.arange(0, 24*7*coeff, 24*coeff)
ax0_ticks = ax0.set_xticks(np.linspace(0, 24*7*coeff, 8, endpoint=True))
hours = np.round(np.linspace(0, 24*7*coeff, 8, endpoint=True) / coeff).astype(int)
label = []
for i in range(len(hours)):
label.append(str(hours[i])) # ['0', '24', '48', '72', '96', '120', '144', '168']
ax0_labels = ax0.set_xticklabels(label)
# ax0.set_xlabel("Hour")
ax0.set_ylabel("Power (W)")
ax0_2.set_ylabel(" % ")
plots_e001 = pvc_e001_plot + load_e001_plot + p2_e001_plot + rsoc_e001_plot
labels_e001 = [plot.get_label() for plot in plots_e001]
ax0.legend(plots_e001, labels_e001, loc='upper left')
pvc_e002_plot = ax1.plot(pvc_e002[:24 * 7 * coeff], 'm*-', label="PV E002")
load_e002_plot = ax1.plot(load_e002[:24 * 7 * coeff], 'y--', label="Load E002")
p2_e002_plot = ax1.plot(p2_e002[:24 * 7 * coeff], 'b', label="p2 E002")
rsoc_e002_plot = ax1_2.plot(rsoc_e002[:24 * 7 * coeff], 'g', label="RSOC E002")
ax1_ticks = ax1.set_xticks(np.linspace(0, 24*7*coeff, 8, endpoint=True))
ax1_labels = ax1.set_xticklabels(label)
# ax1.set_xlabel("Hour")
ax1.set_ylabel("Power (W)")
ax1_2.set_ylabel(" % ")
plots_e002 = pvc_e002_plot + load_e002_plot + p2_e002_plot + rsoc_e002_plot
labels_e002 = [plot.get_label() for plot in plots_e002]
ax1.legend(plots_e002, labels_e002, loc='upper left')
pvc_e003_plot = ax2.plot(pvc_e003[:24 * 7 * coeff], 'm*-', label="PV E003")
load_e003_plot = ax2.plot(load_e003[:24 * 7 * coeff], 'y--', label="Load E003")
p2_e003_plot = ax2.plot(p2_e003[:24 * 7 * coeff], 'b', label="p2 E003")
rsoc_e003_plot = ax2_2.plot(rsoc_e003[:24 * 7 * coeff], 'g', label="RSOC E003")
ax2_ticks = ax2.set_xticks(np.linspace(0, 24*7*coeff, 8, endpoint=True))
ax2_labels = ax2.set_xticklabels(label)
# ax2.set_xlabel("Hour")
ax2.set_ylabel("Power (W)")
ax2_2.set_ylabel(" % ")
plots_e003 = pvc_e003_plot + load_e003_plot + p2_e003_plot + rsoc_e003_plot
labels_e003 = [plot.get_label() for plot in plots_e003]
ax2.legend(plots_e003, labels_e003, loc='upper left')
pvc_e004_plot = ax3.plot(pvc_e004[:24 * 7 * coeff], 'm*-', label="PV E004")
load_e004_plot = ax3.plot(load_e004[:24 * 7 * coeff], 'y--', label="Load E004")
p2_e004_plot = ax3.plot(p2_e004[:24 * 7 * coeff], 'b', label="p2 E004")
rsoc_e004_plot = ax3_2.plot(rsoc_e004[:24 * 7 * coeff], 'g', label="RSOC E004")
ax3_ticks = ax3.set_xticks(np.linspace(0, 24*7*coeff, 8, endpoint=True))
ax3_labels = ax3.set_xticklabels(label)
ax3.set_xlabel("Hour")
ax3.set_ylabel("Power (W)")
ax3_2.set_ylabel(" % ")
plots_e004 = pvc_e004_plot + load_e004_plot + p2_e004_plot + rsoc_e004_plot
labels_e004 = [plot.get_label() for plot in plots_e004]
ax3.legend(plots_e004, labels_e004, loc='upper left')
plt.show()
else:
print("check acc value and try again")
# Compare MSE (or sth. likely) to check different acc
| StarcoderdataPython |
1622599 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import glob, csv, librosa, os, subprocess, time
import numpy as np
import pandas as pd
import data_vn
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
__author__ = '<EMAIL>'
# data path
data_path = "asset/data/"
#
# process Vivos corpus
#
def process_vivos(csv_file, category):
parent_path = data_path + 'vivos/'
labels, wave_files = [], []
# create csv writer
writer = csv.writer(csv_file, delimiter=',')
# read label-info
content_filename = parent_path + category + '/prompts.txt'
label_info = pd.read_table(content_filename, usecols=['ID'], index_col=False, delim_whitespace=True)
# print(label_info) # testpoint: label_info
# read file IDs
# file_ids = []
# for uid in label_info.ID.values:
# print(uid) # testpoint: uid
# folder_path, filename = uid.split("_")
# for d in [parent_path + category + '/waves/%s' % folder_path]:
# print(d) # testpoint: folder_path
# a = glob.glob(d + '*.txt')
# print(a)
# b = sorted(glob.glob(d + '*.txt'))
# print(b)
# for f in sorted(glob.glob(d + '*.txt')):
# # print(f[-12:-4])
# file_ids.extend([f[-12:-4]])
# # print(file_ids)
file_ids = label_info.ID
# print(file_ids) # testpoint: file_ID
# preprocess
content_ = open(content_filename, 'r')
title_content = content_.readline()
# print(title_content) # Result: 'ID\t\tContent\n'
for i, f in enumerate(file_ids):
# wave file name
wave_file = parent_path + category + '/waves/%s/' % f[0:10] + f + '.wav'
# print(wave_file)
fn = wave_file.split('/')[-1]
# print(fn)
target_filename = 'asset/data/preprocess_vn/mfcc/' + fn + '.npy'
# print(target_filename)
if os.path.exists(target_filename):
continue
print("Vivos corpus preprocessing (%d/%d) - ['%s']" % (i, len(file_ids), wave_file))
# load wave file
wave, sr = librosa.load(wave_file, sr=16000, mono=True) # default: sr=22050Hz
# re-sample (48K --> 16K)
# wave = wave[::3]
# get mfcc feature
mfcc = librosa.feature.mfcc(wave, sr=16000)
# get label index
curr_content = content_.readline()
curr_content = curr_content[(len(fn)-3):(len(curr_content))]
print(curr_content)
label = data_vn.str2index(curr_content)
# save result (exclude small mfcc data to prevent CTC loss)
if len(label) < mfcc.shape[1]:
# save meta info
writer.writerow([fn] + label)
# save mfcc
np.save(target_filename, mfcc, allow_pickle=False)
# check saved features
print(data_vn.index2str(label), '\n')
# delay for observation and analysis
# time.sleep(10)
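# Added note (inferred from the paths used above): this assumes the VIVOS
# corpus layout
#   asset/data/vivos/{train,test}/prompts.txt
#   asset/data/vivos/{train,test}/waves/<SPEAKER>/<SPEAKER>_XXX.wav
# where the speaker directory is the first 10 characters of each file ID.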
#
# Create directories
#
if not os.path.exists('asset/data/preprocess_vn'):
os.makedirs('asset/data/preprocess_vn')
if not os.path.exists('asset/data/preprocess_vn/meta'):
os.makedirs('asset/data/preprocess_vn/meta')
if not os.path.exists('asset/data/preprocess_vn/mfcc'):
os.makedirs('asset/data/preprocess_vn/mfcc')
#
# Run pre-processing for training
#
# Vivos corpus for training
csv_file_train = open('asset/data/preprocess_vn/meta/train.csv', 'w')
process_vivos(csv_file_train, 'train')
csv_file_train.close()
#
# Run pre-processing for testing
#
# Vivos corpus for test
csv_file_test = open('asset/data/preprocess_vn/meta/test.csv', 'w')
process_vivos(csv_file_test, 'test')
csv_file_test.close()
| StarcoderdataPython |
164741 | from functools import partial
from . import utils
import numpy as np
import jax
import jax.numpy as jnp
import jax.random as random
from jax import grad, jit, vmap, lax, jacrev, jacfwd, jvp, vjp, hessian
#class Lattice(seed, cell_params, sim_params,
def random_c0(subkeys, odds_c, n):
"""Make random initial conditions given odds ratio of cell types."""
n_ctypes = len(odds_c)
n_c = (n * odds_c / odds_c.sum()).astype(int)
n_c = n_c.at[0].add(n - n_c.sum())
c0 = jnp.repeat(jnp.arange(n_ctypes), n_c)
nmap = np.ndim(subkeys) - 1
fun = lambda sk: random.permutation(sk, c0)
for _ in range(nmap):
fun = vmap(fun)
return n_c, fun(subkeys)
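# Minimal usage sketch (added; names and values are illustrative):
def _example_random_c0():
    key = random.PRNGKey(0)
    subkey = random.split(key, 2)[0]  # a single subkey
    n_c, c0 = random_c0(subkey, jnp.array([1.0, 3.0]), 100)
    # n_c == [25, 75]; c0 is a random permutation of 25 zeros and 75 ones
    return n_c, c0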
@jit
def dE_swap(ij, c, W, AL):
"""
Energy differential after swapping cells i and j.
Depends only on i, j, and their neighbors
"""
new_c = c.at[ij].set(c[ij[::-1]])
E_local = -W[ c[ij, None], c[AL[ij]]].sum()
E_local_swap = -W[new_c[ij, None], new_c[AL[ij]]].sum()
return E_local_swap - E_local
@jit
def quadratic_form(a, G):
"""Quadratic form of column vector `a` induced by matrix `G`"""
return a.T @ G @ a
@jit
def P_swap(dE, beta):
"""
Probability of a swap between cells i and j. Symmetric w.r.t. i and j.
"""
# Glauber dynamics probability
# return 1 / (1 + jnp.exp(beta * dE))
# Metropolis acceptance probability
return jnp.minimum(1., jnp.exp(-beta * dE))
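# Added sanity check: under Metropolis acceptance, energy-lowering swaps are
# always taken and unfavourable ones are exponentially suppressed.
def _example_P_swap():
    assert float(P_swap(-1.0, 2.0)) == 1.0
    assert abs(float(P_swap(1.0, 2.0)) - float(jnp.exp(-2.0))) < 1e-6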
@jit
def swap_ij(c, ij):
"""Swaps cells i and j in the cell type state vector `c`. """
cji = c[ij][::-1]
return c.at[ij].set(cji)
@jit
def accept_swap(c, P, ij):
"""
Returns cell state and log-probability after swapping i <--> j
"""
return swap_ij(c, ij)
@jit
def reject_swap(c, P, ij):
"""
Returns cell state and log-probability after rejecting i <--> j
"""
return c, jnp.log(1 - P)
@jit
def make_swap(c, P, ij, accept):
"""
Returns cell state vector and log-probability of event
after an accepted/rejected swap of cells `i` and `j`.
"""
return lax.cond(accept, accept_swap, reject_swap, c, P, ij)
@jit
def get_random_pair(key, AL):
"""Returns indices of a pair of adjacent cells"""
i, Aj = random.randint(
key=key, shape=(2,), minval=jnp.array([0, 0]), maxval=jnp.array(AL.shape)
)
j = AL[i, Aj]
return jnp.array([i, j])
@jit
def take_MC_step(key, c, beta, W, AL, n):
"""
Randomly selects a swap between adjacent cells and accepts/rejects.
Acceptance is based on Metropolis algorithm.
"""
key, sk1, sk2 = random.split(key, 3)
# Pick random interface and acceptance threshold
ij = get_random_pair(sk1, AL)
thresh = random.uniform(key=sk2)
# Take a Metropolis step
dE = dE_swap(ij, c, W, AL)
P = P_swap(dE, beta)
accept = P > thresh
new_c = make_swap(c, P, ij, accept)
expected_dE = P * dE
return key, new_c, expected_dE
@jit
def propose_swap(key, c, beta, W, AL):
"""
"""
ij = get_random_pair(key, AL)
c_swap = swap_ij(c, ij)
dE = dE_swap(ij, c, W, AL)
P = P_swap(dE, beta)
return ij, c_swap, dE, P
@jit
def local_alignment(c, AL, k, I, O):
    # rewritten to be self-contained: the original body referenced undefined
    # names (c_swap, diff_nb, n_diff_nb); returns each cell's mean local spin
    A_k = get_knn_adjacency_matrix(AL, k)
    s = I[c] @ O
    return A_k @ s / get_num_neighbors(k)
@jit
def local_alignment_change(ij, c, c_swap, AL, k, I, O):
A_k = get_knn_adjacency_matrix(AL, k)
# cells that are neighbors (within k radii) of
# `i` but not `j` and vice-versa - i.e. different neighbors
diff_nb = jnp.expand_dims(jnp.logical_xor(*A_k[ij]), 1)
n_diff_nb = 4 * k + 2
s = I[c] @ O
s_swap = I[c_swap] @ O
m_diff_nb = (A_k * diff_nb) @ s / n_diff_nb
m_diff_nb_swap = (A_k * diff_nb) @ s_swap / n_diff_nb
return ((m_diff_nb_swap ** 2) - (m_diff_nb ** 2)).sum()
mapped_local_alignment_change = vmap(
local_alignment_change, in_axes=(None, None, None, None, 0, None, None)
)
#@jit
def take_MC_step2(args, step):
"""
Randomly selects a swap between adjacent cells and accepts/rejects.
Acceptance is based on Metropolis algorithm.
"""
key, c_t, beta_t, W, AL, *align_args = args
c = c_t[step]
beta = beta_t[step]
new_key, sk1, sk2 = random.split(key, 3)
# Propose a random swap
ij, c_swap, dE, P = propose_swap(sk1, c, beta, W, AL)
expected_d_eta = P * mapped_local_alignment_change(
ij, c, c_swap, AL, *align_args
).mean()
# Accept/reject
thresh = random.uniform(key=sk2)
do_swap = P > thresh
new_c = lax.cond(do_swap, lambda: c_swap, lambda: c)
return (
new_key, c_t.at[step + 1].set(new_c), beta_t, W, AL, *align_args
), expected_d_eta
@partial(jit, static_argnums=(2, 3, 4))
def simulate(theta, args, nsweeps, n, n_ctypes):
key, c, t, _, *more_args = args
beta_t = jnp.power(10., -utils.map_linear(t, theta[0], theta[1]))
W = jnp.eye(n_ctypes) * theta[2]
new_args, expected_d_etas = lax.scan(
take_MC_step2,
(key, c, beta_t, W, *more_args),
jnp.repeat(jnp.arange(nsweeps), n),
)
return new_args, expected_d_etas
@partial(jit, static_argnums=(2, 3, 4))
def simulate_loss(theta, args, nsweeps, n, n_ctypes):
return simulate(theta, args, nsweeps, n, n_ctypes)[1].mean()
@partial(jit, static_argnums=(2, 3))
def update(theta, args, nt, lr):
    """Performs one update step on theta; nt = (nsweeps, n, n_ctypes)."""
    # Compute the loss and its gradient with respect to theta
    loss, grads = jax.value_and_grad(
        simulate_loss,
    )(theta, args, *nt)
    new_theta = theta - grads * lr
    return new_theta, loss, grads
@partial(jit, static_argnums=3)
def update_toy(T, key, l, nt, lr_toy):
"""Performs one update step on T."""
# Compute the gradients on replicates
loss, grads = jax.value_and_grad(
simulate_loss,
)(T, key, l, nt)
new_T = T - grads * lr_toy
return new_T, loss, grads
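# Illustrative training loop (added; `args` is the tuple consumed by simulate
# and nt = (nsweeps, n, n_ctypes), matching the signatures above):
#
#   theta = jnp.array([1.0, 3.0, 1.0])   # (beta exponents, W scale)
#   for step in range(100):
#       theta, loss, grads = update(theta, args, nt, lr=1e-2)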
@jit
def MC_iteration(step, args):
key, c, *extra = args
key, c, expected_dE = take_MC_step(*args)
return key, c, *extra
@jit
def MC_sweep(key, c, beta, W, AL, n):
args = (key, c, beta, W, AL, n)
return lax.fori_loop(0, n, MC_iteration, args)
@jit
def n_cmatch_t(c_t, AL):
"""Returns number of homotypic interfaces at each time-point."""
    # cmatch_t was undefined in this file; inline the elementwise comparison
    return (c_t[:, :, None] == c_t[:, AL]).sum(axis=(1, 2)) // 2
@jit
def get_E_cell(c, W, AL):
    # AL is now an explicit argument; it was a free (undefined) name before
    return W[c[:, None], c[AL]].mean(axis=1)
#### sorting metrics
def get_identity(n_ctypes):
"""Returns the (n_ctypes, n_ctypes) identity matrix."""
return jnp.eye(n_ctypes, dtype=int)
def get_difference_matrix(n_ctypes):
"""
Returns a (n_ctypes, n_ctypes - 1) matrix `O` with -1 on the principal
diagonal and 1 elsewhere. `O @ u` thus computes a difference on the
components of `u`.
"""
return 1 - 2 * jnp.eye(n_ctypes, n_ctypes - 1, dtype=int)
@jit
def get_num_neighbors(k):
return 1 + 3 * k * (k + 1)
@jit
def pow_matrix(A, k):
return lax.fori_loop(1, k, lambda i, M: jnp.matmul(M, A), A)
@jit
def get_knn_adjacency_matrix(AL, k):
n, nnb = AL.shape
diag_true = jnp.diag(jnp.ones(n, dtype=bool))
A = adjacency_matrix_from_adjacency_list(AL, dtype=bool)
A = A | diag_true
A = pow_matrix(A, k)
return A
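# Added sketch: with self-loops included, the k-th power of the adjacency
# matrix marks every cell within k hops, matching get_num_neighbors(k) on a
# periodic hexagonal lattice.
def _example_knn_adjacency():
    AL = jnp.array(get_adjacency_list_periodic(6, 6))
    A1 = get_knn_adjacency_matrix(AL, 1)
    assert int(A1[0].sum()) == int(get_num_neighbors(1))  # 7 = self + 6 neighbours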
equal_vec_scalar = vmap(lambda a, b: a == b, (0, None))
equal_outer_1d_1d = vmap(equal_vec_scalar, (None, 0))
equal_outer_1d_2d = vmap(equal_outer_1d_1d, (None, 0))
equal_outer_2d_1d = vmap(equal_outer_1d_1d, (0, None))
mult_vec_scalar = vmap(lambda a, b: a * b, (0, None))
mult_outer_1d_1d = vmap(mult_vec_scalar, (None, 0))
mult_outer_1d_2d = vmap(mult_outer_1d_1d, (None, 0))
mult_outer_2d_1d = vmap(mult_outer_1d_1d, (0, None))
@jit
def local_spin(c, AL, k):
"""
"""
A_k = get_knn_adjacency_matrix(AL, k)
nnb = get_num_neighbors(k)
s_i = jnp.array([-1, 1])[c]
return A_k @ s_i / nnb
@jit
def knn_alignment_per_cell(c, AL, k, I, O):
"""
Return alignment of cell types `c` in local neighborhoods.
`c` is the cell type vector of shape `(n,)` with dtype `int`
`A` is the `(n, n)`cell-cell adjacency matrix (can be Boolean)
`I` is the `(n_ctypes, n_ctypes)` identity matrix, where `n_ctypes`
is the number of cell types in the tissue.
`O` is the `(n_ctypes, n_ctypes - 1)` difference matrix with `-1` on
the principal diagonal and `1` elsewhere. `I[c] @ O` converts cell
types (non-negative `int`) to spins (difference vectors). The sum
of spin vector components lies in [-1, 1].
`nnb` is the number of neighbors in the (regular) lattice within
distance `k`.
"""
A_k = get_knn_adjacency_matrix(AL, k)
nnb = get_num_neighbors(k)
s_i = I[c] @ O
m_i = A_k @ s_i / nnb
return 1 - (m_i ** 2).mean(axis=1)
@jit
def knn_alignment_tissue(c, AL, k, I, O):
"""
Return mean alignment of cell types in a tissue by averaging
over neighborhoods. This is equivalent to
`knn_alignment_per_cell(*args).mean()`
`c` is the cell type vector of shape `(n,)` with dtype `int`
`A` is the `(n, n)`cell-cell adjacency matrix (can be Boolean)
`I` is the `(n_ctypes, n_ctypes)` identity matrix, where `n_ctypes`
is the number of cell types in the tissue.
`O` is the `(n_ctypes, n_ctypes - 1)` difference matrix with `-1` on
the principal diagonal and `1` elsewhere. `I[c] @ O` converts cell
types (non-negative `int`) to spins (difference vectors). The sum
of spin vector components lies in [-1, 1].
`nnb` is the number of neighbors in the (regular) lattice within
distance `k`.
"""
A_k = get_knn_adjacency_matrix(AL, k)
nnb = get_num_neighbors(k)
s_i = I[c] @ O
m_i = A_k @ s_i / nnb
return 1 - (m_i ** 2).mean()
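# Added sketch: a single-type tissue is perfectly sorted, so the alignment
# loss 1 - <m^2> evaluates to zero.
def _example_knn_alignment():
    AL = jnp.array(get_adjacency_list_periodic(6, 6))
    c = jnp.zeros(36, dtype=int)
    I, O = get_identity(2), get_difference_matrix(2)
    assert abs(float(knn_alignment_tissue(c, AL, 1, I, O))) < 1e-6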
#### Graph
def adjacency_matrix_from_adjacency_list(AL, dtype=bool):
"""
Returns adjacency matrix for a nnb-regular graph given the adjacency list.
"""
n, nnb = AL.shape
A = jnp.zeros((n, n), dtype=dtype)
return A.at[jnp.repeat(jnp.arange(n), nnb), AL.flatten()].set(1)
def get_adjacency_matrix_periodic(rows, cols=0):
"""Construct adjacency matrix for a periodic hexagonal
lattice of dimensions rows x cols."""
    AL = get_adjacency_list_periodic(rows, cols)
return adjacency_matrix_from_adjacency_list(AL)
def get_adjacency_list_periodic(rows, cols=0):
"""Construct adjacency matrix for a periodic hexagonal
lattice of dimensions rows x cols."""
# Assume square if not specified
if cols == 0:
cols = rows
n = rows * cols
row, col = np.meshgrid(np.arange(rows), np.arange(cols))
row = row.flatten()
col = col.flatten()
# Get row of adjacent cells
dr = np.array([0, 1, 1, 0, -1, -1])
AL_row = np.add.outer(row, dr) % rows
# Get column of adjacent cells, accounting for staggering
dc1 = np.array([1, 0, -1, -1, -1, 0])
dc2 = np.array([1, 1, 0, -1, 0, 1])
AL_col = np.add.outer(col, dc1)
AL_col[1::2] += dc2 - dc1
AL_col = AL_col % cols
return rows * AL_col + AL_row
def hex_grid(rows, cols=0, r=1., sigma=0, **kwargs):
"""
Returns XY coordinates of a regular 2D hexagonal grid
(rows x cols) with edge length r. Points are optionally
passed through a Gaussian filter with std. dev. = sigma * r.
"""
print("Deprecated: please use `cx.geom.hex_grid")
# Check if square grid
if cols == 0:
cols = rows
# Populate grid
x_coords = np.linspace(-r * (cols - 1) / 2, r * (cols - 1) / 2, cols)
y_coords = np.linspace(-np.sqrt(3) * r * (rows - 1) / 4, np.sqrt(3) * r * (rows - 1) / 4, rows)
X = []
for i, x in enumerate(x_coords):
for j, y in enumerate(y_coords):
X.append(np.array([x + (j % 2) * r / 2, y]))
X = np.array(X)
# Apply Gaussian filter if specified
if sigma != 0:
X = np.array([np.random.normal(loc=x, scale=sigma*r) for x in X])
return X
def get_outer_idx(rows, cols):
"""Returns the indices of cells on the border of the lattice grid"""
print("Deprecated: please use `cx.geom.get_outer_idx")
return np.array([
rows * c + r
for c in range(cols)
for r in range(rows)
if ((r in (0, rows - 1)) or (c in (0, cols - 1)))
])
| StarcoderdataPython |
3248149 | from flask import current_app, request, make_response, jsonify, abort
from api.blueprint import api
from core import crossdomain
from flask_security import auth_token_required, roles_required
from flask_login import current_user
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from api.views.all_views import api_version
from api.correction import correction_api
@api.route('/<api_version>/corrections/<int:correction_id>', methods=['GET', 'OPTIONS'])
@crossdomain(origin='*', headers='authentication-token')
@auth_token_required
@api_version
def get_correction(api_version, correction_id):
correction = correction_api.find_correction_by_id(correction_id)
if(correction):
return jsonify( correction.to_dict() )
else:
abort(404)
@api.route('/<api_version>/corrections', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', headers='authentication-token')
@auth_token_required
@api_version
def add_correction(api_version):
json = request.get_json()
    schema = {
        "type": "object",
        "properties": {
            "content": {"type": "string"},
            "file_id": {"type": "number"},
            "format": {"type": "string"},
        },
        "required": ["content", "file_id", "format"],
    }
try:
validate(json, schema)
except ValidationError as ve:
return make_response(jsonify( { 'error': ve.message } ), 400)
correction = correction_api.add_correction(json['content'], json['file_id'], json['format'])
return jsonify( correction.to_dict() )
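# Illustrative request body accepted by add_correction above (added; the
# values are made up, only the keys come from the schema):
#   POST /<api_version>/corrections
#   {"content": "corrected text", "file_id": 42, "format": "markdown"}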
@api.route('/<api_version>/corrections/<int:correction_id>', methods=['PUT', 'OPTIONS'])
@crossdomain(origin='*', headers='authentication-token')
@auth_token_required
@api_version
def update_correction(api_version, correction_id):
json = request.get_json()
    schema = {
        "type": "object",
        "properties": {
            "content": {"type": "string"},
            "format": {"type": "string"},
        },
        "required": ["content", "format"],
    }
try:
validate(json, schema)
except ValidationError as ve:
return make_response(jsonify( { 'error': ve.message } ), 400)
correction = correction_api.find_correction_by_id(correction_id)
if(correction):
correction = correction_api.update_correction_content(correction, content=json['content'])
return jsonify( correction.to_dict() )
else:
abort(404)
| StarcoderdataPython |
1745205 | <reponame>WeiwenXu21/FRU<gh_stars>10-100
import math
import tensorflow as tf
from tensorflow.python.util import nest
import collections
import pdb
_FRUStateTuple = collections.namedtuple("FRUStateTuple", ("state", "t"))
class FRUStateTuple(_FRUStateTuple):
"""Tuple used by FRU Cells for `state_size`, `zero_state`, and output state.
    Stores two elements: `(state, t)`, in that order, where `state` is the hidden
    state and `t` is the time step.
"""
__slots__ = ()
@property
def dtype(self):
(state, t) = self
if state.dtype != t.dtype:
raise TypeError("Inconsistent internal state: %s vs %s" %
(str(state.dtype), str(t.dtype)))
return state.dtype
class FRUCell(tf.contrib.rnn.RNNCell):
"""Implements a simple distribution based recurrent unit that keeps moving
averages of the mean map embeddings of features of inputs.
"""
"""
num_stats: phi size
freqs: array of w
freqs_mask: mask value when frequency is not equal to zero
output_dims: output size
recur_dims: r size
seq_len: length of sequence
"""
def __init__(self, num_stats, freqs, freqs_mask, output_dims, recur_dims, seq_len,
summarize=True, linear_out=False,
include_input=False, activation=tf.nn.relu):
self._num_stats = num_stats
self._output_dims = output_dims
self._recur_dims = recur_dims
self._freqs_array = freqs
self._nfreqs = len(freqs)
self._freqs_mask_array = [0.0 if w == 0 and len(freqs) > 1 else freqs_mask for w in freqs]
print "frequency_mask = ", self._freqs_mask_array
self._summarize = summarize
self._linear_out = linear_out
self._activation = activation
self._include_input = include_input
# as tensorflow does not feed current time step to __call__
# I have to manually record it
self._seq_len = seq_len
self.W = []
self.b = []
"""
nfreqs*num_stats
"""
@property
def state_size(self):
return FRUStateTuple(int(self._nfreqs * self._num_stats), 1)
@property
def output_size(self):
return self._output_dims
def __call__(self, inputs, state_tuple, scope=None):
"""
recur*: r
state*: mu, state_tuple includes (state, t)
stats*: phi
freq*: frequency vector
"""
state, cur_time_step = state_tuple
with tf.variable_scope(scope or type(self).__name__):
self._freqs = tf.reshape(tf.get_variable("frequency", initializer=self._freqs_array, trainable=False), [1, -1, 1])
self._phases = tf.reshape(tf.get_variable("phase", [self._nfreqs], initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32), trainable=True), [1, -1, 1])
self._freqs_mask = tf.reshape(tf.get_variable("frequency_mask", initializer=self._freqs_mask_array, trainable=False), [1, -1, 1])
# Make statistics on input.
if self._recur_dims > 0:
"""
r_t = f(W^r mu_{t-1} + b^r)
"""
recur_output = self._activation(_linear(
state, self._recur_dims, True, scope='recur_feats'
), name='recur_feats_act')
"""
phi_t = W^phi r_t + W^x x_t + b^phi
"""
stats = self._activation(_linear(
[inputs, recur_output], self._num_stats, True, scope='stats',
), name='stats_act')
else:
stats = self._activation(_linear(
inputs, self._num_stats, True, scope='stats'
), name='stats_act')
# Compute moving averages of statistics for the state.
with tf.variable_scope('out_state'):
state_tensor = tf.reshape(
state, [-1, self._nfreqs, self._num_stats], 'state_tensor'
)
stats_tensor = tf.reshape(
stats, [-1, 1, self._num_stats], 'stats_tensor'
)
#cur_time_step = tf.Print(cur_time_step, [cur_time_step], message="cur_time_step = ")
"""
mu_t = mask*mu_{t-1} + cos(2*pi*w*t/T + 2*pi*phase)*phi_t
"""
out_state = tf.reshape(self._freqs_mask*state_tensor +
1.0/self._seq_len*tf.cos(2.0*math.pi/self._seq_len*tf.reshape(cur_time_step, shape=[-1, 1, 1])*self._freqs + 2.0*math.pi*self._phases)*stats_tensor,
[-1, self.state_size.state], 'out_state')
# Compute the output.
if self._include_input:
output_vars = [out_state, inputs]
else:
output_vars = out_state
"""
o_t = W^o mu_t + b^o
"""
output = _linear(
output_vars, self._output_dims, True, scope='output'
)
if not self._linear_out:
output = self._activation(output, name='output_act')
# update time step
out_state_tuple = (out_state, cur_time_step+1)
# Retrieve RNN Variables
if not self.W:
with tf.variable_scope('recur_feats', reuse=True):
self.W.append(tf.get_variable('Matrix'))
self.b.append(tf.get_variable('Bias'))
with tf.variable_scope('stats', reuse=True):
self.W.append(tf.get_variable('Matrix'))
self.b.append(tf.get_variable('Bias'))
with tf.variable_scope('output', reuse=True):
self.W.append(tf.get_variable('Matrix'))
self.b.append(tf.get_variable('Bias'))
print("W = ", self.W)
print("b = ", self.b)
"""
o_t and mu_t
"""
return (output, out_state_tuple)
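# Added illustration (plain Python, independent of the TF graph above): the
# per-frequency state update computed in __call__ is
#     mu_t = mask * mu_{t-1} + (1/T) * cos(2*pi*w*t/T + 2*pi*phase) * phi_t
def _fru_state_update_sketch(mu_prev, phi_t, t, w, phase, mask, seq_len):
    return mask * mu_prev + (1.0 / seq_len) * math.cos(
        2.0 * math.pi * w * t / seq_len + 2.0 * math.pi * phase) * phi_t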
# No longer publicly expose function in tensorflow.
def _linear(args, output_size, bias, bias_start=0.0, scope=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
scope: VariableScope for the created subgraph; defaults to "Linear".
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError(
"Linear is expecting 2D arguments: %s" %
str(shapes))
if not shape[1]:
raise ValueError(
"Linear expects shape[1] of arguments: %s" %
str(shapes))
else:
total_arg_size += shape[1]
dtype = [a.dtype for a in args][0]
# Now the computation.
with tf.variable_scope(scope or "Linear"):
matrix = tf.get_variable(
"Matrix", [total_arg_size, output_size], initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=dtype), dtype=dtype)
if len(args) == 1:
res = tf.matmul(args[0], matrix)
else:
res = tf.matmul(tf.concat(args, 1), matrix)
if not bias:
return res
bias_term = tf.get_variable(
"Bias", [output_size],
dtype=dtype,
initializer=tf.constant_initializer(bias_start, dtype=dtype)
)
return res + bias_term
| StarcoderdataPython |
1633058 | <gh_stars>10-100
################################################
## Writen by <NAME>
################################################
from utilities import *
from Co8 import *
# Define dictionaries of ordinary gems and jewelry in the game.
# Format is key : [value in gp, [list of proto numbers]]
gem_table = {
1: [10, [12042, 12044]],
2: [50, [12041, 12042]],
3: [100, [12035, 12040]],
4: [500, [12034, 12039]],
5: [1000, [12010, 12038]],
6: [5000, [12036, 12037]]
}
jewelry_table = {
1: [50, [6180, 6190]],
2: [100, [6181, 6185]],
3: [200, [6157]],
4: [250, [6182, 6194]],
5: [500, [6186, 6191]],
6: [750, [6183, 6193]],
7: [1000, [6184, 6192]],
8: [2500, [6187, 6197]],
9: [5000, [6188, 6195]],
10: [7500, [6189, 6196]]
}
def RespawnInventory(attachee, num = 0):
    # Removes all attachee's inventory, and respawns it from the InvenSource.mes line number specified by 'num'.
# If num is not given in the function call, the function will attempt to use the default InvenSource.mes line number for the attachee, if one is defined.
# If no InvenSource.mes line number is defined, the function will terminate.
# Example call 1: RespawnInventory(attachee, 1) will create Burne's inventory(per line number 1 in InvenSource.mes) in attachee's inventory.
# Example call 2: RespawnInventory(attachee) will attempt to create the attachee's pre-defined inventory (per InvenSource.mes).
# If the attachee has no Inventory Source defined, the function will terminate.
if num == 0:
if attachee.type == obj_t_container:
num = attachee.obj_get_int( obj_f_container_inventory_source)
elif attachee.type == obj_t_npc:
num = attachee.obj_get_int(obj_f_critter_inventory_source)
else:
print attachee, 'is not a valid type'
return
if num == 0:
print attachee, 'has no inventory source defined'
print 'Please specify an inventory to respawn'
return
ClearInv(attachee)
CreateInv(attachee, num)
return
def ClearInv(attachee):
# Removes all inventory from attachee.
for num in range(4000, 13000):
item = attachee.item_find_by_proto(num)
while (item != OBJ_HANDLE_NULL):
item.destroy()
item = attachee.item_find_by_proto(num)
return
def CreateInv(attachee, num):
# Creates inventory from the structured list created by GetInv from the InvenSource.mes line number 'num'.
inv = GetInv(num)
for i in range(len(inv)):
if not (type(inv[i][0]) is str):
if type(inv[i][1]) is int:
if inv[i][0] <= 100:
chance = inv[i][0]
if chance >= game.random_range(1,100):
create_item_in_inventory(inv[i][1], attachee)
else:
money = create_item_in_inventory(inv[i][0], attachee)
money.obj_set_int(obj_f_money_quantity, inv[i][1])
else:
if inv[i][0] == 100:
n = game.random_range(0, len(inv[i][1]) - 1)
create_item_in_inventory(inv[i][1][n], attachee)
elif inv[i][0] >= 7000 and inv[i][0] <= 7003:
money = create_item_in_inventory(inv[i][0], attachee)
money.obj_set_int(obj_f_money_quantity, game.random_range(inv[i][1][0], inv[i][1][1]))
else:
gjlist = CalcGJ(inv[i][0], inv[i][1])
if gjlist != []:
for k in range(len(gjlist)):
create_item_in_inventory(gjlist[k], attachee)
return
def GetInv(num, filename = 'data\\rules\\InvenSource.mes'):
# Reads InvenSource.mes, finds the line numbered 'num', and creates a structured list of the entries in that line.
InvDict = readMes(filename) #readMes is in Co8.py
InvLine = InvDict[num][0]
InvLine = InvLine.split(':')
InvLine.remove(InvLine[0])
InvLine[0] = InvLine[0].strip()
n = InvLine[0].find('_num')
if n != -1:
n = n + 7
InvLine[0] = InvLine[0][n:]
inv = InvLine[0]
inv = inv.split(' ')
for i in range(len(inv)):
if inv[i].find('(') == -1:
inv[i] = inv[i].split(',')
for j in range(len(inv[i])):
if inv[i][j] == 'copper':
inv[i][j] = 7000
elif inv[i][j] == 'silver':
inv[i][j] = 7001
elif inv[i][j] == 'gold':
inv[i][j] = 7002
elif inv[i][j] == 'platinum':
inv[i][j] = 7003
elif type(inv[i][j]) is str and inv[i][j].find('-') != -1:
inv[i][j] = inv[i][j].split('-')
for k in range(len(inv[i][j])):
inv[i][j][k] = ConvertToInt(inv[i][j][k])
if type(inv[i][j]) is str:
inv[i][j] = ConvertToInt(inv[i][j])
else:
temp1 = inv[i]
temp1 = str(temp1)
temp1 = temp1[1:-1]
temp1 = temp1.split(',')
for n in range(len(temp1)):
temp1[n] = ConvertToInt(temp1[n])
temp2 = [100, temp1]
inv[i] = temp2
return inv
def ConvertToInt( string ):
if type(string) is str:
try:
string = int(string)
except:
if not (string == 'gems' or string == 'jewelry'):
print 'WARNING: NON-INTEGER FOUND'
print 'Non-integer found is', string
else:
print 'WARNING: NON-STRING FOUND'
print 'Non-string found is', string
return string
def CalcGJ(string, value):
gjlist = []
if string == 'gems':
table = gem_table
elif string == 'jewelry':
table = jewelry_table
else:
return gjlist
if not (type(value) is int):
value = ConvertToInt(value)
if not (type(value) is int):
return gjlist
n = len(table)
while value >= table[1][0]:
if table[n][0] <= value:
gjlist.append(table[n][1][game.random_range(0, len(table[n][1]) - 1)])
value = value - table[n][0]
else:
n = n - 1
return gjlist
| StarcoderdataPython |
1648823 | # slope
"""
x_0:
initial position
dt:
dt float
iteration:
integer
a:
    acceleration
0 < a < 1
t:
dt * iteration
"""
# class slope_field(object):
# """docstring for slope_field"""
# def __init__(self, arg):
# super(slope_field, self).__init__()
# self.arg = arg
import numpy as np


def slope(x_0, dt, iteration, a, x_dot):
    # x_dot: callable returning dx/dt at x; made an explicit argument here
    # because the original body referenced an undefined name
x = np.array([x_0])
t = np.array([0])
for i in range(iteration):
x_i = x[i]
t_i = t[i]
x_i = x_i + x_dot(x_i) * a
t_i = t_i + dt
x = np.append(x, np.array([x_i]))
t = np.append(t, np.array([t_i]))
    return x, t
| StarcoderdataPython
1646651 | <filename>sdk/python/pulumi_azure/cognitive/outputs.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'AccountNetworkAcls',
]
@pulumi.output_type
class AccountNetworkAcls(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "defaultAction":
suggest = "default_action"
elif key == "ipRules":
suggest = "ip_rules"
elif key == "virtualNetworkSubnetIds":
suggest = "virtual_network_subnet_ids"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AccountNetworkAcls. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccountNetworkAcls.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccountNetworkAcls.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
default_action: str,
ip_rules: Optional[Sequence[str]] = None,
virtual_network_subnet_ids: Optional[Sequence[str]] = None):
"""
:param str default_action: The Default Action to use when no rules match from `ip_rules` / `virtual_network_subnet_ids`. Possible values are `Allow` and `Deny`.
:param Sequence[str] ip_rules: One or more IP Addresses, or CIDR Blocks which should be able to access the Cognitive Account.
:param Sequence[str] virtual_network_subnet_ids: One or more Subnet ID's which should be able to access this Cognitive Account.
"""
pulumi.set(__self__, "default_action", default_action)
if ip_rules is not None:
pulumi.set(__self__, "ip_rules", ip_rules)
if virtual_network_subnet_ids is not None:
pulumi.set(__self__, "virtual_network_subnet_ids", virtual_network_subnet_ids)
@property
@pulumi.getter(name="defaultAction")
def default_action(self) -> str:
"""
The Default Action to use when no rules match from `ip_rules` / `virtual_network_subnet_ids`. Possible values are `Allow` and `Deny`.
"""
return pulumi.get(self, "default_action")
@property
@pulumi.getter(name="ipRules")
def ip_rules(self) -> Optional[Sequence[str]]:
"""
One or more IP Addresses, or CIDR Blocks which should be able to access the Cognitive Account.
"""
return pulumi.get(self, "ip_rules")
@property
@pulumi.getter(name="virtualNetworkSubnetIds")
def virtual_network_subnet_ids(self) -> Optional[Sequence[str]]:
"""
One or more Subnet ID's which should be able to access this Cognitive Account.
"""
return pulumi.get(self, "virtual_network_subnet_ids")
| StarcoderdataPython |
4814134 | from django.apps import AppConfig
class ChatUsersConfig(AppConfig):
name = 'chat_users'
| StarcoderdataPython |
1761695 | from __future__ import print_function
import sys
import numpy as np
import matplotlib.pyplot as plt
def main(argv):
np.random.seed(2)
numPoints = 1001
xs = np.linspace(-5, 5, numPoints)
probs = generateData(numPoints)
convProbs = convolveProbs(probs)
print(np.sum(convProbs))
plt.imshow(convProbs, interpolation = 'none')
plt.show()
def generateData(numPoints):
probs = list()
for i in range(4):
p1 = np.random.rand(numPoints)
p1 /= np.sum(p1)
probs.append(p1)
return probs
def convolveProbs(probs):
numPoints = len(probs[0])
convProbs = np.diag(probs[0])
for p1 in probs[1:]:
convProbsCopy = np.copy(convProbs)
convProbs = np.zeros((numPoints, numPoints))
rowCumSums = np.zeros((numPoints, numPoints))
for j in range(numPoints):
rowCumSums[:j, j] = np.cumsum(convProbsCopy[1:j+1, j][::-1])[::-1]
for i in range(numPoints):
convProbs[i, i:] += convProbsCopy[i, i:]*np.cumsum(p1[i:])
convProbs[i, i:] += rowCumSums[i, i:]*p1[i]
convProbs[i, i+1:] += np.cumsum(convProbsCopy[i, i:-1])*p1[i+1:]
return convProbs
if __name__ == "__main__":
main(sys.argv[1:])
| StarcoderdataPython |
1701222 | import os
os.system('docker login -u "dashy2004" -p "12345678qwerty123" repo.treescale.com')
os.system('docker build -t games-day .')
os.system('docker tag games-day repo.treescale.com/dashy2004/games-day:latest')
os.system('docker push repo.treescale.com/dashy2004/games-day:latest')
print('The build passed yay!')
| StarcoderdataPython
1759169 | import os
import sys
from alembic import command
from alembic import config
directory = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, directory)
import settings
import data.db_session as db_session
directory = os.path.abspath(
os.path.join(os.path.dirname(__file__), '../..'))
sys.path.insert(0, directory)
from migrations import utils as migrations_utils
def run():
alembic_cfg = config.Config(settings.ALEMBIC_INI)
if not migrations_utils.is_current_rev_is_latest():
command.upgrade(alembic_cfg, 'head')
def setup_db():
db_session.init_sql(settings.DB_CONNECTION)
if __name__ == '__main__':
run()
| StarcoderdataPython |
1666648 | <filename>setup.py
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(
name='diffenv',
version='0.2.9',
author='<NAME>, <NAME>',
author_email='<EMAIL>, <EMAIL>',
url='http://github.com/error-central/diffenv',
description='Compare development environments',
long_description=readme(),
long_description_content_type='text/markdown',
include_package_data=True,
scripts=['bin/diffenv'],
license='MIT',
packages=['diffenv'],
install_requires=[
'colorama',
'requests',
'ruamel.yaml',
'gitpython',
'psutil',
'importlib_metadata',
],
zip_safe=False,
)
| StarcoderdataPython |
4833512 | <gh_stars>0
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import numpy as np
from skimage import io, transform
from datawriter import FolderWriter, ICDAR2015Writer
from synthgen import RendererV3
import random
# Define some configuration variables:
NUM_IMG = -1 # no. of images to use for generation (-1 to use all available):
# SECS_PER_IMG = 5 #max time per image in seconds
SECS_PER_IMG = None # max time per image in seconds
# INSTANCE_PER_IMAGE = 900 # no. of times to use the same image
INSTANCE_PER_IMAGE = 5 # no. of times to use the same image
# path to the data-file, containing image, depth and segmentation:
SEED = 2001
def main(bg_dir: Path, depth_dir: Path, seg_dir: Path, font_dir: Path,
         text_path: Path, model_dir: Path, output_dir: Path, total_samples, viz):
writer = ICDAR2015Writer(output_dir, total_samples)
writer.open()
random.seed(SEED)
np.random.seed(SEED)
color_model_path = model_dir / 'colors_new.cp'
font_model_path = model_dir / 'font_px2pt.pkl'
RV3 = RendererV3(color_model_path, font_dir, text_path, font_model_path, max_time=SECS_PER_IMG)
for i, image_path in enumerate(bg_dir.iterdir()):
image_name = image_path.stem
print('Processing', image_path)
depth_path = depth_dir / (image_name + '.npz')
if not depth_path.exists():
print(depth_path, 'does not exist. Skip')
continue
seg_path = seg_dir / (image_name + '.npz')
if not seg_path.exists():
print(seg_path, 'does not exist. Skip')
continue
img = io.imread(str(image_path))
with np.load(depth_path) as data:
depth = data['depth']
depth = (depth - depth.min()) / (depth.max() - depth.min())
depth = 1 - depth
depth = depth * 255
with np.load(seg_path) as data:
seg = data['seg']
area = data['area']
label = data['label']
# try:
res = RV3.render_text(img, depth, seg, area, label,
ninstance=INSTANCE_PER_IMAGE, viz=viz)
# except Exception as e:
# print(f'[ERROR] {image_path}: {e}')
# print(res)
if len(res) > 0:
writer.write(res)
# visualize the output:
if viz:
plt.show(block=True)
if 'q' == input('Continue? (q to quit)'):
break
writer.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description='Generate Synthetic Scene-Text Images')
parser.add_argument('data_dir', type=Path)
parser.add_argument('--bg_dir', type=Path, default=None)
parser.add_argument('--depth_dir', type=Path, default=None)
parser.add_argument('--seg_dir', type=Path, default=None)
parser.add_argument('--font_dir', type=Path, default=None)
parser.add_argument('--text_path', type=Path, default=None)
parser.add_argument('--model_dir', type=Path, default=None)
parser.add_argument('--viz', action='store_true', dest='viz',
default=False, help='flag for turning on visualizations')
parser.add_argument('--output_dir', default='outputs', type=Path,
help='path to store generated results')
    parser.add_argument('--total_samples', default=10000, type=int,
                        help='Total number of samples to generate')
args = parser.parse_args()
bg_dir = args.bg_dir or Path(args.data_dir) / 'bg'
depth_dir = args.depth_dir or Path(args.data_dir) / 'depths'
seg_dir = args.seg_dir or Path(args.data_dir) / 'segs'
font_dir = args.font_dir or Path(args.data_dir) / 'fonts'
text_path = args.text_path or Path(args.data_dir) / 'text.txt'
model_dir = args.model_dir or Path(args.data_dir) / 'models'
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
    main(bg_dir, depth_dir, seg_dir, font_dir, text_path, model_dir, output_dir, args.total_samples, args.viz)
cv2.destroyAllWindows()
| StarcoderdataPython |
47229 | import os
from flask import Flask
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
class EnvironmentVariablesEndpoint(Resource):
def get(self):
return [(key, os.environ[key]) for key in os.environ.keys()]
api.add_resource(EnvironmentVariablesEndpoint, '/')
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=8000)
| StarcoderdataPython |
37424 | import math
import pathlib
import sys
import torch
import torch.nn as nn
PROJECT_DIR = pathlib.Path(__file__).absolute().parent.parent.parent # main directory, the parent of src
if str(PROJECT_DIR) not in sys.path:
sys.path.append(str(PROJECT_DIR))
from src.model.ConvLayer import ConvLayer
from src.model.PrimaryCaps import PrimaryCaps
from src.model.DigitCaps import DigitCaps
from src.model.Decoder import Decoder
INPUT_WIDTH = 28
NUM_CONV_IN_CHANNELS = 1
CONV_KERNEL = 9
CONV_STRIDE = 1
NUM_CONV_OUT_CHANNELS = 256
NUM_PRIMARY_CHANNELS = 32
PRIMARY_CAPS_DIM = 8
PRIMARY_KERNEL = 9
PRIMARY_STRIDE = 2
DIGIT_CAPS_DIM = 16
NUM_CLASSES = 10
REGULARIZATION_SCALE = 0.0005
ITER = 3
DEC1_DIM = 512
DEC2_DIM = 1024
CUDA_ENABLED = True
SMALL_DECODER = False
DEVICE = 'cuda:0'
CONV_SHARED_WEIGHTS = 0 # disabled
PRIMARY_SHARED_WEIGHTS = 0 # disabled
DIGIT_SHARED_WEIGHTS = 0 # disabled
CONV_SHARED_BIAS = CONV_SHARED_WEIGHTS # to have coherency as default
SQUASH_APPROX = False
class Net(nn.Module):
def __init__(self,
input_wh=INPUT_WIDTH,
num_conv_in_channels=NUM_CONV_IN_CHANNELS,
conv_kernel=CONV_KERNEL,
conv_stride=CONV_STRIDE,
num_conv_out_channels=NUM_CONV_OUT_CHANNELS,
num_primary_channels=NUM_PRIMARY_CHANNELS,
primary_caps_dim=PRIMARY_CAPS_DIM,
primary_kernel=PRIMARY_KERNEL,
primary_stride=PRIMARY_STRIDE,
digit_caps_dim=DIGIT_CAPS_DIM,
num_classes=NUM_CLASSES,
regularization_scale=REGULARIZATION_SCALE,
iter=ITER,
dec1_dim=DEC1_DIM,
dec2_dim=DEC2_DIM,
cuda_enabled=CUDA_ENABLED,
small_decoder=SMALL_DECODER,
device=DEVICE,
conv_shared_weights=CONV_SHARED_WEIGHTS,
primary_shared_weights=PRIMARY_SHARED_WEIGHTS,
digit_shared_weights=DIGIT_SHARED_WEIGHTS,
conv_shared_bias=CONV_SHARED_BIAS,
squash_approx=SQUASH_APPROX):
super(Net, self).__init__()
self.cuda_enabled = cuda_enabled
if cuda_enabled:
self.device = torch.device(device)
else:
self.device = torch.device('cpu')
self.regularization_scale = regularization_scale
conv_dimension = math.floor(
(input_wh-conv_kernel+conv_stride)/conv_stride)
primary_dimension = math.floor(
(conv_dimension-primary_kernel+primary_stride)/primary_stride)
self.conv = ConvLayer(in_channels=num_conv_in_channels,
out_channels=num_conv_out_channels,
kernel_size=conv_kernel,
stride=conv_stride,
cuda_enabled=cuda_enabled,
device=device,
shared_weights=conv_shared_weights,
shared_bias=conv_shared_bias)
self.primary = PrimaryCaps(in_channels=num_conv_out_channels,
out_channels=num_primary_channels,
out_caps_dim=primary_caps_dim,
kernel_size=primary_kernel,
stride=primary_stride,
cuda_enabled=cuda_enabled,
device=device,
shared_weights=primary_shared_weights,
squash_approx=squash_approx)
self.digit = DigitCaps(in_dim=num_primary_channels*primary_dimension*primary_dimension,
out_dim=num_classes,
in_caps_dim=primary_caps_dim,
out_caps_dim=digit_caps_dim,
iter=iter,
cuda_enabled=cuda_enabled,
device=device,
shared_weights=digit_shared_weights,
squash_approx=squash_approx)
decoder_in_dim = digit_caps_dim if small_decoder else num_classes * digit_caps_dim
self.decoder = Decoder(in_dim=decoder_in_dim,
l1_dim=dec1_dim,
l2_dim=dec2_dim,
out_dim=input_wh*input_wh,
device=device,
small_decoder=small_decoder)
def forward(self, x, labels, is_training=True):
out_conv = self.conv(x)
out_primary = self.primary(out_conv)
out_digit = self.digit(out_primary)
reconstruction = self.decoder(out_digit, labels, is_training)
return out_digit, reconstruction
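# Illustrative usage (added; one-hot labels are an assumption about the
# Decoder's masking interface, which is defined elsewhere):
#
#   net = Net(cuda_enabled=False)
#   x = torch.randn(4, 1, 28, 28)
#   labels = torch.eye(10)[torch.randint(0, 10, (4,))]
#   out_digit, reconstruction = net(x, labels, is_training=True)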
| StarcoderdataPython |
3333357 | <filename>instascrape/structures.py
import os
import sys
import json
import logging
import traceback
from typing import *
from io import BytesIO
from collections import namedtuple, OrderedDict
import requests
from instascrape.constants import *
from instascrape.exceptions import *
from instascrape.group import *
from instascrape.utils import get_username_from_userid, set_mtime, get_biggest_media, verify_file, to_datetime
__all__ = ("Post", "IGTV", "Profile", "Hashtag", "Explore")
logger = logging.getLogger("instascrape")
CommentItem = namedtuple("CommentItem", "author text created_time")
class DataGetterMixin:
@property
def raw_data(self) -> dict:
if self._full_data is None:
self._obtain_full_data()
return self._full_data
def _find_or_get(self, *keys: str, data: dict = None, i: int = None):
i = 0 if i is None else i
key = keys[i]
        if data is not None:
            if key in data:
                # keep descending while deeper keys remain unresolved
                if i == len(keys) - 1:
                    return data[key]
                return self._find_or_get(*keys, data=data[key], i=i + 1)
            else:
                # get full data & find in it
                self._obtain_full_data()
                d = self._full_data[keys[0]]
                for k in keys[1:]:
                    d = d[k]  # raises KeyError
                return d
else:
# [1] find in initial data
if key in self._init_data:
d = self._init_data[key]
# [2] find in full data (if not None)
elif self._full_data is not None and key in self._full_data:
d = self._full_data[key]
else:
# get full data & find in it
self._obtain_full_data()
d = self._full_data[key] # raises KeyError
i += 1
return self._find_or_get(*keys, data=d, i=i) if len(keys) > 1 else d
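    # Example (illustrative): self._find_or_get("owner", "profile_pic_url") checks the
    # partial node data first, then any cached full data, and only fetches the full
    # JSON payload as a last resort.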
class AsDictMixin:
info_vars = ()
def as_dict(self, *, extra: bool = False) -> OrderedDict:
"""Returns all 'info_vars' as an 'OrderedDict'.
Arguments:
extra: Add extra data to the dictionary if True.
"""
assert len(self.info_vars) > 0, "'AsDictMixin' should not be used in this class if 'info_vars' is intended to be empty"
dictionary = OrderedDict({"_struct": self.__class__.__name__} if extra else {})
for attr in self.info_vars:
dictionary[attr] = getattr(self, attr)
return dictionary
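    # Example (illustrative): post.as_dict(extra=True) yields something like
    # OrderedDict([('_struct', 'Post'), ('shortcode', '...'), ...]).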
class MediaItem(AsDictMixin):
"""Represents a media item (image or video)."""
info_vars = ("typename", "src", "width", "height", "is_video")
@classmethod
def compose_items(cls, data: dict) -> List["MediaItem"]:
"""Composes 'MediaItem' objects by extracting from 'data'."""
def make(node: dict) -> "MediaItem":
typename = node["__typename"]
if typename == "GraphImage":
item = get_biggest_media(node["display_resources"])
elif typename == "GraphVideo":
item = {"src": node["video_url"]}
return cls(typename, item.get("src"), item.get("config_width"), item.get("config_height"))
typename = data["__typename"]
if typename in ("GraphImage", "GraphVideo"):
items = [make(data)]
elif typename == "GraphSidecar":
items = []
data = data["edge_sidecar_to_children"]["edges"]
for node in data:
items.append(make(node["node"]))
else:
raise AssertionError("unrecognized typename: '{}'".format(typename))
return items
def __init__(self, typename: str, src: str, width: int, height: int):
self.typename = typename
self.src = src
self.width = width
self.height = height
def __repr__(self) -> str:
return "MediaItem(typename='{}', src='{}', width={}, height={})".format(self.typename, self.src, self.width, self.height)
def __eq__(self, other) -> bool:
return isinstance(other, MediaItem) and self.src == other.src
def __hash__(self) -> int:
return hash(self.src)
@property
def is_video(self) -> bool:
"""Returns True if this media is a video."""
        return self.typename == "GraphVideo"
def download(self, dest: str, filename: str, *, write: bool = True, verify: bool = True) -> Optional[str]:
"""Download this media item to a file.
Arguments:
dest: Path to the destination directory.
filename: Name of the file without extension.
write: Write file to disk if True, write to memory otherwise (for testing and debugging).
            verify: Verify file integrity if True, check the size of the file in bytes otherwise.
        Returns:
            The path to the downloaded file if it was downloaded, None if it was
            skipped because an intact copy already exists; raises DownloadError on failure.
"""
try:
f = None
logger.debug("Downloading file {0} -> {1}".format(self.src, dest))
r = requests.get(self.src, stream=True, timeout=30)
# get info of the file
mime = r.headers["Content-Type"]
bytesize = int(r.headers["Content-Length"])
size = int(bytesize / 1024)
if mime == "video/mp4":
ext = ".mp4"
elif mime == "image/jpeg":
ext = ".jpg"
else:
raise DownloadError("Unsupported MIME type: {0}".format(mime), self.src)
finish_filename = filename + ext
finish_path = os.path.join(dest, finish_filename)
part_filename = filename + ext + ".part"
part_path = os.path.join(dest, part_filename)
# skip if the file is existing and intact
if os.path.isfile(finish_path):
# verify file integrity using md5
if verify and verify_file(r.content, finish_path):
logger.debug("~> [{0}] {1} [skip] (already downloaded)".format(mime, finish_filename))
return None
            # verify file by checking the size in bytes
if os.stat(finish_path).st_size == bytesize:
logger.debug("~> [{0}] {1} [skip] (already downloaded)".format(mime, finish_filename))
return None
# write to file
f = open(part_path, "wb+") if write else BytesIO()
for chunk in r.iter_content(1024):
if chunk:
f.write(chunk)
logger.debug("=> [{0}] {1} [{2}x{3}] ({4} kB)".format(mime, finish_filename, self.width or "?", self.height or "?", size))
except Exception as e:
raise DownloadError(str(e), self.src) from e
        else:
            # rename .part file to its real extension (only when it was written to disk)
            if f:
                f.close()
                if write:
                    os.rename(part_path, finish_path)
            return finish_path
finally:
if f and not f.closed:
f.close()
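    # Usage sketch (URL and paths hypothetical):
    #   item = MediaItem("GraphImage", "https://example.com/photo.jpg", 1080, 1080)
    #   path = item.download("/tmp", "photo")  # -> "/tmp/photo.jpg", or None if skipped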
class ReelItem(MediaItem):
"""Represents a media item (image or video) of a reel."""
info_vars = ("typename", "src", "width", "height", "is_video", "id", "owner_username", "owner_id", "owner_profile_picture_url", "created_time", "expire_time", "cta_url")
@classmethod
def compose_items(cls, data: dict) -> List["ReelItem"]:
"""Composes 'ReelItem' objects by extracting from 'data'."""
def make(node: dict) -> "ReelItem":
typename = node["__typename"]
if typename == "GraphStoryImage":
item = get_biggest_media(node["display_resources"])
elif typename == "GraphStoryVideo":
item = get_biggest_media(node["video_resources"])
return cls(typename, item.get("src"), item.get("config_width"), item.get("config_height"), node)
items = []
data = data["items"]
for node in data:
items.append(make(node))
return items
def __init__(self, typename: str, src: str, width: int, height: int, data: dict):
super().__init__(typename, src, width, height)
self.data = data
def __repr__(self) -> str:
return "ReelItem(typename='{}', src='{}', width={}, height={})".format(self.typename, self.src, self.width, self.height)
def __eq__(self, other) -> bool:
return isinstance(other, ReelItem) and self.src == other.src and self.id == other.id
def __hash__(self) -> int:
return hash(self.id)
@property
def is_video(self) -> bool:
"""Returns True if this media item is a video."""
return self.typename == "GraphStoryVideo"
@property
def id(self) -> str:
"""Returns the ID of this reel item."""
return self.data["id"]
@property
def owner_username(self) -> str:
"""Returns the owner's username of this reel item."""
return self.data["owner"]["username"]
@property
def owner_id(self) -> str:
"""Returns the owner's ID of this reel item."""
return self.data["owner"]["id"]
@property
def owner_profile_picture_url(self) -> str:
"""Returns the URL of the owner's profile picture of this reel item."""
return self.data["owner"]["profile_pic_url"]
def owner_profile_picture(self) -> MediaItem:
"""Returns a 'MediaItem' that represents the owner's profile picture of this reel item."""
return MediaItem("GraphImage", self.owner_profile_picture_url, 150, 150)
@property
def created_time(self) -> int:
"""Returns the created time (timestamp) of this reel item."""
return int(self.data["taken_at_timestamp"])
@property
def expire_time(self) -> int:
"""Returns the expire time in timestamp of this reel item."""
return int(self.data["expiring_at_timestamp"])
@property
def cta_url(self) -> Optional[str]:
"""Returns the 'swipe up for more' URL of this reel item."""
return self.data["story_cta_url"]
class Post(AsDictMixin, DataGetterMixin):
"""Represents a Post entity."""
info_vars = ("shortcode", "url", "typename", "id", "owner_username", "owner_id", "owner_profile_picture_url",
"created_time", "caption", "media_count", "likes_count", "comments_count")
@classmethod
def from_shortcode(cls, insta, shortcode: str):
"""Returns a 'Post' instance by shortcode."""
post = cls(insta, {"shortcode": shortcode})
post._obtain_full_data()
return post
def __init__(self, insta, data: dict):
self._insta = insta
self._init_data = data
self._full_data = None
self.shortcode = data["shortcode"]
def _obtain_full_data(self):
if self._full_data is None:
logger.debug("Fetching initial json data of Post(shortcode='{}')...".format(self.shortcode))
self._full_data = self._insta._fetch_json_data(POST_URL.format(shortcode=self.shortcode))["shortcode_media"]
def __repr__(self) -> str:
return "Post(shortcode='{0}', typename='{1}')".format(self.shortcode, self.typename)
def __eq__(self, other) -> bool:
return isinstance(other, Post) and self.shortcode == other.shortcode and self.id == other.id
def __hash__(self) -> int:
return hash(self.shortcode)
def __len__(self) -> int:
return self.media_count
def __getitem__(self, index: int) -> MediaItem:
return self.media_items()[index]
def __iter__(self) -> MediaItem:
for media in self.media_items():
yield media
@property
def url(self) -> str:
"""Returns the URL of this post."""
return "https://instagram.com/p/" + self.shortcode
@property
def typename(self) -> str:
"""Returns the typename of this post (one of 'GraphImage', 'GraphVideo', 'GraphSidecar')."""
return self._find_or_get("__typename")
@property
def id(self) -> str:
"""Returns the ID of this post."""
return self._find_or_get("id")
@property
def owner_username(self) -> str:
"""Returns the owner's username this post."""
return self._find_or_get("owner")["username"]
@property
def owner_id(self) -> str:
"""Returns the owner's ID of this post."""
return self._find_or_get("owner")["id"]
@property
def owner_profile_picture_url(self) -> str:
"""Returns the URL of the owner's profile picture of this post."""
return self._find_or_get("owner", "profile_pic_url")
def owner_profile_picture(self) -> MediaItem:
"""Returns a 'MediaItem' object of the owner's profile picture of this post."""
return MediaItem("GraphImage", self.owner_profile_picture_url, 150, 150)
@property
def created_time(self) -> int:
"""Returns the created_time (timestamp) of this post."""
return int(self._find_or_get("taken_at_timestamp"))
@property
def caption(self) -> str:
"""Returns the caption of this post."""
edges = self._find_or_get("edge_media_to_caption", "edges")
if not edges:
return ""
return edges[0]["node"]["text"]
@property
def likes_count(self) -> int:
"""Returns the amount of likes of this post."""
return self._find_or_get("edge_media_preview_like")["count"]
@property
def comments_count(self) -> int:
"""Returns the amount of comments of this post."""
try:
return self._find_or_get("edge_media_preview_comment")["count"]
except KeyError:
# fallback
return self._find_or_get("edge_media_to_parent_comment")["count"]
@property
def media_count(self) -> int:
"""Returns the amount of media items in this post."""
return len(self.media_items())
def media_items(self) -> List[MediaItem]:
"""Returns a list of 'MediaItem' of this post."""
self._obtain_full_data()
return MediaItem.compose_items(self._full_data)
def likes(self) -> Group:
"""Retrieves likes of this post in the form of usernames.
Returns:
A 'Group' object that yields 'Profile' objects.
"""
logger.info("Retrieving likes of :{0}".format(self.shortcode))
variables = {"shortcode": self.shortcode}
nodes = self._insta._graphql_query_edges(QUERYHASH_LIKES, variables, "shortcode_media", "edge_liked_by")
return Group(next(nodes), (Profile(self._insta, node) for node in nodes))
def comments(self):
"""Retrieves likes of this post in the form of usernames.
Returns:
- An integer that idicates the estimated amount of items.
- A generator that yields 'CommentItem' -> namedtuple(author, text, created_time).
"""
logger.info("Retrieving comments of :{0}".format(self.shortcode))
variables = {"shortcode": self.shortcode}
nodes = self._insta._graphql_query_edges(QUERYHASH_COMMENTS, variables, "shortcode_media", "edge_media_to_comment")
return next(nodes), (CommentItem(node["owner"]["username"], node["text"], node["created_at"]) for node in nodes)
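    # Usage sketch (illustrative):
    #   count, items = post.comments()
    #   for comment in items:
    #       print(comment.author, comment.text, comment.created_time)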
def download(self, dest: str = None, *, write: bool = True, verify: bool = True,
on_item_start: Callable = None, on_item_finish: Callable = None, on_item_error: Callable = None):
"""Download all media items of this post.
Arguments:
dest: Path to the destination directory.
write: Write file to disk if True, write to memory otherwise.
            verify: Verify file integrity if True, check the size of the file in bytes otherwise. See 'MediaItem.download()'.
on_item_start: A callable (Post, int, MediaItem). Called on start of each item.
on_item_finish: A callable (Post, int, MediaItem, str). Called on finish of each item.
on_item_error: A callable (Post, int, MediaItem, Exception). Called on error of each item.
"""
dest = os.path.abspath(dest or "./")
media_items = self.media_items()
multi = self.media_count > 1
subdest = os.path.join(dest, self.shortcode) if multi else None
if subdest and not os.path.isdir(subdest):
os.mkdir(subdest)
logger.debug("Downloading {0} ({1} media) [{2}]...".format(repr(self), len(media_items), self.typename))
logger.debug("Dest: " + dest)
for i, item in enumerate(media_items):
if on_item_start is not None:
on_item_start(self, i, item)
try:
filename = str(i) if multi else self.shortcode
file_path = item.download(subdest or dest, filename, write=write, verify=verify)
if file_path is not None:
set_mtime(file_path, self.created_time)
if on_item_finish is not None:
on_item_finish(self, i, item, file_path)
except Exception as e:
                # NOTE: if the Post has multiple media items to download, an exception will NOT interrupt
                # the whole download of the post, unless the user re-raises the exception in 'on_item_error()'.
exc_type, exc_value, tb = sys.exc_info()
logger.error("{}: {}".format(exc_type.__name__, exc_value))
logger.debug("".join(traceback.format_tb(tb)))
if on_item_error is not None:
on_item_error(self, i, item, e)
continue
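# Usage sketch ('insta' is the session object these classes receive; the shortcode is hypothetical):
#   post = Post.from_shortcode(insta, "B0000000000")
#   post.download("./downloads",
#                 on_item_finish=lambda p, i, item, path: print(i, path),
#                 on_item_error=lambda p, i, item, e: print("item", i, "failed:", e))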
class IGTV(Post):
"""Represents an IGTV Post entity."""
info_vars = ("shortcode", "url", "typename", "id", "owner_username", "owner_id", "owner_profile_picture_url",
"created_time", "caption", "media_count", "likes_count", "comments_count", "title", "duration")
def __init__(self, insta, data: dict):
        # In fact, the URL of an IGTV post is 'instagram.com/tv/{shortcode}',
        # but 'instagram.com/p/{shortcode}' works just the same, since an IGTV video is also considered a Post
super().__init__(insta, data)
def __repr__(self) -> str:
return "IGTV(title='{0}', shortcode='{1}')".format(self.title, self.shortcode)
@property
def title(self) -> str:
"""Returns the title of this IGTV post."""
return self._find_or_get("title")
@property
def duration(self) -> float:
"""Returns the video duration of this IGTV post."""
return float(self._find_or_get("video_duration"))
@property
def view_count(self) -> int:
"""Returns the video view count of this IGTV post."""
return self._find_or_get("video_view_count")
class Story(AsDictMixin):
"""Represents a Story entity."""
info_vars = ("typename", "id", "reel_count")
def __init__(self, data: dict):
self.data = data
def __repr__(self):
        raise NotImplementedError  # subclasses provide their own __repr__
def __eq__(self, other) -> bool:
return isinstance(other, Story) and self.id == other.id
def __hash__(self) -> int:
return hash(self.id)
def __len__(self) -> int:
return self.reel_count
def __getitem__(self, index: int) -> ReelItem:
return self.reel_items()[index]
def __iter__(self) -> ReelItem:
for reel in self.reel_items():
yield reel
@property
def typename(self) -> str:
"""Returns the typename of this story."""
return self.data["__typename"]
@property
def id(self) -> str:
"""Returns the ID of this story."""
return self.data["id"]
@property
def reel_count(self) -> int:
"""Returns the amount of reel items in this story."""
return len(self.reel_items())
def reel_items(self) -> List[ReelItem]:
"""Returns a list of reel items of this story."""
return ReelItem.compose_items(self.data)
def download(self, dest: str = None, *, write: bool = True, verify: bool = True,
on_item_start: Callable = None, on_item_finish: Callable = None, on_item_error: Callable = None):
"""Download all reel items of this story.
Arguments:
dest: Path to the destination directory.
write: Write file to disk if True, write to memory otherwise.
            verify: Verify file integrity if True, check the size of the file in bytes otherwise. See 'MediaItem.download()'.
on_item_start: A callable (Story, int, ReelItem). Called on start of each item.
on_item_finish: A callable (Story, int, ReelItem, str). Called on finish of each item.
on_item_error: A callable (Story, int, ReelItem, Exception). Called on error of each item.
"""
dest = os.path.abspath(dest or "./")
reel_items = self.reel_items()
logger.debug("Downloading {0} ({1} media) [{2}]...".format(repr(self), len(reel_items), self.typename))
logger.debug("Dest: " + dest)
for i, item in enumerate(reel_items):
if on_item_start is not None:
on_item_start(self, i, item)
try:
filename = to_datetime(item.created_time)
file_path = item.download(dest, filename, write=write, verify=verify)
if file_path is not None:
set_mtime(file_path, item.created_time)
if on_item_finish is not None:
on_item_finish(self, i, item, file_path)
except Exception as e:
                # NOTE: if the Story has multiple reel items to download, an exception will NOT interrupt
                # the whole download of the story, unless the user re-raises the exception in 'on_item_error()'.
exc_type, exc_value, tb = sys.exc_info()
logger.error("{}: {}".format(exc_type.__name__, exc_value))
logger.debug("".join(traceback.format_tb(tb)))
if on_item_error is not None:
on_item_error(self, i, item, e)
continue
class UserStory(Story):
"""Represents a Story entity that belongs to a Profile."""
info_vars = ("typename", "id", "latest_reel_media", "reel_count", "owner_username", "owner_id", "owner_profile_picture_url", "seen_time")
def __init__(self, data: dict):
super().__init__(data)
def __repr__(self) -> str:
return "UserStory(owner_username='{0}', typename='{1}')".format(self.owner_username, self.typename)
@property
def latest_reel_media(self) -> int:
"""Returns the created time of the latest reel media (timestamp) of this story."""
return int(self.data["latest_reel_media"])
@property
def owner_username(self) -> str:
"""Returns the owner's username of this story."""
return self.data["owner"]["username"]
@property
def owner_id(self) -> str:
"""Returns the owner's ID of this story."""
return self.data["owner"]["id"]
@property
def owner_profile_picture_url(self) -> str:
"""Returns the URL of the owner's profile picture of this story."""
return self.data["owner"]["profile_pic_url"]
def owner_profile_picture(self) -> MediaItem:
"""Returns a 'MediaItem' object of the owner's profile picture of this story."""
return MediaItem("GraphImage", self.data["owner"]["profile_pic_url"], 150, 150)
@property
def seen_time(self) -> Optional[int]:
"""Returns the seen time (timestamp) of this story if it has been seen, None otherwise."""
if self.data["seen"]:
return int(self.data["seen"])
class HashtagStory(Story):
"""Represents a Story entity that belongs to a Hashtag."""
info_vars = ("typename", "id", "latest_reel_media", "reel_count", "tagname")
def __init__(self, data: dict):
super().__init__(data)
def __repr__(self) -> str:
return "HashtagStory(tagname='{0}', typename='{1}')".format(self.tagname, self.typename)
@property
def latest_reel_media(self) -> int:
"""Returns the created time of the latest reel media (timestamp) of this story."""
return int(self.data["latest_reel_media"])
@property
def tagname(self) -> str:
"""Returns the hashtag's tag name of this story."""
return self.data["owner"]["name"]
class Highlight(Story):
"""Represents a Highlight entity."""
info_vars = ("typename", "id", "title", "cover_media_thumbnail", "owner_username", "owner_id", "owner_profile_picture_url", "reel_count")
def __init__(self, data: dict):
super().__init__(data)
def __repr__(self) -> str:
return "Highlight(title='{}')".format(self.title)
@property
def title(self) -> str:
"""Returns the title of this highlight."""
return self.data["title"]
@property
def cover_media_thumbnail(self) -> str:
"""Returns the URL of the cover thumbnail of this highlight."""
return self.data["cover_media"]["thumbnail_src"]
@property
def owner_username(self) -> str:
"""Returns the owner's username of this highlight."""
return self.data["owner"]["username"]
@property
def owner_id(self) -> str:
"""Returns the owner's ID of this highlight."""
return self.data["owner"]["id"]
@property
def owner_profile_picture_url(self) -> str:
"""Returns the URL of the owner's profile picture of this highlight."""
return self.data["owner"]["profile_pic_url"]
def owner_profile_picture(self) -> MediaItem:
"""Returns a 'MediaItem' object of the owner's profile picture of this highlight."""
return MediaItem("GraphImage", self.data["owner"]["profile_pic_url"], 150, 150)
class Profile(AsDictMixin, DataGetterMixin):
"""Represents a user Profile entity."""
info_vars = ("username", "url", "id", "fullname", "biography", "website", "followers_count", "followings_count",
"mutual_followers_count", "is_verified", "is_private", "profile_picture_url")
@classmethod
def from_id(cls, insta, id: str):
"""Returns a Post instance from user ID.
* This takes one more step to obtain the username of the user.
"""
username = get_username_from_userid(id)
return cls.from_username(insta, username)
@classmethod
def from_username(cls, insta, username: str):
"""Returns a Post instance from username."""
profile = cls(insta, {"username": username})
profile._obtain_full_data()
return profile
def __init__(self, insta, data: dict):
self._insta = insta
self._init_data = data
self._full_data = None
self.username = data["username"]
def _obtain_full_data(self):
if self._full_data is None:
logger.debug("Obtaining full data of Profile(username='{}')".format(self.username))
self._full_data = self._insta._fetch_json_data(PROFILE_URL.format(username=self.username))["user"]
def __repr__(self):
return "Profile(username='{0}', id='{1}')".format(self.username, self.id)
def __eq__(self, other):
return isinstance(other, Profile) and self.username == other.username and self.id == other.id
def __hash__(self) -> int:
return hash(self.id)
@property
def url(self) -> str:
"""Returns the URL of this profile."""
return "https://instagram.com/" + self.username
@property
def id(self) -> str:
"""Returns the ID (user ID) of this profile."""
return self._find_or_get("id")
@property
def fullname(self) -> str:
"""Returns the fullname of this profile."""
return self._find_or_get("full_name")
@property
def biography(self) -> str:
"""Returns the biography of this profile."""
return self._find_or_get("biography")
@property
def website(self) -> Optional[str]:
"""Returns the website of this profile if applicable, None otherwise."""
return self._find_or_get("external_url")
@property
def followers_count(self) -> int:
"""Returns the amount of followers this profile has."""
return self._find_or_get("edge_followed_by")["count"]
@property
def followings_count(self) -> int:
"""Returns the amount of users this profile is following."""
return self._find_or_get("edge_follow")["count"]
@property
def mutual_followers_count(self) -> int:
"""Returns the amount of mutual followers of this profile."""
return self._find_or_get("edge_mutual_followed_by")["count"]
@property
def is_verified(self) -> bool:
"""Returns True if this profile is verified, False otherwise"""
return self._find_or_get("is_verified")
@property
def is_private(self) -> bool:
"""Returns True if this profile is private, False otherwise"""
return self._find_or_get("is_private")
@property
def profile_picture_url(self) -> str:
"""Retunrs the URL of the profile picture of this profile."""
return self._find_or_get("profile_pic_url_hd")
def profile_picture(self) -> MediaItem:
"""Retunrs a 'MediaItem' of the profile picture of this profile."""
return MediaItem("GraphImage", self.profile_picture_url, 320, 320)
def timeline_posts(self) -> PostGroup:
"""Retrieves timeline posts of this profile.
Returns:
A 'PostGroup' object.
"""
self._obtain_full_data()
logger.info("Retrieving timeline posts of @{0}".format(self.username))
variables = {"id": self.id}
nodes = self._insta._graphql_query_edges(QUERYHASH_TIMELINE, variables, "user", "edge_owner_to_timeline_media", self._full_data)
return Group.of_posts(next(nodes), (Post(self._insta, node) for node in nodes))
def saved_posts(self) -> PostGroup:
"""Retrieves saved posts of this profile.
* Requires authentication.
Returns:
A 'PostGroup' object.
"""
if not self._insta.authenticated:
raise AuthenticationRequired()
self._obtain_full_data()
logger.info("Retrieving saved posts of @{0}".format(self.username))
variables = {"id": self.id}
nodes = self._insta._graphql_query_edges(QUERYHASH_SAVED, variables, "user", "edge_saved_media", self._full_data)
return Group.of_posts(next(nodes), (Post(self._insta, node) for node in nodes))
def tagged_posts(self) -> PostGroup:
"""Retrieves tagged posts of this profile.
Returns:
A 'PostGroup' object.
"""
logger.info("Retrieving tagged posts of @{0}".format(self.username))
variables = {"id": self.id}
nodes = self._insta._graphql_query_edges(QUERYHASH_TAGGED, variables, "user", "edge_user_to_photos_of_you")
return Group.of_posts(next(nodes), (Post(self._insta, node) for node in nodes))
def igtv_posts(self) -> PostGroup:
"""Retrieves IGTV posts of this profile.
Returns:
A 'PostGroup' object.
"""
self._obtain_full_data()
logger.info("Retrieving IGTV video posts of @{0}".format(self.username))
variables = {"id": self.id}
nodes = self._insta._graphql_query_edges(QUERYHASH_IGTV, variables, "user", "edge_felix_video_timeline", self._full_data)
return Group.of_posts(next(nodes), (IGTV(self._insta, node) for node in nodes))
def followers(self) -> Group:
"""Retrieves followers of this profile.
* Requires authentication.
Returns:
A 'Group' object that yields 'Profile' instances.
"""
if not self._insta.authenticated:
raise AuthenticationRequired()
logger.info("Retrieving followers of @{0}".format(self.username))
variables = {"id": self.id}
nodes = self._insta._graphql_query_edges(QUERYHASH_FOLLOWERS, variables, "user", "edge_followed_by")
return Group(next(nodes), (Profile(self._insta, node) for node in nodes))
def followings(self) -> Group:
"""Retrieves profiles that this profile is following.
* Requires authentication.
Returns:
A 'Group' object that yields 'Profile' instances.
"""
if not self._insta.authenticated:
raise AuthenticationRequired()
logger.info("Retrieving followings of @{0}".format(self.username))
variables = {"id": self.id}
nodes = self._insta._graphql_query_edges(QUERYHASH_FOLLOWINGS, variables, "user", "edge_follow")
return Group(next(nodes), (Profile(self._insta, node) for node in nodes))
def highlights(self) -> List[Highlight]:
"""Retrieves highlights of this profile.
* Requires authentication.
Returns:
A list of 'Highlight' objects.
"""
if not self._insta.authenticated:
raise AuthenticationRequired()
logger.info("Retrieving story highlights of @{0}".format(self.username))
# [1] retrieve all available highlights of this user
variables = {"user_id": self.id, "include_chaining": False, "include_reel": False,
"include_suggested_users": False, "include_logged_out_extras": False, "include_highlight_reels": True}
data = self._insta._graphql_query(QUERYHASH_HIGHLIGHTS, variables)["user"]["edge_highlight_reels"]
nodes = [edge["node"] for edge in data["edges"]]
if not nodes:
logger.warning("No visible highlight is found for this profile.")
return []
# [2] do GraphQL query to get the reel items data of all highlights at once
logger.debug("Fetching json data of highlights of @{} ...".format(self.username))
variables = {"highlight_reel_ids": [str(node["id"]) for node in nodes], "precomposed_overlay": False, "show_story_viewer_list": False}
url = QUERY_URL.format(QUERYHASH_REELITEMS, json.dumps(variables))
data = self._insta._fetch_json_data(url)["reels_media"]
hs = []
for d in data:
for node in nodes:
if node["id"] == d["id"]:
d.update(node)
break
else:
continue
# produce 'Highlight' object
hs.append(Highlight(d))
return hs
def story(self) -> Optional[UserStory]:
"""Retrieves the currently visible story of this profile.
* Requires authentication.
Returns:
A 'UserStory' object if applicable, None otherwise.
"""
if not self._insta.authenticated:
raise AuthenticationRequired()
logger.info("Retrieving story of @{0}".format(self.username))
variables = {"reel_ids": [self.id], "precomposed_overlay": False, "show_story_viewer_list": False}
data = self._insta._graphql_query(QUERYHASH_REELITEMS, variables)["reels_media"]
if not data:
logger.warning("No visible story is available now for this profile.")
return
return UserStory(data[0])
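# Usage sketch ('insta' is the session object these classes receive; username hypothetical):
#   profile = Profile.from_username(insta, "someuser")
#   print(profile.followers_count, profile.is_private)
#   for post in profile.timeline_posts():
#       print(post.shortcode)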
class Hashtag(DataGetterMixin):
"""Represents a Hashtag entity."""
@classmethod
def from_tagname(cls, insta, tagname: str):
"""Returns a Hashtag instance from tag name."""
hashtag = cls(insta, {"name": tagname})
hashtag._obtain_full_data()
return hashtag
def __init__(self, insta, data: dict):
self._insta = insta
self._init_data = data
self._full_data = None
self.tagname = data["name"]
def _obtain_full_data(self):
if self._full_data is None:
logger.debug("Obtaining full data of Hashtag(tagname='{}')".format(self.tagname))
self._full_data = self._insta._fetch_json_data(HASHTAG_URL.format(tagname=self.tagname))["hashtag"]
def __repr__(self):
return "Hashtag(tagname='{0}')".format(self.tagname)
def __eq__(self, other):
return isinstance(other, Hashtag) and self.tagname == other.tagname and self.id == other.id
def __hash__(self) -> int:
return hash(self.tagname)
@property
def id(self) -> str:
"""Returns the ID of this hashtag."""
return self._find_or_get("id")
@property
def profile_picture_url(self) -> str:
"""Returns the URl of the profile picture of this hashtag."""
return self._find_or_get("profile_pic_url")
def profile_picture(self) -> MediaItem:
"""Returns a 'MediaItem' of the profile picture of this hashtag."""
return MediaItem("GraphImage", self.profile_picture_url, 320, 320)
def top_posts(self) -> PostGroup:
"""Retrieves top posts if this hashtag.
* Only 9 posts at most.
Returns:
A 'PostGroup' object.
"""
self._obtain_full_data()
logger.info("Retrieving top posts of #{0}".format(self.tagname))
nodes = self._insta._graphql_query_edges("", {}, "hashtag", "edge_hashtag_to_top_posts", self._full_data)
return Group.of_posts(next(nodes), (Post(self._insta, node) for node in nodes))
def recent_posts(self) -> PostGroup:
"""Retrieves most recent posts if this hashtag.
Returns:
A 'PostGroup' object.
"""
logger.info("Retrieving recent posts of #{0}".format(self.tagname))
variables = {"tag_name": self.tagname}
nodes = self._insta._graphql_query_edges(QUERYHASH_HASHTAG, variables, "hashtag", "edge_hashtag_to_media")
return Group.of_posts(next(nodes), (Post(self._insta, node) for node in nodes))
def story(self) -> Optional[HashtagStory]:
"""Retrieves the current visible Story of this hashtag.
* Requires authentication.
Returns:
A 'HashtagStory' object.
"""
if not self._insta.authenticated:
raise AuthenticationRequired()
logger.info("Retrieving story of #{0}".format(self.tagname))
variables = {"tag_names": [self.tagname], "precomposed_overlay": False, "show_story_viewer_list": False}
data = self._insta._graphql_query(QUERYHASH_REELITEMS, variables)["reels_media"]
if not data:
logger.warning("No visible story is avaliable now for this hashtag.")
return
return HashtagStory(data[0])
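# Usage sketch (tag name hypothetical):
#   hashtag = Hashtag.from_tagname(insta, "sunset")
#   top = hashtag.top_posts()        # at most 9 posts
#   recent = hashtag.recent_posts()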
class Explore:
"""Represents the Explore entity in the discover section."""
def __init__(self, insta):
self._insta = insta
def __repr__(self):
return "Explore()"
def posts(self) -> PostGroup:
"""Retrieves posts of explore.
* Requires authentication.
Returns:
A 'PostGroup' object.
"""
if not self._insta.authenticated:
raise AuthenticationRequired()
logger.info("Retrieving explore posts...")
nodes = self._insta._graphql_query_edges(QUERYHASH_EXPLORE, {}, "user", "edge_web_discover_media")
return Group.of_posts(next(nodes), (Post(self._insta, node) for node in nodes))
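# Usage sketch (requires an authenticated session):
#   explore = Explore(insta)
#   for post in explore.posts():
#       print(post.shortcode)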
| StarcoderdataPython |
1358 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""List of renames to apply when converting from TF 1.0 to TF 2.0.
THIS FILE IS AUTOGENERATED: To update, please run:
bazel build tensorflow/tools/compatibility/update:generate_v2_renames_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_renames_map
This file should be updated whenever endpoints are deprecated.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
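# Illustrative helper (an addition for exposition, not produced by the update
# script): a renames map like the one below can be applied to raw source text
# with word-boundary regex substitution. The official tf_upgrade_v2 converter
# rewrites the AST instead, so treat this purely as a sketch.
import re
def _apply_renames_sketch(source, renames_map):
  """Replaces each old symbol in 'source' with its new name, longest first."""
  for old, new in sorted(renames_map.items(), key=lambda kv: -len(kv[0])):
    source = re.sub(re.escape(old) + r'\b', new, source)
  return source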
renames = {
'tf.AUTO_REUSE': 'tf.compat.v1.AUTO_REUSE',
'tf.AttrValue': 'tf.compat.v1.AttrValue',
'tf.COMPILER_VERSION': 'tf.version.COMPILER_VERSION',
'tf.CXX11_ABI_FLAG': 'tf.sysconfig.CXX11_ABI_FLAG',
'tf.ConditionalAccumulator': 'tf.compat.v1.ConditionalAccumulator',
'tf.ConditionalAccumulatorBase': 'tf.compat.v1.ConditionalAccumulatorBase',
'tf.ConfigProto': 'tf.compat.v1.ConfigProto',
'tf.DeviceSpec': 'tf.compat.v1.DeviceSpec',
'tf.Dimension': 'tf.compat.v1.Dimension',
'tf.Event': 'tf.compat.v1.Event',
'tf.FIFOQueue': 'tf.queue.FIFOQueue',
'tf.FixedLenFeature': 'tf.io.FixedLenFeature',
'tf.FixedLenSequenceFeature': 'tf.io.FixedLenSequenceFeature',
'tf.FixedLengthRecordReader': 'tf.compat.v1.FixedLengthRecordReader',
'tf.GIT_VERSION': 'tf.version.GIT_VERSION',
'tf.GPUOptions': 'tf.compat.v1.GPUOptions',
'tf.GRAPH_DEF_VERSION': 'tf.version.GRAPH_DEF_VERSION',
'tf.GRAPH_DEF_VERSION_MIN_CONSUMER': 'tf.version.GRAPH_DEF_VERSION_MIN_CONSUMER',
'tf.GRAPH_DEF_VERSION_MIN_PRODUCER': 'tf.version.GRAPH_DEF_VERSION_MIN_PRODUCER',
'tf.GraphDef': 'tf.compat.v1.GraphDef',
'tf.GraphKeys': 'tf.compat.v1.GraphKeys',
'tf.GraphOptions': 'tf.compat.v1.GraphOptions',
'tf.HistogramProto': 'tf.compat.v1.HistogramProto',
'tf.IdentityReader': 'tf.compat.v1.IdentityReader',
'tf.InteractiveSession': 'tf.compat.v1.InteractiveSession',
'tf.LMDBReader': 'tf.compat.v1.LMDBReader',
'tf.LogMessage': 'tf.compat.v1.LogMessage',
'tf.MONOLITHIC_BUILD': 'tf.sysconfig.MONOLITHIC_BUILD',
'tf.MetaGraphDef': 'tf.compat.v1.MetaGraphDef',
'tf.NameAttrList': 'tf.compat.v1.NameAttrList',
'tf.NoGradient': 'tf.no_gradient',
'tf.NodeDef': 'tf.compat.v1.NodeDef',
'tf.NotDifferentiable': 'tf.no_gradient',
'tf.OpError': 'tf.errors.OpError',
'tf.OptimizerOptions': 'tf.compat.v1.OptimizerOptions',
'tf.PaddingFIFOQueue': 'tf.queue.PaddingFIFOQueue',
'tf.Print': 'tf.compat.v1.Print',
'tf.PriorityQueue': 'tf.queue.PriorityQueue',
'tf.QUANTIZED_DTYPES': 'tf.dtypes.QUANTIZED_DTYPES',
'tf.QueueBase': 'tf.queue.QueueBase',
'tf.RandomShuffleQueue': 'tf.queue.RandomShuffleQueue',
'tf.ReaderBase': 'tf.compat.v1.ReaderBase',
'tf.RunMetadata': 'tf.compat.v1.RunMetadata',
'tf.RunOptions': 'tf.compat.v1.RunOptions',
'tf.Session': 'tf.compat.v1.Session',
'tf.SessionLog': 'tf.compat.v1.SessionLog',
'tf.SparseConditionalAccumulator': 'tf.sparse.SparseConditionalAccumulator',
'tf.SparseFeature': 'tf.io.SparseFeature',
'tf.SparseTensorValue': 'tf.compat.v1.SparseTensorValue',
'tf.Summary': 'tf.compat.v1.Summary',
'tf.SummaryMetadata': 'tf.compat.v1.SummaryMetadata',
'tf.TFRecordReader': 'tf.compat.v1.TFRecordReader',
'tf.TensorInfo': 'tf.compat.v1.TensorInfo',
'tf.TextLineReader': 'tf.compat.v1.TextLineReader',
'tf.VERSION': 'tf.version.VERSION',
'tf.VarLenFeature': 'tf.io.VarLenFeature',
'tf.VariableScope': 'tf.compat.v1.VariableScope',
'tf.WholeFileReader': 'tf.compat.v1.WholeFileReader',
'tf.accumulate_n': 'tf.math.accumulate_n',
'tf.add_check_numerics_ops': 'tf.compat.v1.add_check_numerics_ops',
'tf.add_to_collection': 'tf.compat.v1.add_to_collection',
'tf.add_to_collections': 'tf.compat.v1.add_to_collections',
'tf.all_variables': 'tf.compat.v1.all_variables',
'tf.angle': 'tf.math.angle',
'tf.app.run': 'tf.compat.v1.app.run',
'tf.assert_greater_equal': 'tf.compat.v1.assert_greater_equal',
'tf.assert_integer': 'tf.compat.v1.assert_integer',
'tf.assert_less_equal': 'tf.compat.v1.assert_less_equal',
'tf.assert_near': 'tf.compat.v1.assert_near',
'tf.assert_negative': 'tf.compat.v1.assert_negative',
'tf.assert_non_negative': 'tf.compat.v1.assert_non_negative',
'tf.assert_non_positive': 'tf.compat.v1.assert_non_positive',
'tf.assert_none_equal': 'tf.compat.v1.assert_none_equal',
'tf.assert_positive': 'tf.compat.v1.assert_positive',
'tf.assert_proper_iterable': 'tf.debugging.assert_proper_iterable',
'tf.assert_rank_at_least': 'tf.compat.v1.assert_rank_at_least',
'tf.assert_rank_in': 'tf.compat.v1.assert_rank_in',
'tf.assert_same_float_dtype': 'tf.debugging.assert_same_float_dtype',
'tf.assert_scalar': 'tf.compat.v1.assert_scalar',
'tf.assert_type': 'tf.compat.v1.assert_type',
'tf.assert_variables_initialized': 'tf.compat.v1.assert_variables_initialized',
'tf.assign': 'tf.compat.v1.assign',
'tf.assign_add': 'tf.compat.v1.assign_add',
'tf.assign_sub': 'tf.compat.v1.assign_sub',
'tf.batch_scatter_update': 'tf.compat.v1.batch_scatter_update',
'tf.betainc': 'tf.math.betainc',
'tf.ceil': 'tf.math.ceil',
'tf.check_numerics': 'tf.debugging.check_numerics',
'tf.cholesky': 'tf.linalg.cholesky',
'tf.cholesky_solve': 'tf.linalg.cholesky_solve',
'tf.clip_by_average_norm': 'tf.compat.v1.clip_by_average_norm',
'tf.colocate_with': 'tf.compat.v1.colocate_with',
'tf.conj': 'tf.math.conj',
'tf.container': 'tf.compat.v1.container',
'tf.convert_to_tensor_or_indexed_slices': 'tf.compat.v1.convert_to_tensor_or_indexed_slices',
'tf.convert_to_tensor_or_sparse_tensor': 'tf.compat.v1.convert_to_tensor_or_sparse_tensor',
'tf.count_up_to': 'tf.compat.v1.count_up_to',
'tf.create_partitioned_variables': 'tf.compat.v1.create_partitioned_variables',
'tf.cross': 'tf.linalg.cross',
'tf.cumprod': 'tf.math.cumprod',
'tf.data.make_initializable_iterator': 'tf.compat.v1.data.make_initializable_iterator',
'tf.data.make_one_shot_iterator': 'tf.compat.v1.data.make_one_shot_iterator',
'tf.debugging.is_finite': 'tf.math.is_finite',
'tf.debugging.is_inf': 'tf.math.is_inf',
'tf.debugging.is_nan': 'tf.math.is_nan',
'tf.debugging.is_non_decreasing': 'tf.math.is_non_decreasing',
'tf.debugging.is_strictly_increasing': 'tf.math.is_strictly_increasing',
'tf.decode_base64': 'tf.io.decode_base64',
'tf.decode_compressed': 'tf.io.decode_compressed',
'tf.decode_json_example': 'tf.io.decode_json_example',
'tf.decode_raw': 'tf.io.decode_raw',
'tf.delete_session_tensor': 'tf.compat.v1.delete_session_tensor',
'tf.depth_to_space': 'tf.compat.v1.depth_to_space',
'tf.dequantize': 'tf.quantization.dequantize',
'tf.deserialize_many_sparse': 'tf.io.deserialize_many_sparse',
'tf.diag': 'tf.linalg.tensor_diag',
'tf.diag_part': 'tf.linalg.tensor_diag_part',
'tf.digamma': 'tf.math.digamma',
'tf.dimension_at_index': 'tf.compat.dimension_at_index',
'tf.dimension_value': 'tf.compat.dimension_value',
'tf.disable_eager_execution': 'tf.compat.v1.disable_eager_execution',
'tf.disable_resource_variables': 'tf.compat.v1.disable_resource_variables',
'tf.disable_v2_batch_normalization': 'tf.compat.v1.disable_v2_batch_normalization',
'tf.disable_v2_behavior': 'tf.compat.v1.disable_v2_behavior',
'tf.disable_v2_tensorshape': 'tf.compat.v1.disable_v2_tensorshape',
'tf.distributions.Bernoulli': 'tf.compat.v1.distributions.Bernoulli',
'tf.distributions.Beta': 'tf.compat.v1.distributions.Beta',
'tf.distributions.Categorical': 'tf.compat.v1.distributions.Categorical',
'tf.distributions.Dirichlet': 'tf.compat.v1.distributions.Dirichlet',
'tf.distributions.DirichletMultinomial': 'tf.compat.v1.distributions.DirichletMultinomial',
'tf.distributions.Distribution': 'tf.compat.v1.distributions.Distribution',
'tf.distributions.Exponential': 'tf.compat.v1.distributions.Exponential',
'tf.distributions.FULLY_REPARAMETERIZED': 'tf.compat.v1.distributions.FULLY_REPARAMETERIZED',
'tf.distributions.Gamma': 'tf.compat.v1.distributions.Gamma',
'tf.distributions.Laplace': 'tf.compat.v1.distributions.Laplace',
'tf.distributions.Multinomial': 'tf.compat.v1.distributions.Multinomial',
'tf.distributions.NOT_REPARAMETERIZED': 'tf.compat.v1.distributions.NOT_REPARAMETERIZED',
'tf.distributions.Normal': 'tf.compat.v1.distributions.Normal',
'tf.distributions.RegisterKL': 'tf.compat.v1.distributions.RegisterKL',
'tf.distributions.ReparameterizationType': 'tf.compat.v1.distributions.ReparameterizationType',
'tf.distributions.StudentT': 'tf.compat.v1.distributions.StudentT',
'tf.distributions.Uniform': 'tf.compat.v1.distributions.Uniform',
'tf.distributions.kl_divergence': 'tf.compat.v1.distributions.kl_divergence',
'tf.div': 'tf.compat.v1.div',
'tf.dtypes.as_string': 'tf.strings.as_string',
'tf.enable_eager_execution': 'tf.compat.v1.enable_eager_execution',
'tf.enable_resource_variables': 'tf.compat.v1.enable_resource_variables',
'tf.enable_v2_batch_normalization': 'tf.compat.v1.enable_v2_batch_normalization',
'tf.enable_v2_behavior': 'tf.compat.v1.enable_v2_behavior',
'tf.enable_v2_tensorshape': 'tf.compat.v1.enable_v2_tensorshape',
'tf.encode_base64': 'tf.io.encode_base64',
'tf.erf': 'tf.math.erf',
'tf.erfc': 'tf.math.erfc',
'tf.expm1': 'tf.math.expm1',
'tf.fake_quant_with_min_max_args': 'tf.quantization.fake_quant_with_min_max_args',
'tf.fake_quant_with_min_max_args_gradient': 'tf.quantization.fake_quant_with_min_max_args_gradient',
'tf.fake_quant_with_min_max_vars': 'tf.quantization.fake_quant_with_min_max_vars',
'tf.fake_quant_with_min_max_vars_gradient': 'tf.quantization.fake_quant_with_min_max_vars_gradient',
'tf.fake_quant_with_min_max_vars_per_channel': 'tf.quantization.fake_quant_with_min_max_vars_per_channel',
'tf.fake_quant_with_min_max_vars_per_channel_gradient': 'tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient',
'tf.feature_column.input_layer': 'tf.compat.v1.feature_column.input_layer',
'tf.feature_column.linear_model': 'tf.compat.v1.feature_column.linear_model',
'tf.fft': 'tf.signal.fft',
'tf.fft2d': 'tf.signal.fft2d',
'tf.fft3d': 'tf.signal.fft3d',
'tf.fixed_size_partitioner': 'tf.compat.v1.fixed_size_partitioner',
'tf.floordiv': 'tf.math.floordiv',
'tf.get_collection': 'tf.compat.v1.get_collection',
'tf.get_collection_ref': 'tf.compat.v1.get_collection_ref',
'tf.get_default_graph': 'tf.compat.v1.get_default_graph',
'tf.get_default_session': 'tf.compat.v1.get_default_session',
'tf.get_local_variable': 'tf.compat.v1.get_local_variable',
'tf.get_seed': 'tf.compat.v1.get_seed',
'tf.get_session_handle': 'tf.compat.v1.get_session_handle',
'tf.get_session_tensor': 'tf.compat.v1.get_session_tensor',
'tf.get_variable': 'tf.compat.v1.get_variable',
'tf.get_variable_scope': 'tf.compat.v1.get_variable_scope',
'tf.gfile.FastGFile': 'tf.compat.v1.gfile.FastGFile',
'tf.gfile.GFile': 'tf.io.gfile.GFile',
'tf.gfile.Open': 'tf.io.gfile.GFile',
'tf.global_norm': 'tf.linalg.global_norm',
'tf.global_variables': 'tf.compat.v1.global_variables',
'tf.global_variables_initializer': 'tf.compat.v1.global_variables_initializer',
'tf.glorot_normal_initializer': 'tf.compat.v1.glorot_normal_initializer',
'tf.glorot_uniform_initializer': 'tf.compat.v1.glorot_uniform_initializer',
'tf.graph_util.convert_variables_to_constants': 'tf.compat.v1.graph_util.convert_variables_to_constants',
'tf.graph_util.extract_sub_graph': 'tf.compat.v1.graph_util.extract_sub_graph',
'tf.graph_util.must_run_on_cpu': 'tf.compat.v1.graph_util.must_run_on_cpu',
'tf.graph_util.remove_training_nodes': 'tf.compat.v1.graph_util.remove_training_nodes',
'tf.graph_util.tensor_shape_from_node_def_name': 'tf.compat.v1.graph_util.tensor_shape_from_node_def_name',
'tf.ifft': 'tf.signal.ifft',
'tf.ifft2d': 'tf.signal.ifft2d',
'tf.ifft3d': 'tf.signal.ifft3d',
'tf.igamma': 'tf.math.igamma',
'tf.igammac': 'tf.math.igammac',
'tf.imag': 'tf.math.imag',
'tf.image.resize_area': 'tf.compat.v1.image.resize_area',
'tf.image.resize_bicubic': 'tf.compat.v1.image.resize_bicubic',
'tf.image.resize_bilinear': 'tf.compat.v1.image.resize_bilinear',
'tf.image.resize_nearest_neighbor': 'tf.compat.v1.image.resize_nearest_neighbor',
'tf.image.transpose_image': 'tf.compat.v1.image.transpose_image',
'tf.initialize_all_tables': 'tf.compat.v1.initialize_all_tables',
'tf.initialize_all_variables': 'tf.compat.v1.initialize_all_variables',
'tf.initialize_local_variables': 'tf.compat.v1.initialize_local_variables',
'tf.initialize_variables': 'tf.compat.v1.initialize_variables',
'tf.initializers.constant': 'tf.compat.v1.initializers.constant',
'tf.initializers.global_variables': 'tf.compat.v1.initializers.global_variables',
'tf.initializers.glorot_normal': 'tf.compat.v1.initializers.glorot_normal',
'tf.initializers.glorot_uniform': 'tf.compat.v1.initializers.glorot_uniform',
'tf.initializers.he_normal': 'tf.compat.v1.initializers.he_normal',
'tf.initializers.he_uniform': 'tf.compat.v1.initializers.he_uniform',
'tf.initializers.identity': 'tf.compat.v1.initializers.identity',
'tf.initializers.lecun_normal': 'tf.compat.v1.initializers.lecun_normal',
'tf.initializers.lecun_uniform': 'tf.compat.v1.initializers.lecun_uniform',
'tf.initializers.local_variables': 'tf.compat.v1.initializers.local_variables',
'tf.initializers.ones': 'tf.compat.v1.initializers.ones',
'tf.initializers.orthogonal': 'tf.compat.v1.initializers.orthogonal',
'tf.initializers.random_normal': 'tf.compat.v1.initializers.random_normal',
'tf.initializers.random_uniform': 'tf.compat.v1.initializers.random_uniform',
'tf.initializers.tables_initializer': 'tf.compat.v1.initializers.tables_initializer',
'tf.initializers.truncated_normal': 'tf.compat.v1.initializers.truncated_normal',
'tf.initializers.uniform_unit_scaling': 'tf.compat.v1.initializers.uniform_unit_scaling',
'tf.initializers.variables': 'tf.compat.v1.initializers.variables',
'tf.initializers.variance_scaling': 'tf.compat.v1.initializers.variance_scaling',
'tf.initializers.zeros': 'tf.compat.v1.initializers.zeros',
'tf.invert_permutation': 'tf.math.invert_permutation',
'tf.io.PaddingFIFOQueue': 'tf.queue.PaddingFIFOQueue',
'tf.io.PriorityQueue': 'tf.queue.PriorityQueue',
'tf.io.QueueBase': 'tf.queue.QueueBase',
'tf.io.RandomShuffleQueue': 'tf.queue.RandomShuffleQueue',
'tf.io.tf_record_iterator': 'tf.compat.v1.io.tf_record_iterator',
'tf.is_finite': 'tf.math.is_finite',
'tf.is_inf': 'tf.math.is_inf',
'tf.is_nan': 'tf.math.is_nan',
'tf.is_non_decreasing': 'tf.math.is_non_decreasing',
'tf.is_numeric_tensor': 'tf.debugging.is_numeric_tensor',
'tf.is_strictly_increasing': 'tf.math.is_strictly_increasing',
'tf.is_variable_initialized': 'tf.compat.v1.is_variable_initialized',
'tf.keras.initializers.Identity': 'tf.compat.v1.keras.initializers.Identity',
'tf.keras.initializers.Orthogonal': 'tf.compat.v1.keras.initializers.Orthogonal',
'tf.keras.initializers.TruncatedNormal': 'tf.compat.v1.keras.initializers.TruncatedNormal',
'tf.keras.initializers.VarianceScaling': 'tf.compat.v1.keras.initializers.VarianceScaling',
'tf.keras.initializers.constant': 'tf.compat.v1.keras.initializers.constant',
'tf.keras.initializers.glorot_normal': 'tf.compat.v1.keras.initializers.glorot_normal',
'tf.keras.initializers.glorot_uniform': 'tf.compat.v1.keras.initializers.glorot_uniform',
'tf.keras.initializers.he_normal': 'tf.compat.v1.keras.initializers.he_normal',
'tf.keras.initializers.he_uniform': 'tf.compat.v1.keras.initializers.he_uniform',
'tf.keras.initializers.identity': 'tf.compat.v1.keras.initializers.identity',
'tf.keras.initializers.lecun_normal': 'tf.compat.v1.keras.initializers.lecun_normal',
'tf.keras.initializers.lecun_uniform': 'tf.compat.v1.keras.initializers.lecun_uniform',
'tf.keras.initializers.normal': 'tf.compat.v1.keras.initializers.normal',
'tf.keras.initializers.ones': 'tf.compat.v1.keras.initializers.ones',
'tf.keras.initializers.orthogonal': 'tf.compat.v1.keras.initializers.orthogonal',
'tf.keras.initializers.random_normal': 'tf.compat.v1.keras.initializers.random_normal',
'tf.keras.initializers.random_uniform': 'tf.compat.v1.keras.initializers.random_uniform',
'tf.keras.initializers.truncated_normal': 'tf.compat.v1.keras.initializers.truncated_normal',
'tf.keras.initializers.uniform': 'tf.compat.v1.keras.initializers.uniform',
'tf.keras.initializers.zeros': 'tf.compat.v1.keras.initializers.zeros',
'tf.layers.AveragePooling1D': 'tf.compat.v1.layers.AveragePooling1D',
'tf.layers.AveragePooling2D': 'tf.compat.v1.layers.AveragePooling2D',
'tf.layers.AveragePooling3D': 'tf.compat.v1.layers.AveragePooling3D',
'tf.layers.BatchNormalization': 'tf.compat.v1.layers.BatchNormalization',
'tf.layers.Conv1D': 'tf.compat.v1.layers.Conv1D',
'tf.layers.Conv2D': 'tf.compat.v1.layers.Conv2D',
'tf.layers.Conv2DTranspose': 'tf.compat.v1.layers.Conv2DTranspose',
'tf.layers.Conv3D': 'tf.compat.v1.layers.Conv3D',
'tf.layers.Conv3DTranspose': 'tf.compat.v1.layers.Conv3DTranspose',
'tf.layers.Dense': 'tf.compat.v1.layers.Dense',
'tf.layers.Dropout': 'tf.compat.v1.layers.Dropout',
'tf.layers.Flatten': 'tf.compat.v1.layers.Flatten',
'tf.layers.InputSpec': 'tf.keras.layers.InputSpec',
'tf.layers.Layer': 'tf.compat.v1.layers.Layer',
'tf.layers.MaxPooling1D': 'tf.compat.v1.layers.MaxPooling1D',
'tf.layers.MaxPooling2D': 'tf.compat.v1.layers.MaxPooling2D',
'tf.layers.MaxPooling3D': 'tf.compat.v1.layers.MaxPooling3D',
'tf.layers.SeparableConv1D': 'tf.compat.v1.layers.SeparableConv1D',
'tf.layers.SeparableConv2D': 'tf.compat.v1.layers.SeparableConv2D',
'tf.layers.average_pooling1d': 'tf.compat.v1.layers.average_pooling1d',
'tf.layers.average_pooling2d': 'tf.compat.v1.layers.average_pooling2d',
'tf.layers.average_pooling3d': 'tf.compat.v1.layers.average_pooling3d',
'tf.layers.batch_normalization': 'tf.compat.v1.layers.batch_normalization',
'tf.layers.conv1d': 'tf.compat.v1.layers.conv1d',
'tf.layers.conv2d': 'tf.compat.v1.layers.conv2d',
'tf.layers.conv2d_transpose': 'tf.compat.v1.layers.conv2d_transpose',
'tf.layers.conv3d': 'tf.compat.v1.layers.conv3d',
'tf.layers.conv3d_transpose': 'tf.compat.v1.layers.conv3d_transpose',
'tf.layers.dense': 'tf.compat.v1.layers.dense',
'tf.layers.dropout': 'tf.compat.v1.layers.dropout',
'tf.layers.experimental.keras_style_scope': 'tf.compat.v1.layers.experimental.keras_style_scope',
'tf.layers.experimental.set_keras_style': 'tf.compat.v1.layers.experimental.set_keras_style',
'tf.layers.flatten': 'tf.compat.v1.layers.flatten',
'tf.layers.max_pooling1d': 'tf.compat.v1.layers.max_pooling1d',
'tf.layers.max_pooling2d': 'tf.compat.v1.layers.max_pooling2d',
'tf.layers.max_pooling3d': 'tf.compat.v1.layers.max_pooling3d',
'tf.layers.separable_conv1d': 'tf.compat.v1.layers.separable_conv1d',
'tf.layers.separable_conv2d': 'tf.compat.v1.layers.separable_conv2d',
'tf.lbeta': 'tf.math.lbeta',
'tf.lgamma': 'tf.math.lgamma',
'tf.lin_space': 'tf.linspace',
'tf.local_variables': 'tf.compat.v1.local_variables',
'tf.local_variables_initializer': 'tf.compat.v1.local_variables_initializer',
'tf.log': 'tf.math.log',
'tf.log1p': 'tf.math.log1p',
'tf.log_sigmoid': 'tf.math.log_sigmoid',
'tf.logging.DEBUG': 'tf.compat.v1.logging.DEBUG',
'tf.logging.ERROR': 'tf.compat.v1.logging.ERROR',
'tf.logging.FATAL': 'tf.compat.v1.logging.FATAL',
'tf.logging.INFO': 'tf.compat.v1.logging.INFO',
'tf.logging.TaskLevelStatusMessage': 'tf.compat.v1.logging.TaskLevelStatusMessage',
'tf.logging.WARN': 'tf.compat.v1.logging.WARN',
'tf.logging.debug': 'tf.compat.v1.logging.debug',
'tf.logging.error': 'tf.compat.v1.logging.error',
'tf.logging.fatal': 'tf.compat.v1.logging.fatal',
'tf.logging.flush': 'tf.compat.v1.logging.flush',
'tf.logging.get_verbosity': 'tf.compat.v1.logging.get_verbosity',
'tf.logging.info': 'tf.compat.v1.logging.info',
'tf.logging.log': 'tf.compat.v1.logging.log',
'tf.logging.log_every_n': 'tf.compat.v1.logging.log_every_n',
'tf.logging.log_first_n': 'tf.compat.v1.logging.log_first_n',
'tf.logging.log_if': 'tf.compat.v1.logging.log_if',
'tf.logging.set_verbosity': 'tf.compat.v1.logging.set_verbosity',
'tf.logging.vlog': 'tf.compat.v1.logging.vlog',
'tf.logging.warn': 'tf.compat.v1.logging.warn',
'tf.logging.warning': 'tf.compat.v1.logging.warning',
'tf.logical_xor': 'tf.math.logical_xor',
'tf.losses.absolute_difference': 'tf.compat.v1.losses.absolute_difference',
'tf.losses.add_loss': 'tf.compat.v1.losses.add_loss',
'tf.losses.compute_weighted_loss': 'tf.compat.v1.losses.compute_weighted_loss',
'tf.losses.cosine_distance': 'tf.compat.v1.losses.cosine_distance',
'tf.losses.get_losses': 'tf.compat.v1.losses.get_losses',
'tf.losses.get_regularization_loss': 'tf.compat.v1.losses.get_regularization_loss',
'tf.losses.get_regularization_losses': 'tf.compat.v1.losses.get_regularization_losses',
'tf.losses.get_total_loss': 'tf.compat.v1.losses.get_total_loss',
'tf.losses.hinge_loss': 'tf.compat.v1.losses.hinge_loss',
'tf.losses.huber_loss': 'tf.compat.v1.losses.huber_loss',
'tf.losses.log_loss': 'tf.compat.v1.losses.log_loss',
'tf.losses.mean_pairwise_squared_error': 'tf.compat.v1.losses.mean_pairwise_squared_error',
'tf.losses.mean_squared_error': 'tf.compat.v1.losses.mean_squared_error',
'tf.losses.sigmoid_cross_entropy': 'tf.compat.v1.losses.sigmoid_cross_entropy',
'tf.losses.softmax_cross_entropy': 'tf.compat.v1.losses.softmax_cross_entropy',
'tf.losses.sparse_softmax_cross_entropy': 'tf.compat.v1.losses.sparse_softmax_cross_entropy',
'tf.make_template': 'tf.compat.v1.make_template',
'tf.make_tensor_proto': 'tf.compat.v1.make_tensor_proto',
'tf.manip.gather_nd': 'tf.gather_nd',
'tf.manip.reshape': 'tf.reshape',
'tf.manip.reverse': 'tf.reverse',
'tf.manip.roll': 'tf.roll',
'tf.manip.scatter_nd': 'tf.scatter_nd',
'tf.manip.space_to_batch_nd': 'tf.space_to_batch_nd',
'tf.manip.tile': 'tf.tile',
'tf.matching_files': 'tf.io.matching_files',
'tf.matrix_band_part': 'tf.linalg.band_part',
'tf.matrix_determinant': 'tf.linalg.det',
'tf.matrix_diag': 'tf.linalg.diag',
'tf.matrix_diag_part': 'tf.linalg.diag_part',
'tf.matrix_inverse': 'tf.linalg.inv',
'tf.matrix_set_diag': 'tf.linalg.set_diag',
'tf.matrix_solve': 'tf.linalg.solve',
'tf.matrix_solve_ls': 'tf.linalg.lstsq',
'tf.matrix_transpose': 'tf.linalg.transpose',
'tf.matrix_triangular_solve': 'tf.linalg.triangular_solve',
'tf.metrics.accuracy': 'tf.compat.v1.metrics.accuracy',
'tf.metrics.auc': 'tf.compat.v1.metrics.auc',
'tf.metrics.average_precision_at_k': 'tf.compat.v1.metrics.average_precision_at_k',
'tf.metrics.false_negatives': 'tf.compat.v1.metrics.false_negatives',
'tf.metrics.false_negatives_at_thresholds': 'tf.compat.v1.metrics.false_negatives_at_thresholds',
'tf.metrics.false_positives': 'tf.compat.v1.metrics.false_positives',
'tf.metrics.false_positives_at_thresholds': 'tf.compat.v1.metrics.false_positives_at_thresholds',
'tf.metrics.mean': 'tf.compat.v1.metrics.mean',
'tf.metrics.mean_absolute_error': 'tf.compat.v1.metrics.mean_absolute_error',
'tf.metrics.mean_cosine_distance': 'tf.compat.v1.metrics.mean_cosine_distance',
'tf.metrics.mean_iou': 'tf.compat.v1.metrics.mean_iou',
'tf.metrics.mean_per_class_accuracy': 'tf.compat.v1.metrics.mean_per_class_accuracy',
'tf.metrics.mean_relative_error': 'tf.compat.v1.metrics.mean_relative_error',
'tf.metrics.mean_squared_error': 'tf.compat.v1.metrics.mean_squared_error',
'tf.metrics.mean_tensor': 'tf.compat.v1.metrics.mean_tensor',
'tf.metrics.percentage_below': 'tf.compat.v1.metrics.percentage_below',
'tf.metrics.precision': 'tf.compat.v1.metrics.precision',
'tf.metrics.precision_at_k': 'tf.compat.v1.metrics.precision_at_k',
'tf.metrics.precision_at_thresholds': 'tf.compat.v1.metrics.precision_at_thresholds',
'tf.metrics.precision_at_top_k': 'tf.compat.v1.metrics.precision_at_top_k',
'tf.metrics.recall': 'tf.compat.v1.metrics.recall',
'tf.metrics.recall_at_k': 'tf.compat.v1.metrics.recall_at_k',
'tf.metrics.recall_at_thresholds': 'tf.compat.v1.metrics.recall_at_thresholds',
'tf.metrics.recall_at_top_k': 'tf.compat.v1.metrics.recall_at_top_k',
'tf.metrics.root_mean_squared_error': 'tf.compat.v1.metrics.root_mean_squared_error',
'tf.metrics.sensitivity_at_specificity': 'tf.compat.v1.metrics.sensitivity_at_specificity',
'tf.metrics.sparse_average_precision_at_k': 'tf.compat.v1.metrics.sparse_average_precision_at_k',
'tf.metrics.sparse_precision_at_k': 'tf.compat.v1.metrics.sparse_precision_at_k',
'tf.metrics.specificity_at_sensitivity': 'tf.compat.v1.metrics.specificity_at_sensitivity',
'tf.metrics.true_negatives': 'tf.compat.v1.metrics.true_negatives',
'tf.metrics.true_negatives_at_thresholds': 'tf.compat.v1.metrics.true_negatives_at_thresholds',
'tf.metrics.true_positives': 'tf.compat.v1.metrics.true_positives',
'tf.metrics.true_positives_at_thresholds': 'tf.compat.v1.metrics.true_positives_at_thresholds',
'tf.min_max_variable_partitioner': 'tf.compat.v1.min_max_variable_partitioner',
'tf.model_variables': 'tf.compat.v1.model_variables',
'tf.moving_average_variables': 'tf.compat.v1.moving_average_variables',
'tf.nn.bidirectional_dynamic_rnn': 'tf.compat.v1.nn.bidirectional_dynamic_rnn',
'tf.nn.conv3d_backprop_filter_v2': 'tf.nn.conv3d_backprop_filter',
'tf.nn.ctc_beam_search_decoder_v2': 'tf.nn.ctc_beam_search_decoder',
'tf.nn.ctc_loss_v2': 'tf.nn.ctc_loss',
'tf.nn.depthwise_conv2d_native': 'tf.compat.v1.nn.depthwise_conv2d_native',
'tf.nn.depthwise_conv2d_native_backprop_filter': 'tf.nn.depthwise_conv2d_backprop_filter',
'tf.nn.depthwise_conv2d_native_backprop_input': 'tf.nn.depthwise_conv2d_backprop_input',
'tf.nn.dynamic_rnn': 'tf.compat.v1.nn.dynamic_rnn',
'tf.nn.log_uniform_candidate_sampler': 'tf.random.log_uniform_candidate_sampler',
'tf.nn.quantized_avg_pool': 'tf.compat.v1.nn.quantized_avg_pool',
'tf.nn.quantized_conv2d': 'tf.compat.v1.nn.quantized_conv2d',
'tf.nn.quantized_max_pool': 'tf.compat.v1.nn.quantized_max_pool',
'tf.nn.quantized_relu_x': 'tf.compat.v1.nn.quantized_relu_x',
'tf.nn.raw_rnn': 'tf.compat.v1.nn.raw_rnn',
'tf.nn.relu_layer': 'tf.compat.v1.nn.relu_layer',
'tf.nn.rnn_cell.BasicLSTMCell': 'tf.compat.v1.nn.rnn_cell.BasicLSTMCell',
'tf.nn.rnn_cell.BasicRNNCell': 'tf.compat.v1.nn.rnn_cell.BasicRNNCell',
'tf.nn.rnn_cell.DropoutWrapper': 'tf.compat.v1.nn.rnn_cell.DropoutWrapper',
'tf.nn.rnn_cell.GRUCell': 'tf.compat.v1.nn.rnn_cell.GRUCell',
'tf.nn.rnn_cell.LSTMCell': 'tf.compat.v1.nn.rnn_cell.LSTMCell',
'tf.nn.rnn_cell.MultiRNNCell': 'tf.compat.v1.nn.rnn_cell.MultiRNNCell',
'tf.nn.static_bidirectional_rnn': 'tf.compat.v1.nn.static_bidirectional_rnn',
'tf.nn.static_rnn': 'tf.compat.v1.nn.static_rnn',
'tf.nn.uniform_candidate_sampler': 'tf.random.uniform_candidate_sampler',
'tf.nn.xw_plus_b': 'tf.compat.v1.nn.xw_plus_b',
'tf.op_scope': 'tf.compat.v1.op_scope',
'tf.orthogonal_initializer': 'tf.compat.v1.orthogonal_initializer',
'tf.parse_single_sequence_example': 'tf.io.parse_single_sequence_example',
'tf.parse_tensor': 'tf.io.parse_tensor',
'tf.placeholder': 'tf.compat.v1.placeholder',
'tf.placeholder_with_default': 'tf.compat.v1.placeholder_with_default',
'tf.polygamma': 'tf.math.polygamma',
'tf.profiler.AdviceProto': 'tf.compat.v1.profiler.AdviceProto',
'tf.profiler.GraphNodeProto': 'tf.compat.v1.profiler.GraphNodeProto',
'tf.profiler.MultiGraphNodeProto': 'tf.compat.v1.profiler.MultiGraphNodeProto',
'tf.profiler.OpLogProto': 'tf.compat.v1.profiler.OpLogProto',
'tf.profiler.ProfileOptionBuilder': 'tf.compat.v1.profiler.ProfileOptionBuilder',
'tf.profiler.Profiler': 'tf.compat.v1.profiler.Profiler',
'tf.profiler.advise': 'tf.compat.v1.profiler.advise',
'tf.profiler.profile': 'tf.compat.v1.profiler.profile',
'tf.profiler.write_op_log': 'tf.compat.v1.profiler.write_op_log',
'tf.py_func': 'tf.compat.v1.py_func',
'tf.python_io.TFRecordCompressionType': 'tf.io.TFRecordCompressionType',
'tf.python_io.TFRecordOptions': 'tf.io.TFRecordOptions',
'tf.python_io.TFRecordWriter': 'tf.io.TFRecordWriter',
'tf.python_io.tf_record_iterator': 'tf.compat.v1.python_io.tf_record_iterator',
'tf.qr': 'tf.linalg.qr',
'tf.quantize': 'tf.quantization.quantize',
'tf.quantized_concat': 'tf.quantization.quantized_concat',
'tf.ragged.RaggedTensorValue': 'tf.compat.v1.ragged.RaggedTensorValue',
'tf.ragged.constant_value': 'tf.compat.v1.ragged.constant_value',
'tf.random.get_seed': 'tf.compat.v1.random.get_seed',
'tf.random.set_random_seed': 'tf.compat.v1.random.set_random_seed',
'tf.random_crop': 'tf.image.random_crop',
'tf.random_gamma': 'tf.random.gamma',
'tf.random_normal': 'tf.random.normal',
'tf.random_shuffle': 'tf.random.shuffle',
'tf.random_uniform': 'tf.random.uniform',
'tf.read_file': 'tf.io.read_file',
'tf.real': 'tf.math.real',
'tf.reciprocal': 'tf.math.reciprocal',
'tf.regex_replace': 'tf.strings.regex_replace',
'tf.report_uninitialized_variables': 'tf.compat.v1.report_uninitialized_variables',
'tf.reset_default_graph': 'tf.compat.v1.reset_default_graph',
'tf.resource_loader.get_data_files_path': 'tf.compat.v1.resource_loader.get_data_files_path',
'tf.resource_loader.get_path_to_datafile': 'tf.compat.v1.resource_loader.get_path_to_datafile',
'tf.resource_loader.get_root_dir_with_all_resources': 'tf.compat.v1.resource_loader.get_root_dir_with_all_resources',
'tf.resource_loader.load_resource': 'tf.compat.v1.resource_loader.load_resource',
'tf.resource_loader.readahead_file_path': 'tf.compat.v1.resource_loader.readahead_file_path',
'tf.reverse_v2': 'tf.reverse',
'tf.rint': 'tf.math.rint',
'tf.rsqrt': 'tf.math.rsqrt',
'tf.saved_model.Builder': 'tf.compat.v1.saved_model.Builder',
'tf.saved_model.LEGACY_INIT_OP_KEY': 'tf.compat.v1.saved_model.LEGACY_INIT_OP_KEY',
'tf.saved_model.MAIN_OP_KEY': 'tf.compat.v1.saved_model.MAIN_OP_KEY',
'tf.saved_model.build_tensor_info': 'tf.compat.v1.saved_model.build_tensor_info',
'tf.saved_model.builder.SavedModelBuilder': 'tf.compat.v1.saved_model.builder.SavedModelBuilder',
'tf.saved_model.constants.ASSETS_DIRECTORY': 'tf.saved_model.ASSETS_DIRECTORY',
'tf.saved_model.constants.ASSETS_KEY': 'tf.saved_model.ASSETS_KEY',
'tf.saved_model.constants.LEGACY_INIT_OP_KEY': 'tf.compat.v1.saved_model.constants.LEGACY_INIT_OP_KEY',
'tf.saved_model.constants.MAIN_OP_KEY': 'tf.compat.v1.saved_model.constants.MAIN_OP_KEY',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PB': 'tf.saved_model.SAVED_MODEL_FILENAME_PB',
'tf.saved_model.constants.SAVED_MODEL_FILENAME_PBTXT': 'tf.saved_model.SAVED_MODEL_FILENAME_PBTXT',
'tf.saved_model.constants.SAVED_MODEL_SCHEMA_VERSION': 'tf.saved_model.SAVED_MODEL_SCHEMA_VERSION',
'tf.saved_model.constants.VARIABLES_DIRECTORY': 'tf.saved_model.VARIABLES_DIRECTORY',
'tf.saved_model.constants.VARIABLES_FILENAME': 'tf.saved_model.VARIABLES_FILENAME',
'tf.saved_model.experimental.save': 'tf.saved_model.save',
'tf.saved_model.get_tensor_from_tensor_info': 'tf.compat.v1.saved_model.get_tensor_from_tensor_info',
'tf.saved_model.load': 'tf.compat.v1.saved_model.load',
'tf.saved_model.loader.load': 'tf.compat.v1.saved_model.loader.load',
'tf.saved_model.loader.maybe_saved_model_directory': 'tf.compat.v1.saved_model.loader.maybe_saved_model_directory',
'tf.saved_model.main_op.main_op': 'tf.compat.v1.saved_model.main_op.main_op',
'tf.saved_model.main_op.main_op_with_restore': 'tf.compat.v1.saved_model.main_op.main_op_with_restore',
'tf.saved_model.main_op_with_restore': 'tf.compat.v1.saved_model.main_op_with_restore',
'tf.saved_model.maybe_saved_model_directory': 'tf.compat.v1.saved_model.maybe_saved_model_directory',
'tf.saved_model.signature_constants.CLASSIFY_INPUTS': 'tf.saved_model.CLASSIFY_INPUTS',
'tf.saved_model.signature_constants.CLASSIFY_METHOD_NAME': 'tf.saved_model.CLASSIFY_METHOD_NAME',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_CLASSES': 'tf.saved_model.CLASSIFY_OUTPUT_CLASSES',
'tf.saved_model.signature_constants.CLASSIFY_OUTPUT_SCORES': 'tf.saved_model.CLASSIFY_OUTPUT_SCORES',
'tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY': 'tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY',
'tf.saved_model.signature_constants.PREDICT_INPUTS': 'tf.saved_model.PREDICT_INPUTS',
'tf.saved_model.signature_constants.PREDICT_METHOD_NAME': 'tf.saved_model.PREDICT_METHOD_NAME',
'tf.saved_model.signature_constants.PREDICT_OUTPUTS': 'tf.saved_model.PREDICT_OUTPUTS',
'tf.saved_model.signature_constants.REGRESS_INPUTS': 'tf.saved_model.REGRESS_INPUTS',
'tf.saved_model.signature_constants.REGRESS_METHOD_NAME': 'tf.saved_model.REGRESS_METHOD_NAME',
'tf.saved_model.signature_constants.REGRESS_OUTPUTS': 'tf.saved_model.REGRESS_OUTPUTS',
'tf.saved_model.signature_def_utils.build_signature_def': 'tf.saved_model.build_signature_def',
'tf.saved_model.signature_def_utils.classification_signature_def': 'tf.saved_model.classification_signature_def',
'tf.saved_model.signature_def_utils.is_valid_signature': 'tf.saved_model.is_valid_signature',
'tf.saved_model.signature_def_utils.predict_signature_def': 'tf.saved_model.predict_signature_def',
'tf.saved_model.signature_def_utils.regression_signature_def': 'tf.saved_model.regression_signature_def',
'tf.saved_model.simple_save': 'tf.compat.v1.saved_model.simple_save',
'tf.saved_model.tag_constants.GPU': 'tf.saved_model.GPU',
'tf.saved_model.tag_constants.SERVING': 'tf.saved_model.SERVING',
'tf.saved_model.tag_constants.TPU': 'tf.saved_model.TPU',
'tf.saved_model.tag_constants.TRAINING': 'tf.saved_model.TRAINING',
'tf.saved_model.utils.build_tensor_info': 'tf.compat.v1.saved_model.utils.build_tensor_info',
'tf.saved_model.utils.get_tensor_from_tensor_info': 'tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info',
'tf.scatter_add': 'tf.compat.v1.scatter_add',
'tf.scatter_nd_add': 'tf.compat.v1.scatter_nd_add',
'tf.scatter_nd_sub': 'tf.compat.v1.scatter_nd_sub',
'tf.scatter_nd_update': 'tf.compat.v1.scatter_nd_update',
'tf.scatter_sub': 'tf.compat.v1.scatter_sub',
'tf.scatter_update': 'tf.compat.v1.scatter_update',
'tf.segment_max': 'tf.math.segment_max',
'tf.segment_mean': 'tf.math.segment_mean',
'tf.segment_min': 'tf.math.segment_min',
'tf.segment_prod': 'tf.math.segment_prod',
'tf.segment_sum': 'tf.math.segment_sum',
'tf.self_adjoint_eig': 'tf.linalg.eigh',
'tf.self_adjoint_eigvals': 'tf.linalg.eigvalsh',
'tf.serialize_many_sparse': 'tf.compat.v1.serialize_many_sparse',
'tf.serialize_sparse': 'tf.compat.v1.serialize_sparse',
'tf.serialize_tensor': 'tf.io.serialize_tensor',
'tf.set_random_seed': 'tf.compat.v1.set_random_seed',
'tf.setdiff1d': 'tf.compat.v1.setdiff1d',
'tf.sets.set_difference': 'tf.sets.difference',
'tf.sets.set_intersection': 'tf.sets.intersection',
'tf.sets.set_size': 'tf.sets.size',
'tf.sets.set_union': 'tf.sets.union',
'tf.space_to_depth': 'tf.compat.v1.space_to_depth',
'tf.sparse.matmul': 'tf.sparse.sparse_dense_matmul',
'tf.sparse.merge': 'tf.compat.v1.sparse.merge',
'tf.sparse.placeholder': 'tf.compat.v1.sparse.placeholder',
'tf.sparse.reduce_max_sparse': 'tf.compat.v1.sparse.reduce_max_sparse',
'tf.sparse.reduce_sum_sparse': 'tf.compat.v1.sparse.reduce_sum_sparse',
'tf.sparse_fill_empty_rows': 'tf.sparse.fill_empty_rows',
'tf.sparse_mask': 'tf.sparse.mask',
'tf.sparse_maximum': 'tf.sparse.maximum',
'tf.sparse_merge': 'tf.compat.v1.sparse_merge',
'tf.sparse_minimum': 'tf.sparse.minimum',
'tf.sparse_placeholder': 'tf.compat.v1.sparse_placeholder',
'tf.sparse_reduce_max_sparse': 'tf.compat.v1.sparse_reduce_max_sparse',
'tf.sparse_reduce_sum_sparse': 'tf.compat.v1.sparse_reduce_sum_sparse',
'tf.sparse_reorder': 'tf.sparse.reorder',
'tf.sparse_reset_shape': 'tf.sparse.reset_shape',
'tf.sparse_reshape': 'tf.sparse.reshape',
'tf.sparse_retain': 'tf.sparse.retain',
'tf.sparse_segment_mean': 'tf.compat.v1.sparse_segment_mean',
'tf.sparse_segment_sqrt_n': 'tf.compat.v1.sparse_segment_sqrt_n',
'tf.sparse_segment_sum': 'tf.compat.v1.sparse_segment_sum',
'tf.sparse_slice': 'tf.sparse.slice',
'tf.sparse_softmax': 'tf.sparse.softmax',
'tf.sparse_tensor_dense_matmul': 'tf.sparse.sparse_dense_matmul',
'tf.sparse_tensor_to_dense': 'tf.sparse.to_dense',
'tf.sparse_to_dense': 'tf.compat.v1.sparse_to_dense',
'tf.sparse_to_indicator': 'tf.sparse.to_indicator',
'tf.sparse_transpose': 'tf.sparse.transpose',
'tf.spectral.dct': 'tf.signal.dct',
'tf.spectral.fft': 'tf.signal.fft',
'tf.spectral.fft2d': 'tf.signal.fft2d',
'tf.spectral.fft3d': 'tf.signal.fft3d',
'tf.spectral.idct': 'tf.signal.idct',
'tf.spectral.ifft': 'tf.signal.ifft',
'tf.spectral.ifft2d': 'tf.signal.ifft2d',
'tf.spectral.ifft3d': 'tf.signal.ifft3d',
'tf.spectral.irfft': 'tf.signal.irfft',
'tf.spectral.irfft2d': 'tf.signal.irfft2d',
'tf.spectral.irfft3d': 'tf.signal.irfft3d',
'tf.spectral.rfft': 'tf.signal.rfft',
'tf.spectral.rfft2d': 'tf.signal.rfft2d',
'tf.spectral.rfft3d': 'tf.signal.rfft3d',
'tf.squared_difference': 'tf.math.squared_difference',
'tf.string_join': 'tf.strings.join',
'tf.string_strip': 'tf.strings.strip',
'tf.string_to_hash_bucket_fast': 'tf.strings.to_hash_bucket_fast',
'tf.string_to_hash_bucket_strong': 'tf.strings.to_hash_bucket_strong',
'tf.summary.Event': 'tf.compat.v1.summary.Event',
'tf.summary.FileWriter': 'tf.compat.v1.summary.FileWriter',
'tf.summary.FileWriterCache': 'tf.compat.v1.summary.FileWriterCache',
'tf.summary.SessionLog': 'tf.compat.v1.summary.SessionLog',
'tf.summary.Summary': 'tf.compat.v1.summary.Summary',
'tf.summary.SummaryDescription': 'tf.compat.v1.summary.SummaryDescription',
'tf.summary.TaggedRunMetadata': 'tf.compat.v1.summary.TaggedRunMetadata',
'tf.summary.audio': 'tf.compat.v1.summary.audio',
'tf.summary.get_summary_description': 'tf.compat.v1.summary.get_summary_description',
'tf.summary.histogram': 'tf.compat.v1.summary.histogram',
'tf.summary.image': 'tf.compat.v1.summary.image',
'tf.summary.merge': 'tf.compat.v1.summary.merge',
'tf.summary.merge_all': 'tf.compat.v1.summary.merge_all',
'tf.summary.scalar': 'tf.compat.v1.summary.scalar',
'tf.summary.tensor_summary': 'tf.compat.v1.summary.tensor_summary',
'tf.summary.text': 'tf.compat.v1.summary.text',
'tf.svd': 'tf.linalg.svd',
'tf.tables_initializer': 'tf.compat.v1.tables_initializer',
'tf.test.StubOutForTesting': 'tf.compat.v1.test.StubOutForTesting',
'tf.test.compute_gradient': 'tf.compat.v1.test.compute_gradient',
'tf.test.compute_gradient_error': 'tf.compat.v1.test.compute_gradient_error',
'tf.test.get_temp_dir': 'tf.compat.v1.test.get_temp_dir',
'tf.test.mock': 'tf.compat.v1.test.mock',
'tf.test.test_src_dir_path': 'tf.compat.v1.test.test_src_dir_path',
'tf.to_bfloat16': 'tf.compat.v1.to_bfloat16',
'tf.to_complex128': 'tf.compat.v1.to_complex128',
'tf.to_complex64': 'tf.compat.v1.to_complex64',
'tf.to_double': 'tf.compat.v1.to_double',
'tf.to_float': 'tf.compat.v1.to_float',
'tf.to_int32': 'tf.compat.v1.to_int32',
'tf.to_int64': 'tf.compat.v1.to_int64',
'tf.trace': 'tf.linalg.trace',
'tf.train.AdadeltaOptimizer': 'tf.compat.v1.train.AdadeltaOptimizer',
'tf.train.AdagradDAOptimizer': 'tf.compat.v1.train.AdagradDAOptimizer',
'tf.train.AdagradOptimizer': 'tf.compat.v1.train.AdagradOptimizer',
'tf.train.AdamOptimizer': 'tf.compat.v1.train.AdamOptimizer',
'tf.train.CheckpointSaverHook': 'tf.estimator.CheckpointSaverHook',
'tf.train.CheckpointSaverListener': 'tf.estimator.CheckpointSaverListener',
'tf.train.ChiefSessionCreator': 'tf.compat.v1.train.ChiefSessionCreator',
'tf.train.FeedFnHook': 'tf.estimator.FeedFnHook',
'tf.train.FinalOpsHook': 'tf.estimator.FinalOpsHook',
'tf.train.FtrlOptimizer': 'tf.compat.v1.train.FtrlOptimizer',
'tf.train.GlobalStepWaiterHook': 'tf.estimator.GlobalStepWaiterHook',
'tf.train.GradientDescentOptimizer': 'tf.compat.v1.train.GradientDescentOptimizer',
'tf.train.LoggingTensorHook': 'tf.estimator.LoggingTensorHook',
'tf.train.LooperThread': 'tf.compat.v1.train.LooperThread',
'tf.train.MomentumOptimizer': 'tf.compat.v1.train.MomentumOptimizer',
'tf.train.MonitoredSession': 'tf.compat.v1.train.MonitoredSession',
'tf.train.MonitoredTrainingSession': 'tf.compat.v1.train.MonitoredTrainingSession',
'tf.train.NanLossDuringTrainingError': 'tf.estimator.NanLossDuringTrainingError',
'tf.train.NanTensorHook': 'tf.estimator.NanTensorHook',
'tf.train.NewCheckpointReader': 'tf.compat.v1.train.NewCheckpointReader',
'tf.train.Optimizer': 'tf.compat.v1.train.Optimizer',
'tf.train.ProfilerHook': 'tf.estimator.ProfilerHook',
'tf.train.ProximalAdagradOptimizer': 'tf.compat.v1.train.ProximalAdagradOptimizer',
'tf.train.ProximalGradientDescentOptimizer': 'tf.compat.v1.train.ProximalGradientDescentOptimizer',
'tf.train.QueueRunner': 'tf.compat.v1.train.QueueRunner',
'tf.train.RMSPropOptimizer': 'tf.compat.v1.train.RMSPropOptimizer',
'tf.train.Saver': 'tf.compat.v1.train.Saver',
'tf.train.SaverDef': 'tf.compat.v1.train.SaverDef',
'tf.train.Scaffold': 'tf.compat.v1.train.Scaffold',
'tf.train.SecondOrStepTimer': 'tf.estimator.SecondOrStepTimer',
'tf.train.Server': 'tf.distribute.Server',
'tf.train.SessionCreator': 'tf.compat.v1.train.SessionCreator',
'tf.train.SessionManager': 'tf.compat.v1.train.SessionManager',
'tf.train.SessionRunArgs': 'tf.estimator.SessionRunArgs',
'tf.train.SessionRunContext': 'tf.estimator.SessionRunContext',
'tf.train.SessionRunHook': 'tf.estimator.SessionRunHook',
'tf.train.SessionRunValues': 'tf.estimator.SessionRunValues',
'tf.train.SingularMonitoredSession': 'tf.compat.v1.train.SingularMonitoredSession',
'tf.train.StepCounterHook': 'tf.estimator.StepCounterHook',
'tf.train.StopAtStepHook': 'tf.estimator.StopAtStepHook',
'tf.train.SummarySaverHook': 'tf.estimator.SummarySaverHook',
'tf.train.Supervisor': 'tf.compat.v1.train.Supervisor',
'tf.train.SyncReplicasOptimizer': 'tf.compat.v1.train.SyncReplicasOptimizer',
'tf.train.VocabInfo': 'tf.estimator.VocabInfo',
'tf.train.WorkerSessionCreator': 'tf.compat.v1.train.WorkerSessionCreator',
'tf.train.add_queue_runner': 'tf.compat.v1.train.add_queue_runner',
'tf.train.assert_global_step': 'tf.compat.v1.train.assert_global_step',
'tf.train.basic_train_loop': 'tf.compat.v1.train.basic_train_loop',
'tf.train.batch': 'tf.compat.v1.train.batch',
'tf.train.batch_join': 'tf.compat.v1.train.batch_join',
'tf.train.checkpoint_exists': 'tf.compat.v1.train.checkpoint_exists',
'tf.train.create_global_step': 'tf.compat.v1.train.create_global_step',
'tf.train.do_quantize_training_on_graphdef': 'tf.compat.v1.train.do_quantize_training_on_graphdef',
'tf.train.export_meta_graph': 'tf.compat.v1.train.export_meta_graph',
'tf.train.generate_checkpoint_state_proto': 'tf.compat.v1.train.generate_checkpoint_state_proto',
'tf.train.get_checkpoint_mtimes': 'tf.compat.v1.train.get_checkpoint_mtimes',
'tf.train.get_global_step': 'tf.compat.v1.train.get_global_step',
'tf.train.get_or_create_global_step': 'tf.compat.v1.train.get_or_create_global_step',
'tf.train.global_step': 'tf.compat.v1.train.global_step',
'tf.train.import_meta_graph': 'tf.compat.v1.train.import_meta_graph',
'tf.train.init_from_checkpoint': 'tf.compat.v1.train.init_from_checkpoint',
'tf.train.input_producer': 'tf.compat.v1.train.input_producer',
'tf.train.limit_epochs': 'tf.compat.v1.train.limit_epochs',
'tf.train.match_filenames_once': 'tf.io.match_filenames_once',
'tf.train.maybe_batch': 'tf.compat.v1.train.maybe_batch',
'tf.train.maybe_batch_join': 'tf.compat.v1.train.maybe_batch_join',
'tf.train.maybe_shuffle_batch': 'tf.compat.v1.train.maybe_shuffle_batch',
'tf.train.maybe_shuffle_batch_join': 'tf.compat.v1.train.maybe_shuffle_batch_join',
'tf.train.piecewise_constant': 'tf.compat.v1.train.piecewise_constant',
'tf.train.queue_runner.QueueRunner': 'tf.compat.v1.train.queue_runner.QueueRunner',
'tf.train.queue_runner.add_queue_runner': 'tf.compat.v1.train.queue_runner.add_queue_runner',
'tf.train.queue_runner.start_queue_runners': 'tf.compat.v1.train.queue_runner.start_queue_runners',
'tf.train.range_input_producer': 'tf.compat.v1.train.range_input_producer',
'tf.train.remove_checkpoint': 'tf.compat.v1.train.remove_checkpoint',
'tf.train.replica_device_setter': 'tf.compat.v1.train.replica_device_setter',
'tf.train.shuffle_batch': 'tf.compat.v1.train.shuffle_batch',
'tf.train.shuffle_batch_join': 'tf.compat.v1.train.shuffle_batch_join',
'tf.train.slice_input_producer': 'tf.compat.v1.train.slice_input_producer',
'tf.train.start_queue_runners': 'tf.compat.v1.train.start_queue_runners',
'tf.train.string_input_producer': 'tf.compat.v1.train.string_input_producer',
'tf.train.summary_iterator': 'tf.compat.v1.train.summary_iterator',
'tf.train.update_checkpoint_state': 'tf.compat.v1.train.update_checkpoint_state',
'tf.train.warm_start': 'tf.compat.v1.train.warm_start',
'tf.train.write_graph': 'tf.io.write_graph',
'tf.trainable_variables': 'tf.compat.v1.trainable_variables',
'tf.truncated_normal': 'tf.random.truncated_normal',
'tf.uniform_unit_scaling_initializer': 'tf.compat.v1.uniform_unit_scaling_initializer',
'tf.unsorted_segment_max': 'tf.math.unsorted_segment_max',
'tf.unsorted_segment_mean': 'tf.math.unsorted_segment_mean',
'tf.unsorted_segment_min': 'tf.math.unsorted_segment_min',
'tf.unsorted_segment_prod': 'tf.math.unsorted_segment_prod',
'tf.unsorted_segment_sqrt_n': 'tf.math.unsorted_segment_sqrt_n',
'tf.unsorted_segment_sum': 'tf.math.unsorted_segment_sum',
'tf.variable_axis_size_partitioner': 'tf.compat.v1.variable_axis_size_partitioner',
'tf.variable_op_scope': 'tf.compat.v1.variable_op_scope',
'tf.variable_scope': 'tf.compat.v1.variable_scope',
'tf.variables_initializer': 'tf.compat.v1.variables_initializer',
'tf.variance_scaling_initializer': 'tf.compat.v1.variance_scaling_initializer',
'tf.verify_tensor_all_finite': 'tf.compat.v1.verify_tensor_all_finite',
'tf.wrap_function': 'tf.compat.v1.wrap_function',
'tf.write_file': 'tf.io.write_file',
'tf.zeta': 'tf.math.zeta'
}
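# A minimal sketch (helper name is hypothetical; the module-level dict name is
# assumed) of applying this map to source text, longest names first so that a
# shorter key which is a prefix of a longer one cannot shadow it:
#
#   import re
#   def apply_renames(source, renames):
#       for old in sorted(renames, key=len, reverse=True):
#           source = re.sub(re.escape(old) + r'\b', renames[old], source)
#       return source
#
#   apply_renames("x = tf.matrix_inverse(m)", RENAMES)
#   # -> "x = tf.linalg.inv(m)"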
| StarcoderdataPython |
1646915 | <reponame>larson-group/clubb_release
import netCDF4
import numpy as np
import pylab as pl
clubb_nc = netCDF4.Dataset('cldwt/rico_silhs_zt.nc')
silhs_files = [ 'cldwt/rico_silhs_lh_zt.nc' ]
silhs_labels = [ 'cldwt' ]
silhs_sfc_nc = netCDF4.Dataset('cldwt/rico_silhs_lh_sfc.nc')
silhs_ncs = list()
for silhs_file in silhs_files:
silhs_ncs.append(netCDF4.Dataset(silhs_file))
#############
silhs_2D_u = netCDF4.Dataset('cldwt/rico_silhs_u_lh_sample_points_2D.nc')
silhs_2D_nl = netCDF4.Dataset('cldwt/rico_silhs_nl_lh_sample_points_2D.nc')
dp1 = silhs_2D_u.variables['dp1']
rr_nl = silhs_2D_nl.variables['rr']
mf1 = clubb_nc.variables['mixt_frac']
#############
clubb_var = clubb_nc.variables['mu_rr_1']
l_time_shift = False
silhs_vars = list()
for silhs_nc in silhs_ncs:
silhs_vars.append(silhs_nc.variables['lh_rrm'])
k_lh_start = silhs_sfc_nc.variables['k_lh_start']
time1 = 300
time2 = 400
clubb_var_plt = np.empty(time2-time1)
silhs_vars_plt = list()
for silhs_var in silhs_vars:
silhs_vars_plt.append(np.empty(time2-time1))
for t in range(time1,time2):
k = int(round(k_lh_start[t,0,0,0])) - 1
if l_time_shift:
clubb_var_plt[t-time1] = clubb_var[t-1,k,0,0]
else:
clubb_var_plt[t-time1] = clubb_var[t,k,0,0]
# for u in range(0,len(silhs_vars_plt)):
# silhs_vars_plt[u][t-time1] = silhs_vars[u][t,k,0,0]
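    # conditional average: keep only samples assigned to the first PDF
    # component (dp1 < mixt_frac) whose rain water mixing ratio is positive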
samples = []
for i in range(0,100):
if dp1[t,k,i,0] < mf1[t,k,0,0] and rr_nl[t,k,i,0] > 0:
samples.append(rr_nl[t,k,i,0])
avg = np.average(samples)
silhs_vars_plt[0][t-time1] = avg
pl.plot(range(time1,time2), clubb_var_plt[:], label='analytic')
for u in range(0,len(silhs_vars_plt)):
pl.plot(range(time1,time2), silhs_vars_plt[u][:], label=silhs_labels[u])
pl.legend()
pl.show()
clubb_nc.close()
for silhs_nc in silhs_ncs:
silhs_nc.close()
silhs_sfc_nc.close() | StarcoderdataPython |
1792039 | <gh_stars>0
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import pwd
import re
import sys
from subprocess import check_output, CalledProcessError
users = [user for user in pwd.getpwall() if 1000 <= user.pw_uid < 2000]
try:
lxc_cmd = ["lxc", "ls", "volatile.last_state.power=RUNNING", "-c", "n", "--format", "csv"]
lxc_running = check_output(lxc_cmd, universal_newlines=True).splitlines()
except (CalledProcessError, FileNotFoundError):
lxc_running = []
def get_cpu_usage():
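    # systemd-cgtop needs two iterations to report CPU percentages; the first
    # sample has none, so only the second half of the output lines is parsed.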
out = check_output(["systemd-cgtop", "-b", "-n", "2", "-c", "--raw"], universal_newlines=True)
outlines = out.splitlines()
regex_user = re.compile(r'^/user.slice/user-(1\d{3}).slice ')
regex_lxc = re.compile(r'^/lxc/(.+?) ')
cpu_usage_users = {}
cpu_usage_lxc = {}
for line in outlines[len(outlines)//2:]:
match_user = regex_user.match(line)
match_lxc = regex_lxc.match(line)
if match_user or match_lxc:
_, _, cpu, _, _, _ = line.split()
if cpu == '-':
continue
if match_user:
uid = int(match_user.group(1))
cpu_usage_users[uid] = cpu
elif match_lxc:
lxc_label = match_lxc.group(1)
cpu_usage_lxc[lxc_label] = cpu
else:
continue
for user in users:
label = "u{}".format(user.pw_uid)
value = cpu_usage_users.get(user.pw_uid, 'U')
print("{}.value {}".format(label, value))
for lxc in lxc_running:
label = "lxc_{}".format(re.sub(r'[^a-zA-Z0-9_]', '_', lxc))
value = cpu_usage_lxc.get(lxc, 'U')
print("{}.value {}".format(label, value))
def output_config():
print("graph_title CPU usage per user and LXC containers")
print("graph_vlabel %")
print("graph_category system")
print("graph_args -l 0 -u 3200")
print("graph_scale no")
print("graph_total Total")
first = True
for user in users:
label = "u{}".format(user.pw_uid)
print("{}.label {}".format(label, user.pw_name))
print("{}.info Amount of CPU used by {}".format(label, user.pw_name))
if first:
print("{}.draw AREA".format(label))
else:
print("{}.draw STACK".format(label))
print("{}.min 0".format(label))
first = False
for lxc in lxc_running:
label = "lxc_{}".format(re.sub(r'[^a-zA-Z0-9_]', '_', lxc))
print("{}.label {}".format(label, lxc))
print("{}.info Amount of CPU used by LXC container {}".format(label, lxc))
if first:
print("{}.draw AREA".format(label))
else:
print("{}.draw STACK".format(label))
print("{}.min 0".format(label))
first = False
def main():
if len(sys.argv) == 1:
get_cpu_usage()
if len(sys.argv) == 2:
if sys.argv[1] == 'config':
output_config()
if __name__ == '__main__':
main()
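# Munin calls the plugin with "config" to fetch the graph definition and with
# no arguments to fetch values; typical value output (labels depend on the
# users and containers actually present):
#
#   u1000.value 12.3
#   lxc_web01.value 4.5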
| StarcoderdataPython |
183149 | <reponame>alexa/aac-sdk
import subprocess
from pylib.common import BaseHandler
class BuilderHandler(BaseHandler):
def run( self ):
clean_packages = self.builder_configuration.find_packages( self.get_arg("pattern") )
self.log_info( "Cleaning cached builder data..." )
for next in clean_packages:
# remove package from builder settings
self.builder_configuration.remove_package( next )
# remove package from conan
if not self.get_arg( "skip_conan", False ):
subprocess.run( ["conan", "remove", next, "-f"], env=self.conan_env, capture_output=not self.verbose, check=True )
# save the settings state
self.builder_configuration.save()
| StarcoderdataPython |
3328500 | import argparse
from CoolDataLoader import CoolDataLoader
from CoolDataProcessor import *
# Training settings
parser = argparse.ArgumentParser()
parser.add_argument('--f', type=str)
args = parser.parse_args()
Loader = CoolDataLoader(args.f)
# print(Loader.get_course())
# print(Loader.get_videos())
# print(Loader.get_students())
# print(Loader.get_records())
videos_df = Loader.get_videos()
cool_df = Loader.filter_select(["b07705016", "b07705051"], [], "", "")
# print(cool_df.head())
# print(videos_df.head())
# func 1
watch_time_df = watch_time(cool_df)
# print(watch_time_df)
# func2
complete_table = completion_rate(cool_df, videos_df)
# print(complete_table)
# func 3
action_freq_table = action_freq(cool_df, action="forward")
# print(action_freq_table)
# func 4
action_dura_table = action_duration(cool_df, action="forward")
# print(action_dura_table)
# func 5
pause_freq_table = pause_freq(cool_df)
# print(pause_freq_table)
period_watch_df = watch_time(cool_df, [0, 600])
print(period_watch_df)
| StarcoderdataPython |
24340 | #
# One-liner implementation of cPickle
#
from pickle import *
from pickle import __doc__, __version__, format_version, compatible_formats
BadPickleGet = KeyError
UnpickleableError = PicklingError
# ____________________________________________________________
# XXX some temporary dark magic to produce pickled dumps that are
# closer to the ones produced by cPickle in CPython
from pickle import StringIO
PythonPickler = Pickler
class Pickler(PythonPickler):
def __init__(self, *args, **kw):
self.__f = None
if len(args) == 1 and isinstance(args[0], int):
self.__f = StringIO()
PythonPickler.__init__(self, self.__f, args[0], **kw)
else:
PythonPickler.__init__(self, *args, **kw)
def memoize(self, obj):
self.memo[None] = None # cPickle starts counting at one
return PythonPickler.memoize(self, obj)
def getvalue(self):
return self.__f and self.__f.getvalue()
def dump(obj, file, protocol=None, bin=None):
    # the historical 'bin' flag selected the old binary format (protocol 1)
    if protocol is None and bin is not None:
        protocol = 1 if bin else 0
    Pickler(file, protocol).dump(obj)
def dumps(obj, protocol=None, bin=None):
    if protocol is None and bin is not None:
        protocol = 1 if bin else 0
    file = StringIO()
    Pickler(file, protocol).dump(obj)
    return file.getvalue()
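# A quick round-trip sketch (Python 2 era, to match this module):
#
#   s = dumps([1, 2, {'a': 3}], 2)
#   assert loads(s) == [1, 2, {'a': 3}]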
| StarcoderdataPython |
3257591 | """
Week 2, Day 2: Valid Perfect Square
Given a positive integer num, write a function which returns True if num is a perfect square else False.
Note:
Do not use any built-in library function such as sqrt.
Example 1:
Input: 16
Output: true
Example 2:
Input: 14
Output: false
"""
from time import perf_counter_ns
def isPerfectSquare(num: int) -> bool:
"""Okay. Solution is O(1)."""
r = int(num ** 0.5)
return r * r == num
def isPerfectSquare_v2(num: int) -> bool:
"""
This O(1) solution were contributed to LeetCode by another user.
Way faster than my first solution!
A good example why you should always: 'Know your standard API!'
But there is so much much python magic in it, that it almost feels like cheating.
"""
return (num ** 0.5).is_integer()
def isPerfectSquare_v3(num: int) -> bool:
"""
    Solve with math: (x + 1)^2 = x^2 + 2*x + 1, where 2*x + 1 is an odd number,
    so every perfect square is a sum of consecutive odd numbers starting at 1.
    Subtracting odd numbers takes O(sqrt(num)) iterations, so this math-based
    solution is elegant, but slower than the O(1) variants.
"""
x = 1
while num > 0:
num -= x
x += 2
return num == 0
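def isPerfectSquare_v4(num: int) -> bool:
    """
    A sketch for contrast: exact O(log n) integer binary search. Unlike the
    float-based v1/v2, it cannot suffer rounding error for very large ints.
    """
    lo, hi = 1, num
    while lo <= hi:
        mid = (lo + hi) // 2
        sq = mid * mid
        if sq == num:
            return True
        if sq < num:
            lo = mid + 1
        else:
            hi = mid - 1
    return False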
if __name__ == '__main__':
p = 4321 * 4321
q = 4321 * 4319
start = perf_counter_ns()
print(isPerfectSquare(16) is True)
print(isPerfectSquare(14) is False)
print(isPerfectSquare(p) is True)
print(isPerfectSquare(q) is False)
print('v1', perf_counter_ns() - start)
start = perf_counter_ns()
print(isPerfectSquare_v2(16) is True)
print(isPerfectSquare_v2(14) is False)
print(isPerfectSquare_v2(p) is True)
print(isPerfectSquare_v2(q) is False)
print('v2', perf_counter_ns() - start)
start = perf_counter_ns()
print(isPerfectSquare_v3(16) is True)
print(isPerfectSquare_v3(14) is False)
print(isPerfectSquare_v3(p) is True)
print(isPerfectSquare_v3(q) is False)
print('v3', perf_counter_ns() - start)
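    start = perf_counter_ns()
    print(isPerfectSquare_v4(16) is True)
    print(isPerfectSquare_v4(14) is False)
    print(isPerfectSquare_v4(p) is True)
    print(isPerfectSquare_v4(q) is False)
    print('v4', perf_counter_ns() - start)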
# last line of code
| StarcoderdataPython |
1672653 | <filename>secrets.template.py
##
## SP API Developer Settings
##
# This is the first part of the LWA credentials from the developer console
# and is specific to the application you set up. This looks something like
# "amzn1.application-oa2-client.<hex id>"
client_id = None
# This is the hidden part of the LWA credentials from the developer console
client_secret = None
# This is what you get after you click Authorize to initiate a self-authorization
# for this specific application in the specific marketplace.
refresh_token = None
##
## AWS Credentials
##
# If you aren't in a lambda you need to fill out the following 3 items
# You also don't need the first two if you have system wide credentials
# set up for AWS e.g. via `aws configure`
access_key = None
secret_key = None
registered_role_arn = None
| StarcoderdataPython |
1700709 | <filename>setup.py
from setuptools import setup, find_packages
setup(
name='currency-wallet',
version='0.1.0',
description="Track investment returns in multiple currencies through the National Bank of Poland's API.",
packages=find_packages(include=['currency_wallet']),
python_requires='>=3.6',
entry_points={
'console_scripts': ['currency-wallet=currency_wallet.cli:cli'],
},
)
| StarcoderdataPython |
105760 | <reponame>jeamick/ares-visual<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: <NAME>
import json
import importlib
import inspect
import sys
from ares.Lib.js import AresJsEncoder
factory = None
def getConfigs(libraries):
"""
:category: Factory
:rubric: JS
:type: Configuration
:dsc:
Load the factory with all the different javascript configuration for the different HTML components.
Some components like charts, tables, pivot and lists are bespoke and would require extra tuning according to the need.
This module in the framework will segregate all the different official configurations. Some bespoke ones can be
added in the reports using the available hooks for each type of components
:return: The content of the factory
"""
global factory
if factory is None:
tmpFactory = {}
for libConfig in libraries:
chartMod = importlib.import_module('ares.Lib.js.configs.JsConfig%s' % libConfig)
for name, chartCls in inspect.getmembers(sys.modules[chartMod.__name__]):
chartAlias = getattr(chartCls, 'alias', None)
if chartAlias is not None:
if chartAlias in tmpFactory.get(libConfig, {}):
raise Exception("Duplicated Name - Chart %s in %s cannot be replaced !!!" % (chartAlias, libConfig))
tmpFactory.setdefault(libConfig, {})[chartAlias] = chartCls
factory = tmpFactory
return factory
def getConfig(pyCls, chartFam):
"""
:category: Chart Bespoke Configuration
:rubric: JS
:type: Framework Extension
:example: aresObj.addChartConfig(JsTestHBar, 'ChartJs')
:dsc:
Entry point to allow the add of bespoke configurations. Those configurations should be linked to an alias which has
to be unique. From this entry point it is not possible to update existing configurations.
Those configurations should follow the defined class structure in order to be then easily added to the framework in the
next release.
The entry point of this function in the framework is in the function aresObj.addChartConfig in the framework
"""
chartMod = importlib.import_module('ares.Lib.js.configs.JsConfig%s' % chartFam)
return type(pyCls.__name__, (pyCls, chartMod.JsBase), {})
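# Illustrative sketch only (class name, alias and attributes are hypothetical):
# a bespoke configuration registered through this entry point typically looks
# like
#
#   class JsTestHBar(object):
#       alias = 'test-hbar'
#       _attrs = {'type': 'horizontalBar'}
#
#   aresObj.addChartConfig(JsTestHBar, 'ChartJs')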
class JsConfig(dict):
"""
:category: Javascript Wrapper
:rubric: JS
:type: System
:dsc:
Base class in charge of the conversion of Python configurations to Javascript ones.
Those configurations defined on the Python side will only be used and visible on the Javascript.
This class will build a dictionary of valid parameters for the Javascript layer.
## Class Parameters
- aresObj: The uniq AReS object, shared with all the different objects in the framework
- seriesProperties: Dictionary with configuration to be added after the Javascript data transformation to the object
- data: The Python data structure which will be added to the data section of the Javascript chart
## Special static class variables
Those variable are properties of the class and should not be changed directly. Some methods are available
in order to add bespoke configuration to the chart or to the series like addAttr() and addSeriesAttr().
If something seems to be missing, please never change those variable and either create a new bespoke configuration
or talk to your IT team.
- _attrs, Chart properties and styles
- _statics, parameters added to each series at the end of the data build
The different Javascript structure are defined by the charting libraries
"""
def __init__(self, aresObj, data, seriesProperties):
self.aresObj, self.seriesProperties = aresObj, seriesProperties
resolvedAttrs = {}
self.rAttr(self._attrs, resolvedAttrs)
if getattr(self, '_statics', None) is not None:
seriesProperties.setdefault('static', {}).update(self._statics)
self.update(resolvedAttrs)
self.data = self.transformation(data)
self.config()
def config(self): pass
def rAttr(self, srcVals, dstVals, srcKey=None):
"""
:category:
:rubric: PY
:type: System
:dsc:
"""
if isinstance(srcVals, dict):
for key, val in srcVals.items():
if isinstance(val, dict):
dstVals[key] = {}
self.rAttr(val, dstVals[key])
else:
self.rAttr(val, dstVals, key)
elif isinstance(srcVals, list):
dstVals[srcKey] = []
for val in srcVals:
dstVals[srcKey].append({})
self.rAttr(val, dstVals[srcKey][-1])
else:
if srcKey is not None:
if isinstance(srcVals, str):
if srcVals.startswith("function") or srcVals.startswith("JSON.stringify"):
dstVals[srcKey] = srcVals
else:
dstVals[srcKey] = json.dumps(srcVals)
else:
dstVals[srcKey] = json.dumps(srcVals)
elif isinstance(dstVals, list):
dstVals.append(json.dumps(srcVals))
def toJs(self, options=None): return self
@classmethod
def transformation(cls, data):
"""
:category: Data Transformation
:rubric: PY
:type: Transformation
:dsc:
Data transformation for the DataFrame. Using this function might create a new DataFrame. Thus a new Javascript
object will be created and the logic within the global filters might not work correctly.
If you use this, please make it obvious to ensure other users might not be surprised
"""
return data
def addAttr(self, key, val, tree=None, category=None, isPyData=True):
    if isinstance(key, dict):
      # dict input: register every key/value pair individually
      for k, v in key.items():
        self.addAttr(k, v, tree=tree, category=category, isPyData=isPyData)
      return
if isPyData:
val = json.dumps(val, cls=AresJsEncoder.AresEncoder)
if category is None and tree is not None:
category, tree = tree, None
if tree is not None:
chartLocation = self[category]
if not isinstance(tree, list):
tree = [tree]
for subCategory in tree:
if isinstance(subCategory, tuple):
subCategory, subCategoryIndex = subCategory
else:
subCategory, subCategoryIndex = subCategory, 0
if subCategory in self.listAttributes:
if not subCategory in chartLocation:
chartLocation[subCategory] = []
for i in range(subCategoryIndex + 1):
chartLocation[subCategory].append({})
if len(chartLocation[subCategory]) < subCategoryIndex + 1:
for i in range(subCategoryIndex + 1):
if i not in chartLocation[subCategory]:
chartLocation[subCategory].append({})
chartLocation = chartLocation[subCategory][subCategoryIndex]
else:
if not subCategory in chartLocation:
chartLocation[subCategory] = {}
chartLocation = chartLocation[subCategory]
if isinstance(chartLocation, list):
chartLocation[0][key] = val
else:
chartLocation[key] = val
elif category is not None:
self.setdefault(category, {})[key] = val
else:
self[key] = val
def delAttr(self, keys, tree=None, category=None):
""" """
chart = self
if tree is not None:
chartLocation = self.get(category, {})
for subCategory in tree:
chartLocation = chartLocation.get(subCategory, {})
chart = chartLocation
if category is not None:
chart = self.get(category, {})
for attr in keys:
if attr in chart:
del chart[attr]
def _colors(self, cList, index=None):
"""
:category: Chart Series Colors
:rubric: JS
:type: Configuration
:dsc:
"""
if index is None:
for i in range(len(self.data._schema['values'])):
if len(cList) > i:
self.seriesProperties['dynamic'].setdefault(i, {})['backgroundColor'] = cList[i]
else:
self.seriesProperties['dynamic'].setdefault(index, {})['backgroundColor'] = cList
if __name__ == "__main__":
print(getConfigs(['ChartJs'])) | StarcoderdataPython |
1669620 | import pygame
import random
from inc_SpriteSheet import SpriteSheet
''' Enemy
(sprite group)
This class handles the badguys which fires lasers
'''
class Enemy(pygame.sprite.Sprite):
''' Init
This function is called automatically when we initialize the Class
'''
def __init__(self):
super().__init__()
self.animation_frames = [] # empty list to hold all sprite frames
# Load the sprite sheet
sprite_sheet = SpriteSheet("assets/Images/sprite_sheet.png")
# enemy sprites (3 frame animation)
image = sprite_sheet.get_image(0, 16, 16, 16); # (x, y, width, height)
self.animation_frames.append(image)
image = sprite_sheet.get_image(16, 16, 16, 16);
self.animation_frames.append(image)
image = sprite_sheet.get_image(32, 16, 16, 16);
self.animation_frames.append(image)
# enemy explosion (4 frame animation)
image = sprite_sheet.get_image(0, 32, 16, 16);
self.animation_frames.append(image)
image = sprite_sheet.get_image(16, 32, 16, 16);
self.animation_frames.append(image)
image = sprite_sheet.get_image(32, 32, 16, 16);
self.animation_frames.append(image)
image = sprite_sheet.get_image(48, 32, 16, 16);
self.animation_frames.append(image)
self.image = self.animation_frames[0] # set initial frame
# Create a mask for collision (same for both lasers)
self.mask = pygame.mask.from_surface(self.image)
self.rect = self.image.get_rect()
self.rect.x = -16 # enemy init location (horizontal) - offscreen
self.rect.y = -16 # enemy init location (vertical) - offscreen
self.frame = 1 # current animation frame
self.animation_time = 0 # animation delay speed
self.shoot_time = pygame.time.get_ticks() + random.randrange(0, 1000) # delay between firing
self.gun_loaded = 0 # ready to fire!
self.alive = True # Flag if we're alive or not
''' Update
Handles animations and gun timing
'''
def update(self):
if pygame.time.get_ticks() > self.shoot_time + 1000:
self.shoot_time = pygame.time.get_ticks() + random.randrange(0, 1000)
self.gun_loaded = 1
# Animation Frames
if pygame.time.get_ticks() > self.animation_time + 50:
self.animation_time = pygame.time.get_ticks()
self.frame = self.frame + 1
            if self.frame > 2 and self.alive: # reset animation loop
                self.frame = 0
            elif self.frame > 5 and not self.alive: # dead :(
                self.kill()
self.image = self.animation_frames[self.frame]
self.rect.x -= 1 # scoot across the screen kinda slow
# Offscreen, remove this sprite
if self.rect.y < -16:
self.kill()
if self.rect.y > 240:
self.kill()
if self.rect.x < -16:
self.kill()
if self.rect.x > 320:
self.kill()
''' Draw
Places the current animation frame image onto the passed screen
'''
def draw(self, win):
win.blit(self.image, self.rect)
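# A minimal usage sketch (screen setup, the main loop and spawn logic are
# assumed to live elsewhere):
#
#   enemies = pygame.sprite.Group()
#   enemies.add(Enemy())
#   # ...inside the game loop:
#   enemies.update()
#   enemies.draw(win)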
| StarcoderdataPython |
100540 | """Interfaces for interacting with Pinterest users"""
import logging
import json
from datetime import datetime
from dateutil import tz
from friendlypins.board import Board
class User(object):
"""Abstraction around a Pinterest user and their associated data"""
def __init__(self, url, rest_io):
"""
Args:
url (str): URL for this user, relative to the API root
rest_io (RestIO): reference to the Pinterest REST API
"""
self._log = logging.getLogger(__name__)
self._io = rest_io
self._relative_url = url
self._data_cache = None
@staticmethod
def default_fields():
"""list (str): list of fields we pre-populate when loading user data"""
return [
"id",
"username",
"first_name",
"last_name",
"bio",
"created_at",
"counts",
"image",
"account_type",
"url"
]
def refresh(self):
"""Updates cached response data describing the state of this user
NOTE: This method simply clears the internal cache, and updated
information will automatically be pulled on demand as additional
queries are made through the API"""
self._data_cache = None
@property
def _data(self):
"""dict: JSON response containing details of the users' profile
This internal helper caches the user profile data to minimize the
number of calls to the REST API, to make more efficient use of rate
limitations.
"""
if self._data_cache is not None:
return self._data_cache
self._log.debug("Getting authenticated user details...")
fields = ",".join(self.default_fields())
temp = self._io.get(self._relative_url, {"fields": fields})
assert 'data' in temp
self._data_cache = temp["data"]
return self._data_cache
def __str__(self):
return json.dumps(dict(self._data), sort_keys=True, indent=4)
def __repr__(self):
return "<{0} ({1} {2})>".format(
self.__class__.__name__,
self.first_name,
self.last_name)
@property
def unique_id(self):
"""int: Gets the internal unique ID associated with the user"""
return int(self._data['id'])
@property
def first_name(self):
"""str: the first name of the user"""
return self._data['first_name']
@property
def last_name(self):
"""str: the last name of the user"""
return self._data['last_name']
@property
def name(self):
"""str: the full name of the user
alias for first_name + last_name
"""
return "{0} {1}".format(self.first_name, self.last_name).strip()
@property
def username(self):
"""str: display name, used for logging in to Pinterest"""
return self._data["username"]
@property
def url(self):
"""str: the URL of the users profile"""
return self._data['url']
@property
def num_pins(self):
"""int: the total number of pins owned by this user"""
return self._data['counts']['pins']
@property
def num_boards(self):
"""int: the total number of boards owned by this user"""
return self._data['counts']['boards']
@property
def num_followers(self):
"""int: number of people following this Pinterest user"""
return self._data["counts"]["followers"]
@property
def created(self):
"""datetime.datetime: when this user's profile was created"""
# sample datetime to parse: "2020-07-21T16:16:03" (in UTC)
raw_date = self._data["created_at"]
retval = datetime.strptime(raw_date, "%Y-%m-%dT%H:%M:%S")
return retval.replace(tzinfo=tz.tzutc())
@property
def account_type(self):
"""str: type of Pinterest account (ie: individual / business)"""
return self._data["account_type"]
@property
def bio(self):
"""str: description of who this user is"""
return self._data["bio"]
@property
def boards(self):
"""Board: Generator for iterating over the boards owned by this user"""
self._log.debug('Loading boards for user %s...', self._relative_url)
properties = {
"fields": ','.join(Board.default_fields())
}
board_url = "{0}/boards".format(self._relative_url)
for cur_page in self._io.get_pages(board_url, properties):
assert 'data' in cur_page
for cur_item in cur_page['data']:
yield Board.from_json(cur_item, self._io)
def create_board(self, name, description=None):
"""Creates a new board for the currently authenticated user
Args:
name (str): name for the new board
description (str): optional descriptive text for the board
Returns:
Board: reference to the newly created board
"""
properties = {
"fields": ','.join(Board.default_fields())
}
data = {"name": name}
if description:
data["description"] = description
result = self._io.post("boards", data, properties)
return Board.from_json(result['data'], self._io)
if __name__ == "__main__": # pragma: no cover
pass
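    # A minimal usage sketch (construction of the RestIO instance is assumed
    # to happen elsewhere in the library):
    #
    #   user = User("me", rest_io)
    #   print(user.name, user.num_boards)
    #   for board in user.boards:
    #       print(repr(board))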
| StarcoderdataPython |
1727735 | from jinja2 import Template
# >>> template = Template('Hello {{ name }}!')
# >>> template.render(name='<NAME>')
template = ''' <div class="col-md-4 col-sm-6 portfolio-item">
<div>
<img src="img/games/jigsaw_puzzles.png" class="img-responsive" alt="">
</div>
<div class="portfolio-caption">
<a href="http://www.onet.pl">
<img src="img/google-play-badge.png" class="img-responsive center-block" alt="">
</a>
<h4>+</h4>
<a href="http://www.onet.pl">
<img src="img/ms-badge.png" class="img-responsive center-block" alt="">
</a>
<h3>Jigsaw Puzzle</h3>
<!-- <p class="text-muted">Website Design</p> -->
</div>
</div>'''
template2 = ''' {% for game in games %}
<div class="col-xs-6 col-sm-4 col-md-3 portfolio-item">
<div>
<img src="img/games/{{ game["image"] }}" class="img-responsive" alt="">
</div>
<div class="portfolio-caption">
<div class="row">{% for store in game["stores"] %}
<div class="col-xs-6 portfolio-item">
<a href="{{ store["link"] }}">
<img src="{{ store["badge"] }}" class="img-responsive center-block" alt="">
</a>
</div>{% endfor %}
</div>
<h4>{{ game["name"] }}</h4>
</div>
</div>
{% endfor %}
'''
def get_stores(stores):
    def get_store(store, col_class):
        css = f"{col_class} portfolio-caption".strip()
        return f"""<div class="{css}">
        <a href="{store["link"]}">
           <img src="{store["badge"]}" class="img-responsive center-block" alt="">
        </a>
        </div>"""
if len(stores) == 1:
return get_store(stores[0], "")
assert len(stores) == 2
return "\n".join(get_store(store, "col-xs-6") for store in stores)
def get_template_item(game):
return f"""
<div class="col-xs-6 col-sm-4 col-md-3 portfolio-item">
<a href = "{game["stores"][0]["link"]}" >
<div class="portfolio-caption">
<h4>{game["name"]}</h4>
</div>
<div class="portfolio-caption">
<img src="img/games/{game["image"]}" class="img-responsive" alt="">
</div>
{get_stores(game["stores"])}
</a>
</div>"""
def get_template_items(games):
return "\n".join([get_template_item(game) for game in games])
def googleLink(id):
return 'https://play.google.com/store/apps/details?id=com.crazyhappygame.{id}&pcampaignid=MKT-Other-global-all-co-prtnr-py-PartBadge-Mar2515-1'.format(id=id)
def getGoogleStore(id):
return {
"badge": "img/google-play-badge.png",
"link": googleLink(id)
}
def msLink(id):
return 'https://www.microsoft.com/store/apps/{id}?ocid=badge'.format(id=id)
def getMsStore(id):
return {
"badge": "img/ms-badge.png",
"link": msLink(id)
}
games = [
{
"name": "DotPoly",
"image": "dotpoly.png",
"stores": [getGoogleStore("dotpoly")]
},
{
"name": "DotToDot",
"image": "dottodot.png",
"stores": [getGoogleStore("dottodot")]
},
{
"name": "Mahjong",
"image": "mahjong.png",
"stores": [getGoogleStore("mahjong")]
},
{
"name": "Planet Draw",
"image": "planet_draw.png",
"stores": [getGoogleStore("planetdraw")]
},
{
"name": "Jigsaw Puzzle",
"image": "jigsaw_puzzle.png",
"stores": [getGoogleStore("jigsawpuzzle"), getMsStore("9nblggh4tpj1")]
},
{
"name": "Coloring Book",
"image": "coloring_book.png",
"stores": [getGoogleStore("coloringpagespuzzleforkids"), getMsStore("9nblggh4m297")]
},
{
"name": "<NAME>uzzle",
"image": "puzzle_animals.png",
"stores": [getGoogleStore("letterjigsawpuzzlesforkids"), getMsStore("9nblggh4nxmn")]
},
{
"name": "Puzzle/Memo",
"image": "bee_pack.png",
"stores": [getGoogleStore("kidspuzzlebeepack"), getMsStore("9nblggh3vrtd")]
},
{
"name": "<NAME>",
"image": "christmas_tree.png",
"stores": [getGoogleStore("christmastree")]
},
# {
# "name": "Smart Draw",
# "image": "smart_draw.png",
# "stores": [getGoogleStore("smartdraw")]
# },
{
"name": "<NAME>",
"image": "bee.png",
"stores": [getGoogleStore("kidspuzzlebee")]
},
{
"name": "Cats Puzzle",
"image": "puzzle_animals2.png",
"stores": [getGoogleStore("catsandmicejigsawpuzzlesforkids")]
}
]
t = Template(template2)
# print(t.render(games=games))
print(get_template_items(games))
| StarcoderdataPython |
3387673 | from django.apps import AppConfig
class ColdocappConfig(AppConfig):
name = 'ColDocApp'
default_auto_field = 'django.db.models.AutoField'
| StarcoderdataPython |
1774526 | <reponame>andyjohn23/python-instagram-clone
from django import forms
from .models import UserAccount, Profile
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import authenticate
class RegisterUserForm(UserCreationForm):
email = forms.EmailField(
        max_length=200, help_text='Required. Enter a valid email address.')
class Meta:
model = UserAccount
fields = ['username', 'email', '<PASSWORD>', '<PASSWORD>']
def clean_email(self):
email = self.cleaned_data['email'].lower()
try:
account = UserAccount.objects.get(email=email)
except Exception as e:
return email
raise forms.ValidationError(f'Email {email} is already in use!')
def clean_username(self):
username = self.cleaned_data['username']
try:
account = UserAccount.objects.get(username=username)
except Exception as e:
return username
raise forms.ValidationError(f'Username {username} is already in use!')
class AuthenticationForm(forms.ModelForm):
password = forms.CharField(label='password', widget=forms.PasswordInput)
class Meta:
model = UserAccount
fields = ('email', 'password')
def clean(self):
if self.is_valid():
email = self.cleaned_data['email']
password = self.cleaned_data['password']
if not authenticate(email=email, password=password):
raise forms.ValidationError('invalid login')
class UserUpdateForm(forms.ModelForm):
username = forms.CharField(widget=forms.TextInput(),
max_length=15, required=True)
email = forms.CharField(widget=forms.TextInput(),
max_length=100, required=True)
class Meta:
model = UserAccount
fields = ['username', 'email']
class ProfileUpdateForm(forms.ModelForm):
bio = forms.CharField(widget=forms.Textarea(
attrs={'class': 'input is-medium'}), max_length=150, required=False)
class Meta:
model = Profile
fields = ['bio']
class UserSearchForm(forms.Form):
q = forms.CharField()
c = forms.ModelChoiceField(
queryset=UserAccount.objects.all().order_by('username'))
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        self.fields['c'].required = False
        self.fields['c'].label = 'Category'
self.fields['q'].label = 'Search For'
self.fields['q'].widget.attrs.update(
{'class': 'form-control menudd'})
self.fields['q'].widget.attrs.update(
{'data-toggle': 'dropdown'}) | StarcoderdataPython |
3203665 | from .na_graves_top import *
from .na_graves_jng import *
from .na_graves_mid import *
from .na_graves_bot import *
from .na_graves_sup import *
| StarcoderdataPython |
3201805 | import numpy as np
from src.features import get_dtmp_distribution_statistics, get_dtl_distribution_statistics
from src.helpers import read_video
def test_get_dtmp_distribution_statistics():
df_video = read_video(video_id='001')
feature = get_dtmp_distribution_statistics(df_video, 'left', 'ankle', np.nanmedian)
assert len(feature) == 10
def test_get_dtl_distribution_statistics():
df_video = read_video(video_id='001')
feature = get_dtl_distribution_statistics(df_video, 'left', 'ankle', np.nanmedian)
assert len(feature) == 10
| StarcoderdataPython |
1749614 | from pathlib import Path
USER_REPOS_PAGE1 = '''\
[
{
"archive_url": "https://api.github.com/repos/jwodder/advent350/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/advent350/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/advent350/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/advent350/branches{/branch}",
"clone_url": "https://github.com/jwodder/advent350.git",
"collaborators_url": "https://api.github.com/repos/jwodder/advent350/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/advent350/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/advent350/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/advent350/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/advent350/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/advent350/contributors",
"created_at": "2009-07-03T04:36:49Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/advent350/deployments",
"description": "Yet another port of the Colossal Cave Adventure",
"downloads_url": "https://api.github.com/repos/jwodder/advent350/downloads",
"events_url": "https://api.github.com/repos/jwodder/advent350/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/advent350/forks",
"full_name": "jwodder/advent350",
"git_commits_url": "https://api.github.com/repos/jwodder/advent350/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/advent350/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/advent350/git/tags{/sha}",
"git_url": "git://github.com/jwodder/advent350.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": false,
"homepage": "",
"hooks_url": "https://api.github.com/repos/jwodder/advent350/hooks",
"html_url": "https://github.com/jwodder/advent350",
"id": 242192,
"issue_comment_url": "https://api.github.com/repos/jwodder/advent350/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/advent350/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/advent350/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/advent350/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/advent350/labels{/name}",
"language": "Perl6",
"languages_url": "https://api.github.com/repos/jwodder/advent350/languages",
"license": null,
"merges_url": "https://api.github.com/repos/jwodder/advent350/merges",
"milestones_url": "https://api.github.com/repos/jwodder/advent350/milestones{/number}",
"mirror_url": null,
"name": "advent350",
"notifications_url": "https://api.github.com/repos/jwodder/advent350/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/advent350/pulls{/number}",
"pushed_at": "2015-12-31T00:57:19Z",
"releases_url": "https://api.github.com/repos/jwodder/advent350/releases{/id}",
"size": 417,
"ssh_url": "[email protected]:jwodder/advent350.git",
"stargazers_count": 2,
"stargazers_url": "https://api.github.com/repos/jwodder/advent350/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/advent350/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/advent350/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/advent350/subscription",
"svn_url": "https://github.com/jwodder/advent350",
"tags_url": "https://api.github.com/repos/jwodder/advent350/tags",
"teams_url": "https://api.github.com/repos/jwodder/advent350/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/advent350/git/trees{/sha}",
"updated_at": "2016-08-14T18:09:16Z",
"url": "https://api.github.com/repos/jwodder/advent350",
"watchers": 2,
"watchers_count": 2
},
{
"archive_url": "https://api.github.com/repos/jwodder/aptrepo/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/aptrepo/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/aptrepo/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/aptrepo/branches{/branch}",
"clone_url": "https://github.com/jwodder/aptrepo.git",
"collaborators_url": "https://api.github.com/repos/jwodder/aptrepo/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/aptrepo/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/aptrepo/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/aptrepo/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/aptrepo/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/aptrepo/contributors",
"created_at": "2016-10-03T18:31:42Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/aptrepo/deployments",
"description": "Examining & traversing APT repositories",
"downloads_url": "https://api.github.com/repos/jwodder/aptrepo/downloads",
"events_url": "https://api.github.com/repos/jwodder/aptrepo/events",
"fork": false,
"forks": 1,
"forks_count": 1,
"forks_url": "https://api.github.com/repos/jwodder/aptrepo/forks",
"full_name": "jwodder/aptrepo",
"git_commits_url": "https://api.github.com/repos/jwodder/aptrepo/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/aptrepo/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/aptrepo/git/tags{/sha}",
"git_url": "git://github.com/jwodder/aptrepo.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/aptrepo/hooks",
"html_url": "https://github.com/jwodder/aptrepo",
"id": 69898888,
"issue_comment_url": "https://api.github.com/repos/jwodder/aptrepo/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/aptrepo/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/aptrepo/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/aptrepo/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/aptrepo/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/aptrepo/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/aptrepo/merges",
"milestones_url": "https://api.github.com/repos/jwodder/aptrepo/milestones{/number}",
"mirror_url": null,
"name": "aptrepo",
"notifications_url": "https://api.github.com/repos/jwodder/aptrepo/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/aptrepo/pulls{/number}",
"pushed_at": "2017-06-10T19:54:05Z",
"releases_url": "https://api.github.com/repos/jwodder/aptrepo/releases{/id}",
"size": 90,
"ssh_url": "git<EMAIL>.com:jwodder/aptrepo.git",
"stargazers_count": 2,
"stargazers_url": "https://api.github.com/repos/jwodder/aptrepo/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/aptrepo/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/aptrepo/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/aptrepo/subscription",
"svn_url": "https://github.com/jwodder/aptrepo",
"tags_url": "https://api.github.com/repos/jwodder/aptrepo/tags",
"teams_url": "https://api.github.com/repos/jwodder/aptrepo/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/aptrepo/git/trees{/sha}",
"updated_at": "2017-06-12T16:37:38Z",
"url": "https://api.github.com/repos/jwodder/aptrepo",
"watchers": 2,
"watchers_count": 2
},
{
"archive_url": "https://api.github.com/repos/jwodder/awesome-python/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/awesome-python/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/awesome-python/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/awesome-python/branches{/branch}",
"clone_url": "https://github.com/jwodder/awesome-python.git",
"collaborators_url": "https://api.github.com/repos/jwodder/awesome-python/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/awesome-python/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/awesome-python/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/awesome-python/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/awesome-python/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/awesome-python/contributors",
"created_at": "2017-04-15T23:25:28Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/awesome-python/deployments",
"description": "A curated list of awesome Python frameworks, libraries, software and resources",
"downloads_url": "https://api.github.com/repos/jwodder/awesome-python/downloads",
"events_url": "https://api.github.com/repos/jwodder/awesome-python/events",
"fork": true,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/awesome-python/forks",
"full_name": "jwodder/awesome-python",
"git_commits_url": "https://api.github.com/repos/jwodder/awesome-python/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/awesome-python/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/awesome-python/git/tags{/sha}",
"git_url": "git://github.com/jwodder/awesome-python.git",
"has_downloads": true,
"has_issues": false,
"has_pages": false,
"has_projects": true,
"has_wiki": false,
"homepage": "http://awesome-python.com/",
"hooks_url": "https://api.github.com/repos/jwodder/awesome-python/hooks",
"html_url": "https://github.com/jwodder/awesome-python",
"id": 88379694,
"issue_comment_url": "https://api.github.com/repos/jwodder/awesome-python/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/awesome-python/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/awesome-python/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/awesome-python/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/awesome-python/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/awesome-python/languages",
"license": null,
"merges_url": "https://api.github.com/repos/jwodder/awesome-python/merges",
"milestones_url": "https://api.github.com/repos/jwodder/awesome-python/milestones{/number}",
"mirror_url": null,
"name": "awesome-python",
"notifications_url": "https://api.github.com/repos/jwodder/awesome-python/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/awesome-python/pulls{/number}",
"pushed_at": "2017-04-15T23:37:44Z",
"releases_url": "https://api.github.com/repos/jwodder/awesome-python/releases{/id}",
"size": 3216,
"ssh_url": "[email protected]:jwodder/awesome-python.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/awesome-python/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/awesome-python/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/awesome-python/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/awesome-python/subscription",
"svn_url": "https://github.com/jwodder/awesome-python",
"tags_url": "https://api.github.com/repos/jwodder/awesome-python/tags",
"teams_url": "https://api.github.com/repos/jwodder/awesome-python/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/awesome-python/git/trees{/sha}",
"updated_at": "2017-04-15T23:25:31Z",
"url": "https://api.github.com/repos/jwodder/awesome-python",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/binheat/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/binheat/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/binheat/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/binheat/branches{/branch}",
"clone_url": "https://github.com/jwodder/binheat.git",
"collaborators_url": "https://api.github.com/repos/jwodder/binheat/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/binheat/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/binheat/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/binheat/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/binheat/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/binheat/contributors",
"created_at": "2014-04-30T21:35:56Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/binheat/deployments",
"description": "Binary heat map generator",
"downloads_url": "https://api.github.com/repos/jwodder/binheat/downloads",
"events_url": "https://api.github.com/repos/jwodder/binheat/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/binheat/forks",
"full_name": "jwodder/binheat",
"git_commits_url": "https://api.github.com/repos/jwodder/binheat/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/binheat/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/binheat/git/tags{/sha}",
"git_url": "git://github.com/jwodder/binheat.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": "",
"hooks_url": "https://api.github.com/repos/jwodder/binheat/hooks",
"html_url": "https://github.com/jwodder/binheat",
"id": 19328026,
"issue_comment_url": "https://api.github.com/repos/jwodder/binheat/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/binheat/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/binheat/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/binheat/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/binheat/labels{/name}",
"language": "Perl",
"languages_url": "https://api.github.com/repos/jwodder/binheat/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/binheat/merges",
"milestones_url": "https://api.github.com/repos/jwodder/binheat/milestones{/number}",
"mirror_url": null,
"name": "binheat",
"notifications_url": "https://api.github.com/repos/jwodder/binheat/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/binheat/pulls{/number}",
"pushed_at": "2014-10-29T22:20:10Z",
"releases_url": "https://api.github.com/repos/jwodder/binheat/releases{/id}",
"size": 172,
"ssh_url": "<EMAIL>:jwodder/binheat.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/binheat/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/binheat/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/binheat/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/binheat/subscription",
"svn_url": "https://github.com/jwodder/binheat",
"tags_url": "https://api.github.com/repos/jwodder/binheat/tags",
"teams_url": "https://api.github.com/repos/jwodder/binheat/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/binheat/git/trees{/sha}",
"updated_at": "2014-09-09T18:09:46Z",
"url": "https://api.github.com/repos/jwodder/binheat",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/bitvector.py/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/bitvector.py/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/bitvector.py/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/bitvector.py/branches{/branch}",
"clone_url": "https://github.com/jwodder/bitvector.py.git",
"collaborators_url": "https://api.github.com/repos/jwodder/bitvector.py/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/bitvector.py/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/bitvector.py/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/bitvector.py/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/bitvector.py/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/bitvector.py/contributors",
"created_at": "2014-05-21T00:10:35Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/bitvector.py/deployments",
"description": "[Abandoned] Python class for vectors of bits",
"downloads_url": "https://api.github.com/repos/jwodder/bitvector.py/downloads",
"events_url": "https://api.github.com/repos/jwodder/bitvector.py/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/bitvector.py/forks",
"full_name": "jwodder/bitvector.py",
"git_commits_url": "https://api.github.com/repos/jwodder/bitvector.py/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/bitvector.py/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/bitvector.py/git/tags{/sha}",
"git_url": "git://github.com/jwodder/bitvector.py.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": "",
"hooks_url": "https://api.github.com/repos/jwodder/bitvector.py/hooks",
"html_url": "https://github.com/jwodder/bitvector.py",
"id": 20002028,
"issue_comment_url": "https://api.github.com/repos/jwodder/bitvector.py/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/bitvector.py/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/bitvector.py/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/bitvector.py/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/bitvector.py/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/bitvector.py/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/bitvector.py/merges",
"milestones_url": "https://api.github.com/repos/jwodder/bitvector.py/milestones{/number}",
"mirror_url": null,
"name": "bitvector.py",
"notifications_url": "https://api.github.com/repos/jwodder/bitvector.py/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/bitvector.py/pulls{/number}",
"pushed_at": "2015-11-29T01:29:03Z",
"releases_url": "https://api.github.com/repos/jwodder/bitvector.py/releases{/id}",
"size": 21,
"ssh_url": "[email protected]:jwodder/bitvector.py.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/bitvector.py/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/bitvector.py/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/bitvector.py/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/bitvector.py/subscription",
"svn_url": "https://github.com/jwodder/bitvector.py",
"tags_url": "https://api.github.com/repos/jwodder/bitvector.py/tags",
"teams_url": "https://api.github.com/repos/jwodder/bitvector.py/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/bitvector.py/git/trees{/sha}",
"updated_at": "2016-03-16T02:10:02Z",
"url": "https://api.github.com/repos/jwodder/bitvector.py",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/caught/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/caught/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/caught/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/caught/branches{/branch}",
"clone_url": "https://github.com/jwodder/caught.git",
"collaborators_url": "https://api.github.com/repos/jwodder/caught/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/caught/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/caught/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/caught/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/caught/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/caught/contributors",
"created_at": "2015-04-13T02:01:49Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/caught/deployments",
"description": "Track caught Pokémon in multiple games",
"downloads_url": "https://api.github.com/repos/jwodder/caught/downloads",
"events_url": "https://api.github.com/repos/jwodder/caught/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/caught/forks",
"full_name": "jwodder/caught",
"git_commits_url": "https://api.github.com/repos/jwodder/caught/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/caught/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/caught/git/tags{/sha}",
"git_url": "git://github.com/jwodder/caught.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/caught/hooks",
"html_url": "https://github.com/jwodder/caught",
"id": 33842682,
"issue_comment_url": "https://api.github.com/repos/jwodder/caught/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/caught/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/caught/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/caught/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/caught/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/caught/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/caught/merges",
"milestones_url": "https://api.github.com/repos/jwodder/caught/milestones{/number}",
"mirror_url": null,
"name": "caught",
"notifications_url": "https://api.github.com/repos/jwodder/caught/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/caught/pulls{/number}",
"pushed_at": "2015-12-04T02:36:47Z",
"releases_url": "https://api.github.com/repos/jwodder/caught/releases{/id}",
"size": 80,
"ssh_url": "[email protected]:jwodder/caught.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/caught/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/caught/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/caught/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/caught/subscription",
"svn_url": "https://github.com/jwodder/caught",
"tags_url": "https://api.github.com/repos/jwodder/caught/tags",
"teams_url": "https://api.github.com/repos/jwodder/caught/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/caught/git/trees{/sha}",
"updated_at": "2015-04-13T02:02:34Z",
"url": "https://api.github.com/repos/jwodder/caught",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/Chess.hs/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/Chess.hs/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/Chess.hs/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/Chess.hs/branches{/branch}",
"clone_url": "https://github.com/jwodder/Chess.hs.git",
"collaborators_url": "https://api.github.com/repos/jwodder/Chess.hs/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/Chess.hs/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/Chess.hs/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/Chess.hs/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/Chess.hs/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/Chess.hs/contributors",
"created_at": "2014-05-08T00:15:23Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/Chess.hs/deployments",
"description": "Representing chess games & notation in Haskell",
"downloads_url": "https://api.github.com/repos/jwodder/Chess.hs/downloads",
"events_url": "https://api.github.com/repos/jwodder/Chess.hs/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/Chess.hs/forks",
"full_name": "jwodder/Chess.hs",
"git_commits_url": "https://api.github.com/repos/jwodder/Chess.hs/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/Chess.hs/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/Chess.hs/git/tags{/sha}",
"git_url": "git://github.com/jwodder/Chess.hs.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/Chess.hs/hooks",
"html_url": "https://github.com/jwodder/Chess.hs",
"id": 19554865,
"issue_comment_url": "https://api.github.com/repos/jwodder/Chess.hs/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/Chess.hs/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/Chess.hs/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/Chess.hs/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/Chess.hs/labels{/name}",
"language": "Haskell",
"languages_url": "https://api.github.com/repos/jwodder/Chess.hs/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/Chess.hs/merges",
"milestones_url": "https://api.github.com/repos/jwodder/Chess.hs/milestones{/number}",
"mirror_url": null,
"name": "Chess.hs",
"notifications_url": "https://api.github.com/repos/jwodder/Chess.hs/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/Chess.hs/pulls{/number}",
"pushed_at": "2015-06-06T19:12:11Z",
"releases_url": "https://api.github.com/repos/jwodder/Chess.hs/releases{/id}",
"size": 192,
"ssh_url": "[email protected]:jwodder/Chess.hs.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/Chess.hs/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/Chess.hs/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/Chess.hs/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/Chess.hs/subscription",
"svn_url": "https://github.com/jwodder/Chess.hs",
"tags_url": "https://api.github.com/repos/jwodder/Chess.hs/tags",
"teams_url": "https://api.github.com/repos/jwodder/Chess.hs/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/Chess.hs/git/trees{/sha}",
"updated_at": "2014-10-29T22:18:38Z",
"url": "https://api.github.com/repos/jwodder/Chess.hs",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/conjugate/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/conjugate/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/conjugate/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/conjugate/branches{/branch}",
"clone_url": "https://github.com/jwodder/conjugate.git",
"collaborators_url": "https://api.github.com/repos/jwodder/conjugate/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/conjugate/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/conjugate/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/conjugate/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/conjugate/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/conjugate/contributors",
"created_at": "2014-05-04T22:49:11Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/conjugate/deployments",
"description": "Latin verb conjugator",
"downloads_url": "https://api.github.com/repos/jwodder/conjugate/downloads",
"events_url": "https://api.github.com/repos/jwodder/conjugate/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/conjugate/forks",
"full_name": "jwodder/conjugate",
"git_commits_url": "https://api.github.com/repos/jwodder/conjugate/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/conjugate/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/conjugate/git/tags{/sha}",
"git_url": "git://github.com/jwodder/conjugate.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/conjugate/hooks",
"html_url": "https://github.com/jwodder/conjugate",
"id": 19437932,
"issue_comment_url": "https://api.github.com/repos/jwodder/conjugate/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/conjugate/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/conjugate/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/conjugate/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/conjugate/labels{/name}",
"language": "Perl",
"languages_url": "https://api.github.com/repos/jwodder/conjugate/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/conjugate/merges",
"milestones_url": "https://api.github.com/repos/jwodder/conjugate/milestones{/number}",
"mirror_url": null,
"name": "conjugate",
"notifications_url": "https://api.github.com/repos/jwodder/conjugate/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/conjugate/pulls{/number}",
"pushed_at": "2014-10-29T22:23:37Z",
"releases_url": "https://api.github.com/repos/jwodder/conjugate/releases{/id}",
"size": 148,
"ssh_url": "[email protected]:jwodder/conjugate.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/conjugate/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/conjugate/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/conjugate/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/conjugate/subscription",
"svn_url": "https://github.com/jwodder/conjugate",
"tags_url": "https://api.github.com/repos/jwodder/conjugate/tags",
"teams_url": "https://api.github.com/repos/jwodder/conjugate/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/conjugate/git/trees{/sha}",
"updated_at": "2014-09-09T18:13:20Z",
"url": "https://api.github.com/repos/jwodder/conjugate",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/daemail/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/daemail/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/daemail/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/daemail/branches{/branch}",
"clone_url": "https://github.com/jwodder/daemail.git",
"collaborators_url": "https://api.github.com/repos/jwodder/daemail/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/daemail/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/daemail/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/daemail/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/daemail/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/daemail/contributors",
"created_at": "2016-04-28T21:34:45Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/daemail/deployments",
"description": "Daemonize a command and e-mail the results",
"downloads_url": "https://api.github.com/repos/jwodder/daemail/downloads",
"events_url": "https://api.github.com/repos/jwodder/daemail/events",
"fork": false,
"forks": 1,
"forks_count": 1,
"forks_url": "https://api.github.com/repos/jwodder/daemail/forks",
"full_name": "jwodder/daemail",
"git_commits_url": "https://api.github.com/repos/jwodder/daemail/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/daemail/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/daemail/git/tags{/sha}",
"git_url": "git://github.com/jwodder/daemail.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": "",
"hooks_url": "https://api.github.com/repos/jwodder/daemail/hooks",
"html_url": "https://github.com/jwodder/daemail",
"id": 57335258,
"issue_comment_url": "https://api.github.com/repos/jwodder/daemail/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/daemail/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/daemail/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/daemail/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/daemail/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/daemail/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/daemail/merges",
"milestones_url": "https://api.github.com/repos/jwodder/daemail/milestones{/number}",
"mirror_url": null,
"name": "daemail",
"notifications_url": "https://api.github.com/repos/jwodder/daemail/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/daemail/pulls{/number}",
"pushed_at": "2017-04-17T17:54:23Z",
"releases_url": "https://api.github.com/repos/jwodder/daemail/releases{/id}",
"size": 129,
"ssh_url": "git<EMAIL>:jwodder/daemail.git",
"stargazers_count": 1,
"stargazers_url": "https://api.github.com/repos/jwodder/daemail/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/daemail/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/daemail/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/daemail/subscription",
"svn_url": "https://github.com/jwodder/daemail",
"tags_url": "https://api.github.com/repos/jwodder/daemail/tags",
"teams_url": "https://api.github.com/repos/jwodder/daemail/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/daemail/git/trees{/sha}",
"updated_at": "2016-10-23T19:52:40Z",
"url": "https://api.github.com/repos/jwodder/daemail",
"watchers": 1,
"watchers_count": 1
},
{
"archive_url": "https://api.github.com/repos/jwodder/doapi/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/doapi/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/doapi/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/doapi/branches{/branch}",
"clone_url": "https://github.com/jwodder/doapi.git",
"collaborators_url": "https://api.github.com/repos/jwodder/doapi/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/doapi/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/doapi/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/doapi/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/doapi/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/doapi/contributors",
"created_at": "2015-12-31T00:55:35Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/doapi/deployments",
"description": "DigitalOcean API Python library & CLI",
"downloads_url": "https://api.github.com/repos/jwodder/doapi/downloads",
"events_url": "https://api.github.com/repos/jwodder/doapi/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/doapi/forks",
"full_name": "jwodder/doapi",
"git_commits_url": "https://api.github.com/repos/jwodder/doapi/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/doapi/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/doapi/git/tags{/sha}",
"git_url": "git://github.com/jwodder/doapi.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/doapi/hooks",
"html_url": "https://github.com/jwodder/doapi",
"id": 48825963,
"issue_comment_url": "https://api.github.com/repos/jwodder/doapi/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/doapi/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/doapi/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/doapi/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/doapi/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/doapi/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/doapi/merges",
"milestones_url": "https://api.github.com/repos/jwodder/doapi/milestones{/number}",
"mirror_url": null,
"name": "doapi",
"notifications_url": "https://api.github.com/repos/jwodder/doapi/notifications{?since,all,participating}",
"open_issues": 11,
"open_issues_count": 11,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/doapi/pulls{/number}",
"pushed_at": "2017-04-17T18:05:10Z",
"releases_url": "https://api.github.com/repos/jwodder/doapi/releases{/id}",
"size": 480,
"ssh_url": "<EMAIL>:jwodder/doapi.git",
"stargazers_count": 3,
"stargazers_url": "https://api.github.com/repos/jwodder/doapi/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/doapi/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/doapi/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/doapi/subscription",
"svn_url": "https://github.com/jwodder/doapi",
"tags_url": "https://api.github.com/repos/jwodder/doapi/tags",
"teams_url": "https://api.github.com/repos/jwodder/doapi/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/doapi/git/trees{/sha}",
"updated_at": "2017-01-27T20:00:53Z",
"url": "https://api.github.com/repos/jwodder/doapi",
"watchers": 3,
"watchers_count": 3
},
{
"archive_url": "https://api.github.com/repos/jwodder/envec/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/envec/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/envec/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/envec/branches{/branch}",
"clone_url": "https://github.com/jwodder/envec.git",
"collaborators_url": "https://api.github.com/repos/jwodder/envec/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/envec/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/envec/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/envec/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/envec/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/envec/contributors",
"created_at": "2014-05-10T22:42:33Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/envec/deployments",
"description": "M:TG Oracle database scraper & offline manipulator",
"downloads_url": "https://api.github.com/repos/jwodder/envec/downloads",
"events_url": "https://api.github.com/repos/jwodder/envec/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/envec/forks",
"full_name": "jwodder/envec",
"git_commits_url": "https://api.github.com/repos/jwodder/envec/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/envec/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/envec/git/tags{/sha}",
"git_url": "git://github.com/jwodder/envec.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/envec/hooks",
"html_url": "https://github.com/jwodder/envec",
"id": 19653700,
"issue_comment_url": "https://api.github.com/repos/jwodder/envec/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/envec/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/envec/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/envec/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/envec/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/envec/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/envec/merges",
"milestones_url": "https://api.github.com/repos/jwodder/envec/milestones{/number}",
"mirror_url": null,
"name": "envec",
"notifications_url": "https://api.github.com/repos/jwodder/envec/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/envec/pulls{/number}",
"pushed_at": "2016-06-08T13:21:07Z",
"releases_url": "https://api.github.com/repos/jwodder/envec/releases{/id}",
"size": 556,
"ssh_url": "[email protected]:jwodder/envec.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/envec/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/envec/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/envec/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/envec/subscription",
"svn_url": "https://github.com/jwodder/envec",
"tags_url": "https://api.github.com/repos/jwodder/envec/tags",
"teams_url": "https://api.github.com/repos/jwodder/envec/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/envec/git/trees{/sha}",
"updated_at": "2016-01-14T00:14:48Z",
"url": "https://api.github.com/repos/jwodder/envec",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/euler/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/euler/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/euler/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/euler/branches{/branch}",
"clone_url": "https://github.com/jwodder/euler.git",
"collaborators_url": "https://api.github.com/repos/jwodder/euler/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/euler/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/euler/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/euler/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/euler/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/euler/contributors",
"created_at": "2014-07-04T01:58:55Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/euler/deployments",
"description": "Yet another Project Euler repository",
"downloads_url": "https://api.github.com/repos/jwodder/euler/downloads",
"events_url": "https://api.github.com/repos/jwodder/euler/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/euler/forks",
"full_name": "jwodder/euler",
"git_commits_url": "https://api.github.com/repos/jwodder/euler/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/euler/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/euler/git/tags{/sha}",
"git_url": "git://github.com/jwodder/euler.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/euler/hooks",
"html_url": "https://github.com/jwodder/euler",
"id": 21482466,
"issue_comment_url": "https://api.github.com/repos/jwodder/euler/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/euler/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/euler/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/euler/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/euler/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/euler/languages",
"license": null,
"merges_url": "https://api.github.com/repos/jwodder/euler/merges",
"milestones_url": "https://api.github.com/repos/jwodder/euler/milestones{/number}",
"mirror_url": null,
"name": "euler",
"notifications_url": "https://api.github.com/repos/jwodder/euler/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/euler/pulls{/number}",
"pushed_at": "2016-08-06T01:29:31Z",
"releases_url": "https://api.github.com/repos/jwodder/euler/releases{/id}",
"size": 371,
"ssh_url": "[email protected]:jwodder/euler.git",
"stargazers_count": 1,
"stargazers_url": "https://api.github.com/repos/jwodder/euler/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/euler/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/euler/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/euler/subscription",
"svn_url": "https://github.com/jwodder/euler",
"tags_url": "https://api.github.com/repos/jwodder/euler/tags",
"teams_url": "https://api.github.com/repos/jwodder/euler/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/euler/git/trees{/sha}",
"updated_at": "2016-01-22T15:54:11Z",
"url": "https://api.github.com/repos/jwodder/euler",
"watchers": 1,
"watchers_count": 1
},
{
"archive_url": "https://api.github.com/repos/jwodder/gforth-docker/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/gforth-docker/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/gforth-docker/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/gforth-docker/branches{/branch}",
"clone_url": "https://github.com/jwodder/gforth-docker.git",
"collaborators_url": "https://api.github.com/repos/jwodder/gforth-docker/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/gforth-docker/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/gforth-docker/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/gforth-docker/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/gforth-docker/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/gforth-docker/contributors",
"created_at": "2015-11-02T00:05:41Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/gforth-docker/deployments",
"description": "Gforth Docker image",
"downloads_url": "https://api.github.com/repos/jwodder/gforth-docker/downloads",
"events_url": "https://api.github.com/repos/jwodder/gforth-docker/events",
"fork": false,
"forks": 1,
"forks_count": 1,
"forks_url": "https://api.github.com/repos/jwodder/gforth-docker/forks",
"full_name": "jwodder/gforth-docker",
"git_commits_url": "https://api.github.com/repos/jwodder/gforth-docker/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/gforth-docker/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/gforth-docker/git/tags{/sha}",
"git_url": "git://github.com/jwodder/gforth-docker.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": "",
"hooks_url": "https://api.github.com/repos/jwodder/gforth-docker/hooks",
"html_url": "https://github.com/jwodder/gforth-docker",
"id": 45362940,
"issue_comment_url": "https://api.github.com/repos/jwodder/gforth-docker/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/gforth-docker/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/gforth-docker/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/gforth-docker/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/gforth-docker/labels{/name}",
"language": null,
"languages_url": "https://api.github.com/repos/jwodder/gforth-docker/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/gforth-docker/merges",
"milestones_url": "https://api.github.com/repos/jwodder/gforth-docker/milestones{/number}",
"mirror_url": null,
"name": "gforth-docker",
"notifications_url": "https://api.github.com/repos/jwodder/gforth-docker/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/gforth-docker/pulls{/number}",
"pushed_at": "2015-11-04T03:35:34Z",
"releases_url": "https://api.github.com/repos/jwodder/gforth-docker/releases{/id}",
"size": 168,
"ssh_url": "<EMAIL>:jwodder/gforth-docker.git",
"stargazers_count": 1,
"stargazers_url": "https://api.github.com/repos/jwodder/gforth-docker/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/gforth-docker/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/gforth-docker/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/gforth-docker/subscription",
"svn_url": "https://github.com/jwodder/gforth-docker",
"tags_url": "https://api.github.com/repos/jwodder/gforth-docker/tags",
"teams_url": "https://api.github.com/repos/jwodder/gforth-docker/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/gforth-docker/git/trees{/sha}",
"updated_at": "2015-11-02T02:50:16Z",
"url": "https://api.github.com/repos/jwodder/gforth-docker",
"watchers": 1,
"watchers_count": 1
},
{
"archive_url": "https://api.github.com/repos/jwodder/ghutil/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/ghutil/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/ghutil/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/ghutil/branches{/branch}",
"clone_url": "https://github.com/jwodder/ghutil.git",
"collaborators_url": "https://api.github.com/repos/jwodder/ghutil/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/ghutil/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/ghutil/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/ghutil/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/ghutil/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/ghutil/contributors",
"created_at": "2017-05-19T19:40:57Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/ghutil/deployments",
"description": "Interact with GitHub from the command line",
"downloads_url": "https://api.github.com/repos/jwodder/ghutil/downloads",
"events_url": "https://api.github.com/repos/jwodder/ghutil/events",
"fork": false,
"forks": 1,
"forks_count": 1,
"forks_url": "https://api.github.com/repos/jwodder/ghutil/forks",
"full_name": "jwodder/ghutil",
"git_commits_url": "https://api.github.com/repos/jwodder/ghutil/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/ghutil/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/ghutil/git/tags{/sha}",
"git_url": "git://github.com/jwodder/ghutil.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/ghutil/hooks",
"html_url": "https://github.com/jwodder/ghutil",
"id": 91839769,
"issue_comment_url": "https://api.github.com/repos/jwodder/ghutil/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/ghutil/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/ghutil/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/ghutil/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/ghutil/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/ghutil/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/ghutil/merges",
"milestones_url": "https://api.github.com/repos/jwodder/ghutil/milestones{/number}",
"mirror_url": null,
"name": "ghutil",
"notifications_url": "https://api.github.com/repos/jwodder/ghutil/notifications{?since,all,participating}",
"open_issues": 1,
"open_issues_count": 1,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/ghutil/pulls{/number}",
"pushed_at": "2017-07-20T14:03:23Z",
"releases_url": "https://api.github.com/repos/jwodder/ghutil/releases{/id}",
"size": 151,
"ssh_url": "[email protected]:jwodder/ghutil.git",
"stargazers_count": 3,
"stargazers_url": "https://api.github.com/repos/jwodder/ghutil/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/ghutil/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/ghutil/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/ghutil/subscription",
"svn_url": "https://github.com/jwodder/ghutil",
"tags_url": "https://api.github.com/repos/jwodder/ghutil/tags",
"teams_url": "https://api.github.com/repos/jwodder/ghutil/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/ghutil/git/trees{/sha}",
"updated_at": "2017-06-12T16:38:36Z",
"url": "https://api.github.com/repos/jwodder/ghutil",
"watchers": 3,
"watchers_count": 3
},
{
"archive_url": "https://api.github.com/repos/jwodder/groups/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/groups/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/groups/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/groups/branches{/branch}",
"clone_url": "https://github.com/jwodder/groups.git",
"collaborators_url": "https://api.github.com/repos/jwodder/groups/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/groups/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/groups/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/groups/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/groups/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/groups/contributors",
"created_at": "2014-05-18T23:37:03Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/groups/deployments",
"description": "Group theory code",
"downloads_url": "https://api.github.com/repos/jwodder/groups/downloads",
"events_url": "https://api.github.com/repos/jwodder/groups/events",
"fork": false,
"forks": 1,
"forks_count": 1,
"forks_url": "https://api.github.com/repos/jwodder/groups/forks",
"full_name": "jwodder/groups",
"git_commits_url": "https://api.github.com/repos/jwodder/groups/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/groups/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/groups/git/tags{/sha}",
"git_url": "git://github.com/jwodder/groups.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/groups/hooks",
"html_url": "https://github.com/jwodder/groups",
"id": 19923702,
"issue_comment_url": "https://api.github.com/repos/jwodder/groups/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/groups/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/groups/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/groups/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/groups/labels{/name}",
"language": "C++",
"languages_url": "https://api.github.com/repos/jwodder/groups/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/groups/merges",
"milestones_url": "https://api.github.com/repos/jwodder/groups/milestones{/number}",
"mirror_url": null,
"name": "groups",
"notifications_url": "https://api.github.com/repos/jwodder/groups/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/groups/pulls{/number}",
"pushed_at": "2016-03-07T00:35:50Z",
"releases_url": "https://api.github.com/repos/jwodder/groups/releases{/id}",
"size": 442,
"ssh_url": "[email protected]:jwodder/groups.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/groups/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/groups/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/groups/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/groups/subscription",
"svn_url": "https://github.com/jwodder/groups",
"tags_url": "https://api.github.com/repos/jwodder/groups/tags",
"teams_url": "https://api.github.com/repos/jwodder/groups/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/groups/git/trees{/sha}",
"updated_at": "2016-02-25T03:54:02Z",
"url": "https://api.github.com/repos/jwodder/groups",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/headerparser/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/headerparser/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/headerparser/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/headerparser/branches{/branch}",
"clone_url": "https://github.com/jwodder/headerparser.git",
"collaborators_url": "https://api.github.com/repos/jwodder/headerparser/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/headerparser/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/headerparser/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/headerparser/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/headerparser/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/headerparser/contributors",
"created_at": "2017-01-25T14:14:33Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/headerparser/deployments",
"description": "argparse for mail-style headers",
"downloads_url": "https://api.github.com/repos/jwodder/headerparser/downloads",
"events_url": "https://api.github.com/repos/jwodder/headerparser/events",
"fork": false,
"forks": 1,
"forks_count": 1,
"forks_url": "https://api.github.com/repos/jwodder/headerparser/forks",
"full_name": "jwodder/headerparser",
"git_commits_url": "https://api.github.com/repos/jwodder/headerparser/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/headerparser/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/headerparser/git/tags{/sha}",
"git_url": "git://github.com/jwodder/headerparser.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": "",
"hooks_url": "https://api.github.com/repos/jwodder/headerparser/hooks",
"html_url": "https://github.com/jwodder/headerparser",
"id": 80020669,
"issue_comment_url": "https://api.github.com/repos/jwodder/headerparser/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/headerparser/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/headerparser/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/headerparser/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/headerparser/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/headerparser/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/headerparser/merges",
"milestones_url": "https://api.github.com/repos/jwodder/headerparser/milestones{/number}",
"mirror_url": null,
"name": "headerparser",
"notifications_url": "https://api.github.com/repos/jwodder/headerparser/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/headerparser/pulls{/number}",
"pushed_at": "2017-07-19T17:28:08Z",
"releases_url": "https://api.github.com/repos/jwodder/headerparser/releases{/id}",
"size": 126,
"ssh_url": "[email protected]:jwodder/headerparser.git",
"stargazers_count": 1,
"stargazers_url": "https://api.github.com/repos/jwodder/headerparser/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/headerparser/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/headerparser/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/headerparser/subscription",
"svn_url": "https://github.com/jwodder/headerparser",
"tags_url": "https://api.github.com/repos/jwodder/headerparser/tags",
"teams_url": "https://api.github.com/repos/jwodder/headerparser/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/headerparser/git/trees{/sha}",
"updated_at": "2017-04-02T20:14:21Z",
"url": "https://api.github.com/repos/jwodder/headerparser",
"watchers": 1,
"watchers_count": 1
},
{
"archive_url": "https://api.github.com/repos/jwodder/hsgraphics/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/hsgraphics/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/hsgraphics/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/hsgraphics/branches{/branch}",
"clone_url": "https://github.com/jwodder/hsgraphics.git",
"collaborators_url": "https://api.github.com/repos/jwodder/hsgraphics/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/hsgraphics/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/hsgraphics/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/hsgraphics/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/hsgraphics/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/hsgraphics/contributors",
"created_at": "2014-06-22T02:15:41Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/hsgraphics/deployments",
"description": "Haskell raster images and raytracing",
"downloads_url": "https://api.github.com/repos/jwodder/hsgraphics/downloads",
"events_url": "https://api.github.com/repos/jwodder/hsgraphics/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/hsgraphics/forks",
"full_name": "jwodder/hsgraphics",
"git_commits_url": "https://api.github.com/repos/jwodder/hsgraphics/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/hsgraphics/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/hsgraphics/git/tags{/sha}",
"git_url": "git://github.com/jwodder/hsgraphics.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/hsgraphics/hooks",
"html_url": "https://github.com/jwodder/hsgraphics",
"id": 21083505,
"issue_comment_url": "https://api.github.com/repos/jwodder/hsgraphics/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/hsgraphics/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/hsgraphics/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/hsgraphics/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/hsgraphics/labels{/name}",
"language": "Haskell",
"languages_url": "https://api.github.com/repos/jwodder/hsgraphics/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/hsgraphics/merges",
"milestones_url": "https://api.github.com/repos/jwodder/hsgraphics/milestones{/number}",
"mirror_url": null,
"name": "hsgraphics",
"notifications_url": "https://api.github.com/repos/jwodder/hsgraphics/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/hsgraphics/pulls{/number}",
"pushed_at": "2014-10-29T22:30:14Z",
"releases_url": "https://api.github.com/repos/jwodder/hsgraphics/releases{/id}",
"size": 180,
"ssh_url": "[email protected]:jwodder/hsgraphics.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/hsgraphics/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/hsgraphics/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/hsgraphics/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/hsgraphics/subscription",
"svn_url": "https://github.com/jwodder/hsgraphics",
"tags_url": "https://api.github.com/repos/jwodder/hsgraphics/tags",
"teams_url": "https://api.github.com/repos/jwodder/hsgraphics/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/hsgraphics/git/trees{/sha}",
"updated_at": "2014-10-29T22:30:14Z",
"url": "https://api.github.com/repos/jwodder/hsgraphics",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/inplace/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/inplace/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/inplace/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/inplace/branches{/branch}",
"clone_url": "https://github.com/jwodder/inplace.git",
"collaborators_url": "https://api.github.com/repos/jwodder/inplace/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/inplace/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/inplace/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/inplace/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/inplace/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/inplace/contributors",
"created_at": "2016-11-09T15:50:18Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/inplace/deployments",
"description": "In-place file processing in Python",
"downloads_url": "https://api.github.com/repos/jwodder/inplace/downloads",
"events_url": "https://api.github.com/repos/jwodder/inplace/events",
"fork": false,
"forks": 1,
"forks_count": 1,
"forks_url": "https://api.github.com/repos/jwodder/inplace/forks",
"full_name": "jwodder/inplace",
"git_commits_url": "https://api.github.com/repos/jwodder/inplace/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/inplace/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/inplace/git/tags{/sha}",
"git_url": "git://github.com/jwodder/inplace.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/inplace/hooks",
"html_url": "https://github.com/jwodder/inplace",
"id": 73298075,
"issue_comment_url": "https://api.github.com/repos/jwodder/inplace/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/inplace/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/inplace/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/inplace/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/inplace/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/inplace/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/inplace/merges",
"milestones_url": "https://api.github.com/repos/jwodder/inplace/milestones{/number}",
"mirror_url": null,
"name": "inplace",
"notifications_url": "https://api.github.com/repos/jwodder/inplace/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/inplace/pulls{/number}",
"pushed_at": "2017-06-06T19:03:59Z",
"releases_url": "https://api.github.com/repos/jwodder/inplace/releases{/id}",
"size": 85,
"ssh_url": "git<EMAIL>.com:jwodder/inplace.git",
"stargazers_count": 2,
"stargazers_url": "https://api.github.com/repos/jwodder/inplace/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/inplace/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/inplace/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/inplace/subscription",
"svn_url": "https://github.com/jwodder/inplace",
"tags_url": "https://api.github.com/repos/jwodder/inplace/tags",
"teams_url": "https://api.github.com/repos/jwodder/inplace/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/inplace/git/trees{/sha}",
"updated_at": "2017-06-12T16:38:13Z",
"url": "https://api.github.com/repos/jwodder/inplace",
"watchers": 2,
"watchers_count": 2
},
{
"archive_url": "https://api.github.com/repos/jwodder/javaproperties/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/javaproperties/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/javaproperties/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/javaproperties/branches{/branch}",
"clone_url": "https://github.com/jwodder/javaproperties.git",
"collaborators_url": "https://api.github.com/repos/jwodder/javaproperties/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/javaproperties/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/javaproperties/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/javaproperties/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/javaproperties/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/javaproperties/contributors",
"created_at": "2016-08-13T22:19:16Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/javaproperties/deployments",
"description": "Python library for reading & writing Java .properties files",
"downloads_url": "https://api.github.com/repos/jwodder/javaproperties/downloads",
"events_url": "https://api.github.com/repos/jwodder/javaproperties/events",
"fork": false,
"forks": 2,
"forks_count": 2,
"forks_url": "https://api.github.com/repos/jwodder/javaproperties/forks",
"full_name": "jwodder/javaproperties",
"git_commits_url": "https://api.github.com/repos/jwodder/javaproperties/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/javaproperties/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/javaproperties/git/tags{/sha}",
"git_url": "git://github.com/jwodder/javaproperties.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/javaproperties/hooks",
"html_url": "https://github.com/jwodder/javaproperties",
"id": 65639217,
"issue_comment_url": "https://api.github.com/repos/jwodder/javaproperties/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/javaproperties/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/javaproperties/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/javaproperties/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/javaproperties/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/javaproperties/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/javaproperties/merges",
"milestones_url": "https://api.github.com/repos/jwodder/javaproperties/milestones{/number}",
"mirror_url": null,
"name": "javaproperties",
"notifications_url": "https://api.github.com/repos/jwodder/javaproperties/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/javaproperties/pulls{/number}",
"pushed_at": "2017-05-18T17:16:20Z",
"releases_url": "https://api.github.com/repos/jwodder/javaproperties/releases{/id}",
"size": 236,
"ssh_url": "<EMAIL>:jwodder/javaproperties.git",
"stargazers_count": 3,
"stargazers_url": "https://api.github.com/repos/jwodder/javaproperties/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/javaproperties/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/javaproperties/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/javaproperties/subscription",
"svn_url": "https://github.com/jwodder/javaproperties",
"tags_url": "https://api.github.com/repos/jwodder/javaproperties/tags",
"teams_url": "https://api.github.com/repos/jwodder/javaproperties/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/javaproperties/git/trees{/sha}",
"updated_at": "2017-05-30T15:40:02Z",
"url": "https://api.github.com/repos/jwodder/javaproperties",
"watchers": 3,
"watchers_count": 3
},
{
"archive_url": "https://api.github.com/repos/jwodder/javaproperties-cli/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/javaproperties-cli/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/javaproperties-cli/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/javaproperties-cli/branches{/branch}",
"clone_url": "https://github.com/jwodder/javaproperties-cli.git",
"collaborators_url": "https://api.github.com/repos/jwodder/javaproperties-cli/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/javaproperties-cli/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/javaproperties-cli/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/javaproperties-cli/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/javaproperties-cli/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/javaproperties-cli/contributors",
"created_at": "2017-04-20T13:33:30Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/javaproperties-cli/deployments",
"description": "Command-line manipulation of Java .properties files",
"downloads_url": "https://api.github.com/repos/jwodder/javaproperties-cli/downloads",
"events_url": "https://api.github.com/repos/jwodder/javaproperties-cli/events",
"fork": false,
"forks": 1,
"forks_count": 1,
"forks_url": "https://api.github.com/repos/jwodder/javaproperties-cli/forks",
"full_name": "jwodder/javaproperties-cli",
"git_commits_url": "https://api.github.com/repos/jwodder/javaproperties-cli/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/javaproperties-cli/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/javaproperties-cli/git/tags{/sha}",
"git_url": "git://github.com/jwodder/javaproperties-cli.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/javaproperties-cli/hooks",
"html_url": "https://github.com/jwodder/javaproperties-cli",
"id": 88869195,
"issue_comment_url": "https://api.github.com/repos/jwodder/javaproperties-cli/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/javaproperties-cli/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/javaproperties-cli/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/javaproperties-cli/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/javaproperties-cli/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/javaproperties-cli/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/javaproperties-cli/merges",
"milestones_url": "https://api.github.com/repos/jwodder/javaproperties-cli/milestones{/number}",
"mirror_url": null,
"name": "javaproperties-cli",
"notifications_url": "https://api.github.com/repos/jwodder/javaproperties-cli/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/javaproperties-cli/pulls{/number}",
"pushed_at": "2017-05-17T13:12:54Z",
"releases_url": "https://api.github.com/repos/jwodder/javaproperties-cli/releases{/id}",
"size": 206,
"ssh_url": "[email protected]:jwodder/javaproperties-cli.git",
"stargazers_count": 1,
"stargazers_url": "https://api.github.com/repos/jwodder/javaproperties-cli/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/javaproperties-cli/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/javaproperties-cli/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/javaproperties-cli/subscription",
"svn_url": "https://github.com/jwodder/javaproperties-cli",
"tags_url": "https://api.github.com/repos/jwodder/javaproperties-cli/tags",
"teams_url": "https://api.github.com/repos/jwodder/javaproperties-cli/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/javaproperties-cli/git/trees{/sha}",
"updated_at": "2017-04-27T02:11:05Z",
"url": "https://api.github.com/repos/jwodder/javaproperties-cli",
"watchers": 1,
"watchers_count": 1
},
{
"archive_url": "https://api.github.com/repos/jwodder/jbobaf/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/jbobaf/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/jbobaf/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/jbobaf/branches{/branch}",
"clone_url": "https://github.com/jwodder/jbobaf.git",
"collaborators_url": "https://api.github.com/repos/jwodder/jbobaf/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/jbobaf/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/jbobaf/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/jbobaf/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/jbobaf/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/jbobaf/contributors",
"created_at": "2010-03-20T02:06:23Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/jbobaf/deployments",
"description": "[Dead] Miscellaneous Lojban-related code",
"downloads_url": "https://api.github.com/repos/jwodder/jbobaf/downloads",
"events_url": "https://api.github.com/repos/jwodder/jbobaf/events",
"fork": false,
"forks": 4,
"forks_count": 4,
"forks_url": "https://api.github.com/repos/jwodder/jbobaf/forks",
"full_name": "jwodder/jbobaf",
"git_commits_url": "https://api.github.com/repos/jwodder/jbobaf/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/jbobaf/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/jbobaf/git/tags{/sha}",
"git_url": "git://github.com/jwodder/jbobaf.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": "",
"hooks_url": "https://api.github.com/repos/jwodder/jbobaf/hooks",
"html_url": "https://github.com/jwodder/jbobaf",
"id": 570896,
"issue_comment_url": "https://api.github.com/repos/jwodder/jbobaf/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/jbobaf/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/jbobaf/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/jbobaf/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/jbobaf/labels{/name}",
"language": "Perl",
"languages_url": "https://api.github.com/repos/jwodder/jbobaf/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/jbobaf/merges",
"milestones_url": "https://api.github.com/repos/jwodder/jbobaf/milestones{/number}",
"mirror_url": null,
"name": "jbobaf",
"notifications_url": "https://api.github.com/repos/jwodder/jbobaf/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/jbobaf/pulls{/number}",
"pushed_at": "2014-10-29T22:32:06Z",
"releases_url": "https://api.github.com/repos/jwodder/jbobaf/releases{/id}",
"size": 554,
"ssh_url": "[email protected]:jwodder/jbobaf.git",
"stargazers_count": 5,
"stargazers_url": "https://api.github.com/repos/jwodder/jbobaf/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/jbobaf/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/jbobaf/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/jbobaf/subscription",
"svn_url": "https://github.com/jwodder/jbobaf",
"tags_url": "https://api.github.com/repos/jwodder/jbobaf/tags",
"teams_url": "https://api.github.com/repos/jwodder/jbobaf/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/jbobaf/git/trees{/sha}",
"updated_at": "2016-02-21T05:41:30Z",
"url": "https://api.github.com/repos/jwodder/jbobaf",
"watchers": 5,
"watchers_count": 5
},
{
"archive_url": "https://api.github.com/repos/jwodder/julian/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/julian/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/julian/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/julian/branches{/branch}",
"clone_url": "https://github.com/jwodder/julian.git",
"collaborators_url": "https://api.github.com/repos/jwodder/julian/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/julian/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/julian/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/julian/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/julian/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/julian/contributors",
"created_at": "2014-04-05T19:07:23Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/julian/deployments",
"description": "Convert Julian dates to & from calendar dates",
"downloads_url": "https://api.github.com/repos/jwodder/julian/downloads",
"events_url": "https://api.github.com/repos/jwodder/julian/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/julian/forks",
"full_name": "jwodder/julian",
"git_commits_url": "https://api.github.com/repos/jwodder/julian/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/julian/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/julian/git/tags{/sha}",
"git_url": "git://github.com/jwodder/julian.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/julian/hooks",
"html_url": "https://github.com/jwodder/julian",
"id": 18474140,
"issue_comment_url": "https://api.github.com/repos/jwodder/julian/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/julian/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/julian/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/julian/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/julian/labels{/name}",
"language": "C",
"languages_url": "https://api.github.com/repos/jwodder/julian/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/julian/merges",
"milestones_url": "https://api.github.com/repos/jwodder/julian/milestones{/number}",
"mirror_url": null,
"name": "julian",
"notifications_url": "https://api.github.com/repos/jwodder/julian/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/julian/pulls{/number}",
"pushed_at": "2015-09-26T01:29:20Z",
"releases_url": "https://api.github.com/repos/jwodder/julian/releases{/id}",
"size": 236,
"ssh_url": "[email protected]:jwodder/julian.git",
"stargazers_count": 1,
"stargazers_url": "https://api.github.com/repos/jwodder/julian/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/julian/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/julian/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/julian/subscription",
"svn_url": "https://github.com/jwodder/julian",
"tags_url": "https://api.github.com/repos/jwodder/julian/tags",
"teams_url": "https://api.github.com/repos/jwodder/julian/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/julian/git/trees{/sha}",
"updated_at": "2015-10-07T01:22:09Z",
"url": "https://api.github.com/repos/jwodder/julian",
"watchers": 1,
"watchers_count": 1
},
{
"archive_url": "https://api.github.com/repos/jwodder/jwodder.github.io/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/jwodder.github.io/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/jwodder.github.io/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/jwodder.github.io/branches{/branch}",
"clone_url": "https://github.com/jwodder/jwodder.github.io.git",
"collaborators_url": "https://api.github.com/repos/jwodder/jwodder.github.io/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/jwodder.github.io/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/jwodder.github.io/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/jwodder.github.io/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/jwodder.github.io/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/jwodder.github.io/contributors",
"created_at": "2014-06-23T21:46:29Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/jwodder.github.io/deployments",
"description": "Yet another GitHub Pages site",
"downloads_url": "https://api.github.com/repos/jwodder/jwodder.github.io/downloads",
"events_url": "https://api.github.com/repos/jwodder/jwodder.github.io/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/jwodder.github.io/forks",
"full_name": "jwodder/jwodder.github.io",
"git_commits_url": "https://api.github.com/repos/jwodder/jwodder.github.io/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/jwodder.github.io/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/jwodder.github.io/git/tags{/sha}",
"git_url": "git://github.com/jwodder/jwodder.github.io.git",
"has_downloads": true,
"has_issues": true,
"has_pages": true,
"has_projects": true,
"has_wiki": true,
"homepage": "https://jwodder.github.io",
"hooks_url": "https://api.github.com/repos/jwodder/jwodder.github.io/hooks",
"html_url": "https://github.com/jwodder/jwodder.github.io",
"id": 21143368,
"issue_comment_url": "https://api.github.com/repos/jwodder/jwodder.github.io/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/jwodder.github.io/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/jwodder.github.io/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/jwodder.github.io/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/jwodder.github.io/labels{/name}",
"language": "CSS",
"languages_url": "https://api.github.com/repos/jwodder/jwodder.github.io/languages",
"license": null,
"merges_url": "https://api.github.com/repos/jwodder/jwodder.github.io/merges",
"milestones_url": "https://api.github.com/repos/jwodder/jwodder.github.io/milestones{/number}",
"mirror_url": null,
"name": "jwodder.github.io",
"notifications_url": "https://api.github.com/repos/jwodder/jwodder.github.io/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/jwodder.github.io/pulls{/number}",
"pushed_at": "2016-12-06T03:26:41Z",
"releases_url": "https://api.github.com/repos/jwodder/jwodder.github.io/releases{/id}",
"size": 125,
"ssh_url": "[email protected]:jwodder/jwodder.github.io.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/jwodder.github.io/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/jwodder.github.io/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/jwodder.github.io/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/jwodder.github.io/subscription",
"svn_url": "https://github.com/jwodder/jwodder.github.io",
"tags_url": "https://api.github.com/repos/jwodder/jwodder.github.io/tags",
"teams_url": "https://api.github.com/repos/jwodder/jwodder.github.io/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/jwodder.github.io/git/trees{/sha}",
"updated_at": "2017-01-29T22:21:35Z",
"url": "https://api.github.com/repos/jwodder/jwodder.github.io",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/lambdas/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/lambdas/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/lambdas/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/lambdas/branches{/branch}",
"clone_url": "https://github.com/jwodder/lambdas.git",
"collaborators_url": "https://api.github.com/repos/jwodder/lambdas/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/lambdas/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/lambdas/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/lambdas/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/lambdas/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/lambdas/contributors",
"created_at": "2014-06-14T23:57:33Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/lambdas/deployments",
"description": "Lambda calculus interpreter",
"downloads_url": "https://api.github.com/repos/jwodder/lambdas/downloads",
"events_url": "https://api.github.com/repos/jwodder/lambdas/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/lambdas/forks",
"full_name": "jwodder/lambdas",
"git_commits_url": "https://api.github.com/repos/jwodder/lambdas/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/lambdas/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/lambdas/git/tags{/sha}",
"git_url": "git://github.com/jwodder/lambdas.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/lambdas/hooks",
"html_url": "https://github.com/jwodder/lambdas",
"id": 20844504,
"issue_comment_url": "https://api.github.com/repos/jwodder/lambdas/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/lambdas/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/lambdas/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/lambdas/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/lambdas/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/lambdas/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/lambdas/merges",
"milestones_url": "https://api.github.com/repos/jwodder/lambdas/milestones{/number}",
"mirror_url": null,
"name": "lambdas",
"notifications_url": "https://api.github.com/repos/jwodder/lambdas/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/lambdas/pulls{/number}",
"pushed_at": "2015-01-11T03:57:09Z",
"releases_url": "https://api.github.com/repos/jwodder/lambdas/releases{/id}",
"size": 196,
"ssh_url": "[email protected]:jwodder/lambdas.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/lambdas/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/lambdas/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/lambdas/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/lambdas/subscription",
"svn_url": "https://github.com/jwodder/lambdas",
"tags_url": "https://api.github.com/repos/jwodder/lambdas/tags",
"teams_url": "https://api.github.com/repos/jwodder/lambdas/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/lambdas/git/trees{/sha}",
"updated_at": "2015-01-11T03:57:09Z",
"url": "https://api.github.com/repos/jwodder/lambdas",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/linesep/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/linesep/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/linesep/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/linesep/branches{/branch}",
"clone_url": "https://github.com/jwodder/linesep.git",
"collaborators_url": "https://api.github.com/repos/jwodder/linesep/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/linesep/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/linesep/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/linesep/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/linesep/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/linesep/contributors",
"created_at": "2016-05-12T13:44:52Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/linesep/deployments",
"description": "Handling lines with arbitrary separators",
"downloads_url": "https://api.github.com/repos/jwodder/linesep/downloads",
"events_url": "https://api.github.com/repos/jwodder/linesep/events",
"fork": false,
"forks": 1,
"forks_count": 1,
"forks_url": "https://api.github.com/repos/jwodder/linesep/forks",
"full_name": "jwodder/linesep",
"git_commits_url": "https://api.github.com/repos/jwodder/linesep/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/linesep/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/linesep/git/tags{/sha}",
"git_url": "git://github.com/jwodder/linesep.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": "",
"hooks_url": "https://api.github.com/repos/jwodder/linesep/hooks",
"html_url": "https://github.com/jwodder/linesep",
"id": 58645336,
"issue_comment_url": "https://api.github.com/repos/jwodder/linesep/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/linesep/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/linesep/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/linesep/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/linesep/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/linesep/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/linesep/merges",
"milestones_url": "https://api.github.com/repos/jwodder/linesep/milestones{/number}",
"mirror_url": null,
"name": "linesep",
"notifications_url": "https://api.github.com/repos/jwodder/linesep/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/linesep/pulls{/number}",
"pushed_at": "2017-05-29T17:53:24Z",
"releases_url": "https://api.github.com/repos/jwodder/linesep/releases{/id}",
"size": 49,
"ssh_url": "<EMAIL>:jwodder/linesep.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/linesep/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/linesep/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/linesep/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/linesep/subscription",
"svn_url": "https://github.com/jwodder/linesep",
"tags_url": "https://api.github.com/repos/jwodder/linesep/tags",
"teams_url": "https://api.github.com/repos/jwodder/linesep/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/linesep/git/trees{/sha}",
"updated_at": "2017-01-16T01:09:01Z",
"url": "https://api.github.com/repos/jwodder/linesep",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/literal_exec/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/literal_exec/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/literal_exec/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/literal_exec/branches{/branch}",
"clone_url": "https://github.com/jwodder/literal_exec.git",
"collaborators_url": "https://api.github.com/repos/jwodder/literal_exec/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/literal_exec/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/literal_exec/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/literal_exec/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/literal_exec/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/literal_exec/contributors",
"created_at": "2017-06-04T15:21:06Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/literal_exec/deployments",
"description": "Parse literal variable assignments from source files",
"downloads_url": "https://api.github.com/repos/jwodder/literal_exec/downloads",
"events_url": "https://api.github.com/repos/jwodder/literal_exec/events",
"fork": false,
"forks": 2,
"forks_count": 2,
"forks_url": "https://api.github.com/repos/jwodder/literal_exec/forks",
"full_name": "jwodder/literal_exec",
"git_commits_url": "https://api.github.com/repos/jwodder/literal_exec/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/literal_exec/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/literal_exec/git/tags{/sha}",
"git_url": "git://github.com/jwodder/literal_exec.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/literal_exec/hooks",
"html_url": "https://github.com/jwodder/literal_exec",
"id": 93322183,
"issue_comment_url": "https://api.github.com/repos/jwodder/literal_exec/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/literal_exec/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/literal_exec/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/literal_exec/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/literal_exec/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/literal_exec/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/literal_exec/merges",
"milestones_url": "https://api.github.com/repos/jwodder/literal_exec/milestones{/number}",
"mirror_url": null,
"name": "literal_exec",
"notifications_url": "https://api.github.com/repos/jwodder/literal_exec/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/literal_exec/pulls{/number}",
"pushed_at": "2017-07-07T15:03:33Z",
"releases_url": "https://api.github.com/repos/jwodder/literal_exec/releases{/id}",
"size": 24,
"ssh_url": "[email protected]:jwodder/literal_exec.git",
"stargazers_count": 1,
"stargazers_url": "https://api.github.com/repos/jwodder/literal_exec/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/literal_exec/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/literal_exec/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/literal_exec/subscription",
"svn_url": "https://github.com/jwodder/literal_exec",
"tags_url": "https://api.github.com/repos/jwodder/literal_exec/tags",
"teams_url": "https://api.github.com/repos/jwodder/literal_exec/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/literal_exec/git/trees{/sha}",
"updated_at": "2017-06-25T14:27:11Z",
"url": "https://api.github.com/repos/jwodder/literal_exec",
"watchers": 1,
"watchers_count": 1
},
{
"archive_url": "https://api.github.com/repos/jwodder/logger/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/logger/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/logger/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/logger/branches{/branch}",
"clone_url": "https://github.com/jwodder/logger.git",
"collaborators_url": "https://api.github.com/repos/jwodder/logger/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/logger/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/logger/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/logger/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/logger/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/logger/contributors",
"created_at": "2014-05-02T21:47:28Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/logger/deployments",
"description": "IRC logging bot",
"downloads_url": "https://api.github.com/repos/jwodder/logger/downloads",
"events_url": "https://api.github.com/repos/jwodder/logger/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/logger/forks",
"full_name": "jwodder/logger",
"git_commits_url": "https://api.github.com/repos/jwodder/logger/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/logger/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/logger/git/tags{/sha}",
"git_url": "git://github.com/jwodder/logger.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/logger/hooks",
"html_url": "https://github.com/jwodder/logger",
"id": 19389598,
"issue_comment_url": "https://api.github.com/repos/jwodder/logger/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/logger/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/logger/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/logger/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/logger/labels{/name}",
"language": "Perl",
"languages_url": "https://api.github.com/repos/jwodder/logger/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/logger/merges",
"milestones_url": "https://api.github.com/repos/jwodder/logger/milestones{/number}",
"mirror_url": null,
"name": "logger",
"notifications_url": "https://api.github.com/repos/jwodder/logger/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/logger/pulls{/number}",
"pushed_at": "2014-10-29T22:55:27Z",
"releases_url": "https://api.github.com/repos/jwodder/logger/releases{/id}",
"size": 168,
"ssh_url": "[email protected]:jwodder/logger.git",
"stargazers_count": 1,
"stargazers_url": "https://api.github.com/repos/jwodder/logger/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/logger/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/logger/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/logger/subscription",
"svn_url": "https://github.com/jwodder/logger",
"tags_url": "https://api.github.com/repos/jwodder/logger/tags",
"teams_url": "https://api.github.com/repos/jwodder/logger/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/logger/git/trees{/sha}",
"updated_at": "2017-03-29T20:11:01Z",
"url": "https://api.github.com/repos/jwodder/logger",
"watchers": 1,
"watchers_count": 1
},
{
"archive_url": "https://api.github.com/repos/jwodder/nethack-docker/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/nethack-docker/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/nethack-docker/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/nethack-docker/branches{/branch}",
"clone_url": "https://github.com/jwodder/nethack-docker.git",
"collaborators_url": "https://api.github.com/repos/jwodder/nethack-docker/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/nethack-docker/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/nethack-docker/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/nethack-docker/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/nethack-docker/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/nethack-docker/contributors",
"created_at": "2015-12-13T23:59:06Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/nethack-docker/deployments",
"description": "NetHack Docker image",
"downloads_url": "https://api.github.com/repos/jwodder/nethack-docker/downloads",
"events_url": "https://api.github.com/repos/jwodder/nethack-docker/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/nethack-docker/forks",
"full_name": "jwodder/nethack-docker",
"git_commits_url": "https://api.github.com/repos/jwodder/nethack-docker/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/nethack-docker/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/nethack-docker/git/tags{/sha}",
"git_url": "git://github.com/jwodder/nethack-docker.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/nethack-docker/hooks",
"html_url": "https://github.com/jwodder/nethack-docker",
"id": 47942009,
"issue_comment_url": "https://api.github.com/repos/jwodder/nethack-docker/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/nethack-docker/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/nethack-docker/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/nethack-docker/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/nethack-docker/labels{/name}",
"language": "Shell",
"languages_url": "https://api.github.com/repos/jwodder/nethack-docker/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/nethack-docker/merges",
"milestones_url": "https://api.github.com/repos/jwodder/nethack-docker/milestones{/number}",
"mirror_url": null,
"name": "nethack-docker",
"notifications_url": "https://api.github.com/repos/jwodder/nethack-docker/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/nethack-docker/pulls{/number}",
"pushed_at": "2016-03-19T23:40:07Z",
"releases_url": "https://api.github.com/repos/jwodder/nethack-docker/releases{/id}",
"size": 16,
"ssh_url": "[email protected]:jwodder/nethack-docker.git",
"stargazers_count": 2,
"stargazers_url": "https://api.github.com/repos/jwodder/nethack-docker/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/nethack-docker/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/nethack-docker/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/nethack-docker/subscription",
"svn_url": "https://github.com/jwodder/nethack-docker",
"tags_url": "https://api.github.com/repos/jwodder/nethack-docker/tags",
"teams_url": "https://api.github.com/repos/jwodder/nethack-docker/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/nethack-docker/git/trees{/sha}",
"updated_at": "2016-08-03T16:07:13Z",
"url": "https://api.github.com/repos/jwodder/nethack-docker",
"watchers": 2,
"watchers_count": 2
},
{
"archive_url": "https://api.github.com/repos/jwodder/notesys/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/notesys/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/notesys/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/notesys/branches{/branch}",
"clone_url": "https://github.com/jwodder/notesys.git",
"collaborators_url": "https://api.github.com/repos/jwodder/notesys/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/notesys/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/notesys/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/notesys/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/notesys/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/notesys/contributors",
"created_at": "2009-12-03T16:01:02Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/notesys/deployments",
"description": "[Abandoned] A web-based personal notes management system",
"downloads_url": "https://api.github.com/repos/jwodder/notesys/downloads",
"events_url": "https://api.github.com/repos/jwodder/notesys/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/notesys/forks",
"full_name": "jwodder/notesys",
"git_commits_url": "https://api.github.com/repos/jwodder/notesys/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/notesys/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/notesys/git/tags{/sha}",
"git_url": "git://github.com/jwodder/notesys.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": false,
"homepage": "",
"hooks_url": "https://api.github.com/repos/jwodder/notesys/hooks",
"html_url": "https://github.com/jwodder/notesys",
"id": 394200,
"issue_comment_url": "https://api.github.com/repos/jwodder/notesys/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/notesys/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/notesys/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/notesys/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/notesys/labels{/name}",
"language": "Perl",
"languages_url": "https://api.github.com/repos/jwodder/notesys/languages",
"license": null,
"merges_url": "https://api.github.com/repos/jwodder/notesys/merges",
"milestones_url": "https://api.github.com/repos/jwodder/notesys/milestones{/number}",
"mirror_url": null,
"name": "notesys",
"notifications_url": "https://api.github.com/repos/jwodder/notesys/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/notesys/pulls{/number}",
"pushed_at": "2014-05-22T20:29:59Z",
"releases_url": "https://api.github.com/repos/jwodder/notesys/releases{/id}",
"size": 184,
"ssh_url": "[email protected]:jwodder/notesys.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/notesys/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/notesys/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/notesys/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/notesys/subscription",
"svn_url": "https://github.com/jwodder/notesys",
"tags_url": "https://api.github.com/repos/jwodder/notesys/tags",
"teams_url": "https://api.github.com/repos/jwodder/notesys/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/notesys/git/trees{/sha}",
"updated_at": "2014-09-08T20:38:32Z",
"url": "https://api.github.com/repos/jwodder/notesys",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/open-humans-api/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/open-humans-api/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/open-humans-api/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/open-humans-api/branches{/branch}",
"clone_url": "https://github.com/jwodder/open-humans-api.git",
"collaborators_url": "https://api.github.com/repos/jwodder/open-humans-api/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/open-humans-api/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/open-humans-api/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/open-humans-api/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/open-humans-api/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/open-humans-api/contributors",
"created_at": "2017-07-08T18:38:44Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/open-humans-api/deployments",
"description": "Tools to work with our APIs.",
"downloads_url": "https://api.github.com/repos/jwodder/open-humans-api/downloads",
"events_url": "https://api.github.com/repos/jwodder/open-humans-api/events",
"fork": true,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/open-humans-api/forks",
"full_name": "jwodder/open-humans-api",
"git_commits_url": "https://api.github.com/repos/jwodder/open-humans-api/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/open-humans-api/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/open-humans-api/git/tags{/sha}",
"git_url": "git://github.com/jwodder/open-humans-api.git",
"has_downloads": true,
"has_issues": false,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/open-humans-api/hooks",
"html_url": "https://github.com/jwodder/open-humans-api",
"id": 96637377,
"issue_comment_url": "https://api.github.com/repos/jwodder/open-humans-api/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/open-humans-api/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/open-humans-api/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/open-humans-api/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/open-humans-api/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/open-humans-api/languages",
"license": null,
"merges_url": "https://api.github.com/repos/jwodder/open-humans-api/merges",
"milestones_url": "https://api.github.com/repos/jwodder/open-humans-api/milestones{/number}",
"mirror_url": null,
"name": "open-humans-api",
"notifications_url": "https://api.github.com/repos/jwodder/open-humans-api/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/open-humans-api/pulls{/number}",
"pushed_at": "2017-07-08T18:40:48Z",
"releases_url": "https://api.github.com/repos/jwodder/open-humans-api/releases{/id}",
"size": 27,
"ssh_url": "[email protected]:jwodder/open-humans-api.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/open-humans-api/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/open-humans-api/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/open-humans-api/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/open-humans-api/subscription",
"svn_url": "https://github.com/jwodder/open-humans-api",
"tags_url": "https://api.github.com/repos/jwodder/open-humans-api/tags",
"teams_url": "https://api.github.com/repos/jwodder/open-humans-api/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/open-humans-api/git/trees{/sha}",
"updated_at": "2017-07-08T18:38:46Z",
"url": "https://api.github.com/repos/jwodder/open-humans-api",
"watchers": 0,
"watchers_count": 0
}
]
'''
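# Second page of the mocked repository listing — presumably the response body
# for GET /users/jwodder/repos?page=2, continuing the fixture above.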
USER_REPOS_PAGE2 = '''\
[
{
"archive_url": "https://api.github.com/repos/jwodder/packaging/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/packaging/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/packaging/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/packaging/branches{/branch}",
"clone_url": "https://github.com/jwodder/packaging.git",
"collaborators_url": "https://api.github.com/repos/jwodder/packaging/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/packaging/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/packaging/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/packaging/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/packaging/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/packaging/contributors",
"created_at": "2017-06-11T23:02:07Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/packaging/deployments",
"description": "Core utilities for Python packages",
"downloads_url": "https://api.github.com/repos/jwodder/packaging/downloads",
"events_url": "https://api.github.com/repos/jwodder/packaging/events",
"fork": true,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/packaging/forks",
"full_name": "jwodder/packaging",
"git_commits_url": "https://api.github.com/repos/jwodder/packaging/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/packaging/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/packaging/git/tags{/sha}",
"git_url": "git://github.com/jwodder/packaging.git",
"has_downloads": true,
"has_issues": false,
"has_pages": false,
"has_projects": true,
"has_wiki": false,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/packaging/hooks",
"html_url": "https://github.com/jwodder/packaging",
"id": 94036941,
"issue_comment_url": "https://api.github.com/repos/jwodder/packaging/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/packaging/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/packaging/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/packaging/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/packaging/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/packaging/languages",
"license": null,
"merges_url": "https://api.github.com/repos/jwodder/packaging/merges",
"milestones_url": "https://api.github.com/repos/jwodder/packaging/milestones{/number}",
"mirror_url": null,
"name": "packaging",
"notifications_url": "https://api.github.com/repos/jwodder/packaging/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/packaging/pulls{/number}",
"pushed_at": "2017-06-14T19:33:29Z",
"releases_url": "https://api.github.com/repos/jwodder/packaging/releases{/id}",
"size": 279,
"ssh_url": "[email protected]:jwodder/packaging.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/packaging/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/packaging/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/packaging/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/packaging/subscription",
"svn_url": "https://github.com/jwodder/packaging",
"tags_url": "https://api.github.com/repos/jwodder/packaging/tags",
"teams_url": "https://api.github.com/repos/jwodder/packaging/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/packaging/git/trees{/sha}",
"updated_at": "2017-06-11T23:02:09Z",
"url": "https://api.github.com/repos/jwodder/packaging",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/peps/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/peps/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/peps/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/peps/branches{/branch}",
"clone_url": "https://github.com/jwodder/peps.git",
"collaborators_url": "https://api.github.com/repos/jwodder/peps/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/peps/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/peps/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/peps/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/peps/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/peps/contributors",
"created_at": "2017-06-18T02:28:46Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/peps/deployments",
"description": "Python Enhancement Proposals",
"downloads_url": "https://api.github.com/repos/jwodder/peps/downloads",
"events_url": "https://api.github.com/repos/jwodder/peps/events",
"fork": true,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/peps/forks",
"full_name": "jwodder/peps",
"git_commits_url": "https://api.github.com/repos/jwodder/peps/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/peps/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/peps/git/tags{/sha}",
"git_url": "git://github.com/jwodder/peps.git",
"has_downloads": true,
"has_issues": false,
"has_pages": false,
"has_projects": true,
"has_wiki": false,
"homepage": "https://www.python.org/dev/peps/",
"hooks_url": "https://api.github.com/repos/jwodder/peps/hooks",
"html_url": "https://github.com/jwodder/peps",
"id": 94660395,
"issue_comment_url": "https://api.github.com/repos/jwodder/peps/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/peps/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/peps/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/peps/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/peps/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/peps/languages",
"license": null,
"merges_url": "https://api.github.com/repos/jwodder/peps/merges",
"milestones_url": "https://api.github.com/repos/jwodder/peps/milestones{/number}",
"mirror_url": null,
"name": "peps",
"notifications_url": "https://api.github.com/repos/jwodder/peps/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/peps/pulls{/number}",
"pushed_at": "2017-06-25T14:42:55Z",
"releases_url": "https://api.github.com/repos/jwodder/peps/releases{/id}",
"size": 8759,
"ssh_url": "[email protected]:jwodder/peps.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/peps/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/peps/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/peps/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/peps/subscription",
"svn_url": "https://github.com/jwodder/peps",
"tags_url": "https://api.github.com/repos/jwodder/peps/tags",
"teams_url": "https://api.github.com/repos/jwodder/peps/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/peps/git/trees{/sha}",
"updated_at": "2017-06-24T17:33:07Z",
"url": "https://api.github.com/repos/jwodder/peps",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/psych/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/psych/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/psych/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/psych/branches{/branch}",
"clone_url": "https://github.com/jwodder/psych.git",
"collaborators_url": "https://api.github.com/repos/jwodder/psych/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/psych/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/psych/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/psych/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/psych/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/psych/contributors",
"created_at": "2009-09-28T21:25:58Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/psych/deployments",
"description": "Brainf*ck interpreter",
"downloads_url": "https://api.github.com/repos/jwodder/psych/downloads",
"events_url": "https://api.github.com/repos/jwodder/psych/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/psych/forks",
"full_name": "jwodder/psych",
"git_commits_url": "https://api.github.com/repos/jwodder/psych/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/psych/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/psych/git/tags{/sha}",
"git_url": "git://github.com/jwodder/psych.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": "",
"hooks_url": "https://api.github.com/repos/jwodder/psych/hooks",
"html_url": "https://github.com/jwodder/psych",
"id": 320444,
"issue_comment_url": "https://api.github.com/repos/jwodder/psych/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/psych/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/psych/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/psych/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/psych/labels{/name}",
"language": "Groff",
"languages_url": "https://api.github.com/repos/jwodder/psych/languages",
"license": {
"featured": true,
"key": "gpl-3.0",
"name": "GNU General Public License v3.0",
"spdx_id": "GPL-3.0",
"url": "https://api.github.com/licenses/gpl-3.0"
},
"merges_url": "https://api.github.com/repos/jwodder/psych/merges",
"milestones_url": "https://api.github.com/repos/jwodder/psych/milestones{/number}",
"mirror_url": null,
"name": "psych",
"notifications_url": "https://api.github.com/repos/jwodder/psych/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/psych/pulls{/number}",
"pushed_at": "2015-12-18T21:55:47Z",
"releases_url": "https://api.github.com/repos/jwodder/psych/releases{/id}",
"size": 57,
"ssh_url": "[email protected]:jwodder/psych.git",
"stargazers_count": 1,
"stargazers_url": "https://api.github.com/repos/jwodder/psych/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/psych/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/psych/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/psych/subscription",
"svn_url": "https://github.com/jwodder/psych",
"tags_url": "https://api.github.com/repos/jwodder/psych/tags",
"teams_url": "https://api.github.com/repos/jwodder/psych/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/psych/git/trees{/sha}",
"updated_at": "2015-12-18T21:55:48Z",
"url": "https://api.github.com/repos/jwodder/psych",
"watchers": 1,
"watchers_count": 1
},
{
"archive_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/branches{/branch}",
"clone_url": "https://github.com/jwodder/python-packaging-user-guide.git",
"collaborators_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/contributors",
"created_at": "2017-06-18T01:44:17Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/deployments",
"description": "Python Packaging User Guide",
"downloads_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/downloads",
"events_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/events",
"fork": true,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/forks",
"full_name": "jwodder/python-packaging-user-guide",
"git_commits_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/git/tags{/sha}",
"git_url": "git://github.com/jwodder/python-packaging-user-guide.git",
"has_downloads": true,
"has_issues": false,
"has_pages": false,
"has_projects": true,
"has_wiki": false,
"homepage": "http://packaging.python.org",
"hooks_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/hooks",
"html_url": "https://github.com/jwodder/python-packaging-user-guide",
"id": 94658816,
"issue_comment_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/languages",
"license": null,
"merges_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/merges",
"milestones_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/milestones{/number}",
"mirror_url": null,
"name": "python-packaging-user-guide",
"notifications_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/pulls{/number}",
"pushed_at": "2017-06-26T00:50:17Z",
"releases_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/releases{/id}",
"size": 2120,
"ssh_url": "[email protected]:jwodder/python-packaging-user-guide.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/subscription",
"svn_url": "https://github.com/jwodder/python-packaging-user-guide",
"tags_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/tags",
"teams_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/python-packaging-user-guide/git/trees{/sha}",
"updated_at": "2017-06-18T01:44:18Z",
"url": "https://api.github.com/repos/jwodder/python-packaging-user-guide",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/qypi/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/qypi/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/qypi/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/qypi/branches{/branch}",
"clone_url": "https://github.com/jwodder/qypi.git",
"collaborators_url": "https://api.github.com/repos/jwodder/qypi/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/qypi/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/qypi/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/qypi/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/qypi/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/qypi/contributors",
"created_at": "2017-03-28T01:44:27Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/qypi/deployments",
"description": "Query PyPI from the command line",
"downloads_url": "https://api.github.com/repos/jwodder/qypi/downloads",
"events_url": "https://api.github.com/repos/jwodder/qypi/events",
"fork": false,
"forks": 1,
"forks_count": 1,
"forks_url": "https://api.github.com/repos/jwodder/qypi/forks",
"full_name": "jwodder/qypi",
"git_commits_url": "https://api.github.com/repos/jwodder/qypi/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/qypi/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/qypi/git/tags{/sha}",
"git_url": "git://github.com/jwodder/qypi.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/qypi/hooks",
"html_url": "https://github.com/jwodder/qypi",
"id": 86402360,
"issue_comment_url": "https://api.github.com/repos/jwodder/qypi/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/qypi/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/qypi/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/qypi/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/qypi/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/qypi/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/qypi/merges",
"milestones_url": "https://api.github.com/repos/jwodder/qypi/milestones{/number}",
"mirror_url": null,
"name": "qypi",
"notifications_url": "https://api.github.com/repos/jwodder/qypi/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/qypi/pulls{/number}",
"pushed_at": "2017-07-03T19:26:28Z",
"releases_url": "https://api.github.com/repos/jwodder/qypi/releases{/id}",
"size": 62,
"ssh_url": "[email protected]:jwodder/qypi.git",
"stargazers_count": 3,
"stargazers_url": "https://api.github.com/repos/jwodder/qypi/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/qypi/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/qypi/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/qypi/subscription",
"svn_url": "https://github.com/jwodder/qypi",
"tags_url": "https://api.github.com/repos/jwodder/qypi/tags",
"teams_url": "https://api.github.com/repos/jwodder/qypi/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/qypi/git/trees{/sha}",
"updated_at": "2017-07-11T02:09:57Z",
"url": "https://api.github.com/repos/jwodder/qypi",
"watchers": 3,
"watchers_count": 3
},
{
"archive_url": "https://api.github.com/repos/jwodder/schedule/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/schedule/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/schedule/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/schedule/branches{/branch}",
"clone_url": "https://github.com/jwodder/schedule.git",
"collaborators_url": "https://api.github.com/repos/jwodder/schedule/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/schedule/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/schedule/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/schedule/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/schedule/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/schedule/contributors",
"created_at": "2014-04-30T21:49:22Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/schedule/deployments",
"description": "Weekly schedule typesetter",
"downloads_url": "https://api.github.com/repos/jwodder/schedule/downloads",
"events_url": "https://api.github.com/repos/jwodder/schedule/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/schedule/forks",
"full_name": "jwodder/schedule",
"git_commits_url": "https://api.github.com/repos/jwodder/schedule/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/schedule/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/schedule/git/tags{/sha}",
"git_url": "git://github.com/jwodder/schedule.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/schedule/hooks",
"html_url": "https://github.com/jwodder/schedule",
"id": 19328407,
"issue_comment_url": "https://api.github.com/repos/jwodder/schedule/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/schedule/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/schedule/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/schedule/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/schedule/labels{/name}",
"language": "Perl",
"languages_url": "https://api.github.com/repos/jwodder/schedule/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/schedule/merges",
"milestones_url": "https://api.github.com/repos/jwodder/schedule/milestones{/number}",
"mirror_url": null,
"name": "schedule",
"notifications_url": "https://api.github.com/repos/jwodder/schedule/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/schedule/pulls{/number}",
"pushed_at": "2014-10-29T22:38:39Z",
"releases_url": "https://api.github.com/repos/jwodder/schedule/releases{/id}",
"size": 148,
"ssh_url": "<EMAIL>:jwodder/schedule.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/schedule/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/schedule/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/schedule/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/schedule/subscription",
"svn_url": "https://github.com/jwodder/schedule",
"tags_url": "https://api.github.com/repos/jwodder/schedule/tags",
"teams_url": "https://api.github.com/repos/jwodder/schedule/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/schedule/git/trees{/sha}",
"updated_at": "2014-09-09T18:09:47Z",
"url": "https://api.github.com/repos/jwodder/schedule",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/scoreGismu/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/scoreGismu/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/scoreGismu/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/scoreGismu/branches{/branch}",
"clone_url": "https://github.com/jwodder/scoreGismu.git",
"collaborators_url": "https://api.github.com/repos/jwodder/scoreGismu/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/scoreGismu/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/scoreGismu/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/scoreGismu/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/scoreGismu/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/scoreGismu/contributors",
"created_at": "2014-05-01T00:04:06Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/scoreGismu/deployments",
"description": "{gismu} creation algorithm",
"downloads_url": "https://api.github.com/repos/jwodder/scoreGismu/downloads",
"events_url": "https://api.github.com/repos/jwodder/scoreGismu/events",
"fork": false,
"forks": 1,
"forks_count": 1,
"forks_url": "https://api.github.com/repos/jwodder/scoreGismu/forks",
"full_name": "jwodder/scoreGismu",
"git_commits_url": "https://api.github.com/repos/jwodder/scoreGismu/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/scoreGismu/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/scoreGismu/git/tags{/sha}",
"git_url": "git://github.com/jwodder/scoreGismu.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/scoreGismu/hooks",
"html_url": "https://github.com/jwodder/scoreGismu",
"id": 19331080,
"issue_comment_url": "https://api.github.com/repos/jwodder/scoreGismu/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/scoreGismu/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/scoreGismu/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/scoreGismu/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/scoreGismu/labels{/name}",
"language": "C++",
"languages_url": "https://api.github.com/repos/jwodder/scoreGismu/languages",
"license": null,
"merges_url": "https://api.github.com/repos/jwodder/scoreGismu/merges",
"milestones_url": "https://api.github.com/repos/jwodder/scoreGismu/milestones{/number}",
"mirror_url": null,
"name": "scoreGismu",
"notifications_url": "https://api.github.com/repos/jwodder/scoreGismu/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/scoreGismu/pulls{/number}",
"pushed_at": "2015-06-07T23:34:03Z",
"releases_url": "https://api.github.com/repos/jwodder/scoreGismu/releases{/id}",
"size": 194,
"ssh_url": "[email protected]:jwodder/scoreGismu.git",
"stargazers_count": 1,
"stargazers_url": "https://api.github.com/repos/jwodder/scoreGismu/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/scoreGismu/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/scoreGismu/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/scoreGismu/subscription",
"svn_url": "https://github.com/jwodder/scoreGismu",
"tags_url": "https://api.github.com/repos/jwodder/scoreGismu/tags",
"teams_url": "https://api.github.com/repos/jwodder/scoreGismu/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/scoreGismu/git/trees{/sha}",
"updated_at": "2015-10-07T01:22:23Z",
"url": "https://api.github.com/repos/jwodder/scoreGismu",
"watchers": 1,
"watchers_count": 1
},
{
"archive_url": "https://api.github.com/repos/jwodder/statjson/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/statjson/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/statjson/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/statjson/branches{/branch}",
"clone_url": "https://github.com/jwodder/statjson.git",
"collaborators_url": "https://api.github.com/repos/jwodder/statjson/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/statjson/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/statjson/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/statjson/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/statjson/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/statjson/contributors",
"created_at": "2017-05-08T15:54:09Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/statjson/deployments",
"description": "stat(2) output as JSON",
"downloads_url": "https://api.github.com/repos/jwodder/statjson/downloads",
"events_url": "https://api.github.com/repos/jwodder/statjson/events",
"fork": false,
"forks": 1,
"forks_count": 1,
"forks_url": "https://api.github.com/repos/jwodder/statjson/forks",
"full_name": "jwodder/statjson",
"git_commits_url": "https://api.github.com/repos/jwodder/statjson/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/statjson/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/statjson/git/tags{/sha}",
"git_url": "git://github.com/jwodder/statjson.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/statjson/hooks",
"html_url": "https://github.com/jwodder/statjson",
"id": 90646347,
"issue_comment_url": "https://api.github.com/repos/jwodder/statjson/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/statjson/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/statjson/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/statjson/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/statjson/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/statjson/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/statjson/merges",
"milestones_url": "https://api.github.com/repos/jwodder/statjson/milestones{/number}",
"mirror_url": null,
"name": "statjson",
"notifications_url": "https://api.github.com/repos/jwodder/statjson/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/statjson/pulls{/number}",
"pushed_at": "2017-05-13T18:04:44Z",
"releases_url": "https://api.github.com/repos/jwodder/statjson/releases{/id}",
"size": 23,
"ssh_url": "[email protected]:jwodder/statjson.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/statjson/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/statjson/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/statjson/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/statjson/subscription",
"svn_url": "https://github.com/jwodder/statjson",
"tags_url": "https://api.github.com/repos/jwodder/statjson/tags",
"teams_url": "https://api.github.com/repos/jwodder/statjson/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/statjson/git/trees{/sha}",
"updated_at": "2017-05-30T21:07:01Z",
"url": "https://api.github.com/repos/jwodder/statjson",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/Verity/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/Verity/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/Verity/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/Verity/branches{/branch}",
"clone_url": "https://github.com/jwodder/Verity.git",
"collaborators_url": "https://api.github.com/repos/jwodder/Verity/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/Verity/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/Verity/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/Verity/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/Verity/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/Verity/contributors",
"created_at": "2009-06-23T16:46:53Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/Verity/deployments",
"description": "Truth table generator",
"downloads_url": "https://api.github.com/repos/jwodder/Verity/downloads",
"events_url": "https://api.github.com/repos/jwodder/Verity/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/Verity/forks",
"full_name": "jwodder/Verity",
"git_commits_url": "https://api.github.com/repos/jwodder/Verity/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/Verity/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/Verity/git/tags{/sha}",
"git_url": "git://github.com/jwodder/Verity.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": "",
"hooks_url": "https://api.github.com/repos/jwodder/Verity/hooks",
"html_url": "https://github.com/jwodder/Verity",
"id": 234445,
"issue_comment_url": "https://api.github.com/repos/jwodder/Verity/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/Verity/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/Verity/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/Verity/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/Verity/labels{/name}",
"language": "C",
"languages_url": "https://api.github.com/repos/jwodder/Verity/languages",
"license": {
"featured": true,
"key": "gpl-3.0",
"name": "GNU General Public License v3.0",
"spdx_id": "GPL-3.0",
"url": "https://api.github.com/licenses/gpl-3.0"
},
"merges_url": "https://api.github.com/repos/jwodder/Verity/merges",
"milestones_url": "https://api.github.com/repos/jwodder/Verity/milestones{/number}",
"mirror_url": null,
"name": "Verity",
"notifications_url": "https://api.github.com/repos/jwodder/Verity/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/Verity/pulls{/number}",
"pushed_at": "2014-03-29T22:58:19Z",
"releases_url": "https://api.github.com/repos/jwodder/Verity/releases{/id}",
"size": 248,
"ssh_url": "[email protected]:jwodder/Verity.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/Verity/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/Verity/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/Verity/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/Verity/subscription",
"svn_url": "https://github.com/jwodder/Verity",
"tags_url": "https://api.github.com/repos/jwodder/Verity/tags",
"teams_url": "https://api.github.com/repos/jwodder/Verity/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/Verity/git/trees{/sha}",
"updated_at": "2014-06-24T22:54:05Z",
"url": "https://api.github.com/repos/jwodder/Verity",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/whitaker-docker/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/whitaker-docker/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/whitaker-docker/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/whitaker-docker/branches{/branch}",
"clone_url": "https://github.com/jwodder/whitaker-docker.git",
"collaborators_url": "https://api.github.com/repos/jwodder/whitaker-docker/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/whitaker-docker/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/whitaker-docker/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/whitaker-docker/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/whitaker-docker/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/whitaker-docker/contributors",
"created_at": "2016-04-10T23:52:16Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/whitaker-docker/deployments",
"description": "Whitaker's Words Docker image",
"downloads_url": "https://api.github.com/repos/jwodder/whitaker-docker/downloads",
"events_url": "https://api.github.com/repos/jwodder/whitaker-docker/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/whitaker-docker/forks",
"full_name": "jwodder/whitaker-docker",
"git_commits_url": "https://api.github.com/repos/jwodder/whitaker-docker/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/whitaker-docker/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/whitaker-docker/git/tags{/sha}",
"git_url": "git://github.com/jwodder/whitaker-docker.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/whitaker-docker/hooks",
"html_url": "https://github.com/jwodder/whitaker-docker",
"id": 55928611,
"issue_comment_url": "https://api.github.com/repos/jwodder/whitaker-docker/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/whitaker-docker/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/whitaker-docker/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/whitaker-docker/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/whitaker-docker/labels{/name}",
"language": null,
"languages_url": "https://api.github.com/repos/jwodder/whitaker-docker/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/whitaker-docker/merges",
"milestones_url": "https://api.github.com/repos/jwodder/whitaker-docker/milestones{/number}",
"mirror_url": null,
"name": "whitaker-docker",
"notifications_url": "https://api.github.com/repos/jwodder/whitaker-docker/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/whitaker-docker/pulls{/number}",
"pushed_at": "2016-04-14T23:40:19Z",
"releases_url": "https://api.github.com/repos/jwodder/whitaker-docker/releases{/id}",
"size": 2,
"ssh_url": "git<EMAIL>.com:jwodder/whitaker-docker.git",
"stargazers_count": 0,
"stargazers_url": "https://api.github.com/repos/jwodder/whitaker-docker/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/whitaker-docker/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/whitaker-docker/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/whitaker-docker/subscription",
"svn_url": "https://github.com/jwodder/whitaker-docker",
"tags_url": "https://api.github.com/repos/jwodder/whitaker-docker/tags",
"teams_url": "https://api.github.com/repos/jwodder/whitaker-docker/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/whitaker-docker/git/trees{/sha}",
"updated_at": "2016-04-10T23:52:16Z",
"url": "https://api.github.com/repos/jwodder/whitaker-docker",
"watchers": 0,
"watchers_count": 0
},
{
"archive_url": "https://api.github.com/repos/jwodder/whitaker2json/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/whitaker2json/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/whitaker2json/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/whitaker2json/branches{/branch}",
"clone_url": "https://github.com/jwodder/whitaker2json.git",
"collaborators_url": "https://api.github.com/repos/jwodder/whitaker2json/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/whitaker2json/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/whitaker2json/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/whitaker2json/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/whitaker2json/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/whitaker2json/contributors",
"created_at": "2016-02-28T04:48:26Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/whitaker2json/deployments",
"description": "Convert William Whitaker's DICTPAGE.RAW to JSON",
"downloads_url": "https://api.github.com/repos/jwodder/whitaker2json/downloads",
"events_url": "https://api.github.com/repos/jwodder/whitaker2json/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/whitaker2json/forks",
"full_name": "jwodder/whitaker2json",
"git_commits_url": "https://api.github.com/repos/jwodder/whitaker2json/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/whitaker2json/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/whitaker2json/git/tags{/sha}",
"git_url": "git://github.com/jwodder/whitaker2json.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": null,
"hooks_url": "https://api.github.com/repos/jwodder/whitaker2json/hooks",
"html_url": "https://github.com/jwodder/whitaker2json",
"id": 52705142,
"issue_comment_url": "https://api.github.com/repos/jwodder/whitaker2json/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/whitaker2json/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/whitaker2json/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/whitaker2json/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/whitaker2json/labels{/name}",
"language": "Python",
"languages_url": "https://api.github.com/repos/jwodder/whitaker2json/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/whitaker2json/merges",
"milestones_url": "https://api.github.com/repos/jwodder/whitaker2json/milestones{/number}",
"mirror_url": null,
"name": "whitaker2json",
"notifications_url": "https://api.github.com/repos/jwodder/whitaker2json/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/whitaker2json/pulls{/number}",
"pushed_at": "2017-02-21T15:26:27Z",
"releases_url": "https://api.github.com/repos/jwodder/whitaker2json/releases{/id}",
"size": 38,
"ssh_url": "[email protected]:jwodder/whitaker2json.git",
"stargazers_count": 2,
"stargazers_url": "https://api.github.com/repos/jwodder/whitaker2json/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/whitaker2json/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/whitaker2json/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/whitaker2json/subscription",
"svn_url": "https://github.com/jwodder/whitaker2json",
"tags_url": "https://api.github.com/repos/jwodder/whitaker2json/tags",
"teams_url": "https://api.github.com/repos/jwodder/whitaker2json/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/whitaker2json/git/trees{/sha}",
"updated_at": "2017-07-09T23:26:26Z",
"url": "https://api.github.com/repos/jwodder/whitaker2json",
"watchers": 2,
"watchers_count": 2
},
{
"archive_url": "https://api.github.com/repos/jwodder/xattr/{archive_format}{/ref}",
"assignees_url": "https://api.github.com/repos/jwodder/xattr/assignees{/user}",
"blobs_url": "https://api.github.com/repos/jwodder/xattr/git/blobs{/sha}",
"branches_url": "https://api.github.com/repos/jwodder/xattr/branches{/branch}",
"clone_url": "https://github.com/jwodder/xattr.git",
"collaborators_url": "https://api.github.com/repos/jwodder/xattr/collaborators{/collaborator}",
"comments_url": "https://api.github.com/repos/jwodder/xattr/comments{/number}",
"commits_url": "https://api.github.com/repos/jwodder/xattr/commits{/sha}",
"compare_url": "https://api.github.com/repos/jwodder/xattr/compare/{base}...{head}",
"contents_url": "https://api.github.com/repos/jwodder/xattr/contents/{+path}",
"contributors_url": "https://api.github.com/repos/jwodder/xattr/contributors",
"created_at": "2009-09-28T20:33:40Z",
"default_branch": "master",
"deployments_url": "https://api.github.com/repos/jwodder/xattr/deployments",
"description": "[Obsolete] Extended attributes manipulator for Mac OS X",
"downloads_url": "https://api.github.com/repos/jwodder/xattr/downloads",
"events_url": "https://api.github.com/repos/jwodder/xattr/events",
"fork": false,
"forks": 0,
"forks_count": 0,
"forks_url": "https://api.github.com/repos/jwodder/xattr/forks",
"full_name": "jwodder/xattr",
"git_commits_url": "https://api.github.com/repos/jwodder/xattr/git/commits{/sha}",
"git_refs_url": "https://api.github.com/repos/jwodder/xattr/git/refs{/sha}",
"git_tags_url": "https://api.github.com/repos/jwodder/xattr/git/tags{/sha}",
"git_url": "git://github.com/jwodder/xattr.git",
"has_downloads": true,
"has_issues": true,
"has_pages": false,
"has_projects": true,
"has_wiki": true,
"homepage": "",
"hooks_url": "https://api.github.com/repos/jwodder/xattr/hooks",
"html_url": "https://github.com/jwodder/xattr",
"id": 320398,
"issue_comment_url": "https://api.github.com/repos/jwodder/xattr/issues/comments{/number}",
"issue_events_url": "https://api.github.com/repos/jwodder/xattr/issues/events{/number}",
"issues_url": "https://api.github.com/repos/jwodder/xattr/issues{/number}",
"keys_url": "https://api.github.com/repos/jwodder/xattr/keys{/key_id}",
"labels_url": "https://api.github.com/repos/jwodder/xattr/labels{/name}",
"language": "C",
"languages_url": "https://api.github.com/repos/jwodder/xattr/languages",
"license": {
"featured": true,
"key": "mit",
"name": "MIT License",
"spdx_id": "MIT",
"url": "https://api.github.com/licenses/mit"
},
"merges_url": "https://api.github.com/repos/jwodder/xattr/merges",
"milestones_url": "https://api.github.com/repos/jwodder/xattr/milestones{/number}",
"mirror_url": null,
"name": "xattr",
"notifications_url": "https://api.github.com/repos/jwodder/xattr/notifications{?since,all,participating}",
"open_issues": 0,
"open_issues_count": 0,
"owner": {
"avatar_url": "https://avatars1.githubusercontent.com/u/98207?v=4",
"events_url": "https://api.github.com/users/jwodder/events{/privacy}",
"followers_url": "https://api.github.com/users/jwodder/followers",
"following_url": "https://api.github.com/users/jwodder/following{/other_user}",
"gists_url": "https://api.github.com/users/jwodder/gists{/gist_id}",
"gravatar_id": "",
"html_url": "https://github.com/jwodder",
"id": 98207,
"login": "jwodder",
"organizations_url": "https://api.github.com/users/jwodder/orgs",
"received_events_url": "https://api.github.com/users/jwodder/received_events",
"repos_url": "https://api.github.com/users/jwodder/repos",
"site_admin": false,
"starred_url": "https://api.github.com/users/jwodder/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jwodder/subscriptions",
"type": "User",
"url": "https://api.github.com/users/jwodder"
},
"permissions": {
"admin": true,
"pull": true,
"push": true
},
"private": false,
"pulls_url": "https://api.github.com/repos/jwodder/xattr/pulls{/number}",
"pushed_at": "2014-07-12T23:14:06Z",
"releases_url": "https://api.github.com/repos/jwodder/xattr/releases{/id}",
"size": 136,
"ssh_url": "<EMAIL>:jwodder/xattr.git",
"stargazers_count": 1,
"stargazers_url": "https://api.github.com/repos/jwodder/xattr/stargazers",
"statuses_url": "https://api.github.com/repos/jwodder/xattr/statuses/{sha}",
"subscribers_url": "https://api.github.com/repos/jwodder/xattr/subscribers",
"subscription_url": "https://api.github.com/repos/jwodder/xattr/subscription",
"svn_url": "https://github.com/jwodder/xattr",
"tags_url": "https://api.github.com/repos/jwodder/xattr/tags",
"teams_url": "https://api.github.com/repos/jwodder/xattr/teams",
"topics": [],
"trees_url": "https://api.github.com/repos/jwodder/xattr/git/trees{/sha}",
"updated_at": "2016-02-21T05:40:57Z",
"url": "https://api.github.com/repos/jwodder/xattr",
"watchers": 1,
"watchers_count": 1
}
]
'''
def test_request_user_repos(cmd):
r = cmd('request', '/user/repos')
assert r.exit_code == 0
assert r.output == USER_REPOS_PAGE1
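# --paginate is expected to follow the API's pagination and emit both fixture pages
# in order (the page-2 URL asserted further below implies Link-header style paging).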
def test_request_paginate_user_repos(cmd):
r = cmd('request', '--paginate', '/user/repos')
assert r.exit_code == 0
assert r.output == USER_REPOS_PAGE1 + USER_REPOS_PAGE2
def test_request_debug_user_repos(cmd):
r = cmd('--debug', 'request', '/user/repos')
assert r.exit_code == 0
assert r.output == 'GET https://api.github.com/user/repos\n' \
+ USER_REPOS_PAGE1
def test_request_debug_paginate_user_repos(cmd):
r = cmd('--debug', 'request', '--paginate', '/user/repos')
assert r.exit_code == 0
assert r.output == 'GET https://api.github.com/user/repos\n' \
+ USER_REPOS_PAGE1 \
+ 'GET https://api.github.com/user/repos?page=2\n' \
+ USER_REPOS_PAGE2
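# Non-JSON responses (here, a GitHub .diff) should be passed through verbatim rather
# than parsed or pretty-printed.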
def test_request_nonjson(cmd):
r = cmd('request', 'https://github.com/vinta/awesome-python/pull/875.diff')
assert r.exit_code == 0
assert r.output == '''\
diff --git a/README.md b/README.md
index 0f63a961..3508e953 100644
--- a/README.md
+++ b/README.md
@@ -777,6 +777,7 @@ Inspired by [awesome-php](https://github.com/ziadoz/awesome-php).
\n *Useful libraries or tools that don't fit in the categories above.*
\n+* [attrs](https://github.com/python-attrs/attrs) - Replacement for `__init__`, `__eq__`, `__repr__`, etc. boilerplate in class definitions.
* [blinker](https://github.com/jek/blinker) - A fast Python in-process signal/event dispatching system.
* [itsdangerous](https://github.com/pallets/itsdangerous) - Various helpers to pass trusted data to untrusted environments.
* [pluginbase](https://github.com/mitsuhiko/pluginbase) - A simple but flexible plugin system for Python.
'''
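# The three POST tests below exercise the curl-like -d forms: an inline body,
# -d@FILE to read the body from a file, and -d@- to read it from stdin.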
def test_request_post_data(cmd):
r = cmd(
'--debug',
'request',
'-XPOST',
'-H', 'Content-Type: application/json',
'-d{"name": "Test Label", "color": "FF0000"}',
'https://api.github.com/repos/jwodder/test/labels',
)
assert r.exit_code == 0
assert r.output == '''\
POST https://api.github.com/repos/jwodder/test/labels
{"name": "Test Label", "color": "FF0000"}
{
"color": "FF0000",
"default": false,
"id": 671710206,
"name": "Test Label",
"url": "https://api.github.com/repos/jwodder/test/labels/Test%20Label"
}
'''
def test_request_post_data_file(cmd):
r = cmd(
'--debug',
'request',
'-XPOST',
'-H', 'Content-Type: application/json',
'-d@' + str(Path(__file__).with_name('data')/'files'/'label.json'),
'https://api.github.com/repos/jwodder/test/labels',
)
assert r.exit_code == 0
assert r.output == '''\
POST https://api.github.com/repos/jwodder/test/labels
{"name": "Test Label", "color": "FF0000"}
{
"color": "FF0000",
"default": false,
"id": 671710206,
"name": "Test Label",
"url": "https://api.github.com/repos/jwodder/test/labels/Test%20Label"
}
'''
def test_request_post_data_stdin(cmd):
r = cmd(
'--debug',
'request',
'-XPOST',
'-H', 'Content-Type: application/json',
'-d@-',
'https://api.github.com/repos/jwodder/test/labels',
input='{"name": "Test Label", "color": "FF0000"}',
)
assert r.exit_code == 0
assert r.output == '''\
POST https://api.github.com/repos/jwodder/test/labels
{"name": "Test Label", "color": "FF0000"}
{
"color": "FF0000",
"default": false,
"id": 671710206,
"name": "Test Label",
"url": "https://api.github.com/repos/jwodder/test/labels/Test%20Label"
}
'''
| StarcoderdataPython |
1749065 | import datetime as dt
import blpapi
import logging
from .BbgRefDataService import BbgRefDataService
import pandas as pd
import numpy as np
from . import BbgLogger
import pytz
from tzlocal import get_localzone
logger = BbgLogger.logger
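# blpapi element Names used to build intraday requests and decode response messages.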
SECURITY_DATA = blpapi.Name("securityData")
SECURITY = blpapi.Name("security")
FIELD_DATA = blpapi.Name("fieldData")
FIELD_EXCEPTIONS = blpapi.Name("fieldExceptions")
FIELD_ID = blpapi.Name("fieldId")
ERROR_INFO = blpapi.Name("errorInfo")
BAR_DATA = blpapi.Name("barData")
BAR_TICK_DATA = blpapi.Name("barTickData")
OPEN = blpapi.Name("open")
HIGH = blpapi.Name("high")
LOW = blpapi.Name("low")
CLOSE = blpapi.Name("close")
VOLUME = blpapi.Name("volume")
NUM_EVENTS = blpapi.Name("numEvents")
TIME = blpapi.Name("time")
class BbgIntradayBar(BbgRefDataService):
def __init__(self, securities, startTime, endTime, event = "TRADE", barInterval = 60, timeZone = str(get_localzone()), gapFillInitialBar = False, adjustmentSplit = True, adjustmentAbnormal = False, adjustmentNormal = False, adjustmentFollowDPDF = True):
'''
        Bloomberg Intraday Bar query object. Allows the user to input a list of securities for retrieval over a specified time period, subject to the usual constraints that apply to Bloomberg Intraday Bar data retrieval.
Parameters
----------
securities : tuple, list, or ndarray
List of Bloomberg tickers to retrieve data for. If one item is passed this can be input as a string, otherwise inputs must be passed as a list or array-like.
startTime : datetime.datetime
The start date and time at which to retrieving data from. Must be passed as a datetime.
endTime : datetime.datetime
The end date and time at which to retrieving data from. Must be passed as a datetime.
event : string
Defines the market event supplied for an intraday request. Could be TRADE, BID or ASK. If no event is passed, will default to TRADE.
barInterval : integer
Sets the length of each time-bar in the response. Entered as a whole number (between 1 and 1,440 minutes). If omitted, the request will default to 60 minutes. One minute is the lowest possible granularity.
timeZone : string
Timezone for the request based on the pytz package timezone names. If no timezone is passed, will default to current system timezone.
gapFillInitialBar : bool
            If True, populates an otherwise empty initial bar with the most recent value observed prior to startTime instead of omitting it. If not set, will be set to False.
adjustmentSplit : bool
Adjust historical pricing and/or volume to reflect: Spin-Offs, Stock Splits/Consolidations, Stock Dividend/Bonus, Rights Offerings/Entitlement. If not set, will be set to True.
adjustmentAbnormal : bool
Adjust historical pricing to reflect: Special Cash, Liquidation, Capital Gains, Long-Term Capital Gains, Short-Term Capital Gains, Memorial, Return of Capital, Rights Redemption, Miscellaneous, Return Premium, Preferred Rights Redemption, Proceeds/Rights, Proceeds/Shares, Proceeds/Warrants. If not set, will be set to False.
adjustmentNormal : bool
Adjust historical pricing to reflect: Regular Cash, Interim, 1st Interim, 2nd Interim, 3rd Interim, 4th Interim, 5th Interim, Income, Estimated, Partnership Distribution, Final, Interest on Capital, Distribution, Prorated. If not set, will be set to False.
adjustmentFollowDPDF : bool
Setting to True will follow the DPDF <GO> Terminal function. True is the default setting for this option. If not set, will be set to True.
See Also
--------
        BbgIntradayBar.constructDf : retrieves the data associated with a BbgIntradayBar query object and generates a dataframe from it.
BbgDataPoint : Retrieve single point static, calculated or other reference data.
BbgIntradayTick : Retrieve historic tick-level data for a given security.
BbgIntradayBar : Retrieve historic bar level data for a given security (open, high, low and close) for a specified time interval given in minutes.
Examples
--------
Retrieve open, high, low, close, volume, number of events and value data for a basket of securities between two datetimes.
>>> import datetime as dt
>>> import pandas as pd
>>> import BloombergDataModule as bbg
>>> futHist = bbg.BbgIntradayBar(securities = ["YMH0 Comdty", "XMH0 Comdty"], startTime = dt.datetime(2020, 1, 31, 9, 0, 0), endTime = dt.datetime(2020, 1, 31, 12, 0, 0), barInterval = 5)
>>> futHist.constructDf().head()
Field open high low close volume numEvents value
Security time
YMH0 Comdty 2020-01-31 09:10:00+11:00 99.37 99.375 99.37 99.375 149 3 14806.3
2020-01-31 09:15:00+11:00 99.375 99.38 99.375 99.38 1749 13 173807
2020-01-31 09:20:00+11:00 99.38 99.38 99.38 99.38 6 6 596.28
2020-01-31 09:25:00+11:00 99.38 99.38 99.375 99.38 2170 35 215655
2020-01-31 09:30:00+11:00 99.38 99.38 99.375 99.38 93 3 9241.89
'''
        # Accept a single ticker string (as documented above) or any iterable of tickers.
        self.securities = [securities] if isinstance(securities, str) else list(securities)
self.startTime = startTime
self.endTime = endTime
self.event = event
self.barInterval = barInterval
self.timeZone = timeZone
self.gapFillInitialBar = gapFillInitialBar
self.adjustmentSplit = adjustmentSplit
self.adjustmentAbnormal = adjustmentAbnormal
self.adjustmentNormal = adjustmentNormal
self.adjustmentFollowDPDF = adjustmentFollowDPDF
def constructDf(self):
'''
The constructDf method retrieves data associated with a BbgIntradayBar query object and generates a dataframe from it.
Parameters
----------
None
Returns
-------
table : DataFrame
See Also
--------
BbgDataHistory.constructDf : retrieves static history data and constructs a DataFrame from it. It has more customisability with respect to overrides
BbgIntradayTick.constructDf: retrieves intraday (or multi-day) tick level data and constructs a dataframe from it. It has applications in more data intensive and granular analysis
        BbgDataPoint.constructDf: retrieves single-point static, calculated or other reference data and constructs a dataframe from it.
Notes
-----
        A new Bloomberg session is opened for the duration of the call and closed once the data has been retrieved.
Examples
--------
Retrieve open, high, low, close, volume, number of events and value data for a basket of securities between two datetimes.
>>> import datetime as dt
>>> import pandas as pd
>>> import BloombergDataModule as bbg
>>> futHist = bbg.BbgIntradayBar(securities = ["YMH0 Comdty", "XMH0 Comdty"], startTime = dt.datetime(2020, 1, 31, 9, 0, 0), endTime = dt.datetime(2020, 1, 31, 12, 0, 0), barInterval = 5)
>>> futHist.constructDf().head()
Field open high low close volume numEvents value
Security time
YMH0 Comdty 2020-01-31 09:10:00+11:00 99.37 99.375 99.37 99.375 149 3 14806.3
2020-01-31 09:15:00+11:00 99.375 99.38 99.375 99.38 1749 13 173807
2020-01-31 09:20:00+11:00 99.38 99.38 99.38 99.38 6 6 596.28
2020-01-31 09:25:00+11:00 99.38 99.38 99.375 99.38 2170 35 215655
2020-01-31 09:30:00+11:00 99.38 99.38 99.375 99.38 93 3 9241.89
'''
BbgRefDataService.__init__(self)
self.bbgRefData = pd.DataFrame()
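        # Bloomberg expects request start/end times in UTC, so convert from the caller's timezone first.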
UTCStartTime = self.__convertFromTimezoneToUTC(self.startTime, self.timeZone)
UTCEndTime = self.__convertFromTimezoneToUTC(self.endTime, self.timeZone)
for sec in self.securities:
self.request = self.createIntradayBarRequest(security = sec, requestType = "IntradayBarRequest", startTime = UTCStartTime, endTime = UTCEndTime, event = self.event, barInterval = self.barInterval, gapFillInitialBar = self.gapFillInitialBar, adjustmentSplit = self.adjustmentSplit, adjustmentAbnormal = self.adjustmentAbnormal, adjustmentNormal = self.adjustmentNormal, adjustmentFollowDPDF = self.adjustmentFollowDPDF)
self.cid = self.session.sendRequest(self.request)
for response in self.parseResponse(self.cid, False):
self.bbgRefData = self.bbgRefData.append(self.refDataContentToDf(response, sec))
BbgRefDataService.__del__(self)
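        # Convert the UTC bar timestamps back into the requested timezone before indexing.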
self.bbgRefData['time'] = self.bbgRefData['time'].apply(lambda x: self.__convertFromUTCToTimezone(x, self.timeZone))
return self.bbgRefData.set_index(['Security', 'time'])
def refDataContentToDf(self, response, security):
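        # Bars arrive nested as IntradayBarResponse -> barData -> barTickData; each tick
        # snapshot becomes one row (open/high/low/close/volume/numEvents/value).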
securityData = response['content']['IntradayBarResponse']['barData']
barData = securityData['barTickData']
returnDf = pd.DataFrame()
for snapShot in barData:
fieldData = snapShot['barTickData']
returnDf = returnDf.append(pd.DataFrame(fieldData.items(), columns=['Field', 'Values']).set_index('Field').transpose().reset_index(drop=True))#, index = [fieldData['time'] for i in range(0, len(fieldData))])[1:])
returnDf.index.names = ['time']
returnDf['Security'] = security
return returnDf
def __convertFromUTCToTimezone(self, fromDt, toTimeZone):
return pytz.utc.localize(fromDt).astimezone(pytz.timezone(toTimeZone))
def __convertFromTimezoneToUTC(self, fromDt, fromTimeZone):
return pytz.timezone(fromTimeZone).localize(fromDt).astimezone(pytz.utc) | StarcoderdataPython |
1724575 | <reponame>welvin21/pysimt
from .base_sublayer import BaseSublayer
from .self_attention_sublayer import SelfAttentionSublayer
from .cross_attention_sublayer import CrossAttentionSublayer
from .cross_attention_sublayer_mm_flat import FlatMMCrossAttentionSublayer
from .cross_attention_sublayer_mm_hier import HierarchicalMMCrossAttentionSublayer
from .cross_attention_sublayer_mm_serial import SerialMMCrossAttentionSublayer
from .cross_attention_sublayer_mm_parallel import ParallelMMCrossAttentionSublayer
| StarcoderdataPython |
100834 | import FWCore.ParameterSet.Config as cms
from RecoBTag.SecondaryVertex.candidateCombinedSecondaryVertexV2Computer_cfi import *
candidateNegativeCombinedSecondaryVertexV2Computer = candidateCombinedSecondaryVertexV2Computer.clone()
candidateNegativeCombinedSecondaryVertexV2Computer.vertexFlip = True
candidateNegativeCombinedSecondaryVertexV2Computer.trackFlip = True
candidateNegativeCombinedSecondaryVertexV2Computer.trackSelection.sip3dSigMax = 0
candidateNegativeCombinedSecondaryVertexV2Computer.trackPseudoSelection.sip3dSigMax = 0
candidateNegativeCombinedSecondaryVertexV2Computer.trackPseudoSelection.sip2dSigMin = -99999.9
candidateNegativeCombinedSecondaryVertexV2Computer.trackPseudoSelection.sip2dSigMax = -2.0
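# Flipping the vertex/track signs and inverting the impact-parameter significance cuts
# selects the negative side of the discriminator; such "negative taggers" are commonly
# used to estimate light-flavour mistag rates (interpretation assumed, not stated here).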
| StarcoderdataPython |
124563 | <filename>unsec/email_collection.py
# coding: utf8
"""email_collection.py: EmailCollection is a container for Email class """
__author__ = "<NAME> - <NAME>"
__copyright__ = "Copyright 2015, labsquare"
__license__ = "GPL3"
__email__ = "<EMAIL>"
import re
import glob
from unsec import Email, tools
import logging
class EmailCollection(object):
def __init__(self, directory = None, name = None):
self.emails = []
self.log = logging.getLogger(__name__)
self.name = name
if directory is not None:
self.add_from_directory(directory)
def get_emails(self):
"""
        emails accessor
        @return generator of emails in the collection
"""
for email in self.emails:
yield email
def add_email(self, email):
self.emails.append(email)
def add_file(self, filename):
"""
add email from filename
@param string filename
"""
email = Email(filename)
self.add_email(email)
self.log.debug("add {}".format(filename))
def add_from_directory(self, directory):
"""
add email from a directory
@param string directory path of directory
"""
for f in glob.glob(directory+"/*"):
self.add_file(f)
def add_from_files(self, directory):
"""
        add emails matching a glob pattern
        @param string directory glob pattern matching the files to add
"""
for f in glob.glob(directory):
self.add_file(f)
def get_subjects(self):
"""
return all subjects from collection's emails
@return generator subjects
"""
for email in self.emails:
yield email.get_subject()
def get_bodies(self):
"""
return all bodies from collection's emails
@return generator bodies
"""
for email in self.emails:
yield email.get_body()
def get_senders(self):
"""
        return all senders from collection's emails
@return generator senders
"""
for email in self.emails:
yield email.get_sender()
def select(self, category):
return [email for email in self.get_emails() if email.category == category.lower()]
def count(self):
"""
return size of collection
@return int count
"""
return len(self.emails)
def category_count(self, category):
        return len(self.select(category))
def at(self, index):
"""
return email from index
@return Email email
"""
return self.emails[index]
def keep_lang(self, lang="fr"):
self.log.info("Fitering language : {}".format(lang))
new_list = []
for email in self.get_emails():
if email.get_lang() == lang:
new_list.append(email)
self.log.debug("keep file {}".format(email.filename))
self.emails = new_list
def get_categories(self):
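        # Histogram of category labels across the collection (category -> count).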
categories = {}
for email in self.get_emails():
if email.category not in categories:
categories[email.category] = 1
else:
categories[email.category]+=1
return categories
def get_vectors(self):
vectors = []
# WORKS ONLY IF CLUSTERIZER HAS BEEN PROCESS
for email in self.get_emails():
vectors.append(email.vector)
return vectors
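    # The centroid/similarity helpers below assume the clusterizer has attached a
    # `vector` to every email (see the note in get_vectors).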
def get_centroid(self):
return tools.barycenter(self.get_vectors())
def get_similarity(self):
center = self.get_centroid()
return tools.avg_distance(self.get_vectors(), center)
def __getitem__(self, index):
return self.at(index)
def __iter__(self):
return self.get_emails()
def __str__(self):
return "Collection of {} emails".format(self.count())
| StarcoderdataPython |
1763099 | <reponame>lizhaoliu-Lec/Revisiting_Deep_Metric_Learning_PyTorch
"""
The network architectures and weights are adapted and used
from the great https://github.com/Cadene/pretrained-models.pytorch.
"""
import pretrainedmodels as ptm
import torch.nn as nn
import torch.nn.functional as F
class Network(nn.Module):
def __init__(self, opt, return_embed_dict=False):
super(Network, self).__init__()
self.pars = opt
self.model = ptm.__dict__['bninception'](num_classes=1000, pretrained='imagenet')
self.model.last_linear = nn.Linear(self.model.last_linear.in_features, opt.embed_dim)
if '_he' in opt.arch:
nn.init.kaiming_normal_(self.model.last_linear.weight, mode='fan_out')
nn.init.constant_(self.model.last_linear.bias, 0)
if 'frozen' in opt.arch:
for module in filter(lambda m: type(m) == nn.BatchNorm2d, self.model.modules()):
module.eval()
module.train = lambda _: None
self.return_embed_dict = return_embed_dict
self.pool_base = nn.AdaptiveAvgPool2d(1)
self.pool_aux = nn.AdaptiveMaxPool2d(1) if 'double' in opt.arch else None
self.name = opt.arch
self.out_adjust = None
def forward(self, x, warmup=False, **kwargs):
x_before_pooled = self.model.features(x)
x_pooled = self.pool_base(x_before_pooled)
        if self.pool_aux is not None:
            # Max-pool the same feature map (not the raw input) so the summed tensors match shapes.
            x_pooled += self.pool_aux(x_before_pooled)
if warmup:
x_pooled, x = x_pooled.detach(), x.detach()
x = self.model.last_linear(x_pooled.view(x.size(0), -1))
if 'normalize' in self.name:
x = F.normalize(x, dim=-1)
if self.out_adjust and not self.training:
x = self.out_adjust(x)
return x, (x_pooled, x_before_pooled)
def functional_forward(self, x):
pass
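
# Minimal usage sketch (illustrative only: the `opt` fields below are assumptions
# inferred from the attributes this class reads, not part of the module itself):
#
#   import argparse, torch
#   opt = argparse.Namespace(arch='bninception_normalize', embed_dim=128)
#   net = Network(opt).eval()
#   emb, (pooled, fmap) = net(torch.randn(2, 3, 224, 224))  # emb: (2, 128), L2-normalized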
| StarcoderdataPython |
4836049 | <gh_stars>1-10
from setuptools import setup, find_packages
import os
package_name = "pyromocc"
package_data = {}
if os.name == 'posix':
package_data[package_name] = ['*.so']
else:
package_data[package_name] = ['*.pyd', '*.dll']
setup(name=package_name,
version='0.0.4',
author="<NAME>",
packages=find_packages(exclude=['third_party', 'examples']),
install_requires=['numpy'],
setup_requires=['wheel'],
extras_require={'examples': ["matplotlib"]},
include_package_data=True,
classifiers=[
'Operating System :: POSIX :: Linux',
'Programming Language :: C++',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
],
python_requires='>=3.6',
package_data=package_data)
| StarcoderdataPython |
1725710 | # Copyright (c) 2020 the Eclipse BaSyx Authors
#
# This program and the accompanying materials are made available under the terms of the MIT License, available in
# the LICENSE file of this project.
#
# SPDX-License-Identifier: MIT
import io
import unittest
from basyx.aas import model
from basyx.aas.adapter.xml import write_aas_xml_file, read_aas_xml_file
from basyx.aas.examples.data import example_concept_description, example_aas_missing_attributes, example_aas, \
example_aas_mandatory_attributes, example_submodel_template, create_example
from basyx.aas.examples.data._helper import AASDataChecker
def _serialize_and_deserialize(data: model.DictObjectStore) -> model.DictObjectStore:
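    # write the object store into an in-memory XML document first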
file = io.BytesIO()
write_aas_xml_file(file=file, data=data)
# try deserializing the xml document into a DictObjectStore of AAS objects with help of the xml module
file.seek(0)
return read_aas_xml_file(file, failsafe=False)
class XMLSerializationDeserializationTest(unittest.TestCase):
def test_example_serialization_deserialization(self) -> None:
object_store = _serialize_and_deserialize(example_aas.create_full_example())
checker = AASDataChecker(raise_immediately=True)
example_aas.check_full_example(checker, object_store)
def test_example_mandatory_attributes_serialization_deserialization(self) -> None:
object_store = _serialize_and_deserialize(example_aas_mandatory_attributes.create_full_example())
checker = AASDataChecker(raise_immediately=True)
example_aas_mandatory_attributes.check_full_example(checker, object_store)
def test_example_missing_attributes_serialization_deserialization(self) -> None:
object_store = _serialize_and_deserialize(example_aas_missing_attributes.create_full_example())
checker = AASDataChecker(raise_immediately=True)
example_aas_missing_attributes.check_full_example(checker, object_store)
def test_example_submodel_template_serialization_deserialization(self) -> None:
data: model.DictObjectStore[model.Identifiable] = model.DictObjectStore()
data.add(example_submodel_template.create_example_submodel_template())
object_store = _serialize_and_deserialize(data)
checker = AASDataChecker(raise_immediately=True)
example_submodel_template.check_full_example(checker, object_store)
def test_example_iec61360_concept_description_serialization_deserialization(self) -> None:
data: model.DictObjectStore[model.Identifiable] = model.DictObjectStore()
data.add(example_concept_description.create_iec61360_concept_description())
object_store = _serialize_and_deserialize(data)
checker = AASDataChecker(raise_immediately=True)
example_concept_description.check_full_example(checker, object_store)
def test_example_all_examples_serialization_deserialization(self) -> None:
data: model.DictObjectStore[model.Identifiable] = create_example()
object_store = _serialize_and_deserialize(data)
checker = AASDataChecker(raise_immediately=True)
checker.check_object_store(object_store, data)
| StarcoderdataPython |
3350646 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import jsonpickle
from rqalpha.environment import Environment
from rqalpha.const import DAYS_CNT, DEFAULT_ACCOUNT_TYPE
from rqalpha.utils import get_account_type, merge_dicts
from rqalpha.utils.repr import property_repr
from rqalpha.events import EVENT
class Portfolio(object):
__repr__ = property_repr
def __init__(self, start_date, static_unit_net_value, units, accounts, register_event=True):
self._start_date = start_date
self._static_unit_net_value = static_unit_net_value
self._units = units
self._accounts = accounts
self._mixed_positions = None
if register_event:
self.register_event()
def register_event(self):
"""
        Register event listeners.
"""
event_bus = Environment.get_instance().event_bus
event_bus.prepend_listener(EVENT.PRE_BEFORE_TRADING, self._pre_before_trading)
def order(self, order_book_id, quantity, style, target=False):
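        # Dispatch the order to the sub-account that trades this instrument type.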
account_type = get_account_type(order_book_id)
return self.accounts[account_type].order(order_book_id, quantity, style, target)
def get_state(self):
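        # Snapshot the portfolio and every sub-account as jsonpickle-encoded UTF-8 bytes.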
return jsonpickle.encode({
'start_date': self._start_date,
'static_unit_net_value': self._static_unit_net_value,
'units': self._units,
'accounts': {
name: account.get_state() for name, account in six.iteritems(self._accounts)
}
}).encode('utf-8')
def set_state(self, state):
state = state.decode('utf-8')
value = jsonpickle.decode(state)
self._start_date = value['start_date']
self._static_unit_net_value = value['static_unit_net_value']
self._units = value['units']
for k, v in six.iteritems(value['accounts']):
self._accounts[k].set_state(v)
def _pre_before_trading(self, event):
self._static_unit_net_value = self.unit_net_value
@property
def accounts(self):
"""
        [dict] mapping of account type to account
"""
return self._accounts
@property
def stock_account(self):
"""
        [StockAccount] stock account
"""
return self._accounts.get(DEFAULT_ACCOUNT_TYPE.STOCK.name, None)
@property
def future_account(self):
"""
[FutureAccount] 期货账户
"""
return self._accounts.get(DEFAULT_ACCOUNT_TYPE.FUTURE.name, None)
@property
def start_date(self):
"""
[datetime.datetime] 策略投资组合的开始日期
"""
return self._start_date
@property
def units(self):
"""
[float] 份额
"""
return self._units
@property
def unit_net_value(self):
"""
[float] 实时净值
"""
return self.total_value / self._units
@property
def static_unit_net_value(self):
return self._static_unit_net_value
@property
def daily_pnl(self):
"""
[float] 当日盈亏
"""
return self.total_value - self._static_unit_net_value * self.units
@property
def daily_returns(self):
"""
[float] 当前最新一天的日收益
"""
return 0 if self._static_unit_net_value == 0 else self.unit_net_value / self._static_unit_net_value - 1
@property
def total_returns(self):
"""
[float] 累计收益率
"""
return self.unit_net_value - 1
@property
def annualized_returns(self):
"""
[float] 累计年化收益率
"""
current_date = Environment.get_instance().trading_dt.date()
return self.unit_net_value ** (DAYS_CNT.DAYS_A_YEAR / float((current_date - self.start_date).days + 1)) - 1
@property
def total_value(self):
"""
[float]总权益
"""
return sum(account.total_value for account in six.itervalues(self._accounts))
@property
def portfolio_value(self):
"""
[Deprecated] 总权益
"""
return self.total_value
@property
def positions(self):
"""
[dict] 持仓
"""
if self._mixed_positions is None:
self._mixed_positions = MixedPositions(self._accounts)
return self._mixed_positions
@property
def cash(self):
"""
[float] 可用资金
"""
return sum(account.cash for account in six.itervalues(self._accounts))
@property
def dividend_receivable(self):
return sum(getattr(account, 'dividend_receivable', 0) for account in six.itervalues(self._accounts))
@property
def transaction_cost(self):
return sum(account.transaction_cost for account in six.itervalues(self._accounts))
@property
def market_value(self):
"""
[float] 市值
"""
return sum(account.market_value for account in six.itervalues(self._accounts))
@property
def pnl(self):
return (self.unit_net_value - 1) * self.units
@property
def starting_cash(self):
return self.units
@property
def frozen_cash(self):
return sum(account.frozen_cash for account in six.itervalues(self._accounts))
class MixedPositions(dict):
def __init__(self, accounts):
super(MixedPositions, self).__init__()
self._accounts = accounts
def __missing__(self, key):
account_type = get_account_type(key)
for a_type in self._accounts:
if a_type == account_type:
return self._accounts[a_type].positions[key]
return None
def __contains__(self, item):
return item in self.keys()
def __repr__(self):
keys = []
for account in six.itervalues(self._accounts):
keys += account.positions.keys()
return str(sorted(keys))
def __len__(self):
return sum(len(account.positions) for account in six.itervalues(self._accounts))
def __iter__(self):
keys = []
for account in six.itervalues(self._accounts):
keys += account.positions.keys()
for key in sorted(keys):
yield key
def items(self):
items = merge_dicts(*[account.positions.items() for account in six.itervalues(self._accounts)])
for k in sorted(items.keys()):
yield k, items[k]
def keys(self):
keys = []
for account in six.itervalues(self._accounts):
keys += list(account.positions.keys())
return sorted(keys)
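# Illustrative sketch (not part of the original module; the numbers are made
# up): how the unit-NAV bookkeeping above composes.
if __name__ == "__main__":
    units = 1000000.0                 # initial units, cf. Portfolio.units
    static_unit_net_value = 1.0       # NAV captured at last day-open
    total_value = 1050000.0           # current sum of account equities
    unit_net_value = total_value / units                        # 1.05, cf. unit_net_value
    daily_returns = unit_net_value / static_unit_net_value - 1  # 0.05, cf. daily_returns
    total_returns = unit_net_value - 1                          # 0.05, cf. total_returns
    print(unit_net_value, daily_returns, total_returns)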
| StarcoderdataPython |
191277 | <filename>flaskdeploy/test/prototype.py
from config import *
from validation import *
from utils import *
import subprocess
import getpass
import os
import click
domain = "xx.com"
usr = getpass.getuser()
loc = os.path.join(os.getcwd(), domain)
DOMAIN, USR, CUR_LOC = domain, usr, loc
cli = click.Group()
@click.command(short_help='AliYun Option',context_settings=dict(
allow_extra_args=True
))
@click.option('--ali_key', prompt='Ali_Key')
@click.option('--ali_secret', prompt='Ali_Secret')
@click.pass_context
def op_ali(ctx,ali_key,ali_secret):
dns_op = "dns_ali"
op_1 = "Ali_Key={}".format(ali_key)
op_2 = "Ali_Secret={}".format(ali_secret)
ssl_multi_gen(DOMAIN, USR, CUR_LOC, op_1,op_2,dns_op)
raise JumpOutFuckingClick2
@click.command(short_help='CloudFlare Option',context_settings=dict(
allow_extra_args=True
))
@click.option('--cf_email', prompt='CF_Email')
@click.option('--cf_key', prompt='CF_Key')
@click.pass_context
def op_cf(ctx,cf_email,cf_key):
dns_op = "dns_cf"
op_1 = "CF_Email={}".format(cf_email)
op_2 = "CF_Key={}".format(cf_key)
ssl_multi_gen(DOMAIN, USR, CUR_LOC, op_1,op_2,dns_op)
raise JumpOutFuckingClick2
@click.command(context_settings=dict(
allow_extra_args=True
))
@click.option('--dns_type', prompt='Service options. \n [1] CloudFlare \n [2] AliYun \n\n\nYour Choice')
@click.pass_context
def miss_ssl(ctx,dns_type):
"""
These are available DNS provider servie options. \n
[1] CloudFlare <CF_Email,CF_Key> --dns dns_cf \n
[2] AliYun <Ali_Key,Ali_Secret> --dns dns_ali \n
"""
# if not dns_type:
if(str(dns_type)=="1"):
try:
op_cf()
except JumpOutFuckingClick2:
click.echo("<_@,@_<2")
if(str(dns_type)=="2"):
try:
op_ali()
except JumpOutFuckingClick2:
click.echo("<_@,@_<2")
raise JumpOutFuckingClick
@click.command(short_help='test',context_settings=dict(
allow_extra_args=True
))
@click.option('--count', prompt='count')
def test(count):
click.echo("2333")
raise JumpOutFuckingClick
@click.command(short_help='dist')
@click.option('--count')
@click.pass_context
def dist(ctx, count):
if not count:
try:
# test()
miss_ssl()
except JumpOutFuckingClick:
click.echo("<_@,@_<")
cli.add_command(dist, 'dist')
cli.add_command(test, 'test')
cli.add_command(op_ali, 'op_ali')
cli.add_command(miss_ssl, 'miss_ssl')
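# Note on the JumpOutFuckingClick pattern used above: click re-prompts on
# failure, so the nested commands raise a sentinel exception to unwind back to
# the caller. A minimal sketch of what the local `utils` module presumably
# defines (assumption; the real classes are imported, not shown here):
#
# class JumpOutFuckingClick(Exception):
#     """Raised by a nested command to escape the outer prompt loop."""
#
# class JumpOutFuckingClick2(JumpOutFuckingClick):
#     """Same idea, one nesting level deeper."""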
if __name__ == '__main__':
cli() | StarcoderdataPython |
3290047 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from tvm import te
import numpy as np
import psutil
def schedule(attrs):
cfg, s = attrs.auto_config, attrs.scheduler
def mcpu_auto_schedule(s, output, prefix):
hyper_params = [[-1, 2, 8, 4], [-1, 1, 512, 1]]
slice_data, slice_reduce = [], []
for i in range(len(output.op.axis)):
slice_data.append(cfg.define_split(f"{prefix}:D{i}", attrs.get_extent(output.op.axis[i]), num_outputs=4, init_vals=[hyper_params[i % len(hyper_params)],]))
for i in range(len(output.op.reduce_axis)):
slice_reduce.append(cfg.define_split(f"{prefix}:R{i}", attrs.get_extent(output.op.reduce_axis[i]), num_outputs=2, init_vals=[[-1, 4],]))
unroll = cfg.define_knob(f"{prefix}:UN", [1, 4, 8, 16, 32, 64], init_vals=[1,] if attrs.backend == 'c-mcpu_avx512' else [0,])
output_local, = s.cache_write([output], "local")
slice_axes = []
for i in range(len(output.op.axis)):
slice_axes.append(cfg.apply_split(s, output_local, output_local.op.axis[i], slice_data[i]))
if output.op.reduce_axis:
reduce_at = cfg.define_knob(f"{prefix}:RA", [x for x in range(len(output.op.reduce_axis))], init_vals=[0,])
output_local_K_o, output_local_K_i = cfg.apply_split(s, output_local, output_local.op.reduce_axis[reduce_at], slice_reduce[reduce_at])
output_local_K_o, output_local_K_i = [output_local_K_o], [output_local_K_i]
else:
output_local_K_o, output_local_K_i = [], []
first, second, third, fourth = [x[0] for x in slice_axes], [x[1] for x in slice_axes], [x[2] for x in slice_axes], [x[3] for x in slice_axes]
s[output_local].reorder(*(first + second + output_local_K_o + third + output_local_K_i + fourth))
slice_global_axes = []
for i in range(len(output.op.axis)):
if cfg.define_knob(f"{prefix}:_{i}", [False, True], init_vals=[0,]):
slice_global_axes.append(cfg.apply_split(s, output, output.op.axis[i], [-1, slice_data[i][1], int(np.product(slice_data[i][2:]))]))
else:
slice_global_axes.append(cfg.apply_split(s, output, output.op.axis[i], [-1, 1, int(np.product(slice_data[i][1:]))]))
s[output].reorder(*([x[0] for x in slice_global_axes] + [x[1] for x in slice_global_axes] + [x[2] for x in slice_global_axes]))
s[output_local].compute_at(s[output], slice_global_axes[-1][1])
s[output].bind(s[output].fuse(*[x[0] for x in slice_global_axes]), te.thread_axis('threadIdx.x'))
s[output_local].pragma(first[0], "auto_unroll_max_step", unroll)
s[output_local].pragma(first[0], "unroll_explicit", True)
# s[output_local].vectorize(fourth[-1])
s[output_local].unroll(fourth[-1])
def mcpu_simple_schedule(s, output, prefix):
slice_data = [cfg.define_split(f"{prefix}:D{i}", attrs.get_extent(output.op.axis[i]), num_outputs=3, init_vals=[[-1, 1, 1],]) for i in range(len(output.op.axis))]
slice_axes = [cfg.apply_split(s, output, output.op.axis[i], [-1, 1] + slice_data[i][1:]) for i in range(len(output.op.axis))]
first, second, third, fourth = [x[0] for x in slice_axes], [x[1] for x in slice_axes], [x[2] for x in slice_axes], [x[3] for x in slice_axes]
s[output].reorder(*(first + second + third + fourth))
s[output].bind(s[output].fuse(*first), te.thread_axis('threadIdx.x'))
s[output].bind(s[output].fuse(*second), te.thread_axis('vthread'))
for i, m in enumerate(attrs.explicit_ops):
if len(m.output(0).op.reduce_axis) == 0:
return mcpu_simple_schedule(s, m.output(0), f'T{m.output(0).name}')
mcpu_auto_schedule(s, m.output(0), f'T{m.output(0).name}')
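# Quick sanity sketch of the split arithmetic used above (illustrative only):
# cfg.define_split("...:D0", extent, num_outputs=4, init_vals=[[-1, 2, 8, 4]])
# factors an axis of length `extent` into four nested loops whose sizes
# multiply back to `extent`; -1 marks the dimension inferred from the others,
# so for extent = 1024 the init value [-1, 2, 8, 4] leaves
# 1024 / (2 * 8 * 4) = 16 iterations in the outermost loop.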
| StarcoderdataPython |
1799457 | import os
from game.prepare import stat_library
from models import FilesConfig, LabelsConfig, Game
def main():
stat_library('/opt/game/game')
input_folder = '/opt/game/games/slack_22_02_2019/'
output_folder = '/opt/game/game/output/out-test-10/'
files = FilesConfig(
os.path.join(input_folder, 'lines.dat'), # todo was 'inputs'
os.path.join(input_folder, 'errors.dat'),
os.path.join(input_folder, 'labels.dat'),
output_folder,
additional_files=True
)
labels = LabelsConfig(
["g0", "n", "NH", "U", "Z", "Av", "fesc"],
)
driver = Game(files, 4, 5, labels)
driver.run()
if __name__ == "__main__":
main()
| StarcoderdataPython |
185007 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: main.py
Author: huxuan
Email: i(at)huxuan.org
Description: Filter IPTV m3u playlists according to customized criteria.
"""
import argparse
from . import __version__
from .config import Config
from .constants import defaults
from .constants import helps
from .models import Playlist
def parse_args():
"""Arguments Parsers."""
parser = argparse.ArgumentParser()
parser.add_argument('--min-height', default=defaults.MIN_HEIGHT, type=int,
help=helps.MIN_HEIGHT)
parser.add_argument('-c', '--config', default=defaults.CONFIG,
help=helps.CONFIG)
parser.add_argument('-i', '--input', action='append', default=[],
help=helps.INPUT)
parser.add_argument('-I', '--interval', default=defaults.INTERVAL,
type=int, help=helps.INTERVAL)
parser.add_argument('-o', '--output', default=defaults.OUTPUT,
help=helps.OUTPUT)
parser.add_argument('-r', '--replace-group-by-source', action='store_true',
help=helps.REPLACE_GROUP_BY_SOURCE)
parser.add_argument('-t', '--template', action='append', default=[],
help=helps.TEMPLATE)
parser.add_argument('-T', '--timeout', default=defaults.TIMEOUT, type=int,
help=helps.TIMEOUT)
parser.add_argument('-u', '--udpxy', default=defaults.UDPXY,
help=helps.UDPXY)
parser.add_argument('-v', '--version', action='version',
version=__version__)
return parser.parse_args()
def main():
"""Main process."""
args = parse_args()
if not args.input:
args.input = [defaults.INPUT]
Config.init(args.config)
playlist = Playlist()
playlist.parse(args)
playlist.filter(args)
    with open(args.output, 'w', encoding='utf-8') as out_file:
        out_file.write(playlist.export(args))
print('Invalid Urls:')
print('\n'.join(sorted(playlist.invalid_urls)))
if __name__ == '__main__':
main()
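# Example invocation (illustrative; the installed entry-point name is an
# assumption based on this module's main()):
#
#   python -m iptvfilter --min-height 720 -i playlist.m3u -o filtered.m3u
#
# Inputs fall back to defaults.INPUT when no -i is given, and invalid stream
# URLs are printed after the filtered playlist has been written.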
| StarcoderdataPython |
1614415 | <reponame>alessandrostockman/dl-flatlands<gh_stars>1-10
from abc import abstractmethod
import random
from collections import deque
import numpy as np
from fltlnd.utils import SumTree
class Buffer:
def __init__(self, buffer_size, batch_size):
self.buffer_size = buffer_size
self.batch_size = batch_size
@abstractmethod
def add(self, state, action, reward, next_state, done):
pass
@abstractmethod
def sample(self):
pass
@abstractmethod
def update(self, idx, error):
pass
@abstractmethod
def add_agent_episode(self, agent, action, value, obs, reward, done, policy_logits):
pass
@abstractmethod
def retrieve_agent_episodes(self, agent):
pass
@abstractmethod
def reset(self):
pass
@abstractmethod
def __len__(self):
pass
class ReplayBuffer(Buffer):
"""Fixed-size buffer to store experience tuples."""
def __init__(self, buffer_size, batch_size):
super().__init__(buffer_size, batch_size)
        # the deque is bounded by the buffer size; the oldest experiences are evicted first
        self.memory = deque(maxlen=self.buffer_size)
self.has_probability = False
def add(self, state, action, reward, next_state, done, probability=None):
"""Add a new experience to memory."""
self.memory.append([state, action, reward, next_state, done, probability])
if probability is not None:
self.has_probability = True
def get_last(self):
        return self.memory[-1]
def sample(self):
"""Randomly sample a batch of experiences from memory."""
# sample memory for a minibatch
batch = random.sample(self.memory, self.batch_size)
# separate minibatch into elements
state, action, reward, next_state, done, probability = [np.squeeze(i) for i in zip(*batch)]
if self.has_probability:
return state, action, reward, next_state, done, probability
else:
return state, action, reward, next_state, done
def update(self, error):
pass
def add_agent_episode(self, agent, action, value, obs, reward, done, policy_logits):
raise NotImplementedError()
def retrieve_agent_episodes(self, agent):
raise NotImplementedError()
def reset(self):
raise NotImplementedError()
def __len__(self):
"""Return the current size of internal memory."""
return len(self.memory)
class PrioritizedBuffer(Buffer):
def __init__(self, buffer_size, batch_size):
super().__init__(buffer_size, batch_size)
self._internal_len = 0
self.eta = 0.01
self.alpha = 0.6
self.beta = 0.4
self.beta_growth = 0.001
        # the sum tree must be able to hold the whole replay buffer, not just one batch
        self.tree = SumTree(buffer_size)
def _get_priority(self, error):
return (error + self.eta) ** self.alpha
def add(self, state, action, reward, next_state, done):
"""Add a new experience to memory."""
sample = [state, action, reward, next_state, done]
# Find the max priority
max_priority = np.max(self.tree.tree[-self.tree.capacity:])
# If the max priority = 0 we can't put priority = 0 since this experience will never have a chance to be selected
# So we use a minimum priority
if max_priority == 0:
max_priority = 1
self._internal_len += 1
self.tree.add(max_priority, sample)
def sample(self):
batch = []
idxs = []
segment = self.tree.total() / self.batch_size
priorities = []
self.beta = np.min([1., self.beta + self.beta_growth])
for i in range(self.batch_size):
a = segment * i
b = segment * (i + 1)
s = random.uniform(a, b)
(idx, p, data) = self.tree.get(s)
priorities.append(p)
batch.append(data)
idxs.append(idx)
sampling_probabilities = priorities / self.tree.total()
is_weight = np.power(self.tree.n_entries * sampling_probabilities, -self.beta)
is_weight /= is_weight.max()
self.sample_ids = idxs
state, action, reward, next_state, done = [np.squeeze(i) for i in zip(*batch)]
return state, action, reward, next_state, done
def update(self, error):
p = self._get_priority(error)
for idx in self.sample_ids:
self.tree.update(idx, p)
def add_agent_episode(self, agent, action, value, obs, reward, done, policy_logits):
raise NotImplementedError()
def retrieve_agent_episodes(self, agent):
raise NotImplementedError()
def reset(self):
raise NotImplementedError()
def __len__(self):
return self._internal_len
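# Illustrative sketch (not part of the original API): the proportional
# prioritisation above follows p = (|TD error| + eta)**alpha with importance
# weights w = (N * P(i))**(-beta), normalised by max(w), where beta anneals
# toward 1 by beta_growth per sample. A minimal usage loop might look like
# this (shapes and the TD-error helper are assumptions):
#
#   buf = PrioritizedBuffer(buffer_size=10000, batch_size=32)
#   buf.add(state, action, reward, next_state, done)
#   ...
#   state, action, reward, next_state, done = buf.sample()
#   td_error = abs(compute_td_error(...))   # hypothetical helper
#   buf.update(td_error)                    # re-prioritises the sampled ids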
class AgentEpisodeBuffer(Buffer):
def __init__(self, buffer_size, batch_size):
self._memory = {}
def add(self, state, action, reward, next_state, done):
raise NotImplementedError()
def sample(self):
raise NotImplementedError()
def update(self, idx, error):
raise NotImplementedError()
def add_agent_episode(self, agent, action, value, obs, reward, done, policy_logits):
agent_mem = self._memory.get(agent, [])
agent_mem.append([action, value, obs, reward, done, policy_logits])
self._memory[agent] = agent_mem
def retrieve_agent_episodes(self, agent):
action, value, obs, reward, done, policy_logits = [np.squeeze(i) for i in zip(*self._memory[agent])]
return action, value, obs, reward, done, policy_logits
def reset(self):
self._memory = {}
def __len__(self):
pass | StarcoderdataPython |
3388550 | <gh_stars>0
"""
.. module: horseradish.models
:platform: Unix
:synopsis: This module contains all of the associative tables
that help define the many to many relationships established in Horseradish
:copyright: (c) 2020 by <NAME>, see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from sqlalchemy import Column, Integer, ForeignKey, Index, UniqueConstraint
from horseradish.database import db
roles_users = db.Table(
"roles_users",
Column("user_id", Integer, ForeignKey("users.id")),
Column("role_id", Integer, ForeignKey("roles.id")),
)
Index("roles_users_ix", roles_users.c.user_id, roles_users.c.role_id)
| StarcoderdataPython |
3233661 | <filename>modules/couchpotato.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cherrypy
import htpc
import requests
from htpc.auth2 import require, member_of
import logging
import hashlib
from htpc.helpers import fix_basepath, get_image, striphttp, comp_table
import json
import os
import re
class Couchpotato(object):
def __init__(self):
self.logger = logging.getLogger('modules.couchpotato')
htpc.MODULES.append({
'name': 'CouchPotato',
'id': 'couchpotato',
'test': htpc.WEBDIR + 'couchpotato/getapikey',
'fields': [
{'type': 'bool', 'label': 'Enable', 'name': 'couchpotato_enable'},
{'type': 'text', 'label': 'Menu name', 'name': 'couchpotato_name'},
{'type': 'text', 'label': 'Username', 'name': 'couchpotato_username'},
{'type': 'password', 'label': 'Password', 'name': 'couchpotato_password'},
{'type': 'text', 'label': 'IP / Host *', 'name': 'couchpotato_host'},
{'type': 'text', 'label': 'Port', 'placeholder': '5050', 'name': 'couchpotato_port'},
{'type': 'text', 'label': 'Basepath', 'placeholder': '/couchpotato', 'name': 'couchpotato_basepath'},
{'type': 'text', 'label': 'API key', 'desc': 'Press test get apikey', 'name': 'couchpotato_apikey'},
{'type': 'bool', 'label': 'Use SSL', 'name': 'couchpotato_ssl'},
{'type': 'text', 'label': 'Reverse proxy link', 'placeholder': '', 'desc': 'Reverse proxy link ex: https://couchpotato.domain.com', 'name': 'couchpotato_reverse_proxy_link'},
]
})
@cherrypy.expose()
@require()
def index(self):
return htpc.LOOKUP.get_template('couchpotato.html').render(scriptname='couchpotato', webinterface=self.webinterface())
def webinterface(self):
''' Generate page from template '''
ssl = 's' if htpc.settings.get('couchpotato_ssl', 0) else ''
host = striphttp(htpc.settings.get('couchpotato_host', ''))
port = str(htpc.settings.get('couchpotato_port', ''))
basepath = fix_basepath(htpc.settings.get('couchpotato_basepath', '/'))
url = 'http%s://%s:%s%s' % (ssl, host, port, basepath)
if htpc.settings.get('couchpotato_reverse_proxy_link'):
url = htpc.settings.get('couchpotato_reverse_proxy_link')
return url
def ctrl_c(self, filt):
ctrl_char = ''
if '!=' in filt:
ctrl_char = '!='
elif '==' in filt:
ctrl_char = '=='
elif '<=' in filt:
ctrl_char = '<='
elif '>=' in filt:
ctrl_char = '>='
elif '<=' in filt:
ctrl_char = '=='
elif '!' in filt:
ctrl_char = '!'
elif '<' in filt:
ctrl_char = '<'
elif '>' in filt:
ctrl_char = '>'
elif '=' in filt:
ctrl_char = '='
return ctrl_char
def cp_filter(self, filt, collection):
self.logger.debug('Called cp_filter %s' % filt)
before = len(collection.get('movies', 0))
results = []
if collection.get('movies', ''):
check = self.ctrl_c(filt)
if filt:
# default to fuzzy title search "16 blocks"
if check == '':
pat = '.*?'.join(map(re.escape, filt))
regex = re.compile(pat, flags=re.I)
for m in collection['movies']:
f = regex.search(m['title'])
if f:
results.append(m)
else:
# default to normal search
if check:
filt = filt.split(check)
for m in collection['movies']:
# flatten the info since we would normally be interessed in that
if 'info' in m:
for k, v in m['info'].items():
m[k] = v
try:
imdb = m['info']['rating']['imdb']
m['rating'] = imdb[0]
except:
pass
for k, v in m.items():
if k.lower() == filt[0].lower():
if isinstance(v, dict):
# actor roles='<NAME>'
for kk, vv in v.items():
if v == kk:
results.append(m)
elif isinstance(v, list):
# genres=action
if filt[1].lower() in [z.lower() for z in v]:
results.append(m)
elif isinstance(v, (int, float)):
# for year!=1337 rating<=5.0
if check and check != '=':
if comp_table[check](float(v), float(filt[1])):
results.append(m)
elif isinstance(v, str):
# plot='some string'
if filt[1].lower() in v.lower():
results.append(m)
self.logger.debug('Filter out %s' % (before - len(results)))
return results
@cherrypy.expose()
@require(member_of(htpc.role_admin))
@cherrypy.tools.json_out()
def ping(self, couchpotato_host, couchpotato_port, couchpotato_apikey, couchpotato_basepath, couchpotato_ssl=False, **kwargs):
self.logger.debug('Testing connectivity to couchpotato')
couchpotato_basepath = fix_basepath(couchpotato_basepath)
couchpotato_host = striphttp(couchpotato_host)
ssl = 's' if couchpotato_ssl else ''
url = 'http%s://%s:%s%sapi/%s' % (ssl, couchpotato_host, couchpotato_port, couchpotato_apikey)
try:
f = requests.get(url + '/app.available/', timeout=10)
return f.json()
except:
self.logger.error('Unable to connect to couchpotato')
self.logger.debug('connection-URL: %s' % url)
return
@cherrypy.expose()
@require(member_of(htpc.role_admin))
@cherrypy.tools.json_out()
def getapikey(self, couchpotato_username, couchpotato_password, couchpotato_host, couchpotato_port, couchpotato_apikey, couchpotato_basepath, couchpotato_ssl=False, **kwargs):
self.logger.debug('Testing connectivity to couchpotato')
if couchpotato_password and couchpotato_username != '':
            couchpotato_password = hashlib.md5(couchpotato_password).hexdigest()
couchpotato_username = hashlib.md5(couchpotato_username).hexdigest()
getkey = 'getkey/?p=%s&u=%s' % (couchpotato_password, couchpotato_username)
couchpotato_basepath = fix_basepath(couchpotato_basepath)
ssl = 's' if couchpotato_ssl else ''
url = 'http%s://%s:%s%s%s' % (ssl, striphttp(couchpotato_host), couchpotato_port, couchpotato_basepath, getkey)
try:
f = requests.get(url, timeout=10, verify=False)
return f.json()
except Exception as e:
self.logger.error('Unable to connect to couchpotato %s' % e)
self.logger.debug('connection-URL: %s' % url)
return
@cherrypy.expose()
@require()
def GetImage(self, url, h=None, w=None, o=100, *args, **kwargs):
# url can be a string or json
working_url = None
imgdir = os.path.join(htpc.DATADIR, 'images/')
try:
x = json.loads(url)
if isinstance(x, list):
tl = [(hashlib.md5(u).hexdigest(), u) for u in x]
checkurl = []
# check any of the images exist in the cache
for i in tl:
if os.path.isfile(os.path.join(imgdir, i[0])):
#self.logger.debug('%s exist in cache, ignore the rest of the hashes %s' % (str(i[1]), str(tl)))
# dont bother checking any else if we have image
checkurl = []
working_url = i[1]
break
else:
checkurl.append(i)
continue
if working_url:
return get_image(working_url, h, w, o)
else:
# None of the imges existed in the cache
if checkurl:
for ii, i in enumerate(checkurl):
# verify that the download is ok before we try to cache it.
try:
r = requests.get(i[1], headers={'Cache-Control': 'private, max-age=0, no-cache, must-revalidate', 'Pragma': 'no-cache'})
if r.content:
working_url = i[1]
break
except Exception as e:
self.logger.error('Error: %s url: %s item: %s loop n : %s tuplelist %s' % (e, i[1], i, ii, str(tl)))
if working_url:
return get_image(working_url, h, w, o)
except ValueError as e:
if isinstance(url, str):
return get_image(url, h, w, o)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def GetMovieList(self, status='', limit='', f=''):
self.logger.debug('Fetching Movies')
if status == 'done':
status += '&type=movie&release_status=done&status_or=1'
data = self.fetch('media.list/?status=' + status)
if f:
filtered_movies = self.cp_filter(f, data)
data['movies'] = filtered_movies
data['total'] = len(filtered_movies)
return data
else:
return data
data = self.fetch('media.list/?status=' + status + '&limit_offset=' + limit)
if f:
filtered_movies = self.cp_filter(f, data)
data['movies'] = filtered_movies
data['total'] = len(filtered_movies)
return data
else:
return data
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def GetNotificationList(self, limit='20'):
self.logger.debug('Fetching Notification')
data = self.fetch('notification.list/?limit_offset=' + limit)
self.fetch('notification.markread')
return data
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def SearchMovie(self, q=''):
self.logger.debug('Searching for movie')
return self.fetch('movie.search/?q=' + q)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def AddMovie(self, movieid, profile, title, category_id=''):
self.logger.debug('Adding movie')
if category_id:
return self.fetch('movie.add/?profile_id=' + profile + '&identifier=' + movieid + '&title=' + title + '&category_id=' + category_id)
return self.fetch('movie.add/?profile_id=' + profile + '&identifier=' + movieid + '&title=' + title)
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def EditMovie(self, id, profile, title):
self.logger.debug('Editing movie')
return self.fetch('movie.edit/?id=' + id + '&profile_id=' + profile + '&default_title=' + title)
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def RefreshMovie(self, id):
self.logger.debug('Refreshing movie')
return self.fetch('movie.refresh/?id=' + id)
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def DeleteMovie(self, id=''):
self.logger.debug('Deleting movie')
return self.fetch('movie.delete/?id=' + id)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def GetReleases(self, id=''):
self.logger.debug('Downloading movie')
return self.fetch('media.get/?id=' + id)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def DownloadRelease(self, id=''):
self.logger.debug('Downloading movie')
return self.fetch('release.manual_download/?id=' + id)
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def IgnoreRelease(self, id=''):
self.logger.debug('Downloading movie')
return self.fetch('release.ignore/?id=' + id)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def GetProfiles(self):
self.logger.debug('Fetching available profiles')
return self.fetch('profile.list/')
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def GetCategories(self):
self.logger.debug('Feching categories')
return self.fetch('category.list')
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def Suggestion(self):
self.logger.debug('Fetching suggestion')
return self.fetch('suggestion.view')
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def ChartsView(self):
self.logger.debug('Fetching charts')
return self.fetch('charts.view')
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def SuggestionIgnore(self, imdb=None, seenit=None):
u = 'suggestion.ignore/?imdb=%s' % imdb
if seenit:
u += '&seenit=1'
self.logger.debug('Fetching suggestion')
return self.fetch(u)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def DashboardSoon(self):
return self.fetch('dashboard.soon')
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def Restart(self):
return self.fetch('app.restart')
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def Shutdown(self):
return self.fetch('app.shutdown')
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def Update(self):
return self.fetch('updater.update')
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def SearchAllWanted(self):
return self.fetch('movie.searcher.full_search')
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def Postprocess(self, path=''):
u = 'renamer.scan'
if path:
u += '/?base_folder=%s' % path
return self.fetch(u)
def fetch(self, path):
try:
host = striphttp(htpc.settings.get('couchpotato_host', ''))
port = str(htpc.settings.get('couchpotato_port', ''))
apikey = htpc.settings.get('couchpotato_apikey', '')
basepath = fix_basepath(htpc.settings.get('couchpotato_basepath', '/'))
ssl = 's' if htpc.settings.get('couchpotato_ssl', 0) else ''
url = 'http%s://%s:%s%sapi/%s/%s' % (ssl, host, port, basepath, apikey, path)
self.logger.debug('Fetching information from: %s' % url)
f = requests.get(url, timeout=60, verify=False)
return f.json()
except Exception as e:
self.logger.debug('Exception: %s' % e)
self.logger.error('Unable to fetch information')
return
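# Filter syntax accepted by cp_filter above, for reference (examples are
# illustrative): a bare string does a fuzzy title match ("16 blocks"), while
# key/operator/value strings use the comparators resolved by ctrl_c, e.g.
#   GetMovieList(f="year>=2010")     # numeric comparison via comp_table
#   GetMovieList(f="genres=action")  # membership test on list-valued fields
#   GetMovieList(f="plot=heist")     # substring test on string fields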
| StarcoderdataPython |
1697034 | <reponame>weezel/BandEventNotifier<filename>venues/plugin_lutakko.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import lxml.html
import re
import time
from venues.abstract_venue import AbstractVenue
class Lutakko(AbstractVenue):
def __init__(self):
super().__init__()
self.url = "http://www.jelmu.net/"
self.name = "Lutakko"
self.city = "Jyväskylä"
self.country = "Finland"
# Parsing patterns
self.datepat = re.compile("[0-9.]+")
def parse_price(self, t):
tag = " ".join(t.xpath('./div[@role="tickets"]/div/a/strong/text()'))
return "0" if len(tag) == 0 else "%s" % tag
def parse_date(self, t):
month_now = time.strftime("%m")
year = int(time.strftime("%Y"))
date = ""
tag = " ".join(t.xpath('./div[@class="badges"]' +
'/div[@class="date"]' +
'/span/text()'))
splt = tag.split(" - ")
slen = len(splt)
if slen == 1:
date = " ".join(re.findall(self.datepat, tag))
# We only care about the starting date
elif slen > 1:
date = " ".join(re.findall(self.datepat, tag)[0])
date = date.rstrip(".") # FIXME
day, month = date.split(".")
# Are we on the new year already?
if int(month) < int(month_now):
year += 1
return "%.4d-%.2d-%.2d" % (int(year), int(month), int(day))
def parse_event(self, tag):
date = self.parse_date(tag)
title = tag.xpath('./a')[0].text_content()
desc = tag.xpath('./p')[0].text_content()
price = self.parse_price(tag)
name = f"{title} {desc}"
name = re.sub("\s+", " ", name).lstrip(" ").rstrip(" ")
return {
"venue": self.get_venue_name(),
"date": date,
"name": name,
"price": price
}
def parse_events(self, data: str):
doc = lxml.html.fromstring(data)
for event in doc.xpath('//ul[@role="upcoming-events"]/li'):
yield self.parse_event(event)
if __name__ == '__main__':
import requests
l = Lutakko()
r = requests.get(l.url)
for e in l.parse_events(r.content):
for k, v in e.items():
print(f"{k:>10s}: {v}")
print()
| StarcoderdataPython |
1780241 | import logging
import os
import re
from pathlib import Path
class Problem:
editor = "code"
def __init__(self, pid: str):
self._pid = pid
self._logger = logging.getLogger(f"题目 {pid}")
self._path = Path(f"problems/{re.match(r'[A-Z]+', pid).group()}/{pid}")
self._path_md = self._path / f"{self._pid}.md"
self._path_gen_cpp = self._path / "gen.cpp"
self._path_gen_py = self._path / "gen.py"
self._path_std = self._path / "std.cpp"
def open(self):
os.system(f"{self.editor} {self._path_md}")
return self
| StarcoderdataPython |
188252 | <filename>examples/nw/tests/test_upd_employee_salary.py
import sys, unittest
import logic_bank_utils.util as logic_bank_utils
from datetime import datetime
from decimal import Decimal
(did_fix_path, sys_env_info) = \
logic_bank_utils.add_python_path(project_dir="LogicBank", my_file=__file__)
if __name__ == '__main__':
print("\nStarted from cmd line - launch unittest and exit\n")
sys.argv = [sys.argv[0]]
unittest.main(module="examples.nw.tests.test_update_employee_salary")
exit(0)
else:
print("Started from unittest: " + __name__)
from examples.nw import tests
tests.copy_gold_over_db()
import examples.nw.db.models as models
from examples.nw.logic import session, engine # opens db, activates rules <--
# activate rules: LogicBank.activate(session=session, activator=declare_logic)
from logic_bank.util import prt
print("\n" + sys_env_info + "\n\n")
class Test(unittest.TestCase):
def setUp(self): # banner
self.started_at = str(datetime.now())
tests.setUp(file=__file__)
def tearDown(self):
tests.tearDown(file=__file__, started_at=self.started_at, engine=engine, session=session)
def test_run(self):
""" Test State Transition Logic - raise over 20%
should fail due to credit limit exceeded (catch exception to verify)
"""
bad_employee_raise = session.query(models.Employee).filter(models.Employee.Id == 1).one()
bad_employee_raise.Salary = bad_employee_raise.Salary * Decimal('1.1')
did_fail_as_expected = False
try:
session.commit()
except:
session.rollback()
did_fail_as_expected = True
if not did_fail_as_expected:
self.fail("too-small should have failed constraint, but succeeded")
else:
print("\n" + prt("puny raise failed constraint as expected."))
print("\nupd_employee_salary, ran to completion")
self.assertTrue(True)
| StarcoderdataPython |
1662845 | """ Install odet first:
https://github.com/kun0906/odet
"""
import os
import pickle
import pandas as pd
from odet.pparser.parser import _pcap2flows, _get_IAT_SIZE, _get_STATS
from odet.utils.tool import dump_data, check_path
import numpy as np
""" Analyze IOT datasets (data-clean.zip: 20GB, 20210714) collected on 2021.
"""
import collections
import os
import subprocess
from odet.pparser.parser import PCAP
import numpy as np
RANDOM_STATE = 42
# Note: this and the dump_data below shadow the check_path/dump_data imported
# from odet.utils.tool above.
def check_path(in_dir):
    if not os.path.exists(in_dir):
        os.makedirs(in_dir)
def dump_data(data, out_file=''):
"""Save data to file
Parameters
----------
data: any data
out_file: str
out file path
verbose: int (default is 1)
a print level is to control what information should be printed according to the given value.
The higher the value is, the more info is printed.
Returns
-------
"""
# save results
with open(out_file, 'wb') as out_hdl:
pickle.dump(data, out_hdl)
class IOT2021(PCAP):
def get_flows(self, in_file='xxx.pcap'):
# flows: [(fid, arrival times list, packet sizes list)]
self.flows = _pcap2flows(in_file, flow_pkts_thres=2)
def keep_ip(self, pcap_file, kept_ips=[], output_file=''):
if output_file == '':
output_file = os.path.splitext(pcap_file)[0] + 'kept_ips.pcap' # Split a path in root and extension.
# only keep srcIPs' traffic
# srcIP_str = " or ".join([f'ip.src=={srcIP}' for srcIP in kept_ips])
# filter by mac srcIP address
srcIP_str = " or ".join([f'eth.src=={srcIP}' for srcIP in kept_ips])
cmd = f"tshark -r {pcap_file} -w {output_file} {srcIP_str}"
print(f'{cmd}')
try:
result = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True).stdout.decode('utf-8')
except Exception as e:
            # `result` may be unbound if subprocess.run itself raised
            print(f'command failed: {e}')
return -1
return output_file
def get_pcaps(in_dir, file_type='normal'):
files = collections.defaultdict(list)
for activity_name in sorted(os.listdir(in_dir)):
if activity_name.startswith('.'): continue
activity_dir = os.path.join(in_dir, activity_name)
for partcipant_id in sorted(os.listdir(activity_dir)):
if partcipant_id.startswith('.'): continue
partcipant_dir = os.path.join(activity_dir, partcipant_id)
for f in sorted(os.listdir(partcipant_dir)):
if f.startswith('.'): continue
if f.endswith('pcap'):
f = os.path.join(partcipant_dir, f)
files[activity_name].append(f)
# files.append(f)
else:
pass
return files
def get_mac_ip(flows):
ips = []
macs = []
# [(fid, arrival times list, packet sizes list)]
for i, (fid, pkt_times, pkts) in enumerate(flows):
macs.append(pkts[0].src)
ips.append(fid[0])
print(set(ips))
return macs, ips
def get_durations(flows):
durations = []
# [(fid, arrival times list, packet sizes list)]
for i, (fid, pkt_times, pkts) in enumerate(flows):
start = min(pkt_times)
end = max(pkt_times)
durations.append(end - start)
return durations
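# How the durations feed the subflow split (sketch, mirroring the commented-out
# main() further below): the subflow interval is taken as a high quantile of
# the durations of normal ("no_interaction") flows.
#
#   q_flow_dur = 0.9
#   subflow_interval = np.quantile(get_durations(normal_flows), q=q_flow_dur)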
# IP has changed (dynamic IPs) in different collection process, so please use mac to filter packets.
ip2device = {'192.168.143.152': 'refrigerator', }
device2ip = {'refrigerator': '192.168.143.43', 'nestcam': '192.168.143.104', 'alexa': '192.168.143.74'}
device2mac = {'refrigerator': '70:2c:1f:39:25:6e', 'nestcam': '18:b4:30:8a:9f:b2', 'alexa': '4c:ef:c0:0b:91:b3'}
#
# def main(device='refrigerator'):
# in_dir = f'../Datasets/UCHI/IOT_2021/data-clean/{device}'
# out_dir = f'examples/datasets/IOT2021/data-clean/{device}'
# device_meta_file = os.path.join(out_dir, f'{device}.dat')
# device_meta = {}
# if not os.path.exists(device_meta_file):
# device_files = get_pcaps(in_dir, file_type='normal')
# for i, (activity_name, files) in enumerate(device_files.items()):
# activity_flows = []
# for j, f in enumerate(files):
# print(j, f)
# # create the PCAP object
# pp = IOT2021()
#
# # filter unnecesarry IP addresses
# filtered_f = os.path.join(out_dir,
# os.path.splitext(os.path.relpath(f, start=in_dir))[0] + '-filtered.pcap')
# check_path(filtered_f)
# # pp.keep_ip(f, kept_ips=[device2ip[device]], output_file=filtered_f)
# pp.keep_ip(f, kept_ips=[device2mac[device]], output_file=filtered_f)
#
# # parse pcap and get the flows (only forward flows (sent from src IP))
# pp.get_flows(filtered_f)
#
# # concatenated the flows to the total flows
# device_files[activity_name][j] = (filtered_f, pp.flows)
# activity_flows += pp.flows
# # break
#
# # activity_flows = sum(len(flows_) for flows_ in ])
# print(f'activity_flows: {len(activity_flows)}')
# device_meta[activity_name] = (activity_flows, device_files[activity_name])
# check_path(device_meta_file)
# print(device_meta_file)
# dump_data(device_meta, output_file=device_meta_file)
# else:
# device_meta = load_data(device_meta_file)
#
# ips = set()
# macs = set()
# for i, (activity_name, vs_) in enumerate(device_meta.items()):
# activity_flows, file_flows = vs_
# print(i, activity_name, len(activity_flows))
# macs_, ips_ = get_mac_ip(activity_flows)
# # print strange IP and pcap_file
# for v_, (f_, _) in zip(ips_, file_flows):
# if v_ == '0.0.0.0':
# print(activity_name, v_, f_)
# macs.update(macs_)
# ips.update(ips_)
#
# print(f'MAC: {macs}, IP: {ips}')
# # get normal_durations
# normal_flows = device_meta['no_interaction'][0]
# normal_durations = get_durations(normal_flows)
#
# # get subflow_interval
# q_flow_dur = 0.9
# subflow_interval = np.quantile(normal_durations, q=q_flow_dur) # median of flow_durations
# print(f'---subflow_interval: ', subflow_interval, f', q_flow_dur: {q_flow_dur}')
#
# subflow_device_meta = {'q_flow_dur': q_flow_dur, 'subflow_interval': subflow_interval,
# 'normal_durations': normal_durations}
# for i, (activity_name, vs_) in enumerate(device_meta.items()):
# activity_flows, file_flows = vs_
# subflows = []
# for file_, flows_ in file_flows:
# subflows_ = flow2subflows(flows_, interval=subflow_interval, num_pkt_thresh=2, verbose=False)
# subflows += subflows_
# print(i, activity_name, len(activity_flows), len(subflows))
# subflow_device_meta[activity_name] = subflows[:]
#
# print('\n')
# # print subflow results
# for i, (key, vs_) in enumerate(sorted(subflow_device_meta.items())):
# if type(vs_) == list:
# print(i, key, len(vs_))
# else:
# print(i, key, vs_)
def _extract_pcap_feature(pcap_file, out_dir, feat_type='IAT+SIZE', device='refrigerator'):
# # filter ip by macaddress
# filtered_pcap_file = os.path.join(out_dir, os.path.basename(pcap_file))
# keep_ip(pcap_file, kept_ips= [device2mac[device]], output_file= filtered_pcap_file)
# create the PCAP object
pp = IOT2021()
# filter unnecesarry IP addresses
filtered_f = os.path.join(out_dir, os.path.basename(pcap_file))
check_path(os.path.dirname(filtered_f))
# pp.keep_ip(f, kept_ips=[device2ip[device]], output_file=filtered_f)
pp.keep_ip(pcap_file, kept_ips=[device2mac[device]], output_file=filtered_f)
# parse pcap and get the flows (only forward flows (sent from src IP))
pp.get_flows(filtered_f)
pp.flows = [(fid, pkts) for fid, pkts in pp.flows if '0.0.0.0' not in fid[0] and '0.0.0.0' not in fid[1]]
check_path(out_dir)
    out_file = os.path.join(out_dir, os.path.basename(pcap_file) + '-flows.dat')
dump_data(pp.flows, out_file)
# get features
if feat_type == 'IAT+SIZE':
features, fids = _get_IAT_SIZE(pp.flows)
elif feat_type == 'STATS':
features, fids = _get_STATS(pp.flows)
else:
msg = f'{feat_type}'
raise NotImplementedError(msg)
feature_file = os.path.join(out_dir, os.path.basename(pcap_file) + f'-{feat_type}.dat')
dump_data((features, fids), feature_file)
return out_file, feature_file, 0
def pcap2feature(in_dir, out_dir, is_subclip=True, is_mirror=False, is_cnn_feature=False, feat_type='IAT+SIZE',
device_type='refrigerator'):
""" preprocessing the videos:
e.g., trim and mirror videos, extract features by CNN
Parameters
----------
in_dir: ['data/data-clean/refrigerator]
out_dir:
is_subclip: cut video
is_mirror
is_cnn_feature
Returns
-------
meta: dictionary
"""
# video_logs = parse_logs(in_dir='data/data-clean/log')
# issued_videos = pd.read_csv(os.path.join('data/data-clean/refrigerator', 'issued_videos.csv'), header=None).values[
# :, -1].tolist()
issued_videos = []
data = [] # [(video_path, cnn_feature, y)]
durations = {'camera1': [], 'camera2': [], 'camera3': []}
# list device folders (e.g., refrigerator or camera)
i = 0
cnt_3 = 0 # camera_3
cnt_32 = 0 # camera_32: backup
for device_dir in sorted(in_dir):
out_dir_sub = ''
if device_type not in device_dir: continue
# list activity folders (e.g., open_close or take_out )
for activity_dir in sorted(os.listdir(device_dir)):
activity_label = activity_dir
out_dir_activity = activity_dir
activity_dir = os.path.join(device_dir, activity_dir)
if not os.path.exists(activity_dir) or '.DS_Store' in activity_dir or not os.path.isdir(
activity_dir): continue
# list participant folders (e.g., participant 1 or participant 2)
for participant_dir in sorted(os.listdir(activity_dir)):
out_dir_participant = participant_dir
out_dir_sub = os.path.join(participant_dir)
participant_dir = os.path.join(activity_dir, participant_dir)
if not os.path.exists(participant_dir) or '.DS_Store' in participant_dir: continue
# print(participant_dir)
# list videos (e.g., 'no_interaction_1_1614038765_1.mp4')
for f in sorted(os.listdir(participant_dir)):
print(f)
if f.startswith('.'): continue
if not f.endswith('.pcap'): continue
issued_flg = False
for _issued_f in issued_videos:
if f in _issued_f + '.npy':
issued_flg = True
break
if issued_flg:
continue # issued videos, skip
x = os.path.join(participant_dir, f)
try:
# vd_info = get_info(x)
out_dir_tmp = os.path.join(out_dir, out_dir_activity, out_dir_participant)
x_flows, x_feat, kept_durations = _extract_pcap_feature(x, out_dir=out_dir_tmp,
feat_type=feat_type)
data.append((x, x_feat, activity_label))
except Exception as e:
msg = f'error: {e} on {x}'
raise ValueError(msg)
i += 1
print(f'tot pcaps: {i}')
meta = {'data': data, 'is_mirror': is_mirror, 'is_cnn_feature': is_cnn_feature}
return meta
if __name__ == '__main__':
pcap2feature(in_dir=['data/data-clean/refrigerator'], out_dir='out/data/data-clean/refrigerator',
feat_type='IAT+SIZE', device_type='refrigerator')
| StarcoderdataPython |
44735 | <reponame>digital-land/pipeline<filename>tests/unit/test_uri.py<gh_stars>1-10
from digital_land.log import IssueLog
from digital_land.datatype.uri import URIDataType
def test_uri_normalise():
uri = URIDataType()
assert uri.normalise("https://example.com/foo") == "https://example.com/foo"
assert (
uri.normalise("https://example.com/foo\nbar\n/baz")
== "https://example.com/foobar/baz"
)
issues = IssueLog()
assert uri.normalise("example.com", issues=issues) == ""
issue = issues.rows.pop()
assert issue["issue-type"] == "invalid URI"
assert issue["value"] == "example.com"
assert issues.rows == []
| StarcoderdataPython |
3214401 | from .fhirbase import fhirbase
class Task(fhirbase):
"""
A task to be performed.
Attributes:
resourceType: This is a Task resource
identifier: The business identifier for this task.
definitionUri: A reference to a formal or informal definition of the
task. For example, a protocol, a step within a defined workflow
definition, etc.
definitionReference: A reference to a formal or informal definition of
the task. For example, a protocol, a step within a defined workflow
definition, etc.
basedOn: BasedOn refers to a higher-level authorization that triggered
the creation of the task. It references a "request" resource such as
a ProcedureRequest, MedicationRequest, ProcedureRequest, CarePlan,
etc. which is distinct from the "request" resource the task is seeking
to fulfil. This latter resource is referenced by FocusOn. For
example, based on a ProcedureRequest (= BasedOn), a task is created to
fulfil a procedureRequest ( = FocusOn ) to collect a specimen from a
patient.
groupIdentifier: An identifier that links together multiple tasks and
other requests that were created in the same context.
partOf: Task that this particular task is part of.
status: The current status of the task.
statusReason: An explanation as to why this task is held, failed, was
refused, etc.
businessStatus: Contains business-specific nuances of the business
state.
intent: Indicates the "level" of actionability associated with the
Task. I.e. Is this a proposed task, a planned task, an actionable
task, etc.
priority: Indicates how quickly the Task should be addressed with
respect to other requests.
code: A name or code (or both) briefly describing what the task
involves.
description: A free-text description of what is to be performed.
focus: The request being actioned or the resource being manipulated by
this task.
        _for: The entity who benefits from the performance of the service
        specified in the task (e.g., the patient).
context: The healthcare event (e.g. a patient and healthcare provider
interaction) during which this task was created.
executionPeriod: Identifies the time action was first taken against
the task (start) and/or the time final action was taken against the
task prior to marking it as completed (end).
authoredOn: The date and time this task was created.
lastModified: The date and time of last modification to this task.
requester: The creator of the task.
performerType: The type of participant that can execute the task.
owner: Individual organization or Device currently responsible for
task execution.
reason: A description or code indicating why this task needs to be
performed.
note: Free-text information captured about the task as it progresses.
relevantHistory: Links to Provenance records for past versions of this
Task that identify key state transitions or updates that are likely to
be relevant to a user looking at the current version of the task.
restriction: If the Task.focus is a request resource and the task is
seeking fulfillment (i.e is asking for the request to be actioned),
this element identifies any limitations on what parts of the
referenced request should be actioned.
input: Additional information that may be needed in the execution of
the task.
output: Outputs produced by the Task.
"""
__name__ = 'Task'
def __init__(self, dict_values=None):
self.resourceType = 'Task'
# type: str
# possible values: Task
self.definitionUri = None
# type: str
self.definitionReference = None
# reference to Reference: identifier
self.basedOn = None
# type: list
# reference to Reference: identifier
self.groupIdentifier = None
# reference to Identifier
self.partOf = None
# type: list
# reference to Reference: identifier
self.status = None
# type: str
# possible values: draft, requested, received, accepted,
# rejected, ready, cancelled, in-progress, on-hold, failed, completed,
# entered-in-error
self.statusReason = None
# reference to CodeableConcept
self.businessStatus = None
# reference to CodeableConcept
self.intent = None
# type: str
self.priority = None
# type: str
self.code = None
# reference to CodeableConcept
self.description = None
# type: str
self.focus = None
# reference to Reference: identifier
self._for = None
# reference to Reference: identifier
self.context = None
# reference to Reference: identifier
self.executionPeriod = None
# reference to Period
self.authoredOn = None
# type: str
self.lastModified = None
# type: str
self.requester = None
# reference to Task_Requester
self.performerType = None
# type: list
# reference to CodeableConcept
self.owner = None
# reference to Reference: identifier
self.reason = None
# reference to CodeableConcept
self.note = None
# type: list
# reference to Annotation
self.relevantHistory = None
# type: list
# reference to Reference: identifier
self.restriction = None
# reference to Task_Restriction
self.input = None
# type: list
# reference to Task_Input
self.output = None
# type: list
# reference to Task_Output
self.identifier = None
# type: list
# reference to Identifier
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.status is not None:
for value in self.status:
if value is not None and value.lower() not in [
'draft', 'requested', 'received', 'accepted', 'rejected', 'ready',
'cancelled', 'in-progress', 'on-hold', 'failed', 'completed',
'entered-in-error']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'draft, requested, received, accepted, rejected, ready, cancelled,'
'in-progress, on-hold, failed, completed, entered-in-error'))
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'context'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'identifier'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'performerType'},
{'parent_entity': 'Task_Restriction',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'restriction'},
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'executionPeriod'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'basedOn'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'focus'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'reason'},
{'parent_entity': 'Annotation',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'note'},
{'parent_entity': 'Task_Input',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'input'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'relevantHistory'},
{'parent_entity': 'Task_Requester',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'requester'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'definitionReference'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'code'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'owner'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'statusReason'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'groupIdentifier'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': 'partOf'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task',
'child_variable': '_for'},
{'parent_entity': 'Task_Output',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'output'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task',
'child_variable': 'businessStatus'},
]
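# Usage sketch (assumption: fhirbase.set_attributes copies matching dict keys
# onto the instance, as the constructor suggests):
#
#   task = Task({'status': 'requested', 'intent': 'order',
#                'description': 'Collect specimen'})
#   task.assert_type()   # raises ValueError for an unknown status code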
class Task_Requester(fhirbase):
"""
A task to be performed.
Attributes:
agent: The device, practitioner, etc. who initiated the task.
onBehalfOf: The organization the device or practitioner was acting on
behalf of when they initiated the task.
"""
__name__ = 'Task_Requester'
def __init__(self, dict_values=None):
self.agent = None
# reference to Reference: identifier
self.onBehalfOf = None
# reference to Reference: identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task_Requester',
'child_variable': 'onBehalfOf'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task_Requester',
'child_variable': 'agent'},
]
class Task_Restriction(fhirbase):
"""
A task to be performed.
Attributes:
repetitions: Indicates the number of times the requested action should
occur.
period: Over what time-period is fulfillment sought.
recipient: For requests that are targeted to more than on potential
recipient/target, for whom is fulfillment sought?
"""
__name__ = 'Task_Restriction'
def __init__(self, dict_values=None):
self.repetitions = None
# type: int
self.period = None
# reference to Period
self.recipient = None
# type: list
# reference to Reference: identifier
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'Task_Restriction',
'child_variable': 'period'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task_Restriction',
'child_variable': 'recipient'},
]
class Task_Input(fhirbase):
"""
A task to be performed.
Attributes:
type: A code or description indicating how the input is intended to be
used as part of the task execution.
valueBoolean: The value of the input parameter as a basic type.
valueInteger: The value of the input parameter as a basic type.
valueDecimal: The value of the input parameter as a basic type.
valueBase64Binary: The value of the input parameter as a basic type.
valueInstant: The value of the input parameter as a basic type.
valueString: The value of the input parameter as a basic type.
valueUri: The value of the input parameter as a basic type.
valueDate: The value of the input parameter as a basic type.
valueDateTime: The value of the input parameter as a basic type.
valueTime: The value of the input parameter as a basic type.
valueCode: The value of the input parameter as a basic type.
valueOid: The value of the input parameter as a basic type.
valueUuid: The value of the input parameter as a basic type.
valueId: The value of the input parameter as a basic type.
valueUnsignedInt: The value of the input parameter as a basic type.
valuePositiveInt: The value of the input parameter as a basic type.
valueMarkdown: The value of the input parameter as a basic type.
valueElement: The value of the input parameter as a basic type.
valueExtension: The value of the input parameter as a basic type.
valueBackboneElement: The value of the input parameter as a basic
type.
valueNarrative: The value of the input parameter as a basic type.
valueAnnotation: The value of the input parameter as a basic type.
valueAttachment: The value of the input parameter as a basic type.
valueIdentifier: The value of the input parameter as a basic type.
valueCodeableConcept: The value of the input parameter as a basic
type.
valueCoding: The value of the input parameter as a basic type.
valueQuantity: The value of the input parameter as a basic type.
valueDuration: The value of the input parameter as a basic type.
valueSimpleQuantity: The value of the input parameter as a basic type.
valueDistance: The value of the input parameter as a basic type.
valueCount: The value of the input parameter as a basic type.
valueMoney: The value of the input parameter as a basic type.
valueAge: The value of the input parameter as a basic type.
valueRange: The value of the input parameter as a basic type.
valuePeriod: The value of the input parameter as a basic type.
valueRatio: The value of the input parameter as a basic type.
valueReference: The value of the input parameter as a basic type.
valueSampledData: The value of the input parameter as a basic type.
valueSignature: The value of the input parameter as a basic type.
valueHumanName: The value of the input parameter as a basic type.
valueAddress: The value of the input parameter as a basic type.
valueContactPoint: The value of the input parameter as a basic type.
valueTiming: The value of the input parameter as a basic type.
valueMeta: The value of the input parameter as a basic type.
valueElementDefinition: The value of the input parameter as a basic
type.
valueContactDetail: The value of the input parameter as a basic type.
valueContributor: The value of the input parameter as a basic type.
valueDosage: The value of the input parameter as a basic type.
valueRelatedArtifact: The value of the input parameter as a basic
type.
valueUsageContext: The value of the input parameter as a basic type.
valueDataRequirement: The value of the input parameter as a basic
type.
valueParameterDefinition: The value of the input parameter as a basic
type.
valueTriggerDefinition: The value of the input parameter as a basic
type.
"""
__name__ = 'Task_Input'
def __init__(self, dict_values=None):
self.type = None
# reference to CodeableConcept
self.valueBoolean = None
# type: bool
self.valueInteger = None
# type: int
self.valueDecimal = None
        # type: float  (FHIR decimal)
self.valueBase64Binary = None
# type: str
self.valueInstant = None
# type: str
self.valueString = None
# type: str
self.valueUri = None
# type: str
self.valueDate = None
# type: str
self.valueDateTime = None
# type: str
self.valueTime = None
# type: str
self.valueCode = None
# type: str
self.valueOid = None
# type: str
self.valueUuid = None
# type: str
self.valueId = None
# type: str
self.valueUnsignedInt = None
# type: int
self.valuePositiveInt = None
# type: int
self.valueMarkdown = None
# type: str
self.valueElement = None
# reference to Element: id
self.valueExtension = None
# reference to Extension
self.valueBackboneElement = None
# reference to BackboneElement
self.valueNarrative = None
# reference to Narrative
self.valueAnnotation = None
# reference to Annotation
self.valueAttachment = None
# reference to Attachment
self.valueIdentifier = None
# reference to Identifier
self.valueCodeableConcept = None
# reference to CodeableConcept
self.valueCoding = None
# reference to Coding
self.valueQuantity = None
# reference to Quantity
self.valueDuration = None
# reference to Duration
self.valueSimpleQuantity = None
# reference to Quantity
self.valueDistance = None
# reference to Distance
self.valueCount = None
# reference to Count
self.valueMoney = None
# reference to Money
self.valueAge = None
# reference to Age
self.valueRange = None
# reference to Range
self.valuePeriod = None
# reference to Period
self.valueRatio = None
# reference to Ratio
self.valueReference = None
# reference to Reference: identifier
self.valueSampledData = None
# reference to SampledData
self.valueSignature = None
# reference to Signature
self.valueHumanName = None
# reference to HumanName
self.valueAddress = None
# reference to Address
self.valueContactPoint = None
# reference to ContactPoint
self.valueTiming = None
# reference to Timing
self.valueMeta = None
# reference to Meta
self.valueElementDefinition = None
# reference to ElementDefinition
self.valueContactDetail = None
# reference to ContactDetail
self.valueContributor = None
# reference to Contributor
self.valueDosage = None
# reference to Dosage
self.valueRelatedArtifact = None
# reference to RelatedArtifact
self.valueUsageContext = None
# reference to UsageContext
self.valueDataRequirement = None
# reference to DataRequirement
self.valueParameterDefinition = None
# reference to ParameterDefinition
self.valueTriggerDefinition = None
# reference to TriggerDefinition
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
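    # Each relationship entry below maps an attribute on this class
    # (child_variable) to the identifying column (parent_variable) of the
    # FHIR type it references (parent_entity).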
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueCodeableConcept'},
{'parent_entity': 'ContactDetail',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueContactDetail'},
{'parent_entity': 'Contributor',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueContributor'},
{'parent_entity': 'RelatedArtifact',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueRelatedArtifact'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueIdentifier'},
{'parent_entity': 'Attachment',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueAttachment'},
{'parent_entity': 'Meta',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueMeta'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueSimpleQuantity'},
{'parent_entity': 'Extension',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueExtension'},
{'parent_entity': 'Address',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueAddress'},
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valuePeriod'},
{'parent_entity': 'DataRequirement',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueDataRequirement'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueQuantity'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task_Input',
'child_variable': 'valueReference'},
{'parent_entity': 'TriggerDefinition',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueTriggerDefinition'},
{'parent_entity': 'Duration',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueDuration'},
{'parent_entity': 'ElementDefinition',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueElementDefinition'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueMoney'},
{'parent_entity': 'Range',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueRange'},
{'parent_entity': 'Signature',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueSignature'},
{'parent_entity': 'UsageContext',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueUsageContext'},
{'parent_entity': 'Coding',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueCoding'},
{'parent_entity': 'Dosage',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueDosage'},
{'parent_entity': 'Narrative',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueNarrative'},
{'parent_entity': 'Element',
'parent_variable': 'id',
'child_entity': 'Task_Input',
'child_variable': 'valueElement'},
{'parent_entity': 'Annotation',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueAnnotation'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'type'},
{'parent_entity': 'Count',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueCount'},
{'parent_entity': 'Ratio',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueRatio'},
{'parent_entity': 'Distance',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueDistance'},
{'parent_entity': 'BackboneElement',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueBackboneElement'},
{'parent_entity': 'ContactPoint',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueContactPoint'},
{'parent_entity': 'Age',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueAge'},
{'parent_entity': 'Timing',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueTiming'},
{'parent_entity': 'ParameterDefinition',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueParameterDefinition'},
{'parent_entity': 'HumanName',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueHumanName'},
{'parent_entity': 'SampledData',
'parent_variable': 'object_id',
'child_entity': 'Task_Input',
'child_variable': 'valueSampledData'},
]
class Task_Output(fhirbase):
"""
A task to be performed.
Attributes:
type: The name of the Output parameter.
valueBoolean: The value of the Output parameter as a basic type.
valueInteger: The value of the Output parameter as a basic type.
valueDecimal: The value of the Output parameter as a basic type.
valueBase64Binary: The value of the Output parameter as a basic type.
valueInstant: The value of the Output parameter as a basic type.
valueString: The value of the Output parameter as a basic type.
valueUri: The value of the Output parameter as a basic type.
valueDate: The value of the Output parameter as a basic type.
valueDateTime: The value of the Output parameter as a basic type.
valueTime: The value of the Output parameter as a basic type.
valueCode: The value of the Output parameter as a basic type.
valueOid: The value of the Output parameter as a basic type.
valueUuid: The value of the Output parameter as a basic type.
valueId: The value of the Output parameter as a basic type.
valueUnsignedInt: The value of the Output parameter as a basic type.
valuePositiveInt: The value of the Output parameter as a basic type.
valueMarkdown: The value of the Output parameter as a basic type.
valueElement: The value of the Output parameter as a basic type.
valueExtension: The value of the Output parameter as a basic type.
valueBackboneElement: The value of the Output parameter as a basic
type.
valueNarrative: The value of the Output parameter as a basic type.
valueAnnotation: The value of the Output parameter as a basic type.
valueAttachment: The value of the Output parameter as a basic type.
valueIdentifier: The value of the Output parameter as a basic type.
valueCodeableConcept: The value of the Output parameter as a basic
type.
valueCoding: The value of the Output parameter as a basic type.
valueQuantity: The value of the Output parameter as a basic type.
valueDuration: The value of the Output parameter as a basic type.
valueSimpleQuantity: The value of the Output parameter as a basic
type.
valueDistance: The value of the Output parameter as a basic type.
valueCount: The value of the Output parameter as a basic type.
valueMoney: The value of the Output parameter as a basic type.
valueAge: The value of the Output parameter as a basic type.
valueRange: The value of the Output parameter as a basic type.
valuePeriod: The value of the Output parameter as a basic type.
valueRatio: The value of the Output parameter as a basic type.
valueReference: The value of the Output parameter as a basic type.
valueSampledData: The value of the Output parameter as a basic type.
valueSignature: The value of the Output parameter as a basic type.
valueHumanName: The value of the Output parameter as a basic type.
valueAddress: The value of the Output parameter as a basic type.
valueContactPoint: The value of the Output parameter as a basic type.
valueTiming: The value of the Output parameter as a basic type.
valueMeta: The value of the Output parameter as a basic type.
valueElementDefinition: The value of the Output parameter as a basic
type.
valueContactDetail: The value of the Output parameter as a basic type.
valueContributor: The value of the Output parameter as a basic type.
valueDosage: The value of the Output parameter as a basic type.
valueRelatedArtifact: The value of the Output parameter as a basic
type.
valueUsageContext: The value of the Output parameter as a basic type.
valueDataRequirement: The value of the Output parameter as a basic
type.
valueParameterDefinition: The value of the Output parameter as a basic
type.
valueTriggerDefinition: The value of the Output parameter as a basic
type.
"""
__name__ = 'Task_Output'
def __init__(self, dict_values=None):
self.type = None
# reference to CodeableConcept
self.valueBoolean = None
# type: bool
self.valueInteger = None
# type: int
self.valueDecimal = None
        # type: float  (FHIR decimal)
self.valueBase64Binary = None
# type: str
self.valueInstant = None
# type: str
self.valueString = None
# type: str
self.valueUri = None
# type: str
self.valueDate = None
# type: str
self.valueDateTime = None
# type: str
self.valueTime = None
# type: str
self.valueCode = None
# type: str
self.valueOid = None
# type: str
self.valueUuid = None
# type: str
self.valueId = None
# type: str
self.valueUnsignedInt = None
# type: int
self.valuePositiveInt = None
# type: int
self.valueMarkdown = None
# type: str
self.valueElement = None
# reference to Element: id
self.valueExtension = None
# reference to Extension
self.valueBackboneElement = None
# reference to BackboneElement
self.valueNarrative = None
# reference to Narrative
self.valueAnnotation = None
# reference to Annotation
self.valueAttachment = None
# reference to Attachment
self.valueIdentifier = None
# reference to Identifier
self.valueCodeableConcept = None
# reference to CodeableConcept
self.valueCoding = None
# reference to Coding
self.valueQuantity = None
# reference to Quantity
self.valueDuration = None
# reference to Duration
self.valueSimpleQuantity = None
# reference to Quantity
self.valueDistance = None
# reference to Distance
self.valueCount = None
# reference to Count
self.valueMoney = None
# reference to Money
self.valueAge = None
# reference to Age
self.valueRange = None
# reference to Range
self.valuePeriod = None
# reference to Period
self.valueRatio = None
# reference to Ratio
self.valueReference = None
# reference to Reference: identifier
self.valueSampledData = None
# reference to SampledData
self.valueSignature = None
# reference to Signature
self.valueHumanName = None
# reference to HumanName
self.valueAddress = None
# reference to Address
self.valueContactPoint = None
# reference to ContactPoint
self.valueTiming = None
# reference to Timing
self.valueMeta = None
# reference to Meta
self.valueElementDefinition = None
# reference to ElementDefinition
self.valueContactDetail = None
# reference to ContactDetail
self.valueContributor = None
# reference to Contributor
self.valueDosage = None
# reference to Dosage
self.valueRelatedArtifact = None
# reference to RelatedArtifact
self.valueUsageContext = None
# reference to UsageContext
self.valueDataRequirement = None
# reference to DataRequirement
self.valueParameterDefinition = None
# reference to ParameterDefinition
self.valueTriggerDefinition = None
# reference to TriggerDefinition
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Signature',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueSignature'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Task_Output',
'child_variable': 'valueReference'},
{'parent_entity': 'BackboneElement',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueBackboneElement'},
{'parent_entity': 'RelatedArtifact',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueRelatedArtifact'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueSimpleQuantity'},
{'parent_entity': 'ContactPoint',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueContactPoint'},
{'parent_entity': 'Extension',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueExtension'},
{'parent_entity': 'Age',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueAge'},
{'parent_entity': 'Meta',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueMeta'},
{'parent_entity': 'Dosage',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueDosage'},
{'parent_entity': 'TriggerDefinition',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueTriggerDefinition'},
{'parent_entity': 'Distance',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueDistance'},
{'parent_entity': 'Coding',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueCoding'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueCodeableConcept'},
{'parent_entity': 'ElementDefinition',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueElementDefinition'},
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valuePeriod'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueIdentifier'},
{'parent_entity': 'DataRequirement',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueDataRequirement'},
{'parent_entity': 'SampledData',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueSampledData'},
{'parent_entity': 'Element',
'parent_variable': 'id',
'child_entity': 'Task_Output',
'child_variable': 'valueElement'},
{'parent_entity': 'HumanName',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueHumanName'},
{'parent_entity': 'Money',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueMoney'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueQuantity'},
{'parent_entity': 'ContactDetail',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueContactDetail'},
{'parent_entity': 'Attachment',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueAttachment'},
{'parent_entity': 'Count',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueCount'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'type'},
{'parent_entity': 'Range',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueRange'},
{'parent_entity': 'Timing',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueTiming'},
{'parent_entity': 'Duration',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueDuration'},
{'parent_entity': 'Narrative',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueNarrative'},
{'parent_entity': 'ParameterDefinition',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueParameterDefinition'},
{'parent_entity': 'Annotation',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueAnnotation'},
{'parent_entity': 'Ratio',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueRatio'},
{'parent_entity': 'UsageContext',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueUsageContext'},
{'parent_entity': 'Address',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueAddress'},
{'parent_entity': 'Contributor',
'parent_variable': 'object_id',
'child_entity': 'Task_Output',
'child_variable': 'valueContributor'},
]
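# Illustrative only (a sketch, assuming fhirbase's set_attributes copies
# matching keys from the dict onto the instance):
#     task_output = Task_Output({'type': {'text': 'result'}, 'valueString': 'done'})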
# --- next file ---
__version__ = '0.9.1.1'
__gui__ = True  # global option to enable/disable graphics

# --- next file (repo: velocist/TS4CheatsInfo) ---
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\carry\carry_elements.py
# Compiled at: 2018-05-18 03:28:16
# Size of source mod 2**32: 44951 bytes
import functools
from animation import ClipEventType
from animation.animation_utils import flush_all_animations, disable_asm_auto_exit
from animation.arb import Arb
from animation.arb_element import distribute_arb_element
from carry.carry_tuning import CarryPostureStaticTuning
from carry.carry_utils import hand_to_track, track_to_hand, SCRIPT_EVENT_ID_START_CARRY, SCRIPT_EVENT_ID_STOP_CARRY
from element_utils import build_element, build_critical_section, must_run, build_critical_section_with_finally
from interactions import ParticipantType, ParticipantTypeSingleSim
from interactions.aop import AffordanceObjectPair
from interactions.context import QueueInsertStrategy, InteractionContext
from postures import PostureTrack
from postures.context import PostureContext
from postures.posture_specs import PostureSpecVariable, PostureOperation, PostureAspectBody, PostureAspectSurface, SURFACE_TARGET_INDEX, SURFACE_SLOT_TYPE_INDEX, SURFACE_INDEX
from postures.transition import PostureTransition
from sims4.log import StackVar
from sims4.tuning.tunable import HasTunableFactory, AutoFactoryInit, HasTunableSingletonFactory, TunableEnumEntry, TunableVariant, TunableFactory, TunableTuple, TunablePackSafeReference
from singletons import DEFAULT
import element_utils, elements, services, sims4.log, sims4.resources
from postures.posture_state import PostureState
logger = sims4.log.Logger('Carry', default_owner='rmccord')
def _create_enter_carry_posture(sim, posture_state, carry_target, track):
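    # Builds the posture state for picking up carry_target on the given track,
    # returning (new_posture_state, new_posture, source AOP, var_map).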
var_map = {PostureSpecVariable.CARRY_TARGET: carry_target,
PostureSpecVariable.HAND: track_to_hand(track),
PostureSpecVariable.POSTURE_TYPE_CARRY_OBJECT: carry_target.get_carry_object_posture()}
pick_up_operation = PostureOperation.PickUpObject(PostureSpecVariable.POSTURE_TYPE_CARRY_OBJECT, PostureSpecVariable.CARRY_TARGET)
new_source_aop = pick_up_operation.associated_aop(sim, var_map)
new_posture_spec = pick_up_operation.apply((posture_state.get_posture_spec(var_map)), enter_carry_while_holding=True)
if new_posture_spec is None:
raise RuntimeError('[rmccord] Failed to create new_posture_spec in enter_carry_while_holding!')
new_posture_state = PostureState(sim, posture_state, new_posture_spec, var_map)
new_posture = new_posture_state.get_aspect(track)
from carry.carry_postures import CarryingNothing
if new_posture is None or isinstance(new_posture, CarryingNothing):
raise RuntimeError('[rmccord] Failed to create a valid new_posture ({}) from new_posture_state ({}) in enter_carry_while_holding!'.format(new_posture, new_posture_state))
new_posture.external_transition = True
return (
new_posture_state, new_posture, new_source_aop, var_map)
def enter_carry_while_holding(si, obj=None, carry_obj_participant_type=None, callback=None, create_si_fn=DEFAULT, sim_participant_type=ParticipantType.Actor, target_participant_type=None, owning_affordance=DEFAULT, carry_track_override=None, sequence=None, carry_sim=DEFAULT, track=DEFAULT, asm_context=None, priority_override=None, target_override=None):
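    # Wraps `sequence` so that when the animation fires the START_CARRY x-event,
    # the sim transitions into a carry posture for the object, and an owning
    # interaction (e.g. a put-down) is pushed to take ownership of the carry.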
sim = si.get_participant(sim_participant_type) if carry_sim is DEFAULT else carry_sim
if target_override is None:
target = si.get_participant(target_participant_type) if target_participant_type is not None else None
else:
target = target_override
context = si.context.clone_for_sim(sim, insert_strategy=(QueueInsertStrategy.NEXT))
if priority_override is not None:
context.priority = priority_override
if carry_track_override is not None:
track = carry_track_override
if track is DEFAULT:
track = si.carry_track
if track is None:
raise RuntimeError("[rmccord] enter_carry_while_holding: Interaction {} does not have a carry_track, which means its animation tuning doesn't have a carry target or create target specified in object editor or the posture manifest from the swing graph does not require a specific object. {}".format(si, StackVar(('process',
'_auto_constraints'))))
if create_si_fn is DEFAULT:
if owning_affordance is None:
create_si_fn = None
if create_si_fn is DEFAULT:
if owning_affordance is DEFAULT:
raise AssertionError("[rmccord] No create_si_fn was provided and we don't know how to make one.")
def create_si_fn():
context.carry_target = obj
aop = AffordanceObjectPair(owning_affordance, target, owning_affordance, None)
return (aop, context)
def set_up_transition_gen(timeline):
nonlocal obj
nonlocal sequence
if carry_obj_participant_type is not None:
obj = si.get_participant(carry_obj_participant_type)
if obj is None:
raise ValueError('[rmccord] Attempt to perform an enter carry while holding with None as the carried object. SI: {}'.format(si))
else:
new_posture_state, new_posture, new_source_aop, var_map = _create_enter_carry_posture(sim, sim.posture_state, obj, track)
if obj.is_sim:
target_posture_state = new_posture.set_target_linked_posture_data()
else:
target_posture_state = None
got_callback = False
def event_handler_enter_carry(event_data):
nonlocal got_callback
if got_callback:
logger.warn('Animation({}) calling to start a carry multiple times', event_data.event_data.get('clip_name'))
return
got_callback = True
arb = Arb()
locked_params = new_posture.get_locked_params(None)
old_carry_posture = sim.posture_state.get_aspect(track)
if old_carry_posture is not None:
old_carry_posture.append_exit_to_arb(arb, new_posture_state, new_posture, var_map)
new_posture.append_transition_to_arb(arb, old_carry_posture, locked_params=locked_params,
in_xevt_handler=True)
distribute_arb_element(arb)
if asm_context is not None:
asm_context.register_event_handler(event_handler_enter_carry, handler_type=(ClipEventType.Script), handler_id=SCRIPT_EVENT_ID_START_CARRY, tag='enter_carry')
else:
si.store_event_handler(event_handler_enter_carry, handler_id=SCRIPT_EVENT_ID_START_CARRY)
def maybe_do_transition_gen(timeline):
def push_si_gen(timeline):
context = InteractionContext(sim, (InteractionContext.SOURCE_POSTURE_GRAPH), (si.priority if priority_override is None else priority_override),
run_priority=(si.run_priority if priority_override is None else priority_override),
insert_strategy=(QueueInsertStrategy.FIRST),
must_run_next=True,
group_id=(si.group_id))
result = new_source_aop.interaction_factory(context)
if not result:
return result
source_interaction = result.interaction
new_posture.source_interaction = source_interaction
owning_interaction = None
if create_si_fn is not None:
aop, context = create_si_fn()
if aop is not None:
if context is not None:
if aop.test(context):
result = aop.interaction_factory(context)
if result:
owning_interaction = result.interaction
if owning_interaction is None:
si.acquire_posture_ownership(new_posture)
yield from source_interaction.run_direct_gen(timeline)
else:
owning_interaction.acquire_posture_ownership(new_posture)
aop.execute_interaction(owning_interaction)
new_source_aop.execute_interaction(source_interaction)
if target_posture_state is not None:
yield from new_posture.kickstart_linked_carried_posture_gen(timeline)
return result
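                # Decompiler artifact: the unreachable `if False: yield None`
                # blocks in this module keep these functions generators, as in
                # the original bytecode.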
if False:
yield None
def call_callback(_):
if callback is not None:
callback(new_posture, new_posture.source_interaction)
if got_callback:
if target_posture_state is not None:
obj.posture_state = target_posture_state
result = yield from element_utils.run_child(timeline, must_run([
PostureTransition(new_posture, new_posture_state, context, var_map), push_si_gen, call_callback]))
return result
return True
if False:
yield None
sequence = disable_asm_auto_exit(sim, sequence)
with si.cancel_deferred((si,)):
yield from element_utils.run_child(timeline, must_run(build_critical_section(build_critical_section(sequence, flush_all_animations), maybe_do_transition_gen)))
if False:
yield None
return build_element(set_up_transition_gen)
def _create_exit_carry_posture(sim, target, interaction, use_posture_animations, preserve_posture=None):
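    # Builds the posture state and transition for putting `target` down (onto
    # a surface slot if the current posture spec has one); returns a tuple of
    # Nones on failure.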
failure_result = (None, None, None, None, None)
slot_manifest = interaction.slot_manifest
old_carry_posture = sim.posture_state.get_carry_posture(target)
if old_carry_posture is None:
return failure_result
spec_surface = sim.posture_state.spec[SURFACE_INDEX]
has_slot_surface = spec_surface is not None and spec_surface[SURFACE_SLOT_TYPE_INDEX] is not None
if not target.transient:
if has_slot_surface:
put_down_operation = PostureOperation.PutDownObjectOnSurface(PostureSpecVariable.POSTURE_TYPE_CARRY_NOTHING, spec_surface[SURFACE_TARGET_INDEX], spec_surface[SURFACE_SLOT_TYPE_INDEX], PostureSpecVariable.CARRY_TARGET)
else:
put_down_operation = PostureOperation.PutDownObject(PostureSpecVariable.POSTURE_TYPE_CARRY_NOTHING, PostureSpecVariable.CARRY_TARGET)
var_map = {PostureSpecVariable.CARRY_TARGET: target,
PostureSpecVariable.HAND: track_to_hand(old_carry_posture.track),
PostureSpecVariable.POSTURE_TYPE_CARRY_NOTHING: CarryPostureStaticTuning.POSTURE_CARRY_NOTHING,
PostureSpecVariable.SLOT: slot_manifest,
PostureSpecVariable.SLOT_TEST_DEFINITION: interaction.create_target}
current_spec = sim.posture_state.get_posture_spec(var_map)
if current_spec is None:
if preserve_posture is None:
logger.warn('Failed to get posture spec for var_map: {} for {}', sim.posture_state, var_map)
return failure_result
new_posture_spec = put_down_operation.apply(current_spec)
if new_posture_spec is None:
if preserve_posture is None:
logger.warn('Failed to apply put_down_operation: {}', put_down_operation)
return failure_result
if not new_posture_spec.validate_destination((new_posture_spec,), var_map, interaction.affordance, sim):
if preserve_posture is None:
logger.warn('Failed to validate put down spec {} with var map {}', new_posture_spec, var_map)
return failure_result
carry_posture_overrides = {}
if preserve_posture is not None:
carry_posture_overrides[preserve_posture.track] = preserve_posture
new_posture_state = PostureState(sim, (sim.posture_state), new_posture_spec, var_map, carry_posture_overrides=carry_posture_overrides)
new_posture = new_posture_state.get_aspect(old_carry_posture.track)
new_posture.source_interaction = interaction.super_interaction
new_posture.external_transition = not use_posture_animations
posture_context = PostureContext(interaction.context.source, interaction.priority, None)
transition = PostureTransition(new_posture, new_posture_state, posture_context, var_map, locked_params=(interaction.locked_params))
transition.must_run = True
return (
old_carry_posture, new_posture, new_posture_state, transition, var_map)
def exit_carry_while_holding(interaction, callback=None, sequence=None, sim_participant_type=ParticipantType.Actor, use_posture_animations=False, carry_system_target=None, target=DEFAULT, arb=None):
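    # Wraps `sequence` so that the STOP_CARRY x-event from the animation exits
    # the carry posture; with use_posture_animations the posture transition
    # plays its own animations instead of relying on the x-event.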
si = interaction.super_interaction
sim = interaction.get_participant(sim_participant_type)
target = interaction.carry_target or interaction.target if target is DEFAULT else target
def set_up_transition_gen(timeline):
old_carry_posture, new_posture, _, transition, var_map = _create_exit_carry_posture(sim, target, interaction, use_posture_animations)
if transition is None:
yield from element_utils.run_child(timeline, sequence)
return
elif arb is None:
register_event = functools.partial((interaction.store_event_handler), handler_id=SCRIPT_EVENT_ID_STOP_CARRY)
else:
register_event = functools.partial((arb.register_event_handler), handler_id=SCRIPT_EVENT_ID_STOP_CARRY)
exited_carry = False
if not use_posture_animations:
def event_handler_exit_carry(event_data):
nonlocal exited_carry
exited_carry = True
arb = Arb()
old_carry_posture.append_exit_to_arb(arb, None, new_posture, var_map, exit_while_holding=True)
new_posture.append_transition_to_arb(arb, old_carry_posture, in_xevt_handler=True)
distribute_arb_element(arb, master=sim)
register_event(event_handler_exit_carry)
if callback is not None:
register_event(callback)
def maybe_do_transition(timeline):
nonlocal transition
_, _, _, new_transition, _ = _create_exit_carry_posture(sim, target, interaction, use_posture_animations, preserve_posture=new_posture)
if new_transition is not None:
transition = new_transition
if not use_posture_animations:
if not exited_carry:
event_handler_exit_carry(None)
if callback is not None:
callback()
if use_posture_animations or exited_carry:
interaction_target_was_target = False
si_target_was_target = False
if old_carry_posture.target_is_transient:
if interaction.target == target:
interaction_target_was_target = True
interaction.set_target(None)
if si.target == target:
si_target_was_target = True
si.set_target(None)
if carry_system_target is not None:
old_carry_posture.carry_system_target = carry_system_target
def do_transition(timeline):
result = yield from element_utils.run_child(timeline, transition)
if result:
if target.is_sim:
body_posture_type = sim.posture_state.spec.body.posture_type
if not body_posture_type.multi_sim:
post_transition_spec = sim.posture_state.spec.clone(body=(PostureAspectBody((body_posture_type, None))),
surface=(PostureAspectSurface((None, None, None))))
post_posture_state = PostureState(sim, sim.posture_state, post_transition_spec, var_map)
post_posture_state.body.source_interaction = sim.posture.source_interaction
post_transition = PostureTransition(post_posture_state.body, post_posture_state, sim.posture.posture_context, var_map)
post_transition.must_run = True
yield from element_utils.run_child(timeline, post_transition)
interaction_target_was_target = False
si_target_was_target = False
new_posture.source_interaction = None
return True
return False
if False:
yield None
def post_transition(_):
if interaction_target_was_target:
interaction.set_target(target)
if si_target_was_target:
si.set_target(target)
if carry_system_target is not None:
old_carry_posture.carry_system_target = None
yield from element_utils.run_child(timeline, must_run(build_critical_section_with_finally(do_transition, post_transition)))
if False:
yield None
new_sequence = disable_asm_auto_exit(sim, sequence)
yield from element_utils.run_child(timeline, build_critical_section(build_critical_section(new_sequence, flush_all_animations), maybe_do_transition))
if False:
yield None
return build_element(set_up_transition_gen)
def swap_carry_while_holding(interaction, original_carry_target, new_carry_object, callback=None, sequence=None, sim_participant_type=ParticipantType.Actor, carry_system_target=None):
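    # Exits the carry of original_carry_target and enters a carry of
    # new_carry_object on the same track, driven by a single START_CARRY
    # x-event from the wrapped animation.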
si = interaction.super_interaction
sim = interaction.get_participant(sim_participant_type)
def set_up_transition(timeline):
original_carry_posture, carry_nothing_posture, carry_nothing_posture_state, transition_to_carry_nothing, carry_nothing_var_map = _create_exit_carry_posture(sim, original_carry_target, interaction, False)
if transition_to_carry_nothing is None:
return False
final_posture_state, final_posture, final_source_aop, final_var_map = _create_enter_carry_posture(sim, carry_nothing_posture_state, new_carry_object, original_carry_posture.track)
got_callback = False
def event_handler_swap_carry(event_data):
nonlocal got_callback
if got_callback:
logger.warn('Animation({}) calling to start a carry multiple times', event_data.event_data.get('clip_name'))
return
got_callback = True
arb_exit = Arb()
original_carry_posture.append_exit_to_arb(arb_exit, None, carry_nothing_posture, carry_nothing_var_map, exit_while_holding=True)
carry_nothing_posture.append_transition_to_arb(arb_exit, original_carry_posture, in_xevt_handler=True)
distribute_arb_element(arb_exit)
original_carry_posture.target.transient = True
original_carry_posture.target.clear_parent(sim.transform, sim.routing_surface)
original_carry_posture.target.remove_from_client()
arb_enter = Arb()
locked_params = final_posture.get_locked_params(None)
if carry_nothing_posture is not None:
carry_nothing_posture.append_exit_to_arb(arb_enter, final_posture_state, final_posture, final_var_map)
final_posture.append_transition_to_arb(arb_enter, carry_nothing_posture, locked_params=locked_params,
in_xevt_handler=True)
distribute_arb_element(arb_enter)
interaction.store_event_handler(event_handler_swap_carry, handler_id=SCRIPT_EVENT_ID_START_CARRY)
if callback is not None:
interaction.store_event_handler(callback, handler_id=SCRIPT_EVENT_ID_START_CARRY)
def maybe_do_transition(timeline):
def push_si(_):
context = InteractionContext(sim, (InteractionContext.SOURCE_POSTURE_GRAPH),
(si.priority),
run_priority=(si.run_priority),
insert_strategy=(QueueInsertStrategy.NEXT),
must_run_next=True,
group_id=(si.group_id))
result = final_source_aop.interaction_factory(context)
if not result:
return result
final_source_interaction = result.interaction
si.acquire_posture_ownership(final_posture)
yield from final_source_interaction.run_direct_gen(timeline)
final_posture.source_interaction = final_source_interaction
return result
if False:
yield None
if not got_callback:
event_handler_swap_carry(None)
if callback is not None:
callback()
if got_callback:
if original_carry_posture.target_is_transient:
if interaction.target == original_carry_target:
interaction_target_was_target = True
interaction.set_target(None)
else:
interaction_target_was_target = False
if si.target == original_carry_target:
si_target_was_target = True
si.set_target(None)
else:
si_target_was_target = False
else:
interaction_target_was_target = False
si_target_was_target = False
if carry_system_target is not None:
original_carry_posture.carry_system_target = carry_system_target
def do_transition(timeline):
nonlocal interaction_target_was_target
nonlocal si_target_was_target
result = yield from element_utils.run_child(timeline, transition_to_carry_nothing)
if not result:
return False
interaction_target_was_target = False
si_target_was_target = False
carry_nothing_posture.source_interaction = None
return True
if False:
yield None
def post_transition(_):
if interaction_target_was_target:
interaction.set_target(original_carry_target)
if si_target_was_target:
si.set_target(original_carry_target)
if carry_system_target is not None:
original_carry_posture.carry_system_target = None
exit_carry_result = yield from element_utils.run_child(timeline, must_run(build_critical_section_with_finally(do_transition, post_transition)))
if not exit_carry_result:
raise RuntimeError('[maxr] Failed to exit carry: {}'.format(original_carry_posture))
if got_callback:
context = si.context.clone_for_sim(sim)
yield from element_utils.run_child(timeline, (
PostureTransition(final_posture, final_posture_state, context, final_var_map), push_si))
if False:
yield None
new_sequence = disable_asm_auto_exit(sim, sequence)
yield from element_utils.run_child(timeline, build_critical_section(build_critical_section(new_sequence, flush_all_animations), maybe_do_transition))
if False:
yield None
return (
set_up_transition,)
class EnterCarryWhileHolding(elements.ParentElement, HasTunableFactory, AutoFactoryInit):
class TrackOverrideExplicit(HasTunableSingletonFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'carry_track': TunableEnumEntry(description='\n Which hand to carry the object in.\n ',
tunable_type=PostureTrack,
default=(PostureTrack.RIGHT),
invalid_enums=(
PostureTrack.BODY,))}
def get_override(self, *args, **kwargs):
return self.carry_track
class TrackOverrideHandedness(HasTunableSingletonFactory, AutoFactoryInit):
def get_override(self, interaction, sim_participant, *args, **kwargs):
carry_participant = interaction.get_participant(sim_participant)
if carry_participant is None:
return
hand = carry_participant.get_preferred_hand()
return hand_to_track(hand)
NONE = 1
OBJECT_TO_BE_CARRIED = 2
PARTICIPANT_TYPE = 3
FACTORY_TUNABLES = {'carry_obj_participant_type':TunableEnumEntry(description='\n The object that will be carried.\n ',
tunable_type=ParticipantType,
default=ParticipantType.CarriedObject),
'sim_participant_type':TunableEnumEntry(description='\n The Sim that will get a new carry.\n ',
tunable_type=ParticipantTypeSingleSim,
default=ParticipantTypeSingleSim.Actor),
'target':TunableVariant(description='\n Specify what to use as the target of\n the owning affordance.\n ',
object_to_be_carried=TunableTuple(description='\n Target is the object that WILL be carried.\n ',
locked_args={'target_type': OBJECT_TO_BE_CARRIED}),
none=TunableTuple(description='\n Target is None\n ',
locked_args={'target_type': NONE}),
participant_type=TunableTuple(description='\n Target is the specified participant of THIS interaction.\n \n This is necessary if we need to target another participant\n when we push the owning affordance\n ',
participant=TunableEnumEntry(tunable_type=ParticipantType,
default=(ParticipantType.CarriedObject)),
locked_args={'target_type': PARTICIPANT_TYPE}),
default='object_to_be_carried'),
'owning_affordance':TunablePackSafeReference(description='\n The interaction that will be pushed that will own the carry\n state (e.g. a put down).\n ',
manager=services.get_instance_manager(sims4.resources.Types.INTERACTION),
allow_none=True),
'carry_track_override':TunableVariant(description='\n Specify the carry track, instead of using the carry of the SI.\n ',
explicit=TrackOverrideExplicit.TunableFactory(),
handedness=TrackOverrideHandedness.TunableFactory(),
default='disabled',
locked_args={'disabled': None})}
def __init__(self, interaction, *args, sequence=(), **kwargs):
        super().__init__(*args, **kwargs)
self.interaction = interaction
self.sequence = sequence
def _run(self, timeline):
carry_track_override = self.carry_track_override.get_override(self.interaction, self.sim_participant_type) if self.carry_track_override is not None else None
target = self.target
if target.target_type == EnterCarryWhileHolding.NONE:
target_participant_type = None
else:
if target.target_type == EnterCarryWhileHolding.OBJECT_TO_BE_CARRIED:
target_participant_type = self.carry_obj_participant_type
else:
if target.target_type == EnterCarryWhileHolding.PARTICIPANT_TYPE:
target_participant_type = target.participant
carry_element = enter_carry_while_holding((self.interaction), sequence=(self.sequence),
carry_obj_participant_type=(self.carry_obj_participant_type),
sim_participant_type=(self.sim_participant_type),
target_participant_type=target_participant_type,
owning_affordance=(self.owning_affordance),
carry_track_override=carry_track_override)
return timeline.run_child(carry_element)
class TunableExitCarryWhileHolding(TunableFactory):
FACTORY_TYPE = staticmethod(exit_carry_while_holding)
def __init__(self, *args, description='Exit the carry for the target or carry_target of an interaction. The animations played during the interaction should exit the carry via an XEVT.', **kwargs):
        super().__init__(*args, description=description, sim_participant_type=TunableEnumEntry(description='\n            The Sim that will exit a carry.\n            ',
tunable_type=ParticipantType,
default=(ParticipantType.Actor)), **kwargs)
class TransferCarryWhileHolding(elements.ParentElement, HasTunableFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'enter_carry_while_holding':EnterCarryWhileHolding.TunableFactory(),
'exit_carry_while_holding':TunableExitCarryWhileHolding()}
def __init__(self, interaction, *args, sequence=(), **kwargs):
        super().__init__(*args, **kwargs)
self.interaction = interaction
self.sequence = sequence
def _run(self, timeline):
obj = self.interaction.get_participant(self.enter_carry_while_holding.carry_obj_participant_type)
source_sim = self.interaction.get_participant(self.exit_carry_while_holding.sim_participant_type)
target_sim = self.interaction.get_participant(self.enter_carry_while_holding.sim_participant_type)
def _add_reservation_clobberer(_):
obj.add_reservation_clobberer(source_sim, target_sim)
def _remove_reservation_clobberer(_):
obj.remove_reservation_clobberer(source_sim, target_sim)
sequence = self.enter_carry_while_holding((self.interaction), sequence=(self.sequence))
sequence = self.exit_carry_while_holding((self.interaction), sequence=sequence)
sequence = element_utils.build_critical_section_with_finally(_add_reservation_clobberer, sequence, _remove_reservation_clobberer)
        return timeline.run_child(sequence)

# --- next file: goodrich/python_primer/c119.py ---
"""
C 1.19
---------------------------------
Problem Statement : Demonstrate how to use Python’s list comprehension syntax to produce
the list [ a , b , c , ..., z ], but without having to type all 26 such
characters literally.
Author : Saurabh
"""
print([chr(x + 97) for x in range(26)])
# --- next file (Kaggle-style MNIST CNN script) ---
import numpy as np
import pandas as pd
import time
mnist = pd.read_csv("../input/train.csv")
mnist.head()
y_train = mnist.label.values
x_train = mnist.drop('label',axis=1)
x_train = (x_train / 255.0).values
x_train = np.reshape(x_train,(42000,1,28,28))
x_train.shape
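# Shape is now (42000, 1, 28, 28): one greyscale channel first, matching
# K.set_image_data_format('channels_first') below.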
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import MaxPooling2D
from keras.optimizers import SGD
from keras import backend as K
K.set_image_data_format('channels_first')
IMG_SIZE = 28
NUM_CLASSES = 10
def cnn_model():
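    # Three conv blocks (32 -> 64 -> 128 filters), each followed by max
    # pooling and dropout, then a dense softmax classifier head.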
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
input_shape=(1, IMG_SIZE, IMG_SIZE),
activation='relu'))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), padding='same',
activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(128, (3, 3), padding='same',
activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES, activation='softmax'))
return model
model = cnn_model()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
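# sparse_categorical_crossentropy takes integer class labels (0-9) directly,
# so y_train does not need to be one-hot encoded.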
model.fit(x_train, y_train, epochs=3)
test = pd.read_csv("../input/test.csv")
test.describe()
x_test = (test / 255.0).values
x_test = np.reshape(x_test,(28000,1,28,28))
x_test.shape
predictions = model.predict(x_test)
import matplotlib.pyplot as plt
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
    plt.grid(False)  # the string 'off' is truthy and would actually enable the grid
plt.imshow(np.reshape(x_test[i],(28,28)), cmap=plt.cm.binary)
predicted_label = np.argmax(predictions[i])
#true_label = y_test[i]
#if predicted_label == true_label:
# color = 'green'
#else:
# color = 'red'
plt.xlabel("{} ".format(predicted_label),
color='green')
import os  # save_weights does not create missing directories
os.makedirs("models", exist_ok=True)
model_name = "digit_clf_model_" + time.strftime("%Y-%m-%d-%H%M") + ".h5"
model.save_weights("models/" + model_name)
# f=open("submissions.csv","w")
# # Write headers
# f.write("ImageId,Label\n")
# for key,p in enumerate(predictions):
# i = key+1
# line = str(i)+","+str(np.argmax(p))+"\n"
# f.write(line)
# f.close()
# sub = pd.read_csv("submissions.csv")
# sub.head()
# --- next file: openstack_dashboard/dashboards/project/data_processing/jobs/tests.py ---
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:project:data_processing.jobs:index')
DETAILS_URL = reverse(
'horizon:project:data_processing.jobs:details', args=['id'])
class DataProcessingJobTests(test.TestCase):
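    # Each test records the expected sahara API calls with mox, replays them,
    # then exercises the view and asserts on the rendered response.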
@test.create_stubs({api.sahara: ('job_list',)})
def test_index(self):
api.sahara.job_list(IsA(http.HttpRequest), {}) \
.AndReturn(self.jobs.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res,
'project/data_processing.jobs/jobs.html')
self.assertContains(res, 'Jobs')
self.assertContains(res, 'Name')
@test.create_stubs({api.sahara: ('job_get',)})
def test_details(self):
api.sahara.job_get(IsA(http.HttpRequest), IsA(unicode)) \
.AndReturn(self.jobs.list()[0])
self.mox.ReplayAll()
res = self.client.get(DETAILS_URL)
self.assertTemplateUsed(res,
'project/data_processing.jobs/details.html')
self.assertContains(res, 'pigjob')
@test.create_stubs({api.sahara: ('job_list',
'job_delete')})
def test_delete(self):
job = self.jobs.first()
api.sahara.job_list(IsA(http.HttpRequest), {}) \
.AndReturn(self.jobs.list())
api.sahara.job_delete(IsA(http.HttpRequest), job.id)
self.mox.ReplayAll()
form_data = {'action': 'jobs__delete__%s' % job.id}
res = self.client.post(INDEX_URL, form_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.assertMessageCount(success=1)
# --- next file (repo: OpenDataServices/flatten-tool) ---
"""
This file contains code that takes an instance of a JSON file as input (not a
JSON schema, for that see schema.py).
"""
import codecs
import copy
import os
import tempfile
import uuid
from collections import OrderedDict
from decimal import Decimal
from warnings import warn
import BTrees.OOBTree
import ijson
import transaction
import xmltodict
import zc.zlibstorage
import ZODB.FileStorage
from flattentool.i18n import _
from flattentool.input import path_search
from flattentool.schema import make_sub_sheet_name
from flattentool.sheet import PersistentSheet
BASIC_TYPES = [str, bool, int, Decimal, type(None)]
class BadlyFormedJSONError(ValueError):
pass
class BadlyFormedJSONErrorUTF8(BadlyFormedJSONError):
pass
def sheet_key_field(sheet, key):
if key not in sheet:
sheet.append(key)
return key
def sheet_key_title(sheet, key):
"""
    If the key has a corresponding title, return that. If it doesn't, add the key to the sheet and return it.
"""
if key in sheet.titles:
title = sheet.titles[key]
if title not in sheet:
sheet.append(title)
return title
else:
if key not in sheet:
sheet.append(key)
return key
def lists_of_dicts_paths(xml_dict):
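    # Yields each key path (as a tuple) at which a list of dicts appears
    # anywhere in the parsed XML dict.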
for key, value in xml_dict.items():
if isinstance(value, list) and value and isinstance(value[0], dict):
yield (key,)
for x in value:
if isinstance(x, dict):
for path in lists_of_dicts_paths(x):
yield (key,) + path
elif isinstance(value, dict):
for path in lists_of_dicts_paths(value):
yield (key,) + path
def dicts_to_list_of_dicts(lists_of_dicts_paths_set, xml_dict, path=()):
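    # Walks xml_dict in place, wrapping a lone dict in a one-element list
    # wherever the same path holds a list of dicts elsewhere in the file.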
for key, value in xml_dict.items():
if isinstance(value, list):
for x in value:
if isinstance(x, dict):
dicts_to_list_of_dicts(lists_of_dicts_paths_set, x, path + (key,))
elif isinstance(value, dict):
child_path = path + (key,)
dicts_to_list_of_dicts(lists_of_dicts_paths_set, value, child_path)
if child_path in lists_of_dicts_paths_set:
xml_dict[key] = [value]
def list_dict_consistency(xml_dict):
"""
For use with XML files opened with xmltodict.
If there is only one tag, xmltodict produces a dict. If there are
    multiple, xmltodict produces a list of dicts. This function replaces
dicts with lists of dicts, if there exists a list of dicts for the same
path elsewhere in the file.
"""
lists_of_dicts_paths_set = set(lists_of_dicts_paths(xml_dict))
dicts_to_list_of_dicts(lists_of_dicts_paths_set, xml_dict)
class JSONParser(object):
# Named for consistency with schema.SchemaParser, but not sure it's the most appropriate name.
# Similarly with methods like parse_json_dict
def __init__(
self,
json_filename=None,
root_json_dict=None,
schema_parser=None,
root_list_path=None,
root_id="ocid",
use_titles=False,
xml=False,
id_name="id",
filter_field=None,
filter_value=None,
preserve_fields=None,
remove_empty_schema_columns=False,
rollup=False,
truncation_length=3,
persist=False,
):
if persist:
# Use temp directories in OS agnostic way
self.zodb_db_location = (
tempfile.gettempdir() + "/flattentool-" + str(uuid.uuid4())
)
# zlibstorage lowers disk usage by a lot at very small performance cost
zodb_storage = zc.zlibstorage.ZlibStorage(
ZODB.FileStorage.FileStorage(self.zodb_db_location)
)
self.db = ZODB.DB(zodb_storage)
else:
# If None, in memory storage is used.
self.db = ZODB.DB(None)
self.connection = self.db.open()
# ZODB root, only objects attached here will be persisted
root = self.connection.root
# OOBTree means a btree with keys and values are objects (including strings)
root.sheet_store = BTrees.OOBTree.BTree()
self.sub_sheets = {}
self.main_sheet = PersistentSheet(connection=self.connection, name="")
self.root_list_path = root_list_path
self.root_id = root_id
self.use_titles = use_titles
self.truncation_length = truncation_length
self.id_name = id_name
self.xml = xml
self.filter_field = filter_field
self.filter_value = filter_value
self.remove_empty_schema_columns = remove_empty_schema_columns
self.seen_paths = set()
self.persist = persist
if schema_parser:
# schema parser does not make sheets that are persistent,
# so use from_sheets which deep copies everything in it.
self.main_sheet = PersistentSheet.from_sheet(
schema_parser.main_sheet, self.connection
)
for sheet_name, sheet in list(self.sub_sheets.items()):
self.sub_sheets[sheet_name] = PersistentSheet.from_sheet(
sheet, self.connection
)
self.sub_sheets = copy.deepcopy(schema_parser.sub_sheets)
if remove_empty_schema_columns:
# Don't use columns from the schema parser
# (avoids empty columns)
self.main_sheet.columns = []
for sheet_name, sheet in list(self.sub_sheets.items()):
sheet.columns = []
self.schema_parser = schema_parser
else:
self.schema_parser = None
self.rollup = False
if rollup:
if schema_parser and len(schema_parser.rollup) > 0:
# If rollUp is present in the schema this takes precedence over direct input.
self.rollup = schema_parser.rollup
if isinstance(rollup, (list,)) and (
len(rollup) > 1 or (len(rollup) == 1 and rollup[0] is not True)
):
warn(_("Using rollUp values from schema, ignoring direct input."))
elif isinstance(rollup, (list,)):
if len(rollup) == 1 and os.path.isfile(rollup[0]):
# Parse file, one json path per line.
rollup_from_file = set()
with open(rollup[0]) as rollup_file:
for line in rollup_file:
line = line.strip()
rollup_from_file.add(line)
self.rollup = rollup_from_file
# Rollup args passed directly at the commandline
elif len(rollup) == 1 and rollup[0] is True:
warn(
_(
"No fields to rollup found (pass json path directly, as a list in a file, or via a schema)"
)
)
else:
self.rollup = set(rollup)
else:
warn(
_(
"Invalid value passed for rollup (pass json path directly, as a list in a file, or via a schema)"
)
)
if self.xml:
with codecs.open(json_filename, "rb") as xml_file:
top_dict = xmltodict.parse(
xml_file, force_list=(root_list_path,), force_cdata=True,
)
# AFAICT, this should be true for *all* XML files
assert len(top_dict) == 1
root_json_dict = list(top_dict.values())[0]
list_dict_consistency(root_json_dict)
json_filename = None
if json_filename is None and root_json_dict is None:
raise ValueError(
_("Either json_filename or root_json_dict must be supplied")
)
if json_filename is not None and root_json_dict is not None:
raise ValueError(
_("Only one of json_file or root_json_dict should be supplied")
)
if not json_filename:
if self.root_list_path is None:
self.root_json_list = root_json_dict
else:
self.root_json_list = path_search(
root_json_dict, self.root_list_path.split("/")
)
if preserve_fields:
# Extract fields to be preserved from input file (one path per line)
preserve_fields_all = []
preserve_fields_input = []
with open(preserve_fields) as preserve_fields_file:
for line in preserve_fields_file:
line = line.strip()
path_fields = line.rsplit("/", 1)
preserve_fields_all = (
preserve_fields_all + path_fields + [line.rstrip("/")]
)
preserve_fields_input = preserve_fields_input + [line.rstrip("/")]
self.preserve_fields = set(preserve_fields_all)
self.preserve_fields_input = set(preserve_fields_input)
try:
input_not_in_schema = set()
for field in self.preserve_fields_input:
if field not in self.schema_parser.flattened.keys():
input_not_in_schema.add(field)
warn(
_(
"You wanted to preserve the following fields which are not present in the supplied schema: {}"
).format(list(input_not_in_schema))
)
except AttributeError:
# no schema
pass
else:
self.preserve_fields = None
self.preserve_fields_input = None
if json_filename:
if self.root_list_path is None:
path = "item"
else:
path = root_list_path.replace("/", ".") + ".item"
json_file = codecs.open(json_filename, encoding="utf-8")
self.root_json_list = ijson.items(json_file, path, map_type=OrderedDict)
try:
self.parse()
except ijson.common.IncompleteJSONError as err:
raise BadlyFormedJSONError(*err.args)
except UnicodeDecodeError as err:
raise BadlyFormedJSONErrorUTF8(*err.args)
finally:
if json_filename:
json_file.close()
def parse(self):
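        # Flattens each top-level object into the main sheet, committing the
        # ZODB transaction periodically so memory use stays bounded.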
for num, json_dict in enumerate(self.root_json_list):
if json_dict is None:
# This is particularly useful for IATI XML, in order to not
# fall over on empty activity, e.g. <iati-activity/>
continue
self.parse_json_dict(json_dict, sheet=self.main_sheet)
            # Only persist every 2000 objects; persisting more often slows down
            # storing, and 2000 top-level objects are normally not too much to
            # hold in memory.
if num % 2000 == 0 and num != 0:
transaction.commit()
        # This commit could be removed, which would mean that up to 2000 objects
# could be stored in memory without anything being persisted.
transaction.commit()
if self.remove_empty_schema_columns:
# Remove sheets with no lines of data
for sheet_name, sheet in list(self.sub_sheets.items()):
if not sheet.lines:
del self.sub_sheets[sheet_name]
if self.preserve_fields_input:
nonexistent_input_paths = []
for field in self.preserve_fields_input:
if field not in self.seen_paths:
nonexistent_input_paths.append(field)
if len(nonexistent_input_paths) > 0:
warn(
_(
"You wanted to preserve the following fields which are not present in the input data: {}"
).format(nonexistent_input_paths)
)
def parse_json_dict(
self,
json_dict,
sheet,
json_key=None,
parent_name="",
flattened_dict=None,
parent_id_fields=None,
top_level_of_sub_sheet=False,
):
"""
Parse a json dictionary.
json_dict - the json dictionary
sheet - a sheet.Sheet object representing the resulting spreadsheet
json_key - the key that maps to this JSON dict, either directly to the dict, or to a dict that this list contains. Is None if this dict is contained in root_json_list directly.
"""
# Possibly main_sheet should be main_sheet_columns, but this is
# currently named for consistency with schema.py
if self.use_titles:
sheet_key = sheet_key_title
else:
sheet_key = sheet_key_field
parent_id_fields = copy.copy(parent_id_fields) or OrderedDict()
if flattened_dict is None:
flattened_dict = {}
top = True
else:
top = False
if parent_name == "" and self.filter_field and self.filter_value:
if self.filter_field not in json_dict:
return
if json_dict[self.filter_field] != self.filter_value:
return
if top_level_of_sub_sheet:
# Add the IDs for the top level of object in an array
for k, v in parent_id_fields.items():
if self.xml:
flattened_dict[sheet_key(sheet, k)] = v["#text"]
else:
flattened_dict[sheet_key(sheet, k)] = v
if self.root_id and self.root_id in json_dict:
parent_id_fields[sheet_key(sheet, self.root_id)] = json_dict[self.root_id]
if self.id_name in json_dict:
parent_id_fields[sheet_key(sheet, parent_name + self.id_name)] = json_dict[
self.id_name
]
for key, value in json_dict.items():
# Keep a unique list of all the JSON paths in the data that have been seen.
parent_path = parent_name.replace("/0", "")
full_path = parent_path + key
self.seen_paths.add(full_path)
            if self.preserve_fields:
                # If any preserved path falls under this parent, skip the
                # sibling fields that are not themselves preserved.
                siblings = False
                for field in self.preserve_fields:
                    if parent_path in field:
                        siblings = True
                if siblings and full_path not in self.preserve_fields:
                    continue
if type(value) in BASIC_TYPES:
if self.xml and key == "#text":
# Handle the text output from xmltodict
key = ""
parent_name = parent_name.strip("/")
flattened_dict[sheet_key(sheet, parent_name + key)] = value
elif hasattr(value, "items"):
self.parse_json_dict(
value,
sheet=sheet,
json_key=key,
parent_name=parent_name + key + "/",
flattened_dict=flattened_dict,
parent_id_fields=parent_id_fields,
)
elif hasattr(value, "__iter__"):
if all(type(x) in BASIC_TYPES for x in value):
# Check for an array of BASIC types
# TODO Make this check the schema
# TODO Error if the any of the values contain the separator
# TODO Support doubly nested arrays
flattened_dict[sheet_key(sheet, parent_name + key)] = ";".join(
map(str, value)
)
else:
if (
self.rollup and parent_name == ""
): # Rollup only currently possible to main sheet
if self.use_titles and not self.schema_parser:
warn(
_(
"Warning: No schema was provided so column headings are JSON keys, not titles."
)
)
if len(value) == 1:
for k, v in value[0].items():
if (
self.preserve_fields
and parent_name + key + "/" + k
not in self.preserve_fields
):
continue
if type(v) not in BASIC_TYPES:
raise ValueError(
_("Rolled up values must be basic types")
)
else:
if self.schema_parser:
# We want titles and there's a schema and rollUp is in it
if (
self.use_titles
and parent_name + key + "/0/" + k
in self.schema_parser.main_sheet.titles
):
flattened_dict[
sheet_key_title(
sheet, parent_name + key + "/0/" + k
)
] = v
# We want titles and there's a schema but rollUp isn't in it
# so the titles for rollup properties aren't in the main sheet
# so we need to try to get the titles from a subsheet
elif (
self.use_titles
and parent_name + key in self.rollup
and self.schema_parser.sub_sheet_titles.get(
(parent_name, key,)
)
in self.schema_parser.sub_sheets
):
relevant_subsheet = self.schema_parser.sub_sheets.get(
self.schema_parser.sub_sheet_titles.get(
(parent_name, key,)
)
)
if relevant_subsheet is not None:
rollup_field_title = sheet_key_title(
relevant_subsheet,
parent_name + key + "/0/" + k,
)
flattened_dict[
sheet_key(sheet, rollup_field_title)
] = v
# We don't want titles even though there's a schema
elif not self.use_titles and (
parent_name + key + "/0/" + k
in self.schema_parser.main_sheet
or parent_name + key in self.rollup
):
flattened_dict[
sheet_key(
sheet, parent_name + key + "/0/" + k
)
] = v
# No schema, so no titles
elif parent_name + key in self.rollup:
flattened_dict[
sheet_key(
sheet, parent_name + key + "/0/" + k
)
] = v
elif len(value) > 1:
for k in set(sum((list(x.keys()) for x in value), [])):
if (
self.preserve_fields
and parent_name + key + "/" + k
not in self.preserve_fields
):
continue
if (
self.schema_parser
and parent_name + key + "/0/" + k
in self.schema_parser.main_sheet
):
warn(
_(
'More than one value supplied for "{}". Could not provide rollup, so adding a warning to the relevant cell(s) in the spreadsheet.'
).format(parent_name + key)
)
flattened_dict[
sheet_key(sheet, parent_name + key + "/0/" + k)
] = _(
"WARNING: More than one value supplied, consult the relevant sub-sheet for the data."
)
elif parent_name + key in self.rollup:
warn(
_(
'More than one value supplied for "{}". Could not provide rollup, so adding a warning to the relevant cell(s) in the spreadsheet.'
).format(parent_name + key)
)
flattened_dict[
sheet_key(sheet, parent_name + key + "/0/" + k)
] = _(
"WARNING: More than one value supplied, consult the relevant sub-sheet for the data."
)
if (
self.use_titles
and self.schema_parser
and (parent_name, key,) in self.schema_parser.sub_sheet_titles
):
sub_sheet_name = self.schema_parser.sub_sheet_titles[
(parent_name, key,)
]
else:
sub_sheet_name = make_sub_sheet_name(
parent_name, key, truncation_length=self.truncation_length
)
if sub_sheet_name not in self.sub_sheets:
self.sub_sheets[sub_sheet_name] = PersistentSheet(
name=sub_sheet_name, connection=self.connection
)
for json_dict in value:
if json_dict is None:
continue
self.parse_json_dict(
json_dict,
sheet=self.sub_sheets[sub_sheet_name],
json_key=key,
parent_id_fields=parent_id_fields,
parent_name=parent_name + key + "/0/",
top_level_of_sub_sheet=True,
)
else:
raise ValueError(_("Unsupported type {}").format(type(value)))
if top:
sheet.append_line(flattened_dict)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.persist:
self.connection.close()
self.db.close()
os.remove(self.zodb_db_location)
os.remove(self.zodb_db_location + ".lock")
os.remove(self.zodb_db_location + ".index")
os.remove(self.zodb_db_location + ".tmp")
| StarcoderdataPython |
3323350 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_pynab
----------------------------------
Tests for `pynab` module.
"""
import unittest
from pprint import pformat
from pynab.server import Server
from pynab.db import db_session
import pynab.parts
from pynab import log
import regex
from pynab.categories import extract_features
class TestPynab(unittest.TestCase):
def setUp(self):
self.server = None
def test_connect(self):
self.server = Server()
self.server.connect()
self.assertTrue(self.server)
def test_capabilities(self):
self.test_connect()
print(self.server.connection.getcapabilities())
def test_fetch_headers(self):
self.test_connect()
groups = ['alt.binaries.teevee']
for group in groups:
(_, _, first, last, _) = self.server.connection.group(group)
for x in range(0, 40000, 20000):
y = x + 20000 - 1
parts = self.server.scan(group, last - y, last - x)
pynab.parts.save_all(parts)
def test_group_update(self):
import pynab.groups
pynab.groups.update('alt.binaries.teevee')
def test_request_process(self):
import pynab.requests
pynab.requests.process()
def test_update_pres(self):
from scripts.nzedb_pre_import import largeNzedbPre, nzedbPre
largeNzedbPre()
nzedbPre()
def test_process_binaries(self):
import pynab.binaries
pynab.binaries.process()
def test_process_releases(self):
import pynab.releases
pynab.releases.process()
def test_update_blacklist(self):
import pynab.util
pynab.util.update_blacklist()
def test_update_regex(self):
import pynab.util
pynab.util.update_regex()
def test_process_requests(self):
import pynab.requests
pynab.requests.process()
def test_quick_postproc(self):
import scripts.quick_postprocess
scripts.quick_postprocess.local_postprocess()
def test_process_ids(self):
import pynab.ids
pynab.ids.process('movie')
def test_remove_metablacks(self):
from pynab.db import MetaBlack
with db_session() as db:
db.query(MetaBlack).delete()
db.commit()
def test_search_releases(self):
from sqlalchemy_searchable import search
from pynab.db import Release
with db_session() as db:
q = db.query(Release)
q = search(q, 'engaged e06')
print(q.first().search_name)
def test_nzb_parse(self):
import pynab.nzbs
from pynab.db import NZB
with db_session() as db:
nzb = db.query(NZB).filter(NZB.id==1).one()
import pprint
pprint.pprint(pynab.nzbs.get_nzb_details(nzb))
def test_scrape_nzbsu(self):
import requests
import time
from bs4 import BeautifulSoup
url = 'https://api.nzb.su/api?apikey=4d901407e99ae6c942416585c8a44673'
ua = {'User-agent': 'CouchPotato 3.0.1'}
results = []
for category in [5020,5030,5040,5050,5060,5070,5080,2010,2020,2030,2040,2050,2060,2070,4010,4020,4030,1010,1020,1030,1050,1080,1090,1100,4050,3010,3020,3030,3040,3050,7010,7020,7030,6010,6020,6030,6040,6050,6060,6070,8010]:
data = requests.get(url + '&t=search&cat={}&o=json'.format(category), headers=ua).json()
if 'item' in data['channel']:
results.extend(data['channel']['item'])
with open('dog_releases.csv', 'w', encoding='utf-8') as f:
f.write('"r","name","name","category_id","name","name"\r\n')
# turn results into useful data
for i, result in enumerate(results):
try:
resp = requests.get(url + '&t=details&id={}'.format(result['attr'][3]['@attributes']['value']), headers=ua)
soup = BeautifulSoup(resp.text)
group = soup.find(attrs={'name':'group'})['value']
f.write('"{}","{}","{}","{}","{}","{}"\r\n'.format(i, result['title'], group, result['attr'][1]['@attributes']['value'], *result['category'].split(' > ')))
time.sleep(5)
except:
continue
def test_categorise(self):
import nltk
import regex
import csv
import random
import pprint
#def determine_category(name, group_name=''):
def load_data(filename):
with open(filename, encoding='utf-8') as f:
f.readline()
csvfile = csv.reader(f, delimiter=',', quotechar='"')
data = []
for line in csvfile:
features = extract_features(line[1])
features['group'] = line[2]
features['name'] = line[1]
data.append((features, line[3]))
random.shuffle(data)
return data
train_data = load_data('tagged_releases_train.csv')
test_data = load_data('tagged_releases_test.csv')
nzbsu_data = load_data('tagged_releases_test_nzbsu.csv')
train_set = train_data
test_set = test_data
nzbsu_set = nzbsu_data
classifier = nltk.NaiveBayesClassifier.train(train_set)
from pickle import dump
with open('release_categoriser.pkl', 'wb') as out:
dump(classifier, out, -1)
errors = []
for features, tag in nzbsu_set:
guess = classifier.classify(features)
if guess[:2] != tag[:2]:
errors.append((tag, guess, features))
for tag, guess, features in errors:
print('correct={} guess={} name={}'.format(tag, guess, features['name'].encode('utf-8')))
print(classifier.show_most_informative_features())
print('test: {}'.format(nltk.classify.accuracy(classifier, test_set)))
print('test: {}'.format(nltk.classify.accuracy(classifier, nzbsu_set)))
def test_load_and_categorise(self):
from pynab.db import db_session, Release, Group, windowed_query
from pickle import load
with open('release_categoriser.pkl', 'rb') as cat_file:
categoriser = load(cat_file)
with db_session() as db:
errors = []
i = 0
query = db.query(Release).join(Group)
count = query.count()
for result in windowed_query(query, Release.id, 500):
features = extract_features(result.name)
features['group'] = result.group.name
features['name'] = result.name
guess = categoriser.classify(features)
if guess[:2] != str(result.category_id)[:2]:
errors.append((result.category_id, guess, features))
i += 1
if i % 500 == 0:
print('{} - {:.3f}%'.format((i/count)*100, (1 - (len(errors) / i)) * 100))
for tag, guess, features in errors:
print('correct={} guess={} name={}'.format(tag, guess, features['name'].encode('utf-8')))
print('accuracy={}'.format(1 - (len(errors)/i)))
def tearDown(self):
try:
self.server.connection.quit()
except:
pass
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython
1718023 | <filename>main.py<gh_stars>0
import os
import requests
from pathlib import Path
from requests.exceptions import ConnectionError
test_results = {}
#
# CHECK ENVIRONMENT VARIABLES
#
print('--> Reading the environment variables')
print(f'ENVIRONMENT: {os.environ}')
test_results['environment'] = os.environ
#
# CHECK INPUT_FILE
#
try:
with open(os.environ['INPUT_FILE'], 'rb') as f:
print('--> Reading input file')
print(f'INPUT FILE: {f.read()}')
test_results['READ_INPUT_FILE'] = {'Success': True}
except Exception as e:
print('x-> Reading input file failed')
test_results['READ_INPUT_FILE'] = {'Success': False, 'Exception': e}
#
# CHECK OUTPUT FILE
#
try:
with open(os.environ['OUTPUT_FILE'], 'w') as f:
print('--> Writing to output file (contents: test)')
f.write('test')
with open(os.environ['OUTPUT_FILE'], 'r') as f:
print('--> Reading output file back and check')
print(f.read())
test_results['WRITE_READ_OUTPUT_FILE'] = {'Success': True}
except Exception as e:
print('x-> Reading or Writing output file failed')
test_results['WRITE_READ_OUTPUT_FILE'] = {'Success': False, 'Exception': e}
#
# CHECK TOKEN FILE
#
try:
with open(os.environ['TOKEN_FILE'], 'r') as f:
print('--> Reading token file')
print(f'TOKEN: {f.read()}')
test_results['READ_TOKEN_FILE'] = {'Success': True}
except Exception as e:
print('x-> Reading token file failed')
test_results['READ_TOKEN_FILE'] = {'Success': False, 'Exception': e}
#
# CHECK TEMPORARY VOLUME
#
print('--> Test temporary volume')
try:
temp_file = f'{os.environ["TEMPORARY_FOLDER"]}/test.txt'
with open(temp_file, 'w') as f:
print(f'--> Writing to temporary file: {temp_file}')
f.write('test')
test_results['TEMPORARY_VOLUME'] = {'Success': True}
except Exception as e:
print('x-> Writing to temporary folder failed')
test_results['TEMPORARY_VOLUME'] = {'Success': False, 'Exception': e}
print('--> Test that the temporary file is created')
try:
file_exists = Path(temp_file).exists()
print(f'FILE CREATED: {file_exists}')
test_results['TEMPORARY_VOLUME_FILE_EXISTS'] = {'Success': file_exists}
except Exception as e:
print('x-> Test temporary volume failed')
test_results['TEMPORARY_VOLUME_FILE_EXISTS'] = {'Success': False, 'Exception': e}
# --> Check that we can reach the local proxy
print('--> Test that we can reach the local proxy (and thereby the server)')
try:
host = os.environ['HOST']
port = os.environ['PORT']
response = requests.get(f'{host}:{port}/version')
ok = response.status_code == 200
test_results['LOCAL_PROXY_CENTRAL_SERVER'] = {'Success': ok}
except Exception as e:
print('x-> Using the local proxy failed')
test_results['LOCAL_PROXY_CENTRAL_SERVER'] = {'Success': False, 'Exception': e}
# --> check that we cannot reach another address
print('--> Verify that the container has no internet connection')
try:
    isolated = False
    try:
        response = requests.get('https://google.nl')
    except ConnectionError as e:
        print('--> Connection error caught')
        print(e)
        isolated = True
    test_results['ISOLATION_TEST'] = {'Success': isolated}
except Exception as e:
print('x-> Testing an external connection failed...')
test_results['ISOLATION_TEST'] = {'Success': False, 'Exception': e}
print(test_results)
| StarcoderdataPython
1765907 | import numpy as np
import pandas as pd
import tensorflow as tf
import time
import argparse
from matplotlib import pyplot as plt
from random_effect_logistic_regression_utils import generate_data, timestamp
import sys
sys.path.append('../models')
from random_effect_logistic_regression import random_effect_logistic_regression as RELR
from random_effect_logistic_regression import bayesian_random_effect_logistic_regression as BRELR
# Turn GPUs off
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
EPS = 1e-6
def d(f, params):
# Take the derivative of f
# returned value is a function df/d[beta0, beta, alpha]
def df(x,y,level):
with tf.GradientTape(persistent=True) as g:
g.watch(params)
target = f(x,y,level)
est0 = g.gradient(target, params)
est = np.concatenate([e.numpy().reshape([-1]) for e in est0], axis=0)
return est
return df
def get_mlmc_cost(N, max_level, b, w0):
# compute the cost of MLMC estimation
# when the size of x (and that of y) is N
if max_level==0:
levels = np.array([0])
weights = np.array([1.])
else:
weights = 2.**(-(b+1)/2*np.arange(max_level))
weights /= sum(weights)
weights = np.concatenate([[w0], (1-w0)*weights])
levels = np.arange(max_level+1)
cost = N * weights[0] + N * sum( weights[1:] * (2**levels[1:] + 2**(levels[1:]-1)) )
return cost
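# Illustrative sanity check (values assumed, mirroring the settings used
# further below): the MLMC weighting should cost far less than plain
# nested MC at the same top level, which costs N * 2**max_level.
assert get_mlmc_cost(100, max_level=9, b=1.8, w0=0.9) < 100 * 2 ** 9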
# Argparse
parser = argparse.ArgumentParser()
parser.add_argument("--output_file", default="../../out/random_effect_logistic_regression/learning_curve_{}.csv".format(timestamp()), type=str,
help="Output file name. \nAn example usage: `python random_effect_logistic_regression_learning_log.py --output_file example.csv`")
args = parser.parse_args()
output_file = args.output_file
### Initializations
N_total = 100000
B, T, D = (1000, 2, 3) if tf.test.is_gpu_available() else (100, 2, 3)
cost_nmc = B * 2**9
cost_mlmc = get_mlmc_cost(B, max_level=9, b=1.8, w0=0.9)
cost_sumo = B * 9
B_mlmc = np.math.ceil(B * (cost_nmc / cost_mlmc))
B_sumo = np.math.ceil(B * (cost_nmc / cost_sumo))
alpha = np.float64(1.)
beta0 = np.float64(0.)
beta = np.array([0.25, 0.50, 0.75]) #np.random.randn(D) / np.sqrt(D)
model = RELR(D=D)
# True model parameters
X,Y,_ = generate_data(N_total, D, T, beta0, beta, alpha)
objectives = {
"iwelbo1": lambda x, y: model.IWELBO(x, y, n_MC=1),
"iwelbo8": lambda x, y: model.IWELBO(x, y, n_MC=8),
"iwelbo64": lambda x, y: model.IWELBO(x, y, n_MC=64),
"iwelbo512": lambda x, y: model.IWELBO(x, y, n_MC=512),
"iwelbo512_mlmc": lambda x, y: model.IWELBO_MLMC(x, y, max_level=9, b=1.8, w0=0.9, randomize=False),
"iwelbo512_randmlmc": lambda x, y: model.IWELBO_MLMC(x, y, max_level=9, b=1.8, w0=0.9, randomize=True),
"iwelbo512_sumo": lambda x, y: model.IWELBO_SUMO(x, y, K_max=512),
"jvi8": lambda x, y: model.JVI_IWELBO(x, y, n_MC=8),
"jvi64": lambda x, y: model.JVI_IWELBO(x, y, n_MC=64),
"jvi512": lambda x, y: model.JVI_IWELBO(x, y, n_MC=512),
}
# for parallelization
#obj_id = int(input())
#objectives = {k:objectives[k] for i, k in enumerate(objectives.keys()) if i == obj_id}
#print(objectives)
n_train_steps = {
"iwelbo1": 2000,
"iwelbo8": 2000,
"iwelbo64": 2000,
"iwelbo512": 17000,
"iwelbo512_mlmc": 3000,
"iwelbo512_randmlmc": 3000,
"iwelbo512_sumo": 2000,
"jvi8": 2000,
"jvi64": 2000,
"jvi512": 17000,
}
data = []
n_repeat = 100 # TODO: change to 20
params_repeated = {name:[] for name in objectives.keys()}
for name, obj in objectives.items():
for i in range(n_repeat):
print("training {}.... #iter:{} ".format(name,i))
# initialize parameters
model.beta0 = tf.Variable(0.0, dtype=tf.float64)
model.beta = tf.Variable(np.zeros([model.D]), dtype=tf.float64)
model.alpha = tf.Variable(0.0, dtype=tf.float64)
# pointers to the parameters of trained model
params_list = [
model.beta0,
model.beta,
model.alpha
]
optimizer = tf.keras.optimizers.Adam(0.005)
# Training
start = time.time()
for t in range(n_train_steps[name] + 1):
            # Balance the cost of MLMC and NMC when level=9 (n_MC=512)
            # by changing the batch size adaptively
if 'mlmc' in name:
batch = np.random.choice(np.arange(N_total), B_mlmc)
elif 'sumo' in name:
batch = np.random.choice(np.arange(N_total), B_sumo)
else:
batch = np.random.choice(np.arange(N_total), B)
x = X[batch]
y = Y[batch]
# Train step
with tf.GradientTape() as g:
g.watch(params_list)
loss = - obj(x, y)
dparams = g.gradient(loss, params_list)
optimizer.apply_gradients(zip(dparams, params_list))
# Take a log
if t%5==0:
data.append({
"objective": name,
"#iter": i,
"step": t,
"elapsed time": time.time() - start,
"alpha": model.alpha.numpy(),
"beta0": model.beta0.numpy(),
"beta1": model.beta.numpy()[0],
"beta2": model.beta.numpy()[1],
"beta3": model.beta.numpy()[2],
"squared error": sum(
np.concatenate([
[alpha - model.alpha.numpy()],
[beta0 - model.beta0.numpy()],
beta - model.beta.numpy()
]) ** 2
)
})
if t%200==0 and i == 0:
print("#iter: {},\tloss: {}".format(t, loss.numpy()))
print()
print("\n======== Results ========\n")
data = pd.DataFrame(
data=data,
columns = [
"objective", "#iter", "elapsed time", "step",
"alpha", "beta0", "beta1", "beta2", "beta3", "squared error"
]
)
print(data)
data.to_csv(output_file)
print("\nSaved the results to:\n{}".format(output_file))
| StarcoderdataPython |
3357980 | from datetime import datetime
from pathlib import Path
from pony.orm import Database, Optional, PrimaryKey, Required, Set, db_session
from PhotoPhixer.common.config import SysConfig
def db_connection(config: SysConfig) -> Database:
"""
This routine must be used to create and manage data in database.
:param config: A config instance where important DB properties must be set
:return: A database connection to handle data
"""
config_dict = config.list_config()
sqlite_path = Path(config_dict['GLOBAL']['sqlite_path'])
db = Database(
config_dict['GLOBAL']['db_engine'],
str(sqlite_path),
create_db=True)
class File(db.Entity):
id = PrimaryKey(str)
name = Optional(str)
file_type = Optional(str)
device = Optional(str)
has_metadata = Optional(bool)
date_processing = Optional(datetime)
date_file_creation = Optional(datetime)
date_last_change = Optional(datetime)
dropbox_hash = Optional(str)
directory = Optional('Directory')
class Directory(db.Entity):
id = PrimaryKey(str)
path = Required(str)
date_creation = Required(datetime)
date_last_update = Optional(datetime)
files = Set(File)
db.generate_mapping(create_tables=True)
add_null_objects(db)
return db
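# Minimal usage sketch (constructor arguments assumed; see SysConfig):
#   config = SysConfig()
#   db = db_connection(config)
#   with db_session:
#       db.Directory.select().count()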
@db_session
def add_null_objects(db: Database) -> None:
"""
This routine creates null objects into Database so we can return these
objects instead of raising an error or different objects types.
:param db: Database connection
:return: None
"""
    # Fetch or create the null File so the Directory below can always reference it
    null_file = db.File.get(id='None')
    if null_file is None:
        null_file = db.File(
            id='None',
            name='None',
            file_type='None'
        )
if not db.Directory.exists(id='None'):
db.Directory(
id='None',
path='None',
date_creation=datetime.now(),
date_last_update=datetime.now(),
files=null_file
)
@db_session
def add_device_directories(config: SysConfig, db: Database) -> None:
"""
:param config:
:param db: Database connection
:return: None
"""
dir_list = list()
config_dict = config.list_config()
for device_dir in config_dict['DROPBOX']:
| StarcoderdataPython |
47149 | <filename>lists.py
# Set number of participants
num_dyads = 4
num_participants = num_dyads*2
# Create lists for iterations
participants = list(range(num_participants))
dyads = list(range(num_dyads))
| StarcoderdataPython
3266848 | <reponame>omaskery/classdict
from .toplevel import to_dict, from_dict, can_consume_dict, can_become_dict
from .errors import *
class FieldType(object):
def __init__(self, expected_type=None, required=False):
self.required = required
self.type = expected_type
self._name = None
def set_name(self, name):
self._name = name
def validate(self, value):
if value is None and self.required:
raise RequiredFieldError(
"field {name} absent but is required".format(
name=self._name
)
)
if self.type is not None and value is not None and not isinstance(value, self.type):
raise ValidationError(
"field {name} got value of unexpected type {got}, expected: {type}".format(
got=type(value),
name=self._name,
type=self.type
)
)
def to_dict(self, value):
return to_dict(value)
def from_dict(self, value):
return from_dict(self.type, value)
class EmbeddedFieldType(FieldType):
def __init__(self, objdict_class, **kwargs):
if 'expected_type' not in kwargs:
kwargs['expected_type'] = objdict_class
super().__init__(**kwargs)
self._class = objdict_class
class ListFieldType(FieldType):
def validate(self, value):
if not isinstance(value, list):
raise ValidationError(
"field {name} got value of unexpected type {got}, expected a list of {type}".format(
got=type(value),
name=self._name,
type=self.type
)
)
list(map(super().validate, value))
def to_dict(self, value):
return list(map(to_dict, value))
def from_dict(self, value):
return list(map(lambda x: from_dict(self.type, x), value))
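# Usage sketch (the class wiring is assumed; see the package's toplevel
# module for how fields attach to objdict classes):
#   class Point:
#       x = FieldType(expected_type=int, required=True)
#       y = FieldType(expected_type=int, required=True)
#   class Path:
#       points = ListFieldType(expected_type=Point)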
class TupleFieldType(FieldType):
def __init__(self, *args, **kwargs):
if 'expected_type' in kwargs:
raise ObjDictError("expected type is not compatible with TupleFieldType, specify element types as *args")
kwargs['expected_type'] = args
super().__init__(**kwargs)
def validate(self, value):
if not isinstance(value, (tuple, list)):
raise ValidationError(
"field {name} got value of unexpected type {got}, expected a tuple of types ({type})".format(
name=self._name,
got=type(value),
type=", ".join(map(str, self.type))
)
)
if len(value) != len(self.type):
raise ValidationError(
"field {name} expected a tuple of length {len}, got tuple of length {got}".format(
name=self._name,
len=len(self.type),
got=len(value)
)
)
for index, (expected, got) in enumerate(zip(self.type, value)):
if not isinstance(got, expected):
raise ValidationError("field {name} expected element {nth} to be {type}, got type {got}".format(
name=self._name,
nth=index,
type=expected,
got=type(got)
))
def to_dict(self, value):
return tuple(map(to_dict, value))
def from_dict(self, value):
        return tuple(
            from_dict(cls, element)
            for element, cls in zip(value, self.type)
        )
| StarcoderdataPython |
111132 | #!/usr/bin/env python
from M2Crypto import X509
import binascii
import hashlib
import ssl
import sys
def main(argv):
if len(argv) != 1 and len(argv) != 2:
print "Usage: pin.py [<certificate_path> | <host> <port>]"
return
if (len(argv) == 1):
cert = X509.load_cert(argv[0])
else:
peerCert = ssl.get_server_certificate((argv[0], int(argv[1])))
cert = X509.load_cert_string(peerCert)
pubkey = cert.get_pubkey().as_der()
digest = hashlib.sha256()
digest.update(pubkey)
sha256 = digest.digest()
print "Calculating PIN for certificate: " + cert.get_subject().as_text()
print "\n"
print "Public Key Pins:"
print "----------------"
print "SHA256:" + binascii.hexlify(sha256)
print "PLAIN:" + binascii.hexlify(pubkey)
print "\n"
print "Certificate Pins:"
print "-----------------"
print "CERTSHA256:" + cert.get_fingerprint('sha256')
print "CERTPLAIN:" + binascii.hexlify(cert.as_der())
if __name__ == '__main__':
main(sys.argv[1:])
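# Example invocations (illustrative):
#   python pin.py example.com 443
#   python pin.py /path/to/certificate.pem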
| StarcoderdataPython |
4090 | <gh_stars>1-10
"""viewer application which allows to interactively view spatio-temporal gap filling results"""
import os
import argparse
from datetime import datetime, timedelta
from tkinter import Canvas, Tk, Button, RAISED, DISABLED, SUNKEN, NORMAL
import numpy as np
from PIL import Image, ImageTk
import probgf.media as media
class MainWindow():
def next(self, event=None):
self.curr_img = (self.curr_img + 1) % len(self.imgs_orig)
self.refresh()
def prev(self, event=None):
self.curr_img = (self.curr_img - 1) % len(self.imgs_orig)
self.refresh()
def click_wheel(self, event):
self.start_drag = (event.x + self.shift_x, event.y + self.shift_y)
def click_left(self, event):
if not self.over_button:
self.prev()
def click_right(self, event):
if not self.over_button:
self.next()
def refresh(self):
zoom = float(self.zoom) / 100
self.start_x = int(self.img_w_f / 2 - self.img_w_f / zoom / 2) + self.shift_x
self.end_x = int(self.start_x + self.img_w_f / zoom)
self.start_y = int(self.img_w_f / 2 - self.img_w_f / zoom / 2) + self.shift_y
self.end_y = int(self.start_y + self.img_w_f / zoom)
if not self.mask_toggle:
self.b_masks.config(relief=RAISED)
img1 = self.imgs_orig[self.curr_img]
img2 = self.imgs_pred[self.curr_img]
else:
self.b_masks.config(relief=SUNKEN)
img1 = self.imgs_orig_m[self.curr_img]
img2 = self.imgs_pred_m[self.curr_img]
img1 = img1.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS)
img2 = img2.crop((self.start_x, self.start_y, self.end_x, self.end_y)).resize((self.img_w, self.img_w), Image.ANTIALIAS)
self.imgs_orig_v[self.curr_img] = ImageTk.PhotoImage(img1)
self.imgs_pred_v[self.curr_img] = ImageTk.PhotoImage(img2)
self.canvas.itemconfig(self.i_left, image = self.imgs_orig_v[self.curr_img])
self.canvas.itemconfig(self.i_right, image = self.imgs_pred_v[self.curr_img])
self.canvas.itemconfig(self.i_list, image = self.imagelists[self.curr_img])
self.canvas.itemconfig(self.day_info, text='{} - cloud cover {:06.2f}% - estimated MAE {}'.format(self.dates[self.curr_img],
self.cc[self.curr_img] * 100,
self.errors[self.curr_img]))
if self.zoom == 100:
self.canvas.itemconfig(self.zoom, text='')
self.b_reset.config(state=DISABLED)
else:
self.canvas.itemconfig(self.zoom, text='ZOOM: {:3d}%'.format(self.zoom))
self.b_reset.config(state=NORMAL)
def zoomer(self, event):
if event.num == 4 or event.delta == 120 or event.keysym == 'plus':
self.zoom += 20
elif event.delta == 240:
self.zoom += 40
elif event.delta == 360:
self.zoom += 60
else:
if self.zoom - 20 >= 100:
self.zoom -= 20
if self.zoom == 100:
self.reset_transform()
self.refresh()
def drag_roi(self, event):
self.shift_x = min(max(self.start_drag[0] - event.x, 0 - int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2)),
int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2))
self.shift_y = min(max(self.start_drag[1] - event.y, 0 - int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2)),
int(self.img_w_f / 2 - self.img_w_f / self.zoom / 2))
self.refresh()
def toggle_mask(self, event=None):
self.mask_toggle = not self.mask_toggle
self.refresh()
def reset_transform(self, event=None):
self.mask_toggle = False
self.zoom = 100
self.shift_x = 0
self.shift_y = 0
self.refresh()
def button_enter(self, event):
self.over_button = True
def button_leave(self, enter):
self.over_button = False
def __init__(self, root, w, h, imgs_p, imgs_o, imgs_m, dates, errors, logos):
self.dates = dates
self.errors = errors
# setup images
self.img_w = int(h * 0.68) # width of each displayed image
self.imgs_orig_m = [] # masked full images
self.imgs_pred_m = []
self.imgs_orig = [] # unmasked full images
self.imgs_pred = []
self.cc = []
for index, img in enumerate(imgs_p):
self.imgs_orig.append(imgs_o[index].resize((self.img_w, self.img_w), resample=0))
self.imgs_pred.append(img.resize((self.img_w, self.img_w), resample=0))
self.imgs_orig_m.append(Image.blend(self.imgs_orig[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5))
self.imgs_pred_m.append(Image.blend(self.imgs_pred[-1], imgs_m[index].convert(mode='RGB').resize((self.img_w, self.img_w), resample=0), alpha=.5))
self.cc.append(1 - np.count_nonzero(np.array(imgs_m[index])) / np.array(imgs_m[index]).size)
self.curr_img = 0
# text labels and logos
h_logos = int(h / 17)
b_logos = int(w / 100)
self.canvas = Canvas(root, width=w, height=h)
self.canvas.pack()
self.canvas.configure(background='white')
self.logo1 = ImageTk.PhotoImage(logos[0].resize((int(h_logos / logos[0].size[1] * logos[0].size[0]), h_logos), Image.ANTIALIAS))
self.logo2 = ImageTk.PhotoImage(logos[1].resize((int(h_logos / logos[1].size[1] * logos[1].size[0]), h_logos), Image.ANTIALIAS))
self.logo3 = ImageTk.PhotoImage(logos[2].resize((int(h_logos / logos[2].size[1] * logos[2].size[0]), h_logos), Image.ANTIALIAS))
self.canvas.create_image(int(self.logo1.width() / 2 + b_logos), int(self.logo1.height() / 2 + b_logos), image=self.logo1)
self.canvas.create_image(int(w - self.logo2.width() / 2 - b_logos), int(self.logo2.height() / 2 + b_logos), image=self.logo2)
self.canvas.create_image(int(w - self.logo3.width() / 2 - b_logos), int(h - (self.logo3.height() / 2 + b_logos)), image=self.logo3)
self.canvas.create_text(w / 2, h * 0.06, font=("Courier", int(h / 25)), text='Gap Filling Viewer')
self.canvas.create_text(w / 3.9, h * 0.19, font=("Courier", int(h / 35)), text='Observed')
self.canvas.create_text(w - w / 3.9, h * 0.19, font=("Courier", int(h / 35)), text='Predicted')
self.day_info = self.canvas.create_text(w / 2, h * 0.13, font=("Courier", int(h / 30)), text='')
self.zoom = self.canvas.create_text(w * 0.12, h * 0.94, font=("Courier", int(h / 50)), text='')
# image timeline
imagelist_h = int(self.img_w / len(self.imgs_pred)) + 1
imagelist_a = np.zeros((len(self.imgs_pred), imagelist_h, imagelist_h, 3), dtype='uint8')
for index in range(len(self.imgs_pred)):
imagelist_a[index, :, :, :] = np.array(self.imgs_pred[index].resize((imagelist_h, imagelist_h), Image.ANTIALIAS))
self.imagelists = []
for index in range(len(self.imgs_pred)):
c_list = np.array(imagelist_a)
c_list[index, :int(w / 600), :, :] = 255
c_list[index, (imagelist_h - int(w / 600)):, :, :] = 255
c_list[index, :, :int(w / 600), :] = 255
c_list[index, :, (imagelist_h - int(w / 600)):, :] = 255
self.imagelists.append(ImageTk.PhotoImage(Image.fromarray(c_list.reshape(len(self.imgs_pred) * imagelist_h, imagelist_h, 3))))
self.i_list = self.canvas.create_image(w * 0.5, h * 0.56, image=self.imagelists[self.curr_img])
# images and buttons
self.img_w_f = self.imgs_orig[0].size[0] # full image width
self.imgs_orig_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_orig] # images for visualization
self.imgs_pred_v = [ImageTk.PhotoImage(img.resize((self.img_w, self.img_w), Image.ANTIALIAS)) for img in self.imgs_pred]
self.i_left = self.canvas.create_image(w / 3.9, h * 0.56, image=self.imgs_orig_v[self.curr_img])
self.i_right = self.canvas.create_image(w - w / 3.9, h * 0.56, image=self.imgs_pred_v[self.curr_img])
self.b_masks = Button(root, font=("Courier", int(h / 50)), text = "Show masks", command=self.toggle_mask)
self.b_reset = Button(root, font=("Courier", int(h / 50)), text = "Reset view", command=self.reset_transform, state=DISABLED)
self.b_quit = Button(root, font=("Courier", int(h / 50)), text = "Quit", command=self.canvas.master.destroy)
self.reset_transform()
self.canvas.create_window(w * 0.30, h * 0.94, window=self.b_masks)
self.canvas.create_window(w * 0.50, h * 0.94, window=self.b_reset)
self.canvas.create_window(w * 0.70, h * 0.94, window=self.b_quit)
# bind buttons and keys
root.bind("q", lambda e: self.canvas.master.destroy())
root.bind("r", self.reset_transform)
root.bind("m", self.toggle_mask)
root.bind("<Right>", self.next)
root.bind("<Left>", self.prev)
root.bind("<Down>", self.next)
root.bind("<Up>", self.prev)
root.bind("<Button-3>", self.click_right)
root.bind("<Button-1>", self.click_left)
root.bind("<Button-2>", self.click_wheel)
root.bind("<Button-4>", self.zoomer)
root.bind("<Button-5>", self.zoomer)
root.bind("<MouseWheel>", self.zoomer)
root.bind("<B2-Motion>", self.drag_roi)
root.bind("+", self.zoomer)
root.bind("-", self.zoomer)
self.over_button = False
self.b_masks.bind("<Enter>", self.button_enter)
self.b_masks.bind("<Leave>", self.button_leave)
self.b_reset.bind("<Enter>", self.button_enter)
self.b_reset.bind("<Leave>", self.button_leave)
self.b_quit.bind("<Enter>", self.button_enter)
self.b_quit.bind("<Leave>", self.button_leave)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-l', '--left', default='imgs/original/',
help='directory with images which are shown on the left')
parser.add_argument('-r', '--right', default='imgs/pred_outline_lin_spatial_clouds0_2/',
help='directory with images which are shown on the right')
parser.add_argument('-m', '--masks', default='imgs/mask/',
help='directory with mask images')
parser.add_argument('-R', '--report', default='report_lin_spatial_clouds0_2.csv',
help='report containing date and error information for the right hand images')
parser.add_argument('-y', '--year', type=int, default=2018,
help='year of data acquisition')
parser.add_argument('-W', '--width', type=int, default=1280,
help='window width')
parser.add_argument('-H', '--height', type=int, default=720,
help='window height')
args = parser.parse_args()
imgs_o = [Image.open(img) for img in sorted([os.path.join(args.left, img) for img in os.listdir(args.left)])]
imgs_p = [Image.open(img) for img in sorted([os.path.join(args.right, img) for img in os.listdir(args.right)])]
imgs_m = [Image.open(img) for img in sorted([os.path.join(args.masks, img) for img in os.listdir(args.masks)])]
report = np.genfromtxt(args.report, delimiter=',', dtype=float)[1:-1]
dates = [(datetime(args.year, 1, 1) + timedelta(int(report[day, 1]) - 1)).strftime('%b %d %Y') for day in range(report.shape[0])]
errors = ['{:4.1f}'.format(error) if error != 0.0 else 'n.a. ' for error in report[:, 5]]
logos = [media.logo1, media.logo2, media.logo3]
if len(imgs_o) != len(dates):
raise RuntimeError('Different number of images in {} than days in the report {}!'.format(args.left, args.report))
if len(imgs_p) != len(dates):
raise RuntimeError('Different number of images in {} than days in the report {}!'.format(args.right, args.report))
if len(imgs_m) != len(dates):
raise RuntimeError('Different number of images in {} than days in the report {}!'.format(args.masks, args.report))
root = Tk()
root.title('Gap Filling Viewer')
root.geometry("%dx%d+0+0" % (args.width, args.height))
MainWindow(root, args.width, args.height, imgs_p, imgs_o, imgs_m, dates, errors, logos)
root.focus_set()
root.mainloop()
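# Example invocation (script name assumed; flags as defined above):
#   python viewer.py --left imgs/original/ --right imgs/pred/ --masks imgs/mask/ --report report.csv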
| StarcoderdataPython |
3203779 | #!/usr/bin/python
from functools import wraps
def log(fun):
@wraps(fun)
def do_log(*args, **kwargs):
print(f'{fun.__name__} was called')
return fun(*args, **kwargs)
return do_log
@log
def power(x):
return x * x
res = power(4)
print(res)
| StarcoderdataPython |
1772652 | <filename>Abbot/script.py
def inv_me(num, den):
ans = []
ans.append(num // den)
num = num % den
reducer = 0
while True:
reducer += 1
num, den = den * reducer, num
ans.append(num // den)
num = num % den
if num == 0:
break
return bytes(ans)
def inv_you(num, den):
ans = []
ans.append(num // den)
num = num % den
reducer = 1
while True:
reducer *= -1
num, den = den * reducer, num
ans.append(abs(num // den))
num = num % den
if num == 0:
break
return bytes(ans)
def inv_us(num, den):
ans = []
ans.append(num // den)
num = num % den
while True:
num, den = den, num
ans.append(abs(num // den))
num = num % den
if num == 0:
break
return bytes(ans)
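# Quick illustrative check: inv_us emits the plain continued-fraction
# quotients of num/den packed into bytes, e.g. 10/3 = [3; 3].
assert inv_us(10, 3) == bytes([3, 3])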
enc = [
(inv_us, 4874974328610108385835995981839358584964018454799387862, 72744608672130404216404640268150609115102538654479393),
(inv_you, 39640220997840521464725453281273913920171987264976009809, 366968282179507143583456804992018400453304099650742276),
(inv_me, 145338791483840102508854650881795321139259790204977, 1529712573230983998328149700664285268430918011078),
(inv_me, 84704403065477663839636886654846156888046890191627, 717773708720775877427974283328022404459326394028),
(inv_you, 287605888305597385307725138275886061497915866633976011, 8712550395581704680675139804565590824398265004367939)
]
flag = b''
for func, num, den in enc:
flag += func(num, den)
print(flag)
| StarcoderdataPython |
3292376 | class UnknownError(Exception):
pass
class ImageCheckError(Exception):
pass
class ImageAuthenticationError(ImageCheckError):
pass
class ImageNameError(ImageCheckError):
pass
| StarcoderdataPython |
1782690 | from .base import *
import django_heroku
import dj_database_url
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', cast=bool)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
ALLOWED_HOSTS = ['learnwithanarul.herokuapp.com', '*']  # host names only, no scheme/path
# database management
DATABASES = {'default': dj_database_url.config()}
# Configure Django App for Heroku.
django_heroku.settings(locals())
# whitenoise collectstatic
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
EMAIL_BACKEND = config('EMAIL_BACKEND')
DEFAULT_FROM_EMAIL = config('DEFAULT_FROM_EMAIL')
# cloudinary config for heroku
# cloudinary.config(
# cloud_name = 'mohammadanarul',
# api_key = '867477367854119',
# api_secret = '<KEY>'
# )
| StarcoderdataPython
1676499 | <reponame>fqrouter/fquni
import signal
import socket
import logging
import os
import time
import argparse
import dpkt.ip
LOGGER = logging.getLogger(__name__)
udp_socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
udp_socket.setblocking(False)
SO_MARK = 36
udp_socket.setsockopt(socket.SOL_SOCKET, SO_MARK, 0xcafe)
udp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, 250)
SERVER_IP = None
SERVER_PORT = None
def main():
global SERVER_IP, SERVER_PORT
from netfilterqueue import NetfilterQueue
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument('--log-file')
argument_parser.add_argument('--log-level', choices=['INFO', 'DEBUG'], default='INFO')
argument_parser.add_argument('--queue-number', default=0, type=int)
argument_parser.add_argument('server', help='x.x.x.x:19842')
args = argument_parser.parse_args()
log_level = getattr(logging, args.log_level)
logging.getLogger().setLevel(log_level)
logging.getLogger().handlers = []
if args.log_file:
handler = logging.handlers.RotatingFileHandler(
args.log_file, maxBytes=1024 * 16, backupCount=0)
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
handler.setLevel(log_level)
logging.getLogger().addHandler(handler)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
handler.setLevel(log_level)
logging.getLogger().addHandler(handler)
    signal.signal(signal.SIGTERM, lambda signum, frame: os._exit(1))
    signal.signal(signal.SIGINT, lambda signum, frame: os._exit(1))
SERVER_IP, SERVER_PORT = args.server.split(':')
SERVER_PORT = int(SERVER_PORT)
nfqueue = NetfilterQueue()
nfqueue.bind(args.queue_number, handle_nfqueue_element)
LOGGER.info('fquni client started')
nfqueue.run()
def handle_nfqueue_element(nfqueue_element):
try:
raw_ip_packet = nfqueue_element.get_payload()
try:
ip_packet = dpkt.ip.IP(raw_ip_packet)
except:
LOGGER.error('not ip packet')
nfqueue_element.accept()
return
l4_packet = getattr(ip_packet, 'tcp', None) or getattr(ip_packet, 'udp', None)
if not l4_packet:
LOGGER.error('%s not tcp or udp' % repr(ip_packet))
nfqueue_element.accept()
return
ip_packet.src_ip = socket.inet_ntoa(ip_packet.src)
ip_packet.dst_ip = socket.inet_ntoa(ip_packet.dst)
if SERVER_IP == ip_packet.dst_ip:
nfqueue_element.accept()
return
if getattr(ip_packet, 'tcp', None) and dpkt.tcp.TH_SYN == ip_packet.tcp.flags:
LOGGER.info('%s:%s =syn=> %s:%s' % (ip_packet.src_ip, ip_packet.tcp.sport, ip_packet.dst_ip, ip_packet.tcp.dport))
elif getattr(ip_packet, 'udp', None) and 53 == ip_packet.udp.dport:
LOGGER.info('%s:%s =dns=> %s:%s' % (ip_packet.src_ip, ip_packet.udp.sport, ip_packet.dst_ip, ip_packet.udp.dport))
udp_socket.sendto(raw_ip_packet, (SERVER_IP, SERVER_PORT))
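        # Presumably the copy tunnelled to the server above carries the real
        # traffic, while the original packet is deliberately crippled below:
        # a tiny TTL so it dies in transit, an invalid L4 checksum, and
        # sum=0 so dpkt recomputes a valid IP header checksum on repack.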
ip_packet.ttl = 3
l4_packet.sum = 1
ip_packet.sum = 0
nfqueue_element.set_payload(str(ip_packet))
nfqueue_element.accept()
except:
LOGGER.exception('failed to handle nfqueue element')
time.sleep(3)
if '__main__' == __name__:
    main()
| StarcoderdataPython
158137 | from defusedcsv import csv
import importlib
import logging
import os
import re
from pathlib import Path
from tempfile import mkstemp
from urllib.parse import urlparse
import pypandoc
from django.apps import apps
from django.conf import settings
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy as _
log = logging.getLogger(__name__)
def get_script_alias(request):
return request.path[:-len(request.path_info)]
def get_referer(request, default=None):
return request.META.get('HTTP_REFERER', default)
def get_referer_path_info(request, default=''):
referer = request.META.get('HTTP_REFERER', None)
if not referer:
return default
script_alias = get_script_alias(request)
return urlparse(referer).path[len(script_alias):]
def get_next(request):
next = request.POST.get('next')
current = request.path_info
if next in (current, None):
return get_script_alias(request) + '/'
else:
return get_script_alias(request) + next
def get_uri_prefix(obj):
# needs to stay, is part of a migration
r = settings.DEFAULT_URI_PREFIX
if bool(obj.uri_prefix) is True:
r = obj.uri_prefix.rstrip('/')
return r
def get_pandoc_version():
return int(pypandoc.get_pandoc_version().split('.')[0])
def join_url(base, *args):
url = base
for arg in args:
url = url.rstrip('/') + '/' + arg.lstrip('/')
return url
def get_model_field_meta(model):
meta = {}
for field in model._meta.get_fields():
meta[field.name] = {}
if hasattr(field, 'verbose_name'):
meta[field.name]['verbose_name'] = field.verbose_name
if hasattr(field, 'help_text'):
meta[field.name]['help_text'] = field.help_text
return meta
def get_languages():
languages = []
for i in range(5):
try:
language = settings.LANGUAGES[i][0], settings.LANGUAGES[i][1],\
'lang%i' % (i + 1)
languages.append(language)
except IndexError:
pass
return languages
def get_language_fields(field_name):
return [
field_name + '_' + lang_field for lang_code,
lang_string, lang_field in get_languages()
]
def get_language_warning(obj, field):
for lang_code, lang_string, lang_field in get_languages():
if not getattr(obj, '%s_%s' % (field, lang_field)):
return True
return False
def set_export_reference_document(format, context):
# try to get the view uri from the context
try:
view = context['view']
view_uri = getattr(view, 'uri')
except (AttributeError, KeyError, TypeError):
view_uri = None
refdocs = []
if format == 'odt':
# append view specific custom refdoc
try:
refdocs.append(settings.EXPORT_REFERENCE_ODT_VIEWS[view_uri])
except KeyError:
pass
# append custom refdoc
if settings.EXPORT_REFERENCE_ODT:
refdocs.append(settings.EXPORT_REFERENCE_ODT)
elif format == 'docx':
# append view specific custom refdoc
try:
refdocs.append(settings.EXPORT_REFERENCE_DOCX_VIEWS[view_uri])
except KeyError:
pass
# append custom refdoc
if settings.EXPORT_REFERENCE_DOCX:
refdocs.append(settings.EXPORT_REFERENCE_DOCX)
# append the default reference docs
refdocs.append(
os.path.join(
apps.get_app_config('rdmo').path,
'share', 'reference' + '.' + format
)
)
# return the first file in refdocs that actually exists
for refdoc in refdocs:
if os.path.isfile(refdoc):
return refdoc
def render_to_format(request, export_format, title, template_src, context):
if export_format not in dict(settings.EXPORT_FORMATS):
return HttpResponseBadRequest(_('This format is not supported.'))
# render the template to a html string
template = get_template(template_src)
html = template.render(context)
# remove empty lines
html = os.linesep.join([line for line in html.splitlines() if line.strip()])
if export_format == 'html':
# create the response object
response = HttpResponse(html)
else:
pandoc_version = get_pandoc_version()
pandoc_args = settings.EXPORT_PANDOC_ARGS.get(export_format, [])
content_disposition = 'attachment; filename="%s.%s"' % (title, export_format)
if export_format == 'pdf':
# check pandoc version (the pdf arg changed to version 2)
if pandoc_version == 1:
pandoc_args = [arg.replace(
'--pdf-engine=xelatex', '--latex-engine=xelatex'
) for arg in pandoc_args]
# display pdf in browser
content_disposition = 'filename="%s.%s"' % (title, export_format)
# use reference document for certain file formats
refdoc = set_export_reference_document(export_format, context)
if refdoc is not None and export_format in ['docx', 'odt']:
# check pandoc version (the args changed to version 2)
if pandoc_version == 1:
pandoc_args.append('--reference-{}={}'.format(export_format, refdoc))
else:
pandoc_args.append('--reference-doc={}'.format(refdoc))
# add the possible resource-path
if 'resource_path' in context and pandoc_version > 1:
resource_path = Path(settings.MEDIA_ROOT).joinpath(context['resource_path']).as_posix()
pandoc_args.append('--resource-path={}'.format(resource_path))
# create a temporary file
(tmp_fd, tmp_filename) = mkstemp('.' + export_format)
# convert the file using pandoc
log.info('Export %s document using args %s.', export_format, pandoc_args)
pypandoc.convert_text(html, export_format, format='html', outputfile=tmp_filename, extra_args=pandoc_args)
# read the temporary file
file_handler = os.fdopen(tmp_fd, 'rb')
file_content = file_handler.read()
file_handler.close()
# delete the temporary file
os.remove(tmp_filename)
# create the response object
response = HttpResponse(file_content, content_type='application/%s' % export_format)
response['Content-Disposition'] = content_disposition.encode('utf-8')
return response
def render_to_csv(title, rows, delimiter=','):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s.csv"' % title
writer = csv.writer(response, delimiter=delimiter)
for row in rows:
writer.writerow(
['' if x is None else str(x) for x in row]
)
return response
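# Example sketch (names illustrative): stream a small table as a CSV download.
#   rows = [('id', 'name'), (1, 'alice')]
#   return render_to_csv('users', rows)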
def return_file_response(file_path, content_type):
file_abspath = Path(settings.MEDIA_ROOT) / file_path
if file_abspath.exists():
with file_abspath.open('rb') as fp:
response = HttpResponse(fp.read(), content_type=content_type)
response['Content-Disposition'] = 'attachment; filename=' + file_abspath.name
return response
else:
raise Http404
def sanitize_url(s):
# is used in the rdmo-app
try:
m = re.search('[a-z0-9-_]', s)
except TypeError:
s = ''
else:
if bool(m) is False:
s = ''
else:
s = re.sub('/+', '/', s)
return s
def import_class(string):
module_name, class_name = string.rsplit('.', 1)
return getattr(importlib.import_module(module_name), class_name)
def copy_model(instance, **kwargs):
    # get values from instance which are not id, ForeignKeys or M2M relations
data = {}
for field in instance._meta.get_fields():
if not (field.name == 'id' or field.is_relation):
data[field.name] = getattr(instance, field.name)
# update with the kwargs provided to this function
data.update(kwargs)
# create and save new instance
instance_copy = instance._meta.model(**data)
instance_copy.save()
return instance_copy
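# Example sketch (model and field names assumed): duplicate a row while
# overriding selected fields.
#   task_copy = copy_model(task, title=task.title + ' (copy)')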
def human2bytes(string):
if not string:
return 0
m = re.match(r'([0-9.]+)\s*([A-Za-z]+)', string)
number, unit = float(m.group(1)), m.group(2).strip().lower()
if unit == 'kb' or unit == 'k':
return number * 1000
elif unit == 'mb' or unit == 'm':
return number * 1000**2
elif unit == 'gb' or unit == 'g':
return number * 1000**3
elif unit == 'tb' or unit == 't':
return number * 1000**4
elif unit == 'pb' or unit == 'p':
return number * 1000**5
elif unit == 'kib':
return number * 1024
elif unit == 'mib':
return number * 1024**2
elif unit == 'gib':
return number * 1024**3
elif unit == 'tib':
return number * 1024**4
elif unit == 'pib':
return number * 1024**5
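# Illustrative examples (not part of the original module): decimal suffixes
# use powers of 1000, the *ib suffixes powers of 1024.
#   >>> human2bytes('1.5 MB')
#   1500000.0
#   >>> human2bytes('2KiB')
#   2048.0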
| StarcoderdataPython |
1775322 | '''AlexNet for CIFAR10. FC layers are removed. Paddings are adjusted.
Without BN, the initial learning rate should be 0.01
(c) <NAME>
'''
import torch.nn as nn
import torch.nn.functional as F
from torchsso.utils.accumulator import TensorAccumulator
__all__ = ['alexnet', 'alexnet_mcdropout']
class AlexNet(nn.Module):
def __init__(self, num_classes=10):
super().__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=5)
self.conv2 = nn.Conv2d(64, 192, kernel_size=5, padding=2)
self.conv3 = nn.Conv2d(192, 384, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(384, 256, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(256, 256, kernel_size=3, padding=1)
self.fc = nn.Linear(256, num_classes)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = F.relu(self.conv5(x))
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class AlexNetMCDropout(AlexNet):
mc_dropout = True
def __init__(self, num_classes=10, dropout_ratio=0.5, val_mc=10):
super(AlexNetMCDropout, self).__init__(num_classes)
self.dropout_ratio = dropout_ratio
self.val_mc = val_mc
def forward(self, x):
dropout_ratio = self.dropout_ratio
x = F.relu(F.dropout(self.conv1(x), p=dropout_ratio))
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = F.relu(F.dropout(self.conv2(x), p=dropout_ratio))
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = F.relu(F.dropout(self.conv3(x), p=dropout_ratio))
x = F.relu(F.dropout(self.conv4(x), p=dropout_ratio))
x = F.relu(F.dropout(self.conv5(x), p=dropout_ratio))
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def prediction(self, x):
acc_prob = TensorAccumulator()
m = self.val_mc
for _ in range(m):
output = self.forward(x)
prob = F.softmax(output, dim=1)
acc_prob.update(prob, scale=1/m)
prob = acc_prob.get()
return prob
def alexnet(**kwargs):
r"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
"""
model = AlexNet(**kwargs)
return model
def alexnet_mcdropout(**kwargs):
model = AlexNetMCDropout(**kwargs)
return model
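# Minimal usage sketch (CIFAR-10-sized input assumed): `prediction` averages
# `val_mc` stochastic forward passes, i.e. MC-dropout inference.
if __name__ == '__main__':
    import torch
    net = alexnet_mcdropout(num_classes=10, val_mc=5)
    dummy = torch.randn(2, 3, 32, 32)
    probs = net.prediction(dummy)
    assert probs.shape == (2, 10)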
| StarcoderdataPython |
134151 | <reponame>jhonatanlteodoro/ecommerce-django
"""
Django settings for djangoecommerce project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# My apps
'core',
'catalog',
'accounts',
'checkout',
#libs
'widget_tweaks',
'paypal.standard.ipn',
'easy_thumbnails',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#custom middleware
'checkout.middleware.cart_item_middleware',
]
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
ROOT_URLCONF = 'djangoecommerce.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# context processor from my catalog app
'catalog.context_processors.categories',
],
},
},
]
WSGI_APPLICATION = 'djangoecommerce.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# User settings - Auth
AUTH_USER_MODEL = 'accounts.User'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'accounts.backends.ModelBackend',
)
LOGIN_URL = 'login'
LOGIN_REDIRECT_URL = 'index'
# Custom messages
from django.contrib.messages import constants as messages_constants
MESSAGE_TAGS = {
messages_constants.DEBUG: 'debug',
messages_constants.INFO: 'info',
messages_constants.SUCCESS: 'success',
messages_constants.WARNING: 'warning',
messages_constants.ERROR: 'danger',
}
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
#E-mail
#EMAIL_HOST = ''
#EMAIL_HOST_USER = ''
#EMAIL_HOST_PASSWORD = ''
DEFAULT_FROM_EMAIL = 'admindjangoecmmerce@localhost'
# PAGSEGURO CONFIGURATION
# See the PagSeguro docs for validation
PAGSEGURO_TOKEN = ''
PAGSEGURO_EMAIL = '<EMAIL>'
PAGSEGURO_SANDBOX = True
# PAYPAL CONFIGURATION
# See the PayPal docs for validation
PAYPAL_TEST = True
PAYPAL_EMAIL = '<EMAIL>'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'staticfiles')
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Thumbnails
THUMBNAIL_ALIASES = {
'':{
'product_image': {'size': (285, 160), 'crop': True},
},
}
| StarcoderdataPython |
1774386 | from typing import List
import collections


class Solution:
def findSubstring(self, s: str, words: List[str]) -> List[int]:
if not words:
return []
nwords = len(words)
counter = collections.Counter(words)
ans = []
m = len(words[0])
total_len = nwords * m
for i in range(len(s) - total_len + 1):
seen = collections.Counter()
j = i
while j < i + total_len:
word = s[j:j + m]
if word in counter and seen[word] < counter[word]:
seen[word] += 1
else:
break
j += m
else:
ans.append(i)
return ans
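

# Quick illustrative check (LeetCode 30 sample case):
assert Solution().findSubstring("barfoothefoobarman", ["foo", "bar"]) == [0, 9]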
| StarcoderdataPython |
4814205 | <reponame>Random-Coders/trash-sorter-flask<gh_stars>1-10
"""
Imports
"""
# Flask imports
from flask import Flask
# Create Flask app
app = Flask(__name__, template_folder='templates')
# Add Configurations to app
app.config.from_pyfile('config.py', silent=False)
from trash import views
| StarcoderdataPython |
4830817 | <reponame>tobykirk/PyBaMM
from .base_porosity import BaseModel
from .constant_porosity import Constant
from .reaction_driven_porosity import ReactionDriven
| StarcoderdataPython |
4832525 | """Init file for Supervisor Security RESTful API."""
import logging
from typing import Any, Dict
from aiohttp import web
import voluptuous as vol
from ..const import ATTR_CONTENT_TRUST, ATTR_FORCE_SECURITY, ATTR_PWNED
from ..coresys import CoreSysAttributes
from .utils import api_process, api_validate
_LOGGER: logging.Logger = logging.getLogger(__name__)
# pylint: disable=no-value-for-parameter
SCHEMA_OPTIONS = vol.Schema(
{
vol.Optional(ATTR_PWNED): vol.Boolean(),
vol.Optional(ATTR_CONTENT_TRUST): vol.Boolean(),
vol.Optional(ATTR_FORCE_SECURITY): vol.Boolean(),
}
)
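# For reference, a payload accepted by SCHEMA_OPTIONS looks like (values
# illustrative):
#   {"pwned": true, "content_trust": true, "force_security": false}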
class APISecurity(CoreSysAttributes):
"""Handle RESTful API for Security functions."""
@api_process
async def info(self, request: web.Request) -> Dict[str, Any]:
"""Return Security information."""
return {
ATTR_CONTENT_TRUST: self.sys_security.content_trust,
ATTR_PWNED: self.sys_security.pwned,
ATTR_FORCE_SECURITY: self.sys_security.force,
}
@api_process
async def options(self, request: web.Request) -> None:
"""Set options for Security."""
body = await api_validate(SCHEMA_OPTIONS, request)
if ATTR_PWNED in body:
self.sys_security.pwned = body[ATTR_PWNED]
if ATTR_CONTENT_TRUST in body:
self.sys_security.content_trust = body[ATTR_CONTENT_TRUST]
if ATTR_FORCE_SECURITY in body:
self.sys_security.force = body[ATTR_FORCE_SECURITY]
self.sys_security.save_data()
await self.sys_resolution.evaluate.evaluate_system()
| StarcoderdataPython |
4800427 | <filename>h/indexer/__init__.py
from h.indexer.reindexer import reindex
__all__ = ("reindex",)
| StarcoderdataPython |