id (string, lengths 1–8) | text (string, lengths 6–1.05M) | dataset_id (string, 1 class)
---|---|---|
11374124
|
"""Test for nx_itertools.recipes.first_true"""
from nx_itertools.recipes import first_true
def test_normal():
"""Test first_true."""
# first_true with success
data = iter([0, 0, 'A', 0])
res = first_true(data)
assert res == 'A'
assert list(data) == [0]
# first_true with no success
data = iter([0, 0, 0, 0])
res = first_true(data)
assert res is False
assert list(data) == []
# first_true with empty
data = ()
res = first_true(data)
assert res is False
# first_true with predicate and success
data = iter('ABCDE')
res = first_true(data, pred=lambda x: x == 'D')
assert res == 'D'
assert list(data) == ['E']
# first_true with predicate and no success
data = iter('ABCDE')
res = first_true(data, pred=lambda x: x == 'X')
assert res is False
assert list(data) == []
# first_true with no success and default
data = iter([0, 0, 0, 0])
res = first_true(data, default='X')
assert res == 'X'
assert list(data) == []
# first_true with empty and default
data = ()
res = first_true(data, default='X')
assert res == 'X'
# first_true with predicate, no success, and default
data = iter('ABCDE')
res = first_true(data, default='Q', pred=lambda x: x == 'X')
assert res == 'Q'
assert list(data) == []
# first_true with empty, predicate, no success, and default
data = ()
res = first_true(data, default='Q', pred=lambda x: x == 'X')
assert res == 'Q'
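# For reference, a minimal sketch of the function under test, assuming it matches the
# standard itertools "first_true" recipe (not taken from nx_itertools itself):
#
#     def first_true(iterable, default=False, pred=None):
#         """Return the first truthy value, or the first value where pred(x) is truthy."""
#         return next(filter(pred, iterable), default)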
|
StarcoderdataPython
|
12813577
|
#!/usr/bin/env python3
"""Tests for PartOfSpeechTagger class"""
import copy
import unittest
from gruut.pos import PartOfSpeechTagger
class PartOfSpeechTaggerTestCase(unittest.TestCase):
"""Test cases for PartOfSpeechTagger class"""
def test_encode_decode(self):
"""Test encode/decode functions for pycrfsuite features"""
s = "ði ıntəˈnæʃənəl fəˈnɛtık əsoʊsiˈeıʃn"
self.assertEqual(
PartOfSpeechTagger.decode_string(PartOfSpeechTagger.encode_string(s)), s
)
def test_features(self):
"""Test sentence features"""
sentence = "1 test .".split()
word_features = {
"1": {
"bias": 1.0,
"word": "1",
"len(word)": 1,
"word.ispunctuation": False,
"word.isdigit()": True,
"word[:2]": "1",
"word[-2:]": "1",
},
"test": {
"bias": 1.0,
"word": "test",
"len(word)": 4,
"word.ispunctuation": False,
"word[-2:]": "st",
"word[:2]": "te",
"word.isdigit()": False,
},
".": {
"bias": 1.0,
"word": ".",
"len(word)": 1,
"word.ispunctuation": True,
"word.isdigit()": False,
"word[-2:]": ".",
"word[:2]": ".",
},
}
def add_prefix(d, prefix):
return {f"{prefix}{k}": v for k, v in d.items()}
# Add context
context_features = copy.deepcopy(word_features)
context_features["1"].update(add_prefix(word_features["test"], "+1:"))
context_features["test"].update(add_prefix(word_features["1"], "-1:"))
context_features["test"].update(add_prefix(word_features["."], "+1:"))
context_features["."].update(add_prefix(word_features["test"], "-1:"))
# Add BOS/EOS
context_features["1"]["BOS"] = True
context_features["."]["EOS"] = True
expected_features = [
context_features["1"],
context_features["test"],
context_features["."],
]
actual_features = PartOfSpeechTagger.sent2features(
sentence,
words_forward=1,
words_backward=1,
chars_front=2,
chars_back=2,
encode=False,
)
self.assertEqual(expected_features, actual_features)
# -----------------------------------------------------------------------------
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
1934993
|
<reponame>ckurze/mongodb-iot-reference
import base64
import os
import yaml
import json
from collections import defaultdict
from pymongo import MongoClient
import helper
import db_operations as operations
def process(event, context):
'''
Entrypoint for Cloud Function 'iotcore_to_mongodb'. Read messages from IoT Core and insert them into MongoDB.
'''
MONGO_URI = os.environ.get("MONGO_URI")
if MONGO_URI is None:
raise ValueError('No MongoDB Cluster provided. Will exit.')
mongo_client = MongoClient(MONGO_URI)
db = mongo_client.citibike
#read messages from event and group them by action
message_str = base64.b64decode(event['data']).decode('utf-8')
message = json.loads(message_str)
grouped_messages = defaultdict(list)
action = message.pop('action', 'none')
grouped_messages[action].append(message)
#messages with action == fullRefresh are split into status and station messages
split_full_refresh_messages(grouped_messages)
#do "bulk" inserts for both types of messages (we used the same code as for Azure
#where bulk reading messages is possible. In this implementation each message is read seperately
#so no actual bulk inserts should happen)
refresh_stations(db, grouped_messages.get('refreshStation', []))
refresh_status(db, grouped_messages.get('refreshStatus', []))
return
def split_full_refresh_messages(grouped_messages):
'''
Split messages with an action 'fullRefresh' into 'refreshStatus' and 'refreshStation' messages.
'''
full_refresh_msgs = grouped_messages.get('fullRefresh', [])
for msg in full_refresh_msgs:
grouped_messages['refreshStatus'].append(msg.pop('status'))
grouped_messages['refreshStation'].append(msg)
def refresh_stations(db, messages):
'''
Insert 'station_information' updates into MongoDB.
'''
if len(messages) == 0:
return
stations_collection = db.stations
operations.ensure_indexes(
db=db, stations_collection=stations_collection)
#pre process station information -> convert geo information to valid geo json object
stations = helper.preprocess_stations(messages)
#send bulk updates to database
operations.update_station_information(
stations=stations, collection=stations_collection, batch_size=100)
def refresh_status(db, messages):
'''
Insert 'status' updates into MongoDB.
'''
if len(messages) == 0:
return
status_collection = db.status
metadata_collection = db.metadata
operations.ensure_indexes(
db=db, status_collection=status_collection, metadata_collection=metadata_collection)
#find stations that should be updated based on last update
stations_last_updated = operations.get_station_last_updated(
collection=metadata_collection, feed='refreshStatus')
stations = helper.get_station_status_to_update(
messages, stations_last_updated)
#update remaining stations
operations.update_station_status(stations=stations, collection=status_collection,
metadata_collection=metadata_collection, feed='STATUS_URL', batch_size=100)
def load_env(path):
with open(path) as yaml_file:
values = yaml.load(yaml_file, Loader=yaml.FullLoader)
for key in values:
os.environ[key] = str(values[key])
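# Hypothetical local smoke test (not part of the deployed Cloud Function); the
# 'env.yaml' path and the sample message fields below are illustrative assumptions only.
if __name__ == '__main__':
    load_env('env.yaml')  # assumed YAML file providing MONGO_URI
    sample = {'action': 'refreshStatus', 'station_id': '72'}
    event = {'data': base64.b64encode(json.dumps(sample).encode('utf-8'))}
    process(event, context=None)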
|
StarcoderdataPython
|
3428899
|
<filename>cogs/levels.py
import io
import random
import time
from datetime import datetime
import discord
from discord.ext import commands
from easy_pil import Canvas, Editor, Font, load_image_async
from tabulate import tabulate
from helpers.bot import Bonbons
class Levels(commands.Cog):
"""
A cog for levels.
"""
def __init__(self, bot: Bonbons) -> None:
self.bot = bot
self.db = self.bot.mongo["levels"]
self.levels: dict[int, int] = {}
self.make_levels()
self.set_attributes()
@property
def emoji(self) -> str:
return "⬆️"
def make_levels(self) -> None:
for number in range(500):
self.levels[number] = 125 * number
def set_attributes(self) -> None:
self.bot.generate_rank_card = self.generate_rank_card
self.bot.generate_leaderboard = self.generate_leaderboard
async def generate_rank_card(
self, ctx: commands.Context, member: discord.Member, data, background=None
) -> None:
next_level_xp = self.levels[int(data["level"]) + 1]
percentage = (data["xp"] / next_level_xp) * 100
user_data = {
"name": str(member),
"xp": int(data["xp"]),
"next_level_xp": next_level_xp,
"level": int(data["level"]),
"percentage": int(percentage),
}
if background:
background = Editor(await load_image_async(str(background))).resize(
(800, 280)
)
else:
background = Editor(Canvas((800, 280), color="#23272A"))
profile_image = await load_image_async(str(member.display_avatar.url))
profile = Editor(profile_image).resize((150, 150)).circle_image()
poppins = Font.poppins(size=40)
poppins_small = Font.poppins(size=30)
card_right_shape = [(600, 0), (750, 300), (900, 300), (900, 0)]
background.polygon(card_right_shape, "#2C2F33")
background.paste(profile, (30, 30))
background.rectangle((30, 200), width=650, height=40, fill="#494b4f", radius=20)
background.bar(
(30, 200),
max_width=650,
height=40,
percentage=user_data["percentage"],
fill="#3db374",
radius=20,
)
background.text((200, 40), user_data["name"], font=poppins, color="white")
background.rectangle((200, 100), width=350, height=2, fill="#17F3F6")
background.text(
(200, 130),
f"Level: {user_data['level']} "
+ f" XP: {user_data['xp']: ,} / {user_data['next_level_xp']: ,}",
font=poppins_small,
color="white",
)
with io.BytesIO() as buffer:
background.save(buffer, "PNG")
buffer.seek(0)
embed = discord.Embed(color=discord.Color.blurple())
embed.set_image(url="attachment://rank_card.png")
return await ctx.send(
file=discord.File(buffer, "rank_card.png"), embed=embed
)
async def generate_leaderboard(self, ctx: commands.Context) -> None:
before = time.perf_counter()
background = Editor(Canvas((1400, 1280), color="#23272A"))
db = self.db[str(ctx.guild.id)]
paste_size = 0
text_size = 40
for x in await db.find().sort("level", -1).to_list(10):
user = self.bot.get_user(x["_id"]) or await self.bot.fetch_user(x["_id"])
if user.avatar is None:
img = Editor(await load_image_async(str(user.display_avatar))).resize(
(128, 128)
)
else:
img = await load_image_async(
str(user.display_avatar.with_size(128).with_format("png"))
)
background.text(
(175, text_size),
f'{str(user)} • Level{x["level"]: ,}',
color="white",
font=Font.poppins(size=50),
)
background.paste(img, (0, paste_size))
paste_size += 128
text_size += 130
with io.BytesIO() as buffer:
background.save(buffer, "PNG")
buffer.seek(0)
done = time.perf_counter() - before
embed = discord.Embed(
title=f"{ctx.guild.name} Level Leaderboard",
description="This is based off of your level and not XP.",
color=discord.Color.blurple(),
timestamp=datetime.utcnow(),
)
embed.set_image(url="attachment://leaderboard.png")
embed.set_footer(text=f"Took{done: .2f}s")
return await ctx.send(
file=discord.File(buffer, "leaderboard.png"), embed=embed
)
@commands.command(name="rank", aliases=("level",))
@commands.cooldown(1, 20, commands.BucketType.user)
async def rank(self, ctx: commands.Context, member: discord.Member = None):
"""Tells you your current level embedded inside an image."""
member = member or ctx.author
db = self.db[str(ctx.guild.id)]
data = await db.find_one({"_id": member.id})
if data is not None:
return await self.generate_rank_card(ctx, member, data)
await ctx.reply(
"You have no XP somehow. Send some more messages into the chat and try again.."
)
@commands.command(name="setlevel")
@commands.cooldown(1, 20, commands.BucketType.user)
async def setlevel(self, ctx: commands.Context, member: discord.Member, level: int):
db = self.db[str(ctx.guild.id)]
data = await db.find_one({"_id": member.id})
if data is not None:
await db.update_one({"_id": data["_id"]}, {"$set": {"level": level}})
return await ctx.reply(f"{member}'s level has been set to {level}.")
@commands.command(name="leaderboard", aliases=("lb",))
@commands.cooldown(1, 20, commands.BucketType.user)
async def leaderboard(self, ctx: commands.Context, args: str = None):
"""Shows the level leaderboard for the current server."""
if args:
if args == "--text" or args == "--all":
data = {"level": [], "xp": [], "id": []}
db = self.db[str(ctx.guild.id)]
async for doc in db.find().sort("level", -1):
data["level"].append(doc["level"])
data["xp"].append(doc["xp"])
data["id"].append(doc["_id"])
content = tabulate(
{"ID": data["id"], "Level": data["level"], "XP": data["xp"]},
["ID", "Level", "XP"],
tablefmt="pretty",
)
file = discord.File(
io.BytesIO(content.encode("utf-8")), filename="text.txt"
)
return await ctx.send(file=file)
else:
pass
await self.generate_leaderboard(ctx)
@commands.Cog.listener("on_message")
async def handle_message(self, message: discord.Message):
if message.author.bot:
return
if not isinstance(message.channel, discord.TextChannel):
return
db = self.db[str(message.guild.id)]
data = await db.find_one({"_id": message.author.id})
xp = random.randint(10, 200)
if data is not None:
next_level = data["level"] + 1
next_level_xp = self.levels[next_level]
if int(data["xp"]) >= int(next_level_xp):
await db.update_one({"_id": message.author.id}, {"$inc": {"level": 1}})
await db.update_one(
{"_id": message.author.id}, {"$set": {"xp": xp / 2}}
)
return
await db.update_one({"_id": message.author.id}, {"$inc": {"xp": xp}})
return
if data is None:
await db.insert_one({"_id": message.author.id, "xp": xp, "level": 1})
return
async def setup(bot):
print("Loaded: Levels")
await bot.add_cog(Levels(bot))
|
StarcoderdataPython
|
6650898
|
from .dirutils import *
|
StarcoderdataPython
|
1869035
|
import os
from mask_rcnn.utils import extract_bboxes
from visual_tools.visualize import display_instances
from .gui_viewer import GuiViewer
class GuiCocoViewer(GuiViewer):
def __init__(self, figurename, dataset):
super(GuiCocoViewer, self).__init__(figurename)
self.dataset = dataset
self.num_images = self.dataset.num_images
self.create_slider()
self.create_textbox()
self.display()
def display(self):
should_update = super(GuiCocoViewer, self).display()
if should_update:
image = self.dataset.load_image(self.image_id)
masks, class_ids = self.dataset.load_mask(self.image_id)
# Compute Bounding box
bbox = extract_bboxes(masks)
display_instances(image, bbox, masks, class_ids,
self.dataset.class_names, ax=self.ax)
title = "ID: {}\nImage file name: {}\nThe number of objects: {}".format(
self.image_id,
os.path.basename(self.dataset.source_image_link(self.image_id)),
len(class_ids)
)
self.fig.suptitle(title, fontsize=20)
self.fig.canvas.draw_idle()
|
StarcoderdataPython
|
11250090
|
import sys
from cmake_checker.components.file_finder import provide_files_for_verification
from cmake_checker.components.parse_arguments import parse_arguments
from cmake_checker.components.verifier import Verifier
from cmake_checker.components.reporter import Reporter
def compute_exit_code(violations: list, warn_only: bool) -> int:
if warn_only is True:
return 0
if any(len(v) for f, v in violations):
return -1
return 0
def main():
arguments = parse_arguments()
verify = Verifier()
files = provide_files_for_verification(arguments.PATH, arguments.whitelist)
files_with_info = verify.check_path(files)
reporter = Reporter.create(arguments.reporter, files_with_info)
arguments.output_file.write(reporter.generate_report())
sys.exit(compute_exit_code(files_with_info, arguments.warn_only))
main()
|
StarcoderdataPython
|
6686193
|
import datetime
bills = [
{
u'_all_ids': [u'EXB00000001'],
u'_current_session': True,
u'_current_term': True,
u'_id': u'EXB00000001',
u'_term': u'T1',
u'_type': u'bill',
u'action_dates': {
u'first': datetime.datetime(2011, 1, 7, 0, 0),
u'last': datetime.datetime(2011, 4, 15, 0, 0),
u'passed_lower': datetime.datetime(2011, 4, 15, 0, 0),
u'passed_upper': None,
u'signed': None
},
u'actions': [
{u'action': u'Fake Passed',
u'actor': u'lower',
u'date': datetime.datetime(2011, 8, 24, 0, 0),
u'related_entities': [],
u'type': [u'bill:passed']},
{u'action': u'Fake introduced',
u'actor': u'lower',
u'date': datetime.datetime(2012, 1, 23, 0, 0),
u'related_entities': []}
],
u'type': [u'bill:introduced'],
u'alternate_titles': [],
u'bill_id': u'AB 1',
u'chamber': u'lower',
u'companions': [],
u'country': u'us',
u'level': u'state',
u'session': u'S1',
u'sponsors': [
{u'leg_id': u'EXL000001',
u'name': u'FakeLegislator1',
u'type': u'primary'}],
u'state': u'ex',
u'title': u'A fake act.',
u'type': [u'bill'],
u'subjects': [u'Labor and Employment']
},
{
u'_all_ids': [u'LOB00000001'],
u'_current_session': True,
u'_current_term': True,
u'_id': u'LOB00000001',
u'_term': u'T1',
u'_type': u'bill',
u'action_dates': {
u'first': datetime.datetime(2011, 1, 7, 0, 0),
u'last': datetime.datetime(2011, 4, 15, 0, 0),
u'passed_lower': datetime.datetime(2011, 4, 15, 0, 0),
u'passed_upper': None,
u'signed': None
},
u'actions': [
{u'action': u'LOL Passed',
u'actor': u'lower',
u'date': datetime.datetime(2011, 8, 24, 0, 0),
u'related_entities': [],
u'type': [u'bill:passed']},
{u'action': u'LOL introduced',
u'actor': u'lower',
u'date': datetime.datetime(2012, 1, 23, 0, 0),
u'related_entities': []}],
u'type': [u'bill:introduced'],
u'alternate_titles': [],
u'bill_id': u'HB 1',
u'chamber': u'lower',
u'companions': [],
u'country': u'us',
u'level': u'state',
u'session': u'S1',
u'sponsors': [
{u'leg_id': u'LOL000001',
u'name': u'<NAME>',
u'type': u'primary'}],
u'state': u'ex',
u'title': u'A fake act.',
u'type': [u'bill']
}
]
|
StarcoderdataPython
|
6408164
|
from .convert import UCCA2tree, to_UCCA
from .trees import InternalParseNode, LeafParseNode
from .trees import InternalTreebankNode, LeafTreebankNode
from .trees import get_position
__all__ = (
"UCCA2tree",
"to_UCCA",
"InternalParseNode",
"LeafParseNode",
"InternalTreebankNode",
"LeafTreebankNode",
"get_position",
)
|
StarcoderdataPython
|
3544728
|
'''
Simple text based Sudoku solver.
'''
__author__ = '<NAME>'
import copy
def uniqueInsert(l, v):
'''
Add v to list if it is not already there, else raise ValueError
'''
if v is not None:
if v in l:
raise ValueError('list already contains value %s' % v)
assert 0 < v < 10, 'Only 1-9 allowed, got %s' % v
l.append(v)
class Sudoku:
def submat(self, i, j):
'''
Return i, j 3x3 submatrix of self.
'''
mat = self.mat
out = []
for srow_i in range(3):
row = []
for scol_i in range(3):
v = mat[i * 3 + srow_i][j * 3 + scol_i]
row.append(v)
out.append(row)
return out
def copy(self):
return Sudoku(copy.deepcopy(self.mat))
def add(self, v, i, j):
'''
Fill in an entry in self.mat
'''
self.mat[i][j] = v
uniqueInsert(self.rows[i], v)
uniqueInsert(self.cols[j], v)
sub_i = i // 3 * 3 + j // 3
uniqueInsert(self.subs[sub_i], v)
def __init__(self, mat):
'''
Create a new Sudoku instance.
mat -- 9x9 array of digits 1-9
or None if no value is known for that spot
'''
self.mat = mat
# keep track of all values used in each row, column and sub-matrix.
rows = [[] for i in range(9)]
cols = [[] for i in range(9)]
subs = [[] for i in range(9)]
for row_i in range(9):
for col_i in range(9):
v = self.mat[row_i][col_i]
uniqueInsert(rows[row_i], v)
uniqueInsert(cols[col_i], v)
for srow_i in range(3):
for scol_i in range(3):
sub = self.submat(srow_i, scol_i)
for i in range(3):
for j in range(3):
v = sub[i][j]
sub_i = srow_i * 3 + scol_i
uniqueInsert(subs[sub_i], v)
self.rows = rows
self.cols = cols
self.subs = subs
def __repr__(self):
out = ''
for i in range(9):
if i % 3 == 0:
out += '+-------+-------+-------+\n'
for j in range(9):
if j % 3 == 0:
out += '| '
v = self.mat[i][j]
if v is not None:
out += '%1d ' % v
else:
out += ' '
out += '|\n'
out += '+-------+-------+-------+\n'
return out
def solve(self):
'''
Solve for the unknown positions of the puzzle
'''
min_poss = 9 # Minimum possible number of choices for a cell
done = True
for i in range(9):
for j in range(9):
sub_i = i // 3 * 3 + j // 3 # sub-matrix index
v = self.mat[i][j]
if v:
pass
else:
# not all values filled out so we are not done yet
done = False
all = set(range(1, 10))
# determine all possible values for this cell
possible = (all.difference(self.rows[i])
.difference(self.cols[j])
.difference(self.subs[sub_i]))
# see if we have run into a brick wall
if len(possible) == 0:
raise ValueError('Sudoku not solvable')
elif len(possible) < min_poss:
# keep track of cell with smallest number of choices
min_poss = len(possible)
best = possible
min_i = i
min_j = j
if done:
out = self
else:
# Try these possibilities and recurse
for b in best:
print(min_i, min_j, b)
trial = self.copy()
trial.add(b, min_i, min_j)
print(trial)
try:
soln = trial.solve()
break
except ValueError:
soln = None
if soln is None:
print(self)
raise ValueError('Sudoku not solvable')
out = soln
return out
N = None
easy = [
[7, N, N, 1, 5, N, N, N, 8],
[N, N, 4, N, N, 2, N, N, N],
[N, N, N, N, N, 4, 5, 6, N],
[6, N, N, N, N, N, N, 2, 9],
[5, N, 2, N, N, N, 8, N, 4],
[3, 4, N, N, N, N, N, N, 1],
[N, 3, 8, 6, N, N, N, N, N],
[N, N, N, 2, N, N, 9, N, N],
[1, N, N, N, 8, N, N, N, 3]
]
hard = [
[N, 4, N, N, N, 7, 9, N, N],
[N, N, 8, 5, 3, 9, N, N, N],
[N, 6, N, N, N, N, 2, N, 3],
[N, N, N, N, N, 2, 5, N, N],
[N, 8, 6, N, N, N, 1, 4, N],
[N, N, 9, 8, N, N, N, N, N],
[6, N, 3, N, N, N, N, 9, N],
[N, N, N, 9, 8, 6, 3, N, N],
[N, N, 1, 4, N, N, N, 6, N]
]
evil = [
[4, 2, N, N, N, N, N, 1, N],
[N, N, N, 5, 4, N, N, 3, N],
[N, N, 6, N, N, 7, N, N, N],
[N, N, N, N, N, N, 2, 7, 9],
[N, 1, N, N, N, N, N, 6, N],
[3, 4, 2, N, N, N, N, N, N],
[N, N, N, 9, N, N, 3, N, N],
[N, 6, N, N, 3, 8, N, N, N],
[N, 8, N, N, N, N, N, 5, 7]
]
blank = [
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N],
[N, N, N, N, N, N, N, N, N]
]
import time
easy = Sudoku(easy)
hard = Sudoku(hard)
evil = Sudoku(evil)
print()
print('easy')
print(easy)
time.sleep(2)
easy.solve()
print()
print('hard')
print(hard)
time.sleep(2)
hard.solve()
print()
print('evil')
print(evil)
print()
time.sleep(2)
evil.solve()
|
StarcoderdataPython
|
261670
|
<reponame>HitmanBobina47/family-task-queue<gh_stars>0
from flask import Flask
import tempfile, os
from family_task_queue import db
def create_app():
app = Flask(__name__)
db_path = os.path.join(tempfile.gettempdir(), "test.db")
app.config["SQLALCHEMY_DATABASE_URI"] = f"sqlite:///{db_path}"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["TESTING"] = True
db.init_app(app)
db.db.drop_all(app=app)
db.db.create_all(app=app)
return app
def create_user_and_family(**kwargs):
kwargs2 = {
"first_name": "Fatso",
"last_name": "McGatso",
"family": "Mongowski"
}
db.user.create_family(kwargs2["family"])
kwargs2.update(kwargs)
db.user.create_user(
"fatsomcgatso",
"password",
"<EMAIL>",
**kwargs2
)
|
StarcoderdataPython
|
1666539
|
# -*- coding: utf-8 -*-
"""These are search-related elements for the django ORM, which have
been moved to a separate model because the FTS implementation
requires a *hybrid* implementation that is different from the other
models. The functionality was moved here to make it easier to support
test routines for our FTS implementation.
There is no FTS support in django but it can be made to work with a
model that shadows a postgres materialized view built to contain
search terms in a specially constructed document.
.. moduleauthor:: <NAME>, glenl.glx at gmail.com
"""
import re
import string
from django.db import connection
from django.db import models
from mutopia.models import Piece
MV_NAME = 'mutopia_search_view'
MV_INDEX_NAME = 'mutopia_search_index'
MV_DROP = 'DROP MATERIALIZED VIEW IF EXISTS {0}'
MV_CREATE = """
CREATE MATERIALIZED VIEW {0}
(id, piece_id, document)
AS SELECT
p.piece_id,
p.piece_id,
(to_tsvector('pg_catalog.simple',
concat_ws(' ', unaccent(p.title),
unaccent(c.description),
unaccent(p.opus),
p.style_id,
unaccent(p.raw_instrument),
unaccent(p.lyricist),
unaccent(p.source),
unaccent(m.name),
p.date_composed,
unaccent(p.moreinfo),
v.version)
)) AS document
FROM "mutopia_piece" as p
JOIN "mutopia_lpversion" AS v ON v.id = p.version_id
JOIN "mutopia_composer" AS c ON c.composer = p.composer_id
JOIN "mutopia_contributor" AS m ON m.id = p.maintainer_id
"""
MV_INDEX_DROP = 'DROP INDEX IF EXISTS {0}'
MV_INDEX_CREATE = 'CREATE INDEX {0} ON {1} USING GIN(document)'
MV_REFRESH = 'REFRESH MATERIALIZED VIEW {0}'
# FTS is not supported directly in Django so we are going to execute a
# manual query. This is a format string that is designed to be filled
# in by a keyword sanitized to form the query.
_PG_FTSQ = ' '.join([ "SELECT piece_id FROM",
MV_NAME,
"WHERE document @@ to_tsquery('pg_catalog.simple', unaccent(%s))"])
class SearchTerm(models.Model):
"""A model to shadow a Postgres materialized view containing a
document containing search terms associated with the given
:class:`mutopia.models.Piece`.
The shadowed table should be refreshed after any updates.
"""
#:The target piece for this search document.
piece_id = models.ForeignKey(Piece)
#:The search document for FTS
document = models.TextField()
class Meta:
db_table = MV_NAME
managed = False
@classmethod
def rebuild_view(cls):
"""Drop and re-create the materialized view and its index."""
cursor = connection.cursor()
cursor.execute(MV_DROP.format(MV_NAME))
cursor.execute(MV_CREATE.format(MV_NAME))
cursor.execute(MV_INDEX_DROP.format(MV_INDEX_NAME))
cursor.execute(MV_INDEX_CREATE.format(MV_INDEX_NAME, MV_NAME))
@classmethod
def refresh_view(cls):
"""Refresh the associated view.
After updates and inserts, the materialized view needs to be
refreshed. We could do this with a trigger but for now it is
simple enough to do it from this class method after processing
submissions.
"""
cursor = connection.cursor()
cursor.execute(MV_REFRESH.format(MV_NAME))
@classmethod
def _sanitize(cls, term):
"""Sanitize input to the search routine.
:param str term: Input string to clean.
:return: A sanitized string, ready for FTS.
"""
# Replace all punctuation with spaces.
allowed_punctuation = set(['&', '|', '"', "'", '!'])
all_punctuation = set(string.punctuation)
punctuation = ''.join(all_punctuation - allowed_punctuation)
term = re.sub(r'[{}]+'.format(re.escape(punctuation)), ' ', term)
# Substitute all double quotes to single quotes.
term = term.replace('"', "'")
term = re.sub(r"'+", "'", term)
# If there are no special characters, AND the search terms together.
if not re.search('[&|!]', term):
term = re.sub(r'\s+', ' & ', term)
return term
@classmethod
def search(cls, keywords):
"""Given keyword string, search using FTS. Because FTS is not a
supported feature of django, it is faked here by using a
manual query that returns :class:`mutopia.models.Piece` keys.
:param str keywords: Input from the user
:return: Zero or more Pieces.
:rtype: A Piece query set.
"""
terms = cls._sanitize(keywords)
cursor = connection.cursor()
results = []
try:
cursor.execute(_PG_FTSQ, [terms,])
results = [row[0] for row in cursor.fetchall()]
finally:
cursor.close()
# The results from manual queries do not return true QuerySets
# so these are translated for the caller with a filter.
return Piece.objects.filter(pk__in=results).order_by('-piece_id')
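# Hypothetical usage sketch (assumes the materialized view has already been built
# via rebuild_view() and the Postgres 'unaccent' extension is available):
#
#     SearchTerm.rebuild_view()                 # (re)create the view and its GIN index
#     pieces = SearchTerm.search('bach cello')  # keywords become the tsquery "bach & cello"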
|
StarcoderdataPython
|
3439010
|
cpu = {
'cpu_times': {
'user': 0.0,
'system': 0.0,
'idle': 0.0,
'interrupt': 0.0,
'dpc': 0.0,
},
'cpu_percent': {
'0' : 0.0,
'1' : 0.0,
'2' : 0.0,
'4' : 0.0
},
'cpu_times_percent': {
},
'cpu_count': {
},
'cpu_stats': {
},
'cpu_freq': {
},
'cpu_load': {
}
}
|
StarcoderdataPython
|
1778529
|
# -*- coding: utf-8 -*-
# @Time : 26.04.21 11:56
# @Author : sing_sd
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
from pathlib import Path
import src.common_functions as cf
import src.clustering.COStransforms as ct
import src.clustering.cluster_association as ca
plt.rcParams.update({'font.size': 12})
def main():
data = pd.DataFrame(get_ais_data_interpol_1min())
data.columns = ["x", "y", "cog", "sog", "cluster", "mmsi"]
with open("../resources/graph_nodes_refined.pkl", 'rb') as f:
nodes_new = pickle.load(f)
print("Graph nodes import succeeded")
with open("../resources/graph_edges_refined.pkl", 'rb') as f:
edges_new = pickle.load(f)
print("Graph edges import succeeded")
plot_refined_graph_association(data, nodes_new, edges_new)
def get_ais_data_interpol_1min():
nr_of_actual_vessels = -1
path = Path('/home/sing_sd/Desktop/anomaly_detection/PythonCode/Trajectory_Prediction/')
filename = path / 'all_sampled_tracks_interpol_1minute.csv'
with open(filename, 'rb') as f:
data = pd.read_csv(f)
data["cluster"] = 0
data["mmsi"] = 0
data = np.array(data)
filename = path / 'data_len_sampled_tracks_interpol_1minute.pkl'
with open(filename, 'rb') as f:
data_all_tracks = pickle.load(f)
overall_data = np.ones_like(data)
nr_track = 1000000
index = 0
data_index = 0
dim = 4
for track_nr in range(len(data_all_tracks)):
nr_data = data_all_tracks[track_nr]
if 20 < data_all_tracks[track_nr] < 500:
nr_track += 1
overall_data[index: index + nr_data, 0:dim] = data[data_index: data_index + nr_data, 0:dim]
overall_data[index: index + nr_data, dim + 1] = nr_track
index += nr_data
data_index += nr_data
# get rostock-gedsar data # above code does not get rostock-gedsar data
filename = path / 'rostock_gedsar_interpol_1min.csv'
with open(filename, 'rb') as f:
data = np.array(pd.read_csv(f))
overall_data[index: index + len(data), 0:dim] = data[:, 0:dim]
overall_data[index: index + len(data), dim + 1] = nr_track + 1
index += len(data)
overall_data = np.delete(overall_data, range(index, len(overall_data)), axis=0)
return overall_data
def plot_refined_graph_association(data, nodes_new, edges_new):
fig, axs = plt.subplots(1, 1)
fig.set_size_inches([7, 5])
plt.pause(0.0001)
colour_array = ["r", "g", "b", "y", "c", "m", "#9475FC", "k"] # an extra k
for mmsi in data.mmsi.unique(): # [vessel_nr:vessel_nr+1]: #
idx_all = data['mmsi'] == mmsi
decoded_mmsi = data[data['mmsi'] == mmsi]
decoded_mmsi = decoded_mmsi.reset_index(drop=True)
# assignment = ca.assign_to_graph(decoded_mmsi[["x", "y"]])#
assignment = ca.get_assignment(decoded_mmsi[["x", "y"]])
for cluster in np.unique(assignment):
idx = assignment == cluster
axs.scatter(np.array(decoded_mmsi.iloc[idx, 0]), np.array(decoded_mmsi.iloc[idx, 1]), c=colour_array[cluster],
marker=".", s=0.5) # s=0.05
plt.pause(0.00001)
data.iloc[idx_all, data.columns.get_loc("cluster")] = assignment #
data.to_csv("./resources/ais_data_1min_graph.csv", index=False)
for ee, e in enumerate(edges_new):
axs.plot([nodes_new[e[0], 0], nodes_new[e[1], 0]], [nodes_new[e[0], 1], nodes_new[e[1], 1]], linestyle="-",
color="black", linewidth=4)
axs.scatter([nodes_new[e[0], 0], nodes_new[e[1], 0]], [nodes_new[e[0], 1], nodes_new[e[1], 1]], marker=".",
color="black", s=10)
plt.pause(0.0001)
axs.set_xlabel('Longitude [deg]')
axs.set_ylabel('Latitude [deg]')
plt.pause(0.001)
plt.savefig("./results/graph_association_dataset2.png")
plt.savefig("./results/graph_association_dataset2.pdf")
plt.show()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
8031471
|
<filename>src/file_note/main.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import os
import sys
import logging
from argparse import ArgumentParser
from file_note.note import FileNote, filter_notes_by_keyword, formatted_print
logging.basicConfig(format="%(levelname)s - %(message)s", level=logging.INFO)
def main():
parser = ArgumentParser('file_note')
parser.add_argument('--add', '-a', type=str, nargs='+', help='Add file note.')
parser.add_argument('--add_relation', '-ar', type=str, nargs='+', help='Add a relation between two files.')
parser.add_argument('--print_file', '-pf', action='store_true', help='Print the file table.')
parser.add_argument('--print_relation', '-pr', type=str, help='Print the relationships of a given file.')
parser.add_argument('--dir', '-d', type=str, help='Print the notes of files in the given dir.')
parser.add_argument('--keyword', '-k', type=str, help='Print the notes of files with the given keyword.')
parser.add_argument('--recursive', '-r', action='store_true', help='Print the notes of files in the given directory recursively.')
parser.add_argument('--replace', '-rp', action='store_true', help='If replace the existing note.')
args = parser.parse_args()
fn = FileNote()
if args.add:
add = args.add
assert len(add) == 2, 'The length of add must be 2.'
fp, rmk = add
if not os.path.exists(fp):
raise FileNotFoundError(fp)
else:
fp = os.path.abspath(fp)
fn.add_note(fp, rmk, args.replace)
if args.add_relation:
add_rel = args.add_relation
if len(add_rel) == 3:
fn.add_relation(*add_rel, '', replace=args.replace)
elif len(add_rel) == 4:
fn.add_relation(*add_rel, replace=args.replace)
else:
logging.error('The length of add_relation must be 3 or 4.')
sys.exit()
if args.print_file:
if args.keyword:
fn.file_table = filter_notes_by_keyword(fn.file_table, args.keyword)
loc = os.path.abspath(args.dir) if args.dir else ''
if not args.recursive:
for k, v in fn.file_table.items():
if loc:
d = os.path.split(k)[0]
if d == loc:
formatted_print(k, v)
else:
formatted_print(k, v)
else:
fn.print_notes_recursively(args.dir)
if args.print_relation:
rel = args.print_relation
fn.print_relationships(rel)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6558187
|
import sys
import mock
from hurricane.server.debugging import setup_debugging, setup_debugpy, setup_pycharm
from hurricane.server.exceptions import IncompatibleOptions
from hurricane.testing import HurricanServerTest
class HurricanDebuggerServerTest(HurricanServerTest):
alive_route = "/alive"
@HurricanServerTest.cycle_server(args=["--debugger"])
def test_debugger(self):
res = self.probe_client.get(self.alive_route)
out, err = self.driver.get_output(read_all=True)
self.assertEqual(res.status, 200)
self.assertIn("Listening for debug clients at port 5678", out)
def test_incompatible_debugger_and_autoreload(self):
with self.assertRaises(IncompatibleOptions):
setup_debugging({"autoreload": True, "debugger": True, "pycharm_host": True})
def test_debugger_success_and_import_error(self):
options = {"debugger": True, "debugger_port": 8071}
with mock.patch("debugpy.listen") as dbgpy:
dbgpy.side_effect = None
setup_debugpy(options)
sys.modules["debugpy"] = None
options = {"debugger": True}
setup_debugpy(options)
def test_pycharm_success_and_import_error(self):
options = {"pycharm_host": "test", "pycharm_port": 8071}
with mock.patch("pydevd_pycharm.settrace") as pdvd:
pdvd.side_effect = None
setup_pycharm(options)
sys.modules["pydevd_pycharm"] = None
options = {"pycharm_host": "test"}
setup_pycharm(options)
|
StarcoderdataPython
|
3387788
|
class Constants(object):
VERSIONS = {
'champion_mastery': 'v4',
'league': 'v4',
'match': 'v4',
'spectator': 'v4',
'summoner': 'v4',
'third_party_code': 'v4'
}
RESPONSE_CODES = {
'400': 'Bad request',
'401': 'Unauthorized',
'403': 'Forbidden',
'404': 'Not found',
'405': 'Method not allowed',
'415': 'Unsupported media type',
'422': 'Player exists, but hasn\'t played since match history collection began',
'429': 'Rate limit exceeded',
'500': 'Internal server error',
'502': 'Bad gateway',
'503': 'Service unavailable',
'504': 'Gateway timeout'
}
SEASONS = [
'Preseason 3',
'Season 3',
'Preseason 2014',
'Season 2014',
'Preseason 2015',
'Season 2015',
'Preseason 2016',
'Season 2016',
'Preseason 2017',
'Season 2017',
'Preseason 2018',
'Season 2018'
]
# Populate the queue, queueId, and gameQueueConfigId fields; not including deprecated queues
MATCHMAKING_QUEUES = {
'0': {
'Map': 'Custom Games',
'Description': None,
},
'72': {
'Map': 'Howling Abyss',
'Description': '1v1 Snowdown Showdown games',
},
'73': {
'Map': 'Howling Abyss',
'Description': '2v2 Snowdown Showdown games',
},
'75': {
'Map': 'Summoner\'s Rift',
'Description': '6v6 Hexakill games',
},
'76': {
'Map': 'Summoner\'s Rift',
'Description': 'Ultra Rapid Fire games',
},
'78': {
'Map': 'Howling Abyss',
'Description': 'One For All: Mirror Mode games',
},
'83': {
'Map': 'Summoner\'s Rift',
'Description': 'Co-op vs AI Ultra Rapid Fire games',
},
'98': {
'Map': 'Twisted Treeline',
'Description': '6v6 Hexakill games',
},
'100': {
'Map': 'Butcher\'s Bridge',
'Description': '5v5 ARAM games',
},
'310': {
'Map': 'Summoner\'s Rift',
'Description': 'Nemesis games',
},
'313': {
'Map': 'Summoner\'s Rift',
'Description': 'Black Market Brawlers games',
},
}
# DEPRECATED_QUEUES = {
# }
ENDPOINTS = {
# Regional Endpoints
'br': 'br1.api.riotgames.com',
'eune': 'eun1.api.riotgames.com',
'euw': 'euw1.api.riotgames.com',
'jp': 'jp1.api.riotgames.com',
'kr': 'kr.api.riotgames.com',
'lan': 'la1.api.riotgames.com',
'las': 'la2.api.riotgames.com',
'na': 'na.api.riotgames.com',
'na-old': 'na1.api.riotgames.com',
'oce': 'oc1.api.riotgames.com',
'tr': 'tr1.api.riotgames.com',
'ru': 'ru.api.riotgames.com',
'pbe': 'pbe1.api.riotgames.com',
# Regional Proxies
'americas': 'americas.api.riotgames.com',
'europe': 'europe.api.riotgames.com',
'asia': 'asia.api.riotgames.com'
}
# URLS; Convention: all urls start in '/' but do not end in one
URLS_BASE = {
# Main base for all URLs
'base': 'https://{endpoint}{url}',
# Primary midpoints for all sub-apis
'champion_mastery': '/lol/champion-mastery/{version}',
'league': '/lol/league/{version}',
'match': '/lol/match/{version}',
'spectator': '/lol/spectator/{version}',
'summoner': '/lol/summoner/{version}',
'third_party_code': '/lol/platform/{version}'
}
URLS_CHAMPION_MASTERY = {
# Get all champion mastery entities sorted by number of champion points descending.
'all masteries': URLS_BASE['champion_mastery'] + \
'/champion-masteries/by-summoner/{summoner_id}',
# Get a champion mastery by player ID and champion ID.
'champion mastery': URLS_BASE['champion_mastery'] + \
'/champion-masteries/by-summoner/{summoner_id}/by-champion/{champion_id}',
# Get a player's total champion mastery score, which is the sum of individual champion mastery levels.
'total mastery': URLS_BASE['champion_mastery'] + \
'/scores/by-summoner/{summoner_id}'
}
URLS_LEAGUE = {
# Get the challenger league for given queue.
'challenger league': URLS_BASE['league'] + \
'/challengerleagues/by-queue/{queue}',
#Get the master league for given queue.
'master league': URLS_BASE['league'] + \
'/masterleagues/by-queue/{queue}',
#Get the grandmaster league for a given queue
'grandmaster league': URLS_BASE['league'] + \
'/grandmasterleagues/by-queue/{queue}',
# Get league with given ID, including inactive entries.
'league': URLS_BASE['league'] + \
'/leagues/{league_id}',
# Get league positions in all queues for a given summoner ID.
'league positions': URLS_BASE['league'] + \
'/positions/by-summoner/{summoner_id}'
}
URLS_MATCH = {
# Get match IDs by tournament code.
'matches by tournmanet': URLS_BASE['match'] +\
'/matches/by-tournament-code/{tournament_code}/ids',
#Get match by match ID.
'match': URLS_BASE['match'] + \
'/matches/{match_id}',
#Get match by match ID and tournament code.
'match by tournament': URLS_BASE['match'] + \
'/matches/{match_id}/by-tournament-code/{tournament_code}',
# Get matchlist for games played on given account ID and platform ID and filtered using given filter parameters, if any.
'matchlist': URLS_BASE['match'] + \
'/matchlists/by-account/{account_id}',
# Get match timeline by match ID.
'timeline': URLS_BASE['match'] + \
'/timelines/by-match/{match_id}'
}
URLS_SPECTATOR = {
# Get current game information for the given summoner ID.
'active match': URLS_BASE['spectator'] + \
'/active-games/by-summoner/{summoner_id}',
# Get list of featured games.
'featured games': URLS_BASE['spectator'] + \
'/featured-games'
}
URLS_SUMMONER = {
# Get a summoner by account ID.
'summoner by account id': URLS_BASE['summoner'] + \
'/summoners/by-account/{account_id}',
# Get a summoner by summoner name.
'summoner by name': URLS_BASE['summoner'] + \
'/summoners/by-name/{summoner_name}',
# Get a summoner by summoner ID.
'summoner by summoner id': URLS_BASE['summoner'] + \
'/summoners/{summoner_id}',
# get a summoner by PUUID.
'summoner by puuid': URLS_BASE['summoner'] + \
'/summoners/by-puuid/{puuid}'
}
URLS_THIRD_PARTY_CODE = {
# Get third party code for a given summoner ID. (?)
'third party code': URLS_BASE['third_party_code'] + \
'/third-party-code/by-summoner/{summoner_id}'
}
|
StarcoderdataPython
|
8092335
|
#!/usr/bin/env python3
from pyroute2 import IPRoute
IP = IPRoute()
def get_peer_addr(ifname):
"""Return the peer address of given peer interface.
None if address not exist or not a peer-to-peer interface.
"""
for addr in IP.get_addr(label=ifname):
attrs = dict(addr.get('attrs', []))
if 'IFA_ADDRESS' in attrs:
return attrs['IFA_ADDRESS']
if __name__ == '__main__':
print(get_peer_addr('tun-rpi'))
|
StarcoderdataPython
|
3325219
|
from .configs.config import Config
from .lib.authentication import Authentication
from .menu.menu_generator import MenuGenerator
# import typer
from typing import Optional
# app = typer.Typer()
class Application():
def __init__(self):
self.configuration = Config(environment="Prod") #TODO: Make this dynamic
# print(f"running in {self.configuration.get_environment()}")
self.secrets = Authentication()
# print(f"Credentials are: {self.secrets.get_auth_vars()}")
# menu = MenuGenerator()
# menu.paint_menu()
|
StarcoderdataPython
|
3590469
|
import twitter
from models import feed_user_coll
from datetime import datetime, timedelta
from pathlib import Path
import test_credentials as c
import sys
import json
import pickle
import pytz
from collections import defaultdict
if __name__ == "__main__":
api = twitter.Api(
c.CONSUMER_KEY,
c.CONSUMER_SECRET,
c.ACCESS_TOKEN_KEY,
c.ACCESS_TOKEN_SECRET
)
screen_name = sys.argv[1]
pkl_name = './output/'+screen_name+'_24hr.pkl'
cutoff = datetime.now() - timedelta(days=1)
Path('./output').mkdir(parents=True, exist_ok=True)
if (len(sys.argv) < 3):
with open(pkl_name,'rb') as file:
print('===INFO: Loading data from',pkl_name)
friends_data = pickle.load(file)
else:
today = datetime.now(pytz.utc)
yesterday = today - timedelta(days=1)
friends = api.GetFriends(screen_name=screen_name)
friends_data = feed_user_coll.feed_user_coll(friends)
friends_data.populate_all(api, today, yesterday)
with open(pkl_name,'wb') as file:
print('===INFO: Saving friends list as', pkl_name)
pickle.dump(friends_data, file, pickle.HIGHEST_PROTOCOL)
csv_name = './output/'+screen_name+'_24hr.csv'
with open (csv_name,'w+') as file:
print('===INFO: saving data to',csv_name)
file.write('screen_name,tweets,proportion\n')
for k,v in friends_data.followed_users.items():
file.write(v.user.screen_name)
file.write(',')
file.write(str(len(v.tweets)))
file.write(',')
file.write(str(len(v.tweets)/friends_data.statuses))
file.write('\n')
print('total tweets:',friends_data.statuses)
|
StarcoderdataPython
|
1692708
|
<reponame>xtommy-1/community-detection
# louvain示意图生成
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
edge0 = [
(0, 2), (0, 3), (0, 4), (0, 5),
(1, 2), (1, 4), (1, 7),
(2, 4), (2, 5), (2, 6),
(3, 7),
(4, 10),
(5, 7), (5, 11),
(6, 7), (6, 11),
(8, 9), (8, 10), (8, 11), (8, 14), (8, 15),
(9, 12), (9, 14),
(10, 11), (10, 12), (10, 13), (10, 14),
(11, 13)
]
def graph0(nodes):
g = nx.Graph()
g.add_nodes_from(nodes)
g.add_edges_from(edge0, weight=1)
return g
def graph1():
g = nx.Graph()
g.add_edge(0, 1, weight=4)
g.add_edge(0, 2, weight=1)
g.add_edge(0, 3, weight=1)
g.add_edge(1, 2, weight=1)
g.add_edge(2, 3, weight=3)
return g
def graph2():
g = nx.Graph()
g.add_nodes_from([0, 1, 2, 3])
g.add_edge(0, 2, weight=3)
return g
def pass0():
g = graph0(range(16))
pos = nx.circular_layout(g)
nx.draw(g, pos, with_labels=True, edge_color='b', node_color=range(len(g.nodes())), node_size=500, cmap=plt.cm.cool)
plt.show()
def pass1_phase1():
g = graph0([0, 1, 2, 4, 5, 3, 6, 7, 11, 13, 8, 9, 10, 12, 14, 15])
pos = nx.circular_layout(g)
color = [0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3]
nx.draw(g, pos, with_labels=True, edge_color='b', node_color=color, node_size=500, cmap=plt.cm.cool)
plt.show()
def pass1_phase2():
g = graph1()
edge_labels = dict([((u, v,), d['weight']) for u, v, d in g.edges(data=True)])
pos = nx.circular_layout(g)
nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels, font_size=20)
nx.draw(g, pos, with_labels=True, edge_color='b', node_color=range(len(g.nodes())), node_size=550, cmap=plt.cm.cool)
plt.show()
def pass2():
g = graph2()
edge_labels = dict([((u, v,), d['weight']) for u, v, d in g.edges(data=True)])
pos = nx.circular_layout(g)
nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels, font_size=20)
nx.draw(g, pos, with_labels=True, edge_color='b', node_color=range(len(g.nodes())), node_size=1000, cmap=plt.cm.cool)
plt.show()
if __name__ == '__main__':
pass2()
|
StarcoderdataPython
|
8062181
|
<reponame>maxwnewcomer/OpenCVFacialRecognition
from imutils.video import VideoStream
from imutils.video import FPS
from tensorflow import keras
from datetime import datetime
import numpy as np
import argparse
import imutils
import pickle
import time
import cv2
import os
def recognize_video(detectorPath, embedding_model, recognizerPath, label, confidenceLim, projectPath):
print("[INFO] loading face detector...")
protoPath = os.path.sep.join([detectorPath, "deploy.prototxt"])
modelPath = os.path.sep.join([detectorPath, "res10_300x300_ssd_iter_140000.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
print("[INFO] loading face recognizer...")
embedder = cv2.dnn.readNetFromTorch(embedding_model)
recognizer = keras.models.load_model(recognizerPath)
le = pickle.loads(open(label, "rb").read())
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(1)
fps = FPS().start()
total_saved = 0
mostrecentupload = datetime.now()
while True:
frame = vs.read()
frame = imutils.resize(frame, width=600)
(h, w) = frame.shape[:2]
imageBlob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0), swapRB=False, crop=False)
detector.setInput(imageBlob)
detections = detector.forward()
for i in range(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > confidenceLim:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
face = frame[startY:endY, startX:endX]
(fH, fW) = face.shape[:2]
if fW < 20 or fH < 20:
continue
faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
embedder.setInput(faceBlob)
vec = embedder.forward()
preds = recognizer.predict(vec)[0]
j = np.argmax(preds)
proba = preds[j]
name = le.classes_[j]
text = "{}: {:.2f}%".format(name, proba * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
# if (proba > confidenceLim + .25) or (proba <= 1 and proba >= .90):
# try:
# if((datetime.now() - mostrecentupload).total_seconds() > 15):
# cv2.imwrite('{}/dataset/{}/{}.jpg'.format(projectPath, name, datetime.now().strftime("%d%m%Y::%H:%M:%S")), frame)
# print("[INFO] Saving image to data from video stream...")
# mostrecentupload = datetime.now()
# total_saved += 1
# except:
# print("[ERROR] coudn't save image...")
cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 0, 255), 2)
cv2.putText(frame, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
fps.update()
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
fps.stop()
print("[INFO] elasped time: {:.2f}secs".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
print("[INFO] total saved pictures: {}".format(total_saved))
print("[DONE] stream terminated")
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
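# Minimal command-line driver sketch; the original module imports argparse but does not
# define an entry point, so the default paths below are illustrative assumptions only.
if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument("--detector", default="face_detection_model", help="path to the face detector directory")
    ap.add_argument("--embedding-model", default="openface_nn4.small2.v1.t7", help="path to the embedding model")
    ap.add_argument("--recognizer", default="output/recognizer.h5", help="path to the trained Keras recognizer")
    ap.add_argument("--le", default="output/le.pickle", help="path to the pickled label encoder")
    ap.add_argument("--confidence", type=float, default=0.5, help="minimum detection confidence")
    ap.add_argument("--project", default=".", help="project path used for saving frames")
    args = ap.parse_args()
    recognize_video(args.detector, args.embedding_model, args.recognizer,
                    args.le, args.confidence, args.project)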
|
StarcoderdataPython
|
3294602
|
<reponame>adrian2x/asgi-caches<filename>tests/examples/resources.py
from caches import Cache
cache = Cache("locmem://default", ttl=2 * 60)
special_cache = Cache("locmem://special", ttl=60)
|
StarcoderdataPython
|
3304095
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import urllib3
from bentoml.exceptions import BentoMLException, MissingDependencyException
def ensure_node_available_or_raise():
try:
subprocess.check_output(['node', '--version'])
except subprocess.CalledProcessError as error:
raise BentoMLException(
'Error executing node command: {}'.format(error.output.decode())
)
except FileNotFoundError:
raise MissingDependencyException(
'Node is required for Yatai web UI. Please visit '
'www.nodejs.org for instructions'
)
def parse_grpc_url(url):
'''
>>> parse_grpc_url("grpcs://yatai.com:43/query")
('grpcs', 'yatai.com:43/query')
>>> parse_grpc_url("yatai.com:43/query")
(None, 'yatai.com:43/query')
'''
parts = urllib3.util.parse_url(url)
return parts.scheme, url.replace(f"{parts.scheme}://", "", 1)
|
StarcoderdataPython
|
9648337
|
<filename>.github/check_import.py
import os
from pathlib import Path
def test_imports(path):
is_correct = True
print("Testing file:", path)
with open(path) as f:
lines = f.readlines()
for i, l in enumerate(lines):
if "#check_import" in l or "# check_import" in l:
l_ = l.strip()
try:
exec(l_)
except Exception as e: # DisplayNameError
print("IMPORT ERROR ({}) ON LINE {}: {}".format(e.__class__.__name__, i, l_))
print(e)
is_correct = False
return is_correct
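# Example of the convention this script scans for: any line carrying the marker is
# exec'd in isolation, e.g. (hypothetical target file):
#
#     import numpy as np  # check_import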
def main():
project_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)
is_correct = True
for path in Path(project_path).rglob('*.py'):
if not "ste_env" in str(path) and not "check_import.py" in str(path):
if not test_imports(path):
is_correct = False
exit(not is_correct)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1751317
|
<filename>moredata/enricher/osm/osm_places_connector.py
from ..enricher import IEnricherConnector
from ...utils import OSM_util
import pandas as pd
from shapely import wkt
import geopandas
import pyproj
from functools import partial
from shapely.geometry import MultiPolygon, Polygon
from shapely.ops import transform
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
from shapely.geometry import shape, mapping
from rtree import index as rtreeindex
from ...utils.util import geodesic_point_buffer
class OSMPlacesConnector(IEnricherConnector):
"""OSMconnector implements interface IEnricherConnector, so this is a connector that can be used to enrich data.
Parameters
----------
dict_keys: List[str]
dict_keys is the main argument to the connector. The connector doesn't know where in your data to find the coordinates, so you pass the list of nested keys (or a single key if your data isn't nested) that lets the connector reach the right attribute.
key: str
key of the OSM tag. e.g.: 'amenity', 'leisure'.
value: str
value of the OSM tag. e.g.: 'hospital', 'stadium'.
place_name: str
name of the place. e.g.: 'Brasil', 'São Paulo'.
file: str, optional
path to a CSV file of pre-downloaded polygons; it must contain at least the columns 'name' and 'geometry' (WKT).
radius: numeric, optional
radius of the buffer around each point used when intersecting the polygons.
Attributes
----------
dict_keys: List[str]
key: str
value: str
place_name: str
file: str, optional
radius: numeric, optional
"""
def __init__(self, key, value, dict_keys=[], place_name="Brasil", file=None, radius=None, geometry_intersected=False):
self.key = key
self.value = value
self.place_name = place_name
self.file = file
self.radius = radius
self.dict_keys = dict_keys
self.geometry = geometry_intersected
if self.file is not None:
self._df = pd.read_csv(file)
self._df["geometry"] = self._df["geometry"].apply(wkt.loads)
def _get_polygons(self):
self.array_polygons = []
for index, row in self._df.iterrows():
pol = row["geometry"]
self.array_polygons.append(pol)
self.idx = rtreeindex.Index()
for pos, poly in enumerate(self.array_polygons):
self.idx.insert(pos, poly.bounds)
def _fence_check_local(self, point):
polygon_metadata = []
if self.radius is not None:
shp = Polygon(geodesic_point_buffer(point["latitude"], point["longitude"], self.radius))
else:
shp = Point(point["longitude"], point["latitude"])
for j in self.idx.intersection(shp.bounds):
if self.radius is None:
if shp.within(shape(self.array_polygons[j])):
polygon_metadata.append(self._df.iloc[j].to_frame().T)
else:
polygon_metadata.append(self._df.iloc[j].to_frame().T)
return polygon_metadata
def _traverse_dict(self, dict, keys):
for k in keys:
try:
dict = dict[k]
except KeyError as e:
return None
return dict
def _enrich_point(self, point):
if "latitude" in point.keys() and "longitude" in point.keys():
polygon_metadata = self._fence_check_local(point)
for p in polygon_metadata:
p["key"] = self.key
p["value"] = self.value
if not "local" in point.keys():
point["local"] = []
if not "geometry_intersected" in point.keys() and self.geometry:
point['geometry_intersected'] = []
if self.geometry:
polygons_intersected = list(p['geometry'])
for polygon in polygons_intersected:
point['geometry_intersected'].append(str(polygon))
point["local"].append(*p[["name", "key", "value"]].to_dict("records"))
def enrich(self, data, **kwargs):
"""Method overrided of interface. This method do enrichment using OSM data as a enricher. It walk through the keys to reach at the data that will be used to intersect the polygons. It uses a R tree to index polygons and search faster. If the radius attribute is passed the algorithm returns all polygons that intersect the point buffered with this radius else the algorithm returns all polygons that contains the point.
Parameters
----------
data: :obj:`Data`
"""
from fiona.crs import from_epsg
import geopandas
if self.file is None:
osm_util = OSM_util()
self._df = osm_util.get_places(self.place_name, self.key, self.value)
self._get_polygons()
for d in data.parse(**kwargs):
if not self.dict_keys:
points = d
else:
points = d[self.dict_keys[0]]
for k in range(1, len(self.dict_keys)):
try:
points = points[self.dict_keys[k]]
except KeyError as e:
return None
if isinstance(points, list):
for point in points:
self._enrich_point(point)
else:
self._enrich_point(points)
yield d
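# Hypothetical usage sketch (the argument values below are illustrative assumptions):
#
#     connector = OSMPlacesConnector(key="amenity", value="hospital",
#                                    dict_keys=["points"], place_name="Brasil", radius=500)
#     for enriched in connector.enrich(data):   # `data` must expose a .parse(**kwargs) generator
#         ...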
|
StarcoderdataPython
|
6446447
|
<reponame>AshivDhondea/DFTS_TF2
"""
Created on Wed Sep 2 10:36:41 2020.
tc_algos.py
Tensor Completion Algorithms:
1. Simple Low Rank Tensor Completion aka SiLRTC
2. High accuracy Low Rank Tensor Completion aka HaLRTC
3.
Based on code developed by <NAME> (Multimedia Lab, Simon Fraser University).
SiLRTC-complete.py
The code has been modified to run with DFTS re-packetized tensors.
Ref:
1. <NAME> and <NAME>, “Tensor completion methods for collaborative
intelligence,” IEEE Access, vol. 8, pp. 41162–41174, 2020.
"""
# Libraries for tensor completion methods.
import numpy as np
import copy
from .tensorly_base import *
"""
If you are able to install tensorly, you can import tensorly directly and comment out
the line above. If you are running your experiment on a ComputeCanada cluster,
you won't be able to install tensorly.
"""
# --------------------------------------------------------------------------- #
# General functions used by tensor completion methods.
def swap(a, b):
"""
Swap a and b.
Parameters
----------
a : TYPE
DESCRIPTION.
b : TYPE
DESCRIPTION.
Returns
-------
b : TYPE
DESCRIPTION.
a : TYPE
DESCRIPTION.
"""
return b, a
def makeOmegaSet_rowPacket(p, n, sizeOmega):
"""
Parameters
----------
p : TYPE
DESCRIPTION.
n : TYPE
DESCRIPTION.
sizeOmega : TYPE
DESCRIPTION.
Returns
-------
subs : TYPE
DESCRIPTION.
"""
if sizeOmega > np.prod(n):
print("OmegaSet size is too high, requested size of Omega is bigger than the tensor itself!")
row_omega = int(np.ceil(p * n[0] * n[-1]))
idx = np.random.randint(low=1, high=n[0]*n[-1], size=(row_omega, 1))
Omega = np.unique(idx)
while len(Omega) < row_omega:
Omega = np.reshape(Omega, (len(Omega), 1))
temp = np.random.randint(low=1, high=n[0]*n[-1], size=(row_omega-len(Omega), 1))
idx = np.concatenate((Omega, temp), axis=0)
Omega = np.unique(idx)
Omega = np.sort(Omega[0:row_omega])
subs = np.unravel_index(Omega, [n[0],n[-1]])
temp = np.ones((1, np.array(subs).shape[-1]))
# Create the columns (for each permutation increment by 1 so that all the columns get covered for each row)
res = []
for i in range(n[1]):
concat = np.concatenate((subs, i*temp), axis=0)
res.append(concat)
# Swap axis 2 and 3 to be correct
swap_res = []
for i in range(len(res)):
a = res[i][0]
b = res[i][1]
c = res[i][2]
b, c = swap(b, c)
swapped_order = np.stack((a,b,c))
swap_res.append(swapped_order)
# Concatenating the list formed above to give a 3 by x matrix
subs = swap_res[0]
for i in range(len(swap_res)-1):
subs = np.concatenate((subs, swap_res[i+1]), axis=1)
return subs
def ReplaceInd(X, subs, vals):
"""
Replace in X values given by vals in location given by subs.
Parameters
----------
X : TYPE
DESCRIPTION.
subs : TYPE
DESCRIPTION.
vals : TYPE
DESCRIPTION.
Returns
-------
X : TYPE
DESCRIPTION.
"""
for j in range(len(vals)):
x, y, z = subs[j,0], subs[j,1], subs[j,2]
X[x, y, z] = vals[j]
return X
def shrinkage(X, t):
"""
Perform shrinkage with threshold t on matrix X.
Refer to Bragilevsky, Bajic paper for explanation.
Parameters
----------
X : TYPE
DESCRIPTION.
t : TYPE
DESCRIPTION.
Returns
-------
d : TYPE
DESCRIPTION.
"""
# SVD decomposition with an offset (s needs to be made diagonal)
u, s, v = np.linalg.svd(X, full_matrices=False) # matlab uses 'econ' to not produce full matrices
s = np.diag(s)
for i in range(s.shape[0]):
s[i, i] = np.maximum(s[i, i] - t, 0)  # soft-threshold each singular value
# reconstructed matrix
d = np.matmul(np.matmul(u, s), v)
return d
# --------------------------------------------------------------------------- #
# Simple Low Rank Tensor Completion method.
# Adapted from Lior's code.
def fn_silrtc_demo(image,num_iters_K,p):
"""
Demonstrate operation of SILRTC with random loss.
Parameters
----------
image : TYPE
DESCRIPTION.
K : TYPE
DESCRIPTION.
p : TYPE
DESCRIPTION.
Returns
-------
X_estimated : TYPE
DESCRIPTION.
"""
n = list(image.shape)
subs = np.transpose(np.array(makeOmegaSet_rowPacket(p, n, np.uint32(np.round(p*np.prod(n))))))
subs = np.array(subs, dtype=np.uint32)
vals = list(map(lambda x, y, z: image[x][y][z], subs[:,0], subs[:,1], subs[:,2]))
X_corrupted = np.zeros(n)
X_corrupted = ReplaceInd(X_corrupted, subs, vals)
X_estimated = X_corrupted
b = np.abs(np.random.randn(3,1))/200
a = np.abs(np.random.randn(3,1))
a = a/np.sum(a)
for q in range(num_iters_K):
M = np.zeros(image.shape)
for i in range(3):
svd = shrinkage(unfold(X_estimated,i), a[i]/b[i])
M = M + fold(b[i]*svd, i, image.shape)
M = M/np.sum(b)
# Update both M & X as they are used in the next cycle
M = ReplaceInd(M, subs, vals)
X_estimated = M
return X_estimated
def fn_silrtc_damaged(X_corrupted,num_iters_K,subs,vals):
"""
Perform SiLRTC on damaged tensors.
Parameters
----------
X_corrupted : TYPE
DESCRIPTION.
num_iters_K : TYPE
DESCRIPTION.
subs : TYPE
DESCRIPTION.
vals : TYPE
DESCRIPTION.
Returns
-------
X_estimated : TYPE
DESCRIPTION.
"""
X_estimated = X_corrupted
b = np.abs(np.random.randn(3,1))/200
a = np.abs(np.random.randn(3,1))
a = a/np.sum(a)
for q in range(num_iters_K):
#print(f"SilRTC iteration {q}")
M = np.zeros(X_corrupted.shape)
for i in range(3):
svd = shrinkage(unfold(X_estimated,i), a[i]/b[i])
M = M + fold(b[i]*svd, i, X_corrupted.shape)
M = M/np.sum(b)
# Update both M & X as they are used in the next cycle
M = ReplaceInd(M, subs, vals)
X_estimated = M
return X_estimated
def fn_silrtc_damaged_error(X_corrupted,num_iters_K,subs,vals):
"""
Perform SiLRTC on damaged tensors. Keep track of error.
Parameters
----------
X_corrupted : TYPE
DESCRIPTION.
num_iters_K : TYPE
DESCRIPTION.
subs : TYPE
DESCRIPTION.
vals : TYPE
DESCRIPTION.
Returns
-------
X_estimated : TYPE
DESCRIPTION.
error_iters:
"""
X_estimated = copy.deepcopy(X_corrupted)
b = np.abs(np.random.randn(3,1))/200
a = np.abs(np.random.randn(3,1))
a = a/np.sum(a)
error_iters = np.zeros([num_iters_K],dtype=np.float64)
X_estimated_prev = np.zeros_like(X_estimated)
row, col, dep = X_corrupted.shape
ArrSize_iters = (row,col,dep,num_iters_K)
X_estimated_iters = np.zeros(ArrSize_iters)
for q in range(num_iters_K):
#print(f"SilRTC iteration {q}")
M = np.zeros(X_corrupted.shape)
for i in range(3):
svd = shrinkage(unfold(X_estimated,i), a[i]/b[i])
M = M + fold(b[i]*svd, i, X_corrupted.shape)
M = M/np.sum(b)
# Update both M & X as they are used in the next cycle
X_estimated = ReplaceInd(M, subs, vals)
error_iters[q] = np.sqrt(np.sum(np.square(np.subtract(X_estimated,X_estimated_prev))))
X_estimated_prev = X_estimated
X_estimated_iters[:,:,:,q] = X_estimated
return X_estimated_iters, error_iters
# --------------------------------------------------------------------------- #
# High accuracy Low Rank Tensor Completion.
# Adapted from Lior's code.
def fn_halrtc_damaged(X_corrupted,num_iters_K,subs,vals):
"""
Perform HaLRTC on damaged tensors.
Parameters
----------
X_corrupted : TYPE
DESCRIPTION.
num_iters_K : TYPE
DESCRIPTION.
subs : TYPE
DESCRIPTION.
vals : TYPE
DESCRIPTION.
Returns
-------
X_estimated : TYPE
DESCRIPTION.
"""
X_estimated = np.copy(X_corrupted)
a = np.abs(np.random.randn(3,1))
a = a/np.sum(a)
rho = 1e-6
# Create tensor holders for Mi and Yi done to simplify variable storage
row, col, dep = X_corrupted.shape
ArrSize = (row, col, dep, X_corrupted.ndim)
Mi = np.zeros(ArrSize)
Yi = np.zeros(ArrSize)
for q in range(num_iters_K):
#print(f"HalRTC iteration {q}")
# Calculate Mi tensors (Step 1)
for i in range(3):
temp = unfold(X_estimated,i) + (unfold(np.squeeze(Yi[:,:,:,i]),i)/rho)
Mi[:,:,:,i] = fold(shrinkage(temp, a[i]/rho), i, X_corrupted.shape)
# Update X (Step 2)
X_est = np.sum(Mi-(Yi/rho),axis=3)/3
X_estimated = ReplaceInd(X_est, subs, vals)
# Update Yi tensors (Step 3)
for i in range(ArrSize[-1]):
Yi[:,:,:,i] = np.squeeze(Yi[:,:,:,i])-rho*(np.squeeze(Mi[:,:,:,i])-X_estimated)
# Modify rho to help convergence
rho = 1.2*rho
return X_estimated
def fn_halrtc_damaged_error(X_corrupted,num_iters_K,subs,vals):
"""
Perform HaLRTC on damaged tensors.
Parameters
----------
X_corrupted : TYPE
DESCRIPTION.
num_iters_K : TYPE
DESCRIPTION.
subs : TYPE
DESCRIPTION.
vals : TYPE
DESCRIPTION.
Returns
-------
X_estimated : TYPE
DESCRIPTION.
"""
X_estimated = np.copy(X_corrupted) #copy.deepcopy(X_corrupted)
a = np.abs(np.random.randn(3,1))
a = a/np.sum(a)
rho = 1e-6
# Create tensor holders for Mi and Yi done to simplify variable storage
row, col, dep = X_corrupted.shape
ArrSize = (row, col, dep, X_corrupted.ndim)
Mi = np.zeros(ArrSize)
Yi = np.zeros(ArrSize)
error_iters = np.zeros([num_iters_K],dtype=np.float64)
X_estimated_prev = np.zeros_like(X_estimated)
ArrSize_iters = (row,col,dep,num_iters_K)
X_estimated_iters = np.zeros(ArrSize_iters)
for q in range(num_iters_K):
#print(f"HalRTC iteration {q}")
# Calculate Mi tensors (Step 1)
for i in range(3):
temp = unfold(X_estimated,i) + (unfold(np.squeeze(Yi[:,:,:,i]),i)/rho)
Mi[:,:,:,i] = fold(shrinkage(temp, a[i]/rho), i, X_corrupted.shape)
# Update X (Step 2)
X_est = np.sum(Mi-(Yi/rho),axis=3)/3
X_estimated = ReplaceInd(X_est, subs, vals)
X_estimated_iters[:,:,:,q] = X_estimated
# Update Yi tensors (Step 3)
for i in range(ArrSize[-1]):
Yi[:,:,:,i] = np.squeeze(Yi[:,:,:,i])-rho*(np.squeeze(Mi[:,:,:,i])-X_estimated)
# Modify rho to help convergence
rho = 1.2*rho
error_iters[q] = np.sqrt(np.sum(np.square(np.subtract(X_estimated,X_estimated_prev))))
X_estimated_prev = X_estimated
return X_estimated_iters, error_iters
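# Hedged usage sketch (added for illustration; the shape, loss fraction and iteration
# count are arbitrary, and it assumes the module is imported as part of its package so
# that unfold/fold from tensorly_base are available). Call _demo_tensor_completion()
# manually to exercise the damaged-tensor routines end to end.
def _demo_tensor_completion():
    np.random.seed(0)
    X_true = np.random.rand(8, 8, 4)               # toy tensor to be recovered
    n = list(X_true.shape)
    p = 0.6                                        # fraction of (row, channel) pairs kept
    subs = np.transpose(np.array(makeOmegaSet_rowPacket(p, n, np.uint32(np.round(p * np.prod(n))))))
    subs = np.array(subs, dtype=np.uint32)
    vals = [X_true[x, y, z] for x, y, z in zip(subs[:, 0], subs[:, 1], subs[:, 2])]
    X_corrupted = ReplaceInd(np.zeros(n), subs, vals)
    X_silrtc = fn_silrtc_damaged(X_corrupted, 10, subs, vals)
    X_halrtc = fn_halrtc_damaged(X_corrupted, 10, subs, vals)
    print("SiLRTC RMSE:", np.sqrt(np.mean((X_silrtc - X_true) ** 2)))
    print("HaLRTC RMSE:", np.sqrt(np.mean((X_halrtc - X_true) ** 2)))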
|
StarcoderdataPython
|
5108922
|
<filename>src/dice_cli/fs/_copy_from_local.py
# https://arrow.apache.org/docs/python/generated/pyarrow.fs.HadoopFileSystem.html
# connect to HDFS
# create_dir(self, path, *, bool recursive=True)
# async open_output_stream(self, path[, …])
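# Hedged sketch of the intended functionality (added for illustration; the host, port
# and paths below are placeholders, not project configuration): copy a local file into
# HDFS with the pyarrow filesystem API referenced above.
from pyarrow import fs
def copy_from_local(local_path: str, hdfs_dir: str, file_name: str) -> None:
    # Placeholder endpoint; a real deployment would take these from configuration.
    hdfs = fs.HadoopFileSystem(host="namenode", port=8020)
    hdfs.create_dir(hdfs_dir, recursive=True)
    with open(local_path, "rb") as src, hdfs.open_output_stream(f"{hdfs_dir}/{file_name}") as dst:
        dst.write(src.read())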
|
StarcoderdataPython
|
6609440
|
"""
This file is part of LiberaForms.
# SPDX-FileCopyrightText: 2021 LiberaForms.org
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
import os, re, shutil
from datetime import datetime, timezone
import unicodecsv as csv
from flask import current_app, g
from flask_babel import gettext as _
from liberaforms import db
from sqlalchemy.dialects.postgresql import JSONB, ARRAY, TIMESTAMP
from sqlalchemy.ext.mutable import MutableDict, MutableList
from sqlalchemy.orm.attributes import flag_modified
from liberaforms.utils.database import CRUD
from liberaforms.models.log import FormLog
from liberaforms.models.answer import Answer, AnswerAttachment
from liberaforms.utils.storage.remote import RemoteStorage
from liberaforms.utils.consent_texts import ConsentText
from liberaforms.utils import sanitizers
from liberaforms.utils import validators
from liberaforms.utils import html_parser
from liberaforms.utils import utils
from pprint import pprint
""" Form properties
structure: A list of dicts that is built by and rendered by formbuilder.
fieldIndex: List of dictionaries. Each dict contains one formbuilder field's info.
[{"label": <displayed_field_name>, "name": <unique_field_identifier>}]
"""
class Form(db.Model, CRUD):
__tablename__ = "forms"
_site=None
id = db.Column(db.Integer, primary_key=True, index=True)
created = db.Column(TIMESTAMP, nullable=False)
slug = db.Column(db.String, unique=True, nullable=False)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
structure = db.Column(MutableList.as_mutable(ARRAY(JSONB)), nullable=False)
fieldIndex = db.Column(MutableList.as_mutable(ARRAY(JSONB)), nullable=False)
editors = db.Column(MutableDict.as_mutable(JSONB), nullable=False)
enabled = db.Column(db.Boolean, default=False)
expired = db.Column(db.Boolean, default=False)
sendConfirmation = db.Column(db.Boolean, default=False)
expiryConditions = db.Column(JSONB, nullable=False)
sharedAnswers = db.Column(MutableDict.as_mutable(JSONB), nullable=True)
shared_notifications = db.Column(MutableList.as_mutable(ARRAY(db.String)), nullable=False)
restrictedAccess = db.Column(db.Boolean, default=False)
adminPreferences = db.Column(MutableDict.as_mutable(JSONB), nullable=False)
introductionText = db.Column(MutableDict.as_mutable(JSONB), nullable=False)
afterSubmitText = db.Column(JSONB, nullable=False)
expiredText = db.Column(JSONB, nullable=False)
thumbnail = db.Column(db.String, nullable=True)
published_cnt = db.Column(db.Integer, default=0, nullable=False)
consentTexts = db.Column(ARRAY(JSONB), nullable=True)
author = db.relationship("User", back_populates="authored_forms")
answers = db.relationship("Answer", lazy='dynamic',
cascade="all, delete, delete-orphan")
log = db.relationship("FormLog", lazy='dynamic',
cascade="all, delete, delete-orphan")
def __init__(self, author, **kwargs):
self.created = datetime.now(timezone.utc)
self.author_id = author.id
self.editors = {self.author_id: self.new_editor_preferences(author)}
self.expiryConditions = {"totalAnswers": 0,
"expireDate": False,
"fields": {}}
self.slug = kwargs["slug"]
self.structure = kwargs["structure"]
self.fieldIndex = kwargs["fieldIndex"]
self.sharedAnswers = { "enabled": False,
"key": utils.gen_random_string(),
"password": <PASSWORD>,
"expireDate": False}
self.shared_notifications = []
self.introductionText = kwargs["introductionText"]
self.consentTexts = kwargs["consentTexts"]
self.afterSubmitText = kwargs["afterSubmitText"]
self.expiredText = kwargs["expiredText"]
self.sendConfirmation = self.structure_has_email_field(self.structure)
self.adminPreferences = {"public": True}
def __str__(self):
return utils.print_obj_values(self)
@property
def site(self):
if self._site:
return self._site
from liberaforms.models.site import Site
self._site = Site.query.first()
return self._site
@classmethod
def find(cls, **kwargs):
return cls.find_all(**kwargs).first()
@classmethod
def find_all(cls, **kwargs):
filters = []
if 'editor_id' in kwargs:
filters.append(cls.editors.has_key(str(kwargs['editor_id'])))
kwargs.pop('editor_id')
if 'key' in kwargs:
filters.append(cls.sharedAnswers.contains({'key': kwargs['key']}))
kwargs.pop('key')
for key, value in kwargs.items():
filters.append(getattr(cls, key) == value)
return cls.query.filter(*filters)
def get_author(self):
return self.author
def get_created_date(self):
return utils.utc_to_g_timezone(self.created).strftime("%Y-%m-%d")
def change_author(self, new_author):
if new_author.enabled:
if new_author.id == self.author_id:
return False
try:
del self.editors[str(self.author_id)]
except:
return False
self.author_id=new_author.id
if not self.is_editor(new_author):
self.add_editor(new_author)
self.save()
return True
return False
@staticmethod
def create_field_index(structure):
index=[]
# Add these RESERVED fields to the index.
# i18n: Column title on table, to sort whether the form entries are marked or not
index.append({'label': _("Marked"), 'name': 'marked'})
# i18n: Used for sort items by creation order, almost always as column title
index.append({'label': _("Created"), 'name': 'created'})
for element in structure:
if 'name' in element:
if 'label' not in element:
element['label']=_("Label")
index.append({'name': element['name'], 'label': element['label']})
return index
def update_field_index(self, newIndex):
if self.get_total_answers() == 0:
self.fieldIndex = newIndex
else:
deletedFieldsWithData=[]
# If the editor has deleted fields we want to remove them
# but we don't want to remove fields that already contain data in the DB.
for field in self.fieldIndex:
if not [i for i in newIndex if i['name'] == field['name']]:
# This field was removed by the editor. Can we safely delete it?
can_delete=True
for answer in self.answers:
if field['name'] in answer.data and \
answer.data[field['name']]:
# This field contains data
can_delete=False
break
if can_delete:
# A pseudo delete.
# We drop the field (it's reference) from the index
# (the empty field remains as is in each answer in the db)
pass
else:
# We don't delete this field from the index because it contains data
field['removed']=True
deletedFieldsWithData.append(field)
self.fieldIndex = newIndex + deletedFieldsWithData
def get_field_index_for_data_display(self, with_deleted_columns=False):
result=[]
for field in self.fieldIndex:
if 'removed' in field and not with_deleted_columns:
continue
item={'label': field['label'], 'name': field['name']}
result.append(item)
if self.data_consent["enabled"]:
# append dynamic DPL field
# i18n: Acronym for 'Data Privacy Law'
result.append({"name": "DPL", "label": _("DPL")})
return result
def has_removed_fields(self):
return any('removed' in field for field in self.fieldIndex)
@staticmethod
def is_email_field(field):
if "type" in field and field["type"] == "text" and \
"subtype" in field and field["subtype"] == "email":
return True
else:
return False
@classmethod
def structure_has_email_field(cls, structure):
for element in structure:
if cls.is_email_field(element):
return True
return False
def has_email_field(self):
return Form.structure_has_email_field(self.structure)
def has_field(self, field_name):
for field in self.structure:
if "name" in field and field["name"] == field_name:
return True
return False
def has_file_field(self):
for field in self.structure:
if "type" in field and field["type"] == "file":
return True
return False
def might_send_confirmation_email(self):
if self.sendConfirmation and self.has_email_field():
return True
else:
return False
def get_confirmation_email_address(self, answer):
for element in self.structure:
if Form.is_email_field(element):
if element["name"] in answer and answer[element["name"]]:
return answer[element["name"]].strip()
return False
def get_answers(self, oldest_first=False, **kwargs):
kwargs['oldest_first'] = oldest_first
kwargs['form_id'] = self.id
return Answer.find_all(**kwargs)
def get_answers_for_display(self, oldest_first=False):
answers = self.get_answers(oldest_first=oldest_first)
result = []
frmt = "%Y-%m-%d %H:%M:%S"
for answer in answers:
result.append({
'id': answer.id,
'created': utils.utc_to_g_timezone(answer.created)
.strftime(frmt),
'marked': answer.marked,
**answer.data})
return result
def get_total_answers(self):
return self.answers.count()
def get_last_answer_date(self):
last_answer = Answer.find(form_id=self.id)
if last_answer:
frmt = "%Y-%m-%d %H:%M:%S"
return utils.utc_to_g_timezone(last_answer.created).strftime(frmt)
return ""
def is_enabled(self):
if not (self.get_author().enabled and self.adminPreferences['public']):
return False
return self.enabled
@classmethod
def new_editor_preferences(cls, editor):
return {'notification': { 'newAnswer': editor.preferences[
"newAnswerNotification"
],
'expiredForm': True }}
def add_editor(self, editor):
if not editor.enabled:
return False
editor_id=str(editor.id)
if not editor_id in self.editors:
self.editors[editor_id]=Form.new_editor_preferences(editor)
self.save()
return True
return False
def remove_editor(self, editor):
editor_id=str(editor.id)
        if editor_id == str(self.author_id):  # compare as strings; editor keys are stored as strings
return None
if editor_id in self.editors:
del self.editors[editor_id]
self.save()
return editor_id
return None
@property
def url(self):
return f"{self.site.host_url}{self.slug}"
@property
def embed_url(self):
return f"{self.site.host_url}embed/{self.slug}"
def get_opengraph(self):
default_img_src = self.site.get_logo_uri()
image_src = self.thumbnail if self.thumbnail else default_img_src
opengraph = {
"title": self.slug,
"url": self.url,
"image": image_src,
"description": self.get_short_description(),
}
return opengraph
def set_thumbnail(self):
html = self.introductionText['html']
images_src = html_parser.extract_images_src(html)
self.thumbnail = images_src[0] if images_src else None
def set_short_description(self):
text = html_parser.get_opengraph_text(self.introductionText['html'])
self.introductionText['short_text'] = text
def get_short_description(self):
if 'short_text' in self.introductionText:
return self.introductionText['short_text']
self.set_short_description()
self.save()
return self.introductionText['short_text']
@property
def data_consent(self):
return self.consentTexts[0]
def get_consent_for_display(self, id):
#print(self.consentTexts)
return ConsentText.get_consent_for_display(id, self)
def save_consent(self, id, data):
return ConsentText.save(id, self, data)
def get_data_consent_for_display(self):
return self.get_consent_for_display(self.data_consent['id'])
def get_default_data_consent_for_display(self):
return ConsentText.get_consent_for_display( g.site.DPL_consent_id,
self.author)
def toggle_data_consent_enabled(self):
return ConsentText.toggle_enabled(self.data_consent['id'], self)
@staticmethod
def new_data_consent():
consent = ConsentText.get_empty_consent(
g.site.DPL_consent_id,
name="DPL",
enabled=g.site.data_consent['enabled'])
return consent
@staticmethod
def default_expired_text():
text=_("Sorry, this form has expired.")
return {"markdown": f"## {text}", "html": f"<h2>{text}</h2>"}
@property
def expired_text_html(self):
if self.expiredText['html']:
return self.expiredText['html']
else:
return Form.default_expired_text()["html"]
@property
def expired_text_markdown(self):
if self.expiredText['markdown']:
return self.expiredText['markdown']
else:
return Form.default_expired_text()["markdown"]
def save_expired_text(self, markdown):
markdown=markdown.strip()
if markdown:
self.expiredText = {'markdown': sanitizers.escape_markdown(markdown),
'html': sanitizers.markdown2HTML(markdown)}
else:
self.expiredText = {'html':"", 'markdown':""}
self.save()
@staticmethod
def defaultAfterSubmitText():
# i18n: Thanks text displayed when completing form as user
text=_("Thank you!!")
return {"markdown": f"## {text}", "html": f"<h2>{text}</h2>"}
@property
def after_submit_text_html(self):
if self.afterSubmitText['html']:
return self.afterSubmitText['html']
else:
return Form.defaultAfterSubmitText()['html']
@property
def after_submit_text_markdown(self):
if self.afterSubmitText['markdown']:
return self.afterSubmitText['markdown']
else:
return Form.defaultAfterSubmitText()['markdown']
def save_after_submit_text(self, markdown):
markdown=markdown.strip()
if markdown:
self.afterSubmitText = {'markdown': sanitizers.escape_markdown(markdown),
'html': sanitizers.markdown2HTML(markdown)}
else:
self.afterSubmitText = {'html':"", 'markdown':""}
self.save()
def get_available_number_type_fields(self):
result={}
for element in self.structure:
if "type" in element and element["type"] == "number":
if element["name"] in self.expiryConditions['fields']:
element_name = self.expiryConditions['fields'][element["name"]]
result[element["name"]] = element_name
else:
result[element["name"]]={"type":"number", "condition": None}
return result
def get_multichoice_fields(self):
result=[]
for element in self.structure:
if "type" in element:
if element["type"] == "checkbox-group" or \
element["type"] == "radio-group" or \
element["type"] == "select":
result.append(element)
return result
def get_field_label(self, fieldName):
for element in self.structure:
if 'name' in element and element['name']==fieldName:
return element['label']
return None
def save_expiry_date(self, expireDate):
self.expiryConditions['expireDate']=expireDate
self.expired=self.has_expired()
flag_modified(self, "expiryConditions")
self.save()
def save_expiry_total_answers(self, total_answers):
try:
total_answers = int(total_answers)
except:
total_answers = 0
total_answers = 0 if total_answers < 0 else total_answers
self.expiryConditions['totalAnswers']=total_answers
self.expired = self.has_expired()
flag_modified(self, "expiryConditions")
self.save()
return self.expiryConditions['totalAnswers']
def save_expiry_field_condition(self, field_name, condition):
available_fields=self.get_available_number_type_fields()
if not field_name in available_fields:
return False
if not condition:
if field_name in self.expiryConditions['fields']:
del self.expiryConditions['fields'][field_name]
self.expired=self.has_expired()
flag_modified(self, "expiryConditions")
self.save()
return False
field_type = available_fields[field_name]['type']
if field_type == "number":
try:
condition_dict = {"type": field_type, "condition": int(condition)}
self.expiryConditions['fields'][field_name] = condition_dict
except:
condition = False
if field_name in self.expiryConditions['fields']:
del self.expiryConditions['fields'][field_name]
self.expired=self.has_expired()
flag_modified(self, "expiryConditions")
self.save()
return condition
return False
def update_expiryConditions(self):
saved_expiry_fields = [field for field in self.expiryConditions['fields']]
available_expiry_fields = []
#available_expiry_fields=[element["name"] for element in self.structure if "name" in element]
for element in self.structure:
if "name" in element:
available_expiry_fields.append(element["name"])
for field in saved_expiry_fields:
if not field in available_expiry_fields:
del self.expiryConditions['fields'][field]
def get_expiry_numberfield_positions_in_field_index(self):
field_positions=[]
for fieldName, condition in self.expiryConditions['fields'].items():
if condition['type'] == 'number':
for position, field in enumerate(self.fieldIndex):
if field['name'] == fieldName:
field_positions.append(position)
break
return field_positions
def delete(self):
self.delete_all_answers()
super().delete()
def get_attachment_dir(self):
return os.path.join(current_app.config['UPLOADS_DIR'],
current_app.config['ATTACHMENT_DIR'],
str(self.id))
def delete_all_answers(self):
self.answers.delete()
attachment_dir = self.get_attachment_dir()
if os.path.isdir(attachment_dir):
shutil.rmtree(attachment_dir, ignore_errors=True)
else:
current_app.logger.debug(f"Local attachment dir not found: {attachment_dir}")
if current_app.config['ENABLE_REMOTE_STORAGE'] == True:
RemoteStorage().remove_directory(f"attachments/{self.id}")
def is_author(self, user):
return True if self.author_id == user.id else False
def is_editor(self, user):
return True if str(user.id) in self.editors else False
def get_editors(self):
from liberaforms.models.user import User
editors=[]
for editor_id in self.editors:
user=User.find(id=editor_id)
if user:
editors.append(user)
else:
# remove editor_id from self.editors
pass
return editors
def can_expire(self):
if self.expiryConditions["totalAnswers"]:
return True
if self.expiryConditions["expireDate"]:
return True
if self.expiryConditions["fields"]:
return True
return False
def has_expired(self):
if not self.can_expire():
return False
if self.expiryConditions["totalAnswers"] and \
self.answers.count() >= self.expiryConditions["totalAnswers"]:
return True
if self.expiryConditions["expireDate"] and not \
validators.is_future_date(self.expiryConditions["expireDate"]):
return True
for fieldName, value in self.expiryConditions['fields'].items():
if value['type'] == 'number':
total=self.tally_number_field(fieldName)
if total >= int(value['condition']):
return True
return False
def tally_number_field(self, fieldName):
total=0
for answer in self.get_answers():
try:
total = total + int(answer.data[fieldName])
except:
continue
return total
def is_public(self):
if not self.is_enabled() or self.expired:
return False
else:
return True
def is_shared(self):
if self.are_answers_shared():
return True
if len(self.editors) > 1:
return True
return False
def are_answers_shared(self):
return self.sharedAnswers['enabled']
def get_shared_answers_url(self, part="results"):
return f"{self.url}/{part}/{self.sharedAnswers['key']}"
"""
Used when editing a form.
We don't want the Editor to change the option values if an
answer.data[key] with a value is already present in the database
"""
def get_multichoice_options_with_saved_data(self):
result = {}
if not self.answers:
return result
multiChoiceFields = {} # {field.name: [option.value, option.value]}
for field in self.get_multichoice_fields():
multiChoiceFields[field['name']] = []
for value in field['values']:
multiChoiceFields[field['name']].append(value['value'])
for answer in self.answers:
removeFieldsFromSearch=[]
for field in multiChoiceFields:
if field in answer.data.keys():
for savedValue in answer.data[field].split(', '):
if savedValue in multiChoiceFields[field]:
if not field in result:
result[field]=[]
result[field].append(savedValue)
multiChoiceFields[field].remove(savedValue)
if multiChoiceFields[field] == []:
# all option.values are present in database
removeFieldsFromSearch.append(field)
for field_to_remove in removeFieldsFromSearch:
del(multiChoiceFields[field_to_remove])
if multiChoiceFields == {}: # no more fields to check
return result
return result
def get_answers_for_json(self):
result=[]
answers = self.get_answers_for_display(oldest_first=True)
for saved_answer in answers:
answer={}
for field in self.get_field_index_for_data_display():
#value=saved_answer[field['name']] if field['name'] in saved_answer else ""
if field['name'] in saved_answer:
value = saved_answer[field['name']]
else:
value = ""
answer[field['label']]=value
result.append(answer)
return result
def get_chart_data(self):
chartable_time_fields=[]
total={'answers':0}
time_data={'answers':[]}
for field in self.get_available_number_type_fields():
label=self.get_field_label(field)
total[label]=0
time_data[label]=[]
chartable_time_fields.append({'name':field, 'label':label})
multichoice_fields=self.get_multichoice_fields()
multi_choice_for_chart=[]
for field in multichoice_fields:
field_for_chart={ "name":field['name'], "title":field['label'],
"axis_1":[], "axis_2":[]}
multi_choice_for_chart.append(field_for_chart)
for value in field['values']:
field_for_chart['axis_1'].append(value['label'])
field_for_chart['axis_2'].append(0) #start counting at zero
for answer in self.get_answers_for_display(oldest_first=True):
total['answers']+=1
time_data['answers'].append({ 'x': answer['created'],
'y': total['answers']})
for field in chartable_time_fields:
try:
total[field['label']]+=int(answer[field['name']])
time_data[field['label']].append({'x': answer['created'],
'y': total[field['label']]
})
except:
continue
for field in multichoice_fields:
if not (field['name'] in answer and answer[field['name']]):
continue
field_for_chart=[item for item in multi_choice_for_chart if item["name"]==field['name']][0]
answer_values=answer[field['name']].split(', ')
for idx, field_value in enumerate(field['values']):
if field_value['value'] in answer_values:
field_for_chart['axis_2'][idx]+=1
return {'multi_choice':multi_choice_for_chart,
'time_chart':time_data}
def toggle_enabled(self):
if self.expired or self.adminPreferences['public']==False:
return False
else:
self.enabled = False if self.enabled else True
self.save()
return self.enabled
def toggle_admin_form_public(self):
public = self.adminPreferences['public']
self.adminPreferences['public'] = False if public else True
self.save()
return self.adminPreferences['public']
def toggle_shared_answers(self):
enabled = self.sharedAnswers['enabled']
self.sharedAnswers['enabled'] = False if enabled else True
self.save()
return self.sharedAnswers['enabled']
def toggle_restricted_access(self):
self.restrictedAccess = False if self.restrictedAccess else True
self.save()
return self.restrictedAccess
def toggle_notification(self, editor_id):
editor_id = str(editor_id)
if editor_id in self.editors:
if self.editors[editor_id]['notification']['newAnswer']:
self.editors[editor_id]['notification']['newAnswer']=False
else:
self.editors[editor_id]['notification']['newAnswer']=True
flag_modified(self, 'editors')
self.save()
return self.editors[editor_id]['notification']['newAnswer']
return False
def toggle_expiration_notification(self, editor_id):
editor_id = str(editor_id)
if editor_id in self.editors:
if self.editors[editor_id]['notification']['expiredForm']:
self.editors[editor_id]['notification']['expiredForm']=False
else:
self.editors[editor_id]['notification']['expiredForm']=True
flag_modified(self, 'editors')
self.save()
return self.editors[editor_id]['notification']['expiredForm']
return False
def toggle_send_confirmation(self):
self.sendConfirmation = False if self.sendConfirmation else True
self.save()
return self.sendConfirmation
def add_log(self, message):
log = FormLog( user_id=g.current_user.id,
form_id=self.id,
message=message)
log.save()
def write_csv(self, with_deleted_columns=False):
fieldnames=[]
fieldheaders={}
for field in self.get_field_index_for_data_display(with_deleted_columns):
fieldnames.append(field['name'])
fieldheaders[field['name']]=field['label']
csv_name = os.path.join(os.environ['TMP_DIR'], f"{self.slug}.csv")
with open(csv_name, mode='wb') as csv_file:
writer = csv.DictWriter(csv_file,
fieldnames=fieldnames,
extrasaction='ignore')
writer.writerow(fieldheaders)
answers = self.get_answers_for_display(oldest_first=True)
for answer in answers:
for field_name in answer.keys():
if field_name.startswith('file-'):
# extract attachment url
url = re.search(r'https?:[\'"]?([^\'" >]+)',
answer[field_name])
if url:
answer[field_name] = url.group(0)
writer.writerow(answer)
return csv_name
@staticmethod
def default_introduction_text():
# i18n: Example title in template for new form
title=_("Form title")
# i18n: Example subtitle in template for new form
context=_("Context")
# i18n: Example content in template for new form. '\n' is used for linebreak.
content=_(" * Describe your form.\n * Add relevant content, links, images, etc.")
return "## {}\n\n### {}\n\n{}".format(title, context, content)
|
StarcoderdataPython
|
9644785
|
#!/usr/bin/python
import json
import urllib
def showsome(searchfor):
query = urllib.urlencode({'q': searchfor})
print query
url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&%s' % query
search_response = urllib.urlopen(url)
search_results = search_response.read()
results = json.loads(search_results)
data = results['responseData']
print 'Total results: %s' % data['cursor']['estimatedResultCount']
hits = data['results']
print 'Top %d hits:' % len(hits)
for h in hits: print ' ', h['url']
print 'For more results, see %s' % data['cursor']['moreResultsUrl']
showsome('ermanno olmi')
|
StarcoderdataPython
|
11219858
|
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" debug module is the new name of the human_readably module """
from __future__ import (absolute_import, division, print_function)
from ansible.plugins.callback.default \
import CallbackModule as CallbackModule_default
__metaclass__ = type
class CallbackModule(CallbackModule_default): # pylint: \
# disable=too-few-public-methods,no-init
'''
Override for the default callback module.
Render std err/out outside of the rest of the result which it prints with
indentation.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'debug'
def _dump_results(self, result):
'''Return the text to output for a result.'''
        # Enable JSON indentation
result['_ansible_verbose_always'] = True
save = {}
for key in ['stdout', 'stdout_lines', 'stderr', 'stderr_lines', 'msg']:
if key in result:
save[key] = result.pop(key)
output = CallbackModule_default._dump_results(self, result)
for key in ['stdout', 'stderr', 'msg']:
if key in save and save[key]:
output += '\n\n%s:\n\n%s\n' % (key.upper(), save[key])
for key, value in save.items():
result[key] = value
return output
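# Usage note (added; not part of the upstream plugin): select this callback as the
# stdout renderer, e.g. in ansible.cfg:
#
#   [defaults]
#   stdout_callback = debug
#
# or per run with: ANSIBLE_STDOUT_CALLBACK=debug ansible-playbook playbook.yml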
|
StarcoderdataPython
|
11308814
|
import sys
from pybtex.database.input import bibtex
import jinja2
import jinja2.sandbox
import re
from calendar import month_name
_months = {
'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6,
'jul': 7, 'aug': 8, 'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12,
}
def _author_fmt(author):
return u' '.join(author.first_names + author.middle_names + author.last_names)
def _andlist(ss, sep=', ', seplast=', and ', septwo=' and '):
if len(ss) <= 1:
return ''.join(ss)
elif len(ss) == 2:
return septwo.join(ss)
else:
return sep.join(ss[0:-1]) + seplast + ss[-1]
def _author_list(authors):
return _andlist(list(map(_author_fmt, authors)))
def _venue_type(entry):
venuetype = ''
if entry.type == 'inbook':
venuetype = 'Chapter in '
elif entry.type == 'techreport':
venuetype = 'Technical Report '
elif entry.type == 'phdthesis':
venuetype = 'Ph.D. thesis, {}'.format(entry.fields['school'])
elif entry.type == 'mastersthesis':
venuetype = 'Master\'s thesis, {}'.format(entry.fields['school'])
return venuetype
def _venue(entry):
f = entry.fields
venue = ''
if entry.type == 'article':
venue = f['journal']
try:
if f['volume'] and f['number']:
venue += ' {0}({1})'.format(f['volume'], f['number'])
except KeyError:
pass
elif entry.type == 'inproceedings':
venue = f['booktitle']
try:
if f['series']:
venue += ' ({})'.format(f['series'])
except KeyError:
pass
elif entry.type == 'inbook':
venue = f['title']
elif entry.type == 'techreport':
venue = '{0}, {1}'.format(f['number'], f['institution'])
elif entry.type == 'phdthesis' or entry.type == 'mastersthesis':
venue = ''
else:
venue = 'Unknown venue (type={})'.format(entry.type)
return venue
def _title(entry):
if entry.type == 'inbook':
title = entry.fields['chapter']
else:
title = entry.fields['title']
# remove curlies from titles -- useful in TeX, not here
#title = title.translate(None, '{}')
#title = title.translate(str.maketrans('','','{}'))
return title
def _main_url(entry):
urlfields = ('url', 'ee')
for f in urlfields:
if f in entry.fields:
return entry.fields[f]
return None
def _extra_urls(entry):
"""Returns a dict of URL types to URLs, e.g.
{ 'nytimes': 'http://nytimes.com/story/about/research.html',
... }
"""
urls = {}
for k, v in entry.fields.items():
k = k.lower()
if not k.endswith('_url'):
continue
k = k[:-4]
urltype = k.replace('_', ' ')
urls[urltype] = v
return urls
def _month_match (mon):
if re.match('^[0-9]+$', mon):
return int(mon)
return _months[mon.lower()[:3]]
def _month_name (monthnum):
try:
return month_name[int(monthnum)]
except:
return ''
def _sortkey(entry):
e = entry.fields
year = '{:04d}'.format(int(e['year']))
try:
monthnum = _month_match(e['month'])
year += '{:02d}'.format(monthnum)
except KeyError:
year += '00'
return year
def main(bibfile, template):
# Load the template.
tenv = jinja2.sandbox.SandboxedEnvironment()
tenv.filters['author_fmt'] = _author_fmt
tenv.filters['author_list'] = _author_list
tenv.filters['title'] = _title
tenv.filters['venue_type'] = _venue_type
tenv.filters['venue'] = _venue
tenv.filters['main_url'] = _main_url
tenv.filters['extra_urls'] = _extra_urls
tenv.filters['monthname'] = _month_name
with open(template) as f:
tmpl = tenv.from_string(f.read())
# Parse the BibTeX file.
with open(bibfile) as f:
db = bibtex.Parser().parse_stream(f)
# Include the bibliography key in each entry.
for k, v in db.entries.items():
v.fields['key'] = k
# Render the template.
bib_sorted = sorted(db.entries.values(), key=_sortkey, reverse=True)
out = tmpl.render(entries=bib_sorted)
print(out)
if __name__ == '__main__':
main(*sys.argv[1:])
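# Hedged usage sketch (added; the file names and template body are invented for
# illustration): the script expects a BibTeX file and a Jinja2 template as positional
# arguments and prints the rendered result, e.g.
#
#   python render_pubs.py publications.bib pubs_template.html > publications.html
#
# where a minimal template could iterate over the sorted entries as:
#
#   <ul>
#   {% for e in entries %}
#     <li>{{ e.persons['author'] | author_list }}. {{ e | title }}. {{ e | venue }}</li>
#   {% endfor %}
#   </ul>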
|
StarcoderdataPython
|
327989
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 24 15:55:28 2016
@author: sasha
"""
import os
from .init import QTVer
if QTVer == 4:
from PyQt4 import QtGui, QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
if QTVer == 5:
from PyQt5 import QtWidgets as QtGui
from PyQt5 import QtCore
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.widgets import SpanSelector
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.optimize import least_squares, curve_fit
from .xaesa_ft import FT, BFTWindow, BFT, GETPHASE
from .xaesa_constants_formulas import windowGauss10
#from tkinter.filedialog import askopenfilename, asksaveasfilename
#from tkinter import Tk
import gc
def exafsfit_lsq(x, k, exafs, amp, pha, parametrs, var_par, nr_shells, kpow):
#create mixed array with variables and params
XX = np.zeros(len(var_par))
varcnt = 0
parcnt = 0
for i in range(len(var_par)):
if(var_par[i]==1): #variable
XX[i] = x[varcnt]
varcnt = varcnt + 1
else:
XX[i] = parametrs[parcnt]
parcnt = parcnt + 1
# print("x", x)
#print("XX", XX)
chi_model = np.zeros(len(k))
for i in range(nr_shells):
chi_model = chi_model + (XX[i*7]/(k*XX[i*7+1]**2)) * amp[i] * \
np.exp(-2*XX[i*7+2]*k*k + (2/3)*XX[i*7+4]*k**4 - (4/45)*XX[i*7+6]*k**6) * \
np.sin(2*k*XX[i*7+1] - (4/3)*XX[i*7+3]*k**3 + (4/15)*XX[i*7+5]*k**5 + pha[i])
# chi_model = SO2 * (x[2]/(k*x[3]*x[3])) * amp * exp(-2*x[4]*k*k) * sin(2*k*x[3] + pha)
return chi_model*k**kpow - exafs
def exafsfit(x, N, R, sigma2):
k = x[0]
amp = x[1]
pha= x[2]
SO2 = x[3]
# dE0 = X[4]
# C4 = X[5]
# C5 = X[6]
# C6 = X[7]
chi_model = SO2 * (N/(k*R*R)) * amp * np.exp(-2*sigma2*k*k) * np.sin(2*k*R + pha)
return chi_model*k*k
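# Hedged illustration (added; all numbers are arbitrary example values): exafsfit()
# builds a single-shell, k^2-weighted EXAFS oscillation from backscattering amplitude
# and phase arrays. With a flat amplitude and zero phase it reduces to a damped sine
# in k. Call _demo_exafsfit() manually to inspect the model signal.
def _demo_exafsfit():
    k = np.arange(0.5, 15.0, 0.05)
    amp = np.ones_like(k)      # placeholder backscattering amplitude
    pha = np.zeros_like(k)     # placeholder total phase shift
    x = [k, amp, pha, 0.9]     # k grid, amplitude, phase, S0^2
    chi_k2 = exafsfit(x, N=6.0, R=2.0, sigma2=0.003)
    return k, chi_k2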
class FitWindow(QtGui.QDialog):
def __init__(self):
super(FitWindow, self).__init__()
self.bft = []
self.k = []
self.kamp = [[]]
self.kpha = [[] ]
self.amp_orig = [[]]
self.pha_orig = [[]]
self.fit_result = []
self.initUI()
def initUI(self):
self.shellnr = 1
self.savedshellnr = 1
self.isfitted = 0
self.fig = plt.figure(3, figsize=(12, 6))
self.ax_bft = plt.subplot2grid((1,2), (0,0))
self.ax_bftft = plt.subplot2grid((1,2), (0,1))
self.canv = FigureCanvas(self.fig)
self.tbar = NavigationToolbar(self.canv, self)
self.fig.tight_layout()
# self.lblNrShells = QtGui.QLabel("Number of shells")
# self.edtNrShells = QtGui.QLineEdit("1")
self.lblkmin = QtGui.QLabel("K min")
self.lblkmax = QtGui.QLabel("K max")
self.lbldk = QtGui.QLabel("dK")
self.edtkmin = QtGui.QLineEdit("0.5")
self.edtkmax = QtGui.QLineEdit("15")
self.edtdk = QtGui.QLineEdit("0.05")
self.lblMaxiterations = QtGui.QLabel("Max number of iterations")
self.edtMaxiterations = QtGui.QLineEdit("1000")
self.tabShells = QtGui.QTabWidget()
self.tabs = []
self.tabs.append(QtGui.QFrame())
self.tabShells.addTab(self.tabs[0],"Shell 1")
self.ltShell = []
self.shellN = []
self.shellR = []
self.shellSigma = []
self.shellC3 = []
self.shellC4 = []
self.shellC5 = []
self.shellC6 = []
# self.shellE0 = []
self.shellAmp = []
self.shellPha = []
lblN = QtGui.QLabel("N")
lblR = QtGui.QLabel("R")
lblSigma = QtGui.QLabel("Sigma")
lblC3 = QtGui.QLabel("C3")
lblC4 = QtGui.QLabel("C4")
lblC5 = QtGui.QLabel("C5")
lblC6 = QtGui.QLabel("C6")
# lblE0 = QtGui.QLabel("E0")
lblAmp = QtGui.QLabel("Ampl")
lblPha = QtGui.QLabel("Phase")
self.ltShell.append(QtGui.QGridLayout())
self.shellN.append( [QtGui.QLineEdit("4"), QtGui.QLineEdit("0"), QtGui.QLineEdit("8"), QtGui.QCheckBox()])
self.shellR.append([QtGui.QLineEdit("2"), QtGui.QLineEdit("0"), QtGui.QLineEdit("4"), QtGui.QCheckBox()])
self.shellSigma.append([QtGui.QLineEdit("0.001"), QtGui.QLineEdit("0"), QtGui.QLineEdit("1"), QtGui.QCheckBox()])
self.shellC3.append([QtGui.QLineEdit("0"), QtGui.QLineEdit("-0.1"), QtGui.QLineEdit("0.1"), QtGui.QCheckBox()])
self.shellC4.append([QtGui.QLineEdit("0"), QtGui.QLineEdit("-0.1"), QtGui.QLineEdit("0.1"), QtGui.QCheckBox()])
self.shellC5.append([QtGui.QLineEdit("0"), QtGui.QLineEdit("-0.1"), QtGui.QLineEdit("0.1"), QtGui.QCheckBox()])
self.shellC6.append([QtGui.QLineEdit("0"), QtGui.QLineEdit("-0.1"), QtGui.QLineEdit("0.1"), QtGui.QCheckBox()])
# self.shellE0.append([QtGui.QLineEdit("0"), QtGui.QLineEdit("0"), QtGui.QLineEdit("0"), QtGui.QLineEdit("0.0001"), QtGui.QCheckBox()])
self.shellAmp.append(QtGui.QComboBox())
self.shellPha.append(QtGui.QComboBox())
self.shellAmp[-1].addItem("")
self.shellPha[-1].addItem("")
self.shellAmp[-1].currentIndexChanged.connect(self.AmpChanged)
self.shellPha[-1].currentIndexChanged.connect(self.PhaChanged)
self.shellN[len(self.shellN)-1][3].setChecked(True)
self.shellR[len(self.shellR)-1][3].setChecked(True)
self.shellSigma[len(self.shellSigma)-1][3].setChecked(True)
self.ltShell[0].addWidget(lblN, 0, 0)
self.ltShell[0].addWidget(lblR, 1, 0)
self.ltShell[0].addWidget(lblSigma, 2, 0)
self.ltShell[0].addWidget(lblC3, 3, 0)
self.ltShell[0].addWidget(lblC4, 4, 0)
self.ltShell[0].addWidget(lblC5, 5, 0)
self.ltShell[0].addWidget(lblC6, 6, 0)
# self.ltShell[0].addWidget(lblE0, 7, 0)
self.ltShell[0].addWidget(lblAmp, 7, 0)
self.ltShell[0].addWidget(lblPha, 8, 0)
for i in range(4):
self.ltShell[0].addWidget(self.shellN[0][i], 0, 2*i+1)
self.ltShell[0].addWidget(self.shellR[0][i], 1, 2*i+1)
self.ltShell[0].addWidget(self.shellSigma[0][i], 2, 2*i+1)
self.ltShell[0].addWidget(self.shellC3[0][i], 3, 2*i+1)
self.ltShell[0].addWidget(self.shellC4[0][i], 4, 2*i+1)
self.ltShell[0].addWidget(self.shellC5[0][i], 5, 2*i+1)
self.ltShell[0].addWidget(self.shellC6[0][i], 6, 2*i+1)
# self.ltShell[0].addWidget(self.shellE0[0][i], 7, 2*i+1)
self.ltShell[0].addWidget(self.shellAmp[0], 7, 1, 1, 7)
self.ltShell[0].addWidget(self.shellPha[0], 8, 1, 1, 7)
# self.shellAmp[0].addItem("E:/work/development/xaslib/fit/amp0001.dat")
# self.shellPha[0].addItem("E:/work/development/xaslib/fit/pha0001.dat")
for j in range(7):
self.ltShell[0].addWidget(QtGui.QLabel("Min. limit"), j, 2)
self.ltShell[0].addWidget(QtGui.QLabel("Max. limit"), j, 4)
# self.ltShell[0].addWidget(QtGui.QLabel("Accuracy"), j, 6)
self.tabs[0].setLayout(self.ltShell[0])
self.lblFuncEval = QtGui.QLabel("Number of function evaluations done")
self.edtFuncEval = QtGui.QLineEdit()
self.lblFitMessage = QtGui.QLabel("Termination reason")
self.edtFitMessage = QtGui.QLineEdit()
self.lblOptimality = QtGui.QLabel("Cost function")
self.edtOptimality = QtGui.QLineEdit()
lfit = QtGui.QGridLayout()
lfit.addWidget(self.lblFitMessage, 0, 0)
lfit.addWidget(self.edtFitMessage, 0, 1)
lfit.addWidget(self.lblFuncEval, 1, 0)
lfit.addWidget(self.edtFuncEval, 1, 1)
lfit.addWidget(self.lblOptimality, 2, 0)
lfit.addWidget(self.edtOptimality, 2, 1)
self.btnFit = QtGui.QPushButton('Fit')
self.btnFit.clicked.connect(self.Fit_leastsquares)
self.btnSaveFit = QtGui.QPushButton('Save Fit results')
self.btnSaveFit.clicked.connect(self.saveFit)
self.btnApply = QtGui.QPushButton('Apply')
self.btnApply.clicked.connect(self.apply)
self.btnCancel = QtGui.QPushButton('Cancel')
self.btnCancel.clicked.connect(self.cancel)
self.btnAddShell = QtGui.QPushButton('Add shell')
self.btnAddShell.clicked.connect(self.addshell)
self.btnRemoveShell = QtGui.QPushButton('Remove shell')
self.btnRemoveShell.clicked.connect(self.removeshell)
self.btnOpenAmp = QtGui.QPushButton('Open amplitude file(s) ...')
self.btnOpenAmp.clicked.connect(self.openamp)
self.btnOpenPha = QtGui.QPushButton('Open phase file(s) ...')
self.btnOpenPha.clicked.connect(self.openpha)
self.btnOpenFeff = QtGui.QPushButton('Open feff file(s) ...')
self.btnOpenFeff.clicked.connect(self.openfeff)
self.btnSaveFitResults = QtGui.QPushButton('Save fit Results ...')
# self.btnSaveFitResults.clicked.connect(self.saveFitResults)
lb = QtGui.QGridLayout()
lb.addWidget(self.btnOpenAmp, 0,0)
lb.addWidget(self.btnOpenPha, 0,1)
lb.addWidget(self.btnOpenFeff, 1,0)
lb.addWidget(self.btnAddShell, 2,0)
lb.addWidget(self.btnRemoveShell, 2,1)
lfig = QtGui.QGridLayout()
lfig.addWidget(self.tbar, 0, 0)
lfig.addWidget(self.canv, 1, 0, 2, 1)
lfig.addLayout(lfit, 3, 0)
lfig.addWidget(self.btnFit, 4, 0)
lfig.addWidget(self.btnSaveFit, 5, 0)
lfig.addWidget(self.btnApply, 6, 0)
lfig.addWidget(self.btnCancel, 7, 0)
lfig.addWidget(self.lblkmin, 0,1)
lfig.addWidget(self.edtkmin, 0,2)
lfig.addWidget(self.lblkmax, 0,3)
lfig.addWidget(self.edtkmax, 0,4)
lfig.addWidget(self.lbldk, 0,5)
lfig.addWidget(self.edtdk, 0,6)
lfig.addWidget(self.lblMaxiterations, 1, 1)
lfig.addWidget(self.edtMaxiterations, 1, 2, 1, 4)
lfig.addWidget(self.tabShells, 2, 1, 2, 6)
lfig.addLayout(lb, 4,1, 2, 6)
self.setLayout(lfig)
def updateUI(self):
if self.savedshellnr > 1:
for i in range(0,self.savedshellnr-1):
self.addshell()
self.edtkmin.setText("{:.2f}".format(self.ksettings[0][0]))
self.edtkmax.setText("{:.2f}".format(self.ksettings[0][1]))
self.edtdk.setText("{:.3f}".format(self.ksettings[0][2]))
if self.isfitted == 1: #fill with saved fitting params
self.edtOptimality.setText("{:E}".format(self.costfunction))
for i in range(self.shellnr):
self.shellN[i][0].setText("{:.4f}".format(self.fit_params[i][0][0]))
self.shellN[i][1].setText("{:.4f}".format(self.fit_params[i][0][1]))
self.shellN[i][2].setText("{:.4f}".format(self.fit_params[i][0][2]))
self.shellN[i][3].setChecked(bool(self.fit_params[i][0][3]))
self.shellR[i][0].setText("{:.4f}".format(self.fit_params[i][1][0]))
self.shellR[i][1].setText("{:.4f}".format(self.fit_params[i][1][1]))
self.shellR[i][2].setText("{:.4f}".format(self.fit_params[i][1][2]))
self.shellR[i][3].setChecked(bool(self.fit_params[i][1][3]))
self.shellSigma[i][0].setText("{:.4f}".format(self.fit_params[i][2][0]))
self.shellSigma[i][1].setText("{:.4f}".format(self.fit_params[i][2][1]))
self.shellSigma[i][2].setText("{:.4f}".format(self.fit_params[i][2][2]))
self.shellSigma[i][3].setChecked(bool(self.fit_params[i][2][3]))
self.shellC3[i][0].setText("{:.4E}".format(self.fit_params[i][3][0]))
self.shellC3[i][1].setText("{:.4f}".format(self.fit_params[i][3][1]))
self.shellC3[i][2].setText("{:.4f}".format(self.fit_params[i][3][2]))
self.shellC3[i][3].setChecked(bool(self.fit_params[i][3][3]))
self.shellC4[i][0].setText("{:.4E}".format(self.fit_params[i][4][0]))
self.shellC4[i][1].setText("{:.4f}".format(self.fit_params[i][4][1]))
self.shellC4[i][2].setText("{:.4f}".format(self.fit_params[i][4][2]))
self.shellC4[i][3].setChecked(bool(self.fit_params[i][4][3]))
self.shellC5[i][0].setText("{:.4E}".format(self.fit_params[i][5][0]))
self.shellC5[i][1].setText("{:.4f}".format(self.fit_params[i][5][1]))
self.shellC5[i][2].setText("{:.4f}".format(self.fit_params[i][5][2]))
self.shellC5[i][3].setChecked(bool(self.fit_params[i][5][3]))
self.shellC6[i][0].setText("{:.4E}".format(self.fit_params[i][6][0]))
self.shellC6[i][1].setText("{:.4f}".format(self.fit_params[i][6][1]))
self.shellC6[i][2].setText("{:.4f}".format(self.fit_params[i][6][2]))
self.shellC6[i][3].setChecked(bool(self.fit_params[i][6][3]))
# for i in range(int(len(self.fit_amps)/2)):
self.kamp[i] = self.fit_amps[2*i]
self.amp_orig[i] = self.fit_amps[2*i+1]
self.kpha[i] = self.fit_phases[2*i]
self.pha_orig[i] = self.fit_phases[2*i+1]
# print(self.fit_amps)
pass
def Fit_curvefit(self):
kstart = float(self.edtkmin.text())
kend = float(self.edtkmax.text())
dk = float(self.edtdk.text())
common_k = np.arange(kstart, kend, dk)
guess = [0,0,0]
guess[0] = float(self.shellN[0][0].text())
guess[1] = float(self.shellR[0][0].text())
guess[2] = float(self.shellSigma[0][0].text())
varbounds = []
varbounds.append( ( float(self.shellN[0][1].text()), float(self.shellR[0][1].text()), float(self.shellSigma[0][1].text()) ) )
varbounds.append( ( float(self.shellN[0][2].text()), float(self.shellR[0][2].text()), float(self.shellSigma[0][2].text()) ) )
kamp, amp_orig = np.genfromtxt("E:/work/development/xaslib/fit/amp0001.dat", usecols=(1,0), unpack=True)
kpha, pha_orig = np.genfromtxt("E:/work/development/xaslib/fit/pha0001.dat", usecols=(1,0), unpack=True)
splamp = InterpolatedUnivariateSpline(kamp,amp_orig)
splpha = InterpolatedUnivariateSpline(kpha, pha_orig)
splbft = InterpolatedUnivariateSpline(self.k, self.bft)
amp = splamp(common_k)
pha = splpha(common_k)
common_bft = splbft(common_k)
# lsq_result = least_squares(exafsfit, np.array(X), \
# method = 'lm',
## bounds = varbounds,
# args=(self.k, self.bft, amp, pha, 1))
# print(lsq_result.x)
x = []
x.append(common_k)
x.append(amp)
x.append(pha)
x.append(1)
popt, pcov = curve_fit(exafsfit, x, common_bft , \
#method = 'lm',
bounds = varbounds,
p0 = guess)
self.ax_bft.clear()
self.ax_bftft.clear()
self.ax_bft.plot(self.k, self.bft)
# self.ax_bft.plot(self.k, exafsfit(lsq_result.x, self.k, self.bft, amp, pha, 1)+self.bft)
# self.ax_bft.plot(self.k, exafsfit(X, self.k, self.bft, amp, pha, 1)+self.bft)
self.ax_bft.plot(common_k, exafsfit(x, popt[0], popt[1], popt[2]))
print(popt)
print(pcov)
self.canv.draw()
def Fit_leastsquares(self):
for i in range(self.shellnr):
if(self.kamp[i]==[]):
QtGui.QMessageBox.information(self,"Load Amplitude", "Amplitude in shell {:d} not loaded".format(i+1))
return
if(self.kpha[i]==[]):
QtGui.QMessageBox.information(self,"Load Phase", "Phase in shell {:d} not loaded".format(i+1))
return
kstart = float(self.edtkmin.text())
kend = float(self.edtkmax.text())
dk = float(self.edtdk.text())
self.common_k = np.arange(kstart, kend, dk)
maxiterations = int(self.edtMaxiterations.text())
#prepare variable and parameter array
splbft = InterpolatedUnivariateSpline(self.k, self.bft)
self.common_bft = splbft(self.common_k)
varbounds = [[],[]]
par = []
var_par = []
X = []
edtVarBoxes = []
amp = []
pha = []
for i in range(self.shellnr):
if self.shellN[i][3].isChecked():
X.append(float(self.shellN[i][0].text()))
varbounds[0].append(float(self.shellN[i][1].text()))
varbounds[1].append(float(self.shellN[i][2].text()))
var_par.append(1)
edtVarBoxes.append(self.shellN[i][0])
else:
par.append(float(self.shellN[i][0].text()))
var_par.append(0)
if self.shellR[i][3].isChecked():
X.append(float(self.shellR[i][0].text()))
varbounds[0].append(float(self.shellR[i][1].text()))
varbounds[1].append(float(self.shellR[i][2].text()))
var_par.append(1)
edtVarBoxes.append(self.shellR[i][0])
else:
par.append(float(self.shellR[i][0].text()))
var_par.append(0)
if self.shellSigma[i][3].isChecked():
X.append(float(self.shellSigma[i][0].text()))
varbounds[0].append(float(self.shellSigma[i][1].text()))
varbounds[1].append(float(self.shellSigma[i][2].text()))
var_par.append(1)
edtVarBoxes.append(self.shellSigma[i][0])
else:
par.append(float(self.shellSigma[i][0].text()))
var_par.append(0)
if self.shellC3[i][3].isChecked():
X.append(float(self.shellC3[i][0].text()))
varbounds[0].append(float(self.shellC3[i][1].text()))
varbounds[1].append(float(self.shellC3[i][2].text()))
var_par.append(1)
edtVarBoxes.append(self.shellC3[i][0])
else:
par.append(float(self.shellC3[i][0].text()))
var_par.append(0)
if self.shellC4[i][3].isChecked():
X.append(float(self.shellC4[i][0].text()))
varbounds[0].append(float(self.shellC4[i][1].text()))
varbounds[1].append(float(self.shellC4[i][2].text()))
var_par.append(1)
edtVarBoxes.append(self.shellC4[i][0])
else:
par.append(float(self.shellC4[i][0].text()))
var_par.append(0)
if self.shellC5[i][3].isChecked():
X.append(float(self.shellC5[i][0].text()))
varbounds[0].append(float(self.shellC5[i][1].text()))
varbounds[1].append(float(self.shellC5[i][2].text()))
var_par.append(1)
edtVarBoxes.append(self.shellC5[i][0])
else:
par.append(float(self.shellC5[i][0].text()))
var_par.append(0)
if self.shellC6[i][3].isChecked():
X.append(float(self.shellC6[i][0].text()))
varbounds[0].append(float(self.shellC6[i][1].text()))
varbounds[1].append(float(self.shellC6[i][2].text()))
var_par.append(1)
edtVarBoxes.append(self.shellC6[i][0])
else:
par.append(float(self.shellC6[i][0].text()))
var_par.append(0)
splamp = InterpolatedUnivariateSpline(self.kamp[i], self.amp_orig[i])
splpha = InterpolatedUnivariateSpline(self.kpha[i], self.pha_orig[i])
amp.append(splamp(self.common_k))
pha.append(splpha(self.common_k))
varbounds[0] = tuple(varbounds[0])
varbounds[1] = tuple(varbounds[1])
lsq_result = least_squares(exafsfit_lsq, np.array(X), \
# method = 'dogbox',
ftol = 1e-12,
max_nfev = maxiterations,
bounds = varbounds,
# tr_solver = 'lsmr',
# jac = '3-point',
# loss='soft_l1',
# f_scale=0.1,
verbose = 0,
# x_scale = [1,1,0.001],
args=(self.common_k, self.common_bft, amp, pha, par, var_par, self.shellnr, 2))
self.edtFuncEval.setText("{:d}".format(lsq_result.nfev))
self.edtOptimality.setText("{:e}".format(lsq_result.cost))
self.edtFitMessage.setText(lsq_result.message)
for i in range(len(lsq_result.x)):
if i in [0,1,2]:
edtVarBoxes[i].setText("{:.5f}".format(lsq_result.x[i]))
else:
edtVarBoxes[i].setText("{:.2E}".format(lsq_result.x[i]))
self.window = windowGauss10(self.common_k, kstart, kend)
self.bftw = self.common_bft * self.window
self.r, self.fr, self.fi = FT(self.common_k, self.bftw, 0, 4, 0.02)
self.efr = np.sqrt(self.fr*self.fr + self.fi*self.fi)
self.efi = self.fi * (-1)
self.fit_result = exafsfit_lsq(lsq_result.x, self.common_k, self.common_bft, amp, pha, par, var_par, self.shellnr, 2)+self.common_bft
fit_result_w = self.fit_result * self.window
self.res_r, res_fr, res_fi = FT(self.common_k, fit_result_w, 0, 4, 0.02)
self.res_efr = np.sqrt(res_fr*res_fr + res_fi*res_fi)
self.res_efi = res_fi * (-1)
self.ax_bft.clear()
self.ax_bftft.clear()
self.ax_bft.plot(self.k, self.bft)
self.ax_bft.plot(self.common_k, self.fit_result)
self.ax_bftft.clear()
line1, line2 = self.ax_bftft.plot( self.r, self.efr, self.r, self.efi)
line1.set_color('b')
line2.set_color('b')
line2.set_linestyle('dotted')
line1, line2 = self.ax_bftft.plot( self.res_r, self.res_efr, self.res_r, self.res_efi)
line1.set_color('r')
line2.set_color('r')
line2.set_linestyle('dotted')
self.canv.draw()
def apply(self):
self.fit_params = []
self.fit_amps = []
self.fit_phases = []
self.ksettings = []
for i in range(self.shellnr):
self.fit_params.append([ [float(self.shellN[i][0].text()),
float(self.shellN[i][1].text()),
float(self.shellN[i][2].text()),
int(self.shellN[i][3].isChecked())],
[float(self.shellR[i][0].text()),
float(self.shellR[i][1].text()),
float(self.shellR[i][2].text()),
int(self.shellR[i][3].isChecked())],
[float(self.shellSigma[i][0].text()),
float(self.shellSigma[i][1].text()),
float(self.shellSigma[i][2].text()),
int(self.shellSigma[i][3].isChecked())],
[float(self.shellC3[i][0].text()),
float(self.shellC3[i][1].text()),
float(self.shellC3[i][2].text()),
int(self.shellC3[i][3].isChecked())],
[float(self.shellC4[i][0].text()),
float(self.shellC4[i][1].text()),
float(self.shellC4[i][2].text()),
int(self.shellC4[i][3].isChecked())],
[float(self.shellC5[i][0].text()),
float(self.shellC5[i][1].text()),
float(self.shellC5[i][2].text()),
int(self.shellC5[i][3].isChecked())],
[float(self.shellC6[i][0].text()),
float(self.shellC6[i][1].text()),
float(self.shellC6[i][2].text()),
int(self.shellC6[i][3].isChecked())]])
self.fit_amps.append( self.kamp[i])
self.fit_amps.append( self.amp_orig[i])
self.fit_phases.append( self.kpha[i])
self.fit_phases.append( self.pha_orig[i])
self.costfunction = float(self.edtOptimality.text())
self.ksettings.append( [float(self.edtkmin.text()),
float(self.edtkmax.text()),
float(self.edtdk.text())] )
self.accept()
def cancel(self):
self.close()
def bftft(self):
self.window = windowGauss10(self.k, self.k[0], self.k[len(self.k)-1])
self.bftw = self.bft * self.window
self.r, self.fr, self.fi = FT(self.k, self.bftw, 0, 4, 0.02)
self.efr = np.sqrt(self.fr*self.fr + self.fi*self.fi)
self.efi = self.fi * (-1)
def plot(self):
self.ax_bft.clear()
self.ax_bftft.clear()
self.ax_bft.plot(self.k, self.bft)
line1, line2 = self.ax_bftft.plot( self.r, self.efr, self.r, self.efi)
line1.set_color('b')
line2.set_color('b')
line2.set_linestyle('dotted')
if(self.fit_result != []):
kstart = float(self.edtkmin.text())
kend = float(self.edtkmax.text())
dk = float(self.edtdk.text())
self.common_k = np.arange(kstart, kend, dk)
self.ax_bft.plot(self.common_k, self.fit_result)
def openamp(self):
dlg = QtGui.QFileDialog()
dlg.setFileMode(QtGui.QFileDialog.ExistingFiles)
dlg.setAcceptMode(0) # open dialog
dlg.setNameFilters(["All files (*.*)", "Amplitude files (*.amp)"])
# dlg.setDirectory(os.getcwd())
if dlg.exec():
self.fnamp = dlg.selectedFiles()
else:
return
self.fnamp.sort()
for i in range(len(self.shellAmp)):
self.shellAmp[i].addItems(self.fnamp)
def openpha(self):
dlg = QtGui.QFileDialog()
dlg.setFileMode(QtGui.QFileDialog.ExistingFiles)
dlg.setAcceptMode(0) # open dialog
        dlg.setNameFilters(["All files (*.*)", "Phase files (*.pha)"])
# dlg.setDirectory(os.getcwd())
if dlg.exec():
self.fnpha = dlg.selectedFiles()
else:
return
self.fnpha.sort()
for i in range(len(self.shellPha)):
self.shellPha[i].addItems(self.fnpha)
def openfeff(self):
dlg = QtGui.QFileDialog()
dlg.setFileMode(QtGui.QFileDialog.ExistingFiles)
dlg.setAcceptMode(0) # open dialog
dlg.setNameFilters(["All files (*.*)"])
# dlg.setDirectory(os.getcwd())
if dlg.exec():
self.fnfeff = dlg.selectedFiles()
else:
return
self.fnfeff.sort()
#Extract amplitude and phase from feff files and save to disk
for i in range(len(self.fnfeff)):
state = 0
data = []
f = open(self.fnfeff[i])
for line in f:
cols = line.split()
if cols[0] == '-----------------------------------------------------------------------':
state = 1
continue
if cols[0] =='k':
state = 2
continue
if state == 1:
r = float(cols[2])
state = 0
continue
if state == 2:
data.append(cols)
new_data_amp = []
new_data_pha = []
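            # The amplitude and phase are assembled assuming the standard
            # feffNNNN.dat column layout (k, real[2*phc], |feff|, phase,
            # reduction factor, lambda, real[p]): the total phase is
            # real[2*phc] + phase, and the amplitude is |feff| damped by the
            # mean-free-path factor exp(-2R/lambda) times the reduction factor.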
for j in range(len(data)):
k = float(data[j][0])
pha = float(data[j][1]) + float(data[j][3])
amp = float(data[j][2]) * np.exp( -2 * r / float(data[j][5])) * float(data[j][4])
new_data_amp.append([k, amp])
new_data_pha.append([k, pha])
np.savetxt(self.fnfeff[i] + '.amp', new_data_amp)
np.savetxt(self.fnfeff[i] + '.pha', new_data_pha)
for j in range(len(self.shellPha)):
self.shellAmp[j].addItem(self.fnfeff[i] + '.amp')
for j in range(len(self.shellPha)):
self.shellPha[j].addItem(self.fnfeff[i] + '.pha')
def addshell(self):
self.tabs.append(QtGui.QFrame())
caption = "Shell"+str(self.shellnr+1)
self.tabShells.addTab(self.tabs[self.shellnr], caption)
lblN = QtGui.QLabel("N")
lblR = QtGui.QLabel("R")
lblSigma = QtGui.QLabel("Sigma")
lblC3 = QtGui.QLabel("C3")
lblC4 = QtGui.QLabel("C4")
lblC5 = QtGui.QLabel("C5")
lblC6 = QtGui.QLabel("C6")
# lblE0 = QtGui.QLabel("E0")
lblAmp = QtGui.QLabel("Amplitude")
lblPha = QtGui.QLabel("Phase")
self.ltShell.append(QtGui.QGridLayout())
self.shellN.append( [QtGui.QLineEdit("4"), QtGui.QLineEdit("0"), QtGui.QLineEdit("8"), QtGui.QCheckBox()])
self.shellR.append([QtGui.QLineEdit("2"), QtGui.QLineEdit("0"), QtGui.QLineEdit("4"), QtGui.QCheckBox()])
self.shellSigma.append([QtGui.QLineEdit("0.001"), QtGui.QLineEdit("0"), QtGui.QLineEdit("1"), QtGui.QCheckBox()])
self.shellC3.append([QtGui.QLineEdit("0"), QtGui.QLineEdit("-0.1"), QtGui.QLineEdit("0.1"), QtGui.QCheckBox()])
self.shellC4.append([QtGui.QLineEdit("0"), QtGui.QLineEdit("-0.1"), QtGui.QLineEdit("0.1"), QtGui.QCheckBox()])
self.shellC5.append([QtGui.QLineEdit("0"), QtGui.QLineEdit("-0.1"), QtGui.QLineEdit("0.1"), QtGui.QCheckBox()])
self.shellC6.append([QtGui.QLineEdit("0"), QtGui.QLineEdit("-0.1"), QtGui.QLineEdit("0.1"), QtGui.QCheckBox()])
# self.shellE0.append([QtGui.QLineEdit("0"), QtGui.QLineEdit("0"), QtGui.QLineEdit("0"), QtGui.QLineEdit("0.0001"), QtGui.QCheckBox()])
self.shellAmp.append(QtGui.QComboBox())
self.shellPha.append(QtGui.QComboBox())
self.shellAmp[-1].currentIndexChanged.connect(self.AmpChanged)
self.shellPha[-1].currentIndexChanged.connect(self.PhaChanged)
self.shellN[len(self.shellN)-1][3].setChecked(True)
self.shellR[len(self.shellR)-1][3].setChecked(True)
self.shellSigma[len(self.shellSigma)-1][3].setChecked(True)
AllItemsAmp = [self.shellAmp[0].itemText(i) for i in range(self.shellAmp[0].count())]
AllItemsPha = [self.shellPha[0].itemText(i) for i in range(self.shellPha[0].count())]
self.shellAmp[len(self.shellAmp)-1].addItems(AllItemsAmp)
self.shellPha[len(self.shellPha)-1].addItems(AllItemsPha)
self.ltShell[self.shellnr].addWidget(lblN, 0, 0)
self.ltShell[self.shellnr].addWidget(lblR, 1, 0)
self.ltShell[self.shellnr].addWidget(lblSigma, 2, 0)
self.ltShell[self.shellnr].addWidget(lblC3, 3, 0)
self.ltShell[self.shellnr].addWidget(lblC4, 4, 0)
self.ltShell[self.shellnr].addWidget(lblC5, 5, 0)
self.ltShell[self.shellnr].addWidget(lblC6, 6, 0)
# self.ltShell[self.shellnr].addWidget(lblE0, 7, 0)
self.ltShell[self.shellnr].addWidget(lblAmp, 7, 0)
self.ltShell[self.shellnr].addWidget(lblPha, 8, 0)
for i in range(4):
self.ltShell[self.shellnr].addWidget(self.shellN[self.shellnr][i], 0, 2*i+1)
self.ltShell[self.shellnr].addWidget(self.shellR[self.shellnr][i], 1, 2*i+1)
self.ltShell[self.shellnr].addWidget(self.shellSigma[self.shellnr][i], 2, 2*i+1)
self.ltShell[self.shellnr].addWidget(self.shellC3[self.shellnr][i], 3, 2*i+1)
self.ltShell[self.shellnr].addWidget(self.shellC4[self.shellnr][i], 4, 2*i+1)
self.ltShell[self.shellnr].addWidget(self.shellC5[self.shellnr][i], 5, 2*i+1)
self.ltShell[self.shellnr].addWidget(self.shellC6[self.shellnr][i], 6, 2*i+1)
# self.ltShell[self.shellnr].addWidget(self.shellE0[self.shellnr][i], 7, 2*i+1)
self.ltShell[self.shellnr].addWidget(self.shellAmp[self.shellnr], 7, 1, 1, 7)
self.ltShell[self.shellnr].addWidget(self.shellPha[self.shellnr], 8, 1, 1, 7)
for j in range(7):
self.ltShell[self.shellnr].addWidget(QtGui.QLabel("Min. limit"), j, 2)
self.ltShell[self.shellnr].addWidget(QtGui.QLabel("Max. limit"), j, 4)
# self.ltShell[self.shellnr].addWidget(QtGui.QLabel("Accuracy"), j, 6)
self.tabs[self.shellnr].setLayout(self.ltShell[self.shellnr])
self.kamp.append([])
self.kpha.append([])
self.amp_orig.append([])
self.pha_orig.append([])
self.shellnr = self.shellnr +1
def removeshell(self):
self.tabs.pop()
self.tabShells.removeTab(self.shellnr-1)
self.ltShell.pop()
self.shellN.pop()
self.shellR.pop()
self.shellSigma.pop()
self.shellC3.pop()
self.shellC4.pop()
self.shellC5.pop()
self.shellC6.pop()
# self.shellE0.pop()
self.shellAmp.pop()
self.shellPha.pop()
self.kamp.pop()
self.kpha.pop()
self.amp_orig.pop()
self.pha_orig.pop()
self.shellnr = self.shellnr -1
gc.collect()
def AmpChanged(self):
which_shell = -1
sender = self.sender()
for i in range(len(self.shellAmp)):
if self.shellAmp[i] == sender:
which_shell = i
if self.shellAmp[which_shell].currentText() == "":
return
ampk, ampo = np.genfromtxt(self.shellAmp[which_shell].currentText(), usecols=(0,1), unpack=True)
self.kamp[which_shell] = ampk
self.amp_orig[which_shell] = ampo
def PhaChanged(self):
which_shell = -1
sender = self.sender()
for i in range(len(self.shellPha)):
if self.shellPha[i] == sender:
which_shell = i
if self.shellPha[which_shell].currentText() == "":
return
phak, phao = np.genfromtxt(self.shellPha[which_shell].currentText(), usecols=(0,1), unpack=True)
self.kpha[which_shell] = phak
self.pha_orig[which_shell] = phao
def saveFit(self):
fn = self.savefiledialog_qtgui()
if fn == "":
return
column_captions = ""
save_data = []
for i in range(self.shellnr):
column_captions = column_captions + "Shell{:d} ".format(i)
values = []
values.append(float(self.shellN[i][0].text()))
values.append(float(self.shellR[i][0].text()))
values.append(float(self.shellSigma[i][0].text()))
values.append(float(self.shellC3[i][0].text()))
values.append(float(self.shellC4[i][0].text()))
values.append(float(self.shellC5[i][0].text()))
values.append(float(self.shellC6[i][0].text()))
save_data.append(values)
np.savetxt(fn + ".fitdata", np.transpose(save_data), header=column_captions)
column_captions = "k exafs_fit exafs_exp"
save_array = []
save_array.append(self.common_k)
save_array.append(self.fit_result)
save_array.append(self.common_bft)
np.savetxt(fn + ".fitexafs", np.transpose(save_array), header=column_captions)
column_captions = "r_fit ft_real_fit ft_im_fit r_exp ft_real_exp ft_im_exp"
save_array = []
save_array.append(self.res_r)
save_array.append(self.res_efr)
save_array.append(self.res_efi)
save_array.append(self.r)
save_array.append(self.efr)
save_array.append(self.efi)
np.savetxt(fn + ".fitft", np.transpose(save_array), header=column_captions)
def savefiledialog_qtgui(self):
dlg = QtGui.QFileDialog()
dlg.setFileMode(QtGui.QFileDialog.AnyFile)
dlg.setAcceptMode(1) # save dialog
dlg.setNameFilters(["All files (*.*)"])
# dlg.setDirectory(self.currentdir)
if dlg.exec_():
flist = dlg.selectedFiles()
return flist[0]
else:
return ""
|
StarcoderdataPython
|
8064428
|
"""
Networking code.
"""
import logging
import signal
import socket
import tornado
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.tcpserver import TCPServer
from . import utils
logger = logging.getLogger("uwhoisd")
def handle_signal(sig, frame):
"""
Stop the main loop on signal.
"""
IOLoop.instance().add_callback(IOLoop.instance().stop)
class WhoisClient(object):
"""
Whois client.
"""
def __init__(self, server, port):
"""
A WHOIS client for Tornado.
:param server string: hostname of downstream server.
:param port int: port on downstream server to connect to.
"""
self.server = server
self.port = port
def __enter__(self):
"""
Initialize a `with` statement.
"""
self.sock = socket.create_connection((self.server, self.port))
self.sock.settimeout(10)
return self
def __exit__(self, type, value, traceback):
"""
Terminate a `with` statement.
"""
self.sock.close()
def whois(self, query):
"""
Perform a query against the server.
"""
to_return = ""
try:
bytes_whois = b""
self.sock.sendall("{0}\r\n".format(query).encode())
while True:
data = self.sock.recv(2048)
if data:
bytes_whois += data
continue
break
to_return = str(bytes_whois, "utf-8", "ignore")
except OSError as e:
# Catches all socket.* exceptions
return "{0}: {1}\n".format(self.server, e)
except Exception:
logger.exception("Unknown exception when querying '%s'", query)
return to_return
class WhoisListener(TCPServer):
"""
Listener for whois clients.
"""
def __init__(self, whois):
"""
Listen to queries from whois clients.
"""
super(WhoisListener, self).__init__()
self.whois = whois
@gen.coroutine
def handle_stream(self, stream, address):
"""
Respond to a single request.
"""
self.stream = stream
try:
whois_query = yield self.stream.read_until_regex(b"\n")
whois_query = whois_query.decode().strip().lower()
if not utils.is_well_formed_fqdn(whois_query):
whois_entry = "; Bad request: '{0}'\r\n".format(whois_query)
else:
whois_entry = self.whois(whois_query)
yield self.stream.write(whois_entry.encode())
except tornado.iostream.StreamClosedError:
logger.warning("Connection closed by %s.", address)
except Exception:
logger.exception("Unknown exception by '%s'", address)
self.stream.close()
def start_service(iface, port, whois):
"""
Start the service.
"""
signal.signal(signal.SIGINT, handle_signal)
signal.signal(signal.SIGTERM, handle_signal)
server = WhoisListener(whois)
logger.info("Listen on %s:%d", iface, port)
server.bind(port, iface)
server.start(None)
IOLoop.instance().start()
IOLoop.instance().close()
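# A minimal usage sketch, not part of the original module; the hostname, port
# and query below are placeholder assumptions. WhoisClient is designed to be
# used as a context manager, while start_service() wires a query callable to
# the listening socket.
def _example_whois_lookup():
    with WhoisClient("whois.example.net", 43) as client:
        return client.whois("example.org")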
|
StarcoderdataPython
|
8158720
|
import os, sys, string, re, commands
######################
# Requirement
# parseUniprot.py
######################################
######################################
#read each line until <name type="ORF" is read
#continue to read the same line and replace matching ME49 or VEG or GT
#and replace the <name type="ORF" with type='ME49" or "VEG" or "GT"
#def replaceGeneNameUniprot(inputFile, outputFile) :
# infile = open(inputFile)
# raw = infile.readlines()
# infile.close()
#
# outfile = open(outputFile,'w')
#
# for row in range(0,len(raw)) :
# m = re.search(r'<name type="ORF"', string.strip(raw[row]))
# if m :
# n = re.search(r'>(TGME49_[0-9]*)<', string.strip(raw[row]))
# if n :
# outfile.write(re.sub(r'ORF', 'TGME49', string.strip(raw[row])))
# outfile.write("\n")
# else :
# o = re.search(r'>(TGVEG_[0-9]*)<', string.strip(raw[row]))
# if o :
# outfile.write(re.sub(r'ORF', 'TGVEG', string.strip(raw[row])))
# outfile.write("\n")
# else :
# p = re.search(r'>(TGGT1_[0-9]*)<', string.strip(raw[row]))
# if p :
# outfile.write(re.sub(r'ORF', 'TGGT1', string.strip(raw[row])))
# outfile.write("\n")
# else :
# outfile.write(raw[row])
# else :
# z = re.search(r'<dbReference type="EuPathDB"', string.strip(raw[row]))
# if z :
# outfile.write(re.sub(r'id="ToxoDB:', 'id="', string.strip(raw[row])))
# outfile.write("\n")
# else :
# outfile.write(raw[row])
#
# outfile.close()
# programCall = 'mv ' + inputFile + ' ' + inputFile + '.old'
# commands.getstatusoutput(programCall)
# programCall = 'mv ' + outputFile + ' ' + inputFile
# commands.getstatusoutput(programCall)
#
#inputFile = '%s'%sys.argv[1]
#outputFile = os.path.join(inputFile + '.parsed')
#replaceGeneNameUniprot(inputFile, outputFile)
#
#inputFile = '%s'%sys.argv[2]
#outputFile = os.path.join(inputFile + '.parsed')
#replaceGeneNameUniprot(inputFile, outputFile)
#Replace the "ToxoDB:" prefix in EuPathDB dbReference ids of the UniProt XML input
def replaceGeneNameUniprot(inputFile, outputFile) :
infile = open(inputFile)
raw = infile.readlines()
infile.close()
outfile = open(outputFile,'w')
for row in range(0,len(raw)) :
z = re.search(r'<dbReference type="EuPathDB"', string.strip(raw[row]))
if z :
outfile.write(re.sub(r'id="ToxoDB:', 'id="', string.strip(raw[row])))
outfile.write("\n")
else :
outfile.write(raw[row])
outfile.close()
programCall = 'mv ' + inputFile + ' ' + inputFile + '.old'
commands.getstatusoutput(programCall)
programCall = 'mv ' + outputFile + ' ' + inputFile
commands.getstatusoutput(programCall)
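# For example, an input line such as
#   <dbReference type="EuPathDB" id="ToxoDB:TGME49_123456"/>
# is rewritten to
#   <dbReference type="EuPathDB" id="TGME49_123456"/>
# (the gene identifier here is illustrative, not taken from the input files).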
inputFile = '%s'%sys.argv[1]
outputFile = os.path.join(inputFile + '.parsed')
replaceGeneNameUniprot(inputFile, outputFile)
inputFile = '%s'%sys.argv[2]
outputFile = os.path.join(inputFile + '.parsed')
replaceGeneNameUniprot(inputFile, outputFile)
|
StarcoderdataPython
|
8175512
|
<filename>python/pysvso/localization/pnp.py
import cv2
import numpy as np
# used to evaluated pose graph optimization inside PnP Solver
class Measurement:
def __init__(self, pt3d, frame1, px2d1, frame2, px2d2):
self.pt3d = pt3d
#
self.frame1 = frame1
self.px2d1 = px2d1
self.frame2 = frame2
self.px2d2 = px2d2
def __str__(self):
return "<Measurement p(%d)-> [Frame#%d, Frame%d]>" % (
self.pt3d.seq,
self.frame1.seq,
self.frame2.seq
)
# available method: projection | BF
def fetchByRoI(cur_frame, mapblock, method="projection"):
detections = cur_frame._detections
    detected_rois = [] if detections is None else [obj.findParent() for obj in detections]
frames = mapblock.get_frames()
slidingWindows = set()
#
kps, kp_feats = cur_frame.kps, cur_frame.kps_feats
img_shp = cur_frame.img.shape[0:2]
# init feat_map
feat_map = np.full(img_shp, -1)
for i, kp in enumerate(kps):
x, y = kp.pt
if int(y) >= img_shp[0] or int(y) < 0 or \
int(x) >= img_shp[1] or int(x) < 0:
continue
feat_map[int(y), int(x)] = i
def _hamming_distance(x, y):
from scipy.spatial import distance
return distance.hamming(x, y)
def _get_neighbors(R, row, col, feat_map, img_shp):
H, W = img_shp[0:2]
x1, y1 = (col - R, row - R)
x2, y2 = (col + R, row + R)
if x1 < 0:
x1 = 0
if y1 < 0:
y1 = 0
if x2 >= W:
x2 = W - 1
if y2 >= H:
y2 = H - 1
try:
indice = feat_map[y1:y2, x1:x2] != -1
except Exception as e:
print(e)
print("Slicer(y1, y2, x1, x2) : Slicer(%d, %d, %d, %d)" % (y1, y2, x1, x2))
raise e
return feat_map[y1:y2, x1:x2][indice]
# retrieve mappoints observed
points = {}
measurements = {}
print("Begin to fetch matched points for PnP ...")
print("Total fetched ROIs: %d" % len(detected_rois))
# fetch points visible to the last frame
camera = cur_frame.pre.camera
# backup codes
if camera is None:
# find a nearest camera to use
cf = cur_frame.pre
while camera is None:
cf = cf.pre
if cf is not None:
camera = cf.camera
else:
break
for roi in detected_rois:
pointCloud = roi.points
print("Total fetched points for RoI %s: %d" % (roi, len(pointCloud)))
for _, point in pointCloud.items():
# print("viewing point %s using camera from Frame#%d" % (point, cur_frame.pre.seq))
cam_pt = camera.viewWorldPoint(point)
projection = camera.view(cam_pt)
if projection is None:
continue
if points.get(point.seq, None) is not None:
continue
# @todo : TODO get predicted projection with OpticalFlowKPntPredictor
row = projection.y # + flow[projection.x, projection.y][1]
col = projection.x # + flow[projection.x, projection.y][0]
# set searching radius to a large number
indice = _get_neighbors(50, int(row), int(col), feat_map, img_shp)
if len(indice) == 0:
continue
# we can see the point
points[point.seq] = point
# print("Adding %s point" % point)
# print("The point has been observed by %d frames" % len(point.frames))
# for frame_key, pixel_pos in point.frames.items():
for frame_key, pixel_pos in point.observations.items():
# frame = frames[frame_key - 1]
frame = mapblock.findFrame(frame_key)
px_l = frame.pixels.get(pixel_pos, None)
if px_l is None:
print("Could not find %s projection at Frame#%d pixel location %d" % (
point,
frame_key,
pixel_pos
))
print(frame)
raise Exception("Unexpected Value!")
feat_l = px_l.feature
# KNN Search
dist = None
min_dist, min_ind = np.inf, None
for ind in indice:
feat_r = kp_feats[ind]
dist = _hamming_distance(feat_l, feat_r)
if min_dist > dist:
min_dist = dist
min_ind = ind
pass # indice
# add to measurement
x, y = kps[min_ind].pt
H, W = img_shp[:2]
px_r = cur_frame.pixels.get(int(y * W + x), None)
if px_r is None:
raise Exception("Unexpected Value!")
measurements[point.seq] = Measurement(point, frame, px_l, cur_frame, px_r)
# print("Adding measurement for %s" % point)
# update sliding window
slidingWindows.add(frame_key)
# associate with frame
pass # observations
pass # pointCloud
pass # detected_rois
return points, measurements, list(slidingWindows)
# checking whether our results from PoseOptimization close to this, see Tracker._PnP (PoseOptimization) method
# defaults to cv2.solvePnPRansac
class PnPSolver:
MIN_INLIERS = 10
def __init__(self, frame, mapblock):
self.frame = frame
self._map = mapblock
self._impl = cv2.solvePnPRansac
self.inliers = None
pass
# @todo : TODO
def solve(self, points, measurements):
K = self.frame.camera.K
pointCloud = []
observations = []
for _, point in points.items():
pointCloud.append(point.data)
measurement = measurements[point.seq]
observations.append(measurement.px2d2.data)
if len(pointCloud) < 6:
print("Not Enough Points for PnP Solver!")
return None, None
try:
_, rot_vec, tvec, inliers = self._impl(np.float32(pointCloud), np.float32(observations), K, None, None,
None,
False, 100, 4.0, 0.99, None)
except Exception as e:
print(e)
return None, None
R, _ = cv2.Rodrigues(rot_vec)
t = tvec.reshape((3,1))
if inliers is None or len(inliers) < self.MIN_INLIERS:
print("inliners:", inliers)
return None, None
self.inliers = inliers
R = np.linalg.inv(R)
t = -R.dot(t)
return R, t
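# A rough usage sketch, not part of the original module; `cur_frame` and
# `mapblock` are assumed to come from the surrounding SLAM pipeline. The
# matched 3D points and measurements produced by fetchByRoI feed directly
# into PnPSolver to recover the pose of the current frame.
def _example_pose_update(cur_frame, mapblock):
    points, measurements, window = fetchByRoI(cur_frame, mapblock)
    solver = PnPSolver(cur_frame, mapblock)
    R, t = solver.solve(points, measurements)
    return R, t, window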
|
StarcoderdataPython
|
4933492
|
from flask import Flask, render_template, request, redirect, url_for, g
from flask_sqlalchemy import SQLAlchemy
import jyserver.Flask as jsf
import numpy as np
import time
import random
from python_code.q_learning import QLearning
from python_code.robot import _raspi
import python_code.constantes as const
# If this is running on a Raspberry Pi, determine whether it is
# production (False - via access point)
# or development (True - via ethernet cable)
if _raspi:
    _dev = False
# Creation of the Flask application and setup of the database
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///./crawler-database.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
db.init_app(app)
class QTable(db.Model):
    '''
    Database model that defines an SQL table
    where the trained Q-tables are stored.
    '''
__tablename__ = 'qtable'
id = db.Column(db.Integer, primary_key=True)
q_table = db.Column(db.PickleType, nullable=False)
@staticmethod
def get_by_id(id):
        return QTable.query.get(id)
@jsf.use(app)
class App:
    '''
    Class that models the Flask application for use with jyserver.
    It defines the methods that can be executed from events produced
    in the web interface, as if they were javascript.
    Likewise, it allows the created javascript functions to be
    executed by accessing them through self.js.
    It provides methods to the view for running the Q-Learning
    algorithm functions, as well as those that move the robot.
    '''
def __init__(self):
        '''
        Constructor of the App class.
        It stores an instance of the QLearning class and loads a Q-table
        from the database to use as the initial table.
        If the entry has not been created or the database is empty, that entry
        is created and/or an empty Q-table (with all of its values at 0) is stored.
        '''
self.Q = QLearning(self)
        # Initialize the variables used to manage alerts in the view
self.post_params = False
self.alertas = []
self.check = 0
        # Load or initialize the database
        if "qtable" in db.inspect(db.engine).get_table_names(): # If the table exists in the database
            database = QTable.query.all()
            if not database == []: # If it exists and has elements, take the last one
self.entrada_db = database[-1]
q_table = self.entrada_db.q_table
self.Q.inicializar_q_table(q_table)
            else: # If it exists but is empty, create a new entry
self.Q.inicializar_q_table()
# print(q_table)
self.entrada_db = QTable(q_table=self.Q.q_table)
db.session.add(self.entrada_db)
db.session.commit()
        else: # If the table does not exist in the database
db.create_all()
self.Q.inicializar_q_table()
self.entrada_db = QTable(q_table=self.Q.q_table)
db.session.add(self.entrada_db)
db.session.commit()
def entrenar(self):
        '''
        Function that starts the training and saves the Q-table
        generated during its execution to the database.
        '''
self.Q.semaforo_done.acquire()
self.Q.done = False
self.Q.semaforo_done.release()
q_table = self.Q.entrenar()
self.entrada_db.query.update({"q_table" : q_table})
db.session.commit()
def avanzar(self):
        '''
        Function that starts the robot movement using the table
        stored in the Q variable, which can be either a recently
        trained one or the one initially loaded from the database.
        '''
self.Q.semaforo_done.acquire()
self.Q.done = False
self.Q.semaforo_done.release()
self.Q.avanzar()
def detener(self):
        '''
        Stop the execution of the training or of the movement, as appropriate.
        '''
self.Q.semaforo_done.acquire()
self.Q.done=True
self.Q.semaforo_done.release()
def reset_table(self):
        '''
        Function that resets the whole Q-table to 0 in Q and
        updates it in the interface and in the database.
        '''
self.Q.inicializar_q_table()
q_table = self.Q.q_table
self.entrada_db.query.update({"q_table" : q_table})
db.session.commit()
self.js.update_table(list(q_table.flatten()), list(self.Q.robot.reset()))
@app.route('/', methods=['GET'])
def index():
    '''
    Method corresponding to the "/" route of the web app.
    It sends some parameters to the view, such as the initial Q-table,
    the training parameters, the alerts and the defined constants.
    '''
    App.detener() # If the robot was left moving, it is stopped here
    # Values to be sent to the view
q_table = App.Q.q_table
config = App.Q.get_params()
state = App.Q.robot.state
    # Parameters sent to the view
data={
'titulo': 'Crawler Server',
'q_table': list(q_table.flatten()),
'config': config,
'state': state,
'check': App.check,
'alertas': App.alertas,
'minimos': const.minimos,
'maximos': const.maximos,
'steps': const.steps
}
    # If the training parameters have been updated, clear the alert variables
if App.post_params:
App.post_params = False
App.check = 0
App.alertas = []
return App.render(render_template('index.html', data=data))
@app.route("/actualizar_parametros", methods=["POST"])
def actualizar_parametros():
    '''
    Handles the form used to update the training parameters.
    It updates the training parameters using the values from the form or
    sets the default values, as indicated.
    It validates the minimum and maximum limits of these parameters and
    appends the results of the update to the alert list.
    '''
    App.post_params = True
    if 'aplicar' in request.form: # If the apply button was clicked
        # The model parameters are received from the form
learning_rate = float(request.form['learning_rate'])
discount_factor = float(request.form['discount_factor'])
epsilon = float(request.form['epsilon'])
learning_epsilon = float(request.form['learning_epsilon'])
min_epsilon = float(request.form['min_epsilon'])
max_movements = int(float(request.form['max_movements']))
win_reward = int(float(request.form['win_reward']))
loss_reward = int(float(request.form['loss_reward']))
dead_reward = int(float(request.form['dead_reward']))
loop_reward = int(float(request.form['loop_reward']))
        # Check the received values against the minimum and maximum limits, creating alerts
App.check = 1
i = 0
if not(learning_rate>=const.minimos[i] and learning_rate<=const.maximos[i]):
App.alertas.append(f"Learning Rate fuera de los limites: ({const.minimos[i]}, {const.maximos[i]})")
App.check = -1
i+=1
if not(discount_factor>=const.minimos[i] and discount_factor<=const.maximos[i]):
App.alertas.append(f"Discount Factor fuera de los limites: ({const.minimos[i]}, {const.maximos[i]})")
App.check = -1
i+=1
if not(epsilon>=const.minimos[i] and epsilon<=const.maximos[i]):
App.alertas.append(f"Epsilon fuera de los limites: ({const.minimos[i]}, {const.maximos[i]})")
App.check = -1
i+=1
if not(learning_epsilon>=const.minimos[i] and learning_epsilon<=const.maximos[i]):
App.alertas.append(f"Learning Epsilon fuera de los limites: ({const.minimos[i]}, {const.maximos[i]})")
App.check = -1
i+=1
if not(min_epsilon>=const.minimos[i] and min_epsilon<=const.maximos[i]):
App.alertas.append(f"Min Epsilon fuera de los limites: ({const.minimos[i]}, {const.maximos[i]})")
App.check = -1
i+=1
if not(max_movements>=const.minimos[i] and max_movements<=const.maximos[i]):
App.alertas.append(f"Max Movements fuera de los limites: ({const.minimos[i]}, {const.maximos[i]})")
App.check = -1
i+=1
if not(win_reward>=const.minimos[i] and win_reward<=const.maximos[i]):
App.alertas.append(f"Win Reward fuera de los limites: ({const.minimos[i]}, {const.maximos[i]})")
App.check = -1
i+=1
if not(loss_reward>=const.minimos[i] and loss_reward<=const.maximos[i]):
App.alertas.append(f"Loss Reward fuera de los limites: ({const.minimos[i]}, {const.maximos[i]})")
App.check = -1
i+=1
if not(dead_reward>=const.minimos[i] and dead_reward<=const.maximos[i]):
App.alertas.append(f"Dead Reward fuera de los limites: ({const.minimos[i]}, {const.maximos[i]})")
App.check = -1
i+=1
if not(loop_reward>=const.minimos[i] and loop_reward<=const.maximos[i]):
App.alertas.append(f"Loop Reward fuera de los limites: ({const.minimos[i]}, {const.maximos[i]})")
App.check = -1
        if App.check == 1: # If there are no errors, update the parameters and add the success alert
App.Q.set_params(
learning_rate,
discount_factor,
epsilon,
learning_epsilon,
min_epsilon,
max_movements,
win_reward,
loss_reward,
dead_reward,
loop_reward
)
App.alertas.append("Parámetros actualizados satisfactoriamente")
    elif 'reset' in request.form: # If the reset button was clicked
App.Q.set_default_params()
App.check = 1
App.alertas.append("Parámetros actualizados satisfactoriamente")
return redirect(url_for('index'))
def pagina_no_encontrada(error):
    '''
    Function that renders the 404 error page
    '''
data = {
'titulo': 'Error 404!'
}
return render_template('404.html', data=data), 404
if __name__=='__main__':
    '''
    Main program.
    Registers the 404 error handler.
    Starts the Flask server on the corresponding IP address.
    '''
app.register_error_handler(404, pagina_no_encontrada)
if _raspi:
if _dev:
print(" * Red local mediante ethernet")
            app.run(host='0.0.0.0', port=5000) # For the local network via ethernet
        else:
            print(" * Access point")
            app.run(host='192.168.4.1', port=5000) # When running as an access point
    else:
        print(" * Run PC - Debug")
        app.run(debug=True) # When running on a PC
|
StarcoderdataPython
|
3478057
|
<reponame>felix-salfelder/sage
"""
Schur symmetric functions
"""
#*****************************************************************************
# Copyright (C) 2007 <NAME> <<EMAIL>>
# 2012 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#*****************************************************************************
import classical
import sage.libs.symmetrica.all as symmetrica
from sage.rings.all import ZZ, QQ, Integer
class SymmetricFunctionAlgebra_schur(classical.SymmetricFunctionAlgebra_classical):
def __init__(self, Sym):
"""
A class for methods related to the Schur symmetric function basis
INPUT:
- ``self`` -- a Schur symmetric function basis
- ``Sym`` -- an instance of the ring of the symmetric functions
TESTS::
sage: s = SymmetricFunctions(QQ).s()
sage: s == loads(dumps(s))
True
sage: TestSuite(s).run(skip=['_test_associativity', '_test_distributivity', '_test_prod'])
sage: TestSuite(s).run(elements = [s[1,1]+s[2], s[1]+2*s[1,1]])
"""
classical.SymmetricFunctionAlgebra_classical.__init__(self, Sym, "Schur", 's')
def _dual_basis_default(self):
"""
Returns the default value for ``self.dual_basis()``
This method returns the dual basis to the Schur basis with respect to the standard
scalar product. Since the Schur basis is self-dual, it returns itself.
EXAMPLES::
sage: s = SymmetricFunctions(QQ).s()
sage: ds = s.dual_basis()
sage: s is ds
True
sage: zee = lambda x : x.centralizer_size()
sage: S = s.dual_basis(zee); S
Dual basis to Symmetric Functions over Rational Field in the Schur basis
sage: S[2,1].scalar(s[2,1])
1
TESTS::
sage: s._dual_basis_default() is s.dual_basis()
True
"""
return self
def _multiply(self, left, right): # TODO: factor out this code for all bases (as is done for coercions)
"""
Returns the product of ``left`` and ``right``.
INPUT:
- ``self`` -- a Schur symmetric function basis
- ``left``, ``right`` -- instances of the Schur basis ``self``.
        OUTPUT:
- an element of the Schur basis, the product of ``left`` and ``right``
TESTS::
sage: s = SymmetricFunctions(QQ).s()
sage: a = s([2,1]) + 1; a
s[] + s[2, 1]
sage: a^2 # indirect doctest
s[] + 2*s[2, 1] + s[2, 2, 1, 1] + s[2, 2, 2] + s[3, 1, 1, 1] + 2*s[3, 2, 1] + s[3, 3] + s[4, 1, 1] + s[4, 2]
::
sage: QQx.<x> = QQ[]
sage: s = SymmetricFunctions(QQx).s()
sage: a = x^2*s([2,1]) + 2*x; a
2*x*s[] + x^2*s[2, 1]
sage: a^2
4*x^2*s[] + 4*x^3*s[2, 1] + x^4*s[2, 2, 1, 1] + x^4*s[2, 2, 2] + x^4*s[3, 1, 1, 1] + 2*x^4*s[3, 2, 1] + x^4*s[3, 3] + x^4*s[4, 1, 1] + x^4*s[4, 2]
::
::
sage: 0*s([2,1])
0
"""
#Use symmetrica to do the multiplication
A = left.parent()
R = A.base_ring()
if R is ZZ or R is QQ:
if left and right:
return symmetrica.mult_schur_schur(left, right)
else:
return A._from_dict({})
z_elt = {}
for (left_m, left_c) in left._monomial_coefficients.iteritems():
for (right_m, right_c) in right._monomial_coefficients.iteritems():
c = left_c * right_c
d = symmetrica.mult_schur_schur({left_m:Integer(1)}, {right_m:Integer(1)})._monomial_coefficients
for m in d:
if m in z_elt:
z_elt[ m ] = z_elt[m] + c * d[m]
else:
z_elt[ m ] = c * d[m]
return A._from_dict(z_elt)
def coproduct_on_basis(self, mu):
r"""
Returns the coproduct of ``self(mu)``.
Here ``self`` is the basis of Schur functions in the ring of symmetric functions.
INPUT:
- ``self`` -- a Schur symmetric function basis
- ``mu`` -- a partition
OUTPUT:
- an element of the tensor square of the Schur basis
EXAMPLES::
sage: Sym = SymmetricFunctions(QQ)
sage: s = Sym.schur()
sage: s.coproduct_on_basis([2])
s[] # s[2] + s[1] # s[1] + s[2] # s[]
"""
import sage.libs.lrcalc.lrcalc as lrcalc
T = self.tensor_square()
return T._from_dict( lrcalc.coprod(mu, all=1) )
class Element(classical.SymmetricFunctionAlgebra_classical.Element):
def __pow__(self, n):
"""
Returns the naive powering of an instance of ``self``.
INPUT:
- ``self`` -- an element of the Schur symmetric function basis
- ``n`` -- a nonnegative integer
OUTPUT:
            - the ``n``-th power of an instance of ``self`` in the Schur basis
See ``Monoids.Element.__pow__`` and ``Monoids.Element._pow_naive``.
EXAMPLES::
sage: s = SymmetricFunctions(QQ[x]).s()
sage: len(s([2,1])^8) # long time (~ 4 s)
1485
sage: len(s([2,1])^9) # long time (~10 s)
2876
Binary exponentiation does not seem to bring any speedup for
schur functions. This most likely is because of the
explosion of the number of terms.
# sage: s = SymmetricFunctions(QQ).s(); y = s([1])
# sage: n = 24
# sage: %timeit y**n # using binary exponentiation
# 10 loops, best of 3: 1.22 s per loop
# sage: %timeit prod(y for i in range(n))
# 10 loops, best of 3: 1.06 s per loop
With polynomial coefficients, this is actually much *slower*
(although this should be profiled further; there seems to
be an unreasonable number of polynomial multiplication involved,
besides the fact that 1 * QQ[x].one() currently involves a
polynomial multiplication)
# sage: sage: s = SymmetricFunctions(QQ[x]).s()
# sage: y = s([2,1])
# sage: %timeit y**7
# 10 loops, best of 3: 18.9 s per loop
# sage: %timeit y*y*y*y*y*y*y
# 10 loops, best of 3: 1.73 s per loop
Todo: do the same for the other non multiplicative bases?
"""
return self._pow_naive(n)
def omega(self):
"""
Returns the image of ``self`` under the Frobenius / omega automorphism.
INPUT:
- ``self`` -- an element of the Schur symmetric function basis
OUTPUT:
- the image of ``self`` under omega as an element of the Schur basis
EXAMPLES::
sage: s = SymmetricFunctions(QQ).s()
sage: s([2,1]).omega()
s[2, 1]
sage: s([2,1,1]).omega()
s[3, 1]
"""
conj = lambda part: part.conjugate()
return self.map_support(conj)
def scalar(self, x, zee=None):
"""
Returns the standard scalar product between ``self`` and `x`.
Note that the Schur functions are self-dual with respect to this
scalar product. They are also lower-triangularly related to the
monomial symmetric functions with respect to this scalar product.
INPUT:
- ``self`` -- an element of the Schur symmetric function basis
- ``x`` -- an element of the symmetric functions
- ``zee`` -- an optional function that specifies the scalar product
between two power sum symmetric functions indexed by the same
              partition. If ``zee`` is not specified, the standard Hall scalar product is used.
OUTPUT:
- the scalar product between ``self`` and ``x``
EXAMPLES::
sage: s = SymmetricFunctions(ZZ).s()
sage: a = s([2,1])
sage: b = s([1,1,1])
sage: c = 2*s([1,1,1])
sage: d = a + b
sage: a.scalar(a)
1
sage: b.scalar(b)
1
sage: b.scalar(a)
0
sage: b.scalar(c)
2
sage: c.scalar(c)
4
sage: d.scalar(a)
1
sage: d.scalar(b)
1
sage: d.scalar(c)
2
::
sage: m = SymmetricFunctions(ZZ).monomial()
sage: p4 = Partitions(4)
sage: l = [ [s(p).scalar(m(q)) for q in p4] for p in p4]
sage: matrix(l)
[ 1 0 0 0 0]
[-1 1 0 0 0]
[ 0 -1 1 0 0]
[ 1 -1 -1 1 0]
[-1 2 1 -3 1]
"""
if zee is None:
s = self.parent()
R = s.base_ring()
one = R(1)
f = lambda p1, p2: one
x = s(x)
return s._apply_multi_module_morphism(self, x, f, orthogonal=True)
else:
p = self.parent().realization_of().power()
return p(self).scalar( x, zee=zee )
def expand(self, n, alphabet='x'):
"""
Expands the symmetric function as a symmetric polynomial in `n` variables.
INPUT:
- ``self`` -- an element of the Schur symmetric function basis
- ``n`` -- a positive integer
- ``alphabet`` -- a variable for the expansion (default: `x`)
OUTPUT: a monomial expansion of an instance of ``self`` in `n` variables
EXAMPLES::
sage: s = SymmetricFunctions(QQ).s()
sage: a = s([2,1])
sage: a.expand(2)
x0^2*x1 + x0*x1^2
sage: a.expand(3)
x0^2*x1 + x0*x1^2 + x0^2*x2 + 2*x0*x1*x2 + x1^2*x2 + x0*x2^2 + x1*x2^2
sage: a.expand(4)
x0^2*x1 + x0*x1^2 + x0^2*x2 + 2*x0*x1*x2 + x1^2*x2 + x0*x2^2 + x1*x2^2 + x0^2*x3 + 2*x0*x1*x3 + x1^2*x3 + 2*x0*x2*x3 + 2*x1*x2*x3 + x2^2*x3 + x0*x3^2 + x1*x3^2 + x2*x3^2
sage: a.expand(2, alphabet='y')
y0^2*y1 + y0*y1^2
sage: a.expand(2, alphabet=['a','b'])
a^2*b + a*b^2
sage: s([1,1,1,1]).expand(3)
0
sage: (s([]) + 2*s([1])).expand(3)
2*x0 + 2*x1 + 2*x2 + 1
"""
condition = lambda part: len(part) > n
return self._expand(condition, n, alphabet)
# Backward compatibility for unpickling
from sage.structure.sage_object import register_unpickle_override
register_unpickle_override('sage.combinat.sf.schur', 'SymmetricFunctionAlgebraElement_schur', SymmetricFunctionAlgebra_schur.Element)
|
StarcoderdataPython
|
1902051
|
from osbot_utils.utils.Files import file_create, file_not_exists
from osbot_utils.utils.Process import start_process
def run(event, context=None):
target_host = event.get('target_host' )
ssh_key = event.get('ssh_key' )
ssh_key_name = event.get('ssh_key_name' )
ssh_user = event.get('ssh_user' )
ssh_command = event.get('ssh_command' )
# port_forward = event.get('port_forwards' ) # not implemented:
include_stderr = event.get('include_stderr')
ssh_key_file = f'/tmp/{ssh_key_name}'
if file_not_exists(ssh_key_file): # create local key if it doesn't exist
file_create(ssh_key_file, ssh_key)
start_process('chmod', ['600', ssh_key_file])
ssh_params = ['-o', 'StrictHostKeyChecking=no'] # todo: add support for updating the local hosts file
if ssh_key_file:
ssh_params.append('-i') # set key to use
ssh_params.append(ssh_key_file)
# if port_forward: # todo see if we do actually need this (main use case would be to allow direct HTTP access to an internal server)
# local_port = port_forward.get('local_port' ) # need to see if Lambda will allow binding ports like this
# remote_ip = port_forward.get('remote_ip' )
# remote_port = port_forward.get('remote_port')
ssh_params.append(f'{ssh_user}@{target_host}') # set user and target ip
ssh_params.append(ssh_command) # add command to execute
result = start_process("ssh", ssh_params) # execute command
if include_stderr: # see if we need to include stderr in return value
return result.get('stdout') + result.get('stderr')
return result.get('stdout')
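# An illustrative event payload (all values are placeholders, not part of the
# original handler); it lists the keys that run() reads from the event dict.
_example_event = {
    'target_host': '10.0.0.12',
    'ssh_key': '-----BEGIN OPENSSH PRIVATE KEY-----\n...\n-----END OPENSSH PRIVATE KEY-----',
    'ssh_key_name': 'deploy_key',
    'ssh_user': 'ubuntu',
    'ssh_command': 'uname -a',
    'include_stderr': True,
}
# run(_example_event) writes the key to /tmp/deploy_key on first use and runs
# the command over ssh, returning stdout (plus stderr when include_stderr is set).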
|
StarcoderdataPython
|
8082864
|
#!/usr/bin/python
import os, os.path, re, copy
from ..utils.misc import json_dumps, get_string
try:
from html import escape
except ImportError:
from cgi import escape
__all__ = ["add_entries"]
config = {"__Instructions__":"In filename and main_header, {} is replaced with the log title",
"path": os.path.join(os.path.expanduser('~'),'Dropbox','Journal'),
"filename":"Journal_{}.md","main_header":"Journal for {}",
"short_date":"%Y-%m-%d","long_date":"%x",
"date_time":"%c"}
space_re = re.compile('\s+')
def html_escape(input):
return escape(input, True)
def add_entries_helper(entries_to_send, entries, key):
if not entries: return
if not key: return
if not key.replace(os.sep,'.') in entries_to_send: entries_to_send[key.replace(os.sep,'.')] = {}
subs = find_sub_keys(entries, key)
if len(subs) > 0:
entries_to_send[key.replace(os.sep,'.')]["subs"] = {}
for sk in subs:
add_entries_helper(entries_to_send[key.replace(os.sep,'.')]["subs"], entries, sk)
if key in entries:
entries_to_send[key.replace(os.sep,'.')]["entries"] = entries[key]
del entries[key]
for e in entries_to_send[key.replace(os.sep,'.')]["entries"]:
e["original"] = copy.deepcopy(e)
e["title"] = space_re.sub(' ', e["title"])
if "tags" in e: e["tags"] = map(get_string, e["tags"])
for e_key in e.keys():
if e_key == "note" or e_key == "notes":
val = re.sub('^ ', '', re.sub('^', ' ', e[e_key], 0, re.M))
while re.search('\n \n', val):
val = re.sub('\n \n', '\n\n', val)
e[e_key] = val
if not entries_to_send[key.replace(os.sep,'.')]: del entries_to_send[key.replace(os.sep,'.')]
def find_sub_keys(entries, key):
if not entries: return []
if not key: return []
r = []
for k in entries:
if k.startswith(key + os.sep): r.append(k)
return r
def get_diary(entries, key, header = '', use_title = False):
diary = ""
if key in entries:
for e in entries[key]:
if use_title: diary = diary + '### ' + e['title'] + "\n\n"
diary = diary + e["text"]
diary = diary + "\n\n"
del entries[key]
for k in find_sub_keys(entries, key):
for e in entries[k]:
if use_title: diary = diary + '### ' + e['title'] + "\n\n"
diary = diary + e["text"]
diary = diary + "\n\n"
del entries[k]
if bool(header) and bool(diary):
diary = '## ' + header + '\n\n' + diary
return diary
def add_entries(entries):
if not entries: return
import os, copy, codecs, jinja2
if not os.path.isdir(config["path"]): os.mkdir(config["path"])
c = copy.deepcopy(entries)
entries_to_send = {}
date = c[list(c.keys())[0]][0]["date"]
diary = get_diary(c, "diary")
diary = diary + get_diary(c, "dreams", "Dreams", True)
f = os.path.join(config["path"], config["filename"].format(date.strftime(config["short_date"])))
fo = codecs.open(f, 'w', encoding='utf-8')
title = config["main_header"].format(date.strftime(config["long_date"]))
for key in list(c.keys()):
if not key in c: continue
if key.count(os.sep) > 0:
i = key.index(os.sep)
add_entries_helper(entries_to_send, c, key[:i])
else:
add_entries_helper(entries_to_send, c, key)
templates_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates')
env = jinja2.Environment(loader=jinja2.FileSystemLoader(templates_path))
env.filters['to_json'] = json_dumps
env.filters['html_escape'] = html_escape
temp = env.get_template("markdown.tpl")
out = temp.render(title=title, diary=diary, config=config, entries=entries_to_send)
fo.write(out)
fo.close()
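# A sketch of the structure add_entries() expects, inferred from the code
# above and therefore an assumption: a dict keyed by category path, each value
# a list of entry dicts carrying at least "date", "title" and "text", e.g.
#
#   import datetime
#   add_entries({"diary": [{"date": datetime.date.today(),
#                           "title": "Sample entry",
#                           "text": "Wrote some code."}]})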
|
StarcoderdataPython
|
1658930
|
<reponame>andrew0harney/Semantic-encoding-model
from ldaUtils import LdaEncoder,LdaEncoding,createLabeledCorpDict
import numpy as np
from gensim import models
import pickle
import heapq
#<NAME> 28/04/14
#This script produces nExemplars for each of the topic models
#(Ordered by probability of belonging to a topic)
nExemplars = 10
labeledDocuments = #
imgaeSourceReg = #
#Load documents to be examined
docs = createLabeledCorpDict(labeledDocuments,imgaeSourceReg,output=True)
fnames = docs.keys() #Get file names
modelDir = #
#Load LDA model
modelName = #
lda = models.LdaModel.load(modelDir+modelName+'model')
ldaDict = pickle.load(open(modelDir+modelName+'dictionary','r'))
ldaEncoder = LdaEncoder(ldaDict,docs,lda)
#Probability encoding of each documents
encoding = []
#Encode each of the files
for fname in fnames:
encoding.append(LdaEncoding(fname,ldaEncoder[{'label':fname}]))
#Output the topic nExemplars for each topic
outf = file(modelDir+modelName+'exemplars','w')
ntopics = lda.num_topics  # number of topics taken from the loaded LDA model
for i in range(ntopics):
    print 'Finding exemplars for topic '+str(i)
[e.setTopicN(i) for e in encoding] #Set the topic number to e compared
exemplars = heapq.nlargest(nExemplars,encoding) #Create limited heap
outf.write('Topic %d\n%s\n'%(i,'_'*10))
outf.write(str([exemplar.__str__(topicN=i) for exemplar in exemplars])+'\n\n')
outf.close()
|
StarcoderdataPython
|
8071312
|
<reponame>Bazinga0426/Crowd-Counting-for-FYP
import cv2
import os, time
import random
import pandas as pd
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
try:
from termcolor import cprint
except ImportError:
cprint = None
try:
from pycrayon import CrayonClient
except ImportError:
CrayonClient = None
def log_print(text, color=None, on_color=None, attrs=None):
if cprint is not None:
cprint(text, color=color, on_color=on_color, attrs=attrs)
else:
print(text)
class Timer(object):
def __init__(self):
self.tot_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
def tic(self):
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.tot_time += self.diff
self.calls += 1
self.average_time = self.tot_time / self.calls
if average:
return self.average_time
else:
return self.diff
class ImgDataLoader():
def __init__(self, data_path, gt_path, shuffle=False, gt_downsample=False, pre_load=False):
self.data_path = data_path
self.gt_path = gt_path
self.gt_downsample = gt_downsample
self.pre_load = pre_load
self.data_files = [filename for filename in os.listdir(data_path) \
if os.path.isfile(os.path.join(data_path, filename))]
self.data_files.sort()
self.shuffle = shuffle
if shuffle:
random.seed(2020)
self.num_samples = len(self.data_files)
self.blob_list = {}
self.id_list = [i for i in range(0, self.num_samples)]
if self.pre_load:
idx = 0
for filename in self.data_files:
img = cv2.imread(os.path.join(self.data_path, filename), 0)
img = img.astype(np.float32, copy=False)
ht = img.shape[0]
wd = img.shape[1]
ht_1 = int((ht / 4) * 4)
wd_1 = int((wd / 4) * 4)
img = cv2.resize(img, (wd_1, ht_1))
img = img.reshape((1, 1, img.shape[0], img.shape[1]))
den = pd.read_csv(os.path.join(self.gt_path, os.path.splitext(filename)[0] + '.csv'), sep=',', header=None).values#.as_matrix()
den = den.astype(np.float32, copy=False)
if self.gt_downsample:
wd_1 = wd_1 // 4
ht_1 = ht_1 // 4
den = cv2.resize(den, (wd_1, ht_1))
den = den * ((wd * ht) / (wd_1 * ht_1))
else:
den = cv2.resize(den, (wd_1, ht_1))
den = den * ((wd * ht) / (wd_1 * ht_1))
den = den.reshape((1, 1, den.shape[0], den.shape[1]))
blob = {}
blob['data'] = img
blob['gt_density'] = den
blob['filename'] = filename
self.blob_list[idx] = blob
idx = idx + 1
if idx % 500 == 0:
print('Loaded ', idx, '/', self.num_samples, 'files')
print(' Loading images completed', idx, 'files')
def __iter__(self):
if self.shuffle:
if self.pre_load:
random.shuffle(self.id_list)
else:
random.shuffle(self.data_files)
files = self.data_files
id_list = self.id_list
for idx in id_list:
if self.pre_load:
blob = self.blob_list[idx]
blob['idx'] = idx
else:
filename = files[idx]
img = cv2.imread(os.path.join(self.data_path, filename), 0)
img = img.astype(np.float32, copy=False)
ht = img.shape[0]
wd = img.shape[1]
ht_1 = (ht // 4) * 4
wd_1 = (wd // 4) * 4
img = cv2.resize(img, (wd_1, ht_1))
img = img.reshape((1, 1, img.shape[0], img.shape[1]))
                den = pd.read_csv(os.path.join(self.gt_path, os.path.splitext(filename)[0] + '.csv'), sep=',', header=None).values#.as_matrix()
den = den.astype(np.float32, copy=False)
if self.gt_downsample:
wd_1 = wd_1 // 4
ht_1 = ht_1 // 4
den = cv2.resize(den, (wd_1, ht_1))
den = den * ((wd * ht) / (wd_1 * ht_1))
else:
den = cv2.resize(den, (wd_1, ht_1))
den = den * ((wd * ht) / (wd_1 * ht_1))
den = den.reshape((1, 1, den.shape[0], den.shape[1]))
blob = {}
blob['data'] = img
blob['gt_density'] = den
blob['filename'] = filename
yield blob
def get_num_samples(self):
return self.num_samples
class MCNN(nn.Module):
'''
Multi-column CNN
'''
def __init__(self, bn=False):
super(MCNN, self).__init__()
self.branch1 = nn.Sequential(Conv2d(1, 16, 9, same_padding=True, bn=bn), nn.MaxPool2d(2),Conv2d(16, 32, 7, same_padding=True, bn=bn),nn.MaxPool2d(2),
Conv2d(32, 16, 7, same_padding=True, bn=bn),
Conv2d(16, 8, 7, same_padding=True, bn=bn))
self.branch2 = nn.Sequential(Conv2d(1, 20, 7, same_padding=True, bn=bn), nn.MaxPool2d(2), Conv2d(20, 40, 5, same_padding=True, bn=bn), nn.MaxPool2d(2),
Conv2d(40, 20, 5, same_padding=True, bn=bn),
Conv2d(20, 10, 5, same_padding=True, bn=bn))
self.branch3 = nn.Sequential(Conv2d(1, 24, 5, same_padding=True, bn=bn), nn.MaxPool2d(2),Conv2d(24, 48, 3, same_padding=True, bn=bn),nn.MaxPool2d(2),
Conv2d(48, 24, 3, same_padding=True, bn=bn),
Conv2d(24, 12, 3, same_padding=True, bn=bn))
self.fuse = nn.Sequential(Conv2d(30, 1, 1, same_padding=True, bn=bn))
def forward(self, im_data):
x1 = self.branch1(im_data)
x2 = self.branch2(im_data)
x3 = self.branch3(im_data)
x = torch.cat((x1, x2, x3), 1)
x = self.fuse(x)
return x
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride=1, relu=True, same_padding=False, bn=False):
super(Conv2d, self).__init__()
padding = int((kernel_size - 1) / 2) if same_padding else 0
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True) if bn else None
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class FC(nn.Module):
def __init__(self, in_features, out_features, relu=True):
super(FC, self).__init__()
self.fc = nn.Linear(in_features, out_features)
self.relu = nn.ReLU(inplace=True) if relu else None
def forward(self, x):
x = self.fc(x)
if self.relu is not None:
x = self.relu(x)
return x
def save_net(filename, net):
import h5py
h5f = h5py.File(filename, mode='w')
for k, v in net.state_dict().items():
h5f.create_dataset(k, data=v.cpu().numpy())
def load_net(filename, net):
import h5py
h5f = h5py.File(filename, mode='r')
for k, v in net.state_dict().items():
param = torch.from_numpy(np.asarray(h5f[k]))
v.copy_(param)
def np_to_variable(x, is_cuda=True, is_training=False, dtype=torch.FloatTensor):
if is_training:
v = Variable(torch.from_numpy(x).type(dtype))
else:
v = Variable(torch.from_numpy(x).type(dtype), requires_grad=False, volatile=True)
if is_cuda:
v = v.cuda()
return v
def set_trainable(model, requires_grad):
for param in model.parameters():
param.requires_grad = requires_grad
def weights_normal_init(model, dev=0.01):
if isinstance(model, list):
for m in model:
weights_normal_init(m, dev)
else:
for m in model.modules():
if isinstance(m, nn.Conv2d):
# print torch.sum(m.weight)
m.weight.data.normal_(0.0, dev)
if m.bias is not None:
m.bias.data.fill_(0.0)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, dev)
class CrowdCounter(nn.Module):
def __init__(self):
super(CrowdCounter, self).__init__()
self.DME = MCNN()
self.loss_fn = nn.MSELoss()
@property
def loss(self):
return self.loss_mse
def forward(self, im_data, gt_data=None):
im_data = np_to_variable(im_data, is_cuda=True, is_training=self.training)
img_map = self.DME(im_data)
if self.training:
# print('gt_data')
gt_data = np_to_variable(gt_data, is_cuda=True, is_training=self.training)
self.loss_mse = self.build_loss(img_map, gt_data)
return img_map
def build_loss(self, img_map, gt_data):
loss = self.loss_fn(img_map, gt_data)
return loss
def evaluate_model(trained_model, data_loader):
net = CrowdCounter()
load_net(trained_model, net)
net.cuda()
net.eval()
mae = 0.0
mse = 0.0
for blob in data_loader:
im_data = blob['data']
gt_data = blob['gt_density']
img_map = net(im_data, gt_data)
img_map = img_map.data.cpu().numpy()
real_count = np.sum(gt_data)
pre_count = np.sum(img_map)
mae += abs(real_count - pre_count)
mse += ((real_count - pre_count) * (real_count - pre_count))
mae = mae / data_loader.get_num_samples()
mse = np.sqrt(mse / data_loader.get_num_samples())
return mae, mse
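# A minimal evaluation sketch; the paths and checkpoint name are assumptions,
# not part of the original script. It builds an ImgDataLoader over a test
# split and scores a trained .h5 checkpoint with evaluate_model().
def _example_evaluation():
    data_loader = ImgDataLoader('data/test/images', 'data/test/density_csv',
                                shuffle=False, gt_downsample=True, pre_load=True)
    mae, mse = evaluate_model('saved_models/mcnn_shtechA.h5', data_loader)
    print('MAE: %0.2f, MSE: %0.2f' % (mae, mse))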
|
StarcoderdataPython
|
11352069
|
friends = ["Gaurav", "Kritika", "Sachin", "Batra", "Taya", "Taya"]
print("List friends: " + str(friends))
friends.sort()
print("Sorted friends: " + str(friends)) # sort does not work on complex list while reverse does
numbers = [1, 2, 3, 4, 5]
print("List numbers: " + str(numbers))
friends.extend(numbers)
print("Extend numbers to friends: " + str(friends))
friends.reverse()
print("Reverse sort friends: " + str(friends))
friends.append("Me")
print("Append Me to friends: " + str(friends))
friends.insert(0, "Alok")
print("Insert Alok at the beginning of friends: " + str(friends))
friends.remove("Me")
print("Remove Me from friends: " + str(friends))
friends.pop()
print("Pop last element from friends" + str(friends))
print("Index of Taya: " + str(friends.index("Taya")))
print("Count of Taya: " + str(friends.count("Taya")))
friends2 = friends.copy()
print("Copy for friends, friends2: ", str(friends2))
friends.clear()
print("Cleared friends list: " + str(friends))
|
StarcoderdataPython
|
3520824
|
<reponame>bozcani/yolov3-tensorflow2
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.regularizers import l2
import time
from absl import app, flags, logging
def create_model(size, yolo_anchors, yolo_anchor_masks, classes, training=False):
inputs = tf.keras.Input(shape=(size, size, 3))
outputs = YoloV3(size, yolo_anchors, yolo_anchor_masks, classes)(inputs, training=training)
return tf.keras.Model(inputs, outputs, name='yolov3')
class DarknetConv(layers.Layer):
def __init__(self, filters, size, strides=1, is_batch_norm=True):
super(DarknetConv, self).__init__()
self.filters = filters
self.size = size
self.is_batch_norm = is_batch_norm
self.strides = strides
self.zeropadding = layers.ZeroPadding2D(((1,0),(1,0)))
self.conv2d = layers.Conv2D(filters, size,
strides, padding = ('same' if strides == 1 else 'valid'),
use_bias = not is_batch_norm, kernel_regularizer=l2(0.0005))
self.batchnorm = layers.BatchNormalization(momentum = 0.9, epsilon = 1e-05)
self.leakyrelu = layers.LeakyReLU(alpha=0.1)
def call(self, x, training=True):
if self.strides > 1:
x = self.zeropadding(x)
x = self.conv2d(x)
if self.is_batch_norm :
x = self.batchnorm(x)
x = self.leakyrelu(x)
return x
class DarknetResidual(layers.Layer):
def __init__(self, filters):
super(DarknetResidual, self).__init__()
self.filters = filters
self.darknetconv1 = DarknetConv(filters, 1)
self.darknetconv2 = DarknetConv(filters * 2, 3)
self.add = layers.Add()
def call(self, x, training=True):
shortcut = x
x = self.darknetconv1(x, training=training)
x = self.darknetconv2(x, training=training)
x = self.add([shortcut, x])
return x
class DarknetBlock(layers.Layer):
def __init__(self, filters, blocks):
super(DarknetBlock, self).__init__()
self.filters = filters
self.blocks = blocks
self.darknetconv = DarknetConv(filters, 3, strides=2)
self.darknetblocks = [DarknetResidual(filters//2) for _ in range(blocks)]
def call(self, x, training=True):
x = self.darknetconv(x, training=training)
for i in range(self.blocks):
x = self.darknetblocks[i](x, training=training)
return x
class Darknet(tf.keras.Model):
def __init__(self, name, **kwargs):
super(Darknet, self).__init__(name=name, **kwargs)
#self.name = name
self.conv1 = DarknetConv(32, 3)
self.block1 = DarknetBlock(64, 1)
self.block2 = DarknetBlock(128, 2)
self.block3 = DarknetBlock(256, 8)
self.block4 = DarknetBlock(512, 8)
self.block5 = DarknetBlock(1024, 4)
def call(self, x, training=True):
x = self.conv1(x, training=training)
x = self.block1(x, training=training)
x = self.block2(x, training=training)
x = route_1 = self.block3(x, training=training)
x = route_2 = self.block4(x, training=training)
x = self.block5(x, training=training)
return route_1, route_2, x
class YoloConv(layers.Layer):
def __init__(self, filters, is_first=True):
super(YoloConv, self).__init__()
self.is_first = is_first
if not self.is_first :
self.conv1 = DarknetConv(filters, 1)
self.upsampling = layers.UpSampling2D()
self.concat = layers.Concatenate()
self.conv2 = DarknetConv(filters, 1)
self.conv3 = DarknetConv(filters * 2, 3)
self.conv4 = DarknetConv(filters, 1)
self.conv5 = DarknetConv(filters * 2, 3)
self.conv6 = DarknetConv(filters, 1)
def call(self, x, training=True):
if not self.is_first :
x, x_skip = x
x = self.conv1(x, training=training)
x = self.upsampling(x)
x = self.concat([x, x_skip])
x = self.conv2(x, training=training)
x = self.conv3(x, training=training)
x = self.conv4(x, training=training)
x = self.conv5(x, training=training)
x = self.conv6(x, training=training)
return x
class YoloOutput(layers.Layer):
def __init__(self, filters, anchors, classes ):
super(YoloOutput, self).__init__()
self.filters = filters
self.anchors = anchors
self.classes = classes
self.darkconv = DarknetConv(filters*2, 3)
self.biasconv = DarknetConv(anchors * (classes + 5), 1, is_batch_norm=False)
def call(self, x, training=True):
x = self.darkconv(x, training=training)
x = self.biasconv(x, training=training)
x = tf.reshape(x, (-1, tf.shape(x)[1], tf.shape(x)[2], self.anchors, self.classes+5))
return x
class YoloV3(tf.keras.Model):
def __init__(self, size, anchors, anchor_masks, classes ):
super(YoloV3, self).__init__()
self.size = size
self.anchors = anchors / size
self.anchor_masks = anchor_masks
self.classes = classes
self.darknet53 = Darknet(name='yolo_darknet')
self.yoloconv1 = YoloConv(512)
self.output1 = YoloOutput(512, len(self.anchor_masks[0]), classes)
self.yoloconv2 = YoloConv(256, is_first=False)
self.output2 = YoloOutput(256, len(self.anchor_masks[1]), classes)
self.yoloconv3 = YoloConv(128, is_first=False)
self.output3 = YoloOutput(128, len(self.anchor_masks[2]), classes)
'''
self.yolo_blocks = []
self.yolo_outputs = []
self.yolo_num_layers = [512, 256, 128]
for i in range(len(self.yolo_num_layers)):
self.yolo_blocks.append(YoloConv(self.yolo_num_layers[i]))
self.yolo_outputs.append(YoloOutput(self.yolo_num_layers[i], len(self.anchor_masks[i]), classes)
'''
def call(self, x, training=True):
route_1, route_2, x = self.darknet53(x, training=training)
x = self.yoloconv1(x, training=training)
output_0 = self.output1(x, training=training)
x = self.yoloconv2((x, route_2), training=training)
output_1 = self.output2(x, training=training)
x = self.yoloconv3((x, route_1), training=training)
output_2 = self.output3(x, training=training)
'''
outputs = []
for i in range(len(self.yolo_num_layers)):
x = yolo_blocks[i](x, training=training)
outputs[i] =
'''
boxes_0 = self.yolo_boxes(output_0, self.anchors[self.anchor_masks[0]], self.classes)
boxes_1 = self.yolo_boxes(output_1, self.anchors[self.anchor_masks[1]], self.classes)
boxes_2 = self.yolo_boxes(output_2, self.anchors[self.anchor_masks[2]], self.classes)
if training :
            print('training true')
return (boxes_0, boxes_1, boxes_2)
else:
            print('training false')
pred_0 = tf.reshape(boxes_0, (tf.shape(boxes_0)[0], len(self.anchor_masks[0]) * tf.shape(boxes_0)[1] * tf.shape(boxes_0)[2], 5 + self.classes))
pred_1 = tf.reshape(boxes_1, (tf.shape(boxes_1)[0], len(self.anchor_masks[1]) * tf.shape(boxes_1)[1] * tf.shape(boxes_1)[2], 5 + self.classes))
pred_2 = tf.reshape(boxes_2, (tf.shape(boxes_2)[0], len(self.anchor_masks[2]) * tf.shape(boxes_2)[1] * tf.shape(boxes_2)[2], 5 + self.classes))
boxes = tf.concat([pred_0, pred_1, pred_2], axis=1)
return self.yolo_nms(boxes, self.anchors, self.anchor_masks, self.classes)
def yolo_boxes(self, pred, anchors, classes):
grid_size = tf.shape(pred)[1]
#pred = tf.reshape(pred, (tf.shape(pred)[0], len(anchors) * grid_size * grid_size, 5 + classes))
box_centers, box_wh, confidence, class_probs = tf.split(pred, (2, 2, 1, classes), axis=-1)
box_centers = tf.sigmoid(box_centers)
confidence = tf.sigmoid(confidence)
class_probs = tf.sigmoid(class_probs)
pred_box = tf.concat((box_centers, box_wh), axis=-1)
grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
box_centers = (box_centers + tf.cast(grid, tf.float32)) / tf.cast(grid_size, tf.float32)
box_wh = tf.exp(box_wh) * anchors
box_x1y1 = box_centers - box_wh /2
box_x2y2 = box_centers + box_wh /2
bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
pred = tf.concat([bbox, confidence, class_probs], axis=-1)
print(pred.shape)
#logging.info(pred.shape)
#pred = tf.reshape(pred, (tf.shape(pred)[0], len(anchors) * grid_size * grid_size, 5 + classes))
return pred
def yolo_nms(self, boxes, anchors, masks, classes):
bbox, confs, class_probs = tf.split(boxes, [4,1,-1], axis=-1)
scores = confs * class_probs
'''
logging.info(bbox.shape)
bbox = tf.reshape(bbox, [-1, 4])
scores = tf.reshape(scores, [-1, classes])
mask = tf.greater_equal(scores, tf.constant(0.5))
boxes_list, label_list, score_list = [], [], []
for i in range(classes):
filter_boxes = tf.boolean_mask(bbox, mask[:,i])
filter_scores = tf.boolean_mask(scores[:,i], mask[:,i])
nms_indices = tf.image.non_max_suppression(boxes=filter_boxes,
scores=filter_scores,
max_output_size=tf.constant(50),
iou_threshold=tf.constant(0.5), name='nms_indices')
label_list.append(tf.ones_like(tf.gather(filter_scores, nms_indices), 'int32')*i)
boxes_list.append(tf.gather(filter_boxes, nms_indices))
score_list.append(tf.gather(filter_scores, nms_indices))
#print("=> nms time=%.2f ms" %(1000*(time.time()-start)))
boxes = tf.concat(boxes_list, axis=0)
scores = tf.concat(score_list, axis=0)
label = tf.concat(label_list, axis=0)
above 864ms
2000ms
'''
start = time.time()
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),
scores=tf.reshape(scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),
max_output_size_per_class=10,
max_total_size=50,
iou_threshold=0.5,
score_threshold=0.5
)
logging.info("=> combined_non_max_suppression time=%.2f ms" %(1000*(time.time()-start)))
return boxes, scores, classes, valid_detections
#return boxes, scores, label
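# --- Minimal construction sketch (not part of the original file). It only
# illustrates the shapes the YoloV3 constructor expects; the anchor values are
# the standard YOLOv3 COCO anchors and the class count of 80 is an assumption,
# not something defined in this module.
if __name__ == '__main__':
    import numpy as np
    yolo_anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),
                             (59, 119), (116, 90), (156, 198), (373, 326)], np.float32)
    yolo_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])
    model = YoloV3(size=416, anchors=yolo_anchors, anchor_masks=yolo_anchor_masks, classes=80)
    dummy = tf.random.uniform((1, 416, 416, 3))
    raw_outputs = model(dummy, training=True)  # three raw prediction scales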
|
StarcoderdataPython
|
4972720
|
"""Terraform module."""
import logging
import os
import re
import subprocess
import sys
from future.utils import viewitems
import hcl
from send2trash import send2trash
from . import RunwayModule, run_module_command, warn_on_skipped_configs
from ..util import change_dir, which
LOGGER = logging.getLogger('runway')
def gen_backend_tfvars_files(environment, region):
"""Generate possible Terraform backend tfvars filenames."""
return [
"backend-%s-%s.tfvars" % (environment, region),
"backend-%s.tfvars" % environment,
"backend-%s.tfvars" % region,
"backend.tfvars"
]
def get_backend_tfvars_file(path, environment, region):
"""Determine Terraform backend file."""
backend_filenames = gen_backend_tfvars_files(environment, region)
for name in backend_filenames:
if os.path.isfile(os.path.join(path, name)):
return name
return backend_filenames[-1] # file not found; fallback to last item
def gen_workspace_tfvars_files(environment, region):
"""Generate possible Terraform workspace tfvars filenames."""
return [
# Give preference to explicit environment-region files
"%s-%s.tfvars" % (environment, region),
# Fallback to environment name only
"%s.tfvars" % environment
]
def get_workspace_tfvars_file(path, environment, region):
"""Determine Terraform workspace-specific tfvars file name."""
for name in gen_workspace_tfvars_files(environment, region):
if os.path.isfile(os.path.join(path, name)):
return name
return "%s.tfvars" % environment # fallback to generic name
def remove_stale_tf_config(path, backend_tfvars_file):
"""Ensure TF is ready for init.
If deploying a TF module to multiple regions (or any scenario requiring
multiple backend configs), switching the backend will cause TF to
compare the old and new backends. This will frequently cause an access
error as the creds/role for the new backend won't always have access to
the old one.
This method compares the defined & initialized backend configs and
trashes the terraform directory if they're out of sync.
"""
terraform_dir = os.path.join(path, '.terraform')
tfstate_filepath = os.path.join(terraform_dir, 'terraform.tfstate')
if os.path.isfile(tfstate_filepath):
LOGGER.debug('Comparing previous & desired Terraform backend '
'configs')
with open(tfstate_filepath, 'r') as fco:
state_config = hcl.load(fco)
if state_config.get('backend') and state_config['backend'].get('config'): # noqa
backend_tfvars_filepath = os.path.join(path,
backend_tfvars_file)
with open(backend_tfvars_filepath, 'r') as fco:
backend_config = hcl.load(fco)
if any(state_config['backend']['config'][key] != value for (key, value) in viewitems(backend_config)): # noqa pylint: disable=line-too-long
LOGGER.info("Desired and previously initialized TF "
"backend config is out of sync; trashing "
"local TF state directory %s",
terraform_dir)
send2trash(terraform_dir)
def run_terraform_init(module_path, backend_file_name, env_name, env_region,
env_vars):
"""Run Terraform init."""
init_cmd = ['terraform', 'init']
if os.path.isfile(os.path.join(module_path, backend_file_name)): # noqa
LOGGER.info('Using backend config file %s',
backend_file_name)
remove_stale_tf_config(module_path, backend_file_name)
run_module_command(
cmd_list=init_cmd + ['-backend-config=%s' % backend_file_name], # noqa pylint: disable=line-too-long
env_vars=env_vars
)
else:
LOGGER.info(
"No backend tfvars file found -- looking for one "
"of \"%s\" (proceeding with bare 'terraform "
"init')",
', '.join(gen_backend_tfvars_files(
env_name,
env_region)))
run_module_command(cmd_list=init_cmd,
env_vars=env_vars)
def run_tfenv_install(path, env_vars):
"""Ensure appropriate Terraform version is installed."""
if which('tfenv') is None:
LOGGER.error('"tfenv" not found (and a Terraform version is '
'specified in .terraform-version). Please install '
'tfenv.')
sys.exit(1)
with change_dir(path):
subprocess.check_call(['tfenv', 'install'], env=env_vars)
class Terraform(RunwayModule):
"""Terraform Runway Module."""
def run_terraform(self, command='plan'): # noqa pylint: disable=too-many-branches,too-many-statements
"""Run Terraform."""
response = {'skipped_configs': False}
tf_cmd = ['terraform', command]
if not which('terraform'):
LOGGER.error('"terraform" not found in path or is not executable; '
'please ensure it is installed correctly.')
sys.exit(1)
if command == 'destroy':
tf_cmd.append('-force')
elif command == 'apply':
if 'CI' in self.context.env_vars:
tf_cmd.append('-auto-approve=true')
else:
tf_cmd.append('-auto-approve=false')
workspace_tfvars_file = get_workspace_tfvars_file(self.path,
self.context.env_name, # noqa
self.context.env_region) # noqa
backend_tfvars_file = get_backend_tfvars_file(self.path,
self.context.env_name,
self.context.env_region)
workspace_tfvar_present = os.path.isfile(
os.path.join(self.path, workspace_tfvars_file)
)
if workspace_tfvar_present:
tf_cmd.append("-var-file=%s" % workspace_tfvars_file)
if isinstance(self.options.get('environments',
{}).get(self.context.env_name),
dict):
for (key, val) in self.options['environments'][self.context.env_name].items(): # noqa
tf_cmd.extend(['-var', "%s=%s" % (key, val)])
if self.options.get('environments', {}).get(self.context.env_name) or (
workspace_tfvar_present):
LOGGER.info("Preparing to run terraform %s on %s...",
command,
os.path.basename(self.path))
if os.path.isfile(os.path.join(self.path,
'.terraform-version')):
run_tfenv_install(self.path, self.context.env_vars)
with change_dir(self.path):
if not os.path.isdir(os.path.join(self.path,
'.terraform')):
LOGGER.info('.terraform directory missing; running '
'"terraform init"...')
run_terraform_init(
module_path=self.path,
backend_file_name=backend_tfvars_file,
env_name=self.context.env_name,
env_region=self.context.env_region,
env_vars=self.context.env_vars
)
LOGGER.debug('Checking current Terraform workspace...')
current_tf_workspace = subprocess.check_output(
['terraform',
'workspace',
'show'],
env=self.context.env_vars
).strip().decode()
if current_tf_workspace != self.context.env_name:
LOGGER.info("Terraform workspace current set to %s; "
"switching to %s...",
current_tf_workspace,
self.context.env_name)
LOGGER.debug('Checking available Terraform '
'workspaces...')
available_tf_envs = subprocess.check_output(
['terraform', 'workspace', 'list'],
env=self.context.env_vars
).decode()
if re.compile("^[*\\s]\\s%s$" % self.context.env_name,
re.M).search(available_tf_envs):
run_module_command(
cmd_list=['terraform', 'workspace', 'select',
self.context.env_name],
env_vars=self.context.env_vars
)
else:
LOGGER.info("Terraform workspace %s not found; "
"creating it...",
self.context.env_name)
run_module_command(
cmd_list=['terraform', 'workspace', 'new',
self.context.env_name],
env_vars=self.context.env_vars
)
LOGGER.info('Running "terraform init" after workspace '
'creation/switch...')
run_terraform_init(
module_path=self.path,
backend_file_name=backend_tfvars_file,
env_name=self.context.env_name,
env_region=self.context.env_region,
env_vars=self.context.env_vars
)
if 'SKIP_TF_GET' not in self.context.env_vars:
LOGGER.info('Executing "terraform get" to update remote '
'modules')
run_module_command(
cmd_list=['terraform', 'get', '-update=true'],
env_vars=self.context.env_vars
)
else:
LOGGER.info('Skipping "terraform get" due to '
'"SKIP_TF_GET" environment variable...')
LOGGER.info("Running Terraform %s on %s (\"%s\")",
command,
os.path.basename(self.path),
" ".join(tf_cmd))
run_module_command(cmd_list=tf_cmd,
env_vars=self.context.env_vars)
else:
response['skipped_configs'] = True
LOGGER.info("Skipping Terraform %s of %s",
command,
os.path.basename(self.path))
LOGGER.info(
"(no tfvars file for this environment/region found -- looking "
"for one of \"%s\")",
', '.join(gen_workspace_tfvars_files(
self.context.env_name,
self.context.env_region)))
return response
def plan(self):
"""Run tf plan."""
result = self.run_terraform(command='plan')
warn_on_skipped_configs(result, self.context.env_name,
self.context.env_vars)
def deploy(self):
"""Run tf apply."""
result = self.run_terraform(command='apply')
warn_on_skipped_configs(result, self.context.env_name,
self.context.env_vars)
def destroy(self):
"""Run tf destroy."""
result = self.run_terraform(command='destroy')
warn_on_skipped_configs(result, self.context.env_name,
self.context.env_vars)
|
StarcoderdataPython
|
278681
|
<reponame>michael-golden/pulumi-aws
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Stack(pulumi.CustomResource):
capabilities: pulumi.Output[list]
"""
A list of capabilities.
Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, or `CAPABILITY_AUTO_EXPAND`
"""
disable_rollback: pulumi.Output[bool]
"""
Set to true to disable rollback of the stack if stack creation failed.
Conflicts with `on_failure`.
"""
iam_role_arn: pulumi.Output[str]
"""
The ARN of an IAM role that AWS CloudFormation assumes to create the stack. If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.
"""
name: pulumi.Output[str]
"""
Stack name.
"""
notification_arns: pulumi.Output[list]
"""
A list of SNS topic ARNs to publish stack related events.
"""
on_failure: pulumi.Output[str]
"""
Action to be taken if stack creation fails. This must be
one of: `DO_NOTHING`, `ROLLBACK`, or `DELETE`. Conflicts with `disable_rollback`.
"""
outputs: pulumi.Output[dict]
"""
A map of outputs from the stack.
"""
parameters: pulumi.Output[dict]
"""
A map of Parameter structures that specify input parameters for the stack.
"""
policy_body: pulumi.Output[str]
"""
Structure containing the stack policy body.
Conflicts w/ `policy_url`.
"""
policy_url: pulumi.Output[str]
"""
Location of a file containing the stack policy.
Conflicts w/ `policy_body`.
"""
tags: pulumi.Output[dict]
"""
A list of tags to associate with this stack.
"""
template_body: pulumi.Output[str]
"""
Structure containing the template body (max size: 51,200 bytes).
"""
template_url: pulumi.Output[str]
"""
Location of a file containing the template body (max size: 460,800 bytes).
"""
timeout_in_minutes: pulumi.Output[float]
"""
The amount of time that can pass before the stack status becomes `CREATE_FAILED`.
"""
def __init__(__self__, resource_name, opts=None, capabilities=None, disable_rollback=None, iam_role_arn=None, name=None, notification_arns=None, on_failure=None, parameters=None, policy_body=None, policy_url=None, tags=None, template_body=None, template_url=None, timeout_in_minutes=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a CloudFormation Stack resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
network = aws.cloudformation.Stack("network",
parameters={
"VPCCidr": "10.0.0.0/16",
},
template_body=\"\"\"{
"Parameters" : {
"VPCCidr" : {
"Type" : "String",
"Default" : "10.0.0.0/16",
"Description" : "Enter the CIDR block for the VPC. Default is 10.0.0.0/16."
}
},
"Resources" : {
"myVpc": {
"Type" : "AWS::EC2::VPC",
"Properties" : {
"CidrBlock" : { "Ref" : "VPCCidr" },
"Tags" : [
{"Key": "Name", "Value": "Primary_CF_VPC"}
]
}
}
}
}
\"\"\")
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] capabilities: A list of capabilities.
Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, or `CAPABILITY_AUTO_EXPAND`
:param pulumi.Input[bool] disable_rollback: Set to true to disable rollback of the stack if stack creation failed.
Conflicts with `on_failure`.
:param pulumi.Input[str] iam_role_arn: The ARN of an IAM role that AWS CloudFormation assumes to create the stack. If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.
:param pulumi.Input[str] name: Stack name.
:param pulumi.Input[list] notification_arns: A list of SNS topic ARNs to publish stack related events.
:param pulumi.Input[str] on_failure: Action to be taken if stack creation fails. This must be
one of: `DO_NOTHING`, `ROLLBACK`, or `DELETE`. Conflicts with `disable_rollback`.
:param pulumi.Input[dict] parameters: A map of Parameter structures that specify input parameters for the stack.
:param pulumi.Input[str] policy_body: Structure containing the stack policy body.
Conflicts w/ `policy_url`.
:param pulumi.Input[str] policy_url: Location of a file containing the stack policy.
Conflicts w/ `policy_body`.
:param pulumi.Input[dict] tags: A list of tags to associate with this stack.
:param pulumi.Input[str] template_body: Structure containing the template body (max size: 51,200 bytes).
:param pulumi.Input[str] template_url: Location of a file containing the template body (max size: 460,800 bytes).
:param pulumi.Input[float] timeout_in_minutes: The amount of time that can pass before the stack status becomes `CREATE_FAILED`.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['capabilities'] = capabilities
__props__['disable_rollback'] = disable_rollback
__props__['iam_role_arn'] = iam_role_arn
__props__['name'] = name
__props__['notification_arns'] = notification_arns
__props__['on_failure'] = on_failure
__props__['parameters'] = parameters
__props__['policy_body'] = policy_body
__props__['policy_url'] = policy_url
__props__['tags'] = tags
__props__['template_body'] = template_body
__props__['template_url'] = template_url
__props__['timeout_in_minutes'] = timeout_in_minutes
__props__['outputs'] = None
super(Stack, __self__).__init__(
'aws:cloudformation/stack:Stack',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, capabilities=None, disable_rollback=None, iam_role_arn=None, name=None, notification_arns=None, on_failure=None, outputs=None, parameters=None, policy_body=None, policy_url=None, tags=None, template_body=None, template_url=None, timeout_in_minutes=None):
"""
Get an existing Stack resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] capabilities: A list of capabilities.
Valid values: `CAPABILITY_IAM`, `CAPABILITY_NAMED_IAM`, or `CAPABILITY_AUTO_EXPAND`
:param pulumi.Input[bool] disable_rollback: Set to true to disable rollback of the stack if stack creation failed.
Conflicts with `on_failure`.
:param pulumi.Input[str] iam_role_arn: The ARN of an IAM role that AWS CloudFormation assumes to create the stack. If you don't specify a value, AWS CloudFormation uses the role that was previously associated with the stack. If no role is available, AWS CloudFormation uses a temporary session that is generated from your user credentials.
:param pulumi.Input[str] name: Stack name.
:param pulumi.Input[list] notification_arns: A list of SNS topic ARNs to publish stack related events.
:param pulumi.Input[str] on_failure: Action to be taken if stack creation fails. This must be
one of: `DO_NOTHING`, `ROLLBACK`, or `DELETE`. Conflicts with `disable_rollback`.
:param pulumi.Input[dict] outputs: A map of outputs from the stack.
:param pulumi.Input[dict] parameters: A map of Parameter structures that specify input parameters for the stack.
:param pulumi.Input[str] policy_body: Structure containing the stack policy body.
Conflicts w/ `policy_url`.
:param pulumi.Input[str] policy_url: Location of a file containing the stack policy.
Conflicts w/ `policy_body`.
:param pulumi.Input[dict] tags: A list of tags to associate with this stack.
:param pulumi.Input[str] template_body: Structure containing the template body (max size: 51,200 bytes).
:param pulumi.Input[str] template_url: Location of a file containing the template body (max size: 460,800 bytes).
:param pulumi.Input[float] timeout_in_minutes: The amount of time that can pass before the stack status becomes `CREATE_FAILED`.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["capabilities"] = capabilities
__props__["disable_rollback"] = disable_rollback
__props__["iam_role_arn"] = iam_role_arn
__props__["name"] = name
__props__["notification_arns"] = notification_arns
__props__["on_failure"] = on_failure
__props__["outputs"] = outputs
__props__["parameters"] = parameters
__props__["policy_body"] = policy_body
__props__["policy_url"] = policy_url
__props__["tags"] = tags
__props__["template_body"] = template_body
__props__["template_url"] = template_url
__props__["timeout_in_minutes"] = timeout_in_minutes
return Stack(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
StarcoderdataPython
|
3245974
|
# 016
# Ask the user if it is raining and convert their answer to lower
# case so it doesn’t matter what case they type it in. If they answer
# “yes”, ask if it is windy. If they answer “yes” to this second
# question, display the answer “It is too windy for an umbrella”,
# otherwise display the message “Take an umbrella”.
# If they did not answer yes to the first question, display the answer
# “Enjoy your day”.
import sys
yes_list = ['yes', 'y', '1']
no_list = ['no', 'n']
def input_checker(string):
while True:
try:
answer = input(string)
if answer.lower() not in yes_list and answer.lower() \
not in no_list:
print('Please enter yes or no!')
continue
elif answer.lower() in yes_list:
return True
else:
return False
except Exception as e:
print(e)
if __name__ == '__main__':
question = 'Is it raining? '
result = input_checker(question)
if not result:
print('Enjoy your day!')
sys.exit()
question = 'Is it windy? '
result = input_checker(question)
if result:
print('It is too windy for an umbrella.')
sys.exit()
print('Take an umbrella')
|
StarcoderdataPython
|
6492942
|
import random
import numba
import numpy as np
import torch
from collections import defaultdict
import random
import logging
logger = logging.getLogger(__name__)
# Taken and modified from https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow/blob/master/contents/5.2_Prioritized_Replay_DQN/RL_brain.py
@numba.jit(nopython=True)
def _update(tree, tree_index, priority, total_priority):
# Change = new priority score - former priority score
change = priority - tree[tree_index]
tree[tree_index] = priority
# then propagate the change through tree
# this method is faster than the recursive loop
while tree_index != 0:
tree_index = (tree_index - 1) // 2
tree[tree_index] += change
# assert total_priority > 0
return tree
@numba.jit(nopython=True)
def _get_leaf_ids(n, tree, priority_segment):
idx = np.empty((n,), dtype=np.uint32)
for i in range(n):
# A value is uniformly sample from each range
value = priority_segment * i + random.random() * priority_segment
# print("value:", value)
# Experience index that correspond to each value is retrieved
idx[i] = _get_leaf_index(tree, value)
return idx
@numba.jit(nopython=True)
def _get_leaf_index(tree, v):
parent_index = 0
while True:
left_child_index = 2 * parent_index + 1
right_child_index = left_child_index + 1
# If we reach bottom, end the search
if left_child_index >= len(tree):
leaf_index = parent_index
break
else: # downward search, always search for a higher priority node
if v <= tree[left_child_index]:
parent_index = left_child_index
else:
v -= tree[left_child_index]
parent_index = right_child_index
return leaf_index
class SumTree:
# Here we initialize the tree with all nodes = 0, and initialize the data with all values = 0
def __init__(self, capacity):
self.data_pointer = 0
# Number of leaf nodes (final nodes) that contains experiences
self.capacity = capacity
self.num_items = 0
# Generate the tree with all nodes values = 0
# To understand this calculation (2 * capacity - 1) look at the schema below
# Remember we are in a binary node (each node has max 2 children) so 2x size of leaf (capacity) - 1 (root node)
# Parent nodes = capacity - 1
# Leaf nodes = capacity
self.tree = np.zeros(2 * capacity - 1)
# Contains the experiences (so the size of data is capacity)
self.data = np.zeros(capacity, dtype=object)
def add(self, priority, data):
# Look at what index we want to put the experience
tree_index = self.data_pointer + self.capacity - 1
""" tree:
0
/ \
0 0
/ \ / \
tree_index 0 0 0 We fill the leaves from left to right
"""
# Update data frame
self.data[self.data_pointer] = data
# Update the leaf
self.update(tree_index, priority)
# Add 1 to data_pointer
self.data_pointer += 1
if self.data_pointer >= self.capacity: # If we're above the capacity, we go back to first index (we overwrite)
self.data_pointer = 0
else:
self.num_items += 1
def update(self, tree_index, priority):
self.tree = _update(self.tree, tree_index, priority, self.total_priority)
# @numba.jit(nopython=True)
def get_leaf(self, v):
leaf_index = _get_leaf_index(self.tree, v)
data_index = leaf_index - self.capacity + 1
# assert isinstance(self.data[data_index], dict)
return leaf_index, self.tree[leaf_index], self.data[data_index]
@property
def total_priority(self):
return self.tree[0] # Returns the root node
class PriorityReplayBuffer:
"""
A lot is going on here, which needs some explaining:
1. We want to use priority replay to draw more often from memories/transitions, which have a higher proportion of
information.
2. Memories are weighted according to the temporal difference error. Naively implementing this would be inefficient
(e.g. sorting the array by weights for example) -> SumTree helps here
3. Due to the weights introduced, we actually contradict our first reason to introduce a random replay buffer
decorrelation of memories. To avoid this, we borrow an idea from importance sampling.
4. When calculating the error between q targets and predicted q values, we assign the memories with a high
priority/high temporal difference error a lower weight. The rationale behind this: "Hey you will see this values quite often,
so do not overemphasis it too much.
"""
def __init__(self, max_length=None, dtype=torch.float, device=torch.device("cpu")):
# Making the tree
self.dtype = dtype
self.device = device
if max_length is None:
raise ValueError("PriorityReplayBuffer needs max length!")
self.tree = SumTree(max_length)
self.absolute_error_upper = 1.0 # clipped abs error
# stored as ( state, action, reward, next_state ) in SumTree
self.epsilon = 0.01 # Hyperparameter that we use to avoid some experiences to have 0 probability of being taken
self.alpha = 0.6 # Hyperparameter that we use to make a tradeoff between taking only exp with high priority and sampling randomly
self.beta = 0.4 # importance-sampling, from initial value increasing to 1
self.beta_increment = 0.001
def store(self, **experience):
# Find the max priority
max_priority = np.max(self.tree.tree[-self.tree.capacity :])
# If the max priority = 0 we can't put priority = 0 since this experience will never have a chance to be selected
# So we use a minimum priority
if max_priority == 0:
max_priority = self.absolute_error_upper
self.tree.add(max_priority, experience) # set the max priority for new priority
def sample(self, n):
priority_segment = self.tree.total_priority / n # priority segment
self.beta = np.min([1.0, self.beta + self.beta_increment])
start_idx = len(self.tree.tree) - self.tree.capacity
end_idx = start_idx + self.tree.num_items
min_prob = np.min(self.tree.tree[start_idx:end_idx]) / self.tree.total_priority # for later calculate ISweight
minibatch, b_idx, importance_sampling_weights = self.get_samples(min_prob, n, priority_segment)
# for key, value in minibatch.items(): # convert to arrays
# value = self.stackify(value)
# minibatch[key] = value
return b_idx, importance_sampling_weights, minibatch
def get_samples(self, min_prob, n, priority_segment):
leaf_idx = _get_leaf_ids(n, self.tree.tree, priority_segment)
data_idx = leaf_idx - self.tree.capacity + 1
priorities = self.tree.tree[leaf_idx]
data_batch = self.tree.data[data_idx]
assert 0 not in data_batch, "Wrong data in sample detected"
probs = priorities / self.tree.total_priority
importance_sampling_weights = np.power(probs / min_prob, -self.beta)
# assert isinstance(self.data[data_index], dict)
minibatch = {k: [dic[k] for dic in data_batch] for k in data_batch[0]}
# for x in data_batch:
# for key, value in x.items():
# minibatch[key].append(value)
return minibatch, leaf_idx, importance_sampling_weights
def batch_update(self, tree_idx, abs_errors):
"""'
must be called to update priorities
"""
abs_errors += self.epsilon # convert to abs and avoid 0
if isinstance(abs_errors, torch.Tensor):
abs_errors = abs_errors.cpu().numpy()
clipped_errors = np.minimum(abs_errors, self.absolute_error_upper)
ps = clipped_errors ** self.alpha
for ti, p in zip(tree_idx, ps):
self.tree.update(ti, p)
def __len__(self):
return self.tree.num_items
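# Minimal usage sketch (illustrative only; the keyword fields passed to store()
# are arbitrary names chosen for this example, and td_errors stands for the
# recomputed TD errors of the sampled batch):
#
#   buffer = PriorityReplayBuffer(max_length=10000)
#   buffer.store(state=s, action=a, reward=r, next_state=s2, done=False)
#   tree_idx, is_weights, batch = buffer.sample(32)
#   ...train on batch, weighting the loss by is_weights...
#   buffer.batch_update(tree_idx, td_errors)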
class History:
""" Generic replay buffer. Can accommodate arbitrary fields. """
def __init__(self, max_length=None, dtype=torch.float, device=torch.device("cpu")):
self.memories = None
self.max_length = max_length
self.data_pointer = 0
self.is_full = False
if max_length:
self.memories = np.empty((max_length,), dtype=object)
else:
self.memories = np.empty((128,), dtype=object) # double memory size each time limit is hit
self.device = device
self.dtype = dtype
def store(self, **kwargs):
self.memories[self.data_pointer] = kwargs
self.is_full = False
self.data_pointer += 1
if self.max_length is not None and self.data_pointer >= self.max_length:
self.data_pointer = 0
self.is_full = True
if self.data_pointer >= self.memories.shape[0] and self.max_length is None:
# self.memories.resize(self.memories.shape * 2) # Raises some ValueError
self.memories = np.resize(self.memories, self.memories.shape[0] * 2)
# @timeit
def sample(self, n):
idx = random.sample(range(len(self)), k=n)
data_batch = self.memories[idx]
minibatch = {k: [dic[k] for dic in data_batch] for k in data_batch[0]}
return idx, None, minibatch
def rollout(self, n=None):
""" When n is not None, returns only the last n entries """
data_batch = self.memories[: len(self)] if n is None else self.memories[len(self) - n : len(self)]
minibatch = {k: [dic[k] for dic in data_batch] for k in data_batch[0]}
return minibatch
def __len__(self):
if self.max_length is None:
return self.data_pointer
else:
if self.is_full:
return self.max_length
else:
return self.data_pointer
def clear(self):
if self.max_length:
self.memories = np.empty((self.max_length,), dtype=object)
else:
self.memories = np.empty((128,), dtype=object) # double memory size each time limit is hit
self.data_pointer = 0
def __add__(self, other):
raise DeprecationWarning("Is not used anymore... I hope?")
assert list(self.memories.keys()) == list(other.memories.keys())
history = History(self.max_length)
history.memories = dict()
for key, val in self.memories.items():
history.memories[key] = val + other.memories[key]
return history
class SequentialHistory(History):
""" Generic replay buffer where each entry represents a sequence of events. Can accommodate arbitrary fields. """
def __init__(self, max_length=None, dtype=torch.float, device=torch.device("cpu")):
super().__init__(max_length=max_length, dtype=dtype, device=device)
self.current_sequence = dict()
def current_sequence_length(self):
if len(self.current_sequence) == 0:
return 0
else:
return len(self.current_sequence[list(self.current_sequence.keys())[0]])
def store(self, **kwargs):
# Store in temporary sequence buffer
if self.current_sequence_length() == 0: # Nothing saved in current sequence
for key, val in kwargs.items():
self.current_sequence[key] = [val]
self.current_sequence["first"] = [True]
else:
for key, val in kwargs.items():
self.current_sequence[key].append(val)
self.current_sequence["first"].append(False)
def flush(self):
""" Push current sequence to ("long-term") memory """
assert self.current_sequence_length() > 0
super().store(**self.current_sequence)
self.current_sequence = dict()
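# Usage sketch for the sequential buffer (the keyword fields are illustrative):
#
#   history = SequentialHistory(max_length=1000)
#   history.store(state=s, action=a, reward=r)   # called once per environment step
#   history.flush()                              # commit the finished episode
#   last_episodes = history.rollout(n=10)        # dict keyed by field, one entry per episode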
|
StarcoderdataPython
|
8045814
|
"""misc build utility functions"""
# Copyright (c) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import sys
import logging
from distutils import ccompiler
from distutils.sysconfig import customize_compiler
from pipes import quote
from subprocess import Popen, PIPE
pjoin = os.path.join
if sys.version_info[0] >= 3:
u = lambda x: x
else:
u = lambda x: x.decode('utf8', 'replace')
def customize_mingw(cc):
# strip -mno-cygwin from mingw32 (Python Issue #12641)
for cmd in [cc.compiler, cc.compiler_cxx, cc.compiler_so, cc.linker_exe, cc.linker_so]:
if '-mno-cygwin' in cmd:
cmd.remove('-mno-cygwin')
# remove problematic msvcr90
if 'msvcr90' in cc.dll_libraries:
cc.dll_libraries.remove('msvcr90')
def get_compiler(compiler, **compiler_attrs):
"""get and customize a compiler"""
if compiler is None or isinstance(compiler, str):
cc = ccompiler.new_compiler(compiler=compiler)
# customize_compiler(cc)
if cc.compiler_type == 'mingw32':
customize_mingw(cc)
else:
cc = compiler
for name, val in compiler_attrs.items():
setattr(cc, name, val)
return cc
def get_output_error(cmd):
"""Return the exit status, stdout, stderr of a command"""
if not isinstance(cmd, list):
cmd = [cmd]
logging.debug("Running: %s", ' '.join(map(quote, cmd)))
try:
result = Popen(cmd, stdout=PIPE, stderr=PIPE)
except IOError as e:
return -1, u(''), u('Failed to run %r: %r' % (cmd, e))
so, se = result.communicate()
# unicode:
so = so.decode('utf8', 'replace')
se = se.decode('utf8', 'replace')
return result.returncode, so, se
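# Example (the command is hypothetical; it only shows how the returned triple is used):
#
#   rc, so, se = get_output_error(['pkg-config', '--modversion', 'libzmq'])
#   if rc != 0:
#       logging.error("pkg-config failed: %s", se)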
|
StarcoderdataPython
|
9691496
|
<gh_stars>1-10
import unittest, doctest
import test_ini
import test_misc
import test_fuzz
import test_compat
import test_unicode
from holland.backup.mysqldump.util import config
from holland.backup.mysqldump.util import ini
class suite(unittest.TestSuite):
def __init__(self):
unittest.TestSuite.__init__(self, [
doctest.DocTestSuite(config),
doctest.DocTestSuite(ini),
test_ini.suite(),
test_misc.suite(),
test_fuzz.suite(),
test_compat.suite(),
test_unicode.suite(),
])
|
StarcoderdataPython
|
6533840
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class OerhoernchenscrapyItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
project_key = scrapy.Field()
filename = scrapy.Field()
project_url = scrapy.Field() #or key?
page_url = scrapy.Field()
meta_title = scrapy.Field()
og_title = scrapy.Field()
meta_keywords = scrapy.Field()
meta_description = scrapy.Field()
a_rel_license = scrapy.Field()
link_rel_license = scrapy.Field()
link_rel_copyright = scrapy.Field()
manual_License_override = scrapy.Field() # must be converted in DB import
content = scrapy.Field()
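# Sketch of how a spider callback might populate this item (the response selectors
# are hypothetical; only the field names come from the class above):
#
#   item = OerhoernchenscrapyItem()
#   item['page_url'] = response.url
#   item['meta_title'] = response.css('title::text').get()
#   item['meta_description'] = response.css('meta[name="description"]::attr(content)').get()
#   yield item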
|
StarcoderdataPython
|
6640659
|
from eth_abi import encode_single, decode_single
from eth_utils import function_signature_to_4byte_selector
def parse_signature(signature):
"""
Breaks 'func(address)(uint256)' into ['func', '(address)', '(uint256)']
"""
parts = []
stack = []
start = 0
for end, letter in enumerate(signature):
if letter == '(':
stack.append(letter)
if not parts:
parts.append(signature[start:end])
start = end
if letter == ')':
stack.pop()
if not stack: # we are only interested in outermost groups
parts.append(signature[start:end + 1])
start = end + 1
return parts
class Signature:
def __init__(self, signature):
self.signature = signature
self.parts = parse_signature(signature)
self.input_types = self.parts[1]
self.output_types = self.parts[2]
self.function = ''.join(self.parts[:2])
self.fourbyte = function_signature_to_4byte_selector(self.function)
def encode_data(self, args=None):
return self.fourbyte + encode_single(self.input_types, args) if args else self.fourbyte
def decode_data(self, output):
return decode_single(self.output_types, output)
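# Minimal usage sketch (the ERC-20 balanceOf signature and the zero address are
# placeholders used purely for illustration):
#
#   sig = Signature('balanceOf(address)(uint256)')
#   calldata = sig.encode_data(('0x' + '00' * 20,))   # 4-byte selector + ABI-encoded args
#   # given `raw`, the bytes returned by an eth_call:
#   balance = sig.decode_data(raw)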
|
StarcoderdataPython
|
314281
|
import time
import board
import busio
uart = busio.UART(board.GP0, board.GP1, baudrate=9600)
while True:
# await incoming message
bytes_waiting = uart.in_waiting
if bytes_waiting:
incoming_msg = uart.readline()
print(incoming_msg)
# re-transmit
uart.write(incoming_msg)
|
StarcoderdataPython
|
11212002
|
<filename>src/python/magnum/test/test_trade.py
#
# This file is part of Magnum.
#
# Copyright © 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019
# <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import os
import sys
import unittest
from corrade import pluginmanager
from magnum import *
from magnum import trade
class ImageData(unittest.TestCase):
def test(self):
# The only way to get an image instance is through a manager
importer = trade.ImporterManager().load_and_instantiate('StbImageImporter')
importer.open_file(os.path.join(os.path.dirname(__file__), "rgb.png"))
image = importer.image2d(0)
self.assertFalse(image.is_compressed)
self.assertEqual(image.storage.alignment, 1) # libPNG has 4 tho
self.assertEqual(image.format, PixelFormat.RGB8_UNORM)
self.assertEqual(image.pixel_size, 3)
self.assertEqual(image.size, Vector2i(3, 2))
# TODO: ugh, report as bytes, not chars
self.assertEqual(ord(image.pixels[1, 2, 2]), 181)
self.assertEqual(ord(image.data[9 + 6 + 2]), 181) # libPNG has 12 +
def test_compressed(self):
# The only way to get an image instance is through a manager
importer = trade.ImporterManager().load_and_instantiate('DdsImporter')
importer.open_file(os.path.join(os.path.dirname(__file__), "rgba_dxt1.dds"))
image = importer.image2d(0)
self.assertEqual(len(image.data), 8)
self.assertTrue(image.is_compressed)
# TODO: compressed properties
# No compressed-image-related APIs exposed ATM, so just verifying the
# uncompressed ones fail properly
with self.assertRaisesRegex(AttributeError, "image is compressed"):
image.storage
with self.assertRaisesRegex(AttributeError, "image is compressed"):
image.format
with self.assertRaisesRegex(AttributeError, "image is compressed"):
image.pixel_size
with self.assertRaisesRegex(AttributeError, "image is compressed"):
image.pixels
def test_convert_view(self):
# The only way to get an image instance is through a manager
importer = trade.ImporterManager().load_and_instantiate('StbImageImporter')
importer.open_file(os.path.join(os.path.dirname(__file__), "rgb.png"))
image = importer.image2d(0)
view = ImageView2D(image)
mutable_view = MutableImageView2D(image)
def test_convert_view_compressed(self):
# The only way to get an image instance is through a manager
importer = trade.ImporterManager().load_and_instantiate('DdsImporter')
importer.open_file(os.path.join(os.path.dirname(__file__), "rgba_dxt1.dds"))
image = importer.image2d(0)
# No compressed-image-related APIs exposed ATM, so just verifying the
# uncompressed ones fail properly
with self.assertRaisesRegex(RuntimeError, "image is compressed"):
view = ImageView2D(image)
with self.assertRaisesRegex(RuntimeError, "image is compressed"):
mutable_view = MutableImageView2D(image)
class MeshData(unittest.TestCase):
def test(self):
# The only way to get a mesh instance is through a manager
importer = trade.ImporterManager().load_and_instantiate('TinyGltfImporter')
importer.open_file(os.path.join(os.path.dirname(__file__), 'mesh.glb'))
mesh = importer.mesh3d(0)
self.assertEqual(mesh.primitive, MeshPrimitive.TRIANGLES)
# TODO: test more, once it's exposed
class Importer(unittest.TestCase):
def test(self):
manager = trade.ImporterManager()
self.assertIn('StbImageImporter', manager.alias_list)
self.assertEqual(manager.load_state('StbImageImporter'), pluginmanager.LoadState.NOT_LOADED)
self.assertTrue(manager.load('StbImageImporter') & pluginmanager.LoadState.LOADED)
self.assertEqual(manager.unload('StbImageImporter'), pluginmanager.LoadState.NOT_LOADED)
with self.assertRaisesRegex(RuntimeError, "can't load plugin"):
manager.load('NonexistentImporter')
with self.assertRaisesRegex(RuntimeError, "can't unload plugin"):
manager.unload('NonexistentImporter')
def test_no_file_opened(self):
importer = trade.ImporterManager().load_and_instantiate('StbImageImporter')
self.assertFalse(importer.is_opened)
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.mesh2d_count
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.mesh3d_count
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.mesh2d_for_name('')
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.mesh3d_for_name('')
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.mesh2d_name(0)
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.mesh3d_name(0)
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.mesh2d(0)
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.mesh3d(0)
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.image1d_count
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.image2d_count
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.image3d_count
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.image1d_for_name('')
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.image2d_for_name('')
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.image3d_for_name('')
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.image1d_name(0)
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.image2d_name(0)
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.image3d_name(0)
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.image1d(0)
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.image2d(0)
with self.assertRaisesRegex(RuntimeError, "no file opened"):
importer.image3d(0)
def test_index_oob(self):
importer = trade.ImporterManager().load_and_instantiate('StbImageImporter')
importer.open_file(os.path.join(os.path.dirname(__file__), 'rgb.png'))
with self.assertRaises(IndexError):
importer.mesh2d_name(0)
with self.assertRaises(IndexError):
importer.mesh3d_name(0)
with self.assertRaises(IndexError):
importer.mesh2d(0)
with self.assertRaises(IndexError):
importer.mesh3d(0)
with self.assertRaises(IndexError):
importer.image1d_name(0)
with self.assertRaises(IndexError):
importer.image2d_name(1)
with self.assertRaises(IndexError):
importer.image3d_name(0)
with self.assertRaises(IndexError):
importer.image1d(0)
with self.assertRaises(IndexError):
importer.image2d(1)
with self.assertRaises(IndexError):
importer.image3d(0)
def test_open_failed(self):
importer = trade.ImporterManager().load_and_instantiate('StbImageImporter')
with self.assertRaisesRegex(RuntimeError, "opening nonexistent.png failed"):
importer.open_file('nonexistent.png')
with self.assertRaisesRegex(RuntimeError, "opening data failed"):
importer.open_data(b'')
def test_mesh3d(self):
# importer refcounting tested in image2d
importer = trade.ImporterManager().load_and_instantiate('TinyGltfImporter')
importer.open_file(os.path.join(os.path.dirname(__file__), 'mesh.glb'))
self.assertEqual(importer.mesh3d_count, 3)
self.assertEqual(importer.mesh3d_name(0), 'Non-indexed mesh')
self.assertEqual(importer.mesh3d_for_name('Non-indexed mesh'), 0)
mesh = importer.mesh3d(0)
self.assertEqual(mesh.primitive, MeshPrimitive.TRIANGLES)
def test_image2d(self):
manager = trade.ImporterManager()
manager_refcount = sys.getrefcount(manager)
# Importer references the manager to ensure it doesn't get GC'd before
# the plugin instances
importer = manager.load_and_instantiate('StbImageImporter')
self.assertIs(importer.manager, manager)
self.assertEqual(sys.getrefcount(manager), manager_refcount + 1)
importer.open_file(os.path.join(os.path.dirname(__file__), 'rgb.png'))
self.assertEqual(importer.image2d_count, 1)
self.assertEqual(importer.image2d_name(0), '')
self.assertEqual(importer.image2d_for_name(''), -1)
image = importer.image2d(0)
self.assertEqual(image.size, Vector2i(3, 2))
# Deleting the importer should decrease manager refcount again
del importer
self.assertEqual(sys.getrefcount(manager), manager_refcount)
def test_image2d_data(self):
importer = trade.ImporterManager().load_and_instantiate('StbImageImporter')
with open(os.path.join(os.path.dirname(__file__), "rgb.png"), 'rb') as f:
importer.open_data(f.read())
image = importer.image2d(0)
self.assertEqual(image.size, Vector2i(3, 2))
def test_image2d_failed(self):
importer = trade.ImporterManager().load_and_instantiate('StbImageImporter')
importer.open_data(b'bla')
with self.assertRaisesRegex(RuntimeError, "import failed"):
image = importer.image2d(0)
|
StarcoderdataPython
|
1834052
|
#!/usr/bin/env python
# coding: utf-8
#
# Author: <NAME>
# Email: yanpx (at) mail2.sysu.edu.cn
from __future__ import absolute_import, division, print_function
import os
import sys
sys.path.append('flownet2')
import torch
import torch.nn as nn
from torch.utils import data
from torchvision.transforms import functional as TF
import argparse
from tqdm import tqdm
from libs.datasets import get_transforms, get_datasets
from libs.networks.pseudo_label_generator import FGPLG
from libs.utils.pyt_utils import load_model
parser = argparse.ArgumentParser()
# Dataloading-related settings
parser.add_argument('--data', type=str, default='data/datasets/',
help='path to datasets folder')
parser.add_argument('--checkpoint', default='models/pseudo_label_generator_5.pth',
help='path to the pretrained checkpoint')
parser.add_argument('--dataset-config', default='config/datasets.yaml',
help='dataset config file')
parser.add_argument('--pseudo-label-folder', default='data/pseudo-labels',
help='location to save generated pseudo-labels')
parser.add_argument("--label_interval", default=5, type=int,
help="the interval of ground truth labels")
parser.add_argument("--frame_between_label_num", default=1, type=int,
help="the number of generated pseudo-labels in each interval")
parser.add_argument('-j', '--num_workers', default=1, type=int, metavar='N',
help='number of data loading workers.')
# Model settings
parser.add_argument('--size', default=448, type=int,
help='image size')
parser.add_argument('--os', default=16, type=int,
help='output stride.')
# FlowNet setting
parser.add_argument("--fp16", action="store_true",
help="Run model in pseudo-fp16 mode (fp16 storage fp32 math).")
parser.add_argument("--rgb_max", type=float, default=1.)
args = parser.parse_args()
cuda = torch.cuda.is_available()
device = torch.device("cuda" if cuda else "cpu")
if cuda:
torch.backends.cudnn.benchmark = True
current_device = torch.cuda.current_device()
print("Running on", torch.cuda.get_device_name(current_device))
else:
print("Running on CPU")
data_transforms = get_transforms(
input_size=(args.size, args.size),
image_mode=False
)
dataset = get_datasets(
name_list=["DAVIS2016", "FBMS", "VOS"],
split_list=["train", "train", "train"],
config_path=args.dataset_config,
root=args.data,
training=True, # provide labels
transforms=data_transforms['test'],
read_clip=True,
random_reverse_clip=False,
label_interval=args.label_interval,
frame_between_label_num=args.frame_between_label_num,
clip_len=args.frame_between_label_num+2
)
dataloader = data.DataLoader(
dataset=dataset,
batch_size=1, # only support 1 video clip
num_workers=args.num_workers,
shuffle=False,
drop_last=True
)
pseudo_label_generator = FGPLG(args=args, output_stride=args.os)
# load pretrained models
if os.path.exists(args.checkpoint):
print('Loading state dict from: {0}'.format(args.checkpoint))
pseudo_label_generator = load_model(model=pseudo_label_generator, model_file=args.checkpoint, is_restore=True)
else:
raise ValueError("Cannot find model file at {}".format(args.checkpoint))
pseudo_label_generator.to(device)
pseudo_label_folder = os.path.join(args.pseudo_label_folder, "{}_{}".format(args.frame_between_label_num, args.label_interval))
if not os.path.exists(pseudo_label_folder):
os.makedirs(pseudo_label_folder)
def generate_pseudo_label():
pseudo_label_generator.eval()
for data in tqdm(dataloader):
images = []
labels = []
for frame in data:
images.append(frame['image'].to(device))
labels.append(frame['label'].to(device) if 'label' in frame else None)
with torch.no_grad():
for i in range(1, args.frame_between_label_num+1):
pseudo_label = pseudo_label_generator.generate_pseudo_label(images[i], images[0], images[-1], labels[0], labels[-1])
labels[i] = torch.sigmoid(pseudo_label).detach()
# save pseudo-labels
for i, label_ in enumerate(labels):
for j, label in enumerate(label_.detach().cpu()):
dataset = data[i]['dataset'][j]
image_id = data[i]['image_id'][j]
pseudo_label_path = os.path.join(pseudo_label_folder, "{}/{}.png".format(dataset, image_id))
height = data[i]['height'].item()
width = data[i]['width'].item()
result = TF.to_pil_image(label)
result = result.resize((height, width))
dirname = os.path.dirname(pseudo_label_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
result.save(pseudo_label_path)
if __name__ == "__main__":
print("Generating pseudo-labels at {}".format(args.pseudo_label_folder))
print("label interval: {}".format(args.label_interval))
print("frame between label num: {}".format(args.frame_between_label_num))
generate_pseudo_label()
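# Example invocation (the script name and paths are placeholders; the flags are the
# argparse options defined above):
#
#   python generate_pseudo_labels.py \
#       --data data/datasets/ \
#       --checkpoint models/pseudo_label_generator_5.pth \
#       --label_interval 5 \
#       --frame_between_label_num 1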
|
StarcoderdataPython
|
6614830
|
import math
import abc

import numpy as np

# NOTE: constants, vector and dist are project-local modules that are not part of
# this file; constants is assumed to expose G (gravitational constant) and cons_m
# (a default body mass), and Vector is assumed to support basic arithmetic.
import constants
from vector import Vector
from dist import Dist


# Definition of a body
class Body():
    def __init__(self, pos, v=None, m=constants.cons_m):
        self.m = m
        self.pos = pos
        self.v = v if v is not None else Vector()
        self.a = Vector()

    @staticmethod
    def acceleration(mass, pos, epsilon):
        """Pairwise gravitational acceleration for all N bodies.

        mass is an (N, 1) array, pos an (N, 2) array of positions and epsilon a
        softening length that avoids the singularity at zero separation.
        """
        x = pos[:, 0:1]
        y = pos[:, 1:2]
        dx = x.T - x
        dy = y.T - y
        r3_inv = (dx**2 + dy**2 + epsilon**2) ** (-1.5)
        ax = constants.G * (dx * r3_inv) @ mass
        ay = constants.G * (dy * r3_inv) @ mass
        return np.hstack((ax, ay))

    # Leap-Frog integration: kick + drift + kick
    def move(self, dt, a):
        self.v += a * 0.5 * dt      # first half-kick
        self.pos += self.v * dt     # drift
        self.v += a * 0.5 * dt      # second half-kick (caller passes the re-evaluated acceleration)

    # E = E_kin + E_pot
    @staticmethod
    def energy(mass, v, pos):
        e_kin = 0.5 * np.sum(mass * v**2)
        x = pos[:, 0:1]
        y = pos[:, 1:2]
        dx = x.T - x
        dy = y.T - y
        r = np.sqrt(dx**2 + dy**2)
        inv_r = np.zeros_like(r)
        inv_r[r > 0] = 1.0 / r[r > 0]
        # count every pair only once (upper triangle of the interaction matrix)
        e_pot = constants.G * np.sum(np.triu(-(mass * mass.T) * inv_r, 1))
        return e_kin, e_pot

    def merge(self, other):
        """Perfectly inelastic merge of two bodies (conserves momentum)."""
        self.v = (self.m * self.v + other.m * other.v) / (self.m + other.m)
        self.m += other.m

    def pydraw(self, pd, surface):
        # color scales with the speed of the body (the original left the color blank)
        vmag = self.v.mag()
        shade = min(255, int(vmag))
        color = (shade, 0, 255 - shade)
        x = math.floor(self.pos.x)
        y = math.floor(self.pos.y)
        pd.circle(surface, color, (x, y), 1 + math.floor(0.2 * self.m))

    def __repr__(self):
        return "Body: ({0.pos.x}, {0.pos.y}), mass={0.m}".format(self)


class Drawable(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def pydraw(self, pd, surface):
        raise NotImplementedError('Must implement pydraw function!')
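# Usage sketch (assumes the project-local Vector supports scalar multiplication
# and addition; the values are illustrative):
#
#   b = Body(pos=Vector(100.0, 100.0), v=Vector(0.0, 1.0), m=5.0)
#   b.move(dt=0.1, a=Vector(0.0, -0.1))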
|
StarcoderdataPython
|
215568
|
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: <NAME>, <NAME>, <NAME>
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from pymor.algorithms.basisextension import trivial_basis_extension, gram_schmidt_basis_extension, pod_basis_extension
from pymor.algorithms.ei import interpolate_operators, ei_greedy, deim
from pymor.algorithms.greedy import greedy
from pymor.analyticalproblems.advection import InstationaryAdvectionProblem
from pymor.analyticalproblems.burgers import BurgersProblem, Burgers2DProblem
from pymor.analyticalproblems.elliptic import EllipticProblem
from pymor.analyticalproblems.thermalblock import ThermalBlockProblem
from pymor.core.cache import clear_caches, enable_caching, disable_caching
from pymor.core.defaults import print_defaults, write_defaults_to_file, load_defaults_from_file, set_defaults
from pymor.core.logger import set_log_levels, getLogger
from pymor.core.pickle import dump, dumps, load, loads
from pymor.discretizations.basic import StationaryDiscretization, InstationaryDiscretization
from pymor.domaindescriptions.basic import RectDomain, CylindricalDomain, TorusDomain, LineDomain, CircleDomain
from pymor.domaindescriptions.boundarytypes import BoundaryType
from pymor.domaindiscretizers.default import discretize_domain_default
from pymor.discretizers.advection import discretize_nonlinear_instationary_advection_fv
from pymor.discretizers.elliptic import discretize_elliptic_cg
from pymor.functions.basic import ConstantFunction, GenericFunction, ExpressionFunction, LincombFunction
from pymor.grids.boundaryinfos import EmptyBoundaryInfo, BoundaryInfoFromIndicators, AllDirichletBoundaryInfo
from pymor.grids.oned import OnedGrid
from pymor.grids.rect import RectGrid
from pymor.grids.tria import TriaGrid
from pymor.la.basic import induced_norm, cat_arrays
from pymor.la.gram_schmidt import gram_schmidt
from pymor.la.interfaces import VectorSpace
from pymor.la.numpyvectorarray import NumpyVectorArray, NumpyVectorSpace
from pymor.la.pod import pod
from pymor.operators.numpy import NumpyGenericOperator, NumpyMatrixOperator
from pymor.operators.constructions import (LincombOperator, Concatenation, ComponentProjection, IdentityOperator,
ConstantOperator, VectorArrayOperator, VectorOperator, VectorFunctional,
FixedParameterOperator)
from pymor.operators.ei import EmpiricalInterpolatedOperator
from pymor.parameters.base import Parameter
from pymor.parameters.functionals import (ProjectionParameterFunctional, GenericParameterFunctional,
ExpressionParameterFunctional)
from pymor.parameters.spaces import CubicParameterSpace
from pymor.reductors.basic import reduce_generic_rb, reduce_to_subbasis
from pymor.reductors.stationary import reduce_stationary_coercive
from pymor.tools.floatcmp import float_cmp, float_cmp_all
from pymor.tools.random import new_random_state
|
StarcoderdataPython
|
6677141
|
<gh_stars>0
#!/usr/bin/python
# socket_client.py
import socket
import sys
import os
def socket_send(command):
try:
sock = socket.socket()
sock.connect(('127.0.0.1', 1000))
sock.send(command)
result = sock.recv(2048)
sock.close()
return result
except KeyboardInterrupt:
return 'canceled by user'
if __name__ == '__main__':
cmd = os.getcwd() + '#*#*#' + ' '.join(sys.argv[1:])
print(socket_send(cmd))
|
StarcoderdataPython
|
4830517
|
<filename>python_modules/dagster/dagster/grpc/types.py<gh_stars>1-10
from collections import namedtuple
from enum import Enum
from dagster import check
from dagster.core.code_pointer import CodePointer
from dagster.core.instance.ref import InstanceRef
from dagster.core.origin import PipelineOrigin, PipelinePythonOrigin, RepositoryOrigin
from dagster.serdes import whitelist_for_serdes
from dagster.utils.error import SerializableErrorInfo
@whitelist_for_serdes
class ExecutionPlanSnapshotArgs(
namedtuple(
"_ExecutionPlanSnapshotArgs",
"pipeline_origin solid_selection run_config mode step_keys_to_execute pipeline_snapshot_id",
)
):
def __new__(
cls,
pipeline_origin,
solid_selection,
run_config,
mode,
step_keys_to_execute,
pipeline_snapshot_id,
):
return super(ExecutionPlanSnapshotArgs, cls).__new__(
cls,
pipeline_origin=check.inst_param(pipeline_origin, "pipeline_origin", PipelineOrigin),
solid_selection=check.opt_list_param(solid_selection, "solid_selection", of_type=str),
run_config=check.dict_param(run_config, "run_config"),
mode=check.str_param(mode, "mode"),
step_keys_to_execute=check.opt_list_param(
step_keys_to_execute, "step_keys_to_execute", of_type=str
),
pipeline_snapshot_id=check.str_param(pipeline_snapshot_id, "pipeline_snapshot_id"),
)
@whitelist_for_serdes
class ExecuteRunArgs(namedtuple("_ExecuteRunArgs", "pipeline_origin pipeline_run_id instance_ref")):
def __new__(cls, pipeline_origin, pipeline_run_id, instance_ref):
return super(ExecuteRunArgs, cls).__new__(
cls,
pipeline_origin=check.inst_param(pipeline_origin, "pipeline_origin", PipelineOrigin),
pipeline_run_id=check.str_param(pipeline_run_id, "pipeline_run_id"),
instance_ref=check.opt_inst_param(instance_ref, "instance_ref", InstanceRef),
)
@whitelist_for_serdes
class ExecuteStepArgs(
namedtuple(
"_ExecuteStepArgs",
"pipeline_origin pipeline_run_id instance_ref mode step_keys_to_execute run_config retries_dict",
)
):
def __new__(
cls,
pipeline_origin,
pipeline_run_id,
instance_ref=None,
mode=None,
step_keys_to_execute=None,
run_config=None,
retries_dict=None,
):
return super(ExecuteStepArgs, cls).__new__(
cls,
pipeline_origin=check.inst_param(
pipeline_origin, "pipeline_origin", PipelinePythonOrigin
),
pipeline_run_id=check.str_param(pipeline_run_id, "pipeline_run_id"),
instance_ref=check.opt_inst_param(instance_ref, "instance_ref", InstanceRef),
mode=check.opt_str_param(mode, "mode"),
step_keys_to_execute=check.opt_list_param(
step_keys_to_execute, "step_keys_to_execute", of_type=str
),
run_config=check.opt_dict_param(run_config, "run_config"),
retries_dict=check.opt_dict_param(retries_dict, "retries_dict"),
)
@whitelist_for_serdes
class LoadableRepositorySymbol(
namedtuple("_LoadableRepositorySymbol", "repository_name attribute")
):
def __new__(cls, repository_name, attribute):
return super(LoadableRepositorySymbol, cls).__new__(
cls,
repository_name=check.str_param(repository_name, "repository_name"),
attribute=check.str_param(attribute, "attribute"),
)
@whitelist_for_serdes
class ListRepositoriesResponse(
namedtuple(
"_ListRepositoriesResponse",
"repository_symbols executable_path repository_code_pointer_dict",
)
):
def __new__(
cls, repository_symbols, executable_path=None, repository_code_pointer_dict=None,
):
return super(ListRepositoriesResponse, cls).__new__(
cls,
repository_symbols=check.list_param(
repository_symbols, "repository_symbols", of_type=LoadableRepositorySymbol
),
# These are currently only used by the GRPC Repository Location, but
# we will need to migrate the rest of the repository locations to use this.
executable_path=check.opt_str_param(executable_path, "executable_path"),
repository_code_pointer_dict=check.opt_dict_param(
repository_code_pointer_dict,
"repository_code_pointer_dict",
key_type=str,
value_type=CodePointer,
),
)
@whitelist_for_serdes
class ListRepositoriesInput(
namedtuple("_ListRepositoriesInput", "module_name python_file working_directory attribute")
):
def __new__(cls, module_name, python_file, working_directory, attribute):
check.invariant(not (module_name and python_file), "Must set only one")
check.invariant(module_name or python_file, "Must set at least one")
return super(ListRepositoriesInput, cls).__new__(
cls,
module_name=check.opt_str_param(module_name, "module_name"),
python_file=check.opt_str_param(python_file, "python_file"),
working_directory=check.opt_str_param(working_directory, "working_directory"),
attribute=check.opt_str_param(attribute, "attribute"),
)
@whitelist_for_serdes
class PartitionArgs(
namedtuple("_PartitionArgs", "repository_origin partition_set_name partition_name")
):
def __new__(cls, repository_origin, partition_set_name, partition_name):
return super(PartitionArgs, cls).__new__(
cls,
repository_origin=check.inst_param(
repository_origin, "repository_origin", RepositoryOrigin
),
partition_set_name=check.str_param(partition_set_name, "partition_set_name"),
partition_name=check.str_param(partition_name, "partition_name"),
)
@whitelist_for_serdes
class PartitionNamesArgs(namedtuple("_PartitionNamesArgs", "repository_origin partition_set_name")):
def __new__(cls, repository_origin, partition_set_name):
return super(PartitionNamesArgs, cls).__new__(
cls,
repository_origin=check.inst_param(
repository_origin, "repository_origin", RepositoryOrigin
),
partition_set_name=check.str_param(partition_set_name, "partition_set_name"),
)
@whitelist_for_serdes
class PartitionSetExecutionParamArgs(
namedtuple(
"_PartitionSetExecutionParamArgs", "repository_origin partition_set_name partition_names",
)
):
def __new__(cls, repository_origin, partition_set_name, partition_names):
return super(PartitionSetExecutionParamArgs, cls).__new__(
cls,
repository_origin=check.inst_param(
repository_origin, "repository_origin", RepositoryOrigin
),
partition_set_name=check.str_param(partition_set_name, "partition_set_name"),
partition_names=check.list_param(partition_names, "partition_names", of_type=str),
)
@whitelist_for_serdes
class PipelineSubsetSnapshotArgs(
namedtuple("_PipelineSubsetSnapshotArgs", "pipeline_origin solid_selection")
):
def __new__(cls, pipeline_origin, solid_selection):
return super(PipelineSubsetSnapshotArgs, cls).__new__(
cls,
pipeline_origin=check.inst_param(pipeline_origin, "pipeline_origin", PipelineOrigin),
solid_selection=check.list_param(solid_selection, "solid_selection", of_type=str)
if solid_selection
else None,
)
@whitelist_for_serdes
class ScheduleExecutionDataMode(Enum):
    # Just return the schedule data and tags, don't check whether we should execute the schedule or return it
PREVIEW = "PREVIEW"
# Return schedule data, tags, and whether we should execute the schedule
LAUNCH_SCHEDULED_EXECUTION = "LAUNCH_SCHEDULED_EXECUTION"
@whitelist_for_serdes
class ExternalScheduleExecutionArgs(
namedtuple(
"_ExternalScheduleExecutionArgs",
"repository_origin instance_ref schedule_name schedule_execution_data_mode "
"scheduled_execution_timestamp_utc",
)
):
def __new__(
cls,
repository_origin,
instance_ref,
schedule_name,
schedule_execution_data_mode,
scheduled_execution_timestamp_utc,
):
return super(ExternalScheduleExecutionArgs, cls).__new__(
cls,
repository_origin=check.inst_param(
repository_origin, "repository_origin", RepositoryOrigin
),
instance_ref=check.inst_param(instance_ref, "instance_ref", InstanceRef),
schedule_name=check.str_param(schedule_name, "schedule_name"),
schedule_execution_data_mode=check.inst_param(
schedule_execution_data_mode,
"schedule_execution_data_mode",
ScheduleExecutionDataMode,
),
scheduled_execution_timestamp_utc=check.opt_float_param(
scheduled_execution_timestamp_utc, "scheduled_execution_timestamp_utc"
),
)
@whitelist_for_serdes
class ExternalExecutableArgs(
namedtuple("_ExternalExecutableArgs", "repository_origin instance_ref name",)
):
def __new__(cls, repository_origin, instance_ref, name):
return super(ExternalExecutableArgs, cls).__new__(
cls,
repository_origin=check.inst_param(
repository_origin, "repository_origin", RepositoryOrigin
),
instance_ref=check.inst_param(instance_ref, "instance_ref", InstanceRef),
name=check.str_param(name, "name"),
)
@whitelist_for_serdes
class ShutdownServerResult(namedtuple("_ShutdownServerResult", "success serializable_error_info")):
def __new__(cls, success, serializable_error_info):
return super(ShutdownServerResult, cls).__new__(
cls,
success=check.bool_param(success, "success"),
serializable_error_info=check.opt_inst_param(
serializable_error_info, "serializable_error_info", SerializableErrorInfo
),
)
@whitelist_for_serdes
class CancelExecutionRequest(namedtuple("_CancelExecutionRequest", "run_id")):
def __new__(cls, run_id):
return super(CancelExecutionRequest, cls).__new__(
cls, run_id=check.str_param(run_id, "run_id"),
)
@whitelist_for_serdes
class CancelExecutionResult(
namedtuple("_CancelExecutionResult", "success message serializable_error_info")
):
def __new__(cls, success, message, serializable_error_info):
return super(CancelExecutionResult, cls).__new__(
cls,
success=check.bool_param(success, "success"),
message=check.opt_str_param(message, "message"),
serializable_error_info=check.opt_inst_param(
serializable_error_info, "serializable_error_info", SerializableErrorInfo
),
)
@whitelist_for_serdes
class CanCancelExecutionRequest(namedtuple("_CanCancelExecutionRequest", "run_id")):
def __new__(cls, run_id):
return super(CanCancelExecutionRequest, cls).__new__(
cls, run_id=check.str_param(run_id, "run_id"),
)
@whitelist_for_serdes
class CanCancelExecutionResult(namedtuple("_CancelExecutionResult", "can_cancel")):
def __new__(cls, can_cancel):
return super(CanCancelExecutionResult, cls).__new__(
cls, can_cancel=check.bool_param(can_cancel, "can_cancel"),
)
@whitelist_for_serdes
class StartRunResult(namedtuple("_StartRunResult", "success message serializable_error_info")):
def __new__(cls, success, message, serializable_error_info):
return super(StartRunResult, cls).__new__(
cls,
success=check.bool_param(success, "success"),
message=check.opt_str_param(message, "message"),
serializable_error_info=check.opt_inst_param(
serializable_error_info, "serializable_error_info", SerializableErrorInfo
),
)
@whitelist_for_serdes
class GetCurrentImageResult(
namedtuple("_GetCurrentImageResult", "current_image serializable_error_info")
):
def __new__(cls, current_image, serializable_error_info):
return super(GetCurrentImageResult, cls).__new__(
cls,
current_image=check.opt_str_param(current_image, "current_image"),
serializable_error_info=check.opt_inst_param(
serializable_error_info, "serializable_error_info", SerializableErrorInfo
),
)
|
StarcoderdataPython
|
3264424
|
from app import create_app
from app.database import db_session
APP = create_app()
@APP.teardown_appcontext
def shutdown_session(exception=None):
db_session.remove()
|
StarcoderdataPython
|
5142254
|
<filename>abexp/core/planning.py
# MIT License
#
# Copyright (c) 2021 Playtika Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
from abexp.core.design import SampleSize
class Planning:
@staticmethod
def planning_diff_mean(avg_n_users_per_day, mean_contr, mean_treat, std_contr, alpha=0.05, power=0.8):
"""
Use the sample size determination with means comparison from the core.design.SampleSize class
to estimate the number of days that a test must run to achieve the desired significance and power level.
Parameters
----------
avg_n_users_per_day : int
            The number of users per day which can be directed to the variant.
mean_contr : float
Mean of the control group.
mean_treat : float
Mean of the treatment group.
std_contr : float > 0
Standard deviation of the control group. It assumes that the standard deviation of the control group is
equal to the standard deviation of the treatment group.
alpha : float in interval (0,1)
            Significance level, default 0.05. It is the probability of a type I error, that is, wrong rejections if the
null hypothesis is true.
power : float in interval (0,1)
Statistical power of the test, default 0.8. It is one minus the probability of a type II error. Power is
the probability that the test correctly rejects the null hypothesis if the alternative hypothesis is true.
Returns
-------
n_days : int
Minimum number of days to run the A/B test.
"""
        # If the means are equal, the function returns infinity
if mean_contr == mean_treat:
return np.Inf
        # If the number of users per day is invalid, the function returns infinity
if avg_n_users_per_day <= 0:
return np.Inf
# Compute required sample size
sample_size = SampleSize.ssd_mean(mean_contr, mean_treat, std_contr, alpha, power)
return int(np.ceil(sample_size/avg_n_users_per_day))
@staticmethod
def planning_diff_prop(avg_n_users_per_day, prop_contr, prop_treat, alpha=0.05, power=0.8):
"""
Use the sample size determination with proportions comparison from the core.design.SampleSize class
to estimate the number of days that a test must run to achieve the desired significance and power level.
Parameters
----------
avg_n_users_per_day : int
            The number of users per day which can be directed to the variant.
prop_contr : float in interval (0,1)
Proportion in the control group.
prop_treat : float in interval (0,1)
Proportion in the treatment group.
alpha : float in interval (0,1)
            Significance level, default 0.05. It is the probability of a type I error, that is, wrong rejections if the
            null hypothesis is true.
        power : float in interval (0,1)
            Statistical power of the test, default 0.8. It is one minus the probability of a type II error. Power is
            the probability that the test correctly rejects the null hypothesis if the alternative hypothesis is true.
Returns
-------
n_days : int
Minimum number of days to run the A/B test.
"""
        # If the proportions are equal, the function returns infinity
if prop_contr == prop_treat:
return np.Inf
        # If the number of users per day is invalid, the function returns infinity
if avg_n_users_per_day <= 0:
return np.Inf
# Compute required sample size
sample_size = SampleSize.ssd_prop(prop_contr, prop_treat, alpha, power)
return int(np.ceil(sample_size / avg_n_users_per_day))
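# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). The numbers below
# are illustrative only; they assume roughly 200 users per day can be routed
# to the variant and rely on the default alpha=0.05 and power=0.8.
if __name__ == "__main__":
    # How many days to detect a mean lift from 5.0 to 5.5 with std 2.0?
    days_mean = Planning.planning_diff_mean(avg_n_users_per_day=200,
                                            mean_contr=5.0, mean_treat=5.5,
                                            std_contr=2.0)
    # How many days to detect a conversion lift from 10% to 12%?
    days_prop = Planning.planning_diff_prop(avg_n_users_per_day=200,
                                            prop_contr=0.10, prop_treat=0.12)
    print("days needed (means):", days_mean)
    print("days needed (proportions):", days_prop)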
|
StarcoderdataPython
|
3437531
|
<reponame>dannyroberts/commcare-hq
from __future__ import absolute_import, unicode_literals
import json
import six
import datetime
from copy import copy
from django.core.serializers.json import (
Serializer as JsonSerializer,
DjangoJSONEncoder)
from dimagi.utils.parsing import json_format_datetime
class JsonLinesSerializer(JsonSerializer):
"""
Convert a queryset to JSON outputting one object per line
"""
def start_serialization(self):
self._init_options()
def end_serialization(self):
pass
def end_object(self, obj):
# self._current has the field data
json_kwargs = copy(self.json_kwargs)
json_kwargs['cls'] = CommCareJSONEncoder
json_dump = json.dumps(self.get_dump_object(obj), **json_kwargs)
if six.PY3:
json_dump = json_dump.encode('utf-8')
self.stream.write(json_dump)
self.stream.write(b"\n")
self._current = None
class CommCareJSONEncoder(DjangoJSONEncoder):
"""
    Custom version of the DjangoJSONEncoder that formats datetimes with all 6 microsecond digits
"""
def default(self, o):
if isinstance(o, datetime.datetime):
return json_format_datetime(o)
else:
return super(CommCareJSONEncoder, self).default(o)
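# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). It shows the intent
# of CommCareJSONEncoder: datetimes keep all six microsecond digits when
# dumped. The sample datetime below is illustrative only.
if __name__ == "__main__":
    sample = {"received_on": datetime.datetime(2018, 1, 2, 3, 4, 5, 123456)}
    print(json.dumps(sample, cls=CommCareJSONEncoder))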
|
StarcoderdataPython
|
9615852
|
#!/usr/bin/env python3
import unittest
import numpy as np
import torch
from pytorch_translate import rnn # noqa
from pytorch_translate.beam_decode import SequenceGenerator
from pytorch_translate.ensemble_export import BeamSearchAndDecode
from pytorch_translate.tasks import pytorch_translate_task as tasks
from pytorch_translate.test import utils as test_utils
class TestBeamSearchAndDecode(unittest.TestCase):
def test_beam_search_and_decode_generate(self):
"""
        A basic test that the output given by the BeamSearchAndDecode class
        is the same as that of SequenceGenerator
"""
test_args = test_utils.ModelParamsDict(arch="rnn")
test_args.sequence_lstm = True
BEAM_SIZE = 1
WORD_REWARD = 1
UNK_REWARD = -1
LENGTH_PENALTY = 0
PLACEHOLDER_SEQ_LENGTH = 5
NBEST = 2
MAX_SEQ_LEN = 7
src_tokens = torch.LongTensor([[0, 0, 0]])
src_lengths = torch.LongTensor([3])
# Build model list
samples, src_dict, tgt_dict = test_utils.prepare_inputs(test_args)
task = tasks.DictionaryHolderTask(src_dict, tgt_dict)
models = task.build_model(test_args)
# Placeholder inputs for BeamSearchAndDecode
placeholder_src_tokens = torch.LongTensor(
np.ones((PLACEHOLDER_SEQ_LENGTH, 1), dtype="int64")
)
placeholder_src_lengths = torch.IntTensor(
np.array([PLACEHOLDER_SEQ_LENGTH], dtype="int32")
)
prev_token = torch.LongTensor([tgt_dict.eos()])
prev_scores = torch.FloatTensor([0.0])
attn_weights = torch.zeros(src_lengths[0].item())
prev_hypos_indices = torch.zeros(BEAM_SIZE, dtype=torch.int64)
num_steps = torch.LongTensor([MAX_SEQ_LEN])
# Generate output using SequenceGenerator
translator = SequenceGenerator(
[models],
task.target_dictionary,
beam_size=BEAM_SIZE,
word_reward=WORD_REWARD,
unk_reward=UNK_REWARD,
)
encoder_input = {"src_tokens": src_tokens, "src_lengths": src_lengths}
top_seq_gen_hypothesis = translator.generate(
encoder_input, beam_size=BEAM_SIZE, maxlen=MAX_SEQ_LEN
)[0]
# Generate output using BeamSearch/BeamDecode
placeholder_src_tokens = torch.LongTensor(
np.ones((PLACEHOLDER_SEQ_LENGTH, 1), dtype="int64")
)
placeholder_src_lengths = torch.IntTensor(
np.array([PLACEHOLDER_SEQ_LENGTH], dtype="int32")
)
# Generate output using BeamSearchAndDecode class
beam_search_and_decode = BeamSearchAndDecode(
[models],
tgt_dict=tgt_dict,
src_tokens=placeholder_src_tokens,
src_lengths=placeholder_src_lengths,
eos_token_id=tgt_dict.eos(),
length_penalty=LENGTH_PENALTY,
nbest=NBEST,
beam_size=BEAM_SIZE,
stop_at_eos=True,
word_reward=WORD_REWARD,
unk_reward=UNK_REWARD,
quantize=True,
)
beam_search_and_decode_output = beam_search_and_decode(
src_tokens.transpose(0, 1),
src_lengths,
prev_token,
prev_scores,
attn_weights,
prev_hypos_indices,
num_steps[0],
)
for hyp_index in range(
min(len(beam_search_and_decode_output), len(top_seq_gen_hypothesis))
):
beam_search_and_decode_hypothesis = beam_search_and_decode_output[hyp_index]
# Compare two outputs
# We always look only from 0 to MAX_SEQ_LEN, because sequence generator
# adds an EOS at the end after MAX_SEQ_LEN
# Compare two hypotheses
np.testing.assert_array_equal(
top_seq_gen_hypothesis[hyp_index]["tokens"].tolist()[0:MAX_SEQ_LEN],
beam_search_and_decode_hypothesis[0].tolist()[0:MAX_SEQ_LEN],
)
# Compare token level scores
np.testing.assert_array_almost_equal(
top_seq_gen_hypothesis[hyp_index]["positional_scores"].tolist()[
0:MAX_SEQ_LEN
],
beam_search_and_decode_hypothesis[2][0:MAX_SEQ_LEN],
decimal=1,
)
# Compare attention weights
np.testing.assert_array_almost_equal(
top_seq_gen_hypothesis[hyp_index]["attention"].numpy()[
:, 0:MAX_SEQ_LEN
],
beam_search_and_decode_hypothesis[3].numpy()[:, 0:MAX_SEQ_LEN],
decimal=1,
)
            # Not testing the hypothesis score, as the sequence generator adds an EOS
            # at the end, which changes the final score
|
StarcoderdataPython
|
8199458
|
'''
---------------------------
Licensing and Distribution
---------------------------
Program name: TorsiFlex
Version : 2021.3
License : MIT/x11
Copyright (c) 2021, <NAME> (<EMAIL>) and
<NAME> (<EMAIL>)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
---------------------------
*----------------------------------*
| Module : common |
| Sub-module : partfns |
| Last Update: 2020/06/04 (Y/M/D) |
| Main Author: <NAME> |
*----------------------------------*
This module contains some functions related
to the calculation of basic partition functions
'''
#===============================================#
import numpy as np
#-----------------------------------------------#
import common.physcons as pc
from common.fncs import exp128
#===============================================#
#===============================================#
# Partition functions #
#===============================================#
def pf_partinbox(mass,T):
return (pc.TWOPI*pc.KB*mass*T)**(3./2.)/(pc.H**3)
#-----------------------------------------------#
def pf_rigidrotor(imoments,T,rotsymnum=1):
beta = pc.KB * T
rot_const = [(pc.HBAR**2)/Ii/2 for Ii in imoments]
# linear case
if len(imoments) == 1: qrr = beta / rot_const[0]
else : qrr = np.sqrt(pc.PI) * np.sqrt(beta**3 / np.prod(rot_const))
return qrr/rotsymnum
#-----------------------------------------------#
def pf_harmosc1D(angfreq,T,imag=1E10):
if angfreq < 0.0: return imag
elif angfreq == 0.0: return 1.0
exp = exp128(-pc.HBAR*angfreq/pc.KB/T)
qHO = 1.0/(1.0-exp)
return qHO
#-----------------------------------------------#
def pf_harmosc(angfreqs,T,imag=1E10):
qHO = np.prod([pf_harmosc1D(angfreq,T,imag) for angfreq in angfreqs])
return qHO
#-----------------------------------------------#
def pf_electr(eslist,T):
pf, beta = 0.0, 1.0 / pc.KB / T
for deg, relE in eslist: pf += deg * exp128(-relE * beta)
return pf
#===============================================#
#===============================================#
# Partition functions and derivatives #
#===============================================#
def pfnder_tr(mass,T):
ph_tra = pf_partinbox(mass,T)
# at constant volume
fdln_trV = -1.5*pc.KB*T
sdln_trV = +1.5*(pc.KB*T)**2
# at constant pressure
fdln_trP = -2.5*pc.KB*T
sdln_trP = +2.5*(pc.KB*T)**2
return ph_tra,fdln_trV,sdln_trV,fdln_trP,sdln_trP
#-----------------------------------------------#
def pfnder_rot(imoments,T,rotsigma=1):
pfn_rot = pf_rigidrotor(imoments,T,rotsigma)
fdln_rot = -1.5*pc.KB*T
sdln_rot = +1.5*(pc.KB*T)**2
return pfn_rot, fdln_rot, sdln_rot
#-----------------------------------------------#
def pfnder_vib_i(freq,T):
'''
    Calculates the vibrational partition function (Q) and
    the derivatives of log(Q) with respect to beta.
    The zero of energy is taken at level n=0 (i.e. the ZPE).
------
T: float or np.array
'''
zpe, qvib, fdln, sdln = 0.0, 1.0, 0.0, 0.0 # for imag freqs
if freq > 0.0:
zpe = pc.HBAR * freq / 2.0
hw = pc.HBAR * freq
bhw = (1.0/pc.KB/T) * hw
exp = np.exp(-bhw)
# partition functions and derivatives (of logQ)
qvib = 1.0/(1.0-exp)
fdln = - hw * exp / (1.0-exp)
sdln = hw**2 / (1/exp - 1.0) / (1.0-exp)
return zpe, qvib, fdln, sdln
#-----------------------------------------------#
def pfnder_vib(freqs,Tlist):
zpe = 0.0
pfn_vib = [1.0 for T in Tlist]
fdln_vib = [0.0 for T in Tlist]
sdln_vib = [0.0 for T in Tlist]
for idx,T in enumerate(Tlist):
for freq in freqs:
zpe_i, qvib_i, fdln_i, sdln_i = pfnder_vib_i(freq,T)
# update zpe
if idx == 0: zpe += zpe_i
# update partition fncs and derivatives
pfn_vib[idx] *= qvib_i
fdln_vib[idx] += fdln_i
sdln_vib[idx] += sdln_i
return zpe, pfn_vib, fdln_vib, sdln_vib
#-----------------------------------------------#
def pfnder_ele(les,Tlist):
pfn_ele = [0.0 for T in Tlist]
fd_ele = [0.0 for T in Tlist]
sd_ele = [0.0 for T in Tlist]
for idx,T in enumerate(Tlist):
beta = 1.0/pc.KB/T
for mtp,relE in les:
pfn_ele[idx] += mtp*exp128(-beta*relE)
fd_ele[idx] += (relE )*mtp*exp128(-beta*relE)
sd_ele[idx] += (relE**2)*mtp*exp128(-beta*relE)
# convert to array
pfn_ele = np.array(pfn_ele)
fd_ele = np.array(fd_ele)
sd_ele = np.array(sd_ele)
# we care about derivatives of logQ
fdln_ele = fd_ele / pfn_ele
sdln_ele = sd_ele / pfn_ele - (fd_ele/pfn_ele)**2
return pfn_ele,fdln_ele,sdln_ele
#===============================================#
#==========================================================#
# Calculation of equilibrium/rate constants #
#==========================================================#
def Qs2Kc(ltemp,QA,QB,VA,VB,nR=1,nP=1):
'''
Qi; partition function per unit volume
Kc = [B]/[A]
'''
term_V = pc.VOL0**(nP-nR)
return [term_V*QB[idx]/QA[idx]*exp128(-(VB-VA)/pc.KB/T) for idx,T in enumerate(ltemp)]
#----------------------------------------------------------#
def Kc2GFE(ltemp,Kc):
return [ -pc.KB * T * np.log(Kc[idx]) for idx,T in enumerate(ltemp)]
#----------------------------------------------------------#
def Kc2rate(ltemp,Kc):
return [(pc.KB*T/pc.H)*Kc[idx] for idx,T in enumerate(ltemp)]
#----------------------------------------------------------#
def rate2Kc(ltemp,k):
return [k[idx]*pc.H/(pc.KB*T) for idx,T in enumerate(ltemp)]
#----------------------------------------------------------#
def rate2GFE(ltemp,rates,nR=1):
term_V = pc.VOL0**(1-nR)
GFE = [-pc.KB * T * np.log(term_V*ki*pc.H/pc.KB/T) for T,ki in zip(ltemp,rates)]
return GFE
#==========================================================#
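#==========================================================#
# Editor's usage sketch (not part of the original module). #
# A hedged example of pf_electr for a made-up two-level #
# electronic system; the energies (hartree) and the #
# temperatures (K) are arbitrary sample values. #
#==========================================================#
if __name__ == "__main__":
    eslist = [(2, 0.0), (2, 0.005)]  # (degeneracy, relative energy) pairs
    for T in (298.15, 1000.0):
        print(T, pf_electr(eslist, T))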
|
StarcoderdataPython
|
116085
|
<reponame>pathakraul/internetradio-client
#!/usr/bin/python
# Internet Radio Client using Shoutcast service
# to search and play random channels with genre search
# Usage ./internetradio.py --genre <GENRE>
import os
import logging
import argparse
import requests as rq
import xml.etree.ElementTree as et
#----------------------------------------------------------------------------------------
# Set your shoutcast devId here
#----------------------------------------------------------------------------------------
devId = ""
# Optional proxies for the requests calls below; leave empty for a direct connection
proxies = {}
#----------------------------------------------------------------------------------------
logging.basicConfig(format='%(levelname)s %(asctime)s %(message)s', level=logging.INFO)
log = logging.getLogger(__name__)
#----------------------------------------------------------------------------------------
baseUrl = "http://api.shoutcast.com/"
tuneinUrl = "http://yp.shoutcast.com/"
base = "sbin/tunein-station.pls"
randomSearch = "station/randomstations?k={0}&f=xml&mt=audio/mpeg&limit=1&genre=".format(devId)
#----------------------------------------------------------------------------------------
playerCommand = "mplayer -quiet {0}\n"
#----------------------------------------------------------------------------------------
def keywordSearch(keyword):
log.debug("Making Keyword Search\n")
searchurl=baseUrl+randomSearch+keyword
log.debug("Searching for {0} on Shoutcast".format(keyword))
res = rq.get(searchurl, proxies=proxies)
if res.status_code == 200:
        return res.content
else:
log.error("Search response failure from server {0}".format(res.status_code))
#----------------------------------------------------------------------------------------
'''
<response>
<statusCode>200</statusCode> #tree[0]
<statusText>Ok</statusText> #tree[1]
<data> #tree[2]
<stationlist> #tree[2][0]
<tunein base="/sbin/tunein-station.pls" base-m3u="/sbin/tunein-station.m3u" base-xspf="/sbin/tunein-station.xspf"/> #tree[2][0][0]
<station name="Bollywood Hits" genre="Hindi" ct="Baamulaiza - Mika <NAME>" mt="audio/mpeg" id="312131" br="96" lc="13" ml="100"/> #tree[2][0][1]
</stationlist>
</data>
</response>
'''
def getRandomChannel(data):
tree = et.fromstring(data)
cid = tree[2][0][1].attrib['id']
cn = tree[2][0][1].attrib['name']
log.info("Playing Channel [{0}], Channel ID [{1}]".format(cn, cid))
return cid
#----------------------------------------------------------------------------------------
def playChannel(channelId):
channelurl = tuneinUrl+base+"?id="+ str(channelId)
channelstream = parseStreamUrl(channelurl)
print channelstream
command = playerCommand.format(channelstream)
print command
os.system(command)
#----------------------------------------------------------------------------------------
def searchChannelAndPlay(genreKeyword):
responsedata = keywordSearch(genreKeyword)
channelid = getRandomChannel(responsedata)
playChannel(channelid)
#----------------------------------------------------------------------------------------
def parseStreamUrl(channelurl):
response = rq.get(channelurl, stream=True, proxies=proxies)
with open("stream.pls", "wb") as handle:
for data in response.iter_content():
handle.write(data)
x = []
    datafile = open('./stream.pls')
for line in datafile:
if "File1=" in line:
x = str(line)
return x[6:-1]
#----------------------------------------------------------------------------------------
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--genre", help="Search random channel of particular genre")
args = parser.parse_args()
    if args.genre:
        searchChannelAndPlay(args.genre)
    else:
        log.error("Provide any genre to search a random channel")
#----------------------------------------------------------------------------------------
|
StarcoderdataPython
|
4836351
|
import os
# -*- coding: utf-8 -*-
# available languages
LANGUAGES = {
'fr': 'Français',
'en': 'English'
}
STRIPE_KEYS = {
'secret_key': os.environ['SECRET_KEY'],
'publishable_key': os.environ['PUBLISHABLE_KEY'],
'endpoint_secret': os.environ['ENDPOINT_SECRET']
}
SQLALCHEMY_DATABASE_URI = 'sqlite:///vimebook.db'
|
StarcoderdataPython
|
5184766
|
<filename>clipik.py
from PIL import ImageGrab
import time
# Change the following path according to your folder of choice.
fileName = r'E:\myScreenshots\Screenshot_' + time.strftime("%Y%m%d_%H%M%S") + '.JPG'
def save_clipboard_image():
    image = ImageGrab.grabclipboard()  # returns None when the clipboard holds no image
    if image is None:
        raise SystemExit('No image found on the clipboard')
    image.save(fileName)
    print('Image saved to %s' % fileName)
save_clipboard_image()
|
StarcoderdataPython
|
3423137
|
import numpy as np
import os
import torch
import logging
from visualsearch.utils import ProcessingStats
class NumpyRepo:
def __init__(self, numpy_file):
self.use_gpu = torch.cuda.is_available()
self.numpy_file = numpy_file
self.stats = ProcessingStats()
if os.path.isfile(numpy_file):
            # It is more efficient to append features to a Python list (CPython implementation)
            # than to work with numpy arrays, because the latter resizes the
            # array every time a new feature vector is appended and copies the whole array again.
# More details: https://docs.python.org/2/faq/design.html#how-are-lists-implemented-in-cpython
self.np_features_mat = np.load(self.numpy_file)
self.features_mat = self.np_features_mat.tolist()
else:
self.features_mat = []
def commit(self):
np.save(self.numpy_file, np.array(self.features_mat, dtype=np.float32))
def add(self, features):
self.features_mat.append(features)
# self.features_mat = np.append(self.features_mat, np.array([features]), axis=0)
feat_idx = len(self.features_mat) - 1
return feat_idx
def find_similars(self, features_v, topk):
self.stats.start("numpyrepo - find similars")
self.stats.start("numpyrepo - conversion to tensor")
features_mat = torch.from_numpy(self.np_features_mat)
self.stats.end("numpyrepo - conversion to tensor")
if self.use_gpu:
features_mat = features_mat.cuda()
features_v = features_v.cuda()
self.stats.start("numpyrepo - matrix multiplication")
logging.info(features_mat.dtype)
logging.info(features_v.dtype)
similarities_v = torch.matmul(features_mat, features_v.T)
self.stats.end("numpyrepo - matrix multiplication")
self.stats.start("numpyrepo - top k")
top_similarities = torch.topk(similarities_v, topk)
self.stats.end("numpyrepo - top k")
self.stats.end("numpyrepo - find similars")
logging.info(str(self.stats))
return top_similarities.indices.cpu(), top_similarities.values.cpu()
|
StarcoderdataPython
|
3229467
|
def main(argv):
import importlib # pylint: disable=C0415
import argparse # pylint: disable=C0415
from . import __version__ # pylint: disable=C0415
prog_name = "tesserae"
subcommands = {
"align": "tesserae.command.align.main",
}
parser = argparse.ArgumentParser(
prog=prog_name,
description="Graph-based mosaic read alignment and exploration algorithms.",
)
parser.add_argument(
"--version", action="version", version=f"%(prog)s version {__version__}"
)
verbosity_group = parser.add_mutually_exclusive_group()
verbosity_group.add_argument(
"-q", "--quiet", help="silence logging except errors", action="store_true"
)
verbosity_group.add_argument(
"-v", "--verbose", help="increase output verbosity", action="store_true"
)
verbosity_group.add_argument(
"-vv", "--veryverbose", help="maximal output verbosity", action="store_true"
)
parser.add_argument(
"subcommand",
choices=sorted(subcommands.keys()),
help=f"{prog_name} sub-command",
)
parser.add_argument("args", nargs=argparse.REMAINDER, help="sub-command arguments")
args = parser.parse_args(argv[1:])
from . import log # pylint: disable=C0415
log.configure_logging(args)
package_string, method_string = subcommands[args.subcommand].rsplit(".", 1)
module = importlib.import_module(package_string)
return getattr(module, method_string)(args.args)
|
StarcoderdataPython
|
6508213
|
<gh_stars>1-10
from assets.forms import SimpleSearchForm
def common_variables(request):
form = SimpleSearchForm()
return {'simple_search_form': form}
|
StarcoderdataPython
|
4957689
|
<gh_stars>0
#from api import *
from .ev3 import *
from .mproc import MprocModel
|
StarcoderdataPython
|
6687300
|
import cv2
import sys
import os
def append_file_text(filename, text):
name, ext = os.path.splitext(filename)
return "{name}_{uid}{ext}".format(name=name, uid=text, ext=ext)
# Input error catching
num_args = len(sys.argv)
if num_args != 3:
print("Usage: python3 ./resize_image <filename> <width (px)>")
exit()
img_path = sys.argv[1]
width = int(sys.argv[2])
img = cv2.imread(img_path)
img_size = img.shape
act_height = int(img_size[0])
act_width = int(img_size[1])
width_ratio = width / act_width
#height_ratio = height / act_height
#ratio = min(width_ratio, height_ratio)
ratio = width_ratio
resized_width = int(act_width * ratio)
resized_height = int(act_height * ratio)
new_size = (resized_width, resized_height)
resized_img = cv2.resize(img, new_size)
save_path = append_file_text(img_path, "resized")
print(save_path)
cv2.imwrite(save_path, resized_img)
|
StarcoderdataPython
|
6468805
|
<gh_stars>0
#! /opt/stack/bin/python3
#
# @copyright@
# Copyright (c) 2006 - 2018 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
class ProfileBase:
def pre(self, client):
pass
def main(self, client):
pass
def post(self, client):
pass
|
StarcoderdataPython
|
3497107
|
<reponame>JustinTW/pulumi-eks<filename>python/pulumi_eks/_inputs.py
# coding=utf-8
# *** WARNING: this file was generated by pulumi-gen-eks. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from .vpc_cni import VpcCni
import pulumi_aws
import pulumi_kubernetes
__all__ = [
'ClusterNodeGroupOptionsArgs',
'CoreDataArgs',
'CreationRoleProviderArgs',
'FargateProfileArgs',
'KubeconfigOptionsArgs',
'RoleMappingArgs',
'StorageClassArgs',
'TaintArgs',
'UserMappingArgs',
'VpcCniOptionsArgs',
]
@pulumi.input_type
class ClusterNodeGroupOptionsArgs:
def __init__(__self__, *,
ami_id: Optional[pulumi.Input[str]] = None,
auto_scaling_group_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
bootstrap_extra_args: Optional[pulumi.Input[str]] = None,
cloud_formation_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
cluster_ingress_rule: Optional[pulumi.Input['pulumi_aws.ec2.SecurityGroupRule']] = None,
desired_capacity: Optional[pulumi.Input[int]] = None,
encrypt_root_block_device: Optional[pulumi.Input[bool]] = None,
extra_node_security_groups: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ec2.SecurityGroup']]]] = None,
gpu: Optional[pulumi.Input[bool]] = None,
instance_profile: Optional[pulumi.Input['pulumi_aws.iam.InstanceProfile']] = None,
instance_type: Optional[pulumi.Input[str]] = None,
key_name: Optional[pulumi.Input[str]] = None,
kubelet_extra_args: Optional[pulumi.Input[str]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
max_size: Optional[pulumi.Input[int]] = None,
min_size: Optional[pulumi.Input[int]] = None,
node_associate_public_ip_address: Optional[pulumi.Input[bool]] = None,
node_public_key: Optional[pulumi.Input[str]] = None,
node_root_volume_size: Optional[pulumi.Input[int]] = None,
node_security_group: Optional[pulumi.Input['pulumi_aws.ec2.SecurityGroup']] = None,
node_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
node_user_data: Optional[pulumi.Input[str]] = None,
node_user_data_override: Optional[pulumi.Input[str]] = None,
spot_price: Optional[pulumi.Input[str]] = None,
taints: Optional[pulumi.Input[Mapping[str, pulumi.Input['TaintArgs']]]] = None,
version: Optional[pulumi.Input[str]] = None):
"""
Describes the configuration options accepted by a cluster to create its own node groups.
:param pulumi.Input[str] ami_id: The AMI ID to use for the worker nodes.
Defaults to the latest recommended EKS Optimized Linux AMI from the AWS Systems Manager Parameter Store.
Note: `amiId` and `gpu` are mutually exclusive.
See for more details:
- https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] auto_scaling_group_tags: The tags to apply to the NodeGroup's AutoScalingGroup in the CloudFormation Stack.
Per AWS, all stack-level tags, including automatically created tags, and the `cloudFormationTags` option are propagated to resources that AWS CloudFormation supports, including the AutoScalingGroup. See https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html
Note: Given the inheritance of auto-generated CF tags and `cloudFormationTags`, you should either supply the tag in `autoScalingGroupTags` or `cloudFormationTags`, but not both.
        :param pulumi.Input[str] bootstrap_extra_args: Additional args to pass directly to `/etc/eks/bootstrap.sh`. For details on available options, see: https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh. Note that the `--apiserver-endpoint`, `--b64-cluster-ca` and `--kubelet-extra-args` flags are included automatically based on other configuration parameters.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] cloud_formation_tags: The tags to apply to the CloudFormation Stack of the Worker NodeGroup.
Note: Given the inheritance of auto-generated CF tags and `cloudFormationTags`, you should either supply the tag in `autoScalingGroupTags` or `cloudFormationTags`, but not both.
:param pulumi.Input['pulumi_aws.ec2.SecurityGroupRule'] cluster_ingress_rule: The ingress rule that gives node group access.
:param pulumi.Input[int] desired_capacity: The number of worker nodes that should be running in the cluster. Defaults to 2.
:param pulumi.Input[bool] encrypt_root_block_device: Encrypt the root block device of the nodes in the node group.
:param pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ec2.SecurityGroup']]] extra_node_security_groups: Extra security groups to attach on all nodes in this worker node group.
This additional set of security groups captures any user application rules that will be needed for the nodes.
:param pulumi.Input[bool] gpu: Use the latest recommended EKS Optimized Linux AMI with GPU support for the worker nodes from the AWS Systems Manager Parameter Store.
Defaults to false.
Note: `gpu` and `amiId` are mutually exclusive.
See for more details:
- https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html
- https://docs.aws.amazon.com/eks/latest/userguide/retrieve-ami-id.html
        :param pulumi.Input['pulumi_aws.iam.InstanceProfile'] instance_profile: The IAM instance profile to use for the worker nodes in this node group.
:param pulumi.Input[str] instance_type: The instance type to use for the cluster's nodes. Defaults to "t2.medium".
:param pulumi.Input[str] key_name: Name of the key pair to use for SSH access to worker nodes.
        :param pulumi.Input[str] kubelet_extra_args: Extra args to pass to the Kubelet. Corresponds to the options passed in the `--kubeletExtraArgs` flag to `/etc/eks/bootstrap.sh`. For example, '--port=10251 --address=0.0.0.0'. Note that the `labels` and `taints` properties will be applied to this list (using `--node-labels` and `--register-with-taints` respectively) after the explicit `kubeletExtraArgs`.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Custom k8s node labels to be attached to each worker node. Adds the given key/value pairs to the `--node-labels` kubelet argument.
:param pulumi.Input[int] max_size: The maximum number of worker nodes running in the cluster. Defaults to 2.
:param pulumi.Input[int] min_size: The minimum number of worker nodes running in the cluster. Defaults to 1.
:param pulumi.Input[bool] node_associate_public_ip_address: Whether or not to auto-assign public IP addresses on the EKS worker nodes. If this toggle is set to true, the EKS workers will be auto-assigned public IPs. If false, they will not be auto-assigned public IPs.
:param pulumi.Input[str] node_public_key: Public key material for SSH access to worker nodes. See allowed formats at:
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html
If not provided, no SSH access is enabled on VMs.
:param pulumi.Input[int] node_root_volume_size: The size in GiB of a cluster node's root volume. Defaults to 20.
:param pulumi.Input['pulumi_aws.ec2.SecurityGroup'] node_security_group: The security group for the worker node group to communicate with the cluster.
This security group requires specific inbound and outbound rules.
See for more details:
https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
Note: The `nodeSecurityGroup` option and the cluster option`nodeSecurityGroupTags` are mutually exclusive.
:param pulumi.Input[Sequence[pulumi.Input[str]]] node_subnet_ids: The set of subnets to override and use for the worker node group.
        Setting this option overrides which subnets to use for the worker node group, regardless of whether the cluster's `subnetIds` is set, or whether `publicSubnetIds` and/or `privateSubnetIds` were set.
:param pulumi.Input[str] node_user_data: Extra code to run on node startup. This code will run after the AWS EKS bootstrapping code and before the node signals its readiness to the managing CloudFormation stack. This code must be a typical user data script: critically it must begin with an interpreter directive (i.e. a `#!`).
:param pulumi.Input[str] node_user_data_override: User specified code to run on node startup. This code is expected to handle the full AWS EKS bootstrapping code and signal node readiness to the managing CloudFormation stack. This code must be a complete and executable user data script in bash (Linux) or powershell (Windows).
See for more details: https://docs.aws.amazon.com/eks/latest/userguide/worker.html
:param pulumi.Input[str] spot_price: Bidding price for spot instance. If set, only spot instances will be added as worker node.
:param pulumi.Input[Mapping[str, pulumi.Input['TaintArgs']]] taints: Custom k8s node taints to be attached to each worker node. Adds the given taints to the `--register-with-taints` kubelet argument
:param pulumi.Input[str] version: Desired Kubernetes master / control plane version. If you do not specify a value, the latest available version is used.
"""
if ami_id is not None:
pulumi.set(__self__, "ami_id", ami_id)
if auto_scaling_group_tags is not None:
pulumi.set(__self__, "auto_scaling_group_tags", auto_scaling_group_tags)
if bootstrap_extra_args is not None:
pulumi.set(__self__, "bootstrap_extra_args", bootstrap_extra_args)
if cloud_formation_tags is not None:
pulumi.set(__self__, "cloud_formation_tags", cloud_formation_tags)
if cluster_ingress_rule is not None:
pulumi.set(__self__, "cluster_ingress_rule", cluster_ingress_rule)
if desired_capacity is not None:
pulumi.set(__self__, "desired_capacity", desired_capacity)
if encrypt_root_block_device is not None:
pulumi.set(__self__, "encrypt_root_block_device", encrypt_root_block_device)
if extra_node_security_groups is not None:
pulumi.set(__self__, "extra_node_security_groups", extra_node_security_groups)
if gpu is not None:
pulumi.set(__self__, "gpu", gpu)
if instance_profile is not None:
pulumi.set(__self__, "instance_profile", instance_profile)
if instance_type is not None:
pulumi.set(__self__, "instance_type", instance_type)
if key_name is not None:
pulumi.set(__self__, "key_name", key_name)
if kubelet_extra_args is not None:
pulumi.set(__self__, "kubelet_extra_args", kubelet_extra_args)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if max_size is not None:
pulumi.set(__self__, "max_size", max_size)
if min_size is not None:
pulumi.set(__self__, "min_size", min_size)
if node_associate_public_ip_address is not None:
pulumi.set(__self__, "node_associate_public_ip_address", node_associate_public_ip_address)
if node_public_key is not None:
pulumi.set(__self__, "node_public_key", node_public_key)
if node_root_volume_size is not None:
pulumi.set(__self__, "node_root_volume_size", node_root_volume_size)
if node_security_group is not None:
pulumi.set(__self__, "node_security_group", node_security_group)
if node_subnet_ids is not None:
pulumi.set(__self__, "node_subnet_ids", node_subnet_ids)
if node_user_data is not None:
pulumi.set(__self__, "node_user_data", node_user_data)
if node_user_data_override is not None:
pulumi.set(__self__, "node_user_data_override", node_user_data_override)
if spot_price is not None:
pulumi.set(__self__, "spot_price", spot_price)
if taints is not None:
pulumi.set(__self__, "taints", taints)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="amiId")
def ami_id(self) -> Optional[pulumi.Input[str]]:
"""
The AMI ID to use for the worker nodes.
Defaults to the latest recommended EKS Optimized Linux AMI from the AWS Systems Manager Parameter Store.
Note: `amiId` and `gpu` are mutually exclusive.
See for more details:
- https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html.
"""
return pulumi.get(self, "ami_id")
@ami_id.setter
def ami_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ami_id", value)
@property
@pulumi.getter(name="autoScalingGroupTags")
def auto_scaling_group_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The tags to apply to the NodeGroup's AutoScalingGroup in the CloudFormation Stack.
Per AWS, all stack-level tags, including automatically created tags, and the `cloudFormationTags` option are propagated to resources that AWS CloudFormation supports, including the AutoScalingGroup. See https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-resource-tags.html
Note: Given the inheritance of auto-generated CF tags and `cloudFormationTags`, you should either supply the tag in `autoScalingGroupTags` or `cloudFormationTags`, but not both.
"""
return pulumi.get(self, "auto_scaling_group_tags")
@auto_scaling_group_tags.setter
def auto_scaling_group_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "auto_scaling_group_tags", value)
@property
@pulumi.getter(name="bootstrapExtraArgs")
def bootstrap_extra_args(self) -> Optional[pulumi.Input[str]]:
"""
        Additional args to pass directly to `/etc/eks/bootstrap.sh`. For details on available options, see: https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh. Note that the `--apiserver-endpoint`, `--b64-cluster-ca` and `--kubelet-extra-args` flags are included automatically based on other configuration parameters.
"""
return pulumi.get(self, "bootstrap_extra_args")
@bootstrap_extra_args.setter
def bootstrap_extra_args(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bootstrap_extra_args", value)
@property
@pulumi.getter(name="cloudFormationTags")
def cloud_formation_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The tags to apply to the CloudFormation Stack of the Worker NodeGroup.
Note: Given the inheritance of auto-generated CF tags and `cloudFormationTags`, you should either supply the tag in `autoScalingGroupTags` or `cloudFormationTags`, but not both.
"""
return pulumi.get(self, "cloud_formation_tags")
@cloud_formation_tags.setter
def cloud_formation_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "cloud_formation_tags", value)
@property
@pulumi.getter(name="clusterIngressRule")
def cluster_ingress_rule(self) -> Optional[pulumi.Input['pulumi_aws.ec2.SecurityGroupRule']]:
"""
The ingress rule that gives node group access.
"""
return pulumi.get(self, "cluster_ingress_rule")
@cluster_ingress_rule.setter
def cluster_ingress_rule(self, value: Optional[pulumi.Input['pulumi_aws.ec2.SecurityGroupRule']]):
pulumi.set(self, "cluster_ingress_rule", value)
@property
@pulumi.getter(name="desiredCapacity")
def desired_capacity(self) -> Optional[pulumi.Input[int]]:
"""
The number of worker nodes that should be running in the cluster. Defaults to 2.
"""
return pulumi.get(self, "desired_capacity")
@desired_capacity.setter
def desired_capacity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "desired_capacity", value)
@property
@pulumi.getter(name="encryptRootBlockDevice")
def encrypt_root_block_device(self) -> Optional[pulumi.Input[bool]]:
"""
Encrypt the root block device of the nodes in the node group.
"""
return pulumi.get(self, "encrypt_root_block_device")
@encrypt_root_block_device.setter
def encrypt_root_block_device(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "encrypt_root_block_device", value)
@property
@pulumi.getter(name="extraNodeSecurityGroups")
def extra_node_security_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ec2.SecurityGroup']]]]:
"""
Extra security groups to attach on all nodes in this worker node group.
This additional set of security groups captures any user application rules that will be needed for the nodes.
"""
return pulumi.get(self, "extra_node_security_groups")
@extra_node_security_groups.setter
def extra_node_security_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.ec2.SecurityGroup']]]]):
pulumi.set(self, "extra_node_security_groups", value)
@property
@pulumi.getter
def gpu(self) -> Optional[pulumi.Input[bool]]:
"""
Use the latest recommended EKS Optimized Linux AMI with GPU support for the worker nodes from the AWS Systems Manager Parameter Store.
Defaults to false.
Note: `gpu` and `amiId` are mutually exclusive.
See for more details:
- https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami.html
- https://docs.aws.amazon.com/eks/latest/userguide/retrieve-ami-id.html
"""
return pulumi.get(self, "gpu")
@gpu.setter
def gpu(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "gpu", value)
@property
@pulumi.getter(name="instanceProfile")
def instance_profile(self) -> Optional[pulumi.Input['pulumi_aws.iam.InstanceProfile']]:
"""
        The IAM instance profile to use for the worker nodes in this node group.
"""
return pulumi.get(self, "instance_profile")
@instance_profile.setter
def instance_profile(self, value: Optional[pulumi.Input['pulumi_aws.iam.InstanceProfile']]):
pulumi.set(self, "instance_profile", value)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> Optional[pulumi.Input[str]]:
"""
The instance type to use for the cluster's nodes. Defaults to "t2.medium".
"""
return pulumi.get(self, "instance_type")
@instance_type.setter
def instance_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "instance_type", value)
@property
@pulumi.getter(name="keyName")
def key_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the key pair to use for SSH access to worker nodes.
"""
return pulumi.get(self, "key_name")
@key_name.setter
def key_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "key_name", value)
@property
@pulumi.getter(name="kubeletExtraArgs")
def kubelet_extra_args(self) -> Optional[pulumi.Input[str]]:
"""
        Extra args to pass to the Kubelet. Corresponds to the options passed in the `--kubeletExtraArgs` flag to `/etc/eks/bootstrap.sh`. For example, '--port=10251 --address=0.0.0.0'. Note that the `labels` and `taints` properties will be applied to this list (using `--node-labels` and `--register-with-taints` respectively) after the explicit `kubeletExtraArgs`.
"""
return pulumi.get(self, "kubelet_extra_args")
@kubelet_extra_args.setter
def kubelet_extra_args(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kubelet_extra_args", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        Custom k8s node labels to be attached to each worker node. Adds the given key/value pairs to the `--node-labels` kubelet argument.
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter(name="maxSize")
def max_size(self) -> Optional[pulumi.Input[int]]:
"""
The maximum number of worker nodes running in the cluster. Defaults to 2.
"""
return pulumi.get(self, "max_size")
@max_size.setter
def max_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_size", value)
@property
@pulumi.getter(name="minSize")
def min_size(self) -> Optional[pulumi.Input[int]]:
"""
The minimum number of worker nodes running in the cluster. Defaults to 1.
"""
return pulumi.get(self, "min_size")
@min_size.setter
def min_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min_size", value)
@property
@pulumi.getter(name="nodeAssociatePublicIpAddress")
def node_associate_public_ip_address(self) -> Optional[pulumi.Input[bool]]:
"""
Whether or not to auto-assign public IP addresses on the EKS worker nodes. If this toggle is set to true, the EKS workers will be auto-assigned public IPs. If false, they will not be auto-assigned public IPs.
"""
return pulumi.get(self, "node_associate_public_ip_address")
@node_associate_public_ip_address.setter
def node_associate_public_ip_address(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "node_associate_public_ip_address", value)
@property
@pulumi.getter(name="nodePublicKey")
def node_public_key(self) -> Optional[pulumi.Input[str]]:
"""
Public key material for SSH access to worker nodes. See allowed formats at:
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html
If not provided, no SSH access is enabled on VMs.
"""
return pulumi.get(self, "node_public_key")
@node_public_key.setter
def node_public_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_public_key", value)
@property
@pulumi.getter(name="nodeRootVolumeSize")
def node_root_volume_size(self) -> Optional[pulumi.Input[int]]:
"""
The size in GiB of a cluster node's root volume. Defaults to 20.
"""
return pulumi.get(self, "node_root_volume_size")
@node_root_volume_size.setter
def node_root_volume_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "node_root_volume_size", value)
@property
@pulumi.getter(name="nodeSecurityGroup")
def node_security_group(self) -> Optional[pulumi.Input['pulumi_aws.ec2.SecurityGroup']]:
"""
The security group for the worker node group to communicate with the cluster.
This security group requires specific inbound and outbound rules.
See for more details:
https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
Note: The `nodeSecurityGroup` option and the cluster option`nodeSecurityGroupTags` are mutually exclusive.
"""
return pulumi.get(self, "node_security_group")
@node_security_group.setter
def node_security_group(self, value: Optional[pulumi.Input['pulumi_aws.ec2.SecurityGroup']]):
pulumi.set(self, "node_security_group", value)
@property
@pulumi.getter(name="nodeSubnetIds")
def node_subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The set of subnets to override and use for the worker node group.
        Setting this option overrides which subnets to use for the worker node group, regardless of whether the cluster's `subnetIds` is set, or whether `publicSubnetIds` and/or `privateSubnetIds` were set.
"""
return pulumi.get(self, "node_subnet_ids")
@node_subnet_ids.setter
def node_subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "node_subnet_ids", value)
@property
@pulumi.getter(name="nodeUserData")
def node_user_data(self) -> Optional[pulumi.Input[str]]:
"""
Extra code to run on node startup. This code will run after the AWS EKS bootstrapping code and before the node signals its readiness to the managing CloudFormation stack. This code must be a typical user data script: critically it must begin with an interpreter directive (i.e. a `#!`).
"""
return pulumi.get(self, "node_user_data")
@node_user_data.setter
def node_user_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_user_data", value)
@property
@pulumi.getter(name="nodeUserDataOverride")
def node_user_data_override(self) -> Optional[pulumi.Input[str]]:
"""
User specified code to run on node startup. This code is expected to handle the full AWS EKS bootstrapping code and signal node readiness to the managing CloudFormation stack. This code must be a complete and executable user data script in bash (Linux) or powershell (Windows).
See for more details: https://docs.aws.amazon.com/eks/latest/userguide/worker.html
"""
return pulumi.get(self, "node_user_data_override")
@node_user_data_override.setter
def node_user_data_override(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "node_user_data_override", value)
@property
@pulumi.getter(name="spotPrice")
def spot_price(self) -> Optional[pulumi.Input[str]]:
"""
Bidding price for spot instance. If set, only spot instances will be added as worker node.
"""
return pulumi.get(self, "spot_price")
@spot_price.setter
def spot_price(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "spot_price", value)
@property
@pulumi.getter
def taints(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['TaintArgs']]]]:
"""
Custom k8s node taints to be attached to each worker node. Adds the given taints to the `--register-with-taints` kubelet argument
"""
return pulumi.get(self, "taints")
@taints.setter
def taints(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['TaintArgs']]]]):
pulumi.set(self, "taints", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
"""
Desired Kubernetes master / control plane version. If you do not specify a value, the latest available version is used.
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
@pulumi.input_type
class CoreDataArgs:
def __init__(__self__, *,
cluster: pulumi.Input['pulumi_aws.eks.Cluster'],
cluster_security_group: pulumi.Input['pulumi_aws.ec2.SecurityGroup'],
endpoint: pulumi.Input[str],
instance_roles: pulumi.Input[Sequence[pulumi.Input['pulumi_aws.iam.Role']]],
node_group_options: pulumi.Input['ClusterNodeGroupOptionsArgs'],
provider: pulumi.Input['pulumi_kubernetes.Provider'],
subnet_ids: pulumi.Input[Sequence[pulumi.Input[str]]],
vpc_id: pulumi.Input[str],
aws_provider: Optional[pulumi.Input['pulumi_aws.Provider']] = None,
eks_node_access: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ConfigMap']] = None,
encryption_config: Optional[pulumi.Input['pulumi_aws.eks.ClusterEncryptionConfigArgs']] = None,
fargate_profile: Optional[pulumi.Input['pulumi_aws.eks.FargateProfile']] = None,
kubeconfig: Optional[Any] = None,
node_security_group_tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
oidc_provider: Optional[pulumi.Input['pulumi_aws.iam.OpenIdConnectProvider']] = None,
private_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
public_subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
storage_classes: Optional[pulumi.Input[Mapping[str, pulumi.Input['pulumi_kubernetes.storage.v1.StorageClass']]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
vpc_cni: Optional[pulumi.Input['VpcCni']] = None):
"""
Defines the core set of data associated with an EKS cluster, including the network in which it runs.
"""
pulumi.set(__self__, "cluster", cluster)
pulumi.set(__self__, "cluster_security_group", cluster_security_group)
pulumi.set(__self__, "endpoint", endpoint)
pulumi.set(__self__, "instance_roles", instance_roles)
pulumi.set(__self__, "node_group_options", node_group_options)
pulumi.set(__self__, "provider", provider)
pulumi.set(__self__, "subnet_ids", subnet_ids)
pulumi.set(__self__, "vpc_id", vpc_id)
if aws_provider is not None:
pulumi.set(__self__, "aws_provider", aws_provider)
if eks_node_access is not None:
pulumi.set(__self__, "eks_node_access", eks_node_access)
if encryption_config is not None:
pulumi.set(__self__, "encryption_config", encryption_config)
if fargate_profile is not None:
pulumi.set(__self__, "fargate_profile", fargate_profile)
if kubeconfig is not None:
pulumi.set(__self__, "kubeconfig", kubeconfig)
if node_security_group_tags is not None:
pulumi.set(__self__, "node_security_group_tags", node_security_group_tags)
if oidc_provider is not None:
pulumi.set(__self__, "oidc_provider", oidc_provider)
if private_subnet_ids is not None:
pulumi.set(__self__, "private_subnet_ids", private_subnet_ids)
if public_subnet_ids is not None:
pulumi.set(__self__, "public_subnet_ids", public_subnet_ids)
if storage_classes is not None:
pulumi.set(__self__, "storage_classes", storage_classes)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if vpc_cni is not None:
pulumi.set(__self__, "vpc_cni", vpc_cni)
@property
@pulumi.getter
def cluster(self) -> pulumi.Input['pulumi_aws.eks.Cluster']:
return pulumi.get(self, "cluster")
@cluster.setter
def cluster(self, value: pulumi.Input['pulumi_aws.eks.Cluster']):
pulumi.set(self, "cluster", value)
@property
@pulumi.getter(name="clusterSecurityGroup")
def cluster_security_group(self) -> pulumi.Input['pulumi_aws.ec2.SecurityGroup']:
return pulumi.get(self, "cluster_security_group")
@cluster_security_group.setter
def cluster_security_group(self, value: pulumi.Input['pulumi_aws.ec2.SecurityGroup']):
pulumi.set(self, "cluster_security_group", value)
@property
@pulumi.getter
def endpoint(self) -> pulumi.Input[str]:
return pulumi.get(self, "endpoint")
@endpoint.setter
def endpoint(self, value: pulumi.Input[str]):
pulumi.set(self, "endpoint", value)
@property
@pulumi.getter(name="instanceRoles")
def instance_roles(self) -> pulumi.Input[Sequence[pulumi.Input['pulumi_aws.iam.Role']]]:
return pulumi.get(self, "instance_roles")
@instance_roles.setter
def instance_roles(self, value: pulumi.Input[Sequence[pulumi.Input['pulumi_aws.iam.Role']]]):
pulumi.set(self, "instance_roles", value)
@property
@pulumi.getter(name="nodeGroupOptions")
def node_group_options(self) -> pulumi.Input['ClusterNodeGroupOptionsArgs']:
return pulumi.get(self, "node_group_options")
@node_group_options.setter
def node_group_options(self, value: pulumi.Input['ClusterNodeGroupOptionsArgs']):
pulumi.set(self, "node_group_options", value)
@property
@pulumi.getter
def provider(self) -> pulumi.Input['pulumi_kubernetes.Provider']:
return pulumi.get(self, "provider")
@provider.setter
def provider(self, value: pulumi.Input['pulumi_kubernetes.Provider']):
pulumi.set(self, "provider", value)
@property
@pulumi.getter(name="subnetIds")
def subnet_ids(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
return pulumi.get(self, "subnet_ids")
@subnet_ids.setter
def subnet_ids(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "subnet_ids", value)
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "vpc_id")
@vpc_id.setter
def vpc_id(self, value: pulumi.Input[str]):
pulumi.set(self, "vpc_id", value)
@property
@pulumi.getter(name="awsProvider")
def aws_provider(self) -> Optional[pulumi.Input['pulumi_aws.Provider']]:
return pulumi.get(self, "aws_provider")
@aws_provider.setter
def aws_provider(self, value: Optional[pulumi.Input['pulumi_aws.Provider']]):
pulumi.set(self, "aws_provider", value)
@property
@pulumi.getter(name="eksNodeAccess")
def eks_node_access(self) -> Optional[pulumi.Input['pulumi_kubernetes.core.v1.ConfigMap']]:
return pulumi.get(self, "eks_node_access")
@eks_node_access.setter
def eks_node_access(self, value: Optional[pulumi.Input['pulumi_kubernetes.core.v1.ConfigMap']]):
pulumi.set(self, "eks_node_access", value)
@property
@pulumi.getter(name="encryptionConfig")
def encryption_config(self) -> Optional[pulumi.Input['pulumi_aws.eks.ClusterEncryptionConfigArgs']]:
return pulumi.get(self, "encryption_config")
@encryption_config.setter
def encryption_config(self, value: Optional[pulumi.Input['pulumi_aws.eks.ClusterEncryptionConfigArgs']]):
pulumi.set(self, "encryption_config", value)
@property
@pulumi.getter(name="fargateProfile")
def fargate_profile(self) -> Optional[pulumi.Input['pulumi_aws.eks.FargateProfile']]:
return pulumi.get(self, "fargate_profile")
@fargate_profile.setter
def fargate_profile(self, value: Optional[pulumi.Input['pulumi_aws.eks.FargateProfile']]):
pulumi.set(self, "fargate_profile", value)
@property
@pulumi.getter
def kubeconfig(self) -> Optional[Any]:
return pulumi.get(self, "kubeconfig")
@kubeconfig.setter
def kubeconfig(self, value: Optional[Any]):
pulumi.set(self, "kubeconfig", value)
@property
@pulumi.getter(name="nodeSecurityGroupTags")
def node_security_group_tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "node_security_group_tags")
@node_security_group_tags.setter
def node_security_group_tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "node_security_group_tags", value)
@property
@pulumi.getter(name="oidcProvider")
def oidc_provider(self) -> Optional[pulumi.Input['pulumi_aws.iam.OpenIdConnectProvider']]:
return pulumi.get(self, "oidc_provider")
@oidc_provider.setter
def oidc_provider(self, value: Optional[pulumi.Input['pulumi_aws.iam.OpenIdConnectProvider']]):
pulumi.set(self, "oidc_provider", value)
@property
@pulumi.getter(name="privateSubnetIds")
def private_subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "private_subnet_ids")
@private_subnet_ids.setter
def private_subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "private_subnet_ids", value)
@property
@pulumi.getter(name="publicSubnetIds")
def public_subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
return pulumi.get(self, "public_subnet_ids")
@public_subnet_ids.setter
def public_subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "public_subnet_ids", value)
@property
@pulumi.getter(name="storageClasses")
def storage_classes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input['pulumi_kubernetes.storage.v1.StorageClass']]]]:
return pulumi.get(self, "storage_classes")
@storage_classes.setter
def storage_classes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input['pulumi_kubernetes.storage.v1.StorageClass']]]]):
pulumi.set(self, "storage_classes", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="vpcCni")
def vpc_cni(self) -> Optional[pulumi.Input['VpcCni']]:
return pulumi.get(self, "vpc_cni")
@vpc_cni.setter
def vpc_cni(self, value: Optional[pulumi.Input['VpcCni']]):
pulumi.set(self, "vpc_cni", value)
@pulumi.input_type
class CreationRoleProviderArgs:
def __init__(__self__, *,
provider: pulumi.Input['pulumi_aws.Provider'],
role: pulumi.Input['pulumi_aws.iam.Role']):
"""
Contains the AWS Role and Provider necessary to override the `[system:master]` entity ARN. This is an optional argument used when creating `Cluster`. Read more: https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html
"""
pulumi.set(__self__, "provider", provider)
pulumi.set(__self__, "role", role)
@property
@pulumi.getter
def provider(self) -> pulumi.Input['pulumi_aws.Provider']:
return pulumi.get(self, "provider")
@provider.setter
def provider(self, value: pulumi.Input['pulumi_aws.Provider']):
pulumi.set(self, "provider", value)
@property
@pulumi.getter
def role(self) -> pulumi.Input['pulumi_aws.iam.Role']:
return pulumi.get(self, "role")
@role.setter
def role(self, value: pulumi.Input['pulumi_aws.iam.Role']):
pulumi.set(self, "role", value)
@pulumi.input_type
class FargateProfileArgs:
def __init__(__self__, *,
pod_execution_role_arn: Optional[pulumi.Input[str]] = None,
selectors: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.FargateProfileSelectorArgs']]]] = None,
subnet_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
Defines how Kubernetes pods are executed in Fargate. See aws.eks.FargateProfileArgs for reference.
:param pulumi.Input[str] pod_execution_role_arn: Specify a custom role to use for executing pods in Fargate. Defaults to creating a new role with the `arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy` policy attached.
:param pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.FargateProfileSelectorArgs']]] selectors: Specify the namespace and label selectors to use for launching pods into Fargate.
:param pulumi.Input[Sequence[pulumi.Input[str]]] subnet_ids: Specify the subnets in which to execute Fargate tasks for pods. Defaults to the private subnets associated with the cluster.
"""
if pod_execution_role_arn is not None:
pulumi.set(__self__, "pod_execution_role_arn", pod_execution_role_arn)
if selectors is not None:
pulumi.set(__self__, "selectors", selectors)
if subnet_ids is not None:
pulumi.set(__self__, "subnet_ids", subnet_ids)
@property
@pulumi.getter(name="podExecutionRoleArn")
def pod_execution_role_arn(self) -> Optional[pulumi.Input[str]]:
"""
Specify a custom role to use for executing pods in Fargate. Defaults to creating a new role with the `arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy` policy attached.
"""
return pulumi.get(self, "pod_execution_role_arn")
@pod_execution_role_arn.setter
def pod_execution_role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "pod_execution_role_arn", value)
@property
@pulumi.getter
def selectors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.FargateProfileSelectorArgs']]]]:
"""
Specify the namespace and label selectors to use for launching pods into Fargate.
"""
return pulumi.get(self, "selectors")
@selectors.setter
def selectors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['pulumi_aws.eks.FargateProfileSelectorArgs']]]]):
pulumi.set(self, "selectors", value)
@property
@pulumi.getter(name="subnetIds")
def subnet_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Specify the subnets in which to execute Fargate tasks for pods. Defaults to the private subnets associated with the cluster.
"""
return pulumi.get(self, "subnet_ids")
@subnet_ids.setter
def subnet_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "subnet_ids", value)
@pulumi.input_type
class KubeconfigOptionsArgs:
def __init__(__self__, *,
profile_name: Optional[pulumi.Input[str]] = None,
role_arn: Optional[pulumi.Input[str]] = None):
"""
Represents the AWS credentials to scope a given kubeconfig when using a non-default credential chain.
The options can be used independently, or additively.
A scoped kubeconfig is necessary for certain auth scenarios. For example:
1. Assume a role on the default account caller,
2. Use an AWS creds profile instead of the default account caller,
        3. Use an AWS creds profile instead of the default account caller,
and then assume a given role on the profile. This scenario is also
possible by only using a profile, iff the profile includes a role to
assume in its settings.
See for more details:
- https://docs.aws.amazon.com/eks/latest/userguide/create-kubeconfig.html
- https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-role.html
- https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html
:param pulumi.Input[str] profile_name: AWS credential profile name to always use instead of the default AWS credential provider chain.
The profile is passed to kubeconfig as an authentication environment setting.
:param pulumi.Input[str] role_arn: Role ARN to assume instead of the default AWS credential provider chain.
The role is passed to kubeconfig as an authentication exec argument.
"""
if profile_name is not None:
pulumi.set(__self__, "profile_name", profile_name)
if role_arn is not None:
pulumi.set(__self__, "role_arn", role_arn)
@property
@pulumi.getter(name="profileName")
def profile_name(self) -> Optional[pulumi.Input[str]]:
"""
AWS credential profile name to always use instead of the default AWS credential provider chain.
The profile is passed to kubeconfig as an authentication environment setting.
"""
return pulumi.get(self, "profile_name")
@profile_name.setter
def profile_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "profile_name", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[pulumi.Input[str]]:
"""
Role ARN to assume instead of the default AWS credential provider chain.
The role is passed to kubeconfig as an authentication exec argument.
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_arn", value)
@pulumi.input_type
class RoleMappingArgs:
def __init__(__self__, *,
groups: pulumi.Input[Sequence[pulumi.Input[str]]],
role_arn: pulumi.Input[str],
username: pulumi.Input[str]):
"""
Describes a mapping from an AWS IAM role to a Kubernetes user and groups.
:param pulumi.Input[Sequence[pulumi.Input[str]]] groups: A list of groups within Kubernetes to which the role is mapped.
:param pulumi.Input[str] role_arn: The ARN of the IAM role to add.
:param pulumi.Input[str] username: The user name within Kubernetes to map to the IAM role. By default, the user name is the ARN of the IAM role.
"""
pulumi.set(__self__, "groups", groups)
pulumi.set(__self__, "role_arn", role_arn)
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def groups(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
A list of groups within Kubernetes to which the role is mapped.
"""
return pulumi.get(self, "groups")
@groups.setter
def groups(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "groups", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Input[str]:
"""
The ARN of the IAM role to add.
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter
def username(self) -> pulumi.Input[str]:
"""
The user name within Kubernetes to map to the IAM role. By default, the user name is the ARN of the IAM role.
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: pulumi.Input[str]):
pulumi.set(self, "username", value)
@pulumi.input_type
class StorageClassArgs:
def __init__(__self__, *,
type: pulumi.Input[str],
allow_volume_expansion: Optional[pulumi.Input[bool]] = None,
default: Optional[pulumi.Input[bool]] = None,
encrypted: Optional[pulumi.Input[bool]] = None,
iops_per_gb: Optional[pulumi.Input[int]] = None,
kms_key_id: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']] = None,
mount_options: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
reclaim_policy: Optional[pulumi.Input[str]] = None,
volume_binding_mode: Optional[pulumi.Input[str]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
        StorageClass describes the inputs to a single Kubernetes StorageClass provisioned by AWS. Any number of storage classes can be added to a cluster at creation time. One of these storage classes may be configured as the default storage class for the cluster.
:param pulumi.Input[str] type: The EBS volume type.
        :param pulumi.Input[bool] allow_volume_expansion: AllowVolumeExpansion shows whether the storage class allows volume expansion.
:param pulumi.Input[bool] default: True if this storage class should be a default storage class for the cluster.
Note: As of Kubernetes v1.11+ on EKS, a default `gp2` storage class will always be created automatically for the cluster by the EKS service. See https://docs.aws.amazon.com/eks/latest/userguide/storage-classes.html
Please note that at most one storage class can be marked as default. If two or more of them are marked as default, a PersistentVolumeClaim without `storageClassName` explicitly specified cannot be created. See: https://kubernetes.io/docs/tasks/administer-cluster/change-default-storage-class/#changing-the-default-storageclass
:param pulumi.Input[bool] encrypted: Denotes whether the EBS volume should be encrypted.
:param pulumi.Input[int] iops_per_gb: I/O operations per second per GiB for "io1" volumes. The AWS volume plugin multiplies this with the size of a requested volume to compute IOPS of the volume and caps the result at 20,000 IOPS.
:param pulumi.Input[str] kms_key_id: The full Amazon Resource Name of the key to use when encrypting the volume. If none is supplied but encrypted is true, a key is generated by AWS.
:param pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
:param pulumi.Input[Sequence[pulumi.Input[str]]] mount_options: Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. ["ro", "soft"]. Not validated - mount of the PVs will simply fail if one is invalid.
:param pulumi.Input[str] reclaim_policy: Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.
:param pulumi.Input[str] volume_binding_mode: VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is alpha-level and is only honored by servers that enable the VolumeScheduling feature.
:param pulumi.Input[Sequence[pulumi.Input[str]]] zones: The AWS zone or zones for the EBS volume. If zones is not specified, volumes are generally round-robin-ed across all active zones where Kubernetes cluster has a node. zone and zones parameters must not be used at the same time.
"""
pulumi.set(__self__, "type", type)
if allow_volume_expansion is not None:
pulumi.set(__self__, "allow_volume_expansion", allow_volume_expansion)
if default is not None:
pulumi.set(__self__, "default", default)
if encrypted is not None:
pulumi.set(__self__, "encrypted", encrypted)
if iops_per_gb is not None:
pulumi.set(__self__, "iops_per_gb", iops_per_gb)
if kms_key_id is not None:
pulumi.set(__self__, "kms_key_id", kms_key_id)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if mount_options is not None:
pulumi.set(__self__, "mount_options", mount_options)
if reclaim_policy is not None:
pulumi.set(__self__, "reclaim_policy", reclaim_policy)
if volume_binding_mode is not None:
pulumi.set(__self__, "volume_binding_mode", volume_binding_mode)
if zones is not None:
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
The EBS volume type.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="allowVolumeExpansion")
def allow_volume_expansion(self) -> Optional[pulumi.Input[bool]]:
"""
        AllowVolumeExpansion shows whether the storage class allows volume expansion.
"""
return pulumi.get(self, "allow_volume_expansion")
@allow_volume_expansion.setter
def allow_volume_expansion(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_volume_expansion", value)
@property
@pulumi.getter
def default(self) -> Optional[pulumi.Input[bool]]:
"""
True if this storage class should be a default storage class for the cluster.
Note: As of Kubernetes v1.11+ on EKS, a default `gp2` storage class will always be created automatically for the cluster by the EKS service. See https://docs.aws.amazon.com/eks/latest/userguide/storage-classes.html
Please note that at most one storage class can be marked as default. If two or more of them are marked as default, a PersistentVolumeClaim without `storageClassName` explicitly specified cannot be created. See: https://kubernetes.io/docs/tasks/administer-cluster/change-default-storage-class/#changing-the-default-storageclass
"""
return pulumi.get(self, "default")
@default.setter
def default(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "default", value)
@property
@pulumi.getter
def encrypted(self) -> Optional[pulumi.Input[bool]]:
"""
Denotes whether the EBS volume should be encrypted.
"""
return pulumi.get(self, "encrypted")
@encrypted.setter
def encrypted(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "encrypted", value)
@property
@pulumi.getter(name="iopsPerGb")
def iops_per_gb(self) -> Optional[pulumi.Input[int]]:
"""
I/O operations per second per GiB for "io1" volumes. The AWS volume plugin multiplies this with the size of a requested volume to compute IOPS of the volume and caps the result at 20,000 IOPS.
"""
return pulumi.get(self, "iops_per_gb")
@iops_per_gb.setter
def iops_per_gb(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "iops_per_gb", value)
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> Optional[pulumi.Input[str]]:
"""
The full Amazon Resource Name of the key to use when encrypting the volume. If none is supplied but encrypted is true, a key is generated by AWS.
"""
return pulumi.get(self, "kms_key_id")
@kms_key_id.setter
def kms_key_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kms_key_id", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]:
"""
Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input['pulumi_kubernetes.meta.v1.ObjectMetaArgs']]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter(name="mountOptions")
def mount_options(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. ["ro", "soft"]. Not validated - mount of the PVs will simply fail if one is invalid.
"""
return pulumi.get(self, "mount_options")
@mount_options.setter
def mount_options(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "mount_options", value)
@property
@pulumi.getter(name="reclaimPolicy")
def reclaim_policy(self) -> Optional[pulumi.Input[str]]:
"""
Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.
"""
return pulumi.get(self, "reclaim_policy")
@reclaim_policy.setter
def reclaim_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reclaim_policy", value)
@property
@pulumi.getter(name="volumeBindingMode")
def volume_binding_mode(self) -> Optional[pulumi.Input[str]]:
"""
VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is alpha-level and is only honored by servers that enable the VolumeScheduling feature.
"""
return pulumi.get(self, "volume_binding_mode")
@volume_binding_mode.setter
def volume_binding_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "volume_binding_mode", value)
@property
@pulumi.getter
def zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The AWS zone or zones for the EBS volume. If zones is not specified, volumes are generally round-robin-ed across all active zones where Kubernetes cluster has a node. zone and zones parameters must not be used at the same time.
"""
return pulumi.get(self, "zones")
@zones.setter
def zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "zones", value)
@pulumi.input_type
class TaintArgs:
def __init__(__self__, *,
effect: pulumi.Input[str],
value: pulumi.Input[str]):
"""
Represents a Kubernetes `taint` to apply to all Nodes in a NodeGroup. See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/.
:param pulumi.Input[str] effect: The effect of the taint.
:param pulumi.Input[str] value: The value of the taint.
"""
pulumi.set(__self__, "effect", effect)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def effect(self) -> pulumi.Input[str]:
"""
The effect of the taint.
"""
return pulumi.get(self, "effect")
@effect.setter
def effect(self, value: pulumi.Input[str]):
pulumi.set(self, "effect", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The value of the taint.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class UserMappingArgs:
def __init__(__self__, *,
groups: pulumi.Input[Sequence[pulumi.Input[str]]],
user_arn: pulumi.Input[str],
username: pulumi.Input[str]):
"""
Describes a mapping from an AWS IAM user to a Kubernetes user and groups.
:param pulumi.Input[Sequence[pulumi.Input[str]]] groups: A list of groups within Kubernetes to which the user is mapped to.
:param pulumi.Input[str] user_arn: The ARN of the IAM user to add.
:param pulumi.Input[str] username: The user name within Kubernetes to map to the IAM user. By default, the user name is the ARN of the IAM user.
"""
pulumi.set(__self__, "groups", groups)
pulumi.set(__self__, "user_arn", user_arn)
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def groups(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
A list of groups within Kubernetes to which the user is mapped to.
"""
return pulumi.get(self, "groups")
@groups.setter
def groups(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "groups", value)
@property
@pulumi.getter(name="userArn")
def user_arn(self) -> pulumi.Input[str]:
"""
The ARN of the IAM user to add.
"""
return pulumi.get(self, "user_arn")
@user_arn.setter
def user_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "user_arn", value)
@property
@pulumi.getter
def username(self) -> pulumi.Input[str]:
"""
The user name within Kubernetes to map to the IAM user. By default, the user name is the ARN of the IAM user.
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: pulumi.Input[str]):
pulumi.set(self, "username", value)
@pulumi.input_type
class VpcCniOptionsArgs:
def __init__(__self__, *,
cni_configure_rpfilter: Optional[pulumi.Input[bool]] = None,
cni_custom_network_cfg: Optional[pulumi.Input[bool]] = None,
cni_external_snat: Optional[pulumi.Input[bool]] = None,
custom_network_config: Optional[pulumi.Input[bool]] = None,
disable_tcp_early_demux: Optional[pulumi.Input[bool]] = None,
enable_pod_eni: Optional[pulumi.Input[bool]] = None,
enable_prefix_delegation: Optional[pulumi.Input[bool]] = None,
eni_config_label_def: Optional[pulumi.Input[str]] = None,
eni_mtu: Optional[pulumi.Input[int]] = None,
external_snat: Optional[pulumi.Input[bool]] = None,
image: Optional[pulumi.Input[str]] = None,
init_image: Optional[pulumi.Input[str]] = None,
log_file: Optional[pulumi.Input[str]] = None,
log_level: Optional[pulumi.Input[str]] = None,
node_port_support: Optional[pulumi.Input[bool]] = None,
security_context_privileged: Optional[pulumi.Input[bool]] = None,
veth_prefix: Optional[pulumi.Input[str]] = None,
warm_eni_target: Optional[pulumi.Input[int]] = None,
warm_ip_target: Optional[pulumi.Input[int]] = None,
warm_prefix_target: Optional[pulumi.Input[int]] = None):
"""
Describes the configuration options available for the Amazon VPC CNI plugin for Kubernetes.
:param pulumi.Input[bool] cni_configure_rpfilter: Specifies whether ipamd should configure rp filter for primary interface. Default is `false`.
:param pulumi.Input[bool] cni_custom_network_cfg: Specifies that your pods may use subnets and security groups that are independent of your worker node's VPC configuration. By default, pods share the same subnet and security groups as the worker node's primary interface. Setting this variable to true causes ipamd to use the security groups and VPC subnet in a worker node's ENIConfig for elastic network interface allocation. You must create an ENIConfig custom resource for each subnet that your pods will reside in, and then annotate or label each worker node to use a specific ENIConfig (multiple worker nodes can be annotated or labelled with the same ENIConfig). Worker nodes can only be annotated with a single ENIConfig at a time, and the subnet in the ENIConfig must belong to the same Availability Zone that the worker node resides in. For more information, see CNI Custom Networking in the Amazon EKS User Guide. Default is `false`
:param pulumi.Input[bool] cni_external_snat: Specifies whether an external NAT gateway should be used to provide SNAT of secondary ENI IP addresses. If set to true, the SNAT iptables rule and off-VPC IP rule are not applied, and these rules are removed if they have already been applied. Disable SNAT if you need to allow inbound communication to your pods from external VPNs, direct connections, and external VPCs, and your pods do not need to access the Internet directly via an Internet Gateway. However, your nodes must be running in a private subnet and connected to the internet through an AWS NAT Gateway or another external NAT device. Default is `false`
:param pulumi.Input[bool] custom_network_config: Specifies that your pods may use subnets and security groups (within the same VPC as your control plane resources) that are independent of your cluster's `resourcesVpcConfig`.
Defaults to false.
:param pulumi.Input[bool] disable_tcp_early_demux: Allows the kubelet's liveness and readiness probes to connect via TCP when pod ENI is enabled. This will slightly increase local TCP connection latency.
:param pulumi.Input[bool] enable_pod_eni: Specifies whether to allow IPAMD to add the `vpc.amazonaws.com/has-trunk-attached` label to the node if the instance has capacity to attach an additional ENI. Default is `false`. If using liveness and readiness probes, you will also need to disable TCP early demux.
:param pulumi.Input[bool] enable_prefix_delegation: IPAMD will start allocating (/28) prefixes to the ENIs with ENABLE_PREFIX_DELEGATION set to true.
        :param pulumi.Input[str] eni_config_label_def: Specifies the ENI_CONFIG_LABEL_DEF environment variable value for worker nodes. This is used to tell Kubernetes to automatically apply the ENIConfig for each Availability Zone.
               Ref: https://docs.aws.amazon.com/eks/latest/userguide/cni-custom-network.html (step 5(c))
:param pulumi.Input[int] eni_mtu: Used to configure the MTU size for attached ENIs. The valid range is from 576 to 9001.
Defaults to 9001.
:param pulumi.Input[bool] external_snat: Specifies whether an external NAT gateway should be used to provide SNAT of secondary ENI IP addresses. If set to true, the SNAT iptables rule and off-VPC IP rule are not applied, and these rules are removed if they have already been applied.
Defaults to false.
:param pulumi.Input[str] image: Specifies the container image to use in the AWS CNI cluster DaemonSet.
Defaults to the official AWS CNI image in ECR.
:param pulumi.Input[str] init_image: Specifies the init container image to use in the AWS CNI cluster DaemonSet.
Defaults to the official AWS CNI init container image in ECR.
:param pulumi.Input[str] log_file: Specifies the file path used for logs.
Defaults to "stdout" to emit Pod logs for `kubectl logs`.
:param pulumi.Input[str] log_level: Specifies the log level used for logs.
Defaults to "DEBUG"
Valid values: "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
:param pulumi.Input[bool] node_port_support: Specifies whether NodePort services are enabled on a worker node's primary network interface. This requires additional iptables rules and that the kernel's reverse path filter on the primary interface is set to loose.
Defaults to true.
        :param pulumi.Input[bool] security_context_privileged: Pass privilege to the container's securityContext. This is required when SELinux is enabled. This value will not be passed to the CNI config by default.
:param pulumi.Input[str] veth_prefix: Specifies the veth prefix used to generate the host-side veth device name for the CNI.
The prefix can be at most 4 characters long.
Defaults to "eni".
:param pulumi.Input[int] warm_eni_target: Specifies the number of free elastic network interfaces (and all of their available IP addresses) that the ipamD daemon should attempt to keep available for pod assignment on the node.
Defaults to 1.
:param pulumi.Input[int] warm_ip_target: Specifies the number of free IP addresses that the ipamD daemon should attempt to keep available for pod assignment on the node.
:param pulumi.Input[int] warm_prefix_target: WARM_PREFIX_TARGET will allocate one full (/28) prefix even if a single IP is consumed with the existing prefix. Ref: https://github.com/aws/amazon-vpc-cni-k8s/blob/master/docs/prefix-and-ip-target.md
"""
if cni_configure_rpfilter is not None:
pulumi.set(__self__, "cni_configure_rpfilter", cni_configure_rpfilter)
if cni_custom_network_cfg is not None:
pulumi.set(__self__, "cni_custom_network_cfg", cni_custom_network_cfg)
if cni_external_snat is not None:
pulumi.set(__self__, "cni_external_snat", cni_external_snat)
if custom_network_config is not None:
pulumi.set(__self__, "custom_network_config", custom_network_config)
if disable_tcp_early_demux is not None:
pulumi.set(__self__, "disable_tcp_early_demux", disable_tcp_early_demux)
if enable_pod_eni is not None:
pulumi.set(__self__, "enable_pod_eni", enable_pod_eni)
if enable_prefix_delegation is not None:
pulumi.set(__self__, "enable_prefix_delegation", enable_prefix_delegation)
if eni_config_label_def is not None:
pulumi.set(__self__, "eni_config_label_def", eni_config_label_def)
if eni_mtu is not None:
pulumi.set(__self__, "eni_mtu", eni_mtu)
if external_snat is not None:
pulumi.set(__self__, "external_snat", external_snat)
if image is not None:
pulumi.set(__self__, "image", image)
if init_image is not None:
pulumi.set(__self__, "init_image", init_image)
if log_file is not None:
pulumi.set(__self__, "log_file", log_file)
if log_level is not None:
pulumi.set(__self__, "log_level", log_level)
if node_port_support is not None:
pulumi.set(__self__, "node_port_support", node_port_support)
if security_context_privileged is not None:
pulumi.set(__self__, "security_context_privileged", security_context_privileged)
if veth_prefix is not None:
pulumi.set(__self__, "veth_prefix", veth_prefix)
if warm_eni_target is not None:
pulumi.set(__self__, "warm_eni_target", warm_eni_target)
if warm_ip_target is not None:
pulumi.set(__self__, "warm_ip_target", warm_ip_target)
if warm_prefix_target is not None:
pulumi.set(__self__, "warm_prefix_target", warm_prefix_target)
@property
@pulumi.getter(name="cniConfigureRpfilter")
def cni_configure_rpfilter(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether ipamd should configure rp filter for primary interface. Default is `false`.
"""
return pulumi.get(self, "cni_configure_rpfilter")
@cni_configure_rpfilter.setter
def cni_configure_rpfilter(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "cni_configure_rpfilter", value)
@property
@pulumi.getter(name="cniCustomNetworkCfg")
def cni_custom_network_cfg(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies that your pods may use subnets and security groups that are independent of your worker node's VPC configuration. By default, pods share the same subnet and security groups as the worker node's primary interface. Setting this variable to true causes ipamd to use the security groups and VPC subnet in a worker node's ENIConfig for elastic network interface allocation. You must create an ENIConfig custom resource for each subnet that your pods will reside in, and then annotate or label each worker node to use a specific ENIConfig (multiple worker nodes can be annotated or labelled with the same ENIConfig). Worker nodes can only be annotated with a single ENIConfig at a time, and the subnet in the ENIConfig must belong to the same Availability Zone that the worker node resides in. For more information, see CNI Custom Networking in the Amazon EKS User Guide. Default is `false`
"""
return pulumi.get(self, "cni_custom_network_cfg")
@cni_custom_network_cfg.setter
def cni_custom_network_cfg(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "cni_custom_network_cfg", value)
@property
@pulumi.getter(name="cniExternalSnat")
def cni_external_snat(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether an external NAT gateway should be used to provide SNAT of secondary ENI IP addresses. If set to true, the SNAT iptables rule and off-VPC IP rule are not applied, and these rules are removed if they have already been applied. Disable SNAT if you need to allow inbound communication to your pods from external VPNs, direct connections, and external VPCs, and your pods do not need to access the Internet directly via an Internet Gateway. However, your nodes must be running in a private subnet and connected to the internet through an AWS NAT Gateway or another external NAT device. Default is `false`
"""
return pulumi.get(self, "cni_external_snat")
@cni_external_snat.setter
def cni_external_snat(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "cni_external_snat", value)
@property
@pulumi.getter(name="customNetworkConfig")
def custom_network_config(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies that your pods may use subnets and security groups (within the same VPC as your control plane resources) that are independent of your cluster's `resourcesVpcConfig`.
Defaults to false.
"""
return pulumi.get(self, "custom_network_config")
@custom_network_config.setter
def custom_network_config(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "custom_network_config", value)
@property
@pulumi.getter(name="disableTcpEarlyDemux")
def disable_tcp_early_demux(self) -> Optional[pulumi.Input[bool]]:
"""
Allows the kubelet's liveness and readiness probes to connect via TCP when pod ENI is enabled. This will slightly increase local TCP connection latency.
"""
return pulumi.get(self, "disable_tcp_early_demux")
@disable_tcp_early_demux.setter
def disable_tcp_early_demux(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "disable_tcp_early_demux", value)
@property
@pulumi.getter(name="enablePodEni")
def enable_pod_eni(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether to allow IPAMD to add the `vpc.amazonaws.com/has-trunk-attached` label to the node if the instance has capacity to attach an additional ENI. Default is `false`. If using liveness and readiness probes, you will also need to disable TCP early demux.
"""
return pulumi.get(self, "enable_pod_eni")
@enable_pod_eni.setter
def enable_pod_eni(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_pod_eni", value)
@property
@pulumi.getter(name="enablePrefixDelegation")
def enable_prefix_delegation(self) -> Optional[pulumi.Input[bool]]:
"""
IPAMD will start allocating (/28) prefixes to the ENIs with ENABLE_PREFIX_DELEGATION set to true.
"""
return pulumi.get(self, "enable_prefix_delegation")
@enable_prefix_delegation.setter
def enable_prefix_delegation(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_prefix_delegation", value)
@property
@pulumi.getter(name="eniConfigLabelDef")
def eni_config_label_def(self) -> Optional[pulumi.Input[str]]:
"""
        Specifies the ENI_CONFIG_LABEL_DEF environment variable value for worker nodes. This is used to tell Kubernetes to automatically apply the ENIConfig for each Availability Zone.
        Ref: https://docs.aws.amazon.com/eks/latest/userguide/cni-custom-network.html (step 5(c))
"""
return pulumi.get(self, "eni_config_label_def")
@eni_config_label_def.setter
def eni_config_label_def(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "eni_config_label_def", value)
@property
@pulumi.getter(name="eniMtu")
def eni_mtu(self) -> Optional[pulumi.Input[int]]:
"""
Used to configure the MTU size for attached ENIs. The valid range is from 576 to 9001.
Defaults to 9001.
"""
return pulumi.get(self, "eni_mtu")
@eni_mtu.setter
def eni_mtu(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "eni_mtu", value)
@property
@pulumi.getter(name="externalSnat")
def external_snat(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether an external NAT gateway should be used to provide SNAT of secondary ENI IP addresses. If set to true, the SNAT iptables rule and off-VPC IP rule are not applied, and these rules are removed if they have already been applied.
Defaults to false.
"""
return pulumi.get(self, "external_snat")
@external_snat.setter
def external_snat(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "external_snat", value)
@property
@pulumi.getter
def image(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the container image to use in the AWS CNI cluster DaemonSet.
Defaults to the official AWS CNI image in ECR.
"""
return pulumi.get(self, "image")
@image.setter
def image(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "image", value)
@property
@pulumi.getter(name="initImage")
def init_image(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the init container image to use in the AWS CNI cluster DaemonSet.
Defaults to the official AWS CNI init container image in ECR.
"""
return pulumi.get(self, "init_image")
@init_image.setter
def init_image(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "init_image", value)
@property
@pulumi.getter(name="logFile")
def log_file(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the file path used for logs.
Defaults to "stdout" to emit Pod logs for `kubectl logs`.
"""
return pulumi.get(self, "log_file")
@log_file.setter
def log_file(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "log_file", value)
@property
@pulumi.getter(name="logLevel")
def log_level(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the log level used for logs.
Defaults to "DEBUG"
Valid values: "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
"""
return pulumi.get(self, "log_level")
@log_level.setter
def log_level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "log_level", value)
@property
@pulumi.getter(name="nodePortSupport")
def node_port_support(self) -> Optional[pulumi.Input[bool]]:
"""
Specifies whether NodePort services are enabled on a worker node's primary network interface. This requires additional iptables rules and that the kernel's reverse path filter on the primary interface is set to loose.
Defaults to true.
"""
return pulumi.get(self, "node_port_support")
@node_port_support.setter
def node_port_support(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "node_port_support", value)
@property
@pulumi.getter(name="securityContextPrivileged")
def security_context_privileged(self) -> Optional[pulumi.Input[bool]]:
"""
        Pass privilege to the container's securityContext. This is required when SELinux is enabled. This value will not be passed to the CNI config by default.
"""
return pulumi.get(self, "security_context_privileged")
@security_context_privileged.setter
def security_context_privileged(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "security_context_privileged", value)
@property
@pulumi.getter(name="vethPrefix")
def veth_prefix(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the veth prefix used to generate the host-side veth device name for the CNI.
The prefix can be at most 4 characters long.
Defaults to "eni".
"""
return pulumi.get(self, "veth_prefix")
@veth_prefix.setter
def veth_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "veth_prefix", value)
@property
@pulumi.getter(name="warmEniTarget")
def warm_eni_target(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the number of free elastic network interfaces (and all of their available IP addresses) that the ipamD daemon should attempt to keep available for pod assignment on the node.
Defaults to 1.
"""
return pulumi.get(self, "warm_eni_target")
@warm_eni_target.setter
def warm_eni_target(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "warm_eni_target", value)
@property
@pulumi.getter(name="warmIpTarget")
def warm_ip_target(self) -> Optional[pulumi.Input[int]]:
"""
Specifies the number of free IP addresses that the ipamD daemon should attempt to keep available for pod assignment on the node.
"""
return pulumi.get(self, "warm_ip_target")
@warm_ip_target.setter
def warm_ip_target(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "warm_ip_target", value)
@property
@pulumi.getter(name="warmPrefixTarget")
def warm_prefix_target(self) -> Optional[pulumi.Input[int]]:
"""
WARM_PREFIX_TARGET will allocate one full (/28) prefix even if a single IP is consumed with the existing prefix. Ref: https://github.com/aws/amazon-vpc-cni-k8s/blob/master/docs/prefix-and-ip-target.md
"""
return pulumi.get(self, "warm_prefix_target")
@warm_prefix_target.setter
def warm_prefix_target(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "warm_prefix_target", value)
|
StarcoderdataPython
|
5000768
|
<filename>ltr/admin/multigpu.py<gh_stars>1-10
import os
import torch.nn as nn
from gpustat import GPUStatCollection
def is_multi_gpu(net):
return isinstance(net, (MultiGPU, nn.DataParallel))
class MultiGPU(nn.DataParallel):
"""Wraps a network to allow simple multi-GPU training."""
    def __getattr__(self, item):
        # Look the attribute up on the DataParallel wrapper first; fall back to
        # the wrapped module so the wrapper stays transparent to callers.
        try:
            return super().__getattr__(item)
        except AttributeError:
            return getattr(self.module, item)
def query_gpu():
    """Select the freer of the first two GPUs and expose only it via CUDA_VISIBLE_DEVICES.

    Assumes at least two GPUs are visible to gpustat.
    """
    gpu_stat = GPUStatCollection.new_query()
    gpu_free_idx = 0 if gpu_stat[0].memory_free >= gpu_stat[1].memory_free else 1
    print('Query time: {} -- GPU[{}]: {}MB -- '.format(gpu_stat.query_time, gpu_free_idx,
                                                       gpu_stat[gpu_free_idx].memory_free))
    os.environ['CUDA_VISIBLE_DEVICES'] = "{}".format(gpu_free_idx)
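
# Illustrative usage sketch (hypothetical `net` model): select the freer GPU
# before building the model, then wrap it so attribute access still reaches the
# underlying module:
#
#   query_gpu()
#   net = MultiGPU(net)      # behaves like net, but runs the DataParallel forward
#   if is_multi_gpu(net):
#       net = net.module     # unwrap when a single-module handle is needed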
|
StarcoderdataPython
|
5014946
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
import sys
from termcolor import colored
from DIR import *
def Hello():
print("▒▒▒▒▒▒▒█▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀▀█")
print("▒▒▒▒▒▒▒█░▒▒▒▒▒▒▒▓▒▒▓▒▒▒▒▒▒▒░█")
print("▒▒▒▒▒▒▒█░▒▒▓▒▒▒▒▒▒▒▒▒▄▄▒▓▒▒░█░▄▄")
print("▒▒▄▀▀▄▄█░▒▒▒▒▒▒▓▒▒▒▒█░░▀▄▄▄▄▄▀░░█")
print("▒▒█░░░░█░▒▒▒▒▒▒▒▒▒▒▒█░░░░░░░░░░░█")
print("▒▒▒▀▀▄▄█░▒▒▒▒▓▒▒▒▓▒█░░░█▒░░░░█▒░░█")
print("▒▒▒▒▒▒▒█░▒▓▒▒▒▒▓▒▒▒█░░░░░░░▀░░░░░█")
print("▒▒▒▒▒▄▄█░▒▒▒▓▒▒▒▒▒▒▒█░░█▄▄█▄▄█░░█")
print("▒▒▒▒█░░░█▄▄▄▄▄▄▄▄▄▄█░█▄▄▄▄▄▄▄▄▄█")
print("▒▒▒▒█▄▄█░░█▄▄█░░░░░░█▄▄█░░█▄▄█")
pass
def derp():
print(" ─────────▄▄───────────────────▄▄──")
print(" ──────────▀█───────────────────▀█─")
print(" ──────────▄█───────────────────▄█─")
print(" ──█████████▀───────────█████████▀─")
print(" ───▄██████▄─────────────▄██████▄──")
print(" ─▄██▀────▀██▄─────────▄██▀────▀██▄")
print(" ─██────────██─────────██────────██")
print(" ─██───██───██─────────██───██───██")
print(" ─██────────██─────────██────────██")
print(" ──██▄────▄██───────────██▄────▄██─")
print(" ───▀██████▀─────────────▀██████▀──")
print(" ──────────────────────────────────")
print(" ──────────────────────────────────")
print(" ──────────────────────────────────")
print(" ───────────█████████████──────────")
print(" ──────────────────────────────────")
print(" ──────────────────────────────────")
pass
def Bunny():
print("........▓▓▓▓.......................................")
print("......▓▓......▓....................................")
print("......▓▓......▓▓..................▓▓▓▓.............")
print("......▓▓......▓▓..............▓▓......▓▓▓▓.........")
print("......▓▓....▓▓..............▓......▓▓......▓▓......")
print("........▓▓....▓............▓....▓▓....▓▓▓....▓▓....")
print("..........▓▓....▓........▓....▓▓..........▓▓...▓...")
print("............▓▓..▓▓....▓▓..▓▓................▓▓.....")
print("............▓▓......▓▓....▓▓.......................")
print("...........▓......................▓................")
print(".........▓.........................▓...............")
print("........▓......^..........^......▓.................")
print("........▓............❤............▓................")
print("........▓..........................▓...............")
print("..........▓..........ٮ..........▓..................")
print("..............▓▓..........▓▓.......................")
pass
def hacking():
print colored(' ====================================================== ', 'green', attrs=['bold'])
print colored(' ██╗ ██╗ █████╗ ██████╗██╗ ██╗██╗███╗ ██╗ ██████╗ ', 'red', attrs=['bold'])
print colored(' ██║ ██║██╔══██╗██╔════╝██║ ██╔╝██║████╗ ██║██╔════╝ ', 'red', attrs=['bold'])
print colored(' ███████║███████║██║ █████╔╝ ██║██╔██╗ ██║██║ ███╗ ', 'red', attrs=['bold'])
print colored(' ██╔══██║██╔══██║██║ ██╔═██╗ ██║██║╚██╗██║██║ ██║ ', 'red', attrs=['bold'])
print colored(' ██║ ██║██║ ██║╚██████╗██║ ██╗██║██║ ╚████║╚██████╔╝ ', 'red', attrs=['bold'])
print colored(' ╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═╝╚═╝╚═╝ ╚═══╝ ╚═════╝ ', 'red', attrs=['bold'])
print colored(' ====================================================== ', 'green', attrs=['bold'])
pass
def start_menu():
print colored('================================================', 'red', attrs=['bold'])
print colored(' ##:::: ##: ##::::::: ####: ##:::: ##: ########:', 'green', attrs=['bold'])
print colored('. ##:: ##:: ##:::::::. ##:: ###:: ###: ##.....::', 'green', attrs=['bold'])
print colored(':. ## ##::: ##:::::::: ##:: #### ####: ##:::::::', 'green', attrs=['bold'])
print colored('::. ###:::: ##:::::::: ##:: ## ### ##: ######:::', 'green', attrs=['bold'])
print colored(':: ## ##::: ##:::::::: ##:: ##. #: ##: ##...::::', 'green', attrs=['bold'])
print colored(': ##:. ##:: ##:::::::: ##:: ##:.:: ##: ##:::::::', 'green', attrs=['bold'])
print colored(' ##:::. ##: ########: ####: ##:::: ##: ########:', 'green', attrs=['bold'])
print colored('..:::::..::........::....::..:::::..::........::', 'green', attrs=['bold'])
print colored('====================', 'red', attrs=['bold']), colored('X_LIME', 'blue', attrs=['bold']), colored('====================', 'red', attrs=['bold'])
print (d_sign + '\033[1;33mSELECT AN OPTION TO BEGIN: \033[1;m')
print (a1_sign + x1 + a2_sign + '\033[1;32m Decomplie apk\033[1;m')
print (a1_sign + x2 + a2_sign + '\033[1;32m Backdoor-APK\033[1;m')
print (a1_sign + x3 + a2_sign + '\033[1;32m Apk-2-Jar\033[1;m')
print (a1_sign + x4 + a2_sign + '\033[1;32m Geo-Location\033[1;m')
print (a1_sign + x5 + a2_sign + '\033[1;32m Exit\033[1;m')
pass
def penguin():
print colored(" _nnnn_ ", 'green', attrs=['bold'])
print colored(" dGGGGMMb ", 'green', attrs=['bold'])
print colored(" @p~qp~~qMb X_LIME! ", 'green', attrs=['bold'])
print colored(" M|@||@) M| _;", 'green', attrs=['bold'])
print colored(" @,----.JM| -'", 'green', attrs=['bold'])
print colored(" JS^\__/ qKL", 'green', attrs=['bold'])
print colored(" dZP qKRb", 'green', attrs=['bold'])
print colored(" dZP qKKb", 'green', attrs=['bold'])
print colored(" fZP SMMb", 'green', attrs=['bold'])
print colored(" HZM MMMM", 'green', attrs=['bold'])
print colored(" FqM MMMM", 'green', attrs=['bold'])
print colored(" __| . |\dS qML", 'green', attrs=['bold'])
print colored(" | `. | `' \Zq", 'green', attrs=['bold'])
print colored("_) \.___.,| .'", 'green', attrs=['bold'])
print colored("\____ )MMMMMM| .'", 'green', attrs=['bold'])
print colored(" `-' `--' ", 'green', attrs=['bold'])
pass
|
StarcoderdataPython
|
1760929
|
from PyQt5.QtWidgets import QLabel
from PyQt5.QtCore import pyqtSignal
class ClickableLabel(QLabel):
sig_send = pyqtSignal(int)
    def __init__(self, pos, parent=None):
        super(ClickableLabel, self).__init__(parent)
        # Keep the index under a private name so it does not shadow the
        # QWidget.pos() method inherited from QLabel.
        self._pos = pos

    def mousePressEvent(self, event):
        QLabel.mousePressEvent(self, event)
        self.sig_send.emit(self._pos)
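
# Illustrative usage sketch (hypothetical slot): create one label per grid cell
# and react to the clicked index.
#
#   label = ClickableLabel(pos=3)
#   label.sig_send.connect(lambda idx: print("clicked cell", idx))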
|
StarcoderdataPython
|
1922815
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MicrosoftgraphidentitySet(Model):
"""identitySet.
:param application:
:type application: ~users.models.Microsoftgraphidentity
:param device:
:type device: ~users.models.Microsoftgraphidentity
:param user:
:type user: ~users.models.Microsoftgraphidentity
"""
_attribute_map = {
'application': {'key': 'application', 'type': 'Microsoftgraphidentity'},
'device': {'key': 'device', 'type': 'Microsoftgraphidentity'},
'user': {'key': 'user', 'type': 'Microsoftgraphidentity'},
}
def __init__(self, application=None, device=None, user=None):
super(MicrosoftgraphidentitySet, self).__init__()
self.application = application
self.device = device
self.user = user
|
StarcoderdataPython
|
3268601
|
<filename>scielomanager/export/forms.py
# coding: utf-8
from django import forms
from django.core.exceptions import ValidationError
from journalmanager import models as jm_models
class BlidModelChoiceField(forms.ModelChoiceField):
    def to_python(self, value):
        # Treat empty values the way ModelChoiceField does, instead of reporting
        # them as an invalid choice.
        if value in (None, ''):
            return None
        try:
            issue_pk = int(value)
        except (TypeError, ValueError):
            raise ValidationError(self.error_messages['invalid_choice'])
        try:
            return jm_models.Issue.objects.get(pk=issue_pk)
        except jm_models.Issue.DoesNotExist:
            raise ValidationError(self.error_messages['invalid_choice'])
class MarkupFilesForm(forms.Form):
journal = forms.ModelChoiceField(queryset=jm_models.Journal.objects.none())
issue = BlidModelChoiceField(queryset=jm_models.Issue.objects.none())
def __init__(self, *args, **kwargs):
user = kwargs.pop('user', None)
super(MarkupFilesForm, self).__init__(*args, **kwargs)
if user:
self.fields['journal'].queryset = jm_models.Journal.objects.all_by_user(user)
|
StarcoderdataPython
|
346847
|
<reponame>oliverwy/PhysicalTestReportingAuxiliarySystem
# Generated by Django 2.2.6 on 2019-10-26 10:44
import computed_property.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BMITestNormals',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='序号')),
('classtype', models.CharField(default=0, max_length=10, verbose_name='等级')),
('ItemScore', models.IntegerField(default=0, verbose_name='单项分值')),
('male', models.FloatField(default='0.0', verbose_name='BMI男范围')),
('female', models.FloatField(default='0.0', verbose_name='BMI女范围')),
],
options={
'verbose_name': '体重指数(BMI)单项评分表',
'verbose_name_plural': '体重指数(BMI)单项评分表',
},
),
migrations.CreateModel(
name='Class',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='序号')),
('class_name', models.CharField(max_length=100, verbose_name='专业班级')),
],
options={
'verbose_name': '专业班级表',
'verbose_name_plural': '专业班级表',
},
),
migrations.CreateModel(
name='FiftymeterTestNormal',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='序号')),
('classtype', models.CharField(default=0, max_length=10, verbose_name='等级')),
('ItemScore', models.IntegerField(default=0, verbose_name='单项分值')),
('malefirst', models.FloatField(default=0, verbose_name='大一大二指标男')),
('maleSecond', models.FloatField(default=0, verbose_name='大三大四指标男')),
('femalefirst', models.FloatField(default=0, verbose_name='大一大二指标女')),
('femaleSecond', models.FloatField(default=0, verbose_name='大三大四指标女')),
],
options={
'verbose_name': '50米跑单项评分表(单位:秒)',
'verbose_name_plural': '50米跑单项评分表(单位:秒)',
},
),
migrations.CreateModel(
name='ItemWeight',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False, verbose_name='序号')),
('BMI', models.FloatField(default=0, verbose_name='体重指数BMI')),
('lungcapacity', models.FloatField(default=0, verbose_name='肺活量')),
('fiftymeter', models.FloatField(default=0, verbose_name='50米跑')),
('sitandreach', models.FloatField(default=0, verbose_name='坐位体前屈')),
('standingbroadjump', models.FloatField(default=0, verbose_name='立定跳远')),
('pull_ups', models.FloatField(default=0, verbose_name='引体向上')),
('Oneminutesitups', models.FloatField(default=0, verbose_name='一分钟仰卧起坐')),
('middle_distancerun', models.FloatField(default=0, verbose_name='中长跑')),
],
options={
'verbose_name': '单项指标与权重',
'verbose_name_plural': '单项指标与权重',
},
),
migrations.CreateModel(
name='LungCapacityTestNormal',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='序号')),
('classtype', models.CharField(default=0, max_length=10, verbose_name='等级')),
('ItemScore', models.IntegerField(default=0, verbose_name='单项分值')),
('malefirst', models.IntegerField(default=0, verbose_name='大一大二指标男')),
('maleSecond', models.IntegerField(default=0, verbose_name='大三大四指标男')),
('femalefirst', models.IntegerField(default=0, verbose_name='大一大二指标女')),
('femaleSecond', models.IntegerField(default=0, verbose_name='大三大四指标女')),
],
options={
'verbose_name': '肺活量单项评分表(单位:毫升)',
'verbose_name_plural': '肺活量单项评分表(单位:毫升)',
},
),
migrations.CreateModel(
name='MiddleDistanceRunPlusTestNormal',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='序号')),
('ItemScore', models.IntegerField(default=0, verbose_name='单项加分值')),
('malefirst', models.CharField(default=0, max_length=10, verbose_name='大一大二指标男')),
('maleSecond', models.CharField(default=0, max_length=10, verbose_name='大三大四指标男')),
('femalefirst', models.CharField(default=0, max_length=10, verbose_name='大一大二指标女')),
('femaleSecond', models.CharField(default=0, max_length=10, verbose_name='大三大四指标女')),
],
options={
'verbose_name': '男生1000米跑、女生800米加分表(单位:分·秒)',
'verbose_name_plural': '男生1000米跑、女生800米加分表(单位:分·秒)',
},
),
migrations.CreateModel(
name='MiddleDistanceRunTestNormal',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='序号')),
('classtype', models.CharField(default=0, max_length=10, verbose_name='等级')),
('ItemScore', models.IntegerField(default=0, verbose_name='单项分值')),
('malefirst', models.FloatField(default=0, verbose_name='大一大二指标男')),
('maleSecond', models.FloatField(default=0, verbose_name='大三大四指标男')),
('femalefirst', models.FloatField(default=0, verbose_name='大一大二指标女')),
('femaleSecond', models.FloatField(default=0, verbose_name='大三大四指标女')),
],
options={
'verbose_name': '男生1000米、女生800米耐力跑单项评分表(单位:分·秒)',
'verbose_name_plural': '男生1000米、女生800米耐力跑单项评分表(单位:分·秒)',
},
),
migrations.CreateModel(
name='OneMinuteSitupsAndPullUpPlusNormal',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='序号')),
('ItemScore', models.IntegerField(default=0, verbose_name='单项加分值')),
('malefirst', models.IntegerField(default=0, verbose_name='大一大二指标男')),
('maleSecond', models.IntegerField(default=0, verbose_name='大三大四指标男')),
('femalefirst', models.IntegerField(default=0, verbose_name='大一大二指标女')),
('femaleSecond', models.IntegerField(default=0, verbose_name='大三大四指标女')),
],
options={
'verbose_name': '男生引体向上、女生仰卧起坐加分表(单位:次)',
'verbose_name_plural': '男生引体向上、女生仰卧起坐加分表(单位:次)',
},
),
migrations.CreateModel(
name='OneMinuteSitupsAndPullUpTestNormal',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='序号')),
('classtype', models.CharField(default=0, max_length=10, verbose_name='等级')),
('ItemScore', models.IntegerField(default=0, verbose_name='单项分值')),
('malefirst', models.IntegerField(default=0, verbose_name='大一大二指标男')),
('maleSecond', models.IntegerField(default=0, verbose_name='大三大四指标男')),
('femalefirst', models.IntegerField(default=0, verbose_name='大一大二指标女')),
('femaleSecond', models.IntegerField(default=0, verbose_name='大三大四指标女')),
],
options={
'verbose_name': '男生引体向上、女生一分钟仰卧起坐评分表(单位:次)',
'verbose_name_plural': '男生引体向上、女生一分钟仰卧起坐评分表(单位:次)',
},
),
migrations.CreateModel(
name='SitandReachTestNoraml',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='序号')),
('classtype', models.CharField(default=0, max_length=10, verbose_name='等级')),
('ItemScore', models.IntegerField(default=0, verbose_name='单项分值')),
('malefirst', models.FloatField(default=0, verbose_name='大一大二指标男')),
('maleSecond', models.FloatField(default=0, verbose_name='大三大四指标男')),
('femalefirst', models.FloatField(default=0, verbose_name='大一大二指标女')),
('femaleSecond', models.FloatField(default=0, verbose_name='大三大四指标女')),
],
options={
'verbose_name': '坐位体前屈单项评分表(单位:厘米)',
'verbose_name_plural': '坐位体前屈单项评分表(单位:厘米)',
},
),
migrations.CreateModel(
name='StandingBroadJumpTestNormal',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='序号')),
('classtype', models.CharField(default=0, max_length=10, verbose_name='等级')),
('ItemScore', models.IntegerField(default=0, verbose_name='单项分值')),
('malefirst', models.FloatField(default=0, verbose_name='大一大二指标男')),
('maleSecond', models.FloatField(default=0, verbose_name='大三大四指标男')),
('femalefirst', models.FloatField(default=0, verbose_name='大一大二指标女')),
('femaleSecond', models.FloatField(default=0, verbose_name='大三大四指标女')),
],
options={
'verbose_name': '立定跳远单项评分表(单位:厘米)',
'verbose_name_plural': '立定跳远单项评分表(单位:厘米)',
},
),
migrations.CreateModel(
name='Students',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='序号')),
('gradeclass', models.CharField(max_length=20, verbose_name='年级')),
('classname', models.CharField(max_length=50, verbose_name='专业班级')),
('address', models.CharField(blank=True, max_length=250, verbose_name='家庭住址')),
('studentid', models.CharField(max_length=18, verbose_name='学号')),
('nationality', models.CharField(blank=True, max_length=50, verbose_name='民族')),
('name', models.CharField(max_length=50, verbose_name='学生姓名')),
('gender', models.CharField(choices=[('男', '男'), ('女', '女')], max_length=10, verbose_name='性别')),
('bithday', computed_property.fields.ComputedCharField(compute_from='computer_bithday', editable=False, max_length=30, null=True, verbose_name='出生日期')),
('idcardno', models.CharField(max_length=20, verbose_name='身份证号')),
('stature', models.FloatField(default=0, verbose_name='身高')),
('weight', models.FloatField(default=0, verbose_name='体重')),
('lungcapacity', models.IntegerField(default=0, verbose_name='肺活量')),
('middle_distancerun', models.FloatField(default=0, verbose_name='中长跑')),
('fiftymeter', models.FloatField(default=0, verbose_name='50米')),
('standingbroadjump', models.FloatField(default=0, verbose_name='立定跳远')),
('pull_ups', models.IntegerField(default=0, verbose_name='引体向上')),
('sitandreach', models.FloatField(default=0, verbose_name='坐位体前屈')),
('Oneminutesitups', models.IntegerField(default=0, verbose_name='一分钟仰卧起坐')),
('score', computed_property.fields.ComputedFloatField(compute_from='caculate_it', editable=False, verbose_name='分数')),
('enrollment', models.CharField(blank=True, max_length=50, null=True, verbose_name='入学时间')),
('remarks', models.TextField(blank=True, verbose_name='备注')),
],
options={
'verbose_name': '体能测试成绩表',
'verbose_name_plural': '体能测试成绩表',
},
),
migrations.CreateModel(
name='TecherInfo',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='序号')),
('techerName', models.CharField(max_length=20, verbose_name='教师姓名')),
('techerGender', models.CharField(choices=[('男', '男'), ('女', '女')], max_length=10, verbose_name='性别')),
('mobileNo', models.CharField(max_length=20, verbose_name='手机号码')),
('officeNo', models.CharField(blank=True, max_length=20, verbose_name='座机号码')),
('office', models.CharField(blank=True, max_length=50, verbose_name='办公室')),
],
options={
'verbose_name': '教师信息表',
'verbose_name_plural': '教师信息表',
},
),
migrations.CreateModel(
name='TestDatetime',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='序号')),
('testTime', models.CharField(max_length=100, verbose_name='测试时间')),
],
options={
'verbose_name': '测试时间',
'verbose_name_plural': '测试时间',
},
),
migrations.CreateModel(
name='TestSite',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='序号')),
('siteName', models.CharField(max_length=100, verbose_name='测试场地')),
],
options={
'verbose_name': '测试场地',
'verbose_name_plural': '测试场地',
},
),
migrations.CreateModel(
name='TestSchedule',
fields=[
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='序号')),
('testClass', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Class', verbose_name='测试时间')),
('testdatetime', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.TestDatetime', verbose_name='测试时间')),
('testsite', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.TestSite', verbose_name='测试场地')),
('testtecher', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.TecherInfo', verbose_name='测试教师')),
],
options={
'verbose_name': '测试时间场地安排表',
'verbose_name_plural': '测试时间场地安排表',
},
),
]
|
StarcoderdataPython
|
6547621
|
# django
from django.conf.urls import url
# views
from cooggerapp.views import users
urlpatterns = [
# url(r'^upload/pp/$', users.Uploadpp.as_view(), name="user_upload_pp"),
url(r"^about/@(?P<username>.+)/$", users.UserAboutBaseClass.as_view(), name="userabout"),
url(r'^(?P<utopic>.+)/@(?P<username>.+)/$', users.UserTopic.as_view(), name="utopic"),
url(r'^@(?P<username>.+)/$', users.UserClassBased.as_view(), name="user"),
url(r'^history/@(?P<username>.+)$', users.UserHistory.as_view(), name="history"),
url(r'^wallet/@(?P<username>.+)$', users.UserWallet.as_view(), name="wallet"),
]
|
StarcoderdataPython
|
3272631
|
<reponame>sinedie/Flask-Svelte-Websockets-Nginx-Docker<gh_stars>1-10
from flask import Blueprint, jsonify, request
from flask_jwt_extended import jwt_required, get_jwt_identity, fresh_jwt_required
api = Blueprint('api', __name__, url_prefix="/api")
@api.route('/', methods=["GET"])
def health_check():
    return jsonify({'msg': 'Server is healthy'}), 200
@api.route('/protected', methods=['GET'])
@jwt_required
def protected():
current_user = get_jwt_identity()
return jsonify({'logged_in_as': current_user}), 200
@api.route('/protected-fresh', methods=['GET'])
@fresh_jwt_required
def protected_fresh():
username = get_jwt_identity()
return jsonify({'fresh_logged_in_as': username}), 200
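# Minimal wiring sketch, assuming the project creates its Flask app elsewhere;
# the secret key and run call below are illustrative placeholders only.
if __name__ == "__main__":
    from flask import Flask
    from flask_jwt_extended import JWTManager

    app = Flask(__name__)
    app.config["JWT_SECRET_KEY"] = "change-me"  # placeholder secret
    JWTManager(app)
    app.register_blueprint(api)  # routes become /api/, /api/protected, ...
    app.run(debug=True)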
|
StarcoderdataPython
|
8081235
|
<reponame>agrija9/Software-Development-Project
import rospy
import numpy as np
from std_msgs.msg import String
import threading
import time
import rosgraph
import socket
from datetime import datetime
from flask import Flask, render_template
from flask import request, redirect
from gevent.pywsgi import WSGIServer
from werkzeug.utils import secure_filename
import os
def callback(data, args):
print('Time : ' + args[1] + ', Topic : ' + args[0] + ', Data : ' + data.data)
return ('Time : ' + args[1] + ', Topic : ' + args[0] + ', Data : ' + data.data)
def listener(strlist):
rospy.init_node('listener', anonymous=True)
for i in strlist:
rospy.Subscriber(i, String, callback, (i, datetime.now().strftime("%d/%m/%Y %H:%M:%S:%f")))
rospy.spin()
def check_master(check):
while check == False:
try:
rosgraph.Master('/rostopic').getPid()
except socket.error:
check = True
rospy.signal_shutdown('exit')
if __name__ == '__main__':
x = threading.Thread(target=check_master, args=(False,))
x.start()
lists = rospy.get_published_topics()
strlist = []
for i in lists:
if i[1] == 'std_msgs/String':
strlist.append(i[0])
listener(strlist)
|
StarcoderdataPython
|
6661969
|
<gh_stars>0
#!/usr/bin/env python3
import sys,re
import getopt
from glob import glob
import os
import shutil
import time
import tempfile
import subprocess
import json
import pprint
import pickle
from datasketch import MinHash, LeanMinHash
import itertools
start = time.time()
'''
schema of dictionary db:
<hash:List[filename_funcname,..]>
'''
MINHASHDB = {}
def debug(*args, **kwargs):
if VERBOSE:
print(*args,file=sys.stderr, **kwargs)
def elog(*args, **kwargs):
print(*args,file=sys.stderr, **kwargs)
def usage():
elog("usage:\n%s <-i/-s> [options] <path to binary or .ll files> .."%sys.argv[0] )
# print("action can include extract, tokenize, minhash, ssdeep, ssdeep_ll, simhash, simhash_ft compare, compare_ll, confusion_matrix")
elog('''
arguments:
-s : search mode, lookup similar functions
-i : index mode, indexes binaries/ll files into db pickle
-f path_to_pickle : path to pickle file of bcd
-p permutations : number of permutations for minhash
-t threshold : threshold for matching in minhash and simhash (e.g 0.5 for minhash, 10 for simhash)
-v : verbose debugging messages
''')
def tokenize(instruction):
'''
takes an llvm IR instruction and returns a list of string tokens
'''
tokens = instruction.split()
result_tokens = []
intsizes = ['i4', 'i8', 'i16', 'i32', 'i64',
'u4', 'u8', 'u16', 'u32', 'u64']
# when a token starts with a shoterner, truncate it to the shortener.
shorteners = ['%stack_var', '%dec_label', '%global', '@global']
for i in range(len(tokens)):
# run replacement rules
t = tokens[i]
replaced = False
for s in shorteners:
if t.startswith(s):
debug(f'replacing {t} with {s}')
result_tokens.append(s)
replaced = True
break
if replaced:
continue
elif t[:3] in intsizes:
debug(f'dropping {t}')
continue
elif t.startswith('%') and not ("(" in t):
# generic variable reference
newt = '%r'
debug(f'replacing {t} with {newt}')
result_tokens.append(newt)
elif t == '!insn.addr': # stop processing
break
else:
newt = t
for it in intsizes:
newt = newt.replace(it, '')
# newt = t.replace()
result_tokens.append(newt)
# can use lookahead to determine nature of token
if result_tokens != []:
#result_tokens.append(";")
debug(result_tokens)
return result_tokens # signify end of instruction
return None
def extract_functions_retdecLL(filepath):
'''
extract functions from retdec LLVM IR
return a dictionary of funcname:funccode?
'''
# function regex for llvm ir from retdec
func_re = r'define .* (@.*){\n'
pattern = re.compile(func_re)
with open(filepath) as f:
data = f.read()
debug(f"[extract_functions_retdecLL] done reading {filepath} into mem..")
res = {}
r = pattern.search(data)
prev = None
count = 0
skipCount = 0
# the goal is to dump out the entire block, by reading from end of last label match to start of current match
while r:
# print the match
# print(r.group())
# read until end of function (marked by '}')
funcEnd = data[r.start():].find('}')
# debug(f"start: {r.start()} funcEnd:{funcEnd}")
funcCode = data[r.start():r.start() + funcEnd] + '}'
fheader = funcCode.split('{')[0]
fname = fheader.split('(')[0].split(' ')[-1]
if res.get(fname) != None:
            elog(f"duplicate function {fname}")
res[fname] = funcCode
r = pattern.search(data, r.start() + 1)
count += 1
if skipCount > 0:
debug(f"skipped {skipCount} functions")
return res
def lift(binaryPath):
# if this program from retdec is not in your path, use full path
# install from https://github.com/avast/retdec
retdecDecompilerPath = "retdec-decompiler"
# make temp directory and copy file over
tmpd = tempfile.mkdtemp(prefix="tmp-"+os.path.basename(binaryPath)+'_', dir='./tmp')
newbin = shutil.copy(binaryPath, tmpd)
# decompile
if VERBOSE:
os.system(f"{retdecDecompilerPath} {newbin}")
else:
os.system(f"{retdecDecompilerPath} {newbin} >/dev/null")
# remove copied bin
os.remove(newbin)
llFile = f"{newbin}.ll"
if not os.path.exists(llFile):
elog("error - lifted LL file not found")
exit(2)
# import code
# code.interact(local=locals())
# exit(1)
return llFile
def getTuple1(t):
    '''
return 1st (0 indexed) element of a tuple
'''
return t[1]
def lookupPath(path, db=MINHASHDB):
'''
decompile a binary (or all binaries in a directory), calculate hashes for each function and then look it up in the database
'''
if os.path.isdir(path):
dirpath = path
for i in os.walk(dirpath):
files = i[2]
for file in files:
filepath = os.path.join(dirpath, file)
# print(path)
lookupPath(filepath)
# lift binary using retdec
if path.endswith('.ll'):
llpath = path
else:
llpath = lift(path)
functions = extract_functions_retdecLL(llpath)
# os.remove(llpath)
lstart = time.time()
# schema: funcname:[(filefunc, match_score)]
matches = {}
# get the minhash values of each
for fname in functions:
functokens = tokenize(functions[fname])
# using LeanMinHash because the pipeline does, to be consistent
m = MinHash(num_perm=MINHASH_PERMS)
for t in functokens:
m.update(t.encode('utf8'))
# m.update(t)
lm = LeanMinHash(m)
hashvals = lm.hashvalues
# print(f'{fname}:{hashvals}')
# for each function, find all similar functions in the db (each function would be O(64) for 64 hash lookups)
# funcname: hash match
hashcounts = {}
for h in hashvals:
if db.get(h) == None: # no match
continue
for filefunc in db.get(h):
if hashcounts.get(filefunc) == None:
hashcounts[filefunc] = 0
hashcounts[filefunc] += 1
for filefunc in hashcounts:
score = hashcounts[filefunc] / MINHASH_PERMS
if score >= THRESHOLD:
if matches.get(fname) == None:
matches[fname] = []
matches[fname].append((filefunc, score))
# pprint.pprint(matches, indent=2)
# rank results based on score
for function_key in matches:
matches[function_key].sort(key=getTuple1, reverse=True)
elog("lookupPath took", (time.time() - lstart))
return matches
def indexPath(path, db=MINHASHDB):
'''
decompile a binary (or all binaries in a directory), calculate hashes for each function and then store it in the database
'''
global MINHASHDB
if os.path.isdir(path):
dirpath = path
for i in os.walk(dirpath):
files = i[2]
for file in files:
filepath = os.path.join(dirpath, file)
# print(path)
indexPath(filepath)
# lift binary using retdec
if path.endswith('.ll'):
llpath = path
else:
llpath = lift(path)
functions = extract_functions_retdecLL(llpath)
# os.remove(llpath)
lstart = time.time()
# schema: funcname:[(filefunc, match_score)]
matches = {}
# get the minhash values of each
for fname in functions:
functokens = tokenize(functions[fname])
# using LeanMinHash because the pipeline does, to be consistent
m = MinHash(num_perm=MINHASH_PERMS)
for t in functokens:
m.update(t.encode('utf8'))
# m.update(t)
lm = LeanMinHash(m)
hashvals = lm.hashvalues
# print(f'{fname}:{hashvals}')
# for each function, find all similar functions in the db (each function would be O(64) for 64 hash lookups)
# funcname: hash match
hashcounts = {}
filename_funcname = os.path.basename(path) + ":" + fname
for h in hashvals:
if db.get(h) == None:
db[h] = set()
# if filename_funcname not in db[h]
elif type(db.get(h)) == list:
# convert entry to set if its a list (old version)
db[h] = set(db[h])
db[h].add(filename_funcname)
print("indexPath took", (time.time() - lstart))
MINHASH_PERMS = 64
THRESHOLD = 0.5
VERBOSE = False
PICKLEFILE = 'db_dict.pkl'
# OUTPUT_DBPATHS = {'extract':'ll_extract.db', 'tokenize':'tokens.db', 'hash':'hashes.db'}
MINHASH_PERMS = 64
MODE = 'lookup'
# main
if __name__ == '__main__':
funcNames = None
opts, args = getopt.gnu_getopt(sys.argv[1:], 'hvisd:a:t:p:f:')
for tup in opts:
o,a = tup[0], tup[1]
if o == '-h':
usage()
exit(0)
elif o == '-i':
MODE = 'index'
elif o == '-s':
MODE = 'lookup'
elif o == '-f':
PICKLEFILE = a
elif o == '-p':
MINHASH_PERMS = int(a)
# elif o == '-a':
# ALGO = a
elif o == '-v':
VERBOSE = True
elif o == '-t':
THRESHOLD = float(a)
if len(args) < 1:
print('missing path to file.')
usage()
exit(1)
allfilefuncs = set()
if not os.path.exists(PICKLEFILE):
MINHASHDB = {}
else:
with open(PICKLEFILE,'rb') as f:
MINHASHDB = pickle.load(f)
elog(f"finished loading db dictionary, elapsed {time.time() - start}")
elog(f"hashes in db: {len(MINHASHDB)}")
for targetpath in args:
if MODE == 'lookup':
if not os.path.exists(PICKLEFILE):
elog("no db pickle file specified, can't do lookup")
exit(1)
matches = lookupPath(targetpath, MINHASHDB)
print(json.dumps(matches, indent=2))
elif MODE == 'index':
indexPath(targetpath, MINHASHDB)
elog(f"hashes in db after indexing: {len(MINHASHDB)}")
with open(PICKLEFILE,'wb') as f:
pickle.dump(MINHASHDB, f)
elog(f"updated db at {PICKLEFILE}")
elog("elapsed:", time.time() - start)
#import code
#code.interact(local=locals())
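# Illustrative example of the tokenizer's replacement rules above; the
# instruction text is made up for demonstration and is not real retdec output:
#   tokenize("%stack_var_8 = add i32 %v1, 5 !insn.addr !3")
#   -> ['%stack_var', '=', 'add', '%r', '5']
# i32 is dropped as an integer size, %stack_var_8 collapses to its shortener,
# %v1 becomes the generic register token %r, and !insn.addr stops processing.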
|
StarcoderdataPython
|
6637201
|
# line_reader.py
import os
# Function: Will read and return the first line from a file.
# The filename and path to this pile will be passed in as
# an argument to the function, and the function will open
# that file, read in the first line, and return it.
def read_from_file(filename):
if not os.path.exists(filename):
raise Exception("Error: File Not Found.")
    with open(filename, "r") as infile:
        return infile.readline()
|
StarcoderdataPython
|
11269936
|
from .help import dp
from .start import dp
from .rules import dp
from .settings import dp
from .death_note_list import dp
from .top_users import dp
from .shop import dp
from .write import dp
from .write_down import dp
__all__ = ["dp"]
|
StarcoderdataPython
|
309506
|
<gh_stars>1-10
from django.apps import AppConfig
class UnclassedConfig(AppConfig):
name = 'Unclassed'
|
StarcoderdataPython
|
6599101
|
<filename>sco_py/expr.py
import numdifftools as nd
import numpy as np
from scipy.linalg import eigvalsh
DEFAULT_TOL = 1e-4
"""
Utility classes to represent expresions. Each expression defines an eval, grad,
hess, and convexify method. Variables and values are assumed to be 2D numpy
arrays.
"""
N_DIGS = 6 # 10
class Expr(object):
"""
by default, expressions are defined by black box functions
"""
def __init__(self, f, grad=None, hess=None, **kwargs):
self.f = f
self._grad = grad
self._hess = hess
self._eval_cache = {}
self._grad_cache = {}
self._convexify_cache = {}
def _get_key(self, x):
return tuple(x.round(N_DIGS).flatten())
def eval(self, x):
key = self._get_key(x)
if key in self._eval_cache:
return self._eval_cache[key]
val = self.f(x)
self._eval_cache[key] = val.copy()
return val
def _get_flat_f(self, x):
"""
Utility function which reshapes and flattens for compatibility with
numdifftools' Jacobian and Hessian function.
"""
if len(x.shape) == 2:
rows, cols = x.shape
assert cols == 1
def flat_f(x):
return self.f(x.reshape((rows, cols))).flatten()
return flat_f
elif len(x.shape) == 1:
return self.f
else:
raise Exception("Input shape not supported")
def _num_grad(self, x):
"""
Returns a numerically computed gradient.
Flattening is necessary for compatibility with numdifftools' Jacobian
function.
"""
grad_fn = nd.Jacobian(self._get_flat_f(x))
return grad_fn(x)
def _debug_grad(self, g1, g2, atol=DEFAULT_TOL):
for i, g_row in enumerate(g1):
for j, g in enumerate(g_row):
if not np.allclose(g, g2[i, j], atol=atol):
print("{}, {}".format(i, j))
print(g, g2[i, j])
def grad(self, x, num_check=False, atol=DEFAULT_TOL):
"""
Returns the gradient. Can numerically check the gradient.
"""
key = self._get_key(x)
if key in self._grad_cache:
return self._grad_cache[key].copy()
assert not num_check or self._grad is not None
if self._grad is None:
return self._num_grad(x)
gradient = self._grad(x)
if num_check:
num_grad = self._num_grad(x)
if not np.allclose(num_grad, gradient, atol=atol):
self._debug_grad(gradient, num_grad, atol=atol)
raise Exception(
"Numerical and analytical gradients aren't close. \
\nnum_grad: {0}\nana_grad: {1}\n".format(
num_grad, gradient
)
)
self._grad_cache[key] = gradient.copy()
return gradient
def _num_hess(self, x):
"""
Returns a numerically computed hessian.
Flattening is necessary for compatibility with numdifftools' Hessian
function.
"""
hess_fn = nd.Hessian(self._get_flat_f(x))
return hess_fn(x.flatten())
def hess(self, x, num_check=False, atol=DEFAULT_TOL):
"""
Returns the hessian. Can numerically check the hessian.
"""
assert not num_check or self._hess is not None
if self._hess is None:
return self._num_hess(x)
hessian = self._hess(x)
if num_check:
num_hess = self._num_hess(x)
if not np.allclose(num_hess, hessian, atol=atol):
raise Exception(
"Numerical and analytical hessians aren't close. \
\nnum_hess: {0}\nana_hess: {1}\n".format(
num_hess, hessian
)
)
return hessian
def convexify(self, x, degree=1):
"""
Returns an Expression object that represents the convex approximation of
self at x where degree 1 is an affine approximation and degree 2 is a
quadratic approximation. If the hessian has negative eigenvalues, the
hessian is adjusted so that it is positive semi-definite.
"""
res = None
if degree == 1:
A = self.grad(x)
b = -A.dot(x) + self.eval(x)
res = AffExpr(A, b)
elif degree == 2:
hess = self.hess(x)
eig_vals = eigvalsh(hess)
min_eig_val = min(eig_vals)
if min_eig_val < 0:
hess = hess - np.eye(hess.shape[0]) * min_eig_val
grad = self.grad(x)
Q = hess
A = grad - np.transpose(x).dot(hess)
b = 0.5 * np.transpose(x).dot(hess).dot(x) - grad.dot(x) + self.eval(x)
res = QuadExpr(Q, A, b)
else:
raise NotImplementedError
return res
class AffExpr(Expr):
"""
Affine Expression
"""
def __init__(self, A, b):
"""
expr is Ax + b
"""
assert b.shape[0] == A.shape[0]
self.A = A
self.b = b
self.x_shape = (A.shape[1], 1)
def eval(self, x):
return self.A.dot(x) + self.b
def grad(self, x):
return self.A.T
def hess(self, x):
return np.zeros((self.x_shape[0], self.x_shape[0]))
class QuadExpr(Expr):
"""
Quadratic Expression
"""
def __init__(self, Q, A, b):
"""
expr is 0.5*x'Qx + Ax + b
"""
        assert A.shape[0] == 1, "Can only define scalar quadratic expressions"
# ensure the correct shapes for all the arguments
assert Q.shape[0] == Q.shape[1]
assert Q.shape[0] == A.shape[1]
assert b.shape[0] == 1
self.Q = Q
self.A = A
self.b = b
self.x_shape = (A.shape[1], 1)
def eval(self, x):
return 0.5 * x.T.dot(self.Q.dot(x)) + self.A.dot(x) + self.b
def grad(self, x):
assert x.shape == self.x_shape
return 0.5 * (self.Q.dot(x) + self.Q.T.dot(x)) + self.A.T
def hess(self, x):
return self.Q.copy()
class AbsExpr(Expr):
"""
Absolute value expression
"""
def __init__(self, expr):
self.expr = expr
def eval(self, x):
return np.absolute(self.expr.eval(x))
def grad(self, x):
"""
Since the absolute value expression is not smooth, a subgradient is
returned instead of the gradient.
"""
raise NotImplementedError
def hess(self, x):
raise NotImplementedError
class HingeExpr(Expr):
"""
Hinge expression
"""
def __init__(self, expr):
self.expr = expr
def eval(self, x):
v = self.expr.eval(x)
zeros = np.zeros(v.shape)
return np.maximum(v, zeros)
def grad(self, x):
"""
Since the hinge expression is not smooth, a subgradient is returned
instead of the gradient.
"""
raise NotImplementedError
def hess(self, x):
raise NotImplementedError
class CompExpr(Expr):
"""
Comparison Expression
"""
def __init__(self, expr, val):
"""
expr: Expr object, the expression that is being compared to val
val: numpy array, the value that the expression is being compared to
"""
self.expr = expr
self.val = val.copy()
self._convexify_cache = {}
def eval(self, x, tol=DEFAULT_TOL):
"""
        Returns True if the comparison holds within some tolerance and False
        otherwise.
"""
raise NotImplementedError
def grad(self, x):
raise Exception(
"The gradient is not well defined for comparison \
expressions"
)
def hess(self, x):
raise Exception(
"The hessian is not well defined for comparison \
expressions"
)
def convexify(self, x, degree=1):
raise NotImplementedError
class EqExpr(CompExpr):
"""
Equality Expression
"""
def eval(self, x, tol=DEFAULT_TOL, negated=False):
"""
Tests whether the expression at x is equal to self.val with tolerance
tol.
"""
assert tol >= 0.0
if negated:
return not np.allclose(self.expr.eval(x), self.val, atol=tol)
return np.allclose(self.expr.eval(x), self.val, atol=tol)
def convexify(self, x, degree=1):
"""
Returns an AbsExpr that is the l1 penalty expression, a measure of
constraint violation.
The constraint h(x) = 0 becomes |h(x)|
"""
assert degree == 1
key = self._get_key(x)
if key in self._convexify_cache:
return self._convexify_cache[key]
aff_expr = self.expr.convexify(x, degree=1)
aff_expr.b = aff_expr.b - self.val
res = AbsExpr(aff_expr)
self._convexify_cache[key] = res
return res
class LEqExpr(CompExpr):
"""
Less than or equal to expression
"""
def eval(self, x, tol=DEFAULT_TOL, negated=False):
"""
Tests whether the expression at x is less than or equal to self.val with
tolerance tol.
"""
assert tol >= 0.0
expr_val = self.expr.eval(x)
if negated:
## need the tolerance to go the other way if its negated
return not np.all(expr_val <= self.val - tol * np.ones(expr_val.shape))
else:
return np.all(expr_val <= self.val + tol * np.ones(expr_val.shape))
def convexify(self, x, degree=1):
"""
Returns a HingeExpr that is the hinge penalty expression, a measure of
constraint violation.
The constraint g(x) <= 0 becomes |g(x)|+ where |g(x)|+ = max(g(x), 0)
"""
assert degree == 1
key = self._get_key(x)
if key in self._convexify_cache:
return self._convexify_cache[key]
aff_expr = self.expr.convexify(x, degree=1)
aff_expr.b = aff_expr.b - self.val
res = HingeExpr(aff_expr)
self._convexify_cache[key] = res
return res
class LExpr(CompExpr):
"""
Less than expression
"""
def eval(self, x, tol=DEFAULT_TOL, negated=False):
"""
        Tests whether the expression at x is strictly less than self.val with
tolerance tol.
"""
assert tol >= 0.0
expr_val = self.expr.eval(x)
if negated:
## need the tolerance to go the other way if its negated
return not np.all(expr_val < self.val - tol * np.ones(expr_val.shape))
else:
return np.all(expr_val < self.val + tol * np.ones(expr_val.shape))
def convexify(self, x, degree=1):
"""
Returns a HingeExpr that is the hinge penalty expression, a measure of
constraint violation.
The constraint g(x) <= 0 becomes |g(x)|+ where |g(x)|+ = max(g(x), 0)
"""
assert degree == 1
key = self._get_key(x)
if key in self._convexify_cache:
return self._convexify_cache[key]
aff_expr = self.expr.convexify(x, degree=1)
aff_expr.b = aff_expr.b - self.val
res = HingeExpr(aff_expr)
self._convexify_cache[key] = res
return res
class BoundExpr(object):
"""
Bound expression
Bound expression is composed of an Expr and a Variable. Please note that the
variable ordering matters
"""
def __init__(self, expr, var):
self.expr = expr
self.var = var
def eval(self):
"""
Returns the current value of the bound expression
"""
return self.expr.eval(self.var.get_value())
def convexify(self, degree=1):
"""
Returns a convexified BoundExpr at the variable's current value.
"""
assert self.var.get_value() is not None
cvx_expr = self.expr.convexify(self.var.get_value(), degree)
return BoundExpr(cvx_expr, self.var)
class TFExpr(Expr):
"""
TODO
wrapper around exprs defined by a tensorflow graph. Leverages
automated differentition.
"""
def __init__(self, f, grad=None, hess=None, sess=None):
self.sess = sess
return super(TFExpr, self).__init__(f, grad, hess)
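# Small usage sketch with arbitrary numbers: a black-box Expr for
# f(x) = sum(x_i^2) on a 2-D column vector, with analytic gradient and hessian,
# convexified at a point to produce the AffExpr/QuadExpr objects defined above.
if __name__ == "__main__":
    def f(x):
        return np.array([[float(np.sum(x ** 2))]])

    def grad(x):
        return 2.0 * x.T  # row vector, the layout convexify() expects

    def hess(x):
        return 2.0 * np.eye(x.shape[0])

    expr = Expr(f, grad=grad, hess=hess)
    x0 = np.array([[1.0], [2.0]])
    print("f(x0):", expr.eval(x0))                  # [[5.]]
    aff = expr.convexify(x0, degree=1)              # AffExpr: A x + b
    quad = expr.convexify(x0, degree=2)             # QuadExpr: 0.5 x'Qx + Ax + b
    print("affine approx at x0:", aff.eval(x0))     # matches f(x0)
    print("quadratic approx at x0:", quad.eval(x0)) # also matches f(x0)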
|
StarcoderdataPython
|
11294480
|
<reponame>syeomvols/SISSO<gh_stars>0
# Created by <NAME> and <NAME>, 2022.2
# Variable Selection for SISSO (SISSO-SV). Please refer to [<NAME>, <NAME>, et al., xxx] for more details.
# Usage: prepare the normal SISSO.in and train.dat in working directory, and then run the VarSelect.py with
# proper input parameters below.
# Running the code: python3 VarSelect.py (requires Python 3 or newer)
###################################################### User Input ################################################
# The values below are good for usual computing power. Remember to change the 'runSISSO' for your machine.
# ----------------------------------------------------------------------------------------------------------------
n_init = 10 # initial size of the subset of input features (the S in the paper)
n_RS = 4 # number of newly selected input features by random search (the Sa in the paper)
n_max = 23 # maximal size of the subset (the S in the paper)
nstep_max = 100 # maximal iterations
nstep_converge = 20 # converged and stop if the model error unchanged after certain number of steps.
restart = 0 # 0: start from scratch, 1: continue the unfinished job
runSISSO = 'mpirun -np 64 SISSO.3.1 > SISSO.log' # set your mpi command to run SISSO (v3.1) !!!
##################################################################################################################
import os
import copy
import time
import random
import math
def SISSO_out_reader(SISSO_out_folder, dimension, all_features, maths_operators):
# From SISSO.out, read the errors and involved primary features in all the models
SISSO_out_file = open('%s/SISSO.out' % SISSO_out_folder, 'r').readlines()
for i in range(len(SISSO_out_file)):
SISSO_out_file[i] = SISSO_out_file[i].strip()
feature_list = []
descriptor_dict = {}
descriptor_dim = 0
for i in range(len(SISSO_out_file)):
if SISSO_out_file[i].startswith('@@@descriptor'):
descriptor_dim += 1
descriptor_list = []
for d in range(descriptor_dim):
descriptor = SISSO_out_file[i + d + 1]
for st in range(len(descriptor)):
if descriptor[st] == ':':
descriptor = descriptor[st + 1:]
break
for feature in descriptor_2_features(descriptor, all_features, maths_operators):
feature_list.append(feature)
feature_list = list(set(feature_list))
descriptor_list.append(descriptor)
descriptor_dict.update({descriptor_dim: feature_list})
total_RMSE_dict = {}
RMSE_dim = 0
for i in range(len(SISSO_out_file)):
if SISSO_out_file[i].startswith('RMSE and MaxAE:'):
RMSE_dim += 1
RMSE_str = SISSO_out_file[i].replace('RMSE and MaxAE:', '').strip()
RMSE = ''
for j in list(RMSE_str):
if j != ' ':
RMSE += j
else:
break
total_RMSE_dict.update({RMSE_dim: float(RMSE)})
return feature_list, total_RMSE_dict[dimension]
def descriptor_2_features(descriptor, all_features,maths_operators):
# Identify the primary features in a descriptor formula
import copy
brace = []
brace_position = []
for i in range(len(descriptor)):
if descriptor[i] == '(':
brace.append(0)
brace_position.append(i)
if descriptor[i] == ")":
brace.append(1)
brace_position.append(i)
features = []
while brace:
for i in range(len(brace)):
if (brace[i] == 0) and (brace[i + 1] == 1):
features.append(descriptor[brace_position[i] + 1:brace_position[i + 1]])
# if features[-1].startswith('('):
# del features[-1]
del brace[i:i + 2]
del brace_position[i:i + 2]
break
features_new = []
for feature in features:
features_new.append(feature)
for Feature in features:
maths_operator_position = []
maths_operator_length = []
for i in range(len(Feature)):
for operator in maths_operators:
op_len = len(operator)
if Feature[i:i + op_len] == operator:
maths_operator_position.append(i)
maths_operator_length.append(op_len)
break
Feature_cp = copy.copy(Feature)
count = 0
count_max = len(copy.copy(maths_operator_position))
while count < count_max:
for j in range(len(maths_operator_position)):
features_new.append(Feature_cp[:maths_operator_position[j]])
features_new.append(Feature_cp[maths_operator_position[j] + maths_operator_length[j]:])
maths_operator_length_0 = maths_operator_length[:1][0] + maths_operator_position[:1][0]
Feature_cp = Feature_cp[maths_operator_length_0:]
del maths_operator_length[:1]
del maths_operator_position[:1]
for j in range(len(maths_operator_position)):
maths_operator_position[j] = maths_operator_position[j] - maths_operator_length_0
count += 1
features_out = []
for i in features_new:
if (i not in features_out) & (i in all_features):
features_out.append(i)
return features_out
def initial_SISSO_in_2_output_parameter(initial_file_dir, all_features_list, output_parameter):
# Read information from SISSO.in
parameter_startwith_dict = {1: "dimclass=", 2: "opset=", 3: 'desc_dim='}
SISSO_in_file = open('%s/SISSO.in' % initial_file_dir, 'r').readlines()
for i in range(len(SISSO_in_file)):
SISSO_in_file[i] = SISSO_in_file[i].strip()
output_para_in_file = ''
for i in range(len(SISSO_in_file)):
if SISSO_in_file[i].startswith(parameter_startwith_dict[output_parameter]):
output_para_in_file = SISSO_in_file[i]
output_para_in_file = output_para_in_file.replace(parameter_startwith_dict[output_parameter], '')
if output_parameter == 1: # dimclass
dimclass = output_para_in_file
dimclass = dimclass.replace('(', "")
for alp in range(len(dimclass)):
if dimclass[alp] == '!':
dimclass = dimclass[:alp].strip()
break
if dimclass == ')':
return [[]]
else:
dimclass_list = []
operator = ''
for i in dimclass:
if i == '!':
break
if (i != ':' and i != ")"):
operator += i
else:
dimclass_list.append(int(operator))
operator = ''
if (dimclass_list[-1] > len(all_features_list)):
exit('Error: dimclass out of range!\n'
'Check the parameter \'dimclass\' and \'nsf\' in SISSO.in')
if len(dimclass_list) % 2 != 0:
exit('Error: wrong \'dimclass\' setting! \n'
'Check the parameter \'dimclass\' in SISSO.in')
feature_class = []
list_new = []
for i in range(len(dimclass_list)):
if i % 2 == 1:
if i != len(dimclass_list) - 1:
list_new += all_features_list[dimclass_list[i]:dimclass_list[i + 1] - 1]
else:
list_new += all_features_list[dimclass_list[i]:]
continue
feature_class.append(all_features_list[dimclass_list[i] - 1:dimclass_list[i + 1]])
# if list_new != []:
# feature_class.append(list_new)
return feature_class
elif output_parameter == 2: # opset
# print(output_para_in_file)
operators = output_para_in_file
operators = operators.replace('\'', '').replace('(', '')
# print(operators)
operators_list = []
operator = ''
for i in operators:
if i == '!':
break
if (i != ':' and i != ")"):
operator += i
else:
operators_list.append(operator)
operator = ''
return operators_list
elif output_parameter == 3:
desc_dim = output_para_in_file
desc_dim = desc_dim.replace('desc_dim=', '')
desc_dim = desc_dim[:3]
desc_dim = desc_dim.strip()
return int(desc_dim)
def initial_train_dat_2_output_parameter(train_dat_folder, output_parameter):
# Read data from train.dat
train_dat_lines = open('%s/train.dat' % train_dat_folder).readlines()
for line in range(len(train_dat_lines)):
train_dat_lines[line] = train_dat_lines[line].replace(',',' ').replace('\t',' ')
train_dat_lines[line] = train_dat_lines[line].strip()
if not train_dat_lines[line]:
train_dat_lines.remove('')
for line in range(len(train_dat_lines)):
train_dat_lines[line] = train_dat_lines[line].split()
features_name_list = train_dat_lines[0][2:]
materials_name = train_dat_lines[0][0]
property_name = train_dat_lines[0][1]
train_dat = {}
for line in range(len(train_dat_lines)):
if line == 0:
for num in range(len(train_dat_lines[line])):
train_dat.update({train_dat_lines[line][num]: []})
else:
for num in range(len(train_dat_lines[line])):
list_temp = train_dat[train_dat_lines[0][num]]
list_temp.append(train_dat_lines[line][num])
train_dat.update({train_dat_lines[0][num]: list_temp})
if output_parameter == 1:
return materials_name
elif output_parameter == 2:
return property_name
elif output_parameter == 3:
return list(features_name_list)
elif output_parameter == 4:
return train_dat
def build_SISSO_in(initial_SISSO_in_folder, new_SISSO_in_folder, new_features_class, features_list):
# Update SISSO.in for new iteration
import os
# new_features_class = [ ["f1","f2","f3"], ["f4","f5"],["f6"] ]
number_feature = len(features_list)
if new_features_class == []:
dim_class = 'dim_class=()\n'
else:
n_group = []
for i in range(len(new_features_class)):
n_group.append(len(new_features_class[i]))
dim_class_list = [1]
dim_class = 'dimclass=(1:'
for i in range(len(n_group)):
if i == 0:
dim_class_list.append(dim_class_list[0] + n_group[i] - 1)
dim_class += (str(dim_class_list[-1]) + ')')
else:
dim_class_list.append(dim_class_list[-1] + 1)
dim_class += ('(' + str(dim_class_list[-1]) + ':')
dim_class_list.append(n_group[i] - 1 + dim_class_list[-1])
dim_class += (str(dim_class_list[-1]) + ')')
dim_class += '\n'
nsf = 'nsf=%s\n' % number_feature
SISSO_in = open('%s/SISSO.in' % initial_SISSO_in_folder, 'r').readlines()
for i in range(len(SISSO_in)):
# SISSO_in[i] = SISSO_in[i].lstrip()
if SISSO_in[i].startswith('dimclass'):
SISSO_in[i] = dim_class
if SISSO_in[i].startswith('nsf'):
SISSO_in[i] = nsf
if os.path.exists(new_SISSO_in_folder):
open('%s/SISSO.in' % new_SISSO_in_folder, 'w').writelines(SISSO_in)
else:
os.mkdir(new_SISSO_in_folder)
open('%s/SISSO.in' % new_SISSO_in_folder, 'w').writelines(SISSO_in)
def features_classification(features_list, all_features_class):
# Group the primary features for creating new train.dat according to their dimensions/units
features_class = []
for i in all_features_class:
list_new = list(set(i).intersection(features_list))
features_class.append(list_new)
features_class_new = []
for i in features_class:
if i:
features_class_new.append(i)
return features_class_new
def build_train_dat(new_train_dat_folder, new_features_class, initial_train_dat, compounds_column_name,
property_column_name, features_list):
    # Create train.dat for new iterations.
import copy
dimensionless_features = copy.copy(features_list)
if new_features_class:
for i in new_features_class:
for j in i:
dimensionless_features.remove(j)
new_train_dat_lines = []
sample_num = len(initial_train_dat[property_column_name])
for tmp_0 in range(sample_num):
if tmp_0 == 0:
tmp_line = ''
for tmp in (compounds_column_name, property_column_name):
tmp_line += '%s ' % tmp
for tmp_1 in new_features_class + [dimensionless_features]:
for tmp_2 in tmp_1:
if tmp_1:
tmp_line += '%s ' % tmp_2
new_train_dat_lines.append(tmp_line + '\n')
tmp_line = ''
for tmp in (initial_train_dat[compounds_column_name][tmp_0], initial_train_dat[property_column_name][tmp_0]):
tmp_line += '%s ' % tmp
for tmp_1 in new_features_class + [dimensionless_features]:
for tmp_2 in tmp_1:
if tmp_2:
tmp_line += '%s ' % initial_train_dat[tmp_2][tmp_0]
new_train_dat_lines.append(tmp_line + '\n')
open('%s/train.dat' % new_train_dat_folder, 'w').writelines(new_train_dat_lines)
def check_done(task_folder):
# Check if the SISSO job was done successfully.
import os, time
file_list = []
for root, folders, files in os.walk(task_folder):
for j in files:
file_list.append(j)
if 'SISSO.out' in file_list:
SISSO_out_read = open('%s/SISSO.out' % task_folder, 'r').readlines()
if len(SISSO_out_read) != 0:
if SISSO_out_read[-4].startswith('Total time (second):'):
os.system('rm -rf %s/feature_space' % task_folder)
return 1
else:
return 0
else:
return 0
else:
return 0
def random_features_list(all_features, selected_features, alpha_dict, n_init):
# Update of the primary features for new train.dat
unselected_features_list = list(set(all_features) - set(selected_features))
rand_list = []
if 1 in alpha_dict.values():
for i in unselected_features_list:
rand_list.append([i, random.random() * (alpha_dict[i])])
if 1 not in alpha_dict.values():
for i in unselected_features_list:
rand_list.append([i, random.random()])
# bubble sort
for i in range(len(rand_list)):
for j in range(len(rand_list) - i - 1):
if rand_list[j][1] < rand_list[j + 1][1]:
rand_list[j], rand_list[j + 1] = rand_list[j + 1], rand_list[j]
feature_new = []
for i in rand_list[: n_init - len(selected_features)]:
feature_new.append(i[0])
return selected_features + feature_new
def update_alpha_list(alpha_dict, selected_features, features_list, alpha):
# Update the penalty factor
# features_list_dropped = list(set(features_list) - set(selected_features))
for i in features_list:
alpha_old = alpha_dict[i]
alpha_dict.update({i: alpha_old * alpha})
return alpha_dict
def read_feature_list_from_train_data(task_folder):
train_dat = open('%s/train.dat' % task_folder, 'r').readlines()
columns = train_dat[0].split()
return columns[2:], len(columns[2:])
def check_last_step():
os.system('ls -F | grep \'/$\' > .temp_file')
time.sleep(5)
dir_list = open('./.temp_file', 'r').readlines()
for i in range(len(dir_list)):
dir_list[i] = dir_list[i].strip()[:-1]
max_num = -1
for i in dir_list:
if i.isnumeric():
if max_num < int(i):
max_num = int(i)
IF_continue_step_done = 0
if max_num != -1:
file_list = os.listdir('./%s' % str(max_num))
if 'SISSO.out' in file_list:
SISSO_out_read = open('%s/SISSO.out' % str(max_num), 'r').readlines()
if len(SISSO_out_read) != 0:
if SISSO_out_read[-4].startswith('Total time (second):'):
IF_continue_step_done = 1
os.system('rm -f .temp_file')
return max_num, IF_continue_step_done
# -----------------------------
time_start = time.time()
initial_file_folder = './'
compounds_column_name = initial_train_dat_2_output_parameter(initial_file_folder, 1)
property_column_name = initial_train_dat_2_output_parameter(initial_file_folder, 2)
all_features = initial_train_dat_2_output_parameter(initial_file_folder, 3)
train_dat = initial_train_dat_2_output_parameter(initial_file_folder, 4)
initial_maths_operators = initial_SISSO_in_2_output_parameter(initial_file_folder, all_features, 2)
all_features_class = initial_SISSO_in_2_output_parameter(initial_file_folder, all_features, 1)
desc_dim = initial_SISSO_in_2_output_parameter(initial_file_folder, all_features, 3)
selected_features = []
selected_features_list = []
features_list = []
# train_features=[]
features_list_list = []
RMSE_list = []
min_RMSE_list = []
VS_results = open('./VS_results', 'a')
VS_results.write(
"iterations \t'percentage_of_visited_variables'\t'RMSE_of_this_step'\t'Lowest_RMSE_of_all_steps'\t[variables in the lowest-RMSE-model]\t[variables deselected by SISSO in this step]\t\n")
VS_results.close()
alpha_dict = {}
for i in all_features:
alpha_dict.update({i: 1})
min_RMSE_step = 0
visited_set = set()
alpha = 0
continue_step, IF_continue_step_done = check_last_step()
if continue_step >= 0:
# os.system('cp VS_results VS_results_old')
if IF_continue_step_done == 0:
os.system('rm -rf %s' % str(continue_step))
for i in range(nstep_max):
if restart :
if i < continue_step + IF_continue_step_done:
features_list, n_init = read_feature_list_from_train_data('./%s' % str(i))
selected_features, RMSE = SISSO_out_reader('./%s' % str(i), desc_dim, all_features, initial_maths_operators)
alpha_dict = update_alpha_list(alpha_dict, selected_features, features_list, alpha)
# train_features = initial_train_dat_2_output_parameter('./%s' % str(i), 3)
features_list_list.append(features_list)
RMSE_list.append(RMSE)
selected_features_list.append(selected_features)
visited_set.update(features_list)
completed_percent = float(len(visited_set)) / float(len(all_features))
if RMSE < RMSE_list[min_RMSE_step]:
min_RMSE_step = i
elif RMSE == RMSE_list[min_RMSE_step] and len(selected_features) <= len(selected_features_list[min_RMSE_step]):
min_RMSE_step = i
else:
selected_features, RMSE = SISSO_out_reader('./%s' % str(min_RMSE_step), desc_dim, all_features,
initial_maths_operators)
min_RMSE_list.append(RMSE_list[min_RMSE_step])
continue
VS_results = open('./VS_results', 'a')
VS_log = open("./VS_log", 'a')
new_folder = './%s' % str(i)
try:
os.mkdir(new_folder)
except:
        print('folder %s/ already exists!\n' % str(i))
VS_log.write('==========' * 10)
VS_log.write('\niteration\t%s\n' % str(i))
alpha_dict = update_alpha_list(alpha_dict, selected_features, features_list, alpha)
features_list = random_features_list(all_features, selected_features, alpha_dict, n_init)
new_features_class = features_classification(features_list, all_features_class)
build_SISSO_in(initial_file_folder, new_folder, new_features_class, features_list)
build_train_dat(new_folder, new_features_class, train_dat, compounds_column_name, property_column_name,features_list)
os.chdir(new_folder)
os.system('%s' % runSISSO)
os.chdir('../')
time.sleep(5)
while True:
check_num = check_done(new_folder)
time.sleep(5)
if check_num == 1:
break
selected_features, RMSE = SISSO_out_reader('./%s' % str(i), desc_dim, all_features, initial_maths_operators)
features_list_list.append(features_list)
RMSE_list.append(RMSE)
selected_features_list.append(selected_features)
features_list = initial_train_dat_2_output_parameter('./%s' % str(i), 3)
visited_set.update(features_list)
completed_percent = float(len(visited_set)) / float(len(all_features))
# if len(visited_set) == len(all_features):
# alpha = 1
# for f in all_features:
# alpha_dict.update({f: 1})
if RMSE < RMSE_list[min_RMSE_step]:
min_RMSE_step = i
elif RMSE == RMSE_list[min_RMSE_step] and len(selected_features) <= len(selected_features_list[min_RMSE_step]):
min_RMSE_step = i
else:
selected_features, RMSE = SISSO_out_reader('./%s' % str(min_RMSE_step), desc_dim, all_features,
initial_maths_operators)
    VS_results.write('iteration %s\t%s\t%.06f\t%s\t%s\t%s\t\n' % (
str(i), format(completed_percent, '.1%'), RMSE_list[-1], RMSE_list[min_RMSE_step],
selected_features, list(set(features_list).difference(set(selected_features)))))
n_init = len(selected_features) + n_RS
if n_init > n_max:
VS_results.write('Warning: The subset size hits maximum, the n_RS for the next step is reduced to %s\n' % str(
n_RS -(n_init - n_max)))
n_init = n_max
VS_log.write('Variables in the lowest-RMSE-model is %s, at iteration %s, with the RMSE %06f\n' % (
str(len(selected_features)), str(min_RMSE_step), RMSE_list[min_RMSE_step]))
VS_log.write('Size of the next subset is %s \n' % str( n_init))
VS_log.write("Unvisited variables (1) : \n%s\n\n" % alpha_dict)
VS_log.close()
min_RMSE_list.append(RMSE_list[min_RMSE_step])
if len(RMSE_list) >= nstep_converge:
if len(list(set(min_RMSE_list[-nstep_converge:]))) == 1:
VS_results.write('Stop! \n')
VS_results.close()
break
time_end = time.time()
VS_results = open('./VS_results', 'a')
VS_results.write('%s\t%s\n' % ('Total time (second):', str(round(time_end - time_start, 2))))
VS_results.close()
|
StarcoderdataPython
|
11390561
|
import logging
import os
import sqlite3
from time import sleep
def _log_msg(msg, project_id=None):
"""Return the log message with project_id."""
if project_id is not None:
return f"Project {project_id} - {msg}"
return msg
def get_db(db_file):
db = sqlite3.connect(str(db_file), detect_types=sqlite3.PARSE_DECLTYPES)
db.row_factory = sqlite3.Row
return db
def release_all_locks(db_file):
db = get_db(db_file)
db.execute('DELETE FROM locks;')
db.close()
class SQLiteLock():
def __init__(self,
db_file,
lock_name="global",
blocking=False,
timeout=30,
polling_rate=0.4,
project_id=None):
self.db_file = db_file
self.lock_name = lock_name
self.lock_acquired = False
self.timeout = timeout
self.polling_rate = polling_rate
self.project_id = project_id
# acquire
self.acquire(
blocking=blocking, timeout=timeout, polling_rate=polling_rate)
def acquire(self, blocking=False, timeout=30, polling_rate=0.4):
if self.lock_acquired:
return
if not os.path.isfile(self.db_file):
self.init_db()
cur_timeout = 0
        while not self.lock_acquired:
db = get_db(self.db_file)
try:
db.isolation_level = 'EXCLUSIVE'
db.execute('BEGIN EXCLUSIVE')
lock_entry = db.execute('SELECT * FROM locks WHERE name = ?',
(self.lock_name, )).fetchone()
if lock_entry is None:
db.execute('INSERT INTO locks (name) VALUES (?)',
(self.lock_name, ))
self.lock_acquired = True
logging.debug(
_log_msg(f"Acquired lock {self.lock_name}",
self.project_id))
db.commit()
except sqlite3.OperationalError as e:
logging.error(
_log_msg(f"Encountering operational error {e}",
self.project_id))
db.close()
            if self.lock_acquired or not blocking:
                break
            if cur_timeout >= timeout:
                # stop waiting once the requested timeout has elapsed
                break
            cur_timeout += polling_rate
            sleep(polling_rate)
def init_db(self):
db = get_db(self.db_file)
db.executescript('DROP TABLE IF EXISTS locks; '
'CREATE TABLE locks (name TEXT NOT NULL);')
db.close()
def locked(self):
return self.lock_acquired
def __enter__(self):
return self
def __exit__(self, *_, **__):
self.release()
def release(self):
if not self.locked():
return
while True:
db = get_db(self.db_file)
try:
db.execute('DELETE FROM locks WHERE name = ?',
(self.lock_name, ))
db.commit()
db.close()
break
except sqlite3.OperationalError:
pass
db.close()
sleep(0.4)
logging.debug(
_log_msg(f"Released lock {self.lock_name}", self.project_id))
self.lock_acquired = False
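# Minimal usage sketch: acquire the lock as a context manager around a critical
# section. The database path below is an illustrative placeholder.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    lock_db = "example_locks.sqlite"  # placeholder path
    with SQLiteLock(lock_db, lock_name="demo", blocking=True, timeout=5) as lock:
        if lock.locked():
            print("lock held, doing exclusive work ...")
        # the lock is released automatically when the with-block exits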
|
StarcoderdataPython
|
178036
|
import numpy as np
from skimage import feature
from sklearn import preprocessing
class LBP:
def __init__(self, p, r):
self.p = p
self.r = r
def getVecLength(self):
return 2**self.p
def getFeature(self, imgMat):
feat = feature.local_binary_pattern(
imgMat, self.p, self.r, method='uniform')
        # np.histogram's normed argument was removed in modern NumPy;
        # density=True yields the same normalized histogram
        re, _ = np.histogram(feat, bins=range(256), density=True)
return re
def getFeatVecs(self, imgList, load=0):
if load == 1:
feats = np.load(r"featVectLbp.npy")
types = np.load(r"typesLbp.npy")
return (feats, types)
feats = None
# i=0
types = np.float32([]).reshape((0, 1))
for mat, type in imgList:
# print("[lbp]:"+str(i))
# i+=1
if mat is None:
continue
feat = self.getFeature(mat)
if feats is None:
feats = feat.reshape((1, -1))
else:
# print(feat.shape)
# print(feats.shape)
feats = np.append(feats, feat.reshape((1, -1)), axis=0)
types = np.append(types, np.array(type).reshape((1, 1)))
np.save(r"featVectLbp.npy", feats)
np.save(r"typesLbp.npy", types)
return (feats, types)
class HOG:
def getVecLength(self):
return 1764
def getFeature(self, imgMat):
feat = feature.hog(imgMat, orientations=9, pixels_per_cell=(
16, 16), cells_per_block=(2, 2), block_norm='L2-Hys')
feat = feat.reshape((1, -1))
feat = preprocessing.normalize(feat)
return feat
def getFeatVecs(self, imgList, load=0):
if load == 1:
feats = np.load(r"featVectHog.npy")
types = np.load(r"typesHog.npy")
return (feats, types)
feats = None
# i=0
types = np.float32([]).reshape((0, 1))
for mat, type in imgList:
# print("[hog]:"+str(i))
# i+=1
# print(mat.shape)
feat = self.getFeature(mat)
if feats is None:
feats = feat.copy()
else:
feats = np.append(feats, feat, axis=0)
types = np.append(types, np.float32([type]).reshape((1, 1)))
np.save(r"featVectHog.npy", feats)
np.save(r"typesHog.npy", types)
return (feats, types)
def extractfeature(data, tags):
print("[feature] start")
matList = []
for i in range(len(data)):
matList.append((data[i], tags[i]))
hog = HOG()
lbp = LBP(8, 1)
print("[feature]hog")
featHog, types = hog.getFeatVecs(matList, load=0)
print("[feature] lbp")
featLbp, _ = lbp.getFeatVecs(matList, load=0)
feats = np.append(featHog, featLbp, axis=1)
# feats=featHog
print("[feature] end")
return (feats, types)
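# Minimal usage sketch, assuming grayscale images already loaded as 2-D numpy
# arrays (e.g. via skimage.io.imread(..., as_gray=True)) plus integer class
# tags; the random patches below are stand-ins for real data.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    data = [rng.random((64, 64)) for _ in range(3)]  # three fake 64x64 images
    tags = [0, 1, 0]
    feats, types = extractfeature(data, tags)
    print("feature matrix:", feats.shape, "labels:", types.shape)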
|
StarcoderdataPython
|
3536612
|
<filename>pie.py
import data
import matplotlib.pyplot as plt
def pie_chart():
df, emails, timestamp = data.get_data()
# queries to filter responses by social media, labels are self-explanatory
tiktok = df[df["Social Media"] == "Tiktok"]
instagram = df[df["Social Media"] == "Instagram"]
youtube = df[df["Social Media"] == "Youtube"]
reddit = df[df["Social Media"] == "Reddit"]
twitter = df[df["Social Media"] == "Twitter"]
facebook = df[df["Social Media"] == "Facebook"]
snapchat = df[df["Social Media"] == "Snapchat"]
whatsapp = df[df["Social Media"] == "WhatsApp"]
# put all the filtered data into a list
social_medias = [
tiktok,
instagram,
youtube,
reddit,
twitter,
facebook,
snapchat,
whatsapp,
]
# create labels for each social media
labels = [
"Tiktok",
"Instagram",
"Youtube",
"Reddit",
"Twitter",
"Facebook",
"Snapchat",
"WhatsApp",
]
for i in social_medias:
i.reindex() # reindex all of the filtered data
# get number of each social media's users
data1 = [len(i.index) for i in social_medias]
fig, ax = plt.subplots()
ax.pie(data1, labels=labels, autopct="%1.1f%%", pctdistance=0.9, normalize=True)
ax.axis("equal")
    # forward slash works on all platforms and avoids the "\p" escape pitfall
    fig.savefig("Piechart/piechart.jpg")
|
StarcoderdataPython
|
161121
|
from typing import Dict, List
import logging
from pydoc import locate
from attrdict import AttrDict
from flask_sqlalchemy import Model
from api.models import * # noqa
logger = logging.getLogger(__name__)
class Endpoint(object):
def __init__(
self,
name: str,
model: str,
version: str = None,
path: str = "/",
mappings: Dict[str, str] = None,
url_params: List[str] = None,
exclude: List[str] = None,
options: List[str] = None,
normalize: bool = False,
updated_column: str = "updated_at",
enabled: bool = True,
ignore_unknown: bool = True,
**kwargs,
):
self.name = name
self.version = version
self.path = path
self.mappings = AttrDict(mappings or {})
self.model_name = model
self._model = None
self.updated_column = updated_column
self.url_params = url_params
self._exclude = exclude or []
self.normalize = normalize
self.options = options or []
self.enabled = enabled
self.ignore_unknown = ignore_unknown
def __repr__(self):
return self.path
def __iter__(self):
attrs = [x for x in dir(self) if not x.startswith("_")]
for a in attrs:
yield a, getattr(self, a)
@property
def model(self) -> Model:
if self._model is None:
self._model = self.locate_model(self.model_name)
return self._model
@property
def exclude(self):
return self._exclude
@property
def known_columns(self):
return self._exclude + self.mapped_names
@property
def alias_map(self) -> Dict[str, str]:
return self.mappings.get("aliases", {})
@property
def mapped_names(self) -> List[str]:
return list(self.alias_map.keys())
@property
def mapped_aliases(self) -> List[str]:
return list(self.alias_map.values())
def locate_model(self, model_name: str) -> Model:
model: Model = None
try:
# try to import dotted model name. ex: api.models.MyModel
model = locate(model_name)
logger.debug(f"Found model: {model_name}")
except ModuleNotFoundError:
logger.debug(
f"Failed to import module '{model_name}' from project directory"
)
try:
# try to import model from global namespace
model = globals()[model_name]
logger.debug(f"Model '{model_name}' found in global namespace")
            except KeyError:
raise ModuleNotFoundError(
f"Name '{model_name}' not found in project or global namespace"
)
except Exception as e:
raise Exception(f"Unable to locate module {model_name} -- {e}")
return model
@staticmethod
def load_from_config(
app_config: object, load_disabled: bool = False
) -> Dict[str, "Endpoint"]:
endpoints: dict = {}
try:
endpoints = app_config.endpoints # type: ignore
except AttributeError:
raise AttributeError("Config object has no attribute 'endpoints'")
loaded: Dict[str, Endpoint] = {}
for ep in endpoints.items():
try:
new = Endpoint(name=ep[0], **ep[1])
if new.enabled or load_disabled:
loaded[ep[0]] = new
# print(f"Created endpoint ({ep[0]})")
# else:
# print(f"Skipping endpoint ({ep[0]})")
except Exception as e:
logger.error(f"Failed to create endpoint ({ep[0]}) -> {e}")
return loaded
@staticmethod
def from_dict(name: str, data: dict):
return Endpoint(name, **data)
if __name__ == "__main__":
from config import get_active_config
conf = get_active_config()
endpoints = conf.endpoints
# [x for x in endpoints.items()]
Endpoint.load_from_config(conf)
# e = Endpoint("test", **{"model": "test"})
e = Endpoint(name="test", **endpoints["frac_schedules"])
e.exclude
|
StarcoderdataPython
|
3427900
|
# Licensed under MIT License - see LICENSE
"""
An N-dimensional lattice class with an identify_cluster method.
"""
import numpy as np
__all__ = ["latticeND"]
class latticeND():
"""
An N-dimensional lattice class.
"""
def __init__(self, data, level):
"""
Args:
data (`numpy.ndarray` of scalar): Data to make dendrogram tree.
level (scalar): level of this lattice.
"""
self._shape = data.shape
self._dim = len(self._shape)
self._len = np.prod(self._shape)
self._lattice = data > level
self._label = None
@property
def shape(self):
"""`numpy.ndarray` of int: Shape of the lattice."""
return self._shape
@property
def dim(self):
"""int: Dimension of the lattice."""
return self._dim
@property
def len(self):
"""int: Total number of elements in the lattice."""
return self._len
@property
def lattice(self):
"""`numpy.ndarray` of int: Area ocupied by the lattice."""
return self._lattice
@property
def label(self):
"""`numpy.ndarray` of int: Label of clusters."""
return self._label
def identify_cluster(self):
"""
Identify clusters in the lattice.
A cluster is a group of connected (neighboring) pixels.
Returns:
`numpy.ndarray` of int: Label of clusters.
"""
# for simplicity, use flattened labels
label_flat = -1 * np.ones(self._len, dtype=int)
lattice_flat = self._lattice.flatten()
# define proper labels, which change over time
        max_label = self._len // 2 + 1
        proper_label = np.arange(max_label, dtype=int)
new_label = 0
for i in range(self._len):
if not lattice_flat[i]:
continue
len_j = self._len
neighbors = -1 * np.ones(2*self._dim, dtype=int)
# find neighbors in each dimension
for j in range(self._dim):
# length of the j-th dimension
len_j = len_j // self._shape[j]
                idx = (i // len_j) % self._shape[j]  # index along dimension j
# 2 neighbors
if idx > 0:
neighbors[2*j] = label_flat[i-len_j]
if idx < self._shape[j]-1:
neighbors[2*j+1] = label_flat[i+len_j]
# set label to the first identified cluster
if np.max(neighbors) < 0:
label_flat[i] = new_label
new_label += 1
else:
nonzero = np.unique(proper_label[neighbors[neighbors>-1]])
label_flat[i] = np.min(nonzero)
                # set connecting clusters to the same label
same_label = np.min(nonzero)
if nonzero.shape[0] > 1:
for non in nonzero:
replace = np.where(proper_label==non)[0]
proper_label[replace] = (same_label *
np.ones(replace.shape[0], dtype=int))
        # update labels (only those with label > -1)
label_flat[lattice_flat] = proper_label[label_flat[lattice_flat]]
self._label = label_flat.reshape(self._shape)
return self._label
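# Minimal usage sketch: identify clusters of positive cells in a small 2-D array.
# Connected above-level cells share a label; cells at or below the level stay -1.
if __name__ == "__main__":
    demo = np.array([[1, 1, 0, 0],
                     [0, 1, 0, 2],
                     [0, 0, 0, 2],
                     [3, 0, 0, 2]])
    lat = latticeND(demo, level=0)
    print(lat.identify_cluster())
    # three clusters: the top-left group, the right-hand column group, and the
    # lone bottom-left cell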
|
StarcoderdataPython
|
5195185
|
<gh_stars>1-10
import settings
import schedules.messages as messages
#TODO: Multiprocessing, Real Scheduling
if __name__ == "__main__":
messages.process_scheduled_messages()
|
StarcoderdataPython
|
6498509
|
from RedditPy import RedditPy
re = RedditPy("<username>", "<password>")
re.subscribe("test") # optional: Type="user", unsub=True
|
StarcoderdataPython
|
5000072
|
<reponame>YAPP-18th/ML-Team-Backend
from app.database.base_class import Base
from app.models.users import User
|
StarcoderdataPython
|
1808874
|
<filename>src/pycairo/examples/cairo_snippets/c_to_python.py
#!/usr/bin/env python
"""
translate C <snippet>.cairo to Python <snippet>.py
; -> ''
cairo_ -> cr.
'(cr, ' -> ( but not snippet_normalize (cr, width, height)
(cr) -> ()
/* -> #/*
CAIRO_ -> cairo.
"""
import sys
if len(sys.argv) != 2 or not sys.argv[1].endswith('.cairo'):
raise SystemExit('usage: c_to_python.py <file>.cairo')
filename_in = sys.argv[1]
filename_out = filename_in.replace('.cairo', '.py')
file_in = open(filename_in)
file_out = open(filename_out, 'w')
for line in file_in:
line = line.replace(';', '') # should be ';' and whitespace to EOL only -> \n
if not line.startswith('snippet_'):
line = line.replace('cairo_', 'cr.')
line = line.replace('(cr, ', '(')
line = line.replace('(cr)', '()')
line = line.replace('/*', '#/*')
line = line.replace(' ', '')
line = line.replace('CAIRO_', 'cairo.')
file_out.write (line)
|
StarcoderdataPython
|
6614755
|
import pytest
from remove_popular import solution
def test_solution_simple():
actual = solution([1], 0)
expected = []
assert actual == expected
def test_solution():
actual = solution([1, 1, 1, 1, 2, 2, 2, 3, 4, 4, 5, 6, 6, 4, 2, 4], 3)
expected = [3, 5, 6, 6]
assert actual == expected
|
StarcoderdataPython
|
3457375
|
<reponame>OceanAtlas/QC_Library<filename>setup.py
# -*- mode: python; coding: utf-8 -*
# Copyright (c) <NAME>
# Licensed under the 2-clause BSD License
from __future__ import absolute_import, division, print_function
from setuptools import setup, Extension
import os
import io
with io.open('README.md', 'r', encoding='utf-8') as readme_file:
readme = readme_file.read()
setup_args = {
'name': 'QC_Library',
'author': '<NAME>/<NAME>',
'url': 'https://github.com/OceanAtlas/QC_Library',
'license': 'BSD',
'description': 'A Library of routines to despike TS data',
'long_description': readme,
'long_description_content_type': 'text/markdown',
'package_dir': {'QC_Library': 'QC_Library'},
'packages': ['QC_Library'],
'version': '0.0.1',
'include_package_data': True,
# 'setup_requires': ['numpy', 'scipy'],
'install_requires': ['numpy', 'scipy'],
'classifiers': ['Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering'],
'keywords': 'oceanography seawater carbon, time series, quality control'
}
if __name__ == '__main__':
setup(**setup_args)
|
StarcoderdataPython
|