Dataset columns:
  max_stars_repo_path: string, length 3 to 269
  max_stars_repo_name: string, length 4 to 119
  max_stars_count: int64, 0 to 191k
  id: string, length 1 to 7
  content: string, length 6 to 1.05M
  score: float64, 0.23 to 5.13
  int_score: int64, 0 to 5
vis/viscell.py
rnbwdsh/VisRNN_ICLR_2016_Text
0
12791551
import json
import os.path as path

import numpy as np
import torch

from utils import *
from model import *

import pdb


def vis_cell(test_set, int_to_char, char_to_int, config):
    # no trained model available -- ask the user to train one first
    if not path.exists(path.join(config.model_dir, config.model + '_' + str(config.hidden_size) +
                                 '_' + str(config.n_layers) + '.pth')):
        raise Exception('No such trained model! Please train a new model first!')

    # load a trained model
    char_rnn = CharRNNs(tokens=(int_to_char, char_to_int), n_hidden=config.hidden_size,
                        model=config.model, n_layers=config.n_layers)
    char_rnn.load_state_dict(torch.load(path.join(
        config.model_dir,
        config.model + '_' + str(config.hidden_size) + '_' + str(config.n_layers) + '.pth')))
    char_rnn.eval()  # problem here!!!!

    # initialize hidden state
    hidden = char_rnn.init_hidden(1)  # here, batch_size = 1
    if torch.cuda.is_available() and config.cuda:
        hidden = tuple([x.cuda() for x in hidden])

    seq = []   # store all test sequences in character form
    cell = []  # 2d array, store all cell state values; each character corresponds to a row; each row is a c_n

    stop_flag = False  # flag to stop
    counter = 0
    total = 0
    for x, y in get_batches(test_set, config.batch_size, config.seq_length):
        if stop_flag:
            break
        counter += 1
        for i in range(config.seq_length):
            seq.extend([int_to_char[xs] for xs in x[i]])

            hidden = char_rnn.init_hidden(1)  # here, batch_size = 1
            if torch.cuda.is_available() and config.cuda:
                hidden = tuple([x.cuda() for x in hidden])

            # One-hot encode our data and make them Torch tensors
            xp = one_hot_encode(x[i], len(char_to_int.keys()))
            xp, y = torch.from_numpy(xp), torch.from_numpy(y)

            # Creating new variables for the hidden state, otherwise
            # we'd backprop through the entire training history
            if config.model == 'lstm':
                hidden = tuple([each.data for each in hidden])

            inputs, targets = xp, y
            if config.cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
                hidden = tuple([each.cuda() for each in hidden])

            total += 1
            out, hidden = char_rnn(inputs, hidden)
            (_, c_n) = hidden
            cell.append(c_n.data.cpu().squeeze().numpy())

            # print progress information
            print('Processing [batch: %d, sequence: %3d]...' % (counter, i))
            print(total)
            if total >= 10000:
                stop_flag = True
            if stop_flag:
                break

    # write seq and cell into a json file for visualization
    char_cell = {}
    char_cell['cell_size'] = config.hidden_size
    char_cell['seq'] = ''.join(seq)

    # allocate space for cell values
    for j in range(config.n_layers):
        char_cell['cell_layer_' + str(j + 1)] = []

    total_char = len(cell)
    for i in range(total_char):  # for each character (time step)
        for j in range(config.n_layers):  # for each layer
            char_cell['cell_layer_' + str(j + 1)].append(cell[i][j].tolist())

    with open(path.join(config.vis_dir, 'char_cell.json'), 'w') as json_file:
        json.dump(char_cell, json_file)
2.234375
2
extensions/guessing_game.py
bk62/botter.py
0
12791552
import random
import asyncio
import typing

import discord
from discord.ext import commands

from base import BaseCog


# https://github.com/Rapptz/discord.py/blob/v1.7.2/examples/guessing_game.py
class GuessingGame(BaseCog, name="Free guessing game -- with nothing at stake."):
    def __init__(self, bot):
        self.bot = bot

    async def tick(self, ctx, correct):
        emoji = '\N{WHITE HEAVY CHECK MARK}' if correct else '\N{CROSS MARK}'
        try:
            await ctx.message.add_reaction(emoji)
        except discord.HTTPException:
            pass

    @commands.command(
        name='free_guess_now',
        help='Guess a random number from 1-9',
    )
    async def free_guess_now(self, ctx, num: int):
        answer = random.randint(1, 9)
        correct = num == answer
        await self.tick(ctx, correct)
        await ctx.reply('Correct!' if correct else f'Incorrect. The answer is {answer}',
                        mention_author=True)

    @commands.command(
        name='free_guess',
        help='Guess a random number between 1-99 or a provided range.'
    )
    async def free_guess(self, ctx, start: typing.Optional[int] = 1, end: typing.Optional[int] = 99):
        await ctx.send(f'Guess a number between {start}-{end}')

        def is_correct(m):
            return m.author == ctx.message.author and m.content.isdigit()

        answer = random.randint(start, end)
        try:
            guess = await self.bot.wait_for('message', check=is_correct, timeout=5.0)
        except asyncio.TimeoutError:
            return await ctx.reply(f'Sorry, you took too long. The answer is {answer}')

        correct = int(guess.content) == answer
        await self.tick(ctx, correct)
        await ctx.reply('Correct!' if correct else f'Incorrect. The answer is {answer}',
                        mention_author=True)


def setup(bot):
    bot.add_cog(GuessingGame(bot))
3.09375
3
tests/test_py33_exceptions.py
haypo/trollius
175
12791553
# -*- coding: utf-8 -*-
"""
Tests for py33_exceptions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import unittest

from trollius import py33_exceptions


class TestWrapErrors(unittest.TestCase):

    def test_ebadf_wrapped_to_OSError(self):
        # https://github.com/jamadden/trollius/issues/17
        import socket
        import os
        import errno
        s = socket.socket()
        os.close(s.fileno())

        with self.assertRaises(socket.error) as exc:
            s.send(b'abc')
        self.assertEqual(exc.exception.errno, errno.EBADF)

        with self.assertRaises(OSError) as exc:
            py33_exceptions.wrap_error(s.send, b'abc')
        self.assertEqual(exc.exception.errno, errno.EBADF)


if __name__ == '__main__':
    unittest.main()
2.5625
3
emotion/emoji_emotion.py
Datalab-AUTH/MSc---Lampridis---MANIFEST
3
12791554
<gh_stars>1-10 # anger, fear, joy, sadness emoji_emotions = { ":person_surfing:": [0.12, 0.195, 0.08800000000000001, 0.222], ":locked:": [0.146, 0.141, 0.196, 0.212], ":hammer:": [0.33299999999999996, 0.42700000000000005, 0.221, 0.18600000000000003], ":confused_face:": [0.331, 0.34299999999999997, 0.105, 0.34], ":fast-forward_button:": [0.327, 0.322, 0.17, 0.265], ":office_building:": [0.18100000000000002, 0.359, 0.22, 0.19], ":radio:": [0.187, 0.222, 0.316, 0.361], ":guitar:": [0.14400000000000002, 0.125, 0.257, 0.304], ":pig_face:": [0.179, 0.214, 0.165, 0.337], ":hamster_face:": [0.215, 0.196, 0.305, 0.19399999999999998], ":police_officer:": [0.34, 0.493, 0.161, 0.27], ":green_heart:": [0.126, 0.159, 0.373, 0.19], ":input_latin_letters:": [0.134, 0.126, 0.166, 0.121], ":weary_face:": [0.23600000000000002, 0.27399999999999997, 0.18600000000000003, 0.23399999999999999], ":recycling_symbol:": [0.261, 0.271, 0.33399999999999996, 0.152], ":full_moon:": [0.17600000000000002, 0.284, 0.312, 0.20800000000000002], ":jack-o-lantern:": [0.129, 0.327, 0.09, 0.092], ":wind_chime:": [0.214, 0.17600000000000002, 0.271, 0.166], ":open_hands:": [0.203, 0.18899999999999997, 0.16699999999999998, 0.23], ":flexed_biceps:": [0.225, 0.251, 0.231, 0.204], ":down_arrow:": [0.33899999999999997, 0.268, 0.142, 0.252], ":snowboarder:": [0.13699999999999998, 0.132, 0.028999999999999998, 0.20600000000000002], ":collision:": [0.16899999999999998, 0.16399999999999998, 0.048, 0.2], ":locked_with_pen:": [0.168, 0.138, 0.19899999999999998, 0.12300000000000001], ":tired_face:": [0.264, 0.376, 0.155, 0.303], ":red_apple:": [0.251, 0.182, 0.195, 0.121], ":pistol:": [0.259, 0.38799999999999996, 0.081, 0.128], ":Japanese_secret_button:": [0.19699999999999998, 0.2, 0.221, 0.24], ":ATM_sign:": [0.128, 0.179, 0.135, 0.171], ":radio_button:": [0.218, 0.209, 0.158, 0.261], ":clipboard:": [0.157, 0.233, 0.331, 0.21100000000000002], ":persevering_face:": [0.327, 0.516, 0.175, 0.41600000000000004], ":down-left_arrow:": [0.13699999999999998, 0.171, 0.151, 0.12], ":dango:": [0.27899999999999997, 0.193, 0.139, 0.054000000000000006], ":doughnut:": [0.152, 0.259, 0.136, 0.15], ":fire:": [0.306, 0.225, 0.10300000000000001, 0.179], ":oden:": [0.12300000000000001, 0.077, 0.069, 0.166], ":angry_face_with_horns:": [0.385, 0.257, 0.03, 0.21100000000000002], ":kissing_face_with_smiling_eyes:": [0.203, 0.126, 0.256, 0.138], ":woman’s_hat:": [0.175, 0.17, 0.281, 0.151], ":ON!_arrow:": [0.126, 0.139, 0.068, 0.21100000000000002], ":cooked_rice:": [0.203, 0.126, 0.222, 0.289], ":saxophone:": [0.107, 0.16, 0.244, 0.21600000000000003], ":raising_hands:": [0.122, 0.10099999999999999, 0.254, 0.23600000000000002], ":up_arrow:": [0.382, 0.293, 0.21899999999999997, 0.284], ":teacup_without_handle:": [0.156, 0.237, 0.429, 0.07], ":page_with_curl:": [0.201, 0.294, 0.282, 0.27], ":BACK_arrow:": [0.075, 0.166, 0.062, 0.20199999999999999], ":winking_face_with_tongue:": [0.126, 0.059000000000000004, 0.139, 0.129], ":Aries:": [0.214, 0.212, 0.284, 0.196], ":meat_on_bone:": [0.177, 0.218, 0.213, 0.106], ":round_pushpin:": [0.16399999999999998, 0.26899999999999996, 0.14800000000000002, 0.29], ":television:": [0.322, 0.247, 0.22699999999999998, 0.222], ":face_blowing_a_kiss:": [0.233, 0.022000000000000002, 0.215, 0.14400000000000002], ":information:": [0.17800000000000002, 0.259, 0.264, 0.284], ":flower_playing_cards:": [0.18100000000000002, 0.21100000000000002, 0.067, 0.134], ":growing_heart:": [0.151, 0.067, 0.348, 0.13], ":smiling_face_with_heart-eyes:": [0.307, 
0.18, 0.308, 0.13699999999999998], ":kissing_face:": [0.215, 0.171, 0.159, 0.272], ":glowing_star:": [0.191, 0.215, 0.38, 0.134], ":person_swimming:": [0.175, 0.159, 0.086, 0.245], ":ogre:": [0.37, 0.419, 0.109, 0.257], ":chart_increasing:": [0.22399999999999998, 0.259, 0.42700000000000005, 0.215], ":pouting_face:": [0.46799999999999997, 0.36200000000000004, 0.07400000000000001, 0.401], ":fish_cake_with_swirl:": [0.10800000000000001, 0.21600000000000003, 0.355, 0.149], ":cookie:": [0.11699999999999999, 0.18, 0.168, 0.1], ":running_shirt:": [0.138, 0.081, 0.20199999999999999, 0.203], ":heart_decoration:": [0.13699999999999998, 0.046, 0.315, 0.141], ":scroll:": [0.254, 0.267, 0.276, 0.235], ":TOP_arrow:": [0.162, 0.185, 0.205, 0.191], ":fearful_face:": [0.344, 0.389, 0.08800000000000001, 0.332], ":house:": [0.13699999999999998, 0.27399999999999997, 0.18600000000000003, 0.235], ":peach:": [0.344, 0.204, 0.128, 0.11900000000000001], ":roller_coaster:": [0.065, 0.133, 0.111, 0.18899999999999997], ":trumpet:": [0.128, 0.17800000000000002, 0.20600000000000002, 0.221], ":mouth:": [0.245, 0.136, 0.321, 0.121], ":frog_face:": [0.408, 0.29100000000000004, 0.19699999999999998, 0.16699999999999998], ":flashlight:": [0.07400000000000001, 0.19699999999999998, 0.14300000000000002, 0.131], ":downcast_face_with_sweat:": [0.321, 0.496, 0.17300000000000001, 0.447], ":custard:": [0.16399999999999998, 0.17600000000000002, 0.158, 0.131], ":cocktail_glass:": [0.032, 0.14300000000000002, 0.146, 0.046], ":Japanese_dolls:": [0.053, 0.14, 0.07, 0.08], ":chart_decreasing:": [0.28800000000000003, 0.396, 0.294, 0.38299999999999995], ":upwards_button:": [0.264, 0.261, 0.23800000000000002, 0.295], ":yellow_heart:": [0.158, 0.177, 0.27, 0.262], ":Gemini:": [0.228, 0.132, 0.262, 0.177], ":hibiscus:": [0.085, 0.218, 0.316, 0.151], ":notebook_with_decorative_cover:": [0.139, 0.15, 0.278, 0.185], ":mahjong_red_dragon:": [0.171, 0.263, 0.128, 0.212], ":sushi:": [0.134, 0.196, 0.13699999999999998, 0.214], ":two-hump_camel:": [0.151, 0.263, 0.131, 0.154], ":white_flower:": [0.187, 0.141, 0.19, 0.14400000000000002], ":weary_cat_face:": [0.251, 0.27, 0.095, 0.242], ":clinking_beer_mugs:": [0.096, 0.10099999999999999, 0.179, 0.132], ":smiling_face_with_sunglasses:": [0.036000000000000004, 0.092, 0.026000000000000002, 0.09300000000000001], ":white_small_square:": [0.276, 0.22699999999999998, 0.125, 0.161], ":heavy_large_circle:": [0.154, 0.17800000000000002, 0.122, 0.315], ":cityscape_at_dusk:": [0.053, 0.24, 0.259, 0.23399999999999999], ":steaming_bowl:": [0.183, 0.129, 0.16699999999999998, 0.226], ":factory:": [0.205, 0.306, 0.24600000000000002, 0.21], ":disappointed_face:": [0.318, 0.467, 0.131, 0.39399999999999996], ":fireworks:": [0.051, 0.165, 0.191, 0.165], ":tongue:": [0.316, 0.062, 0.136, 0.133], ":videocassette:": [0.213, 0.25, 0.312, 0.20800000000000002], ":eight_o’clock:": [0.11800000000000001, 0.341, 0.222, 0.24600000000000002], ":night_with_stars:": [0.09, 0.174, 0.298, 0.289], ":tulip:": [0.175, 0.245, 0.37, 0.188], ":snake:": [0.37, 0.35200000000000004, 0.16899999999999998, 0.166], ":floppy_disk:": [0.168, 0.324, 0.341, 0.308], ":orange_book:": [0.18100000000000002, 0.193, 0.18600000000000003, 0.217], ":Japanese_castle:": [0.092, 0.23199999999999998, 0.16399999999999998, 0.149], ":chestnut:": [0.212, 0.16699999999999998, 0.16899999999999998, 0.078], ":curry_rice:": [0.161, 0.172, 0.175, 0.145], ":school_backpack:": [0.127, 0.154, 0.174, 0.094], ":diamond_with_a_dot:": [0.222, 0.179, 0.32, 0.249], ":antenna_bars:": 
[0.16399999999999998, 0.122, 0.151, 0.132], ":pouting_cat_face:": [0.45399999999999996, 0.268, 0.11900000000000001, 0.295], ":index_pointing_up:": [0.254, 0.233, 0.49200000000000005, 0.36], ":chart_increasing_with_yen:": [0.175, 0.248, 0.305, 0.20800000000000002], ":satellite_antenna:": [0.204, 0.259, 0.303, 0.27], ":mobile_phone:": [0.127, 0.26899999999999996, 0.172, 0.309], ":white_medium-small_square:": [0.305, 0.22699999999999998, 0.126, 0.187], ":white_large_square:": [0.348, 0.19399999999999998, 0.155, 0.22899999999999998], ":sparkler:": [0.10300000000000001, 0.209, 0.221, 0.20600000000000002], ":fish:": [0.131, 0.16699999999999998, 0.147, 0.102], ":person_wearing_turban:": [0.212, 0.293, 0.302, 0.239], ":crystal_ball:": [0.16899999999999998, 0.22, 0.354, 0.196], ":moon_viewing_ceremony:": [0.149, 0.14300000000000002, 0.43700000000000006, 0.231], ":tropical_fish:": [0.063, 0.271, 0.14, 0.122], ":paw_prints:": [0.266, 0.249, 0.129, 0.155], ":running_shoe:": [0.23199999999999998, 0.094, 0.08900000000000001, 0.185], ":sad_but_relieved_face:": [0.3, 0.474, 0.145, 0.391], ":Christmas_tree:": [0.13699999999999998, 0.17, 0.285, 0.081], ":chicken:": [0.16899999999999998, 0.192, 0.218, 0.127], ":sparkling_heart:": [0.217, 0.068, 0.42200000000000004, 0.163], ":heart_with_arrow:": [0.22, 0.07400000000000001, 0.373, 0.10099999999999999], ":dizzy_face:": [0.34700000000000003, 0.45799999999999996, 0.12300000000000001, 0.361], ":footprints:": [0.21, 0.21, 0.163, 0.179], ":postbox:": [0.26899999999999996, 0.171, 0.21899999999999997, 0.175], ":one_o’clock:": [0.14400000000000002, 0.341, 0.209, 0.198], ":kissing_cat_face:": [0.18899999999999997, 0.11900000000000001, 0.215, 0.21], ":backhand_index_pointing_down:": [0.39299999999999996, 0.196, 0.317, 0.28600000000000003], ":sailboat:": [0.10400000000000001, 0.225, 0.142, 0.205], ":horse_face:": [0.254, 0.16399999999999998, 0.078, 0.159], ":left_arrow_curving_right:": [0.138, 0.275, 0.228, 0.22899999999999998], ":palm_tree:": [0.035, 0.34299999999999997, 0.129, 0.23800000000000002], ":honeybee:": [0.381, 0.285, 0.128, 0.111], ":rabbit_face:": [0.165, 0.222, 0.217, 0.037000000000000005], ":pensive_face:": [0.261, 0.40399999999999997, 0.145, 0.313], ":anchor:": [0.22, 0.179, 0.245, 0.243], ":ice_cream:": [0.228, 0.18899999999999997, 0.23199999999999998, 0.114], ":bento_box:": [0.136, 0.16, 0.159, 0.212], ":woman’s_clothes:": [0.20800000000000002, 0.154, 0.179, 0.242], ":goblin:": [0.42, 0.35, 0.149, 0.301], ":person_getting_haircut:": [0.237, 0.215, 0.266, 0.153], ":Cancer:": [0.209, 0.21899999999999997, 0.201, 0.255], ":expressionless_face:": [0.415, 0.308, 0.11, 0.319], ":person_raising_hand:": [0.068, 0.084, 0.08, 0.156], ":sweat_droplets:": [0.26, 0.11900000000000001, 0.081, 0.16899999999999998], ":cherries:": [0.171, 0.139, 0.155, 0.087], ":electric_plug:": [0.124, 0.14, 0.078, 0.139], ":cloud:": [0.18, 0.231, 0.266, 0.295], ":watch:": [0.183, 0.276, 0.172, 0.235], ":church:": [0.20800000000000002, 0.276, 0.773, 0.366], ":cyclone:": [0.16899999999999998, 0.28800000000000003, 0.177, 0.214], ":black_large_square:": [0.396, 0.159, 0.222, 0.263], ":first_quarter_moon:": [0.24100000000000002, 0.233, 0.265, 0.284], ":eyes:": [0.272, 0.218, 0.049, 0.063], ":mobile_phone_with_arrow:": [0.098, 0.142, 0.156, 0.20600000000000002], ":black_small_square:": [0.319, 0.249, 0.141, 0.22699999999999998], ":spade_suit:": [0.24600000000000002, 0.213, 0.187, 0.27899999999999997], ":performing_arts:": [0.159, 0.10800000000000001, 0.204, 0.162], ":baby_chick:": [0.156, 
0.23800000000000002, 0.125, 0.057], ":snail:": [0.162, 0.239, 0.19899999999999998, 0.17], ":cat_face:": [0.147, 0.185, 0.19699999999999998, 0.16699999999999998], ":panda_face:": [0.069, 0.23199999999999998, 0.091, 0.153], ":four_o’clock:": [0.165, 0.361, 0.171, 0.282], ":jeans:": [0.2, 0.109, 0.134, 0.209], ":blossom:": [0.20199999999999999, 0.299, 0.314, 0.242], ":fishing_pole:": [0.174, 0.14800000000000002, 0.075, 0.128], ":triangular_ruler:": [0.198, 0.201, 0.284, 0.168], ":three_o’clock:": [0.16699999999999998, 0.369, 0.209, 0.282], ":sunflower:": [0.203, 0.243, 0.354, 0.212], ":lady_beetle:": [0.228, 0.22, 0.20800000000000002, 0.153], ":hatching_chick:": [0.099, 0.171, 0.16, 0.125], ":heavy_dollar_sign:": [0.203, 0.149, 0.113, 0.228], ":Taurus:": [0.22, 0.2, 0.257, 0.253], ":right_arrow_curving_down:": [0.257, 0.276, 0.287, 0.245], ":roasted_sweet_potato:": [0.191, 0.21899999999999997, 0.25, 0.121], ":crossed_flags:": [0.114, 0.048, 0.039, 0.207], ":input_latin_uppercase:": [0.182, 0.175, 0.161, 0.182], ":kitchen_knife:": [0.321, 0.449, 0.075, 0.125], ":straight_ruler:": [0.249, 0.20600000000000002, 0.215, 0.155], ":squinting_face_with_tongue:": [0.083, 0.14, 0.027000000000000003, 0.14300000000000002], ":books:": [0.16699999999999998, 0.157, 0.35100000000000003, 0.141], ":milky_way:": [0.16699999999999998, 0.201, 0.43700000000000006, 0.22], ":ticket:": [0.10800000000000001, 0.08199999999999999, 0.10099999999999999, 0.327], ":vibration_mode:": [0.075, 0.17600000000000002, 0.083, 0.134], ":person_gesturing_OK:": [0.155, 0.142, 0.141, 0.23], ":volcano:": [0.207, 0.247, 0.141, 0.22], ":department_store:": [0.081, 0.231, 0.19899999999999998, 0.18], ":man_with_Chinese_cap:": [0.255, 0.262, 0.126, 0.17600000000000002], ":kiss:": [0.188, 0.122, 0.358, 0.22699999999999998], ":closed_umbrella:": [0.136, 0.20199999999999999, 0.201, 0.295], ":waving_hand:": [0.256, 0.252, 0.146, 0.19899999999999998], ":rice_cracker:": [0.24100000000000002, 0.156, 0.111, 0.153], ":speak-no-evil_monkey:": [0.214, 0.2, 0.081, 0.147], ":hot_springs:": [0.21, 0.228, 0.128, 0.17300000000000001], ":tent:": [0.105, 0.18899999999999997, 0.247, 0.151], ":pineapple:": [0.11699999999999999, 0.19399999999999998, 0.133, 0.12300000000000001], ":construction_worker:": [0.204, 0.298, 0.24100000000000002, 0.19699999999999998], ":person_bowing:": [0.256, 0.331, 0.262, 0.299], ":tennis:": [0.174, 0.198, 0.174, 0.327], ":sleeping_face:": [0.266, 0.23399999999999999, 0.33, 0.255], ":red_paper_lantern:": [0.111, 0.235, 0.225, 0.163], ":ribbon:": [0.20199999999999999, 0.203, 0.345, 0.193], ":link:": [0.258, 0.217, 0.179, 0.262], ":grinning_face_with_smiling_eyes:": [0.184, 0.19699999999999998, 0.188, 0.149], ":folded_hands:": [0.187, 0.17800000000000002, 0.485, 0.35100000000000003], ":musical_note:": [0.26, 0.191, 0.341, 0.32799999999999996], ":monkey:": [0.179, 0.379, 0.083, 0.032], ":mouse_face:": [0.18899999999999997, 0.20800000000000002, 0.136, 0.094], ":person_getting_massage:": [0.264, 0.23199999999999998, 0.258, 0.282], ":ballot_box_with_check:": [0.305, 0.295, 0.20600000000000002, 0.251], ":four_leaf_clover:": [0.17, 0.16, 0.324, 0.156], ":wrapped_gift:": [0.076, 0.188, 0.326, 0.057999999999999996], ":face_without_mouth:": [0.34, 0.335, 0.15, 0.359], ":blue_heart:": [0.14800000000000002, 0.064, 0.28600000000000003, 0.281], ":Japanese_symbol_for_beginner:": [0.222, 0.121, 0.237, 0.28], ":moai:": [0.131, 0.153, 0.11800000000000001, 0.095], ":wolf_face:": [0.185, 0.289, 0.083, 0.172], ":laptop_computer:": [0.127, 
0.23399999999999999, 0.35, 0.255], ":mushroom:": [0.188, 0.239, 0.21, 0.084], ":grinning_face_with_big_eyes:": [0.19399999999999998, 0.177, 0.21600000000000003, 0.17], ":diamond_suit:": [0.305, 0.17800000000000002, 0.226, 0.213], ":high-heeled_shoe:": [0.278, 0.11900000000000001, 0.23199999999999998, 0.195], ":input_symbols:": [0.10800000000000001, 0.195, 0.138, 0.17], ":tanabata_tree:": [0.16399999999999998, 0.239, 0.28800000000000003, 0.122], ":currency_exchange:": [0.159, 0.20800000000000002, 0.127, 0.226], ":house_with_garden:": [0.115, 0.24, 0.268, 0.153], ":spiral_shell:": [0.106, 0.301, 0.316, 0.174], ":backhand_index_pointing_right:": [0.19699999999999998, 0.258, 0.249, 0.258], ":handbag:": [0.099, 0.154, 0.223, 0.293], ":Libra:": [0.14400000000000002, 0.193, 0.275, 0.161], ":watermelon:": [0.152, 0.14300000000000002, 0.133, 0.071], ":glasses:": [0.157, 0.17800000000000002, 0.12300000000000001, 0.149], ":face_with_medical_mask:": [0.436, 0.34600000000000003, 0.159, 0.406], ":telephone:": [0.257, 0.204, 0.221, 0.267], ":trophy:": [0.131, 0.19399999999999998, 0.10099999999999999, 0.27399999999999997], ":american_football:": [0.185, 0.21, 0.165, 0.354], ":bank:": [0.23600000000000002, 0.284, 0.23800000000000002, 0.233], ":baby_angel:": [0.20600000000000002, 0.19699999999999998, 0.414, 0.371], ":bar_chart:": [0.213, 0.255, 0.41, 0.228], ":locked_with_key:": [0.20600000000000002, 0.095, 0.28, 0.16], ":ten_o’clock:": [0.141, 0.304, 0.191, 0.309], ":red_triangle_pointed_up:": [0.321, 0.243, 0.25, 0.214], ":grinning_face_with_sweat:": [0.19, 0.307, 0.23199999999999998, 0.27699999999999997], ":loudly_crying_face:": [0.24600000000000002, 0.276, 0.198, 0.272], ":hamburger:": [0.177, 0.122, 0.18600000000000003, 0.113], ":necktie:": [0.20199999999999999, 0.094, 0.11199999999999999, 0.147], ":card_index:": [0.147, 0.196, 0.249, 0.212], ":red_triangle_pointed_down:": [0.304, 0.242, 0.207, 0.185], ":pine_decoration:": [0.115, 0.271, 0.336, 0.17], ":grinning_cat_face:": [0.149, 0.192, 0.145, 0.25], ":hourglass_not_done:": [0.19699999999999998, 0.31, 0.266, 0.25], ":sun_behind_cloud:": [0.11199999999999999, 0.27899999999999997, 0.345, 0.252], ":balloon:": [0.042, 0.128, 0.102, 0.077], ":family:": [0.249, 0.132, 0.418, 0.215], ":exclamation_question_mark:": [0.188, 0.248, 0.085, 0.21899999999999997], ":poultry_leg:": [0.121, 0.183, 0.215, 0.122], ":sunset:": [0.065, 0.19899999999999998, 0.28600000000000003, 0.201], ":carp_streamer:": [0.125, 0.212, 0.131, 0.095], ":smiling_face_with_smiling_eyes:": [0.067, 0.073, 0.248, 0.247], ":mount_fuji:": [0.196, 0.225, 0.253, 0.172], ":play_button:": [0.168, 0.284, 0.17, 0.17800000000000002], ":high_voltage:": [0.252, 0.244, 0.147, 0.228], ":banana:": [0.21899999999999997, 0.29600000000000004, 0.184, 0.086], ":thumbs_down:": [0.442, 0.465, 0.13699999999999998, 0.47100000000000003], ":person_tipping_hand:": [0.361, 0.099, 0.19699999999999998, 0.11199999999999999], ":down-right_arrow:": [0.23, 0.242, 0.10300000000000001, 0.175], ":wedding:": [0.092, 0.139, 0.631, 0.252], ":money_bag:": [0.185, 0.17300000000000001, 0.14300000000000002, 0.177], ":ledger:": [0.115, 0.17, 0.256, 0.182], ":shooting_star:": [0.17600000000000002, 0.16, 0.377, 0.2], ":seedling:": [0.223, 0.289, 0.503, 0.16899999999999998], ":snowman_without_snow:": [0.11900000000000001, 0.203, 0.128, 0.278], ":OK_hand:": [0.153, 0.21, 0.20600000000000002, 0.16], ":man_and_woman_holding_hands:": [0.075, 0.096, 0.266, 0.131], ":part_alternation_mark:": [0.203, 0.12300000000000001, 0.201, 0.305], 
":magnifying_glass_tilted_right:": [0.177, 0.253, 0.244, 0.12300000000000001], ":red_circle:": [0.244, 0.22, 0.11199999999999999, 0.27899999999999997], ":eggplant:": [0.353, 0.23399999999999999, 0.17800000000000002, 0.165], ":shaved_ice:": [0.213, 0.168, 0.152, 0.096], ":video_game:": [0.138, 0.20199999999999999, 0.145, 0.25], ":speech_balloon:": [0.233, 0.302, 0.22699999999999998, 0.214], ":alien:": [0.15, 0.231, 0.155, 0.152], ":name_badge:": [0.26899999999999996, 0.25, 0.147, 0.201], ":sheaf_of_rice:": [0.188, 0.259, 0.38299999999999995, 0.215], ":graduation_cap:": [0.162, 0.10300000000000001, 0.392, 0.126], ":inbox_tray:": [0.205, 0.126, 0.14, 0.213], ":confounded_face:": [0.392, 0.531, 0.172, 0.433], ":loudspeaker:": [0.271, 0.19899999999999998, 0.15, 0.21600000000000003], ":convenience_store:": [0.191, 0.17800000000000002, 0.17600000000000002, 0.17600000000000002], ":bird:": [0.22, 0.243, 0.213, 0.142], ":clutch_bag:": [0.12300000000000001, 0.17, 0.253, 0.31], ":hundred_points:": [0.254, 0.147, 0.145, 0.12300000000000001], ":tear-off_calendar:": [0.139, 0.267, 0.095, 0.299], ":closed_mailbox_with_raised_flag:": [0.191, 0.10800000000000001, 0.09699999999999999, 0.136], ":sun:": [0.11, 0.251, 0.267, 0.18], ":rose:": [0.129, 0.161, 0.33399999999999996, 0.19899999999999998], ":bread:": [0.142, 0.205, 0.18100000000000002, 0.156], ":hotel:": [0.075, 0.24600000000000002, 0.196, 0.184], ":lipstick:": [0.276, 0.168, 0.502, 0.141], ":smiling_face_with_halo:": [0.10800000000000001, 0.092, 0.28, 0.12300000000000001], ":smirking_face:": [0.258, 0.040999999999999995, 0.096, 0.146], ":face_screaming_in_fear:": [0.292, 0.263, 0.133, 0.21], ":person_gesturing_NO:": [0.28800000000000003, 0.28800000000000003, 0.11, 0.29600000000000004], ":fountain:": [0.10099999999999999, 0.22899999999999998, 0.276, 0.292], ":seat:": [0.155, 0.24, 0.067, 0.13699999999999998], ":reverse_button:": [0.256, 0.262, 0.114, 0.29600000000000004], ":heart_suit:": [0.165, 0.12300000000000001, 0.336, 0.193], ":trident_emblem:": [0.228, 0.14400000000000002, 0.21899999999999997, 0.257], ":five_o’clock:": [0.126, 0.335, 0.21, 0.264], ":unamused_face:": [0.418, 0.297, 0.11900000000000001, 0.33299999999999996], ":bouquet:": [0.09, 0.251, 0.326, 0.18100000000000002], ":page_facing_up:": [0.196, 0.31, 0.3, 0.29], ":notebook:": [0.128, 0.14400000000000002, 0.281, 0.174], ":black_square_button:": [0.361, 0.212, 0.235, 0.228], ":winking_face:": [0.098, 0.053, 0.129, 0.171], ":light_bulb:": [0.237, 0.19899999999999998, 0.306, 0.225], ":computer_disk:": [0.19399999999999998, 0.187, 0.32799999999999996, 0.22899999999999998], ":face_with_tongue:": [0.242, 0.19, 0.142, 0.14], ":hospital:": [0.128, 0.376, 0.305, 0.184], ":zzz:": [0.142, 0.213, 0.41100000000000003, 0.289], ":wrench:": [0.25, 0.313, 0.337, 0.13699999999999998], ":hear-no-evil_monkey:": [0.303, 0.27699999999999997, 0.094, 0.28300000000000003], ":circus_tent:": [0.113, 0.196, 0.111, 0.204], ":monkey_face:": [0.19399999999999998, 0.327, 0.079, 0.061], ":bookmark:": [0.257, 0.174, 0.182, 0.289], ":cat_face_with_wry_smile:": [0.25, 0.083, 0.078, 0.121], ":tomato:": [0.284, 0.22, 0.294, 0.23600000000000002], ":blue_book:": [0.156, 0.191, 0.149, 0.193], ":headphone:": [0.16699999999999998, 0.14800000000000002, 0.266, 0.316], ":crown:": [0.33299999999999996, 0.11800000000000001, 0.268, 0.127], ":dizzy:": [0.233, 0.147, 0.359, 0.134], ":six_o’clock:": [0.14300000000000002, 0.39299999999999996, 0.16899999999999998, 0.326], ":astonished_face:": [0.348, 0.41100000000000003, 0.138, 0.327], 
":grinning_squinting_face:": [0.165, 0.21600000000000003, 0.11900000000000001, 0.188], ":white_circle:": [0.22, 0.16399999999999998, 0.121, 0.217], ":old_woman:": [0.235, 0.299, 0.326, 0.27899999999999997], ":optical_disk:": [0.22, 0.165, 0.332, 0.261], ":magnifying_glass_tilted_left:": [0.222, 0.276, 0.203, 0.131], ":Sagittarius:": [0.17, 0.217, 0.21, 0.22], ":fuel_pump:": [0.375, 0.161, 0.138, 0.185], ":ear_of_corn:": [0.141, 0.156, 0.182, 0.16699999999999998], ":pot_of_food:": [0.18, 0.149, 0.177, 0.193], ":two_o’clock:": [0.122, 0.35, 0.191, 0.298], ":Pisces:": [0.16899999999999998, 0.17600000000000002, 0.233, 0.239], ":soccer_ball:": [0.147, 0.332, 0.115, 0.41100000000000003], ":Santa_Claus:": [0.131, 0.226, 0.254, 0.166], ":fast_reverse_button:": [0.301, 0.233, 0.18899999999999997, 0.282], ":violin:": [0.17600000000000002, 0.139, 0.298, 0.22399999999999998], ":beating_heart:": [0.171, 0.078, 0.32299999999999995, 0.157], ":grinning_face:": [0.163, 0.17300000000000001, 0.171, 0.18600000000000003], ":girl:": [0.22699999999999998, 0.16, 0.214, 0.146], ":pushpin:": [0.299, 0.263, 0.136, 0.177], ":anguished_face:": [0.309, 0.485, 0.14, 0.369], ":flushed_face:": [0.281, 0.263, 0.102, 0.231], ":person_frowning:": [0.34600000000000003, 0.374, 0.145, 0.42100000000000004], ":smiling_face:": [0.095, 0.13, 0.245, 0.17600000000000002], ":skis:": [0.10300000000000001, 0.077, 0.051, 0.192], ":clapping_hands:": [0.21899999999999997, 0.256, 0.18899999999999997, 0.214], ":kiss_mark:": [0.272, 0.10800000000000001, 0.273, 0.16699999999999998], ":large_orange_diamond:": [0.33, 0.21100000000000002, 0.223, 0.335], ":hushed_face:": [0.314, 0.355, 0.13699999999999998, 0.353], ":umbrella_with_rain_drops:": [0.184, 0.242, 0.254, 0.37], ":herb:": [0.152, 0.282, 0.509, 0.138], ":guard:": [0.19, 0.23, 0.081, 0.17600000000000002], ":love_hotel:": [0.040999999999999995, 0.141, 0.22899999999999998, 0.155], ":alien_monster:": [0.128, 0.228, 0.087, 0.19699999999999998], ":file_folder:": [0.151, 0.217, 0.158, 0.205], ":megaphone:": [0.239, 0.214, 0.16699999999999998, 0.22], ":bug:": [0.268, 0.27, 0.174, 0.102], ":blowfish:": [0.21, 0.214, 0.155, 0.138], ":bear_face:": [0.205, 0.256, 0.129, 0.196], ":keycap_10:": [0.217, 0.109, 0.086, 0.17300000000000001], ":kissing_face_with_closed_eyes:": [0.179, 0.08, 0.217, 0.168], ":front-facing_baby_chick:": [0.135, 0.147, 0.152, 0.151], ":barber_pole:": [0.135, 0.163, 0.174, 0.18], ":backhand_index_pointing_left:": [0.19899999999999998, 0.262, 0.226, 0.251], ":input_numbers:": [0.174, 0.18600000000000003, 0.21899999999999997, 0.139], ":worried_face:": [0.349, 0.397, 0.09699999999999999, 0.348], ":foggy:": [0.162, 0.301, 0.317, 0.28300000000000003], ":turtle:": [0.10800000000000001, 0.251, 0.239, 0.08], ":Tokyo_tower:": [0.115, 0.092, 0.168, 0.24], ":money_with_wings:": [0.12300000000000001, 0.096, 0.166, 0.121], ":fax_machine:": [0.24100000000000002, 0.35200000000000004, 0.16699999999999998, 0.226], ":baseball:": [0.14300000000000002, 0.242, 0.099, 0.369], ":honey_pot:": [0.177, 0.18100000000000002, 0.264, 0.14300000000000002], ":credit_card:": [0.14400000000000002, 0.08900000000000001, 0.24100000000000002, 0.213], ":video_camera:": [0.301, 0.29, 0.235, 0.20199999999999999], ":green_apple:": [0.16, 0.188, 0.405, 0.102], ":bust_in_silhouette:": [0.294, 0.34700000000000003, 0.18600000000000003, 0.27899999999999997], ":woman_dancing:": [0.11199999999999999, 0.11599999999999999, 0.138, 0.139], ":pager:": [0.14400000000000002, 0.191, 0.22899999999999998, 0.259], 
":anxious_face_with_sweat:": [0.34299999999999997, 0.439, 0.134, 0.35200000000000004], ":tropical_drink:": [0.14400000000000002, 0.17800000000000002, 0.159, 0.11900000000000001], ":baby:": [0.266, 0.201, 0.457, 0.156], ":wheelchair_symbol:": [0.18, 0.179, 0.09300000000000001, 0.264], ":Ophiuchus:": [0.213, 0.17, 0.233, 0.228], ":elephant:": [0.22399999999999998, 0.23399999999999999, 0.163, 0.156], ":yen_banknote:": [0.217, 0.182, 0.171, 0.302], ":warning:": [0.264, 0.293, 0.107, 0.212], ":shortcake:": [0.126, 0.196, 0.166, 0.08900000000000001], ":dragon_face:": [0.198, 0.298, 0.205, 0.157], ":END_arrow:": [0.285, 0.32, 0.23199999999999998, 0.40299999999999997], ":beaming_face_with_smiling_eyes:": [0.091, 0.251, 0.12300000000000001, 0.079], ":new_moon:": [0.239, 0.221, 0.258, 0.29100000000000004], ":man’s_shoe:": [0.276, 0.174, 0.11, 0.17300000000000001], ":bride_with_veil:": [0.193, 0.268, 0.502, 0.185], ":skull:": [0.36200000000000004, 0.267, 0.055999999999999994, 0.218], ":pill:": [0.195, 0.253, 0.182, 0.203], ":package:": [0.126, 0.18600000000000003, 0.214, 0.14300000000000002], ":castle:": [0.069, 0.20199999999999999, 0.132, 0.222], ":bookmark_tabs:": [0.13699999999999998, 0.228, 0.301, 0.23], ":face_savoring_food:": [0.128, 0.107, 0.16899999999999998, 0.091], ":woman’s_sandal:": [0.24600000000000002, 0.23600000000000002, 0.20800000000000002, 0.23], ":man:": [0.243, 0.131, 0.29100000000000004, 0.098], ":ghost:": [0.147, 0.201, 0.017, 0.10800000000000001], ":telephone_receiver:": [0.179, 0.16699999999999998, 0.10099999999999999, 0.311], ":club_suit:": [0.188, 0.228, 0.128, 0.248], ":wavy_dash:": [0.235, 0.287, 0.253, 0.268], ":bowling:": [0.07400000000000001, 0.165, 0.073, 0.275], ":oncoming_fist:": [0.23600000000000002, 0.253, 0.13, 0.273], ":nail_polish:": [0.418, 0.121, 0.314, 0.099], ":nine_o’clock:": [0.15, 0.36700000000000005, 0.19699999999999998, 0.331], ":rice_ball:": [0.10300000000000001, 0.254, 0.092, 0.262], ":memo:": [0.147, 0.235, 0.26899999999999996, 0.203], ":face_with_open_mouth:": [0.271, 0.29, 0.16, 0.295], ":double_exclamation_mark:": [0.157, 0.125, 0.063, 0.086], ":fast_up_button:": [0.243, 0.23600000000000002, 0.251, 0.256], ":white_medium_star:": [0.237, 0.175, 0.29, 0.16], ":dashing_away:": [0.363, 0.18, 0.102, 0.16399999999999998], ":Virgo:": [0.17, 0.109, 0.264, 0.195], ":fallen_leaf:": [0.133, 0.16699999999999998, 0.28600000000000003, 0.168], ":top_hat:": [0.172, 0.214, 0.11199999999999999, 0.207], ":thumbs_up:": [0.20199999999999999, 0.265, 0.264, 0.19399999999999998], ":woman:": [0.24100000000000002, 0.215, 0.29, 0.142], ":two_hearts:": [0.172, 0.08800000000000001, 0.38299999999999995, 0.142], ":dollar_banknote:": [0.21, 0.19, 0.149, 0.192], ":camera:": [0.198, 0.29600000000000004, 0.287, 0.19699999999999998], ":small_orange_diamond:": [0.258, 0.162, 0.23399999999999999, 0.271], ":map_of_Japan:": [0.122, 0.213, 0.24100000000000002, 0.152], ":boar:": [0.187, 0.26899999999999996, 0.122, 0.158], ":boy:": [0.171, 0.155, 0.225, 0.159], ":open_book:": [0.196, 0.207, 0.259, 0.243], ":clockwise_vertical_arrows:": [0.22899999999999998, 0.23399999999999999, 0.23, 0.262], ":green_book:": [0.154, 0.24, 0.391, 0.107], ":SOON_arrow:": [0.12300000000000001, 0.179, 0.191, 0.302], ":cooking:": [0.078, 0.221, 0.139, 0.11800000000000001], ":slot_machine:": [0.085, 0.16899999999999998, 0.067, 0.23], ":unlocked:": [0.207, 0.20600000000000002, 0.17, 0.109], ":leaf_fluttering_in_wind:": [0.231, 0.19399999999999998, 0.382, 0.139], ":closed_mailbox_with_lowered_flag:": [0.184, 0.19, 
0.109, 0.18100000000000002], ":sleepy_face:": [0.267, 0.375, 0.205, 0.36700000000000005], ":rainbow:": [0.183, 0.207, 0.317, 0.261], ":microphone:": [0.121, 0.081, 0.237, 0.29], ":musical_score:": [0.149, 0.09, 0.371, 0.315], ":white_square_button:": [0.35100000000000003, 0.226, 0.225, 0.16699999999999998], ":angry_face:": [0.493, 0.375, 0.07400000000000001, 0.44299999999999995], ":Aquarius:": [0.17800000000000002, 0.306, 0.23199999999999998, 0.228], ":ear:": [0.299, 0.33, 0.316, 0.35700000000000004], ":dvd:": [0.184, 0.14300000000000002, 0.319, 0.307], ":up-right_arrow:": [0.19899999999999998, 0.22399999999999998, 0.159, 0.243], ":right_arrow_curving_up:": [0.262, 0.255, 0.222, 0.22899999999999998], ":pizza:": [0.142, 0.109, 0.149, 0.11], ":incoming_envelope:": [0.24, 0.196, 0.168, 0.248], ":hot_beverage:": [0.142, 0.2, 0.317, 0.106], ":poodle:": [0.18600000000000003, 0.21600000000000003, 0.168, 0.152], ":dress:": [0.183, 0.16, 0.292, 0.242], ":blond-haired_person:": [0.257, 0.23, 0.226, 0.166], ":love_letter:": [0.13, 0.15, 0.331, 0.142], ":bomb:": [0.22, 0.196, 0.163, 0.205], ":direct_hit:": [0.177, 0.213, 0.098, 0.09], ":anger_symbol:": [0.316, 0.20800000000000002, 0.036000000000000004, 0.289], ":speaker_high_volume:": [0.259, 0.187, 0.154, 0.348], ":small_blue_diamond:": [0.23, 0.18100000000000002, 0.24600000000000002, 0.23199999999999998], ":grinning_cat_face_with_smiling_eyes:": [0.12, 0.161, 0.17600000000000002, 0.201], ":birthday_cake:": [0.055, 0.185, 0.317, 0.122], ":carousel_horse:": [0.11900000000000001, 0.128, 0.125, 0.17300000000000001], ":cinema:": [0.273, 0.207, 0.20600000000000002, 0.218], ":people_with_bunny_ears:": [0.24100000000000002, 0.11, 0.052000000000000005, 0.18], ":revolving_hearts:": [0.2, 0.09699999999999999, 0.42700000000000005, 0.142], ":spaghetti:": [0.055999999999999994, 0.149, 0.149, 0.159], ":french_fries:": [0.16399999999999998, 0.154, 0.14, 0.177], ":soft_ice_cream:": [0.156, 0.18100000000000002, 0.141, 0.09], ":Japanese_post_office:": [0.19, 0.309, 0.226, 0.249], ":nose:": [0.38299999999999995, 0.272, 0.18600000000000003, 0.52], ":closed_book:": [0.19899999999999998, 0.162, 0.256, 0.16], ":basketball:": [0.171, 0.209, 0.11800000000000001, 0.39799999999999996], ":pig_nose:": [0.212, 0.188, 0.16699999999999998, 0.392], ":Scorpio:": [0.185, 0.218, 0.302, 0.27399999999999997], ":black_circle:": [0.335, 0.212, 0.17600000000000002, 0.3], ":left_arrow:": [0.282, 0.221, 0.126, 0.19899999999999998], ":princess:": [0.39799999999999996, 0.198, 0.337, 0.175], ":key:": [0.165, 0.157, 0.239, 0.11599999999999999], ":maple_leaf:": [0.27899999999999997, 0.172, 0.20800000000000002, 0.147], ":musical_keyboard:": [0.132, 0.10800000000000001, 0.34, 0.265], ":school:": [0.15, 0.268, 0.29600000000000004, 0.162], ":newspaper:": [0.222, 0.33799999999999997, 0.27, 0.24600000000000002], ":right_arrow_curving_left:": [0.18100000000000002, 0.292, 0.179, 0.20800000000000002], ":chocolate_bar:": [0.147, 0.11699999999999999, 0.152, 0.10800000000000001], ":candy:": [0.192, 0.184, 0.188, 0.12], ":Leo:": [0.24100000000000002, 0.221, 0.212, 0.24100000000000002], ":Japanese_congratulations_button:": [0.158, 0.162, 0.255, 0.19899999999999998], ":waxing_gibbous_moon:": [0.18100000000000002, 0.245, 0.327, 0.221], ":penguin:": [0.151, 0.188, 0.134, 0.141], ":cow_face:": [0.142, 0.222, 0.129, 0.185], ":tiger_face:": [0.13, 0.392, 0.07400000000000001, 0.259], ":sunrise:": [0.107, 0.292, 0.4, 0.158], ":artist_palette:": [0.136, 0.11800000000000001, 0.442, 0.057999999999999996], ":battery:": 
[0.08199999999999999, 0.179, 0.196, 0.111], ":face_with_steam_from_nose:": [0.39899999999999997, 0.21, 0.043, 0.22], ":white_medium_square:": [0.395, 0.255, 0.16899999999999998, 0.231], ":flag_in_hole:": [0.134, 0.207, 0.222, 0.175], ":person_running:": [0.162, 0.297, 0.062, 0.2], ":fast_down_button:": [0.287, 0.247, 0.22, 0.22399999999999998], ":grapes:": [0.17600000000000002, 0.155, 0.179, 0.17600000000000002], ":koala:": [0.11900000000000001, 0.217, 0.11599999999999999, 0.109], ":paperclip:": [0.289, 0.21899999999999997, 0.19399999999999998, 0.231], ":outbox_tray:": [0.204, 0.22899999999999998, 0.19699999999999998, 0.19399999999999998], ":woman’s_boot:": [0.221, 0.095, 0.127, 0.239], ":syringe:": [0.21, 0.245, 0.142, 0.124], ":dotted_six-pointed_star:": [0.249, 0.161, 0.34299999999999997, 0.282], ":globe_showing_Asia-Australia:": [0.163, 0.242, 0.261, 0.188], ":melon:": [0.282, 0.313, 0.262, 0.077], ":strawberry:": [0.153, 0.198, 0.19699999999999998, 0.10400000000000001], ":droplet:": [0.19899999999999998, 0.223, 0.203, 0.248], ":cat_face_with_tears_of_joy:": [0.43799999999999994, 0.17800000000000002, 0.11599999999999999, 0.282], ":crescent_moon:": [0.098, 0.13699999999999998, 0.287, 0.218], ":ferris_wheel:": [0.092, 0.168, 0.141, 0.156], ":e-mail:": [0.26, 0.225, 0.21, 0.24], ":black_medium-small_square:": [0.392, 0.21100000000000002, 0.18600000000000003, 0.255], ":backhand_index_pointing_up:": [0.259, 0.142, 0.46, 0.299], ":downwards_button:": [0.195, 0.258, 0.182, 0.225], ":twelve_o’clock:": [0.18600000000000003, 0.34700000000000003, 0.165, 0.349], ":kimono:": [0.14400000000000002, 0.196, 0.23800000000000002, 0.222], ":broken_heart:": [0.244, 0.34, 0.19899999999999998, 0.332], ":see-no-evil_monkey:": [0.183, 0.27, 0.08900000000000001, 0.135], ":cactus:": [0.087, 0.245, 0.192, 0.034], ":gem_stone:": [0.17300000000000001, 0.073, 0.5429999999999999, 0.10800000000000001], ":purple_heart:": [0.183, 0.131, 0.341, 0.207], ":mobile_phone_off:": [0.17600000000000002, 0.247, 0.146, 0.245], ":up-down_arrow:": [0.27399999999999997, 0.27699999999999997, 0.207, 0.276], ":fried_shrimp:": [0.138, 0.15, 0.191, 0.165], ":bell:": [0.27, 0.21899999999999997, 0.242, 0.42700000000000005], ":seven_o’clock:": [0.15, 0.35, 0.08900000000000001, 0.33], ":smiling_face_with_horns:": [0.213, 0.055, 0.081, 0.193], ":up-left_arrow:": [0.193, 0.214, 0.18600000000000003, 0.124], ":joker:": [0.233, 0.28600000000000003, 0.051, 0.177], ":dolphin:": [0.107, 0.184, 0.11699999999999999, 0.204], ":t-shirt:": [0.21899999999999997, 0.078, 0.11599999999999999, 0.226], ":purse:": [0.105, 0.196, 0.302, 0.20199999999999999], ":old_man:": [0.27, 0.263, 0.276, 0.215], ":calendar:": [0.174, 0.21, 0.131, 0.225], ":frowning_face_with_open_mouth:": [0.37, 0.423, 0.128, 0.355], ":alarm_clock:": [0.17600000000000002, 0.28, 0.154, 0.22699999999999998], ":wine_glass:": [0.046, 0.124, 0.218, 0.059000000000000004], ":octopus:": [0.098, 0.23399999999999999, 0.19899999999999998, 0.086], ":ring:": [0.171, 0.073, 0.46, 0.17300000000000001], ":chequered_flag:": [0.221, 0.184, 0.125, 0.263], ":couple_with_heart:": [0.165, 0.113, 0.409, 0.25], ":relieved_face:": [0.127, 0.182, 0.254, 0.13699999999999998], ":grimacing_face:": [0.161, 0.32799999999999996, 0.1, 0.21100000000000002], ":lollipop:": [0.092, 0.163, 0.158, 0.055], ":fork_and_knife:": [0.053, 0.078, 0.126, 0.285], ":pile_of_poo:": [0.35, 0.342, 0.151, 0.446], ":large_blue_diamond:": [0.249, 0.053, 0.23600000000000002, 0.278], ":Statue_of_Liberty:": [0.09, 0.226, 0.113, 0.18600000000000003], 
":black_medium_square:": [0.445, 0.245, 0.21, 0.264], ":Capricorn:": [0.196, 0.172, 0.3, 0.179], ":pool_8_ball:": [0.257, 0.09, 0.059000000000000004, 0.204], ":no_entry:": [0.312, 0.445, 0.136, 0.344], ":water_wave:": [0.106, 0.29, 0.12300000000000001, 0.222], ":horse:": [0.281, 0.172, 0.14800000000000002, 0.212], ":ewe:": [0.29, 0.16899999999999998, 0.12, 0.292], ":dog_face:": [0.13, 0.18, 0.257, 0.084], ":no_one_under_eighteen:": [0.109, 0.136, 0.051, 0.179], ":left-right_arrow:": [0.32899999999999996, 0.37200000000000005, 0.214, 0.335], ":smiling_cat_face_with_heart-eyes:": [0.304, 0.1, 0.319, 0.145], ":clapper_board:": [0.213, 0.196, 0.237, 0.162], ":first_quarter_moon_face:": [0.11, 0.10300000000000001, 0.32, 0.22699999999999998], ":sake:": [0.145, 0.255, 0.282, 0.145], ":game_die:": [0.126, 0.162, 0.09, 0.179], ":person_pouting:": [0.293, 0.244, 0.196, 0.299], ":sunrise_over_mountains:": [0.10300000000000001, 0.28, 0.392, 0.205], ":tangerine:": [0.16899999999999998, 0.19899999999999998, 0.284, 0.237], ":beer_mug:": [0.157, 0.12, 0.16699999999999998, 0.09699999999999999], ":spouting_whale:": [0.16, 0.184, 0.09, 0.159], ":crying_face:": [0.284, 0.385, 0.21, 0.33299999999999996], ":hourglass_done:": [0.205, 0.305, 0.25, 0.266], ":movie_camera:": [0.142, 0.17800000000000002, 0.233, 0.158], ":eleven_o’clock:": [0.12300000000000001, 0.282, 0.11900000000000001, 0.316], ":bridge_at_night:": [0.079, 0.151, 0.24, 0.247], ":briefcase:": [0.17300000000000001, 0.192, 0.28600000000000003, 0.175], ":musical_notes:": [0.149, 0.131, 0.326, 0.31], ":open_file_folder:": [0.213, 0.263, 0.171, 0.276], ":input_latin_lowercase:": [0.193, 0.191, 0.17300000000000001, 0.129], ":cherry_blossom:": [0.122, 0.19699999999999998, 0.31, 0.13], ":heart_with_ribbon:": [0.106, 0.172, 0.41700000000000004, 0.14400000000000002], ":bikini:": [0.13, 0.132, 0.177, 0.187], ":nut_and_bolt:": [0.18100000000000002, 0.276, 0.175, 0.17800000000000002], ":blue_circle:": [0.203, 0.24100000000000002, 0.11699999999999999, 0.336], ":face_with_tears_of_joy:": [0.381, 0.231, 0.099, 0.326], ":neutral_face:": [0.415, 0.309, 0.149, 0.322], ":ant:": [0.26899999999999996, 0.308, 0.098, 0.11199999999999999], ":envelope_with_arrow:": [0.251, 0.08800000000000001, 0.063, 0.19899999999999998], ":crying_cat_face:": [0.257, 0.264, 0.24600000000000002, 0.344] }
1.851563
2
chapter-7-image-cap-multimodal-transformers/caption-training.py
EluMichael/Advanced-Natural-Language-Processing-with-TensorFlow-2
91
12791555
import tensorflow as tf from tensorflow.keras.applications.resnet50 import preprocess_input from tensorflow.keras.preprocessing.image import img_to_array from tensorflow.keras.preprocessing.image import load_img import tensorflow_datasets as tfds import numpy as np import pandas as pd from sklearn.model_selection import train_test_split import json from glob import glob from PIL import Image import pickle import re import os import time import datetime from tqdm import tqdm # our visual transformer code import visual_transformer as vt ####### GPU CONFIGS FOR RTX 2070/NVidia GPU ############### ## Please comment out if not training on GPU ## ## this is important for running CuDNN on GPU ## tf.keras.backend.clear_session() # - for easy reset of notebook state # chck if GPU can be seen by TF tf.config.list_physical_devices('GPU') # tf.debugging.set_log_device_placement(True) # only to check GPU usage gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: # Restrict TensorFlow to only use the first GPU try: tf.config.experimental.set_memory_growth(gpus[0], True) tf.config.experimental.set_visible_devices(gpus[0], 'GPU') logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU") except RuntimeError as e: # Visible devices must be set before GPUs have been initialized print(e) ############################################### ######################### # Load Data file mapping captions to images ######################### prefix = './data/' save_prefix = prefix + "features/" # for storing prefixes annot = prefix + 'data.csv' inputs = pd.read_csv(annot, header=None, names=["caption", "image"]) print("Data file loaded") ######################### # Tokenize Captions ######################### cap_tokenizer = tfds.features.text.SubwordTextEncoder.load_from_file( "captions") print(cap_tokenizer.encode("A man riding a wave on top of a surfboard.".lower())) print("Tokenizer hydrated") # Max length of captions split by spaces lens = inputs['caption'].map(lambda x: len(x.split())) # Max length of captions after tokenization # tfds demonstrated in earlier chapters # This is a quick way if data fits in memory lens = inputs['caption'].map(lambda x: len(cap_tokenizer.encode(x.lower()))) # We will set this as the max length of captions # which cover 99% of the captions without truncation max_len = int(lens.quantile(0.99) + 1) # for special tokens start = '<s>' end = '</s>' inputs['tokenized'] = inputs['caption'].map( lambda x: start + x.lower().strip() + end) print("Some prepared captions: ", inputs.tokenized[:5]) def tokenize_pad(x): x = cap_tokenizer.encode(x) if len(x) < max_len: x = x + [0] * int(max_len - len(x)) return x[:max_len] inputs['tokens'] = inputs.tokenized.map(lambda x: tokenize_pad(x)) print("Captions tokenized and padded/truncated") # now to compute a column with the new name of the saved image feature file inputs['img_features'] = inputs['image'].map(lambda x: save_prefix + x.split('/')[-1][:-3] + 'npy') ######################### # Prepare tf.DataSet for training ######################### captions = inputs.tokens.tolist() img_names = inputs.img_features.tolist() # we only took half validation examples so we dont need to split # img_train, img_val, cap_train, cap_val = train_test_split(img_names, # captions, # test_size=0.2, # random_state=42) img_train, cap_train = img_names, captions # Load the numpy file with extracted ResNet50 feature def load_image_feature(img_name, cap): img_tensor = 
np.load(img_name.decode('utf-8')) return img_tensor, cap dataset = tf.data.Dataset.from_tensor_slices((img_train, cap_train)) # Use map to load the numpy files in parallel dataset = dataset.map(lambda item1, item2: tf.numpy_function( load_image_feature, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=tf.data.experimental.AUTOTUNE) # To verify for img, cap in dataset.take(2): print(img.shape) print(cap.numpy()) print("Training dataset prepared.") ######################### # Build Transformer Model ######################### # These parameters control the size and complexity of the model # BERT (base) uses 12 layers, 768 as embedding dim, 12 attention heads # and 4H (4x768) as feedforward size # Small Model num_layers = 4 d_model = 128 dff = d_model * 4 num_heads = 8 # BERT Base Model # num_layers = 12 # d_model = 768 # dff = d_model * 4 # as per BERT paper # num_heads = 12 target_vocab_size = cap_tokenizer.vocab_size # already includes start/end tokens dropout_rate = 0.1 EPOCHS = 20 # should see results in 4-10 epochs also transformer = vt.Transformer(num_layers, d_model, num_heads, dff, target_vocab_size, pe_input=49, # 7x7 pixels pe_target=target_vocab_size, rate=dropout_rate, use_pe=False ) ######################### # Training Setup ######################### # Learning Rate Schedule, as per `Attention is All You Need' paper class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule): def __init__(self, d_model, warmup_steps=4000): super(CustomSchedule, self).__init__() self.d_model = d_model self.d_model = tf.cast(self.d_model, tf.float32) self.warmup_steps = warmup_steps def __call__(self, step): arg1 = tf.math.rsqrt(step) arg2 = step * (self.warmup_steps ** -1.5) return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2) learning_rate = CustomSchedule(d_model) optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9) # Visualize the schedule: uncomment to plot # import matplotlib.pyplot as plt # temp_learning_rate_schedule = CustomSchedule(d_model) # # plt.plot(temp_learning_rate_schedule(tf.range(40000, dtype=tf.float32))) # plt.ylabel("Learning Rate") # plt.xlabel("Train Step") ######################### # Loss and Metrics loss_object = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def loss_function(real, pred): mask = tf.math.logical_not(tf.math.equal(real, 0)) loss_ = loss_object(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask return tf.reduce_sum(loss_) / tf.reduce_sum(mask) train_loss = tf.keras.metrics.Mean(name='train_loss') train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy( name='train_accuracy') ######################### # Helper function for creating masks def create_masks(inp, tar): # Encoder padding mask - This should just be 1's # input shape should be (batch_size, 49, 2048) inp_seq = tf.ones([inp.shape[0], inp.shape[1]]) # all pixels to be used enc_padding_mask = vt.create_padding_mask(inp_seq) # Used in the 2nd attention block in the decoder. # This padding mask is used to mask the encoder outputs. dec_padding_mask = vt.create_padding_mask(inp_seq) # Used in the 1st attention block in the decoder. # It is used to pad and mask future tokens in the input received by # the decoder. 
look_ahead_mask = vt.create_look_ahead_mask(tf.shape(tar)[1]) dec_target_padding_mask = vt.create_padding_mask(tar) combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask) return enc_padding_mask, combined_mask, dec_padding_mask # Checkpoints setup checkpoint_path = "./checkpoints/train-small-model-nope-20ep" ckpt = tf.train.Checkpoint(transformer=transformer, optimizer=optimizer) ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5) # if a checkpoint exists, restore the latest checkpoint. if ckpt_manager.latest_checkpoint: ckpt.restore(ckpt_manager.latest_checkpoint) print('Latest checkpoint restored!!') ######################### # Training Loops ######################### # setup training parameters BUFFER_SIZE = 1000 BATCH_SIZE = 64 # can reduce or increase depending on GPU capacity # Shuffle and batch dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE) dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # Perform one step of raining on one batch in an epoch @tf.function def train_step(inp, tar): tar_inp = tar[:, :-1] tar_real = tar[:, 1:] enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp) with tf.GradientTape() as tape: predictions, _ = transformer(inp, tar_inp, True, enc_padding_mask, combined_mask, dec_padding_mask) loss = loss_function(tar_real, predictions) gradients = tape.gradient(loss, transformer.trainable_variables) optimizer.apply_gradients(zip(gradients, transformer.trainable_variables)) train_loss(loss) train_accuracy(tar_real, predictions) # Begin Training for epoch in range(EPOCHS): start_tm = time.time() train_loss.reset_states() train_accuracy.reset_states() # inp -> images, tar -> caption for (batch, (inp, tar)) in enumerate(dataset): train_step(inp, tar) if batch % 100 == 0: ts = datetime.datetime.now().strftime("%d-%b-%Y (%H:%M:%S)") print('[{}] Epoch {} Batch {} Loss {:.6f} Accuracy {:.6f}'.format( ts, epoch + 1, batch, train_loss.result(), train_accuracy.result())) if (epoch + 1) % 2 == 0: ckpt_save_path = ckpt_manager.save() print('Saving checkpoint for epoch {} at {}'.format(epoch + 1, ckpt_save_path)) print('Epoch {} Loss {:.6f} Accuracy {:.6f}'.format(epoch + 1, train_loss.result(), train_accuracy.result())) print('Time taken for 1 epoch: {} secs\n'.format(time.time() - start_tm)) transformer.summary()
2.25
2
src/main.py
djrlj694/COVID-19
0
12791556
#!/usr/bin/env python3 """ main.py - The main module for processing data and creating visual summaries for this study. """ # =========================================================================== # # METADATA # =========================================================================== # __author__ = 'Robert (Bob) <NAME>' __credits__ = ['Robert (Bob) <NAME>'] __created_date__ = 'Sep 16, 2020' __modified_date__ = 'Sep 16, 2020' # =========================================================================== # # EXPORTS # =========================================================================== # # Define the module's API -- the list of exportable objects (classes, # functions, etc.) -- when performing a "wild import" (`from field import *`). __all__ = [ 'DEBUG', ] # =========================================================================== # # IMPORTS # =========================================================================== # # -- Python Standard Library -- # import os # -- 3rd Party -- # import matplotlib.dates as mpl_dates import matplotlib.pyplot as plt import pandas as pd import seaborn as sns # =========================================================================== # # CONSTANTS # =========================================================================== # # -- Data -- # DAILY = 'daily' WEEKLY = 'weekly' COLUMNS = { 'positive': 'pos', 'negative': 'neg', 'negativeIncrease': 'negIncrease', 'positiveIncrease': 'posIncrease', } DOW = [ 'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', ] # -- Debugging -- # DEBUG = True # -- Filesytem -- # ROOT_DIR = os.path.join(os.getcwd(), '..') DATA_DIR = '../data' RESULTS_DIR = '../results' # -- URLs -- # SOURCE_URL = 'https://covidtracking.com/api/v1/us/daily.csv' # =========================================================================== # # FUNCTIONS # =========================================================================== # # -- Data Analytics -- # def plot_series(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) ax.xaxis.set_major_formatter( mpl_dates.DateFormatter('%m-%d-%Y'), ) sns.lineplot( data=df, x='date', y='posIncrease', marker='o', ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases') ax.set_xlabel('Date') ax.set_ylabel('Count of Cases') ax.xaxis_date() # plt.show() # Debug data frame. DEBUG and preview(df, plot_series.__name__) fig.savefig(f'{RESULTS_DIR}/plot_series.png') def set_figure_defaults(): # Use seaborn style defaults. Set the default figure size. sns.set( style='darkgrid', rc={'figure.figsize': (16, 9)}, ) def summarize_by_dow(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='posIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Cases') # Debug data frame. DEBUG and preview(df, summarize_by_dow.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow.png') def summarize_by_dow_percent(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='pctWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Percent of Weekly Count of Cases') # Debug data frame. 
DEBUG and preview(df, summarize_by_dow_percent.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_percent.png') def summarize_by_dow_zscore(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.boxplot( data=df, x='dow', y='zscoreWeeklyPosIncrease', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Z-Score of Weekly Count of Cases') # Debug data frame. DEBUG and preview(df, summarize_by_dow_zscore.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_dow_zscore.png') def summarize_maxima(df: pd.DataFrame): fig = plt.figure() ax = plt.subplot(111) sns.countplot( data=df, x='dow', order=DOW, ) ax.set_title('COVID-19 | Year 2020 | USA | Daily New Positive Cases') ax.set_xlabel('Day of Week') ax.set_ylabel('Count of Local Maxima of Cases') # Debug data frame. DEBUG and preview(df, summarize_maxima.__name__) # plt.show() fig.savefig(f'{RESULTS_DIR}/summarize_maxima.png') def visualize_data(df: pd.DataFrame): set_figure_defaults() plot_series(df.sort_values('date')) summarize_by_dow(df) summarize_by_dow_percent(df) summarize_by_dow_zscore(df) summarize_maxima(df[df['localMaximum'].eq(True)]) # Debug data frame. DEBUG and preview(df, visualize_data.__name__) # Return data frame for reuse. return df # -- Data Processing: Extract -- # def extract_data() -> pd.DataFrame: # Download source data as CSV from an API. df = pd.read_csv(SOURCE_URL) # Save a copy of the extracted data. df.to_csv( f'{DATA_DIR}/01_raw/{DAILY}_extract_data.csv', index=False, ) # Debug data frame. DEBUG and preview(df, extract_data.__name__) # Return data frame for reuse. return df # -- Data Processing: Transform -- # def transform_data(df: pd.DataFrame) -> pd.DataFrame: df = rename_columns(df) df = add_columns(df) # Debug data frame. DEBUG and preview(df, transform_data.__name__) # Return data frame for reuse. return df def add_columns(df: pd.DataFrame): # Format date. df.date = pd.to_datetime(df.date, format='%Y%m%d') # Set the date as the DataFrame's index. df = df.set_index('date') # Add date-derived columns. df['date'] = df.index.date df['year'] = df.index.year df['month'] = df.index.month df['week'] = df.index.week df['dow'] = df.index.day_name() df['dowIndex'] = df.index.dayofweek # Add group-summarization columns. df_weekly = df.groupby('week', as_index=False)['posIncrease'].agg( { 'weeklyPosIncrease': 'sum', 'meanWeeklyPosIncrease': 'mean', 'stdWeeklyPosIncrease': 'std', }, ) df = pd.merge( df, df_weekly, how='left', on='week', ) df['pctWeeklyPosIncrease'] = percent(df.posIncrease, df.weeklyPosIncrease) df['zscoreWeeklyPosIncrease'] = zScore( df.posIncrease, df.meanWeeklyPosIncrease, df.stdWeeklyPosIncrease, ) # Add delta columns. df['day1LagDelta'] = lag_delta(df.posIncrease, 1) df['day1LeadDelta'] = lead_delta(df.posIncrease, 1) # Add local extrema columns. df['localMaximum'] = df.apply(local_max, axis=1) df['localMinimum'] = df.apply(local_min, axis=1) # Save a copy of the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_add_columns.csv', index=True, ) # Debug data frame. DEBUG and preview(df, add_columns.__name__) # Return data frame for reuse. return df def rename_columns(df: pd.DataFrame) -> pd.DataFrame: # Rename columns. df.rename(columns=COLUMNS, inplace=True) # Save a copy of the processed data. df.to_csv( f'{DATA_DIR}/02_intermediate/{DAILY}_rename_columns.csv', index=True, ) # Debug data frame. DEBUG and preview(df, rename_columns.__name__) # Return data frame for reuse. 
return df # -- Data Processing: Load -- # # -- Utilities -- # def lag_delta(series, period): return series - series.shift(period) def lead_delta(series, period): return series.shift(-period) - series def local_max(row): if row['day1LagDelta'] > 0 and row['day1LeadDelta'] < 0: return True else: return False def local_min(row): if row['day1LagDelta'] < 0 and row['day1LeadDelta'] > 0: return True else: return False def percent(num, denom): return 100 * num / denom def preview(df: pd.DataFrame, func_name: str): print(f'INSIDE {func_name}(): type =', type(df).__name__) print(df.head(5)) def zScore(x, mean, std): return (x - mean) / std # -- Main Program -- # def main(): df = extract_data() df = transform_data(df) visualize_data(df) # =========================================================================== # # MAIN EXECUTION # =========================================================================== # # -- Main Program -- # # If this module is in the main module, call the main() function. if __name__ == '__main__': main()
1.984375
2
k8s/config.py
jasonquekavalon/k8s
0
12791557
<gh_stars>0 #!/usr/bin/env python # -*- coding: utf-8 # Copyright 2017-2019 The FIAAS Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Singleton configuration for k8s client""" #: API server URL api_server = "https://kubernetes.default.svc.cluster.local" #: API token api_token = "" #: API certificate cert = None #: Should the client verify the servers SSL certificates? verify_ssl = True #: Enable debugging debug = False #: Default timeout for most operations timeout = 20 #: Default timeout for streaming operations stream_timeout = 3600 #: Default size of Watcher cache watcher_cache_size = 1000
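# --- Usage sketch (not part of the original file) ---
# A module like this is consumed as a process-wide singleton: callers import it
# once and overwrite the attributes defined above before issuing any API calls.
# The import path "k8s.config" is assumed from this file's location
# (k8s/config.py); the token path below is the standard in-cluster
# service-account mount and is only illustrative.
#
#     from k8s import config
#
#     config.api_server = "https://my-cluster.example.com:6443"
#     config.api_token = open(
#         "/var/run/secrets/kubernetes.io/serviceaccount/token"
#     ).read()
#     config.verify_ssl = True
#     config.timeout = 30  # seconds; the module default above is 20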
1.179688
1
govee_api_laggat/govee_api_laggat.py
riishh/python-govee-api
0
12791558
"""Govee API client package.""" import asyncio import logging import time import math from contextlib import asynccontextmanager from dataclasses import dataclass from datetime import datetime from events import Events from typing import Any, List, Optional, Tuple, Union import aiohttp from govee_api_laggat.__version__ import VERSION from govee_api_laggat.learning_storage import ( GoveeAbstractLearningStorage, GoveeLearnedInfo, ) _LOGGER = logging.getLogger(__name__) _API_BASE_URL = "https://developer-api.govee.com" _API_PING = _API_BASE_URL + "/ping" _API_DEVICES = _API_BASE_URL + "/v1/devices" _API_DEVICES_CONTROL = _API_BASE_URL + "/v1/devices/control" _API_DEVICES_STATE = _API_BASE_URL + "/v1/devices/state" # API rate limit header keys _RATELIMIT_TOTAL = "Rate-Limit-Total" # The maximum number of requests you're permitted to make per minute. _RATELIMIT_REMAINING = "Rate-Limit-Remaining" # The number of requests remaining in the current rate limit window. _RATELIMIT_RESET = "Rate-Limit-Reset" # The time at which the current rate limit window resets in UTC epoch seconds. # return state from hisory for n seconds after controlling the device DELAY_GET_FOLLOWING_SET_SECONDS = 2 # do not send another control within n seconds after controlling the device DELAY_SET_FOLLOWING_SET_SECONDS = 1 @dataclass class GoveeDevice(object): """ Govee Device DTO """ device: str model: str device_name: str controllable: bool retrievable: bool support_cmds: List[str] support_turn: bool support_brightness: bool support_color: bool support_color_tem: bool online: bool power_state: bool brightness: int color: Tuple[int, int, int] color_temp: int timestamp: int source: str error: str lock_set_until: int lock_get_until: int learned_set_brightness_max: int learned_get_brightness_max: int before_set_brightness_turn_on: bool config_offline_is_off: bool # this is the learning config, possibly overridden by a global config class GoveeError(Exception): """Base Exception thrown from govee_api_laggat.""" class GoveeDeviceNotFound(GoveeError): """Device is unknown.""" class Govee(object): """Govee API client.""" async def __aenter__(self): """Async context manager enter.""" self._session = aiohttp.ClientSession() return self async def __aexit__(self, *err): """Async context manager exit.""" if self._session: await self._session.close() self._session = None def __init__( self, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): """Init with an API_KEY and storage for learned values.""" _LOGGER.debug("govee_api_laggat v%s", VERSION) self._online = True # assume we are online self.events = Events() self._api_key = api_key self._devices = {} self._rate_limit_on = 5 # safe available call count for multiple processes self._limit = 100 self._limit_remaining = 100 self._limit_reset = 0 self._config_offline_is_off = None self._learning_storage = learning_storage if not self._learning_storage: # use an internal learning storage as long as we run. # we will need to re-learn every time again. 
self._learning_storage = GoveeAbstractLearningStorage() @classmethod async def create( cls, api_key: str, *, learning_storage: Optional[GoveeAbstractLearningStorage] = None, ): """Use create method if you want to use this Client without an async context manager.""" self = Govee(api_key, learning_storage=learning_storage) await self.__aenter__() return self async def close(self): """Use close when your are finished with the Client without using an async context manager.""" await self.__aexit__() def _getHeaders(self, auth: bool): """Return Request headers with/without authentication.""" if auth: return {"Govee-API-Key": self._api_key} return {} @asynccontextmanager async def _api_put(self, *, auth=True, url: str, json): """API HTTP Put call.""" async with self._api_request_internal( lambda: self._session.put( url=url, headers=self._getHeaders(auth), json=json ) ) as response: yield response @asynccontextmanager async def _api_get(self, *, auth=True, url: str, params=None): """API HTTP Get call.""" async with self._api_request_internal( lambda: self._session.get( url=url, headers=self._getHeaders(auth), params=params ) ) as response: yield response @asynccontextmanager async def _api_request_internal(self, request_lambda): """API Methond handling all HTTP calls. This also handles: - rate-limiting - online/offline status """ err = None await self.rate_limit_delay() try: async with request_lambda() as response: self._set_online(True) # we got something, so we are online self._track_rate_limit(response) # return the async content manager response yield response except aiohttp.ClientError as ex: # we are offline self._set_online(False) err = "error from aiohttp: %s" % repr(ex) except Exception as ex: err = "unknown error: %s" % repr(ex) if err: class error_response: def __init__(self, err_msg): self._err_msg = err_msg status = -1 async def text(self): return self._err_msg yield error_response("_api_request_internal: " + err) def _utcnow(self): """Helper method to get utc now as seconds.""" return datetime.timestamp(datetime.now()) def _track_rate_limit(self, response): """Track rate limiting.""" if response.status == 429: _LOGGER.warning( f"Rate limit exceeded, check if other devices also utilize the govee API" ) limit_unknown = True if ( _RATELIMIT_TOTAL in response.headers and _RATELIMIT_REMAINING in response.headers and _RATELIMIT_RESET in response.headers ): try: self._limit = int(response.headers[_RATELIMIT_TOTAL]) self._limit_remaining = int(response.headers[_RATELIMIT_REMAINING]) self._limit_reset = float(response.headers[_RATELIMIT_RESET]) _LOGGER.debug( f"Rate limit total: {self._limit}, remaining: {self._limit_remaining} in {self.rate_limit_reset_seconds} seconds" ) limit_unknown = False except Exception as ex: _LOGGER.warning(f"Error trying to get rate limits: {ex}") if limit_unknown: self._limit_remaining -= 1 async def rate_limit_delay(self): """Delay a call when rate limiting is active.""" # do we have requests left? if self.rate_limit_remaining <= self.rate_limit_on: # do we need to sleep? sleep_sec = self.rate_limit_reset_seconds if sleep_sec > 0: _LOGGER.warning( f"Rate limiting active, {self._limit_remaining} of {self._limit} remaining, sleeping for {sleep_sec}s." 
) await asyncio.sleep(sleep_sec) @property def rate_limit_total(self): """Rate limit is counted down from this value.""" return self._limit @property def rate_limit_remaining(self): """Remaining Rate limit.""" return self._limit_remaining @property def rate_limit_reset(self): """UTC time in seconds when the rate limit will be reset.""" return self._limit_reset @property def rate_limit_reset_seconds(self): """Seconds until the rate limit will be reset.""" return self._limit_reset - self._utcnow() @property def rate_limit_on(self): """Remaining calls that trigger rate limiting. Defaults to 5, which means there is some room for other clients. """ return self._rate_limit_on @rate_limit_on.setter def rate_limit_on(self, val): """Set the remaining calls that trigger rate limiting.""" if val > self._limit: raise GoveeError( f"Rate limiter threshold {val} must be below {self._limit}" ) if val < 1: raise GoveeError(f"Rate limiter threshold {val} must be above 1") self._rate_limit_on = val @property def config_offline_is_off(self): """Get the global config option config_offline_is_off.""" return self._config_offline_is_off @config_offline_is_off.setter def config_offline_is_off(self, val: bool): """ Set global behavour when device is offline. None: default, use config_offline_is_off from learning, or False by default. False: an offline device doesn't change power state. True: an offline device is shown as off. """ self._config_offline_is_off = val @property def devices(self) -> List[GoveeDevice]: """Cached devices list.""" lst = [] for dev in self._devices: lst.append(self._devices[dev]) return lst def device(self, device) -> GoveeDevice: """Single device from cache.""" _, device = self._get_device(device) return device @property def online(self): """Last request was able to connect to the API.""" return self._online def _set_online(self, online: bool): """Set the online state and fire an event on change.""" if self._online != online: self._online = online # inform about state change self.events.online(self._online) if not online: # show all devices as offline for device in self.devices: device.online = False async def check_connection(self) -> bool: """Check connection to API.""" try: # this will set self.online await self.ping() except: pass return self.online async def ping(self) -> Tuple[float, str]: """Ping the api endpoint. 
No API_KEY is needed.""" _LOGGER.debug("ping") start = time.time() ping_ok_delay = None err = None async with self._api_get(url=_API_PING, auth=False) as response: result = await response.text() delay = int((time.time() - start) * 1000) if response.status == 200: if "Pong" == result: ping_ok_delay = max(1, delay) else: err = f"API-Result wrong: {result}" else: result = await response.text() err = f"API-Error {response.status}: {result}" return ping_ok_delay, err async def get_devices(self) -> Tuple[List[GoveeDevice], str]: """Get and cache devices.""" _LOGGER.debug("get_devices") devices = {} err = None async with self._api_get(url=_API_DEVICES) as response: if response.status == 200: result = await response.json() timestamp = self._utcnow() learning_infos = await self._learning_storage._read_cached() for item in result["data"]["devices"]: device_str = item["device"] model_str = item["model"] is_retrievable = item["retrievable"] # assuming defaults for learned/configured values learned_set_brightness_max = None learned_get_brightness_max = None before_set_brightness_turn_on = False config_offline_is_off = False # effenctive state # defaults by some conditions if not is_retrievable: learned_get_brightness_max = -1 if model_str == "H6104": before_set_brightness_turn_on = True # load learned/configured values if device_str in learning_infos: learning_info = learning_infos[device_str] learned_set_brightness_max = learning_info.set_brightness_max learned_get_brightness_max = learning_info.get_brightness_max before_set_brightness_turn_on = learning_info.before_set_brightness_turn_on config_offline_is_off = learning_info.config_offline_is_off # create device DTO devices[device_str] = GoveeDevice( device=device_str, model=model_str, device_name=item["deviceName"], controllable=item["controllable"], retrievable=is_retrievable, support_cmds=item["supportCmds"], support_turn="turn" in item["supportCmds"], support_brightness="brightness" in item["supportCmds"], support_color="color" in item["supportCmds"], support_color_tem="colorTem" in item["supportCmds"], # defaults for state online=True, power_state=False, brightness=0, color=(0, 0, 0), color_temp=0, timestamp=timestamp, source="history", error=None, lock_set_until=0, lock_get_until=0, learned_set_brightness_max=learned_set_brightness_max, learned_get_brightness_max=learned_get_brightness_max, before_set_brightness_turn_on=before_set_brightness_turn_on, config_offline_is_off=config_offline_is_off ) else: result = await response.text() err = f"API-Error {response.status}: {result}" # cache last get_devices result self._devices = devices return self.devices, err def _get_device(self, device: Union[str, GoveeDevice]) -> Tuple[str, GoveeDevice]: """Get a device by address or GoveeDevice DTO. 
returns: device_address, device_dto """ device_str = device if isinstance(device, GoveeDevice): device_str = device.device if not device_str in self._devices: device = None # disallow unknown devices elif isinstance(device, str) and device_str in self._devices: device = self._devices[device_str] else: raise GoveeDeviceNotFound(device_str) return device_str, device def _is_success_result_message(self, result) -> bool: """Given an aiohttp result checks if it is a success result.""" return "message" in result and result["message"] == "Success" async def turn_on(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]: """Turn on a device, return success and error message.""" return await self._turn(device, "on") async def turn_off(self, device: Union[str, GoveeDevice]) -> Tuple[bool, str]: """Turn off a device, return success and error message.""" return await self._turn(device, "off") async def _turn( self, device: Union[str, GoveeDevice], onOff: str ) -> Tuple[bool, str]: """Turn command called by turn_on and turn_off.""" success = False err = None device_str, device = self._get_device(device) if not device: err = f"Invalid device {device_str}, {device}" else: command = "turn" params = onOff result, err = await self._control(device, command, params) success = False if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = "history" self._devices[device_str].power_state = onOff == "on" return success, err async def set_brightness( self, device: Union[str, GoveeDevice], brightness: int ) -> Tuple[bool, str]: """Set brightness to 0-254.""" success = False err = None device_str, device = self._get_device(device) if not device: err = f"Invalid device {device_str}, {device}" else: if brightness < 0 or brightness > 254: err = f"set_brightness: invalid value {brightness}, allowed range 0 .. 
254" else: if brightness > 0 and device.before_set_brightness_turn_on: await self.turn_on(device) # api doesn't work if we don't sleep await asyncio.sleep(1) # set brightness as 0..254 brightness_set = brightness brightness_result = brightness_set brightness_set_100 = 0 if brightness_set > 0: brightness_set_100 = max(1, math.floor(brightness * 100 / 254)) brightness_result_100 = math.ceil(brightness_set_100 * 254 / 100) if device.learned_set_brightness_max == 100: # set brightness as 0..100 brightness_set = brightness_set_100 brightness_result = brightness_result_100 command = "brightness" result, err = await self._control(device, command, brightness_set) if err: # try again with 0-100 range if "API-Error 400" in err: # Unsupported Cmd Value # set brightness as 0..100 as 0..254 didn't work brightness_set = brightness_set_100 brightness_result = brightness_result_100 result, err = await self._control( device, command, brightness_set ) if not err: device.learned_set_brightness_max = 100 await self._learn(device) else: if brightness_set > 100: device.learned_set_brightness_max = 254 await self._learn(device) if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = "history" self._devices[device_str].brightness = brightness_result self._devices[device_str].power_state = brightness_result > 0 return success, err async def _learn(self, device): """Persist learned information from device DTO.""" learning_infos: Dict[ str, GoveeLearnedInfo ] = await self._learning_storage._read_cached() changed = False # init Dict and entry for device if learning_infos == None: learning_infos = {} if device.device not in learning_infos: learning_infos[device.device] = GoveeLearnedInfo() # output what was lerned, and learn if ( learning_infos[device.device].set_brightness_max != device.learned_set_brightness_max ): _LOGGER.debug( "learned device %s uses range 0-%s for setting brightness.", device.device, device.learned_set_brightness_max, ) learning_infos[ device.device ].set_brightness_max = device.learned_set_brightness_max changed = True if ( learning_infos[device.device].get_brightness_max != device.learned_get_brightness_max ): _LOGGER.debug( "learned device %s uses range 0-%s for getting brightness state.", device.device, device.learned_get_brightness_max, ) if device.learned_get_brightness_max == 100: _LOGGER.info( "brightness range for %s is assumed. 
If the brightness slider doesn't match the actual brightness pull the brightness up to max once.", device.device, ) changed = True learning_infos[ device.device ].get_brightness_max = device.learned_get_brightness_max if changed: await self._learning_storage._write_cached(learning_infos) async def set_color_temp( self, device: Union[str, GoveeDevice], color_temp: int ) -> Tuple[bool, str]: """Set color temperature to 2000-9000.""" success = False err = None device_str, device = self._get_device(device) if not device: err = f"Invalid device {device_str}, {device}" else: if color_temp < 2000 or color_temp > 9000: err = f"set_color_temp: invalid value {color_temp}, allowed range 2000-9000" else: command = "colorTem" result, err = await self._control(device, command, color_temp) if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = "history" self._devices[device_str].color_temp = color_temp return success, err async def set_color( self, device: Union[str, GoveeDevice], color: Tuple[int, int, int] ) -> Tuple[bool, str]: """Set color (r, g, b) where each value may be in range 0-255 """ success = False err = None device_str, device = self._get_device(device) if not device: err = f"Invalid device {device_str}, {device}" else: if len(color) != 3: err = f"set_color: invalid value {color}, must be tuple with (r, g, b) values" else: red = color[0] green = color[1] blue = color[2] if red < 0 or red > 255: err = ( f"set_color: invalid value {color}, red must be within 0 .. 254" ) elif green < 0 or green > 255: err = f"set_color: invalid value {color}, green must be within 0 .. 254" elif blue < 0 or blue > 255: err = f"set_color: invalid value {color}, blue must be within 0 .. 254" else: command = "color" command_color = {"r": red, "g": green, "b": blue} result, err = await self._control(device, command, command_color) if not err: success = self._is_success_result_message(result) if success: self._devices[device_str].timestamp = self._utcnow self._devices[device_str].source = "history" self._devices[device_str].color = color return success, err def _get_lock_seconds(self, utcSeconds: int) -> int: """Get seconds to wait.""" seconds_lock = utcSeconds - self._utcnow() if seconds_lock < 0: seconds_lock = 0 return seconds_lock async def _control( self, device: Union[str, GoveeDevice], command: str, params: Any ) -> Tuple[Any, str]: """Control led strips and bulbs.""" device_str, device = self._get_device(device) cmd = {"name": command, "value": params} _LOGGER.debug(f"control {device_str}: {cmd}") result = None err = None if not device: err = f"Invalid device {device_str}, {device}" else: if not device.controllable: err = f"Device {device.device} is not controllable" _LOGGER.debug(f"control {device_str} not possible: {err}") elif not command in device.support_cmds: err = f"Command {command} not possible on device {device.device}" _LOGGER.warning(f"control {device_str} not possible: {err}") else: while True: seconds_locked = self._get_lock_seconds(device.lock_set_until) if not seconds_locked: break; _LOGGER.debug(f"control {device_str} is locked for {seconds_locked} seconds. 
Command waiting: {cmd}") await asyncio.sleep(seconds_locked) json = {"device": device.device, "model": device.model, "cmd": cmd} await self.rate_limit_delay() async with self._api_put( url=_API_DEVICES_CONTROL, json=json ) as response: if response.status == 200: device.lock_set_until = ( self._utcnow() + DELAY_SET_FOLLOWING_SET_SECONDS ) device.lock_get_until = ( self._utcnow() + DELAY_GET_FOLLOWING_SET_SECONDS ) result = await response.json() else: text = await response.text() err = f"API-Error {response.status} on command {cmd}: {text} for device {device}" _LOGGER.warning(f"control {device_str} not possible: {err}") return result, err async def get_states(self) -> List[GoveeDevice]: """Request states for all devices from API.""" _LOGGER.debug("get_states") for device_str in self._devices: state, err = await self._get_device_state(device_str) if err: _LOGGER.warning("error getting state for device %s: %s", device_str, err, ) self._devices[device_str].error = err else: self._devices[device_str] = state self._devices[device_str].error = None return self.devices async def _get_device_state( self, device: Union[str, GoveeDevice] ) -> Tuple[GoveeDevice, str]: """Get state for one specific device.""" device_str, device = self._get_device(device) result = None err = None seconds_locked = self._get_lock_seconds(device.lock_get_until) if not device: err = f"Invalid device {device_str}" elif not device.retrievable: # device {device_str} isn't able to return state, return 'history' state self._devices[device_str].source = "history" result = self._devices[device_str] elif seconds_locked: # we just changed something, return state from history self._devices[device_str].source = "history" result = self._devices[device_str] _LOGGER.debug( f"state object returned from cache: {result}, next state for {device.device} from api allowed in {seconds_locked} seconds" ) else: params = {"device": device.device, "model": device.model} async with self._api_get(url=_API_DEVICES_STATE, params=params) as response: if response.status == 200: timestamp = self._utcnow() json_obj = await response.json() prop_online = False prop_power_state = False prop_brightness = False prop_color = (0, 0, 0) prop_color_temp = 0 for prop in json_obj["data"]["properties"]: # somehow these are all dicts with one element if "online" in prop: prop_online = prop["online"] is True elif "powerState" in prop: prop_power_state = prop["powerState"] == "on" elif "brightness" in prop: prop_brightness = prop["brightness"] elif "color" in prop: prop_color = ( prop["color"]["r"], prop["color"]["g"], prop["color"]["b"], ) elif "colorTemInKelvin" in prop: prop_color_temp = prop["colorTemInKelvin"] else: _LOGGER.debug(f"unknown state property '{prop}'") if not prop_online: if self.config_offline_is_off is not None: # global option if self.config_offline_is_off: prop_power_state = False elif device.config_offline_is_off: # learning option prop_power_state = False # autobrightness learning if device.learned_get_brightness_max == None or ( device.learned_get_brightness_max == 100 and prop_brightness > 100 ): device.learned_get_brightness_max = ( 100 # assumption, as we didn't get anything higher ) if prop_brightness > 100: device.learned_get_brightness_max = 254 await self._learn(device) if device.learned_get_brightness_max == 100: # scale range 0-100 up to 0-254 prop_brightness = math.floor( prop_brightness * 254 / 100 ) result = self._devices[device_str] result.online = prop_online result.power_state = prop_power_state result.brightness = prop_brightness 
result.color = prop_color result.color_temp = prop_color_temp result.timestamp = timestamp result.source = "api" result.error = None _LOGGER.debug( f"state returned from API: {json_obj}, resulting state object: {result}" ) else: errText = await response.text() err = f"API-Error {response.status}: {errText}" return result, err
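# --- Usage sketch (not part of the original file) ---
# A minimal, hypothetical example of driving the client defined above through
# its async context manager: list devices, then switch the first one on and set
# its brightness on the 0-254 scale documented in set_brightness(). The API key
# is assumed to come from an environment variable; error handling is minimal.
import os


async def _demo():
    async with Govee(os.environ["GOVEE_API_KEY"]) as govee:
        devices, err = await govee.get_devices()
        if err or not devices:
            print("no devices found:", err)
            return
        first = devices[0]
        await govee.turn_on(first)
        await govee.set_brightness(first, 127)


if __name__ == "__main__":
    asyncio.run(_demo())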
2.46875
2
sstcam_sandbox/d181105_sim_telarray_cfg/spe_response.py
watsonjj/CHECLabPySB
0
12791559
""" Obtain the single photoelectron response for an SiPM. Can be used as an input to sim_telarray after normalisation with Konrads script """ import argparse from argparse import ArgumentDefaultsHelpFormatter as Formatter import numpy as np from scipy.special import binom from scipy.stats import norm from IPython import embed from matplotlib import pyplot as plt import os def sipm_enf(x, spe_sigma, opct, pap, dap): """ SiPM formula from Gentile 2010 http://adsabs.harvard.edu/abs/2010arXiv1006.3263G This implementation only considers the case for a 100% probability of a single inital fired microcell Parameters ---------- x : ndarray X points to evaluate at spe_sigma : float Width of the single photoelectron peak opct : float Probability of optical crosstalk pap : float Probability of afterpulse dap : float Distance of afterpulse peak from main peak """ n_peaks = 100 N = np.arange(n_peaks)[:, None] K = np.arange(1, n_peaks)[:, None] # Probability of n fired microcells due to optical crosstalk pct = ((1 - opct) * np.power(opct, N - 1) * binom(N - 1, 0))[:, 0] sap = spe_sigma papk = np.power(1 - pap, N[:, 0]) p0ap = pct * papk pap1 = pct * (1-papk) * papk pe_sigma = np.sqrt(K * spe_sigma ** 2) ap_sigma = np.sqrt(K * sap ** 2) signal = p0ap[K] * norm.pdf(x, K, pe_sigma) signal += pap1[K] * norm.pdf(x, K * (1.0-dap), ap_sigma) return signal.sum(0) def main(): description = ('Obtain the single photoelectron response for an SiPM. ' 'Can be used as an input to sim_telarray after ' 'normalisation with Konrads script') parser = argparse.ArgumentParser(description=description, formatter_class=Formatter) parser.add_argument('-o', '--output', dest='output_dir', action='store', required=True, help='Output directory for the files') parser.add_argument('--spe_sigma', dest='spe_sigma', action='store', default=0.1, type=float, help='Value for the standard deviation of the single ' 'photoelectron peak') parser.add_argument('--opct', dest='opct', action='store', default=0.1, type = float, help='Value for optical crosstalk') parser.add_argument('--pap', dest='pap', action='store', default=0, type=float, help='Value for the probability of afterpulses') parser.add_argument('--dap', dest='dap', action='store', default=0, type=float, help='Value for the distance of the afterpulse peak ' 'from main peak') args = parser.parse_args() output_dir = args.output_dir spe_sigma = args.spe_sigma opct = args.opct pap = args.pap dap = args.dap print( """ SPE Parameters: spe_sigma = {} opct = {} pap = {} dap = {} """.format(spe_sigma, opct, pap, dap) ) x = np.linspace(0, 100, 1000) y = sipm_enf(x, spe_sigma, opct, pap, dap) gt = y > 1E-15 x = x[gt] y = y[gt] # Resample x = np.linspace(x.min(), x.max(), 1000) y = sipm_enf(x, spe_sigma, opct, pap, dap) if not os.path.exists(output_dir): print("Creating directory: {}".format(output_dir)) os.makedirs(output_dir) output_path = os.path.join(output_dir, "checs_spe_spectrum.txt") np.savetxt(output_path, np.column_stack((x, y, y))) print("Created config : {}".format(output_path)) output_path = os.path.join(output_dir, "checs_spe_spectrum.pdf") plt.semilogy(x, y) plt.savefig(output_path, bbox_inches='tight') print("Created figure : {}".format(output_path)) if __name__ == '__main__': main()
3.078125
3
src/datastructures/lru_cache.py
seahrh/coding-interview
0
12791560
""" LRU Cache: Design and build a "least recently used" cache, which evicts the least recently used item. The cache should map from keys to values (allowing you to insert and retrieve a value associated with a particular key) and be initialized with a max size. When it is full, it should evict the least recently used item. You can assume the keys are integers and the values are strings. (16.25, p533) Solution: Use hash table to point into the nodes of a linked list, which represents last-use ordering. Do the following in average case O(1) time: Inserting Key, Value Pair: Create a linked list node with key, value. Insert into head of linked list. Insert key -> node mapping into hash table. Retrieving Value by Key: Look up node in hash table and return value. Update most recently used item Finding Least Recently Used: Least recently used item will be found at the end of the linked list. Updating Most Recently Used: Move node to front of linked list. Hash table does not need to be updated. Eviction: Remove tail of linked list. Get key from linked list node and remove key from hash table. """ from typing import NamedTuple, Dict from datastructures.linked_list import * class Item(NamedTuple): key: str value: str class LruCache: def __init__(self, capacity: int): if capacity < 1: raise ValueError("capacity must be a positive integer") self.capacity: int = capacity self.map: Dict[str, DLinkedList.Node] = {} self.use_ordering: DLinkedList = DLinkedList() def get(self, key: str) -> Optional[str]: if key in self.map: node: DLinkedList.Node = self.map[key] self.use_ordering.remove(node) self.use_ordering.append_left(node) data: Item = node.data return data.value return None def put(self, key: str, value: str) -> None: # eviction if cache is full if len(self.map) == self.capacity and key not in self.map: node = self.use_ordering.pop() if node is not None: del self.map[node.data.key] node = DLinkedList.Node(data=Item(key, value)) self.map[key] = node self.use_ordering.append_left(node)
3.96875
4
cpdb/cr/tests/views/test_cr_mobile_viewset.py
invinst/CPDBv2_backend
25
12791561
<reponame>invinst/CPDBv2_backend<filename>cpdb/cr/tests/views/test_cr_mobile_viewset.py from datetime import datetime, date from django.urls import reverse from django.contrib.gis.geos import Point from mock import patch from rest_framework.test import APITestCase from rest_framework import status from robber import expect import pytz from data.factories import ( OfficerFactory, AllegationFactory, OfficerAllegationFactory, ComplainantFactory, AreaFactory, PoliceWitnessFactory, InvestigatorFactory, InvestigatorAllegationFactory, AllegationCategoryFactory, AttachmentFileFactory, OfficerBadgeNumberFactory, VictimFactory ) from data.constants import MEDIA_TYPE_DOCUMENT from cr.tests.mixins import CRTestCaseMixin from data.cache_managers import officer_cache_manager, allegation_cache_manager from email_service.constants import CR_ATTACHMENT_REQUEST from email_service.factories import EmailTemplateFactory class CRMobileViewSetTestCase(CRTestCaseMixin, APITestCase): def test_retrieve(self): area = AreaFactory(name='Lincoln Square') officer1 = OfficerFactory( id=123, first_name='Mr', last_name='Foo', gender='M', race='White', rank='Officer', appointed_date=date(2001, 1, 1), birth_year=1993, complaint_percentile=4.4, civilian_allegation_percentile=1.1, internal_allegation_percentile=2.2, trr_percentile=3.3, allegation_count=1, sustained_count=1, ) OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation = AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago', location='Police Communications System', beat=area, is_officer_complaint=False, summary='Summary', first_start_date=date(2003, 3, 20), first_end_date=date(2006, 5, 26) ) ComplainantFactory(allegation=allegation, gender='M', race='Black', age='18') VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations', allegation_name='Secondary/Special Employment' ) ) officer = OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=4.4, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) PoliceWitnessFactory(officer=officer, allegation=allegation) investigator = OfficerFactory( id=1, first_name='Ellis', last_name='Skol', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR arrest report document', 
url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document 2', id='654321', url='http://AR-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) officer_cache_manager.build_cached_columns() allegation_cache_manager.cache_data() response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '12345'})) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(dict(response.data)).to.eq({ 'crid': '12345', 'most_common_category': { 'category': 'Operation/Personnel Violations', 'allegation_name': 'Secondary/Special Employment' }, 'coaccused': [ { 'id': 123, 'full_name': '<NAME>', 'rank': 'Officer', 'final_outcome': 'Separation', 'final_finding': 'Sustained', 'allegation_count': 1, 'category': 'Operation/Personnel Violations', 'percentile_allegation': '4.4000', 'percentile_allegation_civilian': '1.1000', 'percentile_allegation_internal': '2.2000', 'percentile_trr': '3.3000', } ], 'complainants': [ { 'race': 'Black', 'gender': 'Male', 'age': 18 } ], 'victims': [ { 'race': 'Black', 'gender': 'Male', 'age': 53 } ], 'point': { 'lon': 12.0, 'lat': 21.0 }, 'summary': 'Summary', 'incident_date': '2002-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan Ave, Chicago', 'location': 'Police Communications System', 'beat': 'Lincoln Square', 'involvements': [ { 'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'police_witness', 'officer_id': 3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_trr': '5.5000', 'percentile_allegation': '4.4000', } ], 'attachments': [ { 'title': 'CR document', 'file_type': 'document', 'url': 'http://cr-document.com/', 'id': '123456', } ] }) def test_retrieve_badge(self): area = AreaFactory(name='Lincoln Square') officer1 = OfficerFactory( id=123, first_name='Mr', last_name='Foo', gender='M', race='White', rank='Officer', appointed_date=date(2001, 1, 1), birth_year=1993, complaint_percentile=4.4, civilian_allegation_percentile=1.1, internal_allegation_percentile=2.2, trr_percentile=3.3, allegation_count=1, sustained_count=1, ) OfficerBadgeNumberFactory(officer=officer1, star='12345', current=True) allegation = AllegationFactory( crid='12345', point=Point(12, 21), incident_date=datetime(2007, 2, 28, tzinfo=pytz.utc), add1=3510, add2='Michigan Ave', city='Chicago', location='Police Communications System', beat=area, is_officer_complaint=False, summary='Summary', first_start_date=date(2003, 3, 20), first_end_date=date(2006, 5, 26) ) ComplainantFactory(allegation=allegation, gender='M', race='Black', age='18') VictimFactory(allegation=allegation, gender='M', race='Black', age=53) OfficerAllegationFactory( officer=officer1, allegation=allegation, final_finding='SU', disciplined=True, final_outcome='Separation', start_date=date(2003, 3, 20), end_date=date(2006, 5, 26), allegation_category=AllegationCategoryFactory( category='Operation/Personnel Violations', allegation_name='Secondary/Special Employment' ) ) officer = OfficerFactory( id=3, first_name='Raymond', last_name='Piwinicki', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, trr_percentile=5.5, allegation_count=1, sustained_count=1, ) OfficerAllegationFactory( officer=officer, final_finding='SU', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, 
tzinfo=pytz.utc), allegation__is_officer_complaint=False ) PoliceWitnessFactory(officer=officer, allegation=allegation) investigator = OfficerFactory( id=1, first_name='Ellis', last_name='Skol', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_2 = OfficerFactory( id=2, first_name='Jerome', last_name='Finnigan', appointed_date=date(2001, 5, 1), complaint_percentile=6.6, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) investigator_3 = OfficerFactory( id=4, first_name='Edward', last_name='May', appointed_date=date(2001, 5, 1), complaint_percentile=9.9, civilian_allegation_percentile=7.7, internal_allegation_percentile=8.8, allegation_count=1, sustained_count=0, ) OfficerBadgeNumberFactory(officer=investigator_2, star='456789', current=True) OfficerAllegationFactory( officer=investigator, final_finding='NS', start_date=date(2003, 2, 28), allegation__incident_date=datetime(2002, 2, 28, tzinfo=pytz.utc), allegation__is_officer_complaint=False ) investigator = InvestigatorFactory(officer=investigator) investigator_2 = InvestigatorFactory(officer=investigator_2) investigator_3 = InvestigatorFactory(officer=investigator_3) investigator_4 = InvestigatorFactory(first_name='Kevin', last_name='Osborn') InvestigatorAllegationFactory( allegation=allegation, investigator=investigator, current_star='123456' ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_2, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_3, current_star=None ) InvestigatorAllegationFactory( allegation=allegation, investigator=investigator_4, current_star=None ) AttachmentFileFactory( tag='TRR', allegation=allegation, title='CR document', id='123456', url='http://cr-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) AttachmentFileFactory( tag='AR', allegation=allegation, title='CR document 2', id='654321', url='http://AR-document.com/', file_type=MEDIA_TYPE_DOCUMENT ) officer_cache_manager.build_cached_columns() allegation_cache_manager.cache_data() response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '12345'})) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(dict(response.data)).to.eq({ 'crid': '12345', 'most_common_category': { 'category': 'Operation/Personnel Violations', 'allegation_name': 'Secondary/Special Employment' }, 'coaccused': [ { 'id': 123, 'full_name': '<NAME>', 'rank': 'Officer', 'final_outcome': 'Separation', 'final_finding': 'Sustained', 'allegation_count': 1, 'category': 'Operation/Personnel Violations', 'percentile_allegation': '4.4000', 'percentile_allegation_civilian': '1.1000', 'percentile_allegation_internal': '2.2000', 'percentile_trr': '3.3000', } ], 'complainants': [ { 'race': 'Black', 'gender': 'Male', 'age': 18 } ], 'victims': [ { 'race': 'Black', 'gender': 'Male', 'age': 53 } ], 'point': { 'lon': 12.0, 'lat': 21.0 }, 'summary': 'Summary', 'incident_date': '2007-02-28', 'start_date': '2003-03-20', 'end_date': '2006-05-26', 'address': '3510 Michigan Ave, Chicago', 'location': 'Police Communications System', 'beat': 'Lincoln Square', 'involvements': [ { 'involved_type': 'investigator', 'full_name': '<NAME>', 'badge': 'COPA/IPRA', }, { 'involved_type': 'investigator', 'officer_id': 4, 'full_name': '<NAME>', 'badge': 'COPA/IPRA', 'percentile_allegation': '9.9000', 'percentile_allegation_civilian': 
'7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id': 2, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'investigator', 'officer_id': 1, 'full_name': '<NAME>', 'badge': 'CPD', 'percentile_allegation': '6.6000', 'percentile_allegation_civilian': '7.7000', 'percentile_allegation_internal': '8.8000', }, { 'involved_type': 'police_witness', 'officer_id': 3, 'full_name': '<NAME>', 'allegation_count': 1, 'sustained_count': 1, 'percentile_allegation': '9.9000', 'percentile_trr': '5.5000', } ], 'attachments': [ { 'title': 'CR document', 'file_type': 'document', 'url': 'http://cr-document.com/', 'id': '123456', } ] }) def test_retrieve_not_found(self): response = self.client.get(reverse('api-v2:cr-mobile-detail', kwargs={'pk': '45678'})) expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND) @patch('cr.views.send_attachment_request_email') def test_request_document(self, mock_send_attachment_request_email): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) AllegationFactory(crid='112233') response = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': '112233'}), {'email': '<EMAIL>'} ) expect(response.status_code).to.eq(status.HTTP_200_OK) expect(response.data).to.eq({ 'message': 'Thanks for subscribing', 'crid': '112233' }) expect(mock_send_attachment_request_email).to.be.called_once_with( '<EMAIL>', attachment_type='cr_request', pk='112233', ) def test_request_same_document_twice(self): EmailTemplateFactory(type=CR_ATTACHMENT_REQUEST) allegation = AllegationFactory(crid='112233') self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) response2 = self.client.post( reverse('api-v2:cr-mobile-request-document', kwargs={'pk': allegation.crid}), {'email': '<EMAIL>'} ) expect(response2.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response2.data).to.eq({ 'message': 'Email already added', 'crid': '112233' }) def test_request_document_without_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321})) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid email' }) def test_request_document_with_invalid_email(self): AllegationFactory(crid='321') response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321}), {'email': 'invalid@email'}) expect(response.status_code).to.eq(status.HTTP_400_BAD_REQUEST) expect(response.data).to.eq({ 'message': 'Please enter a valid email' }) def test_request_document_with_invalid_allegation(self): response = self.client.post(reverse('api-v2:cr-mobile-request-document', kwargs={'pk': 321})) expect(response.status_code).to.eq(status.HTTP_404_NOT_FOUND)
2.0625
2
notebooks/csv_maker.py
CJHJ/spatiotemporal-transformer-paper
6
12791562
# %% import pandas as pd from collections import defaultdict import pickle from typing import DefaultDict cmap_data = pickle.load(open("./cmap_transformer.pkl", "rb")) mm_data = pickle.load(open("./mm_report_transformer.pkl", "rb")) # %% def convert_to_metric_first(data): rows = defaultdict(dict) for model, metrics in data.items(): for metric, values in metrics.items(): for i, value in enumerate(values): rows[metric][model + f"_{i}"] = value return rows def save_to_csv(data, save_path): df = pd.DataFrame(data) df.to_csv(save_path) save_to_csv( convert_to_metric_first(cmap_data), "./cmap_report_transformer.csv" ) save_to_csv(convert_to_metric_first(mm_data), "./mm_report_transformer.csv") # %%
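# --- Usage sketch (not part of the original file) ---
# Illustrates the reshaping done by convert_to_metric_first on a tiny made-up
# report dict (the real inputs come from the pickle files loaded above): each
# metric becomes a row family keyed by "<model>_<run index>".
_example_report = {
    "transformer": {"mae": [0.10, 0.12], "rmse": [0.20, 0.22]},
    "convlstm": {"mae": [0.15, 0.14], "rmse": [0.25, 0.24]},
}
assert convert_to_metric_first(_example_report)["mae"] == {
    "transformer_0": 0.10,
    "transformer_1": 0.12,
    "convlstm_0": 0.15,
    "convlstm_1": 0.14,
}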
2.640625
3
classification/migrations/0019_auto_20210201_1625.py
SACGF/variantgrid
5
12791563
<reponame>SACGF/variantgrid # Generated by Django 3.1 on 2021-02-01 05:55 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('classification', '0018_auto_20210201_1004'), ] operations = [ migrations.AlterField( model_name='conditiontext', name='status', field=models.CharField(choices=[('T', 'Terms Provided'), ('M', 'Multiple Terms Provided'), ('A', 'Auto-Matched'), ('N', 'Not Auto-Matched'), ('U', 'User Reviewed')], default='N', max_length=1), ), ]
1.6875
2
tasks/new_discussions.py
harej/reports_bot
2
12791564
# -*- coding: utf-8 -*- """ New Discussions -- Provides a list of new discussions within a WikiProject's scope Copyright (C) 2015 <NAME>, 2016 <NAME> Licensed under MIT License: http://mitlicense.org """ from collections import namedtuple from datetime import datetime import re from reportsbot.task import Task from reportsbot.util import join_full_title import mwparserfromhell from pywikibot.data.api import Request __all__ = ["NewDiscussions"] _Section = namedtuple("_Section", ["name", "timestamp"]) _Discussion = namedtuple("_Discussion", ["title", "name", "timestamp"]) class NewDiscussions(Task): """Updates a list of new discussions within a WikiProject's scope.""" DISCUSSION_TEMPLATE = "WPX new discussion" DISCUSSIONS_PER_PAGE = 15 DISCUSSIONS_BEFORE_FOLD = 4 @staticmethod def _parse_timestamp(text): """Return a datetime for the given timestamp string, or ValueError.""" return datetime.strptime(str(text), "%H:%M, %d %B %Y (UTC)") def _extract_sections(self, text): """Return a list of section tuples for the given page.""" code = mwparserfromhell.parse(text) sections = set() for section in code.get_sections(levels=[2]): clean = section.strip_code() match = re.search(r"\d\d:\d\d,\s\d\d?\s\w+\s\d{4}\s\(UTC\)", clean) if not match: continue try: timestamp = self._parse_timestamp(match.group(0)) except ValueError: continue name = str(section.get(0).title.strip_code()).strip() sections.add(_Section(name, timestamp)) return sections def _load_pages(self, titles): """Load a chunk of pages from the API.""" def _get_rev(page): try: return page["revisions"][0]["slots"]["main"]["content"] except (KeyError, IndexError): return "" req = Request(self._bot.site, parameters={ "action": "query", "prop": "revisions", "rvprop": "content", "rvslots": "main", "formatversion": "2", "titles": "|".join(titles) }) data = req.submit() return [(page["title"], _get_rev(page)) for page in data["query"]["pages"]] def _get_updated_discussions(self, start, end): """Return a dict mapping talk page titles to lists of section tuples. The only pages included in the dict are those that have been updated in the given time range. """ query = """SELECT DISTINCT rc_namespace, rc_title FROM recentchanges WHERE rc_timestamp >= ? AND rc_timestamp < ? AND rc_namespace % 2 = 1 AND rc_namespace != 3 AND (rc_type = 0 OR rc_type = 1 OR rc_type = 3) AND rc_bot = 0""" startts = start.strftime("%Y%m%d%H%M%S") endts = end.strftime("%Y%m%d%H%M%S") self._logger.info("Fetching discussions updated between %s and %s", startts, endts) with self._bot.wikidb as cursor: cursor.execute(query, (startts, endts)) titles = [join_full_title(self._bot.site, ns, title.decode("utf8")) for (ns, title) in cursor.fetchall()] self._logger.debug("Fetching sections for %s pages", len(titles)) sections = {} chunksize = 50 for start in range(0, len(titles), chunksize): chunk = titles[start:start+chunksize] pages = self._load_pages(chunk) for title, text in pages: try: sections[title] = self._extract_sections(text) except mwparserfromhell.parser.ParserError: self._logger.exception("Failed to parse [[%s]]", title) return sections def _get_current_discussions(self, title): """Return a dict mapping talk page titles to lists of section tuples. Given a WikiProject new discussions page, return all discussions currently listed. 
""" text = self._bot.get_page(title).text code = mwparserfromhell.parse(text) discussions = {} for tmpl in code.filter_templates(): if tmpl.name != self.DISCUSSION_TEMPLATE: continue if not (tmpl.has("title") and tmpl.has("section") and tmpl.has("timestamp")): continue try: timestamp = self._parse_timestamp(tmpl.get("timestamp").value) except ValueError: continue title = str(tmpl.get("title").value) section = _Section(str(tmpl.get("section").value), timestamp) if title in discussions: discussions[title].add(section) else: discussions[title] = {section} return discussions def _process_discussions(self, pages, current, updated): """Return a sorted list of the most recent discussion tuples.""" sections = {} for page in pages: title = join_full_title(self._bot.site, page.ns + 1, page.title) if title in updated: sections[title] = updated[title] elif title in current: sections[title] = current[title] discussions = [_Discussion(title, section.name, section.timestamp) for title in sections for section in sections[title]] discussions.sort(key=lambda disc: disc.timestamp, reverse=True) discussions = discussions[:self.DISCUSSIONS_PER_PAGE] for disc in discussions: self._logger.debug(" [[%s#%s]] at %s", disc.title, disc.name, disc.timestamp.strftime("%Y %m %d, %H:%M:%S")) news = [disc.title for disc in discussions if disc.title not in current][:3] return discussions, news def _save_discussions(self, project, title, discussions, news): """Save the given list of discussions to the given page title.""" text = """<noinclude><div style="padding-bottom:1em;">{{Clickable button 2|%(projname)s|Return to WikiProject|class=mw-ui-neutral}}</div></noinclude> {{WPX action box|color={{{2|#086}}}|title=Have a question?|content= {{Clickable button 2|url={{fullurl:%(projtalk)s|action=edit&section=new}}|Ask the WikiProject|class=mw-ui-progressive mw-ui-block}} {{Clickable button 2|%(projtalk)s|View Other Discussions|class=mw-ui-block}} }} {{WPX list start|intro={{WPX last updated|%(title)s}}}} %(discussions)s {{WPX list end|more=%(title)s}} """ template = "{{WPX new discussion|color={{{1|#37f}}}|title=%(title)s|section=%(name)s|timestamp=%(timestamp)s}}" discitems = [ template % { "title": disc.title, "name": disc.name, "timestamp": disc.timestamp.strftime("%H:%M, %d %B %Y (UTC)") } for disc in discussions] fold = self.DISCUSSIONS_BEFORE_FOLD if len(discitems) > fold: before = "\n".join(discitems[:fold]) after = "\n".join(discitems[fold:]) disclist = before + "<noinclude>\n" + after + "</noinclude>" else: disclist = "\n".join(discitems) projtalk = self._bot.get_page(project.name).toggleTalkPage().title() page = self._bot.get_page(title) page.text = text % { "title": title, "projname": project.name, "projtalk": projtalk, "discussions": disclist } summary = "Updating new discussions" if news: summary += ": " + ", ".join("[[%s]]" % item for item in news) page.save(summary, minor=False) def _process(self, project, updated): """Process new discussions for the given project.""" self._logger.debug("Updating new discussions for %s", project.name) title = project.name + "/Discussions" pages = project.get_members() current = self._get_current_discussions(title) discussions, news = self._process_discussions(pages, current, updated) self._save_discussions(project, title, discussions, news) def run(self): start = self._bot.get_last_updated("new_discussions") end = datetime.utcnow() updated = self._get_updated_discussions(start, end) self._logger.info("Updating discussion reports") for project in self._bot.get_configured_projects(): 
if project.config.get("new_discussions"): self._process(project, updated) self._bot.set_last_updated("new_discussions", end)
2.796875
3
tests/RunTests/PythonTests/test2011_011.py
maurizioabba/rose
488
12791565
<gh_stars>100-1000 # tests for dictionary displays a = {} b = {1: 2} c = {3: 4, 5: 6} d = {7: "seven", 8: "eight", 9: "nine", 10: "one" + "zero"} print a print b print c print d
2.5
2
tests/tfTests/tfSim.py
Los-Phoenix/Word2vec_LP
1
12791566
#coding:UTF-8
# This file implements a single-layer neural network in TensorFlow to decide whether two words are synonyms
# The input is X@200 and Y@1 produced by readerX
# Split the data into a test set and a supervised (training) set
# Train with a three-layer network: n hidden layers and 1 output layer??
import readerX
import tensorflow as tf
import numpy as np
import random
import gc

data_dim = 200  # dimensionality of the input data

# piece = 999999
# sample_num = piece * 49
xInTemp, yInTemp = readerX.genAllXY()
yInTemp = [[i,1 - i] for i in yInTemp]

print 'Reader Done'

xIn = np.matrix(xInTemp)
yIn = np.matrix(yInTemp)
sample_num = len(yIn) - 1000
print 'Sample size is : ',sample_num

del(yInTemp)
del(xInTemp)
gc.collect()

print 'Matrix Prepared'

x = tf.placeholder(tf.float32, shape = (None, data_dim))
y = tf.placeholder(tf.float32, shape = (None, 2))

num = 30
num2 = 20

print num, num2

with tf.variable_scope("Ez_flat"):
    W1 = tf.Variable(np.random.rand(data_dim, num), 'weight1', dtype=tf.float32)
    b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32)
#    b1 = tf.Variable(np.random.rand(1, num), 'bias1', dtype=tf.float32)
    L1_in = tf.matmul(x, W1) + b1
    #L1_out = tf.nn.softmax(L1_in)
    L1_out = tf.nn.sigmoid(L1_in)
#
    W2 = tf.Variable(np.random.rand(num, num2), 'weight2', dtype=tf.float32)
    b2 = tf.Variable(np.random.rand(1, num2), 'bias2', dtype=tf.float32)
#
    L2_in = tf.matmul(L1_out, W2) + b2
    L2_out = tf.nn.sigmoid(L2_in)

    W3 = tf.Variable(np.random.rand(num2, 2), 'weight2', dtype=tf.float32)
#    b3 = tf.Variable(np.random.rand(1, 2), 'bias2', dtype=tf.float32)
    Lf_in = tf.matmul(L2_out, W3)
    Lf_out = tf.nn.softmax(Lf_in)

#    rst = L2_out * 50 + 50
#    loss = tf.contrib.losses.mean_squared_error(L1_out, y)
#    loss = tf.contrib.losses.mean_squared_error(L1_out, y)
    loss = -tf.reduce_sum(y * tf.log(Lf_out))
#    loss = tf.reduce_mean(loss)

with tf.name_scope("training-accuracy") as scope:
    correct_prediction = tf.equal(tf.argmax(Lf_out,1), tf.argmax(y,1))
    train_accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    train_accuracy_summary = tf.summary.scalar("training accuracy", train_accuracy)

opt = tf.train.AdamOptimizer(0.01)
train_op = opt.minimize(loss)

with tf.Session() as sess:
    print 'Training Start'
    sess.run(tf.global_variables_initializer())
    for cnt in xrange(100000000):
        randList = np.random.randint(0,sample_num, size=(1,200))
#        inputSam = random.sample(inputLong,577)
#        print inputSam
        xSam = xIn[randList, :]
        ySam = yIn[randList, :]
#        print ySam
#        _, loss_val, W_val = sess.run([train_op, loss, W3],
#                        feed_dict={x: xIn[:sample_num],
#                                   y: yIn[:sample_num].reshape((-1, 2))})
        _, loss_val, W_val = sess.run([train_op, loss, W3],
                        feed_dict={x: xSam.reshape(-1, data_dim),
                                   y: ySam.reshape(-1, 2)})
        if cnt%1000 == 0 or( cnt < 2000 and cnt%100 == 0):  #!!!!!!!!!!! evaluation happens here !!!!!!!!
            accu = train_accuracy.eval(feed_dict={x: xIn[:sample_num],
                                                  y: yIn[:sample_num].reshape((-1, 2))})
            accu_test = train_accuracy.eval(feed_dict={x: xIn[sample_num:],
                                                       y: yIn[sample_num:].reshape((-1, 2))})
#            print W1_val
            print '#' * 20
            print 'cnt', cnt
            print loss_val
            print accu
            print accu_test
#            print W_val
            if accu > 0.99:
                print '#' * 20
                print accu
                print accu_test
                print cnt
                print 'Done'
                break
2.71875
3
tests/conftest.py
Rshep3087/log-five-three-one
2
12791567
<filename>tests/conftest.py<gh_stars>1-10 from strength_log.models import User from strength_log import create_app, db from strength_log.config import Config from helium import start_firefox, kill_browser, click, Link, write import pytest HOME_PAGE = "https://www.strengthlog.app/" class TestConfig(Config): TESTING = True BCRYPT_LOG_ROUNDS = 4 SQLALCHEMY_DATABASE_URI = "sqlite://" WTF_CSRF_ENABLED = False @pytest.fixture def driver_home(): driver = start_firefox(HOME_PAGE, headless=True) yield driver kill_browser() @pytest.fixture def driver_login(): driver = start_firefox(HOME_PAGE, headless=True) click(Link("Login")) write("<EMAIL>", into="Email") write("test", into="Password") click("Submit") yield driver kill_browser() @pytest.fixture(scope="module") def new_user(): user = User("<EMAIL>", "strengthlog") return user @pytest.fixture(scope="module") def test_client(): app = create_app(config_class=TestConfig) testing_client = app.test_client() ctx = app.app_context() ctx.push() yield testing_client ctx.pop() @pytest.fixture(scope="module") def init_database(): # Create the database and tables db.create_all() # Insert user data user1 = User(email="<EMAIL>", password="<PASSWORD>") user2 = User(email="<EMAIL>", password="<PASSWORD>") db.session.add(user1) db.session.add(user2) # Commit changes db.session.commit() yield db # Testing happpens here db.drop_all()
2.265625
2
src/test.py
priba/graph_metric.pytorch
6
12791568
<gh_stars>1-10 # -*- coding: utf-8 -*- from __future__ import print_function, division """ Graph classification """ # Python modules import torch import glob import numpy as np import time import os # Own modules from options import Options from Logger import LogMetric from utils import load_checkpoint, knn_accuracy, mean_average_precision from models import models, distance from data.load_data import load_data from loss.contrastive import ContrastiveLoss, TripletLoss import dgl __author__ = "<NAME>" __email__ = "<EMAIL>" def test(data_loader, gallery_loader, nets, cuda, validation=False): batch_time = LogMetric.AverageMeter() acc = LogMetric.AverageMeter() meanap = LogMetric.AverageMeter() net, distance = nets # switch to test mode net.eval() distance.eval() end = time.time() dist_matrix = [] start = time.time() with torch.no_grad(): g_gallery = [] target_gallery = [] for j, (g, target) in enumerate(gallery_loader): if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output g = net(g) target_gallery.append(target) g_gallery.append(g) target_gallery = np.array(np.concatenate(target_gallery)) gdata = list(map(lambda g: g.gdata['std'], g_gallery)) g_gallery = dgl.batch(g_gallery) g_gallery.gdata = {'std': torch.cat(gdata)} target_query = [] for i, (g, target) in enumerate(data_loader): # Prepare input data if cuda: g.to(torch.device('cuda')) g.gdata['std'] = g.gdata['std'].cuda() # Output g = net(g) d = distance(g, g_gallery, mode='retrieval') dist_matrix.append(d) target_query.append(target) dist_matrix = torch.stack(dist_matrix) target_query = np.array(np.concatenate(target_query)) if validation: target_combined_query = target_query combined_dist_matrix = dist_matrix else: print('* Test No combine mAP {}'.format(mean_average_precision(dist_matrix, target_gallery, target_query))) target_combined_query = np.unique(target_query) combined_dist_matrix = torch.zeros(target_combined_query.shape[0], dist_matrix.shape[1]) for i, kw in enumerate(target_combined_query): ind = kw == target_query combined_dist_matrix[i] = dist_matrix[ind].min(0).values # K-NN classifier acc.update(knn_accuracy(combined_dist_matrix, target_gallery, target_combined_query, k=5)) # mAP retrieval meanap.update(mean_average_precision(combined_dist_matrix, target_gallery, target_combined_query)) batch_time.update(time.time()-start) print('* Test Acc {acc.avg:.3f}; mAP {meanap.avg: .5f} Time x Test {b_time.avg:.3f}' .format(acc=acc, meanap=meanap, b_time=batch_time)) return acc, meanap def main(): print('Loss & Optimizer') if args.loss=='triplet': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap) elif args.loss=='triplet_distance': args.triplet=True criterion = TripletLoss(margin=args.margin, swap=args.swap, dist=True) else: args.triplet=False criterion = ContrastiveLoss(margin=args.margin) print('Prepare data') train_loader, valid_loader, valid_gallery_loader, test_loader, test_gallery_loader, in_size = load_data(args.dataset, args.data_path, triplet=args.triplet, batch_size=args.batch_size, prefetch=args.prefetch) print('Create model') net = models.GNN(in_size, args.out_size, nlayers=args.nlayers, hid=args.hidden, J=args.pow) distNet = distance.SoftHd() print('Check CUDA') if args.cuda and args.ngpu > 1: print('\t* Data Parallel **NOT TESTED**') net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu))) if args.cuda: print('\t* CUDA') net, distNet = net.cuda(), distNet.cuda() criterion = criterion.cuda() start_epoch = 0 best_map = 0 early_stop_counter = 0 if 
args.load is not None: print('Loading model') checkpoint = load_checkpoint(args.load) net.load_state_dict(checkpoint['state_dict']) distNet.load_state_dict(checkpoint['state_dict_dist']) start_epoch = checkpoint['epoch'] best_map = checkpoint['best_map'] print('Loaded model at epoch {epoch} and mAP {meanap}%'.format(epoch=checkpoint['epoch'],meanap=checkpoint['best_map'])) print('***Test***') test(test_loader, test_gallery_loader, [net, distNet], args.cuda) if __name__ == '__main__': # Parse options args = Options().parse() print('Parameters:\t' + str(args)) # Check cuda & Set random seed args.cuda = args.ngpu > 0 and torch.cuda.is_available() np.random.seed(args.seed) torch.manual_seed(args.seed) if args.cuda: torch.cuda.manual_seed(args.seed) # Check Test and Load if args.load is None: raise Exception('Cannot test without loading a model.') main()
2.0625
2
Old/Market_demand.py
ntuecon/2018groupCE
3
12791569
price = input('Please determine the market prices of [x,y]: ')
income = input('Please determine the income of the individual: ')
par = input('Please determine the parameters of Cobb-Douglas [alpha1,alpha2]: ')
nper = input('Please determine the number of individuals: ')

import Consumer_class

Market_D = [0, 0]
for i in range(nper):
    A = Consumer_class.Consumer(price, income, par)
    Market_D += A.utility_max()
print Market_D
3.65625
4
tests/test_events_triggered_unused.py
Pelmen323/Kaiserreich_Jenkins_PyTests
0
12791570
########################## # Test script to check if "is_triggered_only = yes" events are triggered from somewhere # If they not - they'll never be triggered # By Pelmen, https://github.com/Pelmen323 ########################## import re from ..test_classes.generic_test_class import DataCleaner, ResultsReporter from ..test_classes.events_class import Events FALSE_POSITIVES = ['ace_promoted.1', 'ace_promoted.2', 'ace_died.1', 'ace_killed_by_ace.1', 'ace_killed_other_ace.1', 'aces_killed_each_other.1', 'nuke_dropped.0'] def test_check_triggered_events(test_runner: object): all_events = [] triggered_events_id = dict() invoked_events_id = [] # 1. Get all events code all_events = Events.get_all_events(test_runner=test_runner, lowercase=True) # 2. Get the "triggered only events" for event in all_events: if "is_triggered_only = yes" in event: pattern_matches = re.findall('id = .*', event) event_id = pattern_matches[0].strip('\t').strip() # Only first match is taken if '#' in event_id: event_id = event_id[:event_id.index('#')].strip() # Clean up comments event_id = event_id[5:].strip() # Remove "id =" part triggered_events_id[event_id] = 0 # Default value is set to zero # 3. Get all events triggered in files triggered_events_id = DataCleaner.clear_false_positives(input_iter=triggered_events_id, false_positives=FALSE_POSITIVES) invoked_events_id = Events.get_all_triggered_events_names(test_runner=test_runner, lowercase=True) # 4. Check if events are used for event in invoked_events_id: if event in triggered_events_id.keys(): triggered_events_id[event] += 1 results = [i for i in triggered_events_id.keys() if triggered_events_id[i] == 0] ResultsReporter.report_results(results=results, message="Those events have 'is_triggered_only = yes' attr but are never triggered from outside. Check console output")
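# --- Editor's illustrative sketch (not part of the original test): shows how the
# --- id-extraction logic above behaves on a hand-written sample event block.
# --- The event id "kr_demo.1" and the surrounding text are made-up example data.
def _demo_event_id_extraction():
    sample_event = 'country_event = { id = kr_demo.1 # some comment\n\tis_triggered_only = yes }'
    event_id = re.findall('id = .*', sample_event)[0].strip('\t').strip()
    if '#' in event_id:
        event_id = event_id[:event_id.index('#')].strip()
    return event_id[5:].strip()  # -> 'kr_demo.1'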
2.515625
3
0x04-python-more_data_structures/10-best_score.py
flourishcodes/holbertonschool-higher_level_programming
0
12791571
<reponame>flourishcodes/holbertonschool-higher_level_programming<filename>0x04-python-more_data_structures/10-best_score.py<gh_stars>0
#!/usr/bin/python3
def best_score(a_dictionary):
    """Return the key with the biggest integer value (None if no key)."""
    if a_dictionary is None or a_dictionary == {}:
        return
    best = None
    for key in a_dictionary:
        if best is None or a_dictionary[key] > a_dictionary[best]:
            best = key
    return best
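# --- Editor's usage sketch (not part of the original task file); the names and
# --- scores below are made-up example data.
if __name__ == "__main__":
    print(best_score({'John': 12, 'Bob': 14, 'Alex': 9}))  # Bob
    print(best_score(None))  # None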
2.3125
2
Scripts/simulation/routing/waypoints/waypoint_generator_connected_points.py
velocist/TS4CheatsInfo
0
12791572
<gh_stars>0 # uncompyle6 version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\InGame\Gameplay\Scripts\Server\routing\waypoints\waypoint_generator_connected_points.py # Compiled at: 2020-04-22 01:40:18 # Size of source mod 2**32: 3643 bytes import sims4 from event_testing.resolver import SingleSimResolver from routing.waypoints.tunable_waypoint_graph import TunableWaypointGraphSnippet, TunableWaypointWeightedSet from routing.waypoints.waypoint_generator import _WaypointGeneratorBase from sims4.tuning.tunable import TunableRange logger = sims4.log.Logger('WaypointGeneratorConnectedPoints', default_owner='miking') class _WaypointGeneratorConnectedPoints(_WaypointGeneratorBase): FACTORY_TUNABLES = {'waypoint_graph':TunableWaypointGraphSnippet(description='\n Defines the waypoints and connections between them.\n '), 'starting_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\n Waypoint for the generator to start at (will choose one based on the tests/weights).\n '), 'ending_waypoint':TunableWaypointWeightedSet.TunableFactory(description='\n Waypoint for the generator to end at (will choose one based on the tests/weights).\n '), 'max_waypoints':TunableRange(description='\n The maximum number of waypoints to visit. Set to 0 to keep going until ending_waypoint is reached.\n ', tunable_type=int, default=0, minimum=0, maximum=100)} def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._sim = self._context.sim resolver = SingleSimResolver(self._sim) self._starting_waypoint, self._start_constraint = self.starting_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) def get_start_constraint(self): return self._start_constraint def get_waypoint_constraints_gen(self, routing_agent, waypoint_count): resolver = SingleSimResolver(self._sim) if self.ending_waypoint is not None: ending_waypoint, _ = self.ending_waypoint.choose(self.waypoint_graph, self._routing_surface, resolver) else: ending_waypoint = None cur_waypoint = self._starting_waypoint prev_waypoint = None num_visited = 0 while num_visited < self.max_waypoints or self.max_waypoints == 0: connections = self.waypoint_graph.connections.get(cur_waypoint, None) if connections is None: logger.warn('No connections defined in waypoint graph for waypoint id {}.', cur_waypoint) break new_waypoint, waypoint_constraint = connections.choose(self.waypoint_graph, self._routing_surface, resolver, prev_waypoint) prev_waypoint = cur_waypoint cur_waypoint = new_waypoint if cur_waypoint is None: logger.warn('No connection chosen from waypoint graph for waypoint id {}.', prev_waypoint) break num_visited += 1 yield waypoint_constraint if cur_waypoint == ending_waypoint: break
1.90625
2
utils/tests.py
none-da/zeshare
0
12791573
from utils.alltests.models_tests import * from utils.alltests.views_tests import *
1.085938
1
Hillup/data/__init__.py
migurski/DEM-Tools
17
12791574
<gh_stars>10-100 """ Starting point for DEM retrieval utilities. """ from math import pi, sin, cos from os import unlink, close from itertools import product from tempfile import mkstemp from sys import modules import NED10m, NED100m, NED1km, SRTM1, SRTM3, VFP, Worldwide from ModestMaps.Core import Coordinate from TileStache.Geography import SphericalMercator from TileStache.Core import Layer, Metatile from TileStache.Config import Configuration from TileStache.Caches import Disk from osgeo import gdal, osr from PIL import Image import numpy from .. import save_slope_aspect # used to prevent clobbering in /vsimem/, see: # http://osgeo-org.1803224.n2.nabble.com/gdal-dev-Outputting-to-vsimem-td6221295.html vsimem_counter = 1 # # Set up some useful projections. # osr.UseExceptions() # <-- otherwise errors will be silent and useless. webmerc_proj = SphericalMercator() webmerc_sref = osr.SpatialReference() webmerc_sref.ImportFromProj4(webmerc_proj.srs) class SeedingLayer (Layer): """ Tilestache-compatible seeding layer for preparing tiled data. Intended for use in hillup-seed.py script for preparing a tile directory. """ def __init__(self, demdir, tiledir, tmpdir, source, size): """ """ cache = Disk(tiledir, dirs='safe') config = Configuration(cache, '.') Layer.__init__(self, config, SphericalMercator(), Metatile(), tile_height=size) self.provider = Provider(self, demdir, tmpdir, source) def name(self): return '.' class Provider: """ TileStache provider for generating tiles of DEM slope and aspect data. Source parameter can be "srtm-ned" (default) or "ned-only". See http://tilestache.org/doc/#custom-providers for information on how the Provider object interacts with TileStache. """ def __init__(self, layer, demdir, tmpdir=None, source='srtm-ned'): self.tmpdir = tmpdir self.demdir = demdir self.source = source def getTypeByExtension(self, ext): if ext.lower() != 'tiff': raise Exception() return 'image/tiff', 'TIFF' def renderArea(self, width, height, srs, xmin, ymin, xmax, ymax, zoom): """ Return an instance of SlopeAndAspect for requested area. """ assert srs == webmerc_proj.srs # <-- good enough for now if self.source == 'srtm-ned': providers = choose_providers_srtm(zoom) elif self.source == 'ned-only': providers = choose_providers_ned(zoom) elif self.source == 'vfp': providers = [(VFP, 1)] elif self.source == 'worldwide': providers = [(Worldwide, 1)] else: providers = load_func_path(self.source)(zoom) assert sum([proportion for (mod, proportion) in providers]) == 1.0 # # Prepare information for datasets of the desired extent and projection. # xres = (xmax - xmin) / width yres = (ymin - ymax) / height area_wkt = webmerc_sref.ExportToWkt() buffered_xform = xmin - xres, xres, 0, ymax - yres, 0, yres # # Reproject and merge DEM datasources into destination datasets. # driver = gdal.GetDriverByName('GTiff') composite_ds = make_empty_datasource(width+2, height+2, buffered_xform, area_wkt, self.tmpdir) proportion_complete = 0. for (module, proportion) in providers: cs2cs = osr.CoordinateTransformation(webmerc_sref, module.sref) # get a lat/lon bbox buffered by one pixel on all sides minlon, minlat, z = cs2cs.TransformPoint(xmin - xres, ymin + yres) maxlon, maxlat, z = cs2cs.TransformPoint(xmax + xres, ymax - yres) # # Keep a version of the composite without the # current layer applied for later alpha-blending. 
# do_blending = bool(proportion_complete > 0 and proportion < 1) if do_blending: composite_without = composite_ds.ReadAsArray() ds_args = minlon, minlat, maxlon, maxlat, self.demdir for ds_dem in module.datasources(*ds_args): # estimate the raster density across source DEM and output dem_samples = (maxlon - minlon) / ds_dem.GetGeoTransform()[1] area_pixels = (xmax - xmin) / composite_ds.GetGeoTransform()[1] if dem_samples > area_pixels: # cubic looks better squeezing down resample = gdal.GRA_Cubic else: # cubic spline looks better stretching out resample = gdal.GRA_CubicSpline gdal.ReprojectImage(ds_dem, composite_ds, ds_dem.GetProjection(), composite_ds.GetProjection(), resample) ds_dem = None # # Perform alpha-blending if needed. # if do_blending: proportion_with = proportion / (proportion_complete + proportion) proportion_without = 1 - proportion_with composite_with = composite_ds.ReadAsArray() * proportion_with composite_with += composite_without * proportion_without composite_ds.GetRasterBand(1).WriteArray(composite_with, 0, 0) proportion_complete += proportion elevation = composite_ds.ReadAsArray() unlink(composite_ds.GetFileList()[0]) composite_ds = None # # Calculate and save slope and aspect. # slope, aspect = calculate_slope_aspect(elevation, xres, yres) tile_xform = xmin, xres, 0, ymax, 0, yres return SlopeAndAspect(self.tmpdir, slope, aspect, area_wkt, tile_xform) class SlopeAndAspect: """ TileStache response object with PIL-like save() and crop() methods. This object knows only how to save two-band 8-bit GeoTIFFs. See http://tilestache.org/doc/#custom-providers for information on how the SlopeAndAspect object interacts with TileStache. """ def __init__(self, tmpdir, slope, aspect, wkt, xform): """ Instantiate with array of slope and aspect, and minimal geographic information. """ self.tmpdir = tmpdir self.slope = slope self.aspect = aspect self.w, self.h = self.slope.shape self.wkt = wkt self.xform = xform def save(self, output, format): """ Save a two-band GeoTIFF to output file-like object. """ if format != 'TIFF': raise Exception('File format other than TIFF for slope and aspect: "%s"' % format) save_slope_aspect(self.slope, self.aspect, self.wkt, self.xform, output, self.tmpdir) def crop(self, box): """ Returns a rectangular region from the current image. Box is a 4-tuple with left, upper, right, and lower pixels. Not yet implemented! """ raise NotImplementedError() def choose_providers_srtm(zoom): """ Return a list of data sources and proportions for given zoom level. Each data source is a module such as SRTM1 or SRTM3, and the proportions must all add up to one. Return list has either one or two items. """ if zoom <= SRTM3.ideal_zoom: return [(SRTM3, 1)] elif SRTM3.ideal_zoom < zoom and zoom < SRTM1.ideal_zoom: #bottom, top = SRTM3, SRTM1 # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif zoom == SRTM1.ideal_zoom: #return [(SRTM1, 1)] # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif SRTM1.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top = SRTM1, NED10m # SRTM1 looks terrible bottom, top = SRTM3, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference = float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion = 1. - (zoom - float(bottom.ideal_zoom)) / difference return [(bottom, proportion), (top, 1 - proportion)] def choose_providers_ned(zoom): """ Return a list of data sources and proportions for given zoom level. Each data source is a module such as NED10m or NED1km, and the proportions must all add up to one. 
Return list has either one or two items. """ if zoom <= NED1km.ideal_zoom: return [(NED1km, 1)] elif NED1km.ideal_zoom < zoom and zoom < NED100m.ideal_zoom: #bottom, top = NED1km, NED100m bottom, top = NED1km, NED100m elif zoom == NED100m.ideal_zoom: return [(NED100m, 1)] elif NED100m.ideal_zoom < zoom and zoom < NED10m.ideal_zoom: #bottom, top = NED100m, NED10m bottom, top = NED100m, NED10m elif zoom >= NED10m.ideal_zoom: return [(NED10m, 1)] difference = float(top.ideal_zoom) - float(bottom.ideal_zoom) proportion = 1. - (zoom - float(bottom.ideal_zoom)) / difference return [(bottom, proportion), (top, 1 - proportion)] def make_empty_datasource(width, height, xform, wkt, tmpdir): ''' ''' driver = gdal.GetDriverByName('GTiff') handle, filename = mkstemp(dir=tmpdir, prefix='dem-tools-hillup-data-render-', suffix='.tif') close(handle) ds = driver.Create(filename, width, height, 1, gdal.GDT_Float32) ds.SetGeoTransform(xform) ds.SetProjection(wkt) ds.GetRasterBand(1).WriteArray(numpy.ones((width, height), numpy.float32) * -9999, 0, 0) ds.GetRasterBand(1).SetNoDataValue(-9999) return ds def calculate_slope_aspect(elevation, xres, yres, z=1.0): """ Return a pair of arrays 2 pixels smaller than the input elevation array. Slope is returned in radians, from 0 for sheer face to pi/2 for flat ground. Aspect is returned in radians, counterclockwise from -pi at north around to pi. Logic here is borrowed from hillshade.cpp: http://www.perrygeo.net/wordpress/?p=7 """ width, height = elevation.shape[0] - 2, elevation.shape[1] - 2 window = [z * elevation[row:(row + height), col:(col + width)] for (row, col) in product(range(3), range(3))] x = ((window[0] + window[3] + window[3] + window[6]) \ - (window[2] + window[5] + window[5] + window[8])) \ / (8.0 * xres); y = ((window[6] + window[7] + window[7] + window[8]) \ - (window[0] + window[1] + window[1] + window[2])) \ / (8.0 * yres); # in radians, from 0 to pi/2 slope = pi/2 - numpy.arctan(numpy.sqrt(x*x + y*y)) # in radians counterclockwise, from -pi at north back to pi aspect = numpy.arctan2(x, y) return slope, aspect def load_func_path(funcpath): """ Load external function based on a path. Example funcpath: "Module.Submodule:Function". """ modname, objname = funcpath.split(':', 1) __import__(modname) module = modules[modname] _func = eval(objname, module.__dict__) if _func is None: raise Exception('eval(%(objname)s) in %(modname)s came up None' % locals()) return _func
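# --- Editor's sketch (not part of the original module): a minimal demonstration of
# --- calculate_slope_aspect() on a tiny synthetic elevation grid. The 10-metre
# --- resolution and the ramp values are made-up example inputs.
if __name__ == '__main__':
    demo_elevation = numpy.arange(25, dtype=numpy.float32).reshape(5, 5)  # a simple tilted plane
    demo_slope, demo_aspect = calculate_slope_aspect(demo_elevation, 10.0, 10.0)
    # The outputs are 2 pixels smaller than the input (3x3 here); slope is in radians
    # (pi/2 = flat ground), aspect is in radians counterclockwise from north.
    print(demo_slope.shape, demo_aspect.shape)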
2.28125
2
transform/src/parquet.py
halasystems/boxball
76
12791575
from pathlib import Path from typing import Dict, Type, Iterator, List, Tuple import pyarrow as pa from pyarrow import csv as pcsv from pyarrow import parquet as pq from sqlalchemy import MetaData as AlchemyMetadata, Table as AlchemyTable from sqlalchemy import Integer, SmallInteger, Float, String, CHAR, Text, Boolean, Date, DateTime from sqlalchemy.sql.type_api import TypeEngine from src.schemas import all_metadata from src import EXTRACT_PATH_PREFIX, TRANSFORM_PATH_PREFIX PARQUET_PREFIX = TRANSFORM_PATH_PREFIX.joinpath("parquet") CSV_PREFIX = TRANSFORM_PATH_PREFIX.joinpath("csv") # How many bytes in each CSV chunk to bring into memory. # Larger sizes result in better compression and slightly faster time, # but don't want to risk OOM issues on small build boxes. BUFFER_SIZE_BYTES = 1000000000 sql_type_lookup: Dict[Type[TypeEngine], str] = { Integer: 'int32', SmallInteger: 'int16', Float: 'float64', String: 'str', CHAR: 'str', Text: 'str', Boolean: 'bool', # Some Parquet targets can't handle Parquet dates, so we need to parse and pass timestamps Date: 'timestamp[ms]', DateTime: 'timestamp[ms]' } def get_fields(table: AlchemyTable) -> List[Tuple[str, str]]: cols = [(c.name, c.type) for c in table.columns.values() if c.autoincrement is not True] return [(name, sql_type_lookup[type(dtype)]) for name, dtype in cols] def write_files(metadata: AlchemyMetadata) -> None: """ Creates a Parquet file for each table in the schema. """ tables: Iterator[AlchemyTable] = metadata.tables.values() for table in tables: name = table.name print(name) def get_path(prefix: Path, suffix: str): parent_dir = prefix.joinpath(metadata.schema) parent_dir.mkdir(exist_ok=True, parents=True) return parent_dir.joinpath(name).with_suffix(suffix) extract_file = get_path(EXTRACT_PATH_PREFIX, ".csv.zst") parquet_file = get_path(PARQUET_PREFIX, ".parquet") arrow_schema = pa.schema(get_fields(table)) column_names = [name for name, dtype in get_fields(table)] read_options = pcsv.ReadOptions(column_names=column_names, block_size=1000000000) parse_options = pcsv.ParseOptions(newlines_in_values=True) convert_options = pcsv.ConvertOptions(column_types=arrow_schema, timestamp_parsers=["%Y%m%d", "%Y-%m-%d"], true_values=["1", "T"], false_values=["0", "F"], strings_can_be_null=True) parquet_writer = pq.ParquetWriter(parquet_file, schema=arrow_schema, compression='zstd', version="2.0", use_dictionary=True) stream_reader = pcsv.open_csv(extract_file, read_options=read_options, parse_options=parse_options, convert_options=convert_options) for batch in stream_reader: table = pa.Table.from_batches([batch]) parquet_writer.write_table(table) parquet_writer.close() if __name__ == "__main__": for m in all_metadata: write_files(m)
2.515625
3
codecast_client.py
pitchaim/codecast
0
12791576
<reponame>pitchaim/codecast<gh_stars>0 import os, subprocess from socket import * from threading import Thread import jack class Client(): def __init__(self): #@todo: # ---ESTABLISH SOCKET CONNECTION WITH SERVER--- # # ---ONCE ESTABLISHED, PROCEED--- # #slave - open jackd with net backend cmd0 = 'ijo=$(ps -ef | grep jackd); if [[ $ijo == *"/jackd"* ]]; then echo "RUNNING"; fi' output, error = self.runbash(cmd0) if not output[0] == 'RUNNING': cmd = 'jackd -R -d net' output, error = self.runbash(cmd) else: cmd = 'jackd -d net' output, error = self.runbash(cmd) #ISSUE: maybe doesn't work without -R realtime flag? #start jack client to manage connections jclient = jack.Client('JackClient') #wait for server to verify netjack RUNNING #look at ports, find incoming netjack channel, #name of net port will have some identifying label - #figure it out ... here pretend it's TOKEN net_in_p = jclient.get_ports('*TOKEN*') sys_out_p = jclient.get_ports('system:playback_*') # connect net_in_p to system out #print something to cmd line - hostname, time up, messages #passed through socket, etc. def runbash(self, cmd): process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) output, error = process.communicate() return output, error if __name__ == "__main__": # nice opening message print('\n') print('---------------------------------------') print('-----------C-O-D-E-C-A-S-T-------------') print('-----------------v1.0------------------') print('---------------------------------------') print('-------(c) <NAME> 2018----------') print('---------------------------------------') print('------------~client edition~-----------') print('\n')
2.453125
2
python/plugins/detectbase.py
JumuFENG/iptv-m3u-maker
0
12791577
# -*- coding: utf-8 -*-
import tools
import time
import db
import threading
from .threads import ThreadPool


class DetectorBase(object):
    """The base class for detecting."""

    def __init__(self):
        self.T = tools.Tools()
        self.now = int(time.time() * 1000)

    def getItems(self):
        pass

    def onCompleted(self):
        pass

    def getSource(self):
        sourceItems = self.getItems()
        threads = ThreadPool(20)
        for info in sourceItems:
            threads.add_task(self.checkData, item=info)
        threads.wait_completion()
        self.onCompleted()

    def checkData(self, item):
        if 'url' in item and len(item['url']) > 0:
            self.T.logger('Analyzing [ %s ]: %s' % (item['name'], item['url']))
            netstat = self.T.chkPlayable(item['url'])
            item['online'] = 1 if netstat > 0 else 0
            item['delay'] = netstat
            item['udTime'] = self.now
            if netstat == 0:
                item['failcount'] += 1
            self.addData(item)

    def addData(self, data):
        DB = db.DataBase()
        sql = "SELECT * FROM %s WHERE url = '%s'" % (DB.table, data['url'])
        result = DB.query(sql)
        if len(result) == 0:
            DB.insert(data)
        else:
            id = result[0][0]
            if data['failcount'] >= 10:
                DB.delete(id)
            else:
                DB.edit(id, data)
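# --- Editor's sketch (not part of the original project): DetectorBase is meant to be
# --- subclassed; getItems() supplies the candidate streams and onCompleted() runs once
# --- every URL has been checked. The channel name and URL below are made-up examples,
# --- and a real item would carry whatever extra fields db.DataBase expects.
class ExampleDetector(DetectorBase):

    def getItems(self):
        return [{'name': 'demo channel', 'url': 'http://example.com/stream.m3u8', 'failcount': 0}]

    def onCompleted(self):
        self.T.logger('Example detection finished')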
2.578125
3
apps/comment/models.py
luismayta/python-example-elasticsearch
4
12791578
# -*- coding: utf-8 -*- from django.db import models from model_utils.models import TimeStampedModel from apps.post.models import Post class Comment(TimeStampedModel): """Comment for Post """ class Meta: db_table = "comment" ordering = ["-created"] post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name="comments") text = models.CharField(max_length=200) author = models.CharField(max_length=20) def __str__(self): return self.text
2.3125
2
exemples/attaques/arp_someone.py
RedFou52/esgi_securite_informatique_pratique
12
12791579
import time

from scapy.all import *

BROADCAST = "FF:FF:FF:FF:FF:FF"
HOW_MUCH_TO_SPAM_PPL = 7

victim_ip = "192.168.79.129"
gate_ip = "192.168.79.254"
interface = 'vmnet1'


def get_mac(ip):
    """
    Retrieve the MAC address associated with the IP via an ARP request.
    """
    ans, unans = srp(
        Ether(dst=BROADCAST)/ARP(pdst=ip),  # an ARP packet to broadcast on the network.
        timeout=2,        # how many seconds to wait for a reply.
        iface=interface,  # network interface (here vmnet1, VMware's internal network)
        inter=0.1)        # interval between two packets.

    # Iterate over the (sent, received) pairs of the answers.
    for snd, rcv in ans:
        # Return the MAC address of the received reply.
        return rcv.sprintf(r"%Ether.src%")


def build_arp_pair_packets(gate_info, victim_info):
    """
    Build a pair of ARP packets where gate = router and victim = victim.

    The returned pair is two packets (p1, p2).
    p1 is an ARP packet telling the router that it has to handle the victim.
    p2 is an ARP packet telling the victim that it should now talk to the router
    directly while we leave the scene.
    """
    gate_mac, gate_ip = gate_info
    victim_mac, victim_ip = victim_info

    # build creates an ARP packet addressed to `pdst` telling it that `psrc` has the MAC address `hwsrc`
    build = lambda pdst, psrc, hwsrc: ARP(op=2, pdst=pdst, psrc=psrc, hwdst=BROADCAST, hwsrc=hwsrc)

    return build(gate_ip, victim_ip, victim_mac), build(victim_ip, gate_ip, gate_mac)


def make_them_think_we_didnt_do_something_bad():
    print("[+] Restoring the targets' ARP caches")

    victim_mac = get_mac(victim_ip)
    gate_mac = get_mac(gate_ip)

    # Build our restoration pair,
    # i.e. reconnect the router and the victim to each other.
    for_gate, for_victim = build_arp_pair_packets((gate_mac, gate_ip), (victim_mac, victim_ip))

    for pkt in (for_gate, for_victim):
        # Above all, send it but SHOUT on the network that they must talk to each other again.
        # (here "shout" means: the packet is sent 7 times to make sure they receive it.)
        send(pkt, count=HOW_MUCH_TO_SPAM_PPL)


def trick_them(gm, vm):
    """
    Scapy trickery no. 1.
    """
    for pdst, psrc, hwdst in ((victim_ip, gate_ip, vm), (gate_ip, victim_ip, gm)):
        send(
            ARP(op=2, pdst=pdst, psrc=psrc, hwdst=hwdst)  # Make pdst believe that psrc is at the MAC address hwsrc (which is actually ours), while really delivering the packet to the MAC address hwdst.
        )


def mitm():
    try:
        victim_mac = get_mac(victim_ip)
    except Exception as e:
        print(e)
        print("[!] Could not find the target's MAC address, the attack failed")
        return

    try:
        gate_mac = get_mac(gate_ip)
    except Exception:
        print("[!] Could not find the router's MAC address, the attack failed")
        return

    print("[+] The cyanide is about to be dropped on the network (MAC addresses obtained)")

    while True:
        try:
            trick_them(gate_mac, victim_mac)
            time.sleep(1.5)
        except KeyboardInterrupt:
            make_them_think_we_didnt_do_something_bad()
            break


if __name__ == '__main__':
    mitm()
2.515625
3
chess/ChessConstants.py
vinoo999/alpha-zero-general
2
12791580
####################################################### # BOARD CONSTANTS ####################################################### BLACK = 'b' WHITE = 'w' EMPTY = -1 PAWN = 'p' KNIGHT = 'n' BISHOP = 'b' ROOK = 'r' QUEEN = 'q' KING = 'k' SYMBOLS = 'pnbrqkPNBRQK' DEFAULT_POSITION = 'rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1' POSSIBLE_RESULTS = ['1-0', '0-1', '1/2-1/2', '*'] # Directions pawns can move: forward 1, forward 2, right (capture), left (capture) PAWN_OFFSETS = { 'b': [16, 32, 17, 15], 'w': [-16, -32, -17, -15] } # Directions different pieces can move PIECE_OFFSETS = { 'n': [-18, -33, -31, -14, 18, 33, 31, 14], 'b': [-17, -15, 17, 15], 'r': [-16, 1, 16, -1], 'q': [-17, -16, -15, 1, 17, 16, 15, -1], 'k': [-17, -16, -15, 1, 17, 16, 15, -1] } MCTS_MAPPING = { 'p' : 1, 'n' : 2, 'b' : 3, 'r' : 4, 'q' : 5, 'k' : 6, } MCTS_DECODER = { 1 : 'p', 2 : 'n', 3 : 'b', 4 : 'r', 5 : 'q', 6 : 'k' } MCTS_COLOR_MAP = { 'w' : 1, 'b' : -1 } ATTACKS = [ 20,0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0,20, 0, 0, 20,0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0,20, 0, 0, 0, 0, 20, 0, 0, 0, 0, 24, 0, 0, 0, 0,20, 0, 0, 0, 0, 0, 0,20, 0, 0, 0, 24, 0, 0, 0,20, 0, 0, 0, 0, 0, 0, 0, 0,20, 0, 0, 24, 0, 0,20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20, 2, 24, 2,20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2,53, 56, 53, 2, 0, 0, 0, 0, 0, 0, 24,24,24,24,24,24,56, 0, 56,24,24,24,24,24,24, 0, 0, 0, 0, 0, 0, 2,53, 56, 53, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20, 2, 24, 2,20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,20, 0, 0, 24, 0, 0,20, 0, 0, 0, 0, 0, 0, 0, 0,20, 0, 0, 0, 24, 0, 0, 0,20, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 24, 0, 0, 0, 0,20, 0, 0, 0, 0, 20,0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0,20, 0, 0, 20,0, 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0,20 ] RAYS = [ 17, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 15, 0, 0, 17, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0, 17, 0, 0, 0, 0, 16, 0, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 16, 0, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 16, 0, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 16, 0, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 16, 15, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, -1, -1, -1,-1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0,-15,-16,-17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-15, 0,-16, 0,-17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-15, 0, 0,-16, 0, 0,-17, 0, 0, 0, 0, 0, 0, 0, 0,-15, 0, 0, 0,-16, 0, 0, 0,-17, 0, 0, 0, 0, 0, 0,-15, 0, 0, 0, 0,-16, 0, 0, 0, 0,-17, 0, 0, 0, 0,-15, 0, 0, 0, 0, 0,-16, 0, 0, 0, 0, 0,-17, 0, 0, -15, 0, 0, 0, 0, 0, 0,-16, 0, 0, 0, 0, 0, 0,-17 ] SHIFTS = { 'p': 0, 'n': 1, 'b': 2, 'r': 3, 'q': 4, 'k': 5 } FLAGS = { 'NORMAL': 'n', 'CAPTURE': 'c', 'BIG_PAWN': 'b', 'EP_CAPTURE': 'e', 'PROMOTION': 'p', 'KSIDE_CASTLE': 'k', 'QSIDE_CASTLE': 'q' } BITS = { 'NORMAL': 1, 'CAPTURE': 2, 'BIG_PAWN': 4, 'EP_CAPTURE': 8, 'PROMOTION': 16, 'KSIDE_CASTLE': 32, 'QSIDE_CASTLE': 64 } RANK_1 = 7 RANK_2 = 6 RANK_3 = 5 RANK_4 = 4 RANK_5 = 3 RANK_6 = 2 RANK_7 = 1 RANK_8 = 0 SQUARES = { 'a8': 0, 'b8': 1, 'c8': 2, 'd8': 3, 'e8': 4, 'f8': 5, 'g8': 6, 'h8': 7, 'a7': 16, 'b7': 17, 'c7': 18, 'd7': 19, 'e7': 20, 'f7': 21, 'g7': 22, 'h7': 23, 'a6': 32, 'b6': 33, 'c6': 34, 'd6': 35, 'e6': 36, 'f6': 37, 'g6': 38, 'h6': 39, 'a5': 48, 'b5': 49, 'c5': 50, 'd5': 51, 'e5': 52, 'f5': 53, 'g5': 54, 'h5': 55, 'a4': 64, 'b4': 65, 'c4': 66, 'd4': 67, 'e4': 68, 'f4': 69, 'g4': 70, 'h4': 71, 'a3': 80, 'b3': 81, 'c3': 82, 'd3': 83, 'e3': 84, 'f3': 85, 'g3': 86, 'h3': 87, 'a2': 96, 'b2': 97, 'c2': 98, 'd2': 99, 'e2': 100, 'f2': 101, 'g2': 102, 'h2': 103, 'a1': 112, 'b1': 113, 'c1': 114, 'd1': 115, 'e1': 116, 'f1': 117, 'g1': 118, 'h1': 119 } 
ROOKS = { 'w': [{'square': SQUARES['a1'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h1'], 'flag': BITS['KSIDE_CASTLE']}], 'b': [{'square': SQUARES['a8'], 'flag': BITS['QSIDE_CASTLE']}, {'square': SQUARES['h8'], 'flag': BITS['KSIDE_CASTLE']}] } KING_EVAL = [ [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -3.0, -4.0, -4.0, -5.0, -5.0, -4.0, -4.0, -3.0], [ -2.0, -3.0, -3.0, -4.0, -4.0, -3.0, -3.0, -2.0], [ -1.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0, -1.0], [ 2.0, 2.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0 ], [ 2.0, 3.0, 1.0, 0.0, 0.0, 1.0, 3.0, 2.0 ] ] QUEEN_EVAL = [ [ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0], [ -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [ -1.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [ -0.5, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [ 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.0, -0.5], [ -1.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.0, -1.0], [ -1.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, -1.0], [ -2.0, -1.0, -1.0, -0.5, -0.5, -1.0, -1.0, -2.0] ] ROOK_EVAL = [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [ 0.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ -0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5], [ 0.0, 0.0, 0.0, 0.5, 0.5, 0.0, 0.0, 0.0] ] BISHOP_EVAL = [ [ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0], [ -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0], [ -1.0, 0.0, 0.5, 1.0, 1.0, 0.5, 0.0, -1.0], [ -1.0, 0.5, 0.5, 1.0, 1.0, 0.5, 0.5, -1.0], [ -1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 0.0, -1.0], [ -1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, -1.0], [ -1.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, -1.0], [ -2.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -2.0] ] KNIGHT_EVAL = [ [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0], [-4.0, -2.0, 0.0, 0.0, 0.0, 0.0, -2.0, -4.0], [-3.0, 0.0, 1.0, 1.5, 1.5, 1.0, 0.0, -3.0], [-3.0, 0.5, 1.5, 2.0, 2.0, 1.5, 0.5, -3.0], [-3.0, 0.0, 1.5, 2.0, 2.0, 1.5, 0.0, -3.0], [-3.0, 0.5, 1.0, 1.5, 1.5, 1.0, 0.5, -3.0], [-4.0, -2.0, 0.0, 0.5, 0.5, 0.0, -2.0, -4.0], [-5.0, -4.0, -3.0, -3.0, -3.0, -3.0, -4.0, -5.0] ] PAWN_EVAL = [ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], [1.0, 1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0], [0.5, 0.5, 1.0, 2.5, 2.5, 1.0, 0.5, 0.5], [0.0, 0.0, 0.0, 2.0, 2.0, 0.0, 0.0, 0.0], [0.5, -0.5, -1.0, 0.0, 0.0, -1.0, -0.5, 0.5], [0.5, 1.0, 1.0, -2.0, -2.0, 1.0, 1.0, 0.5], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ]
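# --- Editor's note (not part of the original module): SQUARES uses a 0x88-style board
# --- layout, so ranks are 16 apart and files 1 apart. A quick self-contained check:
if __name__ == '__main__':
    # moving a knight from g1 by the offset -33 lands on f3 (-33 = -2*16 - 1)
    assert SQUARES['g1'] + (-33) == SQUARES['f3']
    # a square index is off the board whenever (index & 0x88) is non-zero
    assert (SQUARES['h1'] + 1) & 0x88 != 0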
2.03125
2
course/models.py
Mutghost01/ailms
0
12791581
from django.core.validators import MaxValueValidator from django.core.validators import MaxValueValidator from django.db import models from django.utils import timezone class Faculty(models.Model): name = models.CharField(max_length=80, blank=True, null=True) faculty_describtion = models.TextField(blank=True, null=True) class Meta: managed = False db_table = 'faculty' def __str__(self): return f'Faculty {self.name}' class Course(models.Model): # id = models.IntegerField(primary_key=True) name = models.CharField(max_length=80, blank=True, null=True) course_describtion = models.TextField(blank=True, null=True) faculty = models.ForeignKey('Faculty', models.DO_NOTHING, db_column='faculty', blank=True, null=True) category = models.IntegerField(blank=True, null=True) class Meta: managed = False db_table = 'course' def __str__(self): return f'Course {self.id} | Name: {self.name}' class Category(models.Model): name = models.CharField(max_length=80, blank=True, null=True) parent = models.ForeignKey('self', models.DO_NOTHING, db_column='parent', blank=True, null=True) class Meta: managed = False db_table = 'category' def __str__(self): return f'Category {self.name}' class Subject(models.Model): name = models.CharField(max_length=80, blank=True, null=True) category = models.ForeignKey(Category, models.DO_NOTHING, db_column='category', blank=True, null=True) thumb = models.CharField(max_length=100, blank=True, null=True) pic = models.CharField(max_length=200, blank=True, null=True) description = models.CharField(max_length=1000, blank=True, null=True) class Meta: managed = False db_table = 'subject' def __str__(self): return f'Subject {self.id} | Name: {self.name}' class SubjectRating(models.Model): subject = models.ForeignKey(Subject, models.DO_NOTHING, db_column='subject', blank=True, null=True) student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student', blank=True, null=True) rating = models.IntegerField(blank=True, null=True) commence = models.TextField(blank=True, null=True) timestamp = models.DateTimeField(blank=True, null=True) class Meta: managed = False db_table = 'subject_rating' def __str__(self): return f'Subject: {self.subject.name} | Student {self.student.account.username} | Rating: {self.rating}' class Enrollment(models.Model): subject = models.ForeignKey('Subject', models.DO_NOTHING, db_column='subject') student = models.ForeignKey('users.Student', models.DO_NOTHING, db_column='student') status = models.IntegerField(blank=True, null=True) # lesson = models.ForeignKey('Lesson', models.DO_NOTHING, db_column='lesson', blank=True, null=True) class Meta: managed = False db_table = 'enrollment' def __str__(self): return f'Student {self.student.account.username} | Subject: {self.subject.name}'
2.21875
2
src/striga/service/sitebus/_stsvcsb_bus.py
ateska/striga
0
12791582
<gh_stars>0 import os, sys, re, functools, copy, logging as L import striga.core.exception ### class Bus(object): CustomSiteBusDefs = {} PathCheckRE = re.compile(r'^[^//]*$') def __init__(self): self.IntroBusItems = [] #Iterate thru this (call each) in the begining self.CrossroadBusItems = {} #Check path against this, if ok, call item from that self.DefaultBusItem = None #Call when not found anything in self.CrossroadBusItems self.RootDir = None self.Index = None self.ErrorBus = None def _InsertBusItem(self, path, createitemfnct): if path is None: if self.DefaultBusItem is not None: raise striga.core.exception.StrigaConfigurationError("Default bus item is already configured!") self.DefaultBusItem = createitemfnct() return self.DefaultBusItem if self.PathCheckRE.match(path) is None: raise striga.core.exception.StrigaConfigurationError("Invalid path '%s' given!" % path) if self.CrossroadBusItems.has_key(path): raise striga.core.exception.StrigaConfigurationError("Bus item for '%s' is already configured!" % path) nbi = createitemfnct() self.CrossroadBusItems[path] = nbi return nbi def _configure(self, conffilename, index = None, rootdir = None): if rootdir is None: rootdir = os.path.dirname(conffilename) self.RootDir = os.path.normpath(os.path.abspath(rootdir)) if not os.path.isdir(self.RootDir): if os.path.exists(self.RootDir): L.warning("Bus item root directory is not directory: '%s'" % (self.RootDir)) else: L.warning("Bus item root directory doesn't exist: '%s'" % (self.RootDir)) self.Index = index defs = {} for key, fn in self.CustomSiteBusDefs.iteritems(): defs[key] = functools.partial(fn, self) defs.update({ 'location' : self.__configure_location, 'errorbus' : self.__configure_errorbus, 'var' : self.__configure_var, 'serve' : self.__configure_serve, 'view' : self.__configure_view, 'exec' : self.__configure_exec, 'controller' : self.__configure_controller, 'componentbus' : self.__configure_componentbus, '!' 
: self._configure_finished, }) return defs def __configure_location(self, conffilename, path = None, index = None, rootdir = '.'): ''' @param path - can be None as location can be default sitebus item too ''' from ._stsvcsb_location import Location l = self._InsertBusItem(path, Location) rootdir = os.path.join(self.RootDir, rootdir) return l._configure(conffilename = conffilename, index = index, rootdir = rootdir) def __configure_errorbus(self, conffilename, rootdir = '.'): if self.ErrorBus is not None: L.warning("Bus item has already one errorbus defined - overwriting") from ._stsvcsb_errorbus import ErrorBus self.ErrorBus = ErrorBus() rootdir = os.path.join(self.RootDir, rootdir) return self.ErrorBus._configure(conffilename = conffilename, rootdir = rootdir) def __configure_var(self, conffilename, name, default = 0): self.IntroBusItems.append(functools.partial(BusVar, name, default)) def __configure_serve(self, conffilename, pattern, path = None, buffersize = 64*1024): ''' Config keyword - serve ''' from ._stsvcsb_serve import Serve self._InsertBusItem(path, functools.partial(Serve, self.RootDir, pattern, buffersize)) def __configure_view(self, conffilename, source, mode, path = None, entry='main', pathlimit='==0'): ''' Config keyword - view ''' from ._stsvcsb_view import View self._InsertBusItem(path, functools.partial(View, self.RootDir, source, mode, entry, pathlimit)) def __configure_componentbus(self, conffilename, component, path = None, busname = 'componentbus'): ''' Config keyword - componentbus ''' from ._stsvcsb_compbusref import ComponentBusRef self._InsertBusItem(path, functools.partial(ComponentBusRef, component, busname)) def __configure_exec(self, conffilename, source, entry, path = None, pathlimit = '==0', rootdir = '.'): ''' Config keyword - exec ''' from ._stsvcsb_exec import Exec rootdir = os.path.join(self.RootDir, rootdir) self._InsertBusItem(path, functools.partial(Exec, rootdir, source, entry, pathlimit)) def __configure_controller(self, conffilename, source, controllerclass = 'Controller', path = None, pathlimit = '==0', rootdir = '.'): ''' Config keyword - Controller ''' from ._stsvcsb_cntrlr import Controller rootdir = os.path.join(self.RootDir, rootdir) nbi = self._InsertBusItem(path, functools.partial(Controller, rootdir, source, controllerclass, pathlimit)) return nbi._configure(conffilename = conffilename) def _configure_finished(self): pass def __call__(self, ctx, path): ''' Entry point to this bus object ''' SiteBusVar = ctx.req.Vars.SITEBUS SiteBusVar['RootDir'] = self.RootDir SiteBusVar['Location'] = self try: #First iterate thru IntroBusItems for ibm in self.IntroBusItems: ibm(ctx, path) #Check again as IntroBusItems can change path array if len(path) == 0: if self.Index is not None: path.append(self.Index) else: raise striga.core.exception.StrigaBusError("NotFound") #Then find item in CrossroadBusItems & DefaultBusItem bm = self.CrossroadBusItems.get(path[0], None) if bm is None: bm = self.DefaultBusItem if bm is None: L.warning("Site bus path not found: {0}".format(path)) raise striga.core.exception.StrigaBusError('NotFound') else: ctx.req.Vars.SITEBUS['LastPath'] = path.pop(0) bm(ctx, path) except striga.core.exception._StrigaClientRedirectBase: # Redirections are not solved thru ErrorBus raise except: ctx.err.exctype, ctx.err.excvalue = sys.exc_info()[:2] prev_excvalue = copy.copy(ctx.err.excvalue) if self.ErrorBus is not None: if isinstance(ctx.err.excvalue, striga.core.exception.StrigaBusError): epath = ctx.err.excvalue.Name else: 
L.exception("Generic exception during bus processing (you should use StrigaBusError exceptions)") epath = str(ctx.err.exctype) try: self.ErrorBus(ctx, epath) return False except: L.exception("Exception during error bus processing:") raise prev_excvalue else: raise def BusStart(self): for bm in self.IntroBusItems: if hasattr(bm, 'BusStart'): bm.BusStart() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStart'): bm.BusStart() if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 'BusStart'): self.DefaultBusItem.BusStart() if self.ErrorBus is not None: self.ErrorBus.BusStart() def BusStop(self): if self.ErrorBus is not None: self.ErrorBus.BusStop() if (self.DefaultBusItem is not None) and hasattr(self.DefaultBusItem, 'BusStop'): self.DefaultBusItem.BusStop() for bm in self.CrossroadBusItems.itervalues(): if hasattr(bm, 'BusStop'): bm.BusStop() for bm in self.IntroBusItems: if hasattr(bm, 'BusStop'): bm.BusStop() ### def BusVar(name, default, ctx, path): if len(path) == 0: raise striga.core.exception.StrigaBusError('NotFound') val = path.pop(0) if val == '': val = default ctx.req.Vars.SITEBUS[name] = val
2.3125
2
tests/test_vasprun.py
akiraakaishi/vasputils
0
12791583
<reponame>akiraakaishi/vasputils
import unittest

from vasputils.vasprun import from_string


class VectorTest(unittest.TestCase):
    def test_parse(self):
        v = from_string('<v>0.0 1.0 1.2</v>')
        # assertTrue(x, msg) would always pass here; compare the parsed values instead.
        self.assertEqual(v.value, [0.0, 1.0, 1.2])
2.6875
3
modules/dbnd/src/dbnd/api/shared_schemas/alerts_def_schema.py
dmytrostriletskyi/dbnd
0
12791584
from dbnd._core.tracking.schemas.base import ApiStrictSchema from dbnd._vendor.marshmallow import fields, pre_load class MLAlert(ApiStrictSchema): sensitivity = fields.Float() look_back = fields.Integer() class AlertDefsSchema(ApiStrictSchema): severity = fields.Str(required=True) type = fields.Str(required=True) user_metric = fields.Str() operator = fields.Str() is_str_value = fields.Bool() created_at = fields.DateTime() scheduled_job_name = fields.Str(attribute="scheduled_job.name") source_instance_name = fields.Method("get_tracking_source_name") env = fields.Method("get_tracking_source_env") # TODO_CORE: API: Deprecate airflow_server_info airflow_instance_name = fields.Method("get_tracking_source_name") project_id = fields.Int(attribute="job.project_id") project_name = fields.Str(attribute="job.project.name") alert_on_historical_runs = fields.Bool() alert_group_uid = fields.Str(allow_none=True) uid = fields.Str(allow_none=True) value = fields.Str(allow_none=True) job_id = fields.Int(allow_none=True) summary = fields.Str(allow_none=True) job_name = fields.Str(attribute="job.name", allow_none=True) task_repr = fields.Str(allow_none=True) task_name = fields.Str(allow_none=True) custom_name = fields.Str(allow_none=True) original_uid = fields.Str(allow_none=True) advanced_json = fields.Str(allow_none=True) scheduled_job_uid = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) # Fields for DatasetSlaAlert/DatasetSlaAdvancedAlert alert # -------------------------------------- seconds_delta = fields.Int(allow_none=True) # Converts to datetime.timedelta dataset_partial_name = fields.Str(allow_none=True) datasets_uids = fields.List(fields.Str(), allow_none=True) # Fields for OperationColumnStatAdvancedAlert alert # -------------------------------------- dataset_uid = fields.Str(allow_none=True) # Operation type (e.g. "read", "write", None=any) to filter stats by operation_type = fields.Str(allow_none=True) # Type of MetricRule, found in dbnd_web. Used to build advanced_json metrics_rules = fields.List(fields.Dict(), allow_none=True) # Used only used by the UI affected_datasets = fields.List(fields.Dict(), allow_none=True, dump_only=True) is_system = fields.Function( lambda alert_def: alert_def.owner == "system", dump_only=True, ) def get_tracking_source_name(self, obj): return self._get_tracking_source_instance(obj).name def get_tracking_source_env(self, obj): return self._get_tracking_source_instance(obj).env def _get_tracking_source_instance(self, obj): if obj.job: return obj.job.tracking_source return obj.tracking_source @pre_load def prepere(self, data: dict, **kwargs): value = data.get("value", None) if value is not None: data["value"] = str(data["value"]) return data class GroupAlertDefsSchema(ApiStrictSchema): type = fields.Str(required=True) tracking_source_uid = fields.UUID(required=True) severity = fields.Str(required=True) user_metric = fields.Str(required=True) value = fields.Str(allow_none=True) operator = fields.Str(allow_none=True) ml_alert = fields.Nested(MLAlert, allow_none=True) owner = fields.Str(allow_none=True) jobs = fields.List(fields.Int(), allow_none=True) custom_name = fields.Str(allow_none=True) custom_description = fields.Str(allow_none=True) @pre_load def prepere(self, data: dict, **kwargs): value = data.get("value", None) if value is not None: data["value"] = str(data["value"]) return data
1.859375
2
fuel_neutron/extensions/fuel.py
rmoe/fuel-neutron
0
12791585
<reponame>rmoe/fuel-neutron from neutron.api.extensions import ExtensionDescriptor from neutron.api import extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import base from neutron.common import exceptions as nexception from neutron import manager from neutron.quota import resource_registry class NicNotFound(nexception.NotFound): message = _("Nic %(nic_id)s could not be found.") NICS = 'nics' RESOURCE_ATTRIBUTE_MAP = { NICS: { 'id': { 'allow_post': False, 'allow_put': False, 'validate': {'type:uuid': None}, 'is_visible': True, 'primary_key': True }, 'tenant_id': { 'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': attr.TENANT_ID_MAX_LEN}, 'is_visible': True }, 'name': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': attr.NAME_MAX_LEN } }, 'mac': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:macaddress': None } }, 'if_type': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'interface_properties': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'current_speed': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:non_negative': None } }, 'max_speed': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:non_negative': None } }, 'driver': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'bus_info': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'pxe': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:boolean': None } }, 'offloading_modes': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } }, 'driver': { 'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': '', 'validate': { 'type:string': None } } } } class Fuel(ExtensionDescriptor): @classmethod def get_name(self): return 'Fuel Integration' @classmethod def get_alias(self): return 'fuel' @classmethod def get_description(self): return 'Integrates Neutron with Fuel' @classmethod def get_updated(self): return '2016-03-15T09:00:00-08:00' @classmethod def get_resources(cls): """Returns Ext Resources.""" my_plurals = [(key, key[:-1]) for key in RESOURCE_ATTRIBUTE_MAP.keys()] attr.PLURALS.update(dict(my_plurals)) exts = [] plugin = manager.NeutronManager.get_service_plugins()['FUEL'] for resource_name in ['nic']: collection_name = resource_name.replace('_', '-') + "s" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict()) resource_registry.register_resource_by_name(resource_name) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=True, allow_pagination=True, allow_sorting=True) ex = extensions.ResourceExtension(collection_name, controller, path_prefix='fuel', attr_map=params) exts.append(ex) return exts def update_attributes_map(self, attributes): super(Fuel, self).update_attributes_map( attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
1.664063
2
src/__init__.py
owenvoke/covid-19-tracker
0
12791586
<gh_stars>0 from .utils import UpdateHandler up = UpdateHandler()
1.179688
1
main_investment_checker.py
ginkgodango/lgs
1
12791587
import datetime as dt import numpy as np import pandas as pd # START USER INPUT lgs_filepath = 'U:/CIO/#Data/output/investment/checker/lgs_table.csv' jpm_filepath = 'U:/CIO/#Data/input/jpm/report/investment/LGSS Preliminary Performance 202005.xlsx' lgs_dictionary_filepath = 'U:/CIO/#Data/input/lgs/dictionary/2020/06/New Dictionary_v10.xlsx' FYTD = 11 report_date = dt.datetime(2020, 5, 31) # End USER INPUT # Reads LGS table df_lgs = pd.read_csv(lgs_filepath) # Reads LGS dictionary df_lgs_dict = pd.read_excel( pd.ExcelFile(lgs_dictionary_filepath), sheet_name='Sheet1', header=0 ) # Reads JPM Performance Report df_jpm = pd.DataFrame() sheet_to_columns_dict = { 'Page 3 NOF': 'A:N', 'Page 5 NOF': 'B:O', 'Page 6 NOF': 'B:O', 'Page 7 NOF': 'B:O', 'Page 8': 'D:O' } for sheet, columns in sheet_to_columns_dict.items(): print('Accessing:', sheet) df_sheet = pd.read_excel( pd.ExcelFile(jpm_filepath), sheet_name=sheet, usecols=columns, skiprows=[0, 1, 2] ) df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 1': 'JPM ReportName', 'Unnamed: 2': 'JPM ReportName', } ) if sheet == 'Page 8': df_sheet = df_sheet.rename( columns={ 'Unnamed: 0': 'ModelCode', 'Unnamed: 4': 'JPM ReportName', } ) df_jpm = pd.concat([df_jpm, df_sheet], sort=False) df_jpm = df_jpm.reset_index(drop=True) df_jpm = df_jpm.replace('-', np.nan) df_jpm = df_jpm.drop(columns=['ModelCode'], axis=1) df_jpm['Market Value'] = (df_jpm['Market Value']/1000000).round(2) # Reads footers and removes them df_footers = pd.read_excel('U:/CIO/#Investment_Report/Data/input/testing/20191031 Footers.xlsx') remove_items = list(df_footers['Footers']) + [np.nan, 'Excess return'] df_jpm = df_jpm[~df_jpm['JPM ReportName'].isin(remove_items)].reset_index(drop=True) df_lgs_jpm = pd.merge( left=df_lgs, right=df_jpm, on=['JPM ReportName'], how='outer' ) df_later = df_lgs_jpm[df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) df_lgs_jpm = df_lgs_jpm[~df_lgs_jpm['Manager'].isin([np.nan])].reset_index(drop=True) # Creates LGS to JPM column dictionary lgscolumn_to_jpmcolumn_dict = { 'Market Value_x': 'Market Value_y', '1_Return': '1 Month', '3_Return': '3 Months', 'FYTD_Return': 'FYTD', '12_Return': '1 Year', '36_Return': '3 Years', '60_Return': '5 Years', '84_Return': '7 Years' } # Performs the deviant check df_deviations = pd.DataFrame() deviants = [] columns = [] deviations = [] jpm_missing = [] lgs_missing = [] total_count = 0 deviant_count = 0 for lgscolumn, jpmcolumn in lgscolumn_to_jpmcolumn_dict.items(): for i in range(0, len(df_lgs_jpm)): deviation = df_lgs_jpm[lgscolumn][i] - df_lgs_jpm[jpmcolumn][i] if deviation >= 0.01: deviants.append(df_lgs_jpm['Manager'][i]) columns.append(jpmcolumn) deviations.append(deviation) deviant_count += 1 if (not pd.isna(df_lgs_jpm[jpmcolumn][i])) and (pd.isna(df_lgs_jpm[lgscolumn][i])): lgs_missing.append((df_lgs_jpm['Manager'][i], lgscolumn)) if (pd.isna(df_lgs_jpm[jpmcolumn][i])) and (not pd.isna(df_lgs_jpm[lgscolumn][i])): jpm_missing.append((df_lgs_jpm['JPM ReportName'][i], jpmcolumn)) total_count += 1 # Fixes the column names columns_fix = [] for column in columns: if column == 'Market Value_y': columns_fix.append('Market Value') else: columns_fix.append(column) df_deviations['Manager'] = deviants df_deviations['Column'] = columns_fix df_deviations['Deviations'] = deviations df_lgs_missing = pd.DataFrame(lgs_missing, columns=['Manager', 'Column']) df_jpm_missing = pd.DataFrame(jpm_missing, columns=['Manager', 'Column']) # Calculates accuracy accuracy = round(((total_count - 
deviant_count)/total_count)*100, 2) # Prints accuracy results print('\nMissing during check from LGS', lgs_missing) print('\nMissing during check from JPM', jpm_missing) print('\nThe deviants are:\n') print(df_deviations, '\n') print('Total Count: ', total_count, 'Deviant Count: ', deviant_count, 'Accuracy: ', accuracy, '%') # Checks for managers that have been completely missed. # Creates set containing fund managers that are currently open accounts. df_lgs_open = df_lgs_dict[df_lgs_dict['LGS Open'].isin([1])].reset_index(drop=True) df_lgs_open = df_lgs_open.rename(columns={'LGS Name': 'Manager'}) lgs_open_set = set(list(df_lgs_open['Manager'])) # Creates set containing strategies. df_lgs_strategy = df_lgs_dict[df_lgs_dict['LGS Strategy Aggregate'].isin([1])].reset_index(drop=True) df_lgs_strategy = df_lgs_strategy.rename(columns={'LGS Name': 'Manager'}) lgs_strategy_set = set(list(df_lgs_strategy['Manager'])) # Creates set containing liquidity accounts. df_lgs_liquidity = df_lgs_dict[df_lgs_dict['LGS Liquidity'].isin([1])].reset_index(drop=True) df_lgs_liquidity = df_lgs_liquidity.rename(columns={'LGS Name': 'Manager'}) lgs_liquidity_set = set(list(df_lgs_liquidity['Manager'])) # Creates set containing fund managers that have been checked. lgs_check_set = set(list(df_lgs_jpm['Manager'])) # Creates set containing fund managers that are open accounts but are not checked. df_lgs_missing_completely = lgs_open_set - lgs_check_set - lgs_strategy_set - lgs_liquidity_set - {np.nan} # Prints open accounts that are missing from LGS. print('\nMissing completely from LGS', df_lgs_missing_completely) # Import JPM_IAP, Accounts; By ID; Include Closed Accounts; Select All; Mode: Portfolio Only # jpm_iap_filepath = 'U:/CIO/#Investment_Report/Data/input/testing/jpm_iap/' # jpm_iap_filenames = sorted(os.listdir(jpm_iap_filepath)) # df_jpm_iap = pd.DataFrame() # for filename in jpm_iap_filenames: # jpm_iap_xlsx = pd.ExcelFile(jpm_iap_filepath + filename) # df_jpm_iap_temp = pd.read_excel( # jpm_iap_xlsx, # sheet_name='Sheet1', # skiprows=[0, 1], # header=0 # ) # df_jpm_iap_temp['Date'] = dt.datetime(int(filename[:4]), int(filename[4:6]), int(filename[6:8])) # df_jpm_iap = pd.concat([df_jpm_iap, df_jpm_iap_temp], sort=False) # # df_jpm_iap = df_jpm_iap.rename(columns={'Account Id': 'Manager'}).reset_index(drop=True) # df_jpm_iap = df_jpm_iap[['Manager', 'Date', 'Market Value']] # # # Merges the market values from JPM IAP with JPM HTS # df_jpm_main = pd\ # .merge( # left=df_jpm_iap, # right=df_jpm, # left_on=['Manager', 'Date'], # right_on=['Manager', 'Date'], # how='right' # )\ # .sort_values(['Manager', 'Date'])\ # .reset_index(drop=True)
3.015625
3
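A minimal sketch of the deviation-check pattern used in the checker script above, on toy data; the frames, column names, and values here are hypothetical stand-ins rather than the real LGS/JPM files, and the tolerance of 0.01 is the one the script uses.

import pandas as pd

# Hypothetical stand-ins for the two source tables being reconciled
df_a = pd.DataFrame({'JPM ReportName': ['X', 'Y'], '1_Return': [1.20, 0.50]})
df_b = pd.DataFrame({'JPM ReportName': ['X', 'Y'], '1 Month': [1.19, 0.50]})

merged = pd.merge(df_a, df_b, on='JPM ReportName', how='outer')
merged['deviation'] = merged['1_Return'] - merged['1 Month']

# Flag rows where the two sources disagree by 0.01 or more
deviants = merged[merged['deviation'].abs() >= 0.01]
accuracy = round((1 - len(deviants) / len(merged)) * 100, 2)
print(deviants)
print('Accuracy:', accuracy, '%')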
S1B_imzML_error_fix.py
luketrichardson/RKMD-MS-Imaging-Annotation-and-Filtering
1
12791588
<reponame>luketrichardson/RKMD-MS-Imaging-Annotation-and-Filtering
from pyimzml.ImzMLParser import ImzMLParser
from pyimzml.ImzMLWriter import ImzMLWriter
import numpy as np
import os
from tqdm import tqdm

## SCRIPT 1B: Run this script if SCRIPT 1A encounters a ValueError.

def imzML_fix(data_path, imzml_file, polarity_str='positive'):
    imzml_path = os.path.join(data_path, imzml_file)
    print('-- Parsing imzML file --\n')
    p = ImzMLParser(imzml_path)
    print('-- Done --\n')
    with ImzMLWriter(os.path.join(data_path, f'{imzml_file[:-6]}_fixed.imzML'), polarity=polarity_str) as writer:
        for idx, coords in enumerate(tqdm(p.coordinates, desc='Loading MS data')):
            try:
                mzs, intensities = p.getspectrum(idx)
                writer.addSpectrum(mzs, intensities, coords)
            except ValueError:
                # Write an empty spectrum at the same coordinates so the pixel grid stays complete
                # (coords added here to match the addSpectrum call above)
                writer.addSpectrum(np.zeros(0), np.zeros(0), coords)


if __name__ == '__main__':
    data_path = 'C:\\Path\\To\\IMS\\Data'  # Provide path to data directory with imzML and ibd files
    imzml_file = 'image_data_file.imzML'  # Provide imzML data file name

    imzML_fix(data_path, imzml_file)
2.3125
2
newsApp/docManager.py
adityabansal/newsAroundMe
9
12791589
import os
import json
import calendar
import time

from boto.s3.connection import S3Connection
from boto.s3.key import Key

from .cachingHelper import getCache
from .constants import LINKTAG_PUBTIME, FEEDTAG_DO_NOT_CLUSTER
from .dbhelper import parseConnectionString, getS3Connection
from .doc import Doc

def _getEpochSecs(t):
    return calendar.timegm(time.strptime(t[:19], "%Y-%m-%dT%H:%M:%S"))

class DocManager:
    """
    Manage documents stored in cloud.

    Contains functions for CRUD operations on documents
    """

    def __init__(self):
        """
        Instantiates a new instance of DocManager class

        'bucketConnString' : connection string of s3 bucket in which docs are stored.
        """

        self.bucketConnString = os.environ['DOCSBUCKET_CONNECTIONSTRING']
        self.cache = getCache()
        self.__cacheExpiry = 900

    def __getBucket(self):
        bucketConnParams = parseConnectionString(self.bucketConnString)
        conn = getS3Connection(self.bucketConnString)
        return conn.get_bucket(bucketConnParams['bucketName'], validate=False)

    def __isDocNew(self, key, timeLimit):
        if _getEpochSecs(key.last_modified) < timeLimit:
            return False

        doc = self.get(key.name)
        return (doc.tags[LINKTAG_PUBTIME] > timeLimit) and \
            (FEEDTAG_DO_NOT_CLUSTER not in doc.tags)

    def put(self, doc):
        k = Key(self.__getBucket())
        k.key = doc.key

        # not storing tags directly in blob's metadata as the maximum size
        # allowed there is only 2kb.
        tags = dict(doc.tags)
        tags['content'] = doc.content
        keyContents = json.dumps(tags)

        k.set_contents_from_string(keyContents)
        self.cache.set(k.key, keyContents, self.__cacheExpiry)

    def get(self, docKey):
        keyContents = self.cache.get(docKey)
        if not keyContents:
            k = Key(self.__getBucket())
            k.key = docKey
            keyContents = k.get_contents_as_string()
            self.cache.set(docKey, keyContents, self.__cacheExpiry)

        storedTags = json.loads(keyContents)
        content = storedTags.pop('content', None)
        tags = storedTags
        return Doc(docKey, content, tags)

    def delete(self, docKey):
        k = Key(self.__getBucket())
        k.key = docKey
        k.delete()
        self.cache.delete(docKey)
2.015625
2
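A short usage sketch for the DocManager above. The import path, the connection-string format, and the Doc constructor arguments are assumptions inferred from how the class itself reads them (it pulls the bucket string from the DOCSBUCKET_CONNECTIONSTRING environment variable and builds Doc(key, content, tags) in get()); they are not verified against the rest of the repository.

import os
from newsApp.docManager import DocManager
from newsApp.doc import Doc

# Assumed: S3 connection string supplied via the environment variable DocManager.__init__ reads
os.environ['DOCSBUCKET_CONNECTIONSTRING'] = 'bucketName=my-docs-bucket'  # hypothetical format

manager = DocManager()

# Store a document: key, content, and a tag dictionary (serialized together as JSON in S3)
doc = Doc('doc-123', 'article body text', {'publisher': 'example'})
manager.put(doc)

# Round-trip it back (served from cache within the 900 s expiry, otherwise fetched from S3)
fetched = manager.get('doc-123')
print(fetched.content, fetched.tags)

manager.delete('doc-123')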
proxy_check.py
Vaibhav/ProxyTools
1
12791590
import urllib2, socket

socket.setdefaulttimeout(180)

# read the list of proxy IPs in proxyList
def is_bad_proxy(pip):
    try:
        proxy_handler = urllib2.ProxyHandler({'http': pip})
        opener = urllib2.build_opener(proxy_handler)
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        urllib2.install_opener(opener)
        req = urllib2.Request('http://www.google.com')  # change the url address here
        sock = urllib2.urlopen(req)
    except urllib2.HTTPError, e:
        print 'Error code: ', e.code
        return e.code
    except Exception, detail:
        print "ERROR:", detail
        return 1
    return 0

filename = "proxylist-2016-11-01-01-32-21.txt"
f = open(filename)
proxyList = []
for line in f:
    line = line.rstrip('\n')
    proxyList.append(line)
print proxyList

x = open("new.txt", 'w')
count = 0
for item in proxyList:
    if is_bad_proxy(item):
        print "Bad Proxy", item
        count = count + 1
    else:
        x.write(item)
        x.write('\n')

print "SO MANY BAD PROXIES " + str(count)
x.close()
f.close()
3.3125
3
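The script above is written against the Python 2 urllib2 API (and print statements). A rough Python 3 sketch of the same proxy test, shown only for the check function and reusing the script's own test URL, would look like this:

import socket
import urllib.error
import urllib.request

socket.setdefaulttimeout(180)

def is_bad_proxy(pip):
    """Return 0 if the proxy can fetch the test URL, otherwise a non-zero error indicator."""
    try:
        proxy_handler = urllib.request.ProxyHandler({'http': pip})
        opener = urllib.request.build_opener(proxy_handler)
        opener.addheaders = [('User-agent', 'Mozilla/5.0')]
        urllib.request.install_opener(opener)
        req = urllib.request.Request('http://www.google.com')  # change the url address here
        urllib.request.urlopen(req)
    except urllib.error.HTTPError as e:
        print('Error code:', e.code)
        return e.code
    except Exception as detail:
        print('ERROR:', detail)
        return 1
    return 0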
genemethods/assemblypipeline/skesa.py
OLC-LOC-Bioinformatics/genemethods
1
12791591
<filename>genemethods/assemblypipeline/skesa.py<gh_stars>1-10 #!/usr/bin/env python3 from olctools.accessoryFunctions.accessoryFunctions import make_path, run_subprocess, write_to_logfile from genewrappers.biotools import bbtools from subprocess import CalledProcessError from click import progressbar import logging import shutil import os __author__ = 'adamkoziol' class Skesa(object): def main(self): self.skesa_assemble() self.best_assemblyfile() def skesa_assemble(self): """ Run skesa to assemble genomes """ with progressbar(self.metadata) as bar: for sample in bar: # Initialise the assembly command sample.commands.assemble = str() try: if sample.general.trimmedcorrectedfastqfiles: # If the sample is a pure isolate, assemble it. Otherwise, run the pre-metagenome pipeline try: status = sample.run.Description except AttributeError: status = 'unknown' if status == 'metagenome': self.merge(sample) else: # Set the output directory sample.general.assembly_output = os.path.join(sample.general.outputdirectory, 'assembly_output') make_path(sample.general.assembly_output) sample.general.assemblyfile = os.path.join(sample.general.assembly_output, '{name}_unfiltered.fasta' .format(name=sample.name)) sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output, '{name}.fasta' .format(name=sample.name)) fastqfiles = sample.general.trimmedcorrectedfastqfiles # Set the the forward fastq files sample.general.assemblyfastq = fastqfiles forward = fastqfiles[0] gz = True if '.gz' in forward else False # If there are two fastq files if len(fastqfiles) == 2: # Set the reverse fastq name https://github.com/ncbi/SKESA/issues/7 sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \ '--use_paired_ends --vector_percent 1 ' \ '--contigs_out {contigs}'\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # Same as above, but use single read settings for the assembler else: sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \ '--vector_percent 1 --contigs_out {contigs}'\ .format(fastqfiles=','.join(fastqfiles), threads=self.cpus, contigs=sample.general.assemblyfile) # If there are no fastq files, populate the metadata appropriately else: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.bestassemblyfile = 'NA' except AttributeError: sample.general.assembly_output = 'NA' sample.general.assemblyfastq = 'NA' sample.general.trimmedcorrectedfastqfiles = 'NA' sample.general.bestassemblyfile = 'NA' if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile): # Run the assembly out, err = run_subprocess(sample.commands.assemble) write_to_logfile(sample.commands.assemble, sample.commands.assemble, self.logfile, sample.general.logout, sample.general.logerr, None, None) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) def merge(self, sample): """ Use bbmerge to merge paired FASTQ files for use in metagenomics pipelines. 
Create a report with the total number of reads, and the number of reads that could be paired :param sample: metadata sample object flagged as a metagenome """ # Set the assembly file to 'NA' as assembly is not desirable for metagenomes sample.general.assemblyfile = 'NA' # Can only merge paired-end if len(sample.general.fastqfiles) == 2: outpath = os.path.join(sample.general.outputdirectory, 'merged_reads') make_path(outpath) # Merge path - keep all the merged FASTQ files in one directory merge_path = os.path.join(self.path, 'merged_reads') make_path(merge_path) # Set the name of the merged, and unmerged files sample.general.mergedreads = \ os.path.join(merge_path, '{}_paired.fastq.gz'.format(sample.name)) log = os.path.join(outpath, 'log') error = os.path.join(outpath, 'err') try: if not os.path.isfile(sample.general.mergedreads): # Run the merging command out, err, cmd = bbtools.bbmerge(forward_in=sorted(sample.general.trimmedcorrectedfastqfiles)[0], merged_reads=sample.general.mergedreads, mix=True, returncmd=True, threads=self.cpus) write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None) with open(log, 'w') as log_file: log_file.write(out) with open(error, 'w') as error_file: error_file.write(err) except (CalledProcessError, IndexError): delattr(sample.general, 'mergedreads') # Set the name of the report to store the metagenome file merging results report = os.path.join(self.reportpath, 'merged_metagenomes.csv') # Extract the total number of reads, and the number of reads that could be paired from the bbmerge # err stream num_reads, num_pairs = self.reads(error) # If the report doesn't exist, create it with the header and the results from the first sample if not os.path.isfile(report): with open(report, 'w') as report_file: report_file.write('Sample,TotalReads,PairedReads\n{sample},{total},{paired}\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) # If the report exists, open it to determine which samples have already been added - useful if re-running # the analysis else: lines = list() with open(report, 'r') as report_file: for line in report_file: lines.append(line.split(',')[0]) # Add the results to the report if sample.name not in lines: with open(report, 'a+') as report_file: report_file.write('{sample},{total},{paired}\n' .format(sample=sample.name, total=num_reads, paired=num_pairs)) @staticmethod def reads(err_log): """ Parse the outputs from bbmerge to extract the total number of reads, as well as the number of reads that could be paired :param err_log: bbmerge outputs the stats in the error file :return: num_reads, the total number of reads, paired_reads, number of paired readds """ # Initialise variables num_reads = 0 paired_reads = 0 # Open the log file with open(err_log, 'r') as error_log: # Extract the necessary information for line in error_log: if 'Pairs:' in line: num_reads = line.split('\t')[-1].rstrip() elif 'Joined:' in line: paired_reads = line.split('\t')[-2].rstrip() return num_reads, paired_reads def best_assemblyfile(self): """ Determine whether the contigs.fasta output file from the assembler is present. 
If not, set the .bestassembly attribute to 'NA' """ for sample in self.metadata: try: # Set the name of the filtered assembly file filtered_outputfile = os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name)) # Set the name of the unfiltered spades assembly output file if os.path.isfile(sample.general.assemblyfile): size = os.path.getsize(sample.general.assemblyfile) # Ensure that the assembly isn't just an empty file if size == 0: sample.general.bestassemblyfile = 'NA' else: sample.general.bestassemblyfile = sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else: sample.general.bestassemblyfile = 'NA' # Add the name and path of the filtered file to the metadata sample.general.filteredfile = filtered_outputfile except AttributeError: sample.general.assemblyfile = 'NA' sample.general.bestassemblyfile = 'NA' def __init__(self, inputobject): self.metadata = inputobject.runmetadata.samples self.start = inputobject.starttime self.cpus = inputobject.cpus self.path = inputobject.path self.logfile = inputobject.logfile self.reportpath = inputobject.reportpath make_path(os.path.join(self.path, 'BestAssemblies')) make_path(os.path.join(self.path, 'raw_assemblies')) make_path(self.reportpath) logging.info('Assembling sequences')
2.109375
2
Simulating Orbits with Runge Kutta.py
alexodowd/physicsprojects
0
12791592
<gh_stars>0 #!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed Mar 4 10:37:05 2020 @author: Alex """ import numpy as np import matplotlib.pyplot as plt plt.style.use('dark_background') from prettytable import PrettyTable G = 6.674e-11 Me = 5.972e24 #mass of the Earth (kg) rE = 6.371e6 #radius of earth (m) Mm = 7.348e22 #mass of the Moon (kg) Mr = 5e3 #mass of the rocket (kg) rM = 1.737e6 #radius of the moon (m) d = 3.844e8 # Distance between Earth and the Moon (m) #dx/dt def F1(vx): return vx #dy/dt def F2(vy): return vy #dvx/dt def F3(x,y): return -1 * (Me * x * G) / (((x**2)+(y**2))**(3/2)) #dvy/dt def F4(x,y): return -1 * (Me * y * G) / (((x**2)+(y**2))**(3/2)) #dvx/dt including influence of the Moon def F5(x,y): return (-1*(Mm * (x-d) * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * x * G) / (((x**2)+(y**2))**(3/2))) #dvy/dt including influence of the Moon def F6(x,y): return (-1*(Mm * y * G) / ((((x-d)**2)+(y**2))**(3/2))) + (-1 * (Me * y * G) / (((x**2)+(y**2))**(3/2))) def Exercise_4(): t0 = 0 t1 = 1.85e4 print(""" Welcome to Alex's Rocket Orbit Simulator! Please select which body you wish your rocket to orbit (enter 1 or 2): \n 1. Earth \n 2. Moon""") choice = input() if choice == "1": # Starting Coordinates (x,y) print(""" Please enter initial coordinates for your rocket (units are in m): (I reccommend starting with x = 7x10^6 and y = 0)""") x0 = input("x = ") y0 = input("y = ") # Starting Velocities (x,y) print(""" Please enter initial velocities along the x and y axis (units are in m/s) (If the previously reccommended altitude is set then start with x = 0, y = 7500)""") vx0 = input("Vx = ") vy0 = input("Vy = ") print(''' Calculating orbit... ''') h = 1 N=int(t1-t0/h) x, y, vx, vy, t, Ek, Ep, Et = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) x[0] = x0 y[0] = y0 vx[0] = vx0 vy[0] = vy0 t[0] = 0 r = np.hypot(x,y) # x and y coordinates compounded in a single position variable 'r' v = np.hypot(vx,vy) # Velocities in the x and y directions compounded in single variable 'v' r[0] = np.hypot(x[0],y[0]) v[0] = np.hypot(vx[0],vy[0]) #Energy Ek[0] = Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Me*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0] #Total energy is the sum of these two # Part A RK4 Implementation for i in range (0, N-1): #1 k1x = F1(vx[i]) k1y = F2(vy[i]) k1vx = F3(x[i],y[i]) k1vy = F4(x[i],y[i]) #2 k2x = F1(vx[i] + h * 0.5 * k1vx) k2y = F2(vy[i] + h * 0.5 * k1vy) k2vx = F3(x[i] + h * k1x * 0.5, y[i] + h * k1y * 0.5) k2vy = F4(x[i] + h * k1x * 0.5, y[i] + h * k1y * 0.5) #3 k3x = F1(vx[i] + h * 0.5 * k2vx) k3y = F2(vy[i] + h * 0.5 * k2vy) k3vx = F3(x[i] + h * k2x * 0.5, y[i] + h * k2y * 0.5) k3vy = F4(x[i] + h * k2x * 0.5, y[i] + h * k2y * 0.5) #4 k4x = F1(vx[i] + h * k3vx) k4y = F2(vy[i] + h * k3vy) k4vx = F3(x[i] + h * k3x, y[i] + h * k3y) k4vy = F4(x[i] + h * k3x, y[i] + h * k3y) x[i+1] = x[i] + (h/6) * (k1x + 2 * k2x + 2 * k3x + k4x) y[i+1] = y[i] + (h/6) * (k1y + 2 * k2y + 2 * k3y + k4y) vx[i+1] = vx[i] + (h/6) * (k1vx + 2 * k2vx + 2 * k3vx +k4vx) vy[i+1] = vy[i] + (h/6) * (k1vy + 2 * k2vy + 2 * k3vy + k4vy) t[i+1] = t[i] + h r[i+1] = np.hypot(x[i+1],y[i+1]) v[i+1] = np.hypot(vx[i+1],vy[i+1]) #Energy Ek[i+1] = 0.5*Mr*(v[i+1]**2) # Ek, Ep and total energy of the rocket for i+1 iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] if min(r) <= rE : print(""" "Your rocket has crashed!""") # Crash Test else: print(""" -------------------------- "Successful 
Flight! --------------------------""") print(""" DATA""") apsides = PrettyTable(['Min. Altitude (m)','Periapsis (m)','Apoapsis (m)']) apsides.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(r)),'{:.3}'.format(max(r))]) print(apsides) #Plotting fig1,ax1 = plt.subplots() ax1.set_title('Rocket orbit of Earth') ax1.set_xlabel('x position (m)') ax1.set_ylabel('y position (m)') ax1.plot(x,y, color = 'red') ax1.axis('equal') earth = plt.Circle([0,0], rE, color = 'teal') ax1.add_artist(earth) ax1.set_xlim([-2*rE,+2*rE]) ax1.set_ylim([-2*rE,+2*rE]) fig2,ax2 = plt.subplots() ax2.set_title('Energy analysis of a rocket orbiting Earth') ax2.set_xlabel('Time (s)') ax2.set_ylabel('Energy (J)') ax2.plot(t,Ek, label = 'Kinetic Energy', color = 'lime') ax2.plot(t,Ep, label = 'Potential Energy', color = 'cyan') ax2.plot(t,Et, label = 'Total Energy', color = '#F700FF') ax2.legend() plt.show() Exercise_4() if choice == "2": h = 50 N=int(t1-t0/h) x, y, vx, vy, Ek, Ep, Et, t = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) print(""" Please enter initial coordinates for your rocket:""") x0 = input("x = ") y0 = input("y = ") # Starting Velocities (x,y) print(""" Please enter initial velocities along the x and y axis""") vx0 = input("Vx = ") vy0 = input("Vy = ") print(''' Calculating orbit... ''') x[0] = x0 y[0] = y0 vx[0] = vx0 vy[0] = vy0 r = np.hypot(x,y) # x and y coordinates compounded in a single position variable 'r' v = np.hypot(vx,vy) # Velocities in the x and y directions compounded in single variable 'v' r[0] = np.hypot(x[0],y[0]) v[0] = np.hypot(vx[0],vy[0]) #Energy Ek[0] = Mr*(v[0]**2)/2 #Kinetic energy (1/2MV**2) Ep[0] = -Mm*Mr*G/r[0] #Gravitational potential energy (-GMm/r) Et[0] = Ek[0]+Ep[0] #Total energy is the sum of these two # Part B RK4 Implementation for i in range (0, N-1): #1 k1x = F1(vx[i]) k1y = F2(vy[i]) k1vx = F5(x[i],y[i]) k1vy = F6(x[i],y[i]) #2 k2x = F1(vx[i] + h * 0.5 * k1vx) k2y = F2(vy[i] + h * 0.5 * k1vy) k2vx = F5(x[i] + h * k1x * 0.5, y[i] + h * k1y * 0.5) k2vy = F6(x[i] + h * k1x * 0.5, y[i] + h * k1y * 0.5) #3 k3x = F1(vx[i] + h * 0.5 * k2vx) k3y = F2(vy[i] + h * 0.5 * k2vy) k3vx = F5(x[i] + h * k2x * 0.5, y[i] + h * k2y * 0.5) k3vy = F6(x[i] + h * k2x * 0.5, y[i] + h * k2y * 0.5) #4 k4x = F1(vx[i] + h * k3vx) k4y = F2(vy[i] + h * k3vy) k4vx = F5(x[i] + h * k3x, y[i] + h * k3y) k4vy = F6(x[i] + h * k3x, y[i] + h * k3y) x[i+1] = x[i] + (h/6) * (k1x + 2 * k2x + 2 * k3x + k4x) y[i+1] = y[i] + (h/6) * (k1y + 2 * k2y + 2 * k3y + k4y) vx[i+1] = vx[i] + (h/6) * (k1vx + 2 * k2vx + 2 * k3vx +k4vx) vy[i+1] = vy[i] + (h/6) * (k1vy + 2 * k2vy + 2 * k3vy + k4vy) t[i+1] = t[i] + h r[i+1] = np.hypot(x[i+1],y[i+1]) v[i+1] = np.hypot(vx[i+1],vy[i+1]) #Energy Ek[i+1] = 0.5*Mr*(v[i+1]**2) # Ek, Ep and total energy of the rocket for i+1 iterations Ep[i+1] = (-1*Mr*Me*G)/r[i+1] Et[i+1] = Ek[i+1] + Ep[i+1] param = PrettyTable(['Min. Altitude above Earth (m)','Min. 
Altitude above Moon (m)']) param.add_row(['{:.3}'.format(min(r)-rE),'{:.3}'.format(min(abs(d-r))),]) print(param) fig3,ax3 = plt.subplots() ax3.set_title('Rocket Orbit of the Moon') ax3.set_xlabel('Horizontal displacement') ax3.set_ylabel('Vertical displacement') ax3.plot(x,y, color = 'red') ax3.set_xlim([-5*rE,+1.2*d]) ax3.set_ylim([-0.5*d,+0.5*d]) earth = plt.Circle([0,0], rE, color = 'teal') moon = plt.Circle([d,0], rM, color = 'white') ax3.add_artist(earth) ax3.add_artist(moon) fig4,ax4 = plt.subplots() ax4.set_title('Energy analysis of Rocket Orbit of the Moon') ax4.set_xlabel('Time (s)') ax4.set_ylabel('Energy (J)') ax4.plot(t,Ek, label = 'Kinetic Energy', color = 'lime') ax4.plot(t,Ep, label = 'Potential Energy', color = 'cyan') ax4.plot(t,Et, label = 'Total Energy', color = '#F700FF') ax4.legend() plt.show() Exercise_4() Exercise_4()
2.75
3
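The orbit script above implements the classical fourth-order Runge-Kutta update inline for each state variable. A minimal, self-contained sketch of that same update on a scalar ODE (dy/dt = -y, exact solution e^{-t}) shows the pattern in isolation; the step size and horizon here are arbitrary illustration values, not taken from the script.

import numpy as np

def rk4_step(f, t, y, h):
    # Classical RK4: four slope evaluations combined with weights 1, 2, 2, 1
    k1 = f(t, y)
    k2 = f(t + 0.5 * h, y + 0.5 * h * k1)
    k3 = f(t + 0.5 * h, y + 0.5 * h * k2)
    k4 = f(t + h, y + h * k3)
    return y + (h / 6.0) * (k1 + 2 * k2 + 2 * k3 + k4)

f = lambda t, y: -y          # dy/dt = -y
t, y, h = 0.0, 1.0, 0.1
for _ in range(10):
    y = rk4_step(f, t, y, h)
    t += h

print(y, np.exp(-1.0))       # both close to 0.367879...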
blog/urls.py
admtomas/cybersecurity_blog
0
12791593
from django.urls import path

from . import views

app_name = 'blog'

urlpatterns = [
    path('', views.post_list, name='post_list'),
    path('<slug:post>/', views.post_detail, name='post_detail'),
    path('comment/reply/', views.reply_page, name='reply'),
    path('about', views.about_page, name='about'),
]
1.914063
2
plot_stats.py
ZhaomingXie/RLAlg
0
12791594
<reponame>ZhaomingXie/RLAlg
import pickle
import numpy as np
import matplotlib.pyplot as plt
import statistics

class Stats:
    def __init__(self, file):
        with open(file, 'rb') as fp:
            stats = pickle.load(fp)
        self.mean = stats[2]
        self.noisy_mean = stats[4]
        self.std = stats[3]
        self.samples = stats[1]
        self.low = []
        self.high = []
        for i in range(len(self.mean)):
            self.low.append(self.mean[i] - self.std[i])
            self.high.append(self.mean[i] + self.std[i])
            #self.samples.append(i)

    def plot(self, ax, color='g', variance_color='lightgreen', label=''):
        ax.plot(self.noisy_mean, color, label=label)
        #ax.fill_between(self.samples, self.low, self.high, alpha=1, color=variance_color)

stats_walker_no_contact_1 = Stats("stats/walker2d_no_contact_seed8_Iter201.stat")
stats_walker_contact_1 = Stats("stats/walker2d_contact_seed8_Iter201.stat")
stats_walker_contact_2 = Stats("stats/walker2d_contact_seed16_Iter201.stat")

fig = plt.figure()
ax = fig.add_subplot(111)
stats_walker_no_contact_1.plot(ax, color='r', label='Walker_Non_Partition', variance_color='salmon')
stats_walker_contact_1.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen')
stats_walker_contact_2.plot(ax, color='g', label='Walker_Partition', variance_color='lightgreen')
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
ax.set_xlim([0, 201])
plt.legend(loc='upper left')
plt.show()
2.828125
3
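The Stats class above indexes into a pickled list (index 1 = samples, 2 = mean, 3 = std, 4 = noisy mean). A small sketch that writes a file in that assumed layout, so the class can be exercised without real training logs; index 0 is not read by the class and is left as None here, and the file name is hypothetical.

import pickle
import numpy as np

# Assumed layout, inferred from Stats.__init__: [_, samples, mean, std, noisy_mean]
samples = list(range(50))
mean = np.linspace(0.0, 100.0, 50)
std = np.full(50, 5.0)
noisy_mean = mean + np.random.randn(50)
stats = [None, samples, mean, std, noisy_mean]

with open('toy_run.stat', 'wb') as fp:
    pickle.dump(stats, fp)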
api-intent/app.py
ClimenteA/Python-Chalice-AWSLambda-APIGateway
1
12791595
<gh_stars>1-10
import requests
from chalice import Chalice
from chalicelib import (
    productSchemaIsValid,
    cleanProductData,
    uploadFromMediaUrls,
    saveProductData,
    getProductDataByID,
    deleteProductDataByID
)

app = Chalice(app_name='api-intent')
# All commented lines are for debugging
# app.debug = False

# http localhost:8000
@app.route('/')
def index():
    return {'hello': 'world!'}

# http POST localhost:8000/products
# http POST localhost:8000/products URL="https://bad-url.nogood"
# http POST localhost:8000/products URL="https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json"

# image_list = [
#     "https://softgata.com/assets/django.png",
#     "https://softgata.com/assets/fastapi.svg",
#     "https://softgata.com/assets/svelte.svg"
# ]

@app.route('/products', methods=['POST'])
def saveProduct():
    payload = app.current_request.json_body

    if not payload:
        return {"message": "URL not found!", "productData": None}

    if 'URL' in payload:
        try:
            product_data = requests.get(payload["URL"]).json()
        except:
            return {"message": "Invalid URL!", "productData": None}

    # product_data = {"bad": "productData"}

    if not productSchemaIsValid(product_data):
        return {"message": "Invalid product schema!", "productData": None}

    product_data = cleanProductData(product_data)
    product_data["media"]["uploadedImages"] = uploadFromMediaUrls(product_data["media"]["images"])
    # product_data["media"]["uploadedImages"] = uploadFromMediaUrls(image_list)

    try:
        saveProductData(product_data)
    except Exception as e:
        return {"message": str(e), "productData": None}

    return {"message": "Success!", "productData": product_data}

# http localhost:8000/products/42
@app.route('/products/{productId}', methods=['GET'])
def getProduct(productId):
    try:
        product_data = getProductDataByID(productId)
        if not product_data:
            return {"message": "Product not found!"}
        return {"message": "Success!", "productData": product_data}
    except:  # if not int
        return {"message": "Missing ID!", "productData": None}

# http localhost:8000/products/
@app.route('/products', methods=['GET'])
def failProduct():
    return {"message": "Missing ID!", "productData": None}

# http localhost:8000/products/42/delete
@app.route('/products/{productId}/delete', methods=['GET'])
def deleteProduct(productId):
    try:
        deleteProductDataByID(productId)
        return {"message": "Product deleted!"}
    except:
        return {"message": "Missing ID!"}
2.609375
3
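A hedged client-side sketch of calling the product routes above once the Chalice app is served locally (its own comments use http localhost:8000, the default for `chalice local`); the retailer-data URL and the product id 42 are the ones already referenced in those comments, and the base URL would differ for a deployed API Gateway stage.

import requests

BASE = 'http://localhost:8000'  # default `chalice local` address; replace for a deployed stage

# Create a product from a retailer JSON feed (URL taken from the route comments above)
resp = requests.post(
    f'{BASE}/products',
    json={'URL': 'https://raw.githubusercontent.com/ClimenteA/Python-Chalice-AWSLambda-APIGateway/main/retailer-data.json'},
)
print(resp.json()['message'])

# Fetch and then delete a product by id (42 is the example id used in the comments)
print(requests.get(f'{BASE}/products/42').json())
print(requests.get(f'{BASE}/products/42/delete').json())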
__init__.py
acegiak/midicontrol-skill
0
12791596
from mycroft import MycroftSkill, intent_file_handler


class Midicontrol(MycroftSkill):
    def __init__(self):
        MycroftSkill.__init__(self)

    @intent_file_handler('midicontrol.intent')
    def handle_midicontrol(self, message):
        self.speak_dialog('midicontrol')


def create_skill():
    return Midicontrol()
2.203125
2
st_rationale.py
microsoft/RationaleST
0
12791597
<gh_stars>0 """ Author: <NAME> (<EMAIL>) Code for Self-training for Rationale using few-shot learning. This code base is adapted from UST (https://github.com/microsoft/UST) """ from collections import defaultdict from sklearn.utils import shuffle from transformers import * import logging import math import models import numpy as np import os, sys import json import nltk import tensorflow as tf import tensorflow.keras as K import tensorflow.keras.backend as kb import tensorflow_addons as tfa from focal_loss import BinaryFocalLoss, SparseCategoricalFocalLoss import random from sklearn.metrics import f1_score from sklearn.metrics import precision_recall_fscore_support logger = logging.getLogger('STRationale') def create_learning_rate_scheduler(max_learn_rate=5e-5, end_learn_rate=1e-7, warmup_epoch_count=10, total_epoch_count=90): def lr_scheduler(epoch): if epoch < warmup_epoch_count: res = (max_learn_rate/warmup_epoch_count) * (epoch + 1) else: res = max_learn_rate*math.exp(math.log(end_learn_rate/max_learn_rate)*(epoch-warmup_epoch_count+1)/(total_epoch_count-warmup_epoch_count+1)) return float(res) learning_rate_scheduler = tf.keras.callbacks.LearningRateScheduler(lr_scheduler, verbose=1) return learning_rate_scheduler def train_model(max_seq_length, X, y, X_test, y_test, X_unlabeled, model_dir, tokenizer, sup_batch_size=4, unsup_batch_size=32, unsup_size=4096, sample_size=16384, TFModel=TFBertModel, Config=BertConfig, pt_teacher_checkpoint='bert-base-uncased', sample_scheme='easy_bald_class_conf', T=30, alpha=0.1, valid_split=0.5, sup_epochs=70, unsup_epochs=25, N_base=10, dense_dropout=0.5, attention_probs_dropout_prob=0.3, hidden_dropout_prob=0.3, test_data=None, unlabeled_data=None, class_weight=None, type_="token", X_dev=None, y_dev=None, task=None): #labels = [0, 1] #fix hardcoding labels = set(y[:,0]) logger.info ("Class labels {}".format(labels)) #split X and y to train and dev with valid_split if valid_split > 0: train_size = int((1. 
- valid_split)*len(X["input_ids"])) if '_neg' in type_: X_train, y_train = {"input_ids": X["input_ids"][:train_size], "token_type_ids": X["token_type_ids"][:train_size], "attention_mask": X["attention_mask"][:train_size], "input_ids_r":X["input_ids_r"][:train_size], "token_type_ids_r":X["token_type_ids_r"][:train_size], "attention_mask_r":X["attention_mask_r"][:train_size], "input_ids_neg":X["input_ids_neg"][:train_size], "token_type_ids_neg":X["token_type_ids_neg"][:train_size], "attention_mask_neg":X["attention_mask_neg"][:train_size]}, y[:train_size] X_dev, y_dev = {"input_ids": X["input_ids"][train_size:], "token_type_ids": X["token_type_ids"][train_size:], "attention_mask": X["attention_mask"][train_size:], "input_ids_r":X["input_ids_r"][train_size:], "token_type_ids_r":X["token_type_ids_r"][train_size:], "attention_mask_r":X["attention_mask_r"][train_size:], "input_ids_neg":X["input_ids_neg"][train_size:], "token_type_ids_neg":X["token_type_ids_neg"][train_size:], "attention_mask_neg":X["attention_mask_neg"][train_size:]}, y[train_size:] elif 'joint' in type_: X_train, y_train = {"input_ids": X["input_ids"][:train_size], "token_type_ids": X["token_type_ids"][:train_size], "attention_mask": X["attention_mask"][:train_size], "input_ids_r":X["input_ids_r"][:train_size], "token_type_ids_r":X["token_type_ids_r"][:train_size], "attention_mask_r":X["attention_mask_r"][:train_size]}, y[:train_size] X_dev, y_dev = {"input_ids": X["input_ids"][train_size:], "token_type_ids": X["token_type_ids"][train_size:], "attention_mask": X["attention_mask"][train_size:], "input_ids_r":X["input_ids_r"][train_size:], "token_type_ids_r":X["token_type_ids_r"][train_size:], "attention_mask_r":X["attention_mask_r"][train_size:]}, y[train_size:] else: X_train, y_train = {"input_ids": X["input_ids"][:train_size], "token_type_ids": X["token_type_ids"][:train_size], "attention_mask": X["attention_mask"][:train_size]}, y[:train_size] X_dev, y_dev = {"input_ids": X["input_ids"][train_size:], "token_type_ids": X["token_type_ids"][train_size:], "attention_mask": X["attention_mask"][train_size:]}, y[train_size:] else: X_train, y_train = X, y X_dev, y_dev = X_dev, y_dev logger.info("X Train Shape: {} {}".format(X_train["input_ids"].shape, y_train.shape)) logger.info("X Dev Shape: {} {}".format(X_dev["input_ids"].shape, y_dev.shape)) logger.info("X Test Shape: {} {}".format(X_test["input_ids"].shape, y_test.shape)) logger.info ("X Unlabeled Shape: {}".format(X_unlabeled["input_ids"].shape)) strategy = tf.distribute.MirroredStrategy() gpus = strategy.num_replicas_in_sync logger.info('Number of devices: {}'.format(gpus)) #run the base model n times with different initialization to select best base model based on validation loss best_base_model = None best_validation_loss = np.inf for counter in range(N_base): #original N_base=10 with strategy.scope(): if 'mtl' in type_: rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_mtl(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="dense_3_classification_acc")])#, 
tf.keras.metrics.SparseCategoricalAccuracy(name="token_acc")]) #, sample_weight_mode="temporal") elif type_ == 'joint': rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) model = models.construct_teacher_joint(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")], 'l2_distance': None}) elif 'joint_neg' in type_: rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) def custom_loss(y_true, y_pred): cce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) if 'focal' in type_: cce = SparseCategoricalFocalLoss(gamma=2, reduction=tf.keras.losses.Reduction.NONE) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss = 0.0 #logger.info(l1_loss) return cce_loss + 0.01*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) model = models.construct_teacher_joint_neg(TFModel, Config, pt_teacher_checkpoint, max_seq_length, len(labels), dense_dropout=dense_dropout, attention_probs_dropout_prob=attention_probs_dropout_prob, hidden_dropout_prob=hidden_dropout_prob) loss_weights = [1.0, 1.0, 1.0, 1.0] if '_noexp' in type_: loss_weights = [1.0, 0.0, 0.0, 0.0] elif '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0] model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier':rat_loss, 'rationale_task_classifier': None, 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")], 'not_rationale_task_classifier': None}, loss_weights=loss_weights) if counter == 0: logger.info(model.summary()) model_file = os.path.join(model_dir, "model_label.h5") model_file_task = os.path.join(model_dir, "model_task.h5") model_file_best = os.path.join(model_dir, "model_best.h5") if os.path.exists(model_file): model.load_weights(model_file) #model_task.load_weights(model_file_task) best_base_model = model logger.info ("Model file loaded from {}".format(model_file)) break elif 'mtl' in type_ : logger.info(y_train.shape) model.fit(x=X_train, 
y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]]) elif '_neg' in type_ : y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=sup_batch_size*1, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)]) #, class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]) elif 'joint' in type_: _placeholder_labels = np.empty((y_train.shape[0], y_train.shape[0])) model.fit(x=X_train, y=[y_train[:,0], y_train, y_train[:,0], np.ones(len(y_train))], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=sup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='loss', patience=5, restore_best_weights=True)]) # class_weight=class_weight) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]) logger.info ("Validation loss for run {} : {}".format(counter, val_loss)) if val_loss[0] < best_validation_loss: best_base_model = model best_validation_loss = val_loss[0] model = best_base_model ''' if 'mtl' in type_: logger.info ("Best validation acc for base model {}: {}".format(best_validation_loss, model.evaluate(X_dev, [y_dev[:,0],y_dev[:,1:]]))) ''' if not os.path.exists(model_file): model.save_weights(model_file) logger.info ("Model file saved to {}".format(model_file)) best_val_acc = 0. best_test_acc = 0. max_test_acc = 0. max_task_acc = 0. max_best_acc = 0. val_loss = 0. 
if 'mtl' in type_: logger.info("y_test: {}".format(y_test)) test_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[4] task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[3] val_loss = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[0] elif '_neg' in type_: out = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.full((len(y_test), len(labels)), 1/len(labels))]) task_acc, test_acc, r_acc = out[3], out[4], out[5] elif 'joint' in type_: out = model.evaluate(X_test, [y_test[:,0], y_test, y_test[:,0], np.ones(len(y_test))]) task_acc, test_acc, r_acc = out[3], out[4], out[5] logger.info ("Test token acc for run {} : {}".format(counter, test_acc)) logger.info ("Best Test task acc for run {} with total loss : {}".format(counter, task_acc)) if 'mtl' in type_: class_acc = model.predict(X_test)[0] test_pred = model.predict(X_test)[1] class_acc = np.argmax(class_acc, axis=-1) elif 'joint' in type_: out = model.predict(X_test) class_acc, test_pred, r_acc = out[0], out[1], out[2] class_acc = np.argmax(class_acc, axis=-1) logger.info("Class predictions shape {}".format(class_acc.shape)) logger.info("Teacher model best score (macro/task): {}".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='macro'))) logger.info("Teacher model best score (micro/task): {}".format(precision_recall_fscore_support(class_acc, y_test[:,0], average='micro'))) logger.info("Token Predictions shape {}".format(test_pred.shape)) pred, truth = [], [] logger.info(test_pred) test_pred = np.argmax(tf.nn.softmax(test_pred, axis=-1), axis=-1) logger.info("Printing prediction data on teacher model for run {}: {}".format(counter, test_pred)) tp, fn, fp = 0, 0, 0 pred_1, pred_0, truth_1, truth_0 = 0, 0, 0, 0 for i in range(len(test_pred)): temp_p, temp_t, ct = [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test["input_ids"][i])[1:] for j in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: #to skip evaluation of the task label temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 += y_test[i].sum() truth_0+= max_seq_length-truth_1 pred.append(' '.join(temp_p)) truth.append(' '.join(temp_t)) for word in temp_p: if word in temp_t: ct+=1 temp_t.remove(word) else: fp+=1 tp +=ct fn += (y_test[i].sum()-ct) p = tp/(tp+fp+0.0000001) r = tp/(tp+fn+0.0000001) logger.info("Token-level: {}".format((tp)/(tp+(0.5*(fp+fn))))) logger.info("Rationale coverage (recall): {}".format(r)) logger.info("Token Precision: {}".format(p)) logger.info("Token overlap: {}".format(tp/(tp+fp+fn))) score1, score2, score3, score4 = 0.0, 0.0, 0.0, 0.0 for i in range(len(pred)): score1 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(1, 0, 0, 0)) score2 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 1, 0, 0)) score3 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 1, 0)) score4 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 0, 1)) logger.info("BLEU-1 score of rationales on test set (teacher model): {} ".format(score1/len(pred))) logger.info("BLEU-2 score of rationales on test set (teacher model): {} ".format(score2/len(pred))) logger.info("BLEU-3 score of rationales on test set (teacher model): {} ".format(score3/len(pred))) logger.info("BLEU-4 score of rationales on test set (teacher model): {} ".format(score4/len(pred))) 
best_loss = np.inf data = [] for i in range(len(X_test["input_ids"])): text = tokenizer.convert_ids_to_tokens(X_test["input_ids"][i]) temp = dict() temp['text'] = ' '.join(text) temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_teacher_'+type_+'.json'), 'w') as f: json.dump(data, f) model_student = None # model_task for epoch in range(unsup_epochs): logger.info ("Starting loop {}".format(epoch)) if type_ == 'mtl': test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-1] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[0] if task_acc > max_task_acc: logger.info ("Val acc (task) {}".format(task_acc)) max_task_acc = task_acc model.save_weights(model_file_best) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:]], verbose=0)[-2] test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0)[-2] elif 'joint_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) y_dev_plg = [y_dev[:,1:], y_dev[:,0], np.full((len(y_dev),len(labels)), 1/len(labels))] y_test_plg = [y_test[:,1:], y_test[:,0], np.full((len(y_test),len(labels)), 1/len(labels))] test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[-3] out1 = model.predict(X_test) acc1, y_pred1, r_acc1 = out1[0], out1[1], out1[2] y_pred1 = np.argmax(y_pred1, axis=-1) acc1 = np.argmax(acc1, axis=-1) r_acc1 = np.argmax(r_acc1, axis=-1) logger.info("Model performance for token (macro/task): {}".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='micro'))) logger.info("Model performance for token (macro/task): {}".format(precision_recall_fscore_support(y_pred1, y_test[:,1:], average='macro'))) logger.info("Model performance for task (macro/task): {}".format(precision_recall_fscore_support(acc1, y_test[:,0], average='macro'))) val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], y_neg_dev], verbose=0)[0] if task_acc > max_task_acc: logger.info ("Val acc (task) {}".format(task_acc)) max_task_acc = task_acc best_val_acc = task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc = task_acc #model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], y_neg_dev], verbose=0)[-3] if test_acc > max_test_acc: max_test_acc = test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0)[-3] elif type_ == 'joint': # or 'joint_neg' in type_: test_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-2] task_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] val_loss = model.evaluate(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.ones(len(y_dev))], verbose=0)[0] if task_acc > max_task_acc: logger.info ("Val acc (task) {}".format(task_acc)) max_task_acc = task_acc best_val_acc = task_acc model.save_weights(model_file_best) #_student = deepcopy(model) val_acc = model.evaluate(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))], verbose=0)[-3] ''' if val_loss < best_loss: best_loss = val_loss model.save_weights(model_file_best) #_student = 
deepcopy(model) ''' if test_acc > max_test_acc: max_test_acc = test_acc test_task_acc = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0)[-3] if '_neg' in type_: y_neg_dev = np.full((len(y_dev), len(labels)), 1/len(labels)) y_neg_test = np.full((len(y_test), len(labels)), 1/len(labels)) temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], y_neg_test], verbose=0) elif 'joint' in type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:], y_test[:,0], np.ones(len(y_test))], verbose=0) elif 'mtl' in type_: temp = model.evaluate(X_test, [y_test[:,0], y_test[:,1:]], verbose=0) logger.info("Print acc (task) for joint {}".format(temp)) logger.info ("Val acc (token) {}".format(test_acc)) logger.info ("Val acc (task) {}".format(task_acc)) logger.info ("Test acc (task) {}".format(test_task_acc)) if test_task_acc >= max_best_acc: max_best_acc = test_task_acc model_file = os.path.join(model_dir, "model_token_{}_{}.h5".format(epoch, sample_scheme)) model_file_task = os.path.join(model_dir, "model_task_{}_{}.h5".format(epoch, sample_scheme)) if os.path.exists(model_file): model.load_weights(model_file) logger.info ("Model file loaded from {}".format(model_file)) continue if 'mtl' in type_ : acc, y_pred = model.predict(X_unlabeled, batch_size=256) #y_val = np.amax(acc, axis=-1) #y_rat = np.amax(y_pred, axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) elif 'joint' in type_: out = model.predict(X_unlabeled, batch_size=64) acc, y_pred, r_acc = out[0], out[1], out[2] #y_val = np.amax(acc, axis=-1) #y_rat = np.amax(y_pred, axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #compute confidence on the unlabeled set if sample_size < len(X_unlabeled["input_ids"]): logger.info ("Evaluating confidence on {} number of instances sampled from {} unlabeled instances".format(sample_size, len(X_unlabeled["input_ids"]))) indices = np.random.choice(len(X_unlabeled["input_ids"]), sample_size, replace=False) if '_neg' in type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled["input_ids"][indices], 'token_type_ids': X_unlabeled["token_type_ids"][indices], 'attention_mask': X_unlabeled["attention_mask"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices], 'input_ids_neg':X_unlabeled['input_ids_neg'][indices], 'token_type_ids_neg':X_unlabeled['token_type_ids_neg'][indices], 'attention_mask_neg':X_unlabeled['attention_mask_neg'][indices]}, y_pred[indices] elif 'joint' in type_: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled["input_ids"][indices], 'token_type_ids': X_unlabeled["token_type_ids"][indices], 'attention_mask': X_unlabeled["attention_mask"][indices], 'input_ids_r':X_unlabeled['input_ids_r'][indices], 'token_type_ids_r':X_unlabeled['token_type_ids_r'][indices], 'attention_mask_r':X_unlabeled['attention_mask_r'][indices]}, y_pred[indices] else: X_unlabeled_sample, y_pred = {'input_ids': X_unlabeled["input_ids"][indices], 'token_type_ids': X_unlabeled["token_type_ids"][indices], 'attention_mask': X_unlabeled["attention_mask"][indices]}, y_pred[indices] else: logger.info ("Evaluating confidence on {} number of instances".format(len(X_unlabeled["input_ids"]))) X_unlabeled_sample = X_unlabeled #X_unlabeled_sample = {'input_ids': X_unlabeled["input_ids"][indices], 'token_type_ids': 
X_unlabeled["token_type_ids"][indices], 'attention_mask': X_unlabeled["attention_mask"][indices]} #logger.info (X_unlabeled_sample["input_ids"][:5]) if 'joint' in type_: ids = [] attention_mask_r = np.ones((len(y_pred), max_seq_length)) attention_mask_r[:,1:] = np.array(y_pred) #logger.info(y_pred.shape) #logger.info("Percentage of rationales selected: {}".format(np.mean(np.sum(attention_mask_r, axis=-1)))) attention_mask_r[:,0] = 1 negation_mask = np.where(attention_mask_r==0, 1, 0) negation_mask[:,0] = 1 X_sample = {"input_ids": np.array(X_unlabeled_sample["input_ids"]), "token_type_ids": np.array(X_unlabeled_sample['token_type_ids']), "attention_mask": attention_mask_r} #mask tokens that are not rationales u-r if '_neg' in type_: X_negation_sample = {"input_ids": np.array(X_unlabeled_sample["input_ids"]), "token_type_ids": np.array(X_unlabeled_sample['token_type_ids']), "attention_mask": negation_mask} for i in range(len(y_pred)): X_sample["input_ids"][i, 1:] = np.where(y_pred[i]==0, 103, X_sample["input_ids"][i, 1:]) if '_neg' in type_: X_negation_sample["input_ids"][i, 1:] = np.where(y_pred[i]==0, X_negation_sample["input_ids"][i, 1:], 103) X_negation_sample["input_ids"][:,0] = 101 X_sample["input_ids"][:,0] = 101 logger.info("Extracted rationale from teacher model as input for task: {}".format(X_sample["input_ids"][:5])) logger.info("Extracted rationale from teacher model as input for task: {}".format(X_negation_sample["input_ids"][:5])) y_mean, y_var, y_T = None, None, None if 'mtl' in type_: acc, y_pred = model.predict(X_unlabeled_sample, batch_size=256) y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) elif 'joint' in type_: if 'pruthi_' in type_: out = y_train acc, y_pred, r_acc = y_train[:,0], y_train[:,1:], y_train[:,0] y_val = acc y_rat = np.array(y_pred).astype('float') #y_rat = y_rat[:,1:] #y_pred = y_pred[:,1:] else: out = model.predict(X_unlabeled_sample, batch_size=64) acc, y_pred, r_acc = out[0], out[1], out[2] y_val = np.amax(tf.math.softmax(acc, axis=-1).numpy(), axis=-1) y_rat = np.amax(tf.math.softmax(y_pred, axis=-1).numpy(), axis=-1) y_pred = np.argmax(y_pred, axis=-1) #.flatten() acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) #y_rat = y_rat[:, 1:] #y_pred = y_pred[:,1:] # sample from unlabeled set if 'uni' in sample_scheme: logger.info ("Sampling uniformly") if unsup_size < len(X_unlabeled_sample['input_ids']): '''X_unlabeled_sample, y_pred = {"input_ids": X_unlabeled_sample['input_ids'][indices], "token_type_ids": X_unlabeled_sample['token_type_ids'][indices], "attention_mask": X_unlabeled_sample['attention_mask'][indices]}, y_pred[indices] if type_ == 'decoupled' or ('joint' in type_): X_sample = {"input_ids": X_sample['input_ids'][indices], "token_type_ids": X_sample['token_type_ids'][indices], "attention_mask": X_sample['attention_mask'][indices]} ''' #acc = acc[:,None] #y_batch = np.concatenate((acc[indices], y_pred), axis=1) acc = acc[:,None] y_batch = np.concatenate((acc, y_pred), axis=1) logging.info("y_batch shape {}".format(y_batch.shape)) indices = [] for i in labels: indx = np.where(y_batch[:,0]==i)[0] GLOBAL_SEED = int(os.getenv("PYTHONHASHSEED")) random.Random(GLOBAL_SEED).shuffle(indx) if len(indx) > unsup_size: indx = indx[:unsup_size] logger.info("Shape of predicted labels for class {} : {}".format(i, len(indx))) indices.extend(indx) indices = np.asarray(indices) #indices = 
np.random.choice(len(X_unlabeled_sample['input_ids']), unsup_size, replace=False) X_batch, y_batch = {"input_ids": X_unlabeled_sample['input_ids'][indices], "token_type_ids": X_unlabeled_sample['token_type_ids'][indices], "attention_mask": X_unlabeled_sample['attention_mask'][indices]}, y_batch[indices] if 'joint' in type_: X_rationale_batch = {"input_ids_r": X_sample['input_ids'][indices], "token_type_ids_r": X_sample['token_type_ids'][indices], "attention_mask_r": X_sample['attention_mask'][indices]} if '_neg' in type_: X_neg_rationale_batch = {"input_ids_neg": X_negation_sample['input_ids'][indices], "token_type_ids_neg": X_negation_sample['token_type_ids'][indices], "attention_mask_neg": X_negation_sample['attention_mask'][indices]} else: indices = np.array([i for i in range(len(y_pred))]) acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices]), axis=1) X_batch = {"input_ids": X_unlabeled_sample['input_ids'][indices], "token_type_ids": X_unlabeled_sample['token_type_ids'][indices], "attention_mask": X_unlabeled_sample['attention_mask'][indices]} if 'joint' in type_: X_rationale_batch = {"input_ids_r": X_sample['input_ids'][indices], "token_type_ids_r": X_sample['token_type_ids'][indices], "attention_mask_r": X_sample['attention_mask'][indices]} if '_neg' in type_: X_neg_rationale_batch = {"input_ids_neg": X_negation_sample['input_ids'][indices], "token_type_ids_neg": X_negation_sample['token_type_ids'][indices], "attention_mask_neg": X_negation_sample['attention_mask'][indices]} ''' probs = y_val[indices] X_conf = np.ones((len(y_batch), max_seq_length)) X_conf[:,0] = np.log(probs+1e-10)*alpha ''' else: logger.info("No sampling at the moment; choose all the unlabeled examples") X_batch = {"input_ids": X_unlabeled_sample['input_ids'][indices], "token_type_ids": X_unlabeled_sample['token_type_ids'][indices], "attention_mask": X_unlabeled_sample['attention_mask'][indices]} if 'joint' in type_: X_rationale_batch = {"input_ids_r": X_sample['input_ids'][indices], "token_type_ids_r": X_sample['token_type_ids'][indices], "attention_mask_r": X_sample['attention_mask'][indices]} if '_neg' in type_: X_neg_rationale_batch = {"input_ids_neg": X_negation_sample['input_ids'][indices], "token_type_ids_neg": X_negation_sample['token_type_ids'][indices], "attention_mask_neg": X_negation_sample['attention_mask'][indices]} elif 'joint' in type_: acc = acc[:,None] y_batch = np.concatenate((acc[indices], y_pred[indices][:, 1:]), axis=1) logger.info("y_batch shape: {}".format(y_batch.shape)) #X_batch, y_batch, X_conf = f_(tokenizer, X_unlabeled_sample, y_mean, y_var, acc, unsup_size, len(labels), y_T=y_T, type_=type_) probs = y_val[indices] probs_rat = y_rat[indices] cls = list(acc[indices]) logger.info(cls) X_conf = np.ones((len(y_batch), max_seq_length)) log_probs = (probs+1e-10) #+(1-y_batch[:,0])*np.log(1-probs+1e-10)) log_rationale = (probs_rat+1e-10) if 'rwt' in type_: #re-weight labels X_conf[:,0] = np.where(log_probs>0, log_probs, 0.00000001) if 'norm' in type_: X_conf[:,0] = tf.nn.softmax(X_conf[:,0], axis=0) if '_r_' in type_: #re-weight rationales X_conf[:,1:] = np.where(log_rationale>0, log_rationale, 0.000000001) if 'norm' in type_: X_conf[:,1:] = tf.nn.softmax(X_conf[:,1:], axis=0) #X_conf = np.ones((len(X_batch['input_ids']), max_seq_length)) for i in range(len(cls)): X_conf[i,0] = class_weight[cls[i][0]]*X_conf[i,0] #logger.info ("Weights {}".format(X_conf[:10])) logger.info("X_connf shape: {}".format(X_conf.shape)) if 'mtl' in type_: #model = model_student 
logger.info(y_batch.shape) model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='dense_3_classification_acc', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:]]) if 'fine_tune_teacher' in type_: rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = None if '_noexp' in type_: loss_weights = [1.0, 0.0] else: loss_weights = [0.5, 0.5] with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), rat_loss], metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="dense_3_classification_acc")])#, tf.keras.metrics.SparseCategoricalAccuracy(name="token_acc")]) #, sample_weight_mode="temporal") model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:]], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:]]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) #, sample_weight=[X_conf[:,0], X_conf[:,1:]]) elif type_ == 'joint': logger.info(type_) def custom_loss(y_true, y_pred): logger.info(y_pred) return kb.mean(y_true*y_pred, axis=-1) with strategy.scope(): model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'l2_distance': custom_loss}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")], 'l2_distance':None}) #model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_task_classifier': None, 'l2_distance': custom_loss}, metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="acc"), tf.keras.metrics.SparseCategoricalAccuracy(name="acc"), tf.keras.metrics.SparseCategoricalAccuracy(name="acc"), tf.keras.metrics.Mean(name='mean')]) #X_batch.update(X_rationale_batch) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch, y_batch[:, 0], np.ones(len(y_batch))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev, y_dev[:,0], np.ones(len(y_dev))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) elif 'joint_neg' in type_: logger.info("Training for without rationales") with strategy.scope(): def custom_loss(y_true, y_pred): cce = 
tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.NONE) tf.print(tf.size(y_true), tf.size(y_pred)) cce_loss = ((cce(y_true, y_pred))* 1/(unsup_batch_size*gpus)) l1_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred),axis=0)) coh_loss = tf.reduce_mean(tf.reduce_sum(tf.math.abs(y_pred[1:]-y_pred[:-1]), axis=0)) #l2_loss = 0.0 #logger.info(l1_loss) return cce_loss + 0.1*l1_loss + 0.01*coh_loss def custom_loss_neg(y_true, y_pred): cce = tf.keras.losses.CategoricalCrossentropy(from_logits=False, reduction=tf.keras.losses.Reduction.NONE) return tf.reduce_sum(cce(y_true, y_pred))*(1/(unsup_batch_size*gpus)) rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) loss_weights = [1.0, 1.0, 1.0, 1.0] ''' if '_noexp' in type_: loss_weights = [1.0, 0, 0, 0] if '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': custom_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': custom_loss_neg}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")], 'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) X_batch['input_ids_r'], X_batch['token_type_ids_r'], X_batch['attention_mask_r'] = X_rationale_batch['input_ids_r'], X_rationale_batch['token_type_ids_r'], X_rationale_batch['attention_mask_r'] X_batch['input_ids_neg'], X_batch['token_type_ids_neg'], X_batch['attention_mask_neg'] = X_neg_rationale_batch['input_ids_neg'], X_neg_rationale_batch['token_type_ids_neg'], X_neg_rationale_batch['attention_mask_neg'] model.fit(x=X_batch, y=[y_batch[:,0], y_batch[:,1:], y_batch[:, 0], np.full((len(y_batch),len(labels)), 1/len(labels))], shuffle=True, epochs=unsup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)], sample_weight=[X_conf[:,0], X_conf[:,1:], X_conf[:,0], np.ones((len(y_batch)))]) # class_weight=class_weight) if 'fine_tune_teacher' in type_: rat_loss = None if 'focal' in type_: rat_loss = SparseCategoricalFocalLoss(gamma=2) else: rat_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) with strategy.scope(): loss_weights = [1.0, 1.0, 1.0, 1.0] ''' if '_noexp' in type_: loss_weights = [1.0, 0, 0, 0] elif '_no_suffcomp' in type_: loss_weights = [1.0, 1.0, 0, 0] ''' model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08), loss={'task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'rationale_classifier': rat_loss, 'rationale_task_classifier': tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'not_rationale_task_classifier': None}, metrics={'task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")], 'rationale_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")], 
'rationale_task_classifier':[tf.keras.metrics.SparseCategoricalAccuracy(name="acc")], 'not_rationale_task_classifier':None}, loss_weights=loss_weights) y_neg = np.full((len(y_train),len(labels)), 1/len(labels)) model.fit(x=X_train, y=[y_train[:,0], y_train[:,1:], y_train[:,0], y_neg], shuffle=True, epochs=sup_epochs, validation_data=(X_dev, [y_dev[:,0], y_dev[:,1:], y_dev[:,0], np.full((len(y_dev), len(labels)), 1/len(labels))]), batch_size=unsup_batch_size*gpus, callbacks=[tf.keras.callbacks.EarlyStopping(monitor='val_task_classifier_acc', patience=5, restore_best_weights=True)]) # class_weight=class_weight) tf.keras.backend.clear_session() if not os.path.exists(model_file): model.save_weights(model_file) logger.info ("Model file saved to {}".format(model_file)) model_student = model model_student.load_weights(model_file_best) if 'mtl' in type_: acc, y_pred = model_student.predict(X_test) y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) #logger.info("Micro score (task): {}".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) elif 'joint' in type_: out = model_student.predict(X_test) acc, y_pred, r_acc = out[0], out[1], out[2] logger.info("Raw logits: {}".format(acc)) y_pred = np.argmax(y_pred, axis=-1) acc = np.argmax(acc, axis=-1) r_acc = np.argmax(r_acc, axis=-1) logger.info("Best task acc score: {}".format(precision_recall_fscore_support(acc, y_test[:,0], average='micro'))) logger.info("Best token acc score: {}".format(precision_recall_fscore_support(y_pred, y_test[:,1:], average='macro'))) pred, truth = [], [] #sys.exit(1) test_pred = y_pred #np.argmax(y_pred, axis=-1) logger.info("Printing prediction data on student model for run {}: {}".format(counter, test_pred)) tp, fn, fp = 0, 0, 0 pred_1, pred_0, truth_1, truth_0 = 0, 0, 0, 0 for i in range(len(test_pred)): temp_p, temp_t, ct = [],[], 0 temp = tokenizer.convert_ids_to_tokens(X_test["input_ids"][i])[1:] #logger.info("Test sample {}".format(temp)) for j in range(0,len(test_pred[0])-1): if test_pred[i][j] == 1: temp_p.append(temp[j]) if y_test[i][j+1] == 1: temp_t.append(temp[j]) pred_1 += test_pred[i].sum() pred_0+= max_seq_length-pred_1 truth_1 += y_test[i].sum() truth_0+= max_seq_length-truth_1 pred.append(' '.join(temp_p)) truth.append(' '.join(temp_t)) for word in temp_p: if word in temp_t: ct+=1 temp_t.remove(word) else: fp+=1 tp +=ct fn += (y_test[i].sum()-ct) p = tp/(tp+fp+0.0000001) r = tp/(tp+fn+0.0000001) logger.info("Token-level: {}".format((tp)/(tp+(0.5*(fp+fn))))) logger.info("Rationale coverage (recall): {}".format(r)) logger.info("Token Precision: {}".format(p)) logger.info("Token overlap: {}".format(tp/(tp+fp+fn))) score1, score2, score3, score4 = 0.0, 0.0, 0.0, 0.0 for i in range(len(pred)): score1 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(1, 0, 0, 0)) score2 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 1, 0, 0)) score3 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 1, 0)) score4 += nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split(), weights=(0, 0, 0, 1)) logger.info("BLEU-1 score of rationales on test set (student model): {} ".format(score1/len(pred))) logger.info("BLEU-2 score of rationales on test set (student model): {} ".format(score2/len(pred))) logger.info("BLEU-3 score of rationales on test set (student model): {} ".format(score3/len(pred))) logger.info("BLEU-4 score of rationales on test set (student 
model): {} ".format(score4/len(pred))) data = [] for i in range(len(X_test["input_ids"])): text = tokenizer.decode(X_test["input_ids"][i]) temp = dict() temp['text'] = text temp['truth'] = truth[i] temp['pred'] = pred[i] temp['score'] = nltk.translate.bleu_score.sentence_bleu([truth[i].split()],pred[i].split()) data.append(temp) with open(os.path.join(model_dir, 'rationale_output_test_'+type_+'.json'), 'w') as f: json.dump(data, f) logger.info ("Best accuracy (task) across all self-training iterations {}".format(max_best_acc))
2.3125
2
py/test_pat.py
frasertweedale/drill
1
12791598
import unittest from . import pat class PatTestCase(unittest.TestCase): def test_pat(self): self.assertTrue(pat.match('a*', '')) self.assertFalse(pat.match('.', '')) self.assertTrue(pat.match('ab*', 'a')) self.assertTrue(pat.match('a.', 'ab')) self.assertTrue(pat.match('a', 'a'))
3.140625
3
noah/VersionInfo.py
dasong2410/dataloader
0
12791599
#! /usr/bin/env python #_*_encoding:utf-8_*_ class VersionInfo: def __init__(self, program, version, date, author): self.program = program self.version = version self.date = date self.author = author # print version information def info(self): print """ Name: \033[33;2m%s\033[0m Version: \033[33;2m%s\033[0m Date: \033[33;2m%s\033[0m Author: \033[33;2m%s\033[0m """ %(self.program, self.version, self.date, self.author) # print help information def usage(self): print """ Usage: %s -v print version information -h print help information """%(self.program)
2.796875
3
soda/distributed_environment/entity.py
mpuk/SODA
0
12791600
<gh_stars>0 from zmq import Context, DONTWAIT, Poller, POLLIN, DEALER from threading import Thread from pickle import dumps, loads from logging import getLogger from soda.helpers import support_arguments from soda.distributed_environment.behavior import ActionNode, IfNode from subprocess import run, PIPE from shlex import split from copy import deepcopy _logger = getLogger(__name__) class Entity(Thread): def __init__(_self, _id, _ip, _in_port, _state, _term_states, _states_behaviors, _neighbours): Thread.__init__(_self) _self._id = _id _self._ip = _ip _self._in_port = _in_port _self._state = _state _self._term_states = _term_states _self._states_behaviors = _states_behaviors _self._neighbours = _neighbours _self._impulse = False _self._read_lock = False _self._count_sent_messages = 0 _context = Context() _self._in_socket = _context.socket(DEALER) _self._in_socket.bind("tcp://*:%s" % _self._in_port) _poller = Poller() _poller.register(_self._in_socket, POLLIN) _self.i_ID = int(_id) _self.i_NEIGHBOURS = [_n for _n in _neighbours] _self.__dict__['deepcopy'] = deepcopy _self.__dict__['LEN'] = len def read(): # V nekonečnom cykle sledujeme, či na soket prišla správa. while True: _socks = dict(_poller.poll()) # Ak prišla správa. if _socks.get(_self._in_socket) == POLLIN: # Správu prečítame a následne extrahujeme obsah správy a odosieľatela. _pickled_received_message = _self._in_socket.recv(flags=DONTWAIT) _received_message, _sender_entity_id = loads(_pickled_received_message) _logger.info("Entity: {0} | Action: RECEIVED | Message : {1} | From entity : {2} ".format(_self._id, _received_message, _sender_entity_id)) # Porovnámme prijatú správu so všetkými vzormi READ konštrukcií pre # aktuálny stav. for _pattern in list(filter(lambda _p: _p != 'IMPULSE', _self._states_behaviors[_self._state])): _result = [] # Porovnáme správu so vzorom. Ak je na rovnakej pozícii vo vzore a # prijatej správe tá istá hodnota a vo vzore nieje na poziícii premenná # uložíme si do premennej _result hodnotu True. Ak sa hodnoty nezhodujú # a vo vzore nie je na pozícii premenná úložíme hodnotu False. Pre pozície # kde je vo vzore premenná si uložíme hodnotu None. if len(_pattern[1]) == len(_received_message): for _i, _j in zip(_pattern[1], _received_message): if _i == _j and type(_i) is not tuple: _result.append(True) elif _i != _j and type(_i) is not tuple: _result.append(False) else: _result.append(None) # Ak v v poli _result nie je hodnota False znamená to, že prijatá správa # sa zhoduje so vzorom. if False not in _result: # Pre pozície kde je vo vzore premenná uložíme hodnotu z príslušnej # pozície v správe do tejto premennej. for _i, _j in zip(_pattern[1], _received_message): if type(_i) is tuple: _identifier, _ = _i if type(_j) is str: _j = "'" + _j + "'" _expression = "%s = %s" % (_identifier, _j) # Využijeme akciu entity pre priradenie. _self._actions["ASSIGN"]((_expression, )) # Uložíme odosieľatela do použitelnej premennej. _self.i_SENDER = _sender_entity_id _logger.info("Entity: {0} | Action: READ | Message : {1} | From entity : {2} ".format(_self._id, _received_message, _sender_entity_id)) # Nakoniec vrátime vzor, ktorý sa zhodoval so správou, ktorú sme # prijali aby sme mohli následne v metóde run() identifikovať # správanie príslušné tomuto vzoru. return _pattern @support_arguments def send(_message, _recipients): # Vykonáme evaluáciu správy a prijímateľov aby sme napríklad v prípade # argumentov, ktoré sú premennými dostali konkrétne hodnity. 
_message = _self._actions["EVALUATE"](str(_message)) _recipients = _self._actions["EVALUATE"](str(_recipients)) if type(_message) is not tuple: _message = (_message, ) # Ak je prijímateľ iba jeden pretypujeme ho na pole. if type(_recipients) is int: _recipients = [_recipients] * 1 # Pre každého prijímateľa vytvoríme nový soket typu DEALER [18]. Následne # odošleme správu spolu s identifikátorom odosieľatela a zvýšíme počet # odoslaných správ pre entitu o 1. for _n in _recipients: try: _out_socket = _context.socket(DEALER) _out_socket.connect("tcp://localhost:%s" % _self._neighbours[_n]["in_port"]) _message_content = (_message, _self._id) _pickled_message = dumps(_message_content) _out_socket.send(_pickled_message, flags=DONTWAIT) # Zalogovanie úspešného poslania správy. Zaznamenaný je identifikátor # odosielateľa, prijímateľa a samotná správa. _logger.info("Entity: {0} | Action: SEND | Message : {1} | To entity : {2} ".format(_self._id, _message, _n)) _self._count_sent_messages += 1 except KeyError: # Zalogovanie neúspešného odoslania správy. _logger.info("Entity: {0} | Action: SEND | Trying to send message to non existing neighbour! -> {1} ".format(_self._id, _n)) @support_arguments def become(_new_state): _logger.info("Entity: {0} | Action: BECOME | Old state : {1} | New state : {2} ".format(_self._id, _self._state, _new_state)) # Entita zmení svoj stav na nový. _self._state = _new_state # Ak je tento nový stav terminujúci tak ukončíme správanie. if _self._state in _self._term_states: exit() @support_arguments def assign(_expression): # Pre uskutočnenie priradenia do nejakej premennej využívame funkciu exec(), # ktorá je jednou zo vstavaných funkcií jazyka Python. Exec() dokáže vykonať # akýkolvek valídny Python príkaz. Príkaz, ktorý ma exec vykonať je definovaný # reťazcom _expression. Aby mala funkcia exec() prístup ku všetkým lokálnym # premenným entity, ktoré používateľ opísal v algoritme je nutné predať funkcii # exec() prostredníctvom tretieho argumenty atribút objektu __dict__, v ktorom # sú uchované všetky aktuálne referencie premenných a ich hodnôt. try: exec(_expression, {}, _self.__dict__) _logger.info("Entity: {0} | Action: ASSIGN | Expression : {1} ".format(_self._id, _expression)) except NameError as _Name: _logger.info("Entity: {0} | Action: ASSIGN | Undefined identifier! -> {1} -> {2} ".format(_self._id, _Name, _expression)) exit() except AttributeError as _Attribute: _logger.info("Entity: {0} | Action: ASSIGN | Wrong type of identifier! -> {1} -> {2} ".format(_self._id, _Attribute, _expression)) exit() except TypeError as _Type: _logger.info("Entity: {0} | Action: ASSIGN | Wrong type of identifier! -> {1} -> {2} ".format(_self._id, _Type, _expression)) exit() @support_arguments def log(_expression): print("SODA: " + _self._actions["EVALUATE"](_expression)) def evaluate(_expression): result = None try: result = eval(_expression, {}, _self.__dict__) except NameError as _Name: _logger.info("Entity: {0} | Action: EVALUATE | Undefined identifier! -> {1} -> {2} ".format(_self._id, _Name, _expression)) exit() except AttributeError as _Attribute: _logger.info("Entity: {0} | Action: EVALUATE | Wrong type of identifier! -> {1} -> {2} ".format(_self._id, _Attribute, _expression)) exit() except ValueError as _Value: _logger.info("Entity: {0} | Action: EVALUATE | Wrong value! 
-> {1} -> {2} ".format(_self._id, _Value,_expression)) exit() return result @support_arguments def execute(_command, _output_type, _output, _input): _command = split(_command) _input = _self._actions["EVALUATE"](str(_input)) _process_output= None _completed_process = run(_command, input=str(_input), stdout=PIPE, universal_newlines=True, shell=True) # cast to correct output type if _output_type == 'string': _process_output = "'" + _completed_process.stdout + "'" elif _output_type == 'int': try: _process_output = int(_completed_process.stdout) except ValueError as _Value: _logger.info( "Entity: {0} | Action: EXEC | Wrong value for output cast to int! -> {1} -> {2} ".format(_self._id, _Value, _completed_process.stdout)) exit() elif _output_type == 'float': try: _process_output = float(_completed_process.stdout) except ValueError as _Value: _logger.info( "Entity: {0} | Action: EXEC | Wrong value for output cast to float! -> {1} -> {2} ".format(_self._id, _Value, _completed_process.stdout)) exit() _expression = "%s = %s" % (_output, _process_output) _self._actions["ASSIGN"]((_expression,)) @support_arguments def add(_array, _value): _expression = "%s.append(%s)" % (_array, _value) _self._actions["EVALUATE"](str(_expression)) @support_arguments def remove(_array, _value): _expression = "%s.remove(%s)" % (_array, _value) _self._actions["EVALUATE"](str(_expression)) @support_arguments def pop(_array, _output): _expression = "%s = %s.pop()" % (_output, _array) _self._actions["ASSIGN"]((_expression,)) _self._actions = { "READ": read, "SEND": send, "BECOME": become, "ASSIGN": assign, "LOG": log, "EVALUATE": evaluate, "EXEC": execute, "ADD": add, "REMOVE": remove, "POP": pop } def run(_self): # Entita vykonáva správanie pokiaľ sa nedostane do terminujúceho stavu. while _self._state not in _self._term_states: _current_state = _self._state # Entita sa spustí impulzom alebo začne čítať prijaté správy. if _self._impulse: _self._impulse = False _behavior = 'IMPULSE' _logger.info("Entity: {0} | Action: Started by IMPULSE ".format(_self._id)) else: _self._read_lock = True _behavior = _self._actions["READ"]() _self._read_lock = False # Nastavíme _n na prvý uzol správania príslušného pre aktuálny stav. _n = _self._states_behaviors[_current_state][_behavior].head _next_node = None # Iterujeme cez správanie. while _n is not None: # Vykonáme logiku uzlu. Logika uzlov je opísaná # v podkapitole 4.2.3 Správanie. if type(_n) is ActionNode: _next_node = _n.execute(_self) elif type(_n) is IfNode: _next_node = _n.execute(_self) if _next_node == "BECOME": break _n = _next_node
1.835938
2
HW9/YuliiaKutsyk/task_9_3_adam_and_eve.py
kolyasalubov/Lv-677.PythonCore
0
12791601
<filename>HW9/YuliiaKutsyk/task_9_3_adam_and_eve.py def God(): return [Man(), Woman()] class Human: pass class Man(Human): pass class Woman(Human): pass
2.15625
2
ospi_addon.py
noisymime/OSPi
1
12791602
<gh_stars>1-10 #!/usr/bin/python import ospi #### Add any new page urls here #### ospi.urls.extend(['/c1', 'ospi_addon.custom_page_1']) # example: (['/c1', 'ospi_addon.custom_page_1', '/c2', 'ospi_addon.custom_page_2', '/c3', 'ospi_addon.custom_page_3']) #### add new functions and classes here #### ### Example custom class ### class custom_page_1: """Add description here""" def GET(self): custpg = '<!DOCTYPE html>\n' #Insert Custom Code here. custpg += '<body>Hello from an ospi_addon program!</body>' return custpg
2.265625
2
panflute/utils.py
robert-shade/panflute
0
12791603
<gh_stars>0 """ Auxiliary functions that have no dependencies """ # --------------------------- # Imports # --------------------------- from collections import OrderedDict import sys import os.path as p from importlib import import_module # --------------------------- # Functions # --------------------------- def check_type(value, oktypes): # This allows 'Space' instead of 'Space()' if callable(value): value = value() if not isinstance(value, oktypes): tag = type(value).__name__ msg = 'received {} but expected {}'.format(tag, oktypes) raise TypeError(msg) else: return value def check_group(value, group): if value not in group: tag = type(value).__name__ msg = 'element {} not in group {}'.format(tag, repr(group)) raise TypeError(msg) else: return value def encode_dict(tag, content): return OrderedDict((("t", tag), ("c", content))) # --------------------------- # Classes # --------------------------- class ContextImport: """ Import module context manager. Temporarily prepends extra dir to sys.path and imports the module, Example: >>> # /path/dir/fi.py >>> with ContextImport('/path/dir/fi.py') as module: >>> # prepends '/path/dir' to sys.path >>> # module = import_module('fi') >>> module.main() >>> with ContextImport('dir.fi', '/path') as module: >>> # prepends '/path' to sys.path >>> # module = import_module('dir.fi') >>> module.main() """ def __init__(self, module, extra_dir=None): """ :param module: str module spec for import or file path from that only basename without .py is used :param extra_dir: str or None extra dir to prepend to sys.path if module then doesn't change sys.path if None if file then prepends dir if None """ def remove_py(s): return s[:-3] if s.endswith('.py') else s self.module = remove_py(p.basename(module)) if (extra_dir is None) and (module != p.basename(module)): extra_dir = p.dirname(module) self.extra_dir = extra_dir def __enter__(self): if self.extra_dir is not None: sys.path.insert(0, self.extra_dir) return import_module(self.module) def __exit__(self, exc_type, exc_value, traceback): if self.extra_dir is not None: sys.path.pop(0)
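# A minimal usage sketch of the helpers above, assuming made-up values; it only
# exercises check_type, check_group and encode_dict as defined in this module.
if __name__ == "__main__":
    d = encode_dict("Str", "hello")
    assert list(d.keys()) == ["t", "c"]          # ordered {"t": tag, "c": content}
    assert check_type(3, int) == 3               # value passes the isinstance check
    assert check_group("a", {"a", "b"}) == "a"   # value must belong to the group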
2.578125
3
arvan_client/vod/video.py
Sajadrahimi/arvan-client
1
12791604
<filename>arvan_client/vod/video.py from typing import List from arvan_client.arvan.types import DynamicType class FileInfo(DynamicType): class General(DynamicType): duration: int format: str bit_rate: int size: int class Video(DynamicType): codec: str width: int height: int frame_rate: str bit_rate: str class Audio(DynamicType): codec: str sample_rate: str bit_rate: str channel_layout: str def __init__(self, **kwargs): self.general = None self.video = None self.audio = None if 'general' in kwargs: self.general = self.General(**kwargs['general']) del kwargs['general'] if 'video' in kwargs: if kwargs['video']: self.video = self.Video(**kwargs['video']) del kwargs['video'] if 'audio' in kwargs: if kwargs['audio']: self.audio = self.Audio(**kwargs['audio']) del kwargs['audio'] super().__init__(**kwargs) def __str__(self): return self.general.format or super().__str__() class ConvertInfo(DynamicType): audio_bitrate: int video_bitrate: int resolution: str def __str__(self): return self.resolution class Video(DynamicType): video_id: str title: str description: str file_info: FileInfo thumbnail_time: int status: str job_status_url: str available: bool convert_mode: str convert_info: List[ConvertInfo] created_at: str updated_at: str completed_at: str parallel_convert: int directory_size: str config_url: str mp4_videos: List[str] hls_playlist: str dash_playlist: str thumbnail_url: str tooltip_url: str video_url: str player_url: str # channel: Channel def __init__(self, **kwargs): if 'convert_info' in kwargs: self.convert_info = [] if 'convert_info' in kwargs: if isinstance(kwargs['convert_info'], list): for convert_info in kwargs['convert_info']: self.convert_info.append(ConvertInfo(**convert_info)) del kwargs['convert_info'] if 'mp4_videos' in kwargs: self.mp4_videos = kwargs['mp4_videos'] del kwargs['mp4_videos'] if 'id' in kwargs: kwargs['video_id'] = kwargs.pop('id') del kwargs['channel'] super().__init__(**kwargs) def __str__(self): return self.title def __repr__(self): return self.__str__()
2.453125
2
napari_svg/__init__.py
Carreau/napari-svg
1
12791605
from .hook_implementations import ( napari_get_writer, napari_write_image, napari_write_labels, napari_write_points, napari_write_shapes, napari_write_vectors, )
0.949219
1
test/main/test_about_us.py
crockmitnic/question-paper-generator
6
12791606
from flaskapp import models from test.main.base_classes import BaseUser from test.main.utils import test_post_request class AboutUsTestCase(BaseUser): def test_about_us(self): response = self.client.get("/about-us") self.assertIn( b"Welcome to SetNow, We're dedicated to giving you the very best of our service.", response.data, ) self.assertIn( b"This website is created by students of DA-IICT (Gandhinagar, Gujrat).", response.data, ) self.assertIn( b"This effort was made under the guidence of Prof. <NAME>.", response.data, ) self.assertIn(b"Our Team", response.data) self.assertIn(b"<NAME> [201701184]", response.data) self.assertIn(b"UI/UX designer", response.data) self.assertIn(b"<NAME> [201701191]", response.data) self.assertIn(b"UI/UX designer", response.data) self.assertIn(b"<NAME> [201701203]", response.data) self.assertIn(b"Quality assurance engineer", response.data) self.assertIn(b"Team Back-end", response.data)
2.40625
2
Examples/two_ultrassonic_thread_HC_SR04 copy.py
BosonsHiggs/arduPython
0
12791607
<reponame>BosonsHiggs/arduPython ## See Figures/two_ultrassonics_thread.png from threading import Thread from pyfirmata import Arduino, pyfirmata, util from pyfirmata.util import ping_time_to_distance import time ### Start of pin configuration board = Arduino() # or Arduino(port) define board print("Communication successfully started!") it = util.Iterator(board) it.start() ## Note: Echo and Trigger pins connected to the same ports sonarEcho1 = board.get_pin('d:7:o') ## digital pin 7 OUTPUT sonarEcho2 = board.get_pin('d:8:o') ## digital pin 8 OUTPUT time.sleep(1) ### End set pins class Echo(Thread): def __init__ (self, echoPino, text:str=None): Thread.__init__(self) self.echoPino = echoPino self.text = text def run(self): while True: time = self.echoPino.ping() board.pass_time(0.06) #delay of 60ms -> see datasheet distance = ping_time_to_distance(time) print(f"{self.text}".format(time, distance)) ##Sonar 1: port 7 text_sonar1 = "sonar1: Time: {0}ms, distance: {1}cm" inicioEcho1 = Echo(sonarEcho1, text_sonar1) inicioEcho1.start() ##Sonar 2: port 8 text_sonar2 = "sonar2: Time: {0}ms, distance: {1}cm" inicioEcho2 = Echo(sonarEcho2, text_sonar2) inicioEcho2.start()
3.09375
3
source/nomenclator/dialog/__init__.py
buddly27/nomenclator-nuke
11
12791608
# -*- coding: utf-8 -*- from .comp_manager_dialog import CompoManagerDialog from .outputs_manager_dialog import OutputsManagerDialog from .project_manager_dialog import ProjectManagerDialog from .settings_dialog import SettingsDialog
1.015625
1
common/xrd-ui-tests-python/tests/xroad_cs_view_trusted_anchor/view_trusted_anchor.py
ria-ee/XTM
3
12791609
<gh_stars>1-10 import re from selenium.webdriver.common.by import By from tests.xroad_cs_upload_trusted_anchor.upload_trusted_anchor import get_generated_at from view_models.global_configuration import TRUSTED_ANCHORS_TAB_CSS from view_models.sidebar import GLOBAL_CONFIGURATION_CSS from view_models.trusted_anchor import TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH, INSTANCE_IDENTIFIER, UPLOAD_ANCHOR_BTN_ID, \ DOWNLOAD_BTN_XPATH, DELETE_BTN_XPATH, ANCHOR_HASH, ANCHOR_HASH_REGEX, GENERATED_AT_REGEX def test_view_trusted_anchors(self): def view_trusted_anchors(): self.log('Open global configuration view') self.wait_until_visible(type=By.CSS_SELECTOR, element=GLOBAL_CONFIGURATION_CSS).click() self.log('FED_01 1. Open "Trusted anchors" tab') self.wait_until_visible(type=By.CSS_SELECTOR, element=TRUSTED_ANCHORS_TAB_CSS).click() self.wait_jquery() self.log('FED_01 2. System displays trusted anchors') self.log('FED_02 2. The instance identifier of the X-Road instance the trusted anchor ' 'originates from is visible') self.wait_until_visible(type=By.XPATH, element=TRUSTED_ANCHOR_BY_IDENTIFIER_XPATH.format(INSTANCE_IDENTIFIER)) self.log('FED_02 2. The SHA-224 hash value of the trusted anchor file is visible') hash = filter(lambda x: x.size['height'] > 0, self.by_css(ANCHOR_HASH, multiple=True))[0].text self.is_true(re.match(ANCHOR_HASH_REGEX, hash)) self.log('FED_02 2. The generation date and time (UTC) of the trusted anchor file is visible') generated_at = get_generated_at(self) self.is_true(re.match(GENERATED_AT_REGEX, generated_at)) self.log('FED_02 The following user action options are displayed') self.log('FED_02 "Upload a trusted anchor" button is visible') self.is_not_none(self.by_id(UPLOAD_ANCHOR_BTN_ID)) self.log('FED_02 "Download a trusted anchor" button is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DOWNLOAD_BTN_XPATH, multiple=True))) > 0) self.log('FED_02 "Delete a trusted anchor" button is visible') self.is_true(len(filter(lambda x: x.size['height'] > 0, self.by_xpath(DELETE_BTN_XPATH, multiple=True))) > 0) return view_trusted_anchors
2.34375
2
tests/test_rebar.py
SurajDadral/pyconcrete
19
12791610
<gh_stars>10-100 import pytest import copy from pyconcrete import rebar @pytest.fixture def r1(): r = rebar.Rebar( length=5, diameter=20, count=1, insert=(0, 0)) return r @pytest.fixture def lr1(): r = rebar.LRebar( length=5, diameter=20, count=2, insert=(10, 20), v_align='top', h_align='left') return r @pytest.fixture def ur1(): r = rebar.URebar( length=200, diameter=16, count=4, insert=(0, 0), v_align='bot') return r def test_length(r1): assert r1.length == 5 def test_diameter(r1): assert r1.diameter == 20 def test_count(r1): assert r1.count == 1 def test_insert(r1): assert r1.insert == (0, 0) def test_points(r1): pts = [(0, 0), (5, 0)] assert r1.points() == pts def test_length_L(lr1): assert lr1.length == 5 def test_diameter_l(lr1): assert lr1.diameter == 20 def test_count_l(lr1): assert lr1.count == 2 def test_insert_l(lr1): assert lr1.insert == (10, 20) def test_points_l(lr1): pts = [(10, 14), (10, 20), (15, 20)] assert lr1.points() == pts def test_points_u(ur1): pts = [(0, 6), (0, 0), (200, 0), (200, 6)] assert ur1.points() == pts def test_points_along(r1, lr1, ur1): assert r1.points_along() == [(1.25, 0), (2.5, 0), (3.75, 0)] assert lr1.points_along() == [(11.25, 20), (12.5, 20), (13.75, 20)] assert ur1.points_along() == [(50, 0), (100, 0), (150, 0)] def test_text(r1, lr1, ur1): assert r1.text == '1~20' assert lr1.text == '2~20' assert ur1.text == '4~16' def test_text_len(r1, ur1): assert r1.text_len == 'L=5' assert ur1.text_len == 'L=200' r_scale = copy.deepcopy(r1) r_scale.scale(75, 20) assert r_scale.text_len == 'L=5' def test_xy_level(r1, lr1, ur1): assert r1.x1 == 0 assert r1.x2 == 5 assert r1.y == 0 assert ur1.x1 == 0 assert ur1.x2 == 200 assert ur1.y == 0 assert lr1.x1 == 10 assert lr1.x2 == 15 assert lr1.y == 20 # def test_real_length(r1, lr1, ur1): # assert r1.real_length == 5 # assert lr1.real_length == 200 # assert ur1.real_length == 250
2.171875
2
test/test_mw.py
freiburgermsu/WCMpy
0
12791611
<reponame>freiburgermsu/WCMpy<gh_stars>0 from pandas import DataFrame from shutil import rmtree from math import isclose from glob import glob import chemw import re, os def test_inits(): # import the class modules chem_mw = chemw.ChemMW() phreeq_db = chemw.PHREEQdb() print(os.getcwd()) for TF in [chem_mw.verbose, chem_mw.final, chem_mw.end, phreeq_db.verbose]: assert type(TF) is bool rmtree(phreeq_db.output_path) def test_accuracy(): # calculate the MW for chemicals of known MW test_chemicals = { 'Na2.43_Cl_(OH)2_(OH)1.2_(OH)': 162.7, 'Na2.43Cl(Ca(OH)2)1.2':180.2, 'Na2.43Cl:2H2O': 127.3, 'Na2.43Cl2.5:2H2O': 180.5, 'CaCl2:(MgCl2)2:12H2O': 517.6, 'Na2SO4:3K2SO4': 664.8, 'K2SO4:CaSO4:H2O': 328.4, 'Na.96Al.96Si2.04O6:H2O ': 219.2, 'Ca1.019Na.136K.006Al2.18Si6.82O18:7.33H2O': 714.4 } # calculate the MW for the dictionary of chemicals chem_mw = chemw.ChemMW() for chemical in test_chemicals: chem_mw.mass(chemical) tolerance = chem_mw.mw*0.001 # 99.9% accuracy if not isclose(chem_mw.raw_mw, test_chemicals[chemical], rel_tol = tolerance): assert False else: assert True # affirm that iterated entities are zero for zero in [chem_mw.groups, chem_mw.layer, chem_mw.skip_characters]: assert zero == 0 def test_phreeq_db(): # process the PHREEQ databases phreeq_databases = [db for db in glob('databases/*.dat')] # output_path = os.path.join(os.path.dirname(__file__), 'PHREEQqb') phreeq_db = chemw.PHREEQdb() for db in phreeq_databases: print('\n\n\n', re.search('([A-Za-z0-9_\.]+(?=\.dat))',db).group(), 'database\n', '='*len(db)) phreeq_db.process(db) # verify the output folder and its contents for db in phreeq_databases: json_name = re.search('([A-Za-z0-9_\.]+(?=\.dat))', db).group()+'.json' assert os.path.exists(os.path.join(phreeq_db.output_path, json_name)) assert type(phreeq_db.db_name) is str assert type(phreeq_db.db) is DataFrame # delete the directory rmtree(phreeq_db.output_path)
2.40625
2
Generate_Key.py
ChenhaoJimmyZou/Encryption-For-Any-File
1
12791612
from Crypto.Hash import SHA256 def getKey(passWord): hash = SHA256.new(passWord.encode('utf-8')) return hash.digest()
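# A minimal usage sketch, assuming getKey is called with an arbitrary (made-up)
# passphrase; SHA256 digest() yields 32 bytes, the key size expected by AES-256.
if __name__ == "__main__":
    key = getKey("correct horse battery staple")
    print(len(key))  # 32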
2.71875
3
xmpath/translate.py
xmake-io/pxmake
1
12791613
from os.path import expanduser from os import sep from re import split from functools import reduce from xmtrace import xmtrace @xmtrace def xm_path_translate(lua, ph): return expanduser(reduce(lambda a, b: a + sep + b, split(r"\\|/", ph)))
1.828125
2
Class-Example/CLASS_REPRACTRICED.py
emehrawn/Python-Codes
0
12791614
<filename>Class-Example/CLASS_REPRACTRICED.py #Write a Python class named Circle constructed by a radius and two methods #which will compute the area and the perimeter of a circle. import math class circle: def area1(self,radius): self.radius = radius area= radius*radius*math.pi print(area) def perimeter2(self,radius): self.radius = radius perimeter = radius*2*math.pi print(perimeter) circle().area1(6) circle().perimeter2(6)
4.3125
4
moni-alert/bin/comm/log.py
jimdn/monitor-toolkits
4
12791615
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import logging import logging.handlers CRITICAL = 1 ERROR = 2 WARNING = 3 INFO = 4 DEBUG = 5 class Logger: def __init__(self, fileName, level=DEBUG): dictLevel = { CRITICAL: logging.CRITICAL, ERROR: logging.ERROR, WARNING: logging.WARNING, INFO: logging.INFO, DEBUG: logging.DEBUG } if level < CRITICAL or level > DEBUG: level = DEBUG logLevel = dictLevel[level] # mkdir abspath = os.path.abspath(fileName) dir = os.path.dirname(abspath) if not os.path.exists(dir): os.makedirs(dir) self.logger = logging.getLogger(dir) self.logger.setLevel(logLevel) fmt = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s') # console log handler sh = logging.StreamHandler() sh.setFormatter(fmt) sh.setLevel(logLevel) # file log handler (rotating) fh = logging.handlers.RotatingFileHandler(fileName, maxBytes=50*1024*1024, backupCount=20) fh.setFormatter(fmt) fh.setLevel(logLevel) self.logger.addHandler(fh) def debug(self, message): self.logger.debug(message) def info(self, message): self.logger.info(message) def warn(self, message): self.logger.warn(message) def error(self, message): self.logger.error(message) def critical(self, message): self.logger.critical(message) if __name__ == "__main__": logger = Logger('test.log', INFO) logger.debug('this is a debug message') logger.info('this is a info message') logger.warn('this is a warn message')
3.109375
3
examples/LUH16A/COMP/plotcomp.py
dcmvdbekerom/exojax
0
12791616
import matplotlib.pyplot as plt import numpy as np nus_lpf,mu_lpf=np.load("clpf.npz",allow_pickle=True)["arr_0"] nus_modit,mu_modit=np.load("cmodit4500.npz",allow_pickle=True)["arr_0"] fig=plt.figure(figsize=(8,4)) plt.plot(nus_modit,mu_modit,label="MODIT",color="C1") plt.plot(nus_lpf,mu_lpf,label="DIRECT",ls="dashed",color="C0") plt.xlabel("wavenumber (cm-1)") plt.ylabel("spectrum") plt.legend() plt.savefig("compspec_luhman16A.png") plt.show()
2.390625
2
test_module.py
EdvardasDlugauskas/Auto-Rain
1
12791617
import icon_get import unittest import mainutils import iconmanager test_ini = """ [Rainmeter] Author=<EMAIL>.<EMAIL>.<EMAIL> Name=Mid Dock ------------------------------------------------------------------------ ;Metadata added by RainBrowser ;http://rainmeter.net/RainCMS/?q=Rainmeter101_AnatomyOfASkin [Metadata] Name= Config= Description= Instructions= Version= Tags= License= Variant= Preview= ;End of added Metadata ;----------------------------------------------------------------------- ; MEASURES MEASURES [MeasurePower] Measure=Plugin Plugin=Plugins\PowerPlugin.dll PowerState=PERCENT [MeasureBin] Measure=Plugin Plugin=RecycleManager.dll RecycleType=COUNT Drives=ALL [MeasureBin2] Measure=Plugin Plugin=RecycleManager.dll RecycleType=SIZE Drives=ALL [BinAction] Measure=Calc Formula=MeasureBin IfAboveAction=!execute [!RainmeterHideMeter IconEmpty][!RainmeterShowMeter IconFull] IfAboveValue=0 IfEqualAction=!execute [!RainmeterHideMeter IconFull][!RainmeterShowMeter IconEmpty] IfEqualValue=0 ;--------------------------------------------------------------------------- ; APPLICATIONS [video editor] Meter=Button Y=2R ButtonImage="E:\Desktop\Test junk\video editor icon.png" ButtonCommand=!execute ["E:\Desktop\Test junk\video editor.exe"] [stackoverflow help] Meter=Button Y=2R ButtonImage="E:\Desktop\Test junk\stackoverflow help icon.png" ButtonCommand=!execute ["E:\Desktop\Test junk\stackoverflow help.exe"] [movie maker] Meter=Button Y=2R ButtonImage="E:\Desktop\Test junk\movie maker icon.png" ButtonCommand=!execute ["E:\Desktop\Test junk\movie maker.exe"] [Terraria] Meter=Button Y=2R ButtonImage="E:\Desktop\Test junk\Terraria icon.png" ButtonCommand=!execute ["E:\Desktop\Test junk\Terraria.url"]""" class SmokeTests(unittest.TestCase): def test_get_urls(self): T = icon_get.get_urls assert T("minecraft") assert T("Dota 2") assert T("Photoshop") def test_sorting_by_ini(self): icon_names = ["Terraria", "movie maker", "video editor", "stackoverflow help", "new program"] icons = [iconmanager.IconManager(name=icon_name, image_save_path=".", app_path=".") for icon_name in icon_names] correctly_sorted_names = ["new program", "video editor", "stackoverflow help", "movie maker", "Terraria"] icons = mainutils.sort_by_ini(icons, ini_str=test_ini) for correct_name, actual_icon in zip(correctly_sorted_names, icons): assert actual_icon.name == correct_name, "Incorrectly sorted icons"
1.703125
2
tools/save_waveform.py
ml-postech/LISA
0
12791618
<filename>tools/save_waveform.py import argparse import os import torch import torchaudio import torchaudio.functional as F import torchaudio.transforms as T import librosa import matplotlib.pyplot as plt def plot_waveform(waveform, sample_rate, filename, title="Waveform", xlim=None, ylim=None): waveform = waveform.numpy() num_channels, num_frames = waveform.shape time_axis = torch.arange(0, num_frames) / sample_rate figure, axes = plt.subplots(num_channels, 1) if num_channels == 1: axes = [axes] for c in range(num_channels): axes[c].plot(time_axis, waveform[c], linewidth=1) axes[c].grid(True) if num_channels > 1: axes[c].set_ylabel(f'Channel {c+1}') if xlim: axes[c].set_xlim(xlim) if ylim: axes[c].set_ylim(ylim) figure.suptitle(title) plt.show(block=False) plt.savefig(filename) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--input', default='input.wav') parser.add_argument('--output', default='output.png') parser.add_argument('--time1', type=float, default=0.5) parser.add_argument('--time2', type=float, default=1) parser.add_argument('--gpu', default='0') args = parser.parse_args() os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu waveform1, sr1 = torchaudio.load(args.input) # waveform2, sr2 = torchaudio.load(args.input2) # diff = waveform2 - waveform1 # print(diff.shape) # diff = diff[0.5 * sr1:1 * sr1] print(waveform1.shape) waveform1 = waveform1[:, :] plot_waveform(waveform1, sr1, filename=args.output, title="", xlim=[args.time1, args.time2])
2.5625
3
preprocessor/__init__.py
AntonYermilov/gec-dataset-analyzer
0
12791619
<reponame>AntonYermilov/gec-dataset-analyzer from .aesw import AESWPreprocessor from .lang8 import Lang8Preprocessor from .fce import FCEPreprocessor from .jfleg import JFLEGPreprocessor class DatasetPreprocessorNotFoundError(ValueError): def __init__(self, preprocessor_name: str): super().__init__() self.message = f'Dataset preprocessor with name {preprocessor_name} was not found.' def get_dataset_preprocessor(preprocessor_name: str): preprocessors = { 'aesw': AESWPreprocessor, 'lang8': Lang8Preprocessor, 'fce': FCEPreprocessor, 'jfleg': JFLEGPreprocessor } if preprocessor_name not in preprocessors: raise DatasetPreprocessorNotFoundError(preprocessor_name=preprocessor_name) return preprocessors[preprocessor_name]
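# A minimal usage sketch of the factory above, covering only its lookup and
# error behaviour; the dataset names are the keys registered in preprocessors.
if __name__ == "__main__":
    cls = get_dataset_preprocessor("fce")        # returns the FCEPreprocessor class
    try:
        get_dataset_preprocessor("unknown")
    except DatasetPreprocessorNotFoundError as e:
        print(e.message)                         # names the missing preprocessor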
2.046875
2
mdct/windows.py
tombackstrom/mdct
40
12791620
<reponame>tombackstrom/mdct """ Module for windowing functions not found in SciPy """ from __future__ import division import numpy as np from scipy.signal import kaiser __all__ = [ 'kaiser_derived', ] def kaiser_derived(M, beta): """ Return a Kaiser-Bessel derived window. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. beta : float Kaiser-Bessel window shape parameter. Returns ------- w : ndarray The window, normalized to fulfil the Princen-Bradley condition. Notes ----- This window is only defined for an even number of taps. References ---------- .. [1] Wikipedia, "Kaiser window", https://en.wikipedia.org/wiki/Kaiser_window """ M = int(M) try: from scipy.signal import kaiser_derived as scipy_kd return scipy_kd(M, beta) except ImportError: pass if M < 1: return np.array([]) if M % 2: raise ValueError( "Kaiser Bessel Derived windows are only defined for even number " "of taps" ) w = np.zeros(M) kaiserw = kaiser(M // 2 + 1, beta) csum = np.cumsum(kaiserw) halfw = np.sqrt(csum[:-1] / csum[-1]) w[:M//2] = halfw w[-M//2:] = halfw[::-1] return w
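# A minimal sketch checking the Princen-Bradley condition the docstring promises:
# for an even-length KBD window, w[n]**2 + w[n + M//2]**2 == 1 for every n.
if __name__ == "__main__":
    w = kaiser_derived(8, beta=4.0)
    assert np.allclose(w[:4] ** 2 + w[4:] ** 2, 1.0)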
2.296875
2
tests/test_base_element.py
oiakinat/shawl
3
12791621
<reponame>oiakinat/shawl # -*- coding: utf-8 -*- # pylint:disable=protected-access from shawl import BaseElement def test_load_elements(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert b_element._element is None assert b_element._selector == ('xpath', '//div') def test_check_str_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) str_ = ("Selector: ('xpath', '//div'), " 'Element: None') assert str(b_element) == str_ def test_check_repr(): b_element = BaseElement('driver', **{'xpath': '//div'}) assert repr(b_element) == "BaseElement: ('xpath', '//div')" c_element = BaseElement('driver', repr_name='Test', **{'xpath': '//div'}) assert repr(c_element) == 'Test'
2.25
2
BOJ/review/boj_2110.py
mrbartrns/swacademy_structure
0
12791622
# BOJ 2110 import sys si = sys.stdin.readline n, m = map(int, si().split()) arr = [int(si()) for _ in range(n)] arr.sort() MIN = 1 MAX = arr[-1] - arr[0] def get_count(gap): cnt = 1 last = arr[0] for i in range(1, n): if arr[i] - last >= gap: cnt += 1 last = arr[i] return cnt def search(start, end, k): while start <= end: mid = (start + end) // 2 cnt = get_count(mid) if cnt >= k: start = mid + 1 res = mid else: end = mid - 1 return res print(search(MIN, MAX, m))
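# How this works: get_count(gap) greedily counts how many routers can be placed
# when consecutive routers must be at least `gap` apart, and search() binary
# searches for the largest gap that still allows m placements ("binary search on
# the answer"). For the BOJ 2110 sample (houses at 1, 2, 8, 4, 9 with m = 3) the
# script prints 3.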
2.96875
3
CHAPTER 07 (linked_list)/position_class.py
ahammadshawki8/Data-Structures-Algorithms-in-Python-
0
12791623
<filename>CHAPTER 07 (linked_list)/position_class.py class Position: """An abstraction representing the location of a single element.""" def __init__(self,container,node): """Constructor should not be invoked by the user.""" self._container = container self._node = node def element(self): """Return the element stored at this position.""" return self._node._element def __eq__(self,other): """Return True if other is a Position representing the same location.""" return ((type(other) is type(self)) and (other._node is self._node)) def __ne__(self,other): """Return True if other does not represent the same location.""" return not(self == other) # opposite of __eq__
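# A minimal sketch of the element/equality semantics above, assuming a made-up
# stub node; a real Position is normally created by a positional list container.
if __name__ == "__main__":
    class _StubNode:
        def __init__(self, element):
            self._element = element
    node = _StubNode(42)
    p = Position(container=None, node=node)
    q = Position(container=None, node=node)
    assert p.element() == 42
    assert p == q and not (p != q)   # same node -> same position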
4.1875
4
fdxExceptionDetails/fdxExceptionDetailsForNameError.py
SkyLined/mDebugOutput
2
12791624
<gh_stars>1-10 import re; def fdxExceptionDetailsForNameError(oException): if len(oException.args) != 1: return {}; if isinstance(oException, UnboundLocalError): oUnboundLocalErrorMessageMatch = re.match(r"^local variable '([_\w]+)' referenced before assignment$", oException.args[0]); if not oUnboundLocalErrorMessageMatch: return {}; sVariableName = oUnboundLocalErrorMessageMatch.group(1); sProblemDescription = "Uninitialised variable"; else: oNameErrorMessageMatch = re.match(r"^(?:global )?name '([_\w]+)' is not defined$", oException.args[0]); if not oNameErrorMessageMatch: return {}; sVariableName = oNameErrorMessageMatch.group(1); if sVariableName != oException.name: return {}; # Sanity check. sProblemDescription = "Undefined variable"; return { "aasConsoleOutputLines": [ [ guExceptionInformationColor, sProblemDescription, " ", guExceptionInformationHighlightColor, sVariableName, guExceptionInformationColor, ".", ], ], "dxHiddenProperties": { "args": oException.args, "name": oException.name, "with_traceback": oException.with_traceback, }, }; from ..mColorsAndChars import *;
2.28125
2
answers/justshivam/Day 3/question2.py
arc03/30-DaysOfCode-March-2021
22
12791625
def check(num): stage = [] for i in num: if i in stage: print(f"{num} is not a Unique Number.") return stage.append(i) print(f'{num} is a Unique Number.') num = input('Enter the number: ') check(num)
3.8125
4
aaem/components/residential_buildings/component.py
gina-alaska/alaska_affordable_energy_model
1
12791626
<gh_stars>1-10 """ Residential Efficiency component body ------------------------------------- """ import numpy as np from pandas import DataFrame import os from aaem.components.annual_savings import AnnualSavings from aaem.community_data import CommunityData from aaem.forecast import Forecast from aaem.diagnostics import Diagnostics import aaem.constants as constants from config import COMPONENT_NAME, UNKNOWN class ResidentialBuildings(AnnualSavings): """Residential energy efficiency component of the Alaska Affordable Energy Model: This module estimates the potential improvements to heating efficiency of residential buildings (homes). Consumption and savings are based on the number of units that have not been retrofit as of 2010, the performance improvements as a percentage of the pre-retrofit consumption, and the forecasted price of offset heating fuels. The cost to retrofit each home is also calculated. Parameters ---------- community_data : CommunityData CommunityData Object for a community forecast : Forecast forecast for a community diagnostics : diagnostics, optional diagnostics for tracking error/warning messages prerequisites : dictionary of components, optional prerequisite component data this component has no prerequisites leave empty Attributes ---------- diagnostics : diagnostics for tracking error/warning messages initial value: diag or new diagnostics object forecast : forecast community forecast for estimating future values initial value: forecast cd : dictionary general data for a community. Initial value: 'community' section of community_data comp_specs : dictionary component specific data for a community. Initial value: 'Residential Buildings' section of community_data See also -------- aaem.community_data : community data module, see information on CommunityData Object aaem.forecast : forecast module, see information on Forecast Object aaem.diagnostics : diagnostics module, see information on Diagnostics Object """ def __init__ (self, community_data, forecast, diag = None, prerequisites = {}): """Class initialiser Parameters ---------- community_data : CommunityData CommunityData Object for a community forecast : Forecast forecast for a community diagnostics : diagnostics, optional diagnostics for tracking error/warning messages prerequisites : dictionary of components, optional prerequisite component data """ self.diagnostics = diag if self.diagnostics == None: self.diagnostics = diagnostics() self.intertie_data = community_data.intertie_data self.cd = community_data.get_section('community') #~ self.copied_elec = community_data.copies.\ #~ ix["yearly electric summary"].values[0] if self.cd["model electricity"]: self.elec_prices = community_data.get_item('community', 'electric prices') self.comp_specs = community_data.get_section(COMPONENT_NAME) self.component_name = COMPONENT_NAME self.forecast = forecast self.refit_cost_rate = \ self.comp_specs['average refit cost'] * \ community_data.get_item( 'community', 'regional construction multiplier' ) self.set_project_life_details( self.comp_specs["start year"], self.comp_specs["lifetime"] ) yr = int(self.comp_specs['data']['Year']) self.base_pop = int(self.forecast.population.ix[yr])#.values[0][0] peps_per_house = float(self.base_pop) / \ self.comp_specs['data']['Total Occupied'] households = np.round(self.forecast.population / np.float64(peps_per_house)) households.columns = ["HH"] self.households = households.ix[self.start_year:self.end_year].T.values[0] val = self.forecast.get_population(self.start_year) HH 
=self.comp_specs['data']['Total Occupied'] self.init_HH = int(round(HH*(val / self.base_pop))) def run (self, scalers = {'capital costs':1.0}): """Runs the component. The Annual Total Savings,Annual Costs, Annual Net Benefit, NPV Benefits, NPV Costs, NPV Net Benefits, Benefit-Cost Ratio, Levelized Cost of Energy, and Internal Rate of Return will all be calculated. There must be a known Heat Recovery project for this component to run. Parameters ---------- scalers : dictionary of valid scalers, optional Scalers to adjust normal run variables. See note on accepted scalers Attributes ---------- run : bool True in the component runs to completion, False otherwise reason : string lists reason for failure if run == False Notes ----- Accepted scalers: capital costs. """ self.was_run = True self.reason = "OK" tag = self.cd['file id'].split('+') if len(tag) > 1 and tag[1] != 'residential': self.was_run = False self.reason = "Not a residential project." return # needed for electric or HF component and has a default value self.calc_avg_consumption() if self.cd["model electricity"]: self.calc_baseline_kWh_consumption() self.calc_proposed_kWh_consumption() if self.cd["model heating fuel"]: #~ self.calc_init_HH() self.calc_savings_opportunities() self.calc_init_consumption() self.calc_baseline_fuel_consumption() self.calc_proposed_fuel_consumption() #~ self.set_forecast_columns() if self.cd["model financial"]: self.calc_capital_costs() self.get_diesel_prices() self.calc_baseline_fuel_cost() self.calc_proposed_fuel_cost() self.calc_baseline_kWh_cost() self.calc_proposed_kWh_cost() self.calc_annual_electric_savings() self.calc_annual_heating_savings() self.calc_annual_total_savings() self.calc_annual_costs(self.cd['interest rate'], scalers['capital costs']) self.calc_annual_net_benefit() self.calc_npv(self.cd['discount rate'], self.cd['current year']) self.calc_levelized_costs(0) def get_fuel_total_saved (self): """Get total fuel saved. Returns ------- float the total fuel saved in gallons """ base_heat = \ self.baseline_HF_consumption[:self.actual_project_life] post_heat = \ self.proposed_HF_consumption[:self.actual_project_life] return (base_heat - post_heat) * constants.mmbtu_to_gal_HF def get_total_energy_produced (self): """Get total energy produced. Returns ------- float the total energy produced """ # no electric return self.baseline_HF_consumption[:self.actual_project_life] - \ self.proposed_HF_consumption[:self.actual_project_life] def calc_avg_consumption (self): """Get the average monthly consumption of electricity for a house. Attributes ---------- avg_kWh_consumption_per_HH : float average electric consumption per household (kWh/year). >= 6000 """ # 500 average energy use, 12 months in a year. That's where the 6000.0 # comes from. 
con_threshold = self.comp_specs['min kWh per household'] yr = int(self.comp_specs['data']['Year']) #~ houses = int(self.comp_specs['data']['Total Occupied']) #~ r_con = self.forecast.base_res_consumption avg_con = float(self.comp_specs['data']['average kWh per house']) if not self.intertie_data is None: avg_con = self.intertie_data.get_item( 'Residential Energy Efficiency', 'data' )['average kWh per house'] #~ self.avg_monthly_consumption = ave_con/12 if (avg_con < con_threshold) or np.isnan(avg_con): avg_con = con_threshold self.diagnostics.add_note(self.component_name, ("Average residential Electric consumption" " corrected to "+ str(con_threshold)+" kWh per year")) self.avg_kWh_consumption_per_HH = avg_con self.diagnostics.add_note(self.component_name, "Average consumption was " + str(self.avg_kWh_consumption_per_HH) +\ " in " + str(yr)) def calc_init_HH (self): """Estimate the # of households for the first year of the project Attributes ---------- init_HH : int estimated households for first year of project """ val = self.forecast.get_population(self.start_year) HH = self.comp_specs['data']['Total Occupied'] pop = self.forecast.base_pop self.init_HH = int(round(HH*(val / pop))) def calc_init_consumption (self): """Calculate the initial consumption for each fuel type. Attributes ---------- init_HF : float initial heating oil consumption init_wood : float initial heating cordwood consumption init_gas : float initial natural gas fuel consumption init_LP : float initial propane consumption init_kWh : float initial electric consumption """ rd = self.comp_specs['data'] ## total consumption total = rd["Total Consumption (MMBtu)"] + \ rd["BEES Total Consumption (MMBtu)"] + \ rd["Pre-Retrofit Avg Area (SF)"] * \ rd["Pre-Retrofit Avg EUI (MMBtu/sf)"] * self.opportunity_HH #~ self.baseline_total_energy_consumption = total HH = self.init_HH percent_accounted = 0 amnt = np.float64(rd["Fuel Oil"]) / 100.0 percent_accounted += amnt self.init_HF = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_HF) amnt = np.float64(rd["Wood"]) / 100.0 percent_accounted += amnt self.init_wood = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_cords) amnt = np.float64(rd["Utility Gas"]) / 100.0 percent_accounted += amnt self.init_gas = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_Mcf) amnt = np.float64(rd["LP"]) / 100.0 percent_accounted += amnt self.init_LP = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_gal_LP) amnt = np.float64(rd["Electricity"]) / 100.0 percent_accounted += amnt self.init_kWh = self.calc_consumption_by_fuel(amnt, total, HH, constants.mmbtu_to_kWh) #~ self.init_coal #~ self.init_solar #~ self.init_other msg = str(round(percent_accounted)) + \ " of residential fuel sources accounted for" self.diagnostics.add_note(self.component_name, msg) def calc_savings_opportunities (self): """Calculate savings opportunities Attributes ---------- opportunity_HH : int Houses that can be retrofit savings_HF : float savings in heating oil consumption savings_wood : float savings in heating cordwood consumption savings_gas : float savings in natural gas fuel consumption savings_LP : float savings in propane consumption savings_kWh : float savings in electric consumption savings_mmbtu: float total savings in mmbtu """ rd = self.comp_specs['data'] ## #HH self.opportunity_HH = self.init_HH -rd["BEES Number"] -rd["Post-Retrofit Number"] self.opportunity_HH = np.float64( self.opportunity_HH ) #~ print self.opportunity_HH if self.opportunity_HH 
< 0: self.opportunity_HH = 0 self.diagnostics.add_note(self.component_name, "calculate Houses to retrofit was negative, setting to 0" ) ## % as decimal #~ self.percent_savings = rd["opportunity_total_percent_community_savings"] #~ self.percent_savings = np.float64( self.percent_savings) area = np.float64(rd["Pre-Retrofit Avg Area (SF)"]) EUI = np.float64(rd["Pre-Retrofit Avg EUI (MMBtu/sf)"]) avg_EUI_reduction = np.float64(rd["Post-Retrofit Avg. EUI Reduction"]) total = area * EUI # the one in each of these function calls is an identity amnt = np.float64(rd["Fuel Oil"]) / 100.0 self.savings_HF = avg_EUI_reduction * self.opportunity_HH * \ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_HF) amnt = np.float64(rd["Wood"]) / 100.0 self.savings_wood = avg_EUI_reduction * self.opportunity_HH * \ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_cords) amnt = np.float64(rd["Utility Gas"]) / 100.0 self.savings_gas = avg_EUI_reduction * self.opportunity_HH * \ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_Mcf) amnt = np.float64(rd["LP"]) / 100.0 self.savings_LP = avg_EUI_reduction * self.opportunity_HH * \ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_gal_LP) amnt = np.float64(rd["Electricity"]) / 100.0 self.savings_kWh = avg_EUI_reduction * self.opportunity_HH * \ self.calc_consumption_by_fuel(amnt, total, 1, constants.mmbtu_to_kWh) #~ self.savings_coal #~ self.savings_solar #~ self.savings_other self.savings_mmbtu = self.savings_HF * (1/constants.mmbtu_to_gal_HF) +\ self.savings_wood * (1/constants.mmbtu_to_cords) +\ self.savings_gas * (1/constants.mmbtu_to_Mcf) +\ self.savings_kWh * (1/constants.mmbtu_to_kWh) +\ self.savings_LP* (1/constants.mmbtu_to_gal_LP) def calc_consumption_by_fuel (self, fuel_amnt, total_consumption, HH, cf): """calculate consumption by fuel from the total consumption Parameters ---------- fuel_amnt: float % of fuel used total_consumption : float total consumption for residential buildings HH : float a # of houses cf: float conversion factor Returns ------- float: fuel consumed for a type of fuel """ HH_consumption = HH * self.avg_kWh_consumption_per_HH * \ constants.kWh_to_mmbtu return np.float64(fuel_amnt * (total_consumption - HH_consumption) * cf) def calc_baseline_fuel_consumption (self): """Calculate baseline fuel consumption Attributes: baseline_fuel_Hoil_consumption : np.array baseline heating fuel consumption baseline_fuel_wood_consumption : np.array baseline cordwood consumption baseline_fuel_gas_consumption : np.array baseline natural gas consumption baseline_fuel_LP_consumption : np.array baseline propane consumption baseline_fuel_kWh_consumption : np.array baseline electricity consumption baseline_HF_consumption : np.array baseline total heating fuel consumption """ rd = self.comp_specs['data'] self.fuel_oil_percent = rd["Fuel Oil"] / 100.0 HH = self.households #~ print HH area = np.float64(rd["Pre-Retrofit Avg Area (SF)"]) EUI = np.float64(rd["Pre-Retrofit Avg EUI (MMBtu/sf)"]) scaler = (HH - self.init_HH) * area * EUI self.baseline_fuel_Hoil_consumption = \ self.init_HF+np.float64(rd["Fuel Oil"]/100.0)*\ scaler * constants.mmbtu_to_gal_HF self.baseline_fuel_wood_consumption = \ self.init_wood+np.float64(rd["Wood"]/100.0)*\ scaler * constants.mmbtu_to_cords self.baseline_fuel_gas_consumption = self.init_gas + \ np.float64(rd["Utility Gas"]/100.0) * \ scaler * constants.mmbtu_to_Mcf self.baseline_fuel_LP_consumption = \ self.init_LP+np.float64(rd["LP"]/100.0)*\ scaler * 
constants.mmbtu_to_gal_LP self.baseline_fuel_kWh_consumption = self.init_kWh+\ np.float64(rd["Electricity"]/100.0)*\ scaler * constants.mmbtu_to_kWh #~ self.baseline_fuel_coal_consumption #~ self.baseline_fuel_solar_consumption #~ self.baseline_fuel_other_consumption if self.cd['natural gas price'] == 0: self.baseline_fuel_gas_consumption = 0 self.baseline_HF_consumption = \ self.baseline_fuel_Hoil_consumption * \ (1/constants.mmbtu_to_gal_HF) +\ self.baseline_fuel_wood_consumption * \ (1/constants.mmbtu_to_cords) +\ self.baseline_fuel_gas_consumption * (1/constants.mmbtu_to_Mcf) +\ self.baseline_fuel_kWh_consumption * (1/constants.mmbtu_to_kWh) +\ self.baseline_fuel_LP_consumption * (1/constants.mmbtu_to_gal_LP) def calc_baseline_kWh_consumption (self): """Calculate the baseline kWh consumption for a community Attributes ---------- baseline_kWh_consumption : np.array electric consumption per yer """ HH = self.households self.baseline_kWh_consumption = self.avg_kWh_consumption_per_HH * HH def calc_baseline_fuel_cost (self): """calculate base line heating fuel costs Attributes ---------- baseline_HF_cost : np.array baseline cost of heating fuels per year """ HF_price = (self.diesel_prices + self.cd['heating fuel premium']) self.hoil_price = HF_price wood_price = self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price'] self.baseline_HF_cost = \ self.baseline_fuel_Hoil_consumption * HF_price + \ self.baseline_fuel_wood_consumption * wood_price + \ self.baseline_fuel_gas_consumption * gas_price + \ self.baseline_fuel_LP_consumption * LP_price + \ self.baseline_fuel_kWh_consumption * gas_price # coal,solar, other def calc_baseline_kWh_cost (self): """calculate baseline electricity costs Attributes ---------- baseline_kWh_cost : np.array baseline cost of electricity per year """ self.cd["electric prices"].index = \ self.cd["electric prices"].index.astype(int) #~ kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh #~ print len(self.baseline_kWh_consumption) kWh_cost = self.cd["electric prices"]\ .ix[self.start_year:self.end_year].T.values[0] #~ print len(kWh_cost) self.baseline_kWh_cost = self.baseline_kWh_consumption * kWh_cost def calc_proposed_fuel_consumption (self): """Calculate the proposed heating fuel consumption Attributes ---------- proposed_fuel_Hoil_consumption : np.array proposed heating oil consumption proposed_fuel_wood_consumption : np.array proposed cordwood consumption proposed_fuel_LP_consumption : np.array proposed LP consumption proposed_fuel_gas_consumption : np.array proposed natural gas consumption proposed_fuel_kWh_consumption : np.array proposed electric consumption proposed_HF_consumption : np.array proposed total electric consumption """ self.proposed_fuel_Hoil_consumption = \ self.baseline_fuel_Hoil_consumption - self.savings_HF self.proposed_fuel_wood_consumption = \ self.baseline_fuel_wood_consumption - self.savings_wood self.proposed_fuel_LP_consumption = \ self.baseline_fuel_LP_consumption - self.savings_LP self.proposed_fuel_gas_consumption = \ self.baseline_fuel_gas_consumption - self.savings_gas self.proposed_fuel_kWh_consumption = \ self.baseline_fuel_kWh_consumption - self.savings_kWh self.proposed_HF_consumption = \ self.baseline_HF_consumption - self.savings_mmbtu if self.cd['natural gas price'] == 0: self.proposed_fuel_gas_consumption = 0 # coal,solar, other def calc_proposed_kWh_consumption (self): """calculate the 
proposed kWh consumption for a community Attributes ---------- proposed_kWh_consumption : np.array set to baseline values """ self.proposed_kWh_consumption = self.baseline_kWh_consumption def calc_proposed_fuel_cost (self): """Calculate proposed heating cost Attributes ---------- proposed_HF_cost : np.array proposed heating fuel cost """ HF_price = (self.diesel_prices + self.cd['heating fuel premium']) wood_price = self.cd['cordwood price'] elec_price = self.elec_prices[self.start_year-self.start_year: self.end_year-self.start_year] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price'] self.proposed_HF_cost = \ self.proposed_fuel_Hoil_consumption * HF_price + \ self.proposed_fuel_wood_consumption * wood_price + \ self.proposed_fuel_gas_consumption * gas_price + \ self.proposed_fuel_LP_consumption * LP_price + \ self.proposed_fuel_kWh_consumption * gas_price def calc_proposed_kWh_cost (self): """Calculate post retrofit electricity costs Attributes ---------- proposed_kWh_cost: np.array proposed electricity cost """ kWh_cost = self.cd["electric prices"].\ ix[self.start_year:self.end_year] kWh_cost = kWh_cost.T.values[0] # kWh/yr*$/kWh self.proposed_kWh_cost = self.proposed_kWh_consumption * kWh_cost def calc_capital_costs (self): """Calculate the capital costs. Attributes ---------- capital_costs : float total cost of improvements ($) """ self.capital_costs = self.opportunity_HH * self.refit_cost_rate def calc_annual_electric_savings (self): """calculate annual electric savings created by the project Attributes ---------- annual_electric_savings : np.array electric savings ($/year) are the difference in the base and proposed fuel costs """ self.annual_electric_savings = np.zeros(self.project_life) def calc_annual_heating_savings (self): """calculate annual heating savings created by the project Attributes ---------- annual_heating_savings : np.array heating savings ($/year) """ self.annual_heating_savings = self.baseline_HF_cost - \ self.proposed_HF_cost def set_forecast_columns (self): """Set columns in the the forecast to values calculated in this component """ years = range(self.start_year,self.end_year) self.forecast.add_heating_fuel_column(\ "heating_fuel_residential_consumed [gallons/year]", years, self.baseline_fuel_Hoil_consumption) self.forecast.add_heating_fuel_column(\ "heating_fuel_residential_consumed [mmbtu/year]", years, self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF) self.forecast.add_heating_fuel_column(\ "cords_wood_residential_consumed [cords/year]", years, self.baseline_fuel_wood_consumption) self.forecast.add_heating_fuel_column(\ "cords_wood_residential_consumed [mmbtu/year]", years, self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords) self.forecast.add_heating_fuel_column(\ "gas_residential_consumed [Mcf/year]", years, self.baseline_fuel_gas_consumption) self.forecast.add_heating_fuel_column(\ "gas_residential_consumed [mmbtu/year]", years, self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf) self.forecast.add_heating_fuel_column(\ "electric_residential_consumed [kWh/year]", years, self.baseline_fuel_kWh_consumption) self.forecast.add_heating_fuel_column(\ "electric_residential_consumed [mmbtu/year]", years, self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh) self.forecast.add_heating_fuel_column(\ "propane_residential_consumed [gallons/year]", years, self.baseline_fuel_LP_consumption) self.forecast.add_heating_fuel_column(\ "propane_residential_consumed [mmbtu/year]", years, 
self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP) self.forecast.add_heat_demand_column(\ "heat_energy_demand_residential [mmbtu/year]", years, self.baseline_HF_consumption) def save_component_csv (self, directory): """Save the component output csv in directory Parameters ---------- directory : path output directory """ if not self.was_run: return if self.cd["model financial"]: HF_price = (self.diesel_prices + self.cd['heating fuel premium']) wood_price = self.cd['cordwood price'] elec_price = self.cd["electric prices"]\ .ix[self.start_year:self.end_year].T.values[0] LP_price = self.cd['propane price'] gas_price = self.cd['natural gas price'] else: HF_price = np.nan wood_price = np.nan elec_price = np.nan LP_price = np.nan gas_price = np.nan b_oil = self.baseline_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF r_oil = self.proposed_fuel_Hoil_consumption/constants.mmbtu_to_gal_HF s_oil = b_oil - r_oil b_oil_cost = self.baseline_fuel_Hoil_consumption * HF_price r_oil_cost = self.proposed_fuel_Hoil_consumption * HF_price s_oil_cost = b_oil_cost - r_oil_cost b_bio = self.baseline_fuel_wood_consumption/constants.mmbtu_to_cords r_bio = self.proposed_fuel_wood_consumption/constants.mmbtu_to_cords s_bio = b_bio - r_bio b_bio_cost = self.baseline_fuel_wood_consumption * wood_price r_bio_cost = self.proposed_fuel_wood_consumption * wood_price s_bio_cost = b_bio_cost - r_bio_cost b_elec = self.baseline_fuel_kWh_consumption/constants.mmbtu_to_kWh r_elec = self.proposed_fuel_kWh_consumption/constants.mmbtu_to_kWh s_elec = b_elec - r_elec b_elec_cost = self.baseline_fuel_kWh_consumption * elec_price r_elec_cost = self.proposed_fuel_kWh_consumption * elec_price s_elec_cost = b_elec_cost - r_elec_cost b_LP = self.baseline_fuel_LP_consumption/constants.mmbtu_to_gal_LP r_LP = self.proposed_fuel_LP_consumption/constants.mmbtu_to_gal_LP s_LP = b_LP - r_LP b_LP_cost = self.baseline_fuel_LP_consumption * LP_price r_LP_cost = self.proposed_fuel_LP_consumption * LP_price s_LP_cost = b_LP_cost - r_LP_cost b_NG = self.baseline_fuel_gas_consumption/constants.mmbtu_to_Mcf r_NG = self.proposed_fuel_gas_consumption/constants.mmbtu_to_Mcf s_NG = b_NG - r_NG b_NG_cost = self.baseline_fuel_gas_consumption * gas_price r_NG_cost = self.proposed_fuel_gas_consumption * gas_price s_NG_cost = b_NG_cost - r_NG_cost years = np.array(range(self.project_life)) + self.start_year df = DataFrame({ "Residential: Heating Fuel All (MMBtu/year) Consumption Baseline": self.get_base_HF_use(), "Residential: Heating Fuel All (MMBtu/year) Consumption Post Retrofit": self.get_proposed_HF_use(), "Residential: Heating Fuel All (MMBtu/year) Consumption Savings": self.get_base_HF_use() -\ self.get_proposed_HF_use(), "Residential: Heating Fuel All (MMBtu/year) Cost Baseline": self.get_base_HF_cost(), "Residential: Heating Fuel All (MMBtu/year) Cost Post Retrofit": self.get_proposed_HF_cost(), "Residential: Heating Fuel All (MMBtu/year) Cost Savings": self.get_heating_savings_costs(), "Residential: Heating Oil (gallons/year) Consumption Baseline": b_oil, "Residential: Heating Oil (gallons/year) Consumption Post Retrofit": r_oil, "Residential: Heating Oil (gallons/year) Consumption Savings": s_oil, "Residential: Heating Oil (gallons/year) Cost Baseline": b_oil_cost, "Residential: Heating Oil (gallons/year) Cost Post Retrofit": r_oil_cost , "Residential: Heating Oil (gallons/year) Cost Savings": s_oil_cost, "Residential: Heating Biomass (cords/year) Consumption Baseline": b_bio, "Residential: Heating Biomass (cords/year) Consumption Post 
Retrofit": r_bio, "Residential: Heating Biomass (cords/year) Consumption Savings": s_bio, "Residential: Heating Biomass (cords/year) Cost Baseline": b_bio_cost, "Residential: Heating Biomass (cords/year) Cost Post Retrofit": r_bio_cost, "Residential: Heating Biomass (cords/year) Cost Savings": s_bio_cost, "Residential: Electric Heat (kWh/year) Consumption Baseline": b_elec, "Residential: Electric Heat (kWh/year) Consumption Post Retrofit": r_elec, "Residential: Electric Heat (kWh/year) Consumption Savings": s_elec, "Residential: Electric Heat (kWh/year) Cost Baseline": b_elec_cost, "Residential: Electric Heat (kWh/year) Cost Post Retrofit": r_elec_cost, "Residential: Electric Heat (kWh/year) Cost Savings": s_elec_cost, "Residential: Heating Propane (gallons/year) Consumption Baseline": b_LP, "Residential: Heating Propane (gallons/year) Consumption Post Retrofit": r_LP, "Residential: Heating Propane (gallons/year) Consumption Savings": s_LP, "Residential: Heating Propane (gallons/year) Cost Baseline": b_LP_cost, "Residential: Heating Propane (gallons/year) Cost Post Retrofit": r_LP_cost, "Residential: Heating Propane (gallons/year) Cost Savings": s_LP_cost, "Residential: Heating Natural Gas (Mcf/year) Consumption Baseline": b_NG, "Residential: Heating Natural Gas (Mcf/year) Consumption Post Retrofit": r_NG, "Residential: Heating Natural Gas (Mcf/year) Consumption Savings": s_NG, "Residential: Heating Natural Gas (Mcf/year) Cost Baseline": b_NG_cost, "Residential: Heating Natural Gas (Mcf/year) Cost Post Retrofit": r_NG_cost, "Residential: Heating Natural Gas (Mcf/year) Cost Savings": s_NG_cost, "Residential: Total Cost Savings ($/year)": self.get_total_savings_costs(), "Residential: Net Benefit ($/year)": self.get_net_benefit(), }, years) try: df = df.round().astype(int) except ValueError: pass df = df[[ "Residential: Heating Oil (gallons/year) Consumption Baseline", "Residential: Heating Oil (gallons/year) Consumption Post Retrofit", "Residential: Heating Oil (gallons/year) Consumption Savings", "Residential: Heating Biomass (cords/year) Consumption Baseline", "Residential: Heating Biomass (cords/year) Consumption Post Retrofit", "Residential: Heating Biomass (cords/year) Consumption Savings", "Residential: Electric Heat (kWh/year) Consumption Baseline", "Residential: Electric Heat (kWh/year) Consumption Post Retrofit", "Residential: Electric Heat (kWh/year) Consumption Savings", "Residential: Heating Propane (gallons/year) Consumption Baseline", "Residential: Heating Propane (gallons/year) Consumption Post Retrofit", "Residential: Heating Propane (gallons/year) Consumption Savings", "Residential: Heating Natural Gas (Mcf/year) Consumption Baseline", "Residential: Heating Natural Gas (Mcf/year) Consumption Post Retrofit", "Residential: Heating Natural Gas (Mcf/year) Consumption Savings", "Residential: Heating Fuel All (MMBtu/year) Consumption Baseline", "Residential: Heating Fuel All (MMBtu/year) Consumption Post Retrofit", "Residential: Heating Fuel All (MMBtu/year) Consumption Savings", "Residential: Heating Oil (gallons/year) Cost Baseline", "Residential: Heating Oil (gallons/year) Cost Post Retrofit", "Residential: Heating Oil (gallons/year) Cost Savings", "Residential: Heating Biomass (cords/year) Cost Baseline", "Residential: Heating Biomass (cords/year) Cost Post Retrofit", "Residential: Heating Biomass (cords/year) Cost Savings", "Residential: Electric Heat (kWh/year) Cost Baseline", "Residential: Electric Heat (kWh/year) Cost Post Retrofit", "Residential: Electric Heat (kWh/year) 
Cost Savings", "Residential: Heating Propane (gallons/year) Cost Baseline", "Residential: Heating Propane (gallons/year) Cost Post Retrofit", "Residential: Heating Propane (gallons/year) Cost Savings", "Residential: Heating Natural Gas (Mcf/year) Cost Baseline", "Residential: Heating Natural Gas (Mcf/year) Cost Post Retrofit", "Residential: Heating Natural Gas (Mcf/year) Cost Savings", "Residential: Heating Fuel All (MMBtu/year) Cost Baseline", "Residential: Heating Fuel All (MMBtu/year) Cost Post Retrofit", "Residential: Heating Fuel All (MMBtu/year) Cost Savings", "Residential: Total Cost Savings ($/year)", "Residential: Net Benefit ($/year)" ]] df["community"] = self.cd['name'] df["population"] = self.forecast.get_population(self.start_year, self.end_year).astype(int) df = df[df.columns[-2:].tolist() + df.columns[:-2].tolist()] fname = os.path.join(directory, self.cd['name'] + '_' +\ self.component_name.lower() + "_output.csv") fname = fname.replace(" ","_") # save to end of project(actual lifetime) df.ix[:self.actual_end_year].to_csv(fname, index_label="year")
2.296875
2
nginx_log_monitor/clients/overwatch_client.py
messa/nginx-log-monitor
0
12791627
<gh_stars>0 from aiohttp import ClientSession from logging import getLogger from reprlib import repr as smart_repr logger = getLogger(__name__) post_timeout_s = 30 class OverwatchClientNotConfiguredError (Exception): pass class OverwatchClientReportError (Exception): pass class OverwatchClient: def __init__(self, client_session, report_url, report_token): self._session = client_session self._report_url = report_url self._report_token = report_token async def send_report(self, report_data): assert isinstance(report_data, dict) if not self._report_url: raise OverwatchClientNotConfiguredError('No report_url') if self._session is None: raise Exception('Not in context block') post_kwargs = dict( json=report_data, headers={ 'Accept': 'application/json', 'Authorization': 'token ' + self._report_token, }, timeout=post_timeout_s) logger.debug('Sending Overwatch report - POST %s with payload: %s', self._report_url, smart_repr(report_data)) try: async with self._session.post(self._report_url, **post_kwargs) as resp: logger.debug('Response: %r', resp) resp.raise_for_status() except Exception as e: raise OverwatchClientReportError('Failed to post report to {!r}: {!r}'.format(self._report_url, e))
2.234375
2
ACM ICPC/DP/MatrixChain_multiplication/matrixchain_mul.py
shreejitverma/GeeksforGeeks
2
12791628
<gh_stars>1-10 import sys def MatrixChainOrder(p, i, j): ''' Matrix A[i] has dimension p[i-1] x p[i] for i = 1..n ''' if i == j: return 0 _min = sys.maxsize # place parenthesis at different places # between first and last matrix, # recursively calculate count of # multiplications for each parenthesis # placement and return the minimum count for k in range(i, j): count = (MatrixChainOrder(p, i, k) + MatrixChainOrder(p, k + 1, j) + p[i - 1] * p[k] * p[j]) if count < _min: _min = count # Return minimum count return _min # Driver program to test above function arr = [1, 2, 3, 4, 3] n = len(arr) print("Minimum number of multiplications is ", MatrixChainOrder(arr, 1, n - 1))
3.5625
4
dev/frontend/controllers/__init__.py
frederikgram/describe
2
12791629
<reponame>frederikgram/describe from .template_builders import * from .actions import * from .startup import *
1.03125
1
streamlit_qa.py
manisnesan/fastchai
2
12791630
from transformers import pipeline import wikipedia import warnings import streamlit as st warnings.filterwarnings("ignore") def get_context_from_wiki(query: str) -> str: "Given a query, return the summary about the query from wikipedia" results = wikipedia.search(query) # There could be more than 1 due to Disambiguation issue try: summary = wikipedia.summary(results[0], sentences=10) except wikipedia.DisambiguationError as e: ambiguous_terms = e.options # take the first one from the list of ambiguous terms and try again return wikipedia.summary(ambiguous_terms[0], sentences=10) return summary def get_qa_pipeline(): qa_pipeline = pipeline("question-answering") return qa_pipeline def answer_question(pipeline, question, context): result = pipeline(question=question, context=context) #return f"Answer: {result['answer']}, score: {round(result['score'], 4)}, start: {result['start']}, end: {result['end']}" return result if __name__ == '__main__': st.title("Extractive Question Answering") pipeline = get_qa_pipeline() add_select_option = st.sidebar.selectbox( "Exploration Options", ("Query Based", "Paragraph based") ) if add_select_option == "Query Based": paragraph_slot = st.empty() query = st.text_area("WIKI SEARCH TERM", "") if query: context = get_context_from_wiki(query) paragraph_slot.markdown(context) elif add_select_option == "Paragraph based": question = st.empty() context = st.text_area("Enter the paragraph to explore", value="...") question = st.text_input("QUESTION", "") # print(f"Context: {context}\n") # print(f"Question: {question}\n") # print(answer_question(pipeline, question=question, context=context)) if question: try: answer = answer_question(pipeline, question=question, context=context) st.write(answer['answer']) except: st.write("Provide a valid paragraph")
3.46875
3
module_02/youtube_ex.py
AngieGarciaT/2021_python_selenium
0
12791631
<gh_stars>0 from selenium.webdriver.common.by import By from selenium.webdriver.support.select import Select from selenium.webdriver.support.wait import WebDriverWait from common.webdriver_factory import get_driver from selenium.webdriver.support import expected_conditions as EC driver = get_driver('chrome') driver.get('https://www.youtube.com/') wait = WebDriverWait(driver, 20) search_locator = (By.ID, 'search') search_box = wait.until(EC.element_to_be_clickable(search_locator)) search_box.send_keys("<PASSWORD>") search_btn_locator = (By.ID, 'search-icon-legacy') search_button = wait.until(EC.element_to_be_clickable(search_btn_locator)) search_button.click() results_locator = (By.ID, 'video-title') results = wait.until(EC.visibility_of_all_elements_located(results_locator)) print(f'Titulos: {len(results)}') for result in results: print(result.text) driver.quit()
3.25
3
src/sites/product_model.py
FelipeCRamos/Wishlist-Scrape
4
12791632
<filename>src/sites/product_model.py<gh_stars>1-10 class ProductModel: def __init__( self, title = '', price = 0.0, hasDiscount = False, hasError = False, isIndisponible = False ): self.title = title self.price = price self.hasDiscount = hasDiscount self.hasError = hasError self.isIndisponible = isIndisponible
2.234375
2
MFD_vio_plot.py
hghodke/mfd_FRET
0
12791633
<filename>MFD_vio_plot.py # -*- coding: utf-8 -*- """ Created on Sat Sep 25 11:19:10 2021 @author: ribis """ import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt df = pd.read_csv("C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p35_p5.csv") # df = pd.read_csv("C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/20211003_Lifetimes_summary_p7_p4.csv") df1 = df.iloc[:,4:7]  # columns to plot; alternative slices kept below # df1 = df.iloc[:,0:4] # df1 = df.iloc[:,7:16] print(df1.head()) sns.set_style("whitegrid") plt.figure(figsize=(5,5)) sns.violinplot(data=df1, cut=0) # sns.violinplot(data=df1, cut=0) plt.xticks(rotation=45, ha="right") plt.ylabel("Time (s)") plt.ylim(0, 80) plt.savefig("C:/Users/harshad/Dropbox/Mfd project/Mfd FRET paper/Data/Reviewer experiments/lifetimes/vioplot_p35_p5_NusG.eps", dpi=600)
2.25
2
basic/p_extract_clips.py
magland/pyms
1
12791634
<filename>basic/p_extract_clips.py<gh_stars>1-10 import numpy as np import sys,os parent_path=os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(parent_path) from mlpy import writemda64,writemda32,readmda,DiskReadMda from common import TimeseriesChunkReader # import the C++ code # we no longer use cppimport # import cppimport # cpp=cppimport.imp('basic_cpp') # Do this first: # g++ -O3 -Wall -shared -std=c++11 -fPIC `python3 -m pybind11 --includes` basic_cpp.cpp -o basic_cpp`python3-config --extension-suffix` -I../mlpy import basic_cpp as cpp processor_name='pyms.extract_clips' processor_version='0.1' def extract_clips(*,timeseries,firings,clips_out,clip_size=100): """ Extract clips corresponding to spike events Parameters ---------- timeseries : INPUT Path of timeseries mda file (MxN) from which to draw the event clips (snippets) firings : INPUT Path of firings mda file (RxL) where R>=2 and L is the number of events. Second row are timestamps. clips_out : OUTPUT Path of clips mda file (MxTxL). T=clip_size clip_size : int (Optional) clip size, aka snippet size, aka number of timepoints in a single clip """ F=readmda(firings) times=F[1,:] clips=extract_clips_helper(timeseries=timeseries,times=times,clip_size=clip_size) return writemda32(clips,clips_out) def extract_clips_helper(*,timeseries,times,clip_size=100,verbose=False): X=DiskReadMda(timeseries) M,N = X.N1(),X.N2() L=times.size T=clip_size extract_clips_helper._clips=np.zeros((M,T,L)) def _kernel(chunk,info): inds=np.where((info.t1<=times)&(times<=info.t2))[0] times0=times[inds]-info.t1+info.t1a clips0=np.zeros((M,clip_size,len(inds)),dtype=np.float32,order='F'); cpp.extract_clips(clips0,chunk,times0,clip_size) extract_clips_helper._clips[:,:,inds]=clips0 return True TCR=TimeseriesChunkReader(chunk_size_mb=100, overlap_size=clip_size*2, verbose=verbose) if not TCR.run(timeseries,_kernel): return None return extract_clips_helper._clips extract_clips.name=processor_name extract_clips.version=processor_version def test_extract_clips(): M,T,L,N = 5,100,100,1000 X=np.random.rand(M,N).astype(np.float32) writemda32(X,'tmp.mda') F=np.zeros((2,L)) F[1,:]=200+np.random.randint(N-400,size=(1,L)) writemda64(F,'tmp2.mda') ret=extract_clips(timeseries='tmp.mda',firings='tmp2.mda',clips_out='tmp3.mda',clip_size=T) assert(ret) clips0=readmda('tmp3.mda') assert(clips0.shape==(M,T,L)) t0=int(F[1,10]) a=int(np.floor((T+1)/2-1)) np.array_equal(clips0[:,:,10],X[:,t0-a:t0-a+T]) #np.testing.assert_almost_equal(clips0[:,:,10],X[:,t0-a:t0-a+T],decimal=4) return True extract_clips.test=test_extract_clips if __name__ == '__main__': print ('Running test') test_extract_clips()
2.234375
2
webapp/project.py
YajanaRao/WebApp
1
12791635
<filename>webapp/project.py from flask import render_template from webapp import app from webapp.route import bp from webapp.database import setup # app.register_blueprint(bp) setup() @app.route('/') def index(): return render_template('index.html') @app.errorhandler(404) def pagenotfound(error): return render_template('error.html')
2.15625
2
Exercicios do curso em video/pythonProject/pythonexercicios/ex029.py
HiCosta/Exercicios-de-Python
0
12791636
vel = int(input('Digite a velocidade que o carro está: ')) if vel > 80: print('Você foi multado em {} reais'.format((vel - 80) * 7))
3.6875
4
resource/globals.py
Kyando2/TLE
1
12791637
<filename>resource/globals.py import json def get_channels(): with open('bin/channels.json', 'r') as f: parsed = json.loads(f.read()) f.close() return parsed
2.453125
2
_templates/jinja2/cookiecutter-py/{{cookiecutter.name}}/choices/testfrwk/pytest/tests/test_classic.py
thebridge0491/intro_py
0
12791638
# -*- coding: utf-8 -*- '''Test cases for Classic module.'''from __future__ import (absolute_import, division, print_function, unicode_literals) import pytest from future.builtins import (ascii, filter, hex, map, oct, zip, object, range) from {{cookiecutter.parent}}.{{cookiecutter.project}} import classic def in_epsilon(val_a, val_b, tolerance=0.001): #return ((abs(val_a) - tolerance) <= abs(val_b) and # (abs(val_a) + tolerance) >= abs(val_b)) delta = abs(tolerance) #return (val_a - delta) <= val_b and (val_a + delta) >= val_b return (not (val_a + delta) < val_b) and (not (val_b + delta) < val_a) def bound_values(*min_max_groups): avg_vals = [(min_m + max_m) // 2 for (min_m, max_m) in min_max_groups] axis_bounds = [(min_m, min_m + 1, (min_m + max_m) // 2, max_m - 1, max_m) for (min_m, max_m) in min_max_groups] bound_vals = [tuple(avg_vals[:ndx] + [el] + avg_vals[(ndx + 1):]) for ndx, axis in enumerate(axis_bounds) for el in axis] return set(bound_vals) def setup_module(module): print("\nSetup module: {0}".format(module.__name__)) def teardown_module(module): print("\nTeardown module: {0}".format(module.__name__)) def setup_function(function): print("Setup function: {0}".format(function.__name__)) def teardown_function(function): print("\nTeardown function: {0}".format(function.__name__)) @pytest.fixture def fixture_func1(request): print("Setup function (Fixture1): {0}".format(request.function.__name__)) def fin(): print("\nTeardown function (Fixture1): {0}".format(request.function.__name__)) request.addfinalizer(fin) def test_fact(fixture_func1): for fn1, num in [(f, n) for (n,) in bound_values(*[(0, 18)]) # for n in [0, 9, 18] for f in [classic.fact_i, classic.fact_lp]]: assert ([1] + [a for a in [1] for b in range(1, num + 1) for a in [a * b]])[-1] == fn1(num) def test_expt(fixture_func1): for fn1, base, num in [(f, b, n) for (b, n) in bound_values(*[(2.0, 20.0), (3.0, 10.0)]) # for b in [2.0, 11.0, 20.0] for n in [3.0, 6.0, 10.0] for f in [classic.expt_i, classic.expt_lp]]: #assert (base ** num) == fn1(base, num) assert in_epsilon(base ** num, fn1(base, num), 0.001 * (base ** num))
2.296875
2
help/examples/only_analyze_demo.py
foxrenderfarm/rayvision_clarisse
1
12791639
<filename>help/examples/only_analyze_demo.py # -*- coding: utf-8 -*- """only analyze clarisse""" from rayvision_clarisse.analyse_clarisse import AnalyzeClarisse analyze_info = { "cg_file": r"D:\files\CG FILE\clarisse_test1.project", "workspace": "c:/workspace", "software_version": "clarisse_ifx_4.0_sp3", "project_name": "Project1", "plugin_config": {} } AnalyzeClarisse(**analyze_info).analyse()
1.640625
2
google-cloud-sdk/lib/third_party/cloud_ml_engine_sdk/features/__init__.py
bopopescu/searchparty
0
12791640
# Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes for defining the data used to train machine learning models. Data to be used in training, prediction and evaluation is described in terms of features. This module provides functionality to define those features, and data transformations to apply to produce those features. """ from _features import CategoricalFeatureColumn from _features import Feature from _features import FeatureColumn from _features import FeatureFormat from _features import FeatureMetadata from _features import ImageFeatureColumn from _features import KeyFeatureColumn from _features import NumericFeatureColumn from _features import TargetFeatureColumn from _features import TextFeatureColumn from _predict import FeatureProducer from _registries import register_analyzer from _registries import register_transformer from _transforms import ExampleProtoFormatter from _transforms import FeatureVector def key(name): """Creates a feature representing the key of the instance. Args: name: Name of feature column. Returns: An instance of KeyFeatureColumn. """ return KeyFeatureColumn(name) def target(name='target'): """Creates a feature representing the target label or value of the instance. Args: name: Name of feature column. Returns: An instance of TargetFeatureColumn. """ return TargetFeatureColumn(name) def numeric(name, default=None, log_base=0): """Creates a numeric column within a feature. Args: name: Name of feature column. default: Default value for the column. log_base: Base of logarithm to be applied. Returns: An instance of NumericFeatureColumn. """ return NumericFeatureColumn(name, default=default, log_base=log_base) def categorical(name, default=None, frequency_threshold=5, split_regex=None): r"""Creates a categorical or discrete value column within a feature. Args: name: Name of feature column. default: Default value for the column. frequency_threshold: Frequency threshold below which words are not added to the vocab. split_regex: Regex rule to extract the column value. Defaults to None, which means no splitting. Examples: - Use r'\w{1,}' to group alphanumerical characters of len 1. - Use r'\w{3,}' to group alphanumerical characters of len 3. - Use r'\S+' to group on non-whitespace. Returns: An instance of CategoricalFeatureColumn. """ return CategoricalFeatureColumn(name, default=default, frequency_threshold=frequency_threshold, split_regex=split_regex) def text(name, default=None, sampling_percentage=100, split_regex=r'\w{3,}', stop_words='english', use_stemmer=False, ngrams=1, use_tf_idf=False, normalize=False, strip_html=False, removable_tags=None, word2vec_dict=None, frequency_threshold=0): """Creates a free-form text value column within a feature. Args: name: Name of feature column. default: Default value for the column. sampling_percentage: Percentage value (0-100) for the number of rows that should be sampled for constructing the vocabulary/ngrams. 
split_regex: Regex rule to split text stop_words: Either list or set, specifying the stop words to be ignored or a string representing the language of stopwords to be requested from nltk. Use [] for no stopwords. For more info nltk.corpus.stopwords.readme() use_stemmer: Boolean on whether text should be stemmed ngrams: number of ngrams the tokenizer should generate (2 for bigrams etc) use_tf_idf: Boolean on whether the BOW representation should be tf*idf normalize: Boolean on whether sparse vector (BOW or tf*idf) should be normalize (used with L2 norm) strip_html: Boolean on whether html_markup should be removed before processing removable_tags: list of html tags whose text should be ignored word2vec_dict: Dictionary of word -> word_vectors. If it is not empty, then the words will be replaced with a matrix, one row for each word frequency_threshold: Frequency threshold below which words/ngrams are not added to the vocab. Returns: An instance of TextFeatureColumn. """ return TextFeatureColumn( name, default=default, sampling_percentage=sampling_percentage, split_regex=split_regex, stop_words=stop_words, use_stemmer=use_stemmer, ngrams=ngrams, use_tf_idf=use_tf_idf, normalize=normalize, strip_html=strip_html, removable_tags=removable_tags, word2vec_dict=word2vec_dict, frequency_threshold=frequency_threshold) def image(name, default=None): """Creates an image column within a feature.. Args: name: name of image feature default: Default value for the column. Returns: An instance of ImageFeatureColumn. """ return ImageFeatureColumn(name, default=default)
2.890625
3
main.py
daniel20159050454/Biblioteca
0
12791641
import PyQt5 from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5.QtWidgets import QMainWindow, QMessageBox, QApplication import sys from Telas_Usuario.tela_de_login import Ui_Tela_Login from Telas_Usuario.adm_livro import Ui_MainWindow as Ui_Adm_Livro from Telas_Usuario.cad_livro import Ui_MainWindow as Ui_Cadastro_Livro from Telas_Usuario.editar_livro import Ui_MainWindow as Ui_Editar_Livro from Telas_Usuario.listar_livro import Ui_MainWindow as Ui_Listar_Livro from Telas_Usuario.remover_livro import Ui_MainWindow as Ui_Remover_Livro import os from PyQt5.QtCore import pyqtSlot from firebase import firebase firebaseConfig = { 'apiKey': "<KEY>", 'authDomain': "biblioteca-b2317.firebaseapp.com", 'databaseURL': "https://biblioteca-b2317.firebaseio.com", 'projectId': "biblioteca-b2317", 'storageBucket': "", 'messagingSenderId': "1080656799035", 'appId': "1:1080656799035:web:0064e0d7e84c5e7d"} class Ui_Main(QtWidgets.QWidget): def setupUi(self, Main): Main.setObjectName('Main') Main.resize(800, 600) self.QtStack = QtWidgets.QStackedLayout() self.stack0 = QtWidgets.QMainWindow() self.stack1 = QtWidgets.QMainWindow() self.stack2 = QtWidgets.QMainWindow() self.stack3 = QtWidgets.QMainWindow() self.stack4 = QtWidgets.QMainWindow() self.stack5 = QtWidgets.QMainWindow() self.tela_login = Ui_Tela_Login() self.tela_login.setupUi(self.stack0) self.adm_livro = Ui_Adm_Livro() self.adm_livro.setupUi(self.stack1) self.cadastro_livro = Ui_Cadastro_Livro() self.cadastro_livro.setupUi(self.stack2) self.listar_livroo = Ui_Listar_Livro() self.listar_livroo.setupUi(self.stack3) self.editar_livroo = Ui_Editar_Livro() self.editar_livroo.setupUi(self.stack4) self.remover_livroo = Ui_Remover_Livro() self.remover_livroo.setupUi(self.stack5) self.QtStack.addWidget(self.stack0) self.QtStack.addWidget(self.stack1) self.QtStack.addWidget(self.stack2) self.QtStack.addWidget(self.stack3) self.QtStack.addWidget(self.stack4) self.QtStack.addWidget(self.stack5) class Main(QMainWindow, Ui_Main): def __init__(self, parent=None): super(Main, self).__init__(parent) self.setupUi(self) self._firebase = firebase(firebaseConfig) self.tela_login.entrar.clicked.connect(self.admnistracao_livro) self.adm_livro.cad_livro.clicked.connect(self.cadast_livro) self.adm_livro.list_livro.clicked.connect(self.listar_livros) self.adm_livro.edit_livro.clicked.connect(self.edit_livro) self.adm_livro.remove_livro.clicked.connect(self.deletar_livro) self.adm_livro.voltar.clicked.connect(self.voltar_P_login) self.cadastro_livro.voltar.clicked.connect(self.voltar_P_telaAdm) self.listar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.editar_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.remover_livroo.voltar.clicked.connect(self.voltar_P_telaAdm) self.cadastro_livro.salvar_livro.clicked.connect(self.cadastrando_livro) self.remover_livroo.bottonBisbn_2.clicked.connect(self.apagar) #self.remover_livroo.excluir_livro.clicked.connect(self.apagar) self.listar_livroo.bottonBisbn.clicked.connect(self.buscar) def admnistracao_livro(self): '''if self.tela_login.email_login.text() == '' or self.tela_login.senha.text() == '': return QMessageBox.about(self, 'Atenção', 'Desculpe, campos invalidos!') else:''' self.QtStack.setCurrentIndex(1) def cadast_livro(self): self.QtStack.setCurrentIndex(2) def listar_livros(self): self.QtStack.setCurrentIndex(3) def edit_livro(self): self.QtStack.setCurrentIndex(4) def deletar_livro(self): self.QtStack.setCurrentIndex(5) def voltar_P_login(self): self.QtStack.setCurrentIndex(0) def voltar_P_telaAdm(self): 
self.QtStack.setCurrentIndex(1) def cadastrando_livro(self): livro = { 'titulo': self.cadastro_livro.title.text(), 'autor' : self.cadastro_livro.autor.text(), 'isbn' : self.cadastro_livro.isbn.text(), 'editora' : self.cadastro_livro.editora.text(), } self._firebase.addLivro(livro) self.QtStack.setCurrentIndex(1) def apagar(self): result = self.remover_livroo.BuscaISBN_2.text() self._firebase.remover_Livro(result) self.QtStack.setCurrentIndex(1) def buscar(self): isbn = self.listar_livroo.BuscaISBN.text() if isbn == self._firebase.buscaFirebase(): self.listar_livroo.result_buscaLivro.setText('adasd') if __name__ == '__main__': app = QApplication(sys.argv) show_main = Main() sys.exit(app.exec_())
1.976563
2
plot_outs.py
dieman95/deepimportance_code_release
0
12791642
<gh_stars>0 import numpy as np import matplotlib as mpl mpl.use('Agg') import matplotlib.pyplot as plt from keras.models import load_model, model_from_json from utils import load_MNIST from utils import get_layer_outs_new from utils import filter_val_set from utils import load_layerwise_relevances from lrp_toolbox.model_io import read experiment_folder = 'experiments' selected_class = 0 X_train, Y_train, X_test, Y_test = load_MNIST(channel_first=False) img_rows, img_cols = 28, 28 X_test, Y_test = filter_val_set(selected_class, X_test, Y_test) relevant_neurons = load_layerwise_relevances('%s/%s_%d_%d_%d' %(experiment_folder, 'LeNet5', 8, #rn selected_class, 7)) #layer print(relevant_neurons) json_file = open('neural_networks/LeNet5.json', 'r') #Read Keras model parameters (stored in JSON file) file_content = json_file.read() json_file.close() model = model_from_json(file_content) model.load_weights('neural_networks/LeNet5.h5') # Compile the model before using model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) outfile = open('outfile.csv', 'w') relfile = open('relfile.csv', 'w') outs = get_layer_outs_new(model, X_test) preds = model.predict(X_test) lrpmodel = read('neural_networks/LeNet5.txt', 'txt') # 99.16% prediction accuracy lrpmodel.drop_softmax_output_layer() # drop softnax output layer for analysis Rs = [] for inp in X_test: ypred = lrpmodel.forward(np.expand_dims(inp, axis=0)) mask = np.zeros_like(ypred) mask[:,np.argmax(ypred)] = 1 Rinit = ypred*mask R_inp, R_all = lrpmodel.lrp(Rinit,'alphabeta',3) Rs.append(R_all[-1]) for i in range(len(X_test)): #100 inputs #out_data = [] #rel_data = [] out_row = '' rel_row = '' for j in range(outs[-3].shape[-1]): #out_data.append(outs[-3][j][i]) #rel_data.append(Rs[j][0][i]) out_row += str(outs[-3][i][j]) + ',' rel_row += str(Rs[i][0][j]) + ',' out_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\n' rel_row += str(Y_test[i].argmax(axis=-1)) + ',' + str(preds[i].argmax(axis=-1)) + '\n' outfile.write(out_row) relfile.write(rel_row) outfile.close() relfile.close() # plt.clf() # plt.plot(range(10), out_data) # plt.plot(range(10), rel_data) # plt.savefig("./plots/plt"+str(i)+".png") ''' for i in range(outs[-3].shape[-1]): out_data = [] for j in range(10): #100 inputs out_data.append(Rs[j][0][i]) plt.clf() plt.plot(range(10), out_data) plt.savefig("./plots/rel"+str(i)+".png") '''
2.34375
2
analysis.py
lanl/SHELTIE
2
12791643
<gh_stars>1-10 #!/usr/bin/env python2 from test_json import test_json from cleansing import parse_sublogs, build_graph import os.path import subprocess import sys import json import argparse from git import Repo import re import pprint import git import matplotlib import binascii parser = argparse.ArgumentParser() parser.add_argument("repo_dir", help="The repo that contains the productivity logs as notes") args = parser.parse_args() if(not os.path.isdir(os.path.join(args.repo_dir, ".git"))): print "Cannot find %s directory. Not a git repo." % os.path.join(args.repo_dir, ".git") sys.exit(1) PRODUCTIVITY_NOTES_NAMESPACE="refs/notes/productivity" repo = Repo(args.repo_dir) #commits = list(repo.iter_commits("sharrell")) commits = list(build_graph(repo).nodes) #git notes --ref refs/notes/productivity show # git notes --ref refs/notes/productivity show eaa1b0f4a7ee65ab33d0ec0e28f6fdc04fd8fbe2 logs = [] for commit in commits: try: logs.append([commit.hexsha, subprocess.check_output(["git", "--git-dir", os.path.join(args.repo_dir, ".git"), "notes", "--ref", PRODUCTIVITY_NOTES_NAMESPACE, "show", commit.hexsha])]) except: pass log_list = [] for hexsha, log in logs: try: for sublog in parse_sublogs(log): log_list.append([hexsha, sublog]) except: print(hexsha) import pprint pp = pprint.PrettyPrinter(indent=4) #pp.pprint(log_list) lines_per_commit = [] total_time_per_commit = [] for log in log_list: report = log[1]['log'] if not 'user_responses' in report: continue if len(report) == 0: continue user_responses = report['user_responses'] if len(user_responses) == 1: continue time_categories = report['user_responses']['time_categories'] #nasa_tlx = report['NASA-TLX'] for category in time_categories: if 'refectoring' in category: break else: time_categories['refactoring'] = {u'time_spent': 0.0, u'difficulty': 0} #pp.pprint(log) total_time = (time_categories['planning']['time_spent'] + time_categories['coding']['time_spent'] + time_categories['refactoring']['time_spent'] + time_categories['debugging']['time_spent'] + time_categories['optimising']['time_spent']) total_time_per_commit.append(total_time) commit = git.Commit(repo, binascii.unhexlify(log[0])) lines_changed = 0 print commit.diff(commit.parents[0])[0].diff diff = subprocess.check_output(["git", "--git-dir", os.path.join(args.repo_dir, ".git"), 'diff', commit.hexsha, commit.parents[0].hexsha]) sloc = 0 for line in diff.split('\n'): if not (line.startswith("-") or line.startswith("+")): continue if line.strip() == "+" or line.strip() == "-": continue if line.startswith('+++') or line.startswith('---'): continue sloc += 1 total_time_per_commit = sloc
2.328125
2
blinky/blinky.py
tve/mpy-mqtt
80
12791644
<filename>blinky/blinky.py<gh_stars>10-100 import logging log = logging.getLogger(__name__) from uasyncio import Loop as loop, sleep_ms from board import act_led class Blinker: def __init__(self, mqclient, topic, period): self.mqclient = mqclient self.topic = topic self.period = period async def blinker(self): while True: act_led(1) await sleep_ms(self.period // 2) act_led(0) await sleep_ms(self.period // 2) def period(self, millisecs): self.period = millisecs def on_msg(self, topic, msg, retained, qos, dup): topic = str(topic, "utf-8") log.info("on_msg: %s (len=%d ret=%d qos=%d dup=%d)", topic, len(msg), retained, qos, dup) if topic == self.topic: try: p = int(msg) if p < 50 or p > 10000: raise ValueError("period must be in 50..10000") self.period = p except Exception as e: log.exc(e, "Invalid incoming message") async def hook_it_up(self, mqtt): log.info("hook_it_up called") mqtt.on_msg(self.on_msg) await mqtt.client.subscribe(self.topic, qos=1) log.info("Subscribed to %s", self.topic) # start is called by the module launcher loop in main.py; it is passed a handle onto the MQTT # dispatcher and to the "blinky" config dict in board_config.py def start(mqtt, config): period = config.get("period", 1000) # get period from config with a default of 1000ms log.info("start called, period=%d", period) bl = Blinker(mqtt.client, config["topic"], period) loop.create_task(bl.blinker()) mqtt.on_init(bl.hook_it_up(mqtt))
2.875
3
speech_mixer.py
ZhihaoDU/speech_feature_extractor
111
12791645
<reponame>ZhihaoDU/speech_feature_extractor # coding = utf-8 import numpy as np from read_sphere_wav import read_sphere_wav from scipy.io import wavfile from feature_extractor import * from matplotlib import pyplot as plt def SNR(x1, x2): from numpy.linalg import norm return 20 * np.log10(norm(x1) / norm(x2)) def mix_by_db(x1, x2, snr, handle_method): x1 = x1.astype(np.int32) x2 = x2.astype(np.int32) l1 = x1.shape[0] l2 = x2.shape[0] if l1 != l2: if handle_method == 'cut': ll = min(l1, l2) x1 = x1[:ll] x2 = x2[:ll] elif handle_method == 'append': ll = max(l1, l2) if l1 < ll: x1 = np.append(x1, x1[:ll-l1]) if l2 < ll: x2 = np.append(x2, x2[:ll-l1]) from numpy.linalg import norm x2 = x2 / norm(x2) * norm(x1) / (10.0 ** (0.05 * snr)) mix = x1 + x2 return mix if __name__ == '__main__': speech_data, wav_header = read_sphere_wav(u"/media/neo/000C6F0F00042510/Doctor/dataset/TIMIT/train/dr1/fcjf0/sa1.wav") fs, noise_data = wavfile.read('/media/neo/000C6F0F00042510/Doctor/dataset/DEMAND/PCAFETER/ch01.wav') plt.figure() spect = log_power_spectrum_extractor(speech_data, 320, 160, 'hanning', True) plt.subplot(311) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(312) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, 0, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(313) plt.imshow(spect) plt.figure() noisy_speech = mix_by_db(speech_data, noise_data, -5, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(211) plt.imshow(spect) noisy_speech = mix_by_db(speech_data, noise_data, -10, 'cut') spect = log_power_spectrum_extractor(noisy_speech, 320, 160, 'hanning', True) plt.subplot(212) plt.imshow(spect) plt.show() #sd.play(noisy_speech.astype(np.int32), fs, blocking=True)
2.765625
3
pickle_details.py
garudlab/mother_infant
0
12791646
<gh_stars>0 # Script to pickle very specific information (allele identity and counts) # for small set of given QP pairs at given sites from utils import sample_utils as su, parse_midas_data, substitution_rates_utils, config, temporal_changes_utils, snps_utils, core_gene_utils, gene_diversity_utils import numpy as np from numpy.random import choice, random as np_random, randint import random from collections import defaultdict import pickle import bz2 import numpy import os, sys # Desired samples and sites # sites are given as gene_id, contig, location tuples # inconsistent but bleh desired_host_species_sites = [(['M0806-C'], \ 'Bacteroides_vulgatus_57955', \ [('435590.9.peg.1499', 'NC_009614', 1915720L), \ ('435590.9.peg.1499', 'NC_009614', 1915720L)]), \ (['3-I'], \ 'Bifidobacterium_catenulatum_58257', \ [('566552.4.peg.96', 'NZ_ABXY01000001', 124565L), \ ('566552.4.peg.96', 'NZ_ABXY01000001', 124565L)]), \ (['67-I'], \ 'Bacteroides_fragilis_54507', \ [('1339327.3.peg.4421', 'JGDJ01000264', 9480L), \ ('1339327.3.peg.4421', 'JGDJ01000264', 9480L)]), \ (['M0901-C'], \ 'Blautia_wexlerae_56130', \ [('1121115.4.peg.32', 'AXVN01000001', 35625L), \ ('1121115.4.peg.32', 'AXVN01000001', 35625L)]), \ (['1-I'], \ 'Bacteroides_fragilis_54507', \ [('1339327.3.peg.2283', 'JGDJ01000171', 5687L), \ ('1339327.3.peg.2283', 'JGDJ01000171', 5687L)])] # desired_host_species_sites = [(['C02143-I', 'C02143-M'], 'Bifidobacterium_bifidum_55065', [('500634.3.peg.1861','AWSW01000054',37945), ('500634.3.peg.945','AWSW01000030',35925), ('500634.3.peg.1636','AWSW01000046',21960), ('500634.3.peg.875','AWSW01000027',7916), ('500634.3.peg.952','AWSW01000030',45351), ('500634.3.peg.1619','AWSW01000046',4226)]), \ # (['59-I', '59-M'], 'Bifidobacterium_adolescentis_56815', [('592977.3.peg.642','JGZQ01000005',14361),('592977.3.peg.860','JGZQ01000006',69020),('592977.3.peg.1216','JGZQ01000008',284119),('592977.3.peg.1129','JGZQ01000008',186577), ('592977.3.peg.39','JGZQ01000001',53358), ('592977.3.peg.1732','JGZQ01000009',58203), ('592977.3.peg.1705','JGZQ01000009',29026)])] # desired_samples = ['ERR3405741', 'ERR3405661', 'ERR3406235'] # =========================================================================== # Loads allele counts for specific samples at specific sites # where sites are provided as (contig, location, gene_id) tuples # TODO: move to parse_midas_data later # =========================================================================== def parse_snps_specify_sites_details(species_name, desired_samples=[], desired_sites=[], prev_cohort='all'): # Alternate version without gene names desired_sites_no_gene = [(contig, location) for contig, location, gene_id in desired_sites] # SNPs directory snps_dir = "%s/snps/%s/" % (config.data_directory, species_name) # Load population freqs (for polarization purposes) population_freqs = snps_utils.parse_population_freqs(prev_cohort, species_name, polarize_by_consensus=False) # Open post-processed MIDAS output snp_file = bz2.BZ2File("%s/annotated_snps.txt.bz2" % snps_dir, 'r') # ===================================================================== # Process allele information # ===================================================================== # sample -> site -> (ref allele, alt allele) sample_site_allele_dict = defaultdict(dict) # Load snps_alt_allele.txt snp_alleles_file = bz2.BZ2File("%s/snps_alt_allele.txt.bz2" % snps_dir, 'r') items = snp_alleles_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for sample 
in desired_samples: if sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) for line in snp_alleles_file: items = line.split() # Load information about site info_items = items[0].split("|") contig = info_items[0] location = long(info_items[1]) ref_allele = info_items[2] if (contig, location) not in desired_sites_no_gene: continue for idx in desired_sample_idxs: alt_allele = items[1+idx] sample = samples[idx] sample_site_allele_dict[sample][(contig, location)] = (ref_allele, alt_allele) snp_alleles_file.close() # ===================================================================== # Process annotated_snps information # ===================================================================== # Open post-processed MIDAS output snp_file = bz2.BZ2File("%s/annotated_snps.txt.bz2" % snps_dir, 'r') # Get lists of desired sample idxs items = snp_file.readline().strip().split()[1:] samples = list(su.parse_merged_sample_names(items)) desired_sample_idxs = [] for sample in desired_samples: if sample in samples: desired_sample_idxs.append(samples.index(sample)) desired_sample_idxs = numpy.array(sorted(desired_sample_idxs)) # Map: sample -> site (contig, location, gene_id) -> allele count allele_counts_map = defaultdict(dict) # Map: site (contig, location, gene_id) -> variant type variant_type_map = {} num_sites_processed = 0 # Loop over sites in annotated_snps.txt file for line in snp_file: if num_sites_processed>0 and num_sites_processed%10000==0: sys.stderr.write("%d0k sites processed...\n" % (num_sites_processed/10000)) num_sites_processed += 1 items = line.split() # Load information about site info_items = items[0].split("|") contig = info_items[0] location = long(info_items[1]) gene_name = info_items[2] variant_type = info_items[3] polarization = 'R' # note R was assigned indiscriminately pvalue = float(info_items[5]) # Only look at sites of interest if (contig, location, gene_name) not in desired_sites: continue # Store variant type variant_type_map[(contig, location, gene_name)] = variant_type # Store alt and depth counts at this site for all desired samples for idx in desired_sample_idxs: alt, depth = [float(num) for num in items[1+idx].split(",")] sample = samples[idx] allele_counts_map[sample][(contig, location, gene_name)] = (alt, depth) snp_file.close() return allele_counts_map, sample_site_allele_dict, variant_type_map # Load a few things subject_sample_map = su.parse_subject_sample_map() # Set up pickle directory ddir = config.data_directory pdir = "%s/pickles/reversion_examples/" % (ddir) os.system('mkdir -p %s' % pdir) # Store these two dicts for each host-species pair for subjects, species, sites in desired_host_species_sites: # Get all samples within the host desired_samples = [] for subject in subjects: desired_samples += subject_sample_map[subject].keys() # Reformat sites desired_sites = [] for gene_id, contig, location in sites: desired_sites.append((contig, location, gene_id)) # Obtain desired dicts allele_counts_map, sample_site_allele_dict, variant_type_map = parse_snps_specify_sites_details(species, desired_samples, desired_sites=desired_sites) # Pickle dicts sys.stderr.write("Pickling...\n") pickle.dump((allele_counts_map, sample_site_allele_dict, variant_type_map), open('%s/allele_info_%s_%s.pkl' % (pdir, ('_').join(subjects), species), 'wb')) sys.stderr.write("Done!\n")
2.109375
2
tests/conftest.py
zillow/aiographite
14
12791647
<reponame>zillow/aiographite import pytest @pytest.fixture def metric_parts(): return ['sproc performance', '<EMAIL>', '::EH12'] @pytest.fixture def timestamp(): return 1471640923 @pytest.fixture def metric_value_tuple_list(): return [ ('zillow', 124), ('trulia', 223), ('hotpad', 53534), ('streeteasy', 13424)] @pytest.fixture def metric_value_timestamp_list(): return [ ('zillow', 124, 1471640958), ('trulia', 223, 1471640923), ('hotpad', 53534, 1471640943), ('streeteasy', 13424, 1471640989)]
1.796875
2
aedes_server/core/management/commands/compute_clusters.py
henriquenogueira/aedes
0
12791648
from aedes_server.core.clusters import compute_clusters from django.core.management import BaseCommand class Command(BaseCommand): help = 'Calculate clusters for AedeSpot app.' def handle(self, *args, **options): ''' Computing clusters. ''' compute_clusters()
1.828125
2
pymusicterm/ui/labels.py
EGAMAGZ/Terminal-Music-Player
2
12791649
import py_cui from pymusicterm.music import SongFile from pymusicterm.util.file import File, FileMetadata class SongInfoBlockLabel: _row:int=0 _column:int=2 _row_span:int=2 _column_span:int=3 _center:bool=False window:py_cui.widget_set.WidgetSet def __init__(self,window:py_cui.widget_set.WidgetSet): self.window=window self.block_label=self.window.add_block_label(self._initial_text(),self._row,self._column, row_span=self._row_span,column_span=self._column_span,center=self._center) self.__config() def _initial_text(self): file_path=File().get_file_path() text=""" Actual path: {} No Song Selected """.format(file_path) return text def set_song_info(self,song_file:SongFile): pass def __config(self): """ Function that configure the widget """ self.block_label._draw_border=True
2.75
3
jcasts/podcasts/migrations/0043_alter_podcast_hub_token.py
danjac/jcasts
13
12791650
# Generated by Django 3.2.7 on 2021-09-18 10:42 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("podcasts", "0042_podcast_hub_exception"), ] operations = [ migrations.AlterField( model_name="podcast", name="hub_token", field=models.UUIDField(blank=True, editable=False, null=True, unique=True), ), ]
1.554688
2