Columns (each record below lists its values in this order):
  query_id: string (length 32 to 32)
  query: string (length 9 to 4.01k)
  positive_passages: list (length 1 to 1)
  negative_passages: list (length 88 to 101)
query_id: 95eb8785976f83f94f082197f7eb07b6
query: Base scoring of a misere contract round.
[ { "docid": "3c27f353c5304059c0ee539b1c8b74c9", "score": "0.0", "text": "def score_misere(self, player, tricks):\n # Successful misere, player receives value of contract into the pool\n if tricks == 0:\n self.inc_pool(player, self.CONTRACT_VALUE[MISERE])\n # Failed misere, player receives value of contract for every trick taken\n # into the hill.\n else:\n self.inc_hill(player, self.CONTRACT_VALUE[MISERE] * tricks)\n # All other opponents (including dealer) write a consolation in whists\n # against player.\n self.score_consolation(player, tricks)", "title": "" } ]
[ { "docid": "b64b6377b3fb44c0722b7c52a8961ab4", "score": "0.64262015", "text": "def score(self, round_number: int = 0) -> int:\n # We are assuming 1-indexed round numbers\n if round_number:\n if round_number > len(self.hand):\n return 0\n return self.hand[round_number - 1].score\n return sum([card.score for card in self.hand])", "title": "" }, { "docid": "8812575ea77fc8af021cce36a6b3d72c", "score": "0.63496363", "text": "def get_default_score(self):\n raise NotImplementedError", "title": "" }, { "docid": "1055f7c49d0dfecf13ef5a218a9fddfe", "score": "0.6339408", "text": "def scoring_algorithm(self):\n\n self.final_score = 0\n if self.board_size == 3:\n self.final_score += 2000\n elif self.board_size == 5:\n self.final_score += 4000\n elif self.board_size == 7:\n self.final_score += 6000\n\n if self.limit == 3:\n self.final_score += 4000\n elif self.limit == 5:\n self.final_score += 6000\n elif self.limit == 7:\n self.final_score += 8000\n\n self.final_score = int(self.final_score / (self.score + 1))", "title": "" }, { "docid": "1faaba2f3e464c2bb1d5d789d4aa2514", "score": "0.6198906", "text": "def score_round(f_round: Round) -> (int, int):\n return score_tricks(f_round.tricks, f_round.trump_caller, f_round.trump)", "title": "" }, { "docid": "219f292290a5a588c948135a2b0b736c", "score": "0.6119075", "text": "def custom_score_10(game,player): \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n y, x = game.get_player_location(player)\n d= float((3 - y)**2 + (3 - x)**2)/10.\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)-d", "title": "" }, { "docid": "645900bf685ab5a4f7be39271e05c43b", "score": "0.6086114", "text": "def custom_score(game, player):\r\n ## Number of legal moves by player divided by number of legal moves by\r\n ## opponent. 
Returns higher values the less moves the opponent has.\r\n if float(len(game.get_legal_moves(player)))== 0:\r\n return(float(0))\r\n elif float(len(game.get_legal_moves(game.get_opponent(player)))) == 0 :\r\n return(float(\"inf\"))\r\n else:\r\n return(5*float(len(game.get_legal_moves(player)))/float(len(game.get_legal_moves(game.get_opponent(player)))))", "title": "" }, { "docid": "8259149c614195b5f3b14212bde08e30", "score": "0.608317", "text": "def calculate_score(self):\n if self.buys or self.sells:\n self.score = (self.buys - self.sells) / float(self.buys + self.sells)", "title": "" }, { "docid": "f5722e57aa90b480f69b717b8bc28a3f", "score": "0.6071542", "text": "def get_r_score(self):\n return self.r_score", "title": "" }, { "docid": "912a1ac007fed1b1234e6d3733fc0bc5", "score": "0.60586816", "text": "def current_score(game, turn_total):\n pass", "title": "" }, { "docid": "912a1ac007fed1b1234e6d3733fc0bc5", "score": "0.60586816", "text": "def current_score(game, turn_total):\n pass", "title": "" }, { "docid": "80322a501853e2180fd64db5727d1835", "score": "0.6041727", "text": "def custom_score_5(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n #opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves)", "title": "" }, { "docid": "e6ff0e0fac836aa33141a6b8819875a2", "score": "0.59925437", "text": "def pre_round(self):\r\n self.rounds += 1\r\n self.log.info(\"Round %s\" % self.rounds)\r\n if self.rounds > 30:\r\n self.end_combat()\r\n return\r\n if self.check_victory():\r\n self.end_combat()\r\n return\r\n # for attackers: try to rally units who are routing then get targs\r\n self.formation_atk.check_rally()\r\n self.formation_atk.get_targs_for_units(self.formation_def)\r\n # for defenders: try to rally units who are routing then get targs\r\n self.formation_def.check_rally()\r\n self.formation_def.get_targs_for_units(self.formation_atk)\r\n self.combat_round()", "title": "" }, { "docid": "932b1835a741178badbbf8afda48003a", "score": "0.5982354", "text": "def custom_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n\n #return improved_improved_score(game, player)\n #return search_one_extra_penalty(game, player)\n return search_one_extra(game, player)", "title": "" }, { "docid": "a425a092ba37e2fe6bd9c5a04fae672e", "score": "0.5975383", "text": "def score():\n raise NotImplementedError(\"score() is not implemented for the base\" +\n \"Class\")", "title": "" }, { "docid": "632536739476b569f4d6bbf3efea791a", "score": "0.59735644", "text": "def custom_score(game, player):\n\n # TODO: finish this function!\n \n own_moves = game.get_legal_moves(player)\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n \n if not opp_moves and game.inactive_player == player:\n return float(\"inf\")\n \n elif not own_moves and game.active_player == player:\n return float(\"-inf\")\n \n \n # for the first 3 moves in the game the agent goes after opponent aggresively to minimize its movements\n # coeefficient 4 is obtained through grid search in range of [1,5]\n elif GetOpenSpaces(game) > 43:\n return float(len(own_moves) - 4 * len(opp_moves))\n \n # score metric is evaluated by going deep one level and calculates average number of moves available at the next level\n else:\n temps = []\n score = 0\n for move in game.get_legal_moves():\n game2 = 
game.forecast_move(move)\n temp = len(game2.get_legal_moves())\n if game2.active_player == player:\n temps.append(temp)\n else:\n temps.append(-temp)\n if temps:\n score = sum(temps) / float(len(temps))\n\n return float(len(own_moves) - 0.2 * len(opp_moves) + .5 * score)", "title": "" }, { "docid": "894b512c455a5861d7738f12c24c9bed", "score": "0.5972578", "text": "def custom_score_4(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n \n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "title": "" }, { "docid": "61c55d6a2518b526ab2b73e20a622f8b", "score": "0.59350026", "text": "def custom_score(game, player):\n #PlayerCentralityRatio is winner, please see below for definition\n score = custom_score1(game, player) \n return score", "title": "" }, { "docid": "25b4d9660938e956df7e96c933685553", "score": "0.59342223", "text": "def test_brevity_score_custom_r(self):\n\n algo = DGS()\n\n text = 'a pipe is not a cigar'\n self.assertEqual(0.84648, round(algo._brevity_score(text, r=7), 5))\n text = 'a pipe is not a cigar'\n self.assertEqual(0.60653, round(algo._brevity_score(text, r=9), 5))", "title": "" }, { "docid": "b4c1e949e9ca524c4b881135324510ea", "score": "0.5923257", "text": "def custom_score_2(game, player):\r\n ## custom_score with the addition of the number of legal moves of the player\r\n ## minus the number of legal moves by the opponent. Hoping to get a result\r\n ## of \"best of both worlds\" here.\r\n if float(len(game.get_legal_moves(player)))== 0:\r\n return(float(0))\r\n elif float(len(game.get_legal_moves(game.get_opponent(player)))) == 0:\r\n return(float(\"inf\"))\r\n else:\r\n subtract = 2*float(len(game.get_legal_moves(player)))-float(len(game.get_legal_moves(game.get_opponent(player))))\r\n divide = 5*float(len(game.get_legal_moves(player)))/float(len(game.get_legal_moves(game.get_opponent(player))))\r\n return(subtract+divide)", "title": "" }, { "docid": "d15d5d88dd84ad1c03a396e6bbeb5f7a", "score": "0.5916837", "text": "def get_score(self):\n raise Exception(\"Not implemented!\")", "title": "" }, { "docid": "d4bf2b92e72fbc1599ddf44ca00914ee", "score": "0.5913574", "text": "def custom_score_1(game,player): \n\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n return 0", "title": "" }, { "docid": "14501a302db52b6dfb42b5a71ad251e6", "score": "0.589344", "text": "def custom_score(game, player):\n # TODO: finish this function!\n # raise NotImplementedError\n\n\n # test for terminal conditions\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n \n # this is the improved_score heuristic\n # temporary testing for now\n #own_moves = len(game.get_legal_moves(player))\n #opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n #return float(own_moves - opp_moves)\n\n\n # Short Term Fortune Teller\n # This heuristic simply calculates the number of escape moves for the current legal move list. The intent is to reward \n # boards that have the most options for escaping. 
Allows the current iteration so see one level past the current one\n own_moves = game.get_legal_moves(player)\n own_escape_count = 0\n\n for move in own_moves:\n forecasted_game = game.forecast_move(move)\n own_escape_count += len(forecasted_game.get_legal_moves(player))\n\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n opp_escape_count = 0\n\n for move in opp_moves:\n forecasted_game = game.forecast_move(move)\n opp_escape_count += len(forecasted_game.get_legal_moves(game.get_opponent(player))) \n\n return float(own_escape_count - opp_escape_count)", "title": "" }, { "docid": "7e35a60f6ffe71d1559c7a143268bf63", "score": "0.58902305", "text": "def custom_score_9(game,player): \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n y, x = game.get_player_location(player)\n d= float((3 - y)**2 + (3 - x)**2)/10.\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)+d", "title": "" }, { "docid": "152fcd27832ef390a536508f64d30d76", "score": "0.5866196", "text": "def custom_score_3(game, player):\r\n ## Same idea as custom_score_2, except that \"subtract\" is less heavily\r\n ## weighted as divide.\r\n if float(len(game.get_legal_moves(player)))== 0:\r\n return(float(0))\r\n elif float(len(game.get_legal_moves(game.get_opponent(player)))) == 0:\r\n return(float(\"inf\"))\r\n else:\r\n subtract = float(len(game.get_legal_moves(player)))-float(len(game.get_legal_moves(game.get_opponent(player))))\r\n divide = 5*float(len(game.get_legal_moves(player)))/float(len(game.get_legal_moves(game.get_opponent(player))))\r\n return(subtract+divide)", "title": "" }, { "docid": "c094d10c89c3cd1074e3e285a7d2d9ec", "score": "0.58510166", "text": "def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n # if own_moves == 0 and opp_moves != 0:\n # return float(\"-inf\")\n # elif opp_moves == 0 and own_moves != 0:\n # return float(\"inf\")\n\n return quad_score[own_moves][opp_moves]", "title": "" }, { "docid": "bc784bc139c84697f5cfca53947dcfd7", "score": "0.5833202", "text": "def __reward(self, game):\n if game.winner == self.value_player:\n return 1.0\n elif game.winner:\n return -1.0\n else:\n return 0.0", "title": "" }, { "docid": "e4130c9fba94d40e914f5eb31af7d1db", "score": "0.5831396", "text": "def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n return mixed_score[own_moves][opp_moves]", "title": "" }, { "docid": "41c44e93836e33ea8029b286e2a808b8", "score": "0.5821888", "text": "def custom_score(game, player):\n\n # If the board is empty, or the player has won or lost, use the const score.\n res = const_score(game, player)\n if res:\n return res\n\n # Forward to the selected heuristic\n return float(look_ahead_improved_score_v3(game, player))", "title": "" }, { "docid": "b0967cdf8fbaa813208d47fb42ec8cc4", "score": "0.58130956", "text": "def raw_score( self ):\n\t\tfor quad in self.dicQuadsTraj_Init.keys():\n\t\t\tself.dicQuadsTraj[quad] = self.dicQuadsTraj_Init[quad]\n\t\tcorr_w= 0.\n\t\tfor accCorr in 
self.accCorrs:\n\t\t\taccCorr.addOrbitChange(self.dicQuadsTraj)\n\t\t\tcorr_w = corr_w + accCorr.getWeight()\n\t\tdiff2 = 0.\n\t\ti = 0\n\t\tfor quad in self.dicQuadsTraj.keys():\n\t\t\tdiff = self.dicQuadsTraj[quad]\n\t\t\tdiff2 = diff2 + diff*diff\n\t\t\ti = i + 1\n\t\tif(i > 0): diff2=diff2/i\n\t\treturn (diff2+corr_w)", "title": "" }, { "docid": "04ee76795696aaaaecc7d791717e1935", "score": "0.5806661", "text": "def _calc_score(self):\n profit = -123456\n if self._actions:\n actions_copy = copy.copy(self._actions)\n\n if actions_copy[-1]['direction'] == 'b':\n del actions_copy[-1]\n\n spent = 0\n earned = 0\n\n for action in actions_copy:\n if action['direction'] == 'b':\n spent += float(action['price']) * float(action['coins'])\n if action['direction'] == 's':\n earned += float(action['purse'])\n profit = earned - spent\n\n return profit", "title": "" }, { "docid": "392b0c1e1f624ce6b12a3986e4cb9645", "score": "0.58028376", "text": "def get_canasta_score(self):\n if self.is_dirty():\n return Constants.DIRTY_SCORE\n elif self.is_pure():\n return Constants.PURE_SCORE\n elif self.is_five_hundred():\n return Constants.FIVE_HUNDRED_SCORE\n elif self.is_thousand():\n return Constants.THOUSAND_SCORE\n else:\n return 0", "title": "" }, { "docid": "8943dd8d007f85bc083d00f334265a11", "score": "0.5791184", "text": "def custom_score(game, player):\n # TODO: finish this function!\n if game.is_winner(player):\n return float(\"inf\")\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n center = game.width/2\n\n own_position = game.get_player_location(player)\n opp_position = game.get_player_location(game.get_opponent(player))\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n own_dist_x = abs(center - own_position[0])\n own_dist_y = abs(center - own_position[1])\n\n opp_dist_x = abs(center - opp_position[0])\n opp_dist_y = abs(center - opp_position[1])\n\n return float(10 * (own_moves - opp_moves) +\n (own_dist_x + own_dist_y) - (opp_dist_x + opp_dist_y))", "title": "" }, { "docid": "9b18b6194b1213bcd7a1637967314618", "score": "0.5779885", "text": "def desire_score(self, id):\n prod = self.get_prod(id)\n true_def = self.get_true_def(id)\n\n # accounting for cheaters (dud w/ no defense), bases that cheat the system & get way too good scores cause of 0 values\n if true_def == 0 and prod == 0:\n true_def = 1\n\n # for all duds, has effect of basically tripling their desire score in comparison to the same base if it had 1 prod\n # also protects us from division by 0 error\n if prod == 0:\n prod = .33\n\n if true_def == 0:\n true_def = 1\n # subtracting prod just so that a base w/ 0 cybs and 3 prod more desirable than if it had 1 prod instead\n return true_def / prod", "title": "" }, { "docid": "17d378ff690e557efb9933d0fe0397a4", "score": "0.5776756", "text": "def extra_score(self, c):\n return 0.0", "title": "" }, { "docid": "4e123dc146e26dc6d763213b6064b43e", "score": "0.5771934", "text": "def __score__(self):\n raise NotImplementedError()", "title": "" }, { "docid": "1151580f3e9638fccbfeca5a8b1284c5", "score": "0.5771352", "text": "def get_r2_score(self):\n return self.r2_score", "title": "" }, { "docid": "20382fd9cfb005f5d510408459b7fbc8", "score": "0.5737324", "text": "def custom_score(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n if game.is_winner(player):\n return float(\"inf\")\n\n # return number_of_moves_and_blank_spaces(game, player)\n # return number_of_moves_and_location(game, player)\n return 
number_of_moves_improved(game, player)", "title": "" }, { "docid": "ba5d7b545ae03a88a2e6af015e963629", "score": "0.57369393", "text": "def cover_score(covered_area):\n \n if covered_area >= 99:\n return 10\n\n elif covered_area >= 95:\n return 5\n\n else:\n return 0", "title": "" }, { "docid": "37fc1f5c2276559162fc11cfa2ff824e", "score": "0.5732569", "text": "def get_R2_score(self):\n return self.r_2", "title": "" }, { "docid": "ff40bdf647aab7ebab45ee675dae0ede", "score": "0.57281274", "text": "def custom_score(game, player):\n\n # TODO: finish this function!\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n # First heuristic: Aggressive Chaser\n # return aggressive_chaser(game, player)\n # Second heuristic:\n #return second_heuristic(game, player)\n # Third heuristic:\n return third_heuristic(game, player)", "title": "" }, { "docid": "2f09837fa6219534d659dcc0fcb3a4b8", "score": "0.5718558", "text": "def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n w, h = game.width / 2., game.height / 2.\n my_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opp_legal_moves = game.get_legal_moves(opponent)\n my_total = 0\n opp_total = 0\n for mx, my in my_legal_moves:\n # Make negative, since being closer to the edges should be penalized\n value = (-1 * (w - mx)**2 + (h - my)**2)\n my_total += value\n for ox, oy in opp_legal_moves:\n # Make negative, since being closer to the edges should be penalized\n value = (-1 * (w - ox)**2 + (h - oy)**2)\n opp_total += value\n\n # This function, in its current state has not been weighted to work with\n # other heuristics, since the score returned is often negative\n return float(my_total - opp_total)", "title": "" }, { "docid": "a372cee904a91de995d2c6460c60eccc", "score": "0.5715155", "text": "def custom_score_2(game,player): \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n def going_on(game_state,l=0,m_max=0):\n legal_moves=game_state.get_legal_moves(player)\n if len(legal_moves)==0:\n if l>m_max:\n m_max=l\n return m_max\n for m in legal_moves:\n game_state._active_player=player\n m_max=going_on(game_state.forecast_move(m),l+1,m_max)\n return m_max\n \n if len(game.get_blank_spaces())<=15:\n game_state=game.copy()\n x,y =game_state.get_player_location(player)\n m_max=going_on(game_state)\n return float(m_max)\n \n return float(0)", "title": "" }, { "docid": "c6b1ede442ccabac8bc882966c339dc4", "score": "0.5709342", "text": "def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n # check the part of the game\n if len(game.get_blank_spaces()) < 41:\n moves_weights = game.get_legal_moves_weights()\n playerSum = 0.\n for move in game.get_legal_moves(player):\n playerSum = playerSum + moves_weights[move[0]][move[1]]\n\n oppSum = 0.\n for move in game.get_legal_moves(game.get_opponent(player)):\n oppSum = oppSum + moves_weights[move[0]][move[1]]\n return float(playerSum - oppSum)\n else:\n weights = {}\n weights[-2] = {-2: 1, -1: 4, 0: 3, 1: 4, 2: 1}\n weights[-1] = {-2: 4, -1: 3, 0: 2, 1: 3, 2: 4}\n weights[0] = {-2: 3, -1: 2, 0: 0, 1: 2, 2: 3}\n weights[1] = {-2: 4, -1: 3, 0: 2, 1: 3, 2: 4}\n weights[2] = {-2: 1, -1: 4, 0: 3, 1: 4, 2: 1}\n\n blank_spaces = game.get_blank_spaces()\n player_location = 
game.get_player_location(player)\n opp_location = game.get_player_location(game.get_opponent(player))\n # intialize the location weight of both players to zero\n player_loc_weight = 0\n opp_loc_weight = 0\n for s in blank_spaces:\n # subtracting current player location from the blank space gives an offset of the blank space\n # from the player location.\n # this way we can check if the blank space is part of the 3x3 matrix that surround the player\n player_weight_index = tuple(map(lambda x, y: x - y, s, player_location))\n if player_weight_index[0] in range(-2, 3) and player_weight_index[1] in range(-2, 3):\n # add up the weight of the current blank space to the total sum for the first player\n player_loc_weight = player_loc_weight + weights[player_weight_index[0]][player_weight_index[1]]\n\n # find the offset for the second player too\n opp_weight_index = tuple(map(lambda x, y: x - y, s, opp_location))\n if opp_weight_index[0] in range(-2, 3) and opp_weight_index[1] in range(-2, 3):\n # add up the weight of the current blank space to the total sum for the second player\n opp_loc_weight = opp_loc_weight + weights[opp_weight_index[0]][opp_weight_index[1]]\n\n return float(player_loc_weight - opp_loc_weight)", "title": "" }, { "docid": "61d93c2778b97b2308b599e6675f1ca6", "score": "0.5706612", "text": "def calc_relief(card,impc_dict,scs_rmn_avg,my_score):\n relief=MrGreed.BURDEN_DICT.get(card,0)\n if impc_dict['SQ']==False:\n relief+=MrGreed.BURDEN_DICT_S.get(card,0)\n if impc_dict['DJ']==False:\n relief+=MrGreed.BURDEN_DICT_D.get(card,0)\n if impc_dict['C10']==False:\n relief+=int(-1*MrGreed.BURDEN_DICT_C.get(card,0)*(scs_rmn_avg+my_score))\n return relief", "title": "" }, { "docid": "71e6f340768137d9253813888da9c77d", "score": "0.5706544", "text": "def custom_score(game, player):\n # Assuming the terminated State will be check in Iterative deepening serach\n # I can safely return only value only the node between root and terminted state node\n # First option for evaluation function: my_move - 1.3*my_opponent_move\n #return len(game.get_legal_move(player))-1.3*len(game.get_legal_moves(game.get_opponent(player)))\n # Second option for evaluation function: my_move - 1.5*my_opponent_move\n return len(game.get_legal_moves(player))-1.3*len(game.get_legal_moves(game.get_opponent(player)))\n #print (value)\n #return len(game.get_legal_moves(player))\n #return 1.\n #return value\n # Third option for evaluation function: my_move - 2*my_opponent_move\n #return len(game.get_legal_moves(player))-len(game.get_legal_moves(game.get_opponent(player)))\n\n #raise NotImplementedError", "title": "" }, { "docid": "7c070d5a57712968d648e91eb8da69f1", "score": "0.56998765", "text": "def getScore(n):\n return n.getExpectedValue() + UCTAgent.UCBPolicy.C*math.sqrt(2*math.log(n.parent.getExpanded())/n.getExpanded())", "title": "" }, { "docid": "338ae6153d94b0d79aa80ccd2321038a", "score": "0.56994796", "text": "def custom_score_2(game, player):\n \n # TODO: finish this function!\n if game.is_winner(player):\n return float(\"inf\")\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n # We have moves to play. 
How many more than our opponent?\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n return float(own_moves - opp_moves)", "title": "" }, { "docid": "7cc7a6e243efb958de8ef8b0b0e1f9c6", "score": "0.5694147", "text": "def custom_score2(game, player):\n\n # Custom Heuristic 2: \n # InverseAvgOpenRadius\n \n #\n # This heuristic tries to favor moves that have more open squares\n # closer to the center of the board, assuming that the center of\n # the board is move favorable for winning. It works by finding\n # all the open squares, their distances from the center, and taking\n # the average. In order to make smaller averages be higher\n # score values, the inverse is taken as the score. So if the \n # average distance was 3 squares, the inverse would be 1/3=0.333 or\n # if the average distance was 2 squares, the inverse would be 1/2=0.5\n # which is assumed to be a better position to control the game play.\n # The lower limit of the average is set to 1, to make sure\n # the inverse is well-defined.\n #\n\n # Get list of open squares using provided utility function\n \n openSquares = game.get_blank_spaces()\n \n # Compute average distance of open squares\n \n rowCenter = game.height//2 # center row of board\n colCenter = game.width//2 # center column of board\n \n sumDist = 0.0\n for iSquare in openSquares:\n distToCenter=(iSquare[0]-rowCenter)**2 + (iSquare[1]-colCenter)**2\n distToCenter=math.sqrt(distToCenter)\n sumDist = sumDist+distToCenter\n \n numSquares = len(openSquares)\n avgDist = sumDist/numSquares\n \n # set lower limit to 1.0, to make inverse well-defined\n avgDist = max(1.0, avgDist)\n \n # Compute inverse and assign to score\n score = 1.0/avgDist\n \n return score", "title": "" }, { "docid": "37d75874810effb54555456d2d12e355", "score": "0.56782377", "text": "def custom_score(game, player):\n if game.is_loser(player):\n return NEGATIVE_INFINITY\n\n if game.is_winner(player):\n return POSITIVE_INFINITY\n\n # Option 0: \n # Score indicated by the #player_moves - #opponent_moves\n def better_with_more_moves_than_opponent():\n return float(len(game.get_legal_moves(player)) - len(game.get_legal_moves(game.get_opponent(player))))\n\n # Option 1:\n # Score indicated by the #player_moves - 2 * #opponent_moves\n def better_with_more_moves_than_opponent_aggressive():\n return float(len(game.get_legal_moves(player)) - 2 * len(game.get_legal_moves(game.get_opponent(player))))\n\n # Option 2:\n # Score indicated by the relative distance between players to the center of the board\n def better_with_closer_distance_to_center():\n player_location = game.get_player_location(player)\n opponent_location = game.get_player_location(game.get_opponent(player))\n center = (int(game.height / 2), int(game.width / 2))\n # euclidean distance\n player_distance = math.sqrt((player_location[0] - center[0]) ** 2 + (player_location[1] - center[1]) ** 2)\n if player_distance == 0:\n # return max distance\n return math.sqrt((0 - center[0]) ** 2 + (0 - center[1]) ** 2)\n opponent_distance = math.sqrt((opponent_location[0] - center[0]) ** 2 + (opponent_location[1] - center[1]) ** 2)\n # return ratio\n return opponent_distance / player_distance\n\n # Option 3:\n # Combining Option 0 and Option 2\n def better_with_more_moves_and_distance_to_center():\n return better_with_more_moves_than_opponent() + better_with_closer_distance_to_center()\n\n return better_with_more_moves_and_distance_to_center()", "title": "" }, { "docid": 
"4b33b2daa2a50130107b81315754dfef", "score": "0.5676883", "text": "def score(self) -> int:\n return 1000", "title": "" }, { "docid": "cf9d535e326dfb0295ada1d6c9b3bd8a", "score": "0.56767994", "text": "def scoreBoard(self, b):\n if b.winsFor(self.ox)==True:\n return 100.0\n elif b.winsFor(self.oppCh())==True:\n return 0.0\n else:\n return 50.0", "title": "" }, { "docid": "18d2fc40a33a44fc36b85b865554831b", "score": "0.5675266", "text": "def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n ## Similar to center score, this bases score off of how far a piece is from\n ## an its too closest edges in comparison to its opponent.\n\n w, h = game.width, game.height\n my, mx = game.get_player_location(player)\n oy, ox = game.get_player_location(game.get_opponent(player))\n # I square the distance here to eliminate any negative values\n score = ((oy-h/2) ** 2 + (ox-w/2) **2) - ((my-h/2) ** 2 + (mx-w/2) **2)\n\n return float(score)", "title": "" }, { "docid": "e4e13847ea4bc6ca699fa530a16306cb", "score": "0.5668066", "text": "def custom_score_5(game,player): \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n \n y, x = game.get_player_location(player)\n return float(abs(3 - y) + abs(3 - x))", "title": "" }, { "docid": "f3fe496bac0479e461fcaf5ac1506fd4", "score": "0.56678146", "text": "def custom_score(game, player):\n\n return improved_score(game, player)", "title": "" }, { "docid": "e303d66c811cfb2d212283cb33601bdd", "score": "0.56650573", "text": "def score(self, board):\r\n if board.hasWon(self.num):\r\n return 100.0\r\n elif board.hasWon(self.opp):\r\n return 0.0\r\n else:\r\n return 50.0", "title": "" }, { "docid": "e303d66c811cfb2d212283cb33601bdd", "score": "0.56650573", "text": "def score(self, board):\r\n if board.hasWon(self.num):\r\n return 100.0\r\n elif board.hasWon(self.opp):\r\n return 0.0\r\n else:\r\n return 50.0", "title": "" }, { "docid": "5656f06709d4c53b7312a8b0b8d54b29", "score": "0.56603205", "text": "def get_score(self, target: pd.Series) -> float:\n if pd.isnull(target[self.c.COLS.REGRESSION_PARAM]) or pd.isnull(target[self.c.COLS.REGRESSION_INTERCEPT]) \\\n or pd.isnull(target[self.c.COLS.ANNUAL_REDUCTION_RATE]):\n return self.fallback_score\n return target[self.c.COLS.REGRESSION_PARAM] * target[self.c.COLS.ANNUAL_REDUCTION_RATE] * 100 + target[\n self.c.COLS.REGRESSION_INTERCEPT]", "title": "" }, { "docid": "1c7f7cba3cc5d4df282365883e116503", "score": "0.564847", "text": "def custom_score_3(game,player): \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n def going_on(game_state,l=0,m_max=0):\n\n legal_moves=game_state.get_legal_moves(player)\n \n if len(legal_moves)==0:\n if l>m_max:\n m_max=l\n \n return m_max\n for m in legal_moves:\n \n game_state._active_player=player\n m_max=going_on(game_state.forecast_move(m),l+1,m_max)\n \n return m_max\n \n if len(game.get_blank_spaces())<=15:\n game_state=game.copy()\n x,y =game_state.get_player_location(player)\n \n m_max=going_on(game_state)\n \n return m_max\n else:\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)", "title": "" }, { "docid": "156851f2225fbc12d8e7ce22dcddf49c", "score": "0.5636524", "text": "def add_round(self):\n self.rounds += 1", "title": "" }, { "docid": "f3146483cd5081a8bb23f239b636bacc", "score": 
"0.5633443", "text": "def custom_score_6(game,player): \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n def going_on(game_state,l=0,m_max=0):\n #print(\"begin\")\n #print(\"l={}\".format(l))\n legal_moves=game_state.get_legal_moves(player)\n #print(\"legal_moves: {}\".format(legal_moves))\n if len(legal_moves)==0:\n if l>m_max:\n m_max=l\n #print(\"m_max={}\".format(m_max))\n return m_max\n for m in legal_moves:\n # print(\"m: {}\".format(m))\n game_state._active_player=player\n m_max=going_on(game_state.forecast_move(m),l+1,m_max)\n #print(\"max={}\".format(m_max))\n return m_max\n \n if len(game.get_blank_spaces())<=15:\n game_state=game.copy()\n x,y =game_state.get_player_location(player)\n #print(\"x={},y={}\".format(x,y))\n m_max=going_on(game_state)\n #print(\"final_max={}\".format(m_max))\n return m_max\n \n y, x = game.get_player_location(player)\n return float(abs(3 - y) + abs(3 - x))", "title": "" }, { "docid": "ec11f668dc0e1cee5e827dbb989e8b58", "score": "0.56298345", "text": "def _evaluate_card(trump_suit: BiddingSuit, suit_led: Suit, card: Card) -> int:\n score = card.rank.value[0]\n if card.suit == trump_suit.to_suit():\n score += 100\n elif card.suit != suit_led:\n score -= 100\n return score", "title": "" }, { "docid": "1bf78dfdad8686f26d25a6edac0fd442", "score": "0.56150657", "text": "def custom_score_2(game, player):\n\n # If the board is empty, or the player has won or lost, use the const score.\n res = const_score(game, player)\n if res:\n return res\n\n # Forward to the selected heuristic\n return float(look_ahead_improved_score_v2(game, player))", "title": "" }, { "docid": "1e51e2d0956e02d38a4ada23a0f3534e", "score": "0.55988353", "text": "def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n # get the actual matrix with possible moves values\n moves_weights = game.get_legal_moves_weights()\n # initialize the first player location sum to zero\n player_sum = 0.\n for move in game.get_legal_moves(player):\n # add up the current move possible moves to the total sum\n player_sum = player_sum + moves_weights[move[0]][move[1]]\n\n # initialize the second player location sum to zero\n opp_sum = 0.\n for move in game.get_legal_moves(game.get_opponent(player)):\n # add up the current move possible moves to the total sum\n opp_sum = opp_sum + moves_weights[move[0]][move[1]]\n return float(player_sum - opp_sum)", "title": "" }, { "docid": "e569f374711b00731e01a94d3610050f", "score": "0.55956453", "text": "def custom_score(game, player):\n if game.is_loser(player):\n return -math.inf\n if game.is_winner(player):\n return math.inf\n \n player_moves = len(game.get_legal_moves(player))\n opponent_moves = len(game.get_legal_moves(game.get_opponent(player)))\n blank_spaces = game.get_blank_spaces()\n percent_board_occupied = int((len(blank_spaces)/(game.width * game.height)) * 100)\n \n if percent_board_occupied < 30 :\n return float(player_moves - 2*opponent_moves)\n else:\n return float(2*player_moves - opponent_moves)\n \n\n # list of locations that fall onto the walls of board ", "title": "" }, { "docid": "f3cdca3b45e42d8e7004dc0716d7352d", "score": "0.55886066", "text": "def _calc_current_score(self):\n return self.board.sum()", "title": "" }, { "docid": "2b94a3a88da0f2b195cb78e26ca5a77d", "score": "0.5584847", "text": "def custom_score_4(game,player): \n if game.is_loser(player):\n return float(\"-inf\")\n\n if 
game.is_winner(player):\n return float(\"inf\")\n\n \n y, x = game.get_player_location(player)\n return float((3 - y)**2 + (3 - x)**2)", "title": "" }, { "docid": "92f0521a4b32cc30ee08b306cf845886", "score": "0.5584499", "text": "def getScore(self):\n pass", "title": "" }, { "docid": "6c01b007a4c825f5d909b86955011427", "score": "0.55825603", "text": "def score(total, correct, wrong):\n correct_value = 2\n return (\n (correct * correct_value + wrong) /\n (total * correct_value)\n ) * 100", "title": "" }, { "docid": "9f243a0b63f69639fe52b3129b7e3e9c", "score": "0.5582172", "text": "def override_gate(self, t):\n\t\tif(self.crossed or self.reward_acc == 0):\n\t\t\treturn 1.0\n\t\telse:\n\t\t\treturn 0.0", "title": "" }, { "docid": "0ca356e318b6e9076b98f483d27a5282", "score": "0.55766606", "text": "def custom_score_7(game,player): \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n \n y, x = game.get_player_location(player)\n return float((3 - y)**2 + (3 - x)**2)", "title": "" }, { "docid": "5a419495a4983624cb19142e88b10a17", "score": "0.55756706", "text": "def custom_score1(game, player):\n\n # Custom Heuristic 1: \n # PlayerCentralityRatio\n \n #\n # This heuristic is based on the assumption that having\n # a position closer to the center of the board is stragically\n # favorable. It computes the distance of the student player from\n # the center, the distance of the opponent from the center,\n # and takes the ratio of opponent to student. Larger values\n # are better, since it indicates that the opponent is further\n # from the center than the student.\n #\n\n # get student agent player board location\n myLocation = game.get_player_location(player)\n \n # get opponent player board location\n oppLocation = game.get_player_location(game.get_opponent(player))\n \n # get center of game board\n rowCenter = game.height//2 # center row of board\n colCenter = game.width//2 # center column of board\n\n # Compute euclidian distance of student agent player from center\n myDistToCenter=(myLocation[0]-rowCenter)**2 + (myLocation[1]-colCenter)**2\n myDistToCenter=math.sqrt(myDistToCenter) \n\n # Compute euclidian distance of opponent from center\n oppDistToCenter=(oppLocation[0]-rowCenter)**2 + (oppLocation[1]-colCenter)**2\n oppDistToCenter=math.sqrt(oppDistToCenter) \n \n # Put lower bound of 1.0, so that the ratio is always defined\n # (no division by zero), and non-zero positive.\n myDistToCenter = max(1.0, myDistToCenter)\n oppDistToCenter = max(1.0, oppDistToCenter)\n \n # Compute ratio of opponent to student\n score = oppDistToCenter/myDistToCenter\n \n return score", "title": "" }, { "docid": "714b1bfbe93a6cd62658d6874bd4618d", "score": "0.5575454", "text": "def custom_score_2(game, player):\n\n\tpm = len(game.get_legal_moves(player))\n\tom = len(game.get_legal_moves(game.get_opponent(player)))\n\n\t#if this valus is bigger something good is happening! 
( most of the time )\n\treturn pm - om", "title": "" }, { "docid": "54e0f92bbe242aa7f90476990232594b", "score": "0.5575033", "text": "def score_rounds(rounds: List[Round]) -> Measure:\n max_player_number = max(\n p for p in itertools.chain.from_iterable(t for r in rounds for t in r)\n )\n return Score(sum(measure(max_player_number, r) for r in rounds))", "title": "" }, { "docid": "b567447a810663e1b09f482430d5d7f1", "score": "0.5571288", "text": "def multiscale_brier_score(grid_pred, timed_points, size=1):\n risk, u = _brier_setup(grid_pred, timed_points)\n cell_area = grid_pred.xsize * grid_pred.ysize\n\n agg_risk, agg_u, cell_sizes = [], [], []\n for (s1, c1), (s2, c2) in zip(generate_aggregated_cells(risk, size),\n generate_aggregated_cells(u, size)):\n if c1 > 0:\n cell_sizes.append(c1)\n agg_risk.append(s1)\n agg_u.append(s2)\n\n agg_risk = _to_array_and_norm(agg_risk)\n agg_u = _to_array_and_norm(agg_u)\n cell_sizes = _to_array_and_norm(cell_sizes)\n\n score = _np.sum( cell_sizes * (agg_risk - agg_u)**2 )\n score_worst = _np.sum( cell_sizes * (agg_risk**2 + agg_u**2) )\n skill = 1 - score / score_worst\n return score / cell_area, skill", "title": "" }, { "docid": "db7b0c9366a478f703a4ed6fc8fa33ac", "score": "0.5566273", "text": "def custom_score_3(game, player):\n # TODO: finish this function!\n # raise NotImplementedError\n\n\n # test for terminal conditions\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n # Keeping My Options Open: \n # This heuristic simply returns the sum of the amount of legal moves and \n # the number of blank game locations. The intention is to reward a higher score for games that have the \n # most options available in each move.\n # We may need to adjust this strategy as the legal moves converges on the number of remaining blank game locations\n # This threshold is currently set to when 1/2 of the game locations have been filled\n # Once the threshold has been hit we move into an agressive imporved score heuristic\n # Temporary testing continues\n\n\n # get the number of legal moves\n number_own_moves = len(game.get_legal_moves(player))\n\n #get the number of legal moves for the opponent\n number_opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n # get the remaining blank spaces\n number_remaining_moves = len(game.get_blank_spaces())\n\n if (len(game.get_blank_spaces()) > game.width * game.height / 2):\n score = (number_own_moves + number_remaining_moves)\n else:\n score = number_own_moves-(2*number_opp_moves)\n\n\n return float(score)", "title": "" }, { "docid": "a4fa19f099f1aa83a2ff5e87a58293d6", "score": "0.55554765", "text": "def get_score(self, result):\n if self.is_correct(result):\n return 1\n else:\n return 0", "title": "" }, { "docid": "2cc154d465aa138da2ac53a0067f82b6", "score": "0.5554136", "text": "def evaluate_score(self):\r\n \r\n score = 0\r\n\r\n #Add the scores of the white player (MAX)\r\n for w in self.wp:\r\n if not w.captured:\r\n score += w.value\r\n\r\n #Subtract the scores of the black player (MIN)\r\n for b in self.bp:\r\n if not b.captured:\r\n score -= b.value\r\n\r\n return score", "title": "" }, { "docid": "0d0f6cc81b514244f2b4d0031e242120", "score": "0.5552427", "text": "def custom_score_8(game,player): \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n def going_on(game_state,l=0,m_max=0):\n #print(\"begin\")\n #print(\"l={}\".format(l))\n legal_moves=game_state.get_legal_moves(player)\n 
#print(\"legal_moves: {}\".format(legal_moves))\n if len(legal_moves)==0:\n if l>m_max:\n m_max=l\n #print(\"m_max={}\".format(m_max))\n return m_max\n for m in legal_moves:\n # print(\"m: {}\".format(m))\n game_state._active_player=player\n m_max=going_on(game_state.forecast_move(m),l+1,m_max)\n #print(\"max={}\".format(m_max))\n return m_max\n \n if len(game.get_blank_spaces())<=15:\n game_state=game.copy()\n x,y =game_state.get_player_location(player)\n #print(\"x={},y={}\".format(x,y))\n m_max=going_on(game_state)\n #print(\"final_max={}\".format(m_max))\n return m_max\n \n y, x = game.get_player_location(player)\n return float((3 - y)**2 +(3 - x)**2)", "title": "" }, { "docid": "a0614b77cfaf22f4e8b73a360421d5cc", "score": "0.5546869", "text": "def _peirce_skill_score(table):\r\n n = float(table.sum())\r\n nf = table.sum(axis=1)\r\n no = table.sum(axis=0)\r\n correct = float(table.trace())\r\n no_squared = (no * no).sum()\r\n if n ** 2 == no_squared:\r\n return correct / n\r\n else:\r\n return (n * correct - (nf * no).sum()) / (n ** 2 - no_squared)", "title": "" }, { "docid": "d460b0830424226cc3649f7a4bab0b22", "score": "0.55425566", "text": "def custom_score_2(game, player):\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n agent_moves, opponent_moves, blanks, agent_y, agent_x, opponent_y, opponent_x = get_game_info_helper(game, player)\n\n w, h = game.width / 2., game.height / 2.\n\n euclidean_agent = float((w - agent_y) ** 2 + (h - agent_x) ** 2)\n euclidean_opponent = float((w - opponent_y) ** 2 + (h - opponent_x) ** 2)\n\n return (euclidean_agent - euclidean_opponent)", "title": "" }, { "docid": "879ef792100f445281b50e14567c7da2", "score": "0.55394006", "text": "def custom_score_2(game, player):\n if game.is_loser(player):\n return -math.inf\n if game.is_winner(player):\n return math.inf\n\n player_moves = len(game.get_legal_moves(player))\n opponent_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(2*player_moves-opponent_moves)", "title": "" }, { "docid": "72ed2fb0ff25436b560bad56f70e501b", "score": "0.5538578", "text": "def _calc_score(self):\r\n score = 0\r\n black_king_found = False\r\n white_king_found = False\r\n for pos in self.positions():\r\n piece = self.at(pos)\r\n if piece == 'k':\r\n black_king_found = True\r\n elif piece == 'K':\r\n white_king_found = True\r\n score += Board.piece_values[piece]\r\n score += self._bonus_score(pos, piece)\r\n\r\n if not black_king_found:\r\n score = 100000\r\n if not white_king_found:\r\n score = -100000\r\n if self.move_num == 41:\r\n score = 0\r\n\r\n return score", "title": "" }, { "docid": "59636249a12e67fdd331462c5788c8b1", "score": "0.5536653", "text": "def bank_score(self):\n for player_score in self.player_scores:\n if player_score[0] == self.user_pk:\n player_score[1] += self.round_score\n self.banked_score = player_score[1]\n score_for_last_move = self.round_score\n self.reset_round_state()\n try:\n next_index = self.players.index(self.player_turn)+1\n self.player_turn = self.players[next_index]\n except IndexError:\n self.player_turn = self.players[0]\n return (\"bank,\" + str(score_for_last_move))", "title": "" }, { "docid": "a08431aff84e816ba27f91a0e61c43e9", "score": "0.55253994", "text": "def tickRound(self):\n r = self.__authoritative.round\n self._setAttrib(\"round\", r + 1)\n self._setAttrib(\"turn\", 0)", "title": "" }, { "docid": "54516395e6c5db085b39594c0d35f8e8", "score": "0.5519198", "text": "def rough_outcome(self) -> 
float:\n raise NotImplementedError", "title": "" }, { "docid": "d6148a87b4997e26e4394673cc2a10bc", "score": "0.5512673", "text": "def score_baseload(self, baseload_threshold=3, **kwargs):\n\n if self.intersect is not None and self.intersect > self.breakpoint:\n breakpoint = self.intersect\n else:\n breakpoint = self.breakpoint\n\n num_points = len(self.df[self.df.independent <= breakpoint]) # number of points below the breakpoint\n\n return min(1, float(num_points) / float(baseload_threshold))", "title": "" }, { "docid": "e7d4cca23d189aee9bb41c00a43f940d", "score": "0.55035096", "text": "def brier_skill_score(self):\n reliability, resolution, uncertainty = self.brier_score_components()\n return (resolution - reliability) / uncertainty", "title": "" }, { "docid": "835a11208f2f178be507508617ab7742", "score": "0.54953915", "text": "def score(self, ticker: str, ticker_data: TickerData) -> ScoreEntry:\n rnd_score = 0.0\n rnd_count = 0.0\n # TODO: add debt score\n # debt_score = 0\n\n income_list = ticker_data.income_list\n # balance_sheet_list = ticker_data.balance_sheet_list\n for i in range(len(income_list)):\n\n try:\n # debt_score += float(balance_sheet_list[i].TotalAssets) / max(\n # float(balance_sheet_list[i].TotalLiabilities),\n # float(balance_sheet_list[i].TotalAssets) / 5) # handle case of zero or very low debt for one year\n if income_list[i].RnDExpenses and income_list[i].OperatingExpenses:\n rnd_score += float(income_list[i].RnDExpenses) / float(income_list[i].OperatingExpenses)\n rnd_count += 1\n else:\n logging.info(f'failed to process inclcome list {i} for {ticker}')\n except IndexError as e:\n print(f'index {i!r} out of bounds for {ticker!r} -> {e}')\n except ZeroDivisionError as e:\n print(f'{ticker!r} revenue is zero for {income_list[i].Date} -> {e}')\n\n last_assets = ticker_data.balance_sheet_list[-1].TotalAssets\n last_liabilities = ticker_data.balance_sheet_list[-1].TotalLiabilities\n\n return ScoreEntry(\n ticker=ticker,\n grossProfitGrowth=avg_growth(ticker, income_list, 'GrossProfit'),\n incomeGrowth=avg_growth(ticker, income_list, 'NetIncome'),\n RnDRatio=rnd_score / rnd_count if rnd_count else 0,\n cashPerDebt=(float(last_assets) / float(last_liabilities) if\n last_assets and last_liabilities else 0),\n netIncome=average(income_list, 'NetIncome'),\n mktCap=float(ticker_data.profile[-1].mktCap)\n )", "title": "" }, { "docid": "0fd2337b1d085c8acdf5159be2e549a3", "score": "0.5494684", "text": "def custom_score_3(game, player):\n # TODO: finish this function!\n if game.is_winner(player):\n return float(\"inf\")\n\n if game.is_loser(player):\n return float(\"-inf\")\n\n # We have moves to play. 
How many more than our opponent?\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n\n if own_moves != opp_moves:\n return float(own_moves - opp_moves)\n\n else:\n center_y_pos, center_x_pos = int(game.height / 2), int(game.width / 2)\n player_y_pos, player_x_pos = game.get_player_location(player)\n opponent_y_pos, opponent_x_pos = game.get_player_location(game.get_opponent(player))\n player_distance = abs(player_y_pos - center_y_pos) + abs(player_x_pos - center_x_pos)\n opponent_distance = abs(opponent_y_pos - center_y_pos) + abs(opponent_x_pos - center_x_pos)\n \n return float(opponent_distance - player_distance) / 10.", "title": "" }, { "docid": "e6414d39854a27a0a43588ec7df26479", "score": "0.5491052", "text": "def brier_score(grid_pred, timed_points):\n risk, u = _brier_setup(grid_pred, timed_points)\n area = grid_pred.xsize * grid_pred.ysize\n score = _np.mean((u - risk)**2) / area\n skill = 2 * _np.sum(u * risk) / (_np.sum(u * u + risk * risk))\n return score, skill", "title": "" }, { "docid": "1d688929a49e6d7833a848d43cd50582", "score": "0.54894644", "text": "def calculate_reward(self):", "title": "" }, { "docid": "5dcce4fc199a1102b04c4b221c4ea875", "score": "0.5489451", "text": "def custom_score_3(game, player):\n\n # If the board is empty, or the player has won or lost, use the const score.\n res = const_score(game, player)\n if res:\n return res\n\n # Forward to the selected heuristic\n return float(moves_ratio_score(game, player))", "title": "" }, { "docid": "5922fd4a5118be62b7d1b60286341d6d", "score": "0.54878247", "text": "def custom_score(game, player):\n return score_diff_opportunities_hunter(game, player)", "title": "" }, { "docid": "bfc08de13953933fed71f12d6f017089", "score": "0.54823923", "text": "def score(self, board: Block) -> int:\r\n raise NotImplementedError", "title": "" }, { "docid": "bfc08de13953933fed71f12d6f017089", "score": "0.54823923", "text": "def score(self, board: Block) -> int:\r\n raise NotImplementedError", "title": "" }, { "docid": "604c3de9aadb4cf46796719a290f3a89", "score": "0.54790425", "text": "def reward_function(success, max_round):\n\n reward = -1\n if success == FAIL:\n reward += -max_round\n elif success == SUCCESS:\n reward += 2 * max_round\n # elif success == UNSUITABLE:\n # reward += -(max_round/2)\n # elif success == GOOD_INFORM:\n # reward += max_round\n # elif success == NO_VALUE:\n # reward += 1\n return reward", "title": "" }, { "docid": "1b5c54416e776f0151858e0c312c8e08", "score": "0.5478513", "text": "def overall_recall(self, conf_threshold=0) -> float:\n true_positives = self.overall_true_positives(conf_threshold)\n\n try:\n return true_positives / self.num_snps\n except ZeroDivisionError:\n return 0.0", "title": "" }, { "docid": "d7912105b2684cd1ab0e3aeef8dc7be0", "score": "0.54752856", "text": "def net_penalty_rule(_m, g, t):\r\n\r\n return (m.EMISSIONS_RATE[g] - m.BASELINE[t]) * m.PERMIT_PRICE[g]", "title": "" }, { "docid": "61229a6f99fae7fe139cae4447a5a443", "score": "0.5475084", "text": "def round_score(score):\n\n return round(score * 10) * 0.1", "title": "" }, { "docid": "ec988984a72eb02616b4fc8bd264c402", "score": "0.54670167", "text": "def getNeighbourScore():\n num_matched = num_neighbours_matched + num_neighbours_mismatched\n # we don't any input\n if num_matched == 0:\n # start safe\n return 0\n # start safely\n safe_score = 1 if num_matched >= 10 else 0.5\n # calculate the ratio\n ratio = (num_neighbours_matched * 1.0 / num_matched) * safe_score\n if 
ratio > LOCATION_BOOST_LOW_THRESHOLD:\n ratio = 1\n return LOCATION_BOOST_SCORE * ratio", "title": "" }, { "docid": "45ecb88b121a356f838e542402181d2a", "score": "0.54662925", "text": "def add_round_score(self, roundscore):\n self.totalscore = self.totalscore + roundscore\n return", "title": "" } ]
query_id: 58f4d7bbff54905795fc687c6a761f53
query: Bulk set variables from arguments. Intended for internal use by the Set and From methods.
[ { "docid": "f05fda4f6a55f882a4c7fa9334414708", "score": "0.0", "text": "def Apply(self, args, kwargs):\n if len(args) == 1:\n arg = args[0]\n if isinstance(arg, dict):\n self.ApplyMap(arg)\n else:\n self.ApplyMap(dict(arg))\n elif len(args) > 1:\n self.ApplyMap(dict(args))\n self.ApplyMap(kwargs)\n return self", "title": "" } ]
[ { "docid": "98838b7ea7102453dc9d6be12a0d2909", "score": "0.6670922", "text": "def set(self, **kwargs):\r\n setAllArgs(self, kwargs)", "title": "" }, { "docid": "98ec79dba3f8112d1c67e2200a55b1fc", "score": "0.656707", "text": "def __setFourParameters(self, args):\n self.setMetaValues(args[0])\n # cast the metaValues to int\n self.metaValues = [int(val) for val in self.metaValues]\n self.setTargetSE(args[1])\n self.setSourceSE(args[2])\n self.setDatatype(args[3])", "title": "" }, { "docid": "314714d70f01b10969df84c5ba80988c", "score": "0.65503263", "text": "def set(*args, **kw):\n\n if len(args) == 0:\n if len(kw) != 0:\n # normal case is only keyword,value pairs\n for keyword, value in kw.items():\n keyword = untranslateName(keyword)\n svalue = str(value)\n _varDict[keyword] = svalue\n else:\n # set with no arguments lists all variables (using same format\n # as IRAF)\n listVars(prefix=\" \", equals=\"=\")\n else:\n # The only other case allowed is the peculiar syntax\n # 'set @filename', which only gets used in the zzsetenv.def file,\n # where it reads extern.pkg. That file also gets read (in full cl\n # mode) by clpackage.cl. I get errors if I read this during\n # zzsetenv.def, so just ignore it here...\n #\n # Flag any other syntax as an error.\n if (len(args) != 1 or len(kw) != 0 or\n not isinstance(args[0], str) or args[0][:1] != '@'):\n raise SyntaxError(\"set requires name=value pairs\")", "title": "" }, { "docid": "b5f5fc6b2e0d615b5934a02a46078536", "score": "0.63758177", "text": "def setArgs(self, args):\n self.args = args", "title": "" }, { "docid": "99b96cc9b15863aab8ec7e6e02e92a4f", "score": "0.63695204", "text": "def __set_variables(self, variables):\n for name, value in variables.items():\n if name in variables:\n self.__variables[name] = value", "title": "" }, { "docid": "44412f8ae0e48aa48cdf9f5172a2ab8b", "score": "0.62382245", "text": "def set_members(self, *args):\n raise NotImplementedError", "title": "" }, { "docid": "7686cea6a8049f339dc3382a7fd3d179", "score": "0.62156653", "text": "def set(self, **kwargs):\n ...", "title": "" }, { "docid": "edd07857b7fccc5599e9d160242c41ea", "score": "0.61978555", "text": "def __setAllForParameters(self, args):\n self.setMetaValues(self.allFor)\n self.metaValues = [int(val) for val in self.metaValues]\n # place the indiviual entries as well.\n prodTemp = list(self.metaValues)\n for prodID in prodTemp:\n self.metaValues.append(prodID + 1)\n self.metaValues.append(prodID + 2)\n self.metaValues = sorted(self.metaValues)\n self.setTargetSE(args[0])\n self.setSourceSE(args[1])", "title": "" }, { "docid": "8220c679595ca54ee32e7dcaddfc03df", "score": "0.61469907", "text": "def SetArguments(self, *args):\n return _BOPDS.BOPDS_DS_SetArguments(self, *args)", "title": "" }, { "docid": "2da9de29427558b1f45230f4c5dd8503", "score": "0.61254275", "text": "def set_state_vars(self, *args) -> None:\n for arg in args:\n self._state_vars[arg[0]] = np.zeros(self._num_steps)\n self._state_functions.append((arg[0], arg[1]))", "title": "" }, { "docid": "9c1863a4a1b5072d9388a32e7bc68393", "score": "0.6064667", "text": "def gdxSetSpecialValues(*args):\n return _gdxcc.gdxSetSpecialValues(*args)", "title": "" }, { "docid": "528dbdf3c76183bb454c8365159e1467", "score": "0.6054956", "text": "def set_params(self,**kwargs):\n for key,value in list(kwargs.items()):\n setattr(self,key,value)", "title": "" }, { "docid": "160b5c758e9f6afe7cef06e8d46d6271", "score": "0.6038237", "text": "def __init__(self, *args):\n self.set(args)", "title": "" }, { "docid": 
"7b815a7068367bd8f0bbf2004747a2f7", "score": "0.6024174", "text": "def setVarArgs(self, hasVarArgs: bool) -> None:\n ...", "title": "" }, { "docid": "486578cf37c30b762b11bff760d4af0b", "score": "0.6021886", "text": "def set(self, **kwargs):\n\n for key, value in kwargs.items():\n setattr(self, key, value)", "title": "" }, { "docid": "e12027cac7714c7d2658e28ac726c5c4", "score": "0.60215", "text": "def set_var_list(self, var_list):\n self.x = var_list", "title": "" }, { "docid": "f184bc7ce74d9d40bd61f5f7ef19c28c", "score": "0.5964627", "text": "def _setter(setfunc: Callable[[Union[str, int], Any], None], *args) -> None:\n # check for all elements in args whether they are types\n if len(args) == 1 and \\\n all(isinstance(pair, Sequence) and len(pair) == 2 for pair in args[0]):\n for pair in args[0]:\n setfunc(*pair)\n elif len(args) == 2:\n setfunc(*args)\n else:\n raise QiskitOptimizationError(\"Invalid arguments: {}\".format(args))", "title": "" }, { "docid": "4ac5f412167d7e76f19342e00e1e5cc8", "score": "0.59497297", "text": "def __setstate__(self, args):\n if len(args) == 3: self._relative, self._envvar, self._hook = args\n else: self._relative, self._envvar = args", "title": "" }, { "docid": "93a1fb6da0790da5ecb59ee970651304", "score": "0.5946874", "text": "def set_many_values(names_and_values):\n if not names_and_values:\n return\n\n existing_vars = {var.name: var for var in Variable.query.filter(Variable.name.in_(names_and_values.keys())).all()}\n\n for name, value in names_and_values.items():\n if name in existing_vars:\n var = existing_vars[name]\n else:\n var = Variable()\n var.name = name\n\n var.value = value\n\n db.session.add(var)", "title": "" }, { "docid": "e14f2b9f3cc1f8bd4a7d9b9aa4d3aa40", "score": "0.59362555", "text": "def setArgs(self, **kwargs):\r\n for key, value in kwargs.items():\r\n if key in (\"verbose\", \"ver\", \"v\"):\r\n self._verbosity = value\r\n elif key in (\"max_epochs\"):\r\n self._max_epochs = value", "title": "" }, { "docid": "63d6219db89a31155ab83c89edf829ec", "score": "0.59315795", "text": "def set(self, *args):\n self._set_or_add(True, args)", "title": "" }, { "docid": "b46392677e0a6b0056833cf148add4b8", "score": "0.5912285", "text": "def apply(self, **objects):\n for key, values in list(objects.items()):\n self.locals[key] = value", "title": "" }, { "docid": "ead043d1ab55e33cab09b5c9e4275a00", "score": "0.5907402", "text": "def var_set(self):\n raise NotImplementedError()", "title": "" }, { "docid": "ead043d1ab55e33cab09b5c9e4275a00", "score": "0.5907402", "text": "def var_set(self):\n raise NotImplementedError()", "title": "" }, { "docid": "edcb3274f92e940edd4caddd12a266fe", "score": "0.5901608", "text": "def variables(self, variables):\n\n self._variables = variables", "title": "" }, { "docid": "9b98d428a83d253d942fd3769208f087", "score": "0.5899951", "text": "def setValues(self):\n pass", "title": "" }, { "docid": "9b98d428a83d253d942fd3769208f087", "score": "0.5899951", "text": "def setValues(self):\n pass", "title": "" }, { "docid": "9b98d428a83d253d942fd3769208f087", "score": "0.5899951", "text": "def setValues(self):\n pass", "title": "" }, { "docid": "9b98d428a83d253d942fd3769208f087", "score": "0.5899951", "text": "def setValues(self):\n pass", "title": "" }, { "docid": "9b98d428a83d253d942fd3769208f087", "score": "0.5899951", "text": "def setValues(self):\n pass", "title": "" }, { "docid": "9b98d428a83d253d942fd3769208f087", "score": "0.5899951", "text": "def setValues(self):\n pass", "title": "" }, { "docid": 
"9b98d428a83d253d942fd3769208f087", "score": "0.5899951", "text": "def setValues(self):\n pass", "title": "" }, { "docid": "9b98d428a83d253d942fd3769208f087", "score": "0.5899951", "text": "def setValues(self):\n pass", "title": "" }, { "docid": "9b98d428a83d253d942fd3769208f087", "score": "0.5899951", "text": "def setValues(self):\n pass", "title": "" }, { "docid": "bb17c25161766b04f966b0c1028db75b", "score": "0.58867955", "text": "def arguments(self, args: Tuple):\n self.args = args", "title": "" }, { "docid": "55a6d2278f2d851e04b70eb534bb44c6", "score": "0.58691573", "text": "def set_values(self, **kwargs: any):\n if 'param_file' in kwargs:\n self.param_file = kwargs['param_file']\n if 'key' in kwargs:\n self.key = kwargs['key']\n if 'id' in kwargs:\n self.id = kwargs['id']\n if 'hkl' in kwargs:\n self.hkl = kwargs['hkl']\n if 'cellsetting' in kwargs:\n self.cellsetting = kwargs['cellsetting']\n if 'cutboxvector' in kwargs:\n self.cutboxvector = kwargs['cutboxvector']\n if 'shiftindex' in kwargs:\n self.shiftindex = kwargs['shiftindex']\n if 'sizemults' in kwargs:\n self.sizemults = kwargs['sizemults']\n if 'minwidth' in kwargs:\n self.minwidth = kwargs['minwidth']\n if 'even' in kwargs:\n self.even = kwargs['even']\n if 'family' in kwargs:\n self.family = kwargs['family']\n if 'a1vect_uvw' in kwargs:\n self.a1vect_uvw = kwargs['a1vect_uvw']\n if 'a2vect_uvw' in kwargs:\n self.a2vect_uvw = kwargs['a2vect_uvw']\n if 'faultpos_rel' in kwargs:\n self.faultpos_rel = kwargs['faultpos_rel']", "title": "" }, { "docid": "32483474aacbfbd07d58bcbfb78ce3b7", "score": "0.5860403", "text": "def values(**args):\n\tclass _obj(object):\n\t\tdef __init__(self):\n\t\t\tfor item in args.keys():\n\t\t\t\tsetattr(self,item,args[item])\n\t\t\tsuper(_obj,self).__init__(self)\n\treturn _obj", "title": "" }, { "docid": "5264a7b66e7db3d8d0517621e8ec9b5a", "score": "0.585973", "text": "def _set(arguments):\n \n if len(arguments) < 2:\n raise ExecutionError(\"'set' takes at least two argument\")\n \n if not arguments[0] in SETTING_FUNCTIONS:\n raise ExecutionError(\"Don't know how to set {0}\".format(arguments[0]))\n \n return SETTING_FUNCTIONS[arguments[0]](arguments[1:])", "title": "" }, { "docid": "0d3ec05f8339fe74eb493daa2d67a8b3", "score": "0.5834355", "text": "def set_param_values(self, values, borrow=False):\r\n for param, value in zip(self.get_params(), values):\r\n param.set_value(value, borrow=borrow)", "title": "" }, { "docid": "201ef364c850f7939fef2110b3afbc7f", "score": "0.5787381", "text": "def args(self, args):\n\n self._args = args", "title": "" }, { "docid": "9f8ebc20224adeee48c81cb60ea4b5e3", "score": "0.57760924", "text": "def use_variables(self, use_vars: dict) -> None:\n self.use_vars = use_vars", "title": "" }, { "docid": "a6756811132b0949e9535264c8e85a71", "score": "0.5747174", "text": "def setGlobalVariables(self, names_and_values):\n self._check_connection();\n params = { \"\" : \"setGlobalVariables\"};\n params[\"names_and_values\"] = names_and_values;\n self._stream.writeQVariantHash(params);\n self._flush(True);\n if not self._async:\n ret = self._read_return(\"setGlobalVariables\");", "title": "" }, { "docid": "39ef1427aa0ae54818ac21c9344e26cd", "score": "0.57016814", "text": "def __call__(self, **kwargs):\n for key, value in kwargs.items():\n self[key] = value", "title": "" }, { "docid": "d21187f77004be7702b4ea191b1ecdaf", "score": "0.5664855", "text": "def set_var(self, args):\n\n self.exit_on_unknown_key(\n args.property_name,\n ('Error: Can\\'t set override value for \"{}\" '\n 
'because it is an unknown variable name.').format(args.property_name)\n )\n self.set_override(args.property_name, args.property_value)", "title": "" }, { "docid": "8a934e0835faca2064792c05814d8816", "score": "0.5658974", "text": "def set_params(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)\n return self", "title": "" }, { "docid": "60464e47258007320cb9a25a657cb2a7", "score": "0.56394184", "text": "def set(self, *args):\n if len(args) == 1:\n arg = args[0]\n self.x = float(arg[0])\n self.y = float(arg[1])\n elif len(args) == 2:\n self.x = float(args[0])\n self.y = float(args[1])\n else:\n raise TypeError(\"vec2.__init__() takes either one or two arguments; \"+len(args)+\" given.\")", "title": "" }, { "docid": "9b60273fd3c6458c06c104c773ff725d", "score": "0.5632945", "text": "def set(self, **kwargs):\n\n # Update the data with the given keyword arguments\n for key, value in kwargs.items():\n self._data[key] = value", "title": "" }, { "docid": "3f20d5272de6ae9c38644b7a8f98ab94", "score": "0.5628288", "text": "def set(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z", "title": "" }, { "docid": "f3c648f5634f5eb88ad229b416f2b0f4", "score": "0.56184214", "text": "def __parse_arguments(self, args):\n self.__args = args", "title": "" }, { "docid": "29598cb72775386c6a6e7d9719030abb", "score": "0.5614063", "text": "def set_values(self, x, y):\n self.x = x\n self.y = y", "title": "" }, { "docid": "315fe14178572ac3cf97e577f3332bbc", "score": "0.5606557", "text": "def autoassign(self, locals):\r\n for (key, value) in locals.iteritems():\r\n if key == 'self': \r\n continue\r\n setattr(self, key, value)", "title": "" }, { "docid": "b8b745051262635438db8a675ced681d", "score": "0.55915374", "text": "def setValue(self, *args):\n pass", "title": "" }, { "docid": "4a4e4cfd443e7a82c76336ebd51fcbe1", "score": "0.5588901", "text": "def __init__(self, args):\n # Set up attributes from command line arguments\n for key, value in args.__dict__.items():\n setattr(self, key, value)", "title": "" }, { "docid": "c8454675fb7002296e94e7e4442a76c4", "score": "0.558858", "text": "def set_params(self, use_table=None, **args):\n hoomd.util.print_status_line()\n\n if use_table is not None:\n self.cpp_force.setUseTable(use_table)\n\n # call base class method\n hoomd.util.quiet_status()\n _collective_variable.set_params(self, **args)\n hoomd.util.quiet_status()", "title": "" }, { "docid": "1a69991932bc063a568ba8e4776a27da", "score": "0.55839777", "text": "def set_parameters(self, values):\n self.parameters = values", "title": "" }, { "docid": "1a69991932bc063a568ba8e4776a27da", "score": "0.55839777", "text": "def set_parameters(self, values):\n self.parameters = values", "title": "" }, { "docid": "723cf2d87c58780e02257a7967ccaa40", "score": "0.5580787", "text": "def test_multi_assignment():\n x = y = z = 0\n a, b = 1, 2", "title": "" }, { "docid": "0aff277cf9203509a856376e1d833b74", "score": "0.55728346", "text": "def set_params(self,**kwargs):\n \n args, varargs, varkw, defaults = inspect.getargspec(self.__init__)\n \n if kwargs:\n for key in kwargs:\n if key in args:\n setattr(self,key,kwargs[key])", "title": "" }, { "docid": "657069211ce28fcb6b866619eb9dc9c5", "score": "0.55651253", "text": "def set_parameters(self, **kwargs):\n\n for key in kwargs:\n if key in self.par:\n self.par[key].value = kwargs[key]\n else:\n print(f'Parameter \\'{key}\\' does not exist.')", "title": "" }, { "docid": "ef4f956eef9345213de7a7a4eaf4dc92", "score": "0.5559466", "text": "def 
__init__(self,name,vars):\n\t\tself.name = name\n\t\ttype = vars[0]['type']\n\t\tfor var in vars:\n\t\t\tif var['type'] != type:\n\t\t\t\traise ParsingError('variables must be either all named or all positional')\n\t\tself.var_type = type\n\t\tif type == 'positional':\n\t\t\targs = []\n\t\t\tfor var in vars:\n\t\t\t\targs.append(var['value'])\n\t\telif type == 'named':\n\t\t\targs = {}\n\t\t\tfor var in vars:\n\t\t\t\targs[var['name']] = var['value']\n\t\telse:\n\t\t\traise 'internal error unknown type of function variable in PulseScript source'\n\t\tself.args = args", "title": "" }, { "docid": "d0fdb8dd345217b5bc17048bd2f360ff", "score": "0.55578625", "text": "def setAttrs(attrs):", "title": "" }, { "docid": "29364b48b078bd989eec10ba720b1c1c", "score": "0.55527", "text": "def set_attrs(self, **attrs) -> None:", "title": "" }, { "docid": "1280bf6861d09dc236d822d1911d9898", "score": "0.55456084", "text": "def initializePreValues(self, args):\n index = 0\n if not self.preValuesAlreadyCharged:\n for arg in args:\n self.instructions.insert(index, MovLiteral(index, int(arg)))\n index = index + 1\n self.variableSize = self.variableSize + index\n self.preValuesAlreadyCharged = True\n else:\n whereToStart = len(args)- 1\n for arg in args:\n self.instructions[whereToStart] = MovLiteral(index, int(arg))\n whereToStart = whereToStart - 1\n index = index + 1", "title": "" }, { "docid": "fd0fa41ac0296c2e683a3af27deddf10", "score": "0.5501257", "text": "def __init__(self, **kwargs):\n # A frozen (immutable) set of all the predefined values\n self.__values = frozenset(kwargs.values())\n for key, value in kwargs.iteritems():\n setattr(self, key, value)\n self.__is_frozen = True", "title": "" }, { "docid": "0ac1d52b6f98bf974be708db9d00e032", "score": "0.5496432", "text": "def _set_ta_vars(self, n, t, k, w, d, x):\n self.n = n\n self.t = t\n self.k = k\n self.w = w\n self.d = d\n self.x = x", "title": "" }, { "docid": "3eadd9731062c1222c087d7a8fb5e409", "score": "0.5486519", "text": "def absorb_variable_values(self, values):\n\n for i in range(len(self.variable_names)):\n name = self.variable_names[i]\n self.parameter_dict[name] = values[i]", "title": "" }, { "docid": "ca0802eb84b7e474a5cacfb6e056a5a5", "score": "0.5486052", "text": "def setAllArgs(obj, argdict):\r\n\r\n xmlstore = isinstance(obj, XMLBuildable)\r\n for n in argdict.keys():\r\n if hasattr(obj, n):\r\n setattr(obj, n, argdict[n])\r\n if xmlstore:\r\n obj.argdict[n] = argdict[n]\r\n else:\r\n print('Warning: parameter name', n, 'not found!')\r\n if xmlstore:\r\n if not hasattr(obj, '_unknown_argdict'):\r\n obj._unknown_argdict = {}\r\n obj._unknown_argdict[n] = argdict[n]", "title": "" }, { "docid": "ed2902e2146088bade7b0682506adf92", "score": "0.5483805", "text": "def process_args(self, cmd_args):\n # get cmdline variables from args\n reset_vars()\n args_no_vars = []\n for arg in cmd_args:\n if (arg[0] != '-') and ('=' in arg):\n name, value = arg.split('=', 1)\n set_var(name, value)\n else:\n args_no_vars.append(arg)\n return args_no_vars", "title": "" }, { "docid": "dfc2b393eca1bd0ebd269cc0392e3862", "score": "0.54789233", "text": "def values(self, *args):\n pass", "title": "" }, { "docid": "6eec242417d47dfd7ccb16a8db4b6e95", "score": "0.5477419", "text": "def _set_shared_vars(self, shared_vars):\n for key in shared_vars.keys():\n self.shared_vars[key].set_value(shared_vars[key])", "title": "" }, { "docid": "b3d5d98d50995ffe95298135ba160609", "score": "0.5475734", "text": "def setvalues(self, args: List[Union[None, int, float, str, Timing, 
Margin, Effect, Text]]) -> 'Evento':\r\n\r\n if isinstance(args, list) is False:\r\n raise TypeError(f\"{args} has to be a list.\")\r\n if len(args) != 10:\r\n raise ValueError(f\"{args} must have 10 elements\")\r\n assert self.getformato() is not None, f\"Format is not set for this instance.\"\r\n _format = self.getformato()\r\n # I know __convertvalue__ is being called twice for the same object\r\n # just trying to follow the practice of making sure that the output is never a reference to the input\r\n # _values = [self.__convertvalue__(_format[_], args[_]) for _ in range(10)]\r\n # changed my mind, just calling args instead of _values\r\n for _ in range(10):\r\n self.setdictvalue(_format[_], args[_])\r\n return self", "title": "" }, { "docid": "66067b3b933e72e99a22927813e3f438", "score": "0.54740083", "text": "def set(self, *args):\n args = list(args)\n value = args.pop()\n d = self\n for elem in args[0:-1]:\n if elem not in d.keys():\n d[elem] = dict()\n d = d[elem]\n d[args[-1]] = value", "title": "" }, { "docid": "9a814728df8066b44885d1b0b95be0a2", "score": "0.5462851", "text": "def set(self, *args):\n value = self.values\n # Loop over elements of path.\n for arg in args[:-2]:\n value = value.setdefault(arg, {})\n\n # Finally, set value at the right path.\n value[args[-2]] = args[-1]", "title": "" }, { "docid": "dcd6fba0f7068743abe4bbc1a4fafefb", "score": "0.5446725", "text": "def set_attributes(self, **kwargs):\n for attr_key, attr_value in kwargs.items():\n setattr(self, attr_key, attr_value)", "title": "" }, { "docid": "82c7ed33fbee86cb00f6aed9d803e6f0", "score": "0.54406404", "text": "def alias(self,**kwargs):\n for k,v in iteritems(kwargs):\n if 0: # deep copy:\n self[k][self.variables[v].dimensions] = self.variables[v][:]\n\n for attr_name in self.variables[v].ncattrs():\n setattr(self.variables[k],attr_name,\n getattr(self.variables[v],attr_name))\n else:\n self.variables[k]=self.variables[v]", "title": "" }, { "docid": "f060e8fdd5535346b1f3334ac15e94c5", "score": "0.54393744", "text": "def set_params(self, kwargs):\n self.load_params(kwargs)\n self.servername=kwargs['server']\n self.port=kwargs['port']\n self.username=kwargs['username']\n self.password=kwargs['password']\n self.sql_file=kwargs['sql_file']\n self.dialect =kwargs['dialect']", "title": "" }, { "docid": "ab230be2e78162530a3986400189a221", "score": "0.5438131", "text": "def update(self, *args, **kwargs):\n for src in args:\n if hasattr(src, 'keys'):\n for key in src:\n self[key] = src[key]\n else:\n for key, val in src:\n self[key] = val\n for key in kwargs:\n self[key] = kwargs[key]", "title": "" }, { "docid": "2b1ef250fc8d6f4a50b1daf68922b2b3", "score": "0.5433681", "text": "def unpack_testparams(self, bulk_params):\n for key in bulk_params.keys():\n setattr(self, key, bulk_params[key])", "title": "" }, { "docid": "ceb2e16c4c32d3c25e86c978feb4ea4c", "score": "0.54291344", "text": "def _set_params(self, *args, **kwargs):\n self._verify_not_readonly(*args, **kwargs)\n params_to_set = args[0]\n startup = False\n if len(args) > 1:\n startup = args[1]\n old_config = self._param_dict.get_all()\n\n # check if in range\n constraints = ParameterConstraint.dict()\n parameters = Parameter.reverse_dict()\n\n # step through the list of parameters\n for key, val in params_to_set.iteritems():\n # if constraint exists, verify we have not violated it\n constraint_key = parameters.get(key)\n if constraint_key in constraints:\n var_type, minimum, maximum = constraints[constraint_key]\n try:\n value = var_type(val)\n except 
ValueError:\n raise InstrumentParameterException(\n 'Unable to verify type - parameter: %s value: %s' % (key, val))\n if val < minimum or val > maximum:\n raise InstrumentParameterException(\n 'Value out of range - parameter: %s value: %s min: %s max: %s' %\n (key, val, minimum, maximum))\n\n # all constraints met or no constraints exist, set the values\n for key, val in params_to_set.iteritems():\n if key in old_config:\n self._param_dict.set_value(key, val)\n else:\n raise InstrumentParameterException(\n 'Attempted to set unknown parameter: %s value: %s' % (key, val))\n new_config = self._param_dict.get_all()\n\n # If we changed anything, raise a CONFIG_CHANGE event\n if old_config != new_config:\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)", "title": "" }, { "docid": "70d65a3d93309c91c96e5e355a057338", "score": "0.5417165", "text": "def set_value(self, **values):\n action = DA_ChangeVariables(self, values)\n return self.get_document()._perform(action)", "title": "" }, { "docid": "a55e1412e61789c722787aaaddae1dab", "score": "0.5413619", "text": "def set(self, **kwargs):\n changed_parameters = {}\n kwargs = capitalize_keys(kwargs)\n for key in kwargs:\n oldvalue = self.parameters.get(key)\n if key not in self.parameters:\n if isinstance(oldvalue, dict):\n # Special treatment for dictionary parameters:\n for name in value:\n if name not in oldvalue:\n raise KeyError(\n 'Unknown subparameter \"%s\" in '\n 'dictionary parameter \"%s\"' % (name, key))\n oldvalue.update(value)\n value = oldvalue\n changed_parameters[key] = kwargs[key]\n self.parameters[key] = kwargs[key]\n flag = 0\n for param in self.params:\n if key in self.params[param]:\n self.params[param][key] = kwargs[key]\n flag = 1\n break\n if flag ==0 and key in self.ase_params:\n self.ase_params[key] = kwargs[key]\n elif flag ==0 and key not in self.ase_params:\n raise TypeError('Parameter not defined: ' + key)", "title": "" }, { "docid": "4d8726ba79cea875b4c0ab594df25f08", "score": "0.5408294", "text": "def set_args(self, namespace):\r\n args = {}\r\n for key, value in namespace.__dict__.items():\r\n if value is not None: # Avoid unset options.\r\n args[key] = value\r\n self.set(args)", "title": "" }, { "docid": "f5663c1984bb12c3a8b93794fbe21861", "score": "0.5393617", "text": "def set_attr(self, **kwargs):\n var = self.attr().keys()\n\n # set specified values\n for key in kwargs:\n if key in var:\n # data container specification\n if isinstance(kwargs[key], data_container):\n if isinstance(kwargs[key], type(self.get_attr(key))):\n self.__dict__.update({key: kwargs[key]})\n\n else:\n msg = ('The keyword ' + key + ' expects a '\n 'data_container of type ' +\n str(type(self.get_attr(key))) +\n ', a data_container of type ' +\n str(type(kwargs[key])) + ' was supplied.')\n logging.error(msg)\n raise TypeError(msg)\n\n elif isinstance(self.get_attr(key), dc_cp):\n # value specification for component properties\n if (isinstance(kwargs[key], float) or\n isinstance(kwargs[key], np.float64) or\n isinstance(kwargs[key], np.int64) or\n isinstance(kwargs[key], int)):\n if np.isnan(kwargs[key]):\n self.get_attr(key).set_attr(is_set=False)\n self.get_attr(key).set_attr(is_var=False)\n\n else:\n self.get_attr(key).set_attr(val=kwargs[key])\n self.get_attr(key).set_attr(is_set=True)\n self.get_attr(key).set_attr(is_var=False)\n\n elif kwargs[key] == 'var':\n self.get_attr(key).set_attr(is_set=True)\n self.get_attr(key).set_attr(is_var=True)\n\n # invalid datatype for keyword\n else:\n msg = ('Bad datatype for keyword argument ' + key +\n 
' at ' + self.label + '.')\n logging.error(msg)\n raise TypeError(msg)\n\n elif (isinstance(self.get_attr(key), dc_cc) or\n isinstance(self.get_attr(key), dc_cm)):\n # value specification for characteristics\n if (isinstance(kwargs[key], char_line) or\n isinstance(kwargs[key], char_map) or\n isinstance(kwargs[key], compressor_map)):\n self.get_attr(key).func = kwargs[key]\n\n # invalid datatype for keyword\n else:\n msg = ('Bad datatype for keyword argument ' + key +\n ' at ' + self.label + '.')\n logging.error(msg)\n raise TypeError(msg)\n\n elif isinstance(self.get_attr(key), dc_gcp):\n # value specification of grouped component parameter method\n if isinstance(kwargs[key], str):\n self.get_attr(key).method = kwargs[key]\n\n # invalid datatype for keyword\n else:\n msg = ('Bad datatype for keyword argument ' + key +\n ' at ' + self.label + '.')\n logging.error(msg)\n raise TypeError(msg)\n\n elif isinstance(self.get_attr(key), dc_simple):\n if (isinstance(kwargs[key], float) or\n isinstance(kwargs[key], np.float64) or\n isinstance(kwargs[key], np.int64) or\n isinstance(kwargs[key], int)):\n if np.isnan(kwargs[key]):\n self.get_attr(key).set_attr(is_set=False)\n\n else:\n self.get_attr(key).set_attr(\n val=kwargs[key], is_set=True)\n\n else:\n self.get_attr(key).set_attr(\n val=kwargs[key], is_set=True)\n\n elif key == 'design' or key == 'offdesign':\n if not isinstance(kwargs[key], list):\n msg = ('Please provide the ' + key + ' parameters as list '\n 'at ' + self.label + '.')\n logging.error(msg)\n raise TypeError(msg)\n if set(kwargs[key]).issubset(list(var)):\n self.__dict__.update({key: kwargs[key]})\n\n else:\n msg = ('Available parameters for (off-)design '\n 'specification are: ' + str(list(var)) + ' at '\n + self.label + '.')\n logging.error(msg)\n raise ValueError(msg)\n\n elif key == 'local_design' or key == 'local_offdesign':\n if not isinstance(kwargs[key], bool):\n msg = ('Please provide the parameter ' + key +\n ' as boolean at component ' + self.label + '.')\n logging.error(msg)\n raise TypeError(msg)\n\n else:\n self.__dict__.update({key: kwargs[key]})\n\n elif key == 'design_path':\n if isinstance(kwargs[key], str):\n self.__dict__.update({key: kwargs[key]})\n self.new_design = True\n\n elif np.isnan(kwargs[key]):\n self.design_path = None\n self.new_design = True\n\n else:\n msg = ('Please provide the ' + key + ' parameter as '\n 'string or as nan.')\n logging.error(msg)\n raise TypeError(msg)\n\n elif key == 'printout':\n if not isinstance(kwargs[key], bool):\n msg = ('Please provide the ' + key + ' as boolean.')\n logging.error(msg)\n raise TypeError(msg)\n else:\n self.__dict__.update({key: kwargs[key]})\n\n # invalid keyword\n else:\n msg = ('Component ' + self.label + ' has no attribute ' +\n str(key) + '.')\n logging.error(msg)\n raise KeyError(msg)", "title": "" }, { "docid": "2d06751a5f5aa94529f17211c8da2598", "score": "0.53908604", "text": "def update_parameters(self, **kwargs):\n [setattr(self, argument_name, value)\n for argument_name, value in kwargs.items()]", "title": "" }, { "docid": "2e9138f5d7bfd6bd57c19e251ff22411", "score": "0.53841406", "text": "def var_names(self, names: List[str]) -> None:\n if len(names) != self._n_var:\n raise ValueError(\"The number of names does not match the number of variables.\")\n if len(set(names)) != self._n_var:\n raise ValueError(\"The given names of the variables are not unique.\")\n self._var_names = list(names)", "title": "" }, { "docid": "600cae5c5d34c7894fa760e76922bc0f", "score": "0.5381394", "text": "def 
set_parameters(self, parameters):\n self.endpoint = parameters['endpoint']\n if 'args' in parameters.keys():\n for e, a in enumerate(parameters['args']):\n exec('self.arg_commands.setdefault(\"arg_{}\", a)'.format(e))", "title": "" }, { "docid": "4f0e09873cf3a86fb70b76abcd743ce7", "score": "0.5379049", "text": "def set(self, *arg):\n # set options\n def _set_option(name, value):\n if name in config.Option.options:\n config.Option.set(name, value)\n msg(\"%s = %s\" % (name, repr(value)))\n else:\n msg(\"Unknown option: %s\" % name)\n return\n\n # set args\n def _set_arg(*arg):\n cmd = \"set args\"\n for a in arg:\n try:\n s = eval('%s' % a)\n if isinstance(s, six.integer_types + six.string_types):\n a = s\n except:\n pass\n cmd += \" '%s'\" % a\n peda.execute(cmd)\n return\n\n # set env\n def _set_env(name, value):\n env = peda.execute_redirect(\"show env\")\n cmd = \"set env %s \" % name\n try:\n value = eval('%s' % value)\n except:\n pass\n cmd += '%s' % value\n peda.execute(cmd)\n\n return\n\n (opt, name, value) = normalize_argv(arg, 3)\n if opt is None:\n self._missing_argument()\n\n if opt.startswith(\"opt\"):\n if value is None:\n self._missing_argument()\n _set_option(name, value)\n elif opt.startswith(\"arg\"):\n _set_arg(*arg[1:])\n elif opt.startswith(\"env\"):\n _set_env(name, value)\n else:\n msg(\"Unknown set option: %s\" % known_args.opt)\n return", "title": "" }, { "docid": "c4afba092db637008103068ccad8566d", "score": "0.53781235", "text": "def fillVarSet(self):\n self.varset.add(Compiler.__actvar)\n for inst in self.sys.instances.itervalues():\n pt = self.sys.proctypes[inst.proctype]\n # fault active variables\n for f in [x for x in pt.faults if x.type != Types.Transient]:\n self.varset.add(self.compileFaultActive(inst.name, f.name))\n # common variables\n for var in pt.localvars:\n if not var.type.type == Types.Array:\n self.varset.add(self.compileLocalVar(inst.name, var.name))\n else:\n for v in self.arrayToVars(var):\n self.varset.add(self.compileLocalVar(inst.name, v))\n\n # instance program counters\n self.varset.add(self.compileIPC(inst.name))", "title": "" }, { "docid": "dab2981838b8ddb902f332c8d0cdd3fc", "score": "0.537506", "text": "def gdxSetReadSpecialValues(*args):\n return _gdxcc.gdxSetReadSpecialValues(*args)", "title": "" }, { "docid": "e56d480d3aeb12470051c1a9743f4e4e", "score": "0.53726697", "text": "def set_var_list(self) -> list:\n return ['lon', 'lat', 'beach']", "title": "" }, { "docid": "9b0f6455f71a676d0b02cf681aca0ac9", "score": "0.53613025", "text": "async def set_many(self, *args, **kwargs):\n kv2update = {\n **{self.make_key(k): v for k, v in dict(args).items()},\n **{self.make_key(k): v for k, v in kwargs.items()},\n }\n with self.get_async_context() as conn:\n if len(kv2update) > 0:\n result = await conn.mset(kv2update)\n else:\n raise TypeError(\"No keys for get_many, args=%s\" % str(args))\n return result", "title": "" }, { "docid": "46a3d8a791d35acef83ad448ae797987", "score": "0.5361123", "text": "def _setParams(self, params):\n if len(params) != 6:\n raise ValueError(\"Incorrect number of params submitted!\")\n self.x1 = params[0]\n self.y1 = params[1]\n self.x2 = params[2]\n self.y2 = params[3]\n self.start = params[4]\n self.extent = params[5]", "title": "" }, { "docid": "984a89b2a1320fc1a34418726cba28f7", "score": "0.53610665", "text": "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "title": "" }, { "docid": "984a89b2a1320fc1a34418726cba28f7", "score": "0.53610665", "text": "def __init__(self, 
**kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "title": "" }, { "docid": "984a89b2a1320fc1a34418726cba28f7", "score": "0.53610665", "text": "def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)", "title": "" }, { "docid": "70736be4625f808491e6a38c10e1ab6c", "score": "0.5355747", "text": "def _set_parameters(self,update):\n raise PTParserError('Cannot set parameters in the environment.')", "title": "" }, { "docid": "2b84498daf2216ac7666abd477990c35", "score": "0.53464967", "text": "def __init__(self, target_value=\"\", args={}):\n self._args = args\n self._var_iterator = VarIterator(target_value=target_value)\n while self._var_iterator.has_next():\n var_item = self._var_iterator.get_var()", "title": "" }, { "docid": "2547901f070f195c748028b1a19c52d3", "score": "0.5343712", "text": "def Vars(*args):\n return tuple(Var(x) for x in args)", "title": "" }, { "docid": "934aaf74c2c355d9058ecc20c20f4118", "score": "0.53427994", "text": "def __init__(self, **kwargs):\n for key in kwargs:\n setattr(self, key, kwargs[key])", "title": "" }, { "docid": "17a474a66f97ba4fd95e78219068c299", "score": "0.5341839", "text": "def __init__(self, *args):\n\n self._x = []\n self._y = []\n self._table = []\n self._tol = TOL\n self.set(*args) # Let's use 'set()' method to handle the setup", "title": "" }, { "docid": "cc224c46da86aec78cccda36c75a0be7", "score": "0.5325876", "text": "def set_parameters(self):\n raise NotImplementedError(\"This function needs to be implemented\")", "title": "" } ]
89f7327ed06d85dd9d8429a9a2ef528d
Shell sort. O(n^(3/2)) algorithm.
[ { "docid": "f42e521c6f005785f2a76c2ca39d9999", "score": "0.75712293", "text": "def shell_sort(array):\n # Gap between adjacent elements\n gap = int(len(array) / 2)\n while gap > 0:\n for position in range(gap, len(array)):\n # Insert element at position in its sub-array\n shell_sort_insertion(array, position, gap)\n\n # Reset gap for next pass\n if gap == 2:\n gap = 1\n else:\n gap = int(gap / 2)", "title": "" } ]
[ { "docid": "7b265fc09557b6bbe24693d4f7fb382f", "score": "0.75653803", "text": "def shell_sort(a_list):\n start = time.time()\n list_count = len(a_list) // 2\n while list_count > 0:\n for start_position in range(list_count):\n gap_insertion_sort(a_list, start_position, list_count)\n list_count = list_count // 2\n end = time.time()\n return end-start, a_list", "title": "" }, { "docid": "796457e2d2987e1d1788c5b839a5a622", "score": "0.7536447", "text": "def shell_sort(self):\n arr = copy.copy(self.array)\n sort_track = [(copy.copy(arr), 0)]\n\n increment = len(arr)\n while increment > 1:\n increment = increment / 3 + 1\n for i in xrange(0, increment):\n self._insert_sort_interval(i, len(arr) - 1, arr, sort_track, increment)\n\n return sort_track", "title": "" }, { "docid": "fb50231e4e23b3fc3f2b1f1dd7cd2adf", "score": "0.7182704", "text": "def shellSort(alist):\n sublist = len(alist) // 2\n while sublist > 0:\n #pass the value to insertsort and apply sorting\n for startpos in range(sublist):\n insertsort(alist,startpos,sublist)\n print(\"After increment of size\",sublist,\n \"The list is\",alist)\n sublist = sublist // 2", "title": "" }, { "docid": "b4822ad094cede4e6a9525fd8fcab90f", "score": "0.7144716", "text": "def shell_sort(self):\n n = len(self.id)\n gap = n // 2\n\n while gap > 0:\n for i in range(gap, n):\n temp = self.id[i]\n j = i\n\n while j >= gap and self.id[j - gap] > temp:\n self.id[j] = self.id[j - gap]\n j -= gap\n\n self.id[j] = temp\n gap //= 2\n\n return self.id", "title": "" }, { "docid": "007f1c47bb2d655330eac520fb14865c", "score": "0.69797415", "text": "def wiggleSort(self, nums):", "title": "" }, { "docid": "859fdc5fc5ab65300c70b8bf09eaf5df", "score": "0.6920273", "text": "def time_shell(lst):\n start = time.time()\n comp = shell_sort(lst)\n end = time.time()\n sorttime = end - start\n return sorttime, comp", "title": "" }, { "docid": "725cb031275894b5092e020b8f2c7717", "score": "0.6878905", "text": "def shell_sort(l, gaps=[701, 301, 132, 57, 23, 10, 4, 1]):\n l= list(l)\n for gap in gaps:\n if gap >= len(l):\n continue\n pieces= []\n for offset in range(gap):\n l[offset::gap]= insertion_sort(l[offset::gap])\n return l", "title": "" }, { "docid": "2a1332e2dd6571fd97234ef52a924006", "score": "0.6865403", "text": "def special_sort(array):", "title": "" }, { "docid": "9aaae1c7fff38e3e7463cdc289d33d9e", "score": "0.6838949", "text": "def merge_sort(sequence):", "title": "" }, { "docid": "9cbcb7667d278cd1e3627a0725a43a21", "score": "0.6817489", "text": "def quick_sort(values):\n quick_sort2(values,0,len(values)-1)\n return values", "title": "" }, { "docid": "2d1d94a455797190af028961e79a26c9", "score": "0.6705639", "text": "def heapsort(a):\n\n heapify(a, len(a))\n end = len(a)-1\n while end > 0:\n a[end], a[0] = a[0], a[end]\n end -= 1\n sift_down(a, 0, end)", "title": "" }, { "docid": "c74e7599371eb5e7b05602bd7df55af1", "score": "0.6688858", "text": "def cocktail_shaker_sort(data):\n length = len(data)\n\n left, right = 0, length - 1 # 有序序号 左边和右边\n\n while left < right:\n swapped = False # 是否一遍过去有没有改变 没有即 已有序\n for i in range(left, right):\n if data[i] < data[i + 1]: # 与后面一个元素对比\n data[i], data[i + 1] = data[i + 1], data[i]\n swapped = True\n right -= 1\n if not swapped: break\n for i in range(right, left, -1): # 与前面一个元素对比\n if data[i - 1] < data[i]:\n data[i], data[i - 1] = data[i - 1], data[i]\n swapped = True\n left += 1\n if not swapped: break\n\n return data", "title": "" }, { "docid": "ed2fa2cd3d72676c173f1f21f855e771", "score": "0.6653939", "text": "def 
wiggleSort(nums):\n nums.sort()\n lenOfList = int(len(nums) / 2)\n for i in range(lenOfList):\n temp = nums.pop(lenOfList + i)\n nums.insert(2 * i + 1, temp)\n return nums", "title": "" }, { "docid": "12a405ecde81fc1afd09d719a1250b33", "score": "0.6641673", "text": "def divide_sort(l):\n c = len(l) // 2\n l1 = l[:c]\n l2 = l[c:]\n insertion_sort(l1)\n insertion_sort(l2)\n return merge(l1, l2)", "title": "" }, { "docid": "4183ed854eed82f3e691dc167da9c51f", "score": "0.6602696", "text": "def sort(arr):\n quicksort(arr)", "title": "" }, { "docid": "f11ceef38f4e79396c60e7a26415505d", "score": "0.6588629", "text": "def my_sort(ls):\r\n \r\n tmp = None\r\n cond = True\r\n \r\n while cond == True: #sorting until proper order (if condition\r\n #always False after looking whole list)\r\n cond = False\r\n \r\n for index in range(1, len(ls)): \r\n if ls[index] > ls[index - 1]:\r\n tmp = ls[index - 1]\r\n ls[index - 1] = ls[index]\r\n ls[index] = tmp\r\n \r\n cond = True", "title": "" }, { "docid": "37fc5516759f1cc33baec2c2469c3647", "score": "0.6553054", "text": "def HEAPSORT(arr):\n data = arr\n n = len(data)\n for i in range(n, -1, -1):\n heapify(data, n, i)\n for i in range(n-1, 0, -1):\n data[i], data[0] = data[0], data[i]\n heapify(data, i, 0)\n return data", "title": "" }, { "docid": "fffc7fb75190a05149e3d7b1da2f0c24", "score": "0.65458566", "text": "def merge_sort(A):\n merge_sort2(A, 0, len(A) - 1)", "title": "" }, { "docid": "3e024d85fb9903a82dfb5740ac8e5d26", "score": "0.65435225", "text": "def quick_sort(seq):\n if len(seq) > 1:\n _quick_sort(seq, 0, len(seq))", "title": "" }, { "docid": "794d96a20a65b77a42c776051d9dde9b", "score": "0.65108556", "text": "def wiggleSort2(self, nums):\n nums.sort()\n half = len(nums[::2])\n nums[::2], nums[1::2] = nums[:half][::-1], nums[half:][::-1]", "title": "" }, { "docid": "2126eb28bf68129c74d1d8c1fc460e4b", "score": "0.650104", "text": "def quick_sort(arr, left, right):\n if left < right:\n division_ind = partition(arr, left, right) # choose a division point\n quick_sort(arr, left, division_ind-1)\n quick_sort(arr, division_ind+1, right)", "title": "" }, { "docid": "d05b531393539dff595be0cc57c3a5bb", "score": "0.64980906", "text": "def insertion_sort_2(array):\n n = len(array)\n\n for i in range(1, n):\n j = i\n while j > 0 and array[j] < array[j-1]:\n array[j], array[j-1] = array[j-1], array[j]\n j -= 1", "title": "" }, { "docid": "03c7a7a18e9d16c3cdd4968009ba759c", "score": "0.6497343", "text": "def merge_sort(S):\n n = len(S)\n logn = math.ceil(math.log(n,2))\n scr, dest = S, [None] * n # make temporary storage for dest\n for i in (2**k for k in range(logn)): # pass i creates all runs of length 2i\n for j in range(0, n, 2*i): # each pass merges two length i runs\n merge(src, dest, j, i)\n src, dest = dest, src # reverse roles of lists\n if S is not src:\n S[0:n] = src[0:n] # additional copy to get result to S", "title": "" }, { "docid": "286857d539f65bf92add3dc162a41e48", "score": "0.6492141", "text": "def wiggleSort(self, nums: List[int]) -> None:\n nums.sort()\n med = (len(nums) - 1) // 2\n nums[::2], nums[1::2] = nums[med::-1], nums[:med:-1]", "title": "" }, { "docid": "5dc90a59d6370a62d79cf9671d3cb10d", "score": "0.6459035", "text": "def sort(arr):\n for i, v in enumerate(arr):\n j = i\n while j > 0 and arr[j - 1] > arr[j]:\n arr[j - 1], arr[j] = arr[j], arr[j - 1]\n j -= 1\n return arr", "title": "" }, { "docid": "1f285cbcc7b530ad3915fc9aeea3a33a", "score": "0.64577466", "text": "def shuttleSort(array):\n for i in range(1, len(array)):\n for j 
in range(i, 0, -1):\n if array[j-1] > array[j]: \n temp = array[j-1]\n array[j-1] = array[j]\n array[j] = temp\n return array", "title": "" }, { "docid": "842b9f5d1d681bc07021994de0aee1df", "score": "0.64299875", "text": "def wiggleSort(self, nums: List[int]) -> None:\n nums.sort(reverse=True)\n mid = len(nums)//2\n nums[1::2],nums[0::2]=nums[:mid],nums[mid:]", "title": "" }, { "docid": "86b6ddd29a434ae58383f332c676f5c8", "score": "0.64192426", "text": "def shellSort(aList, *gap):\n print aList\n if len(aList) <= 1: #checking for unsortable and too short lists\n return aList\n elif isSorted(aList):\n return aList\n \n if gap != ():\n gap = gap[0]\n else:\n gap = 3\n \n listDict = {}\n intQuotient = len(aList)//gap\n remainder = len(aList) % gap\n sortedList = []\n longListList = []\n \n for i in range(gap): #setting up our arrays\n listDict['list' + str(i)] = []\n\n for j in range(gap): #Adding numbers to our sublists\n gapCounter = j\n while gapCounter <= len(aList):\n if gapCounter != 0:\n listDict['list' + str(j)] += [aList[gapCounter - 1]]\n gapCounter += gap\n \n for k in listDict: #sorting all lists within the dictionary\n listDict[k] = insertSort(listDict[k])\n \n for l in range(1, remainder + 1): #Appending lists to our list of large lists\n longListList.append('list' + str(l))\n \n for m in range(intQuotient): #At index m in all lists listn add to sortedList\n for n in range(gap):\n sortedList.append(listDict['list' + str(n)][m])\n print 'sorted list:'\n print sortedList\n for o in longListList: #Add the last value of all large lists to the end of the sortedList\n sortedList.append(listDict[o][-1])\n print remainder\n print longListList\n print listDict\n sortedList = insertSort(sortedList)\n return sortedList", "title": "" }, { "docid": "57329e461bd69c18f0463ea11b338b2e", "score": "0.6409674", "text": "def timsort2(lst):\r\n pass", "title": "" }, { "docid": "f3659a6fcc2fb74319a06c3b93e7753b", "score": "0.6404021", "text": "def increase_sort(array: list) -> list:\n for i in range(1, len(array)):\n j = i\n while j > 0 and array[j - 1] > array[i]:\n j -= 1\n array.insert(j, array.pop(i))\n return array", "title": "" }, { "docid": "4a4d85a9d35fc0a8b2c1d972bf220895", "score": "0.63917845", "text": "def heap_sort_lazy(a):\n h = []\n for value in a:\n heappush(h, value)\n return [heappop(h) for i in range(len(h))]", "title": "" }, { "docid": "10e71083e780cb9443383717a40c4fab", "score": "0.6388348", "text": "def heap_sort(array):\n build_heap(array)\n sort_heap(array)", "title": "" }, { "docid": "536efdbbcbf2092d22982eb56f6a4f59", "score": "0.6388273", "text": "def decrease_sort(array: list) -> list:\n for i in range(1, len(array)):\n j = i\n while j > 0 and array[j - 1] < array[i]:\n j -= 1\n array.insert(j, array.pop(i))\n return array", "title": "" }, { "docid": "c634b4c13c727ab8d46eda1b98b63046", "score": "0.63773793", "text": "def merge_sort(s): # divide and conquer O(nlogn)\n\n n = len(s)\n if n < 2:\n return # list is already sorted\n # divide\n mid = n // 2\n s1 = s[:mid] # copy of first half\n s2 = s[mid:] # copy of second half\n # conquer\n\n merge_sort(s1)\n merge_sort(s2)\n\n # merge results\n merge(s1, s2, s) # merge sorted halves back into s", "title": "" }, { "docid": "38dd3731e813cf913becfac0b5b4b0f6", "score": "0.6364448", "text": "def timsort(lst):\r\n runs = find_runs(lst)\r\n while len(runs) > 1:\r\n runs.pop()\r\n b = runs.pop()\r\n left = b[0]\r\n new_sorted_lst = mergesort(lst, left, len(lst))\r\n # we made a mistake here, just change len(lst-1) to len(lst)\r\n 
runs.append((left, len(lst) - 1))\r\n # append the new run representing the sorted list\r\n \r\n # Treat runs as a stack and repeatedly merge the top two runs\r\n # When the loop ends, the only run should be the whole list.\r\n # HINT: you should be able to use the \"merge\" function provided\r\n # in this file.\r", "title": "" }, { "docid": "d36cf9d4075d7015a3da48ab358b68f1", "score": "0.63560027", "text": "def heap_sort(l):\n l= list(l)\n l= heapify(l)\n end= len(l)-1\n while end > 0:\n l[0], l[end]= l[end], l[0]\n end+= -1\n sift_down(l, 0, end)\n return l", "title": "" }, { "docid": "bc58af4523f18c3e43f85f7d9a8b6764", "score": "0.63510084", "text": "def quicksort(lyst):\n quicksortHelper(lyst, 0, len(lyst) - 1)", "title": "" }, { "docid": "4b0698251194d38ccf2f482409f2f89e", "score": "0.63485384", "text": "def sort_sequence(seq):\n n = list(seq)\n n.sort()\n t = tur(seq)\n for i in n:\n t += tur2(i, seq)\n return t", "title": "" }, { "docid": "257b6b63e83fad71f497639bfdd64221", "score": "0.63483685", "text": "def quick_sort_in_place(data):\n def _quick_sort(data, l, r):\n if l > r:\n return\n mid = partition(data, l, r)\n _quick_sort(data, l, mid - 1)\n _quick_sort(data, mid + 1, r)\n return data\n return _quick_sort(data, 0, len(data) - 1) if len(data) > 0 else []", "title": "" }, { "docid": "93fb54761498167303705e8efb24555e", "score": "0.63461727", "text": "def insertionSort(a):\n n = len(a)\n for i in range(1, n):\n for j in range(i, 0, -1):\n if a[j] < a[j-1]:\n a[j], a[j-1] = a[j-1], a[j]", "title": "" }, { "docid": "de139dd7640e4f13a3d99cd0a708caa5", "score": "0.6335673", "text": "def bucket_sort(l, n=50):\n M= max(l)+1\n buckets= [[] for _ in range(n)]\n for i in l:\n buckets[bin_this(i, n, M)].append(i)\n print buckets\n output= []\n for i in range(n):\n if not buckets[i]:\n continue\n buckets[i]= insertion_sort(buckets[i])\n output.extend(buckets[i])\n return output", "title": "" }, { "docid": "736df7ec97c1c1c85e0e2d5590b18ff5", "score": "0.63287234", "text": "def quicksort(nums, left, right):\n if left >= right: \n return\n pivot = partition(nums, left, right)\n quicksort(nums, left, pivot - 1)\n quicksort(nums, pivot + 1, right)", "title": "" }, { "docid": "c6e0309cf0a24311c68d9ae79765d967", "score": "0.63180786", "text": "def sortSteps():", "title": "" }, { "docid": "5d85d807edc37969ecbdc5d782fa9028", "score": "0.6302511", "text": "def cubic_sort(v):\n direction = 1\n i = len(v)//2\n last_end = 0\n while 1:\n n_i = i + direction\n if n_i >= len(v) or n_i < 0:\n direction *= -1\n if n_i >= len(v):\n if last_end == -1:\n break\n last_end = 1\n if n_i < 0:\n if last_end == 1:\n break\n last_end = -1\n continue\n high = max(n_i, i)\n low = min(n_i, i)\n if v[high] < v[low]:\n v[low], v[high] = v[high], v[low]\n direction *= -1\n last_end = 0\n n_i = min(len(v) - 1, max(0, i + direction))\n i = n_i", "title": "" }, { "docid": "d78ba8945ea0b62dc006db39861ce851", "score": "0.63014287", "text": "def radix_sort(l):\n l= list(l)\n i= 0\n while True:\n buckets= radix_distribute(l, i)\n if len(buckets) == 1:\n return buckets[0]\n l= [item for sublist in buckets for item in sublist]\n i+= 1", "title": "" }, { "docid": "f924e8371d287eea400be9ff87fb7fde", "score": "0.6299003", "text": "def quick_sort(items):\n if len(items) > 1:\n pivot_index = len(items) // 2\n smaller_items = []\n larger_items = []\n \n for i, val in enumerate(items):\n if i != pivot_index:\n if val < items[pivot_index]:\n smaller_items.append(val)\n else:\n larger_items.append(val)\n \n quick_sort(smaller_items)\n 
quick_sort(larger_items)\n items[:] = smaller_items + [items[pivot_index]] + larger_items", "title": "" }, { "docid": "9de81aeaf98ea08345e37d20dbdb3ae5", "score": "0.6284126", "text": "def merge_sort(S):\n n = len(S)\n if n < 2:\n return \n # divide\n mid = n // 2\n S1 = S[0:mid] # note that slice exclude 'mid]'\n S2 = S[mid:n]\n # conquer (with recursion)\n merge_sort(S1)\n merge_sort(S2)\n # merge results\n merge(S1, S2, S)", "title": "" }, { "docid": "da6027f5b806bc77d09587b75bafce3c", "score": "0.62791216", "text": "def quick_sort(arr):\n less, same, more = [], [], [] # 3 partition lists\n if len(arr) <= 1:\n return arr\n else:\n pivot = arr[0]\n for i in arr:\n if i < pivot:\n less.append(i)\n elif i > pivot:\n more.append(i)\n else:\n same.append(i)\n less = quick_sort(less)\n more = quick_sort(more)\n return less + same + more", "title": "" }, { "docid": "da6027f5b806bc77d09587b75bafce3c", "score": "0.62791216", "text": "def quick_sort(arr):\n less, same, more = [], [], [] # 3 partition lists\n if len(arr) <= 1:\n return arr\n else:\n pivot = arr[0]\n for i in arr:\n if i < pivot:\n less.append(i)\n elif i > pivot:\n more.append(i)\n else:\n same.append(i)\n less = quick_sort(less)\n more = quick_sort(more)\n return less + same + more", "title": "" }, { "docid": "d0a402a10be3d61952e22a1827dd8868", "score": "0.6278372", "text": "def algo(a: list) -> list:\n for i in range(1, len(a)):\n j = i\n while j > 0 and a[j - 1] > a[j]:\n a[j - 1], a[j] = a[j], a[j - 1]\n j -= 1\n return a", "title": "" }, { "docid": "b6a3b2d074e2564a53b1137d5a799bc5", "score": "0.62681127", "text": "def quick_sort(lst, start, end):\n\n if start <end:\n s = split(lst, start, end)\n quick_sort(lst, start, s - 1)\n quick_sort(lst, s + 1, end)", "title": "" }, { "docid": "4e94536ee8e447e4d56cce6f9ea7c2e7", "score": "0.62598", "text": "def quick_sort(alist):\n\n def _quick_sort_helper(alist, first, last):\n if first < last:\n splitpoint = _partition(alist, first, last)\n _quick_sort_helper(alist, first, splitpoint - 1)\n _quick_sort_helper(alist, splitpoint + 1, last)\n\n def _partition(alist, first, last):\n pivotvalue = alist[first]\n\n leftmark = first + 1\n rightmark = last\n\n done = False\n while not done:\n\n while leftmark <= rightmark and alist[leftmark] <= pivotvalue:\n leftmark = leftmark + 1\n\n while alist[rightmark] >= pivotvalue and rightmark >= leftmark:\n rightmark = rightmark - 1\n\n if rightmark < leftmark:\n done = True\n else:\n temp = alist[leftmark]\n alist[leftmark] = alist[rightmark]\n alist[rightmark] = temp\n temp = alist[first]\n alist[first] = alist[rightmark]\n alist[rightmark] = temp\n return rightmark\n\n _quick_sort_helper(alist, 0, len(alist) - 1)", "title": "" }, { "docid": "95becb71ebb0c5b0cc5a01e58108441f", "score": "0.6244265", "text": "def sort(A):\n copy = list(A)\n mergesort_array(copy, A, 0, len(A))\n\n return A", "title": "" }, { "docid": "87b49c345368e6568e9963d074479ecf", "score": "0.6237906", "text": "def wiggleSort(self, nums: List[int]) -> None:\n nums.sort()\n\t\t\n for i in range(2, len(nums), 2):\n nums[i], nums[i-1] = nums[i-1], nums[i]", "title": "" }, { "docid": "3ce2bb052fd10c219d579eeee4d47c34", "score": "0.62377256", "text": "def insertionSort(x):\n\tfor i in range(len(x)):\n\t\tj = i\n\t\twhile j > 0 and x[j-1]>x[j]:\n\t\t\ttemp = x[j]\n\t\t\tx[j] = x[j-1]\n\t\t\tx[j-1] = temp\n\t\t\tj -= 1", "title": "" }, { "docid": "056ade9a29f770208e52f5e09908ae7a", "score": "0.6234248", "text": "def heap_sort(A):\n assert A.__len__() > 0\n heap_build(A)\n for i in 
xrange(A.__len__() - 1, 0, -1):\n tmp = A[0]\n A[0] = A[i]\n A[i] = tmp\n heap_max_heapify(A, 0, i)", "title": "" }, { "docid": "81e13941d10e672fe59871bba06c07a3", "score": "0.62328416", "text": "def sort(inp, cmpr=None):\n\tfor i in range(0, len(inp)-1):\n\t\tfor j in range(i+1, len(inp)):\n\t\t\tif cmpr(inp[j], inp[i]):\n\t\t\t\ttemp = inp[i]\n\t\t\t\tinp[i] = inp[j]\n\t\t\t\tinp[j] = temp\n\treturn inp", "title": "" }, { "docid": "7b42b4eb371b0e7fcc2e454362573aab", "score": "0.6232356", "text": "def ins_quick_sort(arr, left, right):\n if (right - left + 1 <= 8) and (left < right):\n insertion_sort(arr, left, right)\n elif left < right:\n division_ind = ins_partition(arr, left, right) # choose a division point\n ins_quick_sort(arr, left, division_ind-1)\n ins_quick_sort(arr, division_ind+1, right)", "title": "" }, { "docid": "7fdeb367daa24ef4efc5c1688fdba0ff", "score": "0.62112427", "text": "def _sort(a, aux, lo, hi, d):\n\t#recursion return condition\n\tif lo >= hi-1: return \n\n\t#cutoff to insertion sort \n\tif lo >= hi - CUTOFF: \n\t\tfor i in range(lo, hi):\n\t\t\tfor j in range(i, lo, -1):\n\t\t\t\tif a[j-1][d:] < a[j][d:]: break\n\t\t\t\ta[j-1], a[j] = a[j], a[j-1]\n\n\t#key-indexed counting (extra end-of-string character)\n\tcount = [0] * (RADIX + 2) #cannot recycle count[] array\n\tfor i in range(lo, hi): #pass 1 -- counting \n\t\tcount[ordinal(a[i], d) + 2] += 1\n\n\tfor i in range(RADIX+1): #pass 2 -- cumulative sum\n\t\tcount[i+1] += count[i]\n\n\tfor i in range(lo, hi): #pass 3 -- sorting \n\t\taux[count[ordinal(a[i], d) + 1]] = a[i]\n\t\tcount[ordinal(a[i], d) + 1] += 1\n\n\tfor i in range(lo, hi): #pass 4 -- copying \n\t\ta[i] = aux[i-lo]\n\n\t#sort R subarrays recursively\t\n\tfor i in range(1, RADIX+1): \n\t\t_sort(a, aux, lo+count[i], lo+count[i+1], d+1)", "title": "" }, { "docid": "70c4495188e07ed90aea95e9fcb9fef0", "score": "0.620843", "text": "def insertion_sort(a):\n\tfor i in range(1,len(a)):\n\t\ttemp = a[i]\n\t\tj = i-1\n\t\twhile j >= 0 and a[j] > temp:\n\t\t\t\ta[j+1] = a[j]\n\t\t\t\tj -= 1\n\t\ta[j+1] = temp", "title": "" }, { "docid": "f598a431c3b4fd9db96133fd602ff4ff", "score": "0.6203688", "text": "def quick_sort(lst):\n def partition(lst, start, end):\n follower = leader = start\n while leader < end:\n if lst[leader] <= lst[end]:\n lst[follower], lst[leader] = lst[leader], lst[follower]\n follower += 1\n leader += 1\n lst[follower], lst[end] = lst[end], lst[follower]\n return follower\n\n def sort(lst, start, end):\n if start >= end:\n return\n index = partition(lst, start, end)\n sort(lst, start, index - 1)\n sort(lst, index + 1, end)\n\n sort(lst, 0, len(lst) - 1)", "title": "" }, { "docid": "9857b0af5cde5192d9b3c803bec548ad", "score": "0.6200292", "text": "def heap_sort(a):\n n = len(a)\n # initialize the heap\n # keep in mind the heapify method\n # must be a min heap if sorting from low to high\n for i in range(n, -1, -1):\n heapify(a, n, i)\n \n # for \n for i in range(n-1, 0, -1):\n a[i], a[0] = a[0], a[i]\n heapify(a, i, 0)\n return arr", "title": "" }, { "docid": "8d4572c40ae9381b6de8681543170177", "score": "0.61974114", "text": "def quick_sort(c, p, q):\n\n if p < q:\n s = partizione(c, p, q)\n quick_sort(c, p, s - 1)\n quick_sort(c, s + 1, q)", "title": "" }, { "docid": "9010687276ddd62a1859bb10428e077d", "score": "0.6185538", "text": "def quipSort(lst):\n quipSortRec(lst, log2(len(lst)))", "title": "" }, { "docid": "c374346541a2fffb39259047e27cc9c4", "score": "0.61835116", "text": "def radix_sort(alist):\n radix = 10\n maxLength = False\n tmp, 
placement = -1, 1\n\n while not maxLength:\n maxLength = True\n buckets = [[] for _ in range(radix)]\n for i in alist:\n tmp = i / placement\n buckets[int(tmp) % radix].append(i)\n if maxLength and tmp > 0:\n maxLength = False\n a = 0\n for b in range(radix):\n buck = buckets[b]\n for i in buck:\n alist[a] = i\n a += 1\n placement *= radix\n return alist", "title": "" }, { "docid": "0dba2528ae3a22836fbbde228c1f6c9b", "score": "0.61724347", "text": "def __merge_sort_main(array, ibegin, iend):\n if ibegin < iend:\n imid = (ibegin + iend) // 2\n __merge_sort_main(array, ibegin, imid)\n __merge_sort_main(array, imid + 1, iend)\n __merge_sort_combine(array, ibegin, imid, iend)\n return array", "title": "" }, { "docid": "ea28897f53f2069ff5dfd9dad18c9c2e", "score": "0.61723846", "text": "def makeSorted(input):\r\n sortedS = StackList() \r\n tmpS = StackList()\r\n result = []\r\n\r\n for num in input:\r\n if sortedS.isEmpty() == True:\r\n sortedS.push(num)\r\n else:\r\n if num < sortedS.getTop():\r\n sortedS.push(num)\r\n elif num >= sortedS.getTop():\r\n while num >= sortedS.getTop():\r\n tmp.push(sortedS.getTop())\r\n sortedS.pop()\r\n sortedS.push(num)\r\n while num >= tmp.getTop():\r\n sortedS.push(tmp.getTop())\r\n tmp.pop() \r\n while sortedS.size() > 0:\r\n result.append(sortedS.getTop())\r\n sortedS.pop()\r\n return result", "title": "" }, { "docid": "9115e5a35f2dfa38ead63e99a7846be9", "score": "0.61687267", "text": "def insert_sort(s):\n for k in range(1, len(s)):\n while k > 0:\n if s[k] > s[k - 1]:\n break\n s[k], s[k - 1] = s[k - 1], s[k]\n k -= 1", "title": "" }, { "docid": "acfa615ea385f6a99b8460e6fcefabfa", "score": "0.61649364", "text": "def quick_sort(l):\n # return quick_sort_pythonic(l)\n quick_sort_iter(l, 0, len(l)-1)\n\n # print l\n return l", "title": "" }, { "docid": "529524750e94e42c4cca4569c0f4d518", "score": "0.616354", "text": "def radix_sort(lst, digits):\n length = len(lst)\n for digit in range(digits): # Numbers have size up-to 10^(digits) - 1\n lst = counting_sort(lst, length, digit)\n return lst", "title": "" }, { "docid": "a6d81c0d6ee1b2ad9ea75ea0fee95319", "score": "0.6157695", "text": "def quick_sort(array, start, end):\n\n if start >= end:\n return\n\n p = partition(array, start, end)\n\n quick_sort(array, start, p-1)\n quick_sort(array, p+1, end)\n\n return array", "title": "" }, { "docid": "08941e88efa7bd6ccf4436eb88922c8f", "score": "0.614594", "text": "def cocktailShakerSort(array: List[int]) -> None:\n\n for amt in range(\n len(array) // 2 + len(array) % 2\n ): # Iterate for half the length of the array plus one if odd\n swaps = 0\n section = (\n len(array[amt:-amt]) if amt > 0 else len(array)\n ) # Keep narrowing down the search\n\n for i in range(section - 1): # Iterate through the array\n if array[amt + i] > array[amt + i + 1]: # Swap if needed\n array[amt + i], array[amt + i + 1] = array[amt + i + 1], array[amt + i]\n swaps += 1\n\n if swaps == 0:\n break # Array is in order, this break statement could save time\n\n for i in range(section - 1, 0, -1):\n if array[amt + i] < array[amt + i - 1]: # Swap if needed\n array[amt + i], array[amt + i - 1] = array[amt + i - 1], array[amt + i]\n swaps += 1\n\n if swaps == 0:\n break # Array is in order", "title": "" }, { "docid": "64de26102695a0ff056f7c2fda1986d6", "score": "0.61447364", "text": "def quick_sort2(values,start,stop):\n if stop>start:\n pivot = partition(values,start,stop)\n quick_sort2(values,start,pivot-1)\n quick_sort2(values,pivot+1,stop)", "title": "" }, { "docid": 
"e21332e9ff6182079faf9fc54ad5c00d", "score": "0.6129116", "text": "def radixsort(lst):\n lst_length = len(lst)\n modulus = 10\n div = 1\n while True:\n new_list = [[] for x in range(10)]\n for value in lst:\n least_digit = value % modulus\n least_digit /= div\n new_list[int(least_digit)].append(value)\n modulus = modulus * 10\n div = div * 10\n if len(new_list[0]) == lst_length:\n return new_list[0]\n lst = []\n for x in new_list:\n for y in x:\n lst.append(y)", "title": "" }, { "docid": "23ab00de6068d96109cfaa06bfca1b02", "score": "0.6128971", "text": "def quick_sort(array: List):\n return _quick_sort(array, 0, len(array) - 1)", "title": "" }, { "docid": "7dda024b868620557793e76291ba2946", "score": "0.612261", "text": "def partial_sorting(array, k):\n build_heap(array)\n output = []\n for i in range(k):\n output.append(extract_max(array))\n return output", "title": "" }, { "docid": "9cf86e0ef9e2d52d2887e33c930ab0ba", "score": "0.61213547", "text": "def quicksort(l):\n return _quick_recursion(l, 0, len(l))", "title": "" }, { "docid": "96a48482bfe27b0afccd66f48920283a", "score": "0.61190534", "text": "def quickSort(arr):\n return recursiveDivide(arr, 0, len(arr) - 1)", "title": "" }, { "docid": "237d0a0b0267b9c91ce2e3157c0b81a5", "score": "0.61189336", "text": "def sort(input_list):\n if len(input_list) <= 1:\n return input_list\n\n mid_index = len(input_list) // 2\n left_list = sort(input_list[0:mid_index])\n right_list = sort(input_list[mid_index:])\n\n return merge(left_list, right_list)", "title": "" }, { "docid": "3a588a5cddee72f6c6e43dbee6d5aa83", "score": "0.6117142", "text": "def merge_sort(S):\n n = len(S)\n if n < 2:\n return # list is already sorted\n # divide\n mid = n // 2\n S1 = S[0:mid] # copy of first half\n S2 = S[mid:n] # copy of second half\n # conquer (with recursion)\n merge_sort(S1) # sort copy of first half\n merge_sort(S2) # sort copy of second half\n # merge results\n merge(S1, S2, S) # merge sorted halves back into S", "title": "" }, { "docid": "479d9345b23c3222de5162fbd46d3166", "score": "0.6113106", "text": "def faster_insertion_sort(A: Sequence) -> None:\n for i in range(1, len(A)):\n x = A[i]\n j = i\n while j > 0 and A[j - 1] > x:\n A[j] = A[j - 1]\n j -= 1\n A[j] = x", "title": "" }, { "docid": "a1c9ba5bd1f25b80bd03f09c7b942360", "score": "0.6109384", "text": "def quickSort(a):\n if len(a) < 2:\n return a\n else:\n pivot = 0\n tmp =[a[pivot]]\n less = [i for i in a[1:] if i <= a[pivot]]\n greater = [i for i in a[1:] if i > a[pivot]]\n return quickSort(less) + tmp + quickSort(greater)", "title": "" }, { "docid": "13139f9179f99d03810e6aafd933cb2b", "score": "0.6104204", "text": "def heap_sort(H):\n global heap_size\n heap_size = len(H)\n build_max_heap(H)\n for i in range(heap_size - 1, 0, -1):\n # swap first and last element\n H[0], H[i] = H[i], H[0]\n # dont consider last element\n heap_size -= 1\n # apply heap on first index of H\n max_heapify(H, 0)", "title": "" }, { "docid": "ac7df16033dc727a8fd9b0668a46e3c2", "score": "0.6102627", "text": "def quicksort(array, left, right):\n if left < right:\n pivot = partition(array, left, right)\n quicksort(array, left, pivot-1)\n quicksort(array, pivot+1, right)", "title": "" }, { "docid": "0db41c2b05739e097873cbfc8b00cbc0", "score": "0.6101245", "text": "def quick_sort(items):\n\n if len(items) == 0:\n return items\n pivot = items[0]\n b = [x for x in items if x == pivot]\n small = quick_sort([x for x in items if x < pivot])\n large = quick_sort([x for x in items if x > pivot])\n return small + b + large", "title": 
"" }, { "docid": "18e68870bf462805724fdf059da3f345", "score": "0.60981643", "text": "def sort(l, cmp):\n\n # See to.SBBXgrid.f lines 605 and following\n\n for n in range(len(l)-1):\n nlmax = n\n for nn in range(n+1, len(l)):\n if cmp(l[nn], l[nlmax]) < 0:\n nlmax = nn\n # swap items at n and nlmax\n t = l[nlmax]\n l[nlmax] = l[n]\n l[n] = t\n return", "title": "" }, { "docid": "0d01c8b5a94e6ef6f43d05ed3aa61340", "score": "0.60962343", "text": "def insertion_sort(arr):\n comparisons = 0\n for i in range(len(arr)):\n while i > 0 and arr[i] < arr[i-1]:\n arr[i], arr[i-1] = arr[i-1], arr[i]\n i -= 1\n comparisons += 1\n comparisons += 1\n return arr, comparisons", "title": "" }, { "docid": "cc8626a18241d6a4d25f65b10e7261dd", "score": "0.6094648", "text": "def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:\n if length > 1:\n middle = int(length / 2)\n bitonic_sort(array, low, middle, 1)\n bitonic_sort(array, low + middle, middle, 0)\n bitonic_merge(array, low, length, direction)", "title": "" }, { "docid": "7c5ba8a1ae5f8ecfd0ec6f37e7994d84", "score": "0.6093321", "text": "def sort_without_built_in_function(arr):\n less = []\n pivotList = []\n more = []\n if len(arr) <= 1:\n return arr\n else:\n pivot = arr[0]\n for i in arr:\n if i < pivot:\n less.append(i)\n elif i > pivot:\n more.append(i)\n else:\n pivotList.append(i)\n less = sort_without_built_in_function(less)\n more = sort_without_built_in_function(more)\n return less + pivotList + more", "title": "" }, { "docid": "81d1d6d4269e9937f5a3379d95c43d49", "score": "0.6084731", "text": "def counting_sort(l):\n hist= {i:0 for i in range(min(l), max(l)+1)}\n for i in l:\n hist[i]+= 1\n for i in range(min(l)+1, max(l)+1):\n hist[i]+= hist[i-1]\n output= [0 for _ in range(len(l))]\n for i in l[::-1]:\n j= hist[i]\n hist[i]-= 1\n output[j-1]= i\n return output", "title": "" }, { "docid": "d4db7c0b5b22dfc24860df49055d0366", "score": "0.6079576", "text": "def sort(A, p, r):\n if p < r:\n q = int((p + r) / 2)\n sort(A, p, q)\n sort(A, q + 1, r)\n merge(A, p, q, r)", "title": "" }, { "docid": "a6f7498bac1ab0383d8cf902b7976c1e", "score": "0.6079024", "text": "def wiggleSort(self, nums: List[int]) -> None:\n nums.sort()\n smalls = nums[:math.ceil(len(nums)/2)]\n larges = nums[math.ceil(len(nums)/2):]\n smalls.reverse()\n larges.reverse()\n for i in range(len(nums)):\n if(i%2==0):\n nums[i] = smalls[int(i/2)]\n else:\n nums[i] = larges[int((i-1)/2)]", "title": "" }, { "docid": "27e608a0cc2a948d6086d740a7caf5e3", "score": "0.60784835", "text": "def quicksort(args: List[T]) -> List[T]:\n quicksort_(args, 0, len(args) - 1)\n return args", "title": "" }, { "docid": "36a995933a9ff20aa3605d8ff8b709e0", "score": "0.60745484", "text": "def quick_sort_2_way(collection):\n length = len(collection)\n if length <= 1:\n return collection\n else:\n pivot = collection.pop()\n lesser, greater = [], []\n for element in collection:\n if element <= pivot:\n lesser.append(element)\n else:\n greater.append(element)\n return quick_sort_2_way(lesser) + [pivot] + quick_sort_2_way(\n greater) # recursive here. 
the lesser ahead runs ascending order.", "title": "" }, { "docid": "7c9dc9613bb757fbab0946e04f061891", "score": "0.60742617", "text": "def sort_012(input_list):\n if input_list == None:\n return None\n\n if input_list == []:\n return []\n\n aux1 = 0\n aux2 = 0\n middle = 1\n\n L = len(input_list) - 1\n\n while aux2 <= L:\n if input_list[aux2] < middle:\n swap(input_list, aux1, aux2)\n aux1 += 1\n aux2 += 1\n\n elif input_list[aux2] > middle:\n swap(input_list, aux2, L)\n L -= 1\n\n else:\n aux2 += 1\n\n return input_list", "title": "" }, { "docid": "f77d3c4200e597519d414051c3e06960", "score": "0.60670084", "text": "def insertionsort(a, dir):\n\n A = []\n A.append(a[0])\n for n in range(1, len(a)):\n m = n - 1\n key = A[m]\n while m >= 0 and (a[n] < key if dir == 0 else a[n] > key):\n if n == m + 1:\n A.append(A[m])\n else:\n A[m + 1] = A[m]\n m -= 1\n key = A[m]\n if n == m + 1:\n A.append(a[n])\n else:\n A[m + 1] = a[n]\n return A", "title": "" }, { "docid": "7d5180a4f93d457ede12d764b3867bd1", "score": "0.60601354", "text": "def merge_sort_iterative(a):\n length = len(a)\n i = 1\n while i <= length:\n for j in xrange(i, length, 2 * i):\n a = merge(a, j - i, j, min(j + i, length))\n i *= 2\n return a", "title": "" }, { "docid": "41c7a73d5ceaa26602a002e6b8a77397", "score": "0.60542095", "text": "def quicksort(the_list, count_list):\n compares = 0\n # add sort\n count_list.append(compares)\n return the_list", "title": "" }, { "docid": "d18234aefb417e0f0e20b5c8eec29898", "score": "0.60539216", "text": "def sort(x):\n return sorted(x)", "title": "" }, { "docid": "ebd27f087008543559699636e960e620", "score": "0.60489774", "text": "def merge_sort_recursive(a):\n length = len(a)\n if length <= 1:\n return a\n left = merge_sort_recursive(a[0:length/2])\n right = merge_sort_recursive(a[length/2:length])\n for i in xrange(0, length):\n if len(left) == 0:\n a[i] = right[0]\n right = right[1:len(right)]\n elif len(right) == 0:\n a[i] = left[0]\n left = left[1:len(left)]\n elif left[0] < right[0]:\n a[i] = left[0]\n left = left[1:len(left)]\n else:\n a[i] = right[0]\n right = right[1:len(right)]\n return a", "title": "" } ]
6fb552c67c4bda0ffa2529c12ddc20a1
Run shell command with args and keywords
[ { "docid": "7076452a671592094e1121a1ae0d679d", "score": "0.6708656", "text": "def cmd(self, shellcmd, *args, **kwargs):\n _cmd = shellcmd.format(*args, **kwargs)\n self.log.info(_cmd)\n os.system(_cmd)", "title": "" } ]
[ { "docid": "dfba1f779d4fa088eb710c699712bfde", "score": "0.7237709", "text": "def do_shell(args):\n os.system(args)", "title": "" }, { "docid": "7e5155bf0d386e91f282e3d483ecd603", "score": "0.7085518", "text": "def do_shell(self, arg):\n run(arg.split())", "title": "" }, { "docid": "269ada85b209ccd6206073d9041b7218", "score": "0.7034835", "text": "def do_shell(self, args):\n system(args)", "title": "" }, { "docid": "4f4fc674084f9e7c7adde5b2a0b67a3d", "score": "0.6865966", "text": "def execute_tool(description, *args):\n command_line = list(args) + files_and_directories\n click.echo(f\"{description}: {' '.join(command_line)}\")\n rv = call(command_line)\n if rv != 0:\n exit(rv)", "title": "" }, { "docid": "8b5ad07d47ff679fdf0dfe7cfdb08193", "score": "0.6707915", "text": "def do_shell(self, arg):\n os.system(arg)", "title": "" }, { "docid": "a6473aa14eebe33a507344fa9fadda37", "score": "0.6690284", "text": "def run(cmd, *args, **kwargs):\n distutils.log.info('> ' + list2cmdline(cmd))\n kwargs['shell'] = (sys.platform == 'win32')\n return subprocess.check_call(cmd, *args, **kwargs)", "title": "" }, { "docid": "82b48bc98c8432c7938e7708417b1612", "score": "0.6642766", "text": "def sh(*cmdline):\n logger.info(\"Running command: %s\", cmdline)\n subprocess.run(cmdline, stdin=subprocess.DEVNULL,\n shell=False, timeout=120, check=True)", "title": "" }, { "docid": "77363541da612333f8fa8450d50e7862", "score": "0.6604703", "text": "def test_commands(arguments):\n logger.debug(\"Running command: sdrun %s\", \" \".join(arguments))\n ret = subprocess.run([\"sdrun\"] + arguments)\n assert ret.returncode == 0", "title": "" }, { "docid": "2d646195aab2b5d26a2229a5f4f6518e", "score": "0.65768987", "text": "def run(self, command, *args):\n self.command('run', command, *args)", "title": "" }, { "docid": "20e50bd18cdd3344471e206a729a0102", "score": "0.65319926", "text": "def run_cmd(*cmd_args, **kwargs):\n cmd_args_str = map(str, cmd_args)\n cmd_str = ''.join(cmd_args_str)\n \n verbose = config.verbose\n if 'verbose' in kwargs:\n verbose = kwargs['verbose']\n del kwargs['verbose']\n if verbose:\n print ' * Running: ' + cmd_str\n \n return subprocess.Popen(cmd_str, shell=True, **kwargs)", "title": "" }, { "docid": "9270dd52342d5e84c8d95c57166af9f0", "score": "0.65134346", "text": "def run_command(*args, **kwargs):\n return subprocess.run(\n *args,\n stdout=subprocess.PIPE,\n check=True,\n **kwargs,\n )", "title": "" }, { "docid": "b0585b581ad75a34e396cb51477a24f7", "score": "0.6510776", "text": "def _command(self, args, check_return_code=True):\n cmd = self.hivecmd + args\n return sh(cmd, check_return_code)", "title": "" }, { "docid": "0b99421633ba3c45cd1afe630b96b014", "score": "0.649969", "text": "def _run(self, shell_cmd, pipe_text='', **kwargs):\n if self.is_windows():\n return self._run_windows(shell_cmd, pipe_text=pipe_text, **kwargs)\n else:\n return self._run_linux(shell_cmd, pipe_text=pipe_text, **kwargs)", "title": "" }, { "docid": "8988fafdb1415a6285a13dfa68e710a1", "score": "0.6487381", "text": "def __run(args, stdin=None): # type: (list[str], str) -> ShellCommand\n try:\n p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n stdout, stderr = p.communicate(input=stdin)\n except OSError as os_err:\n log.debug('Unable to execute command: %s: %s', args, os_err)\n raise\n\n cmd = ShellCommand(args, code=p.returncode, stdout=stdout, stderr=stderr)\n log.debug(cmd)\n return cmd", "title": "" }, { "docid": "647cb4da172c374e82fbfddee1827d09", "score": "0.6457332", "text": "def run(command):\n print 
command\n os.system(command)", "title": "" }, { "docid": "c134dbcd830f6a2653f1baa8d0a430b9", "score": "0.6452583", "text": "def commandToExecute(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "0986cb91ab4eeb56ea7f21dfb1fa5dd7", "score": "0.6442191", "text": "def shell(self,\n args: Sequence[Text],\n check: bool = None) -> subprocess.CompletedProcess:\n return self.run(['shell'] + args, check)", "title": "" }, { "docid": "a04972e89a0ddcaf7a9a5aa31ae5ff4a", "score": "0.64019257", "text": "def shell(self, extra_args):\n if isinstance(extra_args, str) or isinstance(extra_args, unicode):\n extra_args = extra_args.split()\n if not isinstance(extra_args, list):\n msg = \"invalid arguments: %s\\nshould be list or str, %s given\" % (extra_args, type(extra_args))\n self.logger.warning(msg)\n raise ADBException(msg)\n\n shell_extra_args = [\"shell\"] + extra_args\n return self.run_cmd(shell_extra_args)", "title": "" }, { "docid": "310eb910a94f56f7f5069f0a4c6a2965", "score": "0.63852364", "text": "def commandRunner(command):\n\tcall(command, shell=True)", "title": "" }, { "docid": "e367c50f63bf2e1eeb5fd68998b6dc21", "score": "0.6382469", "text": "def run(self, *largs):\n self._orders.append({'type': 'run', 'command': largs})\n return self", "title": "" }, { "docid": "dab8aca5629dc5aae766b010497fcad7", "score": "0.63806945", "text": "def _run_command(self, opts, args):\r\n cmd = self.search_commands(args[0])\r\n\r\n if opts.debug:\r\n LOGGER.setLevel(logging.DEBUG)\r\n LERR.setLevel(logging.DEBUG)\r\n\r\n if not (opts.nologo or cmd.nologo) and not self.interactive:\r\n sys.stdout.write(FIPSSTR)\r\n CLI.version(self._progname, versioning.__version__,\\\r\n versioning.__extracontent__, fileh=sys.stdout)\r\n if len(args) > 1:\r\n return cmd.run(args[1:])\r\n\r\n return cmd.run([])", "title": "" }, { "docid": "1a0884df697e34be81c23adb5e07844a", "score": "0.637706", "text": "def execute(args):\n # Note: Candidate for slapos.lib.recipe\n os.execv(args[0], args + sys.argv[1:])", "title": "" }, { "docid": "1a1e0a676e357ad33218d6ab053eaede", "score": "0.6372734", "text": "def call(args):\n #syslog.syslog(\"About to execute %s \" % ' '.join(args))\n subprocess.check_call(args, stderr=subprocess.STDOUT)", "title": "" }, { "docid": "97dce6dc37ef4ccceead96bee2dcb0d6", "score": "0.6350499", "text": "def run(cmd):\n print('+ ' + ' '.join(cmd))\n subprocess.check_call(cmd)", "title": "" }, { "docid": "547ccf73e0b4a14199c6009f43aafe6d", "score": "0.63393587", "text": "def execute(args):\n return subprocess.run(args, capture_output=True, check=True)", "title": "" }, { "docid": "74fe7b11e9b7ba377148370078468e96", "score": "0.6298193", "text": "def run_command(command, *args, **kw):\n _input= kw.pop('_input', None)\n cwd = kw.pop('cwd', os.getcwd())\n\n with_extended_output = kw.pop('with_extended_output', False)\n with_exceptions = kw.pop('with_exceptions', True)\n with_raw_output = kw.pop('with_raw_output', False)\n\n # if command is a string split to a list\n if isinstance(command, basestring):\n command = command.split()\n\n # if more kwargs are given, convert them to command line args\n if kw:\n kwarglist = python_to_args(**kw)\n else:\n kwarglist = []\n command += kwarglist + list(args)\n\n # If stdin is a string, create a pipe so we can write the contents\n if _input:\n stdin = subprocess.PIPE\n else:\n stdin = None\n\n # Start the process\n os.chdir(cwd)\n return get_command_output(' '.join(command))", "title": "" }, { "docid": "efa6e59fc8a5ca3df01e7a572867f5dd", "score": "0.6296136", "text": "def 
run():\n\n arg = get_cli_args()\n\n return", "title": "" }, { "docid": "b5be02e8ce508a03dcd8120afb0f3896", "score": "0.6269322", "text": "def pythonCommandToExecute(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "60a7f5b2545430c3f9c3f78c75986803", "score": "0.62670654", "text": "def run_shell_cmd(args: list) -> subprocess.CompletedProcess:\n full_command = \" \".join(args)\n print(\"Running command:\", full_command)\n\n try:\n output = subprocess.run(\n full_command,\n capture_output=True,\n shell=True,\n check=True,\n )\n\n return output\n except subprocess.CalledProcessError as e:\n print(\"Command failed\")\n print(f\"stderr was {e.stderr}\")\n raise e", "title": "" }, { "docid": "0f201a4cb68d3c1a9d78b5d009266d43", "score": "0.62584233", "text": "def cmd(binary, subcommand, *args, **kwargs):\n ret = [binary, subcommand]\n for key, value in kwargs.items():\n key = '--' + key.replace('_', '-')\n ret.extend(_keyword_arguments(value, key))\n ret.extend(args)\n return ret", "title": "" }, { "docid": "35b224e088add4ba34828722dd185131", "score": "0.6221393", "text": "def run_shell_command(self):\n from libzilla.cli.shell import ShellCommand\n ShellCommand(docopt(ShellCommand.__doc__)).cmdloop()", "title": "" }, { "docid": "002cd3cfbc72bdcdbf681950b68c584d", "score": "0.6219337", "text": "def run_shell_command(cmd):\n print(\"Starting \" + cmd)\n subprocess.run([cmd], shell=True) \n print(\"Finished \" + cmd)", "title": "" }, { "docid": "e14ff1bc1575982272a4d76a433c4ea7", "score": "0.6212542", "text": "def run_cmd(self, extra_args):\n if isinstance(extra_args, str) or isinstance(extra_args, unicode):\n extra_args = extra_args.split()\n if not isinstance(extra_args, list):\n msg = \"invalid arguments: %s\\nshould be list or str, %s given\" % (extra_args, type(extra_args))\n self.logger.warning(msg)\n raise ADBException(msg)\n\n args = [] + self.cmd_prefix\n args += extra_args\n\n self.logger.debug(\"command:\")\n self.logger.debug(args)\n r = subprocess.check_output(args).strip()\n self.logger.debug(\"return:\")\n self.logger.debug(r)\n return r", "title": "" }, { "docid": "b7769c73b5c737c5b32650b890dc8be9", "score": "0.61971813", "text": "def run_command(self, command, args=None, cleanup=True):\n if not self.shell_id:\n self.shell_id = protocol.open_shell()\n cmd_id = self.protocol.run_command(self.shell_id, command, args)\n out,err,code = self.protocol.get_command_output(self.shell_id, cmd_id)\n err = self.clean_error_msg(err)\n if cleanup:\n self.protocol.cleanup_command(self.shell_id, cmd_id)\n return (out,err,code)", "title": "" }, { "docid": "39da023d744ea557c93ca6bb671ac262", "score": "0.61849827", "text": "def run(cmd: list[str]):\n\n print(\"\\n\", \" \".join(cmd), \"\\n\")\n\n if sys.platform == \"win32\":\n subprocess.run(\" \".join(cmd), shell=True)\n else:\n subprocess.run(cmd)", "title": "" }, { "docid": "ae8345e29b365ce5f491665ad228bfe0", "score": "0.6155557", "text": "def run():\n args = parse_args(sys.argv[1:])\n setup_logging(args.loglevel)\n if not os.path.isdir(args.directory):\n print(\"Not a directory\")\n sys.exit(1)\n c = cmd.Cmd(args.directory)\n c.run()", "title": "" }, { "docid": "b48d8ee851f573a536f84b902c98c552", "score": "0.6148496", "text": "def runCommand(args):\n logging.debug('running command: %s', ' '.join(args))\n\n try:\n subprocess.check_output(args, stderr=subprocess.STDOUT, shell=False)\n\n except subprocess.CalledProcessError as pErr:\n logging.error('command failed: %s', ' '.join(args))\n raise Exception(pErr.stdout.rstrip()) from pErr", 
"title": "" }, { "docid": "c0661087f1569a8dab2c8143016d32fe", "score": "0.614347", "text": "def run_shell_cmd(cmd):\n\n return call_subprocess(cmd, shell=True)", "title": "" }, { "docid": "7a2623e686e41d1199aafd170b8c8842", "score": "0.61360687", "text": "def run_command(self, command, args=[], **kw):\n self.term.feed('Executing ')\n self.term.feed(command, '34;1')\n self.term.feed(' %s\\r\\n' % args)\n self.pid = self.term.fork_command(command, args, **kw)\n # give the terminal focus\n self.term.grab_focus()", "title": "" }, { "docid": "fe27ab02a2de3a1f0d5b7426e20207ef", "score": "0.61304754", "text": "def f_shell(args):\n repos, cmds = utils.parse_repos_and_rest(args.man, args.quote_mode)\n if not cmds:\n print(\"Missing commands\")\n sys.exit(2)\n\n cmds = \" \".join(cmds) # join the shell command into a single string\n for name, prop in repos.items():\n # TODO: pull this out as a function\n got = subprocess.run(\n cmds,\n cwd=prop[\"path\"],\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n print(utils.format_output(got.stdout.decode(), name))", "title": "" }, { "docid": "4cf257f9675056cd7925ea1f0af9f726", "score": "0.61183023", "text": "def shell(self,cmd):\n\n shell(cmd,self.verbose,self.debug,self.header)", "title": "" }, { "docid": "2ba28e38dd734a77cfae05e34fe2109d", "score": "0.61152136", "text": "def run(cmd):\n\tprint(cmd)\n\t(exit_code) = os.system(cmd)\n\tif exit_code != 0:\n\t\traise Exception", "title": "" }, { "docid": "1965459b385a285946da7229a3f9b142", "score": "0.61023957", "text": "def run_command(args, config):\n commands = { 'on': on_command,\n 'off': off_command,\n 'reset': reset_command,\n 'list': list_command,\n 'set-alias': set_alias_command,\n 'rm-alias': rm_alias_command,\n 'set-host': set_host_command,\n }\n for command in commands:\n if not command in args:\n raise ValueError('Invalid command key %s' % command)\n if args[command]:\n return commands[command](args, config)\n raise ValueError('Must pass in at least one True command')", "title": "" }, { "docid": "e6a5490958979f94e76a5b9720425be8", "score": "0.61014956", "text": "def cli(**kwargs):\n pass", "title": "" }, { "docid": "a4f4c2400a5d0b15b54bee12ab9dddfd", "score": "0.6099058", "text": "def _run_command(args, check=True, **kwargs) -> subprocess.CompletedProcess:\n env = kwargs.get('env', {})\n\n # Log the command-line for debugging failed tests. Note that we convert\n # tokens to strings for _shlex_join.\n env_str = ['env', '-i'] + ['%s=%s' % (k, v) for k, v in env.items()]\n args_str = [str(t) for t in args]\n\n # Override some defaults. 
Note that 'check' deviates from this pattern to\n # avoid getting warnings about using subprocess.run without an explicitly set\n # `check` parameter.\n kwargs.setdefault('capture_output', True)\n kwargs.setdefault('universal_newlines', True)\n\n logging.debug('Running command: %s', _shlex_join(env_str + args_str))\n\n return subprocess.run(args, check=check, **kwargs)", "title": "" }, { "docid": "11d69bd99d18f3524b3b49e118d88fe7", "score": "0.6098898", "text": "def do_shell(command, context=None, **kwargs):\n logging.info(\"%s: executing %s\", context, command)\n\n child_env = {\"CRANKD_CONTEXT\": context}\n\n # We'll pull a subset of the available information in for shell scripts.\n # Anyone who needs more will probably want to write a Python handler\n # instead so they can reuse things like our logger & config info and avoid\n # ordeals like associative arrays in Bash\n for k in [\"info\", \"key\"]:\n if k in kwargs and kwargs[k]:\n child_env[\"CRANKD_%s\" % k.upper()] = str(kwargs[k])\n\n if \"user_info\" in kwargs:\n for k, v in kwargs[\"user_info\"].items():\n child_env[create_env_name(k)] = str(v)\n\n try:\n rc = subprocess.call(command, shell=True, env=child_env)\n if rc == 0:\n logging.debug(\"`%s` returned %d\", command, rc)\n elif rc < 0:\n logging.error(\"`%s` was terminated by signal %d\", command, -rc)\n else:\n logging.error(\"`%s` returned %d\", command, rc)\n except OSError as exc:\n logging.error(\"Got an exception when executing %s: %s\", command, exc)", "title": "" }, { "docid": "5a08e2468d0edff92677f94258dea07a", "score": "0.60871035", "text": "def runcommand():\n # See https://hitchdev.com/commandlib/\n # -- for more details on how to use commandlib\n Command(\"ls\", \"-l\").in_dir(DIR.key).run()\n\n # N.B. DIR.key is the folder this file - key.py - is in", "title": "" }, { "docid": "54c9cae16947fb9eb327fbb790eb75d6", "score": "0.60698354", "text": "def run(*cmdline, **kwargs):\n print()\n print('Running: $ ' + ' '.join(cmdline))\n print()\n return subprocess.run(cmdline, cwd=gitroot_path, check=True, **kwargs)", "title": "" }, { "docid": "1e21751de8b0e48906c5c97dff699b9d", "score": "0.6067874", "text": "def run_command( *args, **kwargs ):\n echo = kwargs.get( 'echo', True )\n cd = kwargs.get( 'chdir', None )\n raise_on_failure = kwargs.get( 'raise_on_failure', True )\n redirect = kwargs.get( 'redirect', None )\n append = kwargs.get( 'append', False )\n mach = kwargs.get( 'machine', None )\n sshexe = kwargs.get( 'sshexe', None )\n\n cmd,scmd = _assemble_command( *args )\n if mach:\n ss = 'ssh'\n if sshexe:\n ss = sshexe\n cmd,scmd = _assemble_command( ss, mach, scmd )\n\n dryrun = _is_dryrun( cmd )\n\n outfp = None\n fdout = None\n if not dryrun and redirect != None:\n if type(redirect) == type(2):\n fdout = redirect\n elif type(redirect) == type(''):\n fn = redirect\n if cd and not os.path.isabs( redirect ):\n fn = os.path.join( cd, redirect )\n if append: outfp = open( fn, \"a\" )\n else: outfp = open( fn, \"w\" )\n fdout = outfp.fileno()\n\n if echo:\n tm = time.time()\n L = []\n if cd: L.append( 'dir='+cd )\n else: L.append( 'dir='+os.getcwd() )\n if outfp != None:\n L.append( 'logfile='+redirect )\n startid = 'start='+str(tm)\n L.append( startid )\n L.append( 'cmd='+scmd )\n sys.stdout.write( '['+time.ctime(tm)+'] runcmd: '+repr(L)+'\\n' )\n sys.stdout.flush()\n\n # build the arguments for subprocess.Popen()\n argD = {}\n\n if type(cmd) == type(''):\n argD['shell'] = True\n\n argD['bufsize'] = -1 # use system buffer size (is this needed?)\n\n if fdout != None:\n 
argD['stdout'] = fdout\n argD['stderr'] = subprocess.STDOUT\n\n if cd:\n cwd = os.getcwd()\n os.chdir( cd )\n\n try:\n if dryrun:\n x = 0\n else:\n p = subprocess.Popen( cmd, **argD )\n x = p.wait()\n finally:\n if cd:\n os.chdir( cwd )\n\n if outfp != None:\n outfp.close()\n outfp = None\n fdout = None\n\n if echo:\n L = [ 'exit='+str(x), startid, 'cmd='+scmd ]\n sys.stdout.write( '['+time.ctime()+'] return: '+repr(L)+'\\n' )\n sys.stdout.flush()\n\n if raise_on_failure and x != 0:\n raise CommandException( '\\nCommand failed: '+scmd )\n\n return x", "title": "" }, { "docid": "d2575349fdb053577cd86d72e1f3ddb2", "score": "0.60581493", "text": "def run(args):\n pass", "title": "" }, { "docid": "dcd619273412d153610fdc82bdc6f553", "score": "0.60489786", "text": "def run(cmd):\n return subprocess.call(cmd, shell=True)", "title": "" }, { "docid": "d5064ed25cf503114db97ac21dd6378b", "score": "0.6045257", "text": "def run_command(args):\n run = {\n \"ping\": lambda: \"pong\",\n \"getloglevel\": lambda: logger.level,\n \"setloglevel\": setloglevel,\n \"stats\": testasync.stats,\n \"tree\": lambda: Collector.accept(testasync.TreeVisitor()),\n \"reload\": load,\n \"shutdown\": stop,\n \"help\": lambda: \"Command must be one of {}\".format(\n [c for c in run.keys()]\n ),\n }\n logger.info(\"Ctrl: {}\".format(args))\n cmd = args[0]\n if cmd not in run.keys():\n return \"Command '{}' unknown\".format(cmd)\n else:\n try:\n if len(args) == 1:\n return run[cmd]()\n else:\n return run[cmd](*args[1:])\n except Exception as e:\n return e", "title": "" }, { "docid": "14d653e05d1b126fad43bead5bb63f38", "score": "0.60412323", "text": "def start(self):\r\n commandString = \"{0} {1}\".format(self.command, \" \".join(self.arguments))\r\n os.system(commandString)", "title": "" }, { "docid": "5d516febe54621df47ee614f214801b9", "score": "0.604003", "text": "def command_line_tool():\n dispatch_command(run_script)", "title": "" }, { "docid": "9c8b1974191acbac422607e69ba60f0b", "score": "0.6029447", "text": "def test_run_cmd(self, fake_run_command):\n virtual_machine._run_cmd(vcenter=MagicMock(),\n the_vm=MagicMock(),\n cmd='/bin/ls',\n args='-la /tmp',\n user='sally',\n password='DogzAreGreat!',\n logger=MagicMock())\n\n the_args, the_kwargs = fake_run_command.call_args\n full_command = '{} {}'.format(the_args[-1], the_kwargs['arguments'])\n expected = \"/bin/bash -c '/bin/echo DogzAreGreat! 
| /bin/sudo -S /bin/ls -la /tmp'\"\n\n self.assertEqual(full_command, expected)", "title": "" }, { "docid": "2137f1a47d982def69009df4e9bc185b", "score": "0.6013457", "text": "def cmd(self, *pargs, **kwargs):\n return self.__addStep(acquireStep(\"cmd\", pargs, kwargs))", "title": "" }, { "docid": "04bc0ffb770b2f3ffbbed79497d4c8d8", "score": "0.6007019", "text": "async def run_command(*args, **kwargs):\n fn = asyncio.subprocess.create_subprocess_exec\n if kwargs.pop(\"shell\", False):\n fn = asyncio.subprocess.create_subprocess_shell\n check = kwargs.pop(\"check\", False)\n process = await fn(*args, **kwargs)\n stdout, stderr = await process.communicate()\n if check:\n if process.returncode != 0:\n raise Exception(f\"Command failed: {args}\")\n return process.returncode, stdout, stderr", "title": "" }, { "docid": "ad5c9855fcbbf9a830abd8984878aa5a", "score": "0.60052544", "text": "def command_run():\n def command_run_(command_name, args=None, options=None):\n args = args or []\n options = options or {}\n return call_command(command_name, *args, **options)\n\n return command_run_", "title": "" }, { "docid": "e0bc52cd73167fccdb8ca19c9988aa3c", "score": "0.59895027", "text": "def runcommand(args):\n stdout, stderr = Popen(args, stdout=PIPE, stderr=PIPE).communicate()\n stdout = stdout.decode('utf-8')\n stderr = stderr.decode('utf-8')\n\n if stderr:\n DEBUG(\"ERROR when running command '%s':\\n%s\" % (args, stderr))\n sys.exit(1)\n\n return stdout", "title": "" }, { "docid": "20e32bf703d21dd344b8e50ea54f09ec", "score": "0.59892166", "text": "def run_command(self, command):\n subprocess.call(command, shell=True)", "title": "" }, { "docid": "25dfae458a8a717db2080a45bf1ab807", "score": "0.59878784", "text": "def shell(cmd):\n return G.DEVICE.shell(cmd)", "title": "" }, { "docid": "6abdb6d555599f0b322abd2f53a8c1b8", "score": "0.5979024", "text": "def Launch(cmd):\n os.popen(cmd)", "title": "" }, { "docid": "b1d9c223aad35378c119a125b33255cd", "score": "0.59739363", "text": "def run(args, stdin=None): # type: (list[str]) -> ShellCommand\n log.debug(\"run(args=%s)\", repr(args))\n\n if os.path.basename(args[0]).startswith('su'):\n log.info('run() called with illegal command `%s`', args)\n raise RuntimeError('Unauthorized use of run; use sudo_run')\n\n return __run(args, stdin)", "title": "" }, { "docid": "ab6c6d1cd3f84f21e6329b9b76f084bd", "score": "0.59730625", "text": "def __run(cmd, args=\"\"):\n # Depending on argument type, assemble command to be run in list 'cmdl'\n cmdl = [cmd]\n if args:\n if type(args) == str:\n cmdl += args.split(\" \")\n elif type(args) == list:\n cmdl += args\n else:\n errmsg = \"Can't run command: unsupported argument type of \" \\\n + str(args) + \" = \" + str(type(args))\n return \"\", errmsg, 2\n \n if use_sudo:\n if user:\n cmdl = [ 'sudo','-E','-u', user ] + cmdl\n #print(\"__run: cmdl %s\" % cmdl)\n # Run the command\n try:\n p = subprocess.Popen(cmdl, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n so,se = p.communicate()\n except:\n errmsg = \"With sudo: got exception while invoking \" + \" \".join(cmdl)\n return \"\", errmsg, 2\n else:\n set_user = \"nobody\"\n if user:\n set_user = user\n else:\n if 'USER' in os.environ:\n set_user = os.environ['USER']\n\n pw_record = pwd.getpwnam(set_user)\n user_name = pw_record.pw_name\n user_home_dir = pw_record.pw_dir\n user_uid = pw_record.pw_uid\n user_gid = pw_record.pw_gid\n env = os.environ.copy()\n env['HOME'] = user_home_dir\n env['LOGNAME'] = user_name\n #env['PWD'] = cwd\n env['USER'] = user_name\n #print(\"__run: 
cmdl=%s, user=%s\" % (cmdl, user_name))\n try:\n p = subprocess.Popen(cmdl, preexec_fn=demote(user_uid, user_gid), \\\n env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n so,se = p.communicate()\n except:\n errmsg = \"Without sudo: got exception while invoking \" + \" \".join(cmdl)\n return \"\", errmsg, 2\n\n # Check for error codes\n if p.returncode:\n errmsg = \"Running '\" + \" \".join(cmdl) + \"' resulted in error code: \" \\\n + str(p.returncode) + \"\\nError Output:\\n\" + se\n return \"\", errmsg, 2\n\n # And for error output\n if se:\n errmsg = \"Command \" + \" \".join(cmd) + \\\n \" has returned the following stderr output: \" + se\n return so, errmsg, 0\n\n # Return stdout\n return so, \"\", 0", "title": "" }, { "docid": "73495f72ed803f51c199161715cc7c23", "score": "0.59686345", "text": "def run(arg):\n\n # Open output and write some info about the command to be written, including\n # name of command and arguments.\n # This could be modified to adjust how much is printed via a DEBUG variable.\n with open(os.path.join(os.curdir, \"output.log\"), 'a') as outFile:\n\toutFile.write(\"Command: \")\n\tfor a in arg:\n\t outFile.write(a,)\n\t outFile.write(\" \")\n\toutFile.write(\"\\n\")\n # Open output and error log file and append to them the output of the commands\n with open(os.path.join(os.curdir, \"output.log\"), 'a') as outFile:\n\twith open(os.path.join(os.curdir, \"error.log\"), 'a') as errorFile:\n\t # Call the subprocess using convenience method\n\n\t retval = subprocess.call(arg, -1, None, None, outFile, errorFile)\n\t # Check the process exit code, print error information if it exists\n\t if not retval == 0:\n\t\terrData = errorFile.read()\n\t\traise Exception(\"Error executing command: \" + repr(errData))", "title": "" }, { "docid": "a9deb0201b273f26bc58bf5199d22fa1", "score": "0.5967138", "text": "def run_cmd( cmd, ignore_errors=False ):\n res = os.system( cmd )\n if not ignore_errors:\n if res != 0:\n raise StandardError( \"Error ::: command failed ::: \"+cmd )", "title": "" }, { "docid": "fb467ebd79371c6f2f35da85c4dffde9", "score": "0.5960874", "text": "def run(*args, **kwargs):\n verbose = kwargs.get(\"verbose\", True)\n args = map(str, args)\n if verbose:\n log.debug('run: args = %r', args)\n\n if not verbose:\n popen_kwargs = dict(stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n else:\n popen_kwargs = {}\n\n p = subprocess.Popen(args, **popen_kwargs)\n\n stdout, stderr = p.communicate()\n\n if verbose:\n log.debug('run: stdout = %r', stdout)\n log.debug('run: stderr = %r', stderr)\n log.debug('run: status = %r', p.returncode)\n\n if p.returncode:\n if not verbose:\n logging.debug('run: args = %r', args)\n logging.debug('run: status = %r', p.returncode)\n raise Exception(\"The command failed\")\n\n return stdout", "title": "" }, { "docid": "a77fc0660402681e276e1ba5c9f4060c", "score": "0.5956601", "text": "def _RunCommand(self, *command):\n args = [os.path.join(self._android_home, 'platform-tools', 'adb'),\n '-s', self.serial, 'shell']\n args.extend(command)\n self.GetLogger().info('Running at %s: %s', self.serial, args)\n process = subprocess.Popen(args, stdout=subprocess.PIPE)\n output, _ = process.communicate()\n if process.returncode != 0:\n raise AndroidUtilSubprocessError(\n 'Failed to run the command: ' + ' '.join(command))\n for i in output.splitlines():\n self.GetLogger().info(i)\n return output", "title": "" }, { "docid": "46144b2204c101e9ef833526a1802bc1", "score": "0.59547585", "text": "def run_command(cmd):\n shell = 
os.name == 'nt'\n p = Subprocess(cmd, shell=shell)\n print('\\n\\nRunning command: \"%s\"\\n\\n' % ' '.join(cmd))\n p.set_exit_callback(sys.exit)", "title": "" }, { "docid": "ece59f463d0466f8dea05ff44681dde5", "score": "0.5954328", "text": "def runCommand(cmd):\n print cmd\n args = shlex.split(cmd)\n p = subprocess.Popen(args) # shell=bash is not recommended. Only use when '>' must be in cmd. \n return p.communicate()\n #p = Popen(cmd.split(' '), stdout=PIPE)\n #return p.communicate()", "title": "" }, { "docid": "9c7937d174447051fae53c1a0b4906dc", "score": "0.59493685", "text": "def run_command_from_args(args):\n return args.func(args) # this executes the function previously associated with the subparser with set_defaults", "title": "" }, { "docid": "06c399cefe1b6e4c78112241654740c6", "score": "0.59457487", "text": "def execute_command(self, args):\n \n try:\n execute_from_command_line(args)\n except SystemExit:\n if not self.expect_failure:\n self.fail(\"Command '%s' failed and exited. Message was: '%s'\" % ( \n \" \".join( args ) , \n \"\".join(sys.stderr.getvalue().rsplit(\"\\n\", 1)) ) )", "title": "" }, { "docid": "18d5f71df4a07ec79daad8e4aa7e7588", "score": "0.5942987", "text": "def run() -> None:\n if len(sys.argv) < 2:\n _print_and_quit(\n \"Did not understand command.\\nUsage: ac <app> <command> [paramaters]\"\n )\n app = sys.argv[1]\n run_command(app, sys.argv[2:])", "title": "" }, { "docid": "faa029d923379f4c3f5f692a3640ce9e", "score": "0.59416366", "text": "def execute( cls, *args ):\n parser = cls._configure()\n parsed = parser.parse_args( *args )\n Command = parsed.data[ \"command\" ]\n Command( **vars( parsed ) ).run()", "title": "" }, { "docid": "5e4fd81b5b796ed45e464192df892f84", "score": "0.59262544", "text": "def run_cmd(args_list):\n print(\"Running system command: {0}\".format(\" \".join(args_list)))\n proc = subprocess.Popen(\n args_list, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n s_output, s_err = proc.communicate()\n if s_output:\n return s_output.decode(\"utf8\").strip()\n else:\n print(f\"SOMETHING GO WRONG \\U0001F447\\n{s_err}\")", "title": "" }, { "docid": "10a41ab0670ed66e4e915576d4c13e7d", "score": "0.5916768", "text": "def _run_cmd(self, args):\n cmd = [self.py3_exec, self.console_script]\n cmd.extend(args)\n proc = subprocess.Popen(cmd, universal_newlines=True,\n env=self.environ,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n return proc", "title": "" }, { "docid": "963ec277eff4e91539070aa3dc16ae1b", "score": "0.5915747", "text": "def cmdline(self, *args):\n return self.factory.cmdline(*args)", "title": "" }, { "docid": "ae7606ef42df7d85ab5c504a06193ccb", "score": "0.5911031", "text": "def dispatchCommand(cmd, *args):", "title": "" }, { "docid": "420c4719ec106c879c429f945210aa37", "score": "0.5905943", "text": "def execute(self, cmd: t.List[str]) -> None:", "title": "" }, { "docid": "62ffbac6cde2ac9754cc53a837c88028", "score": "0.58932644", "text": "def main(args):\n if not args or args[0] in ['help', '-h', '--help']:\n usage()\n sys.exit(1)\n\n cmd = args[0]\n cmd_dict = get_command_dict()\n if cmd not in cmd_dict:\n hidden_cmd = cmd + \"_hidden\"\n if hidden_cmd not in cmd_dict:\n sys.stderr.write(\"Unknown command: %s\\n\" % cmd)\n sys.exit(1)\n else:\n cmd = hidden_cmd\n\n cmd_dict[cmd](args[1:])", "title": "" }, { "docid": "f4d218fb3c27a0ff71aaac2725c776bb", "score": "0.5872757", "text": "def start_exec(self, cmd):", "title": "" }, { "docid": "2a977a8b2f85d08d3663d568804e5361", "score": "0.58516014", "text": "def 
run(args):\n pass", "title": "" }, { "docid": "1a4417217ca5dbad597dd0365ae68138", "score": "0.5842413", "text": "def run(magic, command, args, options):\n magicword = 'code' if len(args) == 0 else args[0]\n\n uri = magic.get_uri(magicword)\n\n if not uri:\n return None, GotoWarning(\"magicword_does_not_exist\", magicword=magicword)\n\n try:\n subprocess.check_call('subl \"%s\"' % uri, shell=True)\n except subprocess.CalledProcessGotoError:\n return None, GotoError(\"subl_launch_failed\")\n\n return None, None", "title": "" }, { "docid": "4735280c1c9bfd29d4e675e643418fa5", "score": "0.5839221", "text": "def main(args):\n run(args)", "title": "" }, { "docid": "b5dfbf4983b67e5fa79984725afae1c7", "score": "0.58371544", "text": "def _execute_command(self, command, env=None, cwd=None, with_shell=False):\n raise NotImplementedError", "title": "" }, { "docid": "90db884693e1c25112f9ae71fd464390", "score": "0.58336693", "text": "def runCommand(cmd):\n p = Popen(cmd.split(' '), stdout=PIPE)\n return p.communicate()", "title": "" }, { "docid": "90db884693e1c25112f9ae71fd464390", "score": "0.58336693", "text": "def runCommand(cmd):\n p = Popen(cmd.split(' '), stdout=PIPE)\n return p.communicate()", "title": "" }, { "docid": "90db884693e1c25112f9ae71fd464390", "score": "0.58336693", "text": "def runCommand(cmd):\n p = Popen(cmd.split(' '), stdout=PIPE)\n return p.communicate()", "title": "" }, { "docid": "90db884693e1c25112f9ae71fd464390", "score": "0.58336693", "text": "def runCommand(cmd):\n p = Popen(cmd.split(' '), stdout=PIPE)\n return p.communicate()", "title": "" }, { "docid": "90db884693e1c25112f9ae71fd464390", "score": "0.58336693", "text": "def runCommand(cmd):\n p = Popen(cmd.split(' '), stdout=PIPE)\n return p.communicate()", "title": "" }, { "docid": "aabdd76bd7d131291c7478805a27053d", "score": "0.5831793", "text": "def _command(self, *args):\n self._dockermachine = subprocess.check_output([\"docker-machine\"] + list(args)).strip()", "title": "" }, { "docid": "c59c7527f3f2995eaa47240c47942ad1", "score": "0.5830942", "text": "def _run_cmd(vcenter, the_vm, cmd, args, user, password, logger, one_shot=False):\n shell = '/bin/bash'\n the_args = \"-c '/bin/echo {} | /bin/sudo -S {} {}'\".format(password, cmd, args)\n result = run_command(vcenter,\n the_vm,\n shell,\n user=user,\n password=password,\n arguments=the_args,\n timeout=1800,\n one_shot=one_shot,\n init_timeout=1200)\n if result.exitCode:\n logger.error(\"failed to execute: {} {}\".format(shell, the_args))", "title": "" }, { "docid": "130f5a32aba3bc00a2545cbe4993d38d", "score": "0.5819397", "text": "def run_cmd(cmd):\n print (cmd)\n return subprocess.call(cmd, shell=True)", "title": "" }, { "docid": "a5f7526979eaafcf0ab6022656b06658", "score": "0.5799954", "text": "def run(*arg, interactive=False):\n stdout = subprocess.PIPE\n stderr = subprocess.PIPE\n if interactive:\n stdout = None\n stderr = None\n try:\n ret = subprocess.run(\n arg, stdout=stdout, stderr=stderr, check=True, shell=False, encoding=\"utf-8\"\n )\n except subprocess.CalledProcessError as ret:\n print(f\"command line exception with args: {arg}\")\n if not interactive:\n sys.stdout.write(ret.stdout)\n sys.stderr.write(ret.stderr)\n sys.exit(ret.returncode)\n except BaseException:\n raise\n return ret", "title": "" }, { "docid": "53de02c587bfea824ecf1f13e0a68af1", "score": "0.5794259", "text": "def execute_command():\n\n parser = argparse.ArgumentParser(\n description=\"Taming the bull: Change management for Agresso systems\")\n\n parser.add_argument(\n 'command',\n 
type=str,\n help='Command')\n\n parser.add_argument(\n '-l', '--logging',\n type=str,\n default='console',\n dest='logging_destination',\n help='logging (none, console or file)')\n\n parser.add_argument(\n '-v', '--verbosity',\n type=str,\n default='INFO',\n help='Logging level. DEBUG, INFO, ERROR or CRITICAL')\n\n try:\n args, sub_args = parser.parse_known_args()\n _setup_logging(args.logging_destination, args.verbosity)\n except:\n parser.print_help()\n sys.exit()\n\n commands[args.command]()", "title": "" }, { "docid": "18b2fb1c5c6ff4c70ee943851587203e", "score": "0.57921004", "text": "def run_cmd(cmd, verbose=False):\n if verbose:\n print('RUN:', ' '.join(cmd), file=sys.stderr)\n subprocess.check_call(cmd)", "title": "" }, { "docid": "e240d2b169438e788cd605a41e13eb05", "score": "0.5769168", "text": "def _runCommand(command, env=None):\n myenv = {}\n for k, v in env.items():\n myenv[str(k)] = str(v)\n env = myenv\n if SCons.Util.is_List(command):\n p = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=env,\n universal_newlines=False)\n else:\n p = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=env,\n universal_newlines=False,\n shell=True)\n out = p.stdout.read()\n p.stdout.close()\n err = p.stderr.read()\n p.stderr.close()\n status = p.wait()\n\n return (status, out, err)", "title": "" }, { "docid": "88b2d36bbc96e0c9cb45d2952227daf8", "score": "0.5767421", "text": "def run(args=None):\n global usage\n if args:\n cmd, _, action = str(args).partition(' ')\n if 'payment' in cmd:\n return request_payment(action)\n elif 'decrypt' in cmd:\n return decrypt_files(action)\n elif 'encrypt' in cmd:\n reg_key = _winreg.CreateKey(_winreg.HKEY_CURRENT_USER, globals()['registry_key'])\n return encrypt_files(action)\n return usage", "title": "" }, { "docid": "14eea82cd4a8cd6a94e7937ca7222f9e", "score": "0.5765065", "text": "def call_command(self,*args):\n args = list(args)\n args.append(\"-v %s\"%self.verbosity)\n call_command(*args)", "title": "" }, { "docid": "e7cee7568d0872a016e7e9f1337d9d72", "score": "0.57644176", "text": "def _execute(self, cmd, *args, **kwargs):\n\n if hasattr(os, 'geteuid') and os.geteuid() != 0 and not self._root_helper:\n return\n\n command = [LKVM_PATH]\n if self._root_helper:\n command = [self._root_helper] + command\n command.append(cmd)\n command.extend(*args)\n\n command = [str(c) for c in command]\n\n LOG.debug('Executing command : %s ', command)\n if kwargs.get('background'):\n command.insert(0, 'nohup')\n command = ' '.join(command) + ' >/dev/null 2>&1'\n subprocess.Popen(command, shell=True)\n else:\n _PIPE = subprocess.PIPE\n obj = subprocess.Popen(command,\n stdin=_PIPE,\n stdout=_PIPE,\n stderr=_PIPE)\n try:\n result = obj.communicate()\n obj.stdin.close()\n except OSError as err:\n if isinstance(err, ProcessExecutionError):\n err_msg = ('{e[description]}\\ncommand: {e[cmd]}\\n'\n 'exit code: {e[exit_code]}\\nstdout: {e[stdout]}\\n'\n 'stderr: {e[stderr]}').format(e=err)\n raise LKVMException(err_msg)\n if result[1]:\n raise LKVMException(result[1])\n\n return result[0]", "title": "" } ]
ea0e1657d25fa363957a6c8004a0f67e
Allow header advertises REPORT
[ { "docid": "13b96c906996f5e22de31346f0ef2ac6", "score": "0.60892195", "text": "def test_allow_header_deltav(self):\n def do_test(response):\n response = IResponse(response)\n\n allow = response.headers.getHeader(\"allow\")\n if not allow:\n self.fail(\"no Allow header: %s\" % (response.headers,))\n self.assertIn(\"REPORT\", allow, \"no REPORT support\")\n\n request = SimpleRequest(self.site, \"OPTIONS\", \"/\")\n\n return self.send(request, do_test)", "title": "" } ]
[ { "docid": "8d2003e1f6a54240820966697c8de90e", "score": "0.62994033", "text": "def report_only(self) -> None:\n self.header = \"Content-Security-Policy-Report-Only\"", "title": "" }, { "docid": "997e22d61331e7d9a7eff0758254bbbb", "score": "0.6258528", "text": "def report_header(self, header):\n info_str = '# Source:{}\\n# MJD: {:18.12f}\\tRA: {}\\tDEC: {}\\n# DELTAT: {:10.6f}\\tDELTAF(Hz): {:10.6f}\\tmax_drift_rate: {:10.6f}\\tobs_length: {:10.6f}\\n' \\\n .format(header['SOURCE'],header['MJD'], header['RA'], header['DEC'], header['DELTAT'], header['DELTAF']*1e6, header['max_drift_rate'], header['obs_length'])\n\n self.write(info_str)\n self.write('# --------------------------\\n')\n info_str = '# Top_Hit_# \\t'\n info_str += 'Drift_Rate \\t'\n info_str += 'SNR \\t'\n info_str += 'Uncorrected_Frequency \\t'\n info_str += 'Corrected_Frequency \\t'\n info_str += 'Index \\t'\n info_str += 'freq_start \\t'\n info_str += 'freq_end \\t'\n info_str += 'SEFD \\t'\n info_str += 'SEFD_freq \\t'\n info_str += 'Coarse_Channel_Number \\t'\n info_str += 'Full_number_of_hits \\t'\n info_str +='\\n'\n self.write(info_str)\n self.write('# --------------------------\\n')", "title": "" }, { "docid": "69985a97bf52b573318893a4583fdaea", "score": "0.6135733", "text": "def show_header():", "title": "" }, { "docid": "6585ae9a9f58a16fe7b3febafa829607", "score": "0.59888655", "text": "def WriteHeader(self):\n pass", "title": "" }, { "docid": "34b5ffd6fc0ce209998c7c4528dc6518", "score": "0.58672696", "text": "def extract_headers(report):\r\n if END_HEADERS in report:\r\n return report[:report.find(END_HEADERS)]\r\n return report", "title": "" }, { "docid": "40bf119bcacc4fe1652d6d6a3836c2eb", "score": "0.58514047", "text": "def write_header(self):\n raise NotImplementedError", "title": "" }, { "docid": "85618ea245d5cedc702a9c0c54be6069", "score": "0.58172756", "text": "def test_no_header_wildcard(self):\n\n requirement = self.tool_basic_requirement()\n\n requirement.header = \".*\"\n\n requirement.save()\n\n helper = self.tool_get_helper()\n\n # Don't send a header and expect that to work\n\n helper.connect(\"\", \"\", \"1.1.1.1\", \"\", {})\n helper.eoh({})\n\n self.assertTrue(\n helper.enabled,\n \"Helper wasn't enabled after sending no header\"\n )", "title": "" }, { "docid": "f2dc55ec1252e9a204ad0f4e4d0369f8", "score": "0.58001304", "text": "def send_header(self, keyword,value):\n\t\tpass", "title": "" }, { "docid": "2b51a21bc31fe7660b21591b80f3273a", "score": "0.57965106", "text": "def _writeHeader(self, header):\n super()._writeHeader(header)\n header[\"W_VISIT\"] = (self.visit, \"Visit number\")\n header.update(self.header)", "title": "" }, { "docid": "cf714c1e70098cb123102b0cc465ccfb", "score": "0.5774962", "text": "def append_header():\r\n hdr = ['#,setup_header,test_system,Manual,,,\\n']\r\n hdr += ['#,setup_header,version,1,,,\\n']\r\n hdr += ['#,MipiVerification,clusters,,,,\\n']\r\n hdr += ['enabled,type,usid,regAddr,writeMask,regWriteData,' +\r\n 'expectedReadData,regWriteArrayItem0,regWriteArrayItem1,' +\r\n 'regWriteArrayItem2,regWriteArrayItem3\\n']\r\n return hdr", "title": "" }, { "docid": "a4ba7cc5f34d40e90c2a00362cb1e7f5", "score": "0.57365286", "text": "def test_no_header_specific(self):\n\n requirement = self.tool_basic_requirement()\n\n requirement.header = \"Test: Test\"\n\n requirement.save()\n\n helper = self.tool_get_helper()\n\n # Don't send a header and expect that to work\n\n helper.connect(\"\", \"\", \"1.1.1.1\", \"\", {})\n helper.eoh({})\n\n self.assertFalse(\n helper.enabled,\n 
\"Helper was enabled after sending no header\"\n )", "title": "" }, { "docid": "32cb68811c3924d753511588de52ef4e", "score": "0.57277906", "text": "def test_header_only_log(self, verbose=False, strict=False):\n self.setupFlag = False \n self._test_header_only_log(verbose, strict)", "title": "" }, { "docid": "725a590d8809df21f5c8b36487c7e303", "score": "0.56822413", "text": "def _write_header(self):\n for line in self.header.lines:\n print(line.serialize(), file=self.stream)\n if self.header.samples.names:\n print(\n \"\\t\".join(list(parser.REQUIRE_SAMPLE_HEADER) + self.header.samples.names),\n file=self.stream,\n )\n else:\n print(\"\\t\".join(parser.REQUIRE_NO_SAMPLE_HEADER), file=self.stream)", "title": "" }, { "docid": "b930c8cdbf5a845bed954b1e0758d217", "score": "0.56796163", "text": "def reportHeader():\n print(\"{:25s} | {:11s} | {:9s} | {:12s}\".format(\n \"Donor Name\", \"Total Given\", \"Num Gifts\", \"Average Gift\"))\n print(\"-\" * 66)", "title": "" }, { "docid": "59396c75202cfb9f49ce3fb6648105a1", "score": "0.5639489", "text": "def generate_report():", "title": "" }, { "docid": "9faf35956536c3b73aab7853128e4781", "score": "0.5637856", "text": "def _write_only_header(config, header):\n names, lengths = _parse_header(header)\n with pysam.Samfile(\"-\", \"wbu\", text = \"\".join(header),\n referencenames = names,\n referencelengths = lengths,\n add_sq_text = False) as handle:\n return 0", "title": "" }, { "docid": "5bfc5bce4ed89761b556e6ae74e2d026", "score": "0.56332165", "text": "def print_hdr():\n print \"\"\n print \"##############################\"\n print \"# #\"\n print \"# btsnoop parser v0.1 #\"\n print \"# #\"\n print \"##############################\"\n print \"\"", "title": "" }, { "docid": "a00bb0a1312291120df7b07a68837742", "score": "0.56243294", "text": "def onHeader(self, name, value):\n return CONTINUE", "title": "" }, { "docid": "0b7cb7c49d8214c765b6d2cdb7aa908c", "score": "0.5618065", "text": "def _handle_send_report(self, request_json: dict):\n if self.headers.get('Authorization') is None:\n self._set_response(400, 'text/html')\n self.wfile.write(b'missing token')\n return\n\n if self.headers.get('Authorization') != self.token:\n self._set_response(401, 'text/html')\n self.wfile.write(b'Invalid Token')\n return\n\n if request_json.get('xmlReport') is None:\n self._set_response(400, 'text/html')\n self.wfile.write(b'missing xmlReport')\n return\n\n self._set_report_context(request_json)\n self._create_incident(request_json)\n self._set_response(200, 'text/html')", "title": "" }, { "docid": "e449adb18195960aca6c76cbe645cda3", "score": "0.5602165", "text": "def formatReport():", "title": "" }, { "docid": "ea185efbd32b538e43e3bda50d09035a", "score": "0.55572915", "text": "def report(self):", "title": "" }, { "docid": "a0d7e34e98ddbe4bf6a219a18b6be306", "score": "0.5556447", "text": "def pytest_report_header(config, startdir):\n capabilities = config.getoption('capabilities')\n if capabilities:\n return 'capabilities: {0}'.format(capabilities)", "title": "" }, { "docid": "6fe25ecd9ef7c83077d77a911339d322", "score": "0.5555327", "text": "def send_header(s):\n try:\n header = open(\"header.gcode\", \"r\")\n print(\"Header file found. Sending it to the printer.\")\n cnt = 1\n for line in header:\n print(\">>>> Sending line\", cnt, \"--\", line.strip())\n cnt += 1\n s.write(str.encode(line + \"\\n\"))\n print(\"Header sent.\")\n except FileNotFoundError:\n print(\"Header file not found.\")\n except Exception as e:\n print(\"ERR! 
An error occured:\", e)\n return\n s.flushInput()\n wait_for_signal(s, \"wait\", 10000)", "title": "" }, { "docid": "1500e84e58f835ff408676193feca11c", "score": "0.55542874", "text": "def __in_header_func(self, line):\n\n\n if self.__cb_count == self.__header_bracket_count:\n self.__in_header = 0\n self.__write_obj.write(line)\n self.__write_to_head_obj.write(\n 'mi<mk<head___clo\\n')\n self.__write_to_head_obj.write(\n 'mi<tg<close_____<header-or-footer\\n')\n self.__write_to_head_obj.write(\n 'mi<mk<header-clo\\n')\n else:\n self.__write_to_head_obj.write(line)", "title": "" }, { "docid": "17044ecb98dd8f9299f17b42f4e9ef60", "score": "0.55509317", "text": "def _generate_report(self):\n raise NotImplementedError", "title": "" }, { "docid": "3ff8d977f8a6f1c1e1dd6cc398ba3c23", "score": "0.5543416", "text": "def header (buf):\n # Print header data to stderr\n import sys\n\n sys.stderr.write(buf)\n # Returning None implies that all bytes were written", "title": "" }, { "docid": "39c7fbb0ce16431996c305af7394a7b8", "score": "0.5543216", "text": "def setupLoggingHeader(context, REQUEST):\n response = REQUEST.RESPONSE\n dlh = context.discoverLoggingHeader()\n idx = dlh.rindex(\"</table>\")\n dlh = dlh[:idx]\n idx = dlh.rindex(\"</table>\")\n dlh = dlh[:idx]\n response.write(str(dlh[:idx]))\n\n return setWebLoggingStream(response)", "title": "" }, { "docid": "0de22fc7323e1350e18d229254808566", "score": "0.55071694", "text": "def _writeHeader(self):\n\t\tstyle = xlwt.XFStyle()\n\n\t\t# font\n\t\tfont = xlwt.Font()\n\t\tfont.bold = True\n\t\tstyle.font = font\n\n\t\tfor i, field in enumerate(self.header):\n\t\t\tself.sheet.write(0, i, field, style=style)", "title": "" }, { "docid": "cf1310cc2699fb95dd7c0b77c8c3232d", "score": "0.55064017", "text": "def write_header(self, header: list):\n raise NotImplementedError", "title": "" }, { "docid": "c9b948079aace1480014eb7e00e59f02", "score": "0.547406", "text": "def write_header(self):\n self.handle.write(\"##maf version=1 scoring=none\\n\")\n self.handle.write(\"# generated by Biopython\\n\\n\")", "title": "" }, { "docid": "05d65677137362f121bc3c67280ce486", "score": "0.5470208", "text": "def set_minimal_headers(self):\n self._min = True", "title": "" }, { "docid": "d6f3fefbc8e64216b9b2d3124c5fdf42", "score": "0.54549104", "text": "def header(self, spectrum_id_or_path):\n if isinstance(spectrum_id_or_path,int):\n try: \n H = self.dict(\"SELECT * FROM spectra WHERE id={}\".format(spectrum_id_or_path)).fetchone()['header']\n if H: return H\n else: print 'No header for spectrum {}'.format(spectrum_id_or_path)\n except TypeError: print 'No spectrum with id {}'.format(spectrum_id_or_path)\n elif os.path.isfile(spectrum_id_or_path):\n if spectrum_id_or_path.endswith('.fits'):\n return clean_header(spectrum_id_or_path)\n else:\n txt, H = open(spectrum_id_or_path), []\n for i in txt: \n if i.startswith('#'): H.append(i)\n txt.close()\n print ''.join(H) if H else 'No header for spectrum {}'.format(spectrum_id_or_path)\n else: print 'No such file {}'.format(spectrum_id_or_path)", "title": "" }, { "docid": "665105c68658deefcc0740b4e6d3719f", "score": "0.54137754", "text": "def ClearHeader():", "title": "" }, { "docid": "db65433391fdde6ac476121299e803c6", "score": "0.5402825", "text": "def _add_method_header(self, hdr):\n if self.method['fitclass'] is not None:\n try:\n hdr['SCTYPE'] = self.method['fitclass'].fit_type\n hdr['SCMETH'] = self.method['fitclass'].fit_method\n except:\n if not self.quiet and self.hardcopy:\n warnings.warn('Fit class object does not have 
fit_type and/or fit_method ' \\\n 'attributes. No parameters written to header.')\n if self.method['fitpar'] is not None:\n try:\n hdr = self.method['fitpar'].toheader(hdr)\n except:\n if not self.quiet and self.hardcopy:\n warnings.warn('Fit parameter class has no toheader() function. No ' \\\n 'parameters written to header.')\n return hdr", "title": "" }, { "docid": "aa151cb76f52d891abf53b864cd48f2f", "score": "0.5400253", "text": "def disable_header(self, disable_header):\n\n self._disable_header = disable_header", "title": "" }, { "docid": "99927cd51332fb05fb9cefa16181135b", "score": "0.539921", "text": "def _PrefixReport(self, host_name, tester, uid, uname):\n s = ['PyreRing Test Report',\n 'Project: %s' % self.project_name,\n 'Suites: %s' % self.test_name,\n 'Start Time: %s' % self.start_time,\n 'Host Name: %s' % host_name,\n 'Tester: %s' % tester,\n 'UID: %s' % uid,\n 'uname: %s' % str(uname),\n '=' * 80,\n ]\n\n self._WriteToRecord(HEAD, '\\n'.join(s), OVERWRITE)", "title": "" }, { "docid": "d98eb1196686e0eef0e79e59075a9299", "score": "0.53892416", "text": "def write_header(self):\n self.fp = open(self.fname, 'wb')\n tophdr = MIMEHeader()\n tophdr['MIME-Version'] = ['1.0', ]\n tophdr['Content-Type'] = ['multipart/mixed', 'boundary='+self.mb1]\n tophdr['Content-Description'] = [\n 'EVLA/CORRELATOR/WIDAR/FULL_RESOLUTION', ]\n # How do we generate a new unique name?\n nsxl = self.sdmDataHeader.nsmap['xl']\n uid = self.sdmDataHeader.dataOID.attrib['{%s}href' % nsxl][5:]\n tophdr['Content-Location'] = ['http://evla.nrao.edu/wcbe/XSDM' + uid, ]\n self.fp.write(bytes(tophdr.tostring() + '\\n', 'utf-8'))\n\n self.fp.write(bytes('--' + self.mb1 + '\\n', 'utf-8'))\n xhdr = MIMEHeader()\n xhdr['Content-Type'] = ['text/xml', 'charset=utf-8']\n xhdr['Content-Location'] = ['sdmDataHeader.xml', ]\n self.fp.write(bytes(xhdr.tostring() + '\\n', 'utf-8'))\n self.fp.write(etree.tostring(self.sdmDataHeader,\n standalone=True, encoding='utf-8') + b'\\n')", "title": "" }, { "docid": "b680e20f3c611a7a093314b911ddf55f", "score": "0.5383278", "text": "def process_header(self, response: Response, request: Request):", "title": "" }, { "docid": "a8b165d1ba93c00da36a836af727c15c", "score": "0.5369993", "text": "def export_header(self):\n header = []\n for key, parm in self.header.items():\n\n if key != 'parameter':\n # parameter header\n head = key.ljust(21) + parm.export() + '\\n'\n header.append(head)\n else:\n # regular header\n for i in parm:\n head = key.ljust(21) + parm[i].export() + '\\n'\n header.append(head)\n return header", "title": "" }, { "docid": "1fd99704bf8da902a377c03d83b27ab9", "score": "0.5368689", "text": "def _base_header(self, hdr=None):\n _hdr = super(EdgeTraceSet, self)._base_header(hdr=hdr)\n _hdr['QAPATH'] = 'None' if self.qa_path is None else self.qa_path\n self.par.to_header(_hdr)\n self.bitmask.to_header(_hdr)\n return _hdr", "title": "" }, { "docid": "500aea4006ab0d97f74eb256a0c0bd05", "score": "0.5365746", "text": "async def report(self, ctx, *, report = None):\n if not report:\n raise CustomPermissionError\n try:\n await ctx.bot.log.send(embed = await Macro.Embed.infraction(\n f\"{ctx.author.name} from {ctx.guild} said this:\\n{report}\"))\n except Exception as error:\n await ctx.send(embed = await Macro.send(\"The report was not sent\"))\n raise error\n await ctx.send(embed = await Macro.send(\"The report has been sent\"))", "title": "" }, { "docid": "1abf6182938ae714339d28efa59e236a", "score": "0.53624105", "text": "def test_wrong_header(self):\n\n requirement = 
self.tool_basic_requirement()\n\n requirement.header = \"Test: Test\"\n\n requirement.save()\n\n helper = self.tool_get_helper()\n\n # Try to mimic a valid header\n\n helper.connect(\"\", \"\", \"1.1.1.1\", \"\", {})\n helper.header(\"Test\", \"Test\", {})\n helper.eoh({})\n\n self.assertTrue(\n helper.enabled,\n \"Helper wasn't enabled after sending to the right header\"\n )\n\n # Try to mimic a wrong header by not specifying a header at all\n\n helper = self.tool_get_helper()\n\n helper.connect(\"\", \"\", \"1.1.1.1\", \"\", {})\n helper.eoh({})\n\n self.assertFalse(\n helper.enabled,\n \"Helper was enabled after sending to the wrong header \"\n \"(=no header at all)\"\n )", "title": "" }, { "docid": "6e575bb538a840b45feccf42502e2c93", "score": "0.53471816", "text": "def write_header(self, header: list):\n\n self.header = header", "title": "" }, { "docid": "f4efa5c9ca8e6c1d356fa2cf98912e97", "score": "0.5345165", "text": "def _WriteToReport(self):\n self.report_pipe = open(self.report_file, 'w')\n self.report_pipe.write(''.join(['-' * 40, '\\n']))\n for message in [self.header, self.summary, self.pre_body,\n self.body, self.extra]:\n self.report_pipe.write(message)\n self.report_pipe.write(''.join(['-' * 40, '\\n']))\n self.report_pipe.flush()\n self.report_pipe.close()", "title": "" }, { "docid": "ee076d2233c7a3f113e32ecc60826f5b", "score": "0.53415287", "text": "def test_write_data_include_header(self):\n tabfile = TabFile(fp=self.fp,first_line_is_header=True)\n fp = io.StringIO()\n tabfile.write(fp=fp,include_header=True)\n self.assertEqual(fp.getvalue(),self.header+self.data)\n fp.close()", "title": "" }, { "docid": "bae9c50398a645fceab488194620cedb", "score": "0.5332012", "text": "def print_header():\n logger = logging.getLogger(__name__)\n logger.info('')\n logger.info('| Normal | Windows | Cisco | Vendor')\n logger.info('|-------------------|-------------------|----------------|-----------------------------------')", "title": "" }, { "docid": "4fe4ba5df2a74e808846e322fe916022", "score": "0.53296024", "text": "def generate_header(self, config):\r\n raise NotImplementedError()", "title": "" }, { "docid": "18ace4afc78e5d61f75f868ae519cf89", "score": "0.5329342", "text": "def _writeHeader(self, header):\n header[\"RA\"] = (self.raBoresight, \"[degree] pfsDesign field center RA\")\n header[\"DEC\"] = (self.decBoresight, \"[degree] pfsDesign field center DEC\")\n header[\"POSANG\"] = (self.posAng, \"[degree] PFI position angle\")\n header[\"ARMS\"] = (self.arms, \"Exposed arms\")\n header[\"DSGN_NAM\"] = (self.designName, \"Name of design\")\n header[\"DAMD_VER\"] = (3, \"PfsDesign/PfsConfig datamodel version\")\n header[\"W_PFDSGN\"] = (self.pfsDesignId, \"Identifier for fiber configuration\")\n header[\"VARIANT\"] = (self.variant, \"Which variant of PFDSGN0 we are.\")\n header[\"PFDSGN0\"] = (self.designId0, \"The base design of which we are a variant\")", "title": "" }, { "docid": "d4f922a24145db151480b49a1e0aaf96", "score": "0.5328053", "text": "def _test_header_only_log(self, verbose, strict ):\n passFail = True\n passFail = self._setup(True, verbose)\n if not(passFail): return False\n \n passFail = self.test_index_curve(verbose)\n if not(passFail): return False \n \n if self._isTimeBasedLog(_get( 'indexType')) == False: \n passFail = self._log_validate_start_end_index_depth(verbose)\n if not(passFail): return False \n \n passFail = self.test_log_curve_info_min_max_depth(verbose)\n if not(passFail): return False \n \n if strict:\n passFail = 
self.test_log_curve_info_min_max_depth_uom(verbose)\n if not(passFail): return False \n \n else:\n passFail = self._log_validate_start_end_index_time(verbose) \n if not(passFail): return False \n \n passFail = self.test_log_curve_info_min_max_time(verbose)\n if not(passFail): return False \n \n passFail = self.test_log_curve_array_header(verbose) \n if not(passFail): return False \n \n return True", "title": "" }, { "docid": "093cf89fcceda5e6897801e3e1a4f978", "score": "0.5322106", "text": "def handle_addl_headers(self, headers):\n pass", "title": "" }, { "docid": "093cf89fcceda5e6897801e3e1a4f978", "score": "0.5322106", "text": "def handle_addl_headers(self, headers):\n pass", "title": "" }, { "docid": "e76d0c99b4dc3709a033124549d81264", "score": "0.531761", "text": "def write_header(self, header: list):\n\n self.write_line(header)", "title": "" }, { "docid": "fb20f86c2e24336cc2da7f74fa9c2a8e", "score": "0.5287779", "text": "def add_to_header(gal,EXPTIME,m0,GAIN,NCOMBINE,pix2sec,ron):\n\thdulist = pyfits.open(gal, do_not_scale_image_data=True, mode='update')\n\tprihdr = hdulist[0].header\n\n\tprihdr.update('EXPTIME',EXPTIME)#,before='DATE')\n\tif 'GAIN' in prihdr: z=1\n\telse:\n \t\tprihdr.update('GAIN',GAIN)\n\tif 'NCOMBINE' in prihdr: z=1\n\telse:\n \t\tprihdr.update('NCOMBINE',NCOMBINE)\n\tif 'RDNOISE' in prihdr: z=1\n\telse:\n \t\tprihdr.update('RDNOISE',ron)\n\tprihdr.update('m0',m0)\n\thdulist.flush()", "title": "" }, { "docid": "acce1f237b903757361da0f36c4ceca3", "score": "0.52792394", "text": "def GetReportDownloadHeaders(self, kwargs):\n headers = self._adwords_client.oauth2_client.CreateHttpHeader()\n headers.update({\n 'Content-type': self._CONTENT_TYPE,\n 'developerToken': str(self._adwords_client.developer_token),\n 'clientCustomerId': str(self._adwords_client.client_customer_id),\n 'User-Agent': ''.join([self._adwords_client.user_agent, self._LIB_SIG,\n ',gzip'])\n })\n\n for kw in kwargs:\n try:\n headers.update({_REPORT_HEADER_KWARGS[kw]: str(kwargs[kw])})\n except KeyError:\n raise googleads.errors.GoogleAdsValueError(\n 'The provided keyword \"%s\" is invalid. 
Accepted keywords are: %s'\n % (kw, _REPORT_HEADER_KWARGS.keys()))\n\n return headers", "title": "" }, { "docid": "03e621ae02bc6cda242742c365deb47c", "score": "0.527646", "text": "def write_report(self):\n self.doc.start_paragraph(\"SRC-ReportTitle\")\n title = self.title_string\n mark = IndexMark(title, INDEX_TYPE_TOC, 1) \n self.doc.write_text(title, mark)\n self.doc.end_paragraph()\n \n self.doc.start_paragraph(\"SRC-ReportTitle\")\n title = self.subtitle_string\n mark = IndexMark(title, INDEX_TYPE_TOC, 1) \n self.doc.write_text(title, mark)\n self.doc.end_paragraph()\n \n self.listpersonref()\n \n self.doc.start_paragraph('SRC-Footer')\n self.doc.write_text(self.footer_string)\n self.doc.end_paragraph()", "title": "" }, { "docid": "bc3ed8d1225c0c962effa64a6c2de114", "score": "0.52703017", "text": "def edit_header(self, header):\n super().edit_header(header)\n self.detector_array.edit_header(header)\n\n freq = (1.0 / self.sampling_interval).to('Hz').value\n info = [('COMMENT', \"<------ FIFI-LS Header Keys ------>\"),\n ('SMPLFREQ', to_header_float(freq),\n \"(Hz) Detector readout rate.\")]\n\n insert_info_in_header(header, info, delete_special=True)", "title": "" }, { "docid": "d06bd2266b7f8317834686dc942839f2", "score": "0.5269677", "text": "def header(self, header):\n self._header = header", "title": "" }, { "docid": "d06bd2266b7f8317834686dc942839f2", "score": "0.5269677", "text": "def header(self, header):\n self._header = header", "title": "" }, { "docid": "3549a028283cffd336deebb8139df24b", "score": "0.52623326", "text": "def writeHeader(self, path):\n \n # print heading, OnsetDetector parameters\n heading = 'Check onsetdetector results'\n self.write(heading+'\\n'+'='*len(heading)+'\\n')\n \n with open(os.path.join(path, 'ondet_params.txt')) as fileobj:\n header = fileobj.read()\n \n header += 'Onset tolerance: {:.3f}ms\\n'.format(self.tolerance)\n \n self.write(header, '\\n\\n')", "title": "" }, { "docid": "82d836e94110abe4ee525baa47f30fbd", "score": "0.5244906", "text": "def write_header(self) -> None:\n last = HEADER_BYTE_4\n if self.share_keys:\n last |= HEADER_BIT_HAS_SHARED_NAMES\n if self.share_values:\n last |= HEADER_BIT_HAS_SHARED_STRING_VALUES\n if not self.encode_as_7bit:\n last |= HEADER_BIT_HAS_RAW_BINARY\n self.write_bytes(HEADER_BYTE_1, HEADER_BYTE_2, HEADER_BYTE_3, int(last))", "title": "" }, { "docid": "e4086778aa17d8c85a5b47cdf72c0621", "score": "0.5240993", "text": "def WriteHeader(self):\n self.filehandle.WriteLine(u'{')\n self._event_counter = 0", "title": "" }, { "docid": "f071a7ab7719e21c1637ec470c7301c0", "score": "0.523989", "text": "def set_header(self, header):\n self.header = header", "title": "" }, { "docid": "de8cf6c99b22360d50f03a8a8a5594ea", "score": "0.5238201", "text": "def WriteFileHeader(self,codingParams):\n pass # default is do nothing", "title": "" }, { "docid": "09cffa4d02141b3a95087dec51c88f09", "score": "0.52200955", "text": "def printHeader(config, mailCount):\n if mailCount == 0:\n print(':envelope:')\n else:\n print(':envelope.fill: {} | color={},{}'.format(mailCount,\n config[UNREAD_LIGHT],\n config[UNREAD_DARK]))\n print('---')\n return", "title": "" }, { "docid": "e5702f605b2497a2711fde4184c19cae", "score": "0.52116585", "text": "def _getFITSHeader(self, hdulist, options):\n JWInstrument._getFITSHeader(self, hdulist, options)\n hdulist[0].header.update('FOCUSPOS',0,'FGS focus mechanism not yet modeled.')", "title": "" }, { "docid": "d1afd0f9d15dbeecb41ccccd68a891f1", "score": "0.52059895", "text": "def result_headers(self, rh, 
hdr):\n return hdr", "title": "" }, { "docid": "1454c030471a6252891294b0d90ec94b", "score": "0.5179189", "text": "def set_full_headers(self):\n self._min = False", "title": "" }, { "docid": "2afb638c198df9302cf7ad10fe721ff4", "score": "0.51785755", "text": "def _collectHeader(self,header,otherHeader):\n returnHeader=\"\"\n if not len(otherHeader)>0:\n returnHeader=header[0]\n else:\n returnHeader=otherHeader\n return self._headerFilter(returnHeader)", "title": "" }, { "docid": "d119495482a27f3f9cda85ac06e74b8e", "score": "0.51775193", "text": "def hvp_writeheader( self ):\n dsw = self.dsw\n columns_line = dsw.delimiter.join(self.fieldnames)\n if self.verbosity and self.verbosity is not None:\n print (\"hvp_writeheader:header_name=%s, Columns='%s'\"\n % (dsw.header_name,repr(columns_line)))\n\n with open(self.dsw.header_name, 'wb') as fh:\n fh.write(columns_line)", "title": "" }, { "docid": "845ceec3ff04e0cd2a5b0e939528f820", "score": "0.5173698", "text": "def _speak_header_always_inherit(self, element):\n\n self._speak_header_once_inherit(element)\n\n cell_elements = self.html_parser.find(element).find_descendants(\n 'td[headers],th[headers]'\n ).list_results()\n accessible_display = AccessibleDisplayImplementation(\n self.html_parser,\n self.configure\n )\n for cell_element in cell_elements:\n accessible_display.display_cell_header(cell_element)", "title": "" }, { "docid": "35e5491f171ddaeed60411f9cb642201", "score": "0.51699835", "text": "def render_header_forwards(self):\n raise NotImplementedError", "title": "" }, { "docid": "1d71db70b880b65bbdd96241527490e2", "score": "0.51650494", "text": "def getHeader(self):\n pass", "title": "" }, { "docid": "38929247b1b4f25dd9f79e96c4392487", "score": "0.51640016", "text": "def _speak_header_once_inherit(self, element):\n\n header_elements = self.html_parser.find(element).find_descendants(\n '['\n + AccessibleDisplayImplementation.DATA_ATTRIBUTE_HEADERS_OF\n + ']'\n ).list_results()\n for header_element in header_elements:\n header_element.remove_node()", "title": "" }, { "docid": "462776c71271f65870156817f5a6f9d2", "score": "0.5162595", "text": "def report(self):\n raise NotImplementedError", "title": "" }, { "docid": "462776c71271f65870156817f5a6f9d2", "score": "0.5162595", "text": "def report(self):\n raise NotImplementedError", "title": "" }, { "docid": "462776c71271f65870156817f5a6f9d2", "score": "0.5162595", "text": "def report(self):\n raise NotImplementedError", "title": "" }, { "docid": "4692858f6f8008f21ddabc44599fa584", "score": "0.5156575", "text": "def header(self, value: Header_IESTM2714):\n\n attest(\n isinstance(value, Header_IESTM2714),\n f'\"header\" property: \"{value}\" type is not \"Header_IESTM2714\"!',\n )\n\n self._header = value", "title": "" }, { "docid": "da88ec670c229b9cef6ae7f949027b94", "score": "0.5153467", "text": "def print_header(self):\n for colnum in xrange(len(self.dataorder)):\n self.ws.write(self.rownum, colnum, self.dataorder[colnum][1],\n self.header_style)\n self.ws.col(colnum).width = len(self.dataorder[colnum][1]) * 300\n self.rownum += 1", "title": "" }, { "docid": "3d1ea7d2e9312e29408531815e3daf39", "score": "0.51529616", "text": "def headers(self):\n raise NotImplementedError", "title": "" }, { "docid": "e696a2e7dbaa860ec97a8ed1a8680a24", "score": "0.5137274", "text": "def write_header(self):\n if self.header_was_written:\n return\n header_msg = serialize.format_header(\n format_str=self.serializer.format_str,\n field_names=self.serializer.field_names,\n field_units=self.serializer.field_units,\n 
comment=self.comment, newline=self.newline,\n delimiter=self.delimiter)\n self.fd.write(header_msg)\n self.header_was_written = True", "title": "" }, { "docid": "5a71ae56544e08614da73d7802fd2529", "score": "0.513569", "text": "def render_header_impl(self):\n raise NotImplementedError", "title": "" }, { "docid": "296f83c7df09300e748187e6bdb6016a", "score": "0.5133511", "text": "def report(self, **kw):\n self._tables, self._plots = self._select_report_sections(kw)\n header = self.header()\n if header:\n display(header)\n for value in self.SECTIONS:\n if value not in self._tables and value not in self._plots:\n continue\n # display title\n title = HTML('<h2>{}</h2>'.format(self.SECTIONS[value]))\n display(title)\n # if there is a table, show it\n if value in self._tables:\n meth = getattr(self, '{}_table'.format(value))\n display(meth(*self._tables[value]))\n # if there is a graph, show it\n if value in self._plots:\n meth = getattr(self, '{}_plot'.format(value))\n plot = meth(*self._plots[value])\n display()\n footer = self.footer()\n if footer:\n display(footer)", "title": "" }, { "docid": "94d9eb47df94a586f74e61ebaa51abf7", "score": "0.5129157", "text": "def ShowHeader(self):\n text = re.sub(\" +\", \" \", self.hdul[0].header.tostring())\n top = Toplevel()\n top.title('SPEX::FITSHEADER')\n top.geometry(\"500x600\")\n scrollbar1 = Scrollbar(top)\n scrollbar1.pack(side=RIGHT, fill=Y)\n header = Text(top, width=450, height=450)\n header.insert(END, (text).splitlines())\n header.config(state=DISABLED)\n header.pack()\n scrollbar1.config(command=header.yview)", "title": "" }, { "docid": "1caaa6b173c7dea1e5e62677992fda36", "score": "0.5126397", "text": "def output(self, attrs,header):\n\t\tpass", "title": "" }, { "docid": "5cc47c94a07dfbfd32999d2913492baa", "score": "0.51230043", "text": "def header_send(self, f):\n self._write(f, self.header_encode())", "title": "" }, { "docid": "fb10dad5c8057920ebbd60105927d0e8", "score": "0.5116257", "text": "def describe_header(fcs,of):\n of.write(\"*** FCS Header Information ***\\n\")\n of.write(\"Version: \"+fcs.version+\"\\n\")\n text_string = str(len(fcs.text.bytes))+' bytes'\n of.write(\"TEXT: \"+text_string+\"\\n\")\n data_string = str(fcs.standard.ENDDATA-fcs.standard.BEGINDATA+1)+' bytes'\n of.write(\"DATA: \"+data_string+\"\\n\")\n analysis_string = 'False'\n if fcs.standard.BEGINANALYSIS != 0:\n analysis_string = 'True, '\n analysis_string +=str(fcs.standard.ENDANLYSIS-fcs.standard.BEGINANALYSIS+1)+' bytes'\n of.write(\"ANALYSIS: \"+analysis_string+\"\\n\")\n other_string = 'False'\n if len(fcs.other) > 0:\n other_string = 'True, '\n word = 'segment'\n if len(fcs.other) > 1: word = 'segments'\n other_string += str(len(fcs.other))+' '+word+', '\n other_string += str(sum([len(x) for x in fcs.other]))+' bytes'\n of.write('OTHER: '+other_string+\"\\n\")", "title": "" }, { "docid": "2fc0169fd44691e9619d8c7faa128723", "score": "0.5106425", "text": "def _add_header(self, pull_request):\n pull_request.body = \"\" if pull_request.body is None else pull_request.body\n if self.UNAGGREGATED_PR_HEADER not in pull_request.body:\n pull_request.body += self.UNAGGREGATED_PR_HEADER", "title": "" }, { "docid": "993fe82438aa13468456f72443716930", "score": "0.51044595", "text": "def end_headers(self):\r\n if self.request_version != 'HTTP/0.9':\r\n self.wfile.write(\"\\r\\n\")", "title": "" }, { "docid": "1d1c6e3f0622bbf1ee627492e913ae28", "score": "0.5100655", "text": "def 
print_header():\r\n\tprint('\\n')\r\n\tprint('======================================================================')\r\n\tprint('######## ## ## ######## ## ####### ## ## #### ######## ')\r\n\tprint(' ## ## ## ## ## ## ## ## ## ## ## ')\r\n\tprint(' ## ## ## ## ## ## ## ## ## ## ## ')\r\n\tprint(' ## ## ## ## ## ## ## ##### ## ## ')\r\n\tprint(' ## ## ## ## ## ## ## ## ## ## ## ')\r\n\tprint(' ## ## ## ## ## ## ## ## ## ## ## ')\r\n\tprint(' ## ####### ## ######## ####### ## ## #### ## ')\r\n\tprint('======================================================================')\r\n\tprint('\\n')", "title": "" }, { "docid": "8fd59e4ee39a5ba2492a9da3c0faf547", "score": "0.5090078", "text": "def on_request(self, connection, request):\n if request[\"expect\"].lower() == \"100-continue\":\n connection.write(writer.compose_headers(\"100\", \"Continue\", {}))", "title": "" }, { "docid": "46a256c883828dc71b1a5cccac739a61", "score": "0.50899357", "text": "def create_response_header(cls, header):\n resp = cls.copy(header)\n resp.command_flags = header.command_flags & FLAG_PROXIABLE\n return resp", "title": "" }, { "docid": "93b9a68f51540203f9b210102af20f5f", "score": "0.50880253", "text": "def stdout_header():\n header = \"\"\"\n______ _ ______ __\n| ___ \\ | | | _ \\ / _|\n| |_/ /___ _ _| |_ ___ _ __ | | | |___| |_ ___ _ __ ___ ___\n| // _ \\| | | | __/ _ \\ '__| | | | / _ \\ _/ _ \\ '_ \\/ __|/ _ \\\\\n| |\\ \\ (_) | |_| | || __/ | | |/ / __/ || __/ | | \\__ \\ __/\n\\_| \\_\\___/ \\__,_|\\__\\___|_| |___/ \\___|_| \\___|_| |_|___/\\___|\n\n=[ Cisco IOS security assessment tool\n=[ https://github.com/pello/routerdefense \n=[ version 2013.12\n\n\"\"\"\n return header;", "title": "" }, { "docid": "549b41b6301bad1f27be1ded806e465c", "score": "0.50872195", "text": "def add_to_header(self, header_f, outfilename, line_ann):\n\n of = open(outfilename, 'w')\n\n # getting the type of line that is being passed\n p = re.compile(\"^##(\\w+)=\")\n\n m1 = p.match(line_ann)\n type_ann = m1.group(1)\n\n line_seen = False\n with open(header_f) as f:\n for line in f:\n line = line.rstrip(\"\\n\")\n m2 = p.match(line)\n if m2 is None:\n of.write(line+\"\\n\")\n continue\n type1_ann = m2.group(1)\n if type_ann == type1_ann and line_seen is False:\n line_seen = True\n of.write(line+\"\\n\"+line_ann+\"\\n\")\n continue\n else:\n of.write(line+\"\\n\")\n of.close()\n\n return outfilename", "title": "" }, { "docid": "3e0cf007122c07e8d1a232a078f4bc16", "score": "0.50846666", "text": "def test_report(self):\n report = self.create_report()\n response = self._get()\n self._check_report(response, report)", "title": "" }, { "docid": "9087d255b359215ef97815d7e5c9a0d1", "score": "0.50832015", "text": "def collect_headers(self):\n headers = set()\n for obj in self.objects.itervalues():\n if 'coord' in obj:\n filename = obj['coord']['file']\n # check if it should be ignored.\n ignore = False\n for pattern in IGNORED_HEADERS:\n if pattern.match(filename):\n ignore = True\n if not ignore:\n headers.add(filename)\n # Generate code.\n code = []\n for header in headers:\n name = os.path.splitext(header)[0]\n code.append('include %s' % name)\n code.append('')\n self.codegens['!headers'] = code", "title": "" }, { "docid": "8d0d44b8d17667435a81339a3ec922a1", "score": "0.5068629", "text": "def show_header_only(self):\n # First cleat the screen\n if os.name == 'nt':\n os.system(\"cls\")\n else:\n os.system('clear')\n\n # Then print the header\n empty_line = '= ' + \\\n ' =\\n'\n print('========================================' +\n 
'========================================\\n' + empty_line +\n '= LightUpPi' +\n ' Alarm =')\n # If there are active alarms, print the next schedule time to alert\n next_alarm = self.alarm_mgr.get_next_alarm()\n if next_alarm is not None:\n print(empty_line + AlarmCli.dashes_line + '\\n' + empty_line +\n '= Next scheduled alert'\n ' is for Alarm ID %3d =' % next_alarm.id_)\n print(empty_line + AlarmCli.dashes_line + '\\n' + empty_line +\n '= Use the \"help\" command for informat' +\n 'ion about how to use this program. =\\n' +\n '= This program must remain open for t' +\n 'he alarms to be active and running. =\\n' + empty_line +\n '========================================' +\n '========================================')\n\n print('\\n') # Empty line for visual spacing", "title": "" }, { "docid": "c5d7c3ac69898585efb4937dcdded181", "score": "0.50657314", "text": "def print_header():\n print(\n \"+--------------+-------------+------------+--------------\"\n \"+-----------+-------------+---------------+------------\"\n \"+---------------------+-----------+\"\n )", "title": "" }, { "docid": "f54838c7bf445b290232acc940c2efb0", "score": "0.5063822", "text": "def __add_header(header: str, color: Color) -> None:\n ContextPrinter.self.headers.append(color + header + Color.END)", "title": "" }, { "docid": "cf8ebd5acd1c5eba0c76af51056e657d", "score": "0.50495344", "text": "def pytest_report_header(config):\n header = \"Testing against CIM_ENV \" + config.getoption(\"cim_env\")\n header += \"\\r\\nURL: \" + config.getoption(\"url\")\n header += \"\\r\\nRunning from directory \" + os.getcwd()\n header += '\\r\\nTest shim directory ' + os.path.dirname(__file__)\n header += \"\\r\\nDNS IP: \" + config.getoption(\"dns_ip\")\n return header", "title": "" }, { "docid": "85880ccdb907ea8c7dac4af4389557f4", "score": "0.5048237", "text": "def _getFITSHeader(self, hdulist, options):\n JWInstrument._getFITSHeader(self, hdulist, options)\n hdulist[0].header.update('GRATING', 'None', 'NIRSpec grating element name')\n hdulist[0].header.update('APERTURE', str(self.image_mask), 'NIRSpec slit aperture name')", "title": "" } ]
605c3d2d364735d62b5ed1a9a8991a37
Get map of var_name > set(input var names) for the model
[ { "docid": "9e6522c73a1371b171b810c30a983e33", "score": "0.6106772", "text": "def make_compute_graph(self):\n input_map = {}\n for var_name in self.var_names:\n input_map[var_name] = self.get_parents(self.model[var_name])\n # add in constants\n input_map[var_name] = input_map[var_name] | self.get_constant_parents(self.model[var_name])\n \n return input_map", "title": "" } ]
[ { "docid": "432c08e5e8607e48a63c1e3f3d993946", "score": "0.7184372", "text": "def variables(model: Model) -> AbstractSet[str]:\n assert is_model(model)\n return model.keys()", "title": "" }, { "docid": "9790cb9ad8fbc2913d8a9a64c0c38c45", "score": "0.69497913", "text": "def input_variables(self):\n return self.layer['fuzzify'].varmfs.items()", "title": "" }, { "docid": "b4e1b0c922db7f1cfd464384a84839c1", "score": "0.6856305", "text": "def model_variables(self):\n \n return list(self._varnames.values())", "title": "" }, { "docid": "909feca9a84fe811afb99ac401709456", "score": "0.6447515", "text": "def variables(model):\n return set(node[1] for node in walk(model) if node[0] in (DEREF,LOOP))", "title": "" }, { "docid": "a1c6dead5a4a3159012e442d5585e34c", "score": "0.6049075", "text": "def get_variables_indices(self, name):\r\n return self.variables_dict[name]", "title": "" }, { "docid": "7a9777653b524b8cead508a900669808", "score": "0.6040368", "text": "def get_vars(self) -> Set[str]:\n acc = set()\n for mon, _ in self.monomials():\n acc.update(mon.get_vars())\n return acc", "title": "" }, { "docid": "56613fa642e9b509d127ee0f6cb4e891", "score": "0.5995766", "text": "def get_vars():\n return [\n \"U:0\",\n \"A:0\"\n ]", "title": "" }, { "docid": "e6332b118304a0d062e1141d78b0a18e", "score": "0.5990869", "text": "def variables(self):\r\n # This is a set, because if it were a regular list,\r\n #there would be duplicates when parameters are tied.\r\n return set(variable for params in self.params_list for variable in params.variables)", "title": "" }, { "docid": "131229d576e72c9000abe8cdac07475c", "score": "0.5986631", "text": "def get_var_models(self, in_log=True):\n return self.getter(self.var_models)", "title": "" }, { "docid": "15fedcebff970d3f0c653295aa968d72", "score": "0.5962523", "text": "def _get_model_params(self):\n with self._graph.as_default():\n gvars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n return {gvar.op.name: value for gvar, value in zip(gvars, self._session.run(gvars))}", "title": "" }, { "docid": "83c6e46f3db86d8e3a015f011039a2ba", "score": "0.5922019", "text": "def getVarsNames( _vars, symboltable ) :\r\n return [ getVarName( var, symboltable ) for var in _vars ]", "title": "" }, { "docid": "ad07b7196a097d6b2175e804710ea6d4", "score": "0.5914631", "text": "def getVarNames(self):\n return self.__VariableValuesDict.keys()", "title": "" }, { "docid": "a9a262af15f0f1990667aa87fe8bb75c", "score": "0.5899374", "text": "def _getModel(variables):\n return {name: cvc_var.getvalue() for (name, cvc_var) in variables.items()}", "title": "" }, { "docid": "8b21cdb7a9def68171d037d703f5bd27", "score": "0.5891618", "text": "def variables(self):\n vars_ = []\n for arg in self.input_nodes:\n vars_ += arg.variables()\n unordered = list(set(vars_)) # Make unique, order by uuid.\n return sorted(unordered, key=lambda x: x.uuid)", "title": "" }, { "docid": "e8202c9caace9317ba29f8ef0feeede8", "score": "0.58615065", "text": "def get_varnames(problem, variable_dict):\n\n return [problem.getVariable(item).name for item in range(len(variable_dict))]", "title": "" }, { "docid": "ba60465312779773e595d95192edf7b5", "score": "0.584825", "text": "def get_regularizable_vars(self):\n trainable = self._variable_scope.global_variables()\n return [\n var for var in trainable\n if 'hidden' in var.name and 'kernel' in var.name\n ]", "title": "" }, { "docid": "56827af482655c0a012f19600f9f4740", "score": "0.583184", "text": "def get_names(self):\n return varlist", "title": "" }, { "docid": 
"e65a6b2133702249a5e7585ffcd9a811", "score": "0.5823245", "text": "def getStateVarNames():\n nameOf = lambda x: a3t.evalAtom3Type( x.name )\n return list(map(nameOf, getStateVars()))", "title": "" }, { "docid": "e9f70cc8e33fb48b69daaeebd9b116c1", "score": "0.58153784", "text": "def savable_variables(self):\n params = []\n for v in tf.global_variables():\n split_name = v.name.split('/')\n if split_name[0] == 'v0' or not v.name.startswith('v'):\n params.append(v)\n return params", "title": "" }, { "docid": "e9f70cc8e33fb48b69daaeebd9b116c1", "score": "0.58153784", "text": "def savable_variables(self):\n params = []\n for v in tf.global_variables():\n split_name = v.name.split('/')\n if split_name[0] == 'v0' or not v.name.startswith('v'):\n params.append(v)\n return params", "title": "" }, { "docid": "0717e79e3176aef6b693b9a25974dfd5", "score": "0.5802954", "text": "def state(self):\n return map((lambda var: var.get()), self.vars)", "title": "" }, { "docid": "0dfdd4bc7237c87408eb53a48ee7b8fd", "score": "0.57996094", "text": "def fvt(t: Term) -> Set[Variable]:\n return get_vars(t, unique=True)", "title": "" }, { "docid": "f8362228ce469f48129125337112694d", "score": "0.577894", "text": "def variational_parameter_names(self):\n pass", "title": "" }, { "docid": "ec495e8a3938a17487db2b4216dabc29", "score": "0.57769084", "text": "def get_relevant_input_variables(boolean_func, component):\n input_variables = []\n null_implicants = boolean_func.primes[component][0]\n for implicant in null_implicants:\n input_variables += implicant.keys()\n return list(set(input_variables))", "title": "" }, { "docid": "a3f9834d19a29af3d96811c9ccb98468", "score": "0.57768667", "text": "def get_variables(self):\n variables = set()\n for k, v in self.get_components_values().items():\n vs = v.get_variables()\n print('%s: %s' % (k, vs))\n variables.update(vs)\n \n return variables", "title": "" }, { "docid": "e722f11f5856b1137f9771adb1426287", "score": "0.57746774", "text": "def named_variables(self):\n return dict(zip(map(str, itertools.count()), self.variables))", "title": "" }, { "docid": "a2852e744d0618ac8c914865304edcb6", "score": "0.5746087", "text": "def getCharacterizingVariableNames(self):\n if self._paramNames is None:\n if self._paramRealization is not None:\n # if we trained already?,then we can return keys\n self._paramNames = list(self._paramRealization.keys())\n else:\n # otherwise we build the names predictively\n names = []\n for algo in self._tsaAlgorithms:\n names.extend(algo.getParamNames(self._tsaAlgoSettings[algo]))\n self._paramNames = names\n return self._paramNames", "title": "" }, { "docid": "e1bd1fce401f034cd93a2b09ca382760", "score": "0.5732805", "text": "def get_vars(all_vars, scope_name, index):\r\n\tckpt_vars = [var for var in all_vars if var.op.name.startswith(scope_name)]\r\n\tckpt_var_dict = {}\r\n\tfor var in ckpt_vars:\r\n\t\tactual_var_name = var.op.name\r\n\t\tif actual_var_name.find('Logits') ==-1:\r\n\t\t\tclip_var_name = actual_var_name[index:]\r\n\t\t\tckpt_var_dict[clip_var_name] = var\r\n\t\t\r\n\treturn ckpt_var_dict", "title": "" }, { "docid": "4795950f9b51d51e91d6e53b11a5837a", "score": "0.5731967", "text": "def _vars_in(self, expr):\n res = set()\n if self.use_lookup_tables and isinstance(expr, mathml) and self.is_lookup_table(expr):\n key_var = self.varobj(expr.getAttributeNS(NSS['lut'], u'var'))\n key_var = key_var.get_source_variable(recurse=True)\n res.add(key_var)\n elif isinstance(expr, mathml_ci):\n varobj = getattr(expr, '_cml_variable', None)\n if not varobj:\n 
varname = unicode(expr)\n varobj = self.varobj(varname.strip())\n if varobj:\n res.add(varobj)\n elif isinstance(expr, mathml_apply) and expr.operator().localName == u'diff':\n dep_varname = unicode(expr.ci)\n varobj = self.varobj(dep_varname.strip())\n res.add(varobj.get_ode_dependency(self.free_vars[0]))\n elif hasattr(expr, 'xml_children'):\n for child in expr.xml_children:\n res.update(self._vars_in(child))\n return res", "title": "" }, { "docid": "2fe8665e7978a49b23b18f6edfed1ac8", "score": "0.57118905", "text": "def get_variables(self):\n with self.__var_lock:\n names = list(filter(lambda n: len(n) == 1, reversed(list(self.__vars.keys()))))\n return [{\n 'name': name,\n 'type': self.__vars[name].objectType\n } for name in names]", "title": "" }, { "docid": "7597152890b918f6c16ed6dadf66c535", "score": "0.57072693", "text": "def get_vars_by_scope(scope):\n\n vars_dict = {}\n vars_dict['all'] = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)\n vars_dict['trainable'] = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)\n vars_dict['maskable'] = []\n conv2d_pattern = re.compile(r'/Conv2D$')\n conv2d_ops = get_ops_by_scope_n_pattern(scope, conv2d_pattern)\n for var in vars_dict['trainable']:\n for op in conv2d_ops:\n for op_input in op.inputs:\n if op_input.name == var.name.replace(':0', '/read:0'):\n vars_dict['maskable'] += [var]\n\n return vars_dict", "title": "" }, { "docid": "59328d48a4939d006aa207e9c9c70a8b", "score": "0.5686393", "text": "def get_train_var_list(self):\n return [var for var in tf.trainable_variables()]", "title": "" }, { "docid": "5623719b0ca7462e655d1f080f5fb6ff", "score": "0.5671458", "text": "def get_model_params():\n gvars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n return {\n gvar.op.name: value\n for gvar, value in zip(gvars,\n tf.get_default_session().run(gvars))\n }", "title": "" }, { "docid": "924e4132301e718f245d1cb91f410006", "score": "0.56593513", "text": "def list_var_obj(self):\n return [self.dats[n] for n in self.var_names]", "title": "" }, { "docid": "6afe67718faf563de4706da23bcf28b3", "score": "0.56588054", "text": "def add_vars(self, name, N, v0=None, vl=None, vu=None):\n ## prevent duplicate named var sets\n if name in self.var[\"idx\"][\"N\"]:\n stderr.write('opf_model.add_vars: variable set named ''%s'' already exists\\n' % name)\n\n if v0 is None or len(v0) == 0:\n v0 = zeros(N) ## init to zero by default\n\n if vl is None or len(vl) == 0:\n vl = -Inf * ones(N) ## unbounded below by default\n\n if vu is None or len(vu) == 0:\n vu = Inf * ones(N) ## unbounded above by default\n\n\n ## add info about this var set\n self.var[\"idx\"][\"i1\"][name] = self.var[\"N\"] #+ 1 ## starting index\n self.var[\"idx\"][\"iN\"][name] = self.var[\"N\"] + N ## ing index\n self.var[\"idx\"][\"N\"][name] = N ## number of vars\n self.var[\"data\"][\"v0\"][name] = v0 ## initial value\n self.var[\"data\"][\"vl\"][name] = vl ## lower bound\n self.var[\"data\"][\"vu\"][name] = vu ## upper bound\n\n ## update number of vars and var sets\n self.var[\"N\"] = self.var[\"idx\"][\"iN\"][name]\n self.var[\"NS\"] = self.var[\"NS\"] + 1\n\n ## put name in ordered list of var sets\n# self.var[\"order\"][self.var[\"NS\"]] = name\n self.var[\"order\"].append(name)", "title": "" }, { "docid": "c152eaa143056ccde21ad6c93a891945", "score": "0.5641836", "text": "def variable_names(self):\n _keys = []\n for key in self._dataset.to_dataframe().keys():\n _keys.append(key)\n return _keys", "title": "" }, { "docid": "df2df80eb75d7ab40b1efaabcbcf5d91", 
"score": "0.5635582", "text": "def variables(self) -> Set[str]:\n # Task 7.5.2\n final_set = set()\n self.__collect_vars(final_set, is_variable)\n return final_set", "title": "" }, { "docid": "e2e4e212ff819a6c9f728eacb0289b98", "score": "0.5633014", "text": "def trainable_db_variables(self,mode,for_optimization=False):\n if for_optimization:\n return self.xc.getParamVariables(declare.asMode(mode))\n else:\n return self.xc.getParamHandles(declare.asMode(mode))", "title": "" }, { "docid": "44945ee26f0f0362230cdc361313f2eb", "score": "0.56329465", "text": "def get_variables(self):\n return []", "title": "" }, { "docid": "8274a2fa1d6772025097ba14fa2ee775", "score": "0.5630355", "text": "def collect_model_vars(self, model):\n model_vars = {}\n for var, reporter in self.model_reporters.items():\n model_vars[var] = reporter(model)\n return model_vars", "title": "" }, { "docid": "83a60489dad98d31310a8831ac95a1d3", "score": "0.56292796", "text": "def _get_variables_names(self):\n return [variable.name for variable in self.variables]", "title": "" }, { "docid": "c4d54f995ddae3afd6f365425b2fd31b", "score": "0.56161416", "text": "def list_variables(self):\n return self.var_ids", "title": "" }, { "docid": "98ca1fd190910ac3dcc272e24493fac0", "score": "0.5614597", "text": "def get_inputs(var):\n assert isinstance(var, _mgb.SymbolVar)\n return _mgb._get_owner_opr_inputs(var)", "title": "" }, { "docid": "926fa7387830d94d19591fa44b28dbed", "score": "0.56145644", "text": "def variables(self):\n return set()", "title": "" }, { "docid": "af90a468c39aa881634820ab6a53ef1b", "score": "0.5607275", "text": "def __call__(self , **inputs):\n ret = {}\n for model in self.AllModels:\n ret[model.Name] = model(**inputs)\n return ret", "title": "" }, { "docid": "f6e3f40b3f8c5b86019a581846080320", "score": "0.559127", "text": "def get_model_action_set(env_name):\n return [k for k, v in get_model_to_env_action_dict(env_name).items()]", "title": "" }, { "docid": "e8a442c689f560a3c2fba2eb8e02c295", "score": "0.5587895", "text": "def getVarNames(self, **kwargs):\n return list(self.DVs.keys())", "title": "" }, { "docid": "236cc2ce91729640bc53535621716552", "score": "0.55815864", "text": "def variables(self) -> Mapping[str, str]:\n return pulumi.get(self, \"variables\")", "title": "" }, { "docid": "1eae5e2fdd04b721bc49ddc172bcce59", "score": "0.5554775", "text": "def test_nonstring_var_names():\n mu = TestRandomVariable(\"mu\")\n samples = np.random.randn(10)\n data = dict_to_dataset({mu: samples})\n assert _var_names([mu], data) == [mu]", "title": "" }, { "docid": "2b0ea678d83341676ddceb509f860226", "score": "0.5540112", "text": "def getInputAttrMap(self):\r\n\r\n return self._getAttrMap(self._INPUTS_STR_ATTR_NAME, is_input=True)", "title": "" }, { "docid": "faa642c9092dfdab41011a56f4513cd6", "score": "0.55371", "text": "def get_variables(self):\n return [self.s1_t, self.s2_t]", "title": "" }, { "docid": "c014726464f6e4f1dbdabb9b44b0fe0d", "score": "0.5536976", "text": "def inputs(self) -> Mapping[str, Mapping[int, str]]:\n raise NotImplementedError()", "title": "" }, { "docid": "ad804468c496dc92e474153225604296", "score": "0.55356205", "text": "def vars_to_restore_fn():\n vars_to_restore = {}\n for v in tf.global_variables():\n if is_blacklisted(v.op.name):\n print(v.op.name, 'is blacklisted')\n continue\n if v.op.name.startswith('depth_prediction'):\n name = v.op.name.replace('moving_mean', 'mu')\n name = name.replace('moving_variance', 'sigma')\n vars_to_restore[name[len('depth_prediction') + 1:]] = v\n return 
vars_to_restore", "title": "" }, { "docid": "103b2e090fdc59b4295c7f03d4dade51", "score": "0.55279565", "text": "def _get_variable_mapping(self, seqfile_path):\n LOG.debug(\"Converting variables to magnet names.\")\n variables = self._variables\n mapping = sequence_parser.load_or_parse_variable_mapping(seqfile_path, \"dictionary\")\n\n for order in (\"K0L\", \"K0SL\", \"K1L\", \"K1SL\"):\n if order not in mapping:\n mapping[order] = {}\n\n # check if all variables can be found\n check_var = [var for var in variables\n if all(var not in mapping[order] for order in mapping)]\n if len(check_var) > 0:\n raise ValueError(\"Variables '{:s}' cannot be found in sequence!\".format(\n \", \".join(check_var)\n ))\n\n # drop mapping for unused variables\n [mapping[order].pop(var) for order in mapping for var in mapping[order].keys()\n if var not in variables]\n\n return mapping", "title": "" }, { "docid": "be13139379f80ec4e5144cbf0eb03017", "score": "0.5527576", "text": "def get_variables(self, names):\n pass", "title": "" }, { "docid": "d916b04b43bcc11d8148ca60e0f5c92f", "score": "0.55142236", "text": "def get_trainable_variables(self):\n var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope) \n return var_list", "title": "" }, { "docid": "d916b04b43bcc11d8148ca60e0f5c92f", "score": "0.55142236", "text": "def get_trainable_variables(self):\n var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope) \n return var_list", "title": "" }, { "docid": "593d3d8cbd115b3156b0d3bcdbf9d728", "score": "0.5512296", "text": "def get_variable_names(self):\n return self.variable_names", "title": "" }, { "docid": "8aa51d5318c59fa1099785faca45496d", "score": "0.54932034", "text": "def get_model_parameters_names(self):\n print(self.model.parameter_names_flat().tolist())\n return self.model.parameter_names_flat().tolist()", "title": "" }, { "docid": "1f1ea3d385f5fb1e1ca20f0475356309", "score": "0.5486678", "text": "def get_variables():\n return VARIABLES", "title": "" }, { "docid": "ca7db848fa67357cf63a627dbbffc538", "score": "0.5477875", "text": "def _get_input_elements(self):\n v2e = self._var_to_el\n tw = self._twiss\n\n el_in = dict.fromkeys(v2e.keys())\n for order in el_in:\n el_order = []\n for var in v2e[order]:\n el_order += upper(v2e[order][var].index)\n el_in[order] = tw.loc[list(set(el_order)), \"S\"].sort_values().index.tolist()\n return el_in", "title": "" }, { "docid": "cb42536616a46aa329622e403a25017d", "score": "0.5468677", "text": "def output_variables(self):\n return self.outvarnames", "title": "" }, { "docid": "0f81c2bbdb03aeedc8a9cc74e987a77b", "score": "0.546422", "text": "def variables(self):\n return [\n *self.guess_embedding.variables,\n *self.feedback_embedding.variables,\n *self.lstm.variables,\n *self.dense.variables\n ]", "title": "" }, { "docid": "c0004e1a62abf422a14d725a7f52f11b", "score": "0.54632354", "text": "def mk_variables(self):\n var=copy.deepcopy(self._variables)\n for i in range (0,len(self._variables)):\n var[i]['value']=self.value.get(self._variables[i]['name'])\n return var", "title": "" }, { "docid": "bcbf47098b2d418500b8686a209ec11a", "score": "0.5451104", "text": "def read_variable_map(self) -> Dict[str, Variable]:\n return self.read_variable(VariableType.STRUCT).value", "title": "" }, { "docid": "f6f01c7c8143a5b2dcd9a4f40ac8049e", "score": "0.5442175", "text": "def _var2var_in_file(self, var):\n for dataset_name, dataset in self.config.items():\n if var in dataset['variables'].keys():\n return dataset['variables'][var], dataset_name", 
"title": "" }, { "docid": "ae3f3dd052d4b19bfa14302b2c49d5c6", "score": "0.54353106", "text": "def getVars(self):\n for k, v in self.varList.items():\n print(self.varList[k].get())", "title": "" }, { "docid": "3c8004ce6f609d99963ce8cda083c447", "score": "0.54275507", "text": "def getallparamswith(self,dictofconsts):#use previous\n dictofmatches={} #for each input dict item\n for varvalpair in dictofconsts.iteritems():\n varvalmatch=frozenset([])#[]\n for aparamset in self.runsi.keys():\n if varvalpair in aparamset:\n #varvalmatch=varvalmatch.union([self.runsi[aparamset]])\n varvalmatch=varvalmatch.union([aparamset])\n dictofmatches.update({varvalpair:varvalmatch})\n return dictofmatches", "title": "" }, { "docid": "14f8ee6dbb19463c5daee13b70045d49", "score": "0.5424165", "text": "def get_variables(options):\r\n split_eq = lambda x: x.split('=')\r\n data = dict(map(split_eq, options.D))\r\n return data", "title": "" }, { "docid": "a0f2d43492e1a16a9acd98ba079c7792", "score": "0.54224044", "text": "def get_clim_model_filenames(config, variable):\n model_filenames = {}\n for key, value in config['input_data'].items():\n if value['short_name'] == variable:\n model_filenames[value['dataset']] = key\n return model_filenames", "title": "" }, { "docid": "76960c2e99d73d92460d90cc24938f08", "score": "0.541946", "text": "def collect_vars_from_ineq(ineq):\n assert isinstance(ineq, InEquation)\n _vars = set()\n for jar in ineq.jars:\n _vars.add(jar.var)\n\n return _vars", "title": "" }, { "docid": "9df7a098b6a0c117935f1c84c2544028", "score": "0.54184616", "text": "def variables(self):\n # Task 1.5\n return self.vars", "title": "" }, { "docid": "fd0238068287d5f69fd07e8bfd6258e3", "score": "0.54180723", "text": "def variables(self):\n # Task 1.2\n result = set()\n \"\"\" A recursive function to find the variables \"\"\"\n def rec_func(formula):\n if is_constant(formula.root):\n return\n elif is_variable(formula.root):\n result.add(formula.root)\n elif is_unary(formula.root):\n result.add(rec_func(formula.first))\n else:\n result.add(rec_func(formula.first))\n result.add(rec_func(formula.second))\n rec_func(self)\n if None in result:\n result.remove(None)\n return result", "title": "" }, { "docid": "abc99a35069c9547f6063de9e8508080", "score": "0.5414839", "text": "def _create_model_inputs(self) -> Dict[str, Dict]:\n return {\n x[\"name\"]: {\n \"shape\": tuple(int(i) for i in x[\"shape\"]),\n \"datatype\": x[\"datatype\"],\n }\n for x in self.model_metadata[\"inputs\"]\n }", "title": "" }, { "docid": "5206aadb9840d8c9774b8db77c42284a", "score": "0.54096156", "text": "def variables(self) -> Set[str]:\n return self.set_creator(self.VAR_SET_OPTION)\n # Task 1.2", "title": "" }, { "docid": "c0075b3b8592b3e67fd7893b40a57bcd", "score": "0.5403294", "text": "def appendVarNames(self):\n return self.__env['append'].keys()", "title": "" }, { "docid": "4b474b8d9f4e86e5e63a24a49ccf02ec", "score": "0.5395981", "text": "def GetVariableAndType(var_name, split_by_commponents):\n KratosGlobals = KratosMultiphysics.KratosGlobals\n\n if KratosGlobals.Kernel.HasDoubleVariable(var_name):\n yield {\"name\": var_name, \"value\": KratosGlobals.Kernel.GetDoubleVariable(var_name)}\n elif KratosGlobals.Kernel.HasArrayVariable(var_name):\n if split_by_commponents:\n for component in [\"X\",\"Y\",\"Z\",\"XX\",\"YY\",\"XY\"]:\n if KratosGlobals.Kernel.HasDoubleVariable(var_name+\"_\"+component):\n yield {\"name\": var_name+\"_\"+component, \"value\": KratosGlobals.Kernel.GetDoubleVariable(var_name+\"_\"+component)}\n else:\n yield 
{\"name\": var_name, \"value\": KratosGlobals.Kernel.GetArrayVariable(var_name)}\n elif KratosGlobals.Kernel.HasArray4Variable(var_name):\n if split_by_commponents:\n for component in [\"XX\",\"XY\",\"YX\",\"YY\"]:\n yield {\"name\": var_name+\"_\"+component, \"value\": KratosGlobals.Kernel.GetDoubleVariable(var_name+\"_\"+component)}\n else:\n yield {\"name\": var_name, \"value\": KratosGlobals.Kernel.GetArray4Variable(var_name)}\n elif KratosGlobals.Kernel.HasArray6Variable(var_name):\n if split_by_commponents:\n for component in [\"XX\",\"YY\",\"ZZ\",\"XY\",\"YZ\",\"XZ\"]:\n yield {\"name\": var_name+\"_\"+component, \"value\": KratosGlobals.Kernel.GetDoubleVariable(var_name+\"_\"+component)}\n else:\n yield {\"name\": var_name, \"value\": KratosGlobals.Kernel.GetArray6Variable(var_name)}\n elif KratosGlobals.Kernel.HasArray9Variable(var_name):\n if split_by_commponents:\n for component in [\"XX\",\"XY\",\"XZ\",\"YX\",\"YY\",\"YZ\",\"ZX\",\"ZY\",\"ZZ\"]:\n yield {\"name\": var_name+\"_\"+component, \"value\": KratosGlobals.Kernel.GetDoubleVariable(var_name+\"_\"+component)}\n else:\n yield {\"name\": var_name, \"value\": KratosGlobals.Kernel.GetArray9Variable(var_name)}\n elif KratosGlobals.Kernel.HasBoolVariable(var_name):\n yield {\"name\": var_name, \"value\": KratosGlobals.Kernel.GetBoolVariable(var_name)}\n elif KratosGlobals.Kernel.HasIntVariable(var_name):\n yield {\"name\": var_name, \"value\": KratosGlobals.Kernel.GetIntVariable(var_name)}\n elif KratosGlobals.Kernel.HasUnsignedIntVariable(var_name):\n yield {\"name\": var_name, \"value\": KratosGlobals.Kernel.GetUnsignedIntVariable(var_name)}\n elif KratosGlobals.Kernel.HasVectorVariable(var_name):\n yield {\"name\": var_name, \"value\": KratosGlobals.Kernel.GetVectorVariable(var_name)}\n elif KratosGlobals.Kernel.HasMatrixVariable(var_name):\n yield {\"name\": var_name, \"value\": KratosGlobals.Kernel.GetMatrixVariable(var_name)}\n elif KratosGlobals.Kernel.HasStringVariable(var_name):\n yield {\"name\": var_name, \"value\": KratosGlobals.Kernel.GetStringVariable(var_name)}\n elif KratosGlobals.Kernel.HasFlagsVariable(var_name):\n yield {\"name\": var_name, \"value\": KratosGlobals.Kernel.GetFlagsVariable(var_name)}\n elif KratosGlobals.Kernel.HasVariableData(var_name):\n raise ValueError(\"\\nKernel.GetVariable() ERROR: Variable {0} is defined but is of unsupported type\\n\".format(var_name))\n else:\n raise ValueError(\"\\nKernel.GetVariable() ERROR: Variable {0} is unknown. 
Maybe you need to import the application where it is defined?\\n\".format(var_name))", "title": "" }, { "docid": "6213284ed8860760800bf3bef01c897c", "score": "0.5379231", "text": "def model_variables(price_model, hlcm, elcm, wah_lcm, developer_access_cols):\n return set().union(*[\n price_model.columns_used(),\n hlcm.columns_used(),\n elcm.columns_used(),\n wah_lcm.columns_used(),\n developer_access_cols\n ])", "title": "" }, { "docid": "92c87a0fec4d5e011b5725921a56d929", "score": "0.53748184", "text": "def get_variables(self):\n var_list = []\n\n for layer in self.components:\n var_list.extend(layer.get_variables())\n\n return var_list", "title": "" }, { "docid": "f29274bb9e27f69152fc9b0141582527", "score": "0.537336", "text": "def get_variable_names(cols):\n todels = ['max', 'min', 'ground', 'mean', 'std']\n varnames = []\n for col in cols:\n split_col = col.split('_')\n for todel in todels:\n split_col = list(filter(lambda x : x != todel, split_col))\n varnames.append('_'.join(split_col))\n varnames = list(dict.fromkeys(varnames))\n\n return varnames", "title": "" }, { "docid": "baccdccd0eb4f78b94ea08d72f9b0a3a", "score": "0.53697634", "text": "def get_variables(self):\n return [self.variables[key] for key in sorted(self.variables)]", "title": "" }, { "docid": "0c4f6b51762a360a6a21223c8d03df43", "score": "0.53532946", "text": "def varNames(self):\n return NotImplementedError(\"need varNames property\")", "title": "" }, { "docid": "221dcfeaaeed637ff1628e2bc08be7e9", "score": "0.53392565", "text": "def _get_input_map(self, node):\n return {\n (link[\"GetNode\"], link[\"GetAttr\"]): link[\"SetAttr\"]\n for _, link in self.links.loc[self.links.SetNode == node].iterrows()\n }", "title": "" }, { "docid": "6dc15813136654cfeaf5c5e37b0e74a7", "score": "0.53273714", "text": "def variables(self):\n return self.expression.atoms(Variable)", "title": "" }, { "docid": "190548350986d319bd230264fae331ab", "score": "0.53270787", "text": "def __var_dict(self):\n \n var_list = [self.ayear, self.amonth, self.aday, self.ajday, self.ahour, self.alat,\\\n self.alon, self.alatm12, self.alonm12, self.adir, self.aspeed, self.armw,\\\n self.avmx, self.avmxm12, self.abasin, self.asnum, self.asyear, self.asname,\\\n self.syear, self.sjday, self.smonth, self.sday, self.shour, self.smin, self.ssec, self.slat,\\\n self.slon, self.clat, self.clon,\\\n self.xya_p, self.xya_lon, self.xya_lat,\\\n self.xya_u,self.xya_v,self.xya_t,self.xya_z,\\\n self.xya_slp,self.xya_sclw,self.xya_st,self.xya_su,self.xya_sv,\\\n self.xya_slat, self.xya_slon,\\\n self.swath_time,\\\n self.storm_name,self.basin,self.storm_num,self.year_str,\\\n self.lon0,self.lat0,\\\n self.lon,self.lat,\\\n self.slon,self.slat]\n\n var_names_list = ['ayear', 'amonth', 'aday', 'ajday', 'ahour', 'alat',\\\n 'alon', 'alatm12', 'alonm12', 'adir', 'aspeed', 'armw',\\\n 'avmx', 'avmxm12', 'abasin', 'asnum', 'asyear', 'asname',\\\n 'syear', 'sjday','smonth','sday', 'shour', 'smin', 'ssec', 'slat',\\\n 'slon', 'clat', 'clon',\\\n 'xya_p', 'xya_lon', 'xya_lat',\\\n 'xya_u','xya_v','xya_t','xya_z',\\\n 'xya_slp','xya_sclw','xya_st','xya_su','xya_sv',\\\n 'xya_slat', 'xya_slon',\\\n 'swath_time',\\\n 'storm_name','basin','storm_num','year_str',\\\n 'lon0','lat0',\\\n 'lon','lat',\\\n 'slon','slat']\n\n var_xya_dict = dict(zip(var_names_list,var_list)) \n \n return var_xya_dict", "title": "" }, { "docid": "ee35e928557a4050125127f4687ec618", "score": "0.5311478", "text": "def get_assignment_map_from_checkpoint(tvars, init_checkpoint):\n \n assignment_map = {}\n 
initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n \n if name == 'output_weights:0' or name == 'output_bias:0':\n continue\n \n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n continue\n assignment_map[name] = name_to_variable[name]\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n \n return(assignment_map, initialized_variable_names)", "title": "" }, { "docid": "e2f5351c8965aad2220d31677b409b91", "score": "0.53105736", "text": "def _set_variables(self, inputs):\n \n formatted_column = self._prepare_data_for_ngram_model(inputs)\n self._ngrams = []\n \n for row in formatted_column:\n ngrams_zipped = (ngrams(row, self._n))\n for unziped in ngrams_zipped:\n \n #convert the n-tuple representing a ngram into a string and append this\n #string as a list to our ngrams\n self._ngrams.append([' '.join(list(unziped))])\n \n # compute the frequency distribution of our ngrams\n # and select the most common\n freq_dist = get_freq_dist(self._ngrams)\n self._ngrams = [element[0] for element in freq_dist.most_common(self._num_ngrams)]\n \n # rename each feature dimension according to its corresponding ngram\n self._feature_name = [\"{0}_{1}\".format(self._input_column, ngram) for ngram in self._ngrams]", "title": "" }, { "docid": "2aecde55ef6636b04c990997bd6a36d5", "score": "0.53052497", "text": "def variablename(var):\r\n return [tpl[0] for tpl in filter(lambda x: var is x[1], globals().items())]", "title": "" }, { "docid": "4837f530afb46912a55c2c0d87d21ea5", "score": "0.53009534", "text": "def subs(self):\n return {pymc: param\n for pymc, param in zip(self.transformed, self.param_vars)}", "title": "" }, { "docid": "57bbc2eb8e0def4ef3449410ca1d9876", "score": "0.529575", "text": "def variables(self) -> Set[str]:\n # Task 7.6.2\n if is_equality(self.root) or is_relation(self.root):\n s = set()\n for term in self.arguments:\n s.update(term.variables())\n return s\n elif is_unary(self.root):\n return self.first.variables()\n elif is_binary(self.root):\n return self.first.variables().union(self.second.variables())\n else:\n # quantifier\n return self.predicate.variables().union({self.variable})", "title": "" }, { "docid": "d511b44c9ec4916fa954b3bc966cd7d6", "score": "0.52941597", "text": "def get_assignment_map_from_checkpoint(tvars, init_checkpoint):\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n continue\n assignment_map[name] = name\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n return (assignment_map, initialized_variable_names)", "title": "" }, { "docid": "a36c2614552c8dc15774f7e23ac08e93", "score": "0.52874017", "text": "def rename_vars_in_ckpt(ckpt_path, name_map, output_path):\n reader = pywrap_tensorflow.NewCheckpointReader(ckpt_path)\n var_to_dtype_map = reader.get_variable_to_dtype_map()\n\n var_names = 
sorted(var_to_dtype_map.keys())\n\n sess = tf.Session()\n for var_name in var_names:\n var = reader.get_tensor(var_name)\n dtype = var_to_dtype_map[var_name]\n if var_name in name_map.keys():\n newname = name_map[var_name]\n # modify: newname can be a list!\n if type(newname) is list or type(newname) is tuple:\n for n in newname:\n tf.get_variable(name=n, dtype=dtype, initializer=var)\n else:\n tf.get_variable(name=newname, dtype=dtype, initializer=var)\n else:\n print('Ignoring {}'.format(var_name))\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n saver.save(sess, save_path=output_path)\n sess.close()\n print('Renamed Model Saved')", "title": "" }, { "docid": "65fa5e4d215326a4d927049b248e7798", "score": "0.528098", "text": "def get_assignment_map_from_checkpoint(tvars, init_checkpoint):\n assignment_map = {}\n initialized_variable_names = {}\n\n name_to_variable = collections.OrderedDict()\n for var in tvars:\n name = var.name\n m = re.match(\"^(.*):\\\\d+$\", name)\n if m is not None:\n name = m.group(1)\n name_to_variable[name] = var\n\n init_vars = tf.train.list_variables(init_checkpoint)\n\n assignment_map = collections.OrderedDict()\n for x in init_vars:\n (name, var) = (x[0], x[1])\n if name not in name_to_variable:\n continue\n assignment_map[name] = name\n initialized_variable_names[name] = 1\n initialized_variable_names[name + \":0\"] = 1\n\n return assignment_map, initialized_variable_names", "title": "" }, { "docid": "0cd2e84a1b1dd4df4e80cddde4ab2d3a", "score": "0.5279201", "text": "def get_variables(self):\n return self.vars", "title": "" }, { "docid": "2085fb4cc0348693743e4b10f8654520", "score": "0.5269347", "text": "def get_variables(self):\n return self.name", "title": "" }, { "docid": "183b809b4c1043df874556df122dde41", "score": "0.5264714", "text": "def register_variables(self, model):\n for layer in model.layers:\n for variable in layer.variables:\n self.variables[variable] = {\"m\": 0.0, \"v\": 0.0}", "title": "" }, { "docid": "31aeeca1140b0c1e8528e2261f03e7e0", "score": "0.5262352", "text": "def getVariables(self, sdict):\n if not self.useVariables:\n return []\n\n return [\n # basic variables\n # If you do not use stdVariables, the first variable is not allowed\n # to be a list\n # sensor variables\n # accelerometer\n SVariable(\"Accel_mss\"), # amplitude of [x,y,z]\n SVariable(\"AccelX_mss\"), # amplitude in x-direction\n SVariable(\"AccelY_mss\"), # amplitude in y-direction\n SVariable(\"AccelZ_mss\"), # amplitude in z-direction\n\n # gyroscope\n SVariable(\"Gyro_rads\"),\n SVariable(\"GyroX_rads\"),\n SVariable(\"GyroY_rads\"),\n SVariable(\"GyroZ_rads\"),\n\n # magnetometer\n SVariable(\"Mag_uT\"),\n SVariable(\"MagX_uT\"),\n SVariable(\"MagY_uT\"),\n SVariable(\"MagZ_uT\"),\n\n # temperature\n SVariable(\"Temperature_C\"),\n ]", "title": "" }, { "docid": "89a1dc24f6465966114877b43f63e3ef", "score": "0.526146", "text": "def _get_input_activation_vars(self):\n return self._call_function_on_blobs_within_inputs(\n 'get_activation_vars')", "title": "" }, { "docid": "54a5ff34694bfe4ead609d8a15088ffc", "score": "0.52569497", "text": "def get_model_names(self) -> Iterable[str]:", "title": "" }, { "docid": "23e89e17f8783c95df7438d4a775e5ba", "score": "0.5251709", "text": "def input_keys(self) -> List[str]:\n return self.prompt.input_variables", "title": "" } ]
596e3ab4ff641e249fde2c3008184dfb
Align post title to the Jekyll post name requirements.
[ { "docid": "5a92ae4ebde3914fad22ce99722836f8", "score": "0.0", "text": "def sanitize(str):\n res = str.lower()\n return res.replace(' ', '-')", "title": "" } ]
[ { "docid": "33b47465d97f487c74fc8226c22da8d1", "score": "0.62691516", "text": "def get_title(self, entry):\n title = _('%(title)s (%(word_count)i words)') % \\\n {'title': entry.title, 'word_count': entry.word_count}\n return title", "title": "" }, { "docid": "9a6dce8b964aaee168f530b648ad6536", "score": "0.6145411", "text": "def name_with_title(self):\n return \"%s %s\" % (self.title, self.name)", "title": "" }, { "docid": "477d66727f73dd1fd5a9d31351727163", "score": "0.6134752", "text": "def __str__(self) -> str:\n return textwrap.wrap(self.title, _POST_TITLE_MAX_LENGTH // 4)[0]", "title": "" }, { "docid": "760b9b3af378a38896c5d53dd1adeaff", "score": "0.60511994", "text": "def title(self):\n # Use the first line of the articles text as title, if not title\n # exists.\n title = self._text[:min(32, self._text.find(\"\\n\"))]\n return title", "title": "" }, { "docid": "78b1db68d11c41bf392cc9c7ee8438e0", "score": "0.5950012", "text": "def pretty_title(title):\n output = '-' * 5 + ' ' + title.lower() + ' ' + '-' * 5\n return output", "title": "" }, { "docid": "739e4942de1b9662033b4f90729ae4ec", "score": "0.59054476", "text": "def short_title(self):\r\n return truncate_words(self.title, settings.CAMPAIGN_SHORT_TITLE_WORDS)", "title": "" }, { "docid": "8c7e36d8a12b752169895365607dd9c2", "score": "0.5859179", "text": "def make_title(words):", "title": "" }, { "docid": "905b5407be367959f88483e199692d2d", "score": "0.58460987", "text": "def html_title(self, title=None):\r\n if title is None:\r\n return \"<title>PyBossa</title>\"\r\n else:\r\n return \"<title>PyBossa &middot; %s</title>\" % title", "title": "" }, { "docid": "824f153280a243cd185a3145d599329a", "score": "0.5818391", "text": "def pretty_title(title):\n output = '-' * 5 + ' ' + title + ' ' + '-' * 5\n return output", "title": "" }, { "docid": "96be7c3e630d9eb11fbaae32294f6b65", "score": "0.5798461", "text": "def get_meta_title(self):\n return self.meta_title.replace(\"<title>\", self.title)", "title": "" }, { "docid": "457be5eb7f0058c4326a0b1b24890753", "score": "0.57929593", "text": "def format_title(self, title):\n new_title = ''.join(word.lower().strip('!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ ') for word in title)\n return new_title", "title": "" }, { "docid": "bc9f569247543bef17181a5323c7d226", "score": "0.5791462", "text": "def get_short_name(self):\n split = self.name.split(' - ')\n # author, year, and first couple of words of paper title\n return \"{} ({}), {}\".format(split[0], split[1], \" \".join(split[2].split(' ')[:3]))", "title": "" }, { "docid": "1a9e6b3cc604f0e506423bce54127e64", "score": "0.57873595", "text": "def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)", "title": "" }, { "docid": "4d87e1476d0b739e63ae1ee6ed40e20c", "score": "0.57804185", "text": "def html_title(title):\n return '<center><h1>%s</h1></center>' % (title)", "title": "" }, { "docid": "bad8b730f6f8772fd58846ea333640bf", "score": "0.5761538", "text": "def book_title(title):\n # this will capitalize the first letter of every word\n title = title.title()\n pre_title = []\n pre_title = title.split(\" \")\n new_title = \"\"\n for word in pre_title:\n # If the word is the first word of the title it has to be capitalize\n if word != pre_title[0]:\n # If the word is in the small word list make it lower case\n if word.lower() in small_words:\n word = word.lower()\n new_title = new_title + word + ' '\n# Remove the lagging space \n return new_title.strip()", "title": "" }, { "docid": "2a23a75309acf0c1d9e532cbba308993", 
"score": "0.5758245", "text": "def get_full_name_with_academic_title(self) -> str:\n base_name = super().get_full_name()\n return f'{self.title} {base_name}' if self.title else base_name", "title": "" }, { "docid": "1ab4b4671b5cfff31313125af1c99251", "score": "0.57399654", "text": "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "title": "" }, { "docid": "ec2dfd9f7670e4b928a25ee302e3adc6", "score": "0.57225716", "text": "def normalize_title(self, title):\n return \" \".join(w[0].capitalize() + w[1:] for w in title.split())", "title": "" }, { "docid": "ffd267316a55c424d1201412c0d58a3e", "score": "0.5715547", "text": "def getTitle(pan: str) -> str:\n src = open(pan).read()\n lines = src.split(\"\\n\")\n if len(lines)==0: return \"\"\n t = mark.render(lines[0].strip(\" #\"))\n if t.startswith(\"<p>\"): t = t[3:]\n if t.endswith(\"</p>\"): t = t[:-4]\n return t", "title": "" }, { "docid": "e8965f63fbab69e4533c915c420670e1", "score": "0.5673342", "text": "def get_title():", "title": "" }, { "docid": "36a00f48d2763383cf559408149638bd", "score": "0.5623393", "text": "def setTitle(title, height=48, justify='center', pos='above', offset=0):\n titles = title.split(\"\\n\")[:4]\n i = 1\n for line in titles:\n dislin.titlin(line, i)\n i = i + 1\n dislin.titjus(justdict[justify])\n pdict = {'above':'ABOVE', 'below':'BELOW'}\n dislin.titpos(pdict[pos])\n dislin.htitle(height)\n dislin.vkytit(offset)\n dislin.title()", "title": "" }, { "docid": "320e7a5d7ef7c9baef5d371b26c8ec46", "score": "0.56008154", "text": "def getTitle(self, article):\n \n title = ''\n doc = article.doc\n \n titleElem = Parser.getElementsByTag(doc, tag='title')\n # no title found\n if titleElem is None or len(titleElem) == 0:\n return title\n \n # title elem found\n titleText = Parser.getText(titleElem[0])\n usedDelimeter = False\n \n # split title with |\n if '|' in titleText:\n titleText = self.doTitleSplits(titleText, PIPE_SPLITTER)\n usedDelimeter = True\n \n # split title with -\n if not usedDelimeter and '-' in titleText:\n titleText = self.doTitleSplits(titleText, DASH_SPLITTER)\n usedDelimeter = True\n \n # split title with »\n if not usedDelimeter and u'»' in titleText:\n titleText = self.doTitleSplits(titleText, ARROWS_SPLITTER)\n usedDelimeter = True\n \n # split title with :\n if not usedDelimeter and ':' in titleText:\n titleText = self.doTitleSplits(titleText, COLON_SPLITTER)\n usedDelimeter = True\n \n title = MOTLEY_REPLACEMENT.replaceAll(titleText)\n return title", "title": "" }, { "docid": "daf970891f5e02ed832c0c1bb4116de5", "score": "0.55959", "text": "def inclusive_title(self):\n return self.title + (\" %s\" % (self.episode_to_string(self.latest_season, self.latest_episode),) if self.is_series() else \"\")", "title": "" }, { "docid": "58298b17b6a96df97cffdf06cb282d3d", "score": "0.55569464", "text": "def getTitle(self):\n\n # print(self.soupObject.title.string)\n try:\n s = self.soupObject.find(\"meta\", attrs={\"name\": \"twitter:title\"})\n self.title = str(s['content'])\n self.title = self.title.replace(\"/\", \"\")\n self.title = self.title.strip()\n if not self.title:\n s = int(\"deliberateError\")\n\n # except\n except:\n self.title = \"Amazonsubtitles\"\n\n pass", "title": "" }, { "docid": "4aa83f4ae5142e3e1b988c615d9d797d", "score": "0.5552118", "text": "def name_title(self) 
-> str:\n return self._name_title", "title": "" }, { "docid": "8d113de78b103ec1b5af9a24618e0fa5", "score": "0.5545652", "text": "def get_title(self, article: BeautifulSoup):\n return self.get_text(article, self.parsing_template.title)", "title": "" }, { "docid": "abce20dfc3b9b7ef4888e03fb05837c2", "score": "0.55333966", "text": "def title_all(e_title: str) -> str:\n title_customizations = [\n BibEntryCustomization.title_strip,\n BibEntryCustomization.title_capitalization,\n ]\n for f in title_customizations:\n e_title = f(e_title)\n return e_title", "title": "" }, { "docid": "44f537f2fc43aebccf27fd7b8d89df21", "score": "0.5522261", "text": "def convert_post_title(title):\n post_title = []\n for c in title.lower():\n if not c.isalnum():\n c = c.replace(c, '-')\n post_title.append(c)\n return ''.join(post_title)", "title": "" }, { "docid": "a33f046e55285428e564bfbe4a7004c8", "score": "0.5519164", "text": "def short_title(self):\n if hasattr(self, \"title\"):\n return self.title\n else:\n return \"\"", "title": "" }, { "docid": "7df26574717490fad48655375b5fdd74", "score": "0.5490409", "text": "def get_title(self):\n\n title = ''\n doc = self.article.doc\n\n title_element = self.parser.getElementsByTag(doc, tag='title')\n # no title found\n if title_element is None or len(title_element) == 0:\n return title\n\n # title elem found\n title_text = self.parser.getText(title_element[0])\n used_delimeter = False\n\n # split title with |\n if '|' in title_text:\n title_text = self.split_title(title_text, PIPE_SPLITTER)\n used_delimeter = True\n\n # split title with -\n if not used_delimeter and '-' in title_text:\n title_text = self.split_title(title_text, DASH_SPLITTER)\n used_delimeter = True\n\n # split title with »\n if not used_delimeter and u'»' in title_text:\n title_text = self.split_title(title_text, ARROWS_SPLITTER)\n used_delimeter = True\n\n # split title with :\n if not used_delimeter and ':' in title_text:\n title_text = self.split_title(title_text, COLON_SPLITTER)\n used_delimeter = True\n\n title = MOTLEY_REPLACEMENT.replaceAll(title_text)\n return title", "title": "" }, { "docid": "a3f4ddb416ec0b46caeb7bd5a3f7c1e8", "score": "0.5474231", "text": "def get_title(self):\n title_tag = self.soup.find('title').text\n title_list = string.split(sep='-')\n self.title = title_list[0].strip()", "title": "" }, { "docid": "991bc66466c7790e7a6954a4824ba5fc", "score": "0.5473441", "text": "def _update_title(self, title, tag, lid):\n return title", "title": "" }, { "docid": "dcba501e43353b7dcc476e7a0f6ef154", "score": "0.5460513", "text": "def Title(self, **kwargs):\n full_name = ''\n if self.getFirstname() == '' or self.getLastname() == '':\n if not self.getOrganization():\n return '...'\n else:\n return self.getOrganization()\n format = kwargs.get('format', None)\n if format == 'natural':\n full_name = '%s %s' % (self.getFirstname(), self.getLastname())\n else:\n full_name = '%s %s' % (self.getLastname(), self.getFirstname())\n return '%s' % full_name", "title": "" }, { "docid": "9ce3a64cf5bdf27546f896496dc16226", "score": "0.5435167", "text": "def setTitle(self, meta):\n\n title = ''\n try:\n title += meta['date'] + ' '\n except KeyError:\n pass\n try:\n title += meta['time'] + ' '\n except KeyError:\n pass\n try:\n title += meta['trial']\n except KeyError:\n pass\n\n meta['title'] = title.strip()", "title": "" }, { "docid": "e320adc7e22b8f97d6f6542a96ecc1da", "score": "0.5418165", "text": "def title_string(self):\n return ' '.join(self.title).replace(' - ', '')", "title": "" }, { "docid": 
"508b3647da55be44bab12a4550a9e04c", "score": "0.5414049", "text": "def get_title(self) -> str:\n pass", "title": "" }, { "docid": "afe86feb629b78ac01065db578ec3353", "score": "0.5402452", "text": "def title(self):\n\t\treturn self.page_title", "title": "" }, { "docid": "f369569f3534c1408a98e48a923cafb4", "score": "0.5402337", "text": "def numbered_title(self):\n return f\"{self.title}\"", "title": "" }, { "docid": "f369569f3534c1408a98e48a923cafb4", "score": "0.5402337", "text": "def numbered_title(self):\n return f\"{self.title}\"", "title": "" }, { "docid": "907ad4701d647dcae6e66d5b6bd30188", "score": "0.53753453", "text": "def style_title(self) -> str:\n style_title = \"\"\".title\n {margin-bottom: 10px}\\n\"\"\"\n self.html_doc = self.html_doc + style_title\n return self.html_doc", "title": "" }, { "docid": "6f1230333cc7e6560d1bdd6854f49c1d", "score": "0.537167", "text": "def get_page_title(self):\n title = strip_tags(self.object.subject)\n return u'%s: %s' % (_('Notification'), title[:30] + \"...\")", "title": "" }, { "docid": "9b166a96fa5a7b9cf7c28d1de5f0f831", "score": "0.5370979", "text": "def title(self):\n for cell in self.markdown_cells():\n m = MARKDOWN_HEADER.match(cell.source)\n if m and len(m.group('level')) == 1:\n return m.group('header').strip()\n return None", "title": "" }, { "docid": "e293fdfd3d7a74e33e2ad83a89a6bdb7", "score": "0.5367679", "text": "def get_title(self, article):\r\n\r\n title = ''\r\n doc = article.doc\r\n\r\n title_element = self.parser.getElementsByTag(doc, tag='title')\r\n # no title found\r\n if title_element is None or len(title_element) == 0:\r\n return title\r\n\r\n # title elem found\r\n title_text = self.parser.getText(title_element[0])\r\n used_delimeter = False\r\n\r\n # split title with |\r\n if '|' in title_text:\r\n title_text = self.split_title(title_text, PIPE_SPLITTER)\r\n used_delimeter = True\r\n\r\n # split title with -\r\n if not used_delimeter and '-' in title_text:\r\n title_text = self.split_title(title_text, DASH_SPLITTER)\r\n used_delimeter = True\r\n\r\n # split title with »\r\n if not used_delimeter and u'»' in title_text:\r\n title_text = self.split_title(title_text, ARROWS_SPLITTER)\r\n used_delimeter = True\r\n\r\n # split title with :\r\n if not used_delimeter and ':' in title_text:\r\n title_text = self.split_title(title_text, COLON_SPLITTER)\r\n used_delimeter = True\r\n\r\n title = MOTLEY_REPLACEMENT.replaceAll(title_text)\r\n return title", "title": "" }, { "docid": "0c3174b068803de6c85f8756a403668f", "score": "0.53616554", "text": "def get_title(self):\n title = self.title\n if not title and self.parent_id:\n title = self.parent.title\n return title", "title": "" }, { "docid": "8d9ecdc477c34c7978683d64e60d286f", "score": "0.53586465", "text": "def configured_title(self):\n return self.get('title', self.DEFAULT_SPACE_TITLE)", "title": "" }, { "docid": "067fded23e464308f72b5119479b0005", "score": "0.53576386", "text": "def summary_title(tile_summary):\n return f\"Slide tile_summary.slide_name Tile Summary:\"", "title": "" }, { "docid": "5814c8f8d90c37f2c2c912c6a3d6248e", "score": "0.5353498", "text": "def parsed_title(self):\n return parse_pr_title(self.title)", "title": "" }, { "docid": "d8ebf50f158586107b5a6f9d122640af", "score": "0.5352832", "text": "def create_slug_from_post_title(sender, instance, *args, **kwargs):\n if not instance.slug:\n instance.slug = slugify(instance.title)", "title": "" }, { "docid": "eec62eee15fe5949277f82b79bb93827", "score": "0.534261", "text": "def get_title(self):\n return 
self.metadata['title']", "title": "" }, { "docid": "56bde80b132da547aebe12704001c96b", "score": "0.5339183", "text": "def title(self):\n return ' '.join(self._title)", "title": "" }, { "docid": "09917ab124cded0cc2a278d7ebb28b65", "score": "0.5326086", "text": "def title(self):\n return self.container['title']", "title": "" }, { "docid": "8217f5070a8bbd6938312707b3ffd51a", "score": "0.53225034", "text": "def title(text, level=0):\n return '\\n' + text + '\\n' + '=-~_#%^' [level] * len(text) + '\\n\\n'", "title": "" }, { "docid": "6e8e24c0fbe82bff00edf4a1ef46d360", "score": "0.531927", "text": "def formatName(self):\r\n return self.title.getVal() + \" \" + self.first.getVal() + \" \" + self.last.getVal()", "title": "" }, { "docid": "a876841363baebba36dac3b7a174f611", "score": "0.5307989", "text": "def title(self, title):\n\n self.container['title'] = title", "title": "" }, { "docid": "e63343de7f7044236b2ec2ab5335b91c", "score": "0.53062534", "text": "def title(self):\n return self.definition.title", "title": "" }, { "docid": "4c908f4fe853731150ccc92033be2322", "score": "0.530089", "text": "def _defaultSyncTitle(self):\n return f'{self.grandparentTitle} - {self.parentTitle} - ({self.seasonEpisode}) {self.title}'", "title": "" }, { "docid": "9753f017bd770b96d603d2320458979e", "score": "0.52914995", "text": "def get_descriptive_name(self):\n long_name = f\"{self.make} {self.model} {self.year}\"\n \n return long_name.title()", "title": "" }, { "docid": "0c33fad6e8a0dda79f756c024743cc22", "score": "0.52886105", "text": "def title(self) -> str:\n pass", "title": "" }, { "docid": "fea89710b1f563761941780d6b8f9bd8", "score": "0.5283027", "text": "def ftitle(self, text):\n return \"{} - {}\".format(self._app_name, text)", "title": "" }, { "docid": "90afb619b155ddb3fb172e5ab415f4c7", "score": "0.5280268", "text": "def getStoryTitle(self, source):\n titleStart = source.find('>', source.find('>')+1) + 1\n titleEnd = source.find('</a>')\n title = source[titleStart:titleEnd]\n title = title.lstrip() # Strip trailing whitespace characters.\n return title", "title": "" }, { "docid": "73159650cccc7759a22b4646a73a95a4", "score": "0.5275387", "text": "def title(self) -> str:\n return pulumi.get(self, \"title\")", "title": "" }, { "docid": "73159650cccc7759a22b4646a73a95a4", "score": "0.5275387", "text": "def title(self) -> str:\n return pulumi.get(self, \"title\")", "title": "" }, { "docid": "73159650cccc7759a22b4646a73a95a4", "score": "0.5275387", "text": "def title(self) -> str:\n return pulumi.get(self, \"title\")", "title": "" }, { "docid": "0d6094035923087e7b760f1b55a5286b", "score": "0.5274416", "text": "def format_title(self, data):\n return data", "title": "" }, { "docid": "cdbb17f2eb384b2f684b0aaa5e7e0219", "score": "0.5269374", "text": "def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "title": "" }, { "docid": "be39aeea2219c836ca5697c1a19c3fe2", "score": "0.52658176", "text": "def get_title(self):\n try:\n return self.get_translation().title\n except MissingTranslation:\n return _(\"(No title)\")", "title": "" }, { "docid": "54dc81561c099081898bb263f38bb2d4", "score": "0.52569556", "text": "def _parse_title(self, item):\n title_str = \" \".join(item.css(\"td:first-child 
*::text\").extract()).strip()\n content_match = re.search(r\"(?<=\\().*(?=\\))\", title_str)\n if not content_match:\n return \"Advisory Board\"\n return content_match.group().title()", "title": "" }, { "docid": "0b1fd98bd87f5e89c8535c6c8833cd5c", "score": "0.5254468", "text": "def get_descriptive_name(self):\n long_name = f\"{self.year} {self.make} {self.model}\"\n return long_name.title()", "title": "" }, { "docid": "4d074f6f5b0e8f3bb9c9e42875b879d6", "score": "0.52537996", "text": "def _extract_title(self) -> str:\n soup = BeautifulSoup(self.content.content, self._parser)\n soup_title = soup.title\n if soup_title is None or len(soup_title.text) == 0 \\\n or '|' not in soup_title.text:\n title = \"\"\n else:\n title = soup.title.text.split('|')[1]\n return self._clean_title(title)", "title": "" }, { "docid": "286eda18e8706723aa3d8095b13a6550", "score": "0.52520955", "text": "def slugify(self, space='-'):\n #strtitle = unicode(self.title)\n strtitle = self.title\n import unicodedata\n import re\n strtitle = unicodedata.normalize('NFKD', strtitle).encode('ascii', 'ignore')\n strtitle = unicode(re.sub('[^\\w\\s-]', '', strtitle).strip().lower())\n return re.sub('[-\\s]+', space, strtitle)", "title": "" }, { "docid": "a33eb16e6b40fb8e34d9782de486cba6", "score": "0.52385473", "text": "def _normalize_title(self, article_title):\n morph = pymorphy2.MorphAnalyzer()\n title_words = article_title.lower().strip(\n string.punctuation + \"«»\"\n ).split(' ')\n title_morphs = (morph.parse(word)[0] for word in title_words)\n title_nouns = filter(\n lambda word_morph: 'NOUN' in word_morph.tag, title_morphs\n )\n return collections.Counter(\n map(lambda noun: noun.normal_form, title_nouns)\n )", "title": "" }, { "docid": "c72626185cf9a25c299e4c192a7b3a20", "score": "0.5233773", "text": "def _get_title_tag(self, item):\n tag = '<{heading}><a href=\"{href}\">{title}</a></{heading}>'\n if self._field_is_visible(\"title\"):\n tile_conf = self.get_tile_configuration()\n title_conf = tile_conf.get(\"title\", None)\n if title_conf:\n heading = title_conf.get(\"htmltag\", \"h2\")\n href = item.absolute_url()\n title = item.Title()\n return tag.format(heading=heading, href=href, title=title)", "title": "" }, { "docid": "4853cc248a3933bb77357e87d6dbe28b", "score": "0.5233042", "text": "def get_title(article):\n title = article.find(\"div\", class_=\"col-sm-6 product_main\").h1.text\n return title", "title": "" }, { "docid": "f24e299bcbad4b2a94030acd240d76bd", "score": "0.5222651", "text": "def titleize(title):\n titleized = []\n for idx, word in enumerate(title.split()):\n if idx == 0 or word not in ['a', 'of', 'in', 'the', 'v']:\n word = word.capitalize()\n\n titleized.append(word)\n\n return ' '.join(titleized)", "title": "" }, { "docid": "8e1bd5fe7a1d4e377d294e92b4d4e283", "score": "0.52167165", "text": "def inject_title(self,template,title):\n return re.sub('TITLE',title,template)", "title": "" }, { "docid": "73670a0fb377375d0367dbe75f096cec", "score": "0.5208389", "text": "def _parse_title(self, links):\n for link in links:\n if \"hearing\" in link[\"title\"].lower():\n return link[\"title\"].replace(\"Notice\", \"\").strip()\n if \"special\" in link[\"title\"].lower():\n return \"Special Meeting\"\n return \"Illinois Medical District Commission\"", "title": "" }, { "docid": "119f55baec970d5eba94d34a8ede94ec", "score": "0.5207907", "text": "async def title(self):\n if not hasattr(self, \"_title\"):\n self._title = await Stack.fetch_stack_value(self, \"http://purl.org/dc/terms/title\", await self.uuid)\n 
return self._title", "title": "" }, { "docid": "6e7fd9d2e05144f1037327630152fdbf", "score": "0.5207695", "text": "def get_title(self):\n return self.title", "title": "" }, { "docid": "6e7fd9d2e05144f1037327630152fdbf", "score": "0.5207695", "text": "def get_title(self):\n return self.title", "title": "" }, { "docid": "6e7fd9d2e05144f1037327630152fdbf", "score": "0.5207695", "text": "def get_title(self):\n return self.title", "title": "" }, { "docid": "88a8b9adc9f42b492e06d158cb1be1bb", "score": "0.52040815", "text": "def dc_title(self):\n return u\"{0} ({1}): {2} {3}\".format(\n self.label, self.in_assessment[0].timepoint,\n self.subjects[0].code_in_study,\n \"...\" if len(self.subjects) > 1 else \"\")", "title": "" }, { "docid": "0a682325378b5ffabc0faa182b83563f", "score": "0.52035433", "text": "def title(self):\n return self.get(self._names[\"title\"])", "title": "" }, { "docid": "0a682325378b5ffabc0faa182b83563f", "score": "0.52035433", "text": "def title(self):\n return self.get(self._names[\"title\"])", "title": "" }, { "docid": "45339b76a14d47492c4e0b371899591a", "score": "0.520295", "text": "def normalise_title(title):\n normalised = title.lower()\n if normalised.startswith('the '):\n normalised = normalised[4:]\n normalised = re.sub('[^a-z ]', '', normalised)\n normalised = re.sub(' +', ' ', normalised)\n normalised = normalised.replace(' the ', ' ')\n return normalised", "title": "" }, { "docid": "587cc3c1a840b92a45f20cb5612ee37d", "score": "0.5200608", "text": "def title(self):\n return self['title']", "title": "" }, { "docid": "6bac08c2ec9284ccb8286a0e75f7c8ba", "score": "0.5196368", "text": "def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.", "title": "" }, { "docid": "e254736812c6f56d855446c19ee94d94", "score": "0.5195053", "text": "def TitlePrint(title):\n titleLength = len(title)\n barLength = titleLength + 12\n fmtdTitle = '----- {0} -----'.format(title)\n bar = '-' * barLength\n print(bar, fmtdTitle, bar,\n sep='\\n', end='\\n\\n')", "title": "" }, { "docid": "8825a2fffa4e877adc252fee9784c786", "score": "0.5190451", "text": "def get_title(nb: Notebook) -> Optional[str]:\n # loop through all cells\n for cell in nb.cells:\n # only consider markdown\n if cell[\"cell_type\"] == \"markdown\":\n # grab source\n text = cell[\"source\"][0]\n # loop through lines\n for line in text.split(\"\\n\"):\n # if finding a level-1 heading, consider it a title\n if line.startswith(\"# \"):\n title = line.lstrip(\"#\").strip(\" .\").strip(\"\\n\")\n return title\n return None", "title": "" }, { "docid": "ce539dda793275d0fb6f1d71373087e0", "score": "0.51882", "text": "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "title": "" }, { "docid": "ce539dda793275d0fb6f1d71373087e0", "score": "0.51882", "text": "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "title": "" }, { "docid": "ce539dda793275d0fb6f1d71373087e0", "score": "0.51882", "text": "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "title": "" }, { "docid": "ce539dda793275d0fb6f1d71373087e0", "score": "0.51882", "text": "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "title": "" }, { "docid": "ce539dda793275d0fb6f1d71373087e0", "score": "0.51882", "text": "def get_descriptive_name(self):\n long_name = 
str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "title": "" }, { "docid": "ce539dda793275d0fb6f1d71373087e0", "score": "0.51882", "text": "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "title": "" }, { "docid": "ce539dda793275d0fb6f1d71373087e0", "score": "0.51882", "text": "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "title": "" }, { "docid": "ce539dda793275d0fb6f1d71373087e0", "score": "0.51882", "text": "def get_descriptive_name(self):\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\n return long_name.title()", "title": "" }, { "docid": "599f9d297fac61d796f31d16ca97dac2", "score": "0.51880556", "text": "def namify(title):\n name = title.replace(\" \",\"_\") # replace spaces with underscores\n name = re.sub(r'\\W+', '', name) # strip non-alphanumeric chars (allows '_')\n return name.lower()", "title": "" }, { "docid": "b8a15691e662056d6601015840a35ee1", "score": "0.51871526", "text": "def title(self):\n return self.metadata.get('title')", "title": "" }, { "docid": "737446baabce23d670db8e05edbf8834", "score": "0.51858157", "text": "def unify_field_title(title: str):\n\n # Convert to lowercase\n prep_title = title.lower()\n # Now remove all these unneeded ugly symbols\n prep_title = re.sub('[.,:;\\|/\\{\\}\\(\\)\\[\\]\\'\\\"\\+]','', prep_title)\n # Replace multiple whitespaces with one\n prep_title = re.sub(' +', ' ', prep_title)\n\n # The real title is now the replacement of dashes with whitespace\n real_title = prep_title.title()\n # The unified title is the removal of the dashes and whitespace\n unified_title = re.sub('[ \\-]','', prep_title)\n trimmed_unified_title = unified_title[:150]\n return trimmed_unified_title, real_title", "title": "" }, { "docid": "dc128e40e895d774b5e47f99af9d8e97", "score": "0.5185799", "text": "def get_descriptive_name(self):\r\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\r\n return long_name.title()", "title": "" }, { "docid": "c5cb30f9136d970435bdfec874d040cf", "score": "0.51693285", "text": "def title(self, title):\n\t\tself.head += '<title>' + title + '</title>\\n'", "title": "" } ]
55aaa4dd8d5d0246f24c4c30303c69ca
Return True if entity is available.
[ { "docid": "9834f7b54461beea8e74ee1e5e1e64fe", "score": "0.0", "text": "def available(self) -> bool:\n return (\n super().available\n and self.serial_number in self.coordinator.data[\"climate\"]\n and \"temperature\" in self.coordinator.data[\"climate\"][self.serial_number]\n )", "title": "" } ]
[ { "docid": "1c89ccd3cdc881324348895730d33496", "score": "0.7533917", "text": "def available(self):\n return self.fetch.data is not None", "title": "" }, { "docid": "68b280265c7e1a24bcec0f5e12030eca", "score": "0.7303804", "text": "def available(self) -> bool:\n return self._handle(\"available\")", "title": "" }, { "docid": "ab619967c61f651c2defc95eb389caff", "score": "0.72486687", "text": "def available(self):\n return True", "title": "" }, { "docid": "ab619967c61f651c2defc95eb389caff", "score": "0.72486687", "text": "def available(self):\n return True", "title": "" }, { "docid": "ab619967c61f651c2defc95eb389caff", "score": "0.72486687", "text": "def available(self):\n return True", "title": "" }, { "docid": "cdc7248b5dcee82f06b5b4ccb43db939", "score": "0.7247748", "text": "def isAvailable(self, _):\n return True", "title": "" }, { "docid": "162cb4e102f8d0f4e2df9290f3f8c714", "score": "0.7227121", "text": "def available(self):\n return True", "title": "" }, { "docid": "4ae3dbfdb1ffad5e0b708e6c6bbc18a6", "score": "0.71567917", "text": "def is_available(self):\r\n\t\treturn self.available", "title": "" }, { "docid": "4ae3dbfdb1ffad5e0b708e6c6bbc18a6", "score": "0.71567917", "text": "def is_available(self):\r\n\t\treturn self.available", "title": "" }, { "docid": "2abfae82e7321e43247ebc7491681d28", "score": "0.711709", "text": "def available(self) -> bool:\n return self._name in self._fetcher._state # type: ignore", "title": "" }, { "docid": "1fe87102db61a7f08276d667be245f9f", "score": "0.7114685", "text": "def available(self):\n return False", "title": "" }, { "docid": "c5a6c0886e41d08d14f29d4a3ae6273f", "score": "0.7102069", "text": "def available(self) -> bool:\n return self._api.available", "title": "" }, { "docid": "f93ec6bb8e766f27ecaae48c55742b17", "score": "0.7089643", "text": "def async_available(self, entity_id: str) -> bool:\n entity_id = entity_id.lower()\n return entity_id not in self._states and entity_id not in self._reservations", "title": "" }, { "docid": "2a0ea9be5bbfa2f7e19367b515c3e66e", "score": "0.70734996", "text": "def available(self) -> bool:\n return self._device.status == \"AVAILABLE\"", "title": "" }, { "docid": "e93e17173cf8461d5c80a2d4c3d3fdbf", "score": "0.7073167", "text": "def available(self):\n return self.is_available", "title": "" }, { "docid": "e93e17173cf8461d5c80a2d4c3d3fdbf", "score": "0.7073167", "text": "def available(self):\n return self.is_available", "title": "" }, { "docid": "e93e17173cf8461d5c80a2d4c3d3fdbf", "score": "0.7073167", "text": "def available(self):\n return self.is_available", "title": "" }, { "docid": "7460343f3e07522641c884bd5c35bd38", "score": "0.70694166", "text": "def available(self):\n if self.articles():\n return True\n return False", "title": "" }, { "docid": "e64f3f5070e5f1f57b5e164b4dc4af4f", "score": "0.70312834", "text": "def available(self) -> bool:\n return self.api.available", "title": "" }, { "docid": "71d6736679d186b0829e2b86140a89b9", "score": "0.70150876", "text": "def available(self) -> bool:\n return self._available", "title": "" }, { "docid": "71d6736679d186b0829e2b86140a89b9", "score": "0.70150876", "text": "def available(self) -> bool:\n return self._available", "title": "" }, { "docid": "71d6736679d186b0829e2b86140a89b9", "score": "0.70150876", "text": "def available(self) -> bool:\n return self._available", "title": "" }, { "docid": "71d6736679d186b0829e2b86140a89b9", "score": "0.70150876", "text": "def available(self) -> bool:\n return self._available", "title": "" }, { "docid": 
"a719f8d1ccd77115bac06c0e40bd51cf", "score": "0.70117986", "text": "def is_entity_active(self):\n return True", "title": "" }, { "docid": "91606ee2d9416b7624c783451c615b5f", "score": "0.6989233", "text": "def available(self) -> bool:\n return self._device.available", "title": "" }, { "docid": "ac9fc4f8ae47836fa9bf5387b1023e7c", "score": "0.69568115", "text": "def async_check_if_available(self) -> bool:\n if self.alarm:\n return True\n\n _LOGGER.debug(\"%s has been deleted\", self.entity_id)\n\n entity_registry = er.async_get(self.hass)\n if entity_registry.async_get(self.entity_id):\n entity_registry.async_remove(self.entity_id)\n\n return False", "title": "" }, { "docid": "69b0efe17808a5a1359adf537ebd97f7", "score": "0.69134575", "text": "def exists(self, entity):\n return self._exists(entity)", "title": "" }, { "docid": "fe564e869e3d579b218750705a022db0", "score": "0.69091296", "text": "def available(self) -> bool:\n return self._bridge.api.available", "title": "" }, { "docid": "d9c95294043d9834144bf2583a34a748", "score": "0.6906465", "text": "def available(self) -> bool:\n return self._state is not None", "title": "" }, { "docid": "57bd3c1591ece555052a60f94426e830", "score": "0.687745", "text": "def available(self):\n return bool(self.data)", "title": "" }, { "docid": "fbc4ef0ec9a35d36dd7b193c9742062c", "score": "0.6870258", "text": "def is_available(self):\n\n if self.available >= 1:\n return True\n else:\n return False", "title": "" }, { "docid": "38fe99233d24a244de6f6b766b96aa0f", "score": "0.6860529", "text": "def isAvailable(self):\n return self.path is not None", "title": "" }, { "docid": "7c3bf1aea5b7141749613788b6558d6e", "score": "0.68445843", "text": "def available(self) -> bool:\n return (\n self.coordinator.get_first(\n \"$.smartPlugs[?(@.deviceLabel == '%s')]\", self._device_label\n )\n is not None\n )", "title": "" }, { "docid": "5193d91e587c2d14e7775cb6776044ca", "score": "0.68071765", "text": "def available(self):\n return self._state is not None", "title": "" }, { "docid": "5193d91e587c2d14e7775cb6776044ca", "score": "0.68071765", "text": "def available(self):\n return self._state is not None", "title": "" }, { "docid": "c328e1eb34ec0ce9ce65a21a5c49bb63", "score": "0.6799699", "text": "def available(self) -> bool:\n return self._available and self._heos.connection_state == const.STATE_CONNECTED", "title": "" }, { "docid": "006c64aeac3413e3534616328693fc5c", "score": "0.6752848", "text": "def entity_exists_in_hass(hass, entity_id):\r\n if hass.states.get(entity_id) is None:\r\n return False\r\n else:\r\n return True", "title": "" }, { "docid": "06b49456f6643635a0b7312e08f779cf", "score": "0.6726407", "text": "def isAvailable(cls):\n return os.path.exists(settings.GOVERNOR_INTERFACE)", "title": "" }, { "docid": "fc31fb4314ebadf202bfddef3b7ba15a", "score": "0.67237914", "text": "def available(self):\n try:\n self.read()\n return True\n except IOError:\n return False", "title": "" }, { "docid": "0dba2c9ee11b90b5c18200963c4763bf", "score": "0.670637", "text": "def available(self) -> bool:\n return super().available and self._diffuser.hub_data[STATUS] == AVAILABLE_STATE", "title": "" }, { "docid": "4591370b1d10721404c704ea621c2bdd", "score": "0.6703902", "text": "def available(self) -> bool:\n return self.coordinator.last_update_success and self.module.reachable", "title": "" }, { "docid": "0e92bf8ff5925b2f2756fd1e7f06103b", "score": "0.67000103", "text": "def available(self) -> bool | None:\n if self.update_manager is None:\n return False\n return 
self.update_manager.available", "title": "" }, { "docid": "0cff0078dd20dce9928428a69b1b7e08", "score": "0.6673949", "text": "def available(self):\n return bool(self.airly.data)", "title": "" }, { "docid": "2784678456bec217fb4e560432054b5c", "score": "0.6668475", "text": "def available(self) -> bool:\n return self._client.connected", "title": "" }, { "docid": "f672c7d9a1516a293c0297a5fbf4aa9c", "score": "0.6631547", "text": "def available(self):\n if self.articles():\n return False\n return self.context.salable or self.view.adapter().articles(salable=True)", "title": "" }, { "docid": "5239e4ae67053991dd4ef6cded036891", "score": "0.6608031", "text": "def is_on(self) -> bool:\n return self.device.status.get(self.entity_description.key, False)", "title": "" }, { "docid": "97abeb77b0560e63404e928d7654bcca", "score": "0.6602508", "text": "def available(self):\n return self._api.available", "title": "" }, { "docid": "97abeb77b0560e63404e928d7654bcca", "score": "0.6602508", "text": "def available(self):\n return self._api.available", "title": "" }, { "docid": "6f2d910f9985df1a8cd1524d36de1fde", "score": "0.6602249", "text": "def available(self) -> bool:\n return super().available and self.coordinator.data.get(\n self._sensor_type[\"device_value_key\"]\n )", "title": "" }, { "docid": "b7702171b314dfc689081325848ae336", "score": "0.6590265", "text": "def available(self) -> bool:\n return self._available and self.coordinator.last_update_success", "title": "" }, { "docid": "a3235d7c968f5783ae2ba0e15e89edf1", "score": "0.65862566", "text": "def exists(self):\n try:\n self.get_metadata()\n return True\n except Exception as e:\n return False", "title": "" }, { "docid": "06ec79d295cadda43612c056fe0b0b86", "score": "0.6556873", "text": "def __resource_allocated(self):\n if self.resource:\n return True\n else:\n self.logger.prn_err(\"FastModel resource not available!\")\n return False", "title": "" }, { "docid": "bb9d269056ab1b13edf5953529b6c8dd", "score": "0.6529856", "text": "def available(self) -> bool:\n if not self._device.lost_connection:\n return True\n\n return False", "title": "" }, { "docid": "fbd28a46016913ce2cdd3ff3cf6eeb1a", "score": "0.6487512", "text": "def is_on(self):\n state = self.device.states.get(self.entity_description.key)\n\n if not state:\n return None\n\n return self.entity_description.is_on(state.value)", "title": "" }, { "docid": "c61cbd7bd15c1103f9df6c1569d8e299", "score": "0.6484298", "text": "def available(self) -> bool:\n return self._aircon.get_online()", "title": "" }, { "docid": "e4d2f2b8c91677a2fd8acb095e1b2e39", "score": "0.647968", "text": "def available(self) -> bool:\n return self._device.system_availability", "title": "" }, { "docid": "3d3dff564051bb5fa330566e2ed2019e", "score": "0.64755464", "text": "def available(self) -> bool:\n return (\n super().available\n and self.serial_number in self.coordinator.data[\"mice\"]\n and \"detections\" in self.coordinator.data[\"mice\"][self.serial_number]\n )", "title": "" }, { "docid": "9188ae56173d90940fd23707a35f5b6b", "score": "0.6467282", "text": "def available(self):\n return self._value is not None", "title": "" }, { "docid": "781eb7e10ac17f19cd684bac870a1610", "score": "0.64665717", "text": "def is_valid_entity(self):\n pass", "title": "" }, { "docid": "59465284dfd6109da6109414e5f3a4dc", "score": "0.64633524", "text": "def available(self) -> bool:\n no_recent_updates = self._timers['statusUpdated'] < datetime.now() - \\\n self._params[CONF_SCAN_INTERVAL] * 3.1\n\n if no_recent_updates:\n # unavailable because no 
successful update()s (but why?)\n self._available = False\n debug_code = '0x01'\n\n elif not self._status: # self._status == {}\n # unavailable because no status (but how? other than at startup?)\n self._available = False\n debug_code = '0x02'\n\n elif self._status and (self._type & EVO_CHILD):\n # (un)available because (web site via) client api says so\n self._available = \\\n bool(self._status['temperatureStatus']['isAvailable'])\n debug_code = '0x03' # only used if above is False\n\n else: # is available\n self._available = True\n\n if not self._available and \\\n self._timers['statusUpdated'] != datetime.min:\n # this isn't the first (un)available (i.e. after STARTUP)\n _LOGGER.warning(\n \"available(%s) = %s (debug code %s), \"\n \"self._status = %s, self._timers = %s\",\n self._id,\n self._available,\n debug_code,\n self._status,\n self._timers\n )\n\n# _LOGGER.debug(\"available(%s) = %s\", self._id, self._available) # noqa: E501; pylint: disable=line-too-long; ZXDEL\n return self._available", "title": "" }, { "docid": "24aa78254896e94d263833ee49c9d76b", "score": "0.6441498", "text": "def is_available(self, **kwargs: Any) -> bool:\n ...", "title": "" }, { "docid": "35aae8e8b681edcb966ad743a16a3fa5", "score": "0.6414341", "text": "def available(self) -> bool:\n return self._coordinator.last_update_success", "title": "" }, { "docid": "fe15fd77a76e164004bd14221a8f1a53", "score": "0.63948953", "text": "def exists(cls) -> bool:\n return cls._instance is not None", "title": "" }, { "docid": "dc76d71e5f78c5c164cefab4ca2e545f", "score": "0.638815", "text": "def available(self):\n if self.view.cart_articles():\n return True\n else:\n return False", "title": "" }, { "docid": "a25041c86335b4b88c4ca9c267ba4802", "score": "0.6384411", "text": "def is_on(self):\n return self._entity.get(ENTITY_STATE)", "title": "" }, { "docid": "3419628a5f26f38c4d4b126cf10041fe", "score": "0.63482344", "text": "def available(self):\n return self._inner.available()", "title": "" }, { "docid": "46003f8432837be83d73a6b78faae609", "score": "0.6341935", "text": "def is_available(cls) -> bool:\n if cls._stop:\n return False\n\n list_index = cls._current_readings_list_index\n if len(cls._readings_lists[list_index]) < cls._readings_list_size:\n return True\n\n if cls._max_concurrent_readings_inserts > 1:\n for list_index in range(cls._max_concurrent_readings_inserts):\n if len(cls._readings_lists[list_index]) < cls._readings_list_size:\n cls._current_readings_list_index = list_index\n return True\n\n _LOGGER.warning('The ingest service is unavailable %s', list_index)\n return False", "title": "" }, { "docid": "a49a768444a47eded2a48e884f48ef6c", "score": "0.63274956", "text": "def is_available(self, cmd):\n return IBMBackend().is_available(cmd)", "title": "" }, { "docid": "e4683a61ed65eaf321a9c5de9b3c810c", "score": "0.6318569", "text": "def is_on(self) -> bool:\n encharge_inventory = self.data.encharge_inventory\n assert encharge_inventory is not None\n return self.entity_description.value_fn(encharge_inventory[self._serial_number])", "title": "" }, { "docid": "d2fab89f2d3ed7062c6589f484d8a77d", "score": "0.6310407", "text": "def isEntityExists(self, domainId, entityId):\n pass", "title": "" }, { "docid": "91f17dd8a54d9830606de2b4e08cb270", "score": "0.6300883", "text": "def available(self):\n return self._available", "title": "" }, { "docid": "91f17dd8a54d9830606de2b4e08cb270", "score": "0.6300883", "text": "def available(self):\n return self._available", "title": "" }, { "docid": "91f17dd8a54d9830606de2b4e08cb270", 
"score": "0.6300883", "text": "def available(self):\n return self._available", "title": "" }, { "docid": "91f17dd8a54d9830606de2b4e08cb270", "score": "0.6300883", "text": "def available(self):\n return self._available", "title": "" }, { "docid": "91f17dd8a54d9830606de2b4e08cb270", "score": "0.6300883", "text": "def available(self):\n return self._available", "title": "" }, { "docid": "91f17dd8a54d9830606de2b4e08cb270", "score": "0.6300883", "text": "def available(self):\n return self._available", "title": "" }, { "docid": "91f17dd8a54d9830606de2b4e08cb270", "score": "0.6297995", "text": "def available(self):\n return self._available", "title": "" }, { "docid": "a7bf8ad4830e15b4e83e4f43ed6bdce7", "score": "0.62979496", "text": "def exists(self):\n return True", "title": "" }, { "docid": "1b9c86e4439d0e5c8be8c9118dc84c18", "score": "0.6279232", "text": "def available(self) -> bool:\n return self._bridge.available(CONF_CHANNEL, self._area, self._channel)", "title": "" }, { "docid": "d3d2d697bba62947d8c0e5b983b895ff", "score": "0.6278819", "text": "def available(self):\n if self.view.shopping_site().cart_articles():\n return True\n else:\n return False", "title": "" }, { "docid": "e9fdf9f6220486e3bea1d66fe857ffdf", "score": "0.62766755", "text": "def is_used(self) -> bool:\n return self._status is BellResourceStatus.USED", "title": "" }, { "docid": "f89be27fa04fb0e8a18bda542ff877de", "score": "0.6270385", "text": "def is_available(self):\n return self.current_loan_key() == None", "title": "" }, { "docid": "90e6b0c4574134ca31889efbbb84bbc0", "score": "0.62610835", "text": "def available(self) -> bool:\n return (\n super().available\n and self.serial_number in self.coordinator.data[\"climate\"]\n and \"humidity\" in self.coordinator.data[\"climate\"][self.serial_number]\n )", "title": "" }, { "docid": "97fcdc025118f285945858977cbfb92f", "score": "0.624986", "text": "def available(self) -> bool:\n if self.coordinator.data is None:\n return False\n\n if not self.coordinator.data[KEY_GATEWAY][ATTR_AVAILABLE]:\n return False\n\n return self.coordinator.data[self._blind.mac][ATTR_AVAILABLE]", "title": "" }, { "docid": "1d5169c2f5fc25e9166084158279121e", "score": "0.6242894", "text": "def is_entity(n):\n if n.type in person_model.entities:\n return True\n else:\n return False", "title": "" }, { "docid": "41189c86ef28afcb0096fc2eca002846", "score": "0.62426454", "text": "def is_entity(n):\r\n if n.type in person_model.entities:\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "6794e08fd1fa8124267022c2938bc797", "score": "0.6235371", "text": "def __nonzero__(self):\n if self._cluster_list or self._client_list:\n return True\n return False\n # False means this EquipmentProvider is empty; no equipment has\n # been loaded yet", "title": "" }, { "docid": "b431ab6f75fd3c73bebcd0dde080a950", "score": "0.62226486", "text": "def available(self):\n return self._sensor_type == BINARY_SENSOR_ONLINE or self._api.available", "title": "" }, { "docid": "29ec1b9b3f6f20e2cfc9568e317e71d7", "score": "0.6213804", "text": "def can_execute(self, entity):\n return self.bucket > 0", "title": "" }, { "docid": "5f8069e4ef180750e7999e1fe85c6c36", "score": "0.62109756", "text": "def available(self) -> bool:\n return self._static_info.is_status_binary_sensor or super().available", "title": "" }, { "docid": "5832212004b6b736eeb78afa9c27a397", "score": "0.62043166", "text": "def is_provider_available(cls) -> bool:\n return lxd.is_installed()", "title": "" }, { "docid": "9258fecfef7dff396fe8a941a9b2a254", 
"score": "0.6203311", "text": "def _Exists(self):\n _, _, retcode = self.DescribeInstance()\n return retcode == 0", "title": "" }, { "docid": "9635355785fed9ccd5cedc8352595a7b", "score": "0.6190033", "text": "def is_using(self) -> bool:\n return self._status is BellResourceStatus.USING", "title": "" }, { "docid": "7de137ae82412ff15ea70c0417fce9ad", "score": "0.618699", "text": "def exists(self):\n return self.itemData().exists", "title": "" }, { "docid": "0ba08b4b8460f77783ecff295a7b214d", "score": "0.61843324", "text": "def available(self) -> bool:\n return super().available and self._diffuser.is_online", "title": "" }, { "docid": "5ddbe506b419655fd430e35dadf32fde", "score": "0.6179291", "text": "def available(self):\n return self._data.available", "title": "" }, { "docid": "d36cfcd774d859b11da9d771edeb2cd2", "score": "0.6176462", "text": "def isPresent(self) -> bool:\n return self._item is not None", "title": "" }, { "docid": "8d34967749af47fc5cc7a665b5e62146", "score": "0.6166746", "text": "def available(self) -> bool:\n return self._ctrl.connected()", "title": "" }, { "docid": "a78a4aa1e9141c8f14a3ce4a4bb68338", "score": "0.6161179", "text": "def checkAvailable():\n # TODO check for PSUADE\n return True", "title": "" }, { "docid": "6add599b6b7e89e243e2cda670e9cc47", "score": "0.61600477", "text": "def available(self):\n\n return self._available", "title": "" }, { "docid": "19002011ad63e90f2fa68fbbb94c9868", "score": "0.6151681", "text": "def is_available(self):\n\n return self.games_available > 0", "title": "" } ]
b1a1f78c1400329b156016f857d80ba4
MarkovNetwork.test_update_input_states() with invalid input
[ { "docid": "6f2c1d384b9725b8379bfd720da2b0eb", "score": "0.8879466", "text": "def test_update_input_states_invalid_input():\n np.random.seed(98342)\n test_mn = MarkovNetwork(2, 4, 2)\n try:\n test_mn.update_input_states([1, 1, 0])\n except Exception as e:\n assert type(e) is ValueError", "title": "" } ]
[ { "docid": "417db7ac04437880cc0294f26acee2ef", "score": "0.89389145", "text": "def test_update_input_states_bad_input():\n np.random.seed(98342)\n test_mn = MarkovNetwork(2, 4, 2)\n test_mn.update_input_states([-7, 2])\n assert np.all(test_mn.states[:2] == np.array([1, 1]))", "title": "" }, { "docid": "0e5ca1c75e36e2eab21890e433016dc3", "score": "0.83010876", "text": "def test_get_output_states_bad_input():\n np.random.seed(32480)\n test_mn = MarkovNetwork(2, 4, 2)\n test_mn.update_input_states([-7, 2])\n test_mn.activate_network()\n assert np.all(test_mn.get_output_states() == np.array([1, 0]))", "title": "" }, { "docid": "0639400c712ab79f7b15dd07c92419da", "score": "0.7967954", "text": "def test_activate_network_bad_input():\n np.random.seed(32480)\n test_mn = MarkovNetwork(2, 4, 2)\n test_mn.states[0:2] = np.array([-7, 2])\n test_mn.activate_network()\n assert np.all(test_mn.states[-2:] == np.array([1, 0]))", "title": "" }, { "docid": "806845221b161c5a45673adc99321959", "score": "0.6451784", "text": "def test_incorrect_input(self):\n # init LMWrapper\n lmodel = LangModel(logfile=\"lmwrap.log\")\n lmodel.init()\n # try to get priors\n with self.assertRaises(StatusCodeError):\n lmodel.state_update(['3'])", "title": "" }, { "docid": "637518ddd6d62aa2edc67f198bf5ca6f", "score": "0.6356342", "text": "def check_input(self, input_state: BasicState):", "title": "" }, { "docid": "61f55ba0599413dec4e9a6f9f9265007", "score": "0.60255224", "text": "def test_invalid_input_checkpoint_step(self):\n msg1 = (\n 'Must raise `TypeError` or `ValueError` when input '\n '`checkpoint_step` is invalid.'\n )\n msg2 = 'Inconsistent error message.'\n examples = (\n False, 0, -1, 0.0, 1.0, math.nan, -math.nan, math.inf, -math.inf,\n 0j, 1j, '', b'', (), [], {}, set(), object(), lambda x: x, type,\n None, NotImplemented, ...\n )\n\n for invalid_input in examples:\n with self.assertRaises(\n (TypeError, ValueError),\n msg=msg1\n ) as ctx_man:\n BaseConfig(checkpoint_step=invalid_input)\n\n if isinstance(ctx_man.exception, TypeError):\n self.assertEqual(\n ctx_man.exception.args[0],\n '`checkpoint_step` must be an instance of `int`.',\n msg=msg2\n )\n else:\n self.assertEqual(\n ctx_man.exception.args[0],\n '`checkpoint_step` must be bigger than or equal to `1`.',\n msg=msg2\n )", "title": "" }, { "docid": "001895a028430fb558c3f38b0df2b028", "score": "0.5955649", "text": "def test_invalid_inputs(self):\n # really extreme parameters.\n Psi = np.eye(2)\n B = np.eye(2)\n pi = np.array([1, 0])\n model = HiddenMarkovModel(K=2, L=2, Psi=Psi, B=B, pi=pi)\n\n with pytest.warns(RuntimeWarning):\n alphas = model._alphas(np.array([1, 1], dtype=np.int8))\n for y in np.nditer(alphas):\n assert np.isnan(y)\n\n with pytest.warns(RuntimeWarning):\n alphas = model._alphas(np.array([0, 1], dtype=np.int8))\n testing.assert_array_equal(alphas, np.array([[1, np.nan], [0, np.nan]]))", "title": "" }, { "docid": "642114cb213331f263d57cb9a390e995", "score": "0.59465367", "text": "def validate_state(self, state: np.ndarray):\n pass", "title": "" }, { "docid": "e0695da2aa6fb090123fefe3d6893fa9", "score": "0.59157896", "text": "def test_state_change_calc(self):\n # create mock input - supply every possible need\n fake_input = {\n 'feed': 1.0,\n 'pet': 1.0,\n 'excercise': 1.0,\n 'immunize': 1.0,\n 'clean': 1.0,\n }\n\n # set state to average before\n self.t.state = np.zeros(len(self.t.state), dtype=np.float64)\n\n # send the message\n self.input_sender.send_message(fake_input)\n sleep(0.1) # allow for message propogation\n\n # calculate state 
change based on fake input\n self.t._process_input_queue()\n\n self.assertTrue((self.t.state == np.ones(4, dtype=np.float64)).all())", "title": "" }, { "docid": "e7cee8f3fda1e27bbcf172ac8923e31c", "score": "0.57846665", "text": "def invalid(self, invalid):\n for input in self.inputs:\n input.invalid = invalid", "title": "" }, { "docid": "cbbef638d9b767de716aa2ca1b5b5395", "score": "0.57780045", "text": "def check_markov_inputs(self):\n StateCount = self.MrkvArray[0].shape[0]\n\n # Check that arrays are the right shape\n if not isinstance(self.Rfree, np.ndarray) or self.Rfree.shape != (StateCount,):\n raise ValueError(\n \"Rfree not the right shape, it should an array of Rfree of all the states.\"\n )\n\n # Check that arrays in lists are the right shape\n for MrkvArray_t in self.MrkvArray:\n if not isinstance(MrkvArray_t, np.ndarray) or MrkvArray_t.shape != (\n StateCount,\n StateCount,\n ):\n raise ValueError(\n \"MrkvArray not the right shape, it should be of the size states*statres.\"\n )\n for LivPrb_t in self.LivPrb:\n if not isinstance(LivPrb_t, np.ndarray) or LivPrb_t.shape != (StateCount,):\n raise ValueError(\n \"Array in LivPrb is not the right shape, it should be an array of length equal to number of states\"\n )\n for PermGroFac_t in self.PermGroFac:\n if not isinstance(PermGroFac_t, np.ndarray) or PermGroFac_t.shape != (\n StateCount,\n ):\n raise ValueError(\n \"Array in PermGroFac is not the right shape, it should be an array of length equal to number of states\"\n )\n\n # Now check the income distribution.\n # Note IncShkDstn is (potentially) time-varying, so it is in time_vary.\n # Therefore it is a list, and each element of that list responds to the income distribution\n # at a particular point in time. Each income distribution at a point in time should itself\n # be a list, with each element corresponding to the income distribution\n # conditional on a particular Markov state.\n # TODO: should this be a numpy array too?\n for IncShkDstn_t in self.IncShkDstn:\n if not isinstance(IncShkDstn_t, list):\n raise ValueError(\n \"self.IncShkDstn is time varying and so must be a list\"\n + \"of lists of Distributions, one per Markov State. 
Found \"\n + f\"{self.IncShkDstn} instead\"\n )\n elif len(IncShkDstn_t) != StateCount:\n raise ValueError(\n \"List in IncShkDstn is not the right length, it should be length equal to number of states\"\n )", "title": "" }, { "docid": "10b30bba75d9e1d9274710cc1d7e5498", "score": "0.5753996", "text": "def test_train_with_state_and_batches(self):\n m3gnet = M3GNet(n_blocks=1, units=5, is_intensive=True, include_states=True)\n trainer = Trainer(model=m3gnet, optimizer=tf.keras.optimizers.Adam(1e-2))\n\n # 50 > 32\n trainer.train(self.structures[:50], self.bandgaps[:50], batch_size=32, epochs=2, train_metrics=[\"mae\"])\n self.assertTrue(m3gnet.predict_structures(self.structures[:2]).numpy().shape == (2, 1))", "title": "" }, { "docid": "c677a4ec307082e739b6f574a23a574a", "score": "0.5717528", "text": "def initial_state_for_inputs(self, inputs, **kwargs):\n pass", "title": "" }, { "docid": "9338ea395fb6005565fa44c867d31f64", "score": "0.56327957", "text": "def test_init_seed_bad_genome():\n np.random.seed(4303423)\n seed_genome = np.random.randint(0, 256, 10000)\n seed_genome[0:2] = np.array([42, 213])\n seed_genome[-10:-8] = np.array([42, 213])\n\n test_mn = MarkovNetwork(num_input_states=4,\n num_memory_states=5,\n num_output_states=6,\n probabilistic=False,\n genome=seed_genome)\n\n assert np.all(test_mn.genome == seed_genome)\n assert len(test_mn.markov_gates) == 1", "title": "" }, { "docid": "043a0b1c5f33da4c4710ddcfbedb89a8", "score": "0.56204176", "text": "def test_change_trainable():\n model = deepmoji_transfer(5, 30)\n change_trainable(model.layers[0], False)\n assert not model.layers[0].trainable\n change_trainable(model.layers[0], True)\n assert model.layers[0].trainable", "title": "" }, { "docid": "0bd11c6db4679739f5882c4f8a935329", "score": "0.5605008", "text": "def test_is_accepted_state_raises_exception_on_invalid_parameter (self):\n with pytest.raises (ValueError):\n state = ''.join (random.choice (string.letters) for i in xrange (10))\n self.client.is_accepted_state (state)", "title": "" }, { "docid": "2826296a43c8f920b7d2bf06e2eb3061", "score": "0.5577821", "text": "def assert_params_changed(model, batch, exclude=[]):\n # save state-dict\n torch.save(model.state_dict(), \"before\")\n # do one training step\n optimizer = Adam(model.parameters())\n loss_fn = MSELoss()\n pred = model(batch)\n loss = loss_fn(pred, torch.rand(pred.shape))\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n # check if all trainable parameters have changed\n after = model.state_dict()\n before = torch.load(\"before\")\n for key in before.keys():\n if np.array([key.startswith(exclude_layer) for exclude_layer in exclude]).any():\n continue\n assert (\n before[key] != after[key]\n ).any(), \"{} layer has not been updated!\".format(key)", "title": "" }, { "docid": "75f9d70a74fefe64b35346b864b2176e", "score": "0.5558598", "text": "def test_prediction_input():\n\twith pytest.raises(ValueError) as excinfo1:\n\t\t# test data input\n\t\tpath_to_model = 'test/bad_test_model.pkl'\n\t\tsample_data='test/one_sample.csv'\n\t\tprediction(path_to_model, sample_data)\n\n\tassert str(excinfo1.value) == 'Number of features of the model must match the input. 
Model n_features is 9 and input n_features is 8 '", "title": "" }, { "docid": "98afb31ef7d61802eb6d998a2a618f73", "score": "0.55184275", "text": "def test_invalid_input_num_rnn_layers(self):\n msg1 = (\n 'Must raise `TypeError` or `ValueError` when input '\n '`num_rnn_layers` is invalid.'\n )\n msg2 = 'Inconsistent error message.'\n examples = (\n False, 0, -1, 0.0, 1.0, math.nan, -math.nan, math.inf, -math.inf,\n 0j, 1j, '', b'', (), [], {}, set(), object(), lambda x: x, type,\n None, NotImplemented, ...\n )\n\n for invalid_input in examples:\n with self.assertRaises(\n (TypeError, ValueError),\n msg=msg1\n ) as ctx_man:\n BaseConfig(\n dataset='test',\n experiment='test',\n num_rnn_layers=invalid_input\n )\n\n if isinstance(ctx_man.exception, TypeError):\n self.assertEqual(\n ctx_man.exception.args[0],\n '`num_rnn_layers` must be an instance of `int`.',\n msg=msg2\n )\n else:\n self.assertEqual(\n ctx_man.exception.args[0],\n '`num_rnn_layers` must be bigger than or equal to `1`.',\n msg=msg2\n )", "title": "" }, { "docid": "874bb4a8b708de913ded06a551ddf04a", "score": "0.54903877", "text": "def testChangeState(self):\n\n a = LedSwitcher(\"../test/testinputs/test1LineItem.txt\")\n a.parseFile()\n self.assertTrue(a.changeState(0, 0, True) == True)", "title": "" }, { "docid": "2da75e60df04ac107c9eee7ff59bef28", "score": "0.54737973", "text": "def test_run(self):\n # Loading the time matrix provided\n import operator\n Time_matrix = np.load(\"TM.npy\")\n print(\"CURRENT STATE: {}\".format(self.state_init))\n\n # Check request at the init state\n requests = self.requests(self.state_init)\n print(\"REQUESTS: {}\".format(requests))\n\n # compute rewards\n rewards = []\n for req in requests[1]:\n r = self.reward_func(self.state_init, req, Time_matrix)\n rewards.append(r)\n print(\"REWARDS: {}\".format(rewards))\n\n new_states = []\n for req in requests[1]:\n s = self.next_state_func(self.state_init, req, Time_matrix)\n new_states.append(s)\n print(\"NEW POSSIBLE STATES: {}\".format(new_states))\n\n # if we decide the new state based on max reward\n index, max_reward = max(enumerate(rewards), key=operator.itemgetter(1))\n self.state_init = new_states[index]\n print(\"MAXIMUM REWARD: {}\".format(max_reward))\n print (\"ACTION : {}\".format(requests[1][index]))\n print(\"NEW STATE: {}\".format(self.state_init))\n print(\"NN INPUT LAYER (ARC-1): {}\".format(self.state_encod_arch1(self.state_init)))\n print(\"NN INPUT LAYER (ARC-2): {}\".format(self.state_encod_arch2(self.state_init, requests[1][index])))", "title": "" }, { "docid": "0092ee325a308ff55f7abc27862f7d4e", "score": "0.5413459", "text": "def test_as_number_invalid_cases(self):\n for _state in ('', 'foo', 'foo.bar', None, False, True, object,\n object()):\n with pytest.raises(ValueError):\n state.state_as_number(ha.State('domain.test', _state, {}))", "title": "" }, { "docid": "535624a4c7a2361be293f2a207f6cf0e", "score": "0.54110444", "text": "def test_predict_future_reward(self):\n good_sequence = [\n ([0,0,0,0],1,[0,0,0,1]),\n ([0,0,0,1],0,[1,0,1,0]),\n ([1,0,1,0],1,[1,1,1,1]),\n ]\n bad_sequence = [\n ([0,0,0,0],0,[1,0,0,1]),\n ([1,0,0,1],1,[0,0,1,0]),\n ([0,0,1,0],1,[0,1,1,1]),\n ]\n def expand(r, final_reward):\n results = []\n for i,(state,action,new_state) in enumerate(r):\n record = {\n 'state': np.array(state,'f'),\n 'new_state': np.array(new_state,'f'),\n 'action': action,\n 'done': i >= len(r),\n 'reward': final_reward\n }\n results.append(record)\n assert results[-1]['reward'] == final_reward\n return results \n records = 
expand(good_sequence,1.0) + expand(bad_sequence,-1.0)\n print(records)\n records = records * 256\n model = main.build_model(env)\n main.train_model( model, records, env, batch_size=8)\n for (state,action,new_state) in good_sequence:\n prediction = main.predict(model,state)\n assert np.argmax(prediction) == action, (state,action,prediction)\n \n for (state,action,new_state) in bad_sequence:\n prediction = main.predict(model,state)\n assert np.argmax(prediction) != action, (state,action,prediction)", "title": "" }, { "docid": "f7e058f68b0982bd9c2b3fddce02a090", "score": "0.53970486", "text": "def test_invalid_qubit_state_vector(self, rep):\n dev = DefaultTensorTF(wires=2, representation=rep)\n state = np.array([0, 123.432])\n\n with pytest.raises(\n ValueError, match=r\"can apply QubitStateVector only to all of the 2 wires\"\n ):\n dev.execute([qml.QubitStateVector(state, wires=[0])], [], {})", "title": "" }, { "docid": "45b970f70b9866cd5efc5a9db6faf67f", "score": "0.5394204", "text": "def test_error_on_nan(self):\n model = TransE(triples_factory=self.triples_factory)\n training_loop = NaNTrainingLoop(model=model, patience=2)\n\n with self.assertRaises(NonFiniteLossError):\n training_loop.train(num_epochs=3, batch_size=self.batch_size)", "title": "" }, { "docid": "d142e4490d73c59606222e27c534aa42", "score": "0.53643453", "text": "def test_train_model_input():\n with pytest.raises(ValueError) as message:\n\n # load sample test data\n df = pd.read_csv('data/bank_processed.csv')\n # change the data type of age from int to string \n df['age'] = df['age'].astype(str) \n method = 'xgboost'\n\n kwargs = {'params':{\"max_depth\":3, 'n_estimators': 300,'learning_rate': 0.05},\n 'split_data': {'train_size':0.7, 'test_size':0.3, 'random_state': 42},\n \"get_target\":{'target':'y'}, \n 'choose_features':{'features_to_use': \n ['age', 'job', 'marital', 'education', \n 'default', 'balance', 'housing','loan', \n 'contact', 'day', 'month', 'campaign', \n 'pdays', 'previous','poutcome']}\n }\n # fit the model\n model = tm.train_model(df, method, **kwargs)\n\n # raise AssertionError if error message is not as expected\n # remove trailing white space and space in the message \n assert str(message.value).replace(\" \", \"\").replace('\\n','') == 'DataFrame.dtypesfordatamustbeint,floatorbool.Didnotexpectthedatatypesinfieldsage'", "title": "" }, { "docid": "2c98eb5c13308b103349e344a8000c1f", "score": "0.5354566", "text": "def test_state_param_error(self):\n st.status(\"label\", state=\"error\")\n\n status_block = self.get_delta_from_queue()\n self.assertEqual(status_block.add_block.expandable.label, \"label\")\n self.assertEqual(status_block.add_block.expandable.expanded, False)\n self.assertEqual(status_block.add_block.expandable.icon, \"error\")", "title": "" }, { "docid": "6ab19bde03ddd40cb0c45f506226868f", "score": "0.5345522", "text": "def _validate_state(self, *states):\n if self._state not in states:\n raise ValueError('Invalid state: %s' % self._state)", "title": "" }, { "docid": "05e55288a08c7fd280d33af9bf5b03c6", "score": "0.5344657", "text": "def test_transform_verbose_invalid():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.clean()\n pytest.raises(ValueError, atom.transform, X_bin, verbose=3)", "title": "" }, { "docid": "f77d0d59a9086e5c74172f5652f0ff61", "score": "0.53385675", "text": "def test_mask_input(make_missing_data):\n _, m, _, _ = make_missing_data\n s = ab.MaskInputLayer(name='myname')\n\n F, KL = s(myname=m)\n tc = tf.test.TestCase()\n with tc.test_session():\n f = 
F.eval()\n assert KL == 0.0\n assert np.array_equal(f, m)", "title": "" }, { "docid": "f69b7d98540c636f370ae5060c7f3a5e", "score": "0.53384984", "text": "def test_weights_change(self):\n after_param = self.model.state_dict()\n for name in self.init_params:\n param = self.init_params[name]\n self.assertTrue(torch.all(param.eq(after_param[name])))", "title": "" }, { "docid": "ce690aa356521e678b65937cf176d6a9", "score": "0.5336309", "text": "def _reset_internal_states(self, x_test):\n if self._options.lstm_stateful:\n # In stateful mode, we assume sequential_mini_step is 1\n if self._options.sequential_mini_step != 1:\n logging.error('Stateful mode assumes sequential mini step 1!')\n sys.exit(1)\n # In this case, x data shape is (samples, 1, lags, series)\n x_shape = x_test.shape\n # If there is no stateful model already, or has different test size\n if (self._inference_model is None or\n self._inference_batch_size != x_shape[0]):\n input_layer = layers.Input(\n batch_shape=([x_shape[0], 1] + list(x_shape[2:])))\n last_layer = self._create_net(input_layer, stateful=True)\n self._inference_model = models.Model(\n inputs=input_layer, outputs=last_layer)\n # The model is identical to the trained one, but with stateful\n # LSTMs. We copy weights from the original model to the new one\n self._inference_model.set_weights(self._model.get_weights())\n self._inference_batch_size = x_shape[0]\n # We reset states and then pass data for times we already know.\n self._inference_model.reset_states()\n # Note that in predict calls below we get slices of length 1 step\n for step in range(x_shape[1] - 1):\n # We pass batch size equal to all samples\n self._inference_model.predict(\n x_test[:, step:step + 1, :, :],\n batch_size=self._inference_batch_size)", "title": "" }, { "docid": "e4d07013caecdc02a3c6206081982331", "score": "0.532501", "text": "def test_tflearn_training_flag(self):\n with tf.Graph().as_default():\n N = 10000\n p = .3\n data = np.ones([N, 1], dtype=np.float32)\n input_ph = tfhelper.make_placeholder_for('input', data)\n output_tensor = tflearn.dropout(input_ph, keep_prob=p)\n f = tfhelper.make_function(input_ph, output_tensor)\n _ = Bookkeeper.for_graph() # ensure flags are created\n\n with tf.Session() as sess:\n sess.run(global_variables_initializer())\n with tfhelper.set_training_phase(TrainingPhase.TRAINING):\n output = f(data)\n self.assertFalse(np.all(data == output))\n output_mean = np.average(output)\n\n # According to large number theory, the mean error follows\n # N(0, sqrt(p(1-p)/N)). 
Thus the following statement should\n # be satisfied with a very high probability.\n self.assertLess(np.abs(output_mean * p - p),\n 3 * np.sqrt(p * (1 - p) / N))\n\n # Dropout should be a binomial distribution * (v/keep_prob)\n self.assertTrue(\n np.all(\n (np.abs(output - data / p) < 1e-5) |\n (np.abs(output) < 1e-5)\n )\n )\n\n with tfhelper.set_training_phase(TrainingPhase.NOT_SET):\n output = f(data)\n np.testing.assert_array_equal(data, output)", "title": "" }, { "docid": "5ae297b07c333424f903c2caef855e5d", "score": "0.53102237", "text": "def train_mlp(input_data, labels, n_epochs, bias=0.8, random_state=42, weights=None):\n # initialize weight matrices\n if weights is None:\n weights = []\n if random_state == 0:\n weights.append(np.zeros((3,2)))\n weights.append(np.zeros((2,1)))\n elif random_state == 1:\n weights.append(np.ones((3,2)))\n weights.append(np.ones((2,1)))\n elif random_state == 42:\n weights.append(np.array([\n [1.1,0.5],\n [0.1, -0.5],\n [1, 2]\n ]))\n weights.append(np.array([\n [0.2],\n [0.4]\n ]))\n\n else:\n weights.append(np.random.rand(3, 2))\n weights.append(np.random.rand(2, 1))\n\n else:\n assert isinstance(weights, np.ndarray)\n\n # add bias to each weight matrix\n for i, w in enumerate(weights):\n\n weights[i] = np.vstack((\n w,\n np.array([bias] * weights[i].shape[1]).reshape(1,-1)\n ))\n\n for j, w in enumerate(weights):\n print('initial weight matrix layer ' + str(j + 1) + ': ')\n print(w)\n print('--------')\n\n # construct sigmoid lambda function\n sigmoid_function = lambda z: 1 / (1 + (np.e ** (- z)))\n\n # initialize loss and data array for caching intermediate computations\n loss = None\n data = [input_data]\n\n for i in range(n_epochs):\n print('========\\n========')\n print('epoch ' + str(i + 1))\n print('========\\n========')\n\n # forward pass\n print('........\\nforward pass\\n........')\n inputs = input_data\n for idx, weight_matrix in enumerate(weights):\n inputs = np.hstack((\n inputs,\n np.ones((len(inputs), 1))\n ))\n inputs = np.matmul(inputs, weight_matrix)\n inputs = sigmoid_function(inputs)\n print('output of layer ' + str(idx) + ': ')\n print(inputs)\n\n # data.append(np.hstack((inputs, np.ones((len(inputs), 1)))))\n data.append(inputs)\n\n predictions = inputs\n\n print('predictions: \\t' + str(predictions.ravel()))\n print('ground-truth labels: ' + str(labels))\n\n loss = np.dot((predictions.ravel() - labels), (predictions.ravel() - labels)) / 2\n print('halved mean-squared error: ' + str(loss))\n\n # backpropagate\n print('........\\nbackward pass\\n........')\n errors = [loss]\n grads = []\n reverse_data = list(reversed(data))\n for j, datum in enumerate(reverse_data[0:-1]):\n print('layer -' + str(j + 1))\n if j == 0:\n prev_datum = np.hstack((reverse_data[j + 1],\n np.ones((len(reverse_data[j + 1]), 1))))\n grad = np.matmul(prev_datum.T,\n (\n (labels.reshape(-1,1) - datum) *\n (datum * (1 - datum))))\n grad /= len(labels)\n print('gradient matrix for backwards layer ' + str(j) + ': ')\n print(grad)\n grads.append(grad)\n else:\n prev_datum = np.hstack((reverse_data[j + 1],\n np.ones((len(reverse_data[j + 1]), 1))))\n grad = np.matmul(\n prev_datum.T,\n grads[j - 1] * datum * (1 - datum)\n )\n grad /= len(datum)\n print('gradient matrix for backwards layer ' + str(j) + ': ')\n print(grad)\n grads.append(grad)\n\n for j, (w, g) in enumerate(list(zip(weights, list(reversed(grads))))):\n weights[j] = weights[j] + g\n\n print('new weights: ')\n for j, w in enumerate(weights):\n print(j)\n print(w)\n print('========')\n data = 
[input_data]", "title": "" }, { "docid": "943ef936bee68f1ea8c3b22dd242bc37", "score": "0.52980536", "text": "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n settings, notFound = paramInput.findNodesAndExtractValues(['actuators','stateVariables', 'initStateVariables',\n 'subtractNormUXY','singleValuesTruncationTol'])\n # notFound must be empty\n assert(not notFound)\n # Truncation threshold to apply to single values\n self.sTruncationTol = settings.get('singleValuesTruncationTol')\n # Extract the Actuator Variable Names (u)\n self.actuatorsID = settings.get('actuators')\n # Extract the State Variable Names (x)\n self.stateID = settings.get('stateVariables')\n # Extract the Initialization State Variable Names (x). Optional. If not\n # found, the state is initialized with the initial values in the state field\n self.initStateID = settings.get('initStateVariables')\n # FIXME 1718\n check = [el.endswith('_init') for el in self.initStateID]\n if not np.all(check):\n missingVars = ', '.join(np.asarray(self.initStateID)[np.logical_not(check)].tolist())\n self.raiseAnError(IndexError, \"initStateVariables must be named {stateVariable}_init. Missing state variables are: {missingVars}\")\n varsToCheck = [el.strip()[:-5] for el in self.initStateID]\n self.initStateID = [self.initStateID[cnt] for cnt, el in enumerate(varsToCheck) if el in self.stateID]\n # END FIXME 1718\n # whether to subtract the nominal(initial) value from U, X and Y signal for calculation\n self.dmdParams['centerUXY'] = settings.get('subtractNormUXY')\n # some checks\n # check if state ids in target\n if not (set(self.stateID) <= set(self.target)):\n self.raiseAnError(IOError,'stateVariables must also be listed among <Target> variables!')\n # check if state ids in target\n if not (set(self.initStateID) <= set(self.features)):\n self.raiseAnError(IOError,'initStateVariables must also be listed among <Features> variables!')\n\n # Extract the Output Names (Output, Y)\n self.outputID = [x for x in self.target if x not in (set(self.stateID) | set([self.pivotParameterID]))]\n # check if there are parameters\n self.parametersIDs = list(set(self.features) - set(self.actuatorsID))\n for i in range(len(self.parametersIDs)-1,-1,-1):\n if str(self.parametersIDs[i]).endswith('_init'):\n self.parametersIDs.remove(self.parametersIDs[i])", "title": "" }, { "docid": "41221834cebb46ab0b69e7bd3e8aa429", "score": "0.5297311", "text": "def test_invalid_input_seed(self):\n msg1 = (\n 'Must raise `TypeError` or `ValueError` when input `seed` is '\n 'invalid.'\n )\n msg2 = 'Inconsistent error message.'\n examples = (\n False, 0, -1, 0.0, 1.0, math.nan, -math.nan, math.inf, -math.inf,\n 0j, 1j, '', b'', (), [], {}, set(), object(), lambda x: x, type,\n None, NotImplemented, ...\n )\n\n for invalid_input in examples:\n with self.assertRaises(\n (TypeError, ValueError),\n msg=msg1\n ) as ctx_man:\n BaseConfig(\n dataset='test',\n experiment='test',\n seed=invalid_input\n )\n\n if isinstance(ctx_man.exception, TypeError):\n self.assertEqual(\n ctx_man.exception.args[0],\n '`seed` must be an instance of `int`.',\n msg=msg2\n )\n else:\n self.assertEqual(\n ctx_man.exception.args[0],\n '`seed` must be bigger than or equal to `1`.',\n msg=msg2\n )", "title": "" }, { "docid": "63c0978f397186ed3861166fa251f3ed", "score": "0.5292276", "text": "def train_step(self, inputs, state=None):\n pass", "title": "" }, { "docid": "f40a039950fbfc61f2fe82c652a8f612", "score": "0.5269854", "text": "def states_update(pattern_detect_model, state_info, 
org_uniq_event, \n current_states, current_event, \n seq_flag = False, overlap_flag = True):\n state_num = len(state_info)\n \n \n new_states = np.zeros_like(current_states)\n \n # if pattern: state inherit; If sequence: dont inherit\n if not seq_flag:\n new_states[current_states != 0] = 1\n new_states[-1] = 0 # final states keeps empty\n else:\n # keep the first initial state active(1)\n new_states[0] = 1\n \n# print(new_states)\n for i in range(state_num-1):\n # dont need to calculate the 'next state' of final state.\n \n# # print(new_states)\n # encode state to one-hot\n if current_states[i]!=0:\n current_state = one_hot_id(state_num, i) # convert state index into one-hot\n \n next_state = FSM_core(pattern_detect_model, state_info, org_uniq_event, \n current_state, current_event,\n diagnose = 0)\n# print(current_state, current_event, next_state)\n\n # dependency check\n n_state_idx = next_state.argmax()\n # if state doesn't push forward, do nothing; else:\n# print(current_state,next_state )\n if (current_state != next_state).any():\n if (current_states[n_state_idx]!=0) and (current_states[n_state_idx-1]!=0):\n new_states[n_state_idx] = 3\n elif (current_states[n_state_idx]!=0) and (current_states[n_state_idx-1]==0):\n new_states[n_state_idx] = 1\n elif (current_states[n_state_idx]==0) and (current_states[n_state_idx-1]!=0):\n new_states[n_state_idx] = 2\n else:\n raise ValueError('??? with FSM: ',\n current_states,\n current_event,\n new_state)\n# print(new_states)\n\n # keep the first initial state active(1)\n new_states[0] = 1\n \n # for sequence, replace all 3 into 2.\n if seq_flag:\n new_states[new_states == 3] = 2\n \n # for non-overlapping case:\n # if the final state is reached,\n # abandon all the other activated states\n if overlap_flag == False:\n if new_states[-1] !=0:\n for i in range(1, state_num-1):\n new_states[i] = 0\n\n print(\"Current_states: \", current_states, \n \"\\nCurrent_event: \", current_event, \n \"\\nNew_states: \", new_states)\n \n return new_states", "title": "" }, { "docid": "d23b4ab59d889c797b0b6eefc1d12cae", "score": "0.52658004", "text": "def test_wrong_input(self):\n transformer = BraindecodeDatasetLoader()\n with pytest.raises(ValueError):\n transformer.fit_transform(np.random.normal(size=(2, 1, 10)), y=np.array([0]))", "title": "" }, { "docid": "a9778464096960d6f42101577fb0824b", "score": "0.52453214", "text": "def __is_valid__(self, state):\n return False", "title": "" }, { "docid": "8e45f0fb73f98175b819821dcbdab77b", "score": "0.5241079", "text": "def test_strict_validation(self):\n state = self.question.question_state_data\n state.interaction.solution = None\n self._assert_validation_error(\n 'Expected the question to have a solution')\n state.interaction.hints = []\n self._assert_validation_error(\n 'Expected the question to have at least one hint')\n state.interaction.default_outcome.dest = 'abc'\n self._assert_validation_error(\n 'Expected all answer groups to have destination as None.')\n state.interaction.default_outcome.labelled_as_correct = False\n self._assert_validation_error(\n 'Expected at least one answer group to have a correct answer')", "title": "" }, { "docid": "a562cba8ef16607f2957f42ffb6abd9e", "score": "0.5234075", "text": "def invalid_input(self, match=None, context=None, next_state=None):\r\n self.state_machine.previous_line() # back up so parent SM can reassess\r\n raise EOFError", "title": "" }, { "docid": "a562cba8ef16607f2957f42ffb6abd9e", "score": "0.5234075", "text": "def invalid_input(self, match=None, context=None, 
next_state=None):\r\n self.state_machine.previous_line() # back up so parent SM can reassess\r\n raise EOFError", "title": "" }, { "docid": "82d645d5cc2ede114739a35e95caa6a0", "score": "0.52311355", "text": "def invalid_input(self, match=None, context=None, nextstate=None):\n self.statemachine.previousline() # back up so parent SM can reassess\n raise EOFError", "title": "" }, { "docid": "c7a1697602edbdac5bf01ed82fd987db", "score": "0.52106935", "text": "def test_state_dict(self):\n # Module.\n fx = TaylorNet(dim=4)\n\n # Get state dict.\n state_dict1 = get_state_dict(fx)\n\n # Set state dict.\n set_state_dict(fx, state_dict1)\n\n # Compare state dicts.\n state_dict2 = get_state_dict(fx)\n for key in state_dict1.keys():\n assert array_equal(state_dict1[key].cpu().numpy(), state_dict2[key].cpu().numpy())", "title": "" }, { "docid": "8ecd0f6bf6adb5e9cab83e770be58a98", "score": "0.51986504", "text": "def markLearnStates():\n\n # Structure of cells in colums:\n # Each column has \"config.SM['M']\" cells. Let us assume \"config.SM['M'] = 32\"\n # Cells in columns are numbers in column major fashion starting from zero:\n # i.e second cell in first column is numbered 1. last cell in first column is numbered 31.\n # First cell in second column is numbered 32..\n\n config.SM['cellLearn'][:] = 0\n activeCols = np.nonzero(config.SM['input'])\n\n \"\"\" Mark the correctly predicted active cells with dendrites that are also\n predictive based on on learning states. \"\"\"\n xL = np.nonzero(config.SM['dendriteLearn'])[0] # active learning dendrites\n uL = np.unique(config.SM['dendriteToCell'][xL]) # Cell to which active learning dendrites are connected\n\n # If predictedActive is non-empty array\n if np.prod(np.shape(config.SM['predictedActive'])) != 0:\n # Cells that are connected to Active learning dendrites and are predicted active.\n # lc_cols: contains the cell numbers in column major\n lc_cols = uL[config.SM['predictedActive'][np.unravel_index(uL, np.shape(config.SM['predictedActive']), 'F')] > 0]\n [R, C] = np.unravel_index(lc_cols, (config.SM['M'], config.SM['N']), 'F')\n # select only one active cell per column\n [C, IA] = np.unique(C, return_index = True)\n R = R[IA]\n config.SM['cellLearn'][tuple(R), tuple(C)] = True\n else:\n lc_cols = np.empty([])\n\n\n # lc_cols: represents the cell numbers, extract only column numbers of these cells\n # following line assumes that np.shape(config.SM['predictedActive']) is equal to (config.SM['M'], config.SM['N'])\n [r_num, c_num] = np.unravel_index(lc_cols, (config.SM['M'], config.SM['N']), 'F')\n unique_lc_cols = np.unique(c_num)\n # Find the active columns without a learnCell state set -- activeCols\n activeCols = np.setdiff1d(activeCols, unique_lc_cols)\n\n # Iterate through the remaining columns selecting a single learnState cell in each\n n = np.size(activeCols)\n [row_i, col_i] = np.nonzero(config.SM['cellActive'])\n [cellRowPrev, cellColPrev] = np.nonzero(config.SM['cellLearnPrevious'])\n cellIDPrevious = np.ravel_multi_index((cellRowPrev, cellColPrev), np.shape(config.SM['cellLearnPrevious']), order='F')\n dCells = np.zeros((config.SM['N'], 1))\n nDCells = 0\n expandDendrites = np.zeros(config.SM['N'], 1)\n\n for k in range(0, n):\n # Iterate through columns looking for cell to set learnState\n # [ToDo: check if shape of activeCols is (,n) => use activeCols[k] or (1,n) => use activeCols[0][k]\n j = activeCols[k]\n # Find the row indices (row_i) of active cells in column j\n i = row_i[col_i == j] # i can have more than one value, it can be a 
vector\n [cellChosen, newSynapsesToDendrite, updateFlag] = getBestMachingCell(j, i)\n\n # If the column is shared between two time instant, use the locations chosen earlier.\n if ((updateFlag == True) and (newSynapsesToDendrite < 0)):\n xJ = np.nonzero(cellColPrev == j)\n if (np.size(xJ) > 0):\n cellChosen = cellIDPrevious[xJ[0][0]]\n config.SM['cellLearn'][np.unravel_index(cellChosen, (config.SM['M'], config.SM['N']), 'F')] = True\n if updateFlag:\n dCells[nDCells] = cellChosen\n expandDendrites[nDCells] = newSynapsesToDendrite\n nDCells = nDCells + 1\n\n addDendrites.addDendrites(dCells, expandDendrites, nDCells)", "title": "" }, { "docid": "bd912997d7793ccaf0900828b0940c27", "score": "0.5195231", "text": "def test_invalid_input(self):\n with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):\n gan_metrics.run_inception(tf.ones([7, 50, 50, 3]))\n\n p = tf.zeros([8, 10])\n p_logits = tf.zeros([8, 10])\n q = tf.zeros([10])\n with self.assertRaisesRegexp(ValueError, 'must be floating type'):\n gan_metrics._kl_divergence(tf.zeros([8, 10], dtype=tf.int32), p_logits, q)\n\n with self.assertRaisesRegexp(ValueError, 'must be floating type'):\n gan_metrics._kl_divergence(p, tf.zeros([8, 10], dtype=tf.int32), q)\n\n with self.assertRaisesRegexp(ValueError, 'must be floating type'):\n gan_metrics._kl_divergence(p, p_logits, tf.zeros([10], dtype=tf.int32))\n\n with self.assertRaisesRegexp(ValueError, 'must have rank 2'):\n gan_metrics._kl_divergence(tf.zeros([8]), p_logits, q)\n\n with self.assertRaisesRegexp(ValueError, 'must have rank 2'):\n gan_metrics._kl_divergence(p, tf.zeros([8]), q)\n\n with self.assertRaisesRegexp(ValueError, 'must have rank 1'):\n gan_metrics._kl_divergence(p, p_logits, tf.zeros([10, 8]))", "title": "" }, { "docid": "6063cfeb0e6cee31463c562d0be26b60", "score": "0.5180625", "text": "def _preprocess(self, _, required_states):\n pass", "title": "" }, { "docid": "a2af9c27ac428331eb85088e3d08ed5a", "score": "0.51800853", "text": "def test_get_inputs_returns_expected_inputs(self):\n inputs = self.network.get_inputs(' OX ', 'X')\n self.assertTrue(\n inputs == [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n )", "title": "" }, { "docid": "b5a6e738fbf03f8ebd50ace5e7f5c78e", "score": "0.5178043", "text": "def train_imitation(self, update):\n # maintain list of sampled episodes(batchwise) and append to list. 
Then do Imitation learning simply\n _ , _ , _ , states, _ , _ = self.sample_batch(self.args.num_states*3)\n if states is not None:\n with torch.no_grad():\n states = torch.tensor(states, dtype=torch.float32)\n if self.args.cuda:\n states = states.cuda()\n value, _ = self.network(states)\n sorted_indices = value.cpu().numpy().reshape(-1).argsort()[-self.args.num_states:][::-1]\n # Select high value states under currect valuation\n hv_states = states[sorted_indices.tolist()]\n\n for n in range(self.args.num_traces):\n # An iteration of sampling recall traces and doing imitation learning\n mb_actions, mb_states_prev = [], []\n states_next = hv_states\n for step in range(self.args.trace_size):\n with torch.no_grad():\n pi = self.bw_actgen(states_next)\n actions = select_actions(pi)\n mu = self.bw_stategen(states_next, self.indexes_to_one_hot(actions))\n # s_t = s_t+1 + Δs_t\n states_prev = states_next + select_state(mu, True)\n states_next = states_prev\n # Add to list\n mb_actions.append(actions.cpu().numpy())\n mb_states_prev.append(states_prev.cpu().numpy())\n # Update state\n # Begin to do Imitation Learning\n mb_actions = torch.tensor(mb_actions, dtype=torch.int64).unsqueeze(1).view(self.args.num_states*self.args.trace_size, -1)\n mb_states_prev = torch.tensor(mb_states_prev, dtype=torch.float32).view(self.batch_obs_state_shape)\n if self.args.per_weight:\n max_nlogp = torch.tensor(np.ones((self.args.num_states*self.args.trace_size, 1)) * self.args.max_nlogp, dtype=torch.float32)\n\n if self.args.cuda:\n mb_actions = mb_actions.cuda()\n mb_states_prev = mb_states_prev.cuda()\n if self.args.per_weight:\n max_nlogp = max_nlogp.cuda()\n # Pass through network\n _, pi = self.network(mb_states_prev)\n\n if self.args.per_weight:\n action_log_probs, dist_entropy = evaluate_actions_sil(pi, mb_actions)\n action_log_probs = -action_log_probs\n clipped_nlogp = torch.min(action_log_probs, max_nlogp)\n total_loss = (torch.sum(clipped_nlogp) -self.args.entropy_coef*torch.sum(dist_entropy)) / (self.args.num_states*self.args.trace_size)\n # Start to update Policy Network Parameters\n else:\n criterion = torch.nn.NLLLoss()\n total_loss = criterion(torch.log(pi), mb_actions.squeeze(1))\n self.optimizer.zero_grad()\n total_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), self.args.max_grad_norm)\n self.optimizer.step()\n return total_loss", "title": "" }, { "docid": "32318a40e9d7799770cd9d19843b2044", "score": "0.51745415", "text": "def test_correct_process(self):\n # init LMWrapper\n lmodel = LangModel(logfile=\"lmwrap.log\")\n # init LM\n lmodel.init()\n # get priors\n priors = lmodel.state_update(['T'])\n # display priors\n assert priors['letter'][0][0] == 'H'\n priors = lmodel.state_update(['H'])\n assert priors['letter'][0][0] == 'E'\n lmodel.reset()\n priors = lmodel.state_update(list('THE'))\n assert priors['letter'][0][0] == '_'", "title": "" }, { "docid": "65a93443d3fca53dbe7787964758170e", "score": "0.51714116", "text": "def testmutate(self):\n\t\twith self.assertRaises(OutOfRangeError):\n\t\t\tIndividual.mutate(self,-1)", "title": "" }, { "docid": "a76ebca8377e711ed5763df814038438", "score": "0.51713604", "text": "def validate_given_state(self, episodes, inputfile_number, outputfile_number, input_car_states=None, no_of_cars=1,\n destination_index_array=[0, 0, 0, 0], destination_choose_arrays=[0, 0, 0], waitkey=0):\n # f = open(\"data/qvalue_files/up_right.txt\", 'w', encoding='utf-8')\n self.env_config.no_of_cars = no_of_cars\n env.no_of_cars = no_of_cars\n for i in 
range(0, episodes):\n total_cars = 5\n # self.__init__(8 * total_cars * 2, 5, total_cars, self.sess)\n self.__init__(8 * total_cars * 2, 5, total_cars, env_config=self.env_config, sess=self.sess)\n\n self.model = load_model('models/' + inputfile_number, custom_objects={'huber_loss': huber_loss})\n\n \"\"\"\n Defining initial sources and destinations for cars.\n \"\"\"\n env.reset()\n env.initialize_cars(no_of_cars)\n env.initialize(destination_index_array, input_car_states, destination_choose_arrays)\n counts_in_one_episode = 0\n self.episode_array = []\n # self.blocked_nodes_list = []\n self.validation_done = 0\n self.epsilon = 0\n while not self.validation_done:\n counts_in_one_episode += 1\n num = env.update_simulator_display(waitkey) - 48\n print(\"number passes by key press\", num)\n if num >= env.no_of_cars:\n print(\"in valid car id\")\n continue\n if env.check_car_training(num):\n continue\n # print (i.source_node, \"source_node\")\n state = env.encodestate(num, 1)\n action, action_index = self.getaction(state, 0)\n # f.write(state + '|' + action + '|\\n')\n print(\"for state,\", state, \"action provided\", action)\n next_state = env.get_successor_state(deepcopy(state), action)\n next_state_array = env.state_to_array(next_state)\n env.update_car(num, deepcopy(next_state_array[0][0]), deepcopy(next_state_array[0][1]))\n env.update_simulator_display(waitkey)\n self.validation_done = env.is_game_ended()\n if self.validation_done:\n break", "title": "" }, { "docid": "56c764ea66804a3b15099f3aa8b15892", "score": "0.51700145", "text": "def test_mnist_fails_transfer_learning(self) -> None:\n self.assertRaises(\n NotImplementedError, lambda: self.mnist_test(model=\"FakeModelIsFake\")\n )", "title": "" }, { "docid": "3bf7825b4ac6253e8b3a47cb0fc04953", "score": "0.51541966", "text": "def fit(self, input_data, input_truths, initial_weights=None):\n X, X_test, y, y_test = train_test_split(input_data, input_truths, test_size=0.25, random_state=42)\n patternSize = X.shape[1]\n batchSize = X.shape[0]\n # print(inputWeights.shape)\n # print(hiddenWeights.shape)\n bias = np.array([1.0])\n self.input_weights , self.hidden_weights = self.initialize_random_weights(patternSize)\n self.changed_input_weights, self.changed_hidden_weights = self.init_zero_weights(patternSize)\n misses = 0\n correct = 0\n num_epoch = 0\n while(True and num_epoch < 50):\n for i in range(batchSize):\n currPat = np.zeros(patternSize)\n for j in range(patternSize):\n currPat[j] = X[i][j]\n input_layer = np.concatenate((currPat, bias))\n #print(\"input layer \", input_layer)\n #print(\"input weights\", self.input_weights)\n hidden_netArray = self.computeNet(self.input_weights, input_layer)\n #print(\"hidden net \", hidden_netArray)\n hidden_outputArray = self.computeOutput(hidden_netArray)\n #print(\"output hidden \", hidden_outputArray)\n hidden_layer = np.concatenate((hidden_outputArray, bias))\n #print(\"hidden layer \", hidden_layer)\n outputNet = self.computeNet(self.hidden_weights, hidden_layer)\n #print(\"output final \", outputNet)\n my_pred = self.computeOutput(outputNet)\n\n self.recordTestMSE(my_pred, y[i])\n \n if my_pred < .333333:\n round_pred = 0\n elif my_pred < .6666666 and my_pred > .3333333:\n round_pred = 1\n else:\n round_pred = 2\n\n if round_pred != y[i]:\n self.changeWeight(round_pred, y[i], hidden_layer, input_layer)\n misses += 1\n else:\n correct += 1\n\n # if(round(my_pred[0], 1) != y[i]/10):\n # self.changeWeight(my_pred, y[i]/10, hidden_layer, input_layer)\n # misses += 1\n # else:\n # correct 
+=1\n\n #\n # if(round(my_pred[0], 0) != y[i]):\n # self.changeWeight(my_pred, y[i], hidden_layer, input_layer)\n # misses += 1\n # else:\n # correct +=1\n self.calculateTestMSE()\n breakCondition = self.validation_set(X_test, y_test)\n if breakCondition:\n break\n\n if self._shuffle_data:\n X, y = self._shuffle_data(X, y)\n \n num_epoch += 1\n\n\n \n print(\"num of epoch \", num_epoch)\n print(\"num of misses \", misses)\n print(\"corrects \", correct)\n #print(\"MSE loss \", self.MSELossArray[-1])\n\n \n return self", "title": "" }, { "docid": "c16e3b8d803e1ed999cf6b35ea836026", "score": "0.5152684", "text": "def test08(self):\n model = self.setup_model02()\n model.x[1].fix(1)\n\n model.dual[model.g] = 1\n model.ipopt_zL_out[model.x[1]] = 1\n model.ipopt_zL_out[model.x[2]] = 1\n model.ipopt_zU_out[model.x[1]] = 1\n model.ipopt_zU_out[model.x[2]] = 1\n\n to_json(model, fname=self.fname, wts=StoreSpec.suffix())\n\n model.dual[model.g] = 10\n model.ipopt_zL_out[model.x[1]] = 10\n model.ipopt_zL_out[model.x[2]] = 10\n model.ipopt_zU_out[model.x[1]] = 10\n model.ipopt_zU_out[model.x[2]] = 10\n model.g.deactivate()\n model.x[1].setlb(-4)\n model.x[1].unfix()\n model.x[2].fix(6)\n\n from_json(model, fname=self.fname, wts=StoreSpec.suffix())\n assert(abs(value(model.x[1]) - 1) < 1e-5)\n assert(abs(value(model.x[2]) - 6) < 1e-5)\n assert(not model.x[1].fixed)\n assert(model.x[2].fixed)\n assert(not model.g.active)\n assert(abs(model.dual[model.g] - 1) < 1e-5)\n assert(abs(model.ipopt_zL_out[model.x[1]] - 1) < 1e-5)\n assert(abs(model.ipopt_zL_out[model.x[2]] - 1) < 1e-5)\n assert(abs(model.ipopt_zU_out[model.x[1]] - 1) < 1e-5)\n assert(abs(model.ipopt_zU_out[model.x[2]] - 1) < 1e-5)\n assert(abs(model.x[1].lb + 4) < 1e-5)", "title": "" }, { "docid": "cf9be7b6492a4232ba310396323f244f", "score": "0.5142797", "text": "def test_translate_state(self):\n line = ElementaryLine([0.0, 1.3, 0.8])\n\n # Translate IS.\n line.translate_state(\"IS\", -0.2)\n self.assertEqual(line.y[0], -0.2)\n\n # Translate TS.\n ref_y = line.eigen_points.C[1] + 0.1\n line.translate_state(\"TS\", 0.1)\n self.assertAlmostEqual(ref_y, line.eigen_points.C[1], places=2)\n\n # Translate FS.\n ref_y = line.eigen_points.E[1] - 0.2\n line.translate_state(\"FS\", -0.2)\n self.assertAlmostEqual(ref_y, line.eigen_points.E[1], places=2)\n\n # Check invalid state name.\n self.assertRaises(ValueError, line.translate_state, \"asd\", 0.3)", "title": "" }, { "docid": "3f5e77a13297b9cb3b5da07f939a5b59", "score": "0.51400465", "text": "def test_invalid_input_is_uncased(self):\n msg1 = 'Must raise `TypeError` when input `is_uncased` is invalid.'\n msg2 = 'Inconsistent error message.'\n examples = (\n 0, 1, -1, 0.0, 1.0, math.nan, -math.nan, math.inf, -math.inf, 0j,\n 1j, '', b'', (), [], {}, set(), object(), lambda x: x, type, None,\n NotImplemented, ...\n )\n\n for invalid_input in examples:\n with self.assertRaises(TypeError, msg=msg1) as ctx_man:\n BaseConfig(\n dataset='test',\n experiment='test',\n is_uncased=invalid_input\n )\n\n self.assertEqual(\n ctx_man.exception.args[0],\n '`is_uncased` must be an instance of `bool`.',\n msg=msg2\n )", "title": "" }, { "docid": "90e2eec4ba4f675b56b79a52af9fcab5", "score": "0.5132372", "text": "def test_invalid_input_dropout(self):\n msg1 = (\n 'Must raise `TypeError` or `ValueError` when input `dropout` is '\n 'invalid.'\n )\n msg2 = 'Inconsistent error message.'\n examples = (\n -1, -1.0, 1.1, math.nan, -math.nan, math.inf, -math.inf, 0j, 1j,\n '', b'', (), [], {}, set(), object(), lambda x: x, type, 
None,\n NotImplemented, ...\n )\n\n for invalid_input in examples:\n with self.assertRaises(\n (TypeError, ValueError),\n msg=msg1\n ) as ctx_man:\n BaseConfig(dataset='test', dropout=invalid_input)\n\n if isinstance(ctx_man.exception, TypeError):\n self.assertEqual(\n ctx_man.exception.args[0],\n '`dropout` must be an instance of `float`.',\n msg=msg2\n )\n else:\n self.assertEqual(\n ctx_man.exception.args[0],\n '`dropout` must range from `0.0` to `1.0`.',\n msg=msg2\n )", "title": "" }, { "docid": "42188f884e76799367e6af84360616a5", "score": "0.51304674", "text": "def test_correct_behavior(self):\n inputs = [\n SpeakerMsg.RIGHT_LANE,\n SpeakerMsg.OVERTAKING_ZONE,\n SpeakerMsg.LEFT_LANE,\n SpeakerMsg.RIGHT_LANE,\n SpeakerMsg.NO_OVERTAKING_ZONE,\n ]\n states = [\n OvertakingStateMachine.off,\n OvertakingStateMachine.right,\n OvertakingStateMachine.left,\n OvertakingStateMachine.right,\n OvertakingStateMachine.off,\n ]\n\n self.state_machine_assert_on_input(\n OvertakingStateMachine(self.callback), inputs, states, 4\n )", "title": "" }, { "docid": "9c0de0e87378bd49170798bb3d7474c4", "score": "0.5130435", "text": "def test_line_outofrange(self):\n self.st.append( (4,0) )\n self.o.state = self.st\n self.assertTrue(self.o.timer == 0, \"timer is wrong\")\n self.assertTrue(self.o.state == (), \"state is wrong\")\n self.assertEqual(self.o.board.count(0), self.o.nbl*self.o.nbc,\n \"board is wrong\")", "title": "" }, { "docid": "e1522e1d488adba69959776211814be9", "score": "0.5130062", "text": "def sanity_check(state_dict, pretrained_weights, linear_keyword):\n print(\"=> loading '{}' for sanity check\".format(pretrained_weights))\n checkpoint = paddle.load(pretrained_weights)\n state_dict_pre = checkpoint['state_dict']\n\n for k in list(state_dict.keys()):\n # only ignore linear layer\n if '%s.weight' % linear_keyword in k or '%s.bias' % linear_keyword in k:\n continue\n\n # name in pretrained model\n k_pre = 'base_encoder.' 
+ k\n\n assert ((state_dict[k].cpu() == state_dict_pre[k_pre]).all()), \\\n '{} is changed in linear classifier training.'.format(k)\n\n print(\"=> sanity check passed.\")", "title": "" }, { "docid": "8ff61133d4557f7828babf53a661d099", "score": "0.51236385", "text": "def test_nochange_does_not_change_results(self):\n # Expected value.\n exp = [\n [\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n ],\n [\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n ],\n ]\n\n # Set up test data and state.\n a = np.array([\n [\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n ],\n [\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n [-0x80, 0x00, 0x80, 0xff, 0x180,],\n ],\n ], dtype=float)\n action = ease.nochange\n\n # Run test and determine if passed.\n overflows_test(self, exp, a, action)", "title": "" }, { "docid": "1ddefa8f481bf2ddaeb7dfcced4feb3e", "score": "0.51235247", "text": "def test_invalid(self, state, until):\n state.until = until\n with pytest.raises(TypeError) as e:\n _ = state.to_dict()\n assert str(type(until)) in str(e.value)", "title": "" }, { "docid": "0c4f0436f94e4afa9454a207c16a153b", "score": "0.51142526", "text": "def test_invalid_input_num_linear_layers(self):\n msg1 = (\n 'Must raise `TypeError` or `ValueError` when input '\n '`num_linear_layers` is invalid.'\n )\n msg2 = 'Inconsistent error message.'\n examples = (\n False, 0, -1, 0.0, 1.0, math.nan, -math.nan, math.inf, -math.inf,\n 0j, 1j, '', b'', (), [], {}, set(), object(), lambda x: x, type,\n None, NotImplemented, ...\n )\n\n for invalid_input in examples:\n with self.assertRaises(\n (TypeError, ValueError),\n msg=msg1\n ) as ctx_man:\n BaseConfig(\n dataset='test',\n experiment='test',\n num_linear_layers=invalid_input\n )\n\n if isinstance(ctx_man.exception, TypeError):\n self.assertEqual(\n ctx_man.exception.args[0],\n '`num_linear_layers` must be an instance of `int`.',\n msg=msg2\n )\n else:\n self.assertEqual(\n ctx_man.exception.args[0],\n '`num_linear_layers` must be bigger than or equal to `1`.',\n msg=msg2\n )", "title": "" }, { "docid": "1abde325f66549ee7bb8cd620dc5613f", "score": "0.5106199", "text": "def _validate_initial_statevector(self):\n if self._initial_statevector is not None:\n raise C3QiskitError(\n \"Setting initial statevector is not implemented in this simulator\"\n )\n else:\n pass", "title": "" }, { "docid": "37107e7fbb536479ffd6ba41b3820c00", "score": "0.510251", "text": "def is_valid_state(state):\n # This function is not currently used\n return True", "title": "" }, { "docid": "29a34a35b7c342095fb0f311174a8357", "score": "0.5095329", "text": "def test_validate_pmtct_loss_optouts_malformed_data(self):\n # Setup\n change_data = {\n \"registrant_id\": \"mother01\",\n \"action\": \"pmtct_loss_switch\",\n \"data\": {\n \"reason\": \"not a reason we accept\"\n },\n \"source\": self.make_source_normaluser()\n }\n change = Change.objects.create(**change_data)\n # Execute\n c = validate_implement.validate(change)\n # Check\n change.refresh_from_db()\n self.assertEqual(c, False)\n 
self.assertEqual(change.validated, False)\n self.assertEqual(change.data[\"invalid_fields\"], [\n 'Invalid UUID registrant_id', 'Not a valid loss reason']\n )", "title": "" }, { "docid": "d8509411fa940fb2e0cf443ff407fde0", "score": "0.5094885", "text": "def test_allbad_network(self):\n # setup environment\n run_command_blocking(netem_change.format(\n \"corrupt 1% duplicate 10% loss 10% 25% delay 20ms reorder 25% 50%\"\n ))\n\n run_command_blocking(TestbTCPFramework.start_client)\n self.assertTrue(filecmp.cmp(TestbTCPFramework.input_file, \"out.file\"))", "title": "" }, { "docid": "e5b15ba0480c08f43b4c18bffc67dc2f", "score": "0.5088943", "text": "def test_model_type(self):\n with self.assertRaises(ValueError):\n tfgan.losses.cycle_consistency_loss(self._model_x2y)", "title": "" }, { "docid": "7cd7f9e552727eab83be30cee2219416", "score": "0.50856465", "text": "def test_random_state(self):\n self.plotter_tailored_LOGS.umap(n_neighbors=15, random_state=1, min_dist=0.9)\n self.assertEqual(self.plotter_tailored_LOGS.umap_fit.random_state, 1)", "title": "" }, { "docid": "78a9d4332aafc9721cca67fd1838f9c9", "score": "0.50831723", "text": "def test_update_cap_bad_state(self):\n with self.assertRaisesRegex(ValueError, 'Equipment .* has invali'):\n self.ind._update_cap(self.map['capbank0a'],\n glm_mgr=self.fresh_mgr)", "title": "" }, { "docid": "b7686203a1afa6f95af3127f88334bf8", "score": "0.5083171", "text": "def find_states(markov_model, output):\n mm = markov_model\n N = len(mm.states)\n \n # _viterbi does calculations in log space. Add a tiny bit to the\n # matrices so that the logs will not break.\n x = mm.p_initial + VERY_SMALL_NUMBER\n y = mm.p_transition + VERY_SMALL_NUMBER\n z = mm.p_emission + VERY_SMALL_NUMBER\n lp_initial, lp_transition, lp_emission = map(numpy.log, (x, y, z))\n # Change output into a list of indexes into the alphabet.\n indexes = itemindex(mm.alphabet)\n output = [indexes[x] for x in output]\n \n # Run the viterbi algorithm.\n results = _viterbi(N, lp_initial, lp_transition, lp_emission, output)\n\n for i in range(len(results)):\n states, score = results[i]\n results[i] = [mm.states[x] for x in states], numpy.exp(score)\n return results", "title": "" }, { "docid": "1ec2795abef51f6778311d55b4348e9d", "score": "0.5075184", "text": "def testInvalidIdxValue(self):\n input1 = tf.placeholder(tf.float32, shape=[2, 3, 4, 5, 6])\n input2 = tf.placeholder(tf.float32, shape=[7, 8])\n\n invalid_idx = 2\n mod = snt.SelectInput(idx=[invalid_idx])\n\n err = (r\"`idx` contains out of bound entries \\(they should be in the \"\n r\"range \\[0, 2\\)\\)\")\n with self.assertRaisesRegexp(ValueError, err):\n mod(input1, input2)", "title": "" }, { "docid": "99f75f11dbbce1039100ea2e7bf6b4e0", "score": "0.5074249", "text": "def test_validate_momconnect_loss_optouts_malformed_data(self):\n # Setup\n change_data = {\n \"registrant_id\": \"mother01\",\n \"action\": \"momconnect_loss_switch\",\n \"data\": {\n \"reason\": \"not a reason we accept\"\n },\n \"source\": self.make_source_normaluser()\n }\n change = Change.objects.create(**change_data)\n # Execute\n c = validate_implement.validate(change)\n # Check\n change.refresh_from_db()\n self.assertEqual(c, False)\n self.assertEqual(change.validated, False)\n self.assertEqual(change.data[\"invalid_fields\"], [\n 'Invalid UUID registrant_id', 'Not a valid loss reason']\n )", "title": "" }, { "docid": "08a74bfcbfb2024036cc199752c84a6c", "score": "0.50688946", "text": "def test_evaluation_input_model():\n\twith pytest.raises(ValueError) as 
excinfo1:\n\t\twith open('test/test_model.pkl','rb') as f:\n\t\t\tfit = pickle.load(f)\n\n\t\t#split data\n\t\tfinal_df = pd.read_csv('test/bad_test_data.csv')\n\t\tY = np.log10(final_df['price'])\n\t\tX = final_df.drop(['price'], axis = 'columns', inplace = False)\n\t\t#Split into train and validation\n\t\tX_train, X_val, y_train, y_val = train_test_split(X, Y, test_size=0.33, random_state = 3)\n\t\t#predict with test dataset\n\t\ty_pred = fit.predict(X_val)\n\tassert str(excinfo1.value) == 'Number of features of the model must match the input. Model n_features is 8 and input n_features is 9 '", "title": "" }, { "docid": "7f1ad2adaa7275b1e76bbf2544b947c7", "score": "0.50679624", "text": "def _test_update_state_fn(self):\n return encoding_stage._tf_style_update_state(\n lambda _, s, sut, name: {'state': s['state'] + sut['tensor']})", "title": "" }, { "docid": "59c7f78da900772b7c06b19fe1e7172b", "score": "0.50599873", "text": "def pretrain(self):\n\n ord = self.ord\n num = 50\n delta = 2.0 / (num - 1)\n test_state = -np.ones((num * num, 2))\n test_label = np.ones((num * num, 1))\n for i in range(num):\n for j in range(num):\n o = i * num + j\n s = np.array([i * delta, j * delta])\n test_state[o] += s\n if ord == 0:\n if test_state[o, 0] > 0 and test_state[o, 1] > 0:\n test_label[o] = 0.0\n elif ord == 1:\n if test_state[o, 0] < 0 < test_state[o, 1]:\n test_label[o] = 0.0\n elif ord == 2:\n if test_state[o, 0] < 0 and test_state[o, 1] < 0:\n test_label[o] = 0.0\n elif ord == 3:\n if test_state[o, 1] < 0 < test_state[o, 0]:\n test_label[o] = 0.0\n\n bound = 1e-2\n while True:\n self.sess.run(self.oop, feed_dict={\n self.s: test_state,\n self.prob: test_label\n })\n a = self.sess.run(self.diff, feed_dict={\n self.s: test_state,\n self.prob: test_label\n })\n if a < bound:\n break", "title": "" }, { "docid": "59c7f78da900772b7c06b19fe1e7172b", "score": "0.50599873", "text": "def pretrain(self):\n\n ord = self.ord\n num = 50\n delta = 2.0 / (num - 1)\n test_state = -np.ones((num * num, 2))\n test_label = np.ones((num * num, 1))\n for i in range(num):\n for j in range(num):\n o = i * num + j\n s = np.array([i * delta, j * delta])\n test_state[o] += s\n if ord == 0:\n if test_state[o, 0] > 0 and test_state[o, 1] > 0:\n test_label[o] = 0.0\n elif ord == 1:\n if test_state[o, 0] < 0 < test_state[o, 1]:\n test_label[o] = 0.0\n elif ord == 2:\n if test_state[o, 0] < 0 and test_state[o, 1] < 0:\n test_label[o] = 0.0\n elif ord == 3:\n if test_state[o, 1] < 0 < test_state[o, 0]:\n test_label[o] = 0.0\n\n bound = 1e-2\n while True:\n self.sess.run(self.oop, feed_dict={\n self.s: test_state,\n self.prob: test_label\n })\n a = self.sess.run(self.diff, feed_dict={\n self.s: test_state,\n self.prob: test_label\n })\n if a < bound:\n break", "title": "" }, { "docid": "b7b2ba4ac2816f166c0a3568a6cae36f", "score": "0.5052037", "text": "def runTest(self):\n \n # Since we are creating new states for experiments from the first one \n # the test is going to create the first state from all the others by applying\n # first experiment changes and then check if it produces the same state\n \n state, experiments = readFromFile('inputs/testExperiments.dat')\n results = createStatesFromExperiments(state, experiments)\n \n firstState, firstExperiment = results[0]\n for state, _ in results[1:]:\n state = addStateForExperiment(firstExperiment, state)\n \n #Buses\n buses = {}\n for route in state.routes:\n buses[route.number] = 0\n buses2 = deepcopy(buses)\n for bus in state.buses:\n buses[bus.routeNr] += 1\n for bus in 
firstState.buses:\n buses2[bus.routeNr] += 1\n \n self.failUnless(buses == buses2, 'The number of buses in states are not the same: %(one)s and %(two)s' % {'one':buses, 'two':buses2})\n \n \n #Capacity\n for bus in state.buses:\n for bus2 in firstState.buses:\n if bus.id == bus2.id:\n self.failUnless(bus.capacity == bus2.capacity, 'Bus capacities are not the same for buses: %(one)s and %(two)s' % {'one':bus.__dict__, 'two':bus2.__dict__})\n \n \n #Roads\n for road in state.roads:\n for road2 in firstState.roads:\n if road.starts == road2.starts and road.ends == road2.ends:\n self.failUnless(road.__eq__(road2), 'Roads from %(starts)s to %(ends)s are not the same' % {'starts':road.starts, 'ends':road.ends})\n \n #Boards rate\n self.failUnless(firstState.boards == state.boards, 'Board rates are not the same for states: %(one)s and %(two)s' % {'one':state.__dict__, 'two':state.__dict__})\n \n #Disembarks rate\n self.failUnless(firstState.disembarks == state.disembarks, 'Disembarks rates are not the same for states: %(one)s and %(two)s' % {'one':state.__dict__, 'two':state.__dict__})\n \n #Depart rate\n self.failUnless(firstState.busDeparts == state.busDeparts, 'Bus depart rates are not the same for states: %(one)s and %(two)s' % {'one':state.__dict__, 'two':state.__dict__})\n \n #New passengers rate\n self.failUnless(firstState.paxArrives == state.paxArrives, 'New passenger rates are not the same for states: %(one)s and %(two)s' % {'one':state.__dict__, 'two':state.__dict__})", "title": "" }, { "docid": "ca6a5a3d883409961b407e5a98bca5b6", "score": "0.50501597", "text": "def validate_input_layer(self, incoming):\n assert len(self.input_shape) == 2, (\n \"[{}] Input shape error: expected \"\n \"(batch_size, num_units)\".format(self.type))\n return True", "title": "" }, { "docid": "6989c9241af15a2b758373758a8897ba", "score": "0.50468147", "text": "def _predict_one_step(self, state_vals, control_input_vals):\r\n pass", "title": "" }, { "docid": "e714a380594a27ddc7cbffe80b1b34ec", "score": "0.50406533", "text": "def test_bad_treatment(self):\n with mn.model(treatments=['foo']) as m:\n mn.constant('InterestRate', 0.04)\n self.assertEqual(\n m.validate_and_set('InterestRate', 'bar', 0.05),\n {\n 'success': False, \n 'variable': 'InterestRate',\n 'treatment': 'bar',\n 'amount': 0.05,\n 'error_code': 'UnknownTreatment',\n 'error_message': 'Treatment bar not known.',\n })", "title": "" }, { "docid": "af0479563aa5be127dde8c7314d7d840", "score": "0.5038226", "text": "def verify_state(rec, orig):\n pass", "title": "" }, { "docid": "04c5f599c568bfac0cd347e83c8f76c6", "score": "0.50382185", "text": "def train_bw(states, alphabet, training_data, \n pseudo_initial=None, pseudo_transition=None, pseudo_emission=None,\n update_fn=None, \n ):\n N, M = len(states), len(alphabet)\n if not training_data:\n raise ValueError(\"No training data given.\")\n if pseudo_initial!=None:\n pseudo_initial = numpy.asarray(pseudo_initial)\n if pseudo_initial.shape != (N,):\n raise ValueError(\"pseudo_initial not shape len(states)\")\n if pseudo_transition!=None:\n pseudo_transition = numpy.asarray(pseudo_transition)\n if pseudo_transition.shape != (N,N):\n raise ValueError(\"pseudo_transition not shape \" + \\\n \"len(states) X len(states)\")\n if pseudo_emission!=None:\n pseudo_emission = numpy.asarray(pseudo_emission)\n if pseudo_emission.shape != (N,M):\n raise ValueError(\"pseudo_emission not shape \" + \\\n \"len(states) X len(alphabet)\")\n \n # Training data is given as a list of members of the alphabet.\n # Replace those 
with indexes into the alphabet list for easier\n # computation.\n training_outputs = []\n indexes = itemindex(alphabet)\n for outputs in training_data:\n training_outputs.append([indexes[x] for x in outputs])\n\n # Do some sanity checking on the outputs.\n lengths = map(len, training_outputs)\n if min(lengths) == 0:\n raise ValueError(\"I got training data with outputs of length 0\")\n\n # Do the training with baum welch.\n x = _baum_welch(N, M, training_outputs,\n pseudo_initial=pseudo_initial,\n pseudo_transition=pseudo_transition,\n pseudo_emission=pseudo_emission,\n update_fn=update_fn)\n p_initial, p_transition, p_emission = x\n return MarkovModel(states, alphabet, p_initial, p_transition, p_emission)", "title": "" }, { "docid": "270f13f51819ee7581c9fbd59efe8f98", "score": "0.5030384", "text": "def is_valid(self, state):\n return True", "title": "" }, { "docid": "040bb8c1c8154722b89b7726987bed2b", "score": "0.50233865", "text": "def anihilation(i,state_in):\n if not (state_in[i] == 0):\n coef = np.sqrt(state_in[i])\n state_out=state_in.copy()\n state_out[i]=state_out[i]-1\n stop = False\n return state_out,coef,stop\n else:\n #print('This state cant be lowered at', i,'!', )\n stop = True \n state_out= []\n coef=0\n return state_out,coef,stop", "title": "" }, { "docid": "cdc5832fa9d94940695f794ca74fcacc", "score": "0.5015712", "text": "def test_validate_momconnect_nonloss_optouts_missing_data(self):\n # Setup\n change_data = {\n \"registrant_id\": \"mother01-63e2-4acc-9b94-26663b9bc267\",\n \"action\": \"momconnect_loss_switch\",\n \"data\": {},\n \"source\": self.make_source_normaluser()\n }\n change = Change.objects.create(**change_data)\n # Execute\n c = validate_implement.validate(change)\n # Check\n change.refresh_from_db()\n self.assertEqual(c, False)\n self.assertEqual(change.validated, False)\n self.assertEqual(change.data[\"invalid_fields\"], [\n 'Optout reason is missing']\n )", "title": "" }, { "docid": "c0978bd790d2570f6e7fb415b5f81c2a", "score": "0.5013508", "text": "def test_raises_invalid_metric_consecutive_runs():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\", metric=\"recall\")\n pytest.raises(ValueError, atom.run, \"Tree\", metric=\"f1\")", "title": "" }, { "docid": "70c06b9f3b6189bee12710ed7e18a164", "score": "0.5012923", "text": "def reset_state(self, default_input):\n\n # reset the service statuses\n if default_input is None:\n num_nodes_up = (self.size_graph * self.config.ratios[0]) // np.sum(self.config.ratios)\n num_nodes_down = (self.size_graph * self.config.ratios[1]) // np.sum(self.config.ratios)\n num_nodes_unavailable = self.size_graph - (num_nodes_down + num_nodes_up)\n self.nn_input = np.concatenate([np.zeros(num_nodes_down, dtype=np.int),\n np.ones(num_nodes_up, dtype=np.int),\n np.ones(num_nodes_unavailable, dtype=np.int) * -1,\n np.zeros(2, dtype=np.int)])\n np.random.shuffle(self.nn_input)\n self.nn_input[-2] = self.config.att_points\n self.nn_input[-1] = self.config.def_points\n else:\n np.copyto(self.nn_input, default_input)\n\n # reset the scores\n self.reset_scores()\n\n # reset the actions\n self.reset_actions()", "title": "" }, { "docid": "ac9d8d6c79d8c7179295a958c7f83e43", "score": "0.500918", "text": "def test_correct_state(self, rep, tol):\n\n dev = qml.device(\"default.tensor.tf\", wires=2, representation=rep)\n\n state = dev._state()\n\n expected = np.array([[1, 0], [0, 0]])\n assert np.allclose(state, expected, atol=tol, rtol=0)\n\n @qml.qnode(dev)\n def circuit():\n qml.Hadamard(wires=0)\n return 
qml.expval(qml.PauliZ(0))\n\n circuit()\n state = dev._state()\n\n expected = np.array([[1, 0], [1, 0]]) / np.sqrt(2)\n assert np.allclose(state, expected, atol=tol, rtol=0)", "title": "" }, { "docid": "541adf2d6cbc796142906ad40b4d5759", "score": "0.50081015", "text": "def test_inf(self):\n agent = Agent()\n agent.change_state()\n self.assertEqual(agent.state, 'I')", "title": "" }, { "docid": "3b8b1d24ac1a453f4342af0e0a9038de", "score": "0.50032306", "text": "def sample_state_uninformed(self):\n return self.sample_an_init_state()", "title": "" }, { "docid": "0a4d6df5cc18ce5864ec2654c12a89b9", "score": "0.5002355", "text": "def test_bad_input(self):\n bad_node_taxid = \"\"\"\\t|\\t6\\t|\\tspecies\\t|\\tAC\\t|\\t0\\t|\\t1\\t|\\t11\\t|\\t1\\t|\\t0\\t|\\t1\\t|\\t0\\t|\\t0\\t|\\t\\t|\\n\"\"\" # contains no taxon_id; not valid\n bad_node_parentid = \"\"\"7\\t|\\t\\t|\\tspecies\\t|\\tAC\\t|\\t0\\t|\\t1\\t|\\t11\\t|\\t1\\t|\\t0\\t|\\t1\\t|\\t0\\t|\\t0\\t|\\t\\t|\\n\"\"\" # contains no parent_id; not valid\n self.assertRaises(ValueError, NcbiTaxon, bad_node_taxid)\n self.assertRaises(ValueError, NcbiTaxon, bad_node_parentid)", "title": "" }, { "docid": "bcd250942301d1d3df6055f736fea9c4", "score": "0.49985078", "text": "def test_parse_input(self):\n input_dict = {\n 'feed': 1.0,\n 'pet': 1.0,\n 'excercise': 1.0,\n 'immunize': 1.0,\n 'clean': 1.0,\n }\n\n parsed_input = self.t._parse_input(input_dict)\n\n self.assertEqual(parsed_input.shape, (len(ALLOWABLE_INPUTS), 1))", "title": "" }, { "docid": "5ae083ccd360e63ba9f878462b3b3cb4", "score": "0.499665", "text": "def test_apply_transition(self):\n\n # Eager mode testing\n hparams = get_default_hparams()\n energy_fn, _, _ = l2hmc.get_scg_energy_fn()\n dynamics = l2hmc.Dynamics(\n x_dim=hparams.x_dim,\n minus_loglikelihood_fn=energy_fn,\n n_steps=hparams.n_steps,\n eps=hparams.eps)\n samples = tf.random_normal(shape=[hparams.n_samples, hparams.x_dim])\n x_, v_, x_accept_prob, x_out = dynamics.apply_transition(samples)\n\n self.assertEqual(x_.shape, v_.shape)\n self.assertEqual(x_out.shape, samples.shape)\n self.assertEqual(x_.shape, x_out.shape)\n self.assertEqual(x_accept_prob.shape, (hparams.n_samples,))\n\n # Graph mode testing\n with tf.Graph().as_default():\n energy_fn, _, _ = l2hmc.get_scg_energy_fn()\n dynamics = l2hmc.Dynamics(\n x_dim=hparams.x_dim,\n minus_loglikelihood_fn=energy_fn,\n n_steps=hparams.n_steps,\n eps=hparams.eps)\n x = tf.placeholder(tf.float32, shape=[None, hparams.x_dim])\n x_, v_, x_accept_prob, x_out = dynamics.apply_transition(x)\n samples = npr.normal(size=[hparams.n_samples, hparams.x_dim])\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n np_x_, np_v_, np_x_accept_prob, np_x_out = sess.run(\n [x_, v_, x_accept_prob, x_out], feed_dict={x: samples})\n\n self.assertEqual(np_x_.shape, np_v_.shape)\n self.assertEqual(samples.shape, np_x_out.shape)\n self.assertEqual(np_x_.shape, np_x_out.shape)\n self.assertEqual(np_x_accept_prob.shape, (hparams.n_samples,))", "title": "" }, { "docid": "5425db944c63330e78d1796456e5fbd2", "score": "0.4987878", "text": "def test_invalid_input_d_emb(self):\n msg1 = (\n 'Must raise `TypeError` or `ValueError` when input `d_emb` is '\n 'invalid.'\n )\n msg2 = 'Inconsistent error message.'\n examples = (\n False, 0, -1, 0.0, 1.0, math.nan, -math.nan, math.inf, -math.inf,\n 0j, 1j, '', b'', (), [], {}, set(), object(), lambda x: x, type,\n None, NotImplemented, ...\n )\n\n for invalid_input in examples:\n with self.assertRaises(\n (TypeError, ValueError),\n msg=msg1\n ) 
as ctx_man:\n BaseConfig(d_emb=invalid_input)\n\n if isinstance(ctx_man.exception, TypeError):\n self.assertEqual(\n ctx_man.exception.args[0],\n '`d_emb` must be an instance of `int`.',\n msg=msg2\n )\n else:\n self.assertEqual(\n ctx_man.exception.args[0],\n '`d_emb` must be bigger than or equal to `1`.',\n msg=msg2\n )", "title": "" }, { "docid": "4b0390cde2545bcab5df5bbca7002366", "score": "0.4987416", "text": "def validate(args, trainer, task, epoch_itr, subsets):\n valid_losses = []\n for subset in subsets:\n # Initialize data iterator\n itr = task.get_batch_iterator(\n dataset=task.dataset(subset),\n max_tokens=args.max_tokens_valid,\n max_sentences=args.max_sentences_valid,\n max_positions=utils.resolve_max_positions(\n task.max_positions(),\n trainer.get_model().max_positions(),\n ),\n ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=args.required_batch_size_multiple,\n seed=args.seed,\n num_shards=args.distributed_world_size,\n shard_id=args.distributed_rank,\n num_workers=args.num_workers,\n ).next_epoch_itr(shuffle=False)\n progress = progress_bar.build_progress_bar(\n args, itr, epoch_itr.epoch,\n prefix='valid on \\'{}\\' subset'.format(subset),\n no_progress_bar='simple'\n )\n\n # reset validation loss meters\n for k in ['valid_loss', 'valid_nll_loss']:\n meter = trainer.get_meter(k)\n if meter is not None:\n meter.reset()\n extra_meters = collections.defaultdict(lambda: AverageMeter())\n preds, targets = [], []\n for sample in progress:\n # print(sample)\n log_output = trainer.valid_step(sample)\n # print(\"valid_step_log_output\",log_output)\n for k, v in log_output.items():\n if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:\n continue\n elif k in ['f1','accuracy',\"acc_f1_avg\"]:\n continue\n elif k == 'preds':\n preds.extend(v.tolist())\n elif k == \"targets\":\n targets.extend(v.tolist())\n else:\n extra_meters[k].update(v)\n print(preds,targets)\n \n output_eval_file = os.path.join(args.save_dir, \"eval_results.txt\")\n with open(output_eval_file, \"w\") as writer:\n #rte\n # label_list =[\"entailment\",\"not_entailment\"]\n #copa\n label_list =[0,1]\n #cb\n # label_list =['contradiction','entailment','neutral']\n # print(label_list)\n for i in range(len(preds)):\n label_i = label_list[preds[i]]\n # json_i= \"\\\"idx: %d, \\\"label\\\": \\\"label_i\\\"\"\n writer.write(\"{\\\"idx\\\": %d, \\\"label\\\": \\\"%s\\\"}\\n\"%(i,label_i))\n\n\n # log validation stats\n # print(\"extra_meters: \", extra_meters)\n # print(f1_score(targets, preds, average='macro'))\n f1= f1_score(targets, preds, average='macro')\n acc= accuracy_score(targets, preds)\n # print(\"acc: \",acc, \" f1: \",f1)\n extra_meters['f1'].update(f1)\n extra_meters['accuracy'].update(acc)\n extra_meters['acc_f1_avg'].update((acc+f1)/2.0)\n # print(f1,acc)\n stats = get_valid_stats(trainer, args, extra_meters)\n # print(\"stats2: \", stats)\n for k, meter in extra_meters.items():\n # print(k,meter)\n stats[k] = meter.avg\n # print(stats)\n progress.print(stats, tag=subset, step=trainer.get_num_updates())\n\n # print(\"stats3: \", stats)\n valid_losses.append(\n stats[args.best_checkpoint_metric].avg\n if args.best_checkpoint_metric == 'loss'\n else stats[args.best_checkpoint_metric]\n )\n return valid_losses", "title": "" }, { "docid": "ae35d4a5f5b06954ff4920d286f9e77d", "score": "0.49845305", "text": "def mutate(self, network):\n\n # Choose a random key.\n mutation = random.choice(list(self.nn_param_choices.keys()))\n\n # Mutate one of the 
params.\n if mutation == 'optimizer':\n # Self explanatory.\n network.network['optimizer'] = random.choice(self.nn_param_choices['optimizer'])\n elif mutation == 'final_act':\n # Self explanatory.\n network.network['final_act'] = random.choice(self.nn_param_choices['final_act'])\n elif mutation == 'layer_info':\n # Mutate the hidden layers.\n if network.network['n_layers'] == 0:\n # If there are no hidden layers, mutating them is equivalent to doing nothing.\n pass\n else:\n for i in range(network.network['n_layers']):\n if self.mutate_chance > random.random():\n network.network['layer_info'][i] = random.choice(self.nn_param_choices['layer_info'])\n else:\n pass\n elif mutation == 'n_layers':\n # Select new number of hidden layers.\n new_n = random.choice(self.nn_param_choices['n_layers'])\n if new_n == network.network['n_layers']:\n # If it's the same, do nothing.\n pass\n elif new_n < network.network['n_layers']:\n # If new_n is smaller than the original, select all layer_info from the original up to new_n.\n new_layer_info_smaller = []\n for i in range(new_n):\n new_layer_info_smaller += [network.network['layer_info'][i]]\n network.network['layer_info'] = new_layer_info_smaller\n network.network['n_layers'] = new_n\n elif new_n > network.network['layer_info']:\n # If new_n is bigger than the original, select all layer_info from the original up to new_n and then complete with random layer_info.\n new_layer_info_bigger = []\n for i in range(new_n):\n try:\n new_layer_info_bigger += [network.network['layer_info'][i]]\n except:\n new_layer_info_bigger += [random.choice(self.nn_param_choices['layer_info'])]\n network.network['layer_info'] = new_layer_info_bigger\n network.network['n_layers'] = new_n\n\n return network", "title": "" }, { "docid": "e9d7faa519c8cbd1063e93a81352201a", "score": "0.49792996", "text": "def check_model(model):\n (initial, tran_model, obs_model) = model\n for state in range(len(initial)):\n assert((abs(sum(tran_model[state,:]) - 1)) <= 0.01)\n assert((abs(sum(obs_model[state,:]) - 1)) <= 0.01)\n assert((abs(sum(initial) - 1)) <= 0.01)", "title": "" } ]
2889e4d95e1a2afe4725a0fa1246c57e
Ingest nodes capture data
[ { "docid": "91cea2ea0f95cb8ff3af87f82893d91c", "score": "0.0", "text": "async def capture_done(self):\n return {\"status\": \"capture_done\"}", "title": "" } ]
[ { "docid": "39747bb26a7f7002dc443639e32f9fb3", "score": "0.66707087", "text": "def execute_ingest(self):", "title": "" }, { "docid": "77c68669678699f652f264717b388401", "score": "0.6277186", "text": "def ingest(self, **kwargs) -> None:\n pass", "title": "" }, { "docid": "c966a38eb7120656fe64b945effd11a4", "score": "0.5781501", "text": "def on_generate_data_for_slave(data):", "title": "" }, { "docid": "279ed3ac8d3b0a929961a26927f2c255", "score": "0.57294023", "text": "def ingest(self) -> List[str]:\n data = json.load(open(self.path, \"r\"))\n export_df = pd.DataFrame(\n {\":START_ID(Neuron)\": data[\"neuron_1\"], \":END_ID(Neuron)\": data[\"neuron_2\"]}\n )\n\n node_names = (\n export_df[\":START_ID(Neuron)\"].append(export_df[\":END_ID(Neuron)\"])\n ).unique()\n\n node_names_dd = dd.from_pandas(\n pd.DataFrame({\"neuronId:ID(Neuron)\": node_names}), npartitions=1\n )\n\n node_fnames = node_names_dd.to_csv(\n self.export_dir + \"export-neurons-*.csv\",\n index=False,\n header=[\"neuronId:ID(Neuron)\"],\n )\n\n # This is absurd, but neo4j can't tolerate file headers in every CSV,\n # and dask can't NOT.\n # So We print off a header file first.\n export_df_dd = dd.from_pandas(export_df, npartitions=1)\n headerpath = self.export_dir + \"export-synapses-header.csv\"\n with open(headerpath, \"w\") as headerfile:\n headerfile.write(\":START_ID(Neuron),:END_ID(Neuron)\")\n\n edge_fnames = export_df_dd.to_csv(\n self.export_dir + \"export-synapses-zdata-*.csv\", index=False, header=False\n )\n\n return node_fnames + [headerpath] + edge_fnames", "title": "" }, { "docid": "59601ebfa9637427e6c3c3a41af3bcdc", "score": "0.5713418", "text": "def process_data():\n add_gps_properties_to_location_nodes()", "title": "" }, { "docid": "f6a74579ebdb64dfbbf465b3e6f69b1c", "score": "0.5634891", "text": "def _collect_node_logs(self) -> None:\n try:\n # Collect the logs on the test machine into a compressed tarball\n self._log.info(\"Collecting logs on test machine [%s]...\", self.context.node.name)\n stdout = self.context.ssh_client.run_command(\"collect-logs\", use_sudo=True)\n self._log.info(stdout)\n\n # Copy the tarball to the local logs directory\n remote_path = \"/tmp/waagent-logs.tgz\"\n local_path = self.context.log_path/'{0}.tgz'.format(self.context.image_name)\n self._log.info(\"Copying %s:%s to %s\", self.context.node.name, remote_path, local_path)\n self.context.ssh_client.copy(remote_path, local_path, remote_source=True)\n except: # pylint: disable=bare-except\n self._log.exception(\"Failed to collect logs from the test machine\")", "title": "" }, { "docid": "cbe9bb4f931b69809eddb9d4dfff4dff", "score": "0.55803925", "text": "def ingest(self) -> List[str]:\n ...", "title": "" }, { "docid": "7ec1b4b36d9344647ccc4d5b99e48cad", "score": "0.554087", "text": "def ingest(ctx, subid):\n\n IngestHydrator(ctx.obj.graph, subid).hydrate()", "title": "" }, { "docid": "2f6799d719a9e06f54628fb715cd47fa", "score": "0.54921776", "text": "def collect(self, config):\n\n results = self.new_data()\n for ds in config.datasources:\n self.component = ds.component\n try:\n ip, port = ds.title.split(':')\n except ValueError:\n # Exception when runing NameNodeMonitor on Device\n ip = ds.manageIp\n port = ds.zHadoopNameNodePort\n\n if ds.datasource == 'NameNodeMonitor':\n conf_url = hadoop_url(\n scheme=ds.zHadoopScheme,\n port=ds.zHadoopNameNodePort,\n host=ds.manageIp,\n endpoint='/conf'\n )\n\n jmx_url = hadoop_url(\n scheme=ds.zHadoopScheme,\n port=port,\n host=ip,\n endpoint='/jmx'\n )\n headers_json = hadoop_headers(\n 
accept='application/json',\n username=ds.zHadoopUsername,\n passwd=ds.zHadoopPassword\n )\n\n headers_xml = hadoop_headers(\n accept='application/xml',\n username=ds.zHadoopUsername,\n passwd=ds.zHadoopPassword\n )\n\n res = {}\n try:\n if ds.datasource == 'NameNodeMonitor':\n res['conf'] = yield getPage(conf_url, headers=headers_xml)\n res['jmx'] = yield getPage(jmx_url, headers=headers_json)\n except Exception as e:\n # Add event if can't connect to some node\n e = check_error(\n e, ds.device,\n \" \".join(re.findall('[A-Z][^A-Z]*', ds.template))\n ) or e\n severity = ZenEventClasses.Error\n summary = str(e)\n results['maps'].extend(self.add_maps(\n res, ds, state=NODE_HEALTH_DEAD)\n )\n\n if res.get('jmx'):\n severity = ZenEventClasses.Clear\n summary = 'Monitoring ok'\n results['values'][self.component] = self.form_values(\n res['jmx'], ds\n )\n results['maps'].extend(self.add_maps(\n res, ds, state=NODE_HEALTH_NORMAL)\n )\n\n results['events'].append({\n 'component': self.component,\n 'summary': summary,\n 'eventKey': ds.datasource,\n 'eventClass': '/Status',\n 'severity': severity,\n })\n\n defer.returnValue(results)", "title": "" }, { "docid": "9dc9d5b99279f5b1a58e3d0427adab84", "score": "0.54839855", "text": "def ingest(self) -> List[str]:\n df = dd.read_csv(self.path).dropna()\n export_df = df.copy()\n export_df[\":START_ID(Neuron)\"] = df[\"presyn_segid\"].astype(\"int\")\n export_df[\":END_ID(Neuron)\"] = df[\"postsyn_segid\"].astype(\"int\")\n for col in df.columns:\n del export_df[col]\n\n node_names = (\n (export_df[\":START_ID(Neuron)\"].append(export_df[\":END_ID(Neuron)\"]))\n .unique()\n .dropna()\n )\n\n node_fnames = node_names.to_csv(\n self.export_dir + \"export-neurons-*.csv\",\n index=False,\n header=[\"neuronId:ID(Neuron)\"],\n )\n\n # This is absurd, but neo4j can't tolerate file headers in every CSV,\n # and dask can't NOT.\n # So We print off a header file first.\n headerpath = self.export_dir + \"export-synapses-header.csv\"\n with open(headerpath, \"w\") as headerfile:\n headerfile.write(\":START_ID(Neuron),:END_ID(Neuron)\")\n\n edge_fnames = export_df.to_csv(\n self.export_dir + \"export-synapses-zdata-*.csv\", index=False, header=False\n )\n\n return node_fnames + [headerpath] + edge_fnames", "title": "" }, { "docid": "67ba9f16f33d9799dce8bd53efbbae7b", "score": "0.5465624", "text": "def _start_ingest_pipeline(ingest_type=\"h5\", source_type='E', provided_ingest_files=None, mdmap=None):\n \n f = lambda p: p if p is None else len(p)\n logger.info(f\"Ingest Parameters: ingest_type -> {ingest_type}, source_type -> {source_type}, provided_ingest_files-> {f(provided_ingest_files)}\")\n # assign the list of files to ingest to `ingest_files` if no list is provided. 
\n ingest_files = provided_ingest_files if provided_ingest_files is not None else _auto_file_discovery(ingest_type=ingest_type, source_type=source_type)\n\n # check if there were any ingest files\n if ingest_files:\n\n # Create a unique identifier for this ingest\n ingest_id = uuid1()\n\n # TODO: Write to database that an ingest was started.\n\n if ingest_type == 'csv' or ingest_type == 'CSV':\n _process_csv(ingest_files, ingest_id=ingest_id, single_msid=False)\n \n elif ingest_type == 'h5':\n # Preprocess HDF5 files to match DF interface\n logger.info('Starting HDF5 file pre-processing ...')\n ingest_file_data, chunks = _preprocess_hdf(ingest_files)\n logger.info('Completed HDF5 file pre-processing ...')\n\n if not ingest_file_data:\n logger.info('Empty ingest list.') # LITA-181\n return\n\n logger.info('Starting HDF5 file data ingest ...')\n # out = db.fetchone('SELECT count(*) FROM ingest_history')\n processed_files = _process_hdf(\n ingest_file_data=ingest_file_data,\n mdmap=mdmap\n )\n logger.info('Completed HDF5 file data ingest ALL data sequence ...') \n else:\n raise ValueError('Ingest type parameter is invalid. Valid options are csv or h5.')\n logger.info(f'Moving {len(processed_files)} HDF5 ingest file(s) to tmp storage ... ')\n move_archive_files(processed_files)\n \n \n if int(os.environ.get('JETA_UPDATE_STATS', True)):\n # Once data ingest is complete update the 5min and daily stats data\n from jeta.archive import update\n update.main()\n else:\n logger.info(f'Skipping stats update.') # LITA-191\n else:\n logger.info('No ingest files discovered in {STAGING_DIRECTORY}')", "title": "" }, { "docid": "872b58626eda858e3d4973509aa33bdb", "score": "0.5454327", "text": "def ingest_data(ingested_dataset_path: str, base_artifact_path: str):\n # timestamp as unique id for the component execution\n timestamp = int(time.time())\n\n # create directory to store the actual data\n target_path = f\"{base_artifact_path}/ingestion/ingested_dataset_{timestamp}.jsonl\"\n # if the target path is a google cloud storage path convert the path to the gcsfuse path\n target_path_gcsfuse = target_path.replace(\"gs://\", \"/gcs/\")\n Path(target_path_gcsfuse).parent.mkdir(parents=True, exist_ok=True)\n\n with open(target_path_gcsfuse, 'w') as f:\n f.writelines([\n \"\"\"{\"image_id\": 318556, \"id\": 255, \"caption\": \"An angled view of a beautifully decorated bathroom.\", \"image_url\": \"http://farm4.staticflickr.com/3133/3378902101_3c9fa16b84_z.jpg\", \"image_name\": \"COCO_train2014_000000318556.jpg\", \"image_license\": \"Attribution-NonCommercial-ShareAlike License\"}\\n\"\"\",\n \"\"\"{\"image_id\": 476220, \"id\": 314, \"caption\": \"An empty kitchen with white and black appliances.\", \"image_url\": \"http://farm7.staticflickr.com/6173/6207941582_b69380c020_z.jpg\", \"image_name\": \"COCO_train2014_000000476220.jpg\", \"image_license\": \"Attribution-NonCommercial License\"}\\n\"\"\",\n \"\"\"{\"image_id\": 134754, \"id\": 425, \"caption\": \"Two people carrying surf boards on a beach.\", \"image_url\": \"http://farm9.staticflickr.com/8500/8398513396_b6a1f11a4b_z.jpg\", \"image_name\": \"COCO_train2014_000000134754.jpg\", \"image_license\": \"Attribution-NonCommercial-NoDerivs License\"}\"\"\"\n ])\n\n # the directory where the output file is created may or may not exists\n # so we have to create it.\n # KFP v1 components can only write output to files. 
The output of this\n # component is written to ingested_dataset_path and contains the path\n # of the actual ingested data\n Path(ingested_dataset_path).parent.mkdir(parents=True, exist_ok=True)\n with open(ingested_dataset_path, 'w') as f:\n f.write(target_path)", "title": "" }, { "docid": "1a8f92a7d4ef08b8324af51f1f8eef07", "score": "0.5429928", "text": "def process_data(self):", "title": "" }, { "docid": "386a0f81469b583ada9736e07315150c", "score": "0.54160565", "text": "def read_data(self):\r\n self.data = []\r\n self.node_data = []\r\n for i, f in enumerate(self.files[0:self.n_nodes]):\r\n # print(str(i) + \". reading: \" + f)\r\n fdata = self.dc.read_data(join(\"data/sensors/\", f))\r\n data = copy.copy(fdata)\r\n self.data.append(data)\r\n node = Constants.NODE_MODEL\r\n node[\"id\"] = i\r\n self.node_data.append(copy.deepcopy(node))", "title": "" }, { "docid": "86093b07ca3a9bb8f810e996a9a2df13", "score": "0.5410222", "text": "def collect(self, config):\n\n results = self.new_data()\n ds0 = config.datasources[0]\n ip_ds = self.check_data_nodes(config)\n # List of IP addresses and data sources if Data Node\n # and IP address and None if the Device\n for ip in ip_ds:\n url = hadoop_url(\n scheme=ds0.zHadoopScheme,\n port=ds0.zHBaseMasterPort,\n host=ip,\n endpoint='/master-status'\n )\n headers = hadoop_headers(\n accept='application/json',\n username=ds0.zHadoopUsername,\n passwd=ds0.zHadoopPassword\n )\n try:\n # Check if HBase into Hadoop Data Node\n check = yield getPage(url, headers=headers)\n except Exception:\n continue\n module = DS_TO_RELATION.get('DataNodeMonitor')\n if ds0.zHBaseAutodiscover and module:\n if ip_ds[ip]:\n summary = 'HBase was discovered on {} data node'.format(\n ip_ds[ip].title\n )\n component = ip_ds[ip].component\n # Execute setHBaseAutodiscover method if\n # HBase on Hadoop Data Node\n results['maps'].append(ObjectMap({\n \"compname\": \"{}/{}\".format(module[0], ds0.component),\n \"modname\": module[1],\n \"setHBaseAutodiscover\": ip\n }))\n else:\n summary = 'HBase was discovered on {} device'.format(ip)\n component = None\n results['events'].append({\n 'component': component,\n 'summary': summary,\n 'eventKey': 'HadoopHbaseAutodiscover',\n 'eventClass': '/Status',\n 'severity': ZenEventClasses.Info,\n })\n defer.returnValue(results)", "title": "" }, { "docid": "a62bb453e25ec157c6881ac8c1ef133a", "score": "0.5399114", "text": "def on_generate_data_for_master(data):", "title": "" }, { "docid": "6d2b2a49532c76eea1cb580fe2000435", "score": "0.53833944", "text": "def data_nodes_remodel(self, data, device):\n nodes_oms = []\n try:\n values = json.loads(data['jmx'])\n except Exception:\n raise HadoopException('Error parsing collected data for {} '\n 'monitoring template'.format(device.template))\n for value in values.get('beans'):\n if value.get('name') == 'Hadoop:service=NameNode,name=NameNodeInfo':\n for key, val in (('LiveNodes', NODE_HEALTH_NORMAL),\n ('DeadNodes', NODE_HEALTH_DEAD),\n ('DecomNodes', NODE_HEALTH_DECOM)):\n nodes_oms.extend(\n node_oms(\n log, device, value.get(key), val, data['conf'], True\n )\n )\n rm = RelationshipMap(\n relname='hadoop_data_nodes',\n modname=MODULE_NAME['HadoopDataNode'],\n objmaps=nodes_oms)\n if list(rm):\n return [rm]\n return []", "title": "" }, { "docid": "f201ce6bbd478cf3074dd3cc62c1eb2a", "score": "0.53551394", "text": "def on_apply_data_from_slave(data, slave):", "title": "" }, { "docid": "0df6a31afc9e548b5914bdfa8c631465", "score": "0.53386444", "text": "def execute(ingest_type='h5', source_type='E', 
provided_ingest_files=None):\n logger.info('INGEST BEGIN >>>')\n logger.info('Ingest Module: {}'.format(os.path.abspath(__file__)))\n logger.info('Fetch Module: {}'.format(os.path.abspath(fetch.__file__)))\n logger.info('Loading system configuration ... ')\n\n from jeta.archive.operations import load_config\n current_settings = load_config()\n logger.info(f'System configuration loaded: \\n{current_settings}')\n\n # processed_msids = [x for x in pickle.load(open(msid_files['colnames'].abs, 'rb'))\n # if x not in fetch.IGNORE_COLNAMES]\n\n with h5py.File(ALL_KNOWN_MSID_METAFILE, 'r') as h5:\n i = [h5[msid].attrs['id'] for msid in h5.keys()]\n mdmap = {id:name for id, name in zip(i, h5.keys())}\n\n _start_ingest_pipeline(\n mdmap=mdmap, # map between msids names as strings and their numerical id\n ingest_type=ingest_type, # hdf5 or fof\n provided_ingest_files=provided_ingest_files, # a list of ingest files or None\n source_type=source_type # C, E, or R\n )\n\n logger.info(f'INGEST COMPLETE <<<')", "title": "" }, { "docid": "58f1020848f4dc0ba6d62270bd163d66", "score": "0.5311746", "text": "def nodes(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "db5d49f462aead9cdc51dcc363f8841a", "score": "0.5302282", "text": "def parse(data):\n node_list = []\n # 1. get the name of all nodes\n name_list = data[\"datasets\"].keys()\n for t in data[\"tasks\"]:\n name_list.append(t[\"name\"])\n # 2. create all nodes and save them to node_list\n # note: here the order of node in the node_list is the same as the\n # order in the datasets, and we will take advantage of this fact in\n # the following design and development\n for item in name_list:\n temp = Node(item)\n node_list.append(temp)\n # 3. get detaied info for tasks\n # and save them to corresponding node\n layers_length = len(data[\"layers\"])\n for task in data[\"tasks\"]:\n idx = name_list.index(task[\"name\"])\n node_list[idx].set_runtime(task[\"runtime\"])\n node_list[idx].set_collectionTool(task[\"collectionTool\"])\n node_list[idx].set_placed_layer(task[\"name\"], layers_length)\n # 4. get detaied info for dataset\n # and save them to corresponding node\n for item in data[\"datasets\"]:\n idx = name_list.index(item)\n node_list[idx].set_size(data[\"datasets\"][item][\"size\"])\n node_list[idx].set_type(data[\"datasets\"][item][\"type\"])\n # 5. 
traverse events to add more details to each node\n for event in data[\"events\"]:\n # get the index of the corresponding node in\n # datasets, which is the same as in node_list\n idx = name_list.index(event[\"dataset\"])\n # set the layer of the node, if the method is \"read\",\n # then the origin is the layer where the node should be placed\n if event[\"method\"] == \"r\":\n layer_idx = data[\"layers\"].index(event[\"origin\"]) + 1\n if node_list[idx].get_placed_layer_idx() == -1:\n node_list[idx].set_placed_layer(event[\"origin\"], layer_idx)\n # the node for destination is its child_node\n child_idx = name_list.index(event[\"destination\"])\n node_list[idx].set_child(node_list[child_idx])\n node_list[idx].set_direction(1)\n node_list[child_idx].set_parent(node_list[idx])\n # if the method is \"write\", then the destination is always a layer\n # where the node should be placed\n elif event[\"method\"] == \"w\":\n layer_idx = data[\"layers\"].index(event[\"destination\"]) + 1\n if node_list[idx].get_placed_layer_idx() == -1:\n node_list[idx].set_placed_layer(event[\"destination\"],\n layer_idx)\n # the node for destination is its child_node\n child_idx = name_list.index(event[\"origin\"])\n node_list[idx].set_child(node_list[child_idx])\n node_list[idx].set_direction(2)\n node_list[child_idx].set_parent(node_list[idx])\n # if the method is \"read/write\", then the origin is always a layer\n # where the node should be placed\n elif event[\"method\"] == \"rw\":\n layer_idx = data[\"layers\"].index(event[\"origin\"]) + 1\n if node_list[idx].get_placed_layer_idx() == -1:\n node_list[idx].set_placed_layer(event[\"origin\"], layer_idx)\n # the node for destination is its child_node\n child_idx = name_list.index(event[\"destination\"])\n node_list[idx].set_child(node_list[child_idx])\n node_list[idx].set_direction(3)\n node_list[child_idx].set_parent(node_list[idx])\n elif event[\"method\"] == \"m\":\n layer_idx = data[\"layers\"].index(event[\"origin\"]) + 1\n if node_list[idx].get_placed_layer_idx() == -1:\n node_list[idx].set_placed_layer(event[\"origin\"], layer_idx)\n temp = Node(event[\"dataset\"] + \" \")\n temp.copy_dataset_info(node_list[idx])\n node_list[idx].set_child(temp)\n node_list[idx].set_direction(1)\n temp.set_parent(node_list[idx])\n temp_layer_idx = data[\"layers\"].index(event[\"destination\"]) + 1\n temp.set_placed_layer(event[\"destination\"], temp_layer_idx)\n node_list.append(temp)\n return node_list", "title": "" }, { "docid": "1dad6091f7b6d8ccdd0d480a52d813e5", "score": "0.5287021", "text": "def push_data(channel):\n\n # Init the Ingest service\n ingest = rpc_pb2_grpc.IngestServiceStub(channel=channel)\n\n # Create entity user\n # User details\n new_user_details = user_pb2.Details()\n name = names.get_first_name()\n surename = names.get_last_name()\n username = \"{}.{}\".format(name.lower(),surename.lower())\n new_user_details.name = \"{} {}\".format(name,surename)\n new_user_details.userName = username\n new_user_details.email = \"{}@{}.com\".format(name.lower(),surename.lower())\n\n # Push new details to the Ingest server by the synchronous ingest request using a random uuid\n uuid_val = str(uuid.uuid4())\n response = ingest.ingest(\n rpc_pb2.Ingest(uuid=uuid_val,\n entity=\"user\",\n attribute=\"details\",\n key=username,\n value=new_user_details.SerializeToString()))\n lg.info(\">>> Synchronous ingest service: new user details\")\n lg.debug(\"request uuid: {}\".format(uuid_val))\n lg.debug(new_user_details)\n lg.debug(\"<<< Response\")\n if response.status != 
200:\n lg.warning(\"*** Error Status code ***\")\n lg.warning(response)\n else:\n lg.debug(response)\n\n # Create entity product\n # Number of products\n num_products = 5\n new_products = []\n product_ids = []\n for i in range(num_products):\n # Create some dummy product attributes (details, price and categories)\n # Product details\n new_product_details = product_pb2.Details()\n new_product_details.name = random.choice(lorem.paragraph()[:-1].split(\" \")).capitalize()\n new_product_id = random.randint(1,100)\n product_ids.append(new_product_id)\n new_product_details.id = new_product_id\n new_product_details.description = lorem.sentence()\n\n new_products.append(rpc_pb2.Ingest(uuid=str(uuid.uuid4()),\n entity=\"product\",\n attribute=\"details\",\n key=str(new_product_id),\n value=new_product_details.SerializeToString()))\n\n # Product price\n new_product_price = product_pb2.Price()\n new_product_price.price = random.uniform(100, 100000)\n new_product_price.priceVat = random.uniform(0, 0.5)\n\n new_products.append(rpc_pb2.Ingest(uuid=str(uuid.uuid4()),\n entity=\"product\",\n attribute=\"price\",\n key=str(new_product_id),\n value=new_product_price.SerializeToString()))\n\n # Product category (the product can be in more than one category)\n for pc in random.sample(range(10),random.randint(1,5)):\n new_product_category = product_pb2.Category()\n new_product_category.categoryId = pc\n\n new_products.append(rpc_pb2.Ingest(uuid=str(uuid.uuid4()),\n entity=\"product\",\n attribute=\"category.{}\".format(str(new_product_category.categoryId)),\n key=str(new_product_id),\n value=new_product_category.SerializeToString()))\n\n # Push new details to the Ingest server by stream ingestion with a single ingest request\n responses = ingest.ingestSingle(iter(new_products))\n lg.info(\">>> Stream ingestion with single ingest request: new product details\")\n for r in new_products:\n lg.debug(r)\n lg.info(\"<<< Stream responses\")\n for r in responses:\n if r.status != 200:\n lg.warning(\"*** Error Status code ***\")\n lg.warning(r)\n else:\n lg.debug(r)\n\n events = []\n for product_id in product_ids:\n new_event = event_pb2.BaseEvent()\n new_event.userName = username\n new_event.type = random.choice(event_pb2.BaseEvent.Type.values())\n new_event.productId = product_id\n new_event.stamp = int(round(time.time() * 1000000))\n events.append(rpc_pb2.Ingest(uuid=str(uuid.uuid4()),\n entity=\"event\",\n attribute=\"data\",\n key=username,\n value=new_event.SerializeToString()))\n\n # Push new details to the Ingest server by stream ingestion single bulk request\n responses = ingest.ingestBulk(iter([rpc_pb2.IngestBulk(ingest=iter(events))]))\n lg.info(\">>> Stream ingestion with ingestBulk request: new user events\")\n uuids = []\n for r in events:\n lg.debug(\"uuid: {}\".format(r.uuid))\n uuids.append(r.uuid)\n lg.debug(event_pb2.BaseEvent.FromString(r.value))\n lg.info(\"<<< Stream StatusBulk response\")\n for r in responses:\n for rr in r.status:\n if rr.status != 200:\n lg.warning(\"*** Error Status code ***\")\n lg.warning(rr)\n else:\n lg.debug(rr)\n uuids.remove(rr.uuid)\n\n if uuids:\n lg.warning(\"*** There was no response to following requests ***\")\n lg.warning(uuids)\n\n return username, product_ids", "title": "" }, { "docid": "03ee23e027493107cc09ad103646e387", "score": "0.52740896", "text": "def process(self):\n self.append_data()", "title": "" }, { "docid": "c03c3bf7c80ad47da0c41e833eb47a9d", "score": "0.5265086", "text": "def ingest(self, dataset) -> IterableDataset:\n raise NotImplementedError", 
"title": "" }, { "docid": "6afc138893ed55ed703ea2fee5ccd611", "score": "0.52574253", "text": "def _collect_data(self) -> None:\n self.set_websocket_data()\n self.set_stratum_data()", "title": "" }, { "docid": "58365a6916f4148cbf629ad4a36f5e91", "score": "0.52571344", "text": "def run(self):\n self.data_reader()\n self.data_transformer()\n self.data_writer()", "title": "" }, { "docid": "d10e3ae0bba367e8e2d037364a4f2f58", "score": "0.524502", "text": "def run(self):\r\n print(\"Start LogDataStagingLoadEtlStep\")\r\n loader_attr = AttrDict({\"s3_path\": self.services_config.get('S3','LOG_DATA'),\"jsonpath\" : self.services_config.get('S3','LOG_JSONPATH') , \"is_manifest\" : False})\r\n self.redshiftStagingLoader.loadIntoRedshift(\"staging.stg_events\", self.infra_settings.RoleArn, self.infra_settings.S3DataRegion, loader_attr)\r\n print(\" End LogDataStagingLoadEtlStep\")", "title": "" }, { "docid": "9321e3d5ffa7f76fec9b024875b200e2", "score": "0.52193743", "text": "def perform(self):\n\n class PseudoFile(object):\n\n def __init__(self):\n self.data = \"\"\n\n def write(self, chunk):\n self.data += chunk\n\n try:\n for node_name, node_ip in self.nodes.items():\n stdout = PseudoFile()\n stderr = PseudoFile()\n\n LOG.info(\"Execute action %s on %s(%s)\", self.action, node_name, node_ip)\n \n ssh_client = self.get_ssh_client(node_ip)\n code = ssh_client.run(self.action, stdout=stdout, stderr=stderr)\n self.status_code = self.IS_EXECUTION_OK\n\n LOG.info(\"Finished execution of '%s' on %s(%s): %s\", self.action, node_name, node_ip, code)\n LOG.debug(\"Incident stdout: %s\", stdout.data)\n LOG.debug(\"Incident stderr: %s\", stderr.data)\n\n if self.stdout_regexp is not None:\n m = re.compile(self.stdout_regexp, re.DOTALL)\n if m.match(stdout.data) is None:\n LOG.error(\"Regexp {} is not matched\".format(self.stdout_regexp))\n self.status_code = self.IS_EXECUTION_FAILED\n except Exception, e:\n LOG.error(str(e))\n self.status_code = self.IS_EXECUTION_FAILED", "title": "" }, { "docid": "8040669f4aff3f274b6467ece706e308", "score": "0.5215192", "text": "def pipeline_censo():\n\n @task\n def emr_process_enem_data():\n cluster_id = client.run_job_flow(\n Name='EMR-Jeferson-Censo-IGTI',\n ServiceRole='EMR_DefaultRole',\n JobFlowRole='EMR_EC2_DefaultRole',\n VisibleToAllUsers=True,\n LogUri='s3://datalake-igti-mod01-desafio/emr-logs',\n ReleaseLabel='emr-6.3.0',\n Instances={\n 'InstanceGroups': [\n {\n 'Name': 'Master nodes',\n 'Market': 'SPOT',\n 'InstanceRole': 'MASTER',\n 'InstanceType': 'm5.xlarge',\n 'InstanceCount': 1,\n },\n {\n 'Name': 'Worker nodes',\n 'Market': 'SPOT',\n 'InstanceRole': 'CORE',\n 'InstanceType': 'm5.xlarge',\n 'InstanceCount': 1,\n }\n ],\n 'Ec2KeyName': 'jeferson=igti-teste',\n 'KeepJobFlowAliveWhenNoSteps': True,\n 'TerminationProtected': False,\n 'Ec2SubnetId': 'subnet-0c2d4f69'\n },\n\n Applications=[{'Name': 'Spark'}],\n\n Configurations=[{\n \"Classification\": \"spark-env\",\n \"Properties\": {},\n \"Configurations\": [{\n \"Classification\": \"export\",\n \"Properties\": {\n \"PYSPARK_PYTHON\": \"/usr/bin/python3\",\n \"PYSPARK_DRIVER_PYTHON\": \"/usr/bin/python3\"\n }\n }]\n },\n {\n \"Classification\": \"spark-hive-site\",\n \"Properties\": {\n \"hive.metastore.client.factory.class\": \"com.amazonaws.glue.catalog.metastore.AWSGlueDataCatalogHiveClientFactory\"\n }\n },\n {\n \"Classification\": \"spark-defaults\",\n \"Properties\": {\n \"spark.submit.deployMode\": \"cluster\",\n \"spark.speculation\": \"false\",\n \"spark.sql.adaptive.enabled\": \"true\",\n 
\"spark.serializer\": \"org.apache.spark.serializer.KryoSerializer\"\n }\n },\n {\n \"Classification\": \"spark\",\n \"Properties\": {\n \"maximizeResourceAllocation\": \"true\"\n }\n }\n ],\n\n Steps=[{\n 'Name': 'Processamento do censo 2020',\n 'ActionOnFailure': 'TERMINATE_CLUSTER',\n 'HadoopJarStep': {\n 'Jar': 'command-runner.jar',\n 'Args': ['spark-submit',\n '--packages', 'io.delta:delta-core_2.12:1.0.0', \n '--conf', 'spark.sql.extensions=io.delta.sql.DeltaSparkSessionExtension', \n '--conf', 'spark.sql.catalog.spark_catalog=org.apache.spark.sql.delta.catalog.DeltaCatalog', \n '--master', 'yarn',\n '--deploy-mode', 'cluster',\n 's3://datalake-igti-mod01-desafio/emr-code/pyspark/csv_to_parquet_job_spark.py'\n ]\n }\n }],\n )\n return cluster_id[\"JobFlowId\"]\n\n\n @task\n def wait_emr_step(cid: str):\n waiter = client.get_waiter('step_complete')\n steps = client.list_steps(\n ClusterId=cid\n )\n stepId = steps['Steps'][0]['Id']\n\n waiter.wait(\n ClusterId=cid,\n StepId=stepId,\n WaiterConfig={\n 'Delay': 30,\n 'MaxAttempts': 120\n }\n )\n return True\n\n\n\n @task\n def terminate_emr_cluster(success_before: str, cid: str):\n if success_before:\n res = client.terminate_job_flows(\n JobFlowIds=[cid]\n )\n\n\n # Encadeando a pipeline\n cluid = emr_process_enem_data()\n res_emr = wait_emr_step(cluid)\n res_ter = terminate_emr_cluster(res_emr, cluid)", "title": "" }, { "docid": "7469f2ea15fe785481fcc21a9bfd2a2e", "score": "0.5212808", "text": "def _get_node_data_for_export(wildcards):\n # Define inputs shared by specific builds.\n inputs = [\n rules.refine.output.node_data,\n rules.ancestral.output.node_data,\n rules.translate.output.node_data,\n rules.convert_translations_to_json.output.translations,\n rules.clades_by_haplotype.output.clades,\n rules.delta_frequency.output.delta_frequency,\n rules.distances.output.distances,\n rules.cross_immunities.output.cross_immunities,\n rules.lbi.output.lbi\n ]\n\n # Define node data that only make sense for natural populations\n # such as titer models.\n if wildcards.type == \"natural\":\n inputs.extend([\n rules.traits.output.node_data,\n BUILD_TIMEPOINT_PATH + \"titers-tree-model.json\",\n BUILD_TIMEPOINT_PATH + \"titers-sub-model.json\",\n rules.titer_cross_immunities.output.cross_immunities,\n rules.titer_tree_cross_immunities.output.cross_immunities\n ])\n\n build = config[\"builds\"][wildcards.type][wildcards.sample]\n if \"fra_titers\" in build:\n inputs.append(rules.rename_fields_in_fra_titers_tree.output.titers_model)\n inputs.append(rules.fra_titer_tree_cross_immunities.output.cross_immunities)\n\n elif wildcards.type == \"simulated\":\n inputs.extend([\n rules.normalize_fitness.output.fitness\n ])\n\n # Convert input files from wildcard strings to real file names.\n inputs = [input_file.format(**wildcards) for input_file in inputs]\n return inputs", "title": "" }, { "docid": "f852ad2af54b5c37bbd5743c050ed0e2", "score": "0.5204315", "text": "def start():\n\trospy.init_node('data_node', anonymous=True)\n\tdh = DataHandler()\n\tdh.start()", "title": "" }, { "docid": "1b1956c76d1e2027caa1e060ef363e1a", "score": "0.519907", "text": "async def collect_data(self) -> None:\n pass", "title": "" }, { "docid": "38c9fc0bf3447d45d7bf785cd5dfbdd9", "score": "0.51807606", "text": "def import_data(self, **kwargs): \n\n self.import_data_pump_probe(**kwargs)", "title": "" }, { "docid": "db5ac2575df7d4f5908a78f34141a6d8", "score": "0.51773584", "text": "def process_incoming_data(self):\n while not self.incoming_data.empty():\n\n data, feed = 
self.incoming_data.get()\n agg_params = feed['agg_params']\n\n if agg_params.get('exclude_aggregator', False):\n continue\n\n address = feed['address']\n sessid = feed['session_id']\n\n pid = self.pids.get((address, sessid))\n if pid is None:\n prov_kwargs = {}\n for key in ['frame_length', 'fresh_time']:\n if key in agg_params:\n prov_kwargs[key] = agg_params[key]\n\n pid = self.add_provider(address, sessid, **prov_kwargs)\n\n prov = self.providers[pid]\n prov.save_to_block(data)", "title": "" }, { "docid": "d8f60a8a77c3bdd0c20432a5620eb76a", "score": "0.5173597", "text": "def process_data(self):\n pass", "title": "" }, { "docid": "36632f67fd16f7c2469fd1d0a6203768", "score": "0.51693", "text": "def read_nodes(self, nodes):\n activity_type = \"Overpy nodes\"\n\n positions = []\n altitudes = []\n distances = []\n\n nodes = list(map(self.__map_payload, nodes))\n\n payload = {\"locations\": nodes}\n json_data = requests.post(url=self.open_elevation_api, json=payload).content\n data = json.loads(json_data)\n for node in data[\"results\"]:\n altitudes.append(node[\"elevation\"])\n\n node: overpy.node\n prevNode = nodes[0]\n\n for i in range(len(nodes)):\n node = nodes[i]\n positions.append((node[\"latitude\"], node[\"longitude\"]))\n if i != 0:\n flat_distance = distance.distance(\n (node[\"latitude\"], node[\"longitude\"]),\n (prevNode[\"latitude\"], prevNode[\"longitude\"]),\n ).meters\n euclidean_distance = math.sqrt(\n flat_distance ** 2 + abs(altitudes[i] - altitudes[i - 1]) ** 2\n )\n distances.append(euclidean_distance)\n else:\n distances.append(0)\n prevNode = node\n try:\n total_distance = sum(distances)\n except BaseException:\n total_distance = None\n\n interpreted_way = {\n \"activity_type\": activity_type,\n \"positions\": positions,\n \"altitudes\": altitudes,\n \"distances\": distances,\n \"total_distance\": total_distance,\n }\n\n return interpreted_way", "title": "" }, { "docid": "05756a0ecdfc8e60ca73b91d3c71e838", "score": "0.5141797", "text": "def stream_events(self, inputs, ew):", "title": "" }, { "docid": "a0bfca1a254c05fa02682e795de88390", "score": "0.5134044", "text": "def harvest_data(self):\n clusters = []\n for group, nodes in self.groups():\n funcs = [node.name for node in nodes]\n clusters.append({\n 'cluster': group,\n 'nodes': funcs\n })\n nodes = []\n for node in self.nodes():\n nodes.append({\n 'name': node.name,\n 'calls': node.calls.value,\n 'time': node.time.value,\n 'avg': node.time.value / node.calls.value\n })\n edges = []\n for edge in self.edges():\n edges.append({\n 'from': edge.src_func,\n 'to': edge.dst_func\n })\n return {\n 'clusters': clusters,\n 'nodes': nodes,\n 'edges': edges\n }", "title": "" }, { "docid": "8e534009f7664d70e17b9d9ff7616326", "score": "0.5125151", "text": "def on_apply_data_from_master(data):", "title": "" }, { "docid": "eab37fdd3e5471db464ec1ae741f949f", "score": "0.50944126", "text": "def event(self, message):\n\n print(message)\n is_valid_node = self.gateway\n\n if is_valid_node:\n\n stamp = datetime.now()\n if message.node_id not in self.live_stamping:\n self.live_stamping[message.node_id] = {\"last_seen\": stamp}\n else:\n self.live_stamping[message.node_id][\"last_seen\"] = stamp\n\n is_child_update_or_req = (\n message.child_id != 255\n and message.type in [1, 2]\n and self.gateway.is_sensor(message.node_id, message.child_id)\n )\n if is_child_update_or_req:\n child = self.gateway.sensors[message.node_id].children[message.child_id]\n\n if message.sub_type in [\n 0,\n 1,\n 3,\n 4,\n 6,\n 7,\n 8,\n 9,\n 10,\n 11,\n 12,\n 
13,\n 14,\n 17,\n 18,\n 23,\n 34,\n 35,\n 37,\n 38,\n 39,\n ]:\n \"\"\" Send float types as float\"\"\"\n payload = float(message.payload)\n elif message.sub_type in [\n 2,\n 15,\n 16,\n ]:\n \"\"\" Send int types as int\"\"\"\n payload = int(message.payload)\n else:\n payload = message.payload\n\n child_json = {\n \"node_id\": message.node_id,\n \"child_id\": message.child_id,\n \"child_type\": child.type,\n \"data_type\": message.sub_type,\n \"payload\": payload,\n }\n\n if message.type == 1:\n print(\"sensor_updated: {}\".format(json.dumps(child_json)))\n stamp = datetime.now()\n\n if message.child_id not in self.live_stamping[message.node_id]:\n self.live_stamping[message.node_id][message.child_id] = {\n child.type: stamp\n }\n else:\n self.live_stamping[message.node_id][message.child_id][\n child.type\n ] = stamp\n\n db_interface = self.getDBInterface()\n if db_interface is not None:\n try:\n child_json[\"date\"] = datetime.now()\n x = db_interface.insert_one(child_json)\n except Exception as e:\n print(\"Could not connect to database: %s\" % e)\n\n elif message.type == 2:\n print(\"sensor_request: {}\".format(json.dumps(child_json)))", "title": "" }, { "docid": "e04d6babb59dddb1aceb68b269847cc7", "score": "0.50937724", "text": "def ingest(self, *, skip_checks: bool = False, **kwargs) -> None:\n assert self.parent, \"must have a parent experiment\"\n self._configure_layers_proxy(\"decode\", **kwargs)\n self.update_ingestion_tag()\n\n self.logger.debug(\"<--- ingesting begun! --->\")\n self.logger.debug(\"producing rawstream <- reading raw measurements\")\n self.i_rawstream = self.parent.layers.io.get_measurements()\n\n if \"skip_io_decode\" not in kwargs:\n self.logger.debug(\n \"producing rdpstream <- choosing data and preprocessing rawstream\"\n )\n self.i_rdpstream = self.parent.layers.io.decode(self.i_rawstream, skip_checks)\n else:\n self.logger.debug(\"skipped producing rdpstream\")\n\n self.collect_intermediates(self.parent.layers)", "title": "" }, { "docid": "b7044e36982fb7ede5331b4ee5c6b46b", "score": "0.5093191", "text": "def pub_start(self, data):\n message = {\n 'event_type': 'start',\n 'node_name': data['node_name'],\n 'time': data['time'],\n }\n self.pub_event(message)", "title": "" }, { "docid": "cbce88d633a597b787c6dad055147922", "score": "0.50731236", "text": "def ingest_dataset(dataset_id, data):\n\n uri = f\"https://data.terra.bio/api/repository/v1/datasets/{dataset_id}/ingest\"\n\n headers = {\"Authorization\": \"Bearer \" + get_access_token(),\n \"accept\": \"application/json\",\n \"Content-Type\": \"application/json\"}\n\n response = requests.post(uri, headers=headers, data=data)\n status_code = response.status_code\n\n if status_code != 202: # if ingest start-up fails\n raise ValueError(response.text)\n\n # if ingest start-up succeeds\n return json.loads(response.text)", "title": "" }, { "docid": "f7b100d14c933bf30f1dd88cfc6d2816", "score": "0.5063031", "text": "def output_influxdb(self):\n\n for data in self.data_list:\n for field in data:\n \n # Remove command key from the fields\n # Just so we don't clutter each datapoint with useless stuff\n measurement = field['command']\n field.pop('command', None)\n print field\n\n # Build JSON body for the REST API call\n json_body = [\n {\n 'measurement': measurement,\n 'tags': {\n 'host': self.hostname\n },\n 'fields': field\n }\n ]\n \n client = InfluxDBClient(db_host, db_port, db_user, db_password, db_name)\n client.write_points(json_body)", "title": "" }, { "docid": "050d5f2d34b07ba75a2b1e4015a2105f", "score": 
"0.5062115", "text": "def aggregate_map_data(self, n_topics=2, n_features=5):\n n_nodes = self.attributes[\"n_nodes\"]\n nodes = np.arange(n_nodes)\n n_cols = self.attributes[\"kshape\"][1]\n data = np.column_stack((nodes,\n np.divide(nodes, n_cols),\n nodes % n_cols,\n self.cluster_.mapper_data[nodes]))\n self.nodes_data = pd.DataFrame(data, columns=[\"node\", \"x\", \"y\", \"cluster\"])\n for topic in xrange(n_topics):\n self.nodes_data[\"topic_%d\" % (1 + topic)] = \\\n json.dumps(self.getTokensByNode(topic, n_features, n_topics))\n self.nodes_data[\"hits\"] = self.nodes_data.groupby(\"node\").size()\n self.nodes_data.fillna(0, inplace=True)\n return self.nodes_data", "title": "" }, { "docid": "c83e66c511bc5e92843f4791b799a4b7", "score": "0.50611365", "text": "def _gather_logs(self):", "title": "" }, { "docid": "600d4504b2e2b6f0a3bed2424496ac94", "score": "0.505016", "text": "def import_nodes(self, nodes_list):\n for values in nodes_list:\n try:\n self.put(*values) # Star-input allows us to use 3 or 4 args\n except FingerError as exc:\n self.log.error(\"Error importing finger: %s\", exc.message)", "title": "" }, { "docid": "c484b9f4b36857a7e4efd08f5b45b780", "score": "0.5039153", "text": "def process_event(event):\n global node_ready\n global nodecluster\n\n # This may be too obvious for a comment but, we are looking for the Deployed event of the Cluster\n if event['resource_uri'].encode('UTF-8') == nodecluster.resource_uri:\n if event['state'].encode('UTF-8') == 'Deployed':\n print 'All nodes have been deployed.'\n node_ready = True", "title": "" }, { "docid": "f86f43cf8be4b95b1d5621402de52882", "score": "0.5031817", "text": "def parse_data(data):\n nodes = data['nodes']\n timestamp = data['timestamp'] # UTC time in epoch from the node\n\n # Update global last_seen\n update_last_seen(timestamp)\n\n nodes = nodes.split(\"\\n\")\n # Remove first 2 lines because it is just the header\n nodes.pop(0)\n nodes.pop(0)\n\n # Update/add each node to the database\n # name,blocked,primaryIp,routes,viaIp,viaDev,metric,lastDesc,lastRef,\n for node_raw_data in nodes:\n node_raw_data = node_raw_data.split(',')\n node_data = {\n 'pk': node_raw_data[2][-19:],\n 'name': node_raw_data[0],\n 'blocked': node_raw_data[1],\n 'primaryIp': node_raw_data[2],\n 'routes': node_raw_data[3],\n 'viaIp': node_raw_data[4],\n 'viaDev': node_raw_data[5],\n 'metric': node_raw_data[6],\n 'lastDesc': node_raw_data[7],\n 'lastRef': node_raw_data[8],\n 'lastSeen': timestamp,\n }\n add_node(node_data)\n\n # Check if we have any new nodes\n new_nodes = db_session.query(Node).filter(Node.firstSeen == timestamp)\n for node in new_nodes:\n send_message(\"New Node added!\", node.name + \" \" + node.primaryIp)\n\n # Check to see what nodes are down\n down_timestamp = int(timestamp)-360\n if DEBUG:\n down_timestamp = int(timestamp)-90\n down_nodes = db_session.query(Node).filter(and_(Node.lastSeen > down_timestamp,\n Node.lastSeen != timestamp\n )\n )\n down_nodes_message = \"\"\n for node in down_nodes:\n down_nodes_message += node.name + \" \" + node.primaryIp + \"\\n\\n\"\n\n if down_nodes_message != \"\":\n send_message(\"Nodes went down\", down_nodes_message)", "title": "" }, { "docid": "fd29bb15dc75331c767a83539c65708a", "score": "0.50254554", "text": "def _process_data(self):\n pass", "title": "" }, { "docid": "c15b9c706505f0838cea7ddf81e68770", "score": "0.50108165", "text": "def execute(conf, train, test):\n blocks = conf.network.blocks\n block = blocks[0]\n spikesources = []\n spikedetectors = []\n nodes = {}\n simtime = 
100.0\n\n nest.ResetKernel()\n for node in block.nodes:\n nodes[node.id] = create_node(node)\n\n for edge in block.edges:\n create_edge(nodes, edge)\n \n #for in_nodes in block.inputs:\n # source = create_source(\n\n for output in block.outputs:\n detector = create_detector(nodes[node.id])\n spikedetectors.append(detector)\n\n nest.Simulate(simtime)\n\n print(spikedetectors[0][0])", "title": "" }, { "docid": "129d6a57574f8077ec8978f400473c10", "score": "0.4999704", "text": "def _save_node_data(self, node: Node) -> Node:\n node.public_ip = self._run_machine(\"ip {}\".format(node.name), show_output=False)\n node.cluster_iface = node.config.get(\"cluster-interface\", DEFAULT_CLUSTER_INTERFACE)\n\n result = self._run_machine(\"ssh {} \\\"ip addr sh {} | awk '/inet / {{ print \\$2 }}'\\\"\".format(\n node.name, node.cluster_iface),\n use_shell=True, show_output=False)\n\n node.cluster_ip = re.sub(\"/[0-9]+$\", \"\", result)\n\n return node", "title": "" }, { "docid": "e977c0087d03fa2313feeb7bb90eb189", "score": "0.49957874", "text": "def submit():\n start_node = app.source_node_data.get()\n print_adjacency()\n bfs(start_node)", "title": "" }, { "docid": "fc3482bcd6e7a839cded6f82ca606850", "score": "0.49916536", "text": "def generate_and_insert_data( inputs ):\n global gpudb_ingestor\n\n batch_size, num_batches = inputs\n\n my_id = int(random.random() * 100)\n\n null_percentage = 0.1\n alphanum = (string.ascii_letters + string.digits)\n\n # Nested loop\n # Outer loop controls how many batches of records are added to the ingestor\n for i in range(0, num_batches):\n print (\"thread {_id:>5} outer loop: {i:>5}\".format( _id = my_id, i = i ))\n records = []\n # Inner loop generated records for this batch\n for j in range(0, batch_size):\n _i_plus_j = (i + j)\n record = collections.OrderedDict()\n record[ \"i1\" ] = i * j\n record[ \"i2\" ] = random.randint( -_i_plus_j, _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"i8\" ] = random.randint( -128, 127 ) if (random.random() >= null_percentage) else None\n record[ \"i16\" ] = random.randint( -32768, 32767 ) if (random.random() >= null_percentage) else None\n record[ \"d1\" ] = (random.random() * _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"f1\" ] = (random.random() * _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"l1\" ] = (random.randint( 0,_i_plus_j ) * _i_plus_j ) if (random.random() >= null_percentage) else None\n record[ \"timestamp\" ] = random.randint( -30610239758979, 29379542399999 ) if (random.random() >= null_percentage) else None\n record[ \"s1\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 2, 200 ) )] )\n record[ \"date\" ] = None if (random.random() < null_percentage) \\\n else strftime( datetime.date( random.randint( 1000, 2900 ), # year\n random.randint( 1, 12 ), # month\n random.randint( 1, 28 ) # day\n ), \"%Y-%m-%d\" )\n record[ \"datetime\" ] = None if (random.random() < null_percentage) \\\n else ( strftime( datetime.date( random.randint( 1000, 2900 ), # year\n random.randint( 1, 12 ), # month\n random.randint( 1, 28 ) # day\n ), \"%Y-%m-%d\" ) \\\n + \" \"\n + ( datetime.time( random.randint( 0, 23 ), # hour\n random.randint( 0, 59 ), # minute\n random.randint( 0, 59 ) # seconds\n ).strftime( \"%H:%M:%S\" ) )\n + (\".%d\" % random.randint( 0, 999 ) ) ) # milliseconds\n record[ \"decimal\" ] = None if (random.random() < null_percentage) \\\n else ( str( random.randint( 
-922337203685477, 922337203685477 ) )\n + \".\" + str( random.randint( 0, 9999 ) ) )\n record[ \"ipv4\" ] = None if (random.random() < null_percentage) \\\n else '.'.join( [ str( random.randint( 0, 255 ) ) for n in range(0, 4)] )\n record[ \"time\" ] = None if (random.random() < null_percentage) \\\n else ( datetime.time( random.randint( 0, 23 ), # hour\n random.randint( 0, 59 ), # minute\n random.randint( 0, 59 ) # seconds\n ).strftime( \"%H:%M:%S\" ) \\\n + (\".%d\" % random.randint( 0, 999 ) ) ) # milliseconds\n record[ \"c1\" ] = None if (random.random() < null_percentage) \\\n else random.choice( alphanum )\n record[ \"c2\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 2 ) )] )\n record[ \"c4\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 4 ) )] )\n record[ \"c8\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 8 ) )] )\n record[ \"c16\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 16 ) )] )\n record[ \"c32\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 32 ) )] )\n record[ \"c64\" ] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 64 ) )] )\n record[ \"c128\"] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 128 ) )] )\n record[ \"c256\"] = None if (random.random() < null_percentage) \\\n else ''.join( [random.choice( alphanum ) for n in range( 0, random.randint( 0, 256 ) )] )\n\n # Add the record to the list of records\n records.append( record )\n # end for loop\n\n # Add the records to the ingestor\n gpudb_ingestor.insert_records( records )\n # end generating data\n\n\n # Need to flush here since the gpudb_ingestor of the parent\n # thread won't get this child thread's state\n gpudb_ingestor.flush()", "title": "" }, { "docid": "29d47dc019acad83f071f359730e9d0d", "score": "0.49800348", "text": "def __extract_nodes(data_frame):\n # Find unique values to create a look up table\n # Returns a numpy array\n print('Extracting unique values/nodes.....', log_type='info')\n unique_values = pd.unique(data_frame[['source', 'target']].values.ravel('K'))\n\n # Create a PANDAS data frame out of numpy array with unique nodes\n print('Converting values into pandas data frame.....', log_type='info')\n lookup_data = pd.DataFrame(unique_values, columns=['label'])\n lookup_data['id'] = lookup_data.index\n print('Total detected nodes/values: ', log_type='info', end='')\n print('{}'.format(len(lookup_data.index)), color='cyan', text_format='bold')\n\n # Create a mapping dictionary for the node labels\n print('Creating mapping table.....', log_type='info')\n mapping_dict = dict(zip(lookup_data.label, lookup_data.id))\n\n # Return mapping dictionary\n return mapping_dict", "title": "" }, { "docid": "6fe109cafa07f970ec65c8575e76a4ca", "score": "0.4977983", "text": "def process(self, input_data, topic=None):\n pass", "title": "" }, { "docid": "98897c7c710280f7aa071146e9c0931c", "score": "0.4970056", "text": "def attach_nodes(nodes, ev_ids):\n global EV\n\n for node in nodes:\n if not node in STATE['nodes']:\n vlog(3, 'skipping 
not bad node %s' % (node)) \n continue\n \n for ev_id in ev_ids:\n if not ev_id in STATE['nodes'][node]['extraview']:\n STATE['nodes'][node]['extraview'].append(ev_id)\n vlog(3, 'node %s add extraview %s' % (node, ev_id)) \n\n save_state()", "title": "" }, { "docid": "ec651b5c127effd48bbac6801b0d951b", "score": "0.4967889", "text": "def on_train_begin(self, logs={}):\n pass", "title": "" }, { "docid": "9faadb06be0fd4900d1d40f6244c2578", "score": "0.49657193", "text": "def collect_events(self, ew):\n input_module.collect_events(self, ew)", "title": "" }, { "docid": "f648ae7d4495bbc78173a77b823378e6", "score": "0.49652967", "text": "def collect_node_messages(self):\r\n \r\n data = []\r\n for node in self.env.nodes:\r\n history = [*node[\"obj\"].in_msg_history,\r\n *node[\"obj\"].out_msg_history]\r\n\r\n data.append({\"node_id\": node[\"obj\"].id, \"total_msgs\": len(history), \"out_msgs\": len(node[\"obj\"].out_msg_history),\r\n \"in_msgs\": len(node[\"obj\"].in_msg_history)})\r\n df = pd.DataFrame(data=data, columns=[\r\n \"node_id\", \"total_msgs\", \"out_msgs\", \"in_msgs\"])\r\n return df", "title": "" }, { "docid": "2649f06cd6743d90a3a984e50f177241", "score": "0.49578807", "text": "def trainNode(self, data, attributes, class_label, indices=None):", "title": "" }, { "docid": "e557511dee76ce99ff6530b524a4d8e8", "score": "0.4950909", "text": "def main():\n args = arg_parser_init().parse_args()\n\n # Test is Kwapi-sensor can monitor this node\n if not is_kwapi_available(args):\n LOGGER.error(\"Kwapi-sensor not available for the node \" + args.node_name)\n sys.exit(-1)\n\n output = connect_mongodb(args)\n url = get_kwapi_value_url(args.city_name,\n args.node_name,\n args.timestamp_start,\n args.timestamp_stop)\n data = requests.get(url,\n auth=(args.g5k_login,\n args.g5k_pass),\n verify=False).json()\n\n # If there is no data, -1 everywhere\n if len(data['items']) > 0:\n offset = 0\n for ts in range(int(args.timestamp_start), int(args.timestamp_stop)):\n # If offset is outofrange or\n # data timestamp is different from the current ts\n if (offset >= len(data['items'][0]['timestamps']) or\n ts != data['items'][0]['timestamps'][offset]):\n continue\n new_row = create_data(ts, \"kwapi-sensor\", data['items'][0]['values'][offset])\n output.insert_one(new_row)\n offset += 1", "title": "" }, { "docid": "2fa36260444895fa101cda6ef09f84e8", "score": "0.4940771", "text": "def __init__(self, **kwargs):\n super(DataIngestion, self).__init__(**kwargs)\n\n self.random_indices = None\n\n if 'seed' not in self.userdata:\n # choose random seed and add to userdata so it gets persisted\n self.userdata['seed'] = random.randint(0, 1000)\n\n random.seed(self.userdata['seed'])", "title": "" }, { "docid": "d2a2b44b0aa60c96b743a7e4161e67df", "score": "0.4925837", "text": "def process(self, event):\n\n if self.count >= 10:\n elasticsearch.helpers.bulk(self.es_client, self.docs)\n #print(self.docs)\n self.count = 0\n else:\n with open(event.src_path, 'r') as f:\n if 'ssl' in f.name:\n self.type = 'HTTPS'\n else:\n self.type = 'HTTP'\n f.seek(self.offset)\n while True:\n line = f.readline()\n if not line:\n self.offset = f.tell()\n break\n else:\n self.append_docs(line)\n self.count += 1\n print(self.docs)\n print(self.count)", "title": "" }, { "docid": "9e10481766ad66f985418f3b7175130c", "score": "0.49051937", "text": "def construct_data_payload(self, data):\n for key, payload in data.items():\n data_map = self.data_map_master[key]\n topic = data_map['topic']\n name = data_map['field']\n value = payload['value']\n 
meta = data_map['meta']\n self.time_step = payload['time']\n self.output_data[topic][0].update({name: value})\n self.output_data[topic][1].update({name: meta})", "title": "" }, { "docid": "1f379f1c3deca4fcd5fc64513662c7c8", "score": "0.4900166", "text": "def main():\n\n spark, logs = start_spark(app_name='realtime_job')\n\n logs.info('Real time job is up-and-running')\n\n # Rather than Spark infering schema , best option to use a sample json to retrieve Schema from the sample json\n schema = spark.read.options(multiLine=True).json(\"/app/xapo/data/input/sample.json\").schema\n\n logs.info('Reading from Kafka topic...')\n df = spark \\\n .readStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"broker:9092\") \\\n .option(\"subscribe\", \"xapo\") \\\n .option(\"startingOffsets\", \"earliest\") \\\n .load()\n\n string_df = df.selectExpr(\"CAST(value AS STRING)\")\n\n json_df = string_df \\\n .withColumn(\"jsonData\", from_json(col(\"value\"), schema)) \\\n .select(\"jsonData.block_height\",\"jsonData.block_time\", \"jsonData.fee\", \"jsonData.outputs\") \\\n .withColumn(\"exp_outputs\",explode(col(\"outputs\"))) \\\n .select(\"block_height\",\"block_time\",\"fee\",\"exp_outputs.*\") \\\n .select(\"block_height\",\"block_time\",\"fee\",\"addresses\",\"value\") \\\n .withColumn(\"exp_address\", explode(col(\"addresses\"))) \\\n .select(\"block_height\",\"block_time\",\"fee\",col(\"exp_address\").alias(\"addresses\"),\"value\") \\\n .selectExpr(\"block_height\",\"to_timestamp(block_time) as block_time\",\"fee\",\"addresses\",\"value\") \\\n .filter(col(\"addresses\") != \"\")\n json_df.printSchema()\n \n logs.info('Writing the stream ouput...')\n json_df \\\n .writeStream \\\n .format(\"csv\") \\\n .option(\"path\",\"/app/xapo/data/output/streamoutput/\") \\\n .trigger(once=True) \\\n .option(\"checkpointLocation\", \"/tmp/checkpoint/\") \\\n .option(\"header\", True) \\\n .outputMode(\"append\") \\\n .start() \\\n .awaitTermination()\n\n logs.info('Real time job is finished...')", "title": "" }, { "docid": "d856e99231cd195efbfc7f83999b6909", "score": "0.4894529", "text": "def insert_data_task():\n logger.info(\"START OAI Data discovery...\")\n\n # Exit early if harvesting is disable\n oai_settings = oai_settings_api.get()\n if not oai_settings.enable_harvesting:\n logger.info(\"Harvesting OFF. 
Exiting discovery...\")\n return\n\n try:\n # Retrieve the Data ids in OAI Data\n oai_data = oai_data_api.get_all()\n oai_data_ids = serializers.serialize(\"json\", oai_data.only(\"data\"))\n registered_data_id = [\n data[\"fields\"][\"data\"] for data in json.loads(oai_data_ids)\n ]\n\n # Retrieve all data not registered in OAI and insert them in OAI data\n data = data_system_api.get_all_except(registered_data_id)\n logger.debug(\"XML Data retrieved.\")\n\n for document in data:\n oai_data_api.upsert_from_data(document, force_update=False)\n\n logger.debug(\"OAI Data inserted.\")\n except Exception as exception:\n logger.error(\"Impossible to init the OAI-PMH data: %s\", str(exception))\n\n logger.info(\"OAI Data discovery done\")", "title": "" }, { "docid": "fa01f4b5f1fcb6d2b350940f639bba64", "score": "0.48915276", "text": "def nodelist():\n process = sp.Popen(['pbsnodes -av'], stdout=sp.PIPE, stderr=sp.PIPE, shell=True)\n out, err = process.communicate()\n lines, types, status, used_cores, cores, used_memory, mem = [], [], [], [], [], [], []\n nodes_list = []\n node = ''\n for l in out.splitlines():\n l = l.decode('utf-8')\n node += l\n if l == '':\n nodes_list.append(node)\n node = ''\n\n for node in nodes_list:\n type_match = re.search(r'(\\w\\w\\w)\\d\\d\\d', node)\n\n cpu_match = re.search(r'resources_available.ncpus = (\\w+)', node)\n used_cpu_match = re.search(r'resources_assigned.ncpus = (\\w+)', node)\n\n mem_match = re.search(r'resources_available.mem = (\\w+)kb', node)\n used_mem_match = re.search(r'resources_assigned.mem = (\\w+)kb', node)\n\n status_match = re.search(r'state = ([a-z\\-]+ [a-z]*)', node)\n if type_match and cpu_match and mem_match and status_match:\n type = type_match.group(1)\n total_cpu = int(cpu_match.group(1))\n used_cpu = int(used_cpu_match.group(1))\n total_mem = int(int(mem_match.group(1)) / 1024 / 1024)\n used_mem = int(int(used_mem_match.group(1)) / 1024 / 1024)\n node_status = status_match.group(1).strip()\n if node_status == 'free' and used_cpu != 0:\n node_status = 'partially free'\n\n types.append(type)\n status.append(node_status)\n cores.append(total_cpu)\n mem.append(total_mem)\n used_cores.append(used_cpu)\n used_memory.append(used_mem)\n df = pd.DataFrame(dict(\n type=types,\n cores=cores,\n used_cores=used_cores,\n memory=mem,\n used_memory=used_memory,\n status=status\n ))\n return df", "title": "" }, { "docid": "7ae0dd869d60954fb181accda6385038", "score": "0.48883793", "text": "def get_nodes(self):", "title": "" }, { "docid": "94e2438da8a182f3ed94588aeaae5977", "score": "0.48852518", "text": "def index(data_root):\n # currently, only combine view references and query_edgelist\n data_root = ensure_folder(data_root)\n edges = []\n nodes = []\n\n for edgelist in data_root.glob(\"**/query_log_edges.json\"):\n rows = json.loads(edgelist.read_text())\n logging.info(\n f\"merging {edgelist.relative_to(data_root)} with {len(rows)} query references\"\n )\n edges += rows\n\n for nodelist in data_root.glob(\"**/query_log_nodes.json\"):\n rows = json.loads(nodelist.read_text())\n logging.info(\n f\"merging {nodelist.relative_to(data_root)} with {len(rows)} queries\"\n )\n nodes += rows\n\n # write the file to disk as both csv and json, csv target is gephi compatible\n with (data_root / \"edges.json\").open(\"w\") as fp:\n json.dump(edges, fp, indent=2)\n with (data_root / \"nodes.json\").open(\"w\") as fp:\n json.dump(nodes, fp, indent=2)\n logging.info(\"wrote nodes.json\")\n with (data_root / \"edges.csv\").open(\"w\") as fp:\n 
fp.write(\"Source,Target\\n\")\n for edge in edges:\n fp.write(f\"{edge['referenced_table']},{edge['destination_table']}\\n\")\n logging.info(\"wrote edges.csv\")\n\n # generate some stats\n stats = {}\n G = nx.DiGraph()\n for edge in edges:\n G.add_edge(edge[\"referenced_table\"], edge[\"destination_table\"])\n stats[\"number_of_nodes\"] = G.number_of_nodes()\n stats[\"number_of_edges\"] = G.number_of_edges()\n stats[\"in_degree\"] = sorted(dict(G.in_degree()).values())\n stats[\"out_degree\"] = sorted(dict(G.out_degree()).values())\n stats[\"degree\"] = sorted(dict(G.degree()).values())\n for value in [\"in_degree\", \"out_degree\", \"degree\"]:\n stats[f\"avg_{value}\"] = statistics.mean(stats[value])\n stats[\n \"number_strongly_connected_components\"\n ] = nx.number_strongly_connected_components(G)\n stats[\"number_weakly_connected_components\"] = nx.number_weakly_connected_components(\n G\n )\n\n with (data_root / \"network_stats.json\").open(\"w\") as fp:\n json.dump(stats, fp, indent=2)\n logging.info(\"wrote network_stats.json\")\n\n # also generate a manifest so we can download the files via the app\n with (data_root / \"manifest.json\").open(\"w\") as fp:\n json.dump(\n sorted(\n [\n {\n \"path\": str(p.relative_to(data_root.parent)),\n \"size_bytes\": p.stat().st_size,\n }\n for p in data_root.glob(\"**/*\")\n if p.name != \"manifest.json\" and p.is_file()\n ],\n key=lambda x: x[\"path\"],\n ),\n fp,\n indent=2,\n )\n logging.info(\"wrote manifest.json\")", "title": "" }, { "docid": "32e74fa7a666a62133f0e8cd20b59457", "score": "0.48820916", "text": "def test_create_dataset_config_and_event_subscriber(self):\n\n\n #------------------------------------------------------------------------\n # Get the ingestion process instances:\n #------------------------------------------------------------------------\n\n transforms = [self.rr_cli.read(assoc.o)\n for assoc in self.rr_cli.find_associations(self.ingestion_configuration_id, PRED.hasTransform)]\n\n proc_1 = self.container.proc_manager.procs[transforms[0].process_id]\n log.info(\"PROCESS 1: %s\" % str(proc_1))\n\n proc_2 = self.container.proc_manager.procs[transforms[1].process_id]\n log.info(\"PROCESS 2: %s\" % str(proc_2))\n\n #------------------------------------------------------------------------\n # Set up the gevent events\n #------------------------------------------------------------------------\n\n # Over ride the call back for the event subscriber\n ar_1 = gevent.event.AsyncResult()\n def message_received_1(message, headers):\n ar_1.set(message)\n\n proc_1.event_subscriber._callback = message_received_1\n\n # Over ride the call back for the event subscriber\n proc_2 = self.container.proc_manager.procs[transforms[1].process_id]\n log.info(\"PROCESS 2: %s\" % str(proc_2))\n\n ar_2 = gevent.event.AsyncResult()\n def message_received_2(message, headers):\n ar_2.set(message)\n\n proc_2.event_subscriber._callback = message_received_2\n\n\n # Create a dataset ingestion config which sends an event\n\n dataset_config_id = self.ingestion_cli.create_dataset_configuration(\n dataset_id = self.input_dataset_id,\n archive_data = True,\n archive_metadata = False,\n ingestion_configuration_id = self.ingestion_configuration_id\n )\n\n\n dataset_config = self.rr_cli.read(dataset_config_id)\n\n #--------------------------------------------------------------------------------------------------------\n # Do assertions!\n #--------------------------------------------------------------------------------------------------------\n\n 
self.assertEquals(dataset_config.configuration.stream_id, self.input_stream_id)\n self.assertEquals(dataset_config.configuration.archive_data, True)\n self.assertEquals(dataset_config.configuration.archive_metadata, False)\n\n\n self.assertEqual(ar_1.get(timeout=10).configuration.dataset_id,self.input_dataset_id)\n self.assertEqual(ar_2.get(timeout=10).configuration.dataset_id,self.input_dataset_id)", "title": "" }, { "docid": "f4079054bf0b80284b9e14aced9af5a6", "score": "0.48772228", "text": "def _handle_read_subscribers(self, data):\n self._input_stream.append(data.execution_result)\n rospy.loginfo(\n \"I received data from ROS Topic : \\\"\" +\n self._input_stream[-1] +\n \"\\\" - [\"+ data.execution_result +\"] \"\n )\n self._notify()", "title": "" }, { "docid": "ed010c319c20c4ff51df77898b4a0976", "score": "0.48739696", "text": "def publish(service, nodeIdentifier, items=[]):", "title": "" }, { "docid": "ef98e05f8f163556e63916796aafaf67", "score": "0.4870724", "text": "def run_test(self):\n\n self.test_collect_data()", "title": "" }, { "docid": "fedce1073e4fd7b01651a8a8453bdca2", "score": "0.48700514", "text": "def ingest_data() -> None:\n secrets = sts.get_secrets()\n DB_NAME = secrets[\"DB_NAME\"]\n HOST = secrets[\"HOST\"]\n PORT_NAME = secrets[\"PORT_NAME\"]\n USER = secrets[\"USER\"]\n PASSWORD = secrets[\"PASSWORD\"]\n\n try:\n logging.debug(\"Connecting to redshift warehouse.\")\n wh_conn = psycopg2.connect(\n dbname=DB_NAME, host=HOST, port=PORT_NAME, user=USER, password=PASSWORD\n )\n wh_conn.autocommit = True\n cur = wh_conn.cursor()\n\n try:\n logging.debug(\n \"Executing ingest_tweets_aapl_sentiments_minute_sum_query query.\"\n )\n cur.execute(staq.ingest_tweets_aapl_sentiments_minute_sum_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\"Executing ingest_tickers_aapl_minute_avg_query query.\")\n cur.execute(staq.ingest_tickers_aapl_minute_avg_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\"Executing ingest_tickers_aapl_query query.\")\n cur.execute(staq.ingest_tickers_aapl_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\n \"Executing ingest_tweets_goog_sentiments_minute_sum_query query.\"\n )\n cur.execute(staq.ingest_tweets_goog_sentiments_minute_sum_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\"Executing ingest_tickers_goog_minute_avg_query query.\")\n cur.execute(staq.ingest_tickers_goog_minute_avg_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\"Executing ingest_tickers_goog_query query.\")\n cur.execute(staq.ingest_tickers_goog_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\n \"Executing ingest_tweets_amzn_sentiments_minute_sum_query query.\"\n )\n cur.execute(staq.ingest_tweets_amzn_sentiments_minute_sum_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\"Executing ingest_tickers_amzn_minute_avg_query query.\")\n cur.execute(staq.ingest_tickers_amzn_minute_avg_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n try:\n logging.debug(\"Executing ingest_tickers_amzn_query query.\")\n cur.execute(staq.ingest_tickers_amzn_query)\n except Exception as error:\n logging.error(error)\n raise error\n\n logging.debug(\"Commiting cursor execution.\")\n wh_conn.commit()\n wh_conn.close()\n cur.close()\n 
except psycopg2.DatabaseError as error:\n logging.error(error)\n raise (error)", "title": "" }, { "docid": "68f34e340db3a05509f95dbd7232d1c4", "score": "0.48682654", "text": "def get_nodes(self):\n path = self.end_point + \"nodes\"\n data = self._get_data(path)\n return data", "title": "" }, { "docid": "dd9447c15edb6aef09a3c8b33edba2fc", "score": "0.4867842", "text": "def loadData():\n #training\n print(\"Hello\")", "title": "" }, { "docid": "152f130ad390f8498daf733002943da5", "score": "0.48632967", "text": "def _process(self):\n if len(self._output_stream):\n rospy.wait_for_service(self._service_name)\n try:\n rospy.loginfo(\n \"I am sending data to Vision Server : \\\"\" +\n \"node_name : \" + self._output_stream[0][0] +\n \" filterchain_name : \" + self._output_stream[0][1] +\n \" media_name : \" + self._output_stream[0][2] +\n \" cmd : \" + str(self._output_stream[0][3]) + \"\\\"\")\n self._input_stream.append(str(self._service_response(\n self._output_stream[0][0], self._output_stream[0][1],\n self._output_stream[0][2], self._output_stream[0][3])))\n self._output_stream = self._output_stream[1:]\n if not self.is_empty:\n rospy.loginfo(\n \"I received data from Vision Server : \\\"\" +\n self._input_stream[-1] + \"\\\"\")\n self._notify()\n except rospy.ServiceException, e:\n rospy.logerr(\"Service call failed: %s\" % e)", "title": "" }, { "docid": "78e95d1ef1db5cec5d9a9fd33b954144", "score": "0.4862141", "text": "def your_code_goes_here():\n # ----------------- YOUR CODE STARTS HERE ------------------------------\n from recipes.recipe_nix_add_on import Nix_Linux_add_on as recipe\n # ----------------- YOUR CODE ENDS HERE --------------------------------\n rcp = recipe(SPLUNK_HOST, SPLUNK_PORT, SPLUNK_USERNAME, SPLUNK_PASSWORD, TIME_FRAME, VERBOSE)\n for device in splunker.hosts:\n data = rcp.get_data(device)\n dparser.parser(data)", "title": "" }, { "docid": "acfc08adff0c1f46ae2682ea399e2046", "score": "0.48599166", "text": "def run(self, data_file: Optional[str] = None) -> None:\n self.node_header = ['id', 'category', 'provided_by']\n self.edge_header = ['id', 'subject', 'edge_label', 'object', 'relation', 'provided_by', 'type']\n\n # ChEMBL molecules\n data = self.get_chembl_molecules()\n molecule_nodes = self.parse_chembl_molecules(data)\n\n # ChEMBL assay\n data = self.get_chembl_assays()\n assay_nodes = self.parse_chembl_assay(data)\n\n # ChEMBL document\n data = self.get_chembl_documents()\n document_nodes = self.parse_chembl_document(data)\n\n # ChEMBL activity\n data = self.get_chembl_activities()\n activity_edges = self.parse_chembl_activity(data)\n\n self.node_header.extend(self._node_header)\n self.edge_header.extend(self._edge_header)\n\n node_handle = open(self.output_node_file, 'w')\n edge_handle = open(self.output_edge_file, 'w')\n node_handle.write(\"\\t\".join(sorted(self.node_header)) + \"\\n\")\n edge_handle.write(\"\\t\".join(sorted(self.edge_header)) + \"\\n\")\n\n for n in molecule_nodes:\n write_node_edge_item(\n fh=node_handle,\n header=sorted(self.node_header),\n data=[n[x] if x in n else '' for x in sorted(self.node_header)]\n )\n for n in assay_nodes:\n write_node_edge_item(\n fh=node_handle,\n header=sorted(self.node_header),\n data=[n[x] if x in n else '' for x in sorted(self.node_header)]\n )\n\n for n in document_nodes:\n write_node_edge_item(\n fh=node_handle,\n header=sorted(self.node_header),\n data=[n[x] if x in n else '' for x in sorted(self.node_header)]\n )\n\n for e in activity_edges:\n write_node_edge_item(\n fh=edge_handle,\n 
header=sorted(self.edge_header),\n data=[e[x] if x in e else '' for x in sorted(self.edge_header)]\n )", "title": "" }, { "docid": "c74d82f0737ec7bb4e813dc94733450a", "score": "0.48582956", "text": "def test_dataset_config_implementation_for_science_data(self):\n\n\n #--------------------------------------------------------------------------------------------------------\n # Get the ingestion process instances:\n #--------------------------------------------------------------------------------------------------------\n\n transforms = [self.rr_cli.read(assoc.o)\n for assoc in self.rr_cli.find_associations(self.ingestion_configuration_id, PRED.hasTransform)]\n\n proc_1 = self.container.proc_manager.procs[transforms[0].process_id]\n log.info(\"PROCESS 1: %s\" % str(proc_1))\n\n proc_2 = self.container.proc_manager.procs[transforms[1].process_id]\n log.info(\"PROCESS 2: %s\" % str(proc_2))\n\n #--------------------------------------------------------------------------------------------------------\n # Create a dataset config\n #--------------------------------------------------------------------------------------------------------\n\n dataset_config_id = self.ingestion_cli.create_dataset_configuration(\n dataset_id = self.input_dataset_id,\n archive_data = True,\n archive_metadata = True,\n ingestion_configuration_id = self.ingestion_configuration_id\n )\n\n #--------------------------------------------------------------------------------------------------------\n # Set up the gevent event AsyncResult queue\n #--------------------------------------------------------------------------------------------------------\n\n queue=gevent.queue.Queue()\n\n def call_to_persist(packet):\n queue.put(packet)\n\n #--------------------------------------------------------------------------------------------------------\n # Grab the ingestion worker processes\n #--------------------------------------------------------------------------------------------------------\n\n # when persist_immutable() is called, then call_to_persist() is called instead....\n proc_1.persist_immutable = call_to_persist\n proc_2.persist_immutable = call_to_persist\n\n #--------------------------------------------------------------------------------------------------------\n # Create a packet and publish it\n #--------------------------------------------------------------------------------------------------------\n\n ctd_packet = self._create_packet(self.input_stream_id)\n\n self.ctd_stream1_publisher.publish(ctd_packet)\n\n #--------------------------------------------------------------------------------------------------------\n # Assert that the packets were handled according to the dataset config\n #--------------------------------------------------------------------------------------------------------\n\n # test that the ingestion worker tries to persist the ctd_packet in accordance to the dataset config\n self.assertEquals(queue.get(timeout=10).stream_resource_id, ctd_packet.stream_resource_id)\n\n #--------------------------------------------------------------------------------------------------------\n # Now change the dataset config for the same stream\n #--------------------------------------------------------------------------------------------------------\n\n dataset_config = self.ingestion_cli.read_dataset_config(dataset_config_id)\n\n dataset_config.configuration.archive_metadata = False\n\n self.ingestion_cli.update_dataset_config(dataset_config)\n\n 
#--------------------------------------------------------------------------------------------------------\n # Create a new packet and publish it\n #--------------------------------------------------------------------------------------------------------\n\n ctd_packet = self._create_packet(self.input_stream_id)\n\n self.ctd_stream1_publisher.publish(ctd_packet)\n\n #--------------------------------------------------------------------------------------------------------\n # Assert that the packets were handled according to the new dataset config...\n # This time, the packet should not be persisted since archive_metadata is False\n #--------------------------------------------------------------------------------------------------------\n\n with self.assertRaises(gevent.queue.Empty):\n queue.get(timeout=0.25)\n\n #--------------------------------------------------------------------------------------------------------\n # Now just do this thing one more time, with an updated dataset config...\n #\n # Change the dataset config for the same stream for the third time, updating archive_metadata back to True\n #--------------------------------------------------------------------------------------------------------\n\n dataset_config = self.ingestion_cli.read_dataset_config(dataset_config_id)\n dataset_config.configuration.archive_metadata = True\n self.ingestion_cli.update_dataset_config(dataset_config)\n\n #--------------------------------------------------------------------------------------------------------\n # Create a new packet and publish it\n #--------------------------------------------------------------------------------------------------------\n\n ctd_packet = self._create_packet(self.input_stream_id)\n self.ctd_stream1_publisher.publish(ctd_packet)\n\n #--------------------------------------------------------------------------------------------------------\n # Assert that the packets were handled according to the new dataset config\n #--------------------------------------------------------------------------------------------------------\n\n self.assertEquals(queue.get(timeout=10).stream_resource_id, ctd_packet.stream_resource_id)\n\n\n #--------------------------------------------------------------------------------------------------------\n #--------------------------------------------------------------------------------------------------------\n # Check that the dataset id is passed properly in the GranuleIngestedEvent\n #--------------------------------------------------------------------------------------------------------\n #--------------------------------------------------------------------------------------------------------\n\n ar = gevent.event.AsyncResult()\n def granule_ingested_hook(msg, headers):\n ar.set(msg)\n\n\n #Start the event subscriber - really - what a mess!\n event_subscriber = EventSubscriber(\n event_type=\"GranuleIngestedEvent\",\n origin=self.input_dataset_id,\n callback=granule_ingested_hook\n )\n\n self.gl.append(spawn(event_subscriber.listen))\n event_subscriber._ready_event.wait(timeout=5)\n self.event_subscribers.append(event_subscriber)\n\n\n\n # Set up the gevent Result queue for the hook in the workers\n\n queue=gevent.queue.Queue()\n\n def config_event_hook(packet, headers):\n queue.put(packet)\n\n #--------------------------------------------------------------------------------------------------------\n # Override the worker processes methods with the gevent event hooks\n 
#--------------------------------------------------------------------------------------------------------\n\n proc_1.dataset_configs_event_test_hook = config_event_hook\n proc_2.dataset_configs_event_test_hook = config_event_hook\n\n dataset_config = self.ingestion_cli.read_dataset_config(dataset_config_id)\n dataset_config.configuration.archive_metadata = True\n self.ingestion_cli.update_dataset_config(dataset_config)\n\n #--------------------------------------------------------------------------------------------------------\n # Create a new packet and publish it\n #--------------------------------------------------------------------------------------------------------\n\n ctd_packet = self._create_packet(self.input_stream_id)\n self.ctd_stream1_publisher.publish(ctd_packet)\n\n #--------------------------------------------------------------------------------------------------------\n # Assert that the dataset id got from the dataset_config event hook is what it should be in both procs\n #--------------------------------------------------------------------------------------------------------\n\n self.assertEquals(queue.get(timeout=10).configuration.dataset_id,self.input_dataset_id)\n self.assertEquals(queue.get(timeout=10).configuration.dataset_id,self.input_dataset_id)\n self.assertTrue(queue.empty())\n\n\n event_msg = ar.get(timeout=10)\n self.assertEquals(event_msg.origin, self.input_dataset_id)\n\n data_stream_id = self.ctd_stream_def.data_stream_id\n element_count_id = self.ctd_stream_def.identifiables[data_stream_id].element_count_id\n record_count = ctd_packet.identifiables[element_count_id].value\n\n self.assertEquals(event_msg.ingest_attributes['number_of_records'], record_count)", "title": "" }, { "docid": "508b1922fb9e1342815df6dadd9efc06", "score": "0.48538142", "text": "def create_nodes(self):", "title": "" }, { "docid": "ea2a53cd740916a7c245789730b1a6d5", "score": "0.48494446", "text": "def feed_data(kernel: Kernel):\n new_data = request.json\n kernel.update_single(new_data)\n return 'OK'", "title": "" }, { "docid": "b99d6a008aca03bdbeae80faf9b45f78", "score": "0.48489535", "text": "def save_sensor_data(self,node_id,data):\n\n # the data table is processed for content extraction\n # the process returns a dictionary object.\n data = self.find_data_values(data)\n\n # Initiating connection with the database.\n db = Sn_db()\n db.connect_db()\n # the data is being inserted to a specific table in the database.\n db.insert_client_data(node_id,\"cn2_data\",data)\n time.sleep(0.3)\n db.disconnect_db()", "title": "" }, { "docid": "345db519c0d5563cc50cc2bf5a42cc3b", "score": "0.48454794", "text": "def main(self):\n self.vectorizer().reducer().classifier()\n self.aggregate_map_data(n_topics=4, n_features=5)\n self.store_in_mongo(self.MONGO_DBNAME_OUT,self.MONGO_COLLECTION_OUT)", "title": "" }, { "docid": "5b0a3edbae6ec871444bb74949dd27b9", "score": "0.4841799", "text": "def monitor(event, context):\n arns = parseArnsString(READER_ROLE_ARNS)\n\n # get elbs\n v1ELBs = getAllV1ELBs()\n v2ELBs = getAllV2ELBs()\n elbs = [] + v1ELBs + v2ELBs\n for arn in arns: elbs += getAllV1ELBs(arn) + getAllV2ELBs(arn)\n \n # convert to valid json strings\n jsonStrings = [\n json.dumps(elb, default=datetimeSerializer).encode(\"utf-8\") for elb in elbs\n ]\n\n # create valid insert data for fivetran response\n monitored_time = datetime.datetime.utcnow().isoformat()\n fivetranInserts = [\n {\n 'RAW_DATA': json.loads(jsonString),\n 'SNOWWATCH_MONITORED_TIME_UTC': monitored_time\n }\n for jsonString in 
jsonStrings\n ]\n\n # return monitoring results to fivetran\n response = {\n 'state': 0,\n 'hasMore': False,\n 'insert': {\n 'elastic_load_balancers': fivetranInserts\n }\n }\n return response", "title": "" }, { "docid": "6cf79bdf620c8a0b83138b764c023a7d", "score": "0.4836343", "text": "def test_nodes_post(self):\n pass", "title": "" }, { "docid": "f02e26053c95cb9aaf79c3041cea8c3f", "score": "0.48343435", "text": "def read_data(self):", "title": "" }, { "docid": "0c2598828a6afac3e4fb8ee174a4b470", "score": "0.48324698", "text": "def on_start(self, node_monitor: NodeMonitor) -> None:\n pass", "title": "" }, { "docid": "0ecb56ab4e4a5d3dfb35511bc7e075f2", "score": "0.4831964", "text": "def log_nodes_cb(node, status):\n\n if status != 'end':\n return\n\n # Import packages\n import json\n import logging\n import nipype.pipeline.engine.nodes as nodes\n\n logger = logging.getLogger('callback')\n\n if isinstance(node, nodes.MapNode):\n return\n\n runtime = node.result.runtime\n\n status_dict = {\n 'name': node.name,\n 'id': str(node),\n 'start': getattr(runtime, 'startTime'),\n 'finish': getattr(runtime, 'endTime'),\n 'duration': getattr(runtime, 'duration'),\n 'runtime_threads': getattr(runtime, 'cpu_percent', 'N/A'),\n 'runtime_memory_gb': getattr(runtime, 'mem_peak_gb', 'N/A'),\n 'estimated_memory_gb': node.mem_gb,\n 'num_threads': node.n_procs,\n }\n\n if status_dict['start'] is None or status_dict['finish'] is None:\n status_dict['error'] = True\n\n logger.debug(json.dumps(status_dict))", "title": "" }, { "docid": "f2e038400354efed2e3287e38e7fa925", "score": "0.4829451", "text": "def nodeCallbacks(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "9263034b6c99d3edc026a29ffc6c4813", "score": "0.48230544", "text": "def import_data(self): \n\n self.import_data_scan_spectrum()", "title": "" }, { "docid": "0837b0ff63a869647f166755f231e5fc", "score": "0.48182255", "text": "def on_data_received(self, training_context):\n\n pass", "title": "" }, { "docid": "74176f644a92a6ac49b2bb4c6ba5a882", "score": "0.48162743", "text": "def _ingest_virtual_dataset(ref_data, mdmap, vds_tstart=None, vds_tstop=None):\n logger.info('Ingesting virtual dataset temp file VDS.hdf5')\n with h5py.File(\"VDS.hdf5\", 'r', libver='latest') as vds:\n # Get all the MSIDs data from the VDS (Virtual Data Set) \n # and use it to create a Pandas Dataframe\n df = pd.DataFrame(vds['/data'][...].byteswap().newbyteorder())\n \n # Remove samples with id == 0\n df = df.loc[(df['id'] != 0)]\n\n # Remove duplicate entries\n df.drop_duplicates(subset=['id', 'observatoryTime', 'engineeringNumericValue'], inplace=True)\n\n df = df.sort_values(by=['observatoryTime'])\n df['observatoryTime'] = Time(df['observatoryTime']/1000, format='unix').jd\n \n if vds_tstart and vds_tstop:\n vds_tstart = Time(vds_tstart, format='unix').jd\n vds_tstop = Time(vds_tstop, format='unix').jd\n df = df.loc[(df['observatoryTime'] >= vds_tstart) & (df['observatoryTime'] <= vds_tstop)]\n \n # Entries with ID 0 are padding and can be ignored\n ids = df['id'].unique()\n ids = ids[ids != 0]\n ids = np.intersect1d(ids, np.array(list(mdmap.keys()),dtype=int))\n\n df = df.groupby([\"id\"])[['observatoryTime', 'engineeringNumericValue', 'apid']]\n \n for msid_id in ids:\n tlm = df.get_group(msid_id)\n\n if 'last_ingested_timestamp' not in list(ref_data[mdmap[msid_id]].attrs):\n # Set the default value to DEC 24 2021, 00:00:00\n ref_data[mdmap[msid_id]].attrs['last_ingested_timestamp'] = 2459572.5\n\n if tlm['observatoryTime'].min() <= 
ref_data[mdmap[msid_id]].attrs['last_ingested_timestamp']:\n if tlm['observatoryTime'].max() > ref_data[mdmap[msid_id]].attrs['last_ingested_timestamp']:\n # remove overlap \n tlm = tlm.loc[tlm['observatoryTime'] > ref_data[mdmap[msid_id]].attrs['last_ingested_timestamp']]\n else:\n # do not ingest. time range already covered.\n continue\n \n times = tlm['observatoryTime'].to_numpy()\n values = tlm['engineeringNumericValue'].to_numpy()\n last_ingested_timestamp = times[-1]\n\n # Get this MSID numpy datatype\n # npt = ref_data[mdmap[msid_id]].attrs['numpy_datatype'].replace('np.', '')\n\n # Set datatypes for archive data\n times.dtype = np.float64\n # values = values.astype(npt)\n\n # Ensure data is sorted in time order\n # TODO: Verify this can be removed permanently\n # since data should come order by msid \n # sort_msid_data_by_time(mid, append=False)\n\n # Finally append the data to the archive.\n _append_h5_col_tlm(msid=mdmap[msid_id], epoch=times[0], times=times, values=values, apply_direct=True)\n \n # update the saved last_ingested_timestamp after successful ingest (LITA-184)\n ref_data[mdmap[msid_id]].attrs['last_ingested_timestamp'] = last_ingested_timestamp # last timestamp in the ingest", "title": "" }, { "docid": "3ff3673a7eb706cd42b0d404b1d692df", "score": "0.48154262", "text": "async def send_to_node_output_api(request: web.Request):\n body = await request.body\n log.debug(\n request.match_info[\"nodeInstanceUUID\"],\n request.match_info[\"outputKey\"],\n request.match_info[\"apiCall\"],\n body,\n )\n\n raise NotImplementedError()", "title": "" }, { "docid": "243e74abdd01d63068f1d3910691a433", "score": "0.48131406", "text": "def load_nodes(self, node_records):\n\n for node in node_records:\n self.load_node(node)", "title": "" }, { "docid": "4a217b883b61269cc255602322a0858b", "score": "0.48049763", "text": "def process_event(self):\n models = self.get_all_models()\n assets = self.get_all_assets(models)\n payload = self.generate_payload(models, assets)\n self.upload_to_s3(payload)\n\n # model_count = self.get_entity_count(models)\n # asset_count = self.get_entity_count(assets)\n # log.info(f'Total number of models: {model_count}')\n # log.info(f'Total number of assets: {asset_count}')", "title": "" }, { "docid": "e2df5cb746a97f398fff67caee1cd58d", "score": "0.48042214", "text": "def process_node(layer, node_data):\n input_tensors = []\n for input_data in node_data:\n inbound_layer_name = input_data[0]\n inbound_node_index = input_data[1]\n inbound_tensor_index = input_data[2]\n if len(input_data) == 3:\n kwargs = {}\n elif len(input_data) == 4:\n kwargs = input_data[3]\n else:\n raise ValueError('Improperly formatted model config.')\n inbound_layer = created_layers[inbound_layer_name]\n # Raise an error if the corresponding layer node\n # has not yet been created\n if len(inbound_layer._inbound_nodes) <= inbound_node_index:\n raise LookupError\n inbound_node = inbound_layer._inbound_nodes[inbound_node_index]\n input_tensors.append(\n inbound_node.output_tensors[inbound_tensor_index])\n\n # Call layer on its inputs, thus creating the node\n # and building the layer if needed.\n if input_tensors:\n layer(unpack_singleton(input_tensors), **kwargs)", "title": "" }, { "docid": "8a4637734f28f21a7befd84e888c631a", "score": "0.47980213", "text": "def init_node(self):\n super(CompetitionDriver, self).init_node()\n # use this to set the proper channels for the Driver to listen to for data\n self.position = rospy.Subscriber('/odom', Odometry, self.process_position)\n\n # use this to set 
the proper channels for the Driver to publish on\n self.last_pose_data = rospy.Publisher('/last_pose_data', Odometry, queue_size=1)\n self.cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=1)", "title": "" }, { "docid": "618ee6978747f4ec5c21b5cdc7f92d80", "score": "0.47898927", "text": "def nodes(self):", "title": "" }, { "docid": "f58905b459d45966df40cbe0ea86f4b6", "score": "0.4786759", "text": "def ingest(e ,D) :\n\n\n if e['type'] == 'CUSTOMER':\n\n \"\"\"\n case verb = new and customer id not in D : Insert\n case verb = new and customer id in D : Update , event after previous site_visit event\n case verb = update and customer id in D : Update\n case verb = update and customer id not in D : exception\n \"\"\"\n\n weekNum = getWeekNum(e['event_time'])\n\n lastName, city, state = checkNonMandatory(e)\n\n if e['verb'] == 'NEW' and e['key'] not in D:\n # Insert\n D[e['key']] = {'visitCount':1, 'visits':{weekNum:1}, 'orders':{},'pages':[],'images': [],'revenue':0.00, 'lastName': lastName, 'city': city, 'state': state}\n\n elif e['verb'] == 'NEW' and e['key'] in D :\n # update , event after previous site_visit event\n D[e['key']]['lastName'] = lastName\n D[e['key']]['city'] = city\n D[e['key']]['state'] = state\n\n D[e['key']]['visitCount'] += 1\n\n if weekNum not in D[e['key']].setdefault('visits',{}) :\n D[e['key']]['visits'][weekNum] = 1\n else:\n D[e['key']]['visits'][weekNum] += 1\n\n elif e['verb'] == 'UPDATE' and e['key'] in D:\n # update\n D[e['key']]['lastName'] = lastName\n D[e['key']]['city'] = city\n D[e['key']]['state'] = state\n\n D[e['key']]['visitCount'] += 1\n\n if weekNum not in D[e['key']].setdefault('visits',{}) :\n D[e['key']]['visits'][weekNum] = 1\n else:\n D[e['key']]['visits'][weekNum] += 1\n\n elif e['verb'] == 'UPDATE' and e['key'] not in D :\n # exception\n logging.debug('UPDATE CUSTOMER without previous customer_id : %s', e['key'])\n\n elif e['type'] == 'SITE_VISIT':\n\n \"\"\"\n Assumptions : SITE VISIT event can occur before CUSTOMER event as it was not clear from description \n Events with same page_id will be treated as duplicate as key will be unique. \n \"\"\"\n\n weekNum = getWeekNum(e['event_time'])\n\n if e['customer_id'] not in D:\n # first visit for the customer_id\n\n D[e['customer_id']] = {'visitCount': 1, 'visits': {weekNum:1}, 'orders':{},'images': [],'pages':[e['key']],'revenue': 0.00, 'lastName': None, 'city': None, 'state': None}\n\n elif e['customer_id'] in D and e['key'] in D[e['customer_id']]['pages']:\n # page_id is duplicate\n\n logging.debug('NEW SITE_VISIT with existing page_id :%s', e['key'])\n\n elif e['customer_id'] in D and e['key'] not in D[e['customer_id']]['pages']:\n # page_id is new for the customer\n\n D[e['customer_id']]['pages'].append(e['key'])\n D[e['customer_id']]['visitCount'] += 1\n\n if weekNum not in D[e['customer_id']].setdefault('visits',{}) :\n D[e['customer_id']]['visits'][weekNum] = 1\n else:\n D[e['customer_id']]['visits'][weekNum] += 1\n\n elif e['type'] == 'IMAGE':\n\n \"\"\"\n Assumption : UPLOAD IMAGE event cannot occur before SITE VISIT or CUSTOMER event. \n We are not processing such events.\n \n case : customer uploaded different image i.e for same customer id there is one more image_id (new image)\n case : Events with same image_id will be treated as duplicate. Since image_id (key) will be unique. 
\n \n \"\"\"\n\n weekNum = getWeekNum(e['event_time'])\n\n if e['customer_id'] not in D :\n\n logging.debug('UPLOAD IMAGE event without SITE_VISIT/CUSTOMER event for customer_id : %s', e['customer_id'])\n\n elif e['customer_id'] in D and e['key'] not in D[e['customer_id']]['images']:\n # new image uploaded by customer\n D[e['customer_id']]['images'].append(e['key'])\n\n D[e['customer_id']]['visitCount'] += 1\n\n if weekNum not in D[e['customer_id']].setdefault('visits',{}) :\n D[e['customer_id']]['visits'][weekNum] = 1\n else:\n D[e['customer_id']]['visits'][weekNum] += 1\n\n elif e['customer_id'] in D and e['key'] in D[e['customer_id']]['images']:\n # image_id is duplicate\n\n logging.debug('UPLOAD IMAGE with existing image_id :%s', e['key'])\n\n elif e['type'] == 'ORDER':\n\n \"\"\"\n case verb = new and order id not in D : Insert\n case verb = new and order id in D : exception\n case verb = update and order id in D : Update\n case verb = update and order id not in D : exception\n \n Limitation : Update ORDER event should not update customer_id, as key of D is customer_id. \n \"\"\"\n try:\n # in case order amount is having junk data it will be captured in valueError\n\n order_amount = float(e['total_amount'].replace(\" USD\", \"\"))\n\n weekNum = getWeekNum(e['event_time'])\n\n if e['customer_id'] not in D:\n logging.debug('ORDER event without customer_id : %s', e['customer_id'])\n\n elif e['verb'] == 'NEW' and e['key'] not in D[e['customer_id']]['orders']:\n # Insert\n if e['customer_id'] not in D:\n # exception, direct order without customer id\n logging.debug('NEW ORDER without customer_id : %s',e['customer_id'])\n else:\n D[e['customer_id']]['orders'][e['key']] = order_amount\n D[e['customer_id']]['visitCount'] += 1\n\n if weekNum not in D[e['customer_id']].setdefault('visits', {}):\n D[e['customer_id']]['visits'][weekNum] = 1\n else:\n D[e['customer_id']]['visits'][weekNum] += 1\n\n elif e['verb'] == 'NEW' and e['key'] in D[e['customer_id']]['orders'] :\n # exception, order id is key\n logging.debug('NEW ORDER with existing order_id :%s', e['key'])\n\n elif e['verb'] == 'UPDATE' and e['key'] in D[e['customer_id']]['orders'] :\n # Update\n D[e['customer_id']]['orders'][e['key']]=order_amount\n D[e['customer_id']]['visitCount'] += 1\n\n if weekNum not in D[e['customer_id']].setdefault('visits',{}) :\n D[e['customer_id']]['visits'][weekNum] = 1\n else:\n D[e['customer_id']]['visits'][weekNum] += 1\n\n elif e['verb'] == 'UPDATE' and e['key'] not in D[e['customer_id']]['orders']:\n # exception, no existing order to update\n logging.debug('UPDATE ORDER without previous order_id : %s', e['key'])\n except ValueError:\n logging.debug('Invalid total_amount for order_id : %s', e['key'])\n\n else:\n return D", "title": "" } ]
9d5cf97fdf742bcc546ec4fe25d1aa88
M3VTEF creates the M3VTEF database and associated tables
[ { "docid": "e2ff9e012ab00969a943fb20e1aa7044", "score": "0.71232563", "text": "def run_M3VTEF_DB(db_connection, vegtype_ef_out, ClassID, Jrating):\n # Queries need to be executed in specific order\n # due to dependency of tables generated from previous queries\n print(\"\\n BEGINNING M3VTEF DB GENERATION\")\n SLA_vegID(db_connection, Jrating)\n SLA_genus(db_connection)\n SLA_family(db_connection)\n SLA_growthform(db_connection)\n SLA(db_connection)\n EF_VEGID_Jrating(db_connection)\n EF_VEGID(db_connection, ClassID, Jrating)\n EF_genus(db_connection)\n EF_family(db_connection)\n EF_growthform(db_connection)\n Vegtype_EF_output = Vegtype_EF(db_connection)\n\n Vegtype_EF_output.to_csv(vegtype_ef_out, index=False, header=['VegID', 'VegEF%s' % ClassID]) # Save Vegtype EF table to CSV\n print(\"Vegtype EF Table CSV Generated: %s\" % vegtype_ef_out)", "title": "" } ]
[ { "docid": "f719ae0ee9f79c9006012525b8888d3e", "score": "0.7367714", "text": "def make_M3VTEF_tables(conn, csv_input_dir, Description_Vegetation, DB_SLA, DB_emissions):\n\n print(\"Creating M3VTEF DB tables from: %s\\n\" % csv_input_dir)\n csv_Canopy_Position_adjust = pd.read_csv(csv_input_dir + 'Canopy_Position_adjust.csv')\n csv_Description_Class = pd.read_csv(csv_input_dir + 'Description_Class.csv')\n csv_Description_Vegetation = pd.read_csv(csv_input_dir + Description_Vegetation)\n csv_Description_Compounds = pd.read_csv(csv_input_dir + 'Description_Compounds.csv')\n csv_Description_References = pd.read_csv(csv_input_dir + 'Description_References.csv')\n csv_DB_SLA = pd.read_csv(csv_input_dir + DB_SLA)\n csv_DB_emissions = pd.read_csv(csv_input_dir + DB_emissions)\n\n csv_Canopy_Position_adjust.to_sql(\"Canopy Position adjust\", conn, flavor='sqlite', if_exists='replace')\n print(\"'Canopy Position adjust' Table Loaded from: %s\" % csv_input_dir + 'Canopy_Position_adjust.csv')\n\n csv_Description_Class.to_sql(\"Description Class\", conn, flavor='sqlite', if_exists='replace')\n print(\"'Description Class' Table Loaded from: %s\" % csv_input_dir + 'Description_Class.csv')\n\n csv_Description_Vegetation.to_sql(\"Description Vegetation\", conn, flavor='sqlite', if_exists='replace')\n print(\"'Description Vegetation' Table Loaded from: %s\" % csv_input_dir + Description_Vegetation)\n\n csv_Description_Compounds.to_sql(\"Description Compounds\", conn, flavor='sqlite', if_exists='replace')\n print(\"'Description Compounds' Table Loaded from: %s\" % csv_input_dir + 'Description_Compounds.csv')\n\n csv_Description_References.to_sql(\"Description References\", conn, flavor='sqlite', if_exists='replace')\n print(\"'Description References' Table Loaded from: %s\" % csv_input_dir + 'Description_References.csv')\n\n csv_DB_SLA.to_sql(\"DB SLA\", conn, flavor='sqlite', if_exists='replace')\n print(\"'DB SLA' Table Loaded from: %s\" % csv_input_dir + DB_SLA)\n\n csv_DB_emissions.to_sql(\"DB emissions\", conn, flavor='sqlite', if_exists='replace')\n print(\"'DB emissions' Table Loaded from: %s\" % csv_input_dir + DB_emissions)", "title": "" }, { "docid": "37b3b916867559495871cdc6025e1476", "score": "0.6356724", "text": "def CreateSqlite3Tables(self):\n self.LogSQL = True\n # Need to call this to initialise parameters...\n self.GetConnection(test_tables=False)\n # Don't test the table existence for duh reasons\n # Can't use the \"?\" query parameters for table names\n create_1 = \"\"\"\n CREATE TABLE IF NOT EXISTS {0} (\n series_id INTEGER PRIMARY KEY, \n ticker_full TEXT NOT NULL, \n series_provider_code TEXT NOT NULL, \n ticker_query TEXT NOT NULL,\n ticker_local TEXT\n )\n \"\"\".format(self.MetaTable)\n self.Execute(create_1)\n create_2 = \"\"\"\n CREATE UNIQUE INDEX IF NOT EXISTS index_ticker_full ON {0} (ticker_full)\n \"\"\".format(self.MetaTable)\n self.Execute(create_2)\n create_3 = \"\"\"\n CREATE TABLE IF NOT EXISTS {0} (\n series_id INTEGER,\n series_dates TEXT NOT NULL,\n series_values REAL NOT NULL,\n FOREIGN KEY(series_id) REFERENCES {1}(series_id) ON DELETE CASCADE ON UPDATE CASCADE,\n PRIMARY KEY (series_id, series_dates)\n )\"\"\".format(self.DataTable, self.MetaTable)\n self.Execute(create_3)\n create_4 = \"\"\"\nCREATE VIEW SummaryView as \nSELECT m.series_id, m.ticker_full, \n m.series_provider_code, m.ticker_query, m.ticker_local,\n min(d.series_dates) as start_date, max(d.series_dates) as end_date \n from {0} as m, {1} as d\nWHERE m.series_id = d.series_id\n 
\"\"\".format(self.MetaTable, self.DataTable)\n self.Execute(create_4)\n self.Connection.commit()\n self.TestTablesExist()\n self.Connection.commit()", "title": "" }, { "docid": "e7a7b594f57d3a79e6f2f03facd4648d", "score": "0.6281734", "text": "def create_structure(self):\n\n cursor = self.conn.cursor()\n #----------------------------------------------------------\n # drop tables if they exist\n stmts = (\"DROP TABLE IF EXISTS Partitionfunctions;\",\n \"DROP TABLE IF EXISTS Transitions;\",)\n\n for stmt in stmts:\n cursor.execute(stmt)\n #----------------------------------------------------------\n\n\n #-------------------------------------------------\n # INSERT TRANSITIONS\n\n sql_create_transitions = \"\"\"CREATE TABLE Transitions (\n T_Name TEXT,\n T_Frequency REAL,\n T_Intensity REAL,\n T_EinsteinA REAL,\n T_Uncertainty REAL,\n T_EnergyLower REAL,\n T_UpperStateDegeneracy INTEGER,\n T_NuclearSpinIsomer TEXT,\n T_HFS TEXT,\n T_Case TEXT,\n T_UpperStateQuantumNumbers TEXT,\n T_LowerStateQuantumNumbers TEXT) \"\"\"\n\n\n sql_create_partitionfunctions = \"\"\" CREATE TABLE Partitionfunctions (\n PF_Name TEXT,\n PF_VamdcSpeciesID TEXT,\n PF_SpeciesID TEXT,\n PF_NuclearSpinIsomer TEXT,\n PF_HFS TEXT,\n PF_1_072 REAL,\n PF_1_148 REAL,\n PF_1_230 REAL,\n PF_1_318 REAL,\n PF_1_413 REAL,\n PF_1_514 REAL,\n PF_1_622 REAL,\n PF_1_738 REAL,\n PF_1_862 REAL,\n PF_1_995 REAL,\n PF_2_138 REAL,\n PF_2_291 REAL,\n PF_2_455 REAL,\n PF_2_630 REAL,\n PF_2_725 REAL,\n PF_2_818 REAL,\n PF_3_020 REAL,\n PF_3_236 REAL,\n PF_3_467 REAL,\n PF_3_715 REAL,\n PF_3_981 REAL,\n PF_4_266 REAL,\n PF_4_571 REAL,\n PF_4_898 REAL,\n PF_5_000 REAL,\n PF_5_248 REAL,\n PF_5_623 REAL,\n PF_6_026 REAL,\n PF_6_457 REAL,\n PF_6_918 REAL,\n PF_7_413 REAL,\n PF_7_943 REAL,\n PF_8_511 REAL,\n PF_9_120 REAL,\n PF_9_375 REAL,\n PF_9_772 REAL,\n PF_10_471 REAL,\n PF_11_220 REAL,\n PF_12_023 REAL,\n PF_12_882 REAL,\n PF_13_804 REAL,\n PF_14_791 REAL,\n PF_15_849 REAL,\n PF_16_982 REAL,\n PF_18_197 REAL,\n PF_18_750 REAL,\n PF_19_498 REAL,\n PF_20_893 REAL,\n PF_22_387 REAL,\n PF_23_988 REAL,\n PF_25_704 REAL,\n PF_27_542 REAL,\n PF_29_512 REAL,\n PF_31_623 REAL,\n PF_33_884 REAL,\n PF_36_308 REAL,\n PF_37_500 REAL,\n PF_38_905 REAL,\n PF_41_687 REAL,\n PF_44_668 REAL,\n PF_47_863 REAL,\n PF_51_286 REAL,\n PF_54_954 REAL,\n PF_58_884 REAL,\n PF_63_096 REAL,\n PF_67_608 REAL,\n PF_72_444 REAL,\n PF_75_000 REAL,\n PF_77_625 REAL,\n PF_83_176 REAL,\n PF_89_125 REAL,\n PF_95_499 REAL,\n PF_102_329 REAL,\n PF_109_648 REAL,\n PF_117_490 REAL,\n PF_125_893 REAL,\n PF_134_896 REAL,\n PF_144_544 REAL,\n PF_150_000 REAL,\n PF_154_882 REAL,\n PF_165_959 REAL,\n PF_177_828 REAL,\n PF_190_546 REAL,\n PF_204_174 REAL,\n PF_218_776 REAL,\n PF_225_000 REAL,\n PF_234_423 REAL,\n PF_251_189 REAL,\n PF_269_153 REAL,\n PF_288_403 REAL,\n PF_300_000 REAL,\n PF_309_030 REAL,\n PF_331_131 REAL,\n PF_354_813 REAL,\n PF_380_189 REAL,\n PF_407_380 REAL,\n PF_436_516 REAL,\n PF_467_735 REAL,\n PF_500_000 REAL,\n PF_501_187 REAL,\n PF_537_032 REAL,\n PF_575_440 REAL,\n PF_616_595 REAL,\n PF_660_693 REAL,\n PF_707_946 REAL,\n PF_758_578 REAL,\n PF_812_831 REAL,\n PF_870_964 REAL,\n PF_933_254 REAL,\n PF_1000_000 REAL,\n PF_ResourceID TEXT,\n PF_URL TEXT,\n PF_Comment TEXT,\n PF_Timestamp)\"\"\"\n\n cursor.execute(sql_create_transitions)\n cursor.execute(sql_create_partitionfunctions)\n\n #-------------------------------------------------------------\n\n return", "title": "" }, { "docid": "a3f58c0c4c0309223350bfbfec6bfb4f", "score": "0.61255515", "text": "def 
createTables():\n os.system('sqlite3 -echo %s < %s' % (RESOURCE('afloat.db'), RESOURCE('tables.sql'),))", "title": "" }, { "docid": "d371e7fd1bd99943f019c48f7d809b99", "score": "0.6111881", "text": "def CreateDatabase(self):", "title": "" }, { "docid": "d371e7fd1bd99943f019c48f7d809b99", "score": "0.6111881", "text": "def CreateDatabase(self):", "title": "" }, { "docid": "45402bf723d70ac67c6ec92fd3a6d78a", "score": "0.60926443", "text": "def create_sqlite3_tables():\n obj = DatabaseSqlite3()\n obj.CreateSqlite3Tables()\n # Close the connection...\n del obj\n # Do test.\n obj2 = DatabaseSqlite3()\n obj2.GetConnection(test_tables=True)", "title": "" }, { "docid": "9be1e419809e06d870476452cd7a731e", "score": "0.5983625", "text": "def main():\n connection = records.Database(config.DATABASE_URL)\n creator = DatabaseCreator(connection)\n creator.create_tables()", "title": "" }, { "docid": "7c4bb43147d489bbe95369c9e3c02d62", "score": "0.5945702", "text": "def create_database_main():\n initialize_database(drop_tables)\n initialize_database(create_table_queries)\n initialize_database(insert_values_in_catalogs)\n insert_data_from_csv()", "title": "" }, { "docid": "5c5a430e19277b80720bece09a7854e5", "score": "0.59311944", "text": "def create_tables(self):\n try:\n self.conn.execute(\n '''\n PRAGMA foreign_key = ON;\n '''\n\n )\n self.conn.execute(\n '''\n CREATE TABLE tenants(\n -- uuid for the tenant, which is generated by create_tenant() API\n id TEXT PRIMARY KEY NOT NULL,\n -- name of the tenant, which is specified by user when creating the tenant\n -- this field can be changed later by using set_name() API\n name TEXT UNIQUE NOT NULL,\n -- brief description of the tenant, which is specified by user when creating the tenant\n -- this field can be changed laster by using set_description API\n description TEXT,\n -- default_datastore url\n default_datastore_url TEXT\n )\n '''\n )\n\n self.conn.execute(\n '''\n CREATE TABLE vms(\n -- uuid for the VM, which is generated when VM is created\n -- this uuid will be passed in to executeRequest()\n -- this field need to be specified when adding a VM to a tenant\n vm_id TEXT PRIMARY KEY NOT NULL,\n -- id in tenants table\n tenant_id TEXT NOT NULL,\n FOREIGN KEY(tenant_id) REFERENCES tenants(id)\n );\n '''\n )\n\n\n self.conn.execute(\n '''\n CREATE TABLE privileges(\n -- id in tenants table\n tenant_id TEXT NOT NULL,\n -- datastore url\n datastore_url TEXT NOT NULL,\n -- a boolean value, if it is set to True, tenant has full\n -- privilege on this datastore; it it is set to False\n -- tenant only has mount/unmount privilege on this datastore\n allow_create INTEGER,\n -- The unit of \"max_volume_size\" is \"MB\"\n max_volume_size INTEGER,\n -- The unit of usage_quota is \"MB\"\n usage_quota INTEGER,\n PRIMARY KEY (tenant_id, datastore_url),\n FOREIGN KEY(tenant_id) REFERENCES tenants(id)\n );\n '''\n )\n\n self.conn.execute(\n '''\n CREATE TABLE volumes (\n -- id in tenants table\n tenant_id TEXT NOT NULL,\n -- datastore url\n datastore_url TEXT NOT NULL,\n volume_name TEXT,\n -- The unit of \"volume_size\" is \"MB\"\n volume_size INTEGER,\n PRIMARY KEY(tenant_id, datastore_url, volume_name),\n FOREIGN KEY(tenant_id) REFERENCES tenants(id)\n );\n '''\n )\n\n self.conn.execute(\n '''\n CREATE TABLE versions (\n id INTEGER PRIMARY KEY NOT NULL,\n -- DB major version\n major_ver INTEGER NOT NULL,\n -- DB minor version\n minor_ver INTEGER NOT NULL,\n -- VMODL major version\n vmodl_major_ver INTEGER NOT NULL,\n -- VMODL minor version\n vmodl_minor_ver INTEGER NOT 
NULL\n );\n '''\n )\n\n # insert latest DB version and VMODL version to table \"versions\"\n self.conn.execute(\n \" INSERT INTO versions(id, major_ver, minor_ver, vmodl_major_ver, vmodl_minor_ver) VALUES (?, ?, ?, ?, ?)\",\n (0, DB_MAJOR_VER, DB_MINOR_VER, VMODL_MAJOR_VER, VMODL_MINOR_VER)\n )\n\n self.conn.commit()\n except sqlite3.Error as e:\n logging.error(\"Error %s when creating auth DB tables\", e)\n return str(e)\n\n return None", "title": "" }, { "docid": "a3627c1dff1a2699ee66d17f3ecf7fa9", "score": "0.59284306", "text": "def __createDatabase(self):\n print(\"Creating all database tables.\")\n Base.metadata.create_all(self.engine)\n print(\"Successfully created all database tables.\")", "title": "" }, { "docid": "3be9bc1654c9c5664d9547c2f07628d2", "score": "0.5887862", "text": "def setup_database():\n try:\n os.remove(db)\n except:\n pass\n conn = sqlite3.connect('database.db')\n c = conn.cursor()\n c.execute('CREATE TABLE agents (`ip` text, `hostname` text, `os` text, PRIMARY KEY(ip))')\n c.execute('CREATE TABLE actions (`ip` text, `name` text, `description` text)')\n c.execute('CREATE TABLE data (`ip` text, `timestamp` integer, `item` text, `value` integer)')\n conn.commit()\n conn.close()", "title": "" }, { "docid": "281484c3d50c6c0bba7a6638a4904d9e", "score": "0.5875842", "text": "def createDB(self):\n pass", "title": "" }, { "docid": "2eeb6149ba2e353b2ae691d47c454d56", "score": "0.5865373", "text": "def create(self):\n\n schema = DB.schema\n # create the file\n self.connection = sqlite3.connect(self.database)\n self.cursor = self.connection.cursor()\n self.close()\n\n # create the tables\n if self.verbose:\n print (\"creating databse\")\n with DB() as db:\n for table in schema:\n cols = ', '.join([\"%s %s\" % (c, schema[table][c]) for c in schema[table]])\n for c in DB.constraints:\n if c['create_table'] == table:\n con = \"\"\"CONSTRAINT %s\n FOREIGN KEY (%s)\n REFERENCES %s(%s)\n ON DELETE CASCADE\"\"\" % (c['name'], c['field'], c['reference_table'], c['field'])\n cols = (cols + ', ' + con)\n s = \"CREATE TABLE %s (%s);\" % (table, cols)\n db.execute(s)\n\n # create the policy record\n s = \"INSERT INTO policy DEFAULT VALUES;\"\n db.execute(s)\n\n s = 'CREATE UNIQUE INDEX idx_vhosts_id ON vhosts (id);'\n db.execute(s)\n\n s = 'CREATE UNIQUE INDEX idx_group_name ON groups (name);'\n db.execute(s)", "title": "" }, { "docid": "21b848f6d5f2677acb386f94c490794b", "score": "0.58593345", "text": "def setup_tables():\n db = config['Default']['database']\n try:\n cm = ConnectionManager(db)\n cm.connect()\n metadata.create_all(cm.engine)\n print(\"Tables successfully created.\")\n except Exception as e:\n print(\"Failed to create tables. 
Encountered the following error(s): {}.\".format(e))", "title": "" }, { "docid": "e3c49e3eb0c86fd423e33ae2f87e4c3c", "score": "0.5839066", "text": "def create_database_objects(cli_arguments):\n print('Creating Database schema')\n init()\n metadata.create_all(engine, checkfirst=True)\n print('done')", "title": "" }, { "docid": "090bdcfecea37af150ea329684be37bd", "score": "0.5796197", "text": "def init_db():\n global db\n db = sqlite3.connect('iot.db')\n c = db.cursor()\n c.execute(\"CREATE TABLE IF NOT EXISTS meta (key TEXT UNIQUE, value TEXT)\")\n\n # check DB version to see if we need to update it\n version = get_setting(\"schema_version\")\n if version != None:\n version = int(version)\n\n\n while version != 3:\n if version == None:\n logger.info(\"Creating database...\")\n set_setting(\"schema_version\",1)\n c.executescript(\"\"\"\n PRAGMA foreign_keys = ON;\n CREATE TABLE articles (\n id INTEGER PRIMARY KEY,\n url TEXT UNIQUE);\n CREATE TABLE threads (\n id INTEGER PRIMARY KEY,\n article_id INTEGER,\n poster TEXT,\n subreddit TEXT,\n permalink TEXT,\n karma INTEGER,\n comment_count INTEGER,\n posted_at TEXT,\n FOREIGN KEY(article_id) REFERENCES articles(id));\n CREATE TABLE comments (\n id INTEGER PRIMARY KEY,\n thread_id INTEGER,\n poster TEXT,\n body TEXT,\n permalink TEXT,\n karma INTEGER,\n comment_count INTEGER,\n posted_at TEXT,\n FOREIGN KEY(thread_id) REFERENCES threads(id));\n CREATE INDEX article_urls ON articles(url);\n CREATE INDEX thread_permalinks ON threads(permalink);\n CREATE INDEX comment_permalinks ON comments(permalink);\n \"\"\")\n version = int(get_setting('schema_version'))\n elif version == 1:\n logger.info(\"Updating database to version 2...\")\n set_setting(\"schema_version\",2)\n c.executescript(\"\"\"\n ALTER TABLE threads ADD COLUMN handled INTEGER NOT NULL DEFAULT 0;\n \"\"\")\n version = int(get_setting('schema_version'))\n elif version == 2:\n logger.info(\"Updating database to version 3...\")\n set_setting(\"schema_version\",3)\n c.executescript(\"\"\"\n CREATE TABLE xposts (\n source_id INTEGER NOT NULL,\n target_id INTEGER NOT NULL,\n status TEXT,\n comment_id INTEGER,\n FOREIGN KEY(source_id) REFERENCES threads(id),\n FOREIGN KEY(target_id) REFERENCES threads(id));\n CREATE INDEX xpost_sources ON xposts(source_id);\n CREATE INDEX xpost_targets ON xposts(target_id);\n \"\"\")\n version = int(get_setting('schema_version'))\n elif version > 3:\n logger.critical(\"Unknown database version %d.\", version)\n exit(1)", "title": "" }, { "docid": "67d782200c1bd098e91f1d6b8135be89", "score": "0.57861686", "text": "def main():\n create_schema_names = ['temp', 'raw', 'std', 'dwh']\n\n try:\n connection = create_db_connection('yelp_business_db')\n create_schema(connection, create_schema_names)\n create_db_config(connection)\n create_temp_table(connection)\n create_raw_table(connection)\n create_std_table(connection)\n create_dim_table(connection)\n create_fact_table(connection)\n \n except Exception as e:\n print(\"An error occurred: {}\".format(e))\n \n else:\n print(\"Migration completed successfully.\")\n connection.close()", "title": "" }, { "docid": "70051af74509b48fac527981ba06b2b4", "score": "0.5778465", "text": "def _dbsetup(self):\n self._dbconn = sqlite3.connect(self._db_file)\n # Create table for multiplicons\n sql = '''CREATE TABLE multiplicons\n (id, genome_x, list_x, parent, genome_y, list_y, level,\n number_of_anchorpoints, profile_length, begin_x, end_x,\n begin_y, end_y, is_redundant)'''\n self._dbconn.execute(sql)\n # Create table for 
multiplicons ('order' appears to be reserved)\n sql = '''CREATE TABLE segments\n (id, multiplicon, genome, list, first, last, ord)'''\n self._dbconn.execute(sql)\n self._dbconn.commit()", "title": "" }, { "docid": "39bab0e5d9281364e65ad12a44e2ab0c", "score": "0.57617414", "text": "def make_tables():\n\tENGINE = create_engine('postgresql://localhost:5432/BioRetro', echo=False)\n\tBase.metadata.create_all(ENGINE)", "title": "" }, { "docid": "5c620d06fc5b1ebe31278195089275ad", "score": "0.574787", "text": "def init_db():\n print('creating tables from metadata')\n metadata.create_all(bind=engine)\n print('...done')", "title": "" }, { "docid": "e607ffc30f95752bf72fcd8cf066c597", "score": "0.5736681", "text": "def create_db():\n schema.define_schemas(client)", "title": "" }, { "docid": "de39622409043b236eaf442d362b196e", "score": "0.57350075", "text": "def _setup():\n DB_CLIENT.send_query(\n \"\"\"\n CREATE TABLE spend_log(\n username text NOT NULL,\n category text NOT NULL,\n amount real NOT NULL,\n notes text,\n tx_timestamp TIMESTAMP NOT NULL\n );\n \"\"\",\n commit=True,\n )\n DB_CLIENT.send_query(\n \"\"\"\n CREATE TABLE monthly_budgets(\n category text,\n max_budget real,\n max_tx_amount real\n );\n \"\"\",\n commit=True,\n )\n vals = \",\".join([f\"('{cat_i}', NULL, NULL)\" for cat_i in const.EXPENSE_CATEGORIES])\n DB_CLIENT.send_query(\n f\"\"\"\n INSERT INTO monthly_budgets (category, max_budget, max_tx_amount)\n VALUES {vals};\n \"\"\",\n commit=True,\n )", "title": "" }, { "docid": "957289a36aa71637b2b3b810094e9aac", "score": "0.5723727", "text": "def __setup_sqlite3(self):\n\n self.__db_connection = sqlite3.connect(':memory:')\n self.__logger.info('Connect to SQLITE3 database')\n create_datalogger_table = f'CREATE TABLE IF NOT EXISTS {self.__table_name} (id INTEGER PRIMARY KEY, timestamp REAL NOT NULL, elapsed REAL NOT NULL)'\n cursor = self.__db_connection.cursor()\n cursor.execute(create_datalogger_table)\n\n for name, value in flatten(self.datalogger_current).items():\n cursor.execute(f'ALTER TABLE {self.__table_name} ADD COLUMN {name} TEXT')\n\n self.__logger.info('Complete SQLITE3 database setup')", "title": "" }, { "docid": "56ade3f0051b05d6924f59d66baaa499", "score": "0.5716618", "text": "def set_up_static_db(this_database):\n this_database.clear()\n # add parameters:\n this_database.add_row(t2s.PARAM_TABLENAME,\n {t2s.PARAM_TESTNAME: TESTNAME,\n t2s.PARAM_PID: 1,\n t2s.PARAM_K: 80,\n t2s.PARAM_D: 10,\n t2s.PARAM_L: 20})\n # add circuits:\n this_database.add_row(t2s.CIRCUIT_TABLENAME,\n {t2s.CIRCUIT_TESTNAME: TESTNAME,\n t2s.CIRCUIT_CID: 1,\n t2s.CIRCUIT_W: 30,\n t2s.CIRCUIT_PID: 1,\n t2s.CIRCUIT_NUMADDS: 3,\n t2s.CIRCUIT_NUMADDCONSTS: 4,\n t2s.CIRCUIT_NUMMULS: 5,\n t2s.CIRCUIT_NUMMULCONSTS: 6,\n t2s.CIRCUIT_NUMROTATES: 7,\n t2s.CIRCUIT_NUMSELECTS: 8,\n t2s.CIRCUIT_NUMLEVELS: 9,\n t2s.CIRCUIT_NUMGATES: 42,\n t2s.CIRCUIT_OUTPUTGATETYPE: \"LADD\",\n t2s.CIRCUIT_TESTTYPE: \"RANDOM\"})\n\n # add inputs:\n this_database.add_row(t2s.INPUT_TABLENAME,\n {t2s.INPUT_TESTNAME: TESTNAME,\n t2s.INPUT_IID: 1,\n t2s.INPUT_CID: 1,\n t2s.INPUT_NUMZEROS: 299,\n t2s.INPUT_NUMONES: 301})\n this_database.add_row(t2s.INPUT_TABLENAME,\n {t2s.INPUT_TESTNAME: TESTNAME,\n t2s.INPUT_IID: 2,\n t2s.INPUT_CID: 1,\n t2s.INPUT_NUMZEROS: 298,\n t2s.INPUT_NUMONES: 302})\n\n # add performer key generation results:\n this_database.add_row(t2s.PERKEYGEN_TABLENAME,\n {t2s.PERKEYGEN_TESTNAME: TESTNAME,\n t2s.PERKEYGEN_TIMESTAMP: 1.0,\n t2s.PERKEYGEN_PERFORMERNAME: PERFORMER_NAME,\n t2s.PERKEYGEN_PID: 1,\n 
t2s.PERKEYGEN_TRANSMITLATENCY: .01,\n t2s.PERKEYGEN_LATENCY: 10.0,\n t2s.PERKEYGEN_KEYSIZE: 55.0})\n\n # add performer circuit ingestion results:\n this_database.add_row(t2s.PERINGESTION_TABLENAME,\n {t2s.PERINGESTION_TESTNAME: TESTNAME,\n t2s.PERINGESTION_TIMESTAMP: 2.0,\n t2s.PERINGESTION_PERFORMERNAME: PERFORMER_NAME,\n t2s.PERINGESTION_CID: 1,\n t2s.PERINGESTION_TRANSMITLATENCY: .01,\n t2s.PERINGESTION_LATENCY: 12.0})\n\n # add performer evaluation results:\n this_database.add_row(t2s.PEREVALUATION_TABLENAME,\n {t2s.PEREVALUATION_TESTNAME: TESTNAME,\n t2s.PEREVALUATION_TIMESTAMP: 3.0,\n t2s.PEREVALUATION_PERFORMERNAME: PERFORMER_NAME,\n t2s.PEREVALUATION_IID: 1,\n t2s.PEREVALUATION_INPUTTRANSMITLATENCY: .01,\n t2s.PEREVALUATION_OUTPUTTRANSMITLATENCY: .01,\n t2s.PEREVALUATION_ENCRYPTIONLATENCY: 14.0,\n t2s.PEREVALUATION_EVALUATIONLATENCY: 16.0,\n t2s.PEREVALUATION_DECRYPTIONLATENCY: 18.0,\n t2s.PEREVALUATION_OUTPUT: \"111\",\n t2s.PEREVALUATION_OUTPUTSIZE: 200.0,\n t2s.PEREVALUATION_INPUTSIZE: 400.0})\n this_database.add_row(t2s.PEREVALUATION_TABLENAME,\n {t2s.PEREVALUATION_TESTNAME: TESTNAME,\n t2s.PEREVALUATION_TIMESTAMP: 4.0,\n t2s.PEREVALUATION_PERFORMERNAME: PERFORMER_NAME,\n t2s.PEREVALUATION_IID: 2,\n t2s.PEREVALUATION_INPUTTRANSMITLATENCY: .01,\n t2s.PEREVALUATION_OUTPUTTRANSMITLATENCY: .01,\n t2s.PEREVALUATION_ENCRYPTIONLATENCY: 20.0,\n t2s.PEREVALUATION_EVALUATIONLATENCY: 22.0,\n t2s.PEREVALUATION_DECRYPTIONLATENCY: 24.0,\n t2s.PEREVALUATION_OUTPUT: \"000\",\n t2s.PEREVALUATION_OUTPUTSIZE: 200.0,\n t2s.PEREVALUATION_INPUTSIZE: 400.0})\n this_database.add_row(t2s.PEREVALUATION_TABLENAME,\n {t2s.PEREVALUATION_TESTNAME: TESTNAME,\n t2s.PEREVALUATION_TIMESTAMP: 5.0,\n t2s.PEREVALUATION_PERFORMERNAME: PERFORMER_NAME,\n t2s.PEREVALUATION_IID: 2,\n t2s.PEREVALUATION_INPUTTRANSMITLATENCY: .01,\n t2s.PEREVALUATION_OUTPUTTRANSMITLATENCY: .01,\n t2s.PEREVALUATION_ENCRYPTIONLATENCY: 26.0,\n t2s.PEREVALUATION_EVALUATIONLATENCY: 28.0,\n t2s.PEREVALUATION_DECRYPTIONLATENCY: 30.0,\n t2s.PEREVALUATION_OUTPUT: \"111\",\n t2s.PEREVALUATION_OUTPUTSIZE: 200.0,\n t2s.PEREVALUATION_INPUTSIZE: 400.0})", "title": "" }, { "docid": "9b59ae9ca36d435c2e51714ad4e2d58a", "score": "0.5700505", "text": "def create():\n db.create_all()\n def process():\n add_roles()\n add_users()\n add_order_statuses()\n add_product_data()\n click.echo(\"Finished creating tables!!! 
\\n\")\n process()", "title": "" }, { "docid": "89e03af401d25bcfa6b89388ca205ee5", "score": "0.5699799", "text": "def create_db():\n db.create_all()\n load_movies()\n load_links()", "title": "" }, { "docid": "424292503ca9293e266d21b076dd95b8", "score": "0.56728613", "text": "def init_database():\n meta.create_all(engine)", "title": "" }, { "docid": "cbf3f483fa53945ecaf4449ad059cdb2", "score": "0.5671859", "text": "def create_db_tables():\n conn = connect()\n\n try:\n for i in Database.DICT_MAKE_TABLES:\n conn.execute(i)\n\n for key, values in Database.DICT_CONTENT_TABLES.items():\n set_data_db(key, values)\n\n except sqlite3.OperationalError as error:\n print(error)\n conn.close()", "title": "" }, { "docid": "babbe3817e5f537b2a85a177f028c180", "score": "0.56708133", "text": "def __build_db(self):\n self.cursor.execute(\n \"CREATE TABLE metadata (id integer primary key, metadata text)\"\n )\n self.cursor.execute(\n \"CREATE TABLE cn (id integer primary key, nt INTEGER, pid INTEGER, idx INTEGER)\"\n )\n self.cursor.execute(\n \"CREATE TABLE cnp (id integer primary key, cn_id INTEGER, pos integer, content text, content_idx integer)\"\n )\n\n self.cursor.execute(\"CREATE TABLE n_type (id integer primary key, name text)\")\n self.cursor.execute(\"CREATE TABLE f_type (id integer primary key, name text)\")\n self.cursor.execute(\n \"\"\"CREATE TABLE ft\n (\n id integer primary key,\n cn_id integer,\n f_type INTEGER,\n binary_value blob,\n single integer,\n tag_uuid text\n )\"\"\"\n )\n\n self.cursor.execute(\"CREATE UNIQUE INDEX n_type_uk ON n_type(name);\")\n self.cursor.execute(\"CREATE UNIQUE INDEX f_type_uk ON f_type(name);\")\n self.cursor.execute(\"CREATE INDEX cn_perf ON cn(nt);\")\n self.cursor.execute(\"CREATE INDEX cn_perf2 ON cn(pid);\")\n self.cursor.execute(\"CREATE INDEX cnp_perf ON cnp(cn_id, pos);\")\n self.cursor.execute(\"CREATE INDEX f_perf ON ft(cn_id);\")\n self.cursor.execute(\"CREATE INDEX f_perf2 ON ft(tag_uuid);\")\n self.cursor.execute(\n \"\"\"CREATE TABLE content_exceptions\n (\n id integer primary key,\n tag text,\n message text,\n exception_details text,\n group_uuid text,\n tag_uuid text,\n exception_type text,\n exception_type_id text,\n severity text,\n node_uuid text\n )\"\"\"\n )\n self.cursor.execute(\n \"CREATE TABLE model_insights (id integer primary key,model_insight text);\"\n )\n self.document.version = \"6.0.0\"\n\n self.__update_metadata()", "title": "" }, { "docid": "f2c16874db0ec20ab6db9d7ca942eb6b", "score": "0.5645927", "text": "def main():\n #create_database()\n drop_tables()\n create_tables()", "title": "" }, { "docid": "8fe66140b73fda51a25bb693b246f881", "score": "0.563715", "text": "def setup_database():\n print \"Setting up SQL database at\",get_database_filename()\n if os.path.exists(get_database_filename()):\n print \"Creating a backup at\", get_database_filename()+\".bak\"\n shutil.copyfile(get_database_filename(), get_database_filename()+\".bak\")\n \n db = SQLgetConnection()\n\n c = db.cursor()\n try:\n c.execute(\"DROP TABLE TestRuns;\")\n c.execute(\"DROP TABLE Revisions;\")\n except:\n print \"Error dropping tables. 
Perhaps one does not exist (this is normal on first run).\"\n \n c.execute(\"\"\"CREATE TABLE TestRuns (\n testID INTEGER PRIMARY KEY,\n date DATETIME, name VARCHAR(60), type VARCHAR(20), \n host VARCHAR(30), environment VARCHAR(50), runner VARCHAR(20), \n revision INT, commitid VARCHAR(45), \n runtime DOUBLE, cpu_fraction DOUBLE, \n success BOOL,\n status VARCHAR(50), logarchive VARCHAR(80),\n variables VARCHAR(200)\n ); \"\"\")\n \n # Now a table that is just one entry per run (a fake \"revision\")\n \n c.execute(\"\"\"CREATE TABLE Revisions (\n revision INTEGER PRIMARY KEY,\n date DATETIME\n ); \"\"\")", "title": "" }, { "docid": "9fe9d072c3e0eb0a90f3211ab5ee9b54", "score": "0.56345695", "text": "def setup_output_database(self):\n settings.OUTPUT_DB_MANAGER.create_engine(self.argument_parser.output_db_url)\n settings.OUTPUT_DB_MANAGER.create_database_if_not_exists()\n settings.OUTPUT_DB_MANAGER.create_table_if_not_exists(Results, settings.OUTPUT_TABLE_NAME)", "title": "" }, { "docid": "16c895dbedc7b427032652c8f1dfd131", "score": "0.56320226", "text": "def setup_function():\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)\n logger.info(\"Tests temp db was initiated\")", "title": "" }, { "docid": "60b8aad1c7a41ab2628234c8637599ac", "score": "0.5631784", "text": "def create_database (self):\n try:\n self.databaseCreator = DBCreator(self.hs,self.pt,self.us,self.ps,self.bfred,self)\n self.databaseCreator.show()\n\n except Exception as error:\n self.application_error(error)\n pass", "title": "" }, { "docid": "b6635c71504a4e855c4926ac1ab80571", "score": "0.56296515", "text": "def create_tables(self):\r\n conn = sqlite3.connect(self.database_filename)\r\n cur = conn.cursor()\r\n\r\n cur.execute(\"\"\"\r\n create table TOLLS_CASE (\r\n Case_ID integer primary key autoincrement not null,\r\n Case_Number varchar not null,\r\n Case_Agency varchar not null,\r\n Case_Agent varchar not null,\r\n Case_Analyst varchar not null,\r\n Case_Target_Number varchar not null\r\n );\r\n \"\"\")\r\n\r\n conn.commit()\r\n\r\n cur.execute(\"\"\"\r\n create table TOWER (\r\n Tower_ID integer primary key autoincrement not null,\r\n Tower_Case_ID integer not null,\r\n Tower_Cell_Site_ID varchar not null,\r\n Tower_Latitude varchar not null,\r\n Tower_Longitude varchar not null,\r\n Tower_Sector varchar not null,\r\n Tower_Azimuth integer not null\r\n );\r\n \"\"\")\r\n\r\n conn.commit()\r\n\r\n cur.execute(\"\"\"\r\n create table CDR (\r\n CDR_ID integer primary key autoincrement not null,\r\n CDR_Case_ID integer not null,\r\n CDR_Calling_Number varchar null,\r\n CDR_Called_Number varchar not null,\r\n CDR_Dialed_Digits varchar null,\r\n CDR_Call_Direction varchar null,\r\n CDR_Start_Date varchar null,\r\n CDR_End_Date varchar null,\r\n CDR_Duration varchar null,\r\n CDR_Cell_Site_ID varchar not null,\r\n CDR_Sector varchar not null\r\n );\r\n \"\"\")\r\n\r\n conn.commit()\r\n conn.close()", "title": "" }, { "docid": "e5c0ed28784df17d1a4dcf3a166d97da", "score": "0.56125563", "text": "def create_tables(self):\n queries = [\n \"\"\"\n CREATE TABLE IF NOT EXISTS users(\n user_id serial PRIMARY KEY NOT NULL,\n firstname VARCHAR (24) NOT NULL,\n lastname VARCHAR (24) NOT NULL,\n othername VARCHAR (24),\n email VARCHAR (30) NOT NULL UNIQUE,\n phonenumber VARCHAR (30) NOT NULL,\n password VARCHAR (128) NOT NULL,\n passportUrl VARCHAR (200),\n isAdmin BOOLEAN DEFAULT FALSE\n );\"\"\",\n \"\"\"CREATE TABLE IF NOT EXISTS office (\n office_id serial PRIMARY KEY NOT NULL,\n name VARCHAR (50) NOT NULL,\n 
office_type VARCHAR (50) NOT NULL\n );\"\"\",\n \"\"\"CREATE TABLE IF NOT EXISTS voters (\n id serial NOT NULL,\n createdOn TIMESTAMP NULL DEFAULT NOW(),\n createdBy INTEGER NOT NULL,\n office_id INTEGER,\n candidate_id INTEGER,\n PRIMARY KEY(createdBy, office_id),\n FOREIGN KEY (createdBy) REFERENCES users(user_id) ON DELETE CASCADE,\n FOREIGN KEY (office_id) REFERENCES office(office_id) ON DELETE CASCADE,\n FOREIGN KEY (candidate_id) REFERENCES users(user_id) ON DELETE CASCADE\n\n );\"\"\",\n \"\"\"CREATE TABLE IF NOT EXISTS parties(\n party_id SERIAL PRIMARY KEY NOT NULL,\n name VARCHAR(50) NOT NULL,\n hqaddress VARCHAR(50) NOT NULL,\n logourl VARCHAR(50) NOT NULL\n );\"\"\",\n \"\"\"CREATE TABLE IF NOT EXISTS candidates(\n id SERIAL NOT NULL,\n office_id INTEGER NOT NULL,\n party_id INTEGER NOT NULL,\n candidate_id INTEGER NOT NULL,\n PRIMARY KEY (office_id, party_id, candidate_id),\n FOREIGN KEY (candidate_id) REFERENCES users(user_id) ON DELETE CASCADE,\n FOREIGN KEY (party_id) REFERENCES parties(party_id) ON DELETE CASCADE,\n FOREIGN KEY (office_id) REFERENCES office(office_id) ON DELETE CASCADE\n );\"\"\"\n ]\n try:\n for query in queries:\n self.curr.execute(query)\n self.save()\n except Exception as e:\n print(e)\n return e", "title": "" }, { "docid": "a913a3d4b894abe53f83d33dd22bccc6", "score": "0.5607263", "text": "def create_db():\n Base.metadata.create_all(db.engine)\n print \"--- Create database\"", "title": "" }, { "docid": "f4261e408cb0e6bb72d6fa1c47bc3116", "score": "0.56048703", "text": "def set_up_database():\n database_file = \"meal_planner.db\"\n now = datetime.datetime.now()\n dt = datetime.date(now.year, now.month, now.day)\n week_number = dt.isocalendar()[1]\n\n with sqlite3.connect(database_file) as conn:\n # create the tables if they haven't been created yet\n recipe_table_name = \"recipes_\" + str(week_number)\n conn.execute('''CREATE TABLE IF NOT EXISTS ''' + recipe_table_name + ''' (recipe text, row int, column int)''')\n ingredients_table_name = \"ingredients_\" + str(week_number)\n conn.execute('''CREATE TABLE IF NOT EXISTS ''' + ingredients_table_name + ''' (ingredients text)''')\n conn.execute('''CREATE TABLE IF NOT EXISTS recipe (name text, time int, servings int, favorite text, ingredients text, directions text)''')", "title": "" }, { "docid": "50a32caa9ae631f9c9831350bc623cf0", "score": "0.5596605", "text": "def initialize_database(self):\n os.makedirs(self.data_dir, exist_ok=True)\n self._create_customer_credentials_table()\n self._create_customers_table()\n self._create_employee_credentials_table()\n self._create_employees_table()\n self._create_orders_table()\n self._create_products_table()\n self._create_reviews_table()", "title": "" }, { "docid": "6be9d455b230439a2e90c3dfe48b103e", "score": "0.55893594", "text": "def create_db():\n cursor = connection.cursor()\n create_table_1 = ''' CREATE TABLE interactions\n (id SERIAL PRIMARY KEY,\n usr VARCHAR(22),\n other_usr VARCHAR(22),\n interaction VARCHAR(22),\n time TIMESTAMP NOT NULL,\n topic_code VARCHAR(22)),\n twid VARCHAR (255); '''\n cursor.execute(create_table_1)\n create_table_2 = ''' CREATE TABLE topics\n (id SERIAL PRIMARY KEY,\n topic_code VARCHAR(22),\n hashtag VARCHAR(200)); '''\n cursor.execute(create_table_2)\n connection.commit()", "title": "" }, { "docid": "d970e768affe44a27bbce12c3925aee9", "score": "0.5584978", "text": "def create_db():\n scripts.create_db()", "title": "" }, { "docid": "c3dd1ee56564bec1b4d498f554734b16", "score": "0.5581307", "text": "def createUpdateDatabase(self):\n 
create_table = '''CREATE TABLE IF NOT EXISTS main (id INTEGER PRIMARY KEY, created TEXT, week_day TEXT, hour TEXT, tracking_time TEXT, tag_id INTEGER )'''\n\n conn = sqlite3.connect(self.mainDB)\n\n db = conn.cursor()\n\n db.execute(create_table)\n\n log.debug(\"Successfully created db. %s\" % self.mainDB)", "title": "" }, { "docid": "fdcc1168bb0ff43c37a4efe83f2a57c3", "score": "0.557767", "text": "def set_up_db():\n DATABASE.drop_tables([Customer])\n DATABASE.close()\n DATABASE.create_tables([Customer])\n DATABASE.close()", "title": "" }, { "docid": "a0632c74545ad504faff1d8017523f2c", "score": "0.5576486", "text": "def createTables():\n from oonib.db import models\n for model_name in models.__all__:\n try:\n model = getattr(m, model_name)\n except Exception, e:\n log.err(\"Error in db initting\")\n log.err(e)\n try:\n log.debug(\"Creating %s\" % model)\n yield tables.runCreateTable(model, transactor, database)\n except Exception, e:\n log.debug(str(e))", "title": "" }, { "docid": "44ffdc2738ff091d12fc95364f4dfac4", "score": "0.55722904", "text": "def initializeDb(self):\n\n cursor = self.DB_CONN.cursor()\n cursor.execute(\"\"\"Drop Table IF EXISTS Models\"\"\")\n cursor.execute(\"\"\"\n CREATE TABLE Models(\n modelid text,\n parentmodel text,\n pipelineName text,\n timestamp real,\n data_centroid array,\n trainingModel text,\n trainingData text,\n testData text,\n precision real,\n recall real,\n fscore real,\n type text,\n active integer,\n PRIMARY KEY(modelid),\n FOREIGN KEY(parentmodel) REFERENCES Models(trainingModel)\n )\n \"\"\")\n self.DB_CONN.commit()\n cursor.close()", "title": "" }, { "docid": "7c3d750a9baf090ac4e481417ad2a188", "score": "0.5569125", "text": "def setUp(self): \n self.db = Database('test_rss.db')\n self.db.create_tables()", "title": "" }, { "docid": "449c38e03a4c34dac547aa7745206410", "score": "0.55637896", "text": "def create_table(self):", "title": "" }, { "docid": "1f43b6882ff66daee89d523f97a6cc1c", "score": "0.556127", "text": "def init_db():\n import data_model\n Base.metadata.create_all(engine)", "title": "" }, { "docid": "f6baec782beb2e28eea300576fc76781", "score": "0.5547222", "text": "def create_database():\n \n # connect to default database\n conn = connection()\n cur = conn.cursor()\n \n # create sparkify database with UTF8 encoding\n cur.execute(\"DROP DATABASE IF EXISTS sparkifydb\")\n cur.execute(\"CREATE DATABASE sparkifydb WITH ENCODING 'utf8' TEMPLATE template0\")\n\n # close connection to default database\n conn.close()", "title": "" }, { "docid": "c3e25d6fe977c287e7be494b740683da", "score": "0.5536924", "text": "def init_database() -> None:\n log.info('Creating database objects')\n Base.metadata.create_all(engine)", "title": "" }, { "docid": "bd6f1589d029712d9319259f15a0a424", "score": "0.5536153", "text": "def createtables(self):\n\n self.cursor.execute(Statement.CREATE_DOCUMENTS)\n self.cursor.execute(Statement.CREATE_OBJECTS)\n self.cursor.execute(Statement.CREATE_SECTIONS % \"sections\")\n self.cursor.execute(Statement.CREATE_SECTIONS_INDEX)", "title": "" }, { "docid": "997db9b7bef6c5eb5b90c0aae4c99a6e", "score": "0.55324125", "text": "def create_new_metadata_db():\n table_name = 'metadata'\n variables = cfg.read_variables()\n attr_names = get_dictionary_attribute_keys(variables)\n column_names_str = get_column_names_string(attr_names)\n conn = connect(db_name)\n c = conn.cursor()\n c.execute('DROP TABLE IF EXISTS ' + table_name)\n # Create the table\n sql = 'CREATE TABLE ' + table_name + ' ' + column_names_str\n try:\n c.execute(sql)\n\n # 
Fill the table\n fields_string = ('?,' * len(attr_names))[:-1]\n for name, values in variables.iteritems():\n sql = \"INSERT INTO \" + table_name + \" VALUES (\" + fields_string + \")\"\n sql_values = []\n for attr_name in attr_names:\n if attr_name == 'name':\n sql_values.append(name)\n elif attr_name in values['attribute']:\n sql_values.append(values['attribute'][attr_name])\n else:\n sql_values.append(None)\n c.execute(sql, sql_values)\n conn.commit()\n except sqlite3.OperationalError:\n pass", "title": "" }, { "docid": "e8e6b5ed30db111c0c36101ca8ccb933", "score": "0.5524021", "text": "def create_tables(self):\n self.clean_table()\n #self.create_product_table()\n #self.create_category_table()\n #self.create_store_table()\n #self.create_product_category_table()\n #self.create_product_store_table()\n self.create_favorite_table()", "title": "" }, { "docid": "6b2cdc786f908a4714988af46f80b62c", "score": "0.5516316", "text": "def __create_tables(self):\n self.Product = Product\n self.Category = Category\n self.Categorized = Categorized\n self.Favorite = Favorite\n myDB.connect()\n myDB.create_tables(\n [self.Product, self.Category, self.Categorized, self.Favorite], safe=True\n )", "title": "" }, { "docid": "1a8fb39fe7e8f170a23fe58b76fec623", "score": "0.5515452", "text": "def createTablesIfNeeded(self):\n self.sql.execute('''CREATE TABLE IF NOT EXISTS _file (\n id integer NOT NULL PRIMARY KEY,\n name text,\n dir text\n )''')\n self.sql.execute('''CREATE TABLE IF NOT EXISTS _comp (\n id integer NOT NULL PRIMARY KEY,\n fid integer NOT NULL REFERENCES _file(id),\n name text\n )''')\n self.sql.execute('''CREATE TABLE IF NOT EXISTS _lang (\n id integer NOT NULL PRIMARY KEY,\n name text\n )''')\n self.sql.execute('''CREATE TABLE IF NOT EXISTS _trans (\n fid integer NOT NULL REFERENCES _file(id),\n cid integer NOT NULL REFERENCES _comp(id),\n lid integer NOT NULL REFERENCES _lang(id),\n key text,\n value text\n )''')", "title": "" }, { "docid": "f8e418090675fba1c074f9854d21f612", "score": "0.5509168", "text": "def create_db_tables():\n database.create_tables(\n [Person, Message, Diaper, DiaperContent, DiaperContentType, Bottle, BottleType]\n )", "title": "" }, { "docid": "3288d44219cba741e210965df25694c8", "score": "0.54988915", "text": "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n self.database_path = \"postgresql://{}:{}@{}/{}\".format('postgres','e2806387','localhost:5432', self.database_name)\n setup_db(self.app, self.database_path) \n \n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "title": "" }, { "docid": "ce5b43cf95c1733d31369082f16c0dce", "score": "0.54976505", "text": "def create_tables(self):\r\n self._metadata.create_all(self.engine)", "title": "" }, { "docid": "bb2acb2c142f9558ed466b31b5054339", "score": "0.5494729", "text": "def create_db(self):\n\n # Connect to the database.\n with self._db_context.get_connection_provider(False) as connection:\n cursor = connection.cursor\n\n # Create tables.\n cursor.execute(\n 'CREATE TABLE audio_artist ('\n 'id INTEGER PRIMARY KEY, artist VARCHAR(1024))')\n cursor.execute(\n 'CREATE TABLE audio_album ('\n 'id INTEGER PRIMARY KEY,'\n 'id_artist INTEGER,'\n 'album VARCHAR(1024),'\n 'FOREIGN KEY(id_artist) REFERENCES audio_artist(id))')\n cursor.execute(\n 'CREATE TABLE audio_file ('\n 'id INTEGER PRIMARY KEY,'\n 'id_album INTEGER,'\n 'number 
INTEGER,'\n 'title VARCHAR(1024),'\n 'path VARCHAR(1024),'\n 'FOREIGN KEY(id_album) REFERENCES audio_album(id))')\n\n # Fill DB with initial data.\n self._inflate_db()", "title": "" }, { "docid": "d183af8e7c6be42733159aefbe6eb6f1", "score": "0.5485866", "text": "def databasesetup(sharepoint,exchange,onedrivefaculty,onedrivestudent):\r\n try:\r\n\r\n conn = sqlite3.connect(\"UH_Office365_Migration_Database.db\")\r\n cur = conn.cursor()\r\n sharepoint.to_sql('Sharepoint', conn, if_exists = 'replace')\r\n\r\n exchange.to_sql('Exchange', conn, if_exists = 'replace')\r\n onedrivefaculty.to_sql('Onedrive_Faculty_data', conn, if_exists = 'replace')\r\n onedrivestudent.to_sql('Onedrive_Student_data', conn, if_exists = 'replace')\r\n\r\n conn.commit()\r\n conn.close()\r\n tkinter.messagebox.showinfo('Success!', 'Database has been updated.')\r\n\r\n except:\r\n tkinter.messagebox.showinfo('Error!', 'Database not updated, check files.')", "title": "" }, { "docid": "8716d357459fd5fa3bb68055f499a4c1", "score": "0.5484055", "text": "def create_database(self):\n db_query = \"\"\"CREATE TABLE IF NOT EXISTS `{0}` (\n `ID` int(11) NOT NULL,\n `DATE` int(11) NOT NULL,\n `PRICE` float NOT NULL,\n `AMOUNT` float NOT NULL,\n PRIMARY KEY (`ID`)\n ) ENGINE=InnoDB DEFAULT CHARSET=latin1;\"\"\".format(self.table)\n self.cursor.execute(db_query)", "title": "" }, { "docid": "55de6831956f1c3cdefc53a944ab935a", "score": "0.54809237", "text": "def createDataBase():\n Base.metadata.create_all(engine)", "title": "" }, { "docid": "de3082c5fea37499e413bfc3dd330ec2", "score": "0.54734385", "text": "def create_tables(conn):\n conn.execute(\"create table if not exists file (file_id INTEGER PRIMARY KEY, url TEXT, file_functional_id TEXT, filename TEXT, local_path TEXT, data_node TEXT, checksum TEXT, checksum_type TEXT, duration INT, size INT, rate INT, start_date TEXT, end_date TEXT, crea_date TEXT, status TEXT, error_msg TEXT, sdget_status TEXT, sdget_error_msg TEXT, priority INT, tracking_id TEXT, model TEXT, project TEXT, variable TEXT, last_access_date TEXT, dataset_id INT, insertion_group_id INT, timestamp TEXT)\")\n conn.execute(\"create table if not exists dataset (dataset_id INTEGER PRIMARY KEY, dataset_functional_id TEXT, status TEXT, crea_date TEXT, path TEXT, path_without_version TEXT, version TEXT, local_path TEXT, last_mod_date TEXT, latest INT, latest_date TEXT, last_done_transfer_date TEXT, model TEXT, project TEXT, template TEXT, timestamp TEXT)\")\n\n conn.execute(\"create table if not exists export (dataset_id INTEGER, export_date TEXT)\")\n\n conn.execute(\"create table if not exists selection (selection_id INTEGER PRIMARY KEY, filename TEXT, checksum TEXT,status TEXT)\")\n conn.execute(\"create table if not exists selection__file (selection_id INT NOT NULL, file_id INT NOT NULL)\")\n\n conn.execute(\"create table if not exists file_without_selection (file_id INTEGER)\")\n conn.execute(\"create table if not exists file_without_dataset (file_id INTEGER)\")\n\n conn.execute(\"create table if not exists param (name TEXT, value TEXT)\")\n conn.execute(\"create table if not exists version (version TEXT)\")\n conn.execute(\"create table if not exists history (history_id INTEGER PRIMARY KEY, action TEXT, selection_filename TEXT, crea_date TEXT, insertion_group_id INT)\")\n\n conn.execute(\"create table if not exists event (event_id INTEGER PRIMARY KEY, name TEXT, status TEXT, project TEXT, model TEXT, dataset_pattern TEXT, variable TEXT, filename_pattern TEXT, crea_date TEXT, priority INT)\")\n\n 
conn.execute(\"create table if not exists generic_cache (realm TEXT, name TEXT, value TEXT)\")\n\n conn.commit()", "title": "" }, { "docid": "4af96a9c9bd8ceec1a889850c9444683", "score": "0.5472646", "text": "def setUp(self):\n self.app = create_app()\n self.client = self.app.test_client\n self.database_name = \"trivia_test\"\n #self.database_path = \"postgres://{}:{}@{}/{}\".format('postgres', 'moknine2020','localhost:5432', self.database_name)\n self.database_path = os.environ['DATABASE_TEST_PATH']\n\t\tsetup_db(self.app, self.database_path)\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()", "title": "" }, { "docid": "5a8e33d3bcab01ccf978a1b0b30b1522", "score": "0.54703987", "text": "def sql_create(database):\n\n database.execute('''\n CREATE TABLE IF NOT EXISTS stadium_members (\n stadium TEXT,\n team TEXT,\n PRIMARY KEY (stadium, team),\n FOREIGN KEY (stadium) REFERENCES stadiums(stadium)\n FOREIGN KEY (team) REFERENCES teams(team)\n )\n ''')", "title": "" }, { "docid": "26dbbed7621e4b0323944ba7082e9de7", "score": "0.5465738", "text": "def create_tables(self):\n self.base.metadata.create_all(bind=self.engine)", "title": "" }, { "docid": "bc3818aa61dfe1947d77489b91ea2d31", "score": "0.5463257", "text": "def create_database(self):\n # Check connection\n self.check_connection()\n\n # Prepare csv databases for trades and quotes\n self.db.initialize_trade_csv()\n self.db.initialize_quote_csv()\n\n # Prepare sql database and tables\n self.db.initialize_sql_db()\n self.db.initialize_trade_sql()\n self.db.initialize_quote_sql()\n\n # Access data from BtfxWss and return a Queue object for the pair:\n self.trade_q = self.wss.trades(self.symbol)\n self.quote_q = self.wss.books(self.symbol)\n\n # Take a snapshot of the orderbook\n quote_snapshot = self.quote_q.get()\n\n # Input the snapshot to database\n self.db.create_order_book(quote_snapshot)\n self.db.create_quote_csv(quote_snapshot)\n self.db.create_quote_sql(quote_snapshot)\n\n logger.info('Databases created.')", "title": "" }, { "docid": "d199e6c7c014865dcfe750f394352fc8", "score": "0.5459833", "text": "def create_tables(self, schema=None):\n con = sqlite3.connect(self.db_path)\n #Database created from schema\n if schema is None:\n schema = DEFAULT_SCHEMA\n try:\n with io.open(schema, encoding=\"utf-8\") as f:\n sql = f.read()\n cur = con.cursor()\n cur.executescript(sql)\n finally:\n con.close()", "title": "" }, { "docid": "8fc498289f850438f549e9dada96249a", "score": "0.5459613", "text": "def init_db():\n Item.logger.info('Initializing database')\n db.create_all() # make our sqlalchemy tables", "title": "" }, { "docid": "f06bc505ff46b8a4c2d028a2c9ca2ddb", "score": "0.54537", "text": "def sql_create(database):\n\n database.execute('''\n CREATE TABLE IF NOT EXISTS city_elevations (\n city TEXT,\n state TEXT,\n elevation INTEGER,\n PRIMARY KEY (city, state)\n )\n ''')", "title": "" }, { "docid": "331ab6239bf74eabeee36a3aeb28f9ab", "score": "0.54494613", "text": "def create_database(dbname,engine):\n con = engine.connect()\n con.execute(\"COMMIT\") # need to close current transaction\n con.execute(\"CREATE DATABASE {}\".format(dbname))\n con.execute(\"COMMIT\") \n con.close()\n msg = \"Target database created: {}\".format(dbname)\n logging.info(msg)", "title": "" }, { "docid": "e5d7e8d159193a0ae38bfda54ba225f3", "score": "0.5443992", "text": "def create_db():\n db.create_all()", "title": "" }, { "docid": 
"e5d7e8d159193a0ae38bfda54ba225f3", "score": "0.5443992", "text": "def create_db():\n db.create_all()", "title": "" }, { "docid": "e5d7e8d159193a0ae38bfda54ba225f3", "score": "0.5443992", "text": "def create_db():\n db.create_all()", "title": "" }, { "docid": "e5d7e8d159193a0ae38bfda54ba225f3", "score": "0.5443992", "text": "def create_db():\n db.create_all()", "title": "" }, { "docid": "e5d7e8d159193a0ae38bfda54ba225f3", "score": "0.5443992", "text": "def create_db():\n db.create_all()", "title": "" }, { "docid": "e5d7e8d159193a0ae38bfda54ba225f3", "score": "0.5443992", "text": "def create_db():\n db.create_all()", "title": "" }, { "docid": "e5d7e8d159193a0ae38bfda54ba225f3", "score": "0.5443992", "text": "def create_db():\n db.create_all()", "title": "" }, { "docid": "4707279fcaf737049c283fff41fb59f9", "score": "0.54349375", "text": "def init_database(self):\n self._create_table_document()", "title": "" }, { "docid": "b7388dff4a2f59350427abbf5cac70fe", "score": "0.5430094", "text": "def prepare_db():\n try:\n con = psycopg2.connect(dbname='postgres', user=USER, password=PASSWORD)\n except psycopg2.Error as e:\n raise e\n logging.info('Connected to database postgres')\n con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n cur = con.cursor()\n try:\n cur.execute('CREATE DATABASE ' + DB_NAME)\n except psycopg2.Error as e:\n logging.info('DROP OLD DATABASE')\n logging.info('CREATE NEW DATABASE')\n cur.execute('DROP DATABASE ' + DB_NAME)\n cur.execute('CREATE DATABASE ' + DB_NAME)\n cur.close()\n con.close()\n\n con = psycopg2.connect(dbname=DB_NAME, user=USER, password=PASSWORD)\n cur = con.cursor()\n cur.execute('CREATE EXTENSION CUBE')\n # cur.execute('CREATE TABLE train (id serial, name text, url text, vector cube);')\n # cur.execute('CREATE TABLE dev (id serial, name text, url text, vector cube);')\n cur.execute('CREATE TABLE images (id serial, name text, url text, vector cube);')\n con.commit()\n cur.close()\n con.close()", "title": "" }, { "docid": "18e156b3af2a015598d9a73fe8d8be7e", "score": "0.5426933", "text": "def test_create(self):\n\n import sys\n from model.model import BaseModel\n from model import model\n import peewee\n #from model.model import database as db\n\n models=list()\n\n for cls in sys.modules[\"model.model\"].__dict__.values():\n try:\n if BaseModel in cls.__bases__:\n if type(cls) is peewee.ModelBase:\n #print(type(cls))\n models.append(cls)\n except:\n pass\n\n #model.database.initialize(MySQLDatabase('tuxlog_build', **{'host': '10.8.1.1', 'use_unicode': True, 'user': 'root', 'password': 'messwert', 'charset': 'utf8'}))\n model.database.initialize(SqliteDatabase(':memory:'))\n model.database.connect()\n\n for table in models:\n try:\n model.database.create_tables([table])\n except:\n print ( \"Exception => %s (%s)\" % (table, str(sys.exc_info())) )\n\n\n #print( model.database.get_tables() )", "title": "" }, { "docid": "af85116838e42035f8827d22c37d3dbe", "score": "0.5416249", "text": "def create_tables(self):\n try:\n # Create the sources table.\n self.execute(\n \"CREATE TABLE source(\"\n \"id INTEGER PRIMARY KEY NOT NULL, \"\n \"url TEXT UNIQUE NOT NULL)\")\n\n # Create the keywords table.\n self.execute(\n \"CREATE TABLE keyword( \"\n \"id INTEGER PRIMARY KEY, \"\n \"source_id INTEGER, \"\n \"name TEXT UNIQUE NOT NULL, \"\n \"FOREIGN KEY(source_id) REFERENCES source(id) ON DELETE \"\n \"CASCADE)\")\n\n # Create the articles table.\n self.execute(\n \"CREATE TABLE article(\"\n \"id INTEGER PRIMARY KEY NOT NULL, \"\n \"source_id INTEGER NOT NULL, \"\n \"url 
TEXT UNIQUE NOT NULL, \"\n \"title TEXT NOT NULL, \"\n \"date DATE NOT NULL, \"\n \"author TEXT NOT NULL, \"\n \"tags TEXT NOT NULL, \"\n \"FOREIGN KEY(source_id) REFERENCES source(id) ON DELETE \"\n \"CASCADE)\")\n\n # Create the watchlist table.\n self.execute(\n \"CREATE TABLE watch(\"\n \"id INTEGER PRIMARY KEY NOT NULL, \"\n \"url TEXT UNIQUE NOT NULL,\"\n \"domain TEXT UNIQUE NOT NULL)\")\n\n # Create the reference table.\n self.execute(\n \"CREATE TABLE ref(\"\n \"id INTEGER PRIMARY KEY, \"\n \"source_id INTEGER NOT NULL, \"\n \"child_id INTEGER NOT NULL, \"\n \"parent_id INTEGER, \"\n \"FOREIGN KEY(source_id) REFERENCES source(id) ON DELETE \"\n \"CASCADE, \"\n \"FOREIGN KEY(child_id) REFERENCES article(id) ON DELETE \"\n \"CASCADE, \"\n \"FOREIGN KEY(parent_id) REFERENCES article(id) ON DELETE \"\n \"CASCADE, \"\n \"UNIQUE(child_id, parent_id), \"\n \"CHECK (child_id != parent_id))\")\n\n # Create the duplicate detection trigger for references.\n self.execute(\n \"CREATE TRIGGER ref_dup_check BEFORE INSERT ON ref \"\n \"WHEN NEW.parent_id IS NULL BEGIN SELECT \"\n \"RAISE(ABORT, 'Duplicated article/source reference.') \"\n \"WHERE EXISTS (SELECT * FROM ref \"\n \"WHERE source_id=NEW.source_id \"\n \"AND child_id=NEW.child_id LIMIT 1); END;\")\n\n # Creating the auto delete trigger for deleting a reference with\n # the same source as an added link\n self.execute(\n \"CREATE TRIGGER ref_clean AFTER INSERT on ref \"\n \"WHEN NEW.parent_id IS NOT NULL BEGIN \"\n \"DELETE FROM ref WHERE \"\n \"source_id=NEW.source_id AND child_id=NEW.child_id \"\n \"AND parent_id IS NULL; END;\")\n\n # Create the trigger that prevents adding a reference from an\n # article to its publisher source.\n self.execute(\n \"CREATE TRIGGER ref_source_check BEFORE INSERT ON ref \"\n \"BEGIN \"\n \"SELECT RAISE(ABORT, \"\n \"'Reference source and article source are the same.') \"\n \"WHERE EXISTS (SELECT * FROM article WHERE \"\n \"id = NEW.child_id AND source_id = NEW.source_id); \"\n \"END;\")\n except sqlite3.OperationalError:\n # Tables already exist.\n pass", "title": "" }, { "docid": "146659e07af34d0845cb0cfb5dac44f9", "score": "0.5414825", "text": "def db_init(self):\n query = \"\"\"CREATE TABLE IF NOT EXISTS db_ schema ( \n id INT AUTO_INCREMENT PRIMARY KEY,\n name TEXT NOT NULL, \n subject TEXT NOT NULL, \n data LONGTEXT NOT NULL\n ) \"\"\"\n self.db_execute_query(query)\n\n for table in TABLES:\n query = \"CREATE TABLE IF NOT EXISTS \" + table + \" LIKE db_schema\"\n self.db_execute_query(query)", "title": "" }, { "docid": "61b7c4643cc50fa2d6d1f79a2e94a7ba", "score": "0.54132116", "text": "def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):\n try:\n myvnfd = vnfd_catalog.vnfd()\n try:\n pybindJSONDecoder.load_ietf_json(vnf_descriptor, None, None, obj=myvnfd, path_helper=True,\n skip_unknown=True)\n except Exception as e:\n raise NfvoException(\"Error. 
Invalid VNF descriptor format \" + str(e), httperrors.Bad_Request)\n db_vnfs = []\n db_nets = []\n db_vms = []\n db_vms_index = 0\n db_interfaces = []\n db_images = []\n db_flavors = []\n db_ip_profiles_index = 0\n db_ip_profiles = []\n uuid_list = []\n vnfd_uuid_list = []\n vnfd_catalog_descriptor = vnf_descriptor.get(\"vnfd:vnfd-catalog\")\n if not vnfd_catalog_descriptor:\n vnfd_catalog_descriptor = vnf_descriptor.get(\"vnfd-catalog\")\n vnfd_descriptor_list = vnfd_catalog_descriptor.get(\"vnfd\")\n if not vnfd_descriptor_list:\n vnfd_descriptor_list = vnfd_catalog_descriptor.get(\"vnfd:vnfd\")\n for vnfd_yang in myvnfd.vnfd_catalog.vnfd.values():\n vnfd = vnfd_yang.get()\n\n # table vnf\n vnf_uuid = str(uuid4())\n uuid_list.append(vnf_uuid)\n vnfd_uuid_list.append(vnf_uuid)\n vnfd_id = get_str(vnfd, \"id\", 255)\n db_vnf = {\n \"uuid\": vnf_uuid,\n \"osm_id\": vnfd_id,\n \"name\": get_str(vnfd, \"name\", 255),\n \"description\": get_str(vnfd, \"description\", 255),\n \"tenant_id\": tenant_id,\n \"vendor\": get_str(vnfd, \"vendor\", 255),\n \"short_name\": get_str(vnfd, \"short-name\", 255),\n \"descriptor\": str(vnf_descriptor)[:60000]\n }\n\n for vnfd_descriptor in vnfd_descriptor_list:\n if vnfd_descriptor[\"id\"] == str(vnfd[\"id\"]):\n break\n\n # table ip_profiles (ip-profiles)\n ip_profile_name2db_table_index = {}\n for ip_profile in vnfd.get(\"ip-profiles\").values():\n db_ip_profile = {\n \"ip_version\": str(ip_profile[\"ip-profile-params\"].get(\"ip-version\", \"ipv4\")),\n \"subnet_address\": str(ip_profile[\"ip-profile-params\"].get(\"subnet-address\")),\n \"gateway_address\": str(ip_profile[\"ip-profile-params\"].get(\"gateway-address\")),\n \"dhcp_enabled\": str(ip_profile[\"ip-profile-params\"][\"dhcp-params\"].get(\"enabled\", True)),\n \"dhcp_start_address\": str(ip_profile[\"ip-profile-params\"][\"dhcp-params\"].get(\"start-address\")),\n \"dhcp_count\": str(ip_profile[\"ip-profile-params\"][\"dhcp-params\"].get(\"count\")),\n }\n dns_list = []\n for dns in ip_profile[\"ip-profile-params\"][\"dns-server\"].values():\n dns_list.append(str(dns.get(\"address\")))\n db_ip_profile[\"dns_address\"] = \";\".join(dns_list)\n if ip_profile[\"ip-profile-params\"].get('security-group'):\n db_ip_profile[\"security_group\"] = ip_profile[\"ip-profile-params\"]['security-group']\n ip_profile_name2db_table_index[str(ip_profile[\"name\"])] = db_ip_profiles_index\n db_ip_profiles_index += 1\n db_ip_profiles.append(db_ip_profile)\n\n # table nets (internal-vld)\n net_id2uuid = {} # for mapping interface with network\n net_id2index = {} # for mapping interface with network\n for vld in vnfd.get(\"internal-vld\").values():\n net_uuid = str(uuid4())\n uuid_list.append(net_uuid)\n db_net = {\n \"name\": get_str(vld, \"name\", 255),\n \"vnf_id\": vnf_uuid,\n \"uuid\": net_uuid,\n \"description\": get_str(vld, \"description\", 255),\n \"osm_id\": get_str(vld, \"id\", 255),\n \"type\": \"bridge\", # TODO adjust depending on connection point type\n }\n net_id2uuid[vld.get(\"id\")] = net_uuid\n net_id2index[vld.get(\"id\")] = len(db_nets)\n db_nets.append(db_net)\n # ip-profile, link db_ip_profile with db_sce_net\n if vld.get(\"ip-profile-ref\"):\n ip_profile_name = vld.get(\"ip-profile-ref\")\n if ip_profile_name not in ip_profile_name2db_table_index:\n raise NfvoException(\"Error. Invalid VNF descriptor at 'vnfd[{}]':'vld[{}]':'ip-profile-ref':\"\n \"'{}'. 
Reference to a non-existing 'ip_profiles'\".format(\n str(vnfd[\"id\"]), str(vld[\"id\"]), str(vld[\"ip-profile-ref\"])),\n httperrors.Bad_Request)\n db_ip_profiles[ip_profile_name2db_table_index[ip_profile_name]][\"net_id\"] = net_uuid\n else: #check no ip-address has been defined\n for icp in vld.get(\"internal-connection-point\").values():\n if icp.get(\"ip-address\"):\n raise NfvoException(\"Error at 'vnfd[{}]':'vld[{}]':'internal-connection-point[{}]' \"\n \"contains an ip-address but no ip-profile has been defined at VLD\".format(\n str(vnfd[\"id\"]), str(vld[\"id\"]), str(icp[\"id\"])),\n httperrors.Bad_Request)\n\n # connection points vaiable declaration\n cp_name2iface_uuid = {}\n cp_name2vdu_id = {}\n cp_name2vm_uuid = {}\n cp_name2db_interface = {}\n vdu_id2cp_name = {} # stored only when one external connection point is presented at this VDU\n\n # table vms (vdus)\n vdu_id2uuid = {}\n vdu_id2db_table_index = {}\n mgmt_access = {}\n for vdu in vnfd.get(\"vdu\").values():\n\n for vdu_descriptor in vnfd_descriptor[\"vdu\"]:\n if vdu_descriptor[\"id\"] == str(vdu[\"id\"]):\n break\n vm_uuid = str(uuid4())\n uuid_list.append(vm_uuid)\n vdu_id = get_str(vdu, \"id\", 255)\n db_vm = {\n \"uuid\": vm_uuid,\n \"osm_id\": vdu_id,\n \"name\": get_str(vdu, \"name\", 255),\n \"description\": get_str(vdu, \"description\", 255),\n \"pdu_type\": get_str(vdu, \"pdu-type\", 255),\n \"vnf_id\": vnf_uuid,\n }\n vdu_id2uuid[db_vm[\"osm_id\"]] = vm_uuid\n vdu_id2db_table_index[db_vm[\"osm_id\"]] = db_vms_index\n if vdu.get(\"count\"):\n db_vm[\"count\"] = int(vdu[\"count\"])\n\n # table image\n image_present = False\n if vdu.get(\"image\"):\n image_present = True\n db_image = {}\n image_uuid = _lookfor_or_create_image(db_image, mydb, vdu)\n if not image_uuid:\n image_uuid = db_image[\"uuid\"]\n db_images.append(db_image)\n db_vm[\"image_id\"] = image_uuid\n if vdu.get(\"alternative-images\"):\n vm_alternative_images = []\n for alt_image in vdu.get(\"alternative-images\").values():\n db_image = {}\n image_uuid = _lookfor_or_create_image(db_image, mydb, alt_image)\n if not image_uuid:\n image_uuid = db_image[\"uuid\"]\n db_images.append(db_image)\n vm_alternative_images.append({\n \"image_id\": image_uuid,\n \"vim_type\": str(alt_image[\"vim-type\"]),\n # \"universal_name\": str(alt_image[\"image\"]),\n # \"checksum\": str(alt_image[\"image-checksum\"]) if alt_image.get(\"image-checksum\") else None\n })\n\n db_vm[\"image_list\"] = yaml.safe_dump(vm_alternative_images, default_flow_style=True, width=256)\n\n # volumes\n devices = []\n if vdu.get(\"volumes\"):\n for volume_key in vdu[\"volumes\"]:\n volume = vdu[\"volumes\"][volume_key]\n if not image_present:\n # Convert the first volume to vnfc.image\n image_present = True\n db_image = {}\n image_uuid = _lookfor_or_create_image(db_image, mydb, volume)\n if not image_uuid:\n image_uuid = db_image[\"uuid\"]\n db_images.append(db_image)\n db_vm[\"image_id\"] = image_uuid\n else:\n # Add Openmano devices\n device = {\"name\": str(volume.get(\"name\"))}\n device[\"type\"] = str(volume.get(\"device-type\"))\n if volume.get(\"size\"):\n device[\"size\"] = int(volume[\"size\"])\n if volume.get(\"image\"):\n device[\"image name\"] = str(volume[\"image\"])\n if volume.get(\"image-checksum\"):\n device[\"image checksum\"] = str(volume[\"image-checksum\"])\n\n devices.append(device)\n\n if not db_vm.get(\"image_id\"):\n if not db_vm[\"pdu_type\"]:\n raise NfvoException(\"Not defined image for VDU\")\n # create a fake image\n\n # cloud-init\n boot_data = {}\n if 
vdu.get(\"cloud-init\"):\n boot_data[\"user-data\"] = str(vdu[\"cloud-init\"])\n elif vdu.get(\"cloud-init-file\"):\n # TODO Where this file content is present???\n # boot_data[\"user-data\"] = vnfd_yang.files[vdu[\"cloud-init-file\"]]\n boot_data[\"user-data\"] = str(vdu[\"cloud-init-file\"])\n\n if vdu.get(\"supplemental-boot-data\"):\n if vdu[\"supplemental-boot-data\"].get('boot-data-drive'):\n boot_data['boot-data-drive'] = True\n if vdu[\"supplemental-boot-data\"].get('config-file'):\n om_cfgfile_list = list()\n for custom_config_file in vdu[\"supplemental-boot-data\"]['config-file'].values():\n # TODO Where this file content is present???\n cfg_source = str(custom_config_file[\"source\"])\n om_cfgfile_list.append({\"dest\": custom_config_file[\"dest\"],\n \"content\": cfg_source})\n boot_data['config-files'] = om_cfgfile_list\n if boot_data:\n db_vm[\"boot_data\"] = yaml.safe_dump(boot_data, default_flow_style=True, width=256)\n\n db_vms.append(db_vm)\n db_vms_index += 1\n\n # table interfaces (internal/external interfaces)\n flavor_epa_interfaces = []\n # for iface in chain(vdu.get(\"internal-interface\").values(), vdu.get(\"external-interface\").values()):\n for iface in vdu.get(\"interface\").values():\n flavor_epa_interface = {}\n iface_uuid = str(uuid4())\n uuid_list.append(iface_uuid)\n db_interface = {\n \"uuid\": iface_uuid,\n \"internal_name\": get_str(iface, \"name\", 255),\n \"vm_id\": vm_uuid,\n }\n flavor_epa_interface[\"name\"] = db_interface[\"internal_name\"]\n if iface.get(\"virtual-interface\").get(\"vpci\"):\n db_interface[\"vpci\"] = get_str(iface.get(\"virtual-interface\"), \"vpci\", 12)\n flavor_epa_interface[\"vpci\"] = db_interface[\"vpci\"]\n\n if iface.get(\"virtual-interface\").get(\"bandwidth\"):\n bps = int(iface.get(\"virtual-interface\").get(\"bandwidth\"))\n db_interface[\"bw\"] = int(math.ceil(bps / 1000000.0))\n flavor_epa_interface[\"bandwidth\"] = \"{} Mbps\".format(db_interface[\"bw\"])\n\n if iface.get(\"virtual-interface\").get(\"type\") == \"OM-MGMT\":\n db_interface[\"type\"] = \"mgmt\"\n elif iface.get(\"virtual-interface\").get(\"type\") in (\"VIRTIO\", \"E1000\", \"PARAVIRT\"):\n db_interface[\"type\"] = \"bridge\"\n db_interface[\"model\"] = get_str(iface.get(\"virtual-interface\"), \"type\", 12)\n elif iface.get(\"virtual-interface\").get(\"type\") in (\"SR-IOV\", \"PCI-PASSTHROUGH\"):\n db_interface[\"type\"] = \"data\"\n db_interface[\"model\"] = get_str(iface.get(\"virtual-interface\"), \"type\", 12)\n flavor_epa_interface[\"dedicated\"] = \"no\" if iface[\"virtual-interface\"][\"type\"] == \"SR-IOV\" \\\n else \"yes\"\n flavor_epa_interfaces.append(flavor_epa_interface)\n else:\n raise NfvoException(\"Error. Invalid VNF descriptor at 'vnfd[{}]':'vdu[{}]':'interface':'virtual\"\n \"-interface':'type':'{}'. 
Interface type is not supported\".format(\n vnfd_id, vdu_id, iface.get(\"virtual-interface\").get(\"type\")),\n httperrors.Bad_Request)\n\n if iface.get(\"mgmt-interface\"):\n db_interface[\"type\"] = \"mgmt\"\n\n if iface.get(\"external-connection-point-ref\"):\n try:\n cp = vnfd.get(\"connection-point\")[iface.get(\"external-connection-point-ref\")]\n db_interface[\"external_name\"] = get_str(cp, \"name\", 255)\n cp_name2iface_uuid[db_interface[\"external_name\"]] = iface_uuid\n cp_name2vdu_id[db_interface[\"external_name\"]] = vdu_id\n cp_name2vm_uuid[db_interface[\"external_name\"]] = vm_uuid\n cp_name2db_interface[db_interface[\"external_name\"]] = db_interface\n for cp_descriptor in vnfd_descriptor[\"connection-point\"]:\n if cp_descriptor[\"name\"] == db_interface[\"external_name\"]:\n break\n else:\n raise KeyError()\n\n if vdu_id in vdu_id2cp_name:\n vdu_id2cp_name[vdu_id] = None # more than two connection point for this VDU\n else:\n vdu_id2cp_name[vdu_id] = db_interface[\"external_name\"]\n\n # port security\n if str(cp_descriptor.get(\"port-security-enabled\")).lower() == \"false\":\n db_interface[\"port_security\"] = 0\n elif str(cp_descriptor.get(\"port-security-enabled\")).lower() == \"true\":\n db_interface[\"port_security\"] = 1\n except KeyError:\n raise NfvoException(\"Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'vdu[{vdu}]':\"\n \"'interface[{iface}]':'vnfd-connection-point-ref':'{cp}' is not present\"\n \" at connection-point\".format(\n vnf=vnfd_id, vdu=vdu_id, iface=iface[\"name\"],\n cp=iface.get(\"vnfd-connection-point-ref\")),\n httperrors.Bad_Request)\n elif iface.get(\"internal-connection-point-ref\"):\n try:\n for icp_descriptor in vdu_descriptor[\"internal-connection-point\"]:\n if icp_descriptor[\"id\"] == str(iface.get(\"internal-connection-point-ref\")):\n break\n else:\n raise KeyError(\"does not exist at vdu:internal-connection-point\")\n icp = None\n icp_vld = None\n for vld in vnfd.get(\"internal-vld\").values():\n for cp in vld.get(\"internal-connection-point\").values():\n if cp.get(\"id-ref\") == iface.get(\"internal-connection-point-ref\"):\n if icp:\n raise KeyError(\"is referenced by more than one 'internal-vld'\")\n icp = cp\n icp_vld = vld\n if not icp:\n raise KeyError(\"is not referenced by any 'internal-vld'\")\n\n # set network type as data\n if iface.get(\"virtual-interface\") and iface[\"virtual-interface\"].get(\"type\") in \\\n (\"SR-IOV\", \"PCI-PASSTHROUGH\"):\n db_nets[net_id2index[icp_vld.get(\"id\")]][\"type\"] = \"data\"\n db_interface[\"net_id\"] = net_id2uuid[icp_vld.get(\"id\")]\n if str(icp_descriptor.get(\"port-security-enabled\")).lower() == \"false\":\n db_interface[\"port_security\"] = 0\n elif str(icp_descriptor.get(\"port-security-enabled\")).lower() == \"true\":\n db_interface[\"port_security\"] = 1\n if icp.get(\"ip-address\"):\n if not icp_vld.get(\"ip-profile-ref\"):\n raise NfvoException\n db_interface[\"ip_address\"] = str(icp.get(\"ip-address\"))\n except KeyError as e:\n raise NfvoException(\"Error. 
Invalid VNF descriptor at 'vnfd[{vnf}]':'vdu[{vdu}]':\"\n \"'interface[{iface}]':'internal-connection-point-ref':'{cp}'\"\n \" {msg}\".format(\n vnf=vnfd_id, vdu=vdu_id, iface=iface[\"name\"],\n cp=iface.get(\"internal-connection-point-ref\"), msg=str(e)),\n httperrors.Bad_Request)\n if iface.get(\"position\"):\n db_interface[\"created_at\"] = int(iface.get(\"position\")) * 50\n if iface.get(\"mac-address\"):\n db_interface[\"mac\"] = str(iface.get(\"mac-address\"))\n db_interfaces.append(db_interface)\n\n # table flavors\n db_flavor = {\n \"name\": get_str(vdu, \"name\", 250) + \"-flv\",\n \"vcpus\": int(vdu[\"vm-flavor\"].get(\"vcpu-count\", 1)),\n \"ram\": int(vdu[\"vm-flavor\"].get(\"memory-mb\", 1)),\n \"disk\": int(vdu[\"vm-flavor\"].get(\"storage-gb\", 0)),\n }\n # TODO revise the case of several numa-node-policy node\n extended = {}\n numa = {}\n if devices:\n extended[\"devices\"] = devices\n if flavor_epa_interfaces:\n numa[\"interfaces\"] = flavor_epa_interfaces\n if vdu.get(\"guest-epa\"): # TODO or dedicated_int:\n epa_vcpu_set = False\n if vdu[\"guest-epa\"].get(\"numa-node-policy\"): # TODO or dedicated_int:\n numa_node_policy = vdu[\"guest-epa\"].get(\"numa-node-policy\")\n if numa_node_policy.get(\"node\"):\n numa_node = next(iter(numa_node_policy[\"node\"].values()))\n if numa_node.get(\"num-cores\"):\n numa[\"cores\"] = numa_node[\"num-cores\"]\n epa_vcpu_set = True\n if numa_node.get(\"paired-threads\"):\n if numa_node[\"paired-threads\"].get(\"num-paired-threads\"):\n numa[\"paired-threads\"] = int(numa_node[\"paired-threads\"][\"num-paired-threads\"])\n epa_vcpu_set = True\n if len(numa_node[\"paired-threads\"].get(\"paired-thread-ids\")):\n numa[\"paired-threads-id\"] = []\n for pair in numa_node[\"paired-threads\"][\"paired-thread-ids\"].values():\n numa[\"paired-threads-id\"].append(\n (str(pair[\"thread-a\"]), str(pair[\"thread-b\"]))\n )\n if numa_node.get(\"num-threads\"):\n numa[\"threads\"] = int(numa_node[\"num-threads\"])\n epa_vcpu_set = True\n if numa_node.get(\"memory-mb\"):\n numa[\"memory\"] = max(int(numa_node[\"memory-mb\"] / 1024), 1)\n if vdu[\"guest-epa\"].get(\"mempage-size\"):\n if vdu[\"guest-epa\"][\"mempage-size\"] != \"SMALL\":\n numa[\"memory\"] = max(int(db_flavor[\"ram\"] / 1024), 1)\n if vdu[\"guest-epa\"].get(\"cpu-pinning-policy\") and not epa_vcpu_set:\n if vdu[\"guest-epa\"][\"cpu-pinning-policy\"] == \"DEDICATED\":\n if vdu[\"guest-epa\"].get(\"cpu-thread-pinning-policy\") and \\\n vdu[\"guest-epa\"][\"cpu-thread-pinning-policy\"] != \"PREFER\":\n numa[\"cores\"] = max(db_flavor[\"vcpus\"], 1)\n else:\n numa[\"threads\"] = max(db_flavor[\"vcpus\"], 1)\n epa_vcpu_set = True\n if vdu[\"guest-epa\"].get(\"cpu-quota\") and not epa_vcpu_set:\n cpuquota = get_resource_allocation_params(vdu[\"guest-epa\"].get(\"cpu-quota\"))\n if cpuquota:\n extended[\"cpu-quota\"] = cpuquota\n if vdu[\"guest-epa\"].get(\"mem-quota\"):\n vduquota = get_resource_allocation_params(vdu[\"guest-epa\"].get(\"mem-quota\"))\n if vduquota:\n extended[\"mem-quota\"] = vduquota\n if vdu[\"guest-epa\"].get(\"disk-io-quota\"):\n diskioquota = get_resource_allocation_params(vdu[\"guest-epa\"].get(\"disk-io-quota\"))\n if diskioquota:\n extended[\"disk-io-quota\"] = diskioquota\n if vdu[\"guest-epa\"].get(\"vif-quota\"):\n vifquota = get_resource_allocation_params(vdu[\"guest-epa\"].get(\"vif-quota\"))\n if vifquota:\n extended[\"vif-quota\"] = vifquota\n if numa:\n extended[\"numas\"] = [numa]\n if extended:\n extended_text = yaml.safe_dump(extended, 
default_flow_style=True, width=256)\n db_flavor[\"extended\"] = extended_text\n # look if flavor exist\n temp_flavor_dict = {'disk': db_flavor.get('disk', 0),\n 'ram': db_flavor.get('ram'),\n 'vcpus': db_flavor.get('vcpus'),\n 'extended': db_flavor.get('extended')\n }\n existing_flavors = mydb.get_rows(FROM=\"flavors\", WHERE=temp_flavor_dict)\n if existing_flavors:\n flavor_uuid = existing_flavors[0][\"uuid\"]\n else:\n flavor_uuid = str(uuid4())\n uuid_list.append(flavor_uuid)\n db_flavor[\"uuid\"] = flavor_uuid\n db_flavors.append(db_flavor)\n db_vm[\"flavor_id\"] = flavor_uuid\n\n # VNF affinity and antiaffinity\n for pg in vnfd.get(\"placement-groups\").values():\n pg_name = get_str(pg, \"name\", 255)\n for vdu in pg.get(\"member-vdus\").values():\n vdu_id = get_str(vdu, \"member-vdu-ref\", 255)\n if vdu_id not in vdu_id2db_table_index:\n raise NfvoException(\"Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'placement-groups[{pg}]':\"\n \"'member-vdus':'{vdu}'. Reference to a non-existing vdu\".format(\n vnf=vnfd_id, pg=pg_name, vdu=vdu_id),\n httperrors.Bad_Request)\n db_vms[vdu_id2db_table_index[vdu_id]][\"availability_zone\"] = pg_name\n # TODO consider the case of isolation and not colocation\n # if pg.get(\"strategy\") == \"ISOLATION\":\n\n # VNF mgmt configuration\n if vnfd[\"mgmt-interface\"].get(\"vdu-id\"):\n mgmt_vdu_id = get_str(vnfd[\"mgmt-interface\"], \"vdu-id\", 255)\n if mgmt_vdu_id not in vdu_id2uuid:\n raise NfvoException(\"Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'vdu-id':\"\n \"'{vdu}'. Reference to a non-existing vdu\".format(\n vnf=vnfd_id, vdu=mgmt_vdu_id),\n httperrors.Bad_Request)\n mgmt_access[\"vm_id\"] = vdu_id2uuid[mgmt_vdu_id]\n mgmt_access[\"vdu-id\"] = mgmt_vdu_id\n # if only one cp is defined by this VDU, mark this interface as of type \"mgmt\"\n if vdu_id2cp_name.get(mgmt_vdu_id):\n if cp_name2db_interface[vdu_id2cp_name[mgmt_vdu_id]]:\n cp_name2db_interface[vdu_id2cp_name[mgmt_vdu_id]][\"type\"] = \"mgmt\"\n\n if vnfd[\"mgmt-interface\"].get(\"ip-address\"):\n mgmt_access[\"ip-address\"] = str(vnfd[\"mgmt-interface\"].get(\"ip-address\"))\n if vnfd[\"mgmt-interface\"].get(\"cp\") and vnfd.get(\"vdu\"):\n if vnfd[\"mgmt-interface\"][\"cp\"] not in cp_name2iface_uuid:\n raise NfvoException(\"Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'cp'['{cp}']. 
\"\n \"Reference to a non-existing connection-point\".format(\n vnf=vnfd_id, cp=vnfd[\"mgmt-interface\"][\"cp\"]),\n httperrors.Bad_Request)\n mgmt_access[\"vm_id\"] = cp_name2vm_uuid[vnfd[\"mgmt-interface\"][\"cp\"]]\n mgmt_access[\"interface_id\"] = cp_name2iface_uuid[vnfd[\"mgmt-interface\"][\"cp\"]]\n mgmt_access[\"vdu-id\"] = cp_name2vdu_id[vnfd[\"mgmt-interface\"][\"cp\"]]\n # mark this interface as of type mgmt\n if cp_name2db_interface[vnfd[\"mgmt-interface\"][\"cp\"]]:\n cp_name2db_interface[vnfd[\"mgmt-interface\"][\"cp\"]][\"type\"] = \"mgmt\"\n\n default_user = get_str(vnfd.get(\"vnf-configuration\", {}).get(\"config-access\", {}).get(\"ssh-access\", {}),\n \"default-user\", 64)\n if default_user:\n mgmt_access[\"default_user\"] = default_user\n\n required = get_str(vnfd.get(\"vnf-configuration\", {}).get(\"config-access\", {}).get(\"ssh-access\", {}),\n \"required\", 6)\n if required:\n mgmt_access[\"required\"] = required\n\n password_ = get_str(vnfd.get(\"vnf-configuration\", {}).get(\"config-access\", {}),\n \"password\", 64)\n if password_:\n mgmt_access[\"password\"] = password_\n\n if mgmt_access:\n db_vnf[\"mgmt_access\"] = yaml.safe_dump(mgmt_access, default_flow_style=True, width=256)\n\n db_vnfs.append(db_vnf)\n db_tables=[\n {\"vnfs\": db_vnfs},\n {\"nets\": db_nets},\n {\"images\": db_images},\n {\"flavors\": db_flavors},\n {\"ip_profiles\": db_ip_profiles},\n {\"vms\": db_vms},\n {\"interfaces\": db_interfaces},\n ]\n\n logger.debug(\"create_vnf Deployment done vnfDict: %s\",\n yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )\n mydb.new_rows(db_tables, uuid_list)\n return vnfd_uuid_list\n except NfvoException:\n raise\n except Exception as e:\n logger.error(\"Exception {}\".format(e))\n raise # NfvoException(\"Exception {}\".format(e), httperrors.Bad_Request)", "title": "" }, { "docid": "44b92c6ff56ef4a3be65ee46c9239a2d", "score": "0.54074097", "text": "def setup_non_temp_tables(self):\n from maintcal.model import metadata, engine\n from maintcal.model import db_sess\n\n print \"dropping schema\"\n db_sess.execute('DROP SCHEMA IF EXISTS maintcal_test CASCADE');\n print \"creating schema\"\n db_sess.execute('CREATE SCHEMA maintcal_test');\n db_sess.commit()\n\n print \"creating trigger functions\"\n print \"NOTE: make sure to run 'CREATE LANGUAGE plpgsql' as db user postgres if the mcal_test database is recreated.\"\n db_sess.execute('CREATE OR REPLACE FUNCTION creation_date() RETURNS \"trigger\" as $$ DECLARE BEGIN NEW.creation_date := now(); RETURN NEW; END; $$ language plpgsql')\n db_sess.execute('CREATE OR REPLACE FUNCTION modification_date() RETURNS \"trigger\" as $$ DECLARE BEGIN NEW.modification_date := now(); RETURN NEW; END; $$ language plpgsql')\n\n print \"creating tables\"\n metadata.create_all(bind=engine)\n\n print \"Attaching triggers to creation_date and modification_date columns\"\n\n db_sess.execute('CREATE TRIGGER available_defaults_creation_date BEFORE INSERT ON maintcal_test.available_defaults FOR EACH ROW EXECUTE PROCEDURE creation_date();')\n db_sess.execute('CREATE TRIGGER available_defaults_modification_date BEFORE INSERT OR UPDATE ON maintcal_test.available_defaults FOR EACH ROW EXECUTE PROCEDURE modification_date();')\n\n db_sess.execute('CREATE TRIGGER available_exceptions_creation_date BEFORE INSERT ON maintcal_test.available_exceptions FOR EACH ROW EXECUTE PROCEDURE creation_date();')\n db_sess.execute('CREATE TRIGGER available_exceptions_modification_date BEFORE INSERT OR UPDATE ON maintcal_test.available_exceptions FOR 
EACH ROW EXECUTE PROCEDURE modification_date();')\n\n db_sess.commit()\n print \"all tables and triggers created with no data.\"\n self._create_indexes(db_sess)\n print \"all indexes installed.\"", "title": "" }, { "docid": "1e1f50166f49c472a4087553cb75b086", "score": "0.540539", "text": "def create():\n engine = create_engine('sqlite:///briochefood.db', echo=True)\n Base.metadata.create_all(engine)", "title": "" }, { "docid": "9e81f2b25d757e2eeef9189024ea968c", "score": "0.54032344", "text": "def create_db():\n db.create_all()\n db.session.commit()", "title": "" }, { "docid": "857b11ec72d1b24fae723cb49ff171d1", "score": "0.5402023", "text": "def create_all_tables(self, models, db):\n for model in models:\n self.create_table(model, db)", "title": "" }, { "docid": "9bc2b938ac78d22bc0eaecdaca5bea45", "score": "0.5395596", "text": "def create_tables():\n commands = (\n \"\"\"\n CREATE TABLE vendors (\n vendor_id SERIAL PRIMARY KEY,\n vendor_name VARCHAR(255) NOT NULL\n )\n \"\"\",\n \"\"\" CREATE TABLE parts (\n part_id SERIAL PRIMARY KEY,\n part_name VARCHAR(255) NOT NULL\n )\n \"\"\",\n \"\"\"\n CREATE TABLE part_drawings (\n part_id INTEGER PRIMARY KEY,\n file_extension VARCHAR(5) NOT NULL,\n drawing_data BYTEA NOT NULL,\n FOREIGN KEY (part_id)\n REFERENCES parts (part_id)\n ON UPDATE CASCADE ON DELETE CASCADE\n )\n \"\"\",\n \"\"\"\n CREATE TABLE vendor_parts (\n vendor_id INTEGER NOT NULL,\n part_id INTEGER NOT NULL,\n PRIMARY KEY (vendor_id , part_id),\n FOREIGN KEY (vendor_id)\n REFERENCES vendors (vendor_id)\n ON UPDATE CASCADE ON DELETE CASCADE,\n FOREIGN KEY (part_id)\n REFERENCES parts (part_id)\n ON UPDATE CASCADE ON DELETE CASCADE\n )\n \"\"\")\n conn = None\n try:\n # read the connection parameters\n # connect to the PostgreSQL server\n creds = json.loads(get_secret())\n conn = psycopg2.connect(\n host=creds['host'],\n database=\"postgres\",\n user=creds['username'],\n password=creds['password']\n )\n cur = conn.cursor()\n # create table one by one\n for command in commands:\n cur.execute(command)\n # close communication with the PostgreSQL database server\n cur.close()\n # commit the changes\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()", "title": "" }, { "docid": "8f985b45eb886c23d862a9717a3d44e0", "score": "0.5392607", "text": "def _create_tables(self):\n for template in self.templates:\n sql = template.get_create_table_sql()\n self.c.execute(sql)", "title": "" }, { "docid": "004f9258da15172ae0b8c1b4660a5b95", "score": "0.5392153", "text": "def create_databases(filespec: str) -> None:\n print(\"Creating databases\")\n\n with sqlite3.connect(filespec) as db:\n db_trial_def = \"CREATE TABLE trial(\\n{}\\n)\"\n\n db_imp_def = \"CREATE TABLE imp(\\n\" \\\n \"eudract_id TEXT NOT NULL,\" \\\n \"\\n{}\\n\" \\\n \")\"\n\n db_sponsor_def = \"CREATE TABLE sponsor(\\n\" \\\n \"eudract_id TEXT NOT NULL,\" \\\n \"\\n{}\\n\" \\\n \")\"\n\n db_location_def = \"CREATE TABLE location(\\n\" \\\n \"eudract_id TEXT NOT NULL,\\n\" \\\n \"location TEXT NOT NULL\\n\" \\\n \")\"\n\n db_location_index = \"CREATE INDEX idx_location on location (eudract_id)\"\n db_sponsor_index = \"CREATE INDEX idx_imp on imp (eudract_id)\"\n db_imp_index = \"CREATE INDEX idx_sponsor on sponsor (eudract_id)\"\n\n db.execute(db_trial_def\n .format(\", \\n\".join([\"{} {}\".format(x, trial[x].field_type) for x in sorted(trial)])))\n db.execute(db_imp_def\n .format(\", \\n\".join([\"{} {}\".format(x, imp[x].field_type) for x in 
sorted(imp)])))\n db.execute(db_sponsor_def\n .format(\", \\n\".join([\"{} {}\" .format(x, sponsor[x].field_type) for x in sorted(sponsor)])))\n db.execute(db_location_def)\n db.execute(db_location_index)\n db.execute(db_sponsor_index)\n db.execute(db_imp_index)\n print(\"databases created!\")\n db.close()", "title": "" }, { "docid": "de17f26fd6cafb9c99211b25c5b06a3c", "score": "0.5390254", "text": "def setUp(self):\r\n\r\n _database = '/tmp/tempdb.rtk'\r\n self._dao = _dao(_database)\r\n self._dao.execute(\"PRAGMA foreign_keys = ON\", commit=False)\r\n\r\n self.DUT = SimilarItem()\r\n self.DUT.dao = self._dao", "title": "" }, { "docid": "04f2566ffe68dcc21a62c8ffd4b0af99", "score": "0.53897125", "text": "def main():\n db.create_all()\n\n # Uncomment the function call bellow to import flights into database\n # import_flights()", "title": "" }, { "docid": "0a1b49302599ab05f1a2be4ad9e697d4", "score": "0.5386038", "text": "def create_tables(self):\n self.Base.metadata.create_all(self.engine)", "title": "" }, { "docid": "b0b2019f6b1f96e8b967d3beaa8d0f9f", "score": "0.53858995", "text": "def setUp(self) -> None:\n sqlite_db = setup_sqlite_in_memory_db()\n sqlite_db.create_tables([\n ContainerDwellTimeDistribution\n ])", "title": "" }, { "docid": "6c4141d8aaf4e05d2832a56d4e3b54d3", "score": "0.5384438", "text": "def create_table (self):\n try:\n if not (self.mydb.database is None):\n self.tableCreator = TBCreator(str(self.mydb.database),self.hs,self.pt,self.us,self.ps,self.bfred,self)\n self.tableCreator.show()\n else:\n self.application_error(\"myqt_1:\\nNo database selected\")\n except Exception as error:\n self.application_error(error)\n pass", "title": "" }, { "docid": "f9045d0a7e63a88a2eef6d0c36e40dbc", "score": "0.5380286", "text": "def init_models():\n\n if not m.Coffee.exists():\n m.Coffee.create_table(wait=True)\n\n if not m.User.exists():\n m.User.create_table(wait=True)\n\n if not m.Topic.exists():\n m.Topic.create_table(wait=True)", "title": "" }, { "docid": "f1fbe19468acd04f8f18a8f616368bb2", "score": "0.53778374", "text": "def setUp(self):\n self.db = hgtv.models.db\n self.db.create_all()\n path_to_file = os.path.abspath(os.path.dirname(__file__))\n json_data = unicode(open(os.path.join(path_to_file,\n 'test_cascading.json')).read(), 'utf-8')\n data = json.loads(json_data)\n ashow = hgtv.models.Show(**data['show'])\n aseason = hgtv.models.Season(**data['season'])\n video1 = hgtv.models.Video(**data['video'][0])\n video2 = hgtv.models.Video(**data['video'][1])\n self.db.session.add(ashow)\n self.db.session.add(aseason)\n self.db.session.add(video1)\n self.db.session.add(video2)\n self.db.session.commit()", "title": "" }, { "docid": "f4223238da61c6bbdedfe1f1a698163e", "score": "0.5376202", "text": "def create(self):\n for t in Database.tables:\n t.create(checkfirst=True)", "title": "" } ]
195e6e8f38cc9fb6d6dc260e659ea85e
This function sends a register service command to the manager. The manager then logs the event and adds the local endpoint as the specified service (service_name).
[ { "docid": "84212171bf1d9a2ecd0ec53ab0d2bf8a", "score": "0.7933257", "text": "async def request_register_service(self):\n register_service = RegisterService(\n service=self._service_name,\n local_endpoint=self._local_endpoint,\n pid=self.pid,\n depth=self.depth\n )\n\n tcpserver_log.info(\"[%s]: registering with the manager service. Payload: %s\",\n self._service_name,\n register_service.payload\n )\n\n await self._manager_endpoint.write(register_service)", "title": "" } ]
[ { "docid": "248ba0e97e0bdabed00c3d08d6622eaf", "score": "0.73495746", "text": "def _register_with_service_manager(self):\n process_id = os.getpid()\n routed_message = \\\n service_manager_messages.SvcMgrRegisterLocalServiceRoutedMessage(\n send_to='ServiceManager_' + self.node_name,\n service_guid_=self.service_guid,\n node_name_=self.node_name,\n service_name_=self.service_name,\n service_group_name_=self.service_group_name,\n reply_to_queue_=self.control_queue_name,\n process_id_=process_id\n )\n self.publish_routed_message(routed_message)\n self.current_state = (\n service_manager_messages.ServiceState(service_manager_messages.ServiceState.REGISTERED)\n )", "title": "" }, { "docid": "8f05b519a4f6a4c0b1d9d4422ccbbb81", "score": "0.7330222", "text": "def register_service(self):\r\n pass", "title": "" }, { "docid": "cc2440380f8894eb98837cb154fc65af", "score": "0.6856513", "text": "def register_service(self, service):\n self.registered_services.append(service)", "title": "" }, { "docid": "b2e776195b44d4f881b401620e4101bd", "score": "0.6808238", "text": "def post(self, *args, **kwargs):\n\n service_id = uuid.UUID(args[0]) if args else None\n\n params = kwargs['params'] if 'params' in kwargs else {}\n\n service = \\\n self.service.env.register_service(name=kwargs['name'],\n params=params,\n service_id=service_id)\n\n self.set_header(\"Location\", \"/api/v1/workers/%s\" % service.service_id)", "title": "" }, { "docid": "0c2132ff7164260ce691078263344906", "score": "0.6668928", "text": "def register_new_service(self,upstream_url, service_name, route):", "title": "" }, { "docid": "9e0e17ab5129e1ebe68686a1b368816a", "score": "0.65950835", "text": "def register_service(self, service):\n service.register()\n self._services.append(service)", "title": "" }, { "docid": "5dccc6bc00f1a8d738c764bf2ff8b075", "score": "0.64938754", "text": "def register_server(self, serv_addr):\r\n\r\n print('Registering with Administator Server....')\r\n\r\n # the sub-server connects to the the Admin Server and sends a dictionary containing the desired service (in this case to register) and its address\r\n\r\n sock = socket(AF_INET, SOCK_STREAM)\r\n sock.connect(serv_addr)\r\n\r\n # the data is sent in the format of a pickled dictionary\r\n\r\n sock.send(pickle.dumps({'service': 'register', 'addr': self.addr}))", "title": "" }, { "docid": "84193f3e4d0ce4e15d0179931ba2ea6b", "score": "0.64490837", "text": "def _register_service(self) -> None:\n strategy = cast(AliceStrategy, self.context.strategy)\n description = strategy.get_register_service_description()\n oef_search_dialogues = cast(\n OefSearchDialogues, self.context.oef_search_dialogues\n )\n oef_search_msg, _ = oef_search_dialogues.create(\n counterparty=self.context.search_service_address,\n performative=OefSearchMessage.Performative.REGISTER_SERVICE,\n service_description=description,\n )\n self.context.outbox.put_message(message=oef_search_msg)\n self.context.logger.info(\"registering Alice service on SOEF.\")", "title": "" }, { "docid": "8ddb0502d820b523e6091d53d0e1f90e", "score": "0.63754207", "text": "def register_consul(self):\n urls = concat([c['keys'] for c in\n concat([self.caddyfile(s) for s in self.services])])\n urls.extend(self.consul_extra_check_urls(self.services))\n # default for most cases\n svc = json.dumps({\n 'Name': self.name,\n 'Checks': [{\n 'HTTP': url,\n 'Interval': '60s'} for url in urls if url]})\n\n # use a file if provided\n path = '{}/service.json'.format(self.path)\n if exists(path):\n with open(path) as f:\n svc = 
json.dumps(json.loads(f.read()))\n\n url = 'http://localhost:8500/v1/agent/service/register'\n res = requests.put(url, svc)\n if res.status_code != 200:\n msg = 'Consul service register failed: {}'.format(res.reason)\n log.error(msg)\n raise RuntimeError(msg)\n log.info(\"Registered %s in consul\", self.name)", "title": "" }, { "docid": "0b2b66c23921a252bd59bdc80c6f42f9", "score": "0.6025544", "text": "def publish(self):\n if self.service_register:\n for service_name, provider_meta in self.service_register:\n self.service_register.publish(self.address, service_name,\n provider_meta)", "title": "" }, { "docid": "cebd9bcae634ccf40534af5168bf2f3f", "score": "0.59783053", "text": "def register_worker():\n params = {\n \"worker\": settings.WORKER,\n \"services\": json.dumps(SERVICE_LIST)\n }\n url = \"%s/register-worker?%s\" % (settings.MASTER, urllib.urlencode(params))\n http_client = httpclient.HTTPClient()\n try:\n response = http_client.fetch(url)\n LOGGER.info(response.body)\n LOGGER.info(\"register worker ok\")\n except httpclient.HTTPError as e:\n LOGGER.error(\"register worker failed, \" + str(e))\n except Exception as e:\n LOGGER.errpr(\"register worker failed, \" + str(e))\n http_client.close()", "title": "" }, { "docid": "11b24ea5e139478d58eebccd3ad880e3", "score": "0.59191644", "text": "def register(self):\n\t\tcontext = zmq.Context()\n\t\tself._socketOut = context.socket(zmq.REQ)\n\t\tself._socketOut.connect('tcp://127.0.0.1:%s' % self.DAEMON_PORT)\n\t\ttime.sleep(0.01)\t\t\t## Needed for socket initialization\n\t\tpayload = {\n\t\t\t'opr': 'register',\n\t\t\t'port': self.PORT\n\t\t}\n\t\tlogging.debug('socket initiated')\n\t\ttry:\n\t\t\tself._socketOut.send(json.dumps(payload))\n\t\t\t## Pend until ACK signal is received\n\t\t\t# TODO: Insert acknowledgement signal here\n\t\t\tself._socketOut.recv()\n\t\texcept: # Catch ZMQ errors (buffer overflow) and fail silently\n\t\t\tlogging.error('Socket down. 
Failed to send: %s' % json.dumps(payload))", "title": "" }, { "docid": "c5b77602e876c5a6635730a885fc480b", "score": "0.5901077", "text": "def register(self):\n query = {\n 'query': 'REGISTER_STRATEGY',\n 'data': {\n 'strategy_id': self.STRATEGY_ID,\n 'strategy_address': self.HOST,\n 'strategy_port': self.PORT,\n 'mode': self.MODE,\n }\n }\n\n # send registration request to manager in this thread, since it is vital for everything\n message_to_address(self.MANAGER_ADDRESS,\n self.MANAGER_PORT,\n query,\n True,\n self.register_callback)", "title": "" }, { "docid": "6d3e73993e47e286f9e717fba527e6af", "score": "0.5873206", "text": "def register(self, voice=False):\n payload = {\"type\": \"register\", \"username\": self.username, \"voice\": voice}\n self._send_command(payload)", "title": "" }, { "docid": "65594ec0827633ccbde2a29a1dbfe155", "score": "0.58583087", "text": "def register_scheduling_service(service: SchedulingService, server: Server):\n add_SchedulingServiceServicer_to_server(service, server)", "title": "" }, { "docid": "f0a3d60b64256a879f97b58e77c7befa", "score": "0.5834662", "text": "def register(cls, service):\n if not hasattr(service, 'name'):\n logger.error(\"Service does not have a name.\")\n return\n\n if not hasattr(service, 'group'):\n logger.error(\"Service does not belong to a group.\")\n return\n\n key = '.'.join((service.group, service.name))\n\n if key in cls.core:\n logger.warn(\"This service already exists.\")\n else:\n cls.core[key] = service", "title": "" }, { "docid": "8b582749ac2a175b90b1610168312f21", "score": "0.58255404", "text": "def add_register_args(parser):\n # Service registration\n registerp = parser.add_parser('register',\n help='Register a service for this node')\n registerp.add_argument('name', help='The service name')\n registerp.add_argument('-a', '--address', default=None,\n help='Specify an address')\n registerp.add_argument('-p', '--port', default=None, type=int,\n help='Specify a port')\n registerp.add_argument('-s', '--service-id', default=None,\n help='Specify a service ID')\n registerp.add_argument('-t', '--tags', default=[],\n help='Specify a comma delimited list of tags')\n rsparsers = registerp.add_subparsers(dest='ctype',\n title='Service Check Options')\n check = rsparsers.add_parser('check',\n help='Define an external script-based check')\n check.add_argument('interval', default=10, type=int,\n help='How often to run the check script')\n check.add_argument('path', default=None,\n help='Path to the script invoked by Consul')\n httpcheck = rsparsers.add_parser('httpcheck',\n help='Define an HTTP-based check')\n httpcheck.add_argument('interval', default=10, type=int,\n help='How often to run the check script')\n httpcheck.add_argument('url', default=None,\n help='HTTP URL to be polled by Consul')\n rsparsers.add_parser('no-check', help='Do not enable service monitoring')\n ttl = rsparsers.add_parser('ttl', help='Define a duration based TTL check')\n ttl.add_argument('duration', type=int, default=10,\n help='TTL duration for a service with missing check data')", "title": "" }, { "docid": "7c9246f61aa9043ef330a0974e727408", "score": "0.57716095", "text": "def register_service(namespace_id: str, service_name: str, service_description: str, client) -> str:\n response = client.create_service(\n Name=service_name,\n NamespaceId=namespace_id,\n Description=service_description,\n HealthCheckCustomConfig={\n 'FailureThreshold': 1\n },\n Type='HTTP'\n )\n return response.get(\"Service\").get('Id')", "title": "" }, { "docid": 
"4a734263b24efb52518abd0197c01397", "score": "0.576649", "text": "def add_service(self, service_metadata):\n headers = {\n 'Content-Type': 'application/json',\n }\n\n if 'service' not in service_metadata:\n service_metadata = dict(service=service_metadata)\n\n body = jsonutils.dumps(service_metadata)\n print body\n\n res = self.do_request(\"POST\", \"/services\", body=body, headers=headers)\n # Registry returns a JSONified dict(image=image_info)\n data = jsonutils.loads(res.read())\n return data['service']", "title": "" }, { "docid": "b95abefd27b28494f7fc875315fc39ec", "score": "0.5718693", "text": "def register(self):\n msg = '!F*p'\n self._send_message(msg)", "title": "" }, { "docid": "9fbdd97ad2666049c03ea830407b2254", "score": "0.57058495", "text": "def _register_services(self):\n register_services(self.engine, register_event_handler)", "title": "" }, { "docid": "5a9cd81e352826f292f8e34a486dbd0b", "score": "0.5699783", "text": "def EnableService(self, service_name: str) -> None:\n\n services_client = self.GsuApi().services() # pylint: disable=no-member\n name = 'projects/' + self.project_id + '/services/' + service_name\n request = {'name': name}\n response = common.ExecuteRequest(services_client, 'enable', request)[0]\n self._BlockOperation(response)", "title": "" }, { "docid": "fdcd427955fc937be9363a5824bd7516", "score": "0.56827927", "text": "def post_registration(self, component_name, service_name, is_async):\n \n rostype = None\n \n # TODO: I access here an internal member of the parent class to \n # retrieve the ROS type, if set. _services should probably be \n # 'officially' exposed\n method, is_async = self._services[(component_name, service_name)]\n \n if is_async:\n return self.register_ros_action(method, component_name, service_name)\n else:\n return self.register_ros_service(method, component_name, service_name)", "title": "" }, { "docid": "aae64f9f7fcfc1acb3508d73cb266fe5", "score": "0.56807995", "text": "def add_service(self, zeroconf, _type, name):\n info = zeroconf.get_service_info(_type, name)\n if info:\n if self.is_a_zapp_device(info.name):\n _ip = socket.inet_ntoa(info.address)\n self._add_service_handler(name, _ip, info.port)", "title": "" }, { "docid": "8eb7c2420b9157d470f73e3c6fd59200", "score": "0.56787777", "text": "def service_start(name):\n return service_(name, 'start')", "title": "" }, { "docid": "8cca28104b4ccd0bd7cf0673adf1910a", "score": "0.55873555", "text": "async def send_registration(self):\n logger.info(\n \"Station {} sent proposal to register to directory {}\".format(\n self.agent.name, self.agent.directory_id\n )\n )\n\n content = {\n \"jid\": str(self.agent.jid),\n \"type\": self.agent.station_type,\n \"status\": self.agent.status,\n \"position\": self.agent.get_position(),\n \"charge\": self.agent.power,\n }\n msg = Message()\n msg.to = str(self.agent.directory_id)\n msg.set_metadata(\"protocol\", REGISTER_PROTOCOL)\n msg.set_metadata(\"performative\", REQUEST_PERFORMATIVE)\n msg.body = json.dumps(content)\n await self.send(msg)", "title": "" }, { "docid": "f98f78ae3f127389d4918a35bced9fe7", "score": "0.5573379", "text": "def add_service(self, environment, data, session):\n return self.murano.services.post(environment.id,\n path='/', data=data,\n session_id=session.id)", "title": "" }, { "docid": "a9045e971be6f7c4932fadd3077a5e51", "score": "0.5568516", "text": "def send_register_message_to_self(self):\n\n for tenant in RUNTIME.tenants.values():\n for app in tenant.components.values():\n app.vbs_up(self.vbs)\n\n for handler in 
self.server.pt_types_handlers[PT_REGISTER]:\n handler(self.vbs)", "title": "" }, { "docid": "1fb42d8f45f72a573ec828ea07659f65", "score": "0.5552028", "text": "async def register(self, adapter: RemoteDBusObject):\n await adapter.callRemote(\n \"RegisterApplication\",\n self.path,\n {},\n interface=defs.GATT_MANAGER_INTERFACE\n ).asFuture(self.loop)", "title": "" }, { "docid": "7cd0cfb80eaab84f9441627f9bf6959c", "score": "0.5523193", "text": "def start_service(self):\r\n return", "title": "" }, { "docid": "1e17f6a3dd72506426d4d4e6e5d70f4d", "score": "0.55204004", "text": "def register_tac(self) -> None:\n desc = Description(\n {\"version\": self.tac_version_id}, data_model=CONTROLLER_DATAMODEL\n )\n logger.debug(\n \"[{}]: Registering with {} data model\".format(\n self.agent_name, desc.data_model.name\n )\n )\n msg = OEFMessage(\n oef_type=OEFMessage.Type.REGISTER_SERVICE,\n id=1,\n service_description=desc,\n service_id=\"\",\n )\n msg_bytes = OEFSerializer().encode(msg)\n self.mailbox.outbox.put_message(\n to=DEFAULT_OEF,\n sender=self.crypto.public_key,\n protocol_id=OEFMessage.protocol_id,\n message=msg_bytes,\n )", "title": "" }, { "docid": "1c54c7a3a921a1a9a069e1e3a241da9d", "score": "0.5512562", "text": "def Start(self, serviceName):\n debug('ServiceManager::Start')\n command = \"sudo service \" + serviceName + \" start\"\n (output, returnCode) = executeCommand(command)\n debug('ServiceManager::Start command:' + command + \" output: \" + output)\n del output\n return returnCode", "title": "" }, { "docid": "924aad5257354b02273a129febe2fdbc", "score": "0.5464876", "text": "def local_registration(self, endpoint_name, local_client_port):\n self.logger.info(\"Local Registration Started for Endpoint: %s\", endpoint_name)\n \n self_object = Endpoint(endpoint_name, objects=None, lifetime=self.lifetime, version=self.version, \\\n sms_number=self.sms_number, binding_mode=self.binding_mode, \\\n local_ip=self.local_client_ip_, local_port=local_client_port, \\\n listener_ip=self.local_listener_ip, listener_port=self.local_listener_port)\n endpoint = self_object.endpoint\n response = self.registration.register_client(endpoint)\n \n \"\"\" Sending Client Registration to the DM Server \"\"\"\n registration_location = self.send_client_registration(endpoint, local_client_port)\n return endpoint, registration_location", "title": "" }, { "docid": "dd0cc612e0d6575822b9bd87a53d80e9", "score": "0.5449232", "text": "def add_service(self, name):\n s = Service(name)\n self.services[s.name] = s\n return s", "title": "" }, { "docid": "fffe1ab3e2abd51353bcda734e88a25d", "score": "0.5434866", "text": "def send_register_message_to_self(self):\n\n msg = {'version': PT_VERSION,\n 'type': PT_REGISTER,\n 'seq': self.pnfdev.seq,\n 'addr': self.pnfdev.addr}\n\n self.handle_message(msg)", "title": "" }, { "docid": "a759746dbb2e3ae2402840384c83d566", "score": "0.5422603", "text": "def create_service(self, service_name , detail=None):\n return service._create_service(self._get_resource_root(), self.cluster_name , service_name)", "title": "" }, { "docid": "f4baf6eb7b89987a846cce7242caa705", "score": "0.54196894", "text": "def start_registry(name):\n print(\"Starting registry at localhost:5000 named {}\".format(name))\n cmd = \"docker service create --name registry --publish published=5000,target=5000 registry:2\"\n _highlight(\"> {}\".format(cmd))\n ps = subprocess.Popen(cmd, shell=True,\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE)\n stat = ps.poll()\n while stat == None:\n stat = 
ps.poll()\n if stat != 0:\n _highlight(\"\\nFailed to create registry. You may already have another one running:\", fg='red')\n ps = subprocess.Popen(\"docker service ls | grep registry\", shell=True)\n ps.wait()\n if (click.prompt(\"Do you want to use an existing registry? (y/n)\")):\n return click.prompt(\"Enter registry name\", type=str)\n else:\n sys.exit()\n else:\n return name", "title": "" }, { "docid": "3bddb8962ffca04ad9f60ce540e50730", "score": "0.54156053", "text": "def __create_service(self, name=\"\", location=None):\n sms = ServiceManagementService(self.subscription, self.certificate)\n result = sms.check_hosted_service_name_availability(name)\n if not result:\n raise AzureError(\"The service name %s is not available\" % name)\n try:\n result = sms.create_hosted_service(name, name, name, location)\n sms.wait_for_operation_status(result.request_id)\n except Exception as ex:\n raise AzureError(\"The service name %s is not available\" % name)", "title": "" }, { "docid": "d99a4fcae25cfcac26f716a5567a12dd", "score": "0.5391036", "text": "def register_command1():\n data = request.get_data()\n return jsonify(command_registry().register_remote_serialized(data))", "title": "" }, { "docid": "2798c75e3365b15926bc65d6f2fa87a8", "score": "0.5387275", "text": "def register(server):\n service = CustomerService()\n\n return customers_pb2_grpc \\\n .add_CustomerServiceServicer_to_server(service, server)", "title": "" }, { "docid": "bb146234fdd4115eca5c7c82c809a02d", "score": "0.5380711", "text": "def registerDevice(self):\r\n\r\n if request.content_type != 'application/json':\r\n abort(400, 'Expected content_type application/json')\r\n device_to_register = request.json\r\n print(device_to_register)\r\n self.registered_devices.append(device_to_register)\r\n print(\"I received a PUT request to /register with the following data: %s\" % device_to_register)", "title": "" }, { "docid": "f641395156b639ad6521b77e66a238da", "score": "0.5379032", "text": "def startService(self):\n self.clientFactory.connect().addErrback(\n log.err, 'error starting the JSON-RPC client service %r' % (self,))\n service.Service.startService(self)", "title": "" }, { "docid": "1b28f6a9cbe41e1f67df1909137422cc", "score": "0.53683144", "text": "def start_service(self, service_name):\n try:\n if service_name in ['nginx']:\n Log.wait(self, \"Testing Nginx configuration \")\n # Check Nginx configuration before executing command\n sub = subprocess.Popen('nginx -t', stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, shell=True)\n output = sub.communicate()\n if 'emerg' not in str(output):\n Log.valide(self, \"Testing Nginx configuration \")\n Log.wait(self, \"Starting Nginx\")\n service_cmd = ('service {0} start'.format(service_name))\n retcode = subprocess.getstatusoutput(service_cmd)\n if retcode[0] == 0:\n Log.valide(self, \"Starting Nginx \")\n return True\n else:\n Log.failed(self, \"Starting Nginx\")\n else:\n Log.failed(self, \"Testing Nginx configuration \")\n return False\n else:\n service_cmd = ('service {0} start'.format(service_name))\n\n Log.info(self, \"Start : {0:10}\" .format(service_name), end='')\n retcode = subprocess.getstatusoutput(service_cmd)\n if retcode[0] == 0:\n Log.info(self, \"[\" + Log.ENDC + Log.OKGREEN +\n \"OK\" + Log.ENDC + Log.OKBLUE + \"]\")\n return True\n else:\n Log.debug(self, \"{0}\".format(retcode[1]))\n Log.info(self, \"[\" + Log.FAIL +\n \"Failed\" + Log.OKBLUE + \"]\")\n return False\n except OSError as e:\n Log.debug(self, \"{0}\".format(e))\n Log.error(self, \"\\nFailed to start service 
{0}\"\n .format(service_name))", "title": "" }, { "docid": "a627b1f03f0bea62a0f65d4e8b4cf527", "score": "0.5367198", "text": "def addservice(self, service):\n if service is not None:\n self.services.append(service)", "title": "" }, { "docid": "282ade45dc8c9573ce6b3e8c5fcb016d", "score": "0.536251", "text": "def post(self, *args, **kwargs):\n\n addr = self.service.upsert_mcast_service(**kwargs['params'])\n\n self.service.save_service_state()\n\n url = \"/api/v1/projects/%s/apps/%s/mcast_service/%s\" % \\\n (self.service.context.project_id, self.service.service_id, addr)\n\n self.set_header(\"Location\", url)", "title": "" }, { "docid": "42f631f2ca7128fdc6d38b3effd69384", "score": "0.53500754", "text": "def service_name(self, service_name):\n\n self._service_name = service_name", "title": "" }, { "docid": "9b1d12b6eb30ea4d054ff2f1519e984f", "score": "0.5337948", "text": "def register(self):\n address = self.command_groups.group(3)\n host, port = re.findall(r\":(\\w*)\", address)\n runner = {\"host\": host, \"port\": port}\n logger.info(f\"Registering new test runner {host}:{port}\")\n self.server.runners.append(runner)\n self.request.sendall(b\"OK\")", "title": "" }, { "docid": "c175198a24b505090ff274c0e0af9ea2", "score": "0.53294", "text": "def service_name(self, service_name):\n self._service_name = service_name", "title": "" }, { "docid": "955fdf07044f7b83ceffd1946dae2f28", "score": "0.5319161", "text": "def registerToServer(self):\n try:\n self.logger.info(\"Connecting to %s:%d\" % \\\n (self.serverIP, self.serverPort))\n connection = common.RequestMaker(self.serverIP, self.serverPort)\n handshake(connection)\n connection.send(common.MSG_CLIENT_REGISTER + \"\\n\")\n data = connection.readLine()\n if data != common.MSG_ACK:\n raise common.BurnerException, \\\n \"Server doesn't want to register us: \\\"%s\\\"\" % data\n connection.send(self.name + \"\\n\")\n connection.send(str(self.port) + \"\\n\")\n data = connection.readLine()\n if data != common.MSG_ACK:\n raise common.BurnerException, \\\n \"Server doesn't want to accept our registration: \\\"%s\\\"\" \\\n % data\n connection.send(common.MSG_CLIENT_HAS_ISOS + \"\\n\")\n connection.send(str(len(self.isos)) + \"\\n\")\n for iso in self.isos:\n connection.send(iso + \"\\n\")\n data = connection.readLine()\n if data != common.MSG_ACK:\n raise common.BurnerException, \\\n \"Server doesn't like our isos: \\\"%s\\\"\" % data\n self.logger.info(\"Registered to server.\")\n connection.close()\n except common.BurnerException, e:\n self.logger.error(e)\n sys.exit(1)\n except socket.error, e:\n self.logger.error(e)\n sys.exit(1)", "title": "" }, { "docid": "e1ffdddf5dd1152dfc928fe41522a147", "score": "0.53071195", "text": "def register(meta_ip, meta_port, data_ip, data_port):\n\n\t# Establish connection\n\t\n\t# Fill code\t\n\t\n\t# We create the socket we will be using\n\t\n\tcreated_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\n\t# We use the try to attempt to connect to the server\n\t# We prepare an error handle just in case the connection does not occur.\n\ttry:\n\t\tcreated_socket.connect((meta_ip, meta_port))\n\t\t\n\texcept socket.error, e:\n\n\t\tprint (\"Connection to the metadata server failed. Error: \" + str(e))\n\n\t\tsys.exit(1)\n\n\t#if no error occured connection was succesful!\n\n\tprint( \"Connection to metadata server was succesful.\" )\n\n\n\t# We use the following try to determine what happens with the registration. 
If there was an error, if it was duplicate or if it worked!\n\ttry:\n\t\tresponse = \"NAK\"\n\t\t\n\t\tcreate_packet = Packet()\n\t\t\n\t\twhile response == \"NAK\":\n\t\t\n\t\t\tcreate_packet.BuildRegPacket(data_ip, data_port)\n\t\t\t\n\t\t\tcreated_socket.sendall(create_packet.getEncodedPacket())\n\t\t\t\n\t\t\tresponse = created_socket.recv(1024)\n\n\t\t\tif response == \"DUP\":\n\t\t\t\tprint \"Duplicate Registration\"\n\t\t\t\t\n\t\t\t# we add the response condition \"ACK\"\n\t\t\t\n\t\t\tif response == \"ACK\":\n\t\t\t print (\"Registration process was completed!\")\n\n\t\t \tif response == \"NAK\":\n\t\t \t\tprint(\"error here\")\n\t\t\t\tprint \"Registratation ERROR\"\n\n\tfinally:\n\t\tcreated_socket.close()", "title": "" }, { "docid": "f2fa2b8b75b216b537a5033f6ba8c66f", "score": "0.52748775", "text": "def register_to_cloud():\n log(\"Starting nova-compute service\", DEBUG)\n service_resume('nova-compute')\n current_status = status_get()\n if current_status[0] == WORKLOAD_STATES.BLOCKED.value and \\\n current_status[1] == UNIT_REMOVED_MSG:\n status_set(WORKLOAD_STATES.ACTIVE, 'Unit is ready')\n\n nova_compute_hooks.update_status()\n function_set({\n 'command': 'openstack compute service list',\n 'message': \"Nova compute service started. It should get registered \"\n \"with the cloud controller in a short time. Use the \"\n \"'openstack' command to verify that it's registered.\"\n })", "title": "" }, { "docid": "c7c8d51d0aa4420c3328ed9caeab3b71", "score": "0.5238065", "text": "def add_service(self, obj):\n self.service[ obj.get_key() ] = obj", "title": "" }, { "docid": "753134bb654bb2b3562933f7f31a5bdc", "score": "0.5236541", "text": "def activate(self, service):\n self.log.info(\"Adding service %s to environment %s\" % (service._p_name, self.name))\n self.services.add(service)\n self.clear_cache()", "title": "" }, { "docid": "c0b9972ce2d418ce3953237402a9656e", "score": "0.5230066", "text": "def register(name):\n\n def decorator(func):\n # Perform the registration\n SubwayDaemon._register(name, func)\n return func\n\n return decorator", "title": "" }, { "docid": "7936538f0c90186b37256f8f51205ce6", "score": "0.5218187", "text": "def async_register(\n opp: OpenPeerPower, register: system_health.SystemHealthRegistration\n) -> None:\n register.async_register_info(system_health_info)", "title": "" }, { "docid": "c1a86cdea983c7ceeed1ba0130516c04", "score": "0.5213002", "text": "def configure_service(self, service_name, config_data):\n topic = ClaraUtils.build_topic(CConstants.SERVICE, service_name)\n\n config_data.metadata.action = xMsgMeta.CONFIGURE\n\n msg = self.base.serialize(topic, config_data, self.datatypes)\n self.base.send(msg)", "title": "" }, { "docid": "47670d9bd26ce65239035cdafb1d4509", "score": "0.5209425", "text": "def start_service(name, argv = None):\n with win32.OpenSCManager(\n dwDesiredAccess = win32.SC_MANAGER_CONNECT\n ) as hSCManager:\n with win32.OpenService(hSCManager, name,\n dwDesiredAccess = win32.SERVICE_START\n ) as hService:\n win32.StartService(hService)", "title": "" }, { "docid": "11d02f50f4390b8a032c1d4781e156aa", "score": "0.5199172", "text": "def start_service():\n parser = argparse.ArgumentParser()\n parser.add_argument('service', help='The service to start')\n args = parser.parse_args()\n\n monit_operator = MonitOperator()\n monit_retry = retry(max_retries=5, retry_on_exception=DEFAULT_RETRIES)\n send_w_retries = monit_retry(monit_operator.send_command_sync)\n send_w_retries(args.service, 'start')", "title": "" }, { "docid": 
"24820128764c1f0843024d5c133af196", "score": "0.51906586", "text": "def register(self, **kwargs):\n pass", "title": "" }, { "docid": "9f881e434c7fa36d52f97d0a94073ca5", "score": "0.51832336", "text": "def register(cls, context, hostname, type='conductor',\n update_existing=False):\n try:\n db_cond = cls.dbapi.register_service(\n {'hostname': hostname, 'type': type,\n 'workers': CONF.conductor.workers if\n type == 'conductor' else 1},\n update_existing=update_existing)\n except Exception as e:\n if 'Duplicate entry' in e.message:\n raise exception.ServiceAlreadyRegistered(\n service='%s_%s' % (hostname, type))\n else:\n raise e\n return cls._from_db_object(cls(context), db_cond)", "title": "" }, { "docid": "bc6af2d051e83c293deaec1b289cfd4f", "score": "0.5182276", "text": "def add_service(self, service, migrated=False):\n if not migrated:\n self.incoming_services += 1\n service.vehicle.allotted_fog_node = self\n print(\n f\"Service {service.id} is assigned to fog node {self.id}\")\n self.in_service = True\n service.curr_power_consumed = TRANSMIT_POWER_FN2VEHICLE if self.cache_array[\n service.content_type] else (TRANSMIT_POWER_FN2CLOUD + TRANSMIT_POWER_FN2VEHICLE)\n self._vehicle_services[service.vehicle.id] = {\n \"service\": service,\n \"process\": self.env.process(\n self._serve_vehicle(self.env, service, migrated))\n }", "title": "" }, { "docid": "dc9d91a60384584a0ea773eb2ff58242", "score": "0.51771855", "text": "def registerObjectAsService(strServiceName=\"ABCDK_NONE_SERVICE\", theObject=None, bForce=False):\n strMinQiVersion = '2.0.0.14'\n if not(hasattr(qi, '__version__')) or qi.__version__ < strMinQiVersion:\n if hasattr(qi, '__version__'):\n strCurrentVersion = qi.__version__\n else:\n strCurrentVersion = \"Unknowned\"\n print(\"abcdk.naoqitools.registerObjectAsService: this functionnality is available only in naoqi-sdk > %s, your version is %s\" % (strMinQiVersion, strCurrentVersion))\n return\n #app = qi.Application()\n session = qi.Session()\n import config\n strUri = \"\".join([\"tcp://\", config.strDefaultIP, \":\", str(config.nDefaultPort)])\n try:\n session.connect(strUri)\n except:\n print(\"failed to connect to %s\" % strUri)\n return\n if bForce:\n aListServices = session.services()\n aServicesToStop = [service for service in aListServices if service['name'] == strServiceName] # it's a dict only in last naoqi version qi > 2.0.0.14\n for service in aServicesToStop:\n print(\"Unregistering services %s (id: %s)\" % (service['name'], service['id']))\n session.unregisterService(service['id'])\n session.registerService(strServiceName, theObject)\n print(\"abcdk.naoqitools.registerObjectAsService: service is registered as %s\" % strServiceName)\n #qi.async(app.run) # we run it in background using async\n print(\"has been run in background\")\n time.sleep(2)\n print(\"fin du time sleep\")\n return session", "title": "" }, { "docid": "c1ed475e5c076bb72c0f2d95e757e4de", "score": "0.51669437", "text": "def register(self, computer_title, account_name, registration_password=\"\",\n tags=\"\", access_group=\"\", exchanger_factory=Exchanger):\n # The message is built.\n message_header = {\"type\": \"register\",\n \"api\": SERVER_API,\n \"timestamp\": time.time()}\n\n # The registration message itself.\n registration = {\"account_name\": account_name,\n \"computer_title\": computer_title}\n if registration_password:\n registration.update(\n {\"registration_password\": registration_password})\n if tags:\n registration.update({\"tags\": tags})\n if access_group:\n 
registration.update({\"access_group\": access_group})\n\n # Building the actual message now, by fusing both dicts above.\n message = {}\n message.update(registration)\n message.update(message_header)\n\n # Let's add the message to the message store.\n self.main_store.pile_message(message)\n\n # We would normally wait for the exchanger to be scheduled, but it\n # is not worth it to wait for anything if we're not registered, so\n # let's just run in-thread:\n exchange = exchanger_factory(self.config, main_store=self.main_store)\n exchange.run()", "title": "" }, { "docid": "2b7a66bcad5559998e66643d6cd0b027", "score": "0.5153031", "text": "def register_command(data):\n return jsonify(command_registry().register_remote_serialized(data.encode(\"ascii\")))", "title": "" }, { "docid": "3cefec764eeb0ae85fcdced90bf84568", "score": "0.51494443", "text": "def configure(self, service_name):\n # type: (str) -> None\n pass", "title": "" }, { "docid": "9c1dc98de89d08ac2d83de9aea9a639e", "score": "0.5145297", "text": "def add_service(self, name, function):\n service = _Service(\n self, name, function\n )\n with self.lock:\n self._services.append(service)\n service.run() # _HivemindAbstractObject\n return service", "title": "" }, { "docid": "f52eb00c9a40d0f9ed1dc160d6624bae", "score": "0.5138765", "text": "def get_service(self, service_name):", "title": "" }, { "docid": "6f6c123de34dae30a4702b834420d3fb", "score": "0.51358587", "text": "def register(\n name: str,\n author: str,\n version: str,\n license: str,\n description: str,\n shutdown_function: str,\n charset: str,\n):\n pass", "title": "" }, { "docid": "c969198d926d542e39e0c8f26f368f93", "score": "0.51290894", "text": "def service():", "title": "" }, { "docid": "2024692208ea8c8abe2c1f0dfbcc74fc", "score": "0.5114249", "text": "def do_register(argv):\n\n usage = ('%prog register [--all] participant..\\n')\n description = \"%prog registers a SkyNET participant in the BOSS engine.\"\n\n opts, args = default_oparser(argv, usage, description)\n reg = Registrar()\n\n for _ , (name, conf) in get_participant_confs().items():\n if name in args or opts.all:\n name_in = \"name\"\n if conf.has_option(\"participant\", \"regexp\"):\n name_in = \"regexp\"\n reg.register(conf.get(\"participant\", name_in),\n conf.get(\"participant\", \"queue\"))\n\n reg.chan.close()\n reg.conn.close()", "title": "" }, { "docid": "a288602c7d009a9d96f6c60c9a9949f5", "score": "0.51111794", "text": "def create_mgmt_service(self, service_setup_info):\n return self._put(\"service\", ApiService, data=service_setup_info)", "title": "" }, { "docid": "cfca3320ff8c583c33939468777285f2", "score": "0.51105464", "text": "def register(self,\n registrationData,\n ):\n pass", "title": "" }, { "docid": "cc784c5c1abd49a0fd979229a84a74a5", "score": "0.5108325", "text": "def _add(self, info: ServiceInfo) -> None:\r\n if info.key in self._services:\r\n raise ServiceNameAlreadyRegistered\r\n\r\n self._services[info.key] = info\r\n self.types.setdefault(info.type.lower(), []).append(info.key)\r\n self.servers.setdefault(info.server_key, []).append(info.key)", "title": "" }, { "docid": "0f15e3bc303af398b0b8dcb15641dd9f", "score": "0.5107069", "text": "def _register_agent(self) -> None:\n strategy = cast(AliceStrategy, self.context.strategy)\n description = strategy.get_location_description()\n oef_search_dialogues = cast(\n OefSearchDialogues, self.context.oef_search_dialogues\n )\n oef_search_msg, _ = oef_search_dialogues.create(\n counterparty=self.context.search_service_address,\n 
performative=OefSearchMessage.Performative.REGISTER_SERVICE,\n service_description=description,\n )\n self.context.outbox.put_message(message=oef_search_msg)\n self.context.logger.info(\"registering Alice on SOEF.\")", "title": "" }, { "docid": "0d96b3e128cc3a6cabf9fa2de7a643c5", "score": "0.51054037", "text": "def register(self, username, password):\n self.send_text('Register request,'+ username + ',' + password)\n response = self.recieve_text()\n time.sleep(1)\n if response == 'Created':\n return True\n else:\n return False", "title": "" }, { "docid": "124271d28a700d1c136eb7b3d492b139", "score": "0.5104156", "text": "def add_device_service(call):\n gateway = call.data.get(ATTR_GW_MAC)\n gateway.write_to_hub(gateway.sid, join_permission=\"yes\")\n hass.components.persistent_notification.async_create(\n \"Join permission enabled for 30 seconds! \"\n \"Please press the pairing button of the new device once.\",\n title=\"Xiaomi Aqara Gateway\",\n )", "title": "" }, { "docid": "1aea427ce235265f55f4b42cf4be02f0", "score": "0.50996155", "text": "def axmon_registry_test():\n (server, username, password) = _get_required_arguments('hostname', 'username', 'password')\n axmon.add_registry(server, username, password, save=False)\n return jsonify(result=\"ok\")", "title": "" }, { "docid": "f8ee77c43f9f0761f707073a72ac980a", "score": "0.5097487", "text": "async def register(self, ctx):\n manager = MessageManager(ctx)\n template = \"https://www.bungie.net/en/OAuth/Authorize?client_id={}&response_type=code&state={}\"\n auth_url = template.format(self.bot.bungie_client_id, ctx.author.id)\n bliz_name, xbox_name, psn_name, bliz_id, xbox_id, psn_id = (None,)*6\n\n if not isinstance(ctx.channel, discord.abc.PrivateChannel):\n await manager.send_message(\"Registration instructions have been messaged to you.\")\n\n # Prompt user with link to Bungie.net OAuth authentication\n e = discord.Embed(colour=constants.BLUE)\n e.title = \"Click Here to Register\"\n e.url = auth_url\n e.description = (\"Click the above link to register your Bungie.net account with Spirit. \"\n + \"Registering will allow Spirit to access your connected Destiny \"\n + \"2 accounts.\")\n registration_msg = await manager.send_private_embed(e)\n\n # Wait for user info from the web server via Redis\n res = await self.redis.subscribe(ctx.author.id)\n tsk = asyncio.ensure_future(self.wait_for_msg(res[0]))\n try:\n user_info = await asyncio.wait_for(tsk, timeout=120)\n except asyncio.TimeoutError:\n await manager.send_private_message(\"I'm not sure where you went. We can try this again later.\")\n await registration_msg.delete()\n return await manager.clean_messages()\n await ctx.author.dm_channel.trigger_typing()\n\n # Save OAuth credentials and bungie ID\n bungie_id = user_info.get('membership_id')\n access_token = user_info.get('access_token')\n refresh_token = user_info.get('refresh_token')\n self.bot.db.update_registration(bungie_id, access_token, refresh_token, ctx.author.id)\n\n # Fetch platform specific display names and membership IDs\n try:\n res = await self.bot.destiny.api.get_membership_data_by_id(bungie_id)\n except:\n await manager.send_private_message(\"I can't seem to connect to Bungie right now. Try again later.\")\n await registration_msg.delete()\n return await manager.clean_messages()\n\n if res['ErrorCode'] != 1:\n await manager.send_private_message(\"Oops, something went wrong during registration. 
Please try again.\")\n await registration_msg.delete()\n return await manager.clean_messages()\n\n if not self.user_has_connected_accounts(res):\n await manager.send_private_message(\"Oops, you don't have any public accounts attached to your Bungie.net profile.\")\n await registration_msg.delete()\n return await manager.clean_messages()\n\n for entry in res['Response']['destinyMemberships']:\n if entry['membershipType'] == 4:\n bliz_name = entry['displayName']\n bliz_id = entry['membershipId']\n elif entry['membershipType'] == 1:\n xbox_name = entry['displayName']\n xbox_id = entry['membershipId']\n elif entry['membershipType'] == 2:\n psn_name = entry['displayName']\n psn_id = entry['membershipId']\n\n bungie_name = res['Response']['bungieNetUser']['displayName']\n self.bot.db.update_display_names(ctx.author.id, bungie_name, bliz_name, xbox_name, psn_name)\n self.bot.db.update_membership_ids(ctx.author.id, bliz_id, xbox_id, psn_id)\n\n # Get references to platform emojis from Spirit Support server\n platform_reactions = []\n if bliz_name:\n platform_reactions.append(self.bot.get_emoji(constants.BNET_ICON))\n if xbox_name:\n platform_reactions.append(self.bot.get_emoji(constants.XBOX_ICON))\n if psn_name:\n platform_reactions.append(self.bot.get_emoji(constants.PS_ICON))\n\n # Display message with prompts to select a preferred platform\n e = self.registered_embed(bungie_name, bliz_name, xbox_name, psn_name)\n platform_msg = await manager.send_private_embed(e)\n await registration_msg.delete()\n\n # If only one account is connected, set it as preferred (don't display reactions)\n platform_names = (bliz_name, xbox_name, psn_name)\n if self.num_non_null_entries(platform_names) == 1:\n\n if bliz_name:\n platform_id = 4\n elif xbox_name:\n platform_id = 1\n else:\n platform_id = 2\n\n self.bot.db.update_platform(ctx.author.id, platform_id)\n return await manager.clean_messages()\n\n func = self.add_reactions(platform_msg, platform_reactions)\n self.bot.loop.create_task(func)\n\n def check_reaction(reaction, user):\n if reaction.message.id == platform_msg.id and user == ctx.author:\n for emoji in platform_reactions:\n if reaction.emoji == emoji:\n return True\n\n # Wait for platform reaction from user\n try:\n reaction, user = await self.bot.wait_for('reaction_add', timeout=120.0, check=check_reaction)\n except asyncio.TimeoutError:\n await platform_msg.delete()\n await manager.send_private_message(\"I'm not sure where you went. 
We can try this again later.\")\n return await manager.clean_messages()\n\n # Save preferred platform\n platform = constants.PLATFORMS.get(reaction.emoji.name)\n self.bot.db.update_platform(ctx.author.id, platform)\n\n # Update message with preferred platform\n e = self.registered_embed(bungie_name, bliz_name, xbox_name, psn_name, footer=True, platform=platform)\n await platform_msg.edit(embed=e)\n\n return await manager.clean_messages()", "title": "" }, { "docid": "2f77b17d5fc29cae87b1cbcb529498c4", "score": "0.50969243", "text": "def register(name, email, password):\n logger = logging.getLogger(__name__)\n logger.debug('Registering user...')\n logger.debug('Name: %s, Email: %s', name, email)\n\n # click.echo(json.dumps({'name': name, 'email': email}))\n result = user_cmd.register(name, email, password)\n click.echo(json.dumps(result))", "title": "" }, { "docid": "6fccd8de9c66fb1955a1484826d22d9d", "score": "0.5092526", "text": "def async_new_service(url_dict: dict, user_id: int, register_group_id: int, register_for_organization_id: int,\n external_auth: dict):\n # create ExternalAuthentication object\n if external_auth is not None:\n external_auth = ExternalAuthentication(\n username=external_auth[\"username\"],\n password=external_auth[\"password\"],\n auth_type=external_auth[\"auth_type\"],\n )\n\n # get current task id\n curr_task_id = async_new_service.request.id\n\n # set progress for current task to 0\n if curr_task_id is not None:\n task_helper.update_progress(async_new_service, 0)\n\n # restore objects from ids\n user = MrMapUser.objects.get(id=user_id)\n url_dict[\"service\"] = service_helper.resolve_service_enum(url_dict[\"service\"])\n url_dict[\"version\"] = service_helper.resolve_version_enum(url_dict[\"version\"])\n\n register_group = MrMapGroup.objects.get(id=register_group_id)\n if utils.resolve_none_string(str(register_for_organization_id)) is not None:\n register_for_organization = Organization.objects.get(id=register_for_organization_id)\n else:\n register_for_organization = None\n\n try:\n t_start = time.time()\n service = service_helper.create_service(\n url_dict.get(\"service\"),\n url_dict.get(\"version\"),\n url_dict.get(\"base_uri\"),\n user,\n register_group,\n register_for_organization,\n async_task=async_new_service,\n external_auth=external_auth\n )\n\n # update progress\n if curr_task_id is not None:\n task_helper.update_progress(async_new_service, PROGRESS_STATUS_AFTER_PARSING)\n\n # get db object\n if curr_task_id is not None:\n pending_task = PendingTask.objects.get(task_id=curr_task_id)\n # update db pending task information\n pending_task.description = json.dumps({\n \"service\": service.metadata.title,\n \"phase\": \"Persisting\",\n })\n pending_task.save()\n\n # update progress\n if curr_task_id is not None:\n task_helper.update_progress(async_new_service, 95)\n\n # after service AND documents have been persisted, we can now set the service being secured if needed\n if external_auth is not None:\n service.metadata.set_proxy(True)\n\n metadatas = Metadata.objects.filter(pk=service.metadata.pk)\n sub_elements = service.get_subelements().select_related('metadata')\n for sub_element in sub_elements:\n metadatas |= Metadata.objects.filter(pk=sub_element.metadata.pk)\n metadatas |= sub_element.metadata.get_related_dataset_metadatas()\n\n service_logger.debug(EXEC_TIME_PRINT % (\"total registration\", time.time() - t_start))\n user_helper.create_group_activity(service.metadata.created_by, user, SERVICE_REGISTERED, service.metadata.title)\n\n if curr_task_id 
is not None:\n task_helper.update_progress(async_new_service, 100)\n\n # delete pending task from db\n if curr_task_id is not None:\n pending_task = PendingTask.objects.get(task_id=curr_task_id)\n pending_task.delete()\n\n except (BaseException, XMLSyntaxError, XPathEvalError, InvalidURL, ConnectionError) as e:\n url = url_dict['base_uri'] + f\"SERVICE={url_dict['service'].value}&VERSION={url_dict['version'].value}&request={url_dict['request']}\"\n error_msg = f\"Error while trying to register new resource for url: {url}\\n\"\n\n response = requests.get(url)\n if response.status_code == 200:\n cap_doc = \"-----------------------------------------------------------\\n\"\\\n f\"We could receive the following capabilities document:\\n{response.text}\"\n error_msg += cap_doc\n\n service_logger.error(msg=error_msg)\n service_logger.exception(e, stack_info=True, exc_info=True)\n\n if curr_task_id is not None:\n pending_task = PendingTask.objects.get(task_id=curr_task_id)\n\n register_group = MrMapGroup.objects.get(id=register_group_id)\n error_report = ErrorReport(message=error_msg,\n traceback=traceback.format_exc(),\n created_by=register_group)\n error_report.save()\n\n descr = json.loads(pending_task.description)\n pending_task.description = json.dumps({\n \"service\": descr.get(\"service\", None),\n \"info\": {\n \"current\": \"0\",\n },\n \"exception\": e.__str__(),\n \"phase\": \"ERROR: Something went wrong! Click on generate error report to inform your serveradmin about this error.\",\n })\n pending_task.error_report = error_report\n pending_task.save()\n\n raise e", "title": "" }, { "docid": "de4ef6d3eed084af15ed029ad4c1429c", "score": "0.5091047", "text": "def register(msg_type_name, msg_spec):\n if VERBOSE:\n print('Register msg %s' % msg_type_name)\n REGISTERED_TYPES[msg_type_name] = msg_spec", "title": "" }, { "docid": "e513f8471500531d26a31cc26e470225", "score": "0.508771", "text": "def WriteRegister(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "3aa514ef20879424bb0c713626e17bf7", "score": "0.50851274", "text": "def create_service():\n check(add_document('services', service_schema))", "title": "" }, { "docid": "ddfdc468884d747d9bbf06182c3940e6", "score": "0.5062733", "text": "def add(self, name, **kwargs):\n\n if self.exists(name):\n raise ServiceExists(\"Service name already exists\")\n\n for k in kwargs.keys():\n if k not in self.defaults.keys():\n raise ServiceNotValid(\n \"Invalid service parameter '{}'\".format(k))\n\n service = self.defaults.copy()\n service.update(kwargs)\n\n # validate service\n try:\n self.validate(service)\n except Exception as e:\n raise ServiceNotValid(e.message)\n\n self._services[name] = service\n self._rule_deps[name] = []", "title": "" }, { "docid": "1461eeb0e7696295f4fc7ec714e5b07e", "score": "0.5061693", "text": "def _handle_register(self, msg_root):\n log.debug(\n \"Handling register: {}\".format(ElementTree.tostring(msg_root))\n )\n token = msg_root.attrib['token']\n renderer = self._lookup_executing_renderer_with_token(token)\n if not renderer:\n lmsg = 'Spoof handler registration attempt: token is {token}'\n log.warning(lmsg.format(token=token))\n else:\n renderer.subprocess.register()\n\n # Mark the renderer as 'registered'.\n with self._renderers_lock:\n renderer.has_registered = True\n\n reply = self._encapsulate_reply(\n self._generate_params(renderer.subprocess.handler_params)\n 
)\n\n log.debug(\"Registered handler {}\".format(msg_root))\n\n return reply", "title": "" }, { "docid": "f94a888fe138a60781fec18306d6d116", "score": "0.5052623", "text": "def add_services_args(parser):\n # Service registration\n registerp = parser.add_parser('services',\n help='List services for this node')\n\n registerp.add_argument('-i', '--indent', type=int, default=None, help='The indent level for output')", "title": "" }, { "docid": "7cb34a2e33f5599bf86e1cec4beec946", "score": "0.50498444", "text": "def add_service(self, service):\n sid = service.id()\n if sid in self.services:\n e_service = self.services[sid]\n assert e_service.user == service.user and e_service.pswd == service.pswd\n e_service.hosts |= service.hosts # union the hosts\n else:\n self.services[service.id()] = service", "title": "" }, { "docid": "0efe530a16e321a25c8e4a9177efb090", "score": "0.50466996", "text": "def _send_service_status_broadcast(self):\n service_status_broadcast = service_manager_messages.ServiceStatusBroadcast(\n self.node_name,\n self.service_guid,\n self.service_name,\n self.service_group_name,\n self.control_queue_name,\n self.current_state\n )\n self.publish_routed_message(service_status_broadcast)", "title": "" }, { "docid": "175784385699ea04e92550fe63df2d90", "score": "0.5042212", "text": "def register(update, context):\n chat_id = update.effective_chat.id\n with Session() as session:\n if session.query(User).filter(User.chat_id == chat_id).count() > 0:\n update.message.reply_text(Messages.ALREADY_REGISTERED)\n return\n\n try:\n uid = int(context.args[0])\n except IndexError:\n update.message.reply_text('Use /register <UID> to begin.')\n return\n except ValueError:\n update.message.reply_text('UID must be numeric.')\n return\n\n context.user_data['uid'] = uid\n logging.info(f'Registration started for UID: {uid}')\n update.message.reply_text(Messages.WELCOME.format(uid))", "title": "" }, { "docid": "13fae7182dac3b1e2d73038077fba51c", "score": "0.5038642", "text": "def register(type):\n if not request.forms.endpoint:\n abort(400, \"Missing endpoint\")\n\n if request.forms.endpoint == DEFAULT_GCM_ENDPOINT:\n if not request.forms.subscription_id:\n abort(400, \"Missing subscription_id\")\n registration = Registration.get_or_insert(request.forms.subscription_id,\n type=type,\n service=PushService.GCM)\n else:\n # Assume unknown endpoints are Firefox Simple Push.\n # TODO: Find a better way of distinguishing these.\n registration = Registration.get_or_insert(request.forms.endpoint,\n type=type,\n service=PushService.FIREFOX)\n registration.put()\n response.status = 201\n return \"\"", "title": "" }, { "docid": "a85d62de2606dfe442cd14d6b4188703", "score": "0.50365096", "text": "def register(self, device_token, alias_id=None, tag1=None, tag2=None, tag3=None, tag4=None, tag5=None, lat=None, lng=None):\n\t\t\n\t\n url = DEVICE_TOKEN_REGISTER_URL\n payload = {}\n\tpayload['device_token'] = device_token\n\tpayload['tag1'] = tag1\n\tpayload['tag2'] = tag2\n\tpayload['tag3'] = tag3\n\tpayload['tag4'] = tag4\n\tpayload['tag5'] = tag5\n\tpayload['lat'] = lat\n\tpayload['lng'] = lng\n\n if alias_id is not None:\n payload['alias_id'] = alias_id\n #body = json.dumps(payload)\n content_type = 'application/json'\n\n status, response = self._request('POST', payload, url, content_type)\n if not status in (200, 201):\n raise PushnotifFailure(status, response)\n return status == 201", "title": "" }, { "docid": "a08223a27bde0073be9505b168eddd34", "score": "0.50344646", "text": "def register_v1():\n\n conf = 
load_conf(conf_file)\n consul = consul_client(conf)\n\n fqdn = request.args.get('fqdn')\n token = hashlib.sha256(fqdn).hexdigest()\n key = conf['consul']['prefix'] + 'registered/' + fqdn + '/token'\n\n try:\n consul.kv.put(key, token)\n return token, 201\n except Exception as e:\n print e\n return \"Error while creating the key in Consul\", 500", "title": "" }, { "docid": "5a025fe75dc9057618edb43fe20f9737", "score": "0.50276816", "text": "def register(self, name, queue=None):\n if not queue:\n queue = name\n self.launch(\n \"\"\"\n Ruote.process_definition do\n sequence do\n boss_register\n end\n end\n \"\"\", {\"name\": name, \"queue\": queue})", "title": "" }, { "docid": "a8f8a33cec7b9f05fc8e13e3d66b35c6", "score": "0.50171757", "text": "def start_service(self, name):\n # check if the service is available\n if name not in self.available_services:\n logging.error('Service \"%s\" is not available.')\n return\n\n # check if the service is already running\n service = self.available_services[name]\n for running in self.running_services.values():\n if running['name'] == name:\n logging.info(\"Service '%s' is already running\", running['name'])\n return\n\n # figure out yaml path\n path = os.path.join(service['path'], service['yaml'])\n logging.debug('Using yaml file path %s', path)\n\n # find the correct (or default, or an available) port\n port = 8443\n with open(path) as spec_file:\n yaml_spec = yaml.safe_load(spec_file)\n logging.debug('Getting service port from specification.')\n try:\n port = int(yaml_spec.get('host', '').split(':')[-1])\n except ValueError:\n logging.warning(('Could not get port from specification, using '\n 'default value of %s'), port)\n while port in self.running_services:\n other = self.running_services[port]['name']\n logging.warning(('Service %s is set to run on port %s, but port is '\n 'used by %s. 
Trying %s.'), service['name'], port,\n other, port+1)\n port += 1\n\n # start the service\n logging.info('Starting %s on port %s', service['name'], port)\n logging.debug('Using label: %s_%s', service['name'], port)\n command = ['docker', 'run', '-p', '{port}:{port}'.format(port=port),\n '--label', service['label'],\n '-v',\n '%s:/opt/imposter/config' % os.path.abspath(service['path']),\n 'outofcoffee/imposter-openapi',\n '--plugin',\n 'com.gatehill.imposter.plugin.openapi.OpenApiPluginImpl',\n '--configDir', '/opt/imposter/config',\n '--listenPort', str(port)]\n\n logging.debug(\"Running command %s\", \" \".join(command))\n process = Popen(command, stdout=PIPE, stderr=PIPE)\n self.running_services[port] = {'name':service['name'], 'cmd':command,\n 'proc':process}", "title": "" }, { "docid": "5402c123548c7a45d13b58bfd9b2fdd8", "score": "0.5011174", "text": "def track_systemd_service(name):\n service_name = \"{0}.service\".format(name).lower()\n if CGroups.enabled() and not CGroupsTelemetry.is_tracked(service_name):\n cgroup = CGroups.for_systemd_service(service_name)\n tracker = CGroupsTelemetry(service_name, cgroup=cgroup)\n CGroupsTelemetry._tracked[service_name] = tracker", "title": "" }, { "docid": "2aa7dd9cdb2a6d8ea15160cc4189a738", "score": "0.50091964", "text": "def register(self):\n raise NotImplementedError('Must define a register method!')", "title": "" }, { "docid": "e51362f31c5b35dd0258c125ae5c7aa3", "score": "0.4996892", "text": "def service(self, service):\n\n self._service = service", "title": "" }, { "docid": "bfde0e65414b6b0424473c3adfc3cebb", "score": "0.49965164", "text": "def enableService(\n ServiceName: str,\n DomainName: str = None,\n UserName: str = None,\n Password: str = None,\n SecurityDomain: str = \"Native\",\n Gateway: str = None,\n ResilienceTimeout: int = None\n) -> namedtuple(\"EnableServiceResult\", ['retcode', 'stdout', 'stderr']):\n subcmd = \"EnableService\"\n options = [\"DomainName\", \"UserName\", \"Password\", \"SecurityDomain\", \"Gateway\", \"ResilienceTimeout\", \"ServiceName\", ]\n\n cmd = _checking_infacmd_env_and_ret_base_cmd(domainname=DomainName, username=UserName, password=Password,\n cmd=base_cmd, subcmd=subcmd)\n options_value_dict = locals()\n for option in options:\n cmd = _assemble_command_options(cmd, option, options_value_dict.get(option))\n\n mainLogger.debug(cmd)\n res = run_cmd(cmd, env=os.environ)\n\n stderr = res.stderr\n stdout = res.stdout\n if res.retcode != 0:\n stderr += stdout\n\n mainLogger.info(res)\n enableServiceResult = namedtuple(\"EnableServiceResult\", ['retcode', 'stdout', 'stderr'])\n return enableServiceResult(res.retcode, stdout, stderr)", "title": "" }, { "docid": "16962baf7b93edd0cdb2139f11a08b21", "score": "0.49900204", "text": "def add_service(self, service):\n self._children.append(service)", "title": "" }, { "docid": "551a061912703cef91a3c2cfb18b9c6a", "score": "0.49893174", "text": "def add_to_server(self, server):\n add_MobilityServiceServicer_to_server(self, server)", "title": "" } ]
b005e7c1fdfde1419f05fee06fbc5412
__init__ Function Initializes Constant Attributes for BitTimeConv Class
[ { "docid": "71881f03bec44c26ffe3d8dc64338714", "score": "0.58839476", "text": "def __init__(self):\r\n\r\n # Initialize Class Attributes\r\n self.START = timedelta(hours=3)\r\n self.END = timedelta(hours=23)\r\n self.T_UNIT = 15*60\r\n self.SPM = 60\r\n self.SPH = 3600", "title": "" } ]
[ { "docid": "a2ad54557cf04feb72ac398d2f684fa5", "score": "0.742722", "text": "def __init__(self):\n ## Convolution time per layer\n self.t_convolution = []\n ## Downsampling time per layer\n self.t_downsample_activation = []\n self.t_non_conv_layers = 0\n self.t_total = 0", "title": "" }, { "docid": "4489b2831581d1f49fde17812b8a57d9", "score": "0.6522294", "text": "def __init__(self):\n\n super(Default8BitConvWeightsQuantizer, self).__init__(\n num_bits=8, per_axis=True, symmetric=True, narrow_range=True)", "title": "" }, { "docid": "0754e17ae6862e290da54fd69dafdd21", "score": "0.65193933", "text": "def __init__(self):\n\n super(AutoEncoder, self).__init__()\n \n self.first_conv_layer = TransitionDown(in_channels=3, out_channels=3, kernel_size=3)\n self.first_t_up_layer = TransitionUp(in_channels=3, out_channels=3)", "title": "" }, { "docid": "30152085e1fded81ab3c95e00bee08c6", "score": "0.64869285", "text": "def initialize(self):\n\n # Hard-coded values for now:\n self.dt = 60.0\n self.timer = 0\n self.niter = 21600\n self.elbcfunc = lambda t : 5.0e0*(1-np.cos(2.0*np.pi * t / self.tfinal))\n #self.dummytimes = np.arange(0.0, self.dt*5, self.tfinal)\n #self.dummyvalues = 1.0e3*(1-np.cos(4.0*np.pi/self.dummytimes))\n\n self.btime = 0.0\n self.tprev = self.timer\n self.tfinal = self.niter", "title": "" }, { "docid": "16b8235cfa2e4899a6c799f596674785", "score": "0.6471474", "text": "def __init__(self, seq_module):\n super(MaskConv, self).__init__()\n self.seq_module = seq_module", "title": "" }, { "docid": "4aebd1404e94e8a309324dd8fcfe6db1", "score": "0.64679873", "text": "def __init__(self):\n super(Encoder,self).__init__()\n self.place=64\n self.Inception=conv2d_Inception(inplace=3,place=self.place)\n self.conv1=self._make_Sequential(in_channel=64,out_channel=64,bn=False)\n self.conv2 = self._make_Sequential(in_channel=64,out_channel=128,stride=2)\n self.conv3 = self._make_Sequential(in_channel=128, out_channel=256, stride=2)\n self.conv4 = self._make_Sequential(in_channel=256, out_channel=512, stride=2)\n self.conv5 = self._make_Sequential(in_channel=512, out_channel=512, stride=2)", "title": "" }, { "docid": "131d0ab7f98e93896eb7d00f502c484d", "score": "0.64609146", "text": "def __init__(self):\n _pymaxwell5.Crgb8Tword_swiginit(self, _pymaxwell5.new_Crgb8Tword())", "title": "" }, { "docid": "dc6cadb02bd8a2d14fd31312b15dd893", "score": "0.6429915", "text": "def __init__(self,):\n self.input_channels = {}\n self.output_channels = {}", "title": "" }, { "docid": "1224adf12a172941f7fff905ceddb21d", "score": "0.6419754", "text": "def __post_init__(self) -> None:\n self.bit_rate = AudioBitRates(int(self.bit_rate))\n self.sample_rate = AudioSampleRates(int(self.sample_rate))\n self.channel = AudioChannels(int(self.channel))", "title": "" }, { "docid": "276faecb03bdb9eb81e8edac34b3457a", "score": "0.6401499", "text": "def __init__(self):\n\n super(Default8BitConvTransposeWeightsQuantizer, self).__init__(\n num_bits=8, per_axis=False, symmetric=True, narrow_range=True)", "title": "" }, { "docid": "bedb0693d35c1e69755320adb7e42ba8", "score": "0.6401415", "text": "def __init__(self):\n _pymaxwell5.Crgb8Tbyte_swiginit(self, _pymaxwell5.new_Crgb8Tbyte())", "title": "" }, { "docid": "91abaf963d99dcc34cce0535ec0e5731", "score": "0.6288872", "text": "def __init__(self):\n self.__input_shape = None\n self.__corruption = None\n self.__corruption_min = None # 0\n self.__corruption_max = None # 0\n self.__depth_increasing = None\n self.__layers = None\n self.__patch_size = None\n self.__strides = None\n 
self.__padding = None # SAME / VALID\n self.__residual_learning = None\n self.__prefix_name = None", "title": "" }, { "docid": "852d8edd3f6b852f311795e4aff23176", "score": "0.6278167", "text": "def __init__(self, size):\n base = (c_int * (6 + size))()\n data = (c_int * size).from_buffer(base, 6 * BITSIZE['time'])\n super(Cell_Time, self).__init__(3, 0, size, 0, 1, cast(base, c_void_p), cast(data, c_void_p))", "title": "" }, { "docid": "7ad3a1f52a62823f8987775b0804fa2e", "score": "0.62774545", "text": "def __init__(self):\n # create look-up tables for increasing and decreasing a channel\n self.incr_ch_lut = self._create_LUT_8UC1([0, 64, 128, 192, 256],\n [0, 70, 140, 210, 256])\n self.decr_ch_lut = self._create_LUT_8UC1([0, 64, 128, 192, 256],\n [0, 30, 80, 120, 192])", "title": "" }, { "docid": "7ad3a1f52a62823f8987775b0804fa2e", "score": "0.62774545", "text": "def __init__(self):\n # create look-up tables for increasing and decreasing a channel\n self.incr_ch_lut = self._create_LUT_8UC1([0, 64, 128, 192, 256],\n [0, 70, 140, 210, 256])\n self.decr_ch_lut = self._create_LUT_8UC1([0, 64, 128, 192, 256],\n [0, 30, 80, 120, 192])", "title": "" }, { "docid": "378176951d785841e94e2bbf25750c3e", "score": "0.6271395", "text": "def __init__(self, seq_module):\n super(MaskConvStream, self).__init__()\n self.seq_module = seq_module\n self.left_1 = None\n self.left_2 = None", "title": "" }, { "docid": "87901f0c5b77b8e28515cfff0d8390ff", "score": "0.62580264", "text": "def __init__(self):\n\n self._DEBUG = 0\n\n self.btime = 0.0 # Double in Julian date\n self.dt = 0.0 # Double in seconds\n self.timer = 0 # Integer in seconds\n self.niter = 0 # Integer in seconds\n self.single_event_end = 0 # Integer in minutes\n self.go = 1 # Integer flag for running or not running the model\n\n self.tprev = 0.0 # Double in seconds # To be set to timer\n self.tfinal = 0.0 # Double in seconds # To be set to niter\n self.elevprev=0.0\n self.elevprev_t=0.0\n\n self.timefact=NN_TIME_FACTOR # Minutes to seconds conversion, since niter is in mins.\n\n #self.dummytimes = 0.0\n #self.dummyvalues = 0.0\n self.elev = 0.0\n\n self.runflag = 1 # Only for use in coupling with ADCIRC.", "title": "" }, { "docid": "45683e7d7b0df4132dd33e0375b68cea", "score": "0.62034476", "text": "def __init__(self):\n _pymaxwell5.Crgb8_swiginit(self, _pymaxwell5.new_Crgb8())", "title": "" }, { "docid": "e2fb4537f630add4cfc03ffdc2b1323c", "score": "0.6189795", "text": "def __init__(self, times, amplitudes):\n pass", "title": "" }, { "docid": "1c9545f20d3f4146f05121d5007ae455", "score": "0.6188743", "text": "def __init__(self):\n self.nbits = 5\n self.observation_space = spaces.Discrete(self.nbits)\n self.action_space = spaces.Discrete(2)\n self.observation_space.shape=[self.nbits,]\n self.reset()", "title": "" }, { "docid": "033be1b5e465250ec914c9bd2a919f71", "score": "0.61501235", "text": "def __init__(\n self,\n input_dim: int,\n layer: int = 8,\n stack: int = 3,\n bottleneck_dim: int = 128,\n hidden_dim: int = 512,\n kernel: int = 3,\n causal: bool = False,\n norm_type: str = \"gLN\",\n ):\n super().__init__()\n\n self.tcn = TemporalConvNet(\n N=input_dim,\n B=bottleneck_dim,\n H=hidden_dim,\n P=kernel,\n X=layer,\n R=stack,\n norm_type=norm_type,\n causal=causal,\n )\n\n self._output_dim = bottleneck_dim", "title": "" }, { "docid": "92f06733cbe692215ff8c8e33807912d", "score": "0.6130846", "text": "def __init__(self):\n _pymaxwell5.Crgb16_swiginit(self, _pymaxwell5.new_Crgb16())", "title": "" }, { "docid": "d14187bb26bf86d002eef3d6b5728633", 
"score": "0.61220574", "text": "def __init__(self):\n super(IlluminationSwapNet, self).__init__()\n self.encode = Encoder()\n self.decode = Decoder()", "title": "" }, { "docid": "85bb5e68f0ea02dc6a05340913df6566", "score": "0.6106404", "text": "def __init__(self):\n _pymaxwell5.Crgba8Tword_swiginit(self, _pymaxwell5.new_Crgba8Tword())", "title": "" }, { "docid": "9aa662e1dc57008ec3ea0dda8fcecde5", "score": "0.6101556", "text": "def __init__(self, *args):\n _pymaxwell5.Cattribute_swiginit(self, _pymaxwell5.new_Cattribute(*args))", "title": "" }, { "docid": "52aa71dddd4854df8c8b3cff592811a0", "score": "0.6095778", "text": "def __init__(self, bytes=None):\n # packet is 24 bytes long\n if bytes is None:\n self._bits = BitArray(length=192)\n else:\n self._bits = BitArray(bytes=bytes)", "title": "" }, { "docid": "6062f4c65d85b0cc38a79d91be1721c6", "score": "0.6087949", "text": "def __init__(self, *args: Any, **kwargs: Any):\n super().__init__(*args, **kwargs)\n self._attribute = DeviceAttribute.PM25\n self._units = CONCENTRATION_MICROGRAMS_PER_CUBIC_METER\n self._device_class = SensorDeviceClass.PM25\n self._state_class = SensorStateClass.MEASUREMENT", "title": "" }, { "docid": "c98e7adcb05b6948d2efb6927d9b9932", "score": "0.60767305", "text": "def __init__(self, direct=False, ts=1/333): \r\n super().__init__(direct, ts)", "title": "" }, { "docid": "9ba49ea862151012ed9250a8519166e6", "score": "0.6068548", "text": "def __init__(self, direct=False, ts=1/333):\r\n super().__init__(direct, ts)", "title": "" }, { "docid": "7ce6139e721f464e805559409687415b", "score": "0.6067599", "text": "def __init__(self):\n # self.r1 = [0] * 8\n # self.r2 = [0] * 8\n # self.r3 = [0] * 8\n # self.r4 = [0] * 8\n # self.r5 = [0] * 8\n # self.r6 = [0] * 8\n # self.r7 = [0] * 8\n # self.r8 = [0] * 8\n\n self.reg = [0] * 8\n\n self.ram = [0] * 256\n\n self.pc = 0\n\n self.HLT = 0b00000001\n\n self.flag = [0] * 8", "title": "" }, { "docid": "0ad99e78f3771861fa0f5c6cf7969852", "score": "0.6054056", "text": "def __init__(self):\n _pymaxwell5.Crgb_swiginit(self, _pymaxwell5.new_Crgb())", "title": "" }, { "docid": "74d51e1ebfefe14b48e568aa3acf203c", "score": "0.6051731", "text": "def __init__(self, bw):\n super().__init__()\n self._bw_fac = bw", "title": "" }, { "docid": "1e9c78c54bb12cec58d754b9b514460e", "score": "0.6047427", "text": "def __init__(self,bits=1600,trace=False):\n if bits not in [200,400,800,1600]:\n raise KeccakError(\"KeccakF bits must be in [200,400,800,1600]\")\n self.bits = bits\n self.nbytes = bits//8\n self._trace = trace\n self._last = None", "title": "" }, { "docid": "4e2123a98e0ff29a2689efed50981a9c", "score": "0.60395235", "text": "def __init__(self):\n Binary.__init__(self)\n return", "title": "" }, { "docid": "2c3644b3ceafac16e16609d3ccdc6fda", "score": "0.60348", "text": "def __init__(self):\n self._n_layers = 0\n self._dim_layers = []\n self._layers = {}\n self._layers_input = {}\n return", "title": "" }, { "docid": "0bfeb7f3d67bef285d06862081839434", "score": "0.6034467", "text": "def __init__(self, s, commonInfPresentFlag, maxNumSubLayersMinus1):\n self.t='\\t\\t'\n if commonInfPresentFlag:\n self.nal_hrd_parameters_present_flag = s.read('uint:1')\n self.vcl_hrd_parameters_present_flag = s.read('uint:1')\n if self.nal_hrd_parameters_present_flag or self.vcl_hrd_parameters_present_flag:\n self.sub_pic_hrd_params_present_flag = s.read('uint:1')\n if self.sub_pic_hrd_params_present_flag:\n self.tick_divisor_minus2 = s.read('uint:8')\n self.du_cpb_removal_delay_increment_length_minus1 = 
s.read('uint:5')\n self.sub_pic_cpb_params_in_pic_timing_sei_flag = s.read('uint:1')\n self.dpb_output_delay_du_length_minus1 = s.read('uint:5')\n self.bit_rate_scale = s.read('uint:4')\n self.cpb_size_scale = s.read('uint:4')\n if self.sub_pic_hrd_params_present_flag:\n self.icpb_size_du_scale = s.read('uint:4')\n self.initial_cpb_removal_delay_length_minus1 = s.read('uint:5')\n self.au_cpb_removal_delay_length_minus1 = s.read('uint:5')\n self.dpb_output_delay_length_minus1 = s.read('uint:5')\n for i in range(maxNumSubLayersMinus1 + 1):\n self.fixed_pic_rate_general_flag[ i ] = s.read('uint:1')\n if not self.fixed_pic_rate_general_flag[i]:\n self.fixed_pic_rate_within_cvs_flag[i] = s.read('uint:1')\n if self.fixed_pic_rate_within_cvs_flag[i]:\n self.elemental_duration_in_tc_minus1[i] = s.read('ue')\n else:\n self.low_delay_hrd_flag[i] = s.read('uint:1')\n if not self.low_delay_hrd_flag[i]:\n self.cpb_cnt_minus1[i] = s.read('ue')\n if self.nal_hrd_parameters_present_flag:\n sub_layer_hrd_parameters(s, i)\n if self.vcl_hrd_parameters_present_flag:\n self.sub_layer_hrd_parameters(s,i)", "title": "" }, { "docid": "0a099cf449466e1de420466559484d9c", "score": "0.60239583", "text": "def __init__(self):\n super(self.__class__, self).__init__(PrefixedBytes(construct.Int16ub))", "title": "" }, { "docid": "44c97ba98aed5fe8adffd3ef7d1c8296", "score": "0.6002053", "text": "def __init__(self,im,vb=0,sim_tf=False): #Initialize class\n\n #Initialize variables\n self.vb=vb\n self.im=im\n self.currIm=im\n self.tmpIm=\"/tmp/easyroi_%d_%06d\" %(os.getpid(),random.randint(0,10000))\n self.sim_tf=sim_tf\n self.label=im\n self.dims=[]\n self.imType=\"mask\"", "title": "" }, { "docid": "f4d84622c5b1ee6e5a0d9c2e71b5022a", "score": "0.5986558", "text": "def __init__(self) -> object:\r\n self.__average_time_sum__: float = 0.0\r\n self.__average_time_count__: float = 0.0\r\n self.__average_time__: float = 0.0\r\n self.__current_time__: float = 0.0\r\n self.__cycle_start__: float = 0.0\r\n self.__cycle_end__: float = 0.0", "title": "" }, { "docid": "b44c8e40d3b76d1ee31e89855973d342", "score": "0.5985681", "text": "def __init__(self, *args):\n _Interface.Interface_BitMap_swiginit(self,_Interface.new_Interface_BitMap(*args))", "title": "" }, { "docid": "554b6e56ec59c9a862fa80d9c86a278c", "score": "0.5982539", "text": "def __init__(self, *args: Any, **kwargs: Any):\n super().__init__(*args, **kwargs)\n self._attribute = DeviceAttribute.PM10\n self._units = CONCENTRATION_MICROGRAMS_PER_CUBIC_METER\n self._device_class = SensorDeviceClass.PM10\n self._state_class = SensorStateClass.MEASUREMENT", "title": "" }, { "docid": "7c304e42fe0a46e83b94e0412a215842", "score": "0.5972468", "text": "def __init__(self):\n _pymaxwell5.Crgba8Tbyte_swiginit(self, _pymaxwell5.new_Crgba8Tbyte())", "title": "" }, { "docid": "88536721fd6a7b8be04aa50695564d43", "score": "0.59527254", "text": "def __init__(self, *args: Any, **kwargs: Any):\n super().__init__(*args, **kwargs)\n self._attribute = DeviceAttribute.CARBON_DIOXIDE\n self._units = CONCENTRATION_PARTS_PER_MILLION\n self._device_class = SensorDeviceClass.CO2\n self._state_class = SensorStateClass.MEASUREMENT", "title": "" }, { "docid": "7d7860b3899633f1f7572ed924f82aa8", "score": "0.59486115", "text": "def __init__(self, *args: Any, **kwargs: Any):\n super().__init__(*args, **kwargs)\n self._attribute = DeviceAttribute.CARBON_MONOXIDE\n self._units = CONCENTRATION_PARTS_PER_MILLION\n self._device_class = SensorDeviceClass.CO\n self._state_class = SensorStateClass.MEASUREMENT", "title": 
"" }, { "docid": "1b63d5d81d0fddbc9a499e821d85cf94", "score": "0.5942943", "text": "def __init__(self): \r\n self.beam_length_penalty_weight = 1.25\r\n \r\n self.sampling_temperature = 0.5\r\n \r\n self.max_answer_words = 100\r\n \r\n self.conv_history_length = 6\r\n\r\n self.normalize_words = True\r\n \r\n self.log_summary = True\r\n\r\n self.log_chat = True", "title": "" }, { "docid": "ddc9e8ebec4107118089d15ad97c748f", "score": "0.59327906", "text": "def __init__(self, normalization, timescale):\n\n\t\t# Store them as attributes like so\n\t\tself._norm = normalization\n\t\tself._tau = timescale", "title": "" }, { "docid": "105e0f6d322223b03fb179110bc5858f", "score": "0.59292907", "text": "def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 192, 7, stride=2, padding=3)", "title": "" }, { "docid": "9889a9c72d3c8affda5b4e11a2816edb", "score": "0.59273267", "text": "def __init__(self, Cb):\n \n self.Cb = asarray(Cb)", "title": "" }, { "docid": "0b1981fb4987564181461a1d10b764a1", "score": "0.5926041", "text": "def __init__(self, *args, **kwargs):\n super(Flags, self).__init__(\n ('alte', Bits(maxlen=1)),\n ('lohs', Bits(maxlen=1)),\n ('lahs', Bits(maxlen=1)),\n ('mv', Bits(maxlen=1)),\n ('bb', Bits(maxlen=1)),\n ('cs', Bits(maxlen=1)),\n ('fix', Bits(maxlen=1)),\n ('vld', Bits(maxlen=1)),\n *args, **kwargs\n )", "title": "" }, { "docid": "8a9c4981f6320c7f5290799cb57be0e9", "score": "0.592514", "text": "def __init__(self):\n print('Initialize the class using one of the class methods:\\n'\n '>>> pyshtools.Slepian.from_cap\\n'\n '>>> pyshtools.Slepian.from_mask')", "title": "" }, { "docid": "065cb0067b4aaf6bbc362519b407ff87", "score": "0.5924868", "text": "def __init__(self):\r\n self.rnn_cell_type = \"lstm\"\r\n \r\n self.rnn_size = 256\r\n \r\n self.use_bidirectional_encoder = True\r\n \r\n self.encoder_num_layers = 2\r\n \r\n self.decoder_num_layers = 2\r\n \r\n self.encoder_embedding_size = 256\r\n \r\n self.decoder_embedding_size = 256\r\n\r\n self.encoder_embedding_trainable = True\r\n\r\n self.decoder_embedding_trainable = True\r\n \r\n self.share_embedding = True\r\n \r\n self.attention_type = \"normed_bahdanau\"\r\n \r\n self.beam_width = 10\r\n \r\n self.enable_sampling = False\r\n\r\n self.optimizer = \"adam\"\r\n \r\n self.max_gradient_norm = 5.\r\n \r\n self.gpu_dynamic_memory_growth = True", "title": "" }, { "docid": "63a989e21f21865bb1232b483706cd10", "score": "0.59244597", "text": "def _constants_initialization(self, *args):", "title": "" }, { "docid": "97b28c9e1d3620a447e7d1c1af39f692", "score": "0.5916356", "text": "def __init__(self):\n self.no_slices = 10\n self.no_time_steps = 59\n # we have to think about this. 
How to read/write\n self.image_dimensions = (512, 512)", "title": "" }, { "docid": "00d5ab1fd9cbddf33b05676a821d9f2d", "score": "0.59127015", "text": "def __init__(self):\n # general values\n self.m, self.t, self.c = 0.0, 0.0, 0.0\n\n # top values\n self.top_m, self.top_t, self.top_c = 0.0, 0.0, 0.0\n\n # concept values\n self.concept_m, self.concept_t, self.concept_c = 0.0, 0.0, 0.0\n\n # relation values\n self.relation_m, self.relation_t, self.relation_c = 0.0, 0.0, 0.0", "title": "" }, { "docid": "96608caacb2f58d5394019b40e2724c9", "score": "0.59124637", "text": "def __init__(self):\r\n\r\n super(Unijunction, self).__init__()\r\n\r\n # Initialize public scalar attributes.\r\n self.base_hr = 0.0083", "title": "" }, { "docid": "3f7c50bc1f181e01e5bc3c1ecce2c0aa", "score": "0.5908995", "text": "def __init__(self):\n\n\t\tself.data = bytearray()\n\t\tself.active = False", "title": "" }, { "docid": "b943aa46185012480e0316c06fd7aad5", "score": "0.5906709", "text": "def __init__(self, *args: Any, **kwargs: Any):\n super().__init__(*args, **kwargs)\n self._attribute = DeviceAttribute.PM1\n self._units = CONCENTRATION_MICROGRAMS_PER_CUBIC_METER\n self._device_class = SensorDeviceClass.PM1\n self._state_class = SensorStateClass.MEASUREMENT", "title": "" }, { "docid": "4fc66f34d7e7bd788069055d2f095b71", "score": "0.59056765", "text": "def __init__(self, direct=False, ts=1/333): \r\n orn_init={'values': np.array([0, 0.15, 0, 0])}\r\n super().__init__(direct, ts, orn_init)", "title": "" }, { "docid": "4756fda1e7bde146169b29462a3c8dc8", "score": "0.5904637", "text": "def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 192, 7, stride=2, padding=3)\n self.bn_0 = torch.nn.BatchNorm2d(192)", "title": "" }, { "docid": "35b9416030eb0782e73a2ccb26900ea2", "score": "0.5899934", "text": "def _init(cls):\n pass", "title": "" }, { "docid": "7467851c6db2dbbc2107b04cb22aad8b", "score": "0.5895545", "text": "def __init__(self, *args, **kwargs):\n\n super().__init__(*args, **kwargs)\n self.min_ver = 90100\n self.max_ver = None", "title": "" }, { "docid": "93a69d73b908451ef0bdca4f1438792a", "score": "0.58903784", "text": "def __init__(\n self,\n sample_rate: int,\n duration: int,\n hop_length: int,\n num_mfcc: int,\n num_segments: int,\n num_channels: int,\n latent_len: int,\n latent_dim: int,\n n_classes: int,\n **kwargs\n ):\n\n super().__init__()\n self.samples_per_track = sample_rate * duration\n self.samples_per_segment = int(self.samples_per_track / num_segments)\n self.input_len = math.ceil(self.samples_per_segment / hop_length) * num_mfcc\n self.input_dim = num_channels\n self.latent_len = latent_len\n self.latent_dim = latent_dim\n self.output_len = 1\n self.output_dim = latent_dim\n self.n_classes = n_classes\n self.input_type = \"raw\"\n self.decoder_cross_attention = False\n self.decoder_residual = False\n self.decoder_projection = True\n\n for k, v in kwargs.items():\n setattr(self, k, v)", "title": "" }, { "docid": "14667246dd5319c5d9ee88d685e8b9b5", "score": "0.5886815", "text": "def __init__(self, *args, **kwds):\n if args or kwds:\n super(BCM2, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.LeftLightState is None:\n self.LeftLightState = 0\n if self.RightLightState is None:\n self.RightLightState = 0\n if self.PositionLampState is None:\n self.PositionLampState = 0\n if self.HighBeamState is None:\n self.HighBeamState = 0\n if self.LowBeamState is None:\n self.LowBeamState = 0\n if self.BackFogLightState is None:\n 
self.BackFogLightState = 0\n if self.FrontFogLightState is None:\n self.FrontFogLightState = 0\n else:\n self.LeftLightState = 0\n self.RightLightState = 0\n self.PositionLampState = 0\n self.HighBeamState = 0\n self.LowBeamState = 0\n self.BackFogLightState = 0\n self.FrontFogLightState = 0", "title": "" }, { "docid": "27248e6d7643adf9d3c0d2a28f95449f", "score": "0.5884265", "text": "def __init__(self,wordlength = 32,number_of_rounds = 4,parallellism = 1,tagsize =128):\n if wordlength != 32:\n print 'The wordlength must be 32.'\n assert False\n if parallellism != 1:\n print 'The parallellism degree must be 1.'\n assert False\n if tagsize != 128:\n print 'The tagsize must be four times the wordlength, i.e. 128 bits.'\n assert False\n\n\n self.__r_list = [8,11,16,31]\n u = [\n [ '0x243f6a88', '0x0', '0x0', '0x85a308d3' ],\n [ '0x0', '0x0', '0x0', '0x0' ],\n [ '0x13198a2e', '0x03707344', '0x254f537a', '0x38531d48' ],\n [ '0x839c6e83', '0xf97a3ae5', '0x8c91d88c', '0x11eafb59' ]]\n\n self.__state_matrix = [[self.bitarray_creator(u[i][k]) for k in range(4)] for i in range(4)]\n\n\n self.__wordlength = self.bitarray_creator(str(hex(wordlength)))\n self.__number_of_rounds = self.bitarray_creator(str(hex(number_of_rounds)))\n self.__parallellism = self.bitarray_creator(str(hex(parallellism)))\n self.__tagsize = self.bitarray_creator(str(hex(tagsize)))", "title": "" }, { "docid": "ca97537492bfd4b358354c8081cfd26e", "score": "0.58834594", "text": "def __init__(self):\n\t\tself.heading = [0, 0]\n\t\tself.speed_clamp = 1\n\t\tself.urgent = False\n\t\tself.timestamp = 0", "title": "" }, { "docid": "b6aab65ed1098b9a6e7f6a141561abcb", "score": "0.5873713", "text": "def __init__(self, timeData: TimeData, decParams: DecimationParameters) -> None:\n self.timeData: TimeData = timeData\n self.sampleFreq: float = timeData.sampleFreq * 1.0\n self.chans: List = timeData.chans\n self.numSamples: int = timeData.numSamples\n self.decParams: DecimationParameters = decParams\n config = loadConfig()\n self.minSamples: int = config[\"Decimation\"][\"minsamples\"]\n self.level: int = -1\n self.maxDownsampleFactor: int = 8", "title": "" }, { "docid": "69bc8b12539014aa0bbb8844b2d27c01", "score": "0.5872296", "text": "def __init__(self):\n _pymaxwell5.CemitterPair_swiginit(self, _pymaxwell5.new_CemitterPair())", "title": "" }, { "docid": "101da741a1e771ed4885658163e59159", "score": "0.58696884", "text": "def __init__(self):\n\n\t\tself.active = False\n\t\tself.data = bytearray()", "title": "" }, { "docid": "11c5d6e8dd66f110d0c5029263b2375a", "score": "0.58668077", "text": "def __init__(self):\n _pymaxwell5.Cxyz_swiginit(self, _pymaxwell5.new_Cxyz())", "title": "" }, { "docid": "ef30bc563d6a4083adb2e6e711b785c8", "score": "0.5865604", "text": "def __init__(self):\n _pymaxwell5.Crgba16_swiginit(self, _pymaxwell5.new_Crgba16())", "title": "" }, { "docid": "59a7df3ad824fdc579bb5a0bc2362381", "score": "0.58643675", "text": "def __init__(self, mask=None):\n self.mask = mask", "title": "" }, { "docid": "4d9b6e4fcc18ed66631fbced066f5ce5", "score": "0.5862238", "text": "def __init__(self, *args):\n _snap.TChAV_swiginit(self, _snap.new_TChAV(*args))", "title": "" }, { "docid": "b8dc185e54992411990802aae9c3586e", "score": "0.58533067", "text": "def __init__(self, type, time, data=None):\n self.type = type\n self.time = time\n self.data = data", "title": "" }, { "docid": "a18b029adfcf9770b4f5b79cf029d642", "score": "0.5844373", "text": "def __init__(self, init_mu, init_P, dt, W, V, n):\n self.mu = init_mu # initial guess of state 
mean\n self.P = init_P # initial guess of state covariance\n self.dt = dt # time step\n self.W = W # process noise \n self.V = V # observation noise\n self.n = n # number of map features", "title": "" }, { "docid": "f2b47ec37c5df154c9c056e2622eda82", "score": "0.5837328", "text": "def __init__(self, data: bytes) -> None:\n if not isinstance(data, bytes):\n raise TypeError(\"Invalid data\")\n\n self._systemTime = datetime.fromtimestamp(int.from_bytes(data[0:4], \"little\"))\n # ??? data[4:8]\n self._SSM2MechSetting = CHSesame2MechSettings(rawdata=data[8:20])\n self._SSM2MechStatus = CHSesame2MechStatus(rawdata=data[20:28])", "title": "" }, { "docid": "6a7eaf590a62b305538836545c54c80f", "score": "0.5829121", "text": "def __init__(self):\n self.imageData = None # image array\n self.imageFourier = None # Fourier Transformation\n self.imageFourierShifted = None # Shifted Fourier\n self.imageFourierInv = None # Fourier Inverse\n self.imageFourierInvShifted = None # Shifted Fourier Inverse\n self.dataType = None # The image data type\n self.imageShape = None # the image shape\n self.__epsilon = 10**-8 # a value used to avoid dividing by zero", "title": "" }, { "docid": "2418adeec497953f8f16b66209e114a4", "score": "0.5827194", "text": "def __init__(self, data=None):\n self._previous_values = [False] * 256\n self._values = [False] * 256\n if data is not None:\n self.update(data)\n pass", "title": "" }, { "docid": "e92f7b822f9dde7b92638d110e94a660", "score": "0.58233184", "text": "def __init__(self):\n self.name = None\n self.meta = None\n self.lims = None\n self.bounds = None\n self.time_dist = None\n self.use_files = None\n self.dim_names = None\n self.data = None\n self.time_name = None\n self.time = None", "title": "" }, { "docid": "980493083aaf7268addd0a332ca32f28", "score": "0.58201164", "text": "def ___init__():", "title": "" }, { "docid": "ba7e19d089016b3b36a5bd3fbc77fe7f", "score": "0.58150876", "text": "def __init__(self):\r\n self.__clock = 0.0", "title": "" }, { "docid": "e1e65559d60b6ac2fc2c0fcec5d7f4fc", "score": "0.58022016", "text": "def __init__():", "title": "" }, { "docid": "f8ecfdf9177d267200a1b51e0870a8cc", "score": "0.57976025", "text": "def __init__(self, idim, odim, args, ignore_id=-1):\n odim += 1 # for the mask token\n\n super().__init__(idim, odim, args, ignore_id)\n assert 0.0 <= self.mtlalpha < 1.0, \"mtlalpha should be [0.0, 1.0)\"\n\n self.mask_token = odim - 1\n self.sos = odim - 2\n self.eos = odim - 2\n self.odim = odim\n\n self.intermediate_ctc_weight = args.intermediate_ctc_weight\n self.intermediate_ctc_layers = None\n if args.intermediate_ctc_layer != \"\":\n self.intermediate_ctc_layers = [\n int(i) for i in args.intermediate_ctc_layer.split(\",\")\n ]\n\n if args.maskctc_use_conformer_encoder:\n if args.transformer_attn_dropout_rate is None:\n args.transformer_attn_dropout_rate = args.conformer_dropout_rate\n self.encoder = Encoder(\n idim=idim,\n attention_dim=args.adim,\n attention_heads=args.aheads,\n linear_units=args.eunits,\n num_blocks=args.elayers,\n input_layer=args.transformer_input_layer,\n dropout_rate=args.dropout_rate,\n positional_dropout_rate=args.dropout_rate,\n attention_dropout_rate=args.transformer_attn_dropout_rate,\n pos_enc_layer_type=args.transformer_encoder_pos_enc_layer_type,\n selfattention_layer_type=args.transformer_encoder_selfattn_layer_type,\n activation_type=args.transformer_encoder_activation_type,\n macaron_style=args.macaron_style,\n use_cnn_module=args.use_cnn_module,\n cnn_module_kernel=args.cnn_module_kernel,\n 
stochastic_depth_rate=args.stochastic_depth_rate,\n intermediate_layers=self.intermediate_ctc_layers,\n )\n self.reset_parameters(args)", "title": "" }, { "docid": "0e68cca6d1667bb9b92509e423d6539e", "score": "0.57879776", "text": "def __init__(self, data=None, time=None, **kwargs):\n\n # Initialize attributes common to all CotrendingBasisVector classes\n super(TessCotrendingBasisVectors, self).__init__(data=data,\n time=time, **kwargs)", "title": "" }, { "docid": "10777ae66dc84e28a8407cb26711a64f", "score": "0.57861966", "text": "def __init__(self):\n\t\tself.__hour = 0\n\t\tself.__minute = 0\n\t\tself.__second = 0", "title": "" }, { "docid": "14b9f8061a0ebe4ef2811d82912d9155", "score": "0.5784607", "text": "def __init__(self, status_byte, type_, arguments, length):\n self.status_byte = status_byte\n self.type = type_\n self.arguments = arguments\n self.length = length\n \n # Attributes that can be set on the object\n self.valid_attributes = set(self.arguments) | {'time'}", "title": "" }, { "docid": "3c1f025835d7504bf3c82902c9b0a44c", "score": "0.57836324", "text": "def __init__(self):\n _pymaxwell5.Crgba8_swiginit(self, _pymaxwell5.new_Crgba8())", "title": "" }, { "docid": "45db17e4611c95a863a27ac44e9cf520", "score": "0.57788306", "text": "def __init__(self) -> None:\n self._min = None\n self._max = None\n self._mean = None\n self._num = 0\n self._counts = [0]*111\n self._count_max = 0\n self._mode = None\n return None", "title": "" }, { "docid": "17b4d927c7985b93dca259ba0dea96cc", "score": "0.5778602", "text": "def __init__(self, in_channels, out_channels, stride):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.stride = stride", "title": "" }, { "docid": "17b4d927c7985b93dca259ba0dea96cc", "score": "0.5778602", "text": "def __init__(self, in_channels, out_channels, stride):\n super().__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.stride = stride", "title": "" }, { "docid": "73987e3b1ef885b8a0580c767e60c5d9", "score": "0.5776369", "text": "def setup_class(cls):\n cls.T_arr = np.array([1, 2]) * u.eV\n cls.n_arr = np.array([1e20, 2e20]) * u.cm**-3\n cls.ion = \"p\"\n cls.coulomb_log = 10", "title": "" }, { "docid": "73987e3b1ef885b8a0580c767e60c5d9", "score": "0.5776369", "text": "def setup_class(cls):\n cls.T_arr = np.array([1, 2]) * u.eV\n cls.n_arr = np.array([1e20, 2e20]) * u.cm**-3\n cls.ion = \"p\"\n cls.coulomb_log = 10", "title": "" }, { "docid": "ea4fc5b15cd27ce6b7b3f108d706514e", "score": "0.57741165", "text": "def __init__(self):\n self.eps = 1e-5\n self.use_global_stats = True\n self.workspace = 512\n self.units = (3, 4, 23, 3) # use for 101\n self.filter_list = [256, 512, 1024, 2048]", "title": "" }, { "docid": "df1eda0889437403e47c2f829abf660f", "score": "0.5769937", "text": "def custom_init(self):\n pass", "title": "" }, { "docid": "17d936f8acb2733285174eb18ce33c81", "score": "0.57657355", "text": "def __init__(self, nb_actions, hidden_neurons):\n super(CLSTMModel, self).__init__(nb_actions)\n\n self.hidden_neurons = hidden_neurons\n self._values = zeros(shape=(1, 1, 1), dtype=float32)\n self._model = None", "title": "" }, { "docid": "e8e88f06bb01ea6add4f9d8a5525e415", "score": "0.5760518", "text": "def __init__(self, *args: Any, **kwargs: Any):\n super().__init__(*args, **kwargs)\n self._attribute = DeviceAttribute.CUMULATIVE_CUBIC_METER\n self._units = UnitOfVolume.CUBIC_METERS\n self._device_class = SensorDeviceClass.WATER\n self._state_class = SensorStateClass.MEASUREMENT", "title": 
"" }, { "docid": "b1afb02ab6fe3beb6e99758d3d6c8183", "score": "0.5758819", "text": "def __init__(self,wt_conv,b_conv):\n self.filter_shape1 =c.filter_shape1\n self.filter_shape2 =c.filter_shape2\n self.poolsize = c.poolsize\n self.activation_fn=c.activation_fn\n\n self.w1 = wt_conv[0] \n self.w2 = wt_conv[1]\n self.b = b_conv\n del wt_conv,b_conv", "title": "" }, { "docid": "ea612fad61d3065315d65bc101bdb94b", "score": "0.57579315", "text": "def initialize(self, image, state, class_info=None):\n raise NotImplementedError", "title": "" }, { "docid": "efc99c8cb3df3ea9ce2fc1f0875cf52a", "score": "0.5757591", "text": "def __init__(self) -> None:\n\n super().__init__()\n\n self.initial: Dict[int, Tensor] = dict()\n \"\"\"\n The initial value for each key.\n \"\"\"\n\n self.last: Dict[int, Tensor] = dict()\n \"\"\"\n The last value detected for each key\n \"\"\"", "title": "" }, { "docid": "314bcf7f2a0cbb337b1de001796b584b", "score": "0.57552105", "text": "def __init__(self, channel, burst_time=pin_constants.BURST):\n super().__init__(channel, burst_time)", "title": "" }, { "docid": "314bcf7f2a0cbb337b1de001796b584b", "score": "0.57552105", "text": "def __init__(self, channel, burst_time=pin_constants.BURST):\n super().__init__(channel, burst_time)", "title": "" } ]
4b8718f5c1e2aa91801e74fabf31a1e9
Recursively convert Bunches in `d` to regular dicts.
[ { "docid": "577f53696729e516eac0e979c595e16b", "score": "0.7504093", "text": "def unbunchify(d):\n return _convert(d, dict)", "title": "" } ]
[ { "docid": "250961978e717ef452175ddb37a456f1", "score": "0.67221117", "text": "def recursive_asdict(d):\n out = {}\n for k, v in asdict(d).iteritems():\n if hasattr(v, \"__keylist__\"):\n out[k] = recursive_asdict(v)\n elif isinstance(v, list):\n out[k] = []\n for item in v:\n if hasattr(item, \"__keylist__\"):\n out[k].append(recursive_asdict(item))\n else:\n out[k].append(item)\n else:\n out[k] = v\n return out", "title": "" }, { "docid": "444ff983036668df94c7ceaf7f7032a2", "score": "0.6479822", "text": "def to_flat_dict(d):\n\n def _inner(d, parents=None):\n if parents is None:\n parents = []\n for k, v in d.items():\n if not isinstance(v, d.__class__):\n if parents:\n k = tuple(parents + [k])\n yield (k, v)\n else:\n yield from _inner(d=v, parents=parents + [k])\n\n return {k: v for k, v in _inner(d)}", "title": "" }, { "docid": "6e5a5081aaaed7bde7791c5f438988d7", "score": "0.63482976", "text": "def restruct_dict(din, dout):\n nb_pb = len([k for k in din.keys() if \"__\" in k])\n if nb_pb > 0:\n for k, v in din.items():\n if \"__\" in k:\n keys = k.split('__')\n head = keys[0]\n other = \"__\".join(keys[1:])\n if head in dout:\n dout[head][other] = v\n else:\n dout[head] = {other: v}\n else:\n dout[k] = v\n for k, v in dout.items():\n if isinstance(v, dict):\n dout[k] = restruct_dict(dout[k], {})\n else:\n pass\n else:\n return din\n return dout", "title": "" }, { "docid": "561b205dd2a7a2548260860485704463", "score": "0.63072735", "text": "def flatten(d : Dict):\n r = dict()\n agenda = [ (key,[],d) for key in d.keys()]\n while agenda:\n key,path,d = agenda.pop()\n if not isinstance(d[key],dict):\n r[\"_\".join(path+[str(key)])] = d[key]\n else:\n for subkey in d[key].keys():\n agenda.append((subkey,path+[str(key)],d[key]))\n return r", "title": "" }, { "docid": "010e4d003a8ffe1f7137583bf1977e16", "score": "0.6288424", "text": "def rest_recursive_dict(d):\n out = {}\n for k, v in d.items():\n if v.__class__.__name__ == 'PropertyHolder':\n out[k] = v.__dict__\n else:\n out[k] = v\n return out", "title": "" }, { "docid": "9c3ac8faec1806fd291b2b5571e69892", "score": "0.625399", "text": "def soap_recursive_dict(d):\n out = {}\n for k, v in asdict(d).items():\n if hasattr(v, '__keylist__'):\n out[k] = soap_recursive_dict(v)\n elif isinstance(v, list):\n out[k] = []\n for item in v:\n if hasattr(item, '__keylist__'):\n out[k].append(soap_recursive_dict(item))\n else:\n out[k].append(item)\n else:\n out[k] = v\n return out", "title": "" }, { "docid": "6569ffedaffead30138e83bbbe47d390", "score": "0.6253366", "text": "def recursive_asdict(d):\n out = {}\n for k, v in asdict(d).items():\n k = k.lower()\n if hasattr(v, '__keylist__'):\n out[k] = recursive_asdict(v)\n elif isinstance(v, list):\n out[k] = []\n for item in v:\n if hasattr(item, '__keylist__'):\n out[k].append(recursive_asdict(item))\n else:\n out[k].append(\n item.title() if isinstance(item, Text) else item)\n else:\n out[k] = v.title() if isinstance(v, Text) else v\n return out", "title": "" }, { "docid": "a4db7f5cdade65acb69f998813579e1a", "score": "0.6202251", "text": "def deflate(self, d):\n new_d = {}\n for k, v in d.iteritems():\n if hasattr(v, \"__iter__\"):\n new_d[\"__json_\" + k] = True\n new_d[k] = json.dumps(v)\n else:\n new_d[k] = v\n assert not any((isinstance(x, dict) or\n isinstance(x, list) for x in new_d))\n return new_d", "title": "" }, { "docid": "c9ddfbce75a3e77b486676e1c52bd2c4", "score": "0.6142628", "text": "def to_shallow_dict_items(d, join=add, lift=lambda x: x):\n results = []\n\n def visit(subdict, 
results, partialKey):\n for k, v in subdict.items():\n newKey = lift(k) if partialKey == _FLAG_FIRST else join(\n partialKey, lift(k))\n if isinstance(v, Mapping):\n visit(v, results, newKey)\n else:\n results.append((newKey, v))\n visit(d, results, _FLAG_FIRST)\n return results", "title": "" }, { "docid": "8b534d35761ee8886395f0f38d900a96", "score": "0.6072584", "text": "def encode_feedparser_dict(d):\n if isinstance(d, fp.FeedParserDict) or isinstance(d, dict):\n j = {}\n for k in d.keys():\n j[k] = encode_feedparser_dict(d[k])\n return j\n elif isinstance(d, list):\n l = []\n for k in d:\n l.append(encode_feedparser_dict(k))\n return l\n else:\n return d", "title": "" }, { "docid": "27ddfa5d6247083d522420ffadcf2eb4", "score": "0.5998761", "text": "def reformat_dict(d):\n o = []\n for key, value in d.items():\n if type(value) is dict:\n rv =reformat_dict(value)\n new_d = {\"text\": key, \"nodes\":rv, \"state\":{\"expanded\": 1}}\n o.append(new_d)\n else:\n\t\tif value == EXPIRED_LINK_MARKER:\n\t\t\to.append({\"text\": key, \"selectable\":0, \"state\":{\"expanded\": 1, \"disabled\":1}})\n\t\telse:\n\t\t\to.append({\"text\": key, \"href\": value, \"state\":{\"expanded\": 1}})\n return o", "title": "" }, { "docid": "e2d677ea8d7363305a46027b07fc82dd", "score": "0.59417385", "text": "def encode_feedparser_dict(self, d):\n if isinstance(d, feedparser.FeedParserDict) or isinstance(d, dict):\n j = {}\n for k in d.keys():\n j[k] = self.encode_feedparser_dict(d[k])\n return j\n elif isinstance(d, list):\n l = []\n for k in d:\n l.append(self.encode_feedparser_dict(k))\n return l\n else:\n return d", "title": "" }, { "docid": "5ce759cb34592e53343d54c432e0d251", "score": "0.5932397", "text": "def data_to_dicts(h, d):\n return [list_to_dict(h, x) for x in d]", "title": "" }, { "docid": "b1f0851a99770eb8664e7572f0279ebf", "score": "0.5929389", "text": "def _make_mappable(d):\n res = dict(zip(\n d.keys(),\n map(_maybe_repeat, d.values())\n ))\n return res", "title": "" }, { "docid": "f3f12bf961c345f0ddc0cb7fc0568cf1", "score": "0.5911441", "text": "def flatten_dict(d):\n\n def expand(key, val):\n if isinstance(val, dict):\n return [ (key + '.' 
+ k, v) for k, v in flatten_dict(val).items() ]\n else:\n return [ (key, val) ]\n \n items = [ item for k, v in d.items() for item in expand(k, v)]\n\n return dict(items)", "title": "" }, { "docid": "df7b099346da91d63e23d186969f8904", "score": "0.589035", "text": "def flatten(d, parent_key='', sep='__'):\n # http://stackoverflow.com/questions/6027558/flatten-nested-python-dictionaries-compressing-keys\n if type(d) == list:\n flat = []\n for entry in d:\n flat.append(flatten(entry, parent_key=parent_key, sep=sep))\n return flat\n else:\n items = []\n for k, v in list(d.items()):\n new_key = parent_key + sep + k if parent_key else k\n\n if isinstance(v, collections.abc.MutableMapping):\n items.extend(list(flatten(v, new_key, sep=sep).items()))\n else:\n items.append((new_key, v))\n\n return dict(items)", "title": "" }, { "docid": "75a98f269533307c30755b56a6f31d3d", "score": "0.5861966", "text": "def dict_unnest(data, separator='.'):\n res={}\n for k, v in data.iteritems():\n if isinstance(v, dict):\n v=dict_unnest(v, separator)\n for k1, v1 in v.iteritems():\n res[\"%s%s%s\" % (k, separator, k1)]=v1\n else:\n res[k]=v\n return res", "title": "" }, { "docid": "6f516150bdc4372306e305be10bd9ea1", "score": "0.58512896", "text": "def dict_2_dict_list(d: dict) -> dict:\n # ==\n # Base case: if it is a list\n if isinstance(d, list):\n return d\n # Base case: if it is a primitive\n if not isinstance(d, dict):\n return [d]\n # ==\n # Recursion if it is a dict\n new_d = {}\n for k in d:\n new_d[k] = dict_2_dict_list(d[k])\n return new_d", "title": "" }, { "docid": "bcfb787190d48a161aec75a193cac9f5", "score": "0.58325917", "text": "def flatten(_d,parent=''):\r\n d = {}\r\n for key in _d.keys():\r\n d[parent+str(key)] = _d[key]\r\n\r\n for key in d.keys():\r\n \r\n val = d[key]\r\n\r\n # search for any nested dictionaries\r\n if isinstance(val,dict):\r\n if not 'adler32' in val.keys():\r\n d.update(flatten(val,parent=str(key)+'$'))\r\n del d[key]\r\n\r\n return d", "title": "" }, { "docid": "3eaf914e4f01dcd8d458accb4875dc5c", "score": "0.5830542", "text": "def flatten(d, parent_key='', sep='.'):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)", "title": "" }, { "docid": "fbd2b32493aaa40e5492841f9ace68dd", "score": "0.58104193", "text": "def dict_nest(data, separator='.'):\n res={}\n for k in data:\n levels=k.split(separator)\n d=res\n for k1 in levels[:-1]:\n d.setdefault(k1, {})\n d=d[k1]\n d[levels[-1]]=data[k]\n return res", "title": "" }, { "docid": "3e8331da1430b7c230c4ca9d28f3590c", "score": "0.579552", "text": "def to_nested_dict(d):\n return NestedOrderedDict(d)", "title": "" }, { "docid": "cd9fbd084cae529dda8d672175a80656", "score": "0.57514644", "text": "def flatten_dict(d, separator=':', _parent_key=''):\n items = []\n for k, v in d.items():\n new_key = _parent_key + separator + k if _parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten_dict(v,\n separator=separator,\n _parent_key=new_key).items())\n else:\n items.append((new_key, v))\n return dict(items)", "title": "" }, { "docid": "b81c1b86c82e3a6e62214a1ce37a7046", "score": "0.57366335", "text": "def _flatten(d: Dict[str, Any]) -> Iterator[Dict[str, Any]]:\n if _is_flat_dictionary(d):\n yield d\n stack = []\n else:\n stack = [d]\n\n seen_state_hashes: Set[str] = set()\n while stack:\n state = stack.pop()\n 
next_states = _get_next_states(state)\n for state in next_states:\n state_hash = pprint.pformat(state)\n if state_hash in seen_state_hashes:\n continue\n\n seen_state_hashes.add(state_hash)\n if _is_flat_dictionary(state):\n yield state\n else:\n stack.append(state)", "title": "" }, { "docid": "83cb4ad6082477dcd18f3539058d43e1", "score": "0.5677464", "text": "def flatten(d, parent_key='', separator='.'):\n flat_fields = []\n for k, v in d.items():\n flat_key = parent_key + separator + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n flat_fields.extend(flatten(v, flat_key, separator=separator).items())\n else:\n flat_fields.append((flat_key, v))\n return dict(flat_fields)", "title": "" }, { "docid": "990bbee3216951e5a31f9f87cdf729da", "score": "0.56440055", "text": "def test__sterilizeDict_validArgumentRecursive_returnDict(self):\n # flat dict\n ret = hubblestack.extmods.fdg.process._sterilize_dict(\n {1: None, 2: 'a'})\n assert ret == {2: 'a'}\n # nested dicts\n ret = hubblestack.extmods.fdg.process._sterilize_dict(\n {1: None, 2: {3: {4: None, 5: 'a'}, 6: None, 7: 'b'}, 8: 'c', 9: {10: None}})\n assert ret == {2: {3: {5: 'a'}, 7: 'b'}, 8: 'c', 9: {}}\n # nested dicts & sequences\n ret = hubblestack.extmods.fdg.process._sterilize_dict(\n {1: None, 2: {3: [4, {5: None}], 6: {7: ('b', {9: None}), 8: None}}})\n assert ret == {2: {3: [4, {}], 6: {7: ['b', {}]}}}", "title": "" }, { "docid": "ee42ae6e556e97f8c1c423ca7ee6989b", "score": "0.56269795", "text": "def flatten_dict(\n d: Dict,\n format_func=slugify_key,\n) -> Dict:\n\n def items():\n for key, value in d.items():\n if isinstance(value, dict):\n for subkey, subvalue in flatten_dict(\n value, format_func=format_func\n ).items():\n yield format_func(key=key, subkey=subkey), subvalue\n else:\n yield format_func(key=key), value\n\n return dict(items())", "title": "" }, { "docid": "1d7cc2054d268150f73880cfbaaf2c68", "score": "0.5613742", "text": "def to_dict(self, flat=True):\r\n rv = {}\r\n for d in reversed(self.dicts):\r\n rv.update(d.to_dict(flat))\r\n return rv", "title": "" }, { "docid": "8419a1c1c36c84e6fe7203109e0aee87", "score": "0.5593883", "text": "def format_class_dict(d):\n out = {}\n for k, v in d.items():\n if isinstance(v, dict):\n v = format_class_dict(v)\n\n out_key = k.split('__', 1)[-1]\n out[k.replace(k, out_key)] = v\n return out", "title": "" }, { "docid": "1c0462618a54e0bad3cf5d8dbbb10c00", "score": "0.5579497", "text": "def unflatten(d, seperator=\"--\"):\r\n def isint(k):\r\n try:\r\n int(k)\r\n return True\r\n except ValueError:\r\n return False\r\n \r\n def setvalue(data, k, v):\r\n if '--' in k:\r\n k, k2 = k.split(seperator, 1)\r\n setvalue(data.setdefault(k, {}), k2, v)\r\n else:\r\n data[k] = v\r\n \r\n def makelist(d):\r\n \"\"\"Convert d into a list if all the keys of d are integers.\"\"\"\r\n if isinstance(d, dict):\r\n if all(isint(k) for k in d.keys()):\r\n return [makelist(d[k]) for k in sorted(d.keys(), key=int)]\r\n else:\r\n return web.storage((k, makelist(v)) for k, v in d.items())\r\n else:\r\n return d\r\n \r\n d2 = {}\r\n for k, v in d.items():\r\n setvalue(d2, k, v)\r\n return makelist(d2)", "title": "" }, { "docid": "43c985151a7de4a3ce9f26028a096933", "score": "0.5567386", "text": "def createDictOfDict(theDict):\n result = {}\n for p in theDict:\n if p==None:\n result[p] = theDict[p] \n elif '.' 
in p:\n parent = p[:p.find('.')]\n child = p[p.find('.')+1:]\n if type(result[parent])!=dict:\n result[parent] = {None : result[parent]}\n result[parent][child] = theDict[p] \n else:\n result[p] = theDict[p]\n for p in result:\n if type(result[p])==dict:\n result[p] = createDictOfDict(result[p])\n return result", "title": "" }, { "docid": "65dfaa3e8f644dbcf50425dd7b20ee15", "score": "0.5559319", "text": "def flatten_dict(d, parent_key='', joinchar='.'):\n\n items = []\n for k, v in d.items():\n new_key = parent_key + joinchar + k if parent_key else k\n if isinstance(v, collections.MutableMapping):\n items.extend(flatten_dict(v, new_key, joinchar).items())\n else:\n items.append((new_key, v))\n return dict(items)", "title": "" }, { "docid": "cca2bab791743209ada017b494581e49", "score": "0.55423355", "text": "def dictify(t):\n out = {'value': t.value, 'children': {}}\n for ch, child in t.children.items():\n out['children'][ch] = dictify(child)\n return out", "title": "" }, { "docid": "fc51b5cf5317f2a54e8319e4c24c73ab", "score": "0.5538471", "text": "def recursive_dict():\n return collections.defaultdict(recursive_dict)", "title": "" }, { "docid": "2baec4855e25081e8798716051c5143d", "score": "0.5533168", "text": "def flattendict(self,d,s=[]):\n for k, v in d.items():\n # Note that the condition below prevents the _count property from\n # being added to the list over and over again.\n if k[:1] != \"_\":\n s.append(k)\n if isinstance(v, dict) and v:\n s = self.flattendict(v, s)\n return s", "title": "" }, { "docid": "996ea4c8f467316e38944d3166c7475c", "score": "0.54970247", "text": "def flatten_dict(d: MutableMapping, parent_key: str='', delimiter: str='.'):\n\n def _flatten_dict(d, parent_key='', delimiter='.'):\n for k, v in d.items():\n key = str(parent_key) + delimiter + str(k) if parent_key else k\n if v and isinstance(v, MutableMapping):\n yield from flatten_dict(v, key, delimiter=delimiter).items()\n else:\n yield key, v\n return dict(_flatten_dict(d, parent_key, delimiter))", "title": "" }, { "docid": "4791fa430e999254584f4cd55109e879", "score": "0.54411423", "text": "def _deep_update(self, d, u):\n for k, v in u.items():\n if isinstance(v, collections.abc.Mapping):\n d[k] = self._deep_update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "title": "" }, { "docid": "6cc9cd334f4362ffe89f94571d0f24ff", "score": "0.54368436", "text": "def fix_depth_recursive_defaultdict(depth=3, t=int):\n if depth==1:\n return collections.defaultdict(t)\n else:\n return collections.defaultdict(\n lambda: fix_depth_recursive_defaultdict(depth-1, t))", "title": "" }, { "docid": "21a4b1d7c5a72ade793d7deeb1fbded3", "score": "0.54176456", "text": "def _get_next_states(d: Dict[str, Any]) -> Iterator[Dict[str, Any]]:\n for key, value in d.items():\n if isinstance(value, list):\n for child_value in value:\n new_state = {\n **d,\n **{key: child_value}\n }\n yield new_state\n elif isinstance(value, dict):\n # Create a copy\n new_state: Dict[str, Any] = copy.deepcopy(d)\n new_state.pop(key) # remove parent key\n for child_key, child_value in value.items():\n joined_key = \"{}__{}\".format(key, child_key)\n new_state[joined_key] = child_value\n yield new_state\n else:\n continue", "title": "" }, { "docid": "e973d8c0fac70efb4e5b167d825cba51", "score": "0.5375451", "text": "def flattendict(di, **kwargs):\n di = dict(di)\n if not kwargs.get('keep_private_fields', True):\n clearPrivateFields(di)\n\n # Default is squashing anything or the specified fields in squash\n squash = kwargs.get('squash', [])\n preserve = 
kwargs.get('preserve', None)\n\n # If both parameters indicated, don't squash anything\n if 'preserve' in kwargs and 'squash' in kwargs:\n squash = []\n # If only preserved was indicated, squash\n elif preserve is not None:\n squash = set(di.keys()) - set(preserve)\n\n for key in di.keys():\n value = di[key]\n if isinstance(value, dict) or isinstance(value, list):\n di[key] = flatten(value, **kwargs)\n else:\n decodeBSONEntity(di, key)\n newkey = deUnderescore(di, key)\n if key in squash or newkey in squash:\n del di[newkey]\n return di", "title": "" }, { "docid": "24af7b76a0397c76488660f8a6585173", "score": "0.5364677", "text": "def recursive_detach(in_dict: dict) -> dict:\n out_dict = {}\n for k, v in in_dict.items():\n if isinstance(v, dict):\n out_dict.update({k: recursive_detach(v)})\n elif callable(getattr(v, 'detach', None)):\n out_dict.update({k: v.detach()})\n else:\n out_dict.update({k: v})\n return out_dict", "title": "" }, { "docid": "203fa51936cf6580e47a968fcca2aa94", "score": "0.53644043", "text": "def recursive_dict_update(d, u):\n for k, v in u.items():\n if isinstance(v, Mapping):\n r = recursive_dict_update(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d", "title": "" }, { "docid": "b04624ccafe84bb485500be62795b9da", "score": "0.53625524", "text": "def dmap(f, d):\n try:\n target = d.iteritems()\n return {k: f(v) for k, v in target}\n except AttributeError:\n return dict(it.izip(d, it.imap(f, d)))", "title": "" }, { "docid": "87ed8ed79538dd657ed579b406e6202c", "score": "0.5348528", "text": "def nested_d():\n return defaultdict(nested_d)", "title": "" }, { "docid": "12926a5520d1d2711651b18327650bfc", "score": "0.534671", "text": "def __init__(self, d, sep=\"__\"):\n self.__dict__ = flatten(d, sep=sep)", "title": "" }, { "docid": "eaad29e053cf0a134d0a47829dd11c5d", "score": "0.53396034", "text": "def _inflate_dotted(input_dict):\n output_dict = {}\n for key, value in input_dict.items():\n ref = output_dict\n parts = key.split('.')\n for part in parts[:-1]:\n if part not in ref:\n ref[part] = {}\n ref = ref[part]\n ref[parts[-1]] = value\n return output_dict", "title": "" }, { "docid": "dd78a9f410d5afaa26091577227f05c7", "score": "0.53245133", "text": "def ld2dl(d):\n if not d:\n return d\n d_out = {k: [] for dd in d for k in dd.keys()}\n for o in d:\n for k, v in o.items():\n d_out[k].append(v)\n return d_out", "title": "" }, { "docid": "2116a7dc262ba788522590a70516bc0f", "score": "0.5311112", "text": "def strip_keys(d, nones=False, depth=0):\n ans = type(d)((str(k).strip(), v) for (k, v) in OrderedDict(d).iteritems() if (not nones or (str(k).strip() and str(k).strip() != 'None')))\n if int(depth) < 1:\n return ans\n if int(depth) > strip_keys.MAX_DEPTH:\n warnings.warn(RuntimeWarning(\"Maximum recursion depth allowance (%r) exceeded.\" % strip_keys.MAX_DEPTH))\n for k, v in ans.iteritems():\n if isinstance(v, Mapping):\n ans[k] = strip_keys(v, nones=nones, depth=int(depth)-1)\n return ans", "title": "" }, { "docid": "7007fefdb5b69d214029cdf36071aab4", "score": "0.53088814", "text": "def update_dict(d, u):\n for k in u:\n dv = d.get(k, {})\n if not isinstance(dv, collections.Mapping):\n d[k] = u[k]\n elif isinstance(u[k], collections.Mapping):\n d[k] = update_dict(dv, u[k])\n else:\n d[k] = u[k]\n return d", "title": "" }, { "docid": "0efc498b72e6cf8b595e90dc4fe9aa34", "score": "0.53027004", "text": "def convert_dict(self, d, is_binned=False, is_filtered=False):\n msd = MultiStateData.from_state_data(d)\n return self.convert_msd(msd, is_binned, is_filtered)", 
"title": "" }, { "docid": "799360797f388451c25e98831405993c", "score": "0.53000504", "text": "def nesting(data, keys):\n X = keys\n d = data\n\n for path in X:\n current_level = d\n for i,part in enumerate(path):\n if part not in current_level:\n current_level[part] = {}\n current_level = current_level[part]\n return d", "title": "" }, { "docid": "f187fdd9c2d4ff6ae439a2eb2c188e41", "score": "0.52917737", "text": "def dflatten(cls, mapping, parent_key='', sep='.'):\n d = {}\n for k, v in mapping.items():\n try:\n key = parent_key + sep + k if parent_key else k\n except TypeError:\n continue\n\n if isinstance(v, dict):\n cls.update(d, cls.dflatten(v, key, sep))\n else:\n d[key] = deepcopy(v)\n return d", "title": "" }, { "docid": "2604fd74738a886f2217f48176aaeb1b", "score": "0.5280755", "text": "def recursive_update(d: dict, u: dict) -> dict:\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n d[k] = recursive_update(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "title": "" }, { "docid": "b11f3eb4c5425d48aaa94b7fe406a1d1", "score": "0.5276376", "text": "def fold_dict(data, nb_level=10**5):\n if nb_level <= 0:\n return data\n\n groups = dict()\n levels = set()\n for key, value in data.items():\n idx = key.find('/')\n if idx > 0:\n level = key[:idx]\n group_dict = groups.setdefault(level, dict())\n group_dict[key[(idx + 1):]] = value\n levels.add(level)\n else:\n groups[key] = value\n for level in levels:\n groups[level] = fold_dict(groups[level], nb_level - 1)\n return groups", "title": "" }, { "docid": "f8002435ac9bc327bc93d80ba4ee1cba", "score": "0.5243325", "text": "def sbv6(d,reverse=False):\n return dict(sorted(d.iteritems(), key=itemgetter(1), reverse=True))", "title": "" }, { "docid": "7cb7939ce6ee83087cc09c81d0be397d", "score": "0.5240468", "text": "def flatten_dict(d, root=True):\n for k, v in d.items():\n\n if isinstance(v, dict):\n for item in flatten_dict(v, False):\n\n # At root level, break away the key path from the value.\n if root:\n yield ((k,) + item[:-1], item[-1])\n\n # Otherwise build up the key chain.\n else:\n yield (k,) + item\n\n else:\n yield (k, v)", "title": "" }, { "docid": "ef29a8110ae3c5d9d59b78cbb3b7c743", "score": "0.5224854", "text": "def grouptodict(h5group, d):\n for name in h5group:\n if isinstance(h5group[name], h5py.Group):\n if name == \"casts\": # Special case the list of Cast dicts\n d[\"casts\"] = []\n for _, group in h5group[name].items():\n d[\"casts\"].append(grouptodict(group, dict()))\n else:\n d[name] = grouptodict(h5group[name], dict())\n else:\n d[name] = h5group[name].value\n return d", "title": "" }, { "docid": "92804fd78ad18e87d98f4d181e3cad9a", "score": "0.52193946", "text": "def as_dict(self, flat=False, sanitize=True):\r\n\r\n SERIALIZABLE_TYPES = (tuple, dict, list, int, long, float,\r\n basestring, type(None), bool)\r\n def loop(d):\r\n newd = dict()\r\n for k, v in d.items():\r\n if k in (\"first\", \"second\"):\r\n if isinstance(v, self.__class__):\r\n newd[k] = loop(v.__dict__)\r\n elif isinstance(v, Field):\r\n newd[k] = {\"tablename\": v._tablename,\r\n \"fieldname\": v.name}\r\n elif isinstance(v, Expression):\r\n newd[k] = loop(v.__dict__)\r\n elif isinstance(v, SERIALIZABLE_TYPES):\r\n newd[k] = v\r\n elif isinstance(v, (datetime.date,\r\n datetime.time,\r\n datetime.datetime)):\r\n newd[k] = unicode(v)\r\n elif k == \"op\":\r\n if callable(v):\r\n newd[k] = v.__name__\r\n elif isinstance(v, basestring):\r\n newd[k] = v\r\n else: pass # not callable or string\r\n elif isinstance(v, SERIALIZABLE_TYPES):\r\n 
if isinstance(v, dict):\r\n newd[k] = loop(v)\r\n else: newd[k] = v\r\n return newd\r\n\r\n if flat:\r\n return loop(self.__dict__)\r\n else: return self.__dict__", "title": "" }, { "docid": "0656259f5bf7ecb7b5e2acd1575e91ec", "score": "0.5211727", "text": "def recursivedict():\n return defaultdict(recursivedict)", "title": "" }, { "docid": "a87490b07bbc5b47b9780774f316c688", "score": "0.52048284", "text": "def as_dict(x, depth=0, maxdepth=30):\n if depth == 0 and not x:\n return {}\n elif depth > maxdepth:\n raise exceptions.ConfigError()\n\n if type(x) is list:\n y = dict(ChainMap(*[dict(xi) for xi in x if type(xi) is OrderedDict]))\n z = [xi for xi in x if type(xi) is not OrderedDict]\n if len(z):\n if y:\n x = [y]\n else:\n x = []\n x.extend(z)\n else:\n x = y\n elif type(x) is OrderedDict:\n x = dict(x)\n\n if type(x) is dict:\n for key, value in x.items():\n x[key] = as_dict(value, depth + 1)\n\n return x", "title": "" }, { "docid": "e538de5256e763d8a5e088d288052bfb", "score": "0.5194768", "text": "def build_nested_dict(entries):\n result = {}\n if not entries:\n return {}\n for raw_path, value in entries.items():\n path = sanitize(raw_path)\n basename = os.path.basename(path)\n subpaths = path.split('/')[1:]\n subentry = result\n current = subentry\n for subpath in subpaths:\n current = subentry\n subentry = subentry.setdefault(subpath, {})\n current[basename] = value\n return result", "title": "" }, { "docid": "e3ae14948c58beef2decb6651681def7", "score": "0.5189723", "text": "def strip_empty_lists(self, d):\r\n def emptylist(x):\r\n return isinstance(x, list) and len(x) == 0\r\n\r\n return dict((k, v) for k, v in d.items() if not emptylist(v))", "title": "" }, { "docid": "76d730a1b419af5061bd0667391d55fa", "score": "0.5142", "text": "def invert_dict_fast(d):\r\n return dict(zip(d.values(), d.keys()))", "title": "" }, { "docid": "d0b4bb3e125ab930639b91aa07d8c352", "score": "0.51361066", "text": "def mapd(f, d):\n r = {}\n for k, v in d.iteritems():\n r[k] = f(v)\n return r", "title": "" }, { "docid": "ef190f1b24b063251e05dbfa955dcae1", "score": "0.51233816", "text": "def trim(d):\r\n return dict((k, v) for k, v in d.iteritems() if v)", "title": "" }, { "docid": "050a28e2beff40b79aaec9682691ee0d", "score": "0.51200837", "text": "def fixDict(d):\n\n newd = {}\n\n for k, v in d.iteritems():\n tmp = {}\n\n for k2, v2 in v.iteritems():\n\n if v2 is list:\n tmp[k2] = v2\n else:\n try:\n if not numpy.isnan(v2):\n tmp[k2] = v2\n except TypeError:\n tmp[k2] = v2\n\n newd[k] = tmp\n\n return newd", "title": "" }, { "docid": "ef7c8d7f5ff833b0bcf21a305e31670d", "score": "0.51051223", "text": "def recursive_update(d: Dict, u: Mapping) -> Dict:\n\n new = deepcopy(d)\n\n for k, v in u.items():\n if isinstance(v, collections.Mapping) and isinstance(d.get(k), (collections.Mapping, type(None))):\n new[k] = recursive_update(d.get(k, {}), v)\n\n elif isinstance(v, (set, list, tuple)):\n if isinstance(d.get(k), (set, list, tuple)):\n # Merge lists of uniques. I really want this helper to eat anything and return what it should. 
:)\n nv = list(set(d[k])) + list(v)\n try:\n # The types of values in list could be unhashable, so it is not that easy filter uniques.\n new[k] = list(set(nv))\n except TypeError:\n # In this case we just merge lists as is.\n new[k] = nv\n else:\n new[k] = v\n else:\n new[k] = v\n\n return new", "title": "" }, { "docid": "43d1b0a8167afb33c022d8268ed75d07", "score": "0.5104042", "text": "def make_nested_dict():\n return collections.defaultdict(make_nested_dict)", "title": "" }, { "docid": "df9c52e9d7b7ce1aa6f929cc9b2465df", "score": "0.5097377", "text": "def dotize(data):\n data = dotdict(data)\n for k, v in data.items():\n if isinstance(v, dict):\n data[k] = dotdict(v)\n return data", "title": "" }, { "docid": "228720db295f10126c4ee9b9c34fecd1", "score": "0.5095273", "text": "def _eval_recursive(dictionary):\n for k in dictionary:\n if isinstance(dictionary[k], dict):\n dictionary[k] = _eval_recursive(dictionary[k])\n else:\n try:\n dictionary[k] = eval(dictionary[k])\n except (NameError, SyntaxError, TypeError):\n pass\n return dictionary", "title": "" }, { "docid": "f8b90d4228fb11a42ef44eff28fad063", "score": "0.5091842", "text": "def de_dot_dict(data):\n for key, value in data.iteritems():\n if isinstance(value, dict):\n de_dot_dict(value)\n if '.' in key:\n data[key.replace('.', '-')] = data.pop(key)", "title": "" }, { "docid": "e71e21dbdef520070e2f7c950c4dce59", "score": "0.5072346", "text": "def flatten(d, parent_key='', separator='__'):\n items = []\n for k, v in d.items():\n new_key = parent_key + separator + k if parent_key else k\n if isinstance(v, dict):\n items.extend(flatten(v, new_key, separator).items())\n else:\n items.append((new_key, v))\n return OrderedDict(items)", "title": "" }, { "docid": "ad4da18f60699757b200325551da4e9d", "score": "0.50694263", "text": "def create_recursion_dict(m):\n new_dict = {}\n for r in range(len(m[\"maze\"])):\n for c in range(len(m[\"maze\"][0])):\n if m[\"maze\"][r][c] != 1:\n new_dict[(r, c)] = []\n if r + 1 < m[\"dimensions\"][0] and m[\"maze\"][r + 1][c] != 1:\n new_dict[(r, c)].append((r + 1, c))\n if c + 1 < m[\"dimensions\"][1] and m[\"maze\"][r][c + 1] != 1:\n new_dict[(r, c)].append((r, c + 1))\n return new_dict", "title": "" }, { "docid": "3ad19ed347cb6db193f1175201a35542", "score": "0.5067605", "text": "def asdict(data, prune=True):\n\n factory = pruned_dict if prune else dict\n\n return dataclasses.asdict(data, dict_factory=factory)", "title": "" }, { "docid": "f5ecfec9ec853b959a7ebc91d350ffce", "score": "0.50655425", "text": "def norm_dict(d):\n return {normalize(k): normalize(v) for k, v in dict(d).items()}", "title": "" }, { "docid": "120de9c48909ada10f33d718e8197e13", "score": "0.50596833", "text": "def rexpando(d):\n e = Expando()\n for k, v in d.iteritems():\n k = _key_to_attribute(k)\n if isinstance(v, dict):\n e[k] = rexpando(v)\n elif isinstance(v, _iter_types):\n e[k] = _rexpando_iter_helper(v)\n else:\n e[k] = v\n return e", "title": "" }, { "docid": "8c11dee67974d355c66ed017ef026908", "score": "0.50559723", "text": "def __init__(self, d):\n for a, b in d.items():\n if isinstance(b, (list, tuple)):\n setattr(self, a, [Dict2Obj(x) if isinstance(x, dict) else x for x in b])\n else:\n setattr(self, a, Dict2Obj(b) if isinstance(b, dict) else b)", "title": "" }, { "docid": "d83db23bc7117f25dac20b374ad5d7b3", "score": "0.50515777", "text": "def _flatten(d, parent_key='', sep='_', int_to_float=False, remove_null=False, flatten_list=False):\n\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key 
else k\n # Keep it as a list but continue to separate nested fields\n if isinstance(v, list):\n if flatten_list:\n my_elems = []\n for w in v:\n my_elems_w = []\n if isinstance(w, dict):\n my_elems_w.extend(_flatten(w, sep=sep, int_to_float=int_to_float, remove_null=remove_null, flatten_list=flatten_list).items())\n elif isinstance(w, str):\n my_elems.append(w)\n continue\n elif w is not None:\n my_elems.append(w)\n continue\n else:\n if not remove_null:\n my_elems.append('null')\n continue\n # Put in in alphabetical order\n my_elems_w = sorted(my_elems_w, key=lambda tup: tup[0])\n my_elems.append(dict(my_elems_w))\n items.append((new_key, my_elems))\n else:\n items.append((new_key, v))\n elif isinstance(v, dict):\n items.extend(_flatten(v, new_key, sep=sep, int_to_float=int_to_float, remove_null=remove_null, flatten_list=flatten_list).items())\n else:\n if isinstance(v, int) and int_to_float:\n items.append((new_key, float(v)))\n else:\n if v is not None:\n items.append((new_key, v))\n return dict(items)", "title": "" }, { "docid": "ffd4ea64dca4f2e9263020045e4f9d4c", "score": "0.5009567", "text": "def convert_boulders_dict(boulders):\n\n results = []\n\n for boulder in boulders:\n boulder_dict = convert_boulder_dict(boulder)\n\n results.append(boulder_dict)\n\n return results", "title": "" }, { "docid": "dc80e35ee7fdf3efbb262819088b5705", "score": "0.5009412", "text": "def nestedupdate(d, u):\n for k, v in u.items():\n if isinstance(v, collections.abc.Mapping):\n d[k] = nestedupdate(d.get(k, {}), v)\n else:\n d[k] = v\n return d", "title": "" }, { "docid": "ed4719630f4a4b0400cc2e2f83174ae7", "score": "0.5008236", "text": "def nestedDictUpdate(d, u):\n for k, v in u.items():\n if isinstance(v, collections.Mapping):\n r = nestedDictUpdate(d.get(k, {}), v)\n d[k] = r\n else:\n d[k] = u[k]\n return d", "title": "" }, { "docid": "0a9ca08a9b7a905a2e9db53e97b02048", "score": "0.5000662", "text": "def to_dict(obj, visited=None):\n visited = visited or []\n if is_list(obj) or is_tuple(obj) or is_set(obj):\n return map(lambda x: to_dict(x, visited), obj)\n elif hasattr(obj, '__dict__'):\n return dict([(k, to_dict(v, visited + [(k, v)])) for k, v in vars(obj).items()\n if not k.startswith('_') and not callable(v) and not (k, v) in visited])\n elif is_dict(obj):\n return dict([(k, to_dict(v, visited + [(k, v)])) for k, v in obj.items()\n if not (k, v) in visited])\n else:\n return obj", "title": "" }, { "docid": "8739f4991faaafbeae790795e9431bdf", "score": "0.49896803", "text": "def flatten_dict(dictionary, delimiter='_'):\n\n def unpack(parent_key, parent_value):\n \"\"\"Unpack one level of nesting in a dictionary\"\"\"\n try:\n items = parent_value.items()\n except AttributeError:\n # parent_value was not a dict, no need to flatten\n yield (parent_key, parent_value)\n else:\n for key, value in items:\n yield (parent_key + delimiter + key, value)\n\n while True:\n # Keep unpacking the dictionary until all value's are not dictionary's\n dictionary = dict(chain.from_iterable(starmap(unpack, dictionary.items())))\n if not any(isinstance(value, dict) for value in dictionary.values()):\n break\n\n return dictionary", "title": "" }, { "docid": "0da7e6a4bd0956cfa6c54dded4ea4124", "score": "0.4989298", "text": "def restructure_metabolisms(metabolism_data):\n restr_dict = {}\n for doc in metabolism_data:\n key = doc[\"drug_chembl_id\"]\n restr_dict.setdefault(key, []).append(doc)\n restr_dict = unlist(restr_dict)\n return restr_dict", "title": "" }, { "docid": "39517644b847cf3ab23f9ca2db1ca345", "score": 
"0.49842775", "text": "def dict_to_object(self, d):\r\n for k, v in d.items():\r\n if isinstance(v, basestring) and len(v) == 19:\r\n try:\r\n d[k] = datetime(*time.strptime(v, \"%Y-%m-%dT%H:%M:%S\")[:6])\r\n except ValueError:\r\n pass\r\n elif isinstance(v, list):\r\n d[k] = [self.string_to_datetime(elem) for elem in v]\r\n return DotDict(d)", "title": "" }, { "docid": "fac2ef42623c9595cacfcf69b9d49dbf", "score": "0.49782324", "text": "def reverse_dict(d):\n result = {}\n for key in d:\n result[key] = result.get(key, tuple())\n for val in d[key]:\n result[val] = result.get(val, tuple()) + (key, )\n return result", "title": "" }, { "docid": "bb5edaffdd4fdd9ef29c8c6bf799e37e", "score": "0.4975957", "text": "def _convert_flat_to_nested_cols(cls, dic, separator='.'):\n for key in list(dic.keys()):\n if separator in key:\n new_key, nested_key = key.split(separator, 1)\n new_value = dic.get(new_key, {})\n new_value = {} if new_value in [None, np.nan, 'nan'] else new_value\n new_value[nested_key] = dic[key]\n dic.pop(key, None)\n new_value = cls._convert_flat_to_nested_cols(\n new_value, separator\n )\n dic[new_key] = new_value\n return dic", "title": "" }, { "docid": "dd0e72c6e162112849d75153f54f5ce1", "score": "0.49675265", "text": "def nested_dict():\n return collections.defaultdict(nested_dict)", "title": "" }, { "docid": "8537b702806683eaaf0093937581cd90", "score": "0.4967059", "text": "def my_deepcopy(d : dict) -> dict:\n new_dict = {}\n\n for key, value in d.items():\n #print(key,value)\n new_value= value.copy()\n new_dict[key] = new_value\n return new_dict", "title": "" }, { "docid": "94748b1b2a07253549e6552838febd54", "score": "0.4958293", "text": "def update_dict(d, u, depth=-1, default_map=dict, default_set=set, prefer_update_type=False):\n arg_types = (type(d), type(u))\n dictish = arg_types[int(prefer_update_type) % 2] if arg_types[int(prefer_update_type) % 2] is Mapping else default_map\n #settish = types[int(prefer_update_type) % 2] if types[int(prefer_update_type) % 2] is (set, list, tuple) else default_set\n for k, v in u.iteritems():\n if isinstance(v, Mapping) and not depth == 0:\n r = update_dict(d.get(k, dictish()), v, depth=max(depth - 1, -1))\n d[k] = r\n elif isinstance(d, Mapping):\n d[k] = u[k]\n else:\n d = dictish([(k, u[k])])\n return d", "title": "" }, { "docid": "1a349eb89a8993c8f18c8ba7ef2eedcb", "score": "0.49509963", "text": "def schedule(d):\n\n for key, value in d.items():\n merge_sort(value, len(value))\n solution = activity_schedule(value)\n d[key] = solution\n\n return d", "title": "" }, { "docid": "8b922875318fff607aa39e4acb3e77e2", "score": "0.49442288", "text": "def unflatten(dictionary):\n\n resultDict = dict()\n for key, value in dictionary.items():\n parts = key.split(\".\")\n d = resultDict\n for part in parts[:-1]:\n if part not in d:\n d[part] = dict()\n d = d[part]\n d[parts[-1]] = value\n return resultDict", "title": "" }, { "docid": "0a803ab45c9b981569ee42bfac6832de", "score": "0.49414748", "text": "def dict_key_flatten(data):\n next_data = {}\n\n # check for non-string iterables\n if not any((isinstance(v, collections.Iterable) and not isinstance(v, str))\n for v in data.values()):\n return data\n\n for key, val in data.items():\n if isinstance(val, collections.Mapping):\n for n_k, n_v in val.items():\n next_data[\"%s.%s\" % (key, n_k)] = n_v\n elif isinstance(val, collections.Iterable) and not isinstance(val,\n str):\n for index, item in enumerate(val):\n next_data[\"%s%d\" % (key, index)] = item\n else:\n next_data[key] = val\n\n return 
dict_key_flatten(next_data)", "title": "" }, { "docid": "a112d36597bf449ded2bb3e33f751504", "score": "0.49405748", "text": "def _update_and_merge(self, din, u, depth=-1, do_copy=True):\n \n if do_copy:\n d = deepcopy(din)\n else:\n d = din \n for k, v in u.items():\n if isinstance(v, collections.Mapping) and not depth == 0:\n r = self._update_and_merge(d.get(k, {}), v, depth=max(depth - 1, -1))\n d[k] = r\n elif isinstance(d, collections.Mapping):\n d[k] = u[k]\n else:\n d = {k: u[k]}\n \n return d", "title": "" }, { "docid": "633cd5a10cc122fd3ac62d2287bb86bd", "score": "0.493949", "text": "def flatten_dict(a, b = None, res = None):\n if res == None:\n res = {}\n\n for x,y in a.items():\n if isinstance(y, dict):\n flatten_dict(y, x, res)\n else:\n if b is None:\n res[x] = y\n else:\n res[b + '.' + x] = y\n\n return res", "title": "" }, { "docid": "672d55fef7f1e1be6d8b0a0ade95d710", "score": "0.49393457", "text": "def _walk(data):\n if isinstance(data, dict):\n for key, value in data.items():\n if isinstance(value, basestring) or \\\n isinstance(value, float) or \\\n isinstance(value, int) or \\\n isinstance(value, long):\n flat_dict[key] = value\n elif isinstance(value, list) or isinstance(value, tuple):\n for item in value:\n _walk(item)\n elif isinstance(value, dict):\n _walk(value)\n elif isinstance(data, list) or isinstance(data, tuple):\n for item in data:\n _walk(item)", "title": "" }, { "docid": "c2f69a97f910fc48cfb218b3233f1b41", "score": "0.49274573", "text": "def convert_keys(data):\n cdata = {}\n for key, value in data.iteritems():\n node = cdata\n tokens = key.split(\".\")\n while len(tokens) > 1:\n key = tokens.pop(0)\n if key not in node:\n node[key] = {}\n node = node[key]\n key = tokens.pop()\n\n if type(value) == dict:\n value = convert_keys(value)\n\n node[key] = value\n return cdata", "title": "" }, { "docid": "39422e3b209d2bccd4690cc076c2223c", "score": "0.49249235", "text": "def walk_recursive(f, data):\n results = {}\n if isinstance(data, list):\n return [walk_recursive(f, d) for d in data]\n elif isinstance(data, dict):\n results = funcy.walk_keys(f, data)\n\n for k, v in data.iteritems():\n if isinstance(v, dict):\n results[f(k)] = walk_recursive(f, v)\n elif isinstance(v, list):\n results[f(k)] = [walk_recursive(f, d) for d in v]\n else:\n return f(data)\n\n return results", "title": "" }, { "docid": "b5ec055ce9ab8e7580c2761d233ddce5", "score": "0.49237502", "text": "def non_none_dict(d):\n return dict([a for a in d.items() if not a[1] is None])", "title": "" }, { "docid": "0613f1d74a8f5bd708df9dfa90a3e067", "score": "0.4903526", "text": "def dict(self):\r\n newdict = {}\r\n for entry in self:\r\n this_entry = self[entry]\r\n if isinstance(this_entry, Section):\r\n this_entry = this_entry.dict()\r\n elif isinstance(this_entry, list):\r\n # create a copy rather than a reference\r\n this_entry = list(this_entry)\r\n elif isinstance(this_entry, tuple):\r\n # create a copy rather than a reference\r\n this_entry = tuple(this_entry)\r\n newdict[entry] = this_entry\r\n return newdict", "title": "" } ]
9eb6839d03e64232b9bb7c334225d01a
Get the bcurve size with more options
[ { "docid": "0032ab02905825b65d578747246c6ddb", "score": "0.5603801", "text": "def IGetBCurveParamsSize3(self, WantCubicIn=defaultNamedNotOptArg, WantNRational=defaultNamedNotOptArg, ForceNonPeriodic=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(61, LCID, 1, (3, 0), ((11, 1), (11, 1), (11, 1)),WantCubicIn\n\t\t\t, WantNRational, ForceNonPeriodic)", "title": "" } ]
[ { "docid": "11706e10d76bacfd435dc896fd9ade88", "score": "0.7180612", "text": "def IGetBCurveParamsSize(self, WantCubicIn=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(16, LCID, 1, (3, 0), ((11, 1),),WantCubicIn\n\t\t\t)", "title": "" }, { "docid": "07a2610952d94e5bee604d2c9c9239ff", "score": "0.6706694", "text": "def IGetBCurveParamsSize2(self, WantCubic=defaultNamedNotOptArg, WantNRational=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(26, LCID, 1, (3, 0), ((11, 1), (11, 1)),WantCubic\n\t\t\t, WantNRational)", "title": "" }, { "docid": "801900973dcfb54bc7f642d1d85b70d8", "score": "0.64569163", "text": "def IConvertPcurveToBcurveSize(self, Dim=defaultNamedNotOptArg, Order=defaultNamedNotOptArg, Nsegs=defaultNamedNotOptArg, Coeffs=defaultNamedNotOptArg\n\t\t\t, Basis=defaultNamedNotOptArg, Xform=defaultNamedNotOptArg, ScaleFactor=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(22, LCID, 1, (3, 0), ((3, 1), (3, 1), (3, 1), (16389, 1), (3, 1), (16389, 1), (5, 1)),Dim\n\t\t\t, Order, Nsegs, Coeffs, Basis, Xform\n\t\t\t, ScaleFactor)", "title": "" }, { "docid": "24567aa123789bd7091bf4eca194c72b", "score": "0.6360128", "text": "def IGetBCurveParams(self):\n\t\treturn self._oleobj_.InvokeTypes(15, LCID, 1, (5, 0), (),)", "title": "" }, { "docid": "29e9168b9362573b6f4fea6ea163d692", "score": "0.61229604", "text": "def IConvertLineToBcurveSize(self, StartPoint=defaultNamedNotOptArg, EndPoint=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(17, LCID, 1, (3, 0), ((16389, 1), (16389, 1)),StartPoint\n\t\t\t, EndPoint)", "title": "" }, { "docid": "49ba9821219c79adc1a30c191255d4c5", "score": "0.611059", "text": "def IGetBSurfParamsSize(self, WantCubicRational=defaultNamedNotOptArg, Range=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(40, LCID, 1, (3, 0), ((11, 1), (16389, 1)),WantCubicRational\n\t\t\t, Range)", "title": "" }, { "docid": "ea8013643031285fb8309d0eac17d476", "score": "0.6020123", "text": "def IGetPCurveParamsSize(self):\n\t\treturn self._oleobj_.InvokeTypes(20, LCID, 1, (3, 0), (),)", "title": "" }, { "docid": "ab20b1d8d3e0cf8bad71ac284f341320", "score": "0.58716875", "text": "def IGetBSurfParamsSize2(self, WantCubic=defaultNamedNotOptArg, WantNonRational=defaultNamedNotOptArg, Range=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(45, LCID, 1, (3, 0), ((11, 1), (11, 1), (16389, 1)),WantCubic\n\t\t\t, WantNonRational, Range)", "title": "" }, { "docid": "170127d09facf7b36486a9ac97a6955e", "score": "0.581982", "text": "def IConvertArcToBcurveSize(self, Center=defaultNamedNotOptArg, Axis=defaultNamedNotOptArg, Start=defaultNamedNotOptArg, End=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(18, LCID, 1, (3, 0), ((16389, 1), (16389, 1), (16389, 1), (16389, 1)),Center\n\t\t\t, Axis, Start, End)", "title": "" }, { "docid": "543215dbe87bb1170b2282f604f6aa6a", "score": "0.5700617", "text": "def IGetBCurveParams3(self, ArraySize=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(60, LCID, 1, (5, 0), ((3, 1),),ArraySize\n\t\t\t)", "title": "" }, { "docid": "9a7bf424637acfb78c5d26a7bd20f5d0", "score": "0.56411976", "text": "def GetBCurveParams(self, WantCubicIn=defaultNamedNotOptArg):\n\t\treturn self._ApplyTypes_(7, 1, (12, 0), ((11, 1),), u'GetBCurveParams', None,WantCubicIn\n\t\t\t)", "title": "" }, { "docid": "5fa1111855ea62e69d3dacbaeab7888e", "score": "0.5631002", "text": "def get_beam_size():\n return beam_size", "title": "" }, { "docid": "fc3bd13a7ed2197aad974c5fb7a22a64", "score": 
"0.5625333", "text": "def IGetTrimCurveSize(self, WantCubic=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(64, LCID, 1, (3, 0), ((11, 1),),WantCubic\n\t\t\t)", "title": "" }, { "docid": "fc3bd13a7ed2197aad974c5fb7a22a64", "score": "0.5625333", "text": "def IGetTrimCurveSize(self, WantCubic=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(64, LCID, 1, (3, 0), ((11, 1),),WantCubic\n\t\t\t)", "title": "" }, { "docid": "0d03f101057e139ba2d88a15e25f840f", "score": "0.555909", "text": "def BLK_PROP(B: float) -> float:\n return np.sqrt(B) / 1000 + 0.63", "title": "" }, { "docid": "a621859fc94999b04a486f6999fd17e7", "score": "0.55543524", "text": "def getSize( self ):\n sx, sy = self.layConstraints['scale']\n return ( sx * self.sizeX, sy * self.sizeY)", "title": "" }, { "docid": "762e9709299704fa784204005f8cde99", "score": "0.547959", "text": "def GetCurveParams(self):\n\t\treturn self._ApplyTypes_(10, 1, (12, 0), (), u'GetCurveParams', None,)", "title": "" }, { "docid": "732efc3a595ae300f8635ccdd2a2859d", "score": "0.54756236", "text": "def GetCurveParams(self):\n\t\treturn self._ApplyTypes_(2, 1, (12, 0), (), u'GetCurveParams', None,)", "title": "" }, { "docid": "b44def7538de23a89b7bf7b372180add", "score": "0.5455278", "text": "def _getSolutionSize(points, verbiage):\n if not points:\n solSize = verbiage if verbiage else 'Unknown'\n elif points <= 3:\n solSize = \"Small\"\n elif points <= 8:\n solSize = \"Medium\"\n elif points <= 99:\n solSize = \"Large\"\n\n return solSize", "title": "" }, { "docid": "b250c3a8a7134e583ab975f8d61ef667", "score": "0.5449232", "text": "def getSizeRateFromBU(self):\n\t\tsizes, rates = [],[]\n\t\tfor bu in self.config.BUs:\n\t\t\tsizes.append(int(utils.getParam(bu, 'evb::BU',\n\t\t\t\t 'eventSize', 'xsd:unsignedInt')))\n\t\t\trates.append(int(utils.getParam(bu, 'evb::BU',\n\t\t\t\t 'eventRate', 'xsd:unsignedInt')))\n\t\tav_size = reduce(lambda a,b:a+b, sizes)/len(sizes) ## in bytes\n\t\tav_rate = reduce(lambda a,b:a+b, rates)/len(rates)\n\t\treturn av_size, av_rate", "title": "" }, { "docid": "9fd0a771cebb6707d65034749017b906", "score": "0.5390946", "text": "def getSize(self):\n return self.bbox", "title": "" }, { "docid": "fd84eacc2a1f2c2a0b3091e797bb6b3e", "score": "0.53887916", "text": "def IsBcurve(self):\n\t\treturn self._oleobj_.InvokeTypes(6, LCID, 1, (11, 0), (),)", "title": "" }, { "docid": "ad81d9b9f3758d74b25d164f58f02b08", "score": "0.53839374", "text": "def size(self):\n return len(self.bars)", "title": "" }, { "docid": "8314a7e230b26de21f924b9a1a609810", "score": "0.5371995", "text": "def test_spline_block_size(self):\n self.mesh.prepare_data()\n\n self.assertAlmostEqual(self.block_1.get_size(1), 1.2299269360237004)", "title": "" }, { "docid": "a002574f79c643a3ac542d89e931b696", "score": "0.5352933", "text": "def get_size(self):\n return (\n self.rect[2],\n self.rect[3]\n )", "title": "" }, { "docid": "cffcfc836394808bfc44cb132322d748", "score": "0.53434145", "text": "def size(self):\n return DRAW_SIZE", "title": "" }, { "docid": "a0b4ecdc3a849bbddc6f4860fdda5a61", "score": "0.53402585", "text": "def qsize(self):\n return self.size.value", "title": "" }, { "docid": "4670fa24ff78e83d8bdbf4dd405034ca", "score": "0.5334498", "text": "def _get_trade_size(self, mkt, size, cfg_name):\n tw = 1.0\n if self.tactical_wt is not None:\n try:\n tw = self.tactical_wt[mkt][cfg_name]\n except:\n self.logger.logInfo('tactical weight for %s-%s not found, using 1'%(mkt,cfg_name))\n 
size*=(self.default_scale/self.data.mkt[mkt]['contract_size']*tw)\n size*=self.sw\n size = min(np.round(size), self.mts_max_trade[mkt])\n return float(size)", "title": "" }, { "docid": "b9344d07d12220c1182e7832b2df41bb", "score": "0.5332281", "text": "def readSizeF(self):\n pass", "title": "" }, { "docid": "66fade3358cc01600ddfa374b7097342", "score": "0.53304297", "text": "def GetSize(self):\n ...", "title": "" }, { "docid": "b76bb7aeb8dbd6878bc0e99130703498", "score": "0.53260666", "text": "def calculate_block_size(self) -> int:\n fixed_block_size = 4 + 256\n if self.curve_name == 'secp256r1':\n return fixed_block_size + 32\n if self.curve_name == 'secp384r1':\n return fixed_block_size + 48\n raise SPSDKError(f\"Invalid curve name: {self.curve_name}\")", "title": "" }, { "docid": "9f253842880bb871b1f8a54f8773cf3e", "score": "0.5322706", "text": "def get_size(self, name):\r\n pt = self.get_product_table(name)\r\n return pt[0]['Size']", "title": "" }, { "docid": "5bc85ea89176c4b4848fdd4ddd07d1da", "score": "0.5320794", "text": "def getBottomSizes(self):\n self.bottomSizes = []\n for item in self.bottom:\n size = item[2] - item[1]\n self.bottomSizes.append(size)", "title": "" }, { "docid": "16463f99528d62007e15a235eaba31cb", "score": "0.53046894", "text": "def getSyringeSize_ml(self):\n size = self.ui.syringeComboBox.currentText()\n syringe_options = {\n '25 mL': 25,\n '10 mL': 10,\n '5 mL': 5,\n '1 mL': 1,\n '500 \\u03bcL': 0.5,\n '250 \\u03bcL': 0.25,\n '100 \\u03bcL': 0.1,\n '50 \\u03bcL': 0.05\n }\n return syringe_options[size]", "title": "" }, { "docid": "0b39068603575c2cede3dbe00b742507", "score": "0.53031856", "text": "def get_b_shape(self):\n return [self.num_filters]", "title": "" }, { "docid": "547a34e0a5c3a92392a5ba8104e96b9c", "score": "0.5294142", "text": "def size(self):\n return ((self.line.bounds[2] - self.line.bounds[0]) +\n (self.line.bounds[3] - self.line.bounds[1]))", "title": "" }, { "docid": "f6099a7c7167db6cce325d9a55025bdd", "score": "0.5281318", "text": "def GetEstimatedSize(self, vtkAlgorithm, p_int, p_int_1):\n ...", "title": "" }, { "docid": "6b5d8d2748236a30507bb2bbdf20dd4e", "score": "0.52798975", "text": "def tickSize(self, *args, **kwargs):\n return _swigibpy.EWrapper_tickSize(self, *args, **kwargs)", "title": "" }, { "docid": "a63d6a4308206942f2c8af0695ca6c7b", "score": "0.5270727", "text": "def _calc_BS(self):\r\n\r\n return self", "title": "" }, { "docid": "a2c4cf1d0110ada736823558accf02dc", "score": "0.52592844", "text": "def get_border_height():\n return 20", "title": "" }, { "docid": "6f9026defce19c5086d2f8f178923743", "score": "0.52582985", "text": "def size(self):\n return self._get_component(\"size\")", "title": "" }, { "docid": "3f02489f828f1016aedb4dbe6f268e8d", "score": "0.525111", "text": "def Curve(self, order: int) -> None:", "title": "" }, { "docid": "011f888daa0a1f04a59e44c448aecd45", "score": "0.5250519", "text": "def getBTagScale(pt):\n #recipe: https://twiki.cern.ch/twiki/bin/viewauth/CMS/BtagPOG#2012_Data_and_MC_Moriond13_presc\n #payload: https://twiki.cern.ch/twiki/pub/CMS/BtagPOG/SFb-pt_payload_Moriond13.txt\n if pt > 800: pt = 800\n if pt < 20: pt = 20\n return 0.726981*((1.+(0.253238*pt))/(1.+(0.188389*pt)));", "title": "" }, { "docid": "5abc69d470b9bdfa424ed896d597ad7c", "score": "0.5249578", "text": "def size(self) -> Optional[float]:\n return pulumi.get(self, \"size\")", "title": "" }, { "docid": "ddff97fab13b762c30d48e431eeec61c", "score": "0.52427655", "text": "def expected_bytes_length(cls, curve: Optional[Curve] = None,\n 
is_compressed: bool=True):\n curve = curve if curve is not None else default_curve()\n\n coord_size = curve.field_order_size_in_bytes\n\n if is_compressed:\n return 1 + coord_size\n else:\n return 1 + 2 * coord_size", "title": "" }, { "docid": "f535a92e8bc685cdebbb154896a40cb6", "score": "0.5239487", "text": "def IGetTrimCurveSize2(self, WantCubic=defaultNamedNotOptArg, WantNRational=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(69, LCID, 1, (3, 0), ((3, 1), (3, 1)),WantCubic\n\t\t\t, WantNRational)", "title": "" }, { "docid": "f535a92e8bc685cdebbb154896a40cb6", "score": "0.5239487", "text": "def IGetTrimCurveSize2(self, WantCubic=defaultNamedNotOptArg, WantNRational=defaultNamedNotOptArg):\n\t\treturn self._oleobj_.InvokeTypes(69, LCID, 1, (3, 0), ((3, 1), (3, 1)),WantCubic\n\t\t\t, WantNRational)", "title": "" }, { "docid": "c50f9f5780ebfe5712bacf731ae9bb72", "score": "0.5219414", "text": "def read_bh_line(widget):\n text = widget.text()\n settings.pp_bead_height = float(text)", "title": "" }, { "docid": "6594ef4db2d6b9762280fb29576038c2", "score": "0.52172387", "text": "def curve(self):\n return self.__curve", "title": "" }, { "docid": "869f6919d0b1b6ac5cc06b65102b8e43", "score": "0.52152795", "text": "def get_size(self):\n\n if(self.highlighted):\n text_size = self.h_text_size;\n else:\n text_size = self.text_size;\n\n font = pygame.font.SysFont(self.font_name, size=text_size);\n font.set_underline(self.underline);\n\n img_text_size = font.size(self.text);\n button_size = (img_text_size[0]+self.padding*2, img_text_size[1]+self.padding*2);\n\n return button_size;", "title": "" }, { "docid": "c23d2d88b6224722b5c1e3634f08ac8c", "score": "0.52101064", "text": "def size(self):\n size = 0\n naxis = self._header.get(\"NAXIS\", 0)\n\n if naxis > 0:\n simple = self._header.get(\"SIMPLE\", \"F\")\n random_groups = self._header.get(\"GROUPS\", \"F\")\n\n if simple == \"T\" and random_groups == \"T\":\n groups = 1\n else:\n groups = 0\n\n size = 1\n\n for idx in range(groups, naxis):\n size = size * self._header[\"NAXIS\" + str(idx + 1)]\n bitpix = self._header[\"BITPIX\"]\n gcount = self._header.get(\"GCOUNT\", 1)\n pcount = self._header.get(\"PCOUNT\", 0)\n size = abs(bitpix) * gcount * (pcount + size) // 8\n return size", "title": "" }, { "docid": "176580cb0e119f2c6c778e55b83fd1f1", "score": "0.5209353", "text": "def curve_lengths(self):\n r = self.pi_radii()\n da = self.deflection_angles()\n return r * np.abs(da)", "title": "" }, { "docid": "209642e975f32fee865735338f3f6146", "score": "0.52087516", "text": "def IGetCurveParams(self):\n\t\treturn self._oleobj_.InvokeTypes(12, LCID, 1, (5, 0), (),)", "title": "" }, { "docid": "5a745aae8397755eea580b4829104c6b", "score": "0.52056146", "text": "def get_source_size(self):\n smoothing = self.get_requested_smoothing(\n smooth_spec=self.configuration.get_string('smooth'))\n return np.hypot(super().get_source_size(), smoothing)", "title": "" }, { "docid": "f1f4897a2881f178eaa08a062fdc55d7", "score": "0.51961374", "text": "def GetActualSize(self):\n ...", "title": "" }, { "docid": "f41cb81848b3e379eebdb85abb8ab8ad", "score": "0.5194561", "text": "def get_step_size(self):\n return self.cla1.get_rel()", "title": "" }, { "docid": "eb9f60d1faa3b84ca2317664d54b6d57", "score": "0.5192875", "text": "def IGetCurveParams(self):\n\t\treturn self._oleobj_.InvokeTypes(15, LCID, 1, (5, 0), (),)", "title": "" }, { "docid": "903f3768cfe99b5c7a7abb9badb8b268", "score": "0.51888514", "text": "def size_gb(self) -> Optional[pulumi.Input[int]]:\n return 
pulumi.get(self, \"size_gb\")", "title": "" }, { "docid": "903f3768cfe99b5c7a7abb9badb8b268", "score": "0.51888514", "text": "def size_gb(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"size_gb\")", "title": "" }, { "docid": "f2ee0f94c2c06f063006ec215f3519d0", "score": "0.5185422", "text": "def get_size(self) -> Tuple[float, float]:\n return self.get_width(), self.get_height()", "title": "" }, { "docid": "2c8c12e348192538fffd0989692284fc", "score": "0.5180032", "text": "def make_bar(self):\n total_bar_height = self.subrect.height - 2*self.button_size[1]\n if self.length > self.opts_per:\n barsize = total_bar_height*(self.opts_per/self.length)\n perindex = total_bar_height/self.length\n if barsize < 10:\n barsize = 10\n else:\n barsize = total_bar_height\n perindex = 10\n return barsize,perindex", "title": "" }, { "docid": "890f8b220a4680ce3b0c289127d3f060", "score": "0.5176693", "text": "def size(self):\n return self.tk.getint(self.tk.call(self._w, 'size'))", "title": "" }, { "docid": "eb7b179dc9aa8bd65fd571c167619016", "score": "0.5176148", "text": "def get_size(self):\n for light in self.lights:\n if light.adr + light.width > self.size:\n self.size = light.adr + light.width", "title": "" }, { "docid": "7db13453ddbfc6f3a509ff96dde514ed", "score": "0.5174585", "text": "def size_gb(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"size_gb\")", "title": "" }, { "docid": "2441f166f5663db2d021188cd9b6be25", "score": "0.51710564", "text": "def get_size(self) -> float:\n\n return float(\n re.findall(r'\\d+\\.\\d+', self.__size_square_meter)[0]\n )", "title": "" }, { "docid": "ad59231bdc08e16df6a71c832dac2b67", "score": "0.51679903", "text": "def get_sb(self):\n\n self.prepare_nominal_thns() \n S = sum([x.Projection(0).Integral() for x in self.h_sig.values()])\n S = S * self.scaling\n B = sum([x.Projection(0).Integral() for x in self.h_bkg.values()]) \n B = B * self.scaling\n\n return S,B", "title": "" }, { "docid": "8ebdd8ed95507140c6118a92f6612290", "score": "0.51669425", "text": "def get_size():\n return size", "title": "" }, { "docid": "bf5f19f9246c6a1dc03bb6cf89b5cf97", "score": "0.51648515", "text": "def e_size(self) -> int:\n return self.__EdgeSize", "title": "" }, { "docid": "19ff6eb6d52461f084d0b6e5e69f36b4", "score": "0.5143008", "text": "def read_bw_line(widget):\n text = widget.text()\n settings.pp_bead_width = float(text)", "title": "" }, { "docid": "76b5d2f3c74b7c17556fd435fb6ee46a", "score": "0.5133586", "text": "def size( self):\n self._nie( \"size\" )", "title": "" }, { "docid": "e2fa89d32555c0671c9aaa0ff5a63882", "score": "0.5129769", "text": "def get_total_size(self):\n return self.x_size, self.y_size", "title": "" }, { "docid": "4e9e979dcc4267e46f16d8375ef68671", "score": "0.5124765", "text": "def size(self):\n return self.getparam(\"SIZE\")", "title": "" }, { "docid": "c5ecb2f2a0dc30a9e0f562079179eb64", "score": "0.5116442", "text": "def test_estimate_size_arithmetic(self):\n self.assertEqual(10, graphsize.estimate_size(10, 10, 5))\n self.assertEqual(100.4, graphsize.estimate_size(10, 100.4, 5))", "title": "" }, { "docid": "d822124637b38f032190c1839c59b6f8", "score": "0.5106854", "text": "def super_detailed(self):\n\n cfg = SVGConfig()\n cfg.bezier_points *= 10\n cfg.circle_points *= 10\n cfg.tolerance /= 100\n return cfg", "title": "" }, { "docid": "a4d15465befda3068197d7c325a8ca0e", "score": "0.5106266", "text": "def size(self):", "title": "" }, { "docid": "7eee205646f4e8f285658e37d4e7d3fc", "score": "0.51046", "text": "def size(self) -> 
int:\n sz = 1\n for option in self.options:\n # Each option can be applied or not.\n sz *= len(option) + 1\n return sz", "title": "" }, { "docid": "8d726b1600b6becc057c6125a2149570", "score": "0.51045513", "text": "def get_point_size(self):\n smoothing = self.get_requested_smoothing(\n smooth_spec=self.configuration.get_string('smooth'))\n return np.hypot(self.info.instrument.get_point_size(), smoothing)", "title": "" }, { "docid": "0529226b786ccc7422d01c23e7e3fdcb", "score": "0.5099049", "text": "def get_b_shape(self):\n return []", "title": "" }, { "docid": "911b2b5883585a069cbdcaf856e878ff", "score": "0.50889593", "text": "def size(self):\n return self.tk.call(self._w, \"size\")", "title": "" }, { "docid": "1c0fba8786be549a8783f4b02e2bf4ce", "score": "0.5088437", "text": "def size(self):\n return self.binning.size()", "title": "" }, { "docid": "fe1a89f3883fcef7deb2fa2f71802b61", "score": "0.50854623", "text": "def hparams_frame_size(hparams):\n if hparams.spec_type == 'raw':\n return hparams.spec_hop_length\n return hparams.spec_n_bins", "title": "" }, { "docid": "fbd802ef2b048046cd9d7097c0efb750", "score": "0.50850546", "text": "def set(self):\r\n plt.show()\r\n return self.threshold, self.min_bout_length", "title": "" }, { "docid": "38f2a71bd7964f870d26af407d32cb72", "score": "0.50847626", "text": "def sizeHint(self):\n sz = super(SpinWidget.Button, self).sizeHint()\n if self.parentWidget().layout().direction() \\\n == QBoxLayout.TopToBottom:\n sz.setHeight(sz.height() / 2)\n return sz", "title": "" }, { "docid": "7fa72d916a94aa7d08c2c735a5c6e8a0", "score": "0.5084056", "text": "def e_size(self) -> int:\n return self.edgeSize", "title": "" }, { "docid": "204147963effc75875ebec5270db038c", "score": "0.5083571", "text": "def get_bw(self):\n \n print 'Using a band width of %.0f' % self.bw\n return self.bw", "title": "" }, { "docid": "7f5b2475efd1e3c7114964d67511cbf9", "score": "0.50808567", "text": "def getSize(self) -> int:\n ...", "title": "" }, { "docid": "9ae284c281aa278a535dbcbda74d6b5b", "score": "0.5080619", "text": "def beta2size(beta,hdr):\n if type(beta)==list:\n return [np.abs(beta[0]*hdr['dra']),np.abs(beta[1]*hdr['ddec'])]\n else:\n return [np.abs(beta*hdr['dra']),np.abs(beta*hdr['ddec'])]", "title": "" }, { "docid": "a5b9e9f0803dd610dfe46730fb576f1e", "score": "0.5074004", "text": "def sz_bot(self):\n return float(self.element.attrib['sz_bot'])", "title": "" }, { "docid": "73cd60c53955ba11dff10d7f957d74b0", "score": "0.5073647", "text": "def sizesrc(self):\n return self[\"sizesrc\"]", "title": "" }, { "docid": "11bb9f1e38e4fe0582207e35c7cd3f24", "score": "0.5072285", "text": "def _getsizing(self, comminfo, cash, data, isbuy):\n \n if isbuy: # Buying\n target = self.broker.getvalue() * self.params.prop # Ideal total value of the position\n price = data.close[0]\n shares_ideal = target / price # How many shares are needed to get target\n batches = int(shares_ideal / self.params.batch) # How many batches is this trade?\n shares = batches * self.params.batch # The actual number of shares bought\n \n if shares * price > cash:\n return 0 # Not enough money for this trade\n else:\n return shares\n \n else: # Selling\n return self.broker.getposition(data).size # Clear the position", "title": "" }, { "docid": "16181a1396214d053144e1815706242e", "score": "0.5064367", "text": "def test_min_size(self):\n self.assertEqual(12, MeterBandHeader().get_size())", "title": "" }, { "docid": "4d8db0c37d2777c0b72c80516a144633", "score": "0.50598925", "text": "def test_min_size(self):\n 
self.assertEqual(16, MeterBandDrop().get_size())", "title": "" }, { "docid": "e988f6c12e30a495c59e480dc7324158", "score": "0.5059713", "text": "def shape(self):\n return self.nfrms, self.ysize, self.xsize", "title": "" }, { "docid": "36ef2ab9f6c3256b16eda4245f2539af", "score": "0.50577945", "text": "def size(cls):\n return (300, 300)", "title": "" }, { "docid": "d8c47b69d2e2857ae9b5d182aec4f1b8", "score": "0.505559", "text": "def height(self):\n return capi.get_band_ysize(self._ptr)", "title": "" }, { "docid": "1e0bb5c6e61d55dc9e88f666e684e790", "score": "0.50509125", "text": "def BurstSize(self):\n\t\treturn self._get_attribute('burstSize')", "title": "" }, { "docid": "62214b9f384f9d22f43a53a373f47073", "score": "0.50416493", "text": "def describe_battery_size(self):\n self.battery.describe_battery()", "title": "" }, { "docid": "0064094c5c837712132fe86d0c772b28", "score": "0.5039716", "text": "def getSize(self):\n\t\treturn self.size", "title": "" }, { "docid": "720b05048373fb866b01f202e650079b", "score": "0.5037676", "text": "def calc_BeamSize(d,v,verbose=False):\n\n c = 2.998e8 #speed of light\n\n if verbose:\n print '\\nBeam is: %f \\n'%(1.22* (c/(d*v)) *57.2958*60.)\n\n return (1.22* (c/(d*v)) *57.2958*60./2)**2*np.pi", "title": "" }, { "docid": "942a605f4e4c81652902529e01517547", "score": "0.5037606", "text": "def height(self):\n return self.size[1]", "title": "" } ]
e369648db1ac23f96685962920b8f75e
Load metadata from multiple datasets into a single table with unique utterance ids for every row.
[ { "docid": "15f414d2309c786d99f101a1bbb42e9f", "score": "0.0", "text": "def load_all(corpus_dir, langs, usecols=USE_COLUMNS, num_processes=os.cpu_count()):\n if num_processes > 0:\n with multiprocessing.Pool(processes=num_processes) as pool:\n lang_dfs = pool.starmap(load, ((corpus_dir, lang, usecols) for lang in langs))\n else:\n lang_dfs = (load(corpus_dir, lang, usecols) for lang in langs)\n return (pd.concat(lang_dfs, verify_integrity=True).sort_index())", "title": "" } ]
[ { "docid": "3a708ea54b609df8d7d514f6a89b6e5b", "score": "0.63951355", "text": "def load_metadata_adaptive_all(**kwargs):\n kwargs['cohort'] = 'both'\n metadata_emerson = load_metadata_emerson(**kwargs)\n metadata_lindau = load_metadata_lindau(**kwargs)\n metadata = pd.concat([metadata_emerson, metadata_lindau])\n return metadata", "title": "" }, { "docid": "42f9279d03bf68315984153d1fc4dc32", "score": "0.594423", "text": "def get_metadatas():\n global_metadata = {\n \"features\": [{\n \"name\": \"global\",\n \"dtype\": \"float\",\n \"shape\": [len(GLOBAL_FEATURE_VALUES)],\n \"isSparse\": True\n }, {\n \"name\": \"uid\",\n \"dtype\": \"long\",\n \"shape\": [],\n \"isSparse\": False\n }, {\n \"name\": \"weight\",\n \"dtype\": \"float\",\n \"shape\": [],\n \"isSparse\": False\n }, {\n \"name\": \"movie_id\",\n \"dtype\": \"long\",\n \"shape\": [],\n \"isSparse\": False\n }, {\n \"name\": \"user_id\",\n \"dtype\": \"long\",\n \"shape\": [],\n \"isSparse\": False\n }],\n \"labels\" : [{\n \"name\": \"response\",\n \"dtype\": \"int\",\n \"shape\": [],\n \"isSparse\": False\n }]\n }\n\n per_user_metadata = {\n \"features\": [{\n \"name\": \"per_user\",\n \"dtype\": \"float\",\n \"shape\": [len(MOVIE_FEATURE_VALUES)],\n \"isSparse\": True\n }, {\n \"name\": \"uid\",\n \"dtype\": \"long\",\n \"shape\": [],\n \"isSparse\": False\n }, {\n \"name\": \"weight\",\n \"dtype\": \"float\",\n \"shape\": [],\n \"isSparse\": False\n }, {\n \"name\": \"movie_id\",\n \"dtype\": \"long\",\n \"shape\": [],\n \"isSparse\": False\n }, {\n \"name\": \"user_id\",\n \"dtype\": \"long\",\n \"shape\": [],\n \"isSparse\": False\n }],\n \"labels\" : [{\n \"name\": \"response\",\n \"dtype\": \"int\",\n \"shape\": [],\n \"isSparse\": False\n }]\n }\n\n per_movie_metadata = {\n \"features\": [{\n \"name\": \"per_movie\",\n \"dtype\": \"float\",\n \"shape\": [len(USER_FEATURE_VALUES)],\n \"isSparse\": True\n }, {\n \"name\": \"uid\",\n \"dtype\": \"long\",\n \"shape\": [],\n \"isSparse\": False\n }, {\n \"name\": \"weight\",\n \"dtype\": \"float\",\n \"shape\": [],\n \"isSparse\": False\n }, {\n \"name\": \"movie_id\",\n \"dtype\": \"long\",\n \"shape\": [],\n \"isSparse\": False\n }, {\n \"name\": \"user_id\",\n \"dtype\": \"long\",\n \"shape\": [],\n \"isSparse\": False\n }],\n \"labels\" : [{\n \"name\": \"response\",\n \"dtype\": \"int\",\n \"shape\": [],\n \"isSparse\": False\n }]\n }\n return (global_metadata, per_user_metadata, per_movie_metadata)", "title": "" }, { "docid": "0402b743d159b9da0629e46fd3d668c2", "score": "0.5832388", "text": "def prepare_data(savedir: Path) -> None:\n\n # Process OTUs\n values_path = fetch(\"maize_microbiome\", savedir / \"maize_microbiome.tsv\")\n values = pd.read_csv(values_path, sep=\"\\t\", index_col=0).T.sort_index()\n values.to_csv(values_path, sep=\"\\t\")\n\n # Process metadata (split into separate files)\n values_path = fetch(\"maize_metadata\", savedir / \"maize_metadata.tsv\")\n values = pd.read_csv(values_path, sep=\"\\t\", index_col=0).sort_index()\n values.index.name = None\n\n col_names = [\"Field\", \"INBREDS\", \"Maize_Line\"]\n var_names = [\"field\", \"variety\", \"line\"]\n\n for col_name, var_name in zip(col_names, var_names):\n category = values[col_name]\n category.name = var_name\n category.to_frame().to_csv(savedir / f\"maize_{var_name}.tsv\", sep=\"\\t\")\n\n data = values[[\"age\", \"Temperature\", \"Precipitation3Days\"]]\n data.columns = [col_name.lower() for col_name in data.columns]\n data.to_csv(values_path, sep=\"\\t\")\n\n with open(savedir / 
\"maize_ids.txt\", \"w\", encoding=\"utf-8\") as file:\n for name in data.index:\n file.write(f\"{name}\\n\")", "title": "" }, { "docid": "5373cbcfa114a6c5492ea1784401a565", "score": "0.5819526", "text": "def get_metadata(datasets):\n metadata = []\n for dataset in datasets:\n # Fetch the sample size of the original data\n file_handle = open(DIR+dataset, \"r\")\n lines = file_handle.readlines()\n file_handle.close()\n reference = dataset.split(\"-\")[0]\n name = dataset.split(\"-\")[1:-1]\n name = \" \".join(name)\n name = name.replace(\"_\", \" \")\n # Add information to our collection\n metadata.append({\n \"name\": name,\n \"reference\": reference,\n \"sampleSize\": len(lines),\n \"hasOutliers\": False\n })\n return metadata", "title": "" }, { "docid": "f006c476d2b4ad3a963a4c2fbdda28cb", "score": "0.5719651", "text": "def load_all_data(self):\n\n # load default if not exists\n if self.query_files == []:\n self.get_query_files()\n \n for qf in self.query_files:\n logger.info('Gather SIU Transp FILE {}'.format(qf))\n stqf = SIUTranspQueryFile(portal=self, path=qf)\n # open to read query params\n query_metadata = stqf.open()\n\n self.query_metadata[query_metadata['name']] = query_metadata", "title": "" }, { "docid": "a00085d77485bbaf66131ba3ef66470f", "score": "0.570109", "text": "def load_tables():\n global ratings_table, movies_table, data_table, tags_table # so that it can update global variable\n\n ratings_table = pd.read_csv('data/ratings.csv', sep=',', names=RATING_COLS, # read in file\n\t\tencoding='utf-8', skiprows = 1)\n\n movies_table = pd.read_csv('data/movies.csv', sep=',', names=MOVIE_COLS, # read in file\n\t\tencoding='utf-8', skiprows = 1)\n\n links_table = pd.read_csv('data/links.csv', sep=\",\", encoding='utf-8');\n movies_table = pd.merge(movies_table, links_table[[\"movieId\", \"tmdbId\"]], # merge in link to tmdb\n on=\"movieId\")\n\n data_table = pd.merge(ratings_table, movies_table, on=\"movieId\") # merge tables\n\n tags_table = pd.read_csv('data/tags.csv', sep=',', encoding = \"utf-8\"); # read in tags\n tags_table = tags_table[['movieId', 'tag']] # reduce to interesting columns\n tags_table = tags_table.groupby('movieId').apply(lambda x: (x + \" \").sum()) # group tags by movie id\n tags_table = tags_table.reset_index() # give an index column again\n\n print(\"Loaded Ratings and Movies\")", "title": "" }, { "docid": "6d8b3cafb7d70dc9d6e8ecdf1dfd5f6a", "score": "0.5682341", "text": "def create_H1_datasets(datasets):\n\n\tH1_datasets = []\n\n\tfor dataset_name in datasets:\n\t\treader = Reader(line_format='user item rating timestamp', sep=',')\n\t\ttrain_ensembles = Dataset.load_from_file(\"./created_data/\"+dataset_name+\"_train_ensembles.csv\", reader=reader)\n\n\t\tnegative_sampling = Dataset.load_from_file(\"./created_data/\"+dataset_name+\"_train_negative_sample.csv\", reader=reader)\n\t\tnegative_sampling_trainset = negative_sampling.build_full_trainset()\n\t\ttestset_ns = negative_sampling_trainset.build_testset()\n\t\t\t\n\t\tuf = UserFeatures(pd.DataFrame(train_ensembles.raw_ratings,columns = [\"userId\",\"movieId\",\"rating\",\"timestamp\"]),False)\n\t\tuser_features_df = uf.get_all_user_features()\t\t\n\n\t\tuser_train_time_features = pd.read_csv(\"./created_data/tmp/h2_\"+dataset_name+\"_user_train_time_features.csv\")\n\t\tuser_train_time_features[\"userId\"] = user_train_time_features[\"userId\"].astype(str)\n\n\t\tuser_train_time_val_features = 
pd.read_csv(\"./created_data/tmp/h2_\"+dataset_name+\"_user_train_time_features_val_set.csv\")\n\t\tuser_train_time_val_features[\"userId\"] = user_train_time_val_features[\"userId\"].astype(str)\n\n\t\tall_user_features_df = (user_features_df.merge(user_train_time_features,on=\"userId\")).merge(user_train_time_val_features,on=\"userId\",how=\"left\")\t\t\n\t\tall_user_features_df = all_user_features_df.fillna(0.0)\n\t\tassert user_features_df.userId.shape[0] == user_train_time_features.shape[0]\n\t\tassert user_features_df.userId.shape[0] == user_train_time_val_features.shape[0]\n\n\t\trecs_avg_errors = []\t\t\n\t\tfor rs in RS:\n\t\t\t#Memory error for 16GB machine or float division error for lastfm\n\t\t\tif(\"KNN\" in rs[\"name\"] and dataset_name in datasets_knn_mem_error):\n\t\t\t\tcontinue\n\t\t\tfile_name = os.path.expanduser('./created_data/trained_RS/dump_file_'+dataset_name+'_'+rs[\"name\"])\n\t\t\t_, loaded_algo = dump.load(file_name)\n\t\n\t\t\tpredictions = loaded_algo.test(train_ensembles.build_full_trainset().build_testset())\n\t\t\tpredictions_df = pd.DataFrame(predictions,columns = [\"userId\",\"movieId\",\"rating\",\"prediction\",\"details\"])\n\n\t\t\tpreds_ns = loaded_algo.test(testset_ns)\n\t\t\tpredictions_ns_df = pd.DataFrame(preds_ns,columns = [\"userId\",\"movieId\",\"rating\",\"prediction\",\"details\"])\t\t\t\t\t\n\t\t\tpredictions_df = pd.concat([predictions_df,predictions_ns_df])\n\t\t\tpredictions_with_relevance = remove_dataset_bias(predictions_df, has_ns = True)\n\n\t\t\tscores = predictions_with_relevance.groupby(\"userId\").agg(lambda r,f = calculate_ndcg_score: f(r,\"prediction\"))\n\t\t\tscores = scores[[scores.columns[0]]].rename(index=str,columns={scores.columns[0]:\"NDCG\"}).reset_index()\n\t\t\tscores[\"RS\"] = rs[\"name\"]\t\t\t\n\n\t\t\trecs_avg_errors.append(scores)\n\n\t\tall_avg_errors = pd.concat(recs_avg_errors).reset_index()\n\n\t\thypothesis_df = create_best_RS_userwise_dataset(all_avg_errors,all_user_features_df)\t\t\n\t\t\n\t\tH1_datasets.append(hypothesis_df[[c for c in hypothesis_df.columns if c not in [\"userId\",\"userId.1\",\"index\",\"NDCG\"]]])\n\n\treturn H1_datasets", "title": "" }, { "docid": "a4ea04835a10b85b6b07aeacd59a6cf3", "score": "0.5618928", "text": "def get_metadata_labelled(datasets):\n metadata = []\n for dataset in datasets:\n # Fetch the sample size of the labelled data\n file_handle = open(DIR+dataset, \"r\")\n outliers = 0\n samples = -1\n has_outliers = False\n for line in file_handle:\n samples = samples + 1\n if \"TRUE\" in line:\n has_outliers = True\n outliers = outliers + 1\n file_handle.close()\n reference = dataset.split(\"-\")[0]\n name = dataset.split(\"-\")[1:-1]\n name = \" \".join(name)\n name = name.replace(\"_\", \" \")\n # Add information to our collection\n metadata.append({\n \"name\": name,\n \"reference\": reference,\n \"sampleSize\": samples-outliers,\n \"hasOutliers\": has_outliers\n })\n return metadata", "title": "" }, { "docid": "4651c9902ccd883a478e8261fa6c5b15", "score": "0.5587408", "text": "def import_metadata(gen='one'):\r\n\r\n assert gen in ['one', 'two', 'three'], \\\r\n \"Error: gen must be in ['one', 'two', 'three']\"\r\n\r\n this_data_dir = os.path.join(__DATA_DIR, 'gen-%s/raw/' % gen)\r\n\r\n metadata = [] # Init list to return\r\n\r\n # For each experimental session in the dataset\r\n for expt_session in os.listdir(this_data_dir):\r\n\r\n # If the expt_session is a directory\r\n if os.path.isdir(os.path.join(this_data_dir, expt_session)):\r\n\r\n # Get the path to the 
-metadata.csv file for this\r\n # expt_session\r\n metadata_path = os.path.join(this_data_dir, expt_session,\r\n expt_session + '-metadata.csv')\r\n\r\n # Load the -metadata.csv file for this expt_session\r\n session_metadata = np.genfromtxt(metadata_path, delimiter=',',\r\n dtype=str)\r\n\r\n # Get the keys for the metadata\r\n metadata_keys = session_metadata[0, :]\r\n\r\n for md_key in metadata_keys:\r\n\r\n # Assert the metadata str is valid\r\n assert md_key in dtypes_dict.keys(), \\\r\n \"Error: invalid metadata str %s in file %s\" % (\r\n md_key, metadata_path\r\n )\r\n\r\n # For each individual scan within this expt_session\r\n for expt in range(1, np.size(session_metadata, axis=0)):\r\n\r\n # Get the metadata values (as str's) for this expt\r\n expt_metadata = session_metadata[expt, :]\r\n\r\n expt_metadata_dict = dict() # Init dict for this expt\r\n\r\n # Create counter for the different pieces of metadata\r\n # info\r\n info_counter = 0\r\n\r\n # For each info piece in the metadata\r\n for info_piece in metadata_keys:\r\n\r\n # If the value for this info piece is NOT missing\r\n if not expt_metadata[info_counter] == '':\r\n\r\n # Store the value as its proper dtype\r\n expt_metadata_dict[info_piece] = \\\r\n dtypes_dict[info_piece](\r\n expt_metadata[info_counter])\r\n\r\n else: # If the value for this info piece IS missing\r\n\r\n assert info_piece in dtypes_dict.keys(), \\\r\n '%s not valid info-piece, in file: %s' \\\r\n % (info_piece, metadata_path)\r\n\r\n # If the dtype for this info piece is an int\r\n # or float, store as NaN\r\n if (dtypes_dict[info_piece] == int or\r\n dtypes_dict[info_piece] == float):\r\n\r\n expt_metadata_dict[info_piece] = np.NaN\r\n\r\n # If the dtype for this info piece is a str,\r\n # store as empty str\r\n else:\r\n expt_metadata_dict[info_piece] = ''\r\n\r\n info_counter += 1 # Increase the info counter\r\n\r\n # Append the metadata dict for this expt to the list\r\n metadata.append(expt_metadata_dict)\r\n\r\n return metadata", "title": "" }, { "docid": "82ccb57b4f06df03b1fcca7fdc78a42a", "score": "0.55449456", "text": "def download_metadata(dataset):\n x = quandl.Dataset(dataset)\n df, l = x.data_fields(), x.to_list()\n return {df[i]: l[i] for i in range(len(l))}", "title": "" }, { "docid": "febc9c10f58f3855cbd9dff2b8da19d5", "score": "0.5522294", "text": "def create_H2_datasets(datasets):\n\n\tH2_datasets = []\n\n\tfor dataset_name in datasets:\n\t\treader = Reader(line_format='user item rating timestamp', sep=',')\t\t\n\t\ttrain = Dataset.load_from_file(\"./created_data/\"+dataset_name+\"_train.csv\", reader=reader)\n\t\ttrain_ensembles = Dataset.load_from_file(\"./created_data/\"+dataset_name+\"_train_ensembles.csv\", reader=reader)\n\t\t\t\n\t\tuf = UserFeatures(pd.DataFrame(train.raw_ratings,columns = [\"userId\",\"movieId\",\"rating\",\"timestamp\"]),False)\n\t\tuser_features_df = uf.get_all_user_features()\n\n\t\titemF = ItemFeatures(pd.DataFrame(train.raw_ratings,columns = [\"userId\",\"movieId\",\"rating\",\"timestamp\"]),False)\n\t\tdel(train)\n\t\titem_features_df = itemF.get_all_item_features()\n\t\titem_features_df.to_csv(\"./created_data/tmp/h1_\"+dataset_name+\"_item_features_df.csv\",index=False)\n\n\t\tuser_train_time_features = pd.read_csv(\"./created_data/tmp/h2_\"+dataset_name+\"_user_train_time_features.csv\")\n\t\tuser_train_time_features[\"userId\"] = user_train_time_features[\"userId\"].astype(str)\n\n\t\tuser_train_time_features_val_set = 
pd.read_csv(\"./created_data/tmp/h2_\"+dataset_name+\"_user_train_time_features_val_set.csv\")\n\t\tuser_train_time_features_val_set[\"userId\"] = user_train_time_features_val_set[\"userId\"].astype(str)\n\t\t\n\t\trecs_predictions = pd.DataFrame(train_ensembles.raw_ratings, columns = [\"userId\",\"movieId\",\"rating\",\"timestamp\"])\n\t\trecs_predictions[\"label\"] = recs_predictions[\"rating\"]\n\t\t\n\t\trecs_predictions_with_ns = pd.read_csv(\"./created_data/\"+dataset_name+\"_train_negative_sample.csv\", names = [\"userId\",\"movieId\",\"rating\",\"timestamp\"])\n\t\trecs_predictions_with_ns[\"label\"] = recs_predictions_with_ns[\"rating\"]\n\n\t\tfor rs in RS:\n\t\t\t#Memory error for 16GB machine or float division error for lastfm\n\t\t\tif(\"KNN\" in rs[\"name\"] and dataset_name in datasets_knn_mem_error):\n\t\t\t\tcontinue\n\t\t\tfile_name = os.path.expanduser('./created_data/trained_RS/dump_file_'+dataset_name+'_'+rs[\"name\"])\n\t\t\t_, loaded_algo = dump.load(file_name)\n\t\n\t\t\tpredictions = loaded_algo.test(train_ensembles.build_full_trainset().build_testset())\t\t\t\n\t\t\tpredictions_df = pd.DataFrame(predictions,columns = [\"userId\",\"movieId\",\"rating\",\"prediction_\"+rs[\"name\"],\"details\"])\t\t\t\n\t\t\trecs_predictions = recs_predictions.merge(predictions_df[[\"userId\",\"movieId\",\"prediction_\"+rs[\"name\"]]],on = [\"userId\",\"movieId\"])\t\t\t\n\t\t\t\t\t\t\n\t\t\tpredictions_ns = pd.read_csv(\"./created_data/l2r/predictions_train_ns_\"+dataset_name+\"_\"+rs[\"name\"]+\".csv\")\n\t\t\tpredictions_ns[\"prediction_\"+rs[\"name\"]] = predictions_ns[\"prediction\"]\n\t\t\tpredictions_ns = predictions_ns[[c for c in predictions_ns.columns if c != \"prediction\"]]\n\t\t\trecs_predictions_with_ns = recs_predictions_with_ns.merge(predictions_ns[[\"userId\",\"movieId\",\"prediction_\"+rs[\"name\"]]],on = [\"userId\",\"movieId\"])\n\t\t\n\t\tdel(loaded_algo)\n\t\tdel(train_ensembles)\n\n\t\tH2_dataset = recs_predictions.merge(user_features_df,on=\"userId\")\n\t\tH2_dataset = H2_dataset.merge(item_features_df,on=\"movieId\")\n\t\tuser_train_time_features = user_train_time_features.fillna(0.0)\n\t\tH2_dataset = H2_dataset.merge(user_train_time_features,on=\"userId\")\n\t\tuser_train_time_features_val_set = user_train_time_features_val_set.fillna(0.0)\n\t\tH2_dataset = H2_dataset.merge(user_train_time_features_val_set,on=\"userId\",how=\"left\")\n\t\t#this is ok as users with only one rating do not have some meta-features\n\t\tH2_dataset = H2_dataset.fillna(0.0)\t\t\t\t\n\t\t\n\t\tH2_dataset = H2_dataset[[c for c in H2_dataset.columns if c not in [\"index\",\"userId\",\"movieId\",\"timestamp\",\"rating\"]]]\n\n\t\tH2_datasets.append(H2_dataset)\n\t\tdel(H2_dataset)\n\n\t\t#dataset for l2r\t\t\n\t\trecs_predictions_with_ns['userId'] = recs_predictions_with_ns['userId'].astype('str') \n\t\trecs_predictions_with_ns['movieId'] = recs_predictions_with_ns['movieId'].astype('str') \n\t\tL2R_dataset = pd.concat([recs_predictions_with_ns,recs_predictions])\n\t\tdel(recs_predictions)\n\t\tdel(recs_predictions_with_ns)\n\t\tL2R_dataset = L2R_dataset.merge(user_features_df,on=\"userId\")\n\t\tdel(user_features_df)\n\t\tL2R_dataset = L2R_dataset.merge(item_features_df,on=\"movieId\")\n\t\tdel(item_features_df)\n\t\tL2R_dataset = L2R_dataset.merge(user_train_time_features,on=\"userId\")\n\t\tdel(user_train_time_features)\n\t\tL2R_dataset = 
L2R_dataset.merge(user_train_time_features_val_set,on=\"userId\",how=\"left\")\n\t\tdel(user_train_time_features_val_set)\t\t\n\n\t\tL2R_dataset.to_csv(\"./created_data/l2r/\"+dataset_name+\"_train.csv\",index=False)\n\n\t\tdel(L2R_dataset)\n\n\treturn H2_datasets", "title": "" }, { "docid": "a1b67724efb1bac46f5f0a7ab540abc8", "score": "0.55206734", "text": "def load_data():\n for table_class in [Movie, Studio, Director]:\n table_class.delete(force=True).run_sync()\n\n Director.insert(*[Director(**d) for d in DIRECTORS]).run_sync()\n Movie.insert(*[Movie(**m) for m in MOVIES]).run_sync()\n Studio.insert(*[Studio(**s) for s in STUDIOS]).run_sync()\n\n engine_type = Director._meta.db.engine_type\n\n if engine_type == \"postgres\":\n # We need to update the sequence, as we explicitly set the IDs.\n Director.raw(\n \"SELECT setval('director_id_seq', max(id)) FROM director\"\n ).run_sync()\n Director.raw(\n \"SELECT setval('movie_id_seq', max(id)) FROM movie\"\n ).run_sync()\n Studio.raw(\n \"SELECT setval('studio_id_seq', max(id)) FROM studio\"\n ).run_sync()", "title": "" }, { "docid": "3b068308039b94e0177c667685c55716", "score": "0.5481218", "text": "def combine_datasets():\n return {\n k: torch.cat([data[k] for _, data in data_set.items()], dim=0)\n for k in data_set['train'].keys() # keys collected from any data_t\n }", "title": "" }, { "docid": "60cb2f3065cdd4adeb10be500db1f028", "score": "0.5472108", "text": "def get_metadata(datasette, key, database, table):", "title": "" }, { "docid": "0c5e80e8e4fae4b560e2f5f55a1a367c", "score": "0.542634", "text": "def merge_dataframes(genre_df, lyrics_df, artist_title_df):\n\n genre_lyrics_df = pd.merge(genre_df, lyrics_df, how='left', left_on='tagtraum_trackId', right_on='track_id')\n all_data_df = pd.merge(genre_lyrics_df, artist_title_df, how='left', left_on='tagtraum_trackId', right_on='track_id')\n\n return all_data_df", "title": "" }, { "docid": "6b80717d29b8ebdb86b6c0107d5cb74d", "score": "0.5363878", "text": "def test_build_demo_metadata_from_tables():\n tables = load_demo(metadata=False)\n\n new_meta = Metadata()\n new_meta.add_table('users', data=tables['users'], primary_key='user_id')\n new_meta.add_table('sessions', data=tables['sessions'], primary_key='session_id',\n parent='users', foreign_key='user_id')\n transactions_fields = {\n 'timestamp': {\n 'type': 'datetime',\n 'format': '%Y-%m-%dT%H:%M'\n }\n }\n new_meta.add_table('transactions', tables['transactions'],\n fields_metadata=transactions_fields,\n primary_key='transaction_id', parent='sessions')\n\n assert DEMO_METADATA == new_meta.to_dict()", "title": "" }, { "docid": "b015141bf49604ac03e16f554f79af5c", "score": "0.5354961", "text": "def combine(list_of_data):\n \n pivot_tables = [\n pivot(data, is_combined_data=False) \n for data in list_of_data\n ]\n \n combined_data = pd.concat(pivot_tables)\n combined_data.reset_index(drop=True, inplace=True)\n combined_data.reset_index(inplace=True)\n \n combined_data = combined_data.melt(id_vars='index', value_name='price')\n combined_data.reset_index(drop=True, inplace=True)\n combined_data.rename(columns={'index': 'trend_id'}, inplace=True)\n \n return combined_data", "title": "" }, { "docid": "c522cfa7d55bd94a79292d247270d01d", "score": "0.5316224", "text": "def populate_all_metadata():\n for Metadata in registry.values():\n InstanceMetadata = Metadata._meta.get_model('modelinstance')\n if InstanceMetadata is not None:\n for model in Metadata._meta.seo_models:\n populate_metadata(model, InstanceMetadata)", "title": "" }, { "docid": 
"2eee614fd06c3c0ce4eabb57bd392d0b", "score": "0.530903", "text": "def load_all_data(dataset_fraction):\n # Load and preprocess data\n sentences, labels = load_data_and_labels(dataset_fraction)\n logger.info(\"\\tdata_helpers: padding strings...\")\n sentences_padded = pad_sentences(sentences)\n logger.info(\"\\tdata_helpers: [OK]\")\n logger.info(\"\\tdata_helpers: building vocabulary...\")\n vocabulary, vocabulary_inv = build_vocab()\n logger.info(\"\\tdata_helpers: [OK]\")\n logger.info(\"\\tdata_helpers: building processed datasets...\")\n x, y = build_input_data(sentences_padded, labels, vocabulary)\n logger.info(\"\\tdata_helpers: [OK]\")\n return [x, y, vocabulary, vocabulary_inv]", "title": "" }, { "docid": "494834c971fcfa8b010e19df3097d7ba", "score": "0.5292605", "text": "def _collect_speaker_data(utt_sets: Collection[Collection[Utterance]]) -> Tuple[\n\t\tDict[str, Dict[str, str]], Dict[str, Dict[str, bool]]]:\n\t\t# Collect SPEAKER data and metadata\n\t\tspeakers_meta = defaultdict(lambda: defaultdict(str))\n\t\tspeakers_meta_conflict = defaultdict(lambda: defaultdict(bool))\n\t\tfor utt_set in utt_sets:\n\t\t\tfor utt in utt_set:\n\t\t\t\tfor meta_key, meta_val in utt.speaker.meta.items():\n\t\t\t\t\tcurr = speakers_meta[utt.speaker][meta_key]\n\t\t\t\t\tif curr != meta_val:\n\t\t\t\t\t\tif curr != \"\":\n\t\t\t\t\t\t\tspeakers_meta_conflict[utt.speaker][meta_key] = True\n\t\t\t\t\t\tspeakers_meta[utt.speaker][meta_key] = meta_val\n\n\t\treturn speakers_meta, speakers_meta_conflict", "title": "" }, { "docid": "b58c1ff333bf869764140909804c7445", "score": "0.52853423", "text": "def fetch_metadata(metadata_model):\n metadata = OrderedDict()\n metadata_query = metadata_model.objects.all()\n for table in metadata_query:\n for field in METADATA_FIELDS:\n metadata[field] = table.__dict__[field]\n # Add the metadata to metadata_dict\n metadata_dict[table.table_name] = metadata.copy()", "title": "" }, { "docid": "2ae144f04ea9ee31aa71268593aeef74", "score": "0.52837974", "text": "def set_dataset_association_annotations(goal):\n counter = 0\n \n for table in dataset_tables:\n column = table[len('dataset_'):]\n value = '%s%s' % (column[0].upper(), column[1:])\n\n if table == 'dataset_gender':\n value = 'Sex'\n\n # print ' Table=%s | Column=%s | Value=%s' % (table,column,value)\n \n goal.table('isa', table).display.update({'name': '%s' % value})\n counter = counter + set_ermrest_system_column_annotations(goal, 'isa', table)\n goal.table(\n 'isa', table\n ).foreign_keys[\n ('isa', '%s_%s_fkey' % (table, column))\n ].foreign_key.update({\n \"to_name\": \"%s\" % get_underline_space_title_case(column)\n })\n\n if table == 'dataset_gender':\n goal.table('isa', table).foreign_keys[('isa', '%s_%s_fkey' % (table, column))].foreign_key.update({\"to_name\": \"%s\" % get_underline_space_title_case(value)})\n\n \n if table == 'dataset_phenotype':\n goal.table(\n 'isa', table\n ).foreign_keys[\n ('isa', '%s_dataset_fkey' % (table))\n ].foreign_key.update({\n \"to_name\": \"Datasets\"\n })\n else:\n goal.table(\n 'isa', table\n ).foreign_keys[\n ('isa', '%s_dataset_id_fkey' % (table))\n ].foreign_key.update({\n \"to_name\": \"Datasets\"\n })\n \n\n \n counter = counter + 2\n \n print 'Setting %d annotations for the dataset association tables...' 
% counter", "title": "" }, { "docid": "4f1ecf2143d3adb95590448ccbaf49e7", "score": "0.5278701", "text": "def get_model_data(basenames=None, schema_name=\"model_data\",\n resolution_type=\"intersection\"):\n n_tables = len(basenames)\n data = dict(zip(basenames.keys(), [None] * n_tables))\n conn = pg_sed.db_connection()\n\n for table_type, table_name in basenames.items():\n query = \"SELECT * FROM %s.%s\" % (schema_name, table_name)\n logger.info(\"Executing: %s\", query)\n data[table_type] = pd.io.sql.read_sql(query, conn)\n\n data = resolve_unshared_features(data, resolution_type)\n conn.close()\n return data", "title": "" }, { "docid": "83a45dbf17195b2a5df27a628aaa8d8a", "score": "0.52737683", "text": "def import_metadata_df(gen='one'):\r\n\r\n assert gen in ['one', 'two', 'three'], \\\r\n \"Error: gen must be in ['one', 'two']\"\r\n\r\n # Load the metadata as a list of dicts\r\n metadata = import_metadata(gen=gen)\r\n\r\n metadata_df = pd.DataFrame() # Init dataframe to return\r\n\r\n # For each info-piece in the metadata\r\n for metadata_info in metadata[0].keys():\r\n\r\n # Make this a column\r\n metadata_df[metadata_info] = get_info_piece_list(metadata,\r\n metadata_info)\r\n\r\n return metadata_df", "title": "" }, { "docid": "dde37e3e2b17f457c84bd849c2ae81d3", "score": "0.52682984", "text": "def get_data():\r\n features_file = \"fma_metadata/features.csv\"\r\n genres_file = \"fma_metadata/tracks.csv\"\r\n\r\n features = pd.read_csv(features_file, header=[0, 1, 2], skiprows=[3])\r\n print(\"The shape of features is:{}\".format(features.shape))\r\n\r\n genres = pd.read_csv(genres_file, header=[0, 1], skiprows=[2])\r\n # Picked the most generic \"genres\" column\r\n genres = genres[\"track\", \"genres\"]\r\n print(\"The shape of genres is:{}\".format(genres.shape))\r\n\r\n # The number of rows should be equal in the shapes (106574)\r\n return features, genres", "title": "" }, { "docid": "3e44c5485c3ce3d42ef9d080ab82d478", "score": "0.5267227", "text": "def load_data(self):\n\n\t\tent_set, rel_set = OrderedSet(), OrderedSet()\n\t\tfor split in ['train', 'test', 'valid']:\n\t\t\tfor line in open('./data/{}/{}.txt'.format(self.p.dataset, split)):\n\t\t\t\tsub, rel, obj = line.strip().split('\\t')\n\t\t\t\tent_set.add(sub)\n\t\t\t\trel_set.add(rel)\n\t\t\t\tent_set.add(obj)\n\n\t\tself.p.entity_embed_dir = \"./data/\" + self.p.dataset + \"/entity_embed.npy\"\n\t\t# print(self.p.entity_embed_dir)\n\t\tentity_ids_dir = \"./data/\" + self.p.dataset + \"/entity_ids.json\"\n\t\tentity_types_dir = \"./data/\" + self.p.dataset + \"/entity_types.json\"\n\t\trelation_ids_dir = \"./data/\" + self.p.dataset + \"/relation_ids.json\"\n\n\t\twith open(entity_ids_dir, \"r\", encoding=\"utf-8\") as f:\n\t\t\tself.ent2id = json.loads(f.readline())\n\t\t\n\t\twith open(entity_types_dir, \"r\", encoding=\"utf-8\") as f:\n\t\t\tentity_types = json.loads(f.readline())\n\t\t\n\t\tself.p.num_ent\t\t= len(self.ent2id)\n\t\tself.p.ent_num\t\t= len(self.ent2id)\n\t\t# load entity types to type_id:\n\t\tself.ent_type_to_id = {}\n\t\tentity_type_num = 0\n\n\t\tfor ent_id in entity_types:\n\t\t\tif entity_types[ent_id] not in self.ent_type_to_id:\n\t\t\t\tself.ent_type_to_id.update({entity_types[ent_id]: entity_type_num})\n\t\t\t\tentity_type_num += 1\n\n\t\tself.id_to_ent_type = {idx: t for t, idx in self.ent_type_to_id.items()}\n\t\tself.p.entity_type_num = len(self.ent_type_to_id)\n\n\t\ttrain_ent_labels, test_ent_labels, valid_ent_labels = [], [], []\n\t\ttrain_ent_ids, test_ent_ids, valid_ent_ids = [], [], 
[]\n\t\t\n\t\ttrain_num = int(self.p.ent_num * 0.8)\n\t\tvalid_num = int(self.p.ent_num * 0.1) + 1\n\t\ttest_num = self.p.ent_num - train_num - valid_num\n\n\t\t# print(train_num)\n\n\t\tfor ent_id in self.ent2id:\n\t\t\tent_idx = self.ent2id[ent_id]\n\t\t\tif ent_idx >=0 and ent_idx < train_num:\n\t\t\t\ttrain_ent_ids.append(ent_idx)\n\t\t\t\ttrain_ent_labels.append(self.ent_type_to_id[entity_types[ent_id]])\n\t\t\telif ent_idx >= train_num and ent_idx < train_num + valid_num:\n\t\t\t\tvalid_ent_ids.append(ent_idx)\n\t\t\t\tvalid_ent_labels.append(self.ent_type_to_id[entity_types[ent_id]])\n\t\t\telse:\n\t\t\t\ttest_ent_ids.append(ent_idx)\n\t\t\t\ttest_ent_labels.append(self.ent_type_to_id[entity_types[ent_id]])\n\t\t\n\t\tself.train_ent_labels = torch.LongTensor(train_ent_labels).to(self.device)\n\t\tself.train_ent_ids = torch.LongTensor(train_ent_ids).to(self.device)\n\n\t\tself.valid_ent_labels = torch.LongTensor(valid_ent_labels).to(self.device)\n\t\tself.valid_ent_ids = torch.LongTensor(valid_ent_ids).to(self.device)\n\n\t\tself.test_ent_labels = torch.LongTensor(test_ent_labels).to(self.device)\n\t\tself.test_ent_ids = torch.LongTensor(test_ent_ids).to(self.device)\n\n\t\t\n\t\twith open(relation_ids_dir, \"r\", encoding=\"utf-8\") as f:\n\t\t\tself.rel2id = json.loads(f.readline())\n\n\t\t# self.ent2id = {ent: idx for idx, ent in enumerate(ent_set)}\n\t\t# self.rel2id = {rel: idx for idx, rel in enumerate(rel_set)}\n\n\t\trel2id_dict = self.rel2id.copy()\n\t\tfor key in rel2id_dict:\n\t\t\tself.rel2id.update({key+'_reverse': rel2id_dict[key]+len(rel2id_dict)})\n\n\t\t# print(self.rel2id)\n\t\tself.id2ent = {idx: ent for ent, idx in self.ent2id.items()}\n\t\tself.id2rel = {idx: rel for rel, idx in self.rel2id.items()}\n\n\t\t''' \n\t\tent2id: {entity_id: 0}\n\t\trel2id: {rel_id: 0, rel_id_reverse: 1}\n\t\t'''\n\n\t\t# also load relation classification dataset\n\t\ttrain_rel_labels, train_rel_start, train_rel_end = [], [], []\n\t\tvalid_rel_labels, valid_rel_start, valid_rel_end = [], [], []\n\t\ttest_rel_labels, test_rel_start, test_rel_end = [], [], []\n\n\t\tself.p.num_rel = len(self.rel2id) // 2 \n\n\t\tfor line in open('./data/{}/train.txt'.format(self.p.dataset)):\n\t\t\tsub, rel, obj = line.strip().split('\\t')\n\t\t\tstart_idx = self.ent2id[sub]\n\t\t\ttrain_rel_start.append(start_idx)\n\t\t\tend_idx = self.ent2id[obj]\n\t\t\ttrain_rel_end.append(end_idx)\n\t\t\trel_idx = self.rel2id[rel]\n\t\t\ttrain_rel_labels.append(rel_idx)\n\t\t\n\t\tself.train_rel_labels = torch.LongTensor([train_rel_labels]).to(self.device).t()\n\t\tself.train_rel_start = torch.LongTensor([train_rel_start]).to(self.device).t()\n\t\tself.train_rel_end = torch.LongTensor([train_rel_end]).to(self.device).t()\n\n\t\tfor line in open('./data/{}/valid.txt'.format(self.p.dataset)):\n\t\t\tsub, rel, obj = line.strip().split('\\t')\n\t\t\tstart_idx = self.ent2id[sub]\n\t\t\tvalid_rel_start.append(start_idx)\n\t\t\tend_idx = self.ent2id[obj]\n\t\t\tvalid_rel_end.append(end_idx)\n\t\t\trel_idx = self.rel2id[rel]\n\t\t\tvalid_rel_labels.append(rel_idx)\n\t\t\n\t\tself.valid_rel_labels = torch.LongTensor([valid_rel_labels]).to(self.device).t()\n\t\tself.valid_rel_start = torch.LongTensor([valid_rel_start]).to(self.device).t()\n\t\tself.valid_rel_end = torch.LongTensor([valid_rel_end]).to(self.device).t()\n\n\t\tfor line in open('./data/{}/test.txt'.format(self.p.dataset)):\n\t\t\tsub, rel, obj = line.strip().split('\\t')\n\t\t\tstart_idx = self.ent2id[sub]\n\t\t\ttest_rel_start.append(start_idx)\n\t\t\tend_idx = 
self.ent2id[obj]\n\t\t\ttest_rel_end.append(end_idx)\n\t\t\trel_idx = self.rel2id[rel]\n\t\t\ttest_rel_labels.append(rel_idx)\n\t\t\n\t\tself.test_rel_labels = torch.LongTensor([test_rel_labels]).to(self.device).t()\n\t\tself.test_rel_start = torch.LongTensor([test_rel_start]).to(self.device).t()\n\t\tself.test_rel_end = torch.LongTensor([test_rel_end]).to(self.device).t()\n\n\t\t# 这里的num_rel的含义是relation type的数量 而不是relation edge的数量,其实edge的数量就等于triples的数量 \n\t\t# num_rel在这里指的是:没有添加reverse边的时候的数量\n\t\tself.p.embed_dim\t= self.p.k_w * self.p.k_h if self.p.embed_dim is None else self.p.embed_dim\n\n\t\tself.data = ddict(list)\n\t\tsr2o = ddict(set)\n\n\t\tfor split in ['train', 'test', 'valid']:\n\t\t\tfor line in open('./data/{}/{}.txt'.format(self.p.dataset, split)):\n\t\t\t\tsub, rel, obj = line.strip().split('\\t')\n\t\t\t\tsub, rel, obj = self.ent2id[sub], self.rel2id[rel], self.ent2id[obj]\n\t\t\t\tself.data[split].append((sub, rel, obj))\n\n\t\t\t\tif split == 'train': \n\t\t\t\t\tsr2o[(sub, rel)].add(obj)\n\t\t\t\t\tsr2o[(obj, rel+self.p.num_rel)].add(sub)\n\t\t# sr2o的格式:dict, {(h, r): Set({t1, t2, ...})}\n\t\t# for key in sr2o:\n\t\t# \tif len(sr2o[key]) > 1:\n\t\t# \t\tprint(key)\n\t\t# \t\tprint(sr2o[key])\n\t\t# \t\tprint('\\n')\n\t\t# print(sr2o)\n\t\tself.data = dict(self.data)\n\t\t# print(self.data)\n\t\t# self.data: {\"train\": [(h, r, t), (h, r, t)], \"valid\": [(h, r, t), (h, r, t)]}\n\t\tself.sr2o = {k: list(v) for k, v in sr2o.items()} # only contains training sr2os\n\t\t# self.sr2o的格式:dict, {(h, r): [t1, t2, ...])}\n\t\tfor split in ['test', 'valid']:\n\t\t\tfor sub, rel, obj in self.data[split]:\n\t\t\t\tsr2o[(sub, rel)].add(obj)\n\t\t\t\tsr2o[(obj, rel+self.p.num_rel)].add(sub)\n\n\t\tself.sr2o_all = {k: list(v) for k, v in sr2o.items()} # contains training, validataion, and testing sr2os\n\t\t# self.sr2o_all的格式:dict, {(h, r): [t1, t2, ...])}\n\t\t# print(type(self.sr2o))\n\t\t# print(type(self.sr2o[(6593,9)]))\n\t\tself.triples = ddict(list)\n\n\t\tfor (sub, rel), obj in self.sr2o.items():\n\t\t\tself.triples['train'].append({'triple':(sub, rel, -1), 'obj': obj, 'label': self.sr2o[(sub, rel)], 'sub_samp': 1})\n\n\t\t# self.triples['train']: {'triple': (h, r, -1), 'label': [t1, t2, t3,...], 'sub_samp': 1}\n\t\tfor split in ['test', 'valid']:\n\t\t\tfor sub, rel, obj in self.data[split]:\n\t\t\t\trel_inv = rel + self.p.num_rel\n\t\t\t\tself.triples['{}_{}'.format(split, 'tail')].append({'triple': (sub, rel, obj), \t 'label': self.sr2o_all[(sub, rel)]})\n\t\t\t\tself.triples['{}_{}'.format(split, 'head')].append({'triple': (obj, rel_inv, sub), 'label': self.sr2o_all[(obj, rel_inv)]})\n\n\t\t# 注意:self.triples[\"train\"] 里面只包含了训练集的所有triples,\"triple\"的最后一个值是-1,但是\"valid\", \"test\"里面的label包含了所有的可能的labels,(包括训练集和测试集)\n\n\t\tself.triples = dict(self.triples)\n\n\t\tdef get_data_loader(dataset_class, split, batch_size, shuffle=True):\n\t\t\treturn DataLoader(\n\t\t\t\t\tdataset_class(self.triples[split], self.p),\n\t\t\t\t\tbatch_size = batch_size,\n\t\t\t\t\tshuffle = shuffle,\n\t\t\t\t\tnum_workers = max(0, self.p.num_workers),\n\t\t\t\t\tcollate_fn = dataset_class.collate_fn\n\t\t\t\t)\n\n\t\tself.data_iter = {\n\t\t\t'train': \tget_data_loader(TrainDataset, 'train', \t self.p.batch_size),\n\t\t\t'valid_head': get_data_loader(TestDataset, 'valid_head', self.p.batch_size, False),\n\t\t\t'valid_tail': get_data_loader(TestDataset, 'valid_tail', self.p.batch_size, False),\n\t\t\t'test_head': \tget_data_loader(TestDataset, 'test_head', self.p.batch_size, 
False),\n\t\t\t'test_tail': \tget_data_loader(TestDataset, 'test_tail', self.p.batch_size, False),\n\t\t}\n\n\t\tself.load_events()\n\t\tself.id2evt = {idx: evt for evt, idx in self.evt2id.items()}\n\n\t\tself.edge_index, self.edge_type = self.construct_adj()\n\t\t# print(self.edge_type.shape)\n\t\t# print(self.edge_index.shape)\n\t\tself.event_edge_index, self.event_index, self.role_type, self.role_mask, self.entity_event_index, self.entity_mask = self.construct_event_adj()\n\t\t# print(self.entity_event_index.shape)", "title": "" }, { "docid": "b398b7e6cc01f7cb6c4d8353d6b49de8", "score": "0.5263577", "text": "def run_insert_data_into_multiple_tables():\n print(\"EXAMPLE - Insert data into multiple tables within a new Hyper file\")\n path_to_database = Path(\"superstore.hyper\")\n\n # Starts the Hyper Process with telemetry enabled to send data to Tableau.\n # To opt out, simply set telemetry=Telemetry.DO_NOT_SEND_USAGE_DATA_TO_TABLEAU.\n with HyperProcess(telemetry=Telemetry.SEND_USAGE_DATA_TO_TABLEAU) as hyper:\n\n # Creates new Hyper file \"superstore.hyper\".\n # Replaces file with CreateMode.CREATE_AND_REPLACE if it already exists.\n with Connection(endpoint=hyper.endpoint,\n database=path_to_database,\n create_mode=CreateMode.CREATE_AND_REPLACE) as connection:\n\n # Create multiple tables.\n connection.catalog.create_table(table_definition=orders_table)\n connection.catalog.create_table(table_definition=customer_table)\n connection.catalog.create_table(table_definition=products_table)\n connection.catalog.create_table(table_definition=line_items_table)\n\n # Insert data into Orders table.\n orders_data_to_insert = [\n [399, \"DK-13375\", datetime(2012, 9, 7), \"CA-2011-100006\", datetime(2012, 9, 13), \"Standard Class\"],\n [530, \"EB-13705\", datetime(2012, 7, 8), \"CA-2011-100090\", datetime(2012, 7, 12), \"Standard Class\"]\n ]\n\n with Inserter(connection, orders_table) as inserter:\n inserter.add_rows(rows=orders_data_to_insert)\n inserter.execute()\n\n # Insert data into Customers table.\n customer_data_to_insert = [\n [\"DK-13375\", \"Dennis Kane\", 518, \"Consumer\"],\n [\"EB-13705\", \"Ed Braxton\", 815, \"Corporate\"]\n ]\n\n with Inserter(connection, customer_table) as inserter:\n inserter.add_rows(rows=customer_data_to_insert)\n inserter.execute()\n\n # Insert individual row into Product table.\n with Inserter(connection, products_table) as inserter:\n inserter.add_row(row=[\"TEC-PH-10002075\", \"Technology\", \"Phones\", \"AT&T EL51110 DECT\"])\n inserter.execute()\n\n # Insert data into Line Items table.\n line_items_data_to_insert = [\n [2718, \"CA-2011-100006\", \"TEC-PH-10002075\", 377.97, 3, 0.0, 109.6113],\n [2719, \"CA-2011-100090\", \"TEC-PH-10002075\", 377.97, 3, None, 109.6113]\n ]\n\n with Inserter(connection, line_items_table) as inserter:\n inserter.add_rows(rows=line_items_data_to_insert)\n inserter.execute()\n\n tables = [orders_table, customer_table, products_table, line_items_table]\n for table in tables:\n # `execute_scalar_query` is for executing a query that returns exactly one row with one column.\n row_count = connection.execute_scalar_query(query=f\"SELECT COUNT(*) FROM {table.table_name}\")\n print(f\"The number of rows in table {table.table_name} is {row_count}.\")\n\n print(\"The connection to the Hyper file has been closed.\")\n print(\"The Hyper process has been shut down.\")", "title": "" }, { "docid": "f75c6b81f966e64d83a92a5b43f4f937", "score": "0.52396345", "text": "def merge_dataset_ids(dataset_ids, mapping):\r\n datasets = 
[Dataset.find_one(dataset_id) for dataset_id in dataset_ids]\r\n datasets = [dataset for dataset in datasets if dataset.record]\r\n\r\n if len(datasets) < 2:\r\n raise MergeError(\r\n 'merge requires 2 datasets (found %s)' % len(datasets))\r\n\r\n new_dataset = Dataset.create()\r\n\r\n call_async(__merge_datasets_task, new_dataset, datasets, mapping)\r\n\r\n return new_dataset", "title": "" }, { "docid": "d5ab3d7f96556503db02bafc25f67881", "score": "0.52369636", "text": "def load_all_metas(dataset_path):\n container = list()\n meta_path = os.path.join(dataset_path, \"meta\")\n for filename in sorted(os.listdir(meta_path)):\n # Create path and load file\n file_path = os.path.join(meta_path, filename)\n with open(file_path) as json_file: meta_dict = json.load(json_file)\n # Convert the string to lists\n fix_meta(meta_dict)\n meta_dict['filename'] = filename[:-5]\n # Append to file\n container.append(meta_dict)\n \n return container", "title": "" }, { "docid": "d1de2f34b706af6f1e22a9cd027ddb62", "score": "0.5230644", "text": "def load_data(config):\n if config.load_raw_data :\n filepath = config.Original_user_track_dir\n user_track_data = pd.read_csv(\n filepath, sep='\\t', header=None,\n names=[\n 'user_id', 'timestamp', 'artist_id', 'artist_name', 'track_id', 'track_name'\n ],\n skiprows=[\n 2120260-1, 2446318-1, 11141081-1,\n 11152099-1, 11152402-1, 11882087-1,\n 12902539-1, 12935044-1, 17589539-1\n ]\n )\n user_track_data[\"timestamp\"] = pd.to_datetime(user_track_data.timestamp)\n user_track_data.sort_values(['user_id', 'timestamp'], ascending=True, inplace=True)\n user_track_data.dropna(inplace = True)\n user_track_data.reset_index(inplace = True, drop = True)\n print(f'Number of Records: {len(user_track_data):,}\\nUnique Users: {user_track_data.user_id.nunique()}\\nUnique Artist:{user_track_data.artist_id.nunique():,}')\n elif config.test_mode :\n filepath = config.Test_dataset_dir\n user_track_data = pd.read_csv(filepath)\n else :\n filepath = config.Processed_dataset_dir\n user_track_data = pd.read_csv(filepath)\n return user_track_data", "title": "" }, { "docid": "881cdb72c98e7b5c8e3ab299cb61dcb7", "score": "0.52227616", "text": "def test_merge_metadata(self):\n \n metadata=[[\"# samples\",\"s1\",\"s2\"],[\"feature1\",\"A\",\"B\"],[\"feature2\",1,2]]\n samples=[\"s1\",\"s2\"]\n values=[[\"bug1\",1,2],[\"bug2\",2,4]]\n \n merged, new_samples=utilities.merge_metadata(metadata, samples, values)\n \n expected=[[\"feature1\",\"A\",\"B\"],[\"feature2\",1,2],[\"bug1\",1,2],[\"bug2\",2,4]]\n expected_samples=[\"s1\",\"s2\"]\n \n self.assertEqual(merged, expected) \n self.assertEqual(new_samples, expected_samples)", "title": "" }, { "docid": "3abb7ff585c67526bcc38806f9f8881a", "score": "0.52200824", "text": "def combine_data_sets(*data_sets, data_type='small_data', unique_answers=False):\n questions, answers, qid2set, aid2set = dict(), dict(), dict(), dict()\n\n for set_name in data_sets:\n current_questions, current_answers = get_data_set(set_name, data_type, unique_answers)\n\n questions.update(current_questions)\n answers.update(current_answers)\n\n # Remember which set they came from\n qid2set.update({question_id: set_name for question_id in current_questions})\n aid2set.update({answer_id: set_name for answer_id in current_answers})\n\n return questions, answers, qid2set, aid2set", "title": "" }, { "docid": "3c1c4529695e42f04b29bf9f78846c8f", "score": "0.5219933", "text": "def dt_list_dataset_ids():", "title": "" }, { "docid": "7ea1c71edcb86b6a72dd495faea4337b", "score": "0.5217547", 
"text": "def set_meta(self, dataset, overwrite=True, **kwd):\n super(Otu, self).set_meta(dataset, overwrite=overwrite, **kwd)\n\n if dataset.has_data():\n label_names = set()\n otulabel_names = set()\n ncols = 0\n data_lines = 0\n comment_lines = 0\n\n headers = iter_headers(dataset.file_name, sep='\\t', count=-1)\n first_line = get_headers(dataset.file_name, sep='\\t', count=1)\n if first_line:\n first_line = first_line[0]\n # set otulabels\n if len(first_line) > 2:\n otulabel_names = first_line[2:]\n # set label names and number of lines\n for line in headers:\n if len(line) >= 2 and not line[0].startswith('@'):\n data_lines += 1\n ncols = max(ncols, len(line))\n label_names.add(line[0])\n else:\n comment_lines += 1\n # Set the discovered metadata values for the dataset\n dataset.metadata.data_lines = data_lines\n dataset.metadata.columns = ncols\n dataset.metadata.labels = list(label_names)\n dataset.metadata.labels.sort()\n dataset.metadata.otulabels = list(otulabel_names)\n dataset.metadata.otulabels.sort()", "title": "" }, { "docid": "900ce8a5f1d03e115ad17f6d98d8b959", "score": "0.5215668", "text": "def updatedata(username, lasttweet, imgurllist, taglist):\n cursor = db.cursor()\n for i, image in enumerate(imgurllist):\n cursor.execute(add_image, (username, image))\n for tag in taglist[i]:\n cursor.execute(add_tag, (image, tag))\n db.commit()\n cursor.close()", "title": "" }, { "docid": "ac4097d238879d9acc23e96af3c699cd", "score": "0.52116996", "text": "def load_id_2meta_info(df):\n id_2meta_info = dict()\n\n for index, row in df.iterrows():\n\n id_ = row['token_ids'][0]\n\n gold = row['source_wn_engs']\n system = row['lstm_output']\n emb_freq = row['emb_freq']\n candidates = set(emb_freq)\n\n id_2meta_info[id_] = {'gold': gold,\n 'system': system,\n 'candidates': candidates,\n 'predicted_embedding' : row['target_embedding']}\n\n return id_2meta_info", "title": "" }, { "docid": "6cb03063b32b474fbca9a036b5a26129", "score": "0.5185083", "text": "def set_dataset_table_annotations(goal):\n counter = 0\n\n print '===== Doing vocab.gene_summary annotations .....'\n\n goal.table('vocab', 'gene_summary').visible_columns.update({\n \"detailed\" : [[\"vocab\",\"gene_summary_gene_fkey\"],[\"vocab\",\"gene_summary_species_fkey\"],[\"vocab\",\"gene_summary_contributed_by_fkey\"],\"summary\"],\"compact\" : [[\"vocab\",\"gene_summary_gene_fkey\"],[\"vocab\",\"gene_summary_species_fkey\"],\"summary\"]\n }) \n\n \n print '===== Doing the isa.imaging_compact annotations in server=%s .....' 
% servername\n \n goal.table('isa', 'imaging_data').alternatives.update({\"compact\" : [\"isa\", \"imaging_compact\"],\"compact/brief\" : [\"isa\", \"imaging_compact\"]}) \n\n\n goal.table('isa', 'imaging_compact').visible_columns.update({\n \"compact\": [[\"isa\",\"imaging_compact_rid_fkey\"],\"replicate\",\"url\",\"thumbnails\",[\"isa\",\"imaging_compact_file_type_fkey\"],\"byte_count\", \"md5\",\"submitted_on\"], \n \"detailed\": [[\"isa\",\"imaging_compact_rid_fkey\"],\"replicate\",\"url\",\"thumbnails\",[\"isa\",\"imaging_compact_file_type_fkey\"],\"byte_count\", \"md5\",\"submitted_on\"]\n }) \n\n goal.column(\n 'isa','imaging_compact', 'filename'\n ).column_display.update({\n \"compact\": {\"markdown_pattern\":\"[**{{filename}}**]({{{url}}})\"}, \n \"detailed\": {\"markdown_pattern\":\"[**{{filename}}**]({{{url}}})\"}\n })\n\n goal.column(\n 'isa','imaging_compact', 'url'\n ).column_display.update({\n \"compact\": {\"markdown_pattern\":\"[**{{filename}}**]({{{url}}})\"}, \n \"detailed\": {\"markdown_pattern\":\"[**{{filename}}**]({{{url}}})\"}\n })\n\n goal.column(\n 'isa','imaging_compact', 'thumbnails'\n ).column_display.update({\n \"*\": {\"markdown_pattern\":\"{{#_thumbnails}} [![{{{filename}}}](https://%s{{{url}}}){height=90}](https://%s{{{url}}}){target=_blank} {{/_thumbnails}}\" % (servername,servername) } \n })\n\n\n goal.table('isa','imaging_compact').display.update({'name': 'Imaging Data'})\n \n\n print '===== Doing the isa.previews annotation in server=%s .....' % servername\n \n goal.table('isa', 'previews').table_display.update({\n \"compact\" :{\"row_markdown_pattern\":\":::iframe [**{{{filename}}}** -- click the **load** button below to view the downsampled preview](https://%s/_viewer/xtk/view_on_load.html?url=https://%s{{{preview_uri}}}){width=800 height=600 .iframe} \\n:::\" % (servername,servername) }\n }) \n\n goal.column(\n 'isa', 'previews', 'preview_uri'\n ).column_display.update({\n \"compact\" :{\"markdown_pattern\":\":::iframe [**{{{filename}}}** -- click the **load** button below to view the downsampled preview](https://%s/_viewer/xtk/view_on_load.html?url=https://%s{{{preview_uri}}}){width=800 height=600 .iframe} \\n:::\" % (servername,servername)}\n })\n\n goal.table('isa','previews').display.update({'name': 'Downsampled Image Previews'})\n \n\n # ---------\n \n goal.table('isa', 'person').table_display.update({\n \"row_name\": {\"row_markdown_pattern\": \"{{{first_name}}} {{{last_name}}}\"},\n \"*\" : {\"row_order\": [{\"column\": \"last_name\" , \"descending\": False}]} \n }) \n\n\n del goal.table('isa', 'dataset').annotations['tag:isrd.isi.edu,2016:table-display']\n \n goal.table('isa', 'dataset').table_display.update({\n \"row_name\": {\"row_markdown_pattern\": \"{{title}}\"},\n \"*\" : {\"row_order\": [{\"column\": \"accession\" , \"descending\": True}]} \n }) \n\n \n goal.table('isa', 'dataset').visible_columns.update({\n \"filter\": {\n \"and\": [\n {\"source\": [{\"inbound\": [\"isa\", \"dataset_organism_dataset_id_fkey\"]}, {\"outbound\": [\"isa\", \"dataset_organism_organism_fkey\"]}, \"dbxref\"], \"entity\": True, \"open\": False}, \n {\"source\": [{\"inbound\": [\"isa\", \"dataset_experiment_type_dataset_id_fkey\"]}, {\"outbound\": [\"isa\", \"dataset_experiment_type_experiment_type_fkey\"]}, \"dbxref\"], \"entity\": True, \"open\": False}, \n {\"source\": [{\"inbound\": [\"isa\", \"dataset_data_type_data_type_fkey\"]}, {\"outbound\": [\"isa\", \"dataset_data_type_dataset_id_fkey\"]}, \"dbxref\"], \"entity\": True, \"open\": False}, \n 
{\"source\": [{\"inbound\": [\"isa\", \"dataset_gene_dataset_id_fkey\"]}, {\"outbound\": [\"isa\", \"dataset_gene_gene_fkey\"]}, \"dbxref\"], \"entity\": True, \"open\": False}, \n {\"source\": [{\"inbound\": [\"isa\", \"dataset_stage_dataset_id_fkey\"]}, {\"outbound\": [\"isa\", \"dataset_stage_stage_fkey\"]}, \"dbxref\"], \"entity\": True, \"open\": False}, \n {\"source\": [{\"inbound\": [\"isa\", \"dataset_anatomy_dataset_id_fkey\"]}, {\"outbound\": [\"isa\", \"dataset_anatomy_anatomy_fkey\"]}, \"dbxref\"], \"entity\": True, \"open\": False},\n {\"source\": [{\"inbound\": [\"isa\", \"dataset_genotype_dataset_id_fkey\"]}, {\"outbound\": [\"isa\", \"dataset_genotype_genotype_fkey\"]}, \"dbxref\"], \"entity\": True, \"open\": False},\n {\"source\": [{\"inbound\": [\"isa\", \"dataset_phenotype_dataset_fkey\"]}, {\"outbound\": [\"isa\", \"dataset_phenotype_phenotype_fkey\"]}, \"dbxref\"], \"entity\": True, \"open\": False},\n {\"source\": [{\"inbound\": [\"isa\", \"dataset_chromosome_dataset_id_fkey\"]}, \"chromosome\"], \"entity\": True, \"open\": False, \"markdown_name\": \"Chromosome\"},\n {\"source\": [{\"inbound\": [\"isa\", \"publication_dataset_fkey\"]}, \"pmid\"], \"entity\": True, \"open\": False,\"markdown_name\": \"Pubmed ID\"},\n {\"source\": [{\"outbound\": [\"isa\", \"dataset_project_fkey\"]},{\"inbound\": [\"isa\", \"project_investigator_project_id_fkey\"]},{\"outbound\": [\"isa\", \"project_investigator_person_fkey\"]},\"RID\"], \"entity\": True, \"open\": False,\"markdown_name\": \"Project Investigator\"},\n {\"source\": \"accession\", \"entity\": False, \"open\": False},\n {\"source\": \"title\", \"entity\": False, \"open\": False},\n {\"source\": [{\"outbound\": [\"isa\", \"dataset_project_fkey\"]}, \"id\"], \"entity\": True, \"open\": False},\n {\"source\": \"release_date\", \"entity\": False, \"open\": False},\n {\"source\": [{\"outbound\": [\"isa\", \"dataset_status_fkey\"]}, \"name\"], \"entity\": True, \"open\": False}\n ]\n },\n \"compact\": [[\"isa\",\"dataset_RID_key\"],[\"isa\",\"accession_unique\"],\"title\",[\"isa\",\"dataset_project_fkey\"],\"status\",\"release_date\"],\n \"entry\": [\"accession\",\"title\",[\"isa\",\"dataset_project_fkey\"],\"description\",\"study_design\",\"release_date\",[\"isa\",\"dataset_status_fkey\"], \"show_in_jbrowse\"],\n \"detailed\": [[\"isa\",\"dataset_RID_key\"],\"accession\",\"description\",\"study_design\",[\"isa\",\"dataset_project_fkey\"],[\"isa\",\"dataset_status_fkey\"],\"funding\",\"release_date\",\"show_in_jbrowse\",\n [\"isa\",\"publication_dataset_fkey\"],\n [\"isa\",\"dataset_experiment_type_dataset_id_fkey\"],\n [\"isa\",\"dataset_data_type_dataset_id_fkey\"],\n [\"isa\",\"dataset_phenotype_dataset_fkey\"],\n [\"isa\",\"dataset_organism_dataset_id_fkey\"],\n [\"isa\",\"dataset_gene_dataset_id_fkey\"],\n [\"isa\",\"dataset_stage_dataset_id_fkey\"],\n [\"isa\",\"dataset_anatomy_dataset_id_fkey\"],\n [\"isa\",\"dataset_mutation_dataset_id_fkey\"],\n [\"isa\",\"dataset_enhancer_dataset_id_fkey\"],\n [\"isa\",\"dataset_mouse_genetic_background_dataset_id_fkey\"],\n [\"isa\",\"dataset_gender_dataset_id_fkey\"],\n [\"isa\",\"dataset_genotype_dataset_id_fkey\"],\n [\"isa\",\"dataset_instrument_dataset_id_fkey\"],\n [\"isa\",\"dataset_geo_dataset_id_fkey\"],\n [\"isa\",\"dataset_chromosome_dataset_id_fkey\"]\n ]\n }) \n\n del goal.table('isa', 'dataset').annotations['tag:isrd.isi.edu,2016:visible-foreign-keys']\n \n goal.table('isa', 'dataset').visible_foreign_keys.update({\n \"*\": 
[[\"isa\",\"thumbnail_dataset_fkey\"],[\"viz\",\"model_dataset_fkey\"],[\"isa\",\"previews_dataset_id_fkey\"],[\"isa\",\"experiment_dataset_fkey\"],[\"isa\",\"biosample_dataset_fkey\"],[\"isa\",\"enhancer_dataset_fkey\"],[\"isa\",\"clinical_assay_dataset_fkey\"],[\"isa\",\"file_dataset_fkey\"],[\"isa\",\"external_reference_id_fkey\"]]\n })\n\n\n\n \"\"\"\n Biosample\n \"\"\"\n \n goal.table('isa', 'biosample').visible_columns.update({\n \"filter\": {\n \"and\": [\n {\"source\": [{\"outbound\": [\"isa\", \"biosample_species_fkey\"]},\"dbxref\"], \"entity\": True, \"open\": True},\n {\"source\": [{\"outbound\": [\"isa\", \"biosample_stage_fkey\"]},\"dbxref\"], \"entity\": True, \"open\": False},\n {\"source\": [{\"outbound\": [\"isa\", \"biosample_anatomy_fkey\"]},\"dbxref\"], \"entity\": True, \"open\": False},\n {\"source\": [{\"outbound\": [\"isa\", \"biosample_phenotype_fkey\"]},\"dbxref\"], \"entity\": True, \"open\": False},\n {\"source\": [{\"outbound\": [\"isa\", \"biosample_gene_fkey\"]},\"dbxref\"], \"entity\": True, \"open\": False},\n {\"source\": [{\"outbound\": [\"isa\", \"biosample_genotype_fkey\"]},\"dbxref\"], \"entity\": True, \"open\": False},\n {\"source\": [{\"outbound\": [\"isa\", \"biosample_strain_fkey\"]},\"dbxref\"], \"entity\": True, \"open\": False},\n {\"source\": \"local_identifier\", \"entity\": True, \"open\": False}\n ]\n },\n \"detailed\": [[\"isa\",\"biosample_pkey\"],\n [\"isa\",\"biosample_dataset_fkey\"],\n \"local_identifier\",\"summary\",\n [\"isa\",\"biosample_species_fkey\"],\n [\"isa\",\"biosample_specimen_fkey\"],\n [\"isa\",\"biosample_gene_fkey\"],\n [\"isa\",\"biosample_genotype_fkey\"],\n [\"isa\",\"biosample_strain_fkey\"],\n [\"isa\",\"biosample_mutation_fkey\"],\n [\"isa\",\"biosample_stage_fkey\"],\n [\"isa\",\"biosample_anatomy_fkey\"],\n [\"isa\",\"biosample_origin_fkey\"],\n [\"isa\",\"biosample_phenotype_fkey\"],\n [\"isa\",\"biosample_gender_fkey\"],\n \"litter\",\n \"collection_date\"\n ], \n \"compact\": [[\"isa\",\"biosample_pkey\"],\n [\"isa\",\"biosample_species_fkey\"],\n [\"isa\",\"biosample_genotype_fkey\"],\n [\"isa\",\"biosample_strain_fkey\"],\n [\"isa\",\"biosample_stage_fkey\"],\n [\"isa\",\"biosample_anatomy_fkey\"],\n [\"isa\",\"biosample_origin_fkey\"],\n [\"isa\",\"biosample_phenotype_fkey\"],\n \"local_identifier\"], \n \"entry\": [[\"isa\",\"biosample_dataset_fkey\"],\n \"local_identifier\",\n [\"isa\",\"biosample_species_fkey\"],\n [\"isa\",\"biosample_specimen_fkey\"],\n [\"isa\",\"biosample_gene_fkey\"],\n [\"isa\",\"biosample_genotype_fkey\"],\n [\"isa\",\"biosample_strain_fkey\"],\n [\"isa\",\"biosample_mutation_fkey\"],\n [\"isa\",\"biosample_stage_fkey\"],\n [\"isa\",\"biosample_anatomy_fkey\"],\n [\"isa\",\"biosample_origin_fkey\"],\n [\"isa\",\"biosample_phenotype_fkey\"],\n [\"isa\",\"biosample_gender_fkey\"],\n \"litter\",\n \"collection_date\"] \n }) \n\n\n \"\"\"\n Experiment\n \"\"\"\n \n goal.table('isa', 'experiment').visible_columns.update({\n \"filter\": {\n \"and\": [\n {\"source\": [{\"outbound\": [\"isa\", \"experiment_experiment_type_fkey\"]},\"dbxref\"], \"entity\": True, \"open\": True},\n {\"source\": [{\"inbound\": [\"isa\", \"replicate_experiment_fkey\"]},{\"outbound\": [\"isa\", \"replicate_biosample_fkey\"]},{\"outbound\": [\"isa\", \"biosample_species_fkey\"]},\"dbxref\"], \"entity\": True, \"open\": True},\n {\"source\": [{\"inbound\": [\"isa\", \"replicate_experiment_fkey\"]},{\"outbound\": [\"isa\", \"replicate_biosample_fkey\"]},{\"outbound\": [\"isa\", 
\"biosample_stage_fkey\"]},\"dbxref\"], \"entity\": True, \"open\": True},\n {\"source\": [{\"inbound\": [\"isa\", \"replicate_experiment_fkey\"]},{\"outbound\": [\"isa\", \"replicate_biosample_fkey\"]},{\"outbound\": [\"isa\", \"biosample_anatomy_fkey\"]},\"dbxref\"], \"entity\": True, \"open\": True},\n {\"source\": [{\"inbound\": [\"isa\", \"replicate_experiment_fkey\"]},{\"outbound\": [\"isa\", \"replicate_biosample_fkey\"]},{\"outbound\": [\"isa\", \"biosample_genotype_fkey\"]},\"dbxref\"], \"entity\": True, \"open\": True}\n ]\n },\n \"detailed\": [[\"isa\",\"experiment_pkey\"],\n [\"isa\",\"experiment_dataset_fkey\"],\n \"local_identifier\",\n [\"isa\",\"experiment_experiment_type_fkey\"],\n \"biosample_summary\",\n [\"isa\",\"experiment_molecule_type_fkey\"],\n [\"isa\",\"experiment_strandedness_fkey\"],\n [\"isa\",\"experiment_rnaseq_selection_fkey\"],\n [\"isa\",\"experiment_target_of_assay_fkey\"],\n [\"isa\",\"experiment_chromatin_modifier_fkey\"],\n [\"isa\",\"experiment_transcription_factor_fkey\"],\n [\"isa\",\"experiment_histone_modification_fkey\"],\n [\"isa\",\"experiment_control_assay_fkey\"],\n [\"isa\",\"experiment_protocol_fkey\"]\n ],\n \"compact\": [[\"isa\",\"experiment_pkey\"],\n [\"isa\",\"experiment_dataset_fkey\"],\n [\"isa\",\"experiment_experiment_type_fkey\"],\n \"biosample_summary\",\n [\"isa\",\"experiment_protocol_fkey\"],\n \"local_identifier\"],\n \"entry\": [[\"isa\",\"experiment_dataset_fkey\"],\n \"local_identifier\",\n \"biosample_summary\",\n [\"isa\",\"experiment_experiment_type_fkey\"],\n [\"isa\",\"experiment_molecule_type_fkey\"],\n [\"isa\",\"experiment_strandedness_fkey\"],\n [\"isa\",\"experiment_rnaseq_selection_fkey\"],\n [\"isa\",\"experiment_target_of_assay_fkey\"],\n [\"isa\",\"experiment_chromatin_modifier_fkey\"],\n [\"isa\",\"experiment_transcription_factor_fkey\"],\n [\"isa\",\"experiment_histone_modification_fkey\"],\n [\"isa\",\"experiment_control_assay_fkey\"],\n [\"isa\",\"experiment_protocol_fkey\"]]\n }) \n\n \"\"\"\n Replicate\n \"\"\"\n \n goal.table('isa', 'replicate').visible_columns.update({\n \"detailed\": [[\"isa\",\"replicate_pkey\"],[\"isa\",\"replicate_experiment_fkey\"],[\"isa\",\"replicate_biosample_fkey\"],\"bioreplicate_number\",\"technical_replicate_number\"],\n \"compact\": [[\"isa\",\"replicate_pkey\"],[\"isa\",\"replicate_biosample_fkey\"],\"bioreplicate_number\",\"technical_replicate_number\"],\n \"entry\": [[\"isa\",\"replicate_experiment_fkey\"],[\"isa\",\"replicate_biosample_fkey\"],\"bioreplicate_number\",\"technical_replicate_number\"],\n \"filter\" :{\"and\" : [\n {\"source\": [{\"outbound\": [\"isa\", \"replicate_dataset_fkey\"]},\"RID\"], \"entity\": True, \"open\": True,\"markdown_name\": \"Dataset\"},\n {\"source\": [{\"outbound\": [\"isa\", \"replicate_experiment_fkey\"]},\"RID\"], \"entity\": True, \"open\": True,\"markdown_name\": \"Experiment\"},\n {\"source\": [{\"outbound\": [\"isa\", \"replicate_biosample_fkey\"]},\"RID\"], \"entity\": True, \"open\": True,\"markdown_name\": \"Biosample\"}\n ]\n } \n }) \n\n \n counter = counter + 3\n \n print 'Setting %d annotations for the dataset table...' 
% counter", "title": "" }, { "docid": "38636b595bf0d0d6f08f5b609d9e1e3d", "score": "0.5180524", "text": "def artists_trans_load(df, output_data):\n # Select columns to create artists table\n artists_table = df.selectExpr(\n 'artist_id',\n 'artist_name as name',\n 'artist_location as location',\n 'artist_latitude as latitude',\n 'artist_longitude as longitude'\n ) \\\n .drop_duplicates()\n\n # Write artists table to parquet files\n artists_table.write \\\n .parquet('{}artists_table'.format(output_data), mode = 'ignore')", "title": "" }, { "docid": "3563fd14c46b84cd5728f1fceb889c00", "score": "0.5179757", "text": "def update_from_db_metadata(metadata, datasette, database, table):\n for db_name in datasette.databases:\n db = get_datasette_db(datasette, db_name)\n if \"__metadata\" not in db.table_names():\n continue\n meta_table = db[\"__metadata\"]\n # some basic bootstrapping here ....\n if \"databases\" not in metadata:\n metadata[\"databases\"] = {}\n if db_name not in metadata[\"databases\"]:\n metadata[\"databases\"][db_name] = {}\n for row in meta_table.rows:\n key = row.get(\"key\")\n row_value = row.get(\"value\")\n obj_value = json.loads(row_value)\n if isinstance(obj_value, dict):\n if key not in metadata[\"databases\"][db_name]:\n metadata[\"databases\"][db_name][key] = {}\n _metadata_recursive_update(\n metadata[\"databases\"][db_name][key], obj_value\n )\n elif obj_value:\n metadata[\"databases\"][db_name][key] = obj_value\n return metadata", "title": "" }, { "docid": "e88ec442202baaa2bc98bf172f0f2a14", "score": "0.517661", "text": "def create_datasets(dataset_dir, config, train_pct=.8):\n\n train_ann, val_ann = split_annotations(dataset_dir, config, train_pct=train_pct)\n\n print(annotation_stats(train_ann))\n print(annotation_stats(val_ann))\n\n train_ds = DetDataset(config)\n train_ds.load_by_annotations(dataset_dir, train_ann, config.CLASS_NAMES)\n\n val_ds = DetDataset(config)\n val_ds.load_by_annotations(dataset_dir, val_ann, config.CLASS_NAMES)\n\n assert len(train_ds.image_info) == len(train_ann) and len(val_ds.image_info) == len(val_ann)\n\n return train_ds, val_ds", "title": "" }, { "docid": "c9e124c28e1c09af8b5f90c201e99374", "score": "0.5171674", "text": "def _write_imagemeta_tables(self):\n dat_image = self._generate_image_table()\n dat_image, dat_roi = self._generate_roi_table(dat_image)\n dat_roi, dat_site = self._generate_site_table(dat_roi)\n dat_site, dat_slideac = self._generate_slideac_table(dat_site)\n dat_slideac, dat_slide = self._generate_slide_table(dat_slideac)\n dat_slide, dat_sampleblock = self._generate_sampleblock_table(dat_slide)\n\n self._bulkinsert(dat_sampleblock, db.sampleblocks)\n self._bulkinsert(dat_slide, db.slides)\n self._bulkinsert(dat_slideac, db.slideacs)\n self._bulkinsert(dat_site, db.sites)\n self._bulkinsert(dat_roi, db.acquisitions)\n self._bulkinsert(dat_image, db.images)\n self._bulkinsert(dat_image, db.valid_images)", "title": "" }, { "docid": "2965c88bea50e3e37b0908c3b28f7f9f", "score": "0.5169301", "text": "def load(corpus_dir, lang, usecols=USE_COLUMNS):\n split_dfs = []\n\n for split in SPLIT_NAMES:\n df = load_split(corpus_dir, lang, split, usecols)\n split_dfs.append(df)\n\n # Concatenate all split dataframes into a single table,\n # replace default integer indexing by utterance ids,\n # throwing an exception if there are duplicate utterance ids.\n return (pd.concat(split_dfs)\n .set_index(\"id\", drop=True, verify_integrity=True)\n .sort_index())", "title": "" }, { "docid": "28c5cea5a96e12380678ef484f0b2fc2", "score": 
"0.51634884", "text": "def setUp(self):\n fq_dataset_name = self.fq_table_names[0].split('.')\n self.fq_dataset_name = '.'.join(fq_dataset_name[:-1])\n\n super().setUp()\n\n # test data for person table\n person_data_query = self.jinja_env.from_string(\"\"\"\n INSERT INTO\n `{{project_id}}.{{dataset_id}}.person` \n (person_id, gender_concept_id, year_of_birth, race_concept_id, ethnicity_concept_id)\n VALUES\n (123, 0, 1980, 0, 0),\n (345, 0, 1981, 0, 0),\n (678, 0, 1982, 0, 0),\n (910, 0, 1983, 0, 0),\n (1112, 0, 1984, 0, 0)\n \"\"\").render(project_id=self.project_id, dataset_id=self.dataset_id)\n\n # test data for observation table\n observation_data_query = self.jinja_env.from_string(\"\"\"\n INSERT INTO\n `{{project_id}}.{{dataset_id}}.observation` (observation_id,\n person_id,\n value_source_concept_id,\n value_as_concept_id,\n observation_source_concept_id,\n observation_concept_id, \n observation_date,\n observation_type_concept_id)\n VALUES\n (111, 123, 1585266, 0, 1585249, 0, date('2021-01-01'), 0),\n (222, 345, 1585266, 0, 1585249, 0, date('2021-01-01'), 0),\n (333, 678, 1585266, 0, 1585249, 0, date('2021-01-01'), 0),\n (444, 910, 1585266, 0, 1585249, 0, date('2021-01-01'), 0),\n (1122, 123, 1585847, 45878463, 1585845, 0, date('2021-01-01'), 0),\n (3344, 345, 1585847, 45878463, 1585845, 0, date('2021-01-01'), 0),\n (5566, 678, 1585847, 45878463, 1585845, 0, date('2021-01-01'), 0),\n (7788, 910, 1585847, 45878463, 1585845, 0, date('2021-01-01'), 0),\n (9910, 1112, 1585266, 0, 1585249, 0, date('2021-01-01'), 0),\n (1112, 1112, 1585848, 45878463, 1585845, 0, date('2021-01-01'), 0)\n \"\"\").render(project_id=self.project_id, dataset_id=self.dataset_id)\n\n # test data for concept table\n concept_data_query = self.jinja_env.from_string(\"\"\"\n INSERT INTO\n `{{project_id}}.{{dataset_id}}.concept` (\n concept_id,\n concept_code,\n concept_name,\n domain_id,\n vocabulary_id,\n concept_class_id,\n valid_start_date,\n valid_end_date)\n VALUES\n (1585266, 'PIIState_CA', 'PII State: CA', 'observation', 'PPI', 'answer', date('2017-04-24'), date('2099-12-31')),\n (1585847, 'SexAtBirth_Female', 'Female', 'observation', 'PPI', 'answer', date('2017-05-22'), date('2099-12-31'))\n \"\"\").render(project_id=self.project_id, dataset_id=self.dataset_id)\n\n # test data for observation_ext table\n observation_ext_data_query = self.jinja_env.from_string(\"\"\"\n INSERT INTO\n `{{project_id}}.{{dataset_id}}.observation_ext` (\n observation_id,\n src_id\n )\n VALUES\n (111, 'PPI/PM'),\n (222, 'PPI/PM'),\n (333, 'PPI/PM'),\n (444, 'PPI/PM'),\n (9910, 'PPI/PM')\n \"\"\").render(project_id=self.project_id,\n dataset_id=self.dataset_id)\n\n self.load_test_data([\n person_data_query, observation_data_query, concept_data_query,\n observation_ext_data_query\n ])", "title": "" }, { "docid": "0c5584c4ceb405bdc82f0fb9648d5ff8", "score": "0.5155755", "text": "def make_metadata(dataset: str, classes: Optional[Set[str]] = None\n ) -> Tuple[BinaryDatasetMetadata, BinaryDatasetMetadata]:\n labels = load_labels(dataset)\n train_metadata = []\n test_metadata = []\n for dirpath, dirnames, filenames in os.walk(dataset_dir(dataset)):\n for name in sorted(filenames):\n if name not in labels:\n continue\n\n label = labels[name]\n if classes is not None and label not in classes:\n continue\n\n data = (os.path.join(dirpath, name), label)\n folder = os.path.basename(dirpath)\n if folder == 'train':\n train_metadata.append(data)\n elif folder == 'test':\n test_metadata.append(data)\n return train_metadata, test_metadata", 
"title": "" }, { "docid": "fed817008e62d27df03a60579881b989", "score": "0.5151102", "text": "def set_meta( self, dataset, overwrite=True, **kwd ):\n\n types = set()\n with open(dataset.dataset.file_name, 'r') as fd:\n for line in fd:\n match = re.match(\"([A-Z]+):\\s\", line)\n if match is None:\n return False\n types.add(match.group(1))\n dataset.metadata.annotations = list(types)", "title": "" }, { "docid": "cf71e0bab50feef13255ad0da92f8864", "score": "0.5134266", "text": "def _collect_hdf5_meta_mpi(self, results):\n\n def create_dataset_params(sta_info):\n\n # Split the path\n tag_path = sta_info[\"path\"].strip(\"/\").split(\"/\")\n\n for path in tag_path:\n # Assert each path piece.\n tag_pattern = r\"^[a-zA-Z0-9][a-zA-Z0-9_]*[a-zA-Z0-9]$\"\n if re.match(tag_pattern, path) is None:\n raise ASDFValueError(\n \"Tag name '{name}' is invalid. It must validate \"\n \"against the regular expression '{pattern}'.\".format(\n name=path, pattern=tag_pattern))\n\n info = {\n \"data_name\": sta_info[\"path\"],\n \"dataset_creation_params\": {\n \"name\": sta_info[\"path\"],\n \"shape\": sta_info[\"object\"].shape,\n \"dtype\": sta_info[\"object\"].dtype,\n \"compression\": self.__compression[0],\n \"compression_opts\": self.__compression[1],\n \"shuffle\": self.__shuffle,\n \"fletcher32\": False,\n \"maxshape\": tuple([None] * len(sta_info[\"object\"].shape))\n },\n \"dataset_attrs\": sta_info[\"parameters\"],\n }\n return info\n\n meta_list = []\n for _sta, _sta_info in results.iteritems():\n for _chan_info in _sta_info:\n _meta = create_dataset_params(_chan_info)\n meta_list.append(_meta)\n\n gathered_meta = self.mpi.comm.gather(meta_list, root=0)\n all_meta = []\n if self.mpi.rank == 0:\n for _meta in gathered_meta:\n all_meta.extend(_meta)\n\n all_meta = self.mpi.comm.bcast(all_meta, root=0)\n\n # Likely not necessary as the gather two line above implies a\n # barrier but better be safe than sorry.\n self.mpi.comm.barrier()\n\n return all_meta", "title": "" }, { "docid": "5dd97afbc7c9f3cae80b130399fdd2d2", "score": "0.5133137", "text": "def getEntityMetadatas(self, *names, mustExist = True):\n\n raise NotImplementedError", "title": "" }, { "docid": "6c8e18efb910e7e1bb42a9c37ff6b77f", "score": "0.5128889", "text": "def annotate_rows_db(self,mt,*names):\n d, geneDict = DB.annotation_dataset_urls()\n reference_genome = mt.row_key.locus.dtype.reference_genome.name\n for name in names:\n gene_key = geneDict[(name)]\n if gene_key is True:\n gene_url = d[('gencode', reference_genome)]\n t = hl.read_table(gene_url)\n mt = mt.annotate_rows(gene_name=t[mt.locus].gene_name)\n url = d[(name, None)]\n t2 = hl.read_table(url)\n mt = mt.annotate_rows(**{name:t2[mt.gene_name]})\n mt = mt.drop('gene_name')\n else:\n url = d[(name,reference_genome)]\n t = hl.read_table(url)\n if len(t.key) > 1:\n mt = mt.annotate_rows(**{name:t[mt.row_key]})\n else:\n mt = mt.annotate_rows(**{name:t[mt.locus]})\n return mt", "title": "" }, { "docid": "3690e9514a8f5afe9e5743ba452e68c0", "score": "0.5128861", "text": "def fixture_dataset_metadata(testdir):\n return Metadata(dataset_id=DATASET_ID, metadata_path=testdir)", "title": "" }, { "docid": "165443030581d9457b81c65844435a07", "score": "0.51249915", "text": "def load_data():\n for location in Location.objects.all():\n LOCATIONS[location.name] = location\n LOCATIONS[location.display_name] = location\n for track in Track.objects.all():\n TRACKS[track.name] = track", "title": "" }, { "docid": "8cbb6dd41e3b745057c78083c47917ec", "score": "0.5116892", "text": "def 
read_twitter_csv_into_dataset(username=7, tweet=10, number_of_tweets=float('inf')):\n dataset_Cid_Tid = list()\n dict_of_usernames = dict()\n word_list = list()\n\n parent_path = Path(sys.path[0]).parent\n with open(os.path.join(parent_path, 'data/covid.csv'), encoding='utf-8') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=';')\n line_count = 0\n for row in csv_reader:\n if line_count == 0:\n print(f'Column names are {\", \".join(row)}')\n line_count += 1\n elif line_count > number_of_tweets:\n break\n else:\n username_string = row[username]\n # remove links\n tweet_str = re.sub(\"http(\\S)*\", \"\", row[tweet].lower())\n tweet_list = re.split('\\s|,|\\.', tweet_str)\n tweet_list = remove_stopwords(tweet_list)\n tweet_list = sorted(tweet_list)\n\n if username_string not in dict_of_usernames.keys():\n # add to username dict\n dict_of_usernames[username_string] = 1\n new_row = [username_string, 1, tweet_list]\n else:\n # update username dict\n new_value = dict_of_usernames[username_string] + 1\n dict_of_usernames[username_string] = new_value\n new_row = [username_string, new_value, tweet_list]\n dataset_Cid_Tid.append(new_row)\n\n for word in tweet_list:\n if word not in word_list:\n word_list.append(word)\n # remove duplicates\n word_list = list(dict.fromkeys(word_list))\n\n # print(username_string + ':' + row[tweet])\n line_count += 1\n print(f'Processed {line_count} lines.')\n return dataset_Cid_Tid, word_list, dict_of_usernames", "title": "" }, { "docid": "ec17282dfc9360cf29c008738e5fc12c", "score": "0.51102346", "text": "def datasets(args):\n if len(args.args) < 1:\n raise ParserError('not enough arguments')\n d = Path(args.args[0])\n if not d.exists() or not d.is_dir():\n raise ParserError('%s is not an existing directory' % d)\n for fname in sorted(d.glob('*' + MD_SUFFIX), key=lambda p: p.name):\n md = Metadata(load(fname))\n data = fname.parent.joinpath(\n md.get_table().url or fname.name[:-len(MD_SUFFIX)])\n if data.exists():\n print(data)\n if len(args.args) > 1:\n maxlen = max(len(a) for a in args.args[1:])\n for attr in args.args[1:]:\n if md.get(attr):\n print(' %s %s' % ((attr + ':').ljust(maxlen + 1), md[attr]))", "title": "" }, { "docid": "ea7d1d91dc41e0f7146338f4f4aa82e8", "score": "0.5107", "text": "def load_set_with_former_datapoints():\n with open(\"sms_spam_dataset.csv\", \"r\", encoding=\"utf-8\") as csvfile:\n spamreader = csv.reader(csvfile, delimiter=\",\")\n for row in spamreader:\n if not row:\n continue\n EXISTING_DATAPOINTS.add(tuple(row))\n csvfile.close()", "title": "" }, { "docid": "630323a34c27020841bcc9d9b5027a60", "score": "0.50985914", "text": "def test_humann2_join_tables_tsv(self):\n \n # create a temp file\n file_out, new_file=tempfile.mkstemp(prefix=\"humann2_temp\")\n \n # join the files\n utils.run_command([\"humann2_join_tables\",\"--input\",\n cfg.data_folder,\"--output\",new_file,\"--file_name\",\n cfg.multi_sample_genefamilies_split_basename,\"--verbose\"])\n \n # check the joined file is as expected\n self.assertTrue(utils.files_almost_equal(new_file, cfg.multi_sample_genefamilies))\n\n # remove the temp file\n utils.remove_temp_file(new_file)", "title": "" }, { "docid": "915db43e558d64f335f3ea05ffc72209", "score": "0.50975204", "text": "def get_metadata(self, urls):\n\n ret = []\n for url in urls:\n\n data = db.get(url) if db.db else None\n\n # If the URL has been in none of our static databases, we still want to return an ID\n if data is None:\n obj = urlserver_pb2.UrlMetadata()\n obj.id = make_url_id(URL(url))\n data = 
obj.SerializeToString()\n\n ret.append(data)\n\n return ret", "title": "" }, { "docid": "431e662f494348ce4c3820e87a9aa78e", "score": "0.5096695", "text": "def meta_load(a, **kwargs):\n pi = PageInfo(number_on_page=2, total=2)\n\n if kwargs.get('table_metadata_id') == 1:\n columnfortable = [ColumnMetadata(\n column_metadata_id=1, table_metadata_id=1,\n column_name=\"ENAME\", is_masked=True,\n data_type=\"VARCHAR2\", column_length=30,\n is_foreign_key=True,\n algorithm_name=\"LastNameLookup\",\n domain_name=\"LAST_NAME\")]\n elif kwargs.get('table_metadata_id') == 2:\n columnfortable = [ColumnMetadata(\n column_metadata_id=1, table_metadata_id=2,\n column_name=\"DNAME\", is_masked=True,\n data_type=\"VARCHAR2\", column_length=66,\n is_index=True,\n algorithm_name=\"TestNameLookup\",\n domain_name=\"TEST_NAME\")]\n elif kwargs.get('table_metadata_id') == 101:\n columnfortable = [ColumnMetadata(\n column_metadata_id=1, table_metadata_id=101,\n data_type=\"VARCHAR2\", column_length=30,\n column_name=\"ENAME\", is_masked=False)]\n elif kwargs.get('table_metadata_id') == 102:\n columnfortable = [ColumnMetadata(\n column_metadata_id=1, table_metadata_id=102,\n data_type=\"VARCHAR2\", column_length=66,\n column_name=\"DNAME\", is_masked=False)]\n\n clrpo = ColumnMetadataList(page_info=pi, response_list=columnfortable)\n return clrpo", "title": "" }, { "docid": "60c3771576a7b747debcd02520139a76", "score": "0.5087551", "text": "def extract_all_dataset():\n\n # weibo0813\n pro_weibo0813(\"/home/wangyida/git/data/dialog/cn/weibo_0813/raw/\", ROOTDIR)\n # weibo169\n pro_weibo169(\"/home/wangyida/git/data/dialog/cn/weibo169/\", ROOTDIR)\n # NLPCC2018\n pro_NLPCC2018_task5(\"/home/wangyida/git/data/dialog/cn/NLPCC2018_task5/train.txt\",\n ROOTDIR)\n # others\n pro_others(\"/home/wangyida/git/data/dialog/cn/LCCD/other/raw/\", ROOTDIR)\n return", "title": "" }, { "docid": "9b7dde96816903e89802e75643faba01", "score": "0.5086505", "text": "def meta_collate(batch):\n # print(batch[0][0].metadata)\n newbatch = default_collate(batch)\n\n for i, x in enumerate(newbatch):\n x.metadata = [batch[j][i].metadata if hasattr(batch[j][i], 'metadata') else {} for j in range(len(batch))]\n return newbatch", "title": "" }, { "docid": "22f618526cafce9946a2d63ca67ce220", "score": "0.50828516", "text": "def create_datasets_table(self):\n query = '''\n CREATE TABLE IF NOT EXISTS datasets (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n user_id INTEGER,\n celery_id INTEGER,\n file_id INTEGER,\n name text,\n graph_name text,\n public boolean,\n status text,\n start int,\n end int,\n ntriples int,\n error_message text,\n FOREIGN KEY(user_id) REFERENCES users(user_id),\n FOREIGN KEY(file_id) REFERENCES files(id)\n )\n '''\n self.execute_sql_query(query)", "title": "" }, { "docid": "14fea8976772b6430dee0352c30bfc7d", "score": "0.5077018", "text": "def get_dataset_list(tableName):\n\n #dataFromTable = Table.read(filename, format='ascii')\n datasetIDs = tableName['observationID']\n asnIDs = tableName['asnID']\n\n datasetNames = []\n\n # Determine if the data is part of an association or is an individual image\n for imgid,asnid in zip(datasetIDs,asnIDs):\n\n # If the asnID is the string NONE, this is an individual image,\n # and it is necessary to get the individual image dataset name.\n # Otherwise, this is an association dataset, so just add the asnID.\n if (asnid.upper() == \"NONE\"):\n datasetNames.append(imgid)\n else:\n datasetNames.append(asnid)\n\n return datasetNames", "title": "" }, { "docid": "3eb5b56265e2f4f37a963755b32ba4ca", 
"score": "0.5072862", "text": "def load_all_data(self):\n sql = \"SELECT * FROM movies\"\n movies = []\n movie_persons = []\n movie_genres = []\n movie_id_uuid = dict()\n for row in self.cursor.execute(sql):\n self._validate_na(row)\n\n # assign brand new uuid and remove ugly old id\n movie_uuid = uuid.uuid4()\n row[\"uuid\"] = movie_uuid\n movie_id_uuid[row[\"id\"]] = movie_uuid\n row.pop(\"id\")\n\n # ratings are None anyway\n row.pop(\"ratings\")\n\n # fill persons list\n writer_ids = self._combine_writers(row)\n row.pop(\"writer\")\n row.pop(\"writers\")\n for writer_id in writer_ids:\n movie_persons.append({\"movie_uuid\": movie_uuid,\n \"id\": writer_id,\n \"name\": None,\n \"role\": \"writer\"})\n\n directors = row.pop(\"director\")\n if directors:\n for director_name in directors.split(', '):\n # some directors have unnecessary comment\n director_name = re.sub(\"[\\(\\[].*?[\\)\\]]\", \"\", director_name)\n\n movie_persons.append({\"movie_uuid\": movie_uuid,\n \"id\": None,\n \"name\": director_name,\n \"role\": \"director\"})\n\n genres = row.pop(\"genre\")\n if genres:\n for genre in genres.split(', '):\n movie_genres.append({\"movie_uuid\": movie_uuid, \"genre\": genre})\n\n film_work = FilmWork(title=row[\"title\"],\n id=row[\"uuid\"],\n description=row[\"plot\"],\n imdb_rating=row[\"imdb_rating\"])\n movies.append(film_work)\n\n # get writer names from writers table\n sql_writers = \"\"\"SELECT name FROM writers WHERE id=(?);\"\"\"\n for person in movie_persons:\n if person[\"role\"] == \"writer\":\n self.cursor.execute(sql_writers, (person[\"id\"],))\n row = self.cursor.fetchone()\n person[\"name\"] = row[\"name\"]\n\n sql_movie_actors = \"\"\"\n SELECT movies.id, actors.name \n FROM actors \n INNER JOIN movie_actors ma ON actors.id = ma.actor_id\n INNER JOIN movies ON movies.id = ma.movie_id;\n \"\"\"\n for row in self.cursor.execute(sql_movie_actors):\n movie_persons.append({\"movie_uuid\": movie_id_uuid[row[\"id\"]],\n \"name\": row[\"name\"],\n \"role\": \"actor\"})\n # get rid of N/A persons\n movie_persons = [person for person in movie_persons if person[\"name\"] != \"N/A\"]\n\n # get unique persons (by unique name) and assign uuids\n unique_persons = set()\n for person in movie_persons:\n unique_persons.add(person[\"name\"])\n person_uuid = {person: uuid.uuid4() for person in unique_persons}\n\n # get unique genres and assign uuids\n unique_genres = set()\n for genre in movie_genres:\n unique_genres.add(genre[\"genre\"])\n genre_uuid = {genre: uuid.uuid4() for genre in unique_genres}\n return movies, movie_persons, movie_genres, person_uuid, genre_uuid", "title": "" }, { "docid": "d4ea0a5774d1011458a2733c365083d6", "score": "0.5069592", "text": "def get_metapartitions_for_stats(datasets):\n all_metapartitions = []\n for ktk_cube_dataset_id, ds in datasets.items():\n dataset_factory = metadata_factory_from_dataset(ds)\n for mp in dispatch_metapartitions_from_factory(\n dataset_factory=dataset_factory, dispatch_by=dataset_factory.partition_keys\n ):\n all_metapartitions.append((ktk_cube_dataset_id, mp))\n return all_metapartitions", "title": "" }, { "docid": "71735f669cd492ef4ddf5a0f7266797c", "score": "0.5069033", "text": "async def tagged_data(items: List[Request_Results]):\n normalized = []\n for item in items:\n for dataitem in item.data:\n normalized.append(\n {\n \"id\": item.id,\n \"table_name\": item.datatype,\n \"field_value\": dataitem.value,\n \"field_name\": dataitem.name,\n }\n )\n normalized_result = map(lambda item: Results(**item), normalized)\n output = 
await Results.bulk_create(normalized_result)\n print(output)\n return {\"success\": True}", "title": "" }, { "docid": "90d37891c60968fd19f13f2e9859a738", "score": "0.5065633", "text": "def populate_tables():\n db.create_tables([Scan, Spectrum, Voxel], safe=True)\n\n s = Seeder(Scan, n=2, groups=1)\n s = Seeder(Spectrum, n=10, groups=2)\n s = Seeder(Voxel, n=100, groups=10)", "title": "" }, { "docid": "f47dcc90cd230424f556c956a69bac33", "score": "0.5055237", "text": "def describe_dataset():\n questions = [\n inquirer.List(\n \"source\",\n message=\"Where is/will be the dataset synced to?\",\n choices=[\"git\", \"s3\"],\n ),\n inquirer.Text(\"name\", message=\"How would you like to name the dataset?\"),\n inquirer.Text(\"id\", message=\"Please specify a unique id for the dataset\"),\n inquirer.Text(\n \"description\",\n message=\"What is the dataset about? This will be the description of the dataset.\",\n ),\n inquirer.Text(\n \"uri\",\n message=\"What is the dataset's URI? This will be the URI of the dataset.\",\n ),\n ]\n\n answers = inquirer.prompt(questions)\n\n metadata_uri = get_metadata_uri(answers)\n\n meta = {\n \"source\": answers.get(\"source\"),\n \"name\": answers.get(\"name\", \"\"),\n \"description\": answers.get(\"description\", \"\"),\n \"uri\": answers.get(\"uri\", \"\"),\n \"metadata_uri\": metadata_uri,\n }\n\n return meta", "title": "" }, { "docid": "4d3ac6a6d0510834f0a9f425d0cce447", "score": "0.50548893", "text": "def load_tweets(args):\r\n df1 = pd.DataFrame()\r\n dir_listing = os.listdir(args.filepath)\r\n files = [fi for fi in dir_listing if fi.endswith(\".json\")]\r\n i = 1\r\n for filename in files:\r\n file = args.filepath + filename\r\n tempdf = load(file)\r\n print(i)\r\n i += 1\r\n df1 = pd.concat([df1, tempdf])\r\n with open(\"df1.csv\", \"w\", encoding=\"utf-8\", newline='') as reference:\r\n df1.to_csv(reference, sep=\",\", index=False, encoding=\"utf-8\")\r\n # ONLY TWEET NOT RETWEET\r\n isTweet = df1['tweet_type'] == 'T'\r\n df1_tweet = df1[isTweet]\r\n print(len(df1_tweet))\r\n\r\n df2_tweet = df1_tweet.groupby(['_birthday_screenName'], as_index=False)\r\n df2 = df2_tweet.first()\r\n print(len(df2_tweet))\r\n # check sample is balanced\r\n print(df2.loc[:, '_range_age'].value_counts())\r\n # balance dataset\r\n df2 = under_sampleto_min(df2, '_range_age')\r\n print(df2.loc[:, '_range_age'].value_counts())\r\n df2 = df2.dropna(axis=0, how='all')\r\n df2.reset_index()\r\n\r\n # CREATED SCORE DATASET\r\n result = pd.merge(df1_tweet[['text', 'followers_count', 'tweet_type',\r\n '_birthday_screenName', 'num_emoticon', 'num_pronoun', 'num_mention',\r\n 'num_url', 'num_punctuation', '_range_age', '_age', 'words']],\r\n df2[['_birthday_screenName']],\r\n how='left', on=['_birthday_screenName', '_birthday_screenName'])\r\n\r\n result.loc[result['_birthday_screenName'].isnull(), '_birthday_screenName'] = 0\r\n isBSNempty = result['_birthday_screenName'] == 0\r\n df_score = result[isBSNempty]\r\n print(len(df_score))\r\n with open(\"score.csv\", \"w\", encoding=\"utf-8\", newline='') as reference:\r\n df_score.to_csv(reference, sep=\",\", index=False, encoding=\"utf-8\")\r\n\r\n df_train = pd.merge(df1_tweet[['text', 'followers_count', 'tweet_type',\r\n '_birthday_screenName', 'num_emoticon', 'num_pronoun', 'num_mention',\r\n 'num_url', 'num_punctuation', '_range_age', '_age', 'words']],\r\n df2[['_birthday_screenName']],\r\n how='inner', on=['_birthday_screenName', '_birthday_screenName'])\r\n print(len(df_train))\r\n print(df_train.loc[:, 
'_range_age'].value_counts())\r\n df_train = under_sampleto_min(df_train, '_range_age')\r\n print(df_train.loc[:, '_range_age'].value_counts())\r\n with open(\"train.csv\", \"w\", encoding=\"utf-8\", newline='') as reference:\r\n df_train.to_csv(reference, sep=\",\", index=False, encoding=\"utf-8\")\r\n return df_train", "title": "" }, { "docid": "70b274b1c89d434dcfe91ae389e3f331", "score": "0.505375", "text": "def get_all_metadata():\n\tentries = session.query(TableEntities.Metadata).all()\n\treturn {'data': [entry.as_dict() for entry in entries]}", "title": "" }, { "docid": "add867657a3e30d24f0657425048e53d", "score": "0.5045959", "text": "def import_data(self):\n train = []\n test = []\n\n with open(self.train_file, 'r', encoding='utf-8') as f:\n for line in f:\n row = line.split('\\t')\n if len(row) != 4:\n break\n train.append(\n {'id': row[0], 'user': row[1], 'lang': row[2], 'tweet': row[3]})\n\n with open(self.test_file, 'r', encoding='utf-8') as f:\n for line in f:\n row = line.split('\\t')\n if len(row) != 4:\n break\n test.append({'id': row[0], 'user': row[1],\n 'lang': row[2], 'tweet': row[3]})\n\n return train, test", "title": "" }, { "docid": "0fbd32a4d00d5bda7f64a6dd90c7a071", "score": "0.50431585", "text": "def configure_data(pubmedfiles,reports, labels, concepts, jsondisp, jsonann, jsonall, username, password, topics,runs,tfidf):\n\n filename = ''\n language = 'english'\n error_location = 'database'\n report_usecases = []\n created_file = False\n today = str(date.today())\n\n try:\n with transaction.atomic():\n cursor = connection.cursor()\n cursor.execute(\"DELETE FROM annotate;\")\n cursor.execute(\"DELETE FROM linked;\")\n cursor.execute(\"DELETE FROM associate;\")\n cursor.execute(\"DELETE FROM contains;\")\n cursor.execute(\"DELETE FROM mention;\")\n cursor.execute(\"DELETE FROM belong_to;\")\n cursor.execute(\"DELETE FROM annotation_label;\")\n cursor.execute(\"DELETE FROM concept;\")\n cursor.execute(\"DELETE FROM ground_truth_log_file;\")\n cursor.execute(\"DELETE FROM topic_has_document;\")\n cursor.execute(\"DELETE FROM report;\")\n cursor.execute(\"DELETE FROM use_case;\")\n cursor.execute(\"DELETE FROM semantic_area;\")\n # connection.commit()\n cursor.execute(\"DELETE FROM public.user WHERE username = 'Test'\")\n cursor.execute(\"INSERT INTO semantic_area VALUES (%s)\",('default_area',))\n if username is not None and password is not None:\n cursor.execute(\"INSERT INTO public.user (username,password,profile,ns_id) VALUES(%s,%s,%s,%s);\",\n (str(username), hashlib.md5(str(password).encode()).hexdigest(), 'Admin', 'Human'))\n # cursor.execute(\"INSERT INTO public.user (username,password,profile,ns_id) VALUES(%s,%s,%s,%s);\",\n # (str(username), hashlib.md5(str(password).encode()).hexdigest(), 'Admin', 'Robot'))\n\n fields = []\n all_fields = []\n fields_to_ann = []\n\n jsonall = ''.join(jsonall)\n jsondisp = ''.join(jsondisp)\n jsonann = ''.join(jsonann)\n\n jsonall = jsonall.split(',')\n jsondisp = jsondisp.split(',')\n jsonann = jsonann.split(',')\n\n\n for el in jsonall:\n if len(el) > 0:\n all_fields.append(el)\n for el in jsondisp:\n if len(el) > 0:\n fields.append(el)\n if el not in all_fields:\n all_fields.append(el)\n for el in jsonann:\n if len(el) > 0:\n fields_to_ann.append(el)\n if el not in all_fields:\n all_fields.append(el)\n language = 'english'\n\n arr_to_ret = elaborate_runs(runs)\n error_location = 'Topic'\n for topic in topics:\n if topic.name.endswith('txt'):\n elaborate_TREC_topic_files(arr_to_ret,topic)\n elif topic.name.endswith('json'):\n 
process_topic_json_file(arr_to_ret,topic)\n elif topic.name.endswith('csv'):\n process_topic_csv_file(arr_to_ret,topic)\n\n error_location = 'Collection'\n\n for file in reports:\n reps = decompress_files([file])\n for f in reps:\n\n if isinstance(f, str):\n file_name = f\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n f = os.path.join(workpath, 'static\\\\tmp\\\\' + f)\n else:\n file_name = f.name\n if file_name.endswith('json'):\n find_docs_in_json_collection(arr_to_ret,f)\n elif file_name.endswith('csv'):\n find_docs_in_csv_collection(arr_to_ret,f)\n\n for file in pubmedfiles:\n # if file.name.endswith('json'):\n find_docs_in_json_pubmed_collection(arr_to_ret,file)\n\n\n error_location = 'Runs'\n for el in arr_to_ret:\n if len(el) == 3:\n language = el[2]\n topic = UseCase.objects.get(name = el[0])\n doc = Report.objects.get(id_report =str(el[1]),language = 'english')\n\n TopicHasDocument.objects.get_or_create(name = topic,language = doc.language,id_report =doc)\n\n\n if len(labels) > 0:\n labs = []\n error_location = 'Labels'\n for label_file in labels:\n if label_file.name.endswith('csv'):\n df_labels = pd.read_csv(label_file)\n df_labels = df_labels.where(pd.notnull(df_labels), None)\n df_labels = df_labels.reset_index(drop=True)\n # df_labels['usecase'] = df_labels['usecase'].str.lower()\n count_lab_rows = df_labels.shape[0]\n for i in range(count_lab_rows):\n label = str(df_labels.loc[i, 'label'])\n labs.append(label.rstrip())\n elif label_file.name.endswith('json'):\n d = json.load(label_file)\n labels = d['labels']\n for label in labels:\n labs.append(label.rstrip())\n elif label_file.name.endswith('txt'):\n lines = label_file.readlines()\n for line in lines:\n line = line.decode('utf-8')\n labs.append(line.replace('\\n',''))\n\n\n for label in labs:\n cursor.execute('SELECT * FROM annotation_label')\n ans = cursor.fetchall()\n if len(ans) == 0:\n seq_number = 1\n else:\n cursor.execute('SELECT seq_number FROM annotation_label ORDER BY seq_number DESC;')\n ans = cursor.fetchall()\n seq_number = int(ans[0][0]) + 1\n\n cursor.execute(\"SELECT * FROM annotation_label WHERE label = %s;\",\n (str(label),))\n ans = cursor.fetchall()\n if len(ans) == 0:\n cursor.execute(\"INSERT INTO annotation_label (label,seq_number) VALUES (%s,%s);\",\n (str(label), int(seq_number)))\n\n # Popolate the concepts table\n error_location = 'Concepts'\n # if load_concepts is not None and load_concepts != '' and load_concepts !=[] and len(concepts) == 0:\n # configure_concepts(cursor,load_concepts,'admin')\n\n for concept_file in concepts:\n if concept_file.name.endswith('csv'):\n df_concept = pd.read_csv(concept_file)\n df_concept = df_concept.where(pd.notnull(df_concept), None)\n df_concept = df_concept.reset_index(drop=True)\n # df_concept['usecase'] = df_concept['usecase'].str.lower()\n\n # print(df_concept)\n count_conc_rows = df_concept.shape[0]\n\n for i in range(count_conc_rows):\n df_concept = df_concept.where(pd.notnull(df_concept), None)\n concept_url = str(df_concept.loc[i, 'concept_url'])\n concept_name = str(df_concept.loc[i, 'concept_name'])\n # usecase = str(df_concept.loc[i, 'usecase'])\n # semantic_area = str(df_concept.loc[i, 'area'])\n\n cursor.execute(\"SELECT concept_url,json_concept FROM concept WHERE concept_url = %s;\",\n (str(concept_url),))\n ans = cursor.fetchall()\n if len(ans) == 0:\n # json_concept = json.dumps({'provenance': 'admin', 'insertion_author': 'admin'})\n cursor.execute(\"INSERT INTO concept (concept_url,name) VALUES 
(%s,%s);\",\n (str(concept_url), str(concept_name)))\n\n cursor.execute(\"SELECT * FROM belong_to WHERE concept_url = %s AND name=%s;\",\n (str(concept_url), 'default_area'))\n ans = cursor.fetchall()\n if len(ans) == 0:\n cursor.execute(\"INSERT INTO belong_to (concept_url,name) VALUES (%s,%s);\",\n (str(concept_url), 'default_area'))\n elif concept_file.name.endswith('json'):\n\n d = json.load(concept_file)\n count_conc_rows = len(d['concepts_list'])\n for i in range(count_conc_rows):\n concept_url = str(d['concepts_list'][i]['concept_url'])\n concept_name = str(d['concepts_list'][i]['concept_name'])\n # usecase = str(df_concept.loc[i, 'usecase'])\n # semantic_area = str(df_concept.loc[i, 'area'])\n\n cursor.execute(\"SELECT concept_url,json_concept FROM concept WHERE concept_url = %s;\",\n (str(concept_url),))\n ans = cursor.fetchall()\n if len(ans) == 0:\n # json_concept = json.dumps({'provenance': 'admin', 'insertion_author': 'admin'})\n cursor.execute(\"INSERT INTO concept (concept_url,name) VALUES (%s,%s);\",\n (str(concept_url), str(concept_name)))\n\n cursor.execute(\"SELECT * FROM belong_to WHERE concept_url = %s AND name=%s;\",\n (str(concept_url), 'default_area'))\n ans = cursor.fetchall()\n if len(ans) == 0:\n cursor.execute(\"INSERT INTO belong_to (concept_url,name) VALUES (%s,%s);\",\n (str(concept_url), 'default_area'))\n data = {}\n data['fields'] = fields\n data['fields_to_ann'] = fields_to_ann\n data['all_fields'] = all_fields\n version = get_version()\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n version_new = int(version) + 1\n filename = 'fields' + str(version_new)\n created_file = False\n with open(os.path.join(workpath, './config_files/data/' + filename + '.json'), 'w') as outfile:\n json.dump(data, outfile)\n created_file = True\n\n except (Exception, psycopg2.Error) as e:\n print(e)\n print('rollback')\n # connection.rollback()\n if created_file == True:\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n if filename != '' and filename != 'fields0':\n path = os.path.join(workpath, './config_files/data/' + filename + '.json')\n os.remove(path)\n\n json_resp = {'error': 'an error occurred in: ' + error_location + '.'}\n return json_resp\n else:\n # connection.commit()\n if created_file == True:\n for filen in os.listdir(os.path.join(workpath, './config_files/data')):\n if filen.endswith('json'):\n if filen != '' and filen != 'fields0.json' and filen != filename+'.json':\n path = os.path.join(workpath, './config_files/data/' + filen )\n os.remove(path)\n outfile.close()\n\n if tfidf is not None or (len(runs) > 0 and len(topics) > 0 and (len(reports) > 0) or len(pubmedfiles) > 0):\n print(str(tfidf))\n cursor = connection.cursor()\n cursor.execute('SELECT DISTINCT language FROM report')\n ans = cursor.fetchall()\n languages = []\n for el in ans:\n languages.append(el[0])\n st = time.time()\n if int(tfidf) > 0:\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n path1 = os.path.join(workpath, './config_files/config.json')\n g = open(path1,'r')\n data = json.load(g)\n data['TF-IDF_k'] = tfidf\n with open(path1, 'w') as f:\n json.dump(data, f)\n\n t = UseCase.objects.all()\n cursor = connection.cursor()\n\n json_to_write = {}\n for top in t:\n json_to_write[top.name] = {}\n topic = {}\n corpus = []\n cursor.execute(\n \"SELECT r.id_report,r.language,r.report_json FROM report as r inner join topic_has_document as t on t.id_report = 
r.id_report and r.language = t.language where t.name = %s\",\n [str(top.name)])\n ans = cursor.fetchall()\n for el in ans:\n e = json.loads(el[2])\n r_j1 = {}\n\n r_j1['document_id'] = str(el[0])\n r_j1['text'] = ''\n for k in e.keys():\n if k != 'document_id' or (str(el[0]).startswith('PUBMED_') and (k == 'abstract' or k == 'title')):\n r_j1['text'] = r_j1['text'] + ' ' + str(e[k])\n if el[1].lower() in LANGUAGES_NLTK:\n corpus.append(r_j1)\n if top.title is not None:\n topic['title'] = top.title\n if top.description is not None:\n topic['description'] = top.description\n # df_tfidf = gen_tfidf_map(corpus,language)\n if topic != {}:\n for el in ans:\n if el[1].lower() in LANGUAGES_NLTK:\n language = el[1].lower()\n start = time.time()\n print('working on ', str(el[0]))\n e = json.loads(el[2])\n r_j1 = {}\n r_j1['document_id'] = str(el[0])\n r_j1['text'] = ''\n for k in e.keys():\n # print(k)\n # print(e[k])\n if isinstance(e[k],list):\n e[k] = ', '.join(e[k])\n if k != 'document_id' and k != 'language' and e[k] is not None:\n r_j1['text'] = r_j1['text'] + ' ' + e[k]\n\n tfidf_matcher = QueryDocMatcher(topic=topic,doc= r_j1, corpus=corpus,language=language)\n top_k_matching_words = tfidf_matcher.get_words_to_highlight()\n\n # print(top_k_matching_words)\n\n # json_val = {}\n # json_val[str(el[0])] = top_k_matching_words\n # json_val['words'] = top_k_matching_words\n json_to_write[top.name][str(el[0])] = top_k_matching_words\n # print(json_to_write)\n end = time.time()\n print('elaborated in '+str(end-start)+' seconds')\n else:\n json_to_write = {}\n end = time.time()\n print('time',end-st)\n path2 = os.path.join(workpath, './config_files/tf_idf_map.json')\n with open(path2, 'w') as f:\n json.dump(json_to_write, f)\n\n json_resp = {'message': 'Ok'}\n return json_resp", "title": "" }, { "docid": "52571279dd599d1d4399517f6cc43ba1", "score": "0.50417817", "text": "def read_tables(engine, table_names=[\"FS_LIST_MONTHLY\",\"FS_CAL_MONTHLY\"\n ,\"FS_HOST_MONTHLY\",'FS_REVIEW_MONTHLY'\n ,\"FS_BOOKED_MONTHLY\",\"FS_LOCATION_MONTHLY\"\n ,\"FS_TIME_MONTHLY\",\"FS_PRICE_MONTHLY\"], date_start = None, date_end = None):\n # initialize\n df_all = pd.DataFrame(columns=['ID','YEAR_MONTH'])\n\n for table in table_names:\n df = read_table(engine, table, date_start, date_end)\n df_all = df_all.merge(df, on = ['ID','YEAR_MONTH'],how='outer')\n\n return df_all", "title": "" }, { "docid": "2db67858b35ba2d200bcd487fe1be50e", "score": "0.50391483", "text": "def TripleDataset(cam_names=CAM_NAMES, scene_ids=None):\n idces_offset_with_label = [\n ([0, 1, 2], 1), # positive_1\n ([0, 3, 2], 0),\n ([1, 0, 3], 0),\n ]\n# idces_offset_with_label = [\n# ([0, 1], 1), # forward or backward\n# ([1, 0], 0),\n# ]\n my_datasets = []\n for cam_name in cam_names: # maybe you want just one CAM_NAME\n for scene_id in scene_ids:\n for idces_offset, label in idces_offset_with_label:\n cur_dataset = _HelperForTripleDataset(cam_name, scene_id, idces_offset, label)\n my_datasets.append(cur_dataset)\n\n super_dataset = torch.utils.data.ConcatDataset(my_datasets)\n \n return super_dataset", "title": "" }, { "docid": "e29f2bca55cc237817b632076c09cce1", "score": "0.50347817", "text": "def load_data(self):\n \n table_list = [self.train_table, self.test_table]\n \n if 'is_student_relevant' not in self.feature_list:\n self.feature_list.append('is_student_relevant')\n\n for table in table_list:\n if not self._does_table_exist_in_db(table):\n print(\"creating feature table named\", table)\n sql_query = '''create table training.{} as (select 
person_id from training.mapping)'''.format(table)\n self.conn.execute(sql_query)\n\n for feature in flatten(self.feature_list): \n if not self._does_feature_exist_in_db(feature, table):\n fn = getattr(feature_generator, feature)\n fn1 = fn(table, self.conn)\n fn1.run()\n else: \n print('feature {col_name} already exists in table {table_name}'.format(col_name = feature, table_name = table))\n\n print(\"loading data now!\")\n\n sql_query_test = \"select person_id, {feature_list} from training.{table};\".format(\n feature_list=','.join(flatten(self.feature_list)), table=self.test_table)\n sql_query_train = \"select person_id, {feature_list} from training.{table};\".format(\n feature_list=','.join(flatten(self.feature_list)), table=self.train_table)\n \n features_df_test = pd.read_sql(sql_query_test, self.conn)\n features_df_train = pd.read_sql(sql_query_train, self.conn)\n \n sql_query = '''SELECT feature_name FROM training.feature_dictionary \n where feature_type = 'categorical' ''' \n \n categorial_list = pd.read_sql(sql_query, self.conn)\n features_categorial_list = categorial_list.values.tolist()\n features_categorial_list = list(itertools.chain.from_iterable(features_categorial_list))\n print ('feature list provided,' , flatten(self.feature_list))\n print ('feature list in dictionary', features_categorial_list)\n final_list = list(set(features_categorial_list).intersection(flatten(self.feature_list)))\n\n print (final_list)\n features_df_test['is_train'] = 0\n features_df_train['is_train'] = 1\n all_features = pd.concat([features_df_test, features_df_train])\n\n if self.normalize:\n print(\"Normalizing features to [0, 1]...\")\n for feature in all_features.columns:\n if not (feature in ('person_id', 'is_train') or feature in final_list):\n max_val = all_features[feature].max()\n min_val = all_features[feature].min()\n the_range = max_val - min_val + 1e-4\n all_features[feature] = (all_features[feature] - min_val) / (the_range)\n\n all_features = pp.get_dummies(all_features, columns=final_list)\n features_big_table_test = all_features[all_features['is_train'] == 0]\n features_big_table_train = all_features[all_features['is_train'] == 1]\n\n return features_big_table_train, features_big_table_test", "title": "" }, { "docid": "083be1e535b4cca8d1f1ab031f888b88", "score": "0.5033646", "text": "def load_dataset(name, dir):\n print(\"Loading \" + name + \" dataset ...\")\n x = pd.read_table(dir + name + \"_norm_counts_all.txt\", index_col=0)\n y = pd.read_table(dir + name + \"_celltypes.txt\")\n return (x, y)", "title": "" }, { "docid": "6c014c662e63e77acfb385c31610eaac", "score": "0.5028327", "text": "def load_dataset(set):\n return set.data, set.target", "title": "" }, { "docid": "8293fc6a6615797c6fe7bf5366fb089e", "score": "0.5027777", "text": "def fixture_table_metadata(testdir):\n return Metadata(dataset_id=DATASET_ID, table_id=TABLE_ID, metadata_path=testdir)", "title": "" }, { "docid": "5759a3d45279819dd1e92dde8bfedf33", "score": "0.50239575", "text": "def extractTablesData(tables):\n return [extractTableData(table) for table in tables]", "title": "" }, { "docid": "671e6684b704542aab8cfe49264be600", "score": "0.5022386", "text": "def ingest_contents(self, listfullnames, **kwargs):\n\n assert isinstance(listfullnames, list)\n\n for fname in listfullnames:\n miscutils.fwdebug_print(\"********************* %s\" % fname)\n numrows = dfiutils.datafile_ingest_main(self.dbh, self.filetype, fname,\n self.tablename, self.didatadefs)\n if numrows in [None, 0]:\n miscutils.fwdebug_print(f\"WARN: 0 
rows ingested from {fname} for table {self.tablename}\")\n elif miscutils.fwdebug_check(1, 'FTMGMT_DEBUG'):\n miscutils.fwdebug_print(f\"INFO: {numrows} rows ingested from {fname} for table {self.tablename}\")\n\n numrows = dfiutils.datafile_ingest_main(self.dbh, self.filetype2, fname,\n self.tablename2, self.didatadefs2)\n if numrows in [None, 0]:\n miscutils.fwdebug_print(f\"WARN: 0 rows ingested from {fname} for table {self.tablename2}\")\n elif miscutils.fwdebug_check(1, 'FTMGMT_DEBUG'):\n miscutils.fwdebug_print(f\"INFO: {numrows} rows ingested from {fname} for table {self.tablename2}\")", "title": "" }, { "docid": "04f454e69681fe30c1af14671f34f136", "score": "0.5017355", "text": "def parseDataset():\n\twith open(dataset) as datasetFileHandler:\n\t\tdatarows = datasetFileHandler.readlines()\n\t\tfor datarow in datarows:\n\t\t\tdataframe = json.loads(datarow)\n\t\t\toutputDict = getBaselineDictionary()\n\n\t\t\t# cleanse DF as per reqs\n\t\t\toutputDict[\"user_id\"] = dataframe[\"user_id\"]\n\t\t\toutputDict[\"name\"] = dataframe[\"name\"]\n\t\t\toutputDict[\"review_count\"] = dataframe[\"review_count\"]\n\t\t\toutputDict[\"friends\"] = dataframe[\"friends\"]\n\n\t\t\toutputDataFramesList.append(outputDict)\n\n\t\t\t# control limit for testing, uncomment to test code\n\t\t\t# if(len(outputDataFramesList) == 10):\n\t\t\t# \tbreak", "title": "" }, { "docid": "dc5598719af35e4149014f71619e0937", "score": "0.5010695", "text": "def _get_existing_datasets(self):\n self.h5_new_spec_vals = self.h5_results_grp['Spectroscopic_Values']\n self.h5_cap = self.h5_results_grp['Capacitance']\n self.h5_variance = self.h5_results_grp['R_variance']\n self.h5_resistance = self.h5_results_grp['Resistance']\n self.h5_i_corrected = self.h5_results_grp['Corrected_Current']", "title": "" }, { "docid": "778ea8ca2d3fa92b7d09a53fa1d2cf62", "score": "0.50006837", "text": "def load_multi_dataset(data_dirs_pair: list):\n # Change the directory of image files in the log files.\n data_dirs = list(map(lambda x: os.path.join(parent_data_folder, x[0]), data_dirs_pair))\n\n all_df = []\n for i, ddir in enumerate(data_dirs):\n df = update_df(ddir)\n # Eliminate low steering angle data if data_dirs_pair[i][2] is True\n if data_dirs_pair[i][2]:\n df = df.loc[np.abs(df[\"steering\"]) > 0.01, :]\n for k in range(data_dirs_pair[i][1]):\n all_df.append(df)\n\n all_df = pd.concat(all_df)\n\n return all_df", "title": "" }, { "docid": "17940bade254a9678e21d0f094ba7483", "score": "0.4991661", "text": "def init_tables_for_all_assets():\n markets = get_all_market_pairs()\n\n for pair in markets:\n create_table_for_pair(pair)\n print(\"Created order book tables for {} pair\".format(pair))", "title": "" }, { "docid": "2c8bcdd6708fec0cf80acaba8ed3da92", "score": "0.49912554", "text": "def _create_sets(\n tweets, train_dev_labels, test_labels, data_frac, train_dev_frac):\n\n # to allow for merge, need the same type\n tweets[COL_ID] = tweets[COL_ID].astype(np.int64)\n\n # Merge by ID\n train_dev_data = pd.merge(tweets, train_dev_labels, on=COL_ID)\n test_set = pd.merge(tweets, test_labels, on=COL_ID)\n\n # take (train_dev_frac * 100) % of the traindevdata\n train_set = train_dev_data.sample(frac=train_dev_frac, random_state=0)\n # take % that remain\n dev_set = train_dev_data.drop(train_set.index)\n\n take_part_of_df = lambda df: np.split(df, [int(data_frac*len(df))])\n train_set, _ = take_part_of_df(train_set)\n dev_set, _ = take_part_of_df(dev_set)\n\n # drop the ID columns, not needed anymore\n train = train_set.drop(COL_ID, axis=1)\n dev 
= dev_set.drop(COL_ID, axis=1)\n test = test_set.drop(COL_ID, axis=1)\n return train, dev, test", "title": "" }, { "docid": "ee63c0be1d4a6f0479c206cc75463131", "score": "0.49875638", "text": "def loadData(catalog):\n loadArtists(catalog)\n loadArtworks(catalog)", "title": "" }, { "docid": "96a030bb281de21745bba079b7914ed3", "score": "0.4985496", "text": "def __multitable_definition(self):\n\n res_tables = datasets.dataset_definition(connection=self._connection,\n dataset_id=self._cube_id,\n fields=['tables', 'columns'])\n _ds_definition = res_tables.json()\n\n for table in _ds_definition['result']['definition']['availableObjects']['tables']:\n column_list = [column['columnName']\n for column in _ds_definition['result']['definition']['availableObjects']['columns']\n if table['name'] == column['tableName']]\n self._table_definition[table['name']] = column_list", "title": "" }, { "docid": "bf84d4228cdf43511f111ae6419caae5", "score": "0.4980978", "text": "def load_demo(dataset_name=None, data_path=DATA_PATH, metadata=False):\n if dataset_name:\n meta, tables = _load_demo_dataset(dataset_name, data_path)\n else:\n meta, tables = _load_relational_dummy()\n\n if metadata:\n return meta, tables\n\n return tables", "title": "" }, { "docid": "c577ba42f2a69a47c456d4020595a550", "score": "0.49802992", "text": "def read_datasets():\n genuine_users = pd.read_csv(\"F:\\Git\\Fake-Profile-Detection-using-ML\\data/users.csv\")\n fake_users = pd.read_csv(\"F:\\Git\\Fake-Profile-Detection-using-ML\\data/fusers.csv\")\n # print genuine_users.columns\n # print genuine_users.describe()\n #print fake_users.describe()\n x=pd.concat([genuine_users,fake_users]) \n y=len(fake_users)*[0] + len(genuine_users)*[1]\n return x,y", "title": "" }, { "docid": "e8e3d5954de20748335b857d84ef4e57", "score": "0.4978883", "text": "async def _generate_source_tables_metadata(self) -> List[Dict[str, Any]]:\n\n self._logger.info('Generating Hasura metadata based on project models')\n views = await self._get_views()\n\n metadata_tables = {}\n model_tables = {}\n\n for app, model in iter_models(self._package):\n table_name = model._meta.db_table or pascal_to_snake(model.__name__)\n model_tables[f'{app}.{model.__name__}'] = table_name\n metadata_tables[table_name] = self._format_table(table_name)\n\n for view in views:\n metadata_tables[view] = self._format_table(view)\n\n for app, model in iter_models(self._package):\n table_name = model_tables[f'{app}.{model.__name__}']\n\n for field in model._meta.fields_map.values():\n if isinstance(field, fields.relational.ForeignKeyFieldInstance):\n if not isinstance(field.related_name, str):\n raise HasuraError(f'`related_name` of `{field}` must be set')\n related_table_name = model_tables[field.model_name]\n metadata_tables[table_name]['object_relationships'].append(\n self._format_object_relationship(\n name=field.model_field_name,\n column=field.model_field_name + '_id',\n )\n )\n metadata_tables[related_table_name]['array_relationships'].append(\n self._format_array_relationship(\n related_name=field.related_name,\n table=table_name,\n column=field.model_field_name + '_id',\n )\n )\n\n return list(metadata_tables.values())", "title": "" }, { "docid": "ae73c2d5edaa23a5af580fda6f65b2ac", "score": "0.4976056", "text": "def copy_metadata(self, ds):\n for key, value in ds.GetMetadata().items():\n self._ds.SetMetadataItem(key, value)", "title": "" }, { "docid": "fa4639c1fe80b5975ec4a717821ad864", "score": "0.49721435", "text": "def fetch_all(self, tags=\"catalog\"):\n for dataset in 
self.datasets.values():\n if not dataset.tags:\n continue\n if set(dataset.tags) & set(tags):\n dataset.fetch()", "title": "" }, { "docid": "f3ca84c968e4ff08fceaba619ab842ec", "score": "0.4971977", "text": "def read_data(kitti_tracking_dir, annotation_files):\n dataset = []\n for train_val,seq_number,frame_id,ground_truth in annotation_files:\n #To-do: make it generic, reading from both from training / testing tracking dataset folders\n basedir = os.path.join(kitti_tracking_dir,'training')\n if not (os.path.isdir(basedir)):\n continue\n loaded = pykitti.tracking(basedir,seq_number)\n if not loaded.velo_files:\n print(\"cannot be loaded\",basedir,seq_number)\n continue\n image = loaded.get_cam2(int(frame_id) )\n image = np.array(image)\n velo_data = loaded.get_velo(int(frame_id))\n if velo_data.size == 0:\n print(\"data could be get\",seq_number,frame_id)\n continue\n dataset.append({\n 'folder_type': train_val,\n 'seq_number': seq_number,\n 'frame_id': frame_id,\n 'ground_truth': ground_truth,\n 'velo_data': velo_data,\n 'image': image\n })\n return dataset", "title": "" }, { "docid": "e8d7fbec26033dae9f54ed58d1211c07", "score": "0.49705693", "text": "def make_all_tables():\n\n # read configs\n with open(r\"backend/pull-public-data/configs.yaml\") as file:\n configs = yaml.load(file, Loader=yaml.FullLoader)\n xwalk = pd.read_csv(configs[\"resources\"][\"varname_crosswalk\"])\n\n # make data tables\n data_sources = list(configs[\"sources\"].keys())\n data_list = [\n make_tables_data_source(x, xwalk, configs) for x in data_sources\n ]\n table_names = list(configs[\"tables\"].keys())\n table_list = [pd.concat([d[t] for d in data_list]) for t in table_names]\n table_dict = {table_names[i]: table_list[i] for i in range(len(table_list))}\n\n return table_dict", "title": "" }, { "docid": "0456300cc616a92e28e25fbc5e4452d1", "score": "0.49683225", "text": "def load_conjoint_synthetic(dims):\n if isinstance(dims, int):\n dims = tuple([dims])\n\n datasets = []\n for dim in dims:\n primary_dim = dim // 2\n secondary_dim = dim - primary_dim\n primary_components = np.repeat(np.eye(primary_dim), secondary_dim, axis=0)\n secondary_components = np.tile(np.eye(secondary_dim), (primary_dim, 1))\n data = np.concatenate((primary_components, secondary_components), axis=1)\n primary_labels = np.repeat(np.arange(primary_dim), primary_dim, axis=0)\n secondary_labels = np.tile(np.arange(secondary_dim), secondary_dim)\n datasets.append({'data': data, 'target': primary_labels, 'tag': secondary_labels})\n\n if len(datasets) == 1:\n # unlist the result\n return datasets[0]\n return tuple(datasets)", "title": "" }, { "docid": "46d8965932df9aee7976330747b92c25", "score": "0.49662623", "text": "def register_tao_instances(name, metadata, json_file, image_root):\n DatasetCatalog.register(name, lambda: load_tao_json(json_file, image_root, name))\n MetadataCatalog.get(name).set(\n json_file=json_file, image_root=image_root, evaluator_type=\"mot\", **metadata\n )", "title": "" }, { "docid": "f4d7e912ea27433a15de1f883bc64822", "score": "0.49620882", "text": "def build():\n fpaths, names = get_paths()\n result = read(fpaths)\n meta_path = dialogs.standard('askopenfilename', title='Select Metadata', \n initialdir=paths.DATA_DIR)\n meta = metadata.from_csv(meta_path, 0, 'Animal ID', *['Treatment'], \n delimiter='\\t')\n #Esnure dset['names'] & dset['metadata'].keys() match so that subsequent data \n #extraction can be done using spectrum.dataset.extract()\n for ix, name in enumerate(names):\n keys = list(meta.keys())\n idx = [i 
for i, key in enumerate(keys) if name in key][0]\n names[ix] = keys[idx]\n \n dset = {'data': result, 'fpaths': fpaths, 'names': names, 'metadata': meta,\n 'axes': ['names', 'epochs', 'score']}\n return dset", "title": "" }, { "docid": "87ef75375f029328e7941747198ad6a5", "score": "0.4960286", "text": "def loadDataInstagram(inputfiles, dataInstagramURL):\n dataInstagram = dict()\n for filename in inputfiles:\n alias = filename.split('/')[-1]\n print BLUE, 'Load:', alias, RESET\n f = open(filename, 'r')\n nlines = 0\n for i in f:\n nlines += 1\n f.seek(0)\n reader = csv.reader(f)\n for data in tqdm(reader, total=nlines, disable=True):\n try:\n urlPlace, placeName = dataInstagramURL[data[0]]\n except KeyError:\n # KeyError in case of sample not available or not resolved\n continue\n except ValueError:\n # ValueError in case of duplicated sample lines\n continue\n try:\n dataInstagram[data[0]] = (data[6], urlPlace, placeName, data[1])\n # (dateTime, urlPlace, placeName, idUser)\n except IndexError:\n print RED, 'CORRUPTED LINE!', RESET\n print data\n return\n print GREEN, 'Done!', RESET\n return dataInstagram", "title": "" }, { "docid": "dc5657bc67a69846bcfd7a1b8ef8edbf", "score": "0.49590766", "text": "def load_original_dataset():\n\n # load data from financial.db\n conn = sql.connect('financial.db')\n c = conn.cursor()\n\n # Convert SQL tables to pandas dataframes to explore the data\n query_account = \"SELECT * FROM account\"\n query_client = \"SELECT * FROM client\"\n query_disposition = \"SELECT * FROM disp\"\n query_order = \"SELECT * FROM [order]\"\n query_trans = \"SELECT * FROM trans\"\n query_loan = \"SELECT * FROM loan\"\n query_card = \"SELECT * FROM card\"\n query_district = \"SELECT * FROM district\"\n\n account = pd.read_sql_query(query_account, conn)\n card = pd.read_sql_query(query_card, conn)\n client = pd.read_sql_query(query_client, conn)\n disp = pd.read_sql_query(query_disposition, conn)\n district = pd.read_sql_query(query_district, conn)\n loan = pd.read_sql_query(query_loan, conn)\n order = pd.read_sql_query(query_order, conn)\n trans = pd.read_sql_query(query_trans, conn)\n\n return account, card, client, disp, district, loan, order, trans", "title": "" }, { "docid": "f05482fab9baa1a6d609ddd2e957b72f", "score": "0.49542627", "text": "def from_csv_to_database(table_name):\n for year, path in datasets.items():\n # load csv files\n with open(path, encoding='cp1251') as dataset:\n datasets[year] = csv.DictReader(dataset, delimiter=';')\n print(f\"Year {year} is loading\")\n # database.copy_dataframe(datasets[year], year, size=32768, table_name=table_name)\n database.exec_values(datasets[year], year, size=32768, table_name=table_name)", "title": "" }, { "docid": "f57d67dfc847272ea6f229e586b40205", "score": "0.49498722", "text": "def _dataset_from_files(dataset: Dataset) -> list[Dataset]:\n result: list[Dataset] = []\n errors = []\n\n if any(_isglob(f) for f in dataset.facets.values()):\n logger.debug(\n \"Expanding dataset globs for dataset %s, \"\n \"this may take a while..\", dataset.summary(shorten=True))\n\n repr_dataset = _representative_dataset(dataset)\n for repr_ds in repr_dataset.from_files():\n updated_facets = {}\n failed = {}\n for key, value in dataset.facets.items():\n if _isglob(value):\n if key in repr_ds.facets and not _isglob(repr_ds[key]):\n updated_facets[key] = repr_ds.facets[key]\n else:\n failed[key] = value\n\n if failed:\n msg = (\"Unable to replace \" +\n \", \".join(f\"{k}={v}\" for k, v in failed.items()) +\n f\" by a value for\\n{dataset}\")\n 
# Set supplementaries to [] to avoid searching for supplementary\n # files.\n repr_ds.supplementaries = []\n if repr_ds.files:\n paths_msg = \"paths to \" if any(\n isinstance(f, LocalFile) for f in repr_ds.files) else \"\"\n msg = (f\"{msg}\\nDo the {paths_msg}the files:\\n\" +\n \"\\n\".join(f\"{f} with facets: {f.facets}\"\n for f in repr_ds.files) +\n \"\\nprovide the missing facet values?\")\n else:\n timerange = repr_ds.facets.get('timerange')\n patterns = repr_ds._file_globs\n msg = (\n f\"{msg}\\nNo files found matching:\\n\" +\n \"\\n\".join(str(p) for p in patterns) + # type:ignore\n (f\"\\nwithin the requested timerange {timerange}.\"\n if timerange else \"\"))\n errors.append(msg)\n continue\n\n new_ds = dataset.copy()\n new_ds.facets.update(updated_facets)\n new_ds.supplementaries = repr_ds.supplementaries\n result.append(new_ds)\n\n if errors:\n raise RecipeError(\"\\n\".join(errors))\n\n return result", "title": "" }, { "docid": "98a2430f3415921aca5c94ab7b4d1825", "score": "0.49473467", "text": "def insert_question_summaries(self, data):\n date = str(datetime.datetime.now())\n\n for row in data:\n tags = row.tags\n articles = row.articles\n\n self.upsert(row, \"QuestionSummary\", date, ['tags', 'articles'], ['ref', 'question'])\n self.insert_articles(articles)\n\n qs_id = self.check(\"QuestionSummary\", row)\n\n for tag in tags:\n tag_id = self.check(\"Tag\", tag, ['name'])\n ass_tag = AssociatedTag(tag_id, qs_id)\n\n self.upsert(ass_tag, \"AssociatedTag\")\n\n for article in articles:\n article_id = self.check(\"Article\", article, ['id'])\n print(f'Article ID: {article_id}')\n ass_tag = AssociatedArticle(article_id, qs_id)\n \n self.upsert(ass_tag, \"AssociatedArticle\")", "title": "" }, { "docid": "265fd2a8bb81a998c552333ffc8d411f", "score": "0.4941762", "text": "def test_merge_metadata_reorder(self):\n \n metadata=[[\"# samples\",\"s1\",\"s2\"],[\"feature1\",\"A\",\"B\"],[\"feature2\",1,2]]\n samples=[\"s2\",\"s1\"]\n values=[[\"bug1\",1,2],[\"bug2\",2,4]]\n \n merged, new_samples=utilities.merge_metadata(metadata, samples, values)\n \n expected=[[\"feature1\",\"A\",\"B\"],[\"feature2\",1,2],[\"bug1\",2,1],[\"bug2\",4,2]]\n expected_samples=[\"s1\",\"s2\"]\n \n self.assertEqual(merged, expected) \n self.assertEqual(new_samples, expected_samples)", "title": "" }, { "docid": "2f7f981ef7922df5d9843654f425c276", "score": "0.49373838", "text": "def get_trivago_datasets(columns, percentage=1, seed=1,\n uitems_min=2, lt_drop=0, time=0):\n debug_print(\"Loading trivago datasets\", level=0)\n columns = set(columns + COLUMNS)\n # Compute names, must be sorted, because sets are kinda random\n file_name = str(hash_params(sorted(columns, reverse=True), percentage, \n seed, uitems_min, lt_drop, time))\n # Check for existence\n os.makedirs(_script_relative(CACHE_FOLDER), exist_ok=True)\n dataset_path = _script_relative(os.path.join(CACHE_FOLDER, file_name))\n\n debug_print(\"Trying {}\".format(dataset_path), level=2)\n # Check cached\n if not os.path.exists(dataset_path):\n debug_print(\"Not found\", level=2)\n # Create dataset\n train_path = _script_relative(REL_TRAIN_PATH)\n test_path = _script_relative(REL_TEST_PATH)\n\n __pandas_modify_datasets(train_path, test_path)\n\n train = __pandas_get_dataset(train_path)\n debug_print(\"Train shape before {}\".format(train.shape))\n\n train = __pandas_strip_columns(train, columns | {ACTION_TYPE})\n train = __pandas_trivago_invalid_rows(train)\n train = __pandas_strip_columns(train, columns)\n\n train = __pandas_trivago_drop_unique(train, 
USER_ID, percentage=percentage)\n train = __pandas_drop_top(train, USER_ID, percentage=lt_drop, min_items=uitems_min)\n train = __pandas_drop_time(train, time=time)\n debug_print(\"Train shape after {}\".format(train.shape))\n\n test = __pandas_get_dataset(test_path)\n test = __pandas_strip_columns(test, columns | {ACTION_TYPE})\n test = __pandas_trivago_invalid_rows(test)\n test = __pandas_strip_columns(test, columns)\n debug_print(\"Dropping non train {}\".format(test.shape), level=2)\n test = test[test[USER_ID].isin(train[USER_ID].unique())]\n debug_print(\"After non train {}\".format(test.shape), level=2)\n # __pandas_reindex_values(train, test, column=USER_ID)\n # __pandas_reindex_values(train, test, column=REFERENCE)\n\n # Save dataset\n debug_print(\"Saving dataset {}\".format(dataset_path))\n with open(dataset_path, \"wb\") as f:\n Pickler(f).dump((train, test))\n else: # Load dataset\n with open(dataset_path, \"rb\") as f:\n debug_print(\"Found\", level=2)\n train, test = Unpickler(f).load()\n print(len(set(test[USER_ID])))\n print(len(set(train[USER_ID]) & set(test[USER_ID])))\n __pandas_trivago_plot_density(train, USER_ID)\n return __pandas_to_coo(train, test)", "title": "" } ]
c28aee430da2c76ae2914d6aeca5a13d
Gets the SharesCollectionPage in async
[ { "docid": "0e1bf656fcc455cff94a91681137bf7c", "score": "0.63819665", "text": "def get_async(self):\n future = self._client._loop.run_in_executor(None,\n self.get)\n collection_page = yield from future\n return collection_page", "title": "" } ]
[ { "docid": "47a58e4fad06da48e8ef633f63ef95ff", "score": "0.73378843", "text": "def collection_page(self):\n if self._collection_page:\n self._collection_page._prop_list = self._prop_dict[\"value\"]\n else:\n self._collection_page = SharesCollectionPage(self._prop_dict[\"value\"])\n\n return self._collection_page", "title": "" }, { "docid": "92bdae1d1bcca46b2d3e1615921788c1", "score": "0.6724456", "text": "def get_async(self):\n collection_page = yield from self.request().get_async()\n return collection_page", "title": "" }, { "docid": "11417927cfb67cc100e41736334bfaa7", "score": "0.6246151", "text": "def get_next_page_request(collection_page, client, options=None):\n if collection_page._next_page_link:\n return SharesCollectionRequest(collection_page._next_page_link, client, options)\n else:\n return None", "title": "" }, { "docid": "16638a4bc95c4b98e3c92899cc94bf88", "score": "0.5372887", "text": "def general_scrapp(self) -> list:\n resulted_listings = []\n domria_url = self._source_url\n\n try:\n page_soup = BaseScrapper.create_soup_obj(domria_url)\n except TypeError:\n print(\"Incorrect url for creating bs object.\")\n # TODO: retry logic here...\n\n for _ in range(MAXIMUM_AMOUNT_OF_PAGES_FOR_SCRAPPING):\n catalog = page_soup.find_all(\"section\", attrs={\"class\": re.compile(f\"{self.catalog_class}.+$\")})\n\n for listing in catalog:\n try:\n listing_info = self.scrap_single_listing(listing)\n resulted_listings.append(listing_info)\n except ValueError as err:\n print(err)\n continue\n except KeyError as err:\n print(\"got wrong keys: \", err)\n\n domria_url = self.paginate_page(domria_url)\n page_soup = BaseScrapper.create_soup_obj(domria_url)\n\n print(\"No more pages left.\")\n return resulted_listings", "title": "" }, { "docid": "6c66f388488b7b92b2fad553f91b5960", "score": "0.5342777", "text": "def get_collection(collection):\n cls = endpoint_class(collection)\n\n resources = retrieve_collection(collection, request.args)\n\n _validate(cls, request.method, resources)\n\n start = stop = None\n\n if request.args and 'page' in request.args:\n page = int(request.args['page'])\n results_per_page = app.config.get('RESULTS_PER_PAGE', 20)\n start, stop = page * results_per_page, (page + 1) * results_per_page\n return collection_response(cls, resources, start, stop)", "title": "" }, { "docid": "098b24799e74e813ec4840b190914ba3", "score": "0.5321038", "text": "def get_collection_data(collection_url):\n # Setting up Selenium driver\n driver = webdriver.Firefox()\n driver.get(collection_url)\n\n # Extracting collection metadata\n collection_id = collection_url.split('/')[-1]\n collection_title = driver.find_element(By.CSS_SELECTOR, 'h1.collection__title').text\n collection_description = driver.find_element(By.CSS_SELECTOR, '.collection__description-text').text\n total_images_text = driver.find_element(By.CSS_SELECTOR, '.collection__totalr').text\n total_images = int(''.join(filter(str.isdigit, total_images_text)))\n\n # Extracting data for each image in the collection\n images_data = []\n while len(images_data) < total_images:\n images = driver.find_elements(By.CSS_SELECTOR, '.showcase__item')\n for image in images:\n image_data = {}\n # Extracting image title and link to preview image\n image_title = image.get_attribute('data-title')\n image_preview_url = image.get_attribute('data-image')\n\n # Checking if there are more pages and navigating to them if they exist\n print('Search Next Page Button')\n pagination_button = driver.find_element(By.CSS_SELECTOR, '.pagination__button')\n 
pagination_button_title = pagination_button.find_element(By.TAG_NAME, 'span').text\n next_page_button = None\n if pagination_button_title == 'Next Page':\n print('Next Page Button found')\n next_page_button = pagination_button\n break\n if not next_page_button or not next_page_button.is_enabled():\n break\n else:\n next_page_button.click()\n # Closing Selenium driver\n driver.quit()", "title": "" }, { "docid": "bcb6cde106b8b064777e8e4cd0a3ee02", "score": "0.51819134", "text": "def _fetch_page(self,\n page: Optional[int] = None,\n per_page: Optional[int] = None) -> Tuple[Iterable[FileLink], str]:\n path = self._get_path()\n params = {}\n if page is not None:\n params[\"page\"] = page\n if per_page is not None:\n params[\"per_page\"] = per_page\n\n response = self.session.get_resource(path=path, params=params)\n collection = response[self._collection_key]\n return collection, \"\"", "title": "" }, { "docid": "07077c58ebaf6559ab2cec529b0af5d9", "score": "0.5116463", "text": "def get_user_collection(self):\n self.user = re.split('/', self.user)[-1] if '/' in self.user else self.user\n user_url = f'https://bandcamp.com/{self.user}'\n user_page = requests.get(user_url, headers=HEADERS)\n if user_page.status_code != requests.codes.ok:\n print(f'User {self.user} does not appear to be a valid user.')\n sys.exit()\n soup = BeautifulSoup(user_page.content, 'lxml')\n pagedata = soup.find('div', id='pagedata')\n if not pagedata:\n print(f'User page for {self.user} missing data.')\n sys.exit()\n pj = json.loads(pagedata.get('data-blob'))\n collection_count = pj.get('current_fan', 0).get('collection_count', 0)\n fan_id = pj.get('fan_data').get('fan_id')\n last_token = pj.get('collection_data').get('last_token')\n self.parse_tracks(pj, page=True)\n cc = 0 if collection_count <= 45 else collection_count\n print(f'Found {cc} tracks for {self.user}')\n while cc > 0:\n print(f'Getting collection data: {cc} tracks left')\n post_data = {'fan_id': fan_id, 'older_than_token': last_token, 'count': 40}\n user_api_url = 'https://bandcamp.com/api/fancollection/1/collection_items'\n r = requests.post(user_api_url, json=post_data, headers=HEADERS)\n np = r.json()\n self.parse_tracks(np, api=True)\n cc -= 40\n self.monitor_mpd()", "title": "" }, { "docid": "2392123c437eed81ee71620af1323aeb", "score": "0.4990865", "text": "def load_collection(self):\n tracks = self.grab_four(self.user_collection)\n # item_ids = [t['item_id'] for t in tracks]\n item_urls = [t['item_url'] for t in tracks]\n self.get_song_meta(item_urls)", "title": "" }, { "docid": "c2e4d2bd52eb12ecdc9fe7867d35c130", "score": "0.49727923", "text": "def getCollection(self, collectienaam):\n start = 0\n rows = 50\n basesearchurl = u'https://api.rkd.nl/api/search/images?filters[collectienaam]=%s&filters[objectcategorie][]=schilderij&format=json&start=%s&rows=%s'\n #while True:\n searchUrl = basesearchurl % (collectienaam.replace(u' ', u'+'), start, rows)\n print searchUrl\n searchPage = requests.get(searchUrl, verify=False)\n searchJson = searchPage.json()\n numfound = searchJson.get('response').get('numFound')\n #print numfound\n #if not start < numfound:\n # return\n #start = start + rows\n\n foundcollections = {}\n totalfound = 0\n\n for rkdimage in searchJson.get('response').get('docs'):\n if rkdimage.get(u'priref') in self.currentcollections:\n collection = self.currentcollections.get(rkdimage.get(u'priref'))\n if collection not in foundcollections:\n foundcollections[collection] = 0\n foundcollections[collection]+=1\n\n print foundcollections\n\n if 
len(foundcollections.keys())==1:\n collectionqid = foundcollections.keys()[0]\n if foundcollections.get(collectionqid) > 1:\n return collectionqid\n elif len(foundcollections.keys())==2:\n if foundcollections.keys()[0] < foundcollections.keys()[1]:\n collectionqid = foundcollections.keys()[1]\n else:\n collectionqid = foundcollections.keys()[0]\n if foundcollections.get(collectionqid) > 2:\n return collectionqid\n return None", "title": "" }, { "docid": "7eda5baaa8acbe3a3f75b161b9cb58f6", "score": "0.49399263", "text": "def collection(self, **kwargs):\n path = self._get_path('collection')\n\n response = self._GET(path, kwargs)\n self._set_attrs_to_values(response)\n return response", "title": "" }, { "docid": "d561ffb06c7382fff83982dbf409bb32", "score": "0.49177718", "text": "def list_shares(self, report_id, page_size=None, page=None,\n include_all=None, include_workspace_shares=False):\n _op = fresh_operation('list_shares')\n _op['method'] = 'GET'\n _op['path'] = '/reports/' + str(report_id) + '/shares'\n _op['query_params']['pageSize'] = page_size\n _op['query_params']['page'] = page\n _op['query_params']['includeAll'] = include_all\n if include_workspace_shares:\n _op['query_params']['include'] = 'workspaceShares'\n\n expected = ['IndexResult', 'Share']\n\n prepped_request = self._base.prepare_request(_op)\n response = self._base.request(prepped_request, expected, _op)\n\n return response", "title": "" }, { "docid": "ecdef727ff0f2df223e13bd547593b95", "score": "0.4900011", "text": "def get(self, uri):\n params = self.params.getParams()\n pagination = self.pagination()\n uri = uri\n slace = self.getSlace(**pagination)\n start, end = (slace['start'], slace['end'])\n oid = self.getIdDict()\n if oid:\n try:\n result = yield self.collection.find_one(oid)\n result['_id'] = str(result['_id'])\n self.setResponseDictSuccess(result)\n except Exception as e:\n self.setResponseDictErrors(\"Not Found!\")\n else:\n cursor = self.collection.find(params).sort([('_id', -1)])[start:end]\n objects = []\n while (yield cursor.fetch_next):\n objects.append(cursor.next_object())\n results = {'data':[document for document in objects]}\n self.setResponseDictSuccess(results)\n return", "title": "" }, { "docid": "d842e2dac4cd7bbb895007968599c741", "score": "0.4895584", "text": "async def _get(self) -> List:\n\n return await self._collection.get(self._key)", "title": "" }, { "docid": "e061e9173472f7e5475d161b859b89df", "score": "0.48673168", "text": "def get_collection_playlist(args):\n url = '%s/get_collection' % (CONFIG['gmusicproxy-url'])\n to_filename = os.path.join(CONFIG['mpd-playlist-dir'], '%scollection.m3u' % (CONFIG['playlist-prefix']))\n fetch_playlist(url, to_filename, args)", "title": "" }, { "docid": "4c76e306f59f8b92c8cf4f510afdd0f7", "score": "0.48492444", "text": "def get(self):\n args = pagination_arguments.parse_args(request)\n page = args.get('page', 1)\n per_page = args.get('per_page', 10)\n\n brews_query = Brew.query\n brews_page = brews_query.paginate(page, per_page, error_out=False)\n\n return brews_page", "title": "" }, { "docid": "460adaf033deac6a179101eb5681d9ec", "score": "0.4837121", "text": "async def _get(self) -> List:\n return await self._collection.get(self._key)", "title": "" }, { "docid": "460adaf033deac6a179101eb5681d9ec", "score": "0.4837121", "text": "async def _get(self) -> List:\n return await self._collection.get(self._key)", "title": "" }, { "docid": "0a79530e0e8aeb01cd5b8daf9258b2b9", "score": "0.48278204", "text": "def page97(self):\n self.token_imageId = \\\n '80'\n 
result = request9701.GET('/SimpleAuctionWebAppDb/AuctionImageServlet' +\n '?imageId=' +\n self.token_imageId)\n\n return result", "title": "" }, { "docid": "535bacd0a820ce2e6a0a328a991309d2", "score": "0.4818279", "text": "def get_collection(self, id: str, **kwargs) -> Collection:\n ...", "title": "" }, { "docid": "ccaa514064c99dd4942bffa9bf8f8bc3", "score": "0.48151413", "text": "async def get_page(self, page_number: int) -> typing.List[typing.Any]:\n\n try:\n return self._page_cache[page_number]\n except KeyError:\n pass\n try:\n if self.sorters is None:\n if inspect.isasyncgenfunction(self.data) or inspect.isasyncgen(\n self.data\n ):\n v = await self.data.__anext__()\n elif inspect.isgeneratorfunction(self.data) or inspect.isgenerator(\n self.data\n ):\n v = next(self.data)\n elif inspect.iscoroutinefunction(self.data):\n v = await self.data(page_number)\n elif inspect.isfunction(self.data):\n v = self.data(page_number)\n else:\n v = self.data[\n page_number * self.per_page : (page_number + 1) * self.per_page\n ]\n self._page_cache[page_number] = v\n else:\n if (\n self.filters is not None\n and self.filters.current_filters is not None\n ):\n self.filtered_data = self.filters.filter(self.data)\n self.filtered_data = self.sorters.current_sorter.sorter(\n self.filtered_data\n )\n v = self.filtered_data[\n page_number * self.per_page : (page_number + 1) * self.per_page\n ]\n self._page_cache[page_number] = v\n except (StopIteration, StopAsyncIteration):\n self.max_pages = page_number\n page_number -= 1\n self.current_page -= 1\n if self._data_is_iterable:\n pages, left_over = divmod(len(self.filtered_data), self.per_page)\n if left_over:\n pages += 1\n self.max_pages = pages\n return self._page_cache[page_number]", "title": "" }, { "docid": "4dcae826eae9a205f6bbe6c994df8f02", "score": "0.48079345", "text": "def collection_get(self) -> dict:\n self.set_transaction_name('collection_get')\n headers = self.request.response.headers\n try:\n pagination = self.get_records()\n except ValidationError as e:\n error_details = {'location': e.location, 'description': e.message, 'name': e.name}\n return self.raise_invalid(**error_details)\n\n headers['Total-Records'] = str(self.count_records())\n # Force in here to use the listing serialization.\n pagination['data'] = [o.to_listing_dict() for o in pagination['data']]\n # also append columns metadata if available\n columns_map = self._columns_map\n if columns_map:\n pagination['columns'] = columns_map\n return pagination", "title": "" }, { "docid": "0688bbad372943a1c07b57e337686693", "score": "0.47786555", "text": "def _fetch_page(self):\r\n page_info = self.fetch_url(self.url)\r\n\r\n self.prev_url = page_info.get('prev_url')\r\n self.next_url = page_info.get('next_url')\r\n self.per_page = page_info.get('per_page', self.per_page)\r\n self.page_data = page_info.get('data')\r\n self.page_headers = page_info.get('headers')\r\n self.total_count = page_info.get('total_count')\r\n\r\n return self.page_data", "title": "" }, { "docid": "7d69075115a1c9f07298168ac856dcfc", "score": "0.47686407", "text": "def test_get_shouts_with_page(self):\n shouts = self.album.get_shouts(page=2)\n self.utils.assert_response_content(shouts)\n assert_equal(shouts['shouts']['@attr']['page'], '2')", "title": "" }, { "docid": "725338ad95734686cd4e3862d482759c", "score": "0.47666788", "text": "def get(self):\n args = request.args\n return get_all_suppliers_with_pagination(args)", "title": "" }, { "docid": "80c333cba8ce85d4dd834b338a9affc3", "score": "0.47442642", "text": "def 
get(self):\n content_html = self.fetch_page() \n if content_html:\n stats = self.extract_stats(content_html)\n logging.debug('Stats from %s: %r', self.url, stats)\n self.response.write('<div>%s</div>' % (json.dumps(stats)))\n if stats:\n stats_record = self.StatsRecord(**stats)\n stats_record.put()\n else:\n self.response.write('<div>Fetch URL %s failed</div>' % (self.url))", "title": "" }, { "docid": "49a5db8650d1657676bb327160590656", "score": "0.47350064", "text": "def sets():\n collection_query = Collection.query.all()\n collections = []\n for c in collection_query:\n collections.append(c.cards)\n\n return render_template('collections.html', collections = collections)", "title": "" }, { "docid": "8b00a8f46894a2507219c9c831aeed29", "score": "0.47114947", "text": "def get_items(storageServerURL, auth, collection, asJSON=True):\n\tlogging.debug(\"get_items()\")\n\n\t# The only reason to set withFalse=False is for unit testing\n\turl = storageServerURL + \"/storage/%s?full=1\" % (collection)\n\treturn storage_http_op(\"GET\", url, asJSON=asJSON, withAuth=auth)", "title": "" }, { "docid": "c9eb9dad34192fafb49af0fb87c9f2ec", "score": "0.46948603", "text": "def collection(self):\r\n return self._collection", "title": "" }, { "docid": "e4dd2ca5095033c857e91f8060b8cc2e", "score": "0.4683089", "text": "def pages(cls) -> List[base.ServiceResource]:\n pass", "title": "" }, { "docid": "dff34039dfb1cde6e92cf6f1732584d7", "score": "0.46788567", "text": "def collection(self):\n return self._collection", "title": "" }, { "docid": "a97cf44f9a715ed46f1ebd1e32d1ff2c", "score": "0.46786183", "text": "async def get_metadata(self, sort: str = 'top', window: str = 'week', page: int = 0):\n logger.info(f'Getting page {page} of favorites')\n\n meta = await self.api.get(\n f'gallery/t/{self.id}/{sort}/{window}/{page}',\n headers={\n 'Authorization': 'Client-ID %s' % self.api._configuration.client_id\n }\n )\n return (meta or {}).get('data')", "title": "" }, { "docid": "252060b80a7ab4c3b01e84f9f080f429", "score": "0.4667465", "text": "def run_connector(self):\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(self.__async_scraper())\n\n return self.data", "title": "" }, { "docid": "77f01796e8476ec8f46b1303de3266ba", "score": "0.46512774", "text": "def _scrape_links(self, links: List[SearchResult]) -> List[Document]:\n if not links:\n return []\n\n def link_fetch(link: SearchResult) -> List[Document]:\n \"\"\"\n Encapsulate the link fetching logic in a function to be used in a ThreadPoolExecutor.\n \"\"\"\n docs: List[Document] = []\n try:\n docs = self.link_content_fetcher.fetch(\n url=link.url,\n doc_kwargs={\n \"id_hash_keys\": [\"meta.url\"],\n \"search.score\": link.score,\n \"search.position\": link.position,\n \"snippet_text\": link.snippet,\n },\n )\n except Exception as e:\n # Log the exception for debugging\n logger.debug(\"Error fetching documents from %s : %s\", link.url, str(e))\n\n return docs\n\n thread_count = min(cpu_count() if len(links) > cpu_count() else len(links), 10) # max 10 threads\n with ThreadPoolExecutor(max_workers=thread_count) as executor:\n fetched_pages: Iterator[List[Document]] = executor.map(link_fetch, links)\n\n # Flatten list of lists to a single list\n extracted_docs = [doc for doc_list in fetched_pages for doc in doc_list]\n\n # Sort by score\n extracted_docs = sorted(extracted_docs, key=lambda x: x.meta[\"search.score\"], reverse=True)\n\n return extracted_docs", "title": "" }, { "docid": "271860ddbe88ba3b46d455d7d63f4113", "score": 
"0.46007073", "text": "def get_collection(self, id: str, **kwargs) -> schemas.Collection:\n collection = self.lookup_id(id, table=self.collection_table).first()\n collection.base_url = str(kwargs[\"request\"].base_url)\n return schemas.Collection.from_orm(collection)", "title": "" }, { "docid": "38faffe5fed674df4c3af48966bf3cbc", "score": "0.4600642", "text": "def get(self, request):\n logger.info(\"Run: collection_display; Params: \" + json.dumps(request.GET.dict()))\n SessionManager.clear_other_session_data(request, SessionManager.Deck)\n\n init_mana_list = Symbol.get_base_symbols()\n try:\n search_term = request.session['collection_deck_search_Term']\n selected_mana = request.session['collection_deck_selected_mana']\n deck_list = request.session['collection_deck_deck_list']\n clear_search = request.session['collection_deck_clear']\n full_list = request.session['collection_deck_deck_full']\n except KeyError:\n search_term = request.session['collection_deck_search_Term'] = \"\"\n selected_mana = request.session['collection_deck_selected_mana'] = []\n deck_list = request.session['collection_deck_deck_list'] = Deck.objects.get_deck_list(request.user.username)\n clear_search = request.session['collection_deck_clear'] = False\n full_list = request.session['collection_deck_deck_full'] = False\n\n\n mana_list = []\n for init_mana in init_mana_list:\n if init_mana.symbol in selected_mana:\n mana_list.append(\n {'symbol': init_mana.symbol, 'checked': True, 'image_url': init_mana.image_url,\n 'id': init_mana.id})\n else:\n mana_list.append(\n {'symbol': init_mana.symbol, 'checked': False, 'image_url': init_mana.image_url,\n 'id': init_mana.id})\n\n deck_list_split = list(deck_list.split(\"},\"))\n if deck_list_split[0] == '':\n deck_list_split = []\n page = request.GET.get('page', 1)\n paginator = Paginator(deck_list_split, 20)\n try:\n decks = paginator.page(page)\n except PageNotAnInteger:\n decks = paginator.page(1)\n except EmptyPage:\n decks = paginator.page(paginator.num_pages)\n\n\n try:\n font_family = UserProfile.get_font(request.user)\n should_translate = UserProfile.get_translate(request.user)\n context = {'font_family': font_family, 'should_translate': should_translate, 'pages': decks,\n 'collection_deck_search_Term': search_term, 'mana_list': mana_list, 'clearSearch': clear_search,\n 'full_list': full_list, 'user_id':request.user.id}\n return render(request, 'Collection/deck_list.html', context)\n except JSONDecodeError:\n request.session['collection_deck_search_Term'] = \"\"\n request.session['collection_deck_selected_mana'] = []\n request.session['collection_deck_deck_list'] = Deck.objects.get_deck_list(request.user.username)\n request.session['collection_deck_clear'] = False\n request.session['collection_deck_deck_full'] = False\n messages.error(request, \"Invalid search, please try again.\")\n font_family = UserProfile.get_font(request.user)\n should_translate = UserProfile.get_translate(request.user)\n context = {'font_family': font_family, 'should_translate': should_translate}\n return render(request, 'Collection/deck_list.html', context)", "title": "" }, { "docid": "4a388a267962caae28ac4996810be415", "score": "0.4593507", "text": "def fetch(self):\n self.__genre = \"Review\"\n self.__task_elements_dict = {\n 'priority':self.task.priority,\n 'level': self.task.level,\n 'last_updated_time':datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\"),\n 'pickup_date':datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\"),\n 'connector_instance_log_id': 
self.task.connector_instance_log_id,\n 'connector_instance_id':self.task.connector_instance_id,\n 'workspace_id':self.task.workspace_id,\n 'client_id':self.task.client_id,\n 'client_name':self.task.client_name,\n 'versioned':False,\n 'category':self.task.instance_data.get('category',''),\n 'task_log_id':self.task.id }\n try:\n self.__setSoupForCurrentUri()\n if not self.__goToReviewsPage():\n return False\n #self.__setParentPage()\n while True:\n if not self.__addReviews():\n log.info(self.log_msg('fetched all posts')) \n break \n try:\n self.currenturi = self.soup.find('a', title ='next')['href']\n self.__setSoupForCurrentUri()\n except:\n log.info(self.log_msg('Next page not found for the url %s'\\\n %self.currenturi)) \n break \n return True\n except:\n log.exception(self.log_msg(\"Exception in fetch %s\"%self.currenturi)) \n return True", "title": "" }, { "docid": "89ac587f91ed360e256135e0bafc7d4b", "score": "0.45929584", "text": "def get_collections():\n\n res = requests.get(collection_endpoint)\n collections = [collection[\"Collection\"] for collection in res.json()]\n collections.sort()\n\n return collections", "title": "" }, { "docid": "e636c0bb430e9f788e377c81e0cb211d", "score": "0.45918497", "text": "async def _get_next_page(username: str, page: int, sort: str):\n logger.info(f'Getting page {page} of favorites')\n meta = await self.api.get(\n f'account/{username}/favorites/{page}/{sort}',\n headers={\n 'Authorization': f'Bearer {self.api._configuration.access_token}',\n }\n )\n return (meta or {}).get('data')", "title": "" }, { "docid": "4b446b5e50912b22451a6326f9a5028d", "score": "0.45828927", "text": "def get_content(self):\r\n self.wait.until(\r\n EC.presence_of_element_located((By.CSS_SELECTOR, '#mainsrp-itemlist > div > div > div:nth-child(1)'))\r\n )\r\n # get the page of source\r\n html = self.browser.page_source\r\n content_text = etree.HTML(html)\r\n names = content_text.xpath('*//div[@class=\"pic\"]/a/img/@alt')\r\n srcs = content_text.xpath('*//div[@class=\"pic\"]/a/img/@data-src')\r\n totals = [(name, src) for (name, src) in zip(names, srcs)]\r\n for total in totals:\r\n self.download(total[1], total[0])", "title": "" }, { "docid": "7881674d4173009cc12b8618d9401aa0", "score": "0.45787275", "text": "def get_collection(self, name):\n if name not in self.collections:\n self.collections[name] = Collection()\n \n return self.collections[name]", "title": "" }, { "docid": "ae80acf98bbe856af7037f8ea4826c3b", "score": "0.4576466", "text": "def get_all(self):\n return self._client.get_collection(self.URI)", "title": "" }, { "docid": "b0e8e97e016ddc638c4cb0e74c7228b4", "score": "0.45761546", "text": "def collection(index=0):\n queryString = \"?api_key=\" + API_KEY + \"&offset=\" + str(index) + \"&format=json\" \n url = \"https://comicvine.gamespot.com/api/issues/\" + queryString\n totalOffsets = user_offset(url)\n print(\"number_of_total_results: \", totalOffsets)\n allComics = request_data(url)\n comicInfo = []\n comics = []\n \n for comic in allComics:\n if comic['name']:\n comicName = comic['volume']['name'] + ' #' + comic['issue_number'] + ' - ' + comic['name']\n else:\n comicName = comic['volume']['name'] + ' #' + comic['issue_number']\n dateTime = date_format(comic['date_added'])\n comicInfo.append(comicName)\n comicInfo.append(dateTime)\n comicInfo.append(comic['image']['original_url'])\n comicId = comic['api_detail_url'].split(\"/\")\n comicInfo.append(comicId[5] + \"/\" + queryString)\n comics.append(comicInfo)\n comicInfo = []\n sorted(comics, key = lambda x: x[1], 
reverse=True)\n return render_template(\"allComics.html\", comics=comics)", "title": "" }, { "docid": "825144a3bcc896b1662f9b4fec2ab3fd", "score": "0.45726463", "text": "def get_collection(self, collection):\n\n return self.db.collection(collection)", "title": "" }, { "docid": "a555b5e357f4cdec9e6e057af24e2ad7", "score": "0.45641616", "text": "def get(self):\n return {'status': 'success', 'count': MultiGallery.query.count()}, 200", "title": "" }, { "docid": "3987c509e863799013f77fcc30a5b12d", "score": "0.45543617", "text": "def getShots(self, show):\n self._toggleComboLoadState(self.shotCombo, loading=True)\n self._toggleComboLoadState(self.taskCombo, loading=False)\n\n self._shotsThread = ShotgunShotsThread(self, self._shows.get(show))\n self.connect(self._shotsThread, QtCore.SIGNAL(\"shots (PyQt_PyObject)\"), self.handleShots)\n self.connect(self._shotsThread, QtCore.SIGNAL(\"error (PyQt_PyObject)\"), self.handleShotsError)\n self._shotsThread.start()\n self.threads.append(self._shotsThread)", "title": "" }, { "docid": "5ff34ca2a4d021108b1f39762855a399", "score": "0.45417258", "text": "def page(self):\r\n limit = self.get_limit()\r\n offset = self.get_offset()\r\n count = self.get_count()\r\n objects = self.get_slice(limit, offset)\r\n meta = {\r\n 'offset': offset,\r\n 'limit': limit,\r\n 'total_count': count,\r\n }\r\n\r\n if limit:\r\n meta['previous'] = self.get_previous(limit, offset)\r\n meta['next'] = self.get_next(limit, offset, count)\r\n\r\n return {\r\n self.collection_name: objects,\r\n 'meta': meta,\r\n }", "title": "" }, { "docid": "2274d9da9e652bb88dd81d69adaa824b", "score": "0.45402092", "text": "def page87(self):\n self.token_imageId = \\\n '80'\n result = request8701.GET('/SimpleAuctionWebAppDb/AuctionImageServlet' +\n '?imageId=' +\n self.token_imageId +\n '&mode=' +\n self.token_mode)\n\n return result", "title": "" }, { "docid": "b68c0d9515875357f6a04db52658e458", "score": "0.453966", "text": "def collection(self):\n\n return self._collection", "title": "" }, { "docid": "27d4377c62d04fb7166f241656f48af0", "score": "0.45261878", "text": "def collection(self):\r\n return self.__collection", "title": "" }, { "docid": "63ebcd81009e537f66e4c91a8a6f5c76", "score": "0.4523499", "text": "def get(self, repository_id, set_id):\n repository = DatabaseConnection(repository_id)\n\n set_exists = yield Set.exists(repository, set_id)\n if not set_exists:\n raise HTTPError(404, \"Set {} not found\".format(set_id))\n\n page = int(self.get_argument(\"page\", \"1\"))\n page_size = int(self.get_argument(\"page_size\", \"100\"))\n page_size = min(page_size, 1000)\n\n elements = yield Set.get_elements(repository, set_id, page, page_size)\n\n self.finish({'status': 200, 'data': {'assets': [e.split('/')[-1] for e in elements]}})", "title": "" }, { "docid": "b959d9ad3725867ed1fc0413e074a45a", "score": "0.4523363", "text": "def getCollection (self, key):\n\t\tself.read()\n\t\ttry:\n\t\t\treturn self[key]\n\t\texcept KeyError:\n\t\t\tprint 'no such collection: %s' % key\n\t\t\treturn None", "title": "" }, { "docid": "208494ce7946e10e774cce262ffc4e57", "score": "0.4518764", "text": "def collectionGet(collection_id=None, **kwargs):\n try:\n db_session = connect_to_database()\n collection = db_session.query(Collection)\n collection = collection.options(subqueryload(Collection.usr))\n if 'user_id' in kwargs:\n collection = collection.filter_by(user_id=kwargs['user_id'])\n if collection_id:\n collection = collection.filter_by(id=collection_id)\n res = collection.first()\n else:\n collection = 
collection.order_by(Collection.name)\n if 'limit' in kwargs:\n collection = collection.limit(kwargs['limit'])\n res = collection.all()\n db_session.close()\n return res\n except:\n abort(404)\n # should this be False?", "title": "" }, { "docid": "ea45e034507c25dbb428ffc21f8a64e7", "score": "0.45171952", "text": "def fetch(self):\n self.genre = \"Review\"\n try:\n self.currenturi = self.parent_uri = self.currenturi + '&s=d'\n if not self.__setSoupForCurrentUri():\n log.info( self.log_msg(\"Soup not set for the uri:%s\"\\\n %self.currenturi))\n return False\n if not self.__getParentPage():\n log.info(self.log_msg(\"Parent page not found for the uri %s\"\\\n %self.currenturi))\n while True:\n main_page_soup = copy.copy(self.soup)\n if not self.__addReviews():\n log.info(self.log_msg('fetched all posts for the url %s'%\\\n self.parent_uri))\n break \n try:\n self.currenturi = self.task.instance_data['uri'].rsplit\\\n ('/', 1)[0] + '/' + main_page_soup.find('div', 'pgb')\\\n .find('a', text='Next').parent['href']\n if not self.__setSoupForCurrentUri():\n log.info(self.log_msg('Soup not set for uri %s'%self.currenturi))\n break \n except:\n log.info(self.log_msg('Next page not found for the url %s'\\\n %self.currenturi)) \n break \n return True\n except:\n log.exception(self.log_msg(\"Exception in fetch %s\"%self.parent_uri)) \n return False", "title": "" }, { "docid": "aa64a80ca08b57840fb0b0303a79a31f", "score": "0.45087507", "text": "def _collection_html_response(resources, start=0, stop=20):\n return make_response(render_template(\n 'collection.html',\n resources=resources[start:stop]))", "title": "" }, { "docid": "b8b5fc660f27af9beb05f34259356f80", "score": "0.4500135", "text": "def parse(self, response):\n\n # Find the cast and crew page\n next_page = response.css(\"li.ipc-inline-list__item a\")[2].attrib[\"href\"]\n\n # If the cast and crew page exists, update the url, and move to it\n if next_page:\n next_page = response.urljoin(next_page) # Update url\n yield scrapy.Request(next_page, callback = self.parse_full_credits) # Move to page and run parse_full_credits", "title": "" }, { "docid": "d93e5f81d12f2d75b8bb13d3369debe3", "score": "0.44845217", "text": "def get(self):\r\n request = cherrypy.serving.request\r\n self.tot_gets += 1\r\n \r\n uri = cherrypy.url(qs=request.query_string)\r\n uricache = self.store.get(uri)\r\n if uricache is None:\r\n return None\r\n \r\n header_values = [request.headers.get(h, '')\r\n for h in uricache.selecting_headers]\r\n variant = uricache.wait(key=tuple(sorted(header_values)),\r\n timeout=self.antistampede_timeout,\r\n debug=self.debug)\r\n if variant is not None:\r\n self.tot_hist += 1\r\n return variant", "title": "" }, { "docid": "fd87c697406643342d668e3094f9e740", "score": "0.4482858", "text": "def page98(self):\n result = request9801.GET('/SimpleAuctionWebAppDb/')\n\n return result", "title": "" }, { "docid": "0088050ba85e1c9386a7625382818c14", "score": "0.44750962", "text": "def on_get(self, req, resp, organization_code):\n session = Session()\n try:\n organization = session.query(Organization).get(organization_code)\n if organization is None:\n raise falcon.HTTPNotFound()\n\n # Build query to fetch items\n query = session\\\n .query(OrganizationSecurityThreat)\\\n .join(SecurityThreat)\\\n .filter(OrganizationSecurityThreat.organization_id == organization_code)\\\n .order_by(SecurityThreat.name)\\\n\n data, paging = get_collection_page(req, query, custom_asdict)\n resp.media = {\n 'data': data,\n 'paging': paging\n }\n finally:\n session.close()", 
"title": "" }, { "docid": "30c44f5eb027b212a3e7f35cf21997d4", "score": "0.4472616", "text": "async def from_href(self):\n if not hasattr(self, \"href\"):\n raise TypeError(\n \"Spotify object has no `href` attribute, therefore cannot be retrived\"\n )\n\n if hasattr(self, \"http\"):\n return await self.http.request( # pylint: disable=no-member\n (\"GET\", self.href) # pylint: disable=no-member\n )\n\n klass = type(self)\n\n try:\n client = getattr(self, f\"_{klass.__name__}__client\")\n except AttributeError:\n raise TypeError(\"Spotify object has no way to access a HTTPClient.\")\n else:\n http = client.http # pylint: disable=no-member\n\n data = await http.request((\"GET\", self.href)) # pylint: disable=no-member\n\n return klass(client, data)", "title": "" }, { "docid": "ba44ca97abe13c5f0f57e1884448330a", "score": "0.4468384", "text": "def parse(self, response):\n\t\tlist_of_shops = response.xpath('//*/div[@class=\"media-body\"]')\n\t\t#remove all the links with \"#top\" as they are pointing to the top of the screen\n\t\tfor shop in list_of_shops:\n\t\t\titem = EhiSiegelItem()\n\t\t\titem['name'] = shop.xpath('.//div[@class=\"shop-head\"]/h4[1]/text()').get().strip('\\n').strip()\n\t\t\titem['categories'] = [category.strip('\\n').strip() for category in shop.xpath('.//div[@class=\"shop-head\"]/a/text()').getall()]\n\t\t\titem['desc'] = shop.xpath('.//div[@class=\"shop-description\"]/text()').get().strip('\\n').strip()\n\t\t\titem['website'] = shop.xpath('.//div[@class=\"shop-links\"]/a[1]/@href').get()\n\t\t\turl = shop.xpath('.//div[@class=\"shop-links\"]/a[2]/@href').get()\n\t\t\titem['url'] = url\n\t\t\tyield scrapy.Request(url, callback=self.parse_certificate, meta={'item': item})\n\t\tnext_page = response.xpath('//*/a[@class=\"next\"]/@href').get()\n\t\tif next_page:\n\t\t\tnext_page = \"https://ehi-siegel.de\" + next_page\n\t\t\tyield scrapy.Request(next_page, callback=self.parse)", "title": "" }, { "docid": "2aa853d14ab5a5e8b778b3722fb7e0b1", "score": "0.44672686", "text": "def get(self):\n request = cherrypy.serving.request\n self.tot_gets += 1\n\n uri = cherrypy.url(qs=request.query_string)\n uricache = self.store.get(uri)\n if uricache is None:\n return None\n\n header_values = [request.headers.get(h, '')\n for h in uricache.selecting_headers]\n variant = uricache.wait(key=tuple(sorted(header_values)),\n timeout=self.antistampede_timeout,\n debug=self.debug)\n if variant is not None:\n self.tot_hist += 1\n return variant", "title": "" }, { "docid": "1a58dc89e32d1038e30126b2c0cb5993", "score": "0.44607663", "text": "def _fetch_pages(\n self,\n method,\n key: str,\n args: dict = None,\n limit: int = None,\n max_rows: int = None,\n items_name: str = None,\n collection_name: str = None,\n print_result: bool = True,\n ) -> list:\n # fetch first page\n page = 1\n output_str = (\n f\"Fetching {items_name if items_name else method} \"\n f\"from {collection_name if collection_name else 'workspace'}...\"\n )\n logger.info(output_str)\n if not args:\n args = {}\n if not limit:\n limit = settings.SLACK_PAGE_LIMIT\n base_args = {**args, **{\"limit\": limit}}\n response = getattr(self._client, method)(**base_args)\n rows = response[key]\n\n # fetch additional page (if any)\n while (\n (not max_rows or len(rows) < max_rows)\n and response.get(\"response_metadata\")\n and response[\"response_metadata\"].get(\"next_cursor\")\n ):\n page += 1\n logger.info(\"%s - page %s\", output_str, page)\n page_args = {\n **base_args,\n **{\n \"cursor\": 
response[\"response_metadata\"].get(\"next_cursor\"),\n },\n }\n response = getattr(self._client, method)(**page_args)\n rows += response[key]\n\n if print_result:\n logger.info(\n \"Received %s %s\",\n format_decimal(len(rows), locale=self._locale),\n items_name if items_name else \"objects\",\n )\n return rows", "title": "" }, { "docid": "e0afcccafb47346e98205d8ac27744ea", "score": "0.44563144", "text": "def get_share(self, report_id, share_id):\n _op = fresh_operation('get_share')\n _op['method'] = 'GET'\n _op['path'] = '/reports/' + str(report_id) + '/shares/' + str(\n share_id)\n\n expected = 'Share'\n prepped_request = self._base.prepare_request(_op)\n response = self._base.request(prepped_request, expected, _op)\n\n return response", "title": "" }, { "docid": "5edb5c042cffd9c23b9e8dab2afc6de4", "score": "0.44468078", "text": "def shaded_collection(self):\n return self._shaded_collection", "title": "" }, { "docid": "953c96d818ead87c68aee168ae0cfcfa", "score": "0.44450122", "text": "def _get_collection(self, collection):\n\n return self.db[collection.collection_name]", "title": "" }, { "docid": "e3e713be2f2323b896d8ef332d60b0c9", "score": "0.44442415", "text": "def retrieve_collection_results(run_id, host):\n data = collector_handler.get_collection_results(run_id, host)\n return Response(json.dumps({\"ret\": \"ok\", \"message\": \"Results for collection retrieved successfully.\", \"data\": data}))", "title": "" }, { "docid": "32fcd2614dff1655b1c0970508ef88c9", "score": "0.44412997", "text": "def page47(self):\n self.token_imageId = \\\n '85'\n result = request4701.GET('/SimpleAuctionWebAppDb/AuctionImageServlet' +\n '?imageId=' +\n self.token_imageId +\n '&mode=' +\n self.token_mode)\n\n return result", "title": "" }, { "docid": "ee96863ff4337302c3da9999cacff5f7", "score": "0.44337195", "text": "def collect_data_from_page():\n\n card_num = 0\n url = start_url + extended_url\n for i in range(num_pages):\n soup = safe_soup_page(url)\n cards = soup.find_all('ul', attrs={'class': 'cards-grid clear'})\n\n for line in str(cards).split('\\n'):\n if 'img' in line:\n img_link = line.split('src=\"')[-1].strip('\"/>') # get image link\n img_name = str(card_num) + \"_\" + line.split('alt=\"')[-1].split('\"')[0] + \".png\" #build image string\n print(\"Collecting: \" + img_link + \" as \" + img_name)\n recorded = False\n attempts_left = 3\n while not recorded:\n attempts_left -= 1\n try:\n with open(directory + img_name, 'wb') as f:\n f.write(request.urlopen(img_link).read())\n recorded = True\n except:\n if attempts_left == 0:\n recorded = True\n print(\"FAILED!\")\n else:\n print('Failed Trying again...')\n card_num += 1\n\n url = start_url + str(i+2) + extended_url", "title": "" }, { "docid": "de83e0055b1ee8351afce2b2797e4d28", "score": "0.44336548", "text": "def pages(self):\n ...", "title": "" }, { "docid": "cb1ebe0a17fc1daf42925da4ae72dad0", "score": "0.4432582", "text": "def retrieve_collection(collection, query_arguments=None):\n session = _get_session()\n cls = endpoint_class(collection)\n if query_arguments:\n filters = []\n order = []\n limit = None\n for key, value in query_arguments.items():\n if key == 'page':\n continue\n if value.startswith('%'):\n filters.append(getattr(cls, key).like(str(value), escape='/'))\n elif key == 'sort':\n order.append(getattr(cls, value))\n elif key == 'limit':\n limit = value\n elif key:\n filters.append(getattr(cls, key) == value)\n resources = session.query(cls).filter(*filters).order_by(\n *order).limit(limit)\n else:\n resources = 
session.query(cls).all()\n return resources", "title": "" }, { "docid": "82d8ae6fee2bac7e9a341210219942fd", "score": "0.44306582", "text": "def page57(self):\n self.token_imageId = \\\n '81'\n result = request5701.GET('/SimpleAuctionWebAppDb/AuctionImageServlet' +\n '?imageId=' +\n self.token_imageId +\n '&mode=' +\n self.token_mode)\n\n return result", "title": "" }, { "docid": "793794e59ca30033fb52d705e428944c", "score": "0.44170564", "text": "def get_image_page(request):\n urls = []\n s3 = boto3.client('s3', region_name='eu-west-3')\n page_size = request.GET.get(\"page_size\", 50)\n page_start = request.GET.get(\"page_start\", 0)\n try:\n page_size = int(page_size)\n page_start = int(page_start)\n except ValueError:\n return HttpResponse(status=400, content=\"Invalid size/start provided\")\n image_ids = Image.objects.all().values_list('pk', flat=True).order_by(\"-id\")\n images = Image.objects.filter(id__in=image_ids[page_start:page_start + page_size]).order_by(\"-id\")\n for image in images:\n image_id = str(image.id)\n uploader = User.objects.get(id=int(image.uploader_id)).username\n if in_bucket('resized/' + image_id + \".\" + image.ext, s3=s3):\n url = s3.generate_presigned_url(ClientMethod=\"get_object\",\n Params={'Bucket': S3_BUCKET,\n 'Key': 'resized/' + image_id + \".\" + image.ext}\n )\n urls.append({\"url\": url, \"id\": image_id, \"uploader\": uploader})\n return HttpResponse(json.dumps(urls), content_type=\"application/json\")", "title": "" }, { "docid": "48dd476c1f5acc6af03cdd0c322a9867", "score": "0.44151825", "text": "def queryforlistings(mongoclient):\n listing_collection = mongoclient.scraper.listing\n query = {\"content_parsed\": False, \"content_acquired\": True}\n listings = listing_collection.find(query, no_cursor_timeout=True)\n return listings", "title": "" }, { "docid": "2123e6a8941a63ad0998bccf4da90ea8", "score": "0.4411767", "text": "def retrieve_collections():\n data = collector_handler.get_collection_names()\n return Response(json.dumps({\"ret\": \"ok\", \"message\": \"Names of collections retrieved successfully.\", \"data\": data}))", "title": "" }, { "docid": "5ad79cc7253604c1f039e86554e0fe91", "score": "0.44089416", "text": "def index(self):\r\n self.subsection = 'KrampOver'\r\n return self.get_flatpage()", "title": "" }, { "docid": "eb0a69c27071c36840adbdb655642da6", "score": "0.44087207", "text": "async def __async_scraper(self):\n\n async_tasks = []\n conn = aiohttp.TCPConnector(limit_per_host=connection_settings['CONCURRENT_CONNECTIONS'])\n timeout = aiohttp.ClientTimeout(total=connection_settings['CONNECTION_TIMEOUT'])\n user_agent = generate_user_agent()\n\n async with aiohttp.ClientSession(connector=conn,\n timeout=timeout,\n headers={'User-Agent': user_agent}) as session:\n for url in self.urls:\n async_tasks.append(self.__http_request__async(url, session, user_agent))\n\n self.data = await asyncio.gather(*async_tasks)", "title": "" }, { "docid": "b2b759688e06a0612eb347cdd258bb16", "score": "0.44081268", "text": "async def page_async(\n self,\n friendly_name: Union[str, object] = values.unset,\n page_token: Union[str, object] = values.unset,\n page_number: Union[int, object] = values.unset,\n page_size: Union[int, object] = values.unset,\n ) -> WorkspacePage:\n data = values.of(\n {\n \"FriendlyName\": friendly_name,\n \"PageToken\": page_token,\n \"Page\": page_number,\n \"PageSize\": page_size,\n }\n )\n\n response = await self._version.page_async(\n method=\"GET\", uri=self._uri, params=data\n )\n return WorkspacePage(self._version, response)", 
"title": "" }, { "docid": "4339caac217c5944118d47e49a881d72", "score": "0.44075042", "text": "def get_sresults_page(page_num=None):\n base_url = 'https://www.roksa.pl/pl/szukaj/?anons_type=0&cenaod=1'\n if page_num:\n base_url += f'&pageNr={page_num}'\n\n _, page_body = get_rox_page(base_url)\n return page_body", "title": "" } ]
93362519e55e546b3e963fd8b86ee02a
read excel file for target
[ { "docid": "68c86ecf7a8fdfb2368874eb0d6b6e87", "score": "0.7322174", "text": "def read_excel(target):\n \n bpath = '/home/sam/Dropbox/HIGP/Crater_Lakes/Dmitri_Sam/data/{0}/{0}'.format(target)\n fpath = '{0}_satellite.xlsx'.format(bpath)\n df = pd.read_excel(fpath)\n\n # set datetime as index\n df = df.set_index(pd.DatetimeIndex(df['datetime']))\n \n return df", "title": "" } ]
[ { "docid": "de8846ccf04c7035ebfb08f8da3a26b5", "score": "0.71546036", "text": "def _read_excel(self):\n self.workbook = xlrd.open_workbook(self.handler.output_file_path)\n self.worksheet = self.workbook.sheet_by_index(0)", "title": "" }, { "docid": "c3aff5b016160299da8a4b5856409681", "score": "0.6741806", "text": "def read_excel(self):\n file_name = os.path.join(self.data_dir, \"dataframe.xls\")\n pd.read_excel(file_name)", "title": "" }, { "docid": "0e61bc43dcb8a23497cdfd310a492f5c", "score": "0.6685606", "text": "def load_excel(filename):\n raise NotImplementedError(\"Developer has been lazy.\")", "title": "" }, { "docid": "fab8b5144eb446bf645ffa927b2a663c", "score": "0.6581318", "text": "def __load_source(source):\n return pd.read_excel(source, sheet_name=0)", "title": "" }, { "docid": "52881ed13f02bd20fe5af8c5540c457a", "score": "0.6563986", "text": "def read_excel_file(path_to_file):\n\n try:\n wb = openpyxl.load_workbook(path_to_file)\n worksheet = wb.active\n return [[_get_cell_data(worksheet, row, column)\n for column in range(1, worksheet.max_column + 1)]\n for row in range(1, worksheet.max_row + 1)]\n except FileNotFoundError:\n logger.error(f\"Excel file wasn't found for reading with path: {path_to_file}\")", "title": "" }, { "docid": "57e299ddeb35c9c6207a378a7195a56f", "score": "0.65607506", "text": "def XlsxOpen(filename):", "title": "" }, { "docid": "2b8961e4559e9d918736a0b041fd3df0", "score": "0.65605354", "text": "def read_input_file(file_path):\n if \"xlsx\" in file_path:\n input_file = pd.read_excel(file_path, engine='openpyxl')\n else:\n input_file = pd.read_csv(file_path)\n return input_file", "title": "" }, { "docid": "6634ae2e2a541bb6553ebae1a20e797a", "score": "0.6550803", "text": "def auto_input(filename_location):\n ds = pd.read_excel(filename_location, header=None, names=[])\n print(ds.head(5))\n # TODO: make work", "title": "" }, { "docid": "1f66946413e56283f913f05721fdf08f", "score": "0.6524245", "text": "def read_excel(file_path):\n if os.path.isfile(file_path):\n\ttry:\n\t data = xlrd.open_workbook(file_path)\n\t return data\n\texcept Exception, e:\n\t print str(e)\n\t return None\n else:\n\treturn None", "title": "" }, { "docid": "8eeca5c45cf5cdabd5146f1e348ff2a8", "score": "0.65180016", "text": "def read_input_file(file_path):\n if \"xlsx\" in file_path:\n file = pd.read_excel(file_path, engine='openpyxl')\n else:\n file = pd.read_csv(file_path)\n return file", "title": "" }, { "docid": "2ba7df1c920c2b4879e021f06f7e0d47", "score": "0.6454333", "text": "def get_data_from_excel():\n file_name = f'./static/data/prayer_data.xlsx'\n print(\"Reading to excel ........\")\n if check_data_from_file(file_name):\n return pd.read_excel(file_name, sheet_name=None)\n return None", "title": "" }, { "docid": "acbb8b535d3479b7b8a75edfcd99b34b", "score": "0.6380624", "text": "def read(self, file_name):\n\n import xlrd, xlwt\n readbook = xlrd.open_workbook(file_name, formatting_info=True)\n sheet = readbook.sheet_by_index(0)\n xls_data = [sheet.row_values(rownum) for rownum in range(sheet.nrows)]\n return xls_data", "title": "" }, { "docid": "0ce32b6b4b9d321c3ed6a8ef603025d3", "score": "0.62234914", "text": "def open_xls(xls_input_file_name):\r\n try:\r\n return openpyxl.load_workbook(xls_input_file_name, data_only=True)\r\n except Exception as e:\r\n print(e)\r\n print(\"Please ensure the file exists or the correct filename was entered when utilizing the \\\"-i\\\" argument.\")", "title": "" }, { "docid": "d73be0852b4b3f29cf8bc888b4ef3015", "score": "0.6152587", "text": 
"def excel(filename, subset=[], skip='#', **kwargs):\n\tif subset:\n\t\treturn pd.read_excel(filename, comment=skip, **kwargs)[subset]\n\n\treturn pd.read_excel(filename, comment=skip, **kwargs)", "title": "" }, { "docid": "1f68b1dcf11fc1447844d74378ce3927", "score": "0.61364347", "text": "def get_excel_file():\n exporter = ExcelExporter(DATA_TEXT_FILE, DATA_EXCEL_FILE)\n exporter.run()", "title": "" }, { "docid": "50e6a1bb39be48ac47969cbc09903412", "score": "0.6115224", "text": "def load_excel_file(self, file_name):\n self.library = pd.read_excel(file_name)\n self.columns = self.library.columns", "title": "" }, { "docid": "45affa54d13389401e7f6f5d380360c6", "score": "0.6079485", "text": "def target_reader(path):\n target = np.array(pd.read_csv(path)[\"target\"])\n return target", "title": "" }, { "docid": "08d024678e2a28a2d100ce22c4851b12", "score": "0.6076378", "text": "def read(**kwargs) -> collections.OrderedDict:\n filePath = kwargs['filePath']\n\n try:\n checkConsistency = kwargs['params']['check_consistency']\n except:\n checkConsistency = False\n\n try:\n engine = kwargs['params']['engine']\n except:\n engine = 'openpyxl'\n\n data = pd.read_excel(filePath, sheet_name=None, engine=engine)\n data = collections.OrderedDict(data)\n\n ColumnsCheck.checkColumns(data=data) # check whether From -> To columns exist.\n\n if checkConsistency:\n ConsistencyCheck.checkConsistency(data=data) # check the consistency of the linguistic ratings.\n \n return data", "title": "" }, { "docid": "5c72ee14da0c4bdd9423ffb80fc97362", "score": "0.60558236", "text": "def _readTrendFile(self, sheetName=None):\n # this sheet_name option will correspond to the folder it reside\n df1 = pd.DataFrame(pd.read_excel(self.pathFileTrend, sheet_name=sheetName)) #####\n return df1", "title": "" }, { "docid": "832b6c29cb4ab16f8f6df48a76b15817", "score": "0.5996795", "text": "def ANIP_excel_read(filename):\n\tnames=['End date','Precipitation','Temperature','O18','s.d. d18O',\"H2\",\"s.d. d2H\",\"H3\", 's.d. 
H3']\n\tcolumns=[i for i in range(1,13,1)]\n\tdataframe = pd.read_excel(filename,\n\t\tsheet_name=\"Daten_T\",\n\t\tskiprows=[0,1,2],\n\t\tusecols=columns,\n\t\tparse_dates = True,day_first=True, index_col=[0,1])\n\tdataframe.columns=names\n\tdataframe.rename_axis(['Site','Date'], inplace=True)\n\n\treturn dataframe", "title": "" }, { "docid": "54661139113e47f8b129bae073a733ee", "score": "0.5995006", "text": "def read_yields_excel(filename, sheet_name=0, **kwargs):\n experiment = mp.ReadExperimentTable.from_xls(filename, sheet_name=sheet_name,\n use_is=False, **kwargs)\n\n data = experiment.df\n data.update(data.select_dtypes(include='number').clip(lower=0)) # kills all negative numbers\n return data", "title": "" }, { "docid": "0e926e6c24790835620fc21ff46a0a04", "score": "0.5949647", "text": "def readResultsFromWorkbook(self, inputFilepath, reader):\n if inputFilepath != None:\n workbook = load_workbook(inputFilepath)\n wsNames = workbook.get_sheet_names()\n for name in wsNames:\n worksheet = workbook.get_sheet_by_name(name)\n reader.read(worksheet)", "title": "" }, { "docid": "123fd4c702996e27958820d28f65323e", "score": "0.5949289", "text": "def get_parsed_excel():\n dataframe = pandas.read_excel(DEFAULT_TEST_FILE, sheet_name=DEFAULT_SOURCE)\n\n return parser.excel_parser(dataframe)", "title": "" }, { "docid": "38b548ead66d07b2d013e4ea71fc5245", "score": "0.5917841", "text": "def read_file():", "title": "" }, { "docid": "47aee263562bca1e7ccad8c444b6fc07", "score": "0.5900029", "text": "def readFromFile(self):\n \n pass", "title": "" }, { "docid": "bce39512b72bcbf060d4d20ea3680edc", "score": "0.5889155", "text": "def look_for_excel_file_and_select(self):\n self.select_open()\n self.select_this_device()\n self.select_storage()\n self.select_mobiauto()\n self.select_document()\n self.select_excel_file()\n self.agree_to_open_file_anyway()", "title": "" }, { "docid": "9ee3912d680c41c365cac83507029160", "score": "0.5848106", "text": "def sheet_read(self, path, sheet_name):\n if self.path_is_exist(path):\n if self.sheet_name_is_exist(path, sheet_name):\n workbook = xlrd.open_workbook(path)\n worksheet = workbook.sheet_by_name(sheet_name)\n interface_data = []\n interface_list = []\n interface_list_tested = []\n for i in range(0, worksheet.ncols):\n for j in range(0, worksheet.nrows):\n if not str(worksheet.cell_value(0, 0)).strip(' ').find('interface-list'):\n if j >0 :\n interface_list.append(worksheet.cell_value(j, 0))\n\n if not str(worksheet.cell_value(0, 1)).strip(' ').find('interface-list-tested'):\n if j >0 :\n interface_list_tested.append(worksheet.cell_value(j, 1))\n\n interface_data.append(interface_list)\n interface_data.append(interface_list_tested)\n return interface_data\n else:\n print(f'The given sheet_name:{sheet_name} does not exist!!!')\n else:\n print(f'The given path:{path} does not exist!!!')", "title": "" }, { "docid": "489a3b257cd3640d90a9a00aa1af4e9c", "score": "0.58480847", "text": "def load_workbook(self):\n open_file = (QFileDialog.getOpenFileName(None, \"Select a File\", os.path.expanduser(\"~\")))\n print((open_file[0]))\n if open_file != (\"\", \"\"): # User can press cancel or x button. 
QFileDialog.getOpenFileName returns tuple()\n self.line_edit.setText(str(open_file[0]))\n self.open_excel = load_workbook(filename=str(open_file[0]))\n self.sheet = self.open_excel['CAN_Sheet']\n self.generate_button.clicked.connect(self.excel_extract_data)\n else:\n self.label_log.setText(\"Log: Input file is not loaded\")", "title": "" }, { "docid": "5c8e4085405331d66e929ea7610cde7c", "score": "0.58176404", "text": "def open_excel(file_name='weather.xlsx'):\n try:\n wb = xl.get_book(file_name=file_name)\n except FileNotFoundError:\n wb = xl.Book()\n setup_excel(wb)\n save_workbook(wb)\n return wb", "title": "" }, { "docid": "e159fd8707f2587e789ed3d05417b8b0", "score": "0.5805986", "text": "def read_sample_sheet(in_file):\n return pd.read_csv(in_file, header = line_with(\"[Data]\", in_file))", "title": "" }, { "docid": "1fa99b3568fa164ccf427158d99e7ff2", "score": "0.5795465", "text": "def extract_excel(source, log_queue):\n logger = logging.getLogger()\n\n # check that the files key exists within the configuration\n if not 'files' in source:\n logger.warn('No files to process.')\n\n file_iterator = 1\n\n for file_configuration in source['files']: \n excel = ExcelTarget(file_configuration)\n \n logger.debug('primary_key: {0}'.format(excel.get_primary_key()))\n\n df = pd.DataFrame()\n\n # create destination target\n destination_target = SqlServerTarget(\n server=destination_server, \n database=destination_database, \n table=create_stg_table_name(\n excel.get_process_name().upper(),\n excel.get_vendor_name().upper() \n ), \n schema=destination_schema,\n psa_batch_size=excel.get_psa_batch_size()\n )\n\n excel_files = [] \n\n # EXCEL_EXTENSIONS: xls, xlsx, xlsm, xlsb, and odf\n for ext in EXCEL_EXTENSIONS:\n for file in glob.glob(os.path.join(excel.get_path(), \"*.{0}\".format(ext))):\n excel_files.append(file)\n\n for file in excel_files:\n\n df = pd.read_excel(\n io=os.path.join(excel.get_path(), file), \n dtype=str,\n skiprows=excel.get_skip_rows()\n )\n\n # negative skip rows \n if excel.get_skip_rows() < 0:\n df = df[:(len(df) + excel.get_skip_rows())]\n\n #TODO: Check to make sure file will load to table, file schema matches table schema\n logger.info('Processing file {0}'.format(file))\n \n # check stage/psa tables exist\n if not check_stg_table_exists(\n process_name=excel.get_process_name(), \n vendor_name=excel.get_vendor_name(), \n destination_target=destination_target\n ) or not check_psa_table_exists(\n process_name=excel.get_process_name(), \n vendor_name=excel.get_vendor_name(), \n destination_target=destination_target\n ):\n \n logger.info('Destination tables missing...')\n \n # create dataframe of file columns\n data = {\n 'columnd_id' : range(2, len(df.columns) + 2), \n 'column_name' : df.columns, \n 'data_type' : 'varchar', \n 'length' : 0,\n 'precision' : 0, \n 'scale' : 0 }\n column_df = pd.DataFrame(data, dtype=str)\n\n # add metadata \n metadata = {\n 'columnd_id' : 1, \n 'column_name' : 'HPXR_FILE_NAME', \n 'data_type' : 'varchar', \n 'length' : 0,\n 'precision' : 0, \n 'scale' : 0 }\n metadata_df = pd.DataFrame(metadata, index=[0], dtype=str)\n\n column_df = metadata_df.append(column_df, ignore_index=True)\n \n if excel.get_primary_key():\n primary_key_df = pd.DataFrame(dtype=str)\n \n for index, key in enumerate(primary_keys):\n current_key_df = pd.DataFrame({ 'column_id' : (index + 1), 'column_name' : key['column_name'], 'data_type' : key['data_type'], 'length' : key['length'], 'precision' : 0, 'scale' : 0}, index=[0], dtype=str)\n primary_key_df = 
primary_key_df.append(current_key_df, ignore_index=True)\n\n # update the columns dataframe with metadata from primary key\n current_key_in_columns = column_df.loc[column_df['column_name'] == key['column_name']]\n current_key_in_columns['data_type'] = key['data_type']\n current_key_in_columns['length'] = key['length']\n\n column_df.update(current_key_in_columns)\n\n primary_keys = primary_key_df.to_json(orient='records')\n\n\n columns = column_df.to_json(orient='records')\n \n logger.debug(columns)\n logger.debug(excel.get_primary_key())\n\n create_stg_table(\n process_name=excel.get_process_name(), \n vendor_name=excel.get_vendor_name(), \n destination_target=destination_target, \n columns=columns, \n primary_keys=excel.get_primary_key()\n )\n\n create_psa_table(\n process_name=excel.get_process_name(), \n vendor_name=excel.get_vendor_name(), \n destination_target=destination_target, \n columns=columns, \n primary_keys=excel.get_primary_key()\n )\n\n # truncate stage\n truncate_stage_table(process_name=excel.get_process_name(), vendor_name=excel.get_vendor_name(), destination_target=destination_target)\n\n # process table metadata\n if not check_table_metadata(\n process_name=excel.get_process_name(), \n vendor_name=excel.get_vendor_name(), \n destination_target=destination_target\n ):\n # create new record and return logical id\n logger.info('Creating table metadata record for {0}.'.format(create_stg_table_name(process_name=excel.get_process_name(), vendor_name=excel.get_vendor_name())))\n destination_target.set_table_metadata_logical_id(\n insert_table_metadata_logical_id(\n process_name=excel.get_process_name(), \n vendor_name=excel.get_vendor_name(), \n destination_target=destination_target\n )\n )\n else: \n # get logical id for existing record\n logger.info('Get table metadata record for {0}.'.format(create_stg_table_name(process_name=excel.get_process_name(), vendor_name=excel.get_vendor_name())))\n update_table_metadata(\n process_name=excel.get_process_name(),\n vendor_name=excel.get_vendor_name(),\n destination_target=destination_target\n )\n\n destination_target.set_table_metadata_logical_id(\n get_table_metadata_logical_id(\n process_name=excel.get_process_name(), \n vendor_name=excel.get_vendor_name(), \n destination_target=destination_target\n )\n )\n\n file_iterator += 1\n\n logger.info('Writing records to table {0}'.format(destination_target.get_table_name()))\n\n df['HPXR_FILE_NAME'] = os.path.basename(file)\n \n df.to_sql(name=destination_target.get_table_name(), index=False, schema=destination_target.get_schema_name(), if_exists='append', con=destination_target.create_engine())\n\n if excel.get_is_delete_file():\n delete_file(os.path.join(excel.get_path(), file))\n \n if excel.get_is_archive_file():\n archive_file(\n os.path.join(excel.get_path(), file), \n os.path.join(excel.get_archive_file_path(), file)\n )\n delete_file(os.path.join(excel.get_path(), file))\n\n load_psa(\n destination_target=destination_target, \n logical_id=destination_target.get_table_metadata_logical_id()\n )", "title": "" }, { "docid": "6df9eb1b8fa4d506fd81618b493c655d", "score": "0.578345", "text": "def get_xlsx():\n if os.path.exists(RAWFILENAME):\n return\n resp = requests.get(URL)\n with open(RAWFILENAME, 'wb') as output:\n output.write(resp.content)", "title": "" }, { "docid": "e83260045475bb8b946fd02cd67af79c", "score": "0.5766377", "text": "def read(description, path, file_type='excel', separator=',', skip_rows=0, use_cols=None, sheet_name=0):\n df_target = None\n if 
validate_path(path):\n if file_type.lower() == 'csv':\n # Read csv based file.\n df_target = pd.read_csv(path, sep=separator, skiprows=skip_rows, usecols=use_cols)\n elif file_type.lower() == 'excel':\n # Read Excel based file.\n if len((pd.ExcelFile(path)).sheet_names) > 1:\n df_target = pd.read_excel(path, skiprows=skip_rows, sheet_name=sheet_name)\n else:\n df_target = pd.read_excel(path, skiprows=skip_rows)\n\n logging.info(f'{description} records <{len(df_target.index)}> were read from <{path}>')\n return df_target", "title": "" }, { "docid": "f86e391a60b9f70c0e25ba7644829e4b", "score": "0.5727123", "text": "def import_data(self, file_name):\r\n excel_file = xlrd.open_workbook(file_name)\r\n if excel_file is None:\r\n return False\r\n\r\n \"\"\" TODO error, range checking\"\"\"\r\n summary_sheet = excel_file.sheet_by_name('Summary')\r\n self.title = summary_sheet.cell(6,3).value\r\n self.date = xlrd.xldate_as_tuple(summary_sheet.cell(8,3).value,excel_file.datemode)\r\n self.analyst = summary_sheet.cell(10,3)\r\n self.description = summary_sheet.cell(12,3)\r\n self.POD_Type = summary_sheet.cell(19,3)\r\n self.num_opps = summary_sheet.cell(20,3)\r\n self.num_cracks = summary_sheet.cell(21,3)\r\n self.num_false = summary_sheet.cell(22,3)\r\n self.is_data_loaded = True\r\n self.file_name = file_name\r\n\r\n data_sheet = excel_file.sheet_by_name('Sheet1')\r\n data_rows = data_sheet.nrows\r\n self.input_sizes = np.ndarray(data_rows, dtype = float)\r\n self.input_results = np.ndarray(data_rows, dtype = float)\r\n self.title_sizes = data_sheet.cell(0,self.col_sizes-1)\r\n self.title_results = data_sheet.cell(0,self.col_results-1)\r\n\r\n \"\"\" wacky indexing... xlrd uses zero indexing\r\n so that means input_sizes[0] is at row 2 in Excel address which is addressed as row 1 by xlrd\r\n and there are data_rows - 1 data points cause first row is title\"\"\"\r\n for i in range(0,data_rows-1,1):\r\n self.input_sizes[i] = data_sheet.cell(i+1, self.col_sizes-1).value\r\n self.input_results[i] = data_sheet.cell(i+1, self.col_results-1).value\r\n\r\n # Now we have data, might as well get the models\r\n self.load_models()\r\n return True", "title": "" }, { "docid": "545bb2da1e766528b73bc700e150c4e5", "score": "0.5720713", "text": "def process_file(self):\n self.load_keywords()\n if (Configs.input_ignore_header): self.startFrom = 1\n if (re.compile(r'(?i).xlsx?$').search(Configs.input_file) == None):\n self.process_file_csv()\n else:\n self.process_file_excel()", "title": "" }, { "docid": "9b306cc5e1826bfacdd88da3e11356ca", "score": "0.5718505", "text": "def get_book(path_to_workbook):\n endChars = path_to_workbook.split(\".\")[-1] \n if os.path.isfile(path_to_workbook):\n if endChars in [\"xlsx\", \"xls\"]:\n wbook = xlrd.open_workbook(path_to_workbook, encoding_override=\"UTF-8\")\n return wbook\n else:\n raise Exception(\"Expected a xls or xlsx file!\")", "title": "" }, { "docid": "85881bede640ce5460255add2ac1cb31", "score": "0.56909865", "text": "def import_xlsx_metadata(url, filename):\n global path \n\n df = pd.read_excel( url, engine=\"openpyxl\", usecols=column_check, header=0 )\n\n return df.to_json()", "title": "" }, { "docid": "3272326084f7060106ab90b8d40265f8", "score": "0.5684714", "text": "def read_triggerfile():", "title": "" }, { "docid": "f73929cb0c7bcec4f5c2e31d158128bd", "score": "0.56704795", "text": "def select_excel_file(self):\n self.driver.scroll(\"xls_directory\", direction=\"down\")\n self.driver.wait_for_object(\"xls_directory\", timeout=5)\n 
self.driver.click(\"xls_directory\", change_check={\"wait_obj\": \"xls_directory\", \"invisible\": True})\n self.driver.wait_for_object(\"excel_file\", timeout=5)\n self.driver.click(\"excel_file\", change_check={\"wait_obj\": \"excel_file\", \"invisible\": True})", "title": "" }, { "docid": "f8d96e1a1441e34a282ac2e266747b98", "score": "0.56563354", "text": "def get_ex_target(input_file, model):\n with open(input_file, newline='', encoding='utf-8-sig') as csvfile:\n reader = csv.DictReader(csvfile, dialect='excel')\n #exchanges_targets = []\n for row in reader:\n if row['production'] == 'yes':\n metab = row['metabolite_BiGG_ID']\n ex = model.reactions.get_by_id('EX_'+metab+'_e')\n #exchanges_targets.append(ex)\n return ex", "title": "" }, { "docid": "25d4b1eceff8c1f2b8a49ae9892810c1", "score": "0.56495184", "text": "def import_excel(pth_load_est, sheet_name='Sheet1'):\n\n\t# Read the raw excel worksheet\n\tdf_raw = pd.read_excel(\n\t\tio=pth_load_est,\t\t\t# Path to worksheet\n\t\tsheet_name=sheet_name,\t\t# Name of worksheet to import\n\t\t# skiprows=2,\t\t\t\t\t# Skip first 2 rows since they do not contain anything useful\n\t\theader=0\n\t)\n\n\t# Remove any special characters from the column names (i.e. new line characters)\n\tdf_raw.columns = df_raw.columns.str.replace('\\n', '')\n\t#df_raw.reset_index(drop=True, inplace=True)\n\tdf_raw.set_index(df_raw.columns[0], inplace=True)\n\treturn df_raw", "title": "" }, { "docid": "c6edc0bf54fd81db4766b8df3d82dcd2", "score": "0.564593", "text": "def get(self): \n rp = self._eLABJournalObject__api._request(\"/api/v1/experiments/sections/\"+urllib.parse.quote(str(self.id()))+\"/excel\", \"get\", {}, stream=True)\n wb = openpyxl.load_workbook(BytesIO(rp.content))\n return(wb)", "title": "" }, { "docid": "fa7abad2f7ecc626b8fe144d4b63d841", "score": "0.5632193", "text": "def import_excel(\n xlxs_file_nme, sheet_num ,col_nme = '' , sheet_idx=False, file_loc=True):\n if file_loc:\n file_path = os.path.abspath(xlxs_file_nme)\n xlsx = pd.ExcelFile(file_path)\n sheet1 = xlsx.parse(sheet_num)\n if sheet_idx:\n sheet1.index = sheet1[col_nme]\n new_df_beach_0 = sheet1.iloc[:]\n new_df_beach_0.fillna(value= 'NaN', inplace=True)\n return new_df_beach_0\n else:\n new_df_beach_0 = sheet1.iloc[:]\n new_df_beach_0.reset_index(inplace= True)\n new_df_beach_0.drop(['index'], axis=1, inplace = True)\n new_df_beach_0.fillna(value= 'NaN', inplace=True)\n return new_df_beach_0\n else:\n xlsx = pd.ExcelFile(xlxs_file_nme)\n sheet1 = xlsx.parse(sheet_num)\n if sheet_idx:\n sheet1.index = sheet1[col_nme]\n new_df_beach_0 = sheet1.iloc[:]\n new_df_beach_0.fillna(value= 'NaN', inplace=True)\n return new_df_beach_0\n else:\n new_df_beach_0 = sheet1.iloc[:]\n new_df_beach_0.reset_index(inplace= True)\n new_df_beach_0.drop(['index'], axis=1, inplace = True)\n new_df_beach_0.fillna(value= 'NaN', inplace=True)\n return new_df_beach_0", "title": "" }, { "docid": "608f130b5e580dd455e6a06fd9475958", "score": "0.55839115", "text": "def read_file(filename):\n skip_columns = ['Респуб-лика бўйича ўртача', 'Вилоят бўйича ўртача', 'Шаҳар бўйича ўртача']\n use_rows = [\n 'Мол гўшти', 'Сут, 1 литр', 'Тухум, 10 донаси',\n 'Картошка', 'Ўсимлик ёғи', 'Гуруч', 'Буғдой уни', 'Шакар'\n ]\n excel_file = ExcelFile(filename)\n df_list = []\n for sheet in excel_file.sheet_names:\n if sheet not in ['laroux', '1700']:\n df_sheet = read_sheet(excel_file, sheet, skip_columns, use_rows)\n if not df_sheet.empty:\n df_list.append(df_sheet)\n else:\n return DataFrame()\n\n df_merged = concat(df_list)\n 
df_merged = df_merged.astype('float64', copy=True, errors='raise')\n temp_index = df_merged.index.to_series().replace(\n {re.compile(r'(.*) \\*'): r'\\1', re.compile(r'(.*)\\*'): r'\\1'}, regex=True\n )\n df_merged.index = temp_index.replace(convert_dict)\n df_merged = df_merged[df_merged.index.notnull()]\n return df_merged", "title": "" }, { "docid": "ca759595cec5ac8d3d8d0bcea6147da1", "score": "0.558055", "text": "def read_file_data(self):\n\n if self.custom_reconciliation_filename:\n self.reconciliation_filename = self.custom_reconciliation_filename\n else:\n self.reconciliation_filename = self.reconciliation_filename_regex.format(\n self.billing_month.strftime('%Y%m'),\n self.invoice_number\n )\n\n reconciliation_file_path = os.path.join(self.reconciliation_files_root,\n self.reconciliation_filename +\n self.reconciliation_filename_extension)\n\n \"\"\" Check if file is exists \"\"\"\n if not os.path.exists(reconciliation_file_path):\n raise FileNotFoundError('Reconciliation file not found in : %s' % reconciliation_file_path)\n\n self.reconciliation_data = pandas.read_excel(reconciliation_file_path,\n sheetname=self.reconciliation_filename,\n header=0).fillna('')\n\n def convert_dtype(column_name, dtype):\n \"\"\" Data type converter \"\"\"\n if column_name not in self.reconciliation_data.columns:\n return False\n if dtype == 'datetime':\n self.reconciliation_data[column_name] = pandas.to_datetime(self.reconciliation_data[column_name])\n\n \"\"\" Refactoring data types \"\"\"\n dtypes = {\n 'ChargeStartDate': 'datetime',\n 'ChargeEndDate': 'datetime'\n }\n\n for column, d_type in dtypes.items():\n convert_dtype(column_name=column, dtype=d_type)\n\n return True", "title": "" }, { "docid": "af54bd3b983aa53026bf73dbebcab7ff", "score": "0.55750084", "text": "def read_excel(filename, sheet=None):\n\n from openpyxl import load_workbook # type: ignore\n wb = load_workbook(filename)\n if sheet is None:\n ws = wb.active\n else:\n ws = wb.get_sheet_by_name(sheet)\n table = [[cell.value or \"\" for cell in row] for row in ws.rows]\n return from_table(table)", "title": "" }, { "docid": "23d7cc11ec7f0bd067421bd8d1ed5406", "score": "0.55582947", "text": "def import_sample(source_excel_path, dump_file_path, hw_models_2_id = None, delete_only_healthy_days = True):\n\n decompo = re.search(r\"([\\S]*)(sample[0-9_]*)([\\S]*)\",dump_file_path).groups()\n prefix_path = decompo[0] \n name = decompo[1]\n extension = decompo[2]\n\n df = None\n dump_file_path = prefix_path+name + ('_sick_only' if delete_only_healthy_days else '_full') + '.pk'\n\n if(os.path.isfile(dump_file_path)):\n print('Retrieving from '+dump_file_path)\n df = pd.read_pickle(dump_file_path)\n else:\n if(os.path.isfile(source_excel_path)):\n # read it from the source\n print('Reading '+source_excel_path)\n df = pd.read_excel(source_excel_path)\n \n print('Performing some transformation')\n # we lower case the column names\n df.columns = map(str.lower, df.columns)\n original_cols = list(df.columns)\n\n # transforming dates to datetime and adding week day\n df['day_0'] = pd.to_datetime(df['day_0'],dayfirst = True)\n df['weekday'] = df['day_0'].apply(lambda x : x.weekday())\n\n # converting the hardware model to an ID\n translator = hw_models_2_id if hw_models_2_id else { 'CONNECT BOX CH7465LG COMPAL': 0,\n 'UBEE EVM3206 (ED 3.0) - CPE': 1,\n 'UBEE EVM3236 (ED 3.0) - CPE': 2,\n 'WLAN MODEM EVW3226 - CPE': 3,\n 'WLAN MODEM TC7200 - CPE': 4,\n 'WLAN MODEM TC7200 V2 - CPE': 5,\n 'WLAN MODEM TWG870 - CPE': 6}\n df['hardware_model'] = 
df['hardware_model'].map(translator)\n\n # transforming categories\n df['cmts'] = df['cmts'].astype('category')\n df['service_group'] = df['service_group'].astype('category')\n df['milestone_name'] = df['milestone_name'].astype('category')\n df['weekday'] = df['weekday'].astype('category') \n\n # we reorganise the columns\n new_cols = ['weekday'] + original_cols\n df = df[new_cols]\n\n if(delete_only_healthy_days):\n # we only keep the days during which there are at least 1 sick CPE\n tmp = df[['day_0']]\n tmp['sick'] = convert_to_binary_labels(df['milestone_name'])\n sick_per_day = tmp[['day_0','sick']].groupby(['day_0']).sum()\n no_entirely_healthy_day = sick_per_day[sick_per_day['sick'] > 0].index\n df = df[df['day_0'].isin(no_entirely_healthy_day)]\n\n # we serialize it\n\n print('Saving to ' + dump_file_path)\n df.to_pickle(dump_file_path)\n\n else:\n print('The source file cannot be found : '+source_excel_path)\n return df\n\n n_total,dimensions = df.shape\n n_sick = df['milestone_name'].count()\n n_healthy = n_total-n_sick\n print('The sample is composed of : {} vectors of dimension {}\\n\\tn_sick\\t\\t= {:>6}\\n\\tn_healthy\\t= {:>6}'.format(n_total,dimensions,n_sick,n_healthy))\n return df", "title": "" }, { "docid": "0eaa51d809a8dbc3fc7938c5fb208ab2", "score": "0.5550547", "text": "def read_network_config_excel(self):\n # Testing in progress, should replace the CSV Method.\n try:\n all_switches = pd.read_excel(r\"data/stadler/BU_3794636.xlsx\", sheet_name=\"BMU-A\")\n switch_information = all_switches[\n all_switches[\"Position\"].str.contains(self.switch, na=False)\n ]\n with pd.option_context('display.max_rows', None, 'display.max_columns',\n None): # more options can be specified also\n print(switch_information)\n except FileNotFoundError:\n print(f\"The selected file {self.config} could not be found.\")\n except ValueError:\n print(f\"There is an empty value that can't be processed.\")", "title": "" }, { "docid": "0adb8e29557bc52cc989ecc3b16b3fdf", "score": "0.55332494", "text": "def process_excel(file_path: str):\n if not os.path.exists(file_path):\n raise Exception(f\"no file found at location {file_path}\")\n excel_file = pd.ExcelFile(io=file_path)\n master_df = pd.DataFrame()\n json = []\n for sheet_name in excel_file.sheet_names:\n json.append(process_data(sheet_name=sheet_name, df=excel_file.parse(sheet_name)))\n return (json)", "title": "" }, { "docid": "a60ac646f7d94ac64b93d8bb36ad9a3c", "score": "0.5527913", "text": "def load_xlsx(file_path):\n\n # Data on excel to DataFrame\n warnings.simplefilter(\"ignore\")\n wb_obj = openpyxl.load_workbook(file_path)\n warnings.simplefilter(\"default\")\n sheet = wb_obj.active\n data_dict = {i: c[0] for i, c in enumerate(zip(sheet.values))}\n data = pd.DataFrame.from_dict(data_dict, orient='index')\n\n # Find the entry of the first entry\n ix_of_first = index_of_first_spend(data)\n # Find the card num which generated the movements\n lst_4_digits = try_finding_card_num(data)\n \n # Select data from the first entry onwards and only the first 3 columns.\n # The first three columns are: `date`, `description` and `amount` respectively.\n data = data.iloc[\n ix_of_first:,:3\n ].copy().reset_index(drop=True)\n \n # Date column as datetime\n data.iloc[:,0] = data.iloc[:,0].apply(\n lambda x: parse_date(x)[1]\n )\n \n # Amount column as float\n data.iloc[:,2] = data.iloc[:,2].apply(\n lambda x: parse_amount(x)\n )\n\n # Drop nans\n data.dropna(inplace=True)\n \n return data, lst_4_digits", "title": "" }, { "docid": 
"507f1b02e481de8d4f7647e01069aedf", "score": "0.5527462", "text": "def load_xls(file_path):\n \n # Data on excel to DataFrame\n file = pd.read_html(file_path)\n data = file[1]\n \n # Find the entry of the first entry\n ix_of_first = index_of_first_spend(data)\n # Find the card num which generated the movements\n lst_4_digits = try_finding_card_num(data)\n \n # Select data from the first entry onwards and only the [0,2,3] columns.\n # The [0,2,3] columns are: `date`, `description` and `amount` respectively.\n data = data.iloc[\n ix_of_first:,[0,2,3]\n ].copy().reset_index(drop=True)\n \n # Date columns as datetime\n data.iloc[:,0] = data.iloc[:,0].apply(\n lambda x: parse_date(x)[1]\n )\n \n # Amount column as float\n data.iloc[:,2] = data.iloc[:,2].apply(\n lambda x: parse_amount(x)\n )\n\n # Drop nans\n data.dropna(inplace=True)\n\n data.rename(columns={2:1,3:2}, inplace=True)\n \n return data, lst_4_digits", "title": "" }, { "docid": "52ba23963f06f560e5e70cdba55fc22e", "score": "0.55263925", "text": "def data_from_xl_sheet(path, file_name, sheet_name, unmerge = True, **kwargs):\n\n full_path = os.path.join(path, file_name)\n\n try:\n data = __unmerge_xl(full_path, sheet_name) if unmerge \\\n else pe.get_array(file_name = full_path, sheet_name = sheet_name)\n except:\n return sheet_name + ' - Not Found'\n\n data = clean.clean_data(data, trim_strings = True)\n\n return data", "title": "" }, { "docid": "351b90aeec3b3cd60d50ed1ee106fbef", "score": "0.550862", "text": "def excel_read(ctx):\n\n session = Session()\n session.query(DenysOrders).delete()\n session.query(DenysClients).delete()\n\n loc = '../task/ПримерМСКонтрагентыSQLAzure2.xlsx'\n\n wb = xlrd.open_workbook(loc)\n orders_sheet = wb.sheet_by_index(0)\n counterparty_sheet = wb.sheet_by_index(1)\n\n clients_list = []\n\n for i in range(1, counterparty_sheet.nrows):\n values = counterparty_sheet.row_values(i)\n clients_list.append(\n DenysClients(\n id=values[0],\n name=values[1],\n )\n )\n session.add_all(clients_list)\n session.commit()\n\n orders_list = []\n for i in range(1, orders_sheet.nrows):\n values = orders_sheet.row_values(i)\n orders_list.append(\n DenysOrders(\n id=values[0],\n name=values[1],\n description=values[2],\n moment=values[3],\n sum=values[4],\n counterparty_id=values[5],\n )\n )\n session.add_all(orders_list)\n session.commit()", "title": "" }, { "docid": "6684d2a9879b383efbb0ba41449b32ed", "score": "0.55018765", "text": "def excel_dict(xlsx_url):\r\n download_page = request(xlsx_url).text\r\n resourceId_pattern = '\"id\":(.*?),\"linkType\"'\r\n resource = re.search(resourceId_pattern, download_page)\r\n resourceDownloadPermeters = 'data: \"(.*?)&.*&taskId=(.*?)&iscomplete'\r\n postPermeters = re.search(resourceDownloadPermeters, download_page)\r\n # print('正在下载excel文件...')\r\n download_url = '/checkResourceDownload.do' #?{}'.format(postPermeters[1])\r\n data = {\r\n 'MIME Type': 'application/x-www-form-urlencoded',\r\n postPermeters[1]: '',\r\n 'resourceId': resource.group(1),\r\n 'downloadFrom': 1,\r\n 'isMobile': 'false',\r\n 'taskId': postPermeters[2],\r\n 'iscomplete': 'false',\r\n 'history': 'false'\r\n }\r\n xls_resource = request(download_url, data) # content字节码, stream=True\r\n # print(xls_resource.json())\r\n if 'status' in xls_resource.json() and xls_resource.json()['status'] == 'indirect': # 'status' in xls_resource.json and\r\n download_url = '/filePreviewServlet?indirect=true&resourceId={}'.format(resource.group(1))\r\n xls_resource = request(download_url)\r\n # print('资源下载成功,正在解压...')\r\n import 
rarfile, zipfile\r\n try:\r\n zf = zipfile.ZipFile(io.BytesIO(xls_resource.content))\r\n archive = zf\r\n except zipfile.error as ze:\r\n try:\r\n rf = rarfile.RarFile(io.BytesIO(xls_resource.content))\r\n archive = rf\r\n except rarfile.error as rfe:\r\n return None\r\n for file in archive.infolist():\r\n if file.filename.endswith('.xls'):\r\n with archive.open(file) as fd:\r\n import xlrd\r\n xls = xlrd.open_workbook(file_contents=fd.read())\r\n return xls2dict(xls)", "title": "" }, { "docid": "4e8692675fa9d0a72a6e5e710ed613f2", "score": "0.550016", "text": "def panda_read_excel(path, sheet='Sheet1', **kwargs):\n try:\n import pandas as pd\n except ImportError:\n raise ImportError('Pandas is required for Excel input.')\n\n indf = pd.read_excel(path, sheet, **kwargs)\n return panda_process(indf)", "title": "" }, { "docid": "fba9d122975185b1870c72266fd9fe6b", "score": "0.54920435", "text": "def import_dataframe_excel(obj, filename, column, sheet_name, index_col, data_type,\n create_new, overwrite,\n network_id, scenario_id, attribute_id, user_id):\n\n client = get_logged_in_client(obj, user_id=user_id)\n\n\n if filename.endswith('csv'):\n dataframe = pandas.read_csv(filename, index_col=index_col, parse_dates=True)\n elif filename.endswith('xlsx') or filename.endswith('xls'):\n dataframe = pandas.read_excel(filename, sheet_name=sheet_name, index_col=index_col, parse_dates=True)\n if isinstance(dataframe, dict):\n dataframe = list(dataframe.values())[0]\n else:\n raise Exception(\"Unrecognised file extention. Must be csv or xlsx.\")\n\n data.import_dataframe(client, dataframe, network_id, scenario_id, attribute_id, column,\n create_new=create_new, data_type=data_type, overwrite=overwrite)", "title": "" }, { "docid": "71178cf8f50a6f0d87f0d0fab1fdc832", "score": "0.54861134", "text": "def is_excel_file_exist():\n return os.path.exists(DATA_EXCEL_FILE)", "title": "" }, { "docid": "3a23b2d89a2bca70be3d411f2bc61cb9", "score": "0.5472113", "text": "def __readSheet(self, name, rowstart=0):\n workbook = xlrd.open_workbook(filename=self.__path, on_demand=True)\n sheet = workbook.sheet_by_name(name)\n for rownum in range(rowstart, sheet.nrows):\n vals=[]\n for cell in sheet.row(rownum):\n if cell.ctype in [0,6]: #blank string\n vals.append('')\n elif cell.ctype == 2: #number\n try: \n val = float(cell.value)\n vals.append(str(int(val)))\n except: vals.append(str(cell.ctype))\n else: #text,date,bool,error\n vals.append(str(cell.value))\n yield vals", "title": "" }, { "docid": "ac63824d6b4739b7874b3d7c1224b301", "score": "0.5468466", "text": "def xlsreader(filename,worksheet=0):\n\n book=xlrd.open_workbook(filename)\n try:\n worksheet=int(worksheet)\n except:\n pass\n if isinstance(worksheet,int):\n try:\n sheet=book.sheet_by_index(worksheet)\n except IndexError:\n die('%s: worksheet %d not found in %s.'%(progname,worksheet,filename))\n else:\n try:\n sheet=book.sheet_by_name(str(worksheet))\n except xlrd.biffh.XLRDError:\n die('%s: worksheet %r not found in %s.'%(progname,str(worksheet),filename))\n r=0\n while r<sheet.nrows:\n yield [x.value for x in sheet.row(r)]\n r+=1", "title": "" }, { "docid": "501c014da0d6604ca78743c9eec8157f", "score": "0.5465446", "text": "def XlsxGetCellValueByName(file, sheet, index):", "title": "" }, { "docid": "39690f9293ed67e3cb38e398f131a74f", "score": "0.5462054", "text": "def ingest_data(self):\n workbook = get_workbook(Path(self.source))\n sheets = workbook.sheets()\n table = [(index, sheet.name) for index, sheet in enumerate(sheets)]\n typer.echo(\n tabulate(table, 
headers=[\"Sheet ID\", \"Sheet Name\"], tablefmt=\"pretty\")\n )\n sheet_ids = typer.prompt(text=\"Enter the Sheet IDs\", default=\"all\")\n if sheet_ids == \"all\":\n sheet_ids = range(len(sheets))\n else:\n sheet_ids = [int(sheet_id.strip()) for sheet_id in sheet_ids.split(\",\")]\n invalid_sheet_ids = [\n str(sheet_id)\n for sheet_id in sheet_ids\n if sheet_id not in range(len(sheets))\n ]\n if invalid_sheet_ids:\n typer.echo(\n typer.style(\n f\"[INFO] Skipping invalid sheet IDs: {', '.join(invalid_sheet_ids)}\",\n fg=typer.colors.YELLOW,\n )\n )\n\n for index, sheet in enumerate(sheets):\n if index not in sheet_ids:\n continue\n header_row = find_header_row(\n sheet=sheet, header_start=self.config[\"header_start\"]\n )\n if not header_row:\n continue\n header = sheet.row_values(header_row)\n header = cleaned_value(\n value=header, replace_dict=self.config[\"replace_dict\"][\"headers\"]\n )\n header_dict = {idx: value for idx, value in enumerate(header)}\n for row_number in range(header_row + 1, sheet.nrows):\n row = sheet.row_values(row_number)\n if ignore_row(row):\n continue\n else:\n row_object = {\n header_dict[key]: value for key, value in enumerate(row)\n }\n update_dict = {}\n for key, value in row_object.items():\n if \"/\" in key:\n keys = key.split(\"/\")\n values = value.split(\"/\")\n for idx, k in enumerate(keys):\n if len(values) > idx:\n update_dict[k] = values[idx]\n else:\n update_dict[k] = None\n row_object = {**row_object, **update_dict}\n if row_object:\n row_object = {\n key: cleaned_value(\n value,\n replace_dict=self.config[\"replace_dict\"][\"values\"],\n )\n for key, value in row_object.items()\n }\n self.data.append(row_object)", "title": "" }, { "docid": "befdc2f87eee61adc682d66bd32bd617", "score": "0.54558825", "text": "def get_data_from_excel(fileFullPath, sheetName='assets'):\n fieldMap = format_field_map()\n dataSet = []\n try:\n wb = load_workbook(filename=fileFullPath)\n print('Available sheets are %s' % ','.join(wb.get_sheet_names()))\n sheet = wb[sheetName]\n head = map(lambda x: x.value, sheet.rows[0])\n for row in sheet.rows[1:]:\n data_row = {}\n for i, cell in enumerate(row):\n if head[i] == 'IMEI':\n data_row['imei'] = ''\n if head[i] == u'版本号':\n data_row['version'] = ''\n if cell.value:\n if fieldMap[head[i]] == 'asset_id':\n data_row[fieldMap[head[i]]] = str(cell.value).strip().upper()\n else:\n data_row[fieldMap[head[i]]] = str(cell.value).strip()\n else:\n data_row[fieldMap[head[i]]] = ''\n data_row['store_state'] = '库存'\n dataSet.append(data_row)\n return dataSet\n except Exception, _ex:\n print(\"error occured while read data from excel : %s\" % str(_ex))\n return dataSet", "title": "" }, { "docid": "8f9fa76d8ecf1537131ec8a3d4c52020", "score": "0.5454214", "text": "def leer_excel(filename):\n if not os.path.isfile(filename):\n raise FileNotFoundError\n if not filename.endswith('.xlsx'):\n raise ValueError\n data = pd.read_excel(filename)\n data = data.sort_values('timestamp', ascending=True)\n data.set_index('timestamp', inplace=True)\n return data", "title": "" }, { "docid": "ef917a2fc16a460fb46f3eebd9244cfe", "score": "0.54490995", "text": "def __init__(self):\n self.df = pd.read_excel('subs.xlsx')", "title": "" }, { "docid": "3d731cf01554f0e350931e56c172e0b9", "score": "0.54470813", "text": "def parse_spreadsheet(file_name):\n ss = Spreadsheet()\n with open(file_name, 'rb') as f:\n result = ss.parse_txt_file(f)\n\n return(result)", "title": "" }, { "docid": "fdd08453f86a20ef070220500fa254e5", "score": "0.54183865", "text": "def 
read_sheet(excel_file, sheet_name, skip_columns, use_rows):\n df = excel_file.parse(\n sheet_name=sheet_name, skiprows=4,\n skipfooter=2, header=None)\n if df.at[0, 0] is nan:\n if df.at[1, 0] == 'Маҳсулот номи':\n skip_rows = 5\n else:\n skip_rows = 3\n df = excel_file.parse(\n sheet_name=sheet_name, skiprows=skip_rows,\n skipfooter=2, header=None)\n\n df = df.replace('Туманлар', '')\n df.iloc[0:2] = df.iloc[0:2].fillna('')\n df.columns = df.iloc[0:2].apply(\n lambda column: ''.join([row for row in column]), axis=0)\n df = df.drop(skip_columns, axis=1, errors='ignore')\n df = df.iloc[2:]\n try:\n df.set_index('Маҳсулот номи', inplace=True)\n df = df.loc[use_rows, :]\n df = df.transpose()\n return df\n except Exception as excp:\n create_messagebox(f'{sheet_name}: ' + str(excp))\n return DataFrame()", "title": "" }, { "docid": "f40b2baf9385565c89154a9e02184620", "score": "0.5411327", "text": "def load_sheet(filename):\n res = {}\n\n oh = open(filename, \"rU\")\n for line in oh:\n ll = line.strip().split(\",\")\n if ll[0] != \"FCID\":\n sampleID = ll[2]\n index = ll[4].replace(\"-\", \"\").upper()\n res[sampleID] = index\n config.log.info(\"Loaded sample sheet '%s'\" % filename)\n return(res)", "title": "" }, { "docid": "d07d4275fad592fd0498e1aaa1459f11", "score": "0.5410949", "text": "def read_excel(file_path, sheet_name, columns, header_row=0, restrictions=None, end_row=None):\n workbook = xlrd.open_workbook(file_path)\n worksheet = workbook.sheet_by_name(sheet_name)\n\n data = []\n keys = [v.value for v in worksheet.row(header_row)]\n end_row = worksheet.nrows if not end_row else end_row\n for row_number in range(end_row):\n if row_number <= header_row:\n continue\n row_data = {}\n for col_number, cell in enumerate(worksheet.row(row_number)):\n if keys[col_number] in columns:\n cell_type = cell.ctype\n if cell_type == 3:\n cell_value = xlrd.xldate.xldate_as_datetime(cell.value, workbook.datemode\n ).strftime('%d-%b-%Y')\n else:\n cell_value = cell.value\n row_data[keys[col_number]] = cell_value\n if not restrictions:\n data.append(row_data)\n else:\n is_valid = True\n for restriction_key, restriction_values in restrictions.items():\n if row_data[restriction_key].strip().upper() not in [restriction_value.strip().upper()\n for restriction_value in restriction_values]:\n is_valid = False\n if is_valid:\n data.append(row_data)\n\n return data", "title": "" }, { "docid": "da726ebbd4e3f332b45a43fff0cd91a7", "score": "0.5394223", "text": "def XlsxGetCellValue(file, sheet, row, column):", "title": "" }, { "docid": "a664915795afa421b8d5adee6915cd9c", "score": "0.53936356", "text": "def open_file(path):\r\n book = xlrd.open_workbook(path)\r\n\r\n sheet = book.sheet_by_index(0)\r\n\r\n print sheet.name\r\n print sheet.ncols\r\n print sheet.nrows\r\n\r\n dataList = []\r\n\r\n for row_index in range(sheet.nrows):\r\n for col_index in range(sheet.ncols):\r\n \r\n #print col_index\r\n #print row_index\r\n print sheet.cell(row_index,col_index).value\r\n if (col_index == 0):\r\n value1 = sheet.cell(row_index,col_index).value\r\n dataList.append(value1)\r\n if (col_index == 1):\r\n value2 = sheet.cell(row_index,col_index).value\r\n dataList.append(value2)\r\n print \"value1*value2 : \" + str((value1*value2)/36000)\r\n dataList.append(str((value1*value2)/36000))\r\n \r\n #---------------------------------------------------------------------- \r\n\r\n book = Workbook()\r\n sheet1 = book.add_sheet('Sheet 1')\r\n\r\n for index, elem in enumerate(dataList):\r\n print(index, elem)\r\n\r\n if (index%3 == 1):\r\n 
sheet1.write(index/3,1,elem)\r\n if (index%3 == 2):\r\n sheet1.write(index/3,2,elem)\r\n if (index%3 == 0):\r\n sheet1.write(index/3,0,elem)\r\n\r\n\r\n sheet1.col(0).width = 2000\r\n\r\n book.save('simple.xls')\r\n book.save(TemporaryFile())", "title": "" }, { "docid": "b58dd9175538fca35ea3da03077e9ec9", "score": "0.5388667", "text": "def read_data(self, file_path):", "title": "" }, { "docid": "7ca8e03046905e20021123c6048044d1", "score": "0.53885823", "text": "def read_detail(\n excel_file,\n expt_prefix=\"Region\",\n drop_injection=True,\n label_rows=True,\n drop_empty_columns=True,\n get_instrument=False,\n):\n\n expt_sheet_name = \"{:s} Detail\".format(expt_prefix)\n\n # header check for # channels\n ch_check_ = pd.read_excel(\n excel_file,\n sheet_name=expt_sheet_name,\n skiprows=18,\n header=None,\n nrows=1,\n )\n\n ch_check_.dropna(axis=\"columns\", inplace=True)\n\n # the channel names should be on the first row\n ch_ = ch_check_.iloc[0, :]\n\n # ch_ now contains (column index, channel name) entry\n if ch_.empty:\n # earlier version (SHG-only) does not have channel headers\n # and doesnt contain an experiment \"Raw\" sheet\n # letter ranges (Excel style) for data columns in v3\n data_cols_id = \"N:R\"\n col_interest = [\"Clock Time\", \"Elapsed Time (s)\", \"Median Counts\"]\n ch_names = {\"Median Counts\": \"P-SHG\"}\n else:\n # for 4-channel versions\n data_cols_id = \"O:S,U:Y,AA:AE,AG:AK\"\n col_interest = [\n \"Clock Time\",\n \"Elapsed Time (s)\",\n \"Median\",\n \"Median.1\",\n \"Median.2\",\n \"Median.3\",\n ]\n # create a lookup-table for replacing column names\n ch_names = {}\n for i, ch in enumerate(ch_.values):\n if i == 0:\n ch_names[\"Median\"] = ch\n if i > 0:\n ch_names[\"Median.{:d}\".format(i)] = ch\n\n # for plate metadata, look for basic column headers\n metadata = pd.read_excel(\n excel_file,\n sheet_name=expt_sheet_name,\n skiprows=19,\n usecols=check_col_hdr,\n )\n\n # different sheet layout of the outputs will use different 'usecols'\n # only pull-out the columns of interest for the data\n median_data = pd.read_excel(\n excel_file,\n sheet_name=expt_sheet_name,\n skiprows=19,\n usecols=data_cols_id,\n ).loc[:, col_interest]\n\n # rename the median data column to its channel name\n median_data.rename(columns=ch_names, inplace=True)\n\n # merge the metadata with median data\n merged_data = pd.concat([metadata, median_data], axis=1)\n\n if label_rows:\n # label each row for broadcasting operations \"A1\", \"A2\", \"A3\", etc.\n choose_cols = [\"Read Row\", \"Read Column\"]\n well_labels = [\n \"{:s}{:d}\".format(*[row[h] for h in choose_cols])\n for index, row in merged_data[choose_cols].iterrows()\n ]\n\n # assign well position as row labels (e.g. \"A1\", \"A2\", ...)\n merged_data.set_index(pd.Series(well_labels), inplace=True)\n\n try:\n # fill in missing units\n valid_units = merged_data[merged_data[\"Read #\"] == \"Inj\"].loc[\n :, [\"Read Units\", \"Source Units\"]\n ]\n\n merged_data[\"Read Units\"].fillna(\n valid_units[\"Read Units\"], axis=\"index\", inplace=True\n )\n\n merged_data[\"Source Units\"].fillna(\n valid_units[\"Source Units\"], axis=\"index\", inplace=True\n )\n except KeyError:\n pass\n # print(\"File is from the 'old' style Result file. 
Skipping Read/Source Units\")\n\n if drop_empty_columns:\n merged_data.dropna(axis=\"columns\", how=\"all\", inplace=True)\n\n # after the missing units have been filled from 'Inj' row\n # we can remove this row since it contains no data\n if drop_injection:\n merged_data = merged_data[merged_data[\"Read #\"] != \"Inj\"]\n\n merged_data[\"Well coordinates\"] = merged_data[\"Read Row\"].astype(\n str\n ) + merged_data[\"Read Column\"].astype(str)\n\n if get_instrument:\n wb = load_workbook(excel_file)\n ws = wb[expt_sheet_name]\n instrumentSN = ws.cell(row=7, column=5).value\n return merged_data, instrumentSN\n\n else:\n\n return merged_data", "title": "" }, { "docid": "b31e2683075260888daf248f1eaa9d47", "score": "0.53835183", "text": "def getDataFramefromExcel(path):\n database = pd.read_csv(path)\n\n return database", "title": "" }, { "docid": "e5dd9a48958bec5fb8bd28021b0e5659", "score": "0.53765476", "text": "def load_workbook(wkbkName, dir):\r\n\twkbk = xlrd.open_workbook(dir + wkbkName + \".xls\")\r\n\twksht = wkbk.sheet_by_index(0)\r\n\treturn wksht", "title": "" }, { "docid": "846ccb577eea79da568e19ba3717960c", "score": "0.53722614", "text": "def importSingleXL(dataPath, fileName):\n fullFile_i = dataPath + '/' + fileName\n data_i = pandas.read_excel(fullFile_i, sheetname=\"Scattering Sij(f)\")\n freqStep = data_i.values[1,0] - data_i.values[2,0]\n\n f = data_i.values[:,0]\n\n s11_real = data_i.values[:,1]\n s11_imag = data_i.values[:,2]\n\n s11_mag, s11_ang = dp_ml.realimag_to_magphase(s11_real, s11_imag)\n # unwrap phase:\n s11_ang = np.unwrap(s11_ang)\n #s11_dang[i,:] = np.diff(s11_ang[i,:])*freqStep\n s11_dang = np.diff(s11_ang)\n\n s21_real = data_i.values[:,3]\n s21_imag = data_i.values[:,4]\n\n s21_mag, s21_ang = dp_ml.realimag_to_magphase(s21_real, s21_imag)\n s21_ang = np.unwrap(s21_ang)\n #s21_dang = np.diff(s21_ang)*freqStep\n s21_dang = np.diff(s21_ang)\n\n\n s12_real = data_i.values[:,5]\n s12_imag = data_i.values[:,6]\n\n s12_mag, s12_ang = dp_ml.realimag_to_magphase(s12_real, s12_imag)\n # unwrap phase:\n s12_ang = np.unwrap(s12_ang)\n #s12_dang[i,:] = np.diff(s11_ang[i,:])*freqStep\n s12_dang = np.diff(s12_ang)\n\n s22_real = data_i.values[:,7]\n s22_imag = data_i.values[:,8]\n\n s22_mag, s22_ang = dp_ml.realimag_to_magphase(s22_real, s22_imag)\n s22_ang = np.unwrap(s22_ang)\n #s12_dang = np.diff(s12_ang)*freqStep\n s22_dang = np.diff(s22_ang)\n\n S_f = np.zeros((8, max(f.shape)))\n #S_f = np.vstack((s11_mag[0,:-1], s11_dang[0,:], s21_mag[0,:-1], s21_dang[0,:], s12_mag[0,:-1], s12_dang[0,:], s22_mag[0,:-1], s22_dang[0,:]))\n S_f = np.vstack((s11_mag[0,:-1], s11_ang[0,:-1], s21_mag[0,:-1], s21_ang[0,:-1], s12_mag[0,:-1], s12_ang[0,:-1], s22_mag[0,:-1], s22_ang[0,:-1]))\n\n s11_comp = s11_real[:-1] + s11_imag[:-1]*1j\n s21_comp = s21_real[:-1] + s21_imag[:-1]*1j\n s12_comp = s12_real[:-1] + s12_imag[:-1]*1j\n s22_comp = s22_real[:-1] + s22_imag[:-1]*1j\n\n S_comp = np.vstack((s11_comp, s21_comp, s12_comp, s22_comp))\n\n return f, S_f, S_comp", "title": "" }, { "docid": "8ab844b029a1c34e2ef02126ba8878b9", "score": "0.5361748", "text": "def read_spreadsheet(self):\n print \"Reading Google spreadsheet. 
Please wait...\"\n self.cells = self.spreadsheet.get_cells()", "title": "" }, { "docid": "45c5d468bdcde0af6367dae7a44bdb99", "score": "0.5360966", "text": "def load_neurodevelopmental(path):\n \n neurodev = pandas.read_excel(path, sheetname=\"neurodevelopmental.dominant_lof\")\n \n return neurodev", "title": "" }, { "docid": "489cc0d5f087386e0b99264fe832f7f8", "score": "0.5309525", "text": "def read_file(filenamme):\n _, ext = get_filename_ext(filenamme)\n if ext == \".csv\":\n return pd.read_csv\n else:\n return pd.read_excel", "title": "" }, { "docid": "138c359d41431f3022c254846e74936f", "score": "0.5305895", "text": "def import_data():\n # Import the csv file\n df_raw_k = pd.read_csv(\"../data/01_raw/DataAnalyst.csv\")\n # Import the excel file and skip the 3 first empty rows\n df_raw_b = pd.read_excel(\"../data/01_raw/2020_Data_Professional_Salary_Survey_Responses.xlsx\",\n skiprows=3)\n return df_raw_k, df_raw_b", "title": "" }, { "docid": "ddad008ad174eb6889a819ff61d0afe9", "score": "0.53056026", "text": "def readFile(target):\r\n assert os.path.exists(target)\r\n with open(target) as fin:\r\n lines = fin.read()\r\n lines = lines.splitlines()\r\n return lines", "title": "" }, { "docid": "3c6fddea0e6ae6cd0303cb71dca6e091", "score": "0.5305108", "text": "def read_site_master(xl_file_path, sheet_name):\n xl_book = xlrd.open_workbook(xl_file_path)\n xl_sheet = xl_book.sheet_by_name(sheet_name)\n last_row = int(xl_sheet.nrows)\n # find the header and first data rows\n for i in range(last_row):\n if xl_sheet.cell(i,0).value == \"Site\":\n header_row = i\n first_data_row = header_row + 1\n break\n # read the header row\n header_row_values = xl_sheet.row_values(header_row)\n # read the site data from the master Excel spreadsheet\n site_info = OrderedDict()\n for n in range(first_data_row,last_row):\n site_name = xl_sheet.cell(n,0).value\n site_name = site_name.replace(\" \",\"\")\n site_info[site_name] = OrderedDict()\n for item in header_row_values[1:]:\n i = header_row_values.index(item)\n site_info[site_name][item] = xl_sheet.cell(n,i).value\n return site_info", "title": "" }, { "docid": "44175e68eba0abab32e0b96edf888374", "score": "0.52887267", "text": "def extract_excel(file_):\n # doing this locally because it might be slow\n import pandas\n\n first_sheet = pandas.read_excel(file_, sheet_name=0)\n html = first_sheet._repr_html_() # pylint: disable=protected-access\n return html, {}", "title": "" }, { "docid": "9f1ee37b62cb5fa7ccdacb3b1f9749a0", "score": "0.52517444", "text": "def _source_cell(self):\n return self._uno_sheet.getCellByPosition(*self.position)", "title": "" }, { "docid": "7ee02954b16b7d830e4823819181ce83", "score": "0.5245815", "text": "def read_project_file(self, file_path):\n self.library = pd.read_excel(file_path)\n project_names = self.library['Project Name']\n video_paths = self.library['Video Path']\n begaze_paths = self.library['BeGaze Path']\n return project_names, video_paths, begaze_paths", "title": "" }, { "docid": "3f8c00f515f42e537994b1248d98b9eb", "score": "0.5223265", "text": "def __read_file(self, zipped_file):\n if self.__verbose:\n print(\"Started reading zipped gene expression file\")\n self.__gene_exp_matrix_df = pd.read_csv(zipped_file, sep=\"\\t\", header=0, index_col=0)\n if self.__verbose:\n print(\"Finished reading zipped gene expression file\")", "title": "" }, { "docid": "1033dfbf8ce0c51df5ab40f7a5b8158a", "score": "0.52211434", "text": "def read_excel(path_to_workbook, sheetName=None, sheetIndex=0, showInfo=False):\n \n book = 
get_book(path_to_workbook)\n sheet = get_sheet(book, sheetName, sheetIndex)\n \n table_dict, shape, columnNames = dictTable(sheet)\n if showInfo:\n print(\"Rows/Cols: \", shape, \"Columns:\\n\", columnNames)\n \n return table_dict", "title": "" }, { "docid": "ab71630438e50a521bef95fcaeaa5347", "score": "0.5197437", "text": "def read_set_from_excel(file_name, sheet_name, start_loc, end_loc, n_set):\n start_loc = cell_loc_conversion(start_loc)\n end_loc = cell_loc_conversion(end_loc)\n \n for sheet in xlrd.open_workbook(file_name).sheets():\n if sheet.name == sheet_name:\n set_list = [\n sheet.cell(row, start_loc[1]).value\n for row in range(start_loc[0] + 1, end_loc[0] + 1)\n ]\n\n return set_list", "title": "" }, { "docid": "020e7548b753956e866296a3cc75c554", "score": "0.5196826", "text": "def import_xls_to_df(filename, name_of_sheet):\n # Set up logging\n logger = logging.getLogger(__name__)\n logger.info('Importing data...')\n\n return pd.read_excel(filename,sheetname=name_of_sheet)", "title": "" }, { "docid": "b33e5b867d8c1e72827c32bee2ced578", "score": "0.51823854", "text": "def read_file(file):\n\n if os.path.exists(file):\n \n\n # read csv\n if '.csv' in file:\n read_me = pandas.read_csv(file)\n return read_me\n\n # read json\n elif '.json' in file:\n return pandas.read_json(file)\n \n elif '.xlsx' in file:\n return pandas.read_excel(file, sheet_name=1)\n \n elif '.txt' in file:\n # returning data with default header if one was not included\n # data = pandas.read_csv(file, header=None)\n # return data\n\n data = pandas.read_csv('./data.txt')\n # manual header assignment:\n data.columns = [\n 'ID', 'Address', 'City', 'Zip', 'Country', 'Customer', 'Emp'\n ]\n # manual index assignment:\n \n # we could assign a new variable..\n # data_index = data.set_index('ID')\n # return data_index\n\n # or add the inplace parameters\n data.set_index('ID', inplace=True)\n return data\n\n\n else:\n return 'File format not supported.'", "title": "" }, { "docid": "ebccf14bba442de3f784911896b65be4", "score": "0.5180519", "text": "def read_data(file):", "title": "" }, { "docid": "c9301f2ff756eff10d0cf5b47eaa8a9b", "score": "0.51701397", "text": "def _uno_sheet(self):\n return self.sheet.i7e_sheet", "title": "" }, { "docid": "91f7aeba253469bc53851f3aef27b16d", "score": "0.5167025", "text": "def open_workbook(filename=None,\n logfile=sys.stdout,\n verbosity=0,\n use_mmap=USE_MMAP,\n file_contents=None,\n encoding_override=None,\n formatting_info=False,\n on_demand=False,\n ragged_rows=False):\n\n peeksz = 4\n if file_contents:\n peek = file_contents[:peeksz]\n else:\n filename = os.path.expanduser(filename)\n with open(filename, \"rb\") as f:\n peek = f.read(peeksz)\n if peek == b\"PK\\x03\\x04\": # a ZIP file\n if file_contents:\n zf = zipfile.ZipFile(timemachine.BYTES_IO(file_contents))\n else:\n zf = zipfile.ZipFile(filename)\n\n # Workaround for some third party files that use forward slashes and\n # lower case names. We map the expected name in lowercase to the\n # actual filename in the zip container.\n component_names = dict([(X12Book.convert_filename(name), name)\n for name in zf.namelist()])\n\n if verbosity:\n logfile.write('ZIP component_names:\\n')\n pprint.pprint(component_names, logfile)\n if 'xl/workbook.xml' in component_names:\n from . 
import xlsx\n bk = xlsx.open_workbook_2007_xml(\n zf,\n component_names,\n logfile=logfile,\n verbosity=verbosity,\n use_mmap=use_mmap,\n formatting_info=formatting_info,\n on_demand=on_demand,\n ragged_rows=ragged_rows,\n )\n return bk\n if 'xl/workbook.bin' in component_names:\n raise XLRDError('Excel 2007 xlsb file; not supported')\n if 'content.xml' in component_names:\n raise XLRDError('Openoffice.org ODS file; not supported')\n raise XLRDError('ZIP file contents not a known type of workbook')\n\n from . import book\n bk = book.open_workbook_xls(\n filename=filename,\n logfile=logfile,\n verbosity=verbosity,\n use_mmap=use_mmap,\n file_contents=file_contents,\n encoding_override=encoding_override,\n formatting_info=formatting_info,\n on_demand=on_demand,\n ragged_rows=ragged_rows,\n )\n return bk", "title": "" }, { "docid": "b5d8bc0742b7b7632615a8c320f06fc5", "score": "0.5153749", "text": "def parse_excel(\n file_path: str,\n entrez_id_header,\n log_fold_change_header,\n adjusted_p_value_header,\n entrez_delimiter,\n base_mean_header=None,\n) -> List[Gene]:\n logger.info(\"In parse_excel()\")\n\n df = pd.read_excel(file_path)\n\n return _handle_dataframe(\n df,\n entrez_id_name=entrez_id_header,\n log2_fold_change_name=log_fold_change_header,\n adjusted_p_value_name=adjusted_p_value_header,\n entrez_delimiter=entrez_delimiter,\n base_mean=base_mean_header,\n )", "title": "" }, { "docid": "099343e31bf7fc38469cebd37aed5028", "score": "0.5146416", "text": "def readFile(self,filename):\n self.dfNodeSolarValue = pd.read_hdf('data/test - saveNodesLmpSolarThreaded.h5','dfNodeSolarValue')\n self.dfLmpSolar = pd.read_hdf('data/test - saveNodesLmpSolarThreaded.h5','dfLmpSolar')", "title": "" }, { "docid": "86e11681fe9a923063afdc0669bac54f", "score": "0.51406467", "text": "def parse(self):\n return parse_workbook(open_workbook(file_contents=self.data.read()))", "title": "" }, { "docid": "7271bb11119fb7d2e82205a5ad332b5c", "score": "0.5138962", "text": "def get_xls(self,xls_name, sheet_name):\n cls = []\n # get xls file's path\n xlsPath = os.path.join(proDir, \"testFile\", 'case', xls_name)\n # open xls file\n file = open_workbook(xlsPath)\n # get sheet by name\n sheet = file.sheet_by_name(sheet_name)\n # get one sheet's rows\n nrows = sheet.nrows\n for i in range(nrows):\n if sheet.row_values(i)[0] != u'case_name':\n cls.append(sheet.row_values(i))\n return cls", "title": "" }, { "docid": "47bd822860c8e4589b2d0fd37ca588a5", "score": "0.51364434", "text": "def get_xls_train_set(files, num_text):\n train_set = []\n for t_file in files:\n data = xlrd.open_workbook(t_file)\n table = data.sheet_by_index(0)\n num_line = min(table.nrows, num_text)\n print(\"Read %s lines from %s.\" % (num_line, t_file))\n for t_line in range(num_line):\n train_set.append((t_file.split(\"/\")[-1].split(\".\")[0], table.cell(t_line, 0).value.encode('utf-8')))\n return train_set", "title": "" }, { "docid": "47e00a75576e374ca51877279b3e4028", "score": "0.51256925", "text": "def data_from_xl(path, file_name, sheets, unmerge = True, **kwargs):\n\n # print('Xl - ', kwargs)\n result = {s: data_from_xl_sheet(path, file_name, s,\n unmerge, **kwargs) for s in sheets}\n\n return result", "title": "" }, { "docid": "0dd626bf4b67b58c19964c64c86043f4", "score": "0.51243544", "text": "def read_file(file, features_list, target):\n names_list = features_list.copy()\n names_list.append(target)\n data = pd.read_csv(file, names=names_list)\n features = data[features_list]\n target = data[target]\n print(data.head())\n return data, features, 
target", "title": "" }, { "docid": "bb7bccb162ad2b8902c3227bc4650644", "score": "0.5122755", "text": "def excel_reader(filename):\n tables = []\n rbook = xlrd.open_workbook(filename)\n for sheet in rbook.sheets():\n row_num = sheet.nrows\n col_num = sheet.ncols\n _sh = dict(name=sheet.name, headers=sheet.row_values(0))\n datas = []\n for i in range(1, row_num):\n row = [parse_cell(sheet.cell(i, j)) for j in range(col_num)]\n datas.append(row)\n _sh.update(dict(datas=datas))\n tables.append(_sh)\n return tables", "title": "" } ]
398be6fb10a69c0a785eb240f174c075
Sends a POST request to /public/clusters/cloudEdition to create a new Cloud Edition Cohesity Cluster and returns the IDs, name, and software version of the new cluster, along with the status of each node.
[ { "docid": "be7e214a76a89f11be93995025f6cc40", "score": "0.7171899", "text": "def create_cloud_cluster(self, body):\n try:\n self.logger.info('create_cloud_cluster called.')\n\n # Validate required parameters\n self.logger.info(\n 'Validating required parameters for create_cloud_cluster.')\n self.validate_parameters(body=body)\n\n # Prepare query URL\n self.logger.info('Preparing query URL for create_cloud_cluster.')\n _url_path = '/public/clusters/cloudEdition'\n _query_builder = Configuration.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n self.logger.info('Preparing headers for create_cloud_cluster.')\n _headers = {\n 'accept': 'application/json',\n 'content-type': 'application/json; charset=utf-8'\n }\n\n # Prepare and execute request\n self.logger.info(\n 'Preparing and executing request for create_cloud_cluster.')\n _request = self.http_client.post(\n _query_url,\n headers=_headers,\n parameters=APIHelper.json_serialize(body))\n AuthManager.apply(_request)\n _context = self.execute_request(_request,\n name='create_cloud_cluster')\n\n # Endpoint and global error handling using HTTP status codes.\n self.logger.info('Validating response for create_cloud_cluster.')\n if _context.response.status_code == 0:\n raise RequestErrorErrorException('Error', _context)\n self.validate_response(_context)\n\n # Return appropriate type\n return APIHelper.json_deserialize(\n _context.response.raw_body,\n CreateClusterResult.from_dictionary)\n\n except Exception as e:\n self.logger.error(e, exc_info=True)\n raise", "title": "" } ]
[ { "docid": "985f340a66c978a5cae7d74f141e616b", "score": "0.72254884", "text": "def create_expand_cloud_cluster(self, body):\n try:\n self.logger.info('create_expand_cloud_cluster called.')\n\n # Validate required parameters\n self.logger.info(\n 'Validating required parameters for create_expand_cloud_cluster.'\n )\n self.validate_parameters(body=body)\n\n # Prepare query URL\n self.logger.info(\n 'Preparing query URL for create_expand_cloud_cluster.')\n _url_path = '/public/clusters/cloudEdition/nodes'\n _query_builder = Configuration.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n self.logger.info(\n 'Preparing headers for create_expand_cloud_cluster.')\n _headers = {\n 'accept': 'application/json',\n 'content-type': 'application/json; charset=utf-8'\n }\n\n # Prepare and execute request\n self.logger.info(\n 'Preparing and executing request for create_expand_cloud_cluster.'\n )\n _request = self.http_client.post(\n _query_url,\n headers=_headers,\n parameters=APIHelper.json_serialize(body))\n AuthManager.apply(_request)\n _context = self.execute_request(_request,\n name='create_expand_cloud_cluster')\n\n # Endpoint and global error handling using HTTP status codes.\n self.logger.info(\n 'Validating response for create_expand_cloud_cluster.')\n if _context.response.status_code == 0:\n raise RequestErrorErrorException('Error', _context)\n self.validate_response(_context)\n\n # Return appropriate type\n return APIHelper.json_deserialize(\n _context.response.raw_body,\n CreateClusterResult.from_dictionary)\n\n except Exception as e:\n self.logger.error(e, exc_info=True)\n raise", "title": "" }, { "docid": "8b833884d86f09a96176a96cb1a19265", "score": "0.6304552", "text": "def post_clusters():\n # Get DB from context:\n db = getattr(g, 'db', None)\n colection = get_kmeans_collection()\n kmeans_collection = db[colection]\n\n # Get the Json order from the request\n new_cluster = request.get_json()\n new_cluster['timestamp'] = datetime.datetime.utcnow() # we add the submit date not the user...\n\n # At least validate that each cluster/centroid has 'k' number of arrays\n k = new_cluster['k']\n centroids = new_cluster['centroids']\n cluster_data = new_cluster['clusters']\n labels = new_cluster['labels']\n if (len(centroids) is not k):\n return make_response(jsonify({'error': 'centroids data inconsistent'}), 404)\n elif (len(cluster_data) is not k):\n return make_response(jsonify({'error': 'cluster data inconsistent'}), 404)\n elif (len(labels) is not k):\n return make_response(jsonify({'error': 'labels inconsistent'}), 404)\n\n result = kmeans_collection.find_one_and_update(\n {'level': new_cluster['level']}, \n {'$set': new_cluster}, \n upsert=True\n )\n new_cluster['_id'] = str(result['_id']) # Update local copy with the id.\n \n return jsonify({'cluster': new_cluster}), 200", "title": "" }, { "docid": "ec88c1bcc0352d2f8188e014fdccb617", "score": "0.6077821", "text": "def post(self, request, *args, **kwargs):\n self.serializer_class = ClusterchoicesSerializer\n serializer = self.serializer_class(data=request.DATA)\n user_token = Token.objects.get(key=request.auth)\n user = UserInfo.objects.get(user_id=user_token.user.user_id)\n if serializer.is_valid():\n try:\n db_cluster_create(user, serializer.data)\n return Response({\"id\": 1, \"message\": \"Requested cluster created in db\"})\n except ClientError, e:\n return Response({\"id\": 1, \"message\": e.message})\n except Exception, e:\n return Response({\"id\": 1, 
\"message\": e.args[0]})\n\n return Response(serializer.errors)", "title": "" }, { "docid": "ca5673c4bcf136ed1886af0a1ec1a508", "score": "0.5962067", "text": "def post(self, data):\n context = pecan.request.context\n request_data = data.as_dict()\n cluster_flavor = request_data['flavor']\n\n if data.size <= 0:\n raise exception.Invalid(_(\"Invalid cluster size provided\"))\n elif data.size > CONF.api.max_cluster_size:\n raise exception.RequestEntityTooLarge(\n _(\"Invalid cluster size, max size is: %d\")\n % CONF.api.max_cluster_size)\n\n if len(data.network_id) > 1:\n raise exception.Invalid(_(\"Invalid number of network_id's\"))\n\n # extract username/password\n if (data.authentication and data.authentication.type and\n data.authentication.token):\n auth_validator = auth_validate.AuthTokenValidator.validate_token(\n auth_type=data.authentication.type,\n token=data.authentication.token)\n if not auth_validator or not auth_validator.validate():\n raise exception.Invalid(_(\"Invalid broker authentication \"\n \"parameter(s)\"))\n else:\n raise exception.Invalid(_(\"Missing broker authentication \"\n \"parameter(s)\"))\n\n default_rabbit_user = data.authentication.token['username']\n default_rabbit_pass = data.authentication.token['password']\n\n broker_name = CONF.default_broker_name\n\n # get the image id of default broker\n image_id = objects.BrokerMetadata.get_image_id_by_broker_name(\n context, broker_name)\n\n # validate cluster flavor\n self._validate_flavor(image_id, cluster_flavor)\n\n # convert 'network_id' from list to string type for objects/cluster\n # compatibility\n request_data['network_id'] = request_data['network_id'][0]\n\n # create new cluster object with required data from user\n new_cluster = objects.Cluster(**request_data)\n\n # create new cluster with node related data from user\n new_cluster.create(context)\n\n # retrieve cluster data\n cluster = get_complete_cluster(context, new_cluster.id)\n\n nodes = objects.Node.get_nodes_by_cluster_id(context,\n cluster.id)\n\n # create list with node id's for create cluster flow\n node_ids = [node.id for node in nodes]\n\n # prepare and post cluster create job to backend\n flow_kwargs = {\n 'cluster_id': cluster.id,\n 'node_ids': node_ids,\n 'user_network_id': cluster.network_id[0],\n 'management_network_id': CONF.management_network_id,\n }\n\n # generate unique erlang cookie to be used by all nodes in the new\n # cluster, erlang cookies are strings of up to 255 characters\n erlang_cookie = uuidutils.generate_uuid()\n\n job_args = {\n 'tenant_id': new_cluster.project_id,\n 'flavor': cluster.flavor,\n 'image': image_id,\n 'volume_size': cluster.volume_size,\n 'port': '5672',\n 'context': context.to_dict(),\n # TODO(sputnik13: this needs to come from the create request\n # and default to a configuration value rather than always using\n # config value\n 'security_groups': [CONF.os_security_group],\n 'port': CONF.rabbit_port,\n 'key_name': CONF.openstack.os_key_name,\n 'erlang_cookie': erlang_cookie,\n 'default_rabbit_user': default_rabbit_user,\n 'default_rabbit_pass': default_rabbit_pass,\n }\n job_client = task_flow_client.get_client_instance()\n # TODO(dagnello): might be better to use request_id for job_uuid\n job_uuid = uuidutils.generate_uuid()\n job_client.post(create_cluster, job_args,\n flow_kwargs=flow_kwargs,\n tx_uuid=job_uuid)\n\n LOG.info(_LI('Create Cluster Request Cluster ID %(cluster_id)s '\n 'Cluster size %(size)s network ID %(network_id)s '\n 'Job ID %(job_id)s Broker name %(broker_name)s') %\n ({\"cluster_id\": 
cluster.id,\n \"size\": cluster.size,\n \"network_id\": cluster.network_id,\n \"job_id\": job_uuid,\n \"broker_name\": broker_name}))\n\n cluster.additional_information = []\n cluster.additional_information.append(\n dict(def_rabbit_user=default_rabbit_user))\n cluster.additional_information.append(\n dict(def_rabbit_pass=default_rabbit_pass))\n\n cluster.unset_empty_fields()\n return cluster", "title": "" }, { "docid": "237040f9b3c38178a07a44a81b7d2a4b", "score": "0.594618", "text": "def create_virtual_cluster(self, body):\n try:\n self.logger.info('create_virtual_cluster called.')\n\n # Validate required parameters\n self.logger.info(\n 'Validating required parameters for create_virtual_cluster.')\n self.validate_parameters(body=body)\n\n # Prepare query URL\n self.logger.info('Preparing query URL for create_virtual_cluster.')\n _url_path = '/public/clusters/virtualEdition'\n _query_builder = Configuration.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n self.logger.info('Preparing headers for create_virtual_cluster.')\n _headers = {\n 'accept': 'application/json',\n 'content-type': 'application/json; charset=utf-8'\n }\n\n # Prepare and execute request\n self.logger.info(\n 'Preparing and executing request for create_virtual_cluster.')\n _request = self.http_client.post(\n _query_url,\n headers=_headers,\n parameters=APIHelper.json_serialize(body))\n AuthManager.apply(_request)\n _context = self.execute_request(_request,\n name='create_virtual_cluster')\n\n # Endpoint and global error handling using HTTP status codes.\n self.logger.info('Validating response for create_virtual_cluster.')\n if _context.response.status_code == 0:\n raise RequestErrorErrorException('Error', _context)\n self.validate_response(_context)\n\n # Return appropriate type\n return APIHelper.json_deserialize(\n _context.response.raw_body,\n CreateClusterResult.from_dictionary)\n\n except Exception as e:\n self.logger.error(e, exc_info=True)\n raise", "title": "" }, { "docid": "29208719eff350a5be655f6711a1b894", "score": "0.5791408", "text": "def create_physical_cluster(self, body):\n try:\n self.logger.info('create_physical_cluster called.')\n\n # Validate required parameters\n self.logger.info(\n 'Validating required parameters for create_physical_cluster.')\n self.validate_parameters(body=body)\n\n # Prepare query URL\n self.logger.info(\n 'Preparing query URL for create_physical_cluster.')\n _url_path = '/public/clusters/physicalEdition'\n _query_builder = Configuration.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n self.logger.info('Preparing headers for create_physical_cluster.')\n _headers = {\n 'accept': 'application/json',\n 'content-type': 'application/json; charset=utf-8'\n }\n\n # Prepare and execute request\n self.logger.info(\n 'Preparing and executing request for create_physical_cluster.')\n _request = self.http_client.post(\n _query_url,\n headers=_headers,\n parameters=APIHelper.json_serialize(body))\n AuthManager.apply(_request)\n _context = self.execute_request(_request,\n name='create_physical_cluster')\n\n # Endpoint and global error handling using HTTP status codes.\n self.logger.info(\n 'Validating response for create_physical_cluster.')\n if _context.response.status_code == 0:\n raise RequestErrorErrorException('Error', _context)\n self.validate_response(_context)\n\n # Return appropriate type\n return APIHelper.json_deserialize(\n 
_context.response.raw_body,\n CreateClusterResult.from_dictionary)\n\n except Exception as e:\n self.logger.error(e, exc_info=True)\n raise", "title": "" }, { "docid": "20c0a3dc7a1717b2f5f3c3759cbebccb", "score": "0.5766759", "text": "def create_expand_physical_cluster(self, body):\n try:\n self.logger.info('create_expand_physical_cluster called.')\n\n # Validate required parameters\n self.logger.info(\n 'Validating required parameters for create_expand_physical_cluster.'\n )\n self.validate_parameters(body=body)\n\n # Prepare query URL\n self.logger.info(\n 'Preparing query URL for create_expand_physical_cluster.')\n _url_path = '/public/clusters/physicalEdition/nodes'\n _query_builder = Configuration.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n self.logger.info(\n 'Preparing headers for create_expand_physical_cluster.')\n _headers = {\n 'accept': 'application/json',\n 'content-type': 'application/json; charset=utf-8'\n }\n\n # Prepare and execute request\n self.logger.info(\n 'Preparing and executing request for create_expand_physical_cluster.'\n )\n _request = self.http_client.post(\n _query_url,\n headers=_headers,\n parameters=APIHelper.json_serialize(body))\n AuthManager.apply(_request)\n _context = self.execute_request(\n _request, name='create_expand_physical_cluster')\n\n # Endpoint and global error handling using HTTP status codes.\n self.logger.info(\n 'Validating response for create_expand_physical_cluster.')\n if _context.response.status_code == 0:\n raise RequestErrorErrorException('Error', _context)\n self.validate_response(_context)\n\n # Return appropriate type\n return APIHelper.json_deserialize(\n _context.response.raw_body,\n CreateClusterResult.from_dictionary)\n\n except Exception as e:\n self.logger.error(e, exc_info=True)\n raise", "title": "" }, { "docid": "bdc8949302ec5b460be2d80b5ec92d24", "score": "0.570004", "text": "def create(cls, data):\n\n # TODO(enchantner): fix this temporary hack in clients\n if \"release_id\" not in data:\n release_id = data.pop(\"release\", None)\n data[\"release_id\"] = release_id\n\n # remove read-only attribute\n data.pop(\"is_locked\", None)\n assign_nodes = data.pop(\"nodes\", [])\n enabled_editable_attributes = None\n\n if 'components' in data:\n enabled_core_attributes = cls.get_cluster_attributes_by_components(\n data['components'], data[\"release_id\"])\n data = dict_merge(data, enabled_core_attributes['cluster'])\n enabled_editable_attributes = enabled_core_attributes['editable']\n\n data[\"fuel_version\"] = settings.VERSION[\"release\"]\n cluster = super(Cluster, cls).create(data)\n cls.create_default_group(cluster)\n\n cls.create_attributes(cluster, enabled_editable_attributes)\n cls.create_vmware_attributes(cluster)\n cls.create_default_extensions(cluster)\n\n try:\n cls.get_network_manager(cluster).\\\n create_network_groups_and_config(cluster, data)\n cls.add_pending_changes(\n cluster, consts.CLUSTER_CHANGES.attributes)\n cls.add_pending_changes(\n cluster, consts.CLUSTER_CHANGES.networks)\n cls.add_pending_changes(\n cluster, consts.CLUSTER_CHANGES.vmware_attributes)\n\n if assign_nodes:\n cls.update_nodes(cluster, assign_nodes)\n except (\n errors.OutOfVLANs,\n errors.OutOfIPs,\n errors.NoSuitableCIDR\n ) as exc:\n raise errors.CannotCreate(exc.message)\n\n db().flush()\n\n ClusterPlugins.add_compatible_plugins(cluster)\n PluginManager.enable_plugins_by_components(cluster)\n\n return cluster", "title": "" }, { "docid": 
"4c62ec1f765220f0b6c8098c3aad23ef", "score": "0.5560969", "text": "def vcenter_multiple_cluster(self):\n self.env.revert_snapshot(\"ready_with_1_slaves\")\n\n # Configure cluster\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=settings.DEPLOYMENT_MODE_SIMPLE,\n settings={\n 'use_vcenter': True,\n 'host_ip': settings.VCENTER_IP,\n 'vc_user': settings.VCENTER_USERNAME,\n 'vc_password': settings.VCENTER_PASSWORD,\n 'cluster': settings.VCENTER_CLUSTERS\n }\n )\n logger.info(\"cluster is {0}\".format(cluster_id))\n\n # Add nodes to roles\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-01': ['controller']}\n )\n # Deploy cluster\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n # Wait until nova-compute get information about clusters\n # Fix me. Later need to change sleep with wait function.\n time.sleep(60)\n\n ctrl_ip = self.fuel_web.get_nailgun_node_by_name('slave-01')['ip']\n logger.info(\"Controller ip is {}\".format(ctrl_ip))\n os = os_actions.OpenStackActions(ctrl_ip)\n hypervisors = os.get_hypervisors()\n\n # Check hypervisor quantity and create instances\n assert_true(len(hypervisors) > 1, 'Not enoght vCenter clusters.')\n if len(hypervisors) > 1:\n logger.info(\"Create Instances and assign floating ips:\")\n for i in range(1, 6):\n srv = os.create_server_for_migration(timeout=300)\n logger.info(os.get_instance_detail(srv).to_dict()['name'])\n os.assign_floating_ip(srv)\n\n # Check that there are instanses on each hypervisor\n # Fix me. Later need to change sleep with wait function.\n time.sleep(30)\n hypervisors = os.get_hypervisors()\n for hypervisor in hypervisors:\n assert_true(os.get_hypervisor_vms_count(hypervisor) != 0,\n \"No active VMs on \" +\n os.get_hypervisor_hostanme(hypervisor))\n logger.info(\"{} active VMs on Hypervisor {}\".format(\n os.get_hypervisor_vms_count(hypervisor),\n os.get_hypervisor_hostanme(hypervisor)))\n\n # Get instances ips from different hypervisors\n servers_for_check = {}\n ips_for_check = []\n servers = os.get_servers()\n for server in servers:\n if os.get_srv_hypervisor_name(server) not in servers_for_check:\n servers_for_check[os.get_srv_hypervisor_name(server)] = {}\n server_detail = os.get_instance_detail(server).to_dict()\n for net_prefs in server_detail['addresses']['novanetwork']:\n if net_prefs['OS-EXT-IPS:type'] == 'floating' and \\\n net_prefs['addr'] not in ips_for_check and \\\n len(ips_for_check) == 0:\n ips_for_check.append(net_prefs['addr'])\n if net_prefs['OS-EXT-IPS:type'] == 'fixed' and \\\n len(ips_for_check) == 1:\n ips_for_check.append(net_prefs['addr'])\n\n # Wait until vm is booted\n ssh = self.env.get_ssh_to_remote_by_name(\"slave-01\")\n wait(\n lambda: not ssh.execute('curl -s -m1 http://' + ips_for_check[0] +\n ':22 |grep -iq \"[a-z]\"')['exit_code'],\n interval=10, timeout=100)\n # Check server's connectivity\n res = int(os.execute_through_host(ssh, ips_for_check[0],\n \"ping -q -c3 \" + ips_for_check[1] +\n \" 2>/dev/null >/dev/null;\"\n \" echo -n $?\"))\n assert_true(res == 0, \"Error in Instances network connectivity.\")", "title": "" }, { "docid": "d19a3f02b527ce32fcfe2b0fc18ce77f", "score": "0.5548057", "text": "def create_cluster(client, environment_name, deployment_name, config):\n cluster_size = config.getint(\"cluster\", \"size\")\n template = ClusterTemplate(\n name=config.get('cluster', 'name'),\n productVersions={\n 'CDH': config.get('cluster', 'cdh_version')\n },\n services=['HDFS', 'YARN', 'SPARK_ON_YARN'],\n virtualInstanceGroups={\n 'masters': 
VirtualInstanceGroup(\n name='masters',\n minCount=1,\n serviceTypeToRoleTypes={\n 'HDFS': ['NAMENODE', 'SECONDARYNAMENODE'],\n 'YARN': ['RESOURCEMANAGER', 'JOBHISTORY'],\n 'SPARK_ON_YARN': ['SPARK_YARN_HISTORY_SERVER']\n },\n virtualInstances=[create_virtual_instance_with_random_id(config, 'master'), ]\n ),\n 'gateways': VirtualInstanceGroup(\n name='gateways',\n minCount=1,\n serviceTypeToRoleTypes={\n 'SPARK_ON_YARN': ['GATEWAY'],\n 'HDFS': ['GATEWAY'],\n 'YARN': ['GATEWAY']\n },\n virtualInstances=[create_virtual_instance_with_random_id(config, 'gateway'),]\n ),\n 'workers': VirtualInstanceGroup(\n name='workers',\n minCount=cluster_size,\n serviceTypeToRoleTypes={\n 'HDFS': ['DATANODE', ],\n 'YARN': ['NODEMANAGER'],\n 'SPARK_ON_YARN':['GATEWAY']\n },\n roleTypesConfigs={\n 'HDFS': {\n 'DATANODE': {\n 'dfs_datanode_handler_count': '10'\n },\n 'NODEMANAGER': {\n 'nodemanager_webserver_port': '8047'\n }\n }\n },\n virtualInstances=[create_virtual_instance_with_random_id(config, 'worker')\n for _ in range(0, cluster_size)]\n )\n }\n )\n\n api = ClustersApi(client)\n try:\n api.create(environment_name, deployment_name, template)\n\n except HTTPError as e:\n if e.code == 302:\n print 'Warning: a cluster with the same name already exists'\n else:\n raise e\n\n print \"Clusters: %s\" % api.list(environment_name, deployment_name)\n return template.name", "title": "" }, { "docid": "3f0b014c1907aeef7e4b7f7e2ee4adf0", "score": "0.55450344", "text": "def create_cluster(cluster_size,flavor_id,image_id,root_password,cluster_master,cluster_node_prefix,cluster_suffix):\n if not check_existing(cluster_master,cluster_node_prefix,cluster_suffix):\n print \"Creating a cluster of size %d\" % (cluster_size)\n print \"Creating master node %s...\" % cluster_master\n try:\n cloudservers.servers.create(name=cluster_master,image=image_id,flavor=flavor_id)\n print \"Successfully created %s\" % cluster_master\n except Exception, e:\n print \"ERROR Provisioning %s with error: %s\" % (cluster_master,e)\n sys.exit(1)\n\n print \"Beginning to provision nodes 1-%d\" % cluster_size\n for server_no in range(cluster_size):\n node_name=\"%s%03d%s\" % (cluster_node_prefix,server_no+1,cluster_suffix)\n print \"Provisioning %s...\" % node_name\n try:\n cloudservers.servers.create(name=node_name,image=image_id,flavor=flavor_id)\n time.sleep(10)\n print \"Successfully created %s\" % node_name\n except Exception, e:\n print \"ERROR Provisioning %s with error: %s\" % (cluster_master,e)\n sys.exit(1)", "title": "" }, { "docid": "e3494276ab783ac5d44bc3c040fd61f0", "score": "0.5532318", "text": "def create_active_cluster_and_contents(request):\n\n print('*** Create an Active Cluster with Content for display ****')\n # Variable declarations\n response_data = {}\n display_content_limit = 10\n\n # Get session name\n session_name = request.POST.get('sessionName')\n status_check = ['ACTIVE', 'UNUSED']\n\n # Get form fields data\n login_email = request.POST.get('loginEmail').upper()\n cluster_setup = request.POST.get('clusterSetupOfActiveCandContentInput')\n content_setup = request.POST.get('contentSetupOfActiveCandContentInput')\n # device = request.POST.get('deviceOfActiveCandDeviceInput').upper()\n print(cluster_setup)\n print(content_setup)\n\n # Check if Active Cluster and Contents Setup exist. Check if the numbers of\n # Contents displaying (i.e. 
Content Setup) are less 10.\n try:\n # Checks if Active Cluster Setup Name exist\n cluster_data = ClustersSetup.objects.filter(cluster_location_detail=cluster_setup).values('cluster_setup_id',\n 'cluster_status')\n print(\"Active Cluster Setup check pass\")\n print(cluster_data)\n\n # Check if Cluster is Active or Unused. If UNUSED, make it ACTIVE and proceed.\n current_cluster_status = str(cluster_data[0].get('cluster_status'))\n print(\"current_cluster_status 1: \", current_cluster_status)\n\n if current_cluster_status == status_check[1]:\n print(\"current_cluster_status 2: \", current_cluster_status)\n # Update Device Status to ACTIVE\n cluster_data.update(cluster_status=status_check[0])\n\n # Checks if Content Setup, Data and Contract id exist and count is less or equal 10\n # in ActiveClustersAndDevices table\n content_setup_data = ContentsSetupDataContract.objects.filter(content_title=content_setup) \\\n .values('content_setup_id')\n print(\"Active Content Setup check pass\")\n print(content_setup_data)\n\n # Convert >> cluster_setup_id << UUID value to String without (-) dashes\n cluster_setup_id = uuid.UUID(str(cluster_data[0].get('cluster_setup_id'))).hex\n print(\"cluster_setup_id: \" + cluster_setup_id)\n\n # Convert >> content_setup_id << UUID value to String without (-) dashes\n content_setup_id = uuid.UUID(str(content_setup_data[0].get('content_setup_id'))).hex\n print(\"cluster_setup_id: \" + content_setup_id)\n\n # Check numbers of ContentsSetupDataContract is equal allowed limit (i.e display_content_limit) in the\n # ActiveClustersAndContents table\n active_cluster_content_limit_check = ActiveClustersAndContents.objects \\\n .filter(cluster_setup=cluster_setup_id).count()\n\n print(\"Limit Count: \", active_cluster_content_limit_check)\n if active_cluster_content_limit_check >= display_content_limit:\n # Response data\n response_data['result'] = \"This Cluster \" + cluster_setup + \" has reached its maximum \" + \\\n str(display_content_limit) + \" allowed Displaying Content.\"\n\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n # Check if the combination of Active Cluster and Content exist\n active_cluster_and_content_check = ActiveClustersAndContents.objects.filter(cluster_setup=cluster_setup_id,\n content_setup=content_setup_id)\n\n # If Active Cluster and Content record not found\n if not active_cluster_and_content_check:\n print(\"Active_cluster_and_Content doesn't exist\")\n print(active_cluster_and_content_check)\n\n # Create Active Cluster and Content\n active_cluster_and_Content_data = ActiveClustersAndContents(cluster_setup=ClustersSetup.objects\n .get(cluster_setup_id=cluster_setup_id),\n content_setup=ContentsSetupDataContract.objects\n .get(content_setup_id=content_setup_id),\n last_modified_user=login_email)\n active_cluster_and_Content_data.save()\n print(\"Active Cluster and Content save pass\")\n\n # Response data\n response_data['result'] = content_setup + ' is now added to ' + cluster_setup + ' cluster.'\n\n # # Get list of Active Cluster and Content after adding new Cluster Type.\n # print(\"Start Cluster Setups fetch...\")\n # response_data[session_name] = get_cluster_setup(request, 'DATA')\n # print(\"Cluster Setups fetch completed.\")\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n else:\n print(\"Active Cluster and Content setup already exist\")\n # Already exist response message\n response_data['result'] = 'Active Cluster and Content setup already exist'\n\n return 
HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n except ObjectDoesNotExist:\n print(\"Active Cluster and Content Setup check failed\")\n\n response_data['result'] = \"ERROR Status: \" + str(ObjectDoesNotExist.silent_variable_failure) \\\n + \". Active Cluster and Content Setup validation check failed. CACC(1)\"\n\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")", "title": "" }, { "docid": "0894635393c50721816551a026b1b24e", "score": "0.54949325", "text": "def clusters_post_with_http_info(self, data, **kwargs):\n\n all_params = ['data']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method clusters_post\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'data' is set\n if ('data' not in params) or (params['data'] is None):\n raise ValueError(\"Missing the required parameter `data` when calling `clusters_post`\")\n\n resource_path = '/clusters/'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'data' in params:\n body_params = params['data']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n return self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Cluster',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))", "title": "" }, { "docid": "39906e738e69c748a99b5c9ea35176bf", "score": "0.5448483", "text": "def update_upgrade_cluster(self, body):\n try:\n self.logger.info('update_upgrade_cluster called.')\n\n # Validate required parameters\n self.logger.info(\n 'Validating required parameters for update_upgrade_cluster.')\n self.validate_parameters(body=body)\n\n # Prepare query URL\n self.logger.info('Preparing query URL for update_upgrade_cluster.')\n _url_path = '/public/clusters/software'\n _query_builder = Configuration.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n self.logger.info('Preparing headers for update_upgrade_cluster.')\n _headers = {\n 'accept': 'application/json',\n 'content-type': 'application/json; charset=utf-8'\n }\n\n # Prepare and execute request\n self.logger.info(\n 'Preparing and executing request for update_upgrade_cluster.')\n _request = self.http_client.put(\n _query_url,\n headers=_headers,\n parameters=APIHelper.json_serialize(body))\n AuthManager.apply(_request)\n _context = self.execute_request(_request,\n name='update_upgrade_cluster')\n\n # Endpoint and global error handling using HTTP status codes.\n self.logger.info('Validating response for update_upgrade_cluster.')\n if _context.response.status_code == 0:\n raise RequestErrorErrorException('Error', _context)\n self.validate_response(_context)\n\n # Return appropriate type\n return 
APIHelper.json_deserialize(\n _context.response.raw_body,\n UpgradeClusterResult.from_dictionary)\n\n except Exception as e:\n self.logger.error(e, exc_info=True)\n raise", "title": "" }, { "docid": "652f5b47d047f1d4620eff5dd4aeebd9", "score": "0.5440439", "text": "def get_clusters(self):\n @query(self._session)\n def func(session):\n \"\"\"\n java -classpath /usr/share/cloudstack-common/lib/jasypt-1.9.0.jar org.jasypt.intf.cli.JasyptPBEStringDecryptionCLI decrypt.sh input=\"Y3ZfBKd1/0b8Vu9hjDwCIldiJpULLG5v\" password=\"$(cat /etc/cloud/management/key)\" verbose=false\n /usr/share/cloudstack-common/lib/\n\n \n SELECT t1.id, t1.name, t1.uuid, t1.private_ip_address, t1.cluster_id, t1.pod_id, t1.hypervisor_type, t1.hypervisor_version FROM cloud.host as t1\nWHERE status='Up' and type='Routing';\n \"\"\"\n # get clusters\n sql = [\"SELECT * FROM cloud.cluster as t1 WHERE t1.removed is Null\"] \n query = session.query(\"id\", \"name\", \"pod_id\", \"data_center_id\", \"hypervisor_type\").\\\n from_statement(\" \".join(sql)).\\\n params().all()\n \n clusters = []\n for item in query:\n cluster_id = item[0]\n hypervisor = item[4]\n cluster = {'id':cluster_id,\n 'name':item[1],\n 'pod':item[2],\n 'zone':item[3],\n 'hypervisor':hypervisor}\n if hypervisor == 'VMware':\n # get VMware clusters params\n sql = [\"SELECT * FROM cloud.cluster_details WHERE cluster_id=:cluster_id\"] \n query2 = session.query(\"id\", \"cluster_id\", \"name\", \"value\").\\\n from_statement(\" \".join(sql)).\\\n params(cluster_id=cluster_id).all()\n # append username and password\n for item in query2:\n if query2['name'] == 'username':\n cluster['username'] = query2['value']\n elif query2['name'] == 'password':\n cluster['password'] = query2['value']\n \n clusters.append(cluster)\n\n return clusters\n return func()", "title": "" }, { "docid": "eb3de0455b20f560b2c0296361e5170e", "score": "0.5413793", "text": "def get_clusters(request):\n if request.method == \"POST\":\n data = json.loads(request.body)\n if \"upToYear\" in data:\n ret = network_visualizer.scan_clustering.main(int(data[\"clusSize\"]), \n float(data[\"clusCoef\"]), data[\"graphType\"], int(data[\"upToYear\"]))\n else:\n ret = network_visualizer.scan_clustering.main(int(data[\"clusSize\"]), \n float(data[\"clusCoef\"]), data[\"graphType\"])\n ret = network_visualizer.scan_clustering.get_cluster_idxs(ret)\n return HttpResponse(json.dumps(ret), content_type=\"application/json\")", "title": "" }, { "docid": "faf2dbf06638885446f37017cc934b15", "score": "0.5401939", "text": "def CreateCluster(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "1f97d81caa713818dceedadc2766520b", "score": "0.53903824", "text": "def _create_cluster(self):\n args = []\n if self._alpha_cluster:\n args = ['gcloud', 'alpha']\n else:\n args = ['gcloud', 'beta']\n args.extend(['container', 'clusters', 'create', self._cluster_name])\n if self._extra_create_args:\n args.extend(self._extra_create_args)\n\n logging.info('Creating GKE cluster: %s ...', self._cluster_name)\n try:\n self._gcloud_call(args)\n except KeyboardInterrupt:\n logging.error(\n 'GKE Cluster creation interrupted. 
Deallocating the cluster %s ...',\n self._cluster_name)\n self.delete_cluster(wait=False)", "title": "" }, { "docid": "d2925aeb0886d8880adf67ac7bb8de37", "score": "0.5385241", "text": "def create_cluster_network_config(request):\n\n print('*** Create a Cluster Network Configuration ****')\n response_data = {}\n\n # Get session name\n session_name = request.POST.get('sessionName')\n\n # Get form fields data\n login_email = request.POST.get('loginEmail').upper()\n cluster_setup = request.POST.get('clusterSetupOfClusterNetConfigInput').upper()\n cluster_network_provider = request.POST.get('clusterNetworkProviderInput')\n cluster_network_pass = request.POST.get('clusterNetworkPassInput').upper()\n\n # Check if Cluster Setup already exist in Cluster Network Configuration table\n try:\n cluster_setup_data = ClustersSetup.objects.filter(cluster_location_detail=cluster_setup) \\\n .values('cluster_setup_id')\n print(\"Cluster Network Configuration check pass\")\n print(cluster_setup_data)\n\n # Convert >> area_id << UUID value to String without (-) dashes\n cluster_setup_id = uuid.UUID(str(cluster_setup_data[0].get('cluster_setup_id'))).hex\n print(cluster_setup_data)\n\n # Check if Cluster Network Configuration exist\n cluster_network_config_check = ClustersNetworkConfiguration.objects.filter(cluster_setup=cluster_setup_id)\n\n # If Cluster Network Configuration record not found\n if not cluster_network_config_check:\n print(\"Cluster Network Configuration doesn't exist\")\n print(cluster_network_config_check)\n\n # Create Cluster Network Configuration\n cluster_network_config_data = ClustersNetworkConfiguration(cluster_setup=ClustersSetup.objects\n .get(cluster_setup_id=cluster_setup_id),\n cluster_network_provider=cluster_network_provider,\n cluster_network_pass=cluster_network_pass,\n last_modified_user=login_email)\n cluster_network_config_data.save()\n print(\"Cluster Network Configuration save pass\")\n\n # Response data\n response_data['result'] = cluster_setup + ' Network Configuration completed.'\n\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n else:\n print(\"Cluster Network Configuration already exist\")\n # Already exist response message\n response_data['result'] = 'Cluster Network Configuration already exist. Only one network ' \\\n 'configuration is allowed.'\n\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n except ObjectDoesNotExist:\n print(\"Cluster Network Configuration check failed\")\n\n response_data['result'] = \"ERROR Status: \" + str(ObjectDoesNotExist.silent_variable_failure) \\\n + \". Cluster Network Configuration validation check failed. 
CCNCS(1)\"\n\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")", "title": "" }, { "docid": "439d3e6d23a54525a18be76c50c20587", "score": "0.5381638", "text": "def _do_add_nodes(self, count, cloud):\n startCloud = self._init_cloud(cloud)\n vals = { 'action': '_do_add_nodes', 'count': count }\n self.logger.debug(self.ACTION_REQUESTING_NODES % vals)\n node_instances = self.controller.create_nodes(count, \n client.check_agent_process, self.AGENT_PORT, startCloud)\n\n # Startup agents\n for node in node_instances:\n client.create_node(node.ip, self.AGENT_PORT, self.hub_ip)\n self.logger.info(\"Added node %s: %s \" % (node.id, node.ip))\n # node_info.add_node_info('/etc/hosts', node.ip, node.id)\n\n self.nodes += node_instances\n self.state = self.S_RUNNING", "title": "" }, { "docid": "238621c38d948aea6fb761f00195f271", "score": "0.5367316", "text": "def list(self, request, project_id):\n cluster_info = self.get_cluster_list(request, project_id)\n cluster_node_map = self.cluster_has_node(request, project_id)\n cluster_data = cluster_info.get('results') or []\n # add allow delete perm\n for info in cluster_data:\n info['environment'] = cluster_env_transfer(info['environment'])\n # allow delete cluster\n allow_delete = False if cluster_node_map.get(info['cluster_id']) else True\n info['allow'] = info['allow_delete'] = allow_delete\n perm_can_use = True if request.GET.get('perm_can_use') == '1' else False\n\n cluster_results = Cluster.hook_perms(\n request, project_id, cluster_data, filter_use=perm_can_use)\n # add disk resource\n try:\n cluster_results = prometheus.fixed_disk_usage(cluster_results)\n except Exception as err:\n logger.error('request prometheus err, detail: %s', err)\n # add can create cluster perm for prod/test\n can_create_test, can_create_prod = self.get_cluster_create_perm(request, project_id)\n\n return response.Response({\n 'code': ErrorCode.NoError,\n 'data': {'count': len(cluster_results), 'results': cluster_results},\n 'permissions': {\n 'test': can_create_test,\n 'prod': can_create_prod,\n 'create': can_create_test or can_create_prod\n }\n })", "title": "" }, { "docid": "6b003655eea5c2e019d5b64ecf9112aa", "score": "0.5344877", "text": "def nsxv_smoke_add_compute(self):\n self.env.revert_snapshot('ready_with_5_slaves')\n\n self.install_nsxv_plugin()\n\n # Configure cluster\n settings = self.get_settings()\n settings[\"images_vcenter\"] = True\n # Configure cluster\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=DEPLOYMENT_MODE,\n settings=settings,\n configure_ssl=False)\n\n # Configure VMWare vCenter settings\n self.fuel_web.vcenter_configure(cluster_id, vc_glance=True)\n\n self.enable_plugin(cluster_id=cluster_id)\n\n controllers = ['slave-01', 'slave-02', 'slave-03']\n\n # Assign roles to nodes\n for node in controllers:\n self.fuel_web.update_nodes(cluster_id, {node: ['controller'], })\n\n self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)\n\n old_configured_clusters = {}\n for node in controllers:\n old_configured_clusters[node] = self.get_configured_clusters(node)\n logger.info(\"Old configured clusters on {0} is {1}\"\n .format(node, old_configured_clusters[node]))\n\n # Add 1 node with compute-vmware role and redeploy cluster\n self.fuel_web.update_nodes(\n cluster_id, {'slave-04': ['compute-vmware'], })\n\n target_node_2 = self.node_name('slave-04')\n\n # Configure VMWare vCenter settings\n self.fuel_web.vcenter_configure(cluster_id,\n vc_glance=True,\n multiclusters=True,\n 
target_node_2=target_node_2)\n\n self.fuel_web.deploy_cluster_wait(cluster_id, check_services=False)\n\n new_configured_clusters = {}\n for node in controllers:\n new_configured_clusters[node] = self.get_configured_clusters(node)\n logger.info(\"New configured clusters on {0} is {1}\"\n .format(node, new_configured_clusters[node]))\n\n for node in controllers:\n assert_true(set(new_configured_clusters[node]) -\n set(old_configured_clusters[node]),\n \"Clusters on node {0} not reconfigured\".format(node))", "title": "" }, { "docid": "0475ce18fb603fad4271d1a1e9464a28", "score": "0.5278573", "text": "def create_cluster(name=None, version=None, roleArn=None, resourcesVpcConfig=None, clientRequestToken=None):\n pass", "title": "" }, { "docid": "33099fc257607ad4eaf3708e2d8f189b", "score": "0.5263153", "text": "def create_active_cluster_and_devices(request):\n\n print('*** Create an Active Cluster with Devices ****')\n # Variable declarations\n response_data = {}\n status_check = ['ACTIVE', 'DEACTIVATED']\n\n # Get session name\n # session_name = request.POST.get('sessionName')\n\n # Get form fields data\n login_email = request.POST.get('loginEmail').upper()\n cluster_setup = request.POST.get('clusterSetupOfActiveCandDeviceInput')\n device = request.POST.get('deviceOfActiveCandDeviceInput').upper()\n\n # Check if Active Cluster and Device exist. Check if the numbers of\n # Contents displaying (i.e. Content Setup) are less 10.\n try:\n # Checks if Active Cluster Setup Name exist\n cluster_data = ClustersSetup.objects.filter(cluster_location_detail=cluster_setup).values('cluster_setup_id',\n 'cluster_status')\n print(\"Active Cluster Setup check pass\")\n print(cluster_data)\n\n # Checks if Device MAC Name exist - THIS SEARCH CAN BE NARROW DOWN TO COUNTRY - CITY - AREA\n device_data = Devices.objects.filter(device_mac_address=device).values('device_id', 'device_status')\n print(\"Device check pass\")\n print(device_data)\n\n device_status = str(device_data[0].get('device_status'))\n\n # Check if Device is available for use\n if device_status in status_check:\n print('Device is ', device_status)\n\n # Response data\n response_data['result'] = 'Device ' + device + ' is currently ' + device_status + '. 
Not available for use.'\n #\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n # Convert >> cluster_setup_id << UUID value to String without (-) dashes\n cluster_setup_id = uuid.UUID(str(cluster_data[0].get('cluster_setup_id'))).hex\n print(\"cluster_setup_id: \" + cluster_setup_id)\n\n # Convert >> device_id << UUID value to String without (-) dashes\n device_id = uuid.UUID(str(device_data[0].get('device_id'))).hex\n print(\"device_id: \" + device_id)\n\n # Check if the combination of Active Cluster, Content and Device exist\n active_cluster_and_device_check = ActiveClustersAndDevices.objects.filter(cluster_setup=cluster_setup_id,\n device=device_id)\n\n # If Active Cluster and Device record not found\n if not active_cluster_and_device_check:\n print(\"Active_cluster_and_device doesn't exist\")\n print(active_cluster_and_device_check)\n\n # Create Active Cluster and Device\n active_cluster_and_device_data = ActiveClustersAndDevices(cluster_setup=ClustersSetup.objects.\n get(cluster_setup_id=cluster_setup_id),\n device=Devices.objects.get(device_id=device_id),\n last_modified_user=login_email)\n active_cluster_and_device_data.save()\n print(\"Active Cluster, Content and Device save pass\")\n\n # Update Device Status to ACTIVE\n device_update = Devices.objects.filter(device_mac_address=device)\n device_update.update(device_status=status_check[0])\n\n print(\"Device Updated to: \", device_update)\n\n # Response data\n response_data['result'] = device + ' is now added to ' + cluster_setup + ' cluster.'\n\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n else:\n print(\"Active Cluster and Device setup already exist\")\n # Already exist response message\n response_data['result'] = 'Active Cluster and Device setup already exist'\n\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n\n except ObjectDoesNotExist:\n print(\"Active Cluster and Device Setup check failed\")\n\n response_data['result'] = \"ERROR Status: \" + str(ObjectDoesNotExist.silent_variable_failure) \\\n + \". Active Cluster and Device Setup validation check failed. 
CACAD(1)\"\n\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")", "title": "" }, { "docid": "ef0d1da53366f53990ebf2727cfcd57c", "score": "0.5262311", "text": "def register_cluster_with_http_info(self, body, **kwargs):\n\n all_params = ['body']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method register_cluster\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `register_cluster`\")\n\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api('/cluster', 'POST',\n path_params,\n query_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Cluster',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)", "title": "" }, { "docid": "194417f59bccace68cfb7edab7dd2752", "score": "0.52440494", "text": "def test_eigenvector_centrality_06(self):\n body = {\"depth\": 5, \"sample\": -1, \"label\": \"created\"}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'2:ripple': 3, '1:marko': 3, '1:josh': 1,\n '1:vadas': 1, '1:peter': 3, '2:lop': 1}\n else:\n assert 0", "title": "" }, { "docid": "600dd8f3ca1736c0036f358761e4495d", "score": "0.52275395", "text": "def post_cluster(\n host,\n token,\n process_request_id,\n cluster_index,\n method=METHOD['https']):\n\n url = '%s%s%s' % (method, host, URLS['CLUSTERS'])\n headers = {'Authorization': \"Token %s\" % token}\n\n # process_request and index are required\n data = {\n 'process_request': process_request_id,\n 'index': cluster_index\n }\n\n try:\n response = requests.post(\n url,\n headers=headers,\n data=data,\n verify=False)\n except Exception as e:\n print(e)\n return {'status': None, 'reason': 'No response', 'data': ''}\n\n if response.status_code == 201:\n try:\n data = response.json()\n except Exception as e:\n data = response.text()\n print(e)\n else:\n data = response.text\n\n return {\n 'status': response.status_code,\n 'reason': response.reason,\n 'data': data,\n }", "title": "" }, { "docid": "faacb870e5a14408abd9f5d68b54d26c", "score": "0.52251863", "text": "def func(session):\n # get clusters\n sql = [\"SELECT * FROM cloud.cluster as t1 WHERE t1.removed is Null\"] \n query = session.query(\"id\", \"name\", \"pod_id\", \"data_center_id\", \"hypervisor_type\").\\\n from_statement(\" \".join(sql)).\\\n params().all()\n \n clusters = []\n for item in query:\n cluster_id = item[0]\n hypervisor = 
item[4]\n cluster = {'id':cluster_id,\n 'name':item[1],\n 'pod':item[2],\n 'zone':item[3],\n 'hypervisor':hypervisor}\n if hypervisor == 'VMware':\n # get VMware clusters params\n sql = [\"SELECT * FROM cloud.cluster_details WHERE cluster_id=:cluster_id\"] \n query2 = session.query(\"id\", \"cluster_id\", \"name\", \"value\").\\\n from_statement(\" \".join(sql)).\\\n params(cluster_id=cluster_id).all()\n # append username and password\n for item in query2:\n if query2['name'] == 'username':\n cluster['username'] = query2['value']\n elif query2['name'] == 'password':\n cluster['password'] = query2['value']\n \n clusters.append(cluster)\n\n return clusters", "title": "" }, { "docid": "389163d9fbe3a4db1a6d3ac870751214", "score": "0.5164309", "text": "def _create_ecs_cluster(self, vpc):\n ecs_cluster = ecs.Cluster(self, 'Cluster', vpc=vpc)\n return ecs_cluster", "title": "" }, { "docid": "4370733fff85b86c00231f33aa3bbc29", "score": "0.5149943", "text": "def test_eigenvector_centrality_14(self):\n body = {\"depth\": 5, \"degree\": 50, \"sample\": -1, \"top\": 10, \"source_sample\": -1,\n \"label\":\"created\"}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'2:ripple': 3, '1:marko': 3, '1:peter': 3, '1:josh': 1, '1:vadas': 1, '2:lop': 1}\n else:\n assert 0", "title": "" }, { "docid": "506d07d226a92469b4c1f4889ff7efed", "score": "0.51120394", "text": "def get_clusters(self):\n fields = ['name', ]\n return self.get_data(\"clusters\", fields)", "title": "" }, { "docid": "d4a91b98a13bfbf817111ae13793c720", "score": "0.50987124", "text": "def vcenter_one_node_simple(self):\n self.env.revert_snapshot(\"ready_with_1_slaves\")\n\n # Configure cluster\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=settings.DEPLOYMENT_MODE_SIMPLE,\n settings={\n 'use_vcenter': True,\n 'host_ip': settings.VCENTER_IP,\n 'vc_user': settings.VCENTER_USERNAME,\n 'vc_password': settings.VCENTER_PASSWORD,\n 'cluster': settings.VCENTER_CLUSTERS\n }\n )\n logger.info(\"cluster is {}\".format(cluster_id))\n\n # Add nodes to roles\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-01': ['controller']}\n )\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n # Wait until nova-compute get information about clusters\n # Fix me. 
Later need to change sleep with wait function.\n time.sleep(60)\n\n self.fuel_web.run_ostf(\n cluster_id=cluster_id, test_sets=['smoke', 'sanity'])", "title": "" }, { "docid": "09a120beee8bf8ac66027efbb5e436c2", "score": "0.50866413", "text": "def create_cluster(self):\n create_or_update_cluster(\n config_file=self.project_definition.cluster_yaml(),\n override_min_workers=None,\n override_max_workers=None,\n no_restart=False,\n restart_only=False,\n yes=True,\n override_cluster_name=self.session_name,\n )", "title": "" }, { "docid": "0a26ee1208586b56dff2add7efcc8287", "score": "0.5037164", "text": "def _get_cluster_detail(name: str, min_core: int, max_core: int,\n min_task: int = 0, max_task: int = 0,\n master_instance_type: str = \"r5.xlarge\",\n core_instance_type: str = \"r5.xlarge\",\n task_instance_type: str = \"r5.xlarge\",\n ebs_root_volume_size: int = 32,\n ebs_device_count: int = 4, ebs_device_size_gb: int = 64,\n temporary: bool = True, jupyter: bool = False):\n assert max_core >= min_core, f\"Max core ({max_core}) must be greater than min core spec ({min_core})\"\n assert max_task >= min_task, f\"Max task ({max_task}) must be greater than min task spec ({min_task})\"\n\n cluster_type = 'temporary' if temporary else 'permanent'\n\n cluster_config = {\n 'Name': name,\n 'Instances': {\n \"InstanceFleets\": [\n {\n \"InstanceFleetType\": \"MASTER\",\n \"TargetOnDemandCapacity\": 1,\n \"InstanceTypeConfigs\": [{\"InstanceType\": \"r5.xlarge\"}],\n },\n {\n \"InstanceFleetType\": \"CORE\",\n \"TargetOnDemandCapacity\": 1,\n \"InstanceTypeConfigs\": [{\"InstanceType\": \"r5.xlarge\"}],\n },\n {\n \"InstanceFleetType\": \"TASK\",\n \"TargetOnDemandCapacity\": 1,\n \"InstanceTypeConfigs\": [{\"InstanceType\": task_instance_type}],\n },\n ],\n \"KeepJobFlowAliveWhenNoSteps\": False,\n },\n 'LogUri': AWS_CONFIG['emr']['loguri_prefix'],\n 'ReleaseLabel': AWS_CONFIG['emr']['releaselabel'],\n 'Applications': [{'Name': app} for app in AWS_CONFIG['emr']['applications']],\n 'JobFlowRole': AWS_CONFIG['emr']['jobflowrole'],\n 'ServiceRole': AWS_CONFIG['emr']['servicerole']\n }\n\n return cluster_config", "title": "" }, { "docid": "c101b5f476ab322006e850d7e4bcfe2b", "score": "0.501688", "text": "def test_eigenvector_centrality_10(self):\n body = {\"depth\": 5, \"sample\": -1, \"direction\": \"IN\"}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'2:ripple': 1, '1:marko': 6, '1:josh': 1,\n '1:vadas': 1, '1:peter': 3, '2:lop': 2}\n else:\n assert 0", "title": "" }, { "docid": "83fd27a91aad93a3987eae7ac7ce71ec", "score": "0.50140077", "text": "def test_eigenvector_centrality_07(self):\n body = {\"depth\": 5, \"sample\": -1, \"label\": \"knows\"}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'2:ripple': 1, '1:marko': 4, '1:josh': 4,\n '1:vadas': 4, '1:peter': 4, '2:lop': 1}\n else:\n assert 0", "title": "" }, { "docid": "3c54a844c5588e0e5ca01742e94834d9", "score": "0.5012575", "text": "def create(self, cluster_name, num_masters=1, num_workers=2,\n master_type='n1-standard-1', worker_type='n1-standard-1',\n master_disk_gb=50, worker_disk_gb=50, init_scripts=[], block=True):\n log.info(\"Creating cluster '{}'\".format(cluster_name))\n zone_uri = 'https://www.googleapis.com/compute/v1/projects/{}/zones/{}'.format(\n 
self.dataproc.project, self.dataproc.zone)\n\n cluster_data = {\n 'projectId': self.dataproc.project,\n 'clusterName': cluster_name,\n 'config': {\n 'gceClusterConfig': {\n 'zoneUri': zone_uri\n },\n 'workerConfig': {\n 'numInstances': num_workers,\n 'machineTypeUri': worker_type,\n 'diskConfig': {\n 'bootDiskSizeGb': worker_disk_gb\n }\n },\n 'masterConfig': {\n 'numInstances': num_masters,\n 'machineTypeUri': master_type,\n 'diskConfig': {\n 'bootDiskSizeGb': master_disk_gb\n }\n }\n }\n }\n\n if init_scripts:\n cluster_data['config']['initializationActions'] = [\n {'executableFile': init_script} for init_script in init_scripts\n ]\n\n log.debug('Cluster settings: {}'.format(cluster_data))\n\n try:\n result = self.dataproc.client.projects().regions().clusters().create(\n projectId=self.dataproc.project,\n region=self.dataproc.region,\n body=cluster_data\n ).execute()\n except HttpError as e:\n if e.resp['status'] == '409':\n raise ClusterAlreadyExistsException(\"Cluster '{}' already exists\".format(cluster_name))\n raise e\n\n log.debug(\"Create call for cluster '{}' returned: {}\".format(cluster_name, result))\n\n cluster = Cluster(self.dataproc, cluster_name)\n\n if not block:\n return cluster\n\n\n status = cluster.status()\n log.info(\"Waiting for cluster to be ready...\")\n while not status in ['RUNNING', 'ERROR']:\n time.sleep(5)\n status = cluster.status()\n\n if status == 'ERROR':\n cluster_info = cluster.info()\n status_detail = cluster_info['status'].get('detail', '')\n raise Exception(\"Cluster encountered an error: {}\".format(status_detail))\n\n log.info(\"Cluster '{}' is ready.\".format(cluster_name))\n return cluster", "title": "" }, { "docid": "2b12463d568e2327865d03bc7cec241c", "score": "0.5009245", "text": "def get_clusters():\n # Get DB from context:\n db = getattr(g, 'db', None)\n colection = get_kmeans_collection()\n kmeans_collection = db[colection]\n\n ret_array = []\n for cluster in kmeans_collection.find().sort([('class_name', pymongo.DESCENDING), ('_id', pymongo.DESCENDING)]):\n cluster['_id'] = str(cluster['_id'])\n ret_array.append(cluster)\n return jsonify({'clusters': ret_array}),200", "title": "" }, { "docid": "2419ae7f843e93729b55e1798b3b3f92", "score": "0.5001839", "text": "def principal_clusters():\n jsondata = request.json\n features = jsondata.get('data')\n k = jsondata.get('k')\n return kkmeans.get_clusters(features, k)", "title": "" }, { "docid": "26d6efd78cc952e012dba3367c0a2f7a", "score": "0.5000834", "text": "def do_POST(self):\n\n # Extract and print the contents of the POST\n length = int(self.headers['Content-Length'])\n print 'length: %d' % (length)\n\n df = pandas.read_csv(StringIO(self.rfile.read(length)), sep=\" \")\n classifier.fit(df) \n\n components = []\n for i in range(n_clusters):\n component = {}\n component[\"x\"] = int(classifier.means_[i][0].tolist())\n component[\"y\"] = int(classifier.means_[i][1].tolist())\n component[\"weight\"] = float(\"%.3f\" % classifier.weights_[i].tolist())\n component[\"covarX\"] = int(classifier.covars_[i][0].tolist())\n component[\"covarY\"] = int(classifier.covars_[i][1].tolist())\n components.append(component)\n\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n self.wfile.write(json.dumps(components))", "title": "" }, { "docid": "c09ea59bdfe65438ad42fd9ba4c6ba7d", "score": "0.49960417", "text": "def get_clusters(self):\n r = self.__client.get(self.__routes.clusters())\n if r.status_code != 200:\n raise ResponseError(\"Cannot list clusters\", 
r)\n return r.json()", "title": "" }, { "docid": "1f4f00721765b8f24f3be17a8bb1ed5a", "score": "0.4993667", "text": "def test_eigenvector_centrality_18(self):\n body = {\"depth\": 5, \"degree\": 50, \"sample\": -1, \"top\": 10, \"source_sample\": -1,\n \"direction\": \"BOTH\", \"label\": \"created\"}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'2:ripple': 3, '1:marko': 3, '1:peter': 3, '1:josh': 1, '1:vadas': 1, '2:lop': 1}\n else:\n assert 0", "title": "" }, { "docid": "4e76e2337d5e90c11cb86fe71357234b", "score": "0.4993377", "text": "def create_couch_cluster(self, name='', description='', replicas=1, partitions=1):\n persistence_sys = PersistenceSystem(name=name, description=description, type=PersistenceType.COUCHDB)\n persistence_sys.defaults['replicas'] = replicas\n persistence_sys.defaults['partitions'] = partitions\n persistence_sys_id, rev = self.clients.resource_registry.create(persistence_sys)\n return persistence_sys_id", "title": "" }, { "docid": "5508d4818184a3de02f262b3c9299838", "score": "0.49872875", "text": "def clusters_factory(request):\n factory = ClustersFactory(keep_logs_on_failure=request.config.getoption(\"keep_logs_on_cluster_failure\"))\n\n def _cluster_factory(cluster_config):\n cluster_config = _write_cluster_config_to_outdir(request, cluster_config)\n cluster = Cluster(\n name=request.config.getoption(\"cluster\")\n if request.config.getoption(\"cluster\")\n else \"integ-tests-{0}{1}{2}\".format(\n random_alphanumeric(),\n \"-\" if request.config.getoption(\"stackname_suffix\") else \"\",\n request.config.getoption(\"stackname_suffix\"),\n ),\n config_file=cluster_config,\n ssh_key=request.config.getoption(\"key_path\"),\n )\n if not request.config.getoption(\"cluster\"):\n factory.create_cluster(cluster)\n return cluster\n\n yield _cluster_factory\n if not request.config.getoption(\"no_delete\"):\n factory.destroy_all_clusters(\n keep_logs=request.config.getoption(\"keep_logs_on_test_failure\") and request.node.rep_call.failed\n )", "title": "" }, { "docid": "b4e665a4ade18ce49359a793552157bd", "score": "0.49824542", "text": "def _createCluster(self):\n # Write the particles\n prot = self.protocol\n project = prot.getProject()\n inputSet = prot.getInputParticles()\n fnSqlite = prot._getTmpPath('cluster_particles.sqlite')\n cleanPath(fnSqlite)\n partSet = SetOfParticles(filename=fnSqlite)\n partSet.copyInfo(inputSet)\n for point in self.getData():\n if point.getState() == Point.SELECTED:\n particle = inputSet[point.getId()]\n partSet.append(particle)\n partSet.write()\n partSet.close()\n \n from protocol_batch_cluster import BatchProtNMACluster\n newProt = project.newProtocol(BatchProtNMACluster)\n clusterName = self.clusterWindow.getClusterName()\n if clusterName:\n newProt.setObjLabel(clusterName)\n newProt.inputNmaDimred.set(prot)\n newProt.sqliteFile.set(fnSqlite)\n \n project.launchProtocol(newProt)", "title": "" }, { "docid": "5f8797f4cbbe8e7a0ee684d22bcf7d0e", "score": "0.49792212", "text": "def clusters_post(self, data, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.clusters_post_with_http_info(data, **kwargs)\n else:\n (data) = self.clusters_post_with_http_info(data, **kwargs)\n return data", "title": "" }, { "docid": "bcdec3261f0b961eba096e73b3a462dd", "score": "0.4972235", "text": "def get_clusters(self):\n response = 
self.proxy.one.clusterpool.info(self.session_string)\n if response[0] is not True:\n raise (Exception(\"one.clusterpool.info failed (error code: {}) {}\".format(\n response[2],\n response[1])))\n items = []\n for child in etree.fromstring(response[1]):\n items.append(Cluster.from_xml_etree(child))\n items.sort(key=lambda x: x.name)\n return items", "title": "" }, { "docid": "932e844dc9b45c5a46fab0a870a5f7bc", "score": "0.4963693", "text": "async def get_cluster_version(self, message):\n clus = Cluster()\n clus.get()\n await message.respond('All done! Response: {}'.format(clus.version))", "title": "" }, { "docid": "5a9a0d86a365d8248e278e24ef516ccb", "score": "0.4953062", "text": "def get_cluster_version_info(self):\n params = {}\n return self.send_request(\n 'GetClusterVersionInfo',\n params)", "title": "" }, { "docid": "468b892fe49ca61f4a2b68ec5b076b9e", "score": "0.49524367", "text": "def test_eigenvector_centrality_19(self):\n body = {\"depth\": 5, \"degree\": 50, \"sample\": -1, \"top\": 10, \"source_sample\": 10,\n \"direction\": \"BOTH\", \"label\": \"created\"}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'2:ripple': 3, '1:marko': 3, '1:peter': 3, '1:josh': 1, '1:vadas': 1, '2:lop': 1}\n else:\n assert 0", "title": "" }, { "docid": "8312c3dd843d3b62fc498ee3de71acbc", "score": "0.4950606", "text": "def get(self, request, project_id):\n name = request.GET.get(\"name\")\n cluster_resp = paas_cc.get_cluster_by_name(\n request.user.token.access_token, project_id, name\n )\n if cluster_resp.get('code') != ErrorCode.NoError:\n raise error_codes.APIError(cluster_resp.get('message'))\n data = cluster_resp.get('data') or {}\n return response.Response({\n 'is_exist': True if data.get('count') else False\n })", "title": "" }, { "docid": "28e1cac5372c0b1040b4049c2d3dbaad", "score": "0.49503842", "text": "def get_cluster_setup(request, return_type):\n\n print('*** Get Cluster Setup ****')\n response_data = {}\n\n # Get Cluster Setup\n try:\n data_check = ClustersSetup.objects.values('cluster_location_detail')\n print(str(data_check))\n\n if data_check:\n print('Cluster Setup(s) data found')\n data = list(data_check)\n\n for i in range(len(data)):\n response_data.setdefault(str(i), (data[i].get('cluster_location_detail')))\n\n print('Response length: ', len(response_data)) # Sanity check\n print(response_data) # Sanity check\n\n if return_type == 'HTTP':\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n else:\n return response_data\n\n else:\n response_data[\"none\"] = \"NO DATA\"\n # print(response_data)\n\n return response_data\n\n except ObjectDoesNotExist:\n response_data['result'] = \"ERROR Status: \" + str(ObjectDoesNotExist.silent_variable_failure) + \\\n \". Get Cluster Setup request failure. GCS(1)\"\n\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")", "title": "" }, { "docid": "53523e5f96cca55dbad6395fecc0eb17", "score": "0.49493504", "text": "def MakeCluster(args):\n cluster = {}\n if args.description:\n cluster['display_name'] = args.description\n if args.nodes:\n cluster['serve_nodes'] = args.nodes\n return cluster", "title": "" }, { "docid": "d94208a7406f6aeca853bfad2fabb9fa", "score": "0.49448454", "text": "def get_compute_clusters(self):\n nc = self._get_service_topo('services.nova.components')\n\n # We need to filter out non-existent compute hosts (i.e. 
baremetal)\n if 'hypervisor-list' in self.data:\n hyp_list = self.data['hypervisor-list']\n else:\n hyp_list = self.call_service(target='nova',\n operation='hypervisor-list',\n data={'include_status': False})\n old_cplanes = nc['nova-compute']['control_planes']\n new_cplanes = {}\n for hyp in hyp_list:\n # Use the hypervisor's service_host, which appears to match what is\n # in the model and works with kvm hosts,\n ch_name = hyp['service_host']\n\n ch_region = hyp['region']\n\n # go thru each control plane and see if the host and region from\n # the hypervisor list exists. If so, add this into new_cplanes\n for cp_name, cp in old_cplanes.iteritems():\n if ch_region not in cp['regions']:\n continue\n for type in ('resources', 'clusters'):\n if type not in cp:\n continue\n for cl_name, cl_hosts in cp[type].iteritems():\n if ch_name not in cl_hosts:\n continue\n new_cplanes = self._insert_cluster_data(\n new_cplanes,\n cp_name,\n cp['regions'],\n type,\n cl_name,\n ch_name\n )\n nc['nova-compute']['control_planes'] = new_cplanes\n\n return self._derive_clusters(nc['nova-compute'])", "title": "" }, { "docid": "1900c42ea424567b34d71f9d88b8d16b", "score": "0.4935743", "text": "def test_eigenvector_centrality_05(self):\n body = {\"depth\": 5, \"sample\": 2}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert 1\n # assert result == {'2:ripple': 4, '1:marko': 4, '1:josh': 7, '1:vadas': 5, '1:peter': 6, '2:lop': 1}\n else:\n assert 0", "title": "" }, { "docid": "54a7607dc0ce46193aa930a2bf8d6309", "score": "0.49267843", "text": "def test_eigenvector_centrality_11(self):\n body = {\"depth\": 5, \"sample\": -1, \"degree\": 5}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'2:ripple': 10, '1:marko': 10, '1:josh': 10, '1:vadas': 10, '1:peter': 10, '2:lop': 10}\n else:\n assert 0", "title": "" }, { "docid": "6574e10317e891ac7a64858a3165a329", "score": "0.49235997", "text": "def test_eigenvector_centrality_04(self):\n body = {\"depth\": 5, \"sample\": -1}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'2:ripple': 10, '1:marko': 10, '1:josh': 10,\n '1:vadas': 10, '1:peter': 10, '2:lop': 10}\n else:\n assert 0", "title": "" }, { "docid": "a35d5501640f8cb500b86f025179d19b", "score": "0.49158698", "text": "def create_ocean_cluster(self, ocean):\n ocean = spotinst_ocean.OceanRequest(ocean)\n\n excluded_group_dict = self.exclude_missing(json.loads(ocean.toJSON()))\n\n formatted_group_dict = self.convert_json(\n excluded_group_dict, self.underscore_to_camel)\n\n body_json = json.dumps(formatted_group_dict)\n\n group_response = self.send_post(\n body=body_json,\n url=self.__base_ocean_url,\n entity_name='ocean')\n\n formatted_response = self.convert_json(\n group_response, self.camel_to_underscore)\n\n retVal = formatted_response[\"response\"][\"items\"][0]\n\n return retVal", "title": "" }, { "docid": "af9683853b785d2afeecff640a663520", "score": "0.49140784", "text": "def list_node(self):\n return self._request(\n 'GET',\n f\"{self.cluster.api_address}/api/v1/nodes\"\n )", "title": "" }, { "docid": "dbd51770485af159b7146df35cf9e9b3", "score": "0.49140137", "text": "def test_eigenvector_centrality_15(self):\n body 
= {\"depth\": 5, \"degree\": 50, \"sample\": -1, \"top\": 10, \"source_sample\": -1,\n \"direction\":\"BOTH\"}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'2:ripple': 10, '1:marko': 10, '1:josh': 10,\n '1:vadas': 10, '1:peter': 10, '2:lop': 10}\n else:\n assert 0", "title": "" }, { "docid": "c1f6bed1bab2577fe67154c63ddd8229", "score": "0.4905028", "text": "def UpdateCluster(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "a8bc2cb00262c9170fd58d2aaf358bf7", "score": "0.4904268", "text": "def ListClusters(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "dd803ed00a68faf1b805246a160f8d4c", "score": "0.4902687", "text": "def put(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.DATA)\n user_token = Token.objects.get(key=request.auth)\n user = UserInfo.objects.get(user_id=user_token.user.user_id)\n if serializer.is_valid():\n try:\n db_cluster_update(user, serializer.data['status'],\n serializer.data['cluster_name'],\n master_ip=serializer.data['master_ip'])\n return Response({\"id\": 1, \"message\": \"Requested cluster updated\"})\n except ClientError, e:\n return Response({\"id\": 1, \"message\": e.message})\n except Exception, e:\n return Response({\"id\": 1, \"message\": e.args[0]})\n return Response(serializer.errors)", "title": "" }, { "docid": "f09535e80d3fa45de8f4d83dc6368264", "score": "0.48892722", "text": "def test_eigenvector_centrality_13(self):\n body = {\"depth\": 5, \"degree\": 50, \"sample\": -1, \"top\": 10, \"source_sample\": -1}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'2:ripple': 10, '1:marko': 10, '1:josh': 10,\n '1:vadas': 10, '1:peter': 10, '2:lop': 10}\n else:\n assert 0", "title": "" }, { "docid": "c149608753c1ce2cb2c485d9a5933d96", "score": "0.48825493", "text": "def create_cluster(self, **attrs):\n return self._create(_cluster.Cluster, **attrs)", "title": "" }, { "docid": "c149608753c1ce2cb2c485d9a5933d96", "score": "0.48825493", "text": "def create_cluster(self, **attrs):\n return self._create(_cluster.Cluster, **attrs)", "title": "" }, { "docid": "542b3dfd49748a48fa7641dc0bd5cde5", "score": "0.48817107", "text": "def test_eigenvector_centrality_16(self):\n body = {\"depth\": 5, \"degree\": 50, \"sample\": -1, \"top\": 10, \"source_sample\": -1,\n \"direction\":\"IN\"}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'1:marko': 6, '1:peter': 3, '2:lop': 2, '2:ripple': 1, '1:josh': 1, '1:vadas': 1}\n else:\n assert 0", "title": "" }, { "docid": "827947482a205feb5f47f663fff7b4d1", "score": "0.4878942", "text": "def cluster_create(name, version, **cm_args):\n if cluster_exists(name, **cm_args):\n log.info('{0!r} already exists'.format(name))\n return False\n\n a = _connect(**cm_args)\n if a is None:\n return False\n\n try:\n c = a.create_cluster(name, version)\n except ApiException as e:\n err = 
'{0}'.format(e._message)\n log.error(err)\n\n if cluster_exists(name, **cm_args):\n log.info('{0!r} created'.format(name))\n return True\n else:\n return False", "title": "" }, { "docid": "37d6020303ef4010218f17e911a3b982", "score": "0.4870987", "text": "def create_compellent_cluster(self, name='', description='', replicas=1):\n persistence_sys = PersistenceSystem(name=name, description=description, type=PersistenceType.COMPELLANT)\n persistence_sys.defaults['replicas'] = replicas\n persistence_sys_id, rev = self.clients.resource_registry.create(persistence_sys)\n return persistence_sys_id", "title": "" }, { "docid": "8bde7a2dc0289bf7d7bd4b753eec0ba3", "score": "0.48643857", "text": "def get_cluster_info(self):\n params = {}\n return self.send_request(\n 'GetClusterInfo',\n params)", "title": "" }, { "docid": "b0d403a8c74e945d43b3014ae7c4e177", "score": "0.48630592", "text": "def get_clusters(host):\n return _get_page(host, \"/clusters\")[\"listFields\"][\"clusters\"]", "title": "" }, { "docid": "4ccf33e43cddc89b7146c2996d81070b", "score": "0.4861253", "text": "def create_cluster(cluster_name: str, min_core: int, max_core: int,\n min_task: int = 0, max_task: int = 0,\n master_instance_type: str = \"r5.xlarge\",\n core_instance_type: str = \"r5.xlarge\",\n task_instance_type: str = \"r5.xlarge\",\n ebs_root_volume_size: int = 32,\n ebs_device_count: int = 4, ebs_device_size_gb: int = 64,\n temporary: bool = True, jupyter: bool = False) -> str:\n spec = _get_cluster_detail(cluster_name, min_core, max_core,\n min_task=min_task, max_task=max_task,\n master_instance_type=master_instance_type,\n core_instance_type=core_instance_type,\n task_instance_type=task_instance_type,\n ebs_root_volume_size=ebs_root_volume_size,\n ebs_device_count=ebs_device_count,\n ebs_device_size_gb=ebs_device_size_gb,\n temporary=temporary, jupyter=jupyter)\n log.debug(f\"Cluster create spec: {pprint.pformat(spec)}\")\n response = _get_client().run_job_flow(**spec)\n return response['JobFlowId']", "title": "" }, { "docid": "63c851de880ddd0f65d6d5d90a136f78", "score": "0.48584178", "text": "def test_eigenvector_centrality_17(self):\n body = {\"depth\": 5, \"degree\": 50, \"sample\": -1, \"top\": 10, \"source_sample\": -1,\n \"direction\": \"OUT\"}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'2:ripple': 5, '1:josh': 3, '1:vadas': 2, '1:peter': 2, '1:marko': 1, '2:lop': 1}\n else:\n assert 0", "title": "" }, { "docid": "6f10e2c6e41123fa35be7ac419be5027", "score": "0.4847715", "text": "def generate_cluster(self):\n from bims.models.boundary_type import BoundaryType\n from bims.models.cluster import Cluster\n from bims.utils.cluster import update_cluster\n\n # get clusters below of this boundary\n clusters = Cluster.objects.filter(\n boundary__top_level_boundary=self).order_by(\n 'module'\n )\n # if lowest boundary, do actual update cluster\n if self.type == BoundaryType.lowest_type():\n update_cluster(self)\n else:\n # if not lowest, calculate from it's cluster boundary\n Cluster.objects.filter(\n boundary=self).delete()\n for cluster in clusters:\n # create/update new cluster count\n try:\n self_cluster = Cluster.objects.get(\n boundary=self,\n module=cluster.module\n )\n except Cluster.DoesNotExist:\n self_cluster = Cluster.objects.create(\n boundary=self,\n module=cluster.module\n )\n\n # add site count\n self_cluster.site_count = (\n self_cluster.site_count + 
cluster.site_count\n )\n\n # update detail\n details = {\n 'records': 0,\n 'sites': 0,\n 'survey': 0\n }\n try:\n details = json.loads(\n self_cluster.details)\n except ValueError:\n pass\n\n # adding value from lower cluster\n try:\n cluster_detail = json.loads(cluster.details)\n details['records'] += cluster_detail['records']\n details['sites'] += cluster_detail['sites']\n details['survey'] += cluster_detail['survey']\n try:\n details['site_detail'] = cluster_detail['site_detail']\n except KeyError:\n pass\n except ValueError:\n pass\n\n self_cluster.details = json.dumps(details)\n self_cluster.save()", "title": "" }, { "docid": "97b8dfe1e71edf1cc65b1538a361a8ab", "score": "0.48442325", "text": "def test_eigenvector_centrality_02(self):\n body = {\"depth\": 5, \"source_sample\": -1}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert 1\n # assert result == {'2:ripple': 3, '1:marko': 5, '1:josh': 2, '1:vadas': 5, '1:peter': 4, '2:lop': 1}\n else:\n assert 0", "title": "" }, { "docid": "458e56eaa2c4328f903b88a3bafa2761", "score": "0.4841581", "text": "def gke_clusters(self) -> pulumi.Output[Sequence['outputs.ResponsePolicyGKEClusterResponse']]:\n return pulumi.get(self, \"gke_clusters\")", "title": "" }, { "docid": "6c4b92701486cce0987952ad0b624fa4", "score": "0.48351386", "text": "def vcenter_ha(self):\n self.env.revert_snapshot(\"ready_with_3_slaves\")\n\n # Configure cluster\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=settings.DEPLOYMENT_MODE_HA,\n settings={\n 'use_vcenter': True,\n 'host_ip': settings.VCENTER_IP,\n 'vc_user': settings.VCENTER_USERNAME,\n 'vc_password': settings.VCENTER_PASSWORD,\n 'cluster': settings.VCENTER_CLUSTERS,\n 'tenant': 'vcenter',\n 'user': 'vcenter',\n 'password': 'vcenter'\n }\n )\n logger.info(\"cluster is {0}\".format(cluster_id))\n\n # Add nodes to roles\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-01': ['controller'],\n 'slave-02': ['controller'],\n 'slave-03': ['controller']}\n )\n # Deploy cluster\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n # Wait until nova-compute get information about clusters\n # Fix me. 
Later need to change sleep with wait function.\n time.sleep(60)\n\n self.fuel_web.run_ostf(\n cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])", "title": "" }, { "docid": "28150e3a6c5804186cb58ee3d8a8b9c3", "score": "0.4829644", "text": "def _do_startup(self, cloud):\n\n startCloud = self._init_cloud(cloud)\n vals = { 'action': '_do_startup', 'count': 1 }\n self.logger.debug(self.ACTION_REQUESTING_NODES % vals)\n\n try:\n nodes = self.controller.create_nodes(1,\n client.check_agent_process, self.AGENT_PORT, startCloud)\n\n hub_node = nodes[0]\n\n # The first agent is a TaskFarm Hub and a TaskFarm Node\n client.create_hub(hub_node.ip, self.AGENT_PORT)\n client.create_node(hub_node.ip, self.AGENT_PORT, hub_node.ip)\n self.logger.info(\"Added node %s: %s \" % (hub_node.id, hub_node.ip))\n # node_info.add_node_info('/etc/hosts', hub_node.ip, hub_node.id)\n\n self.hub_ip = hub_node.ip\n\n # Extend the nodes list with the newly created one\n self.nodes += nodes\n self.state = self.S_RUNNING\n except Exception, err:\n self.logger.exception('_do_startup: Failed to create hub: %s' % err)\n self.state = self.S_ERROR", "title": "" }, { "docid": "56f2005c367d3e3700c61da29ca6870b", "score": "0.48292768", "text": "def nsxv_add_delete_nodes(self):\n self.env.revert_snapshot(\"ready_with_9_slaves\")\n\n self.install_nsxv_plugin()\n\n settings = self.get_settings()\n # Configure cluster\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=DEPLOYMENT_MODE,\n settings=settings,\n configure_ssl=False)\n\n # Assign role to node\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-01': ['controller'],\n 'slave-02': ['controller'],\n 'slave-03': ['controller'],\n 'slave-04': ['compute-vmware'], })\n\n target_node_2 = self.node_name('slave-04')\n\n # Configure VMWare vCenter settings\n self.fuel_web.vcenter_configure(cluster_id,\n multiclusters=True,\n target_node_2=target_node_2)\n\n self.enable_plugin(cluster_id=cluster_id)\n self.fuel_web.verify_network(cluster_id)\n self.fuel_web.deploy_cluster_wait(\n cluster_id, timeout=pt_settings.WAIT_FOR_LONG_DEPLOY)\n\n self.fuel_web.run_ostf(\n cluster_id=cluster_id, test_sets=['smoke'])\n\n # Add 1 node with cinder-vmware role and redeploy cluster\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-05': ['cinder-vmware'], })\n\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.fuel_web.run_ostf(\n cluster_id=cluster_id, test_sets=['smoke'])\n\n # Remove node with cinder-vmware role and redeploy cluster\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-05': ['cinder-vmware'], }, False, True)\n\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.fuel_web.run_ostf(\n cluster_id=cluster_id, test_sets=['smoke'])\n\n # Remove node with compute-vmware role and redeploy cluster\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-04': ['compute-vmware'], }, False, True)\n self.fuel_web.vcenter_configure(cluster_id)\n\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.fuel_web.run_ostf(\n cluster_id=cluster_id, test_sets=['smoke'])", "title": "" }, { "docid": "71c61b2cf83fa21d4e587d7ca9ba6e82", "score": "0.48271054", "text": "def test_eigenvector_centrality_09(self):\n body = {\"depth\": 5, \"sample\": -1, \"direction\": \"OUT\"}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'2:ripple': 5, '1:marko': 1, '1:josh': 3,\n '1:vadas': 2, '1:peter': 2, '2:lop': 1}\n else:\n 
assert 0", "title": "" }, { "docid": "7ba7cc7d5d2dcddeac440d6747ddf78c", "score": "0.4825316", "text": "def list_clusters_with_details(self):\n\t\tprint(\"List the details of clusters in the region.\")\n\t\tresponse = self.client.list_clusters(self.project_id, self.zone)\n\t\tprint (response)", "title": "" }, { "docid": "9a11a921dec891ec1db62f33a53c2023", "score": "0.48237437", "text": "def test_eigenvector_centrality_12(self):\n body = {\"depth\": 5, \"sample\": -1, \"top\": 5}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'2:ripple': 10, '1:marko': 10, '1:josh': 10, '1:vadas': 10, '1:peter': 10}\n else:\n assert 0", "title": "" }, { "docid": "47c576ed9e7aa217567f5d89b57d9ed8", "score": "0.48200986", "text": "def test_eigenvector_centrality_03(self):\n body = {\"depth\": 5, \"source_sample\": 2}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert 1\n # assert result == {'2:ripple': 2, '1:peter': 1, '2:lop': 1}\n else:\n assert 0", "title": "" }, { "docid": "2f04d5a90c43cfdb02ffca40062dee6b", "score": "0.48068434", "text": "def test_create_cluster(self):\n pass", "title": "" }, { "docid": "062b81af77dfe3bd06e108cccc01d0aa", "score": "0.4801918", "text": "def add_nodes(self, kwargs):\n self.controller.add_context_replacement(dict(STRING='taskfarm'))\n\n # Adding nodes makes sense only in the RUNNING state\n if self.state != self.S_RUNNING:\n vals = { 'curstate': self.state, 'action': 'add_nodes' }\n return HttpErrorResponse(self.WRONG_STATE_MSG % vals)\n\n # Ensure 'count' is valid\n count_or_err = self.__check_count_in_args(kwargs) \n if isinstance(count_or_err, HttpErrorResponse):\n return count_or_err\n\n count = count_or_err\n\n self.state = self.S_ADAPTING\n Thread(target=self._do_add_nodes, args=[count, kwargs['cloud']]).start()\n\n return HttpJsonResponse({ 'state': self.state })", "title": "" }, { "docid": "4f9185539e374e8e4c46a7e53790d31b", "score": "0.47986478", "text": "def etcd_cluster_status(self):\n name, master_ip = self.get_random_master()\n\n exec_command = ['/bin/sh', '-c', ETCDCTL_BASE.format(\n \"member list\", master_ip)]\n\n response = stream(self.api.connect_get_namespaced_pod_exec,\n \"etcd-%s\" % name, 'kube-system',\n command=exec_command,\n stderr=True, stdin=False,\n stdout=True, tty=False)\n\n if not response or not re.search(\"master-\\\\d+\", response):\n LOGGER.info(response)\n raise ValueError(\"Could not extract current etcd cluster state!\")\n # respone should be something like\n # {'members': [{'ID': 9007573287841766007, 'name': 'master-7-am',\n # 'peerURLs': ['https://10.32.192.11:2380'],\n # 'clientURLs': ['https://10.32.192.11:2379']}]}\n response = yaml.load(response)\n etcd_cluster = \",\".join((\"=\".join((m['name'], m['peerURLs'][0])) for m\n in response['members'] if 'name' in m))\n LOGGER.debug(\"Current etcd cluster state is: %s\", etcd_cluster)\n\n return etcd_cluster", "title": "" }, { "docid": "103908da911376d51743fcc6080bc488", "score": "0.4791052", "text": "def post(self):\n dimensions = json.loads(self.request.get('dimensions'))\n group = self.request.get('group')\n instance_map = json.loads(self.request.get('instance_map'))\n policies = json.loads(self.request.get('policies'))\n\n requests = {}\n\n for instance_name, service_account in instance_map.iteritems():\n 
requests[instance_name] = {\n 'dimensions': dimensions.copy(), 'policies': policies}\n requests[instance_name]['dimensions']['hostname'] = instance_name\n requests[instance_name]['policies']['machine_service_account'] = (\n service_account)\n\n try:\n responses = machine_provider.add_machines(\n requests.values()).get('responses', {})\n except net.Error as e:\n logging.warning(e)\n responses = {}\n\n for response in responses:\n request = response.get('machine_addition_request', {})\n error = response.get('error')\n instance_name = request.get('dimensions', {}).get('hostname')\n if instance_name in requests.keys():\n if not error:\n logging.info('Instance added to Catalog: %s', instance_name)\n requests.pop(instance_name)\n elif error == 'HOSTNAME_REUSE':\n logging.warning('Hostname reuse in Catalog: %s', instance_name)\n requests.pop(instance_name)\n else:\n logging.warning('Instance not added to Catalog: %s', instance_name)\n else:\n logging.info('Unknown instance: %s', instance_name)\n\n reschedule_instance_cataloging(\n models.InstanceGroup.generate_key(group), requests.keys())", "title": "" }, { "docid": "e86dddb13b74051620497b69d5b77d6d", "score": "0.4788126", "text": "def test_eigenvector_centrality_08(self):\n body = {\"depth\": 5, \"sample\": -1, \"direction\": \"BOTH\"}\n code, res = Algorithm().post_eigenvector_centrality(body, auth=auth)\n id = res[\"task_id\"]\n if id > 0:\n result = get_task_res(id, 120, auth=auth)\n print(result)\n assert result == {'2:ripple': 10, '1:marko': 10, '1:josh': 10,\n '1:vadas': 10, '1:peter': 10, '2:lop': 10}\n else:\n assert 0", "title": "" }, { "docid": "bdc325b42fcfa78ac3c27ba3b6597447", "score": "0.47805455", "text": "def cluster(self):\n return self.api.cluster()", "title": "" }, { "docid": "89e1e6de958be1bf8540d3e7cb9bf207", "score": "0.4761062", "text": "def setCloudMode(self, value):\n\t\treturn self._post(\"cloud_mode\", value)", "title": "" }, { "docid": "c0bc9fddc67dbd06da248016ed8c8fc5", "score": "0.47522718", "text": "def create_cluster(\n self, flow_config: dict, cluster_config: list, region: str\n ) -> dict:\n log_uri = flow_config.get(self.LOG_URI) + self._get_startup_timestamp()\n\n response = self.client.run_job_flow(\n Name=flow_config.get(self.NAME),\n LogUri=log_uri,\n ReleaseLabel=flow_config.get(self.RELEASE_LABEL, None),\n Instances=flow_config.get(region).get(self.INSTANCES),\n Applications=flow_config.get(self.APPLICATIONS, []),\n BootstrapActions=flow_config.get(self.BOOTSTRAP_ACTIONS, []),\n Configurations=cluster_config,\n VisibleToAllUsers=True,\n JobFlowRole=flow_config.get(region).get(\n self.JOB_FLOW_ROLE, self.EMR_JOB_FLOW_DEFAULT\n ),\n ServiceRole=flow_config.get(region).get(self.SERVICE_ROLE),\n Tags=flow_config.get(self.TAGS, {}),\n )\n\n self.cluster_id = response.get(\"JobFlowId\")\n logging.info(\"New Cluster [%s] running in region [%s]\", self.cluster_id, region)\n\n return response", "title": "" }, { "docid": "07ab1101a7f112c4e577bb4cc87d8e1f", "score": "0.47511598", "text": "def GetCluster(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "24b81ab0183b6ddd57b0ee7654e16adb", "score": "0.4746546", "text": "def create(self, request, *args, **kwargs):\n if hasattr(self.request.user, 'worker'):\n try:\n worker = models.Worker.objects.get(user=self.request.user)\n cluster = models.Cluster.objects.get(\n id=request.data['cluster_id']\n )\n except Exception 
as e:\n return Response(data={'detail': e.message}, status=400)\n\n # ensure ProcessRequest is assigned to this worker\n if cluster.process_request.worker != worker:\n return Response(\n data={\n 'detail': 'Request is not assigned to this worker'\n },\n status=400)\n else:\n # only workers can post SampleClusters\n return Response(data={'detail': 'Bad request'}, status=400)\n\n # if we get here, the worker is bonafide! \"He's a suitor!\"\n # we can create the SampleCluster instance now,\n # but we'll do so inside an atomic transaction\n try:\n sample = models.Sample.objects.get(id=request.data['sample_id'])\n\n with transaction.atomic():\n sample_cluster = models.SampleCluster(\n cluster=cluster,\n sample=sample,\n event_percentage=request.data['event_percentage']\n )\n\n # save event indices in a numpy file\n events_file = TemporaryFile()\n np.savetxt(\n events_file,\n np.array(request.data['events']),\n fmt='%s',\n delimiter=','\n )\n sample_cluster.events.save(\n join([str(sample.id), 'csv'], '.'),\n File(events_file),\n save=False\n )\n\n sample_cluster.clean()\n sample_cluster.save()\n\n # now create SampleClusterParameter instances\n for param in request.data['parameters']:\n models.SampleClusterParameter.objects.create(\n sample_cluster=sample_cluster,\n channel=param,\n location=request.data['parameters'][param]\n )\n\n # Finally, save all the SampleClusterComponent instances\n # along with their parameters\n for comp in request.data['components']:\n scc = models.SampleClusterComponent.objects.create(\n sample_cluster=sample_cluster,\n covariance_matrix=comp['covariance'],\n weight=comp['weight']\n )\n for comp_param in comp['parameters']:\n models.SampleClusterComponentParameter.objects.create(\n sample_cluster_component=scc,\n channel=comp_param,\n location=comp['parameters'][comp_param]\n )\n\n except Exception as e: # catch any exception to rollback changes\n return Response(data={'detail': e.message}, status=400)\n\n serializer = serializers.SampleClusterSerializer(\n sample_cluster,\n context={'request': request}\n )\n headers = self.get_success_headers(serializer.data)\n\n return Response(serializer.data, status=status.HTTP_201_CREATED,\n headers=headers)", "title": "" }, { "docid": "3f44a8ed8aa2a9ea146ee5ed864ae171", "score": "0.4731527", "text": "def create_cluster(Name):\n conn = get_conn()\n cur = conn.cursor(dictionary=True)\n sql_str = f\"\"\"\n INSERT INTO Clusters (Name)\n VALUES ('{Name}')\n \"\"\"\n try:\n cur.execute(sql_str)\n conn.commit()\n except Error as e:\n cur.close()\n conn.close()\n return (False, str(e))\n cur.close()\n conn.close()\n return (True, \"Cluster created successfully.\")", "title": "" }, { "docid": "414b6635e531d36987cdabf6a8325382", "score": "0.47254992", "text": "def clustering(cloud, vg_size=0.1, tolerance=0.5, min_cluster_size=1000):\n\n # voxelization using voxel_gird_filter() - downsampling\n # original points = 205620 -> after 24030\n\n\n cluster_list = list()\n cloud_value = cloud\n vg = cloud.make_voxel_grid_filter()\n vg.set_leaf_size(vg_size, vg_size, vg_size)\n cloud_filtered = vg.filter()\n\n tree = cloud_filtered.make_kdtree()\n ec = cloud_filtered.make_EuclideanClusterExtraction()\n ec.set_ClusterTolerance(tolerance) # 0.5 = 50cm\n ec.set_MinClusterSize(min_cluster_size) # impose that the clusters found must have at least\n ec.set_MaxClusterSize(cloud_value.size) # impose that the clusters found must have at Maximum\n ec.set_SearchMethod(tree)\n\n\n cluster_indices = ec.Extract() # return index number that is included in the 
result of clustering\n\n for j, indices in enumerate(cluster_indices):\n\n cloud_cluster = pcl.PointCloud()\n\n points = np.zeros((len(indices), 3), dtype=np.float32)\n\n\n for i, indice in enumerate(indices):\n\n points[i][0] = cloud_filtered[indice][0]\n points[i][1] = cloud_filtered[indice][1]\n points[i][2] = cloud_filtered[indice][2]\n\n cloud_cluster.from_array(points)\n cluster_list.append(cloud_cluster)\n\n if len(cluster_list) != 0:\n\n return cluster_list\n else:\n cluster_list.append(cloud)\n\n return cluster_list", "title": "" }, { "docid": "912d067f2d410c5edf822ba92b8c0119", "score": "0.47230214", "text": "def main():\n print(\"Clustering\")\n raw_data = get_data(FILENAME)\n data = np.empty((len(raw_data),5))\n n_clusters=2\n data[:,0], _ = factorize([f.party for f in raw_data])\n data[:,1], _ = factorize([f.gender for f in raw_data])\n data[:,2] = preprocessing.scale([f.age for f in raw_data])\n data[:,3] = preprocessing.scale([f.cl0 for f in raw_data])\n data[:,4] = preprocessing.scale([f.cl2 for f in raw_data])\n print(\"Clustering\", len(data), \"entries\")\n kproto = kprototypes.KPrototypes(n_clusters=n_clusters).fit(X=data, categorical=[0,1])\n idx = kproto.fit_predict(data, categorical=[0,1])\n labels = kproto.labels_\n silohouette = metrics.silhouette_score(data, labels, metric='cosine')\n\n print (\"Silhouette score: {}\".format(silohouette))\n\n print(\"Saving word clusters to {}\".format(CLUSTER_OUTPUT_FILE))\n with open(CLUSTER_OUTPUT_FILE, \"w\", encoding=\"utf-8\") as f:\n writer = csv.writer(f, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n writer.writerow([\"Name\", \"Party\", \"Gender\", \"Age\", \"Cluster 0\", \"Cluster 2\", \"Assigned Cluster\"])\n for i, j in enumerate(raw_data):\n writer.writerow(j.to_row() + [labels[i],])", "title": "" }, { "docid": "658f2588a262854ae48d8959a47b4e39", "score": "0.47137997", "text": "def update_machines_clusters_and_servers(self, delete_now=True):\n domain_token = deployer_utils.get_domain_token(self.aliases)\n location = LocationContext()\n location.add_name_token(domain_token, self.model_context.get_domain_name())\n folder_list = list()\n folder_list.append(CLUSTER)\n folder_list.append(SERVER)\n folder_list.append(SERVER_TEMPLATE)\n folder_list.append(MIGRATABLE_TARGET)\n folder_list.append(MACHINE)\n folder_list.append(UNIX_MACHINE)\n\n self._process_section(self._topology, folder_list, MACHINE, location)\n self._process_section(self._topology, folder_list, UNIX_MACHINE, location)\n # avoid circular references between clusters and server templates\n self._topology_helper.create_placeholder_server_templates(self._topology)\n\n # create placeholders for JDBC resources that may be referenced in cluster definition.\n jdbc_names = self._topology_helper.create_placeholder_jdbc_resources(self._resources)\n\n self._process_section(self._topology, folder_list, CLUSTER, location, delete_now)\n self._process_section(self._topology, folder_list, SERVER_TEMPLATE, location, delete_now)\n\n # create placeholders for Servers that are in a cluster as /Server/JTAMigratableTarget\n # can reference \"other\" servers\n self._topology_helper.create_placeholder_servers_in_cluster(self._topology)\n\n self._process_section(self._topology, folder_list, SERVER, location, delete_now)\n\n self._process_section(self._topology, folder_list, MIGRATABLE_TARGET, location, delete_now)\n\n # targets may have been inadvertently assigned when clusters were added\n return jdbc_names", "title": "" }, { "docid": "38c5aa300fe5f6fd861ae78f2129dd2a", 
"score": "0.4713615", "text": "def set_up_cluster_with_http_info(self, id, type, **kwargs):\n\n all_params = ['id', 'type']\n all_params.append('callback')\n all_params.append('_return_http_data_only')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method set_up_cluster\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params) or (params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `set_up_cluster`\")\n # verify the required parameter 'type' is set\n if ('type' not in params) or (params['type'] is None):\n raise ValueError(\"Missing the required parameter `type` when calling `set_up_cluster`\")\n\n resource_path = '/cluster/{id}/{type}'.replace('{format}', 'json')\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id']\n if 'type' in params:\n path_params['type'] = params['type']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type([])\n\n # Authentication setting\n auth_settings = []\n\n return self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='str',\n auth_settings=auth_settings,\n callback=params.get('callback'),\n _return_http_data_only=params.get('_return_http_data_only'))", "title": "" }, { "docid": "802de6a8143629015cc94faa6a4df532", "score": "0.47127196", "text": "def get_active_cluster_and_devices(request, return_type):\n\n print('*** Get Active Cluster, Content and Device Setup ****')\n response_data = {}\n\n # Get Active Cluster, Content and Device Setup\n try:\n data_check = ClustersSetup.objects.values('cluster_location_detail')\n print(str(data_check))\n\n if data_check:\n print('Active Cluster, Content and Device Setup(s) data found')\n data = list(data_check)\n\n for i in range(len(data)):\n # response_data.setdefault(str(i), (data[i]))\n response_data.setdefault(str(i), (data[i].get('cluster_location_detail')))\n\n print('Response length: ', len(response_data)) # Sanity check\n print(response_data) # Sanity check\n\n if return_type == 'HTTP':\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")\n else:\n return response_data\n\n else:\n response_data[\"none\"] = \"NO DATA\"\n # print(response_data)\n\n return response_data\n\n except ObjectDoesNotExist:\n response_data['result'] = \"ERROR Status: \" + str(ObjectDoesNotExist.silent_variable_failure) + \\\n \". Get Active Cluster, Content and Device Setup request failure. 
GACCAD(1)\"\n\n return HttpResponse(json.dumps(response_data), content_type=\"application/json\")", "title": "" }, { "docid": "eb3f41caf78dfc9e4b3c97c250cddf69", "score": "0.47119993", "text": "def test_add_cluster(self):\n params = {\"name\": uhex(), \"description\": uhex()}\n response = self.add_item(\"cluster\", params)\n\n # Verify response\n self.verify_valid_response(response)\n\n # Verify persistence\n cluster = models.Cluster.objects.get(name=params[\"name\"])\n self.assertEqual(cluster.description, params[\"description\"])", "title": "" } ]
649087d6ced5dca822efab808bee8372
Custom premake hook for building libjpeg.
[ { "docid": "3f4fd333dbfc50897df7f0c581deac67", "score": "0.47270572", "text": "def pre_configure(options, buildout):\n os.system(\"sh autogen.sh\")", "title": "" } ]
[ { "docid": "3df4074d540a4efc12734b5d27c2b239", "score": "0.55957216", "text": "def _jpgProcess(sourceFile):\n if utils.isToolPresent('jpegtran'):\n args = ['-optimize',\n '-progressive',\n '-copy', 'none',\n ]\n return utils.pipeRun('jpegtran', sourceFile, args)\n raise RuntimeError('No JPG minification facilities present!'\n + '(tried jpegtran)')", "title": "" }, { "docid": "3541a56efebbde3279e7b1383b283ff7", "score": "0.53404695", "text": "def optimize_jpg(src, dst, extra_markers='all'):\n args = [UTIL_EXE.get('jpegtran', None)]\n if args[0] is None:\n return\n args.extend(('-optimize', '-progressive', '-copy', extra_markers))\n args.extend((src, dst))\n subprocess.run(args)", "title": "" }, { "docid": "ad3a5eee4c9908a9d417f4d8f0f05940", "score": "0.5226535", "text": "def pre_build_hook(player, builddata):", "title": "" }, { "docid": "6062525f70c0f7e614e2bf688c78b009", "score": "0.52218205", "text": "def __init__ (self, requirement='', only_static=False):\n\n self.name = 'libjpeg'\n header = 'jpeglib.h'\n\n candidates = find_header(header)\n\n if not candidates:\n raise RuntimeError(\"could not find %s's `%s' - have you installed %s on this machine?\" % (self.name, header, self.name))\n\n found = False\n\n if not requirement:\n self.include_directory = os.path.dirname(candidates[0])\n self.version = libjpeg_version(candidates[0])\n\n # special condition (using libjpeg-turbo instead)\n if self.version is None:\n turbo_candidates = find_header('jconfig.h')\n if turbo_candidates:\n self.version = libjpeg_turbo_version(turbo_candidates[0])\n\n found = True\n\n else:\n\n # requirement is 'operator' 'version'\n operator, required = [k.strip() for k in requirement.split(' ', 1)]\n\n # now check for user requirements\n for candidate in candidates:\n vv = libjpeg_version(candidate)\n available = LooseVersion(vv)\n if (operator == '<' and available < required) or \\\n (operator == '<=' and available <= required) or \\\n (operator == '>' and available > required) or \\\n (operator == '>=' and available >= required) or \\\n (operator == '==' and available == required):\n self.include_directory = os.path.dirname(candidate)\n self.version = vv\n found = True\n break\n\n if not found:\n raise RuntimeError(\"could not find the required (%s) version of %s on the file system (looked at: %s)\" % (requirement, self.name, ', '.join(candidates)))\n\n # normalize\n self.include_directory = os.path.normpath(self.include_directory)\n\n # find library\n prefix = os.path.dirname(os.path.dirname(self.include_directory))\n module = 'jpeg'\n candidates = find_library(module, version=self.version, prefixes=[prefix], only_static=only_static)\n\n if not candidates:\n raise RuntimeError(\"cannot find required %s binary module `%s' - make sure `%s' is installed on `%s'\" % (self.name, module, self.name, prefix))\n\n # libraries\n self.libraries = []\n name, ext = os.path.splitext(os.path.basename(candidates[0]))\n if ext in ['.so', '.a', '.dylib', '.dll']:\n self.libraries.append(name[3:]) #strip 'lib' from the name\n else: #link against the whole thing\n self.libraries.append(':' + os.path.basename(candidates[0]))\n\n # library path\n self.library_directory = os.path.dirname(candidates[0])", "title": "" }, { "docid": "218880e4be1fadac441c568c5cdba1a2", "score": "0.50741684", "text": "def make_jpeg(self, path = None):\n \n if path == None:\n im_path = glob(self.path+'/pre-fire/indices/*stack_total*')[0]\n else:\n im_path = path\n stack = np.swapaxes(np.swapaxes(gdal.Open(im_path).ReadAsArray()[5:],0,2),0,1)\n \n # 
plt.imsave('/'.join(im_path.split('/')[:-3])+'/pre_fire/'+im_path.split('/')[7]+'_'+im_path.split('/')[8]+'_pre.jpg', stack)\n plt.imsave('/'.join(im_path.replace('.tif','.jpg'), stack))\n # misc.toimage(stack, cmin=0.0, cmax=255).save(path+'/pre_fire/'+im_path.split('/')[6]+'_'+im_path.split('/')[7]+'_pre.jpg')", "title": "" }, { "docid": "5bf492d8c6588f6cd5b28ab411c92871", "score": "0.5008826", "text": "def build_step(self):\n # cfr. https://elist.ornl.gov/pipermail/visit-developers/2011-September/010063.html\n self.cfg.update('premakeopts', 'LD_LIBRARY_PATH=%s:$LD_LIBRARY_PATH' % os.path.join(self.cfg['start_dir'], 'lib'))\n\n super(EB_Qt, self).build_step()", "title": "" }, { "docid": "90e28a769381d4299a03a120fd9255cc", "score": "0.48877725", "text": "def process_jpeg_files():\n # TODO: expand this to include all jpeg files\n data_path = models.SOURCES['jpeg']['data_path']\n only_files = [f for f in listdir(data_path)\n if isfile(os.path.join(data_path, f))]\n for f in only_files:\n if f == 'thumbs.db':\n continue\n date_string = DATA_RE.search(f).group(0)\n print('processing JPEG {}'.format(date_string))\n file_name = os.path.join(data_path, f)\n if getsize(file_name) > 10000:\n row_list = logic_ocr.process_image(file_name, 'jpeg', date_string)\n csv_file = os.path.join(ROOT_PATH, 'total.csv')\n with open(csv_file, 'a', newline='') as csvfile:\n csv_writer = csv.writer(csvfile, delimiter=',')\n csv_writer.writerow(row_list)\n # process_jpeg_file(HTML_DATA_PATH + f)\n # move_file_to_archive(f, SCREEN_DATA_PATH, SCREEN_ARCH_PATH)", "title": "" }, { "docid": "a7f301db8f680cc2433c3d196aa2f8f8", "score": "0.48801607", "text": "def packImages(ctx, imagedir, verbose, clobber):\n #this is set at the main command, so we need to access it here\n outputdir = ctx.obj['outputdir']\n print(\"outputdir:\", outputdir)\n metadatafile = ctx.obj['metadatafile']\n print(\"metadata file:\", metadatafile)\n if verbose:\n click.echo(\"image inputdir is \" + imagedir)\n click.echo(\"iamge output dir is \" + outputdir)\n #note we convert the metadatafile handle to a string because it is used multiple times \n #in this function, and will fail its reads after the first time otherwise\n generate_image_xar(imagedir, outputdir, metadatafile.name, verbose, clobber)\n #make_metadata_file()\n return 0", "title": "" }, { "docid": "d9573d3f6934de5261f24ec977709452", "score": "0.4858986", "text": "def update_pre_build_configuration(self):\n if self.info.get(\"base_image_uri\"):\n self.build_args[\"BASE_IMAGE\"] = self.info[\"base_image_uri\"]\n\n if self.info.get(\"extra_build_args\"):\n self.build_args.update(self.info.get(\"extra_build_args\"))\n\n if self.info.get(\"labels\"):\n self.labels.update(self.info.get(\"labels\"))", "title": "" }, { "docid": "cb742f1e8de75ca1fe557a8923b34a96", "score": "0.48556158", "text": "def test_com_day_cq_dam_core_impl_handler_jpeg_handler(self):\n pass", "title": "" }, { "docid": "ea88cf92c046231e2c20480b89bc7b1d", "score": "0.46749908", "text": "def pngcrush_image(src, **kw):\r\n log.info('[1@None] Optimizing image: %s' % src)\r\n try:\r\n # pngcrush -ow has some issues, use a temporary file and do the final\r\n # renaming ourselves.\r\n suffix = '.opti.png'\r\n tmp_path = '%s%s' % (os.path.splitext(src)[0], suffix)\r\n cmd = [settings.PNGCRUSH_BIN, '-q', '-rem', 'alla', '-brute',\r\n '-reduce', '-e', suffix, src]\r\n sp = subprocess.Popen(cmd, stdin=subprocess.PIPE,\r\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\r\n stdout, stderr = sp.communicate()\r\n\r\n if sp.returncode != 
0:\r\n log.error('Error optimizing image: %s; %s' % (src,\r\n stderr.strip()))\r\n pngcrush_image.retry(args=[src], kwargs=kw, max_retries=3)\r\n return False\r\n\r\n shutil.move(tmp_path, src)\r\n log.info('Image optimization completed for: %s' % src)\r\n return True\r\n except Exception, e:\r\n log.error('Error optimizing image: %s; %s' % (src, e))", "title": "" }, { "docid": "cf746bf22234ca5bee691f72abf0c3da", "score": "0.46351728", "text": "def build(self, spec, prefix):\n with working_dir(self.build_directory):\n if self.generator == \"Unix Makefiles\":\n inspect.getmodule(self).make(*self.build_targets)\n elif self.generator == \"Ninja\":\n self.build_targets.append(\"-v\")\n inspect.getmodule(self).ninja(*self.build_targets)", "title": "" }, { "docid": "0df3a32c5cdd27e865d88dbf14993a9a", "score": "0.46079585", "text": "def images_to_library(t_path, images_path): \r\n images_list = glob.iglob(os.path.join(images_path, \"*.jpg\")) #contains the files\r\n num_of_images = len(os.listdir(images_path)) #the num of files from the images library \r\n os.chdir(t_path)\r\n # cheaks if t_path is for test or train\r\n if r\"\\test\" in t_path: \r\n for jpgfile in images_list: #copy all of the images to test\r\n shutil.copy(jpgfile, t_path)\r\n else:\r\n counter = 0 #counts the number of times the loop repeat \r\n for jpgfile in images_list: #copy 70% of the images to train\r\n if (counter < 0.7*num_of_images):\r\n shutil.copy(jpgfile, t_path)\r\n counter = counter + 1", "title": "" }, { "docid": "f208ba4ef551758b4b60f3eeb6bad4e9", "score": "0.45812887", "text": "def _pngProcess(sourceFile):\n if utils.isToolPresent('pngcrush_wrapper'):\n args = []\n return utils.pipeRun('pngcrush_wrapper', sourceFile, args)\n raise RuntimeError('No PNG minification facilities present!'\n + '(tried pngcrush_wrapper)')", "title": "" }, { "docid": "0e2e544564df026ad5efd491a16e25bf", "score": "0.4554481", "text": "def pre_process():\n pass", "title": "" }, { "docid": "e822bc299571c69989b1dcb9b5774856", "score": "0.4544572", "text": "def makeImgs(self, outputDir):\n return []", "title": "" }, { "docid": "8ab893f76aca39e8f882d6f2815bea69", "score": "0.45428932", "text": "def build_image(ctx, tag=\"striker-backend\"):\n ctx.run(gradle(\"jibDockerBuild\", f\"-Djib.to.image={tag}\"))", "title": "" }, { "docid": "d3a0766a2681424912dfd3906a022020", "score": "0.45408678", "text": "def upload_build(config):\n pass", "title": "" }, { "docid": "ad0e0d35baae7ca495da388432e42376", "score": "0.45242116", "text": "def pre_build(self, package_config):\n \n self.package_config = package_config\n if package_config.get(\"package\") is None:\n package_config[\"package\"] = self.make_package_name(\n self.global_config[\"base_path\"],\n package_config[\"path\"],\n self.global_config[\"prefix\"]\n )", "title": "" }, { "docid": "ae20155a7ba185624b4844c747fc089d", "score": "0.45018637", "text": "def photos(ctx, filename):\n print(\"Preparing photos: %s\" % filename)\n\n exif_keep = \"-GPSLatitudeRef -GPSLongitudeRef -GPSLatitude -GPSLongitude -GPSInfo -ImageLength -ImageHeight -ImageWidth\"\n invoke.run('mogrify -resize \"1920x1920>\" -quality 70% -interlace Plane {0}'.format(filename), echo=True)\n invoke.run('exiftool -overwrite_original_in_place -all= -tagsFromFile @ {0} {1}'.format(exif_keep, filename), echo=True)", "title": "" }, { "docid": "7d2a046f78e2d310b02518df56695239", "score": "0.4500426", "text": "def build(sub_args):\n repo_url = sub_args.repo_url\n img_name = sub_args.img_name.lower()\n output = sub_args.output\n skip_build 
= sub_args.skip_build\n base_image = sub_args.base_img\n use_dockta_reqs = sub_args.use_dockta_reqs\n\n repo_name = repo_url.split('/')[-1].split('.git')[0]\n repo_dir = os.path.join(output, repo_name)\n repo_dir_lowercase = os.path.join(output, repo_name.lower())\n\n print('Repository url: {}'.format(repo_url))\n print('Image name: {}'.format(img_name))\n print('Repo directory: {}'.format(repo_dir))\n print('Starting build..')\n\n if (not os.path.isdir(output)):\n os.mkdir(output)\n\n if (os.path.isdir(repo_dir_lowercase) or os.path.isdir(repo_dir)):\n warnings.warn('Repo directory already exists, skipping clone..')\n else:\n bash('git -C {0} clone {1}'.format(output, repo_url))\n\n # Workaround for lowercase repo name requirement\n if (os.path.isdir(repo_dir) and not repo_dir.split('/')[-1].islower()):\n # Only rename repo directory\n # if it is not lower case\n bash('mv {0} {1}'.format(repo_dir, repo_dir_lowercase))\n\n # Do not use the users defined requirements\n # Let Dockta try to determine the its requirements\n if (use_dockta_reqs):\n if exists(os.path.join(repo_dir_lowercase, 'requirements.txt')):\n bash('rm {}'.format(os.path.join(repo_dir_lowercase, 'requirements.txt')))\n if exists(os.path.join(repo_dir_lowercase, 'Dockerfile')):\n bash('rm {}'.format(os.path.join(repo_dir_lowercase, 'Dockerfile')))\n\n bash('dockta compile --from {0} {1}'.format(base_image,repo_dir_lowercase))\n\n if (not skip_build):\n # Build the Docker image\n dockerfile = '.Dockerfile' # Default Dockerfile name generated by Dockta\n if exists(os.path.join(repo_dir_lowercase, 'Dockerfile')):\n # Dockerfile already exists in Github repo\n # use that Dockerfile instead\n dockerfile = 'Dockerfile'\n if not exists(os.path.join(repo_dir_lowercase, 'requirements.txt')):\n filelist = open(os.path.join(repo_dir_lowercase, dockerfile), 'r').readlines()\n for line in filelist:\n if line.startswith('RUN pip3 install --requirement requirements.txt'):\n # Edit Dockerfile with builds for R packages \n # AND a requirements.txt is not present \n bash(\"sed -i 's/RUN pip3 install --requirement requirements.txt \\\\\\$//g' {}\".format(dockerfile), \n cwd=repo_dir_lowercase)\n bash(\"sed -i 's@^ && bash -c \\\"Rscript@RUN bash -c \\\"Rscript@' {}\".format(dockerfile), \n cwd=repo_dir_lowercase)\n break\n\n # Check for compatiability with bionic cran mirror \n if base_image == 'ubuntu:18.04':\n # Edit Dockerfile for compatiability with Ubuntu Bionic\n bash(\"sed -i 's@eoan-cran@bionic-cran@' {}\".format(dockerfile), cwd=repo_dir_lowercase)\n\n bash('docker build --no-cache -f {} --tag={} .'.format(dockerfile, img_name), \n cwd=repo_dir_lowercase)\n return", "title": "" }, { "docid": "d2c513245fde7da64d670ccad02ad403", "score": "0.44900596", "text": "async def jpeg(self, ctx, strength: int = 30, stretch: int = 20, quality: int = 10):\n if not 0 < strength <= 100:\n await ctx.send(f\"{config.emojis['warning']} Strength must be between 0 and 100.\")\n return\n if not 0 <= stretch <= 40:\n await ctx.send(f\"{config.emojis['warning']} Stretch must be between 0 and 40.\")\n return\n if not 1 <= quality <= 95:\n await ctx.send(f\"{config.emojis['warning']} Quality must be between 1 and 95.\")\n return\n await improcess(ctx, captionfunctions.jpeg, [[\"VIDEO\", \"GIF\", \"IMAGE\"]], strength, stretch, quality,\n handleanimated=True)", "title": "" }, { "docid": "0654b0b74e5cc92b1753f7b8d8c46497", "score": "0.44820434", "text": "def pkg_image_create(self, repourl=None, prefix=None,\n additional_args=\"\", exit=0, env_arg=None):\n\n if 
repourl and prefix is None:\n prefix = \"test\"\n\n self.image_destroy()\n os.mkdir(self.img_path())\n cmdline = \"{0} image-create -F \".format(self.pkg_cmdpath)\n if repourl:\n cmdline = \"{0} -p {1}={2} \".format(cmdline, prefix, repourl)\n cmdline += additional_args\n cmdline = \"{0} {1}\".format(cmdline, self.img_path())\n\n retcode = self.cmdline_run(cmdline, exit=exit, env_arg=env_arg)\n\n self.__setup_signing_files()\n return retcode", "title": "" }, { "docid": "39c9d4f20cc2f2aaac7b9ba65dd0da14", "score": "0.44648036", "text": "def configure():\n clean()\n # Create a temporary site-package directory.\n shutil.copytree('python', 'dragon')\n # Copy headers.\n shutil.copytree('../targets/native/include', 'dragon/include')\n # Copy \"caffe\" => \"dragon.vm.caffe\"\n shutil.copytree('../caffe', 'dragon/vm/caffe')\n # Copy \"dali\" => \"dragon.vm.dali\"\n shutil.copytree('../dali', 'dragon/vm/dali')\n # Copy \"tensorflow\" => \"dragon.vm.tensorflow\"\n shutil.copytree('../tensorflow', 'dragon/vm/tensorflow')\n # Copy \"tensorlayer\" => \"dragon.vm.tensorlayer\"\n shutil.copytree('../tensorlayer', 'dragon/vm/tensorlayer')\n # Copy \"tensorrt/python\" => \"dragon.vm.tensorrt\"\n shutil.copytree('../tensorrt/python', 'dragon/vm/tensorrt')\n # Copy \"torch\" => \"dragon.vm.torch\"\n shutil.copytree('../torch', 'dragon/vm/torch')\n # Copy \"torchvision\" => \"dragon.vm.torchvision\"\n shutil.copytree('../torchvision', 'dragon/vm/torchvision')\n # Copy the pre-built libraries.\n if not os.path.exists('dragon/lib'):\n os.makedirs('dragon/lib')\n for src, dest in find_libraries().items():\n if os.path.exists(src):\n shutil.copy(src, dest)\n else:\n print('ERROR: Unable to find the library at <%s>.\\n'\n 'Build it before installing to package.' % src)\n shutil.rmtree('dragon')\n sys.exit()\n # Write the version file.\n with open('dragon/version.py', 'w') as f:\n f.write(\"from __future__ import absolute_import\\n\"\n \"from __future__ import division\\n\"\n \"from __future__ import print_function\\n\\n\"\n \"version = '{}'\\n\"\n \"git_version = '{}'\\n\".format(version, git_version))", "title": "" }, { "docid": "96156125d45882ab4b8262ab4b0ddda4", "score": "0.44527164", "text": "def force_build_image(self, force_build: bool = False, **kwargs) -> bool:\n return force_build", "title": "" }, { "docid": "bab196c020818be588831dda4be7dc17", "score": "0.44469875", "text": "def build(self, spec, prefix):\n builder = Executable(\"./compile.sh\")\n builder()", "title": "" }, { "docid": "2269bc2a3569470db477f384f3bcfd5d", "score": "0.44424745", "text": "def have_pil(self):\n return HAVE_PIL", "title": "" }, { "docid": "599ae191629b3715e787eae2849f1825", "score": "0.44372785", "text": "def prepare_test_img(self, idx):\n img_info = self.img_infos[idx]\n frame_ind = img_info['frame_ind']\n foldername = img_info['foldername']\n ann_info = self.get_ann_info(idx, frame_ind)\n filename = osp.join(foldername, f\"{frame_ind:06d}.JPEG\")\n img_info['filename'] = filename\n results = dict(img_info=img_info, ann_info=ann_info)\n self.pre_pipeline(results)\n results['frame_ind'] = frame_ind\n results = self.pipeline(results)\n return results", "title": "" }, { "docid": "7598b60ee1792217ef9a4bd7f7a35aa6", "score": "0.44333562", "text": "def package_framework(build_config,\n target_device,\n out_dir,\n output_name,\n extra_gn_options,\n extra_ninja_options):\n print('\\nBuilding for %s (%s)' % (target_device, build_config))\n\n build_result = build(build_config,\n target_device,\n extra_gn_options,\n 
extra_ninja_options)\n if build_result != 0:\n error = 'Building %s/%s failed with code: ' % (build_config, target_device)\n print(error, build_result, file=sys.stderr)\n return build_result\n copy_build_products(build_config, target_device, out_dir, output_name)\n return 0", "title": "" }, { "docid": "c200446cfc180705d51a796b54bfeb7b", "score": "0.44244534", "text": "def _pre_build_pipeline(self):\n\n if self.is_playing:\n self._notify('err', \"Can't rebuild pipeline while playing.\")\n raise RuntimeError(\"Can't rebuild pipeline while playing.\")\n\n if self._pipeline is not None:\n self._notify('debug', \"Setting pipeline state to NULL.\")\n self._pipeline.set_state(Gst.State.NULL)", "title": "" }, { "docid": "5a761d8938c15cba31d7b711145b104d", "score": "0.44173667", "text": "def build_image(c):\n version = toml.load(\"pyproject.toml\")[\"tool\"][\"poetry\"][\"version\"]\n\n c.run(f\"docker build -t knowsuchagency/covid-19:{version} .\")\n c.run(\n f\"docker tag knowsuchagency/covid-19:{version} knowsuchagency/covid-19:latest\"\n )", "title": "" }, { "docid": "12822151acf84a50cc4aad1b2ba56f8d", "score": "0.44041842", "text": "def bakeImage(self, event=None):\n imgData = np.copy(self.picdata)\n _add = WINDOW.addResource()\n _add.setData(imgData)\n _add.pipe()\n _add.autoFit()", "title": "" }, { "docid": "e22413701829343de17b22e63efd40c4", "score": "0.4389174", "text": "def img_pre_process(img):\n ## Chop off 1/3 from the top and cut bottom 150px(which contains the head of car)\n shape = img.shape\n img = img[int(shape[0]/3):shape[0]-150, 0:shape[1]]\n img=img/255.\n print(img.shape)\n \n ## Resize the image\n resize_img=resize(img, (128, 384), mode='reflect')\n ## Return the image sized as a 4D array\n return resize_img#np.resize(img, (w, h, c))", "title": "" }, { "docid": "8854937bd857c2149a3600bc36f5539f", "score": "0.43738654", "text": "def compress_img(args):\n source_img_path, destination_image_path, img_quality_val = args\n source_img_name = os.path.split(source_img_path)[1]\n img = open_image(source_img_path)\n if not img:\n print(\"whoops, bad image.\")\n else:\n full_destination_image_path = os.path.join(\n destination_image_path, source_img_name\n )\n save_image(img, full_destination_image_path, img_quality_val)", "title": "" }, { "docid": "43da3f3e3622aaccf68213b592702c8a", "score": "0.4361627", "text": "def build_image(self):\n cmd = '''\n source import-stx\n stx shell -c \"build-image --keep\"\n '''\n ret = run_cmd(cmd)\n log.info(\"Build image return code %s\", ret.returncode)\n if ret.returncode != 0:\n raise Exception(\"Failed to build image\")\n\n # Update gpg\n log.info(\"Updating gpg settings\")\n self.add_gpg_pinentry()", "title": "" }, { "docid": "261c56793f9e849ad79328d4fcdeeded", "score": "0.43596533", "text": "def compress(path_origin_tiff, path_mkv, crf, lossless):\n command ='ffmpeg -f image2 -start_number 30 -i ' + path_origin_tiff + '\\\\0_0_%d.tiff ' \\\n '-vcodec libx265 -y -preset ultrafast '\n if lossless == 1:\n command = command + ' -x265-params lossless=1 ' + path_mkv\n else:\n command = command + ' -x265-params crf=' + str(crf) + ' ' + path_mkv\n ret = subprocess.Popen(command, stderr=subprocess.PIPE) #ret is the ffmpeg run information and its type is bytes\n string = bytes.decode(ret.stderr.read()) #convert the ffmpeg run information to string\n ret.communicate()\n # get the last line of the run information which is the key information of the compression\n end_line = string.split('\\r\\n')[-2]\n dic = {}\n #get the \"encoded frames\",\"Compress 
time(s)\" and \"Avg QP\" from the last line\n dic['Encoded frames'] = end_line.split()[1]\n dic['Compress time(s)'] = end_line.split()[4][0:-2]\n dic['Avg QP'] = end_line.split()[-1].split(':')[-1]\n return dic", "title": "" }, { "docid": "5ddfdc459f57568d43cb91b121e697c6", "score": "0.4359427", "text": "def mk_audio_png(self,src,png_name):\n p = gslevels.Make_png()\n p.location = src\n p.verbose = self.options.verbose\n p.setup()\n p.start()\n ret = p.mk_png(png_name)\n\n return ret", "title": "" }, { "docid": "42d8cdc985f983633f0a3c36d63a07c2", "score": "0.43461606", "text": "def build(setup_kwargs):\n distribution = Distribution(\n {\n \"name\": \"src/cupcake\",\n \"ext_modules\": cythonize(\n extensions, compiler_directives={\"language_level\": \"3\"}\n ),\n }\n )\n distribution.package_dir = \"cupcake\"\n\n cmd = ExtBuilder(distribution)\n cmd.ensure_finalized()\n cmd.run()\n\n # Copy built extensions back to the project\n for output in cmd.get_outputs():\n output = Path(output)\n relative_extension = Path(\"src\").joinpath(output.relative_to(cmd.build_lib))\n if not output.exists():\n continue\n\n shutil.copyfile(output, relative_extension)\n mode = relative_extension.stat().st_mode\n mode |= (mode & 0o444) >> 2\n relative_extension.chmod(mode)\n\n return setup_kwargs", "title": "" }, { "docid": "5022b1b2c855b6f436eef029c74aaec1", "score": "0.43433812", "text": "def add_post_compilation_lflags(ldflags_arr):\n if is_benchmark('libjpeg'):\n ldflags_arr += libjpeg_turbo_asm_object_files()\n elif is_benchmark('php'):\n ldflags_arr += ['-lresolv']\n elif is_benchmark('curl'):\n ldflags_arr += [\n '-ldl', '-lpsl', '/src/openssl/libcrypto.a', '/src/openssl/libssl.a'\n ]\n elif is_benchmark('openssl'):\n ldflags_arr += ['/src/openssl/libcrypto.a', '/src/openssl/libssl.a']\n elif is_benchmark('systemd'):\n shutil.copy(\n os.path.join(os.environ['OUT'],\n 'src/shared/libsystemd-shared-245.so'),\n '/usr/lib/libsystemd-shared-245.so')\n ldflags_arr += ['-lsystemd-shared-245']", "title": "" }, { "docid": "0fff129ebc209a3c5799d991a0cd3c66", "score": "0.43231675", "text": "def _BuildPrepare(\n self, source_directory, project_name, project_version, version_suffix,\n distribution, architecture):\n # Script to run before building, e.g. 
to change the dpkg packaging files.\n if os.path.exists(self._prep_script):\n command = u'sh ../{0:s} {1:s} {2!s} {3:s} {4:s} {5:s}'.format(\n self._prep_script, project_name, project_version, version_suffix,\n distribution, architecture)\n exit_code = subprocess.call(u'(cd {0:s} && {1:s})'.format(\n source_directory, command), shell=True)\n if exit_code != 0:\n logging.error(u'Running: \"{0:s}\" failed.'.format(command))\n return False\n\n return True", "title": "" }, { "docid": "940bad1e8188c75dbd0ccd021792984d", "score": "0.4320876", "text": "def _compile(self, install):\n pass", "title": "" }, { "docid": "22d71a760b37f89695882874f8f9a793", "score": "0.43189907", "text": "def _process_image(filename, coder):\r\n with open(filename, 'rb') as f:\r\n image_data = f.read()\r\n if _is_png(filename):\r\n logging.info('Converting PNG to JPEG for %s' % filename)\r\n image_data = coder.png_to_jpeg(image_data)\r\n image = coder.decode_jpeg(image_data)\r\n assert len(image.shape) == 3\r\n height = image.shape[0]\r\n width = image.shape[1]\r\n assert image.shape[2] == 3\r\n return image_data, height, width", "title": "" }, { "docid": "1bb26d73ee18bb55ccf68d6b1fefcf0c", "score": "0.4315954", "text": "def SetupPrebuiltTools(adb):\n\n # TODO(bulach): Build the targets for x86/mips.\n device_tools = [\n 'file_poller',\n 'forwarder_dist/device_forwarder',\n 'md5sum_dist/md5sum_bin',\n 'purge_ashmem',\n 'run_pie',\n ]\n\n host_tools = [\n 'bitmaptools',\n 'md5sum_bin_host',\n ]\n\n if platform.GetHostPlatform().GetOSName() == 'linux':\n host_tools.append('host_forwarder')\n\n has_device_prebuilt = adb.system_properties['ro.product.cpu.abi'].startswith(\n 'armeabi')\n if not has_device_prebuilt:\n return all([support_binaries.FindLocallyBuiltPath(t) for t in device_tools])\n\n build_type = None\n for t in device_tools + host_tools:\n executable = os.path.basename(t)\n locally_built_path = support_binaries.FindLocallyBuiltPath(t)\n if not build_type:\n build_type = GetBuildTypeOfPath(locally_built_path) or 'Release'\n constants.SetBuildType(build_type)\n dest = os.path.join(constants.GetOutDirectory(), t)\n if not locally_built_path:\n logging.info('Setting up prebuilt %s', dest)\n if not os.path.exists(os.path.dirname(dest)):\n os.makedirs(os.path.dirname(dest))\n platform_name = ('android' if t in device_tools else\n platform.GetHostPlatform().GetOSName())\n prebuilt_path = support_binaries.FindPath(executable, platform_name)\n if not prebuilt_path or not os.path.exists(prebuilt_path):\n raise NotImplementedError(\"\"\"\n%s must be checked into cloud storage.\nInstructions:\nhttp://www.chromium.org/developers/telemetry/upload_to_cloud_storage\n\"\"\" % t)\n shutil.copyfile(prebuilt_path, dest)\n os.chmod(dest, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)\n return True", "title": "" }, { "docid": "693229a3fc24ff3590cf8fa12579f0b6", "score": "0.43148106", "text": "def test_algo_makeImage(self):", "title": "" }, { "docid": "2f48da42a7c922cbb5dddc5465acc407", "score": "0.43111637", "text": "def setup_platform(hass, config, add_devices_callback, discovery_info=None):\n if not validate_config({DOMAIN: config}, {DOMAIN: ['mjpeg_url']},\n _LOGGER):\n return None\n\n add_devices_callback([MjpegCamera(config)])", "title": "" }, { "docid": "2f0d2188f1b0030839596f61d9598699", "score": "0.42945918", "text": "def on_picture_taken(self, filename):", "title": "" }, { "docid": "8fc03c00a61c59d44f298bb068d67dcc", "score": "0.42930296", "text": "def setup(self, retinaParameterFile=None, applyDefaultSetupOnFailure=None): # 
real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "aa02e5e9f12e398319446b968909ad7e", "score": "0.4275885", "text": "def rebuild():\n local('make clean')\n local('make dsdlc')\n local('packager/packager.py')\n local('make -B -j')", "title": "" }, { "docid": "cc9589226b44003261f1efdc40642832", "score": "0.42728302", "text": "def installImage():\n checkInstall(packages.IMAGE_PACKAGES)", "title": "" }, { "docid": "931f4bf90877f50dd1684ee1a3f5ebda", "score": "0.42664364", "text": "def setupEnvironment(env):\n # Directories that build output will be generated into\n platform = sys.platform\n bld_mode = env['build_mode'] \n bld_dir = env['build_dir']\n variant_suffix = ''\n if env['use_plat']:\n bld_pdir = '%s/%s' % (bld_dir, platform)\n variant_suffix = platform\n else:\n bld_pdir = bld_dir\n bld_vdir = '%s/%s' % (bld_pdir, bld_mode)\n env['build_vdir'] = bld_vdir\n variant_suffix += bld_mode\n \n\n \n # Store build directories and setup build output data structures\n env.AppendUnique(\n PRJ_BLD_DIR = Dir(bld_dir),\n PRJ_BLD_VDIR = Dir(bld_vdir),\n PRJ_BLD_BIN = Dir('%s/bin' % bld_vdir),\n PRJ_BLD_LIB = Dir('%s/lib' % bld_vdir),\n PRJ_VRT_SFX = variant_suffix,\n PRJ_EXES = {},\n PRJ_TSTS = {},\n PRJ_LIBS = {},\n PRJ_OBJS = {}\n )\n\n # If project install location (prefix) is specified\n if env['PREFIX']:\n # Define installation locations\n env.AppendUnique(\n PRJ_INST_DIR = Dir(env['PREFIX']),\n PRJ_INST_BIN = Dir('%s/bin' % env['PREFIX']),\n PRJ_INST_LIB = Dir('%s/lib' % env['PREFIX'])\n )\n \n # Baseline compile/link flags\n if platform == 'win32':\n if 'cl' in env['CC']:\n if env['build_mode'] == 'dbg':\n env.MergeFlags('-MDd -W1 -D_DEBUG -RTCs -Zi')\n else:\n env.MergeFlags('-MD -O1 -DNDEBUG')\n if env['verbose']:\n env.AppendUnique(CCFLAGS='-Bt')\n env.AppendUnique(LINKFLAGS=['-verbose:lib', '-time'])\n else:\n print \"Unrecognized compiler: %s\" % env['CC']\n elif 'linux' in platform:\n # Replace LINKCOM to position LINKFLAGS at the very end of\n # link command line\n env.Replace(LINKCOM='$LINK -o $TARGET $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS $LINKFLAGS')\n env.AppendUnique(LINKFLAGS = ['-lm' ])\n env.AppendUnique(CCFLAGS = ['-fPIC','-DPIC'])\n if env['build_mode'] == 'dbg':\n env.MergeFlags('-g')\n else:\n env.MergeFlags('-O2 -w')\n if env['verbose']:\n env.AppendUnique(CCFLAGS='-v')\n else:\n # Warning only, rely on SCons to come up with meaningful defaults\n print \"Unrecognized platform: %s\" % platform\n\n \n # Inform user about the build mode\n print \"Will build for %s mode...\" % bld_mode\n # Help output to be shown to users\n Help(\"\"\"\nType: 'scons' to build all libraries and executables.\n \"\"\")\n # At the abnormal exit show information about build failures\n atexit.register(printBuildFailures)\n return 0", "title": "" }, { "docid": "4eebe627f0a0841c74f1ae121c2d0c4a", "score": "0.42564732", "text": "def copyimg():\n d = {} # dictionary of image names\n dictionaries_dir = \"../sphinx/cslv1/dictionaries\"\n # current\n # dictionaries_dir = \"source/dictionaries\"\n target_dir = \"source/images\"\n n = 0 # number of files copied\n for dictcode in dictcodes:\n if dictcode in ['acc','ae','ap90','ben','bhs']:\n print('skipping dictionary',dictcode)\n continue\n dirname = '%s/prefaces/%spref/images' %(dictionaries_dir,dictcode)\n try:\n filenames = os.listdir(dirname)\n except:\n print('%s has no images: dir=%s' % (dictcode,dirname))\n continue\n #print(filenames)\n \n for filename in filenames:\n old = '%s/%s' %(dirname,filename)\n new = 
'%s/%s' %(target_dir,filename)\n copyfile(old,new)\n n = n+1\n print(n,\"files copied to\",target_dir)", "title": "" }, { "docid": "caa47c8460ac7d88d9ce6d48fc7f6801", "score": "0.4256299", "text": "def handle(self, *args, **options):\n sourcekey = options['sourcekey']\n foldername = options['foldername']\n dryrun = options['dry_run']\n\n if dryrun:\n log.info(term.yellow(\"DRY RUN. Changes will not be committed.\"))\n\n folderpath = os.path.join(\"/data\", \"images\", foldername)\n if not os.path.exists(folderpath):\n log.error(term.red(\"Folder {0} does not exist. Exiting.\".format(foldername)))\n sys.exit(-1)\n\n try:\n src = Source.objects.get(pk=sourcekey)\n except Source.DoesNotExist:\n log.error(term.red(\"Source {0} does not exist. Exiting.\".format(sourcekey)))\n sys.exit(-1)\n\n # Check whether source has pages attached already\n num_pages = src.pages.count()\n if num_pages > 0:\n log.error(term.red(\"Source already has {0} page records. This should only be run on sources with no existing page records.\".format(num_pages)))\n sys.exit(-1)\n\n # Check how many images are in the images folder.\n files = sorted(glob.glob(os.path.join(folderpath, \"*.jpx\")))\n\n if len(files) == 0:\n log.error(term.red(\"There were no JPX files in {0}. Exiting.\".format(foldername)))\n sys.exit(-1)\n\n # Try to parse the filename for info.\n # Matches filenames of format:\n # GB-Lcm_ms1070_133v.jpx\n # GB-Lcm_ms1070_133v_w.jpx\n # GB-Lcm_ms1070_backcover.jpx\n # E-Sco_5-1-43_back.jpx\n # E-Sco_5-5-20_039r_a.jpx\n # E-MOsb_MS1085_115br.jpx\n # As well as those with the non-foliated names in them (see the keys for NON_FOLIATED_NAMES\n page_name_regex = re.compile(r\"(?P<sig>.*)_(?P<pname>(\\d{3}b?[r|v])|(\" + \"|\".join(NON_FOLIATED_NAMES.keys()) + r\"))(?P<spctype>_w|_a)?(?P<ext>.jpx)\")\n\n for order, imagepath in enumerate(files):\n log.info(term.magenta(\"------- New image {0} --------\".format(imagepath)))\n image_name = os.path.basename(imagepath)\n\n # Try to retrieve the image.\n loc = \"https://{0}/iiif/image/{1}/{2}\".format(settings.HOSTNAME, foldername, image_name)\n url = urljoin(loc + '/', \"info.json\")\n\n log.info(term.green(\"Retrieving {0}\".format(url)))\n r = requests.get(url, headers={\n \"referer\": \"https://{0}\".format(settings.HOSTNAME),\n \"X-DIAMM\": settings.DIAMM_IMAGE_KEY\n })\n\n iiif_resp = None\n if 200 <= r.status_code < 300:\n log.info(term.green(\"Received a success response from info.json retrieval\"))\n j = r.json()\n iiif_resp = ujson.dumps(j)\n elif r.status_code == 404:\n log.warning(term.yellow(\"404 not found for {0}\".format(loc)))\n log.warning(term.yellow(\"Skipping.\"))\n continue\n\n if iiif_resp is None:\n log.warning(term.yellow(\"No valid IIIF image response. Skipping.\"))\n continue\n\n re_match = re.match(page_name_regex, image_name)\n if not re_match:\n log.warning(term.yellow(\"No matches found for {0}. 
It will be skipped.\".format(image_name)))\n continue\n\n pname = re_match.group(\"pname\")\n special_type = re_match.group(\"spctype\")\n\n log.debug(term.green(\"Parsing filename: Page name: {pname}, Special type: {spctype}\".format(pname=pname, spctype=special_type)))\n\n # Try to get the special page names; if None, use the name from the filename.\n page_name = NON_FOLIATED_NAMES.get(pname, None)\n if not page_name:\n page_name = pname\n\n if not special_type:\n log.info(term.green(\"Creating a regular page with label {0}\".format(page_name)))\n # Create a page record.\n p = {\n 'source': src,\n 'numeration': page_name,\n 'sort_order': order,\n 'page_type': Page.PAGE\n }\n if not dryrun:\n pg = Page(**p)\n pg.save()\n else:\n # Try to find a previously-saved page\n log.info(term.green(\"Found a special image; trying to retrieve a page record for it.\"))\n try:\n pg = Page.objects.filter(numeration=page_name).first()\n except Page.DoesNotExist:\n print(term.yellow(\"Could not find an ordinary page for label {0}\".format(page_name)))\n print(term.yellow(\"Special image {0} could not be attached to a page.\".format(image_name)))\n continue\n\n # Create an image record\n imtype = TYPE_MAP.get(special_type)\n log.info(term.green(\"Creating an image record.\"))\n\n if dryrun:\n pg = None\n\n im = {\n \"page\": pg,\n \"type\": imtype,\n \"location\": loc,\n \"public\": True,\n \"iiif_response_cache\": iiif_resp\n }\n\n if not dryrun:\n img = Image(**im)\n img.save()\n\n log.info(\"Done adding pages and images.\")", "title": "" }, { "docid": "f2e7e52145979651f8e764be0ac944be", "score": "0.42553544", "text": "def PreBuild(self):\n for p in self.prebuild:\n command_output(shlex.split(p), self.repo_dir)\n if platform.system() == 'Linux' or platform.system() == 'Darwin':\n for p in self.prebuild_linux:\n command_output(shlex.split(p), self.repo_dir)\n if platform.system() == 'Windows':\n for p in self.prebuild_windows:\n command_output(shlex.split(p), self.repo_dir)", "title": "" }, { "docid": "c0f9268a86bd5cf388896c9e6cc435ef", "score": "0.42465493", "text": "def preprocess(self):\n if os.path.isfile(self.image_path):\n self.dataset.append(self.image_path)\n else:\n for item_subffix in self.subffix:\n path = os.path.join(self.image_path,'*.%s'%(item_subffix))\n images = glob.glob(path)\n for item in images:\n self.dataset.append(item)\n # self.dataset = images\n print('Finished preprocessing the test dataset, total image number: %d...'%len(self.dataset))", "title": "" }, { "docid": "83615e47c4c8e6ec4317e253d4c0f456", "score": "0.4245301", "text": "def test_glow_compile_spec(self):\n\n dims = [2, 2]\n input_meta = InputMeta()\n input_meta.set(dims, torch.float32)\n inputs = [input_meta, input_meta]\n\n options = CompilationOptions()\n options.backend = \"Interpreter\"\n spec = GlowCompileSpec()\n spec.set(inputs, options)", "title": "" }, { "docid": "3c2da5c93233795c7c729aadab12e4fb", "score": "0.423874", "text": "def extra_packages_pre(self):\n self.setcfg('pkgdefaultclass', ['easybuild.easyblocks.rextension', \"EB_RExtension\"])\n self.setcfg('pkgfilter', EXTS_FILTER_R_PACKAGES)\n self.setcfg('pkgtemplate', '%(name)s/%(name)s_%(version)s.tar.gz')\n self.setcfg('pkginstalldeps', True)", "title": "" }, { "docid": "0313009dac6f4a0e87fb8e3f19625f18", "score": "0.42296776", "text": "def imageSave(imagePIL,filename) :", "title": "" }, { "docid": "c10ffeb4f0d975da3fac04fcd4371222", "score": "0.42233226", "text": "def preprocessing(self):\n for file in self.files:\n try:\n name = self.dir_name + \"/\" + 
file\n image = Image.open(name)\n for orientation in ExifTags.TAGS.keys():\n if ExifTags.TAGS[orientation] == 'Orientation':\n break\n exif = dict(image._getexif().items())\n\n if exif[orientation] == 3:\n image = image.rotate(180, expand=True)\n elif exif[orientation] == 6:\n image = image.rotate(270, expand=True)\n elif exif[orientation] == 8:\n image = image.rotate(90, expand=True)\n image.save(\"preprocessed_files/\" + file)\n image.close()\n\n except (AttributeError, KeyError, IndexError):\n # cases: image don't have getexif\n pass\n\n self.dir_name = self.preprocess_dir", "title": "" }, { "docid": "a1835ac248bd378b3d0e9068889a180d", "score": "0.42206183", "text": "def post_process_images(self, doctree):\n\n super(AbstractSlideBuilder, self).post_process_images(doctree)\n\n # figure out where this doctree is in relation to the srcdir\n relative_base = (\n ['..'] *\n doctree.attributes.get('source')[len(self.srcdir) + 1:].count('/')\n )\n\n for node in doctree.traverse(nodes.image):\n\n if node.get('candidates') is None:\n node['candidates'] = ('*',)\n\n # fix up images with absolute paths\n if node['uri'].startswith(self.outdir):\n node['uri'] = '/'.join(\n relative_base + [\n node['uri'][len(self.outdir) + 1:]\n ]\n )", "title": "" }, { "docid": "76322eedefe7381b7ad6e285e4446d9c", "score": "0.42137706", "text": "def mock_turbo_jpeg(\n first_width=None, second_width=None, first_height=None, second_height=None\n):\n mocked_turbo_jpeg = Mock()\n mocked_turbo_jpeg.decode_header.side_effect = [\n (first_width, first_height, 0, 0),\n (second_width, second_height, 0, 0),\n ]\n mocked_turbo_jpeg.scale_with_quality.return_value = EMPTY_8_6_JPEG\n mocked_turbo_jpeg.encode.return_value = EMPTY_8_6_JPEG\n return mocked_turbo_jpeg", "title": "" }, { "docid": "4b137a2959a440af04a5d3457c9e8269", "score": "0.42095077", "text": "def __init__(self, *args, **kwargs):\r\n registry.formats = []\r\n registry.names = {}\r\n registry._populate()\r\n registry.register(PNG)\r\n\r\n super(PymagingImage, self).__init__(*args, **kwargs)", "title": "" }, { "docid": "306b12b3d6b749ba01e8c258fbc3bdee", "score": "0.42088935", "text": "def pre_encode(self, image): # pylint: disable=unused-argument,no-self-use\n return None", "title": "" }, { "docid": "82bedca95ee3267632a23c5118327551", "score": "0.42069277", "text": "def texturePush ( db = None, doc_id = \"\", path = \"\", description = \"\",\n progressbar = False, msgbar = False, rename = False ) :\n \n #List the directory\n #TODO: Use glob to get the right textures\n if not os.path.isdir ( path ):\n return False\n \n lsdir = os.listdir ( path )\n \n files = list ()\n # Iterate over files contained in the directory 'path'\n # and check if the file is a mra or psd\n for fil in lsdir:\n \n # Get file extension\n ext = os.path.splitext ( fil ) [-1] \n \n if ext in ( \".mra\", \".psd\" ):\n # If current file is a Mari or photoshop file\n # aka extraordinary textures files\n if not ( fil.find ( doc_id ) == 0 ) :\n print fil, \"should begin with %s\" % doc_id\n return False \n else:\n # else add the file for texture check\n files.append ( os.path.join ( path, fil ) )\n \n # Check the none extraordinary textures files\n texCheck = textureCheck ( doc_id, files )\n \n # If every textures success the check\n if len ( texCheck ) == 0 :\n \n #Push the directory containing the textures\n pushed = pushDir ( db, doc_id, path, description ) \n return pushed\n \n else :\n for tex in texCheck :\n print ( \"texturePush(): %s is wrong\" % tex )\n \n simptex = \"%s_%s_%s.%s.%s\" % ( 
doc_id, \"<variation>\", \"<type>\", \"<udim>\", \"tif\" )\n animtex = \"%s_%s_%s.%s.%s.%s\" % ( doc_id, \"<variation>\", \"<type>\", \"<udim>\",\"<frame>\", \"tif\")\n print \"texturePush(): expect %s or %s \" % ( simptex , animtex)\n \n return False", "title": "" }, { "docid": "da2699bab1ad8aadb7feb5aa5c6dbbac", "score": "0.42052326", "text": "def build(self, spec, prefix):\n\n with working_dir(self.build_directory):\n mvn = which(\"mvn\")\n if self.run_tests:\n mvn(\"verify\", *self.build_args())\n else:\n mvn(\"package\", \"-DskipTests\", *self.build_args())", "title": "" }, { "docid": "b71ad87811ae08a48d3a0ea6062ef5dc", "score": "0.42012763", "text": "def _run_premake(self, args):\n\n\t\tsublime.status_message(\"Running premake...\")\n\n\t\tself.window.run_command(\"exec\", {\"cmd\": [\"premake4\", \"--file=\" + self._get_premake_filepath()] + args})", "title": "" }, { "docid": "e871f2fb080d998dd21099b5167e5436", "score": "0.41976154", "text": "def pdf2jpeg(pdf_input_path, jpeg_output_path):\n args = [\"pef2jpeg\", # actual value doesn't matter\n \"-dNOPAUSE\",\n \"-sDEVICE=jpeg\",\n \"-r144\",\n \"-sOutputFile=\" + jpeg_output_path,\n pdf_input_path]\n\n encoding = locale.getpreferredencoding()\n args = [a.encode(encoding) for a in args]\n\n ghostscript.Ghostscript(*args)", "title": "" }, { "docid": "08acedb5174fbf0e855ee1d9caa25bee", "score": "0.41944075", "text": "def job_builder(args, meta):\n # Overall job stack. List of list of jobs\n job_stack = []\n\n # Jobs/CPU (INT): divide the number of images by the number of requested CPU resources\n jobs_per_cpu = args.jobcount / args.cpu\n\n # Get the list of images\n # images = list(meta.keys())\n images = []\n for img in list(meta.keys()):\n # # If a date range was requested, check whether the image is within range\n # if args.dates:\n # # Convert image datetime to unix time\n # timestamp = dt_parser(meta[img]['timestamp'])\n # time_delta = timestamp - datetime.datetime(1970, 1, 1)\n # unix_time = (time_delta.days * 24 * 3600) + time_delta.seconds\n # if unix_time < args.start_date or unix_time > args.end_date:\n # continue\n if args.coprocess is not None:\n if meta[img]['imgtype'] != args.coprocess:\n images.append(img)\n else:\n images.append(img)\n\n print(\"Job list will include \" + str(len(images)) + \" images\" + '\\n', file=sys.stderr)\n\n # For each image\n for img in images:\n if (args.coprocess is not None) and ('coimg' in meta[img]):\n # Create an output file to store the co-image processing results and populate with metadata\n coimg = meta[meta[img]['coimg']]\n coout = file_writer(os.path.join(\".\", args.jobdir, meta[img][\"coimg\"] + \".txt\"))\n coout.write('\\t'.join(map(str, (\"META\", \"image\", os.path.join(coimg['path'], meta[img]['coimg'])))) + '\\n')\n # Valid metadata\n for m in list(args.valid_meta.keys()):\n coout.write('\\t'.join(map(str, (\"META\", m, coimg[m]))) + '\\n')\n\n # Create an output file to store the image processing results and populate with metadata\n outfile = file_writer(os.path.join(\".\", args.jobdir, img + \".txt\"))\n outfile.write('\\t'.join(map(str, (\"META\", \"image\", os.path.join(meta[img]['path'], img)))) + '\\n')\n # Valid metadata\n for m in list(args.valid_meta.keys()):\n outfile.write('\\t'.join(map(str, (\"META\", m, meta[img][m]))) + '\\n')\n\n outfile.close()\n\n # Build the job stack\n # The first n - 1 CPUs will get INT jobs_per_cpu\n # The last CPU will get the remainder\n job = 0\n # For the first n - 1 CPU\n for c in range(1, args.cpu):\n # List of jobs for this CPU\n 
jobs = []\n\n # For each job/CPU\n for j in range(0, jobs_per_cpu):\n job_parts = [\"python\", args.pipeline, \"--image\", os.path.join(meta[images[job]]['path'], images[job]),\n \"--outdir\", args.outdir, \"--result\", os.path.join(args.jobdir, images[job]) + \".txt\"]\n # Add job to list\n if args.coprocess is not None and ('coimg' in meta[images[job]]):\n job_parts = job_parts + [\"--coresult\", os.path.join(args.jobdir, meta[images[job]]['coimg']) + \".txt\"]\n if args.writeimg:\n job_parts.append(\"--writeimg\")\n if args.other_args:\n other_args1=re.sub(\"'\",\"\",args.other_args)\n other_args = other_args1.split(\" \")\n job_parts = job_parts + other_args\n jobs.append(job_parts)\n\n # Increase the job counter by 1\n job += 1\n\n # Add the CPU job list to the job stack\n job_stack.append(jobs)\n\n # Add the remaining jobs to the last CPU\n jobs = []\n for j in range(job, len(images)):\n job_parts = [\"python\", args.pipeline, \"--image\", os.path.join(meta[images[job]]['path'], images[job]),\n \"--outdir\", args.outdir, \"--result\", os.path.join(args.jobdir, images[job]) + \".txt\"]\n # Add job to list\n if args.coprocess is not None and ('coimg' in meta[images[j]]):\n job_parts = job_parts + [\"--coresult\", os.path.join(args.jobdir, meta[images[job]]['coimg']) + \".txt\"]\n if args.writeimg:\n job_parts.append(\"--writeimg\")\n if args.other_args:\n other_args1 = re.sub(\"'\",\"\",args.other_args)\n other_args = other_args1.split(\" \")\n job_parts = job_parts + other_args\n jobs.append(job_parts)\n\n # Add the CPU job list to the job stack\n job_stack.append(jobs)\n\n return job_stack", "title": "" }, { "docid": "bd16ae9ded44a1270a21f06b1094d9dd", "score": "0.41869715", "text": "def set_compression():\n with Image(filename=asset('mona-lisa.jpg')) as img:\n img.compression_quality = 50\n assert img.compression_quality == 50\n strio = StringIO.StringIO()\n img.save(file=strio)\n strio.seek(0)\n with Image(file=strio) as jpg:\n assert jpg.compression_quality == 50\n with raises(TypeError):\n img.compression_quality = 'high'", "title": "" }, { "docid": "53ffb512585e2f327127c15450791536", "score": "0.4181887", "text": "def create_protein_seq_image(item=None, color_map=None):\n sequence,img_name, img_h, img_w = item\n image = np.full((img_h, img_w,3), (500,500,500))\n for index in range(len(sequence)):\n image[:, index, :] = color_map[sequence[index]]\n pil_image = Image.fromarray(image.astype(np.uint8))\n pil_image.save(img_name)", "title": "" }, { "docid": "3fa2f68205a17ae54df55cd67cb2c3d1", "score": "0.41778874", "text": "def prepare_step(self, *args, **kwargs):\n super(Bundle, self).prepare_step(*args, **kwargs)\n\n if get_software_root('Python') is None:\n raise EasyBuildError(\"Python not included as dependency!\")\n\n self.pylibdir = det_pylibdir()", "title": "" }, { "docid": "31d0317908ec11adf775cf362e344fa4", "score": "0.41708156", "text": "def preprocess(genre_or_style, min_vals = [128,128], n=None,shuffle=True):\n \n path = config.datafile(genre_or_style)\n #list all images in ../data/'genre_or_style'/img1.jpg\n\n print(\"Reading images...\")\n\n all_images = [x for x in os.listdir(path) if x.endswith(\".jpg\") | x.endswith(\".png\") | x.endswith(\".jpeg\")]\n if shuffle:\n random.shuffle(all_images)\n\n all_images_ndarray = []\n i=0\n for x in all_images:\n if n is not None and i>=n:\n break\n # figure out prefix\n pfx = int(x[:x.find('-')])\n if config.min_prefix_no is not None and pfx < config.min_prefix_no: continue\n if config.max_prefix_no is not None and pfx > 
config.max_prefix_no: continue\n try:\n im = io.imread(os.path.join(path,x))\n shape = im.shape\n r = shape[0] / shape[1]\n if shape[0] >= min_vals[0] and shape[1] >= min_vals[1] and shape[2] == 3 and r>config.min_img_ratio and r<=config.max_img_ratio:\n all_images_ndarray.append(im)\n i += 1\n except:\n print(\"Cannot read {}\".format(x))\n\n print(\"Using {} images out of {}\".format(len(all_images_ndarray),len(all_images)))\n\n show_image = False\n \n if show_image:\n ## show first 5 images ##\n from matplotlib import pyplot as plt\n for i in range(5):\n plt.imshow(all_images_ndarray[i], interpolation = \"nearest\")\n plt.show()\n \n del all_images\n gc.collect()\n \n #get min values in dimensions for resizing:\n #min_vals = min(list(map(np.shape, all_images_ndarray)))\n ## hardcoded: shrink size to 128x128 by hand because of latter training\n \n ## possible enhancement: take only image which are at leat 128x128 ? if smaller and \"upsizing\"\n ## might lead to bad result?! \n \n # transform that each image has the shape (256,256,3)\n all_images_ndarray_resized = list(map(lambda x: resize_helper(x,min_vals=min_vals), all_images_ndarray))\n \n del all_images_ndarray\n gc.collect()\n \n # add dimension such that we can generate a dataset, hence (1,256,256,3)\n all_images_ndarray_resized = list(map(expander, all_images_ndarray_resized))\n \n # vertical stack resized images in order to get dataset:\n final_images_stacked = np.vstack(all_images_ndarray_resized)\n \n del all_images_ndarray_resized\n gc.collect() \n \n ## show first 5 images ##\n if show_image:\n from matplotlib import pyplot as plt\n for i in range(5):\n plt.imshow(final_images_stacked[i], interpolation = \"nearest\")\n plt.show()\n \n #res = [all_images_ndarray_resized, final_images_stacked, all_images_ndarray]\n \n return final_images_stacked", "title": "" }, { "docid": "fe4fe4aa7052682775addcf32bd93cec", "score": "0.41707328", "text": "def fpreproc(bld,source):\n ppfs = []\n for f in to_nodes(bld,source): \n if f.suffix()=='.F90': \n ppf = f.change_ext('.f90')\n bld(rule='${FC} -E ${SRC} > ${TGT}', source=f, target=ppf)\n ppfs.append(ppf)\n else:\n ppfs.append(f)\n return ppfs if len(ppfs)>1 else ppfs[0]", "title": "" }, { "docid": "83f4d9145d32d5f677d1b9c0ca2908a9", "score": "0.4165178", "text": "def install(self, spec, prefix):\n with working_dir(self.build_directory):\n if self.generator == \"Unix Makefiles\":\n inspect.getmodule(self).make(*self.install_targets)\n elif self.generator == \"Ninja\":\n inspect.getmodule(self).ninja(*self.install_targets)", "title": "" }, { "docid": "7303d3dcf73645b55c66766867608418", "score": "0.416275", "text": "def pre(self, command, output_dir, vars):\n common.Package.pre(self, command, output_dir, vars)\n vars['mode'] = 'zeo'\n if not 'with_ploneproduct_paasync' in vars:\n vars['with_ploneproduct_paasync'] = False\n vars['plonesite'] = 'Plone'\n vars['major'] = int(vars['plone_version'][0])\n vars['sources_url'] = self.get_sources_url(vars)\n #vars['versions_url'] = self.get_versions_url(vars)\n #vars['zope2_url'] = self.get_zope2_url(vars)\n #vars['ztk_url'] = self.get_ztk_url(vars)\n if not vars.get('ztk_url', None):\n vars['ztk_url'] = False\n vars['sane_name'] = common.SPECIALCHARS.sub('', vars['project'])\n vars['category'] = 'zope'\n vars['includesdirs'] = ''\n vars['hr'] = '#' * 120\n common.Package.pre(self, command, output_dir, vars)\n vars['mode'] = vars['mode'].lower().strip()\n\n # transforming eggs requirements as lists\n for var in self.sections_mappings:\n if var in 
vars:\n vars[var] = [a.strip() for a in vars[var].split(',')]\n\n vars['autocheckout'] = []\n for var in vars:\n if var.startswith('with_autocheckout') and vars[var]:\n vn = var.replace('with_autocheckout_', '')\n vars['autocheckout'].append(\n self.plone_sources[vn]['name']\n )\n\n for var in self.plone_sources:\n if self.plone_sources[var].get('autocheckout', '') == 'y':\n if not self.plone_sources[var]['name'] in vars['autocheckout']:\n if (\n (True in [vars.get(o, False)\n for o in self.plone_sources[var]['options']])\n and (self.plone_sources[var]['name']\n not in vars['autocheckout'])\n ):\n vars['autocheckout'].append(\n self.plone_sources[var]['name']\n )\n\n lps = copy.deepcopy(self.plone_sources)\n for item in self.plone_sources:\n col = self.plone_sources[item]\n found = False\n for option in col['options']:\n if vars.get(option, False):\n found = True\n break\n if not found:\n del lps[item]\n vars['plone_sources'] = lps\n\n # ZODB3 from egg\n if vars['major'] < 4:\n vars['additional_eggs'].append('#ZODB3 is installed as an EGG!')\n vars['additional_eggs'].append('ZODB3')\n\n # do we need some pinned version\n vars['plone_versions'] = []\n pin_added = []\n for var in self.versions_mappings:\n vars['plone_versions'].append(('# %s' % var, '',))\n vmap = self.versions_mappings[var]\n vmap.sort()\n for pin in vmap:\n if not pin in pin_added:\n pin_added.append(pin)\n vars['plone_versions'].append(pin)\n\n if not vars['mode'] in ['zeo']:\n raise Exception('Invalid mode (not in zeo')\n if not os.path.exists(self.output_dir):\n os.makedirs(self.output_dir)\n\n for section in self.sections_mappings:\n for var in [k\n for k in self.sections_mappings[\n section]\n if vars.get(k, '')]:\n # skip plone products which are already in\n # the product 's setup.py\n if vars['with_generic'] and section == 'additional_eggs':\n pass\n if not section == 'plone_zcml':\n vars[section].append('#%s' % var)\n for item in self.sections_mappings[section][var]:\n if section == 'plone_zcml':\n item = '-'.join(item)\n if not '%s\\n' % item in vars[section]:\n if not item in vars[section]:\n vars[section].append(item)\n\n # order zcml\n\n def zcmlsort(obja, objb):\n obja = re.sub('^#', '', obja).strip()\n objb = re.sub('^#', '', objb).strip()\n ma, mb = (package_slug_re.match(obja),\n package_slug_re.match(objb))\n if not obja:\n return 1\n if not objb:\n return -1\n apackage, aslug = (obja, 'configure')\n if ma:\n apackage, aslug = ma.groups()\n bpackage, bslug = (objb, 'configure')\n if mb:\n bpackage, bslug = mb.groups()\n aorder = self.zcml_loading_order.get((apackage, aslug), 50000)\n border = self.zcml_loading_order.get((bpackage, bslug), 50000)\n return aorder - border\n\n vars[\"plone_zcml\"].sort(zcmlsort)\n vars[\"plone_zcml\"] = [a for a in vars[\"plone_zcml\"] if a.strip()]\n\n # add option marker\n for option in self.zcml_mappings:\n for p in self.zcml_mappings[option]:\n id = '-'.join(p)\n if id in vars['plone_zcml']:\n i = vars['plone_zcml'].index(id)\n vars['plone_zcml'][i:i] = ['#%s' % option]\n vars['plone_zcml'][0:0] = ['']\n\n if not os.path.exists(self.output_dir):\n self.makedirs(self.output_dir)\n vars['plone_products_install'] = ''\n vars['zope2_install'] = ''\n vars['debug_mode'] = 'off'\n vars['verbose_security'] = 'off'\n\n # and getting stuff from it.\n ep = None\n try:\n if not getattr(self, 'default_template_package', None):\n raise NoDefaultTemplateError('')\n\n epk = pkg_resources.load_entry_point(\n self.default_template_package,\n self.default_template_epn,\n 
self.default_template_templaten\n )\n ep = epk(self)\n coo = command.options.overwrite\n command.options.overwrite = True\n\n def null(a, b, c):\n pass\n ep.post = null\n ep.check_vars(vars, command)\n ep.run(command, vars['path'], vars)\n command.options.overwrite = coo\n except NoDefaultTemplateError, e:\n pass\n except Exception, e:\n print 'Error executing plone buildout, %s' % e\n # be sure our special python is in priority\n if vars['with_supervisor_instance4']:\n vars['with_supervisor_instance3'] = True\n if vars['with_supervisor_instance3']:\n vars['with_supervisor_instance2'] = True\n if vars['with_supervisor_instance2']:\n vars['with_supervisor_instance1'] = True\n\n for port in range(500):\n vars['http_port%s' % port] = int(\n vars['http_port']) + port\n #if 'socket' == vars['zeo_port'].strip():\n # vars['zeo_port_buildbot'] = int(vars['zeo_port']) + 1\n vars['running_user'] = common.running_user\n vars['instances_description'] = common.INSTANCES_DESCRIPTION % vars\n suffix = vars['major']\n if vars['major'] > 3:\n suffix = self.name.replace('genericskel.plone', '')\n zaztk_path = pkg_resources.resource_filename(\n 'collective.generic.skel',\n 'projects/plone%s/zopeapp.versions.cfg' % suffix\n )\n ztk_path = pkg_resources.resource_filename(\n 'collective.generic.skel',\n 'projects/plone%s/ztk.versions.cfg' % suffix\n )\n vars['have_ztk'] = False\n if vars['with_supervisor_instance1']:\n vars['first_instance'] = 'instance1'\n else:\n vars['first_instance'] = 'instance'\n if os.path.exists(ztk_path):\n vars['have_ztk'] = True\n vars['ztk_path'] = ztk_path\n vars['zaztk_path'] = zaztk_path\n vars['default_plone_profile'] = '%s.policy:default' % vars['project']\n if vars['with_generic_addon']:\n vars['default_plone_profile'] = '%s:default' % vars['project']\n vars['ndot'] = '.'", "title": "" }, { "docid": "4bbe0c8d8067b207272db65b5c75c7f2", "score": "0.416049", "text": "def _main():\r\n # Parse command line arguments\r\n from optparse import OptionParser\r\n version = '%prog ' + __revision__.strip('$').replace('Rev: ', 'r')\r\n parser = OptionParser(version=version)\r\n parser.set_usage(\"%prog [options] [pnmfile]\")\r\n parser.add_option(\"-i\", \"--interlace\",\r\n default=False, action=\"store_true\",\r\n help=\"create an interlaced PNG file (Adam7)\")\r\n parser.add_option(\"-t\", \"--transparent\",\r\n action=\"store\", type=\"string\", metavar=\"color\",\r\n help=\"mark the specified color as transparent\")\r\n parser.add_option(\"-b\", \"--background\",\r\n action=\"store\", type=\"string\", metavar=\"color\",\r\n help=\"save the specified background color\")\r\n parser.add_option(\"-a\", \"--alpha\",\r\n action=\"store\", type=\"string\", metavar=\"pgmfile\",\r\n help=\"alpha channel transparency (RGBA)\")\r\n parser.add_option(\"-g\", \"--gamma\",\r\n action=\"store\", type=\"float\", metavar=\"value\",\r\n help=\"save the specified gamma value\")\r\n parser.add_option(\"-c\", \"--compression\",\r\n action=\"store\", type=\"int\", metavar=\"level\",\r\n help=\"zlib compression level (0-9)\")\r\n parser.add_option(\"-T\", \"--test\",\r\n default=False, action=\"store_true\",\r\n help=\"create a test image\")\r\n parser.add_option(\"-R\", \"--test-red\",\r\n action=\"store\", type=\"string\", metavar=\"pattern\",\r\n help=\"test pattern for the red image layer\")\r\n parser.add_option(\"-G\", \"--test-green\",\r\n action=\"store\", type=\"string\", metavar=\"pattern\",\r\n help=\"test pattern for the green image layer\")\r\n parser.add_option(\"-B\", \"--test-blue\",\r\n 
action=\"store\", type=\"string\", metavar=\"pattern\",\r\n help=\"test pattern for the blue image layer\")\r\n parser.add_option(\"-A\", \"--test-alpha\",\r\n action=\"store\", type=\"string\", metavar=\"pattern\",\r\n help=\"test pattern for the alpha image layer\")\r\n parser.add_option(\"-D\", \"--test-deep\",\r\n default=False, action=\"store_true\",\r\n help=\"use test patterns with 16 bits per layer\")\r\n parser.add_option(\"-S\", \"--test-size\",\r\n action=\"store\", type=\"int\", metavar=\"size\",\r\n help=\"width and height of the test image\")\r\n (options, args) = parser.parse_args()\r\n\r\n # Convert options\r\n if options.transparent is not None:\r\n options.transparent = color_triple(options.transparent)\r\n if options.background is not None:\r\n options.background = color_triple(options.background)\r\n\r\n # Run regression tests\r\n if options.test:\r\n return test_suite(options)\r\n\r\n # Prepare input and output files\r\n if len(args) == 0:\r\n ppmfilename = '-'\r\n ppmfile = sys.stdin\r\n elif len(args) == 1:\r\n ppmfilename = args[0]\r\n ppmfile = open(ppmfilename, 'rb')\r\n else:\r\n parser.error(\"more than one input file\")\r\n outfile = sys.stdout\r\n\r\n # Encode PNM to PNG\r\n width, height = read_pnm_header(ppmfile)\r\n writer = Writer(width, height,\r\n transparent=options.transparent,\r\n background=options.background,\r\n has_alpha=options.alpha is not None,\r\n gamma=options.gamma,\r\n compression=options.compression)\r\n if options.alpha is not None:\r\n pgmfile = open(options.alpha, 'rb')\r\n awidth, aheight = read_pnm_header(pgmfile, 'P5')\r\n if (awidth, aheight) != (width, height):\r\n raise ValueError(\"alpha channel image size mismatch\" +\r\n \" (%s has %sx%s but %s has %sx%s)\"\r\n % (ppmfilename, width, height,\r\n options.alpha, awidth, aheight))\r\n writer.convert_ppm_and_pgm(ppmfile, pgmfile, outfile,\r\n interlace=options.interlace)\r\n else:\r\n writer.convert_ppm(ppmfile, outfile,\r\n interlace=options.interlace)", "title": "" }, { "docid": "9f7eb0a414dacafdaac20d758bf88648", "score": "0.41584086", "text": "def Jpeg(\n #ToSPieceOut, \n #ToSMaskOut, \n #PieceIn, \n #MaskIn, \n MaskReset, \n #Enable, \n PushPop, \n Reset,\n #Clk,\n clk_fast,\n DEPTH = 16\n):\n\n ONES = 2**16-1\n\n #ToSPiece = Signal(intbv(0)[6:])\n #ToSMask = Signal(intbv(ONES)[16:])\n #Stack = [Signal(intbv(0)[16:]) for i in range(DEPTH-1)]\n Jpeg = [Signal(intbv(0)[16:]) for i in range(DEPTH-1)]\n StackWriteData = Signal(intbv(0)[16:])\n StackReadData = Signal(intbv(0)[16:])\n StackWrite = Signal(bool(0))\n Pointer = Signal(intbv(0, min=0, max=DEPTH-1))\n WritePointer = Signal(intbv(0, min=0, max=DEPTH-1))\n NrItems = Signal(intbv(0, min=0, max=DEPTH+1))\n sig_out = [Signal(intbv(0)[52:]) for i in range(DEPTH-1) ]\n sig_in = [Signal(intbv(0)[52:]) for i in range(DEPTH-1) ]\n left_s = Signal(intbv(0, min = -DATA_WIDTH, max = DATA_WIDTH))\n right_s = Signal(intbv(0, min = -DATA_WIDTH, max = DATA_WIDTH))\n sam_s = Signal(intbv(0, min = -DATA_WIDTH, max = DATA_WIDTH))\n even_odd_s = Signal(bool(0))\n fwd_inv_s = Signal(bool(0))\n updated_s = Signal(bool(0))\n noupdate_s = Signal(bool(0))\n \n #@always_seq(Clk.posedge, reset=Reset)\n @always_seq(clk_fast.posedge, reset=Reset)\n def control():\n StackWrite.next = False\n if MaskReset != 0:\n #ToSMask.next = ToSMask & ~MaskReset\n #this is a dummy statement\n updated_s.next = 0\n #elif PushPop and Enable: # push \n elif PushPop : # push\n #ToSPiece.next = PieceIn\n #ToSMask.next = MaskIn \n NrItems.next = NrItems + 1\n if NrItems > 
0:\n #StackWriteData.next = concat(ToSPiece, ToSMask)\n StackWrite.next = True\n Pointer.next = WritePointer\n if WritePointer < DEPTH-2:\n WritePointer.next = WritePointer + 1\n #elif not PushPop and Enable: # pop\n elif not PushPop : # pop\n #ToSPiece.next = StackReadData[22:16]\n #ToSMask.next = StackReadData[16:]\n NrItems.next = NrItems - 1\n WritePointer.next = Pointer\n if Pointer > 0:\n Pointer.next = Pointer - 1\n\n\n if updated_s:\n noupdate_s.next = 0\n if even_odd_s:\n if fwd_inv_s:\n Jpeg[Pointer].next = sam_s - ((left_s >> 1) + (right_s >> 1))\n else:\n Jpeg[Pointer].next = sam_s + ((left_s >> 1) + (right_s >> 1))\n else:\n if fwd_inv_s:\n Jpeg[Pointer].next = sam_s + ((left_s + right_s + 2)>>2)\n else:\n Jpeg[Pointer].next = sam_s - ((left_s + right_s + 2)>>2)\n else:\n noupdate_s.next = 1 \n #@always_seq(Clk.posedge, reset=None) \n @always_seq(clk_fast.posedge, reset=None)\n def write_stack():\n if StackWrite:\n #Stack[Pointer].next = StackWriteData\n Jpeg[Pointer].next = StackWriteData\n\n \n @always_comb\n def read_stack():\n #StackReadData.next = Stack[Pointer]\n StackReadData.next = Jpeg[Pointer]\n sig_in[Pointer].next = sig_out[Pointer]\n\n #@always_comb\n #def output():\n #ToSPieceOut.next = ToSPiece\n #ToSMaskOut.next = ToSMask \n \n #return control, write_stack, read_stack, output\n return control, write_stack, read_stack", "title": "" }, { "docid": "261123d4ebca397077965cfdaeaf604b", "score": "0.41515687", "text": "def _main():\n # Parse command line arguments\n from optparse import OptionParser\n version = '%prog ' + __revision__.strip('$').replace('Rev: ', 'r')\n parser = OptionParser(version=version)\n parser.set_usage(\"%prog [options] [pnmfile]\")\n parser.add_option(\"-i\", \"--interlace\",\n default=False, action=\"store_true\",\n help=\"create an interlaced PNG file (Adam7)\")\n parser.add_option(\"-t\", \"--transparent\",\n action=\"store\", type=\"string\", metavar=\"color\",\n help=\"mark the specified color as transparent\")\n parser.add_option(\"-b\", \"--background\",\n action=\"store\", type=\"string\", metavar=\"color\",\n help=\"save the specified background color\")\n parser.add_option(\"-a\", \"--alpha\",\n action=\"store\", type=\"string\", metavar=\"pgmfile\",\n help=\"alpha channel transparency (RGBA)\")\n parser.add_option(\"-g\", \"--gamma\",\n action=\"store\", type=\"float\", metavar=\"value\",\n help=\"save the specified gamma value\")\n parser.add_option(\"-c\", \"--compression\",\n action=\"store\", type=\"int\", metavar=\"level\",\n help=\"zlib compression level (0-9)\")\n parser.add_option(\"-T\", \"--test\",\n default=False, action=\"store_true\",\n help=\"create a test image\")\n parser.add_option(\"-R\", \"--test-red\",\n action=\"store\", type=\"string\", metavar=\"pattern\",\n help=\"test pattern for the red image layer\")\n parser.add_option(\"-G\", \"--test-green\",\n action=\"store\", type=\"string\", metavar=\"pattern\",\n help=\"test pattern for the green image layer\")\n parser.add_option(\"-B\", \"--test-blue\",\n action=\"store\", type=\"string\", metavar=\"pattern\",\n help=\"test pattern for the blue image layer\")\n parser.add_option(\"-A\", \"--test-alpha\",\n action=\"store\", type=\"string\", metavar=\"pattern\",\n help=\"test pattern for the alpha image layer\")\n parser.add_option(\"-D\", \"--test-deep\",\n default=False, action=\"store_true\",\n help=\"use test patterns with 16 bits per layer\")\n parser.add_option(\"-S\", \"--test-size\",\n action=\"store\", type=\"int\", metavar=\"size\",\n help=\"width and 
height of the test image\")\n (options, args) = parser.parse_args()\n\n # Convert options\n if options.transparent is not None:\n options.transparent = color_triple(options.transparent)\n if options.background is not None:\n options.background = color_triple(options.background)\n\n # Run regression tests\n if options.test:\n return test_suite(options)\n\n # Prepare input and output files\n if len(args) == 0:\n ppmfilename = '-'\n ppmfile = sys.stdin\n elif len(args) == 1:\n ppmfilename = args[0]\n ppmfile = open(ppmfilename, 'rb')\n else:\n parser.error(\"more than one input file\")\n outfile = sys.stdout\n\n # Encode PNM to PNG\n width, height = read_pnm_header(ppmfile)\n writer = Writer(width, height,\n transparent=options.transparent,\n background=options.background,\n has_alpha=options.alpha is not None,\n gamma=options.gamma,\n compression=options.compression)\n if options.alpha is not None:\n pgmfile = open(options.alpha, 'rb')\n awidth, aheight = read_pnm_header(pgmfile, 'P5')\n if (awidth, aheight) != (width, height):\n raise ValueError(\"alpha channel image size mismatch\" +\n \" (%s has %sx%s but %s has %sx%s)\"\n % (ppmfilename, width, height,\n options.alpha, awidth, aheight))\n writer.convert_ppm_and_pgm(ppmfile, pgmfile, outfile,\n interlace=options.interlace)\n else:\n writer.convert_ppm(ppmfile, outfile,\n interlace=options.interlace)", "title": "" }, { "docid": "711c83632ed4f0833941f40c92d51931", "score": "0.41494596", "text": "def build():\n image = '{}:{}'.format(env.image_name, env.version_tag)\n cmd = 'docker build -t {image} .'.format(image=image)\n\n prebuild()\n\n with cd(env.project_dir):\n run(cmd)", "title": "" }, { "docid": "294a4d27e897268ea270acb6ae50f078", "score": "0.41487384", "text": "def make(ctx):\n ctx.run(\"docker build -t pkgparse:latest .\")", "title": "" }, { "docid": "2cb37ac7e5b5e8109802f1adaa07544b", "score": "0.41416988", "text": "def preproctrain(inputs, working_dir):\n # preproc images\n Path(f'{working_dir}/dataset/train_rgb').mkdir(parents=True,\n exist_ok=True)\n catalog_paths = list(sorted(Path(inputs).glob('./Atlanta_nadir*')))\n assert len(catalog_paths) > 0\n print('Found {} catalog directories'.format(len(catalog_paths)))\n for catalog_dir in tqdm.tqdm(catalog_paths, total=len(catalog_paths)):\n src_imgs = list(sorted(catalog_dir.glob('./Pan-Sharpen/Pan-*.tif')))\n for src in tqdm.tqdm(src_imgs, total=len(src_imgs)):\n dst = f'{working_dir}/dataset/train_rgb/{src.name}'\n if not Path(dst).exists():\n pan_to_bgr(str(src), dst)\n\n # prerpoc masks\n (Path(working_dir) / Path('dataset/masks')).mkdir(parents=True,\n exist_ok=True)\n geojson_dir = Path(inputs) / Path('geojson/spacenet-buildings')\n mask_dir = Path(working_dir) / Path('dataset/masks')\n ref_catalog_name = list(Path(inputs).glob(\n './Atlanta_nadir*/Pan-Sharpen'))[0].parent.name\n for geojson_fn in geojson_dir.glob('./spacenet-buildings_*.geojson'):\n masks_from_geojson(mask_dir, inputs, ref_catalog_name, geojson_fn)", "title": "" }, { "docid": "0ae4f7f89b378f5bb6fcc451178d7141", "score": "0.41414735", "text": "def create_thumbnail():\n subprocess.call(\"{} -t {}\".format(sys.executable, config.THUMBNAIL_FILE_PATH))", "title": "" }, { "docid": "7100ac5bfb2c845ba8dbbbc1ab1a20f5", "score": "0.4140333", "text": "def prepare(self, *args, **kwargs):\n super(cpeAOCC, self).prepare(*args, **kwargs)\n\n if self.options['dynamic'] or self.options['shared']:\n self.log.debug(\"Enabling building of shared libs/dynamically linked executables via $CRAYPE_LINK_TYPE\")\n 
env.setvar('CRAYPE_LINK_TYPE', 'dynamic')", "title": "" }, { "docid": "5ad06fb09276fde453878c0a7428cdc7", "score": "0.41402984", "text": "def main(\n model,\n hparams,\n imgdir,\n logdir,\n resize_images,\n n_images,\n shuffle_images,\n lambda_cartoonx,\n lambda_pixelrde,\n save_mask_after,\n preoptimize,\n save_files,\n):\n # Get device (use GPU if possible)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n # Get classifier to explain\n if model == \"VGG16\":\n model = (\n models.vgg16(weights=models.VGG16_Weights.IMAGENET1K_V1).eval().to(device)\n )\n elif model == \"mobile-net\":\n model = (\n models.mobilenet_v3_small(\n weights=models.MobileNet_V3_Small_Weights.IMAGENET1K_V1\n )\n .eval()\n .to(device)\n )\n elif model == \"deit\":\n model = (\n timm.create_model(\"deit_tiny_patch16_224\", pretrained=True)\n .eval()\n .to(device)\n )\n else:\n raise ValueError(f\"Model {model} not implemented.\")\n\n # Get hyperparameters for wavelet RDE and pixel RDE\n with open(os.path.join(sys.path[0], hparams)) as f:\n HPARAMS_CARTOONX = yaml.load(f, Loader=yaml.FullLoader)[\"CartoonX\"]\n with open(os.path.join(sys.path[0], hparams)) as f:\n HPARAMS_PIXEL_RDE = yaml.load(f, Loader=yaml.FullLoader)[\"PixelRDE\"]\n\n # Update hyperparameters if specified\n if lambda_cartoonx is not None:\n HPARAMS_CARTOONX[\"l1lambda\"] = int(lambda_cartoonx)\n if lambda_pixelrde is not None:\n HPARAMS_PIXEL_RDE[\"l1lambda\"] = int(lambda_pixelrde)\n\n # Initialize wavelet RDE and pixel RDE\n cartoonX = CartoonX(model=model, device=device, **HPARAMS_CARTOONX)\n pixelRDE = PixelRDE(model=model, device=device, **HPARAMS_PIXEL_RDE)\n\n # Get files of images\n files = [\n f\n for f in os.listdir(imgdir)\n if f.lower().endswith(\".jpg\")\n or f.lower().endswith(\".png\")\n or f.lower().endswith(\".jpeg\")\n ]\n if shuffle_images:\n files = np.random.permutation(files) # shuffle files\n files = (\n files[:n_images] if len(files) > n_images else files\n ) # only use n_images\n\n # Convert string of ints to list of ints\n if save_mask_after != \"\":\n save_mask_after = [int(s) for s in save_mask_after.split(',')]\n else:\n save_mask_after = []\n\n # Explain model decsision for each image in files\n for fname in files:\n print(f\"Processing file: {fname}\")\n # Get image and transform to tensor\n path = os.path.join(imgdir, fname)\n x = Image.open(path)\n x = transforms.ToTensor()(x)\n\n if x.shape[0] == 1: # if image is grayscale, convert to RGB\n colored_image = np.zeros((3, x.shape[1], x.shape[2]))\n colored_image[0] = x\n colored_image[1] = x\n colored_image[2] = x\n x = colored_image\n x = torch.from_numpy(x).to(dtype=torch.float32)\n \n x = transforms.Resize(size=(resize_images, resize_images))(x)\n x = x.to(device)\n\n # Get prediction for x\n output = model(x.unsqueeze(0).detach())\n max_idx = nn.Softmax(dim=1)(output).max(1)[1].item()\n label = LABEL_LIST[max_idx]\n\n\n # Get explanation for x\n exp_cartoonX, DWTmask_cartoonX, logs_cartoonX, intermediate_mask_cartoonx = cartoonX(\n x.unsqueeze(0),\n target=max_idx,\n path=path,\n save_mask_after=save_mask_after,\n preoptimize=preoptimize,\n )\n exp_pixelRDE, logs_pixelRDE, intermediate_mask_rde = pixelRDE(\n x.unsqueeze(0),\n target=max_idx, save_mask_after=save_mask_after\n )\n\n # Plot explanations next to original image\n P = [\n (x, f\"Pred:{label}\"),\n (exp_cartoonX, \"CartoonX\"),\n (exp_pixelRDE, \"Pixel RDE\"),\n ]\n fig, axs = plt.subplots(1, 3, figsize=(10, 10))\n for idx, (img, title) in enumerate(P):\n args = {\"cmap\": 
\"copper\"} if idx > 0 else {}\n axs[idx].imshow(\n np.asarray(transforms.ToPILImage()(img)), vmin=0, vmax=255, **args\n )\n axs[idx].set_title(title, size=8)\n axs[idx].axis(\"off\")\n\n # Strip file extension from fname to create folder names\n fname_split = fname.split(\".\")\n assert (\n fname_split[1] == \"png\"\n or fname_split[1] == \"jpg\"\n or fname_split[1] == \"JPEG\"\n ), \"Image must be in .png or .jpg format.\"\n save_name = fname_split[0]\n\n # Things to save in the folder name\n hparams_str = {\n \"CNX_l1lambda\": HPARAMS_CARTOONX[\"l1lambda\"],\n \"RDE_l1lambda\": HPARAMS_PIXEL_RDE[\"l1lambda\"],\n \"mask\": HPARAMS_CARTOONX[\"init_mask\"][0],\n \"preopt\": preoptimize\n }\n\n save_name += hparams_to_str(hparams_str)\n log_path = os.path.join(logdir, save_name)\n\n # Log to tensorboard and save data to logdir\n writer = SummaryWriter(log_path)\n writer.add_figure(f\"Explanations\", fig)\n for i in range(len(logs_cartoonX[\"loss\"])):\n writer.add_scalar(\"Loss CartoonX\", logs_cartoonX[\"loss\"][i], global_step=i)\n writer.add_scalar(\n \"Loss CartoonX (Sparsity Component)\",\n logs_cartoonX[\"loss-sparsity\"][i],\n global_step=i,\n )\n writer.add_scalar(\n \"Loss CartoonX (Distortion Component)\",\n logs_cartoonX[\"loss-distortion\"][i],\n global_step=i,\n )\n writer.add_scalar(\n \"L1-Norm CartoonX\", logs_cartoonX[\"l1-norm\"][i], global_step=i\n )\n writer.add_scalar(\n \"Distortion CartoonX\", logs_cartoonX[\"distortion\"][i], global_step=i\n )\n for i in range(len(logs_pixelRDE[\"loss\"])):\n writer.add_scalar(\"Loss PixelRDE\", logs_pixelRDE[\"loss\"][i], global_step=i)\n writer.add_scalar(\n \"L1-Norm PixelRDE\", logs_pixelRDE[\"l1-norm\"][i], global_step=i\n )\n writer.add_scalar(\n \"Distortion PixelRDE\", logs_pixelRDE[\"distortion\"][i], global_step=i\n )\n writer.flush()\n writer.close()\n\n fig.savefig(\n os.path.join(log_path, \"figure.jpg\"),\n bbox_inches=\"tight\",\n transparent=True,\n pad_inches=0,\n )\n\n if save_files:\n # Save additional results to log_path\n np.save(os.path.join(log_path, \"original_image.npy\"), x.cpu().detach().numpy())\n np.save(\n os.path.join(log_path, \"exp_cartoonX.npy\"),\n exp_cartoonX.cpu().detach().numpy(),\n )\n np.save(\n os.path.join(log_path, \"exp_pixelRDE.npy\"),\n exp_pixelRDE.cpu().detach().numpy(),\n )\n with open(\n os.path.join(log_path, \"DWTmask_cartoonX.pickle\"), \"wb\"\n ) as fp: # Save DWTmask_cartoonX\n pickle.dump(DWTmask_cartoonX, fp)\n with open(os.path.join(log_path, \"pred.txt\"), \"w\") as f: # Save prediction\n f.write(f\"{label}, {max_idx}\")\n with open(\n os.path.join(log_path, \"intermediate_mask_cartoonx.pickle\"), \"wb\"\n ) as fp: # Save intermediate masks\n pickle.dump(intermediate_mask_cartoonx, fp)\n with open(\n os.path.join(log_path, \"intermediate_mask_rde.pickle\"), \"wb\"\n ) as fp: # Save intermediate masks\n pickle.dump(intermediate_mask_rde, fp)\n\n # Log the hparams file to check later what hparams were used\n with open(os.path.join(log_path, \"hparams.yaml\"), \"w\") as f:\n yaml.dump(\n {\"CartoonX\": HPARAMS_CARTOONX, \"PixelRDE\": HPARAMS_PIXEL_RDE},\n f,\n default_flow_style=False,\n )", "title": "" }, { "docid": "c022344cafabf98fa86424f6a583e4d4", "score": "0.4137711", "text": "def postbuild(working_directory, configuration):\n\n # Only process if perforce is source control\n if is_git():\n return 0\n\n # Copy the windows version of the CodeWarrior libraries.\n error = 0\n\n # If the host can run CodeWarrior for Windows (Codewarrior 9.4 Mac or\n # Windows) then copy the 
generated windows library files\n\n if get_windows_host_type() or is_codewarrior_mac_allowed():\n\n # Update the codewarrior files\n\n sdks = get_sdks_folder()\n windowsburgerbase = os.path.join(sdks, \"windows\", \"burgerlib\")\n outputfolder = os.path.join(working_directory, \"bin\")\n for item in WINDOWS_LIB_FILES:\n error = copy_file_if_needed(\n os.path.join(outputfolder, item),\n os.path.join(windowsburgerbase, item), perforce=True)\n if error:\n break\n\n # If the host can run CodeWarrior for MacOS Carbon (Codewarrior 10 Mac)\n # then copy the generated Mac 68k library files\n\n if not error and is_codewarrior_mac_allowed():\n\n # Update the codewarrior files\n\n sdks = get_sdks_folder()\n macburgerbase = os.path.join(sdks, \"mac\", \"burgerlib\")\n outputfolder = os.path.join(working_directory, \"bin\")\n for item in MAC68K_LIB_FILES:\n error = copy_file_if_needed(\n os.path.join(outputfolder, item),\n os.path.join(macburgerbase, item), perforce=True)\n if error:\n break\n\n return error", "title": "" }, { "docid": "c66ed66a279731821b386ff7888d0823", "score": "0.41351366", "text": "def prepare4slides(config):\n files = config[\"files\"]\n make_backup(files)\n mod_files(files, _fix_fig_links)", "title": "" }, { "docid": "a6ae45f8a28063c9df761dd3486c06f0", "score": "0.41308075", "text": "def libjpeg_turbo_asm_object_files():\n return [\n './BUILD/simd/jidctred-sse2-64.o', './BUILD/simd/jidctint-sse2-64.o',\n './BUILD/simd/jidctfst-sse2-64.o', './BUILD/simd/jdmerge-sse2-64.o',\n './BUILD/simd/jidctflt-sse2-64.o', './BUILD/simd/jdsample-sse2-64.o',\n './BUILD/simd/jdcolor-sse2-64.o'\n ]", "title": "" }, { "docid": "9068e33ad63689e244eb7ef210bac50f", "score": "0.41292897", "text": "def add_jpeg_decoding(module_spec):\r\n input_height, input_width = hub.get_expected_image_size(module_spec)\r\n input_depth = hub.get_num_image_channels(module_spec)\r\n jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')\r\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)\r\n # Convert from full range of uint8 to range [0,1] of float32.\r\n decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,\r\n tf.float32)\r\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\r\n resize_shape = tf.stack([input_height, input_width])\r\n resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)\r\n resized_image = tf.image.resize_bilinear(decoded_image_4d,\r\n resize_shape_as_int)\r\n return jpeg_data, resized_image", "title": "" }, { "docid": "c15be318cb1917325255d8fbcc26dbc8", "score": "0.41276672", "text": "def install_imgcnv ():\n\n filename_zip = os.path.join(DIRS['depot'], 'imgcnv.zip')\n imgcnv = which('imgcnv')\n if imgcnv :\n r, version = call ([ imgcnv, '-v'], capture = True)\n if r == 0:\n print( \"Found imgcnv version %s\" % version)\n if not os.path.exists(filename_zip):\n print( \"Imgcnv is installed and no-precompiled version exists. 
Using installed version\")\n return\n\n if not os.path.exists(filename_zip):\n print (\"\"\"No pre-compiled version of imgcnv exists for your system\n Please visit biodev.ece.ucsb.edu/projects/imgcnv\n or visit our mailing list https://groups.google.com/forum/#!forum/bisque-bioimage\n for help\"\"\")\n return\n\n\n if getanswer (\"Install Bio-Image Convert\", \"Y\",\n \"imgcnv will allow image server to read pixel data\") == \"Y\":\n\n filename_check = os.path.join(DIRS['bin'], 'imgcnv%s'% SCRIPT_EXT)\n uncompress_dependencies (filename_zip, DIRS['bin'], filename_check)", "title": "" }, { "docid": "c0a0eff42db025bc2e87243202cf80f0", "score": "0.41248497", "text": "def build_and_push_images(test_image_suffix):\n images = [\n ['base-image'],\n ['base-clang'],\n # base-runner is also dependent on base-clang.\n ['base-builder', 'base-runner'],\n # Exclude 'base-builder-swift' as it takes extremely long to build because\n # it clones LLVM.\n [\n 'base-runner-debug',\n 'base-builder-go',\n 'base-builder-javascript',\n 'base-builder-jvm',\n 'base-builder-python',\n 'base-builder-rust',\n ],\n ]\n os.environ['DOCKER_BUILDKIT'] = '1'\n max_parallelization = max([len(image_list) for image_list in images])\n proc_count = min(multiprocessing.cpu_count(), max_parallelization)\n logging.info('Using %d parallel processes.', proc_count)\n with multiprocessing.Pool(proc_count) as pool:\n for image_list in images:\n args_list = [(image, test_image_suffix) for image in image_list]\n pool.starmap(build_and_push_image, args_list)", "title": "" }, { "docid": "52f61492c16a3e8a6cdb04e0c42808e4", "score": "0.41232747", "text": "def compress_images(self, image_files):\r\n\t\t\r\n\t\tif not image_files:\r\n\t\t\treturn\r\n\t\t\r\n\t\tlogging.info('Image compression: start')\r\n\t\tself.size_before['img'] = self.__calculate_files_size(image_files)\r\n\t\tself.files_count += len(image_files)\r\n\r\n\t\tjpg_image_files = filter(lambda file: True if file.split('.')[-1] in ['jpg', 'jpeg'] else False, image_files)\r\n\t\tpng_image_files = filter(lambda file: True if file.split('.')[-1] in ['png', 'gif'] else False, image_files)\t\r\n\t\t\r\n\t\tif self.overwrite_original:\r\n\t\t\tdestination = ''\r\n\t\telse:\r\n\t\t\tdestination = '--dest=\"%(temp_directory)s\"' % {'temp_directory': self.temporaryDirectories['jpg_files']}\r\n\r\n\t\tfor file in jpg_image_files:\r\n\t\t\tsubprocess.Popen('%(jpegoptim_path)s --strip-all %(destination)s \"%(input_file)s\"' % {'jpegoptim_path': JPEGOPTIM_PATH, 'destination': destination, 'input_file': file}, shell = True, stdout = subprocess.PIPE).communicate()[0]\r\n \r\n\t\t\tcompressed_file_path = os.path.join(self.temporaryDirectories['jpg_files'], os.path.basename(file))\r\n\t\t\tif not self.overwrite_original and os.path.exists(compressed_file_path):\r\n\t\t\t\t# Add a suffix to the file name and move it back to the original location \r\n\t\t\t\tnew_name = self.__add_suffix_after_file_name(compressed_file_path)\r\n\t\t\t\tself.__move_file(new_name, os.path.dirname(file))\r\n\t\t\r\n\t\tfor file in png_image_files:\r\n\t\t\tif self.overwrite_original:\r\n\t\t\t\toutput_file = file\r\n\t\t\telse:\r\n\t\t\t\toutput_file = self.__get_file_name_with_suffix(file)\r\n\t\t\t\t\r\n\t\t\tsubprocess.Popen('%(optipng_path)s \"%(input_file)s\" -out \"%(output_file)s\"' % {'optipng_path': OPTIPNG_PATH, 'input_file': file, 'output_file': output_file}, shell = True, stdout = subprocess.PIPE).communicate()[0]\r\n\t\t\r\n\t\tif self.overwrite_original:\r\n\t\t\tself.size_after['img'] = 
self.__calculate_files_size(image_files)\r\n\t\telse:\r\n\t\t\tself.size_after['img'] = self.__calculate_files_size(map(self.__get_file_name_with_suffix, image_files))\r\n\t\t\r\n\t\tlogging.info('Image compression: completed')", "title": "" }, { "docid": "b8a6fe31df772d22e6f037ac2ee5d14d", "score": "0.41220394", "text": "def main():\n # Get the start time\n start = time()\n\n # Delete the folder from outputs\n os.system(\"rm -rf outputs\")\n\n # Create the folder for outputs\n os.system(\"mkdir outputs\")\n\n # Clear the screen\n print(\"\\033c\")\n\n # Input the wanted quality\n qualities = int(input(\"Enter approximate quality as integer: \"))\n\n # Get all of the JPEG files in the current folder\n jpegs = [\n filer\n for filer in os.listdir()\n if filer.endswith(\".jpg\") or filer.endswith(\".JPG\")\n ]\n\n\n # For every file in that list:\n for name in jpegs:\n # Print the image we are compressing.\n print(\"Compressing image:\", name)\n\n # Open the image.\n current_image = Image.open(name)\n\n # Print the original image size.\n print(\"Original image size:\", str(int(os.path.getsize(name)) / 1000) + \"kb\")\n\n # Store it.\n orig_size = os.path.getsize(name)\n\n # Add the directory name to the file name.\n name = \"outputs/\" + name\n\n # Save the image, compressed.\n current_image.save(name, optimize=0, quality=qualities)\n\n # Print the compressed image file size.\n print(\"Compressed image size:\", str(int(os.path.getsize(name)) / 1000) + \"kb\")\n\n # Print the percent compressed.\n print(\n \"Percent compressed:\",\n str(-int((os.path.getsize(name) / orig_size) * 100) + 100) + \"% reduced.\",\n )\n\n # Print a divider.\n print(\"-\" * 100)\n\n # Get the end time:\n end = time()\n\n # Find the time it took to compress the images.\n took = end - start\n\n # Print that time.\n print(\"Compressing\", len(jpegs), \"images took\", took, \"seconds.\")", "title": "" }, { "docid": "db415dba9553d5e8f5e8fc21d7de6755", "score": "0.4120715", "text": "def build_and_push_image(image, test_image_suffix):\n main_tag, testing_tag = get_image_tags(image, test_image_suffix)\n build_image(image, [main_tag, testing_tag], testing_tag)\n push_image(testing_tag)", "title": "" }, { "docid": "765a90e033df57bbad6b24f95dbaa328", "score": "0.41200718", "text": "def compile_op(destination):\n subprocess.run(\"make\", cwd=destination, check=True)", "title": "" }, { "docid": "1803c67f47fa49116157a190b10e35ea", "score": "0.41172165", "text": "def build_base_images():\n _build(get_build_config_file('base-images.yaml'), 'base-images')", "title": "" }, { "docid": "2cb97514c619b4c9f4bb9defd4fa4a08", "score": "0.41170114", "text": "def slice_jpgs():\r\n valid_images = False\r\n while not valid_images:\r\n directory = get_directory(DIRECTORY_PROMPT)\r\n images = load_jpg_files(directory)\r\n\r\n valid_images = validate_jpgs(images) \r\n\r\n out_img = create_output_jpg(images)\r\n\r\n while True:\r\n try:\r\n output_filename = input(OUTPUT_JPG_PROMPT)\r\n out_img.save(output_filename)\r\n break\r\n except IOError:\r\n print(\"Could not save to file {}, choose another name\".format(output_filename))", "title": "" }, { "docid": "19ae938de12ec96e58c100be63e77210", "score": "0.4114064", "text": "def prestageFile( self, *parms, **kws ):\n return S_ERROR( \"Storage.prestageFile: implement me!\" )", "title": "" }, { "docid": "76a4948ec902e59a80ffcaa2b5861147", "score": "0.41108218", "text": "def create_build_image():\n image = '{}:{}'.format(env.image_name+'-js-build', env.version_tag)\n with cd(env.project_dir):\n run('rm 
-rf release || true')\n run('docker build -f etc/docker/jsbuild -t {image} .'.format(image=image))", "title": "" }, { "docid": "42b8e764f0ed6a7c8da01d382cee5e32", "score": "0.41077414", "text": "def create_image(label, image_name, image_data, old_info):\r\n\r\n\tif label == \"Fire\" :\r\n\t\toutput_dir = config.OUTPUT_FIRE_PATH\r\n\telse:\r\n\t\toutput_dir = config.OUTPUT_NONFIRE_PATH\r\n\r\n\t# Write new image\r\n\timage = os.path.sep.join([output_dir, image_name])\r\n\tcv2.imwrite(image, image_data)\r\n\r\n\t# Write metadata\r\n\tinfo = IPTCInfo(image, force=True)\r\n\tinfo['keywords'] = old_info\t\r\n\tinfo['caption/abstract'] = label\r\n\tinfo.save()\r\n\r\n\treturn image", "title": "" }, { "docid": "6b85be4b2f43672dce23bf41706ba6d6", "score": "0.4105137", "text": "def build(c, force=None):\n\n force_flag = '-F' if force else ''\n\n c.run(f\"mp build -r {force_flag}\", pty=True)", "title": "" }, { "docid": "167db7d7fcef8f7c62ac64c422bb29d3", "score": "0.4104251", "text": "def prepare_external_data(data_dir, pickle_name, save=False):\n image_names = glob.glob(data_dir + '*.jpg')\n\n name_list = []\n for idx, name in enumerate(image_names):\n img = mpimg.imread(name)\n # shapes of grey scale images without channel\n if len(img.shape) > 2:\n name_list.append(name)\n else:\n print(img.shape)\n if idx % 1000 == 0:\n print(len(name_list))\n\n if save:\n with open(os.path.join(SAVE_PATH, pickle_name), 'wb') as f:\n pickle.dump(name_list, f)", "title": "" } ]
3262375d1f6c329c5b91d05fbc3cd507
Build a random vector with random indexes. Author: Thiago F Pappacena. Parameters: integer size (the vector size), float min (the minimum random number), float max (the maximum random number).
[ { "docid": "3159f2a650547ec677599fac03d5b500", "score": "0.7505865", "text": "def makeRandomVector(size, min = 0.0, max = 1.0):\n import random\n return tuple([min + (random.random() * (max-min)) for i in range(size)])", "title": "" } ]
[ { "docid": "0399617dbc736d7223479c5119f263e2", "score": "0.720517", "text": "def gen_vector(size):\n ret = []\n for _ in range(size):\n ret.append(random.randint(0, 20))\n \n return(ret)", "title": "" }, { "docid": "c58773bfd20fbb308bce725a48a9e39e", "score": "0.7197337", "text": "def random_vector(minmax):\r\n return minmax[:,0] + (minmax[:,1] - minmax[:,0]) * np.random.random(len(minmax))", "title": "" }, { "docid": "8a4edc26a3975481dbf7637a913ff159", "score": "0.69426054", "text": "def randvec(n=50, lower=-0.5, upper=0.5):\n return np.array([random.uniform(lower, upper) for i in range(n)])", "title": "" }, { "docid": "5707094d1292632995b6a0e35539e554", "score": "0.6885018", "text": "def random(maxX, maxY, maxZ):\n return Vector(random.randrange(maxX), random.randrange(maxY), random.randrange(maxZ))", "title": "" }, { "docid": "82d577b5bceb5b934d2851406c2f75e3", "score": "0.67334646", "text": "def random(min=-1,max=1,color=mycolors.WHITE,width=1,arrow=[0.1,0.5]):\n x=random.uniform(min,max)\n y=random.uniform(min,max)\n return Vector(x,y,color=color,width=width,arrow=arrow)", "title": "" }, { "docid": "fd438544fd665a2025323ac5d8c66de2", "score": "0.6657029", "text": "def _pick_random_vector():\n tensor_size = np.random.randint(3)\n tensor_shape = np.random.randint(1, 10, size=(tensor_size)).tolist()\n return np.random.normal(size=tensor_shape + [4])", "title": "" }, { "docid": "734b493f5102e692d330bed8a53e032d", "score": "0.6514739", "text": "def random_binary_vector(size):\n vec = np.random.uniform(-1, 1, size)\n vec[vec < 0] = -1\n vec[vec >= 0] = 1\n return vec", "title": "" }, { "docid": "b7540166a6c7e25786f8ccc5adb52e1d", "score": "0.63063174", "text": "def random_pos(self, n=1):\n return np.random.uniform(self.min, self.max, (n, self.ndim))", "title": "" }, { "docid": "781129f799d67de5681d1ccd0a572606", "score": "0.62645143", "text": "def random(min=-1,max=1):\n vector=Vector.random(min,max)\n return Force(vector)", "title": "" }, { "docid": "38b22de3855742fe618bff9cb2f4996e", "score": "0.6258775", "text": "def p_vector(nmax):\n\tp_vec = np.zeros(nmax)\n\tfor n in np.arange(1,nmax):\n\t\tp_vec[n] = p_initial(n)\n\treturn p_vec/np.sum(p_vec)", "title": "" }, { "docid": "0260f03733ddb7e65f4c96e65d675bef", "score": "0.62004274", "text": "def generate_random_translation_vector(max_translation=0.12):\n return np.random.uniform(-max_translation,\n max_translation,\n size=(1, 3))", "title": "" }, { "docid": "51d7260640ae638ebfd876b88b0ff222", "score": "0.6152188", "text": "def rand_vec(cls, n, normalize=False):\n seed = np.random.randint(0, np.iinfo(np.int32).max)\n logger.debug(\"rand_vec default_rng seeded with seed=%s\", seed)\n rng = np.random.default_rng(seed)\n vec = rng.random(n) + 1j * rng.random(n)\n if normalize:\n vec /= np.sqrt(np.dot(vec, np.conj(vec)))\n return vec", "title": "" }, { "docid": "530676ed513640cbd1d74a3473bfd203", "score": "0.6146668", "text": "def integer_vector(shape, max_value, return_symbol=False):\n return mx.sym.round(mx.sym.random_uniform(shape=shape) * max_value) if return_symbol \\\n else np.round(np.random.uniform(size=shape) * max_value)", "title": "" }, { "docid": "0d7d0d6bdd87bfab2e2b9a9c483a7b83", "score": "0.61429304", "text": "def generate(n,l,a):\n for i in range(0,n):\n a.append(r.randrange(-l,l))", "title": "" }, { "docid": "2ac3ae37d86ab88a77989e62def5f288", "score": "0.61085486", "text": "def random_in_range(t_min, t_max):\n return t_min + np.random.uniform(size=(len(t_min), )) * (t_max - t_min)", "title": "" }, { "docid": 
"4cbc7a82a960b176e8c6adcbfd8a8df5", "score": "0.61035734", "text": "def random_center():\n return np.random.randint(0, 16, 64).tolist()", "title": "" }, { "docid": "6162c724cd373a9e18c2222cba6feefd", "score": "0.60890734", "text": "def constrained_random(n, total):\n\n return [x - 1 for x in constrained_sum_sample_pos(n, total + n)]", "title": "" }, { "docid": "2fb82a216b7bbaa381c5424b29db0ab2", "score": "0.6073753", "text": "def random_binary_array(size, task_index, layer_index):\n # to make sure that each task in each layer has a different seed (but seeds are the same for different runs)\n global seeds\n seed = seeds[task_index] + layer_index\n np.random.seed(seed)\n\n # np.random.seed(1) # set fixed seed to have always the same random vectors\n vec = np.random.uniform(-1, 1, size)\n vec[vec < 0] = -1\n vec[vec >= 0] = 1\n return vec", "title": "" }, { "docid": "e0cddbb885782aac133410ab6dc2abfb", "score": "0.6053779", "text": "def initialize_random_int(self, min_value, max_value):\n self.cells = np.random.randint(min_value, max_value, size=self.size) \n self.update_neighbors()", "title": "" }, { "docid": "33ae659a77b54eee862fbe2ef4e46789", "score": "0.601582", "text": "def init_W(n,d): \n# random.seed(0.001)\n W = np.zeros((n,d))\n for i in range(n):\n for j in range(d):\n W[i,j] = random.uniform(-0.1,0.1)\n return W", "title": "" }, { "docid": "ed6148bc194542229608a067f7dd4a2d", "score": "0.6002623", "text": "def vec_x(n):\n x = np.linspace(0, h, n)\n return x", "title": "" }, { "docid": "42a84b65f8f254e18b2329d413059a4b", "score": "0.5988954", "text": "def _initRandom(self, constraints = DEFAULT_CONSTRAINTS):\n\t\tself.v = [[0.0 for r in range(self.arch[\"shape\"][i])] for i in range(len(self.arch[\"shape\"]))]\n\t\tprint self.v", "title": "" }, { "docid": "8df07ca16a8d800ea34ac7cf88a405e1", "score": "0.5982783", "text": "def coord_calc(n, max_x, max_y, size, v_max):\n elems = []\n for i in range(n):\n angle = 2 * pi * random.random()\n elems.append({\n 'x': random.randint(100, max_x - 100),\n 'y': random.randint(100, max_y - 100),\n 'r': size,\n 'vx': int(v_max * cos(angle)),\n 'vy': int(v_max * sin(angle)),\n 'color': random.choice(list(COLORS.values())[:-1]), # removing BLACK color\n })\n return elems", "title": "" }, { "docid": "84e00c588c39bee457f4e6aa1f2e9d5a", "score": "0.59618956", "text": "def makeDataValues(size=100, min=0., max=1., random=-1):\n datavalues = np.arange(0, size, dtype='float')\n datavalues *= (float(max) - float(min)) / (datavalues.max() - datavalues.min())\n datavalues += min\n if random > 0:\n rng = np.random.RandomState(random)\n randorder = rng.rand(size)\n randind = np.argsort(randorder)\n datavalues = datavalues[randind]\n datavalues = np.array(list(zip(datavalues)), dtype=[('testdata', 'float')])\n return datavalues", "title": "" }, { "docid": "96aa4ea970f1059596f8b5d58d0d2b3b", "score": "0.5955264", "text": "def individual(length, minimum, maximum):\n return [np.random.uniform(minimum, maximum) for x in xrange(length)]", "title": "" }, { "docid": "b627cb08fe46810efbccc3ea71fb7168", "score": "0.59485555", "text": "def random_vector(tbl_name, dim, conn):\n import random\n clear_table(tbl_name, conn)\n cur = conn.cursor()\n # this is slow, I know\n for i in range(dim):\n cur.execute(\"insert into %s values (%s, 0, %s)\" % (tbl_name, i, random.uniform(0, 1)))\n conn.commit()", "title": "" }, { "docid": "efdb3206f9bc248d5ee72d74c8643636", "score": "0.59480405", "text": "def createOneHotVec(size, indx = -1):\n\tvec = [0] * size\n\ts = random.randint(0, size 
- 1) if indx < 0 else indx\n\tvec[s] = 1\n\treturn vec", "title": "" }, { "docid": "4f7bb9c96a609ea0650f69a3124ed00c", "score": "0.5945593", "text": "def getRandom(self) -> int:\n return random.choice(self.vector)", "title": "" }, { "docid": "d4be365955cf2431b028aaeaee5d1904", "score": "0.5928089", "text": "def generate(self, n):", "title": "" }, { "docid": "1c8fd61a8ecb8789bb93fcc0aedf1574", "score": "0.59012926", "text": "def create_distribution_vector(n, m, distribution='uniform', exponent=-1):\n if distribution == 'uniform': # e.g., n=10, m=23: dist = [3, 3, 3, 2, 2, 2, 2, 2, 2, 2]\n d = m // n\n mod = m % n\n dist = [d+1] * mod, [d] * (n-mod)\n\n elif distribution == 'triangle':\n def triangle(x, slope):\n return int(ceil((x+pi/4)*slope-pi/pi/1e6))\n # Explanation 1: pi as irrational number for pivot of slope is chosen to make sure that there is\n # always some slope from that point with a total number of points below (thus, only one point is added\n # for an infinitesimal small increase of slope)\n # Explanation 2: pivot has vertical point to allow 0 edges for some nodes (small to only use if necessary)\n\n def sum_difference(k): # given slope k, what is the difference to m currently\n s = m\n for i in range (n):\n s -= triangle(i,k)\n return s\n\n k0 = 2.*m/n**2\n slope = optimize.bisect(sum_difference, 0, 2*k0, xtol=1e-20, maxiter=500)\n # Explanation: find the correct slope so that exactly m points are below the separating line\n # http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.bisect.html#scipy.optimize.bisect\n dist = []\n for i in range (n-1, -1, -1): # counting backwards to start with biggest values first\n dist.append(triangle(i,slope))\n\n elif distribution == 'powerlaw':\n exponent -= pi/100000 # make sure that the exponent is irrational, allows to always find a solution\n\n\n def sum_difference(top):\n return m-np.sum(np.ceil(top * (np.arange(1, n+1) ** exponent) - pi/1e6))\n # Explanation: -pi/100000 allows 0 if m<n (as for triangle)\n\n def powerlaw_vec(top):\n return np.ceil(top * (np.arange(1, n + 1) ** exponent) - pi / 1e6).astype(int)\n\n integral = 1. 
/ (exponent + 1) * (n**(exponent+1)-1)\n a0 = 1.*m/integral # upper bound\n top = optimize.bisect(sum_difference, 0, a0, xtol=1e-20, maxiter=500)\n\n dist = powerlaw_vec(top)\n else:\n raise Exception(\"You specified a non-existing method\")\n\n dist = np.hstack(dist).astype(int) # creates numpy array (plus flattens the array, necessary for triangular)\n assert dist.sum() == m\n return dist", "title": "" }, { "docid": "e178528765fe15a6a9a4844514c46ed0", "score": "0.58896476", "text": "def randomVector(self):\r\n random = self.agent.random.random\r\n x, y = random() - 0.5, random() - 0.5\r\n d = (x * x + y * y) ** 0.5 / 0.1\r\n # assuming that d > 0.0\r\n return Double2D(x / d, y / d)", "title": "" }, { "docid": "160ae44f8c686112790586c3874fdb12", "score": "0.587725", "text": "def initialize(V,rank):\n shape = V.shape \n Winit = abs(random.uniform(low = 0, high = 1,size = (shape[0],rank)))\n #Hinit = random.random(size = (rank,shape[1]))\n Hinit = abs(random.uniform(low = 0, high = 1,size = (rank,shape[1])))\n \n return Winit, Hinit", "title": "" }, { "docid": "95ac5416d2163235ebacb4596b17a1ef", "score": "0.58736116", "text": "def randomize_uniform(min_value: float, max_value: float):\n return (np.random.random() * (max_value-min_value)) + min_value", "title": "" }, { "docid": "3d8c46108e4b2887cb6507fce50de5cb", "score": "0.5861744", "text": "def UniRand(size = 1):\n return np.random.random(size)", "title": "" }, { "docid": "939465431e444d0ce006b181a0e5677c", "score": "0.5858728", "text": "def gen_random_uniform(random, args):\n bounds = args.get(\"gen_kwargs\")[\"bounds\"]\n # Random uniform. For b we could use N(mean, var) (clipped above zero) and\n # could even randomize mean and var.\n params = [random.uniform(l, h) for l, h in zip(bounds[:, 0], bounds[:, 1])]\n return params", "title": "" }, { "docid": "75777304c76416691a1921b7d3d26aa4", "score": "0.5847074", "text": "def randtour(rs, n, D):\r\n rand_r_nodes = list(np.random.choice(rs, size=n))\r\n return rand_r_nodes, length(rand_r_nodes, D)", "title": "" }, { "docid": "5eaa4a6784d69e4ce59396af330495d0", "score": "0.5841101", "text": "def random_init(n, max_norm):\n\n X = np.zeros((n, 2))\n angles = np.random.rand(n) * 2 * np.pi\n norms = np.random.rand(n) * max_norm\n for i, angle, norm in zip(range(n), angles, norms):\n X[i] = np.array([np.cos(angle), np.sin(angle)]) * norm\n return X", "title": "" }, { "docid": "d1190fc9e7ee77b7fdc17533edb1dce8", "score": "0.5834686", "text": "def __init__(self, lower_bound, upper_bound):\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n self.evaluation = -inf\n\n self.x = np.random.uniform(lower_bound, upper_bound)\n self.best_position = self.x\n self.best_evaluation = -inf\n delta = upper_bound - lower_bound\n self.v = np.random.uniform(-delta, delta)", "title": "" }, { "docid": "942e0d42decc27ea87f36286945ae6a0", "score": "0.5822349", "text": "def uniformRandomSum(s, n):\n boundaries = random.sample(range(s), n-1)\n boundaries.sort()\n boundariesMaxes = list(boundaries) + [s]\n boundariesMins = [0] + list(boundaries)\n res = [boundaryMax-boundaryMin for boundaryMax, boundaryMin in zip(boundariesMaxes, boundariesMins)]\n return(np.array(res))", "title": "" }, { "docid": "4faee722045830d195cf0ced4de0ea1a", "score": "0.5811319", "text": "def _CreateRandomValues(self, num_values=100):\n return [int(random.uniform(0, 1 << 20)) for i in xrange(num_values)]", "title": "" }, { "docid": "7ac69a1c2b8210072b188ef11b5aa5d9", "score": "0.5808305", "text": "def m_init_uniform(pos):\n return 
(1, 1, 1)", "title": "" }, { "docid": "9456cdada251b8edf98d1c3105be9033", "score": "0.5794625", "text": "def normal2uniform(rnd):\r\n z = np.zeros(rnd.shape)\r\n for i, r in enumerate(rnd):\r\n z[i,:] = 0.5 * special.erfc(-r / np.sqrt(2))\r\n \r\n return z", "title": "" }, { "docid": "4d4561752310766ba6a4f58d472359fe", "score": "0.5789707", "text": "def create_individual(listLength, minValue, maxValue):\n return [random.randint(minValue, maxValue) for x in range(listLength)]", "title": "" }, { "docid": "2feab6bede82d6ed6a46a3137fe990e0", "score": "0.5787642", "text": "def generate_indexes(num):\n places = []\n for i in range(WIDTH):\n for j in range(HEIGHT):\n places.append((i, j))\n\n xy = random.sample(places, num)\n return xy", "title": "" }, { "docid": "25192beacc5e1b870c250e396e16dd40", "score": "0.57806224", "text": "def makeVector(n, f):\n arr = []\n for i in range(n):\n arr.append(f())\n v = Vector(data=arr)\n return v", "title": "" }, { "docid": "f37ea24237a54084016372b21d73a48d", "score": "0.57609004", "text": "def random_int_array(self, minimum: int, maximum: int, shape: Tuple) -> Tuple:\n pass", "title": "" }, { "docid": "bd01331acc1c5c5e8edbaaca5405d208", "score": "0.5759394", "text": "def random_uniform(n_samples=1, bound=5.):\n size = (2,) if n_samples == 1 else (n_samples, 2)\n return bound * gs.random.rand(*size)", "title": "" }, { "docid": "3fe967c36f5f8a2a464919140085e86d", "score": "0.5742766", "text": "def rand(low, high):\n return np.random.uniform(low, high)", "title": "" }, { "docid": "d2a1b3c99e373b83dd5e2e7bebf884fc", "score": "0.5741822", "text": "def generate_point_list(n, min_val, max_val):\n \n p = []\n \n for i in range(n):\n \n # coordinates as integer values\n p.append((random.randint(min_val,max_val),\n random.randint(min_val,max_val)))\n \n # coordinates as float values\n #p.append((np.random.normal(random.randint(min_val,max_val), scale=0.5),\n # np.random.normal(random.randint(min_val,max_val), scale=0.5)))\n \n return p", "title": "" }, { "docid": "0eee1323d6f496e7ed64421f7a096bf2", "score": "0.5738997", "text": "def uniform_vector(shape, min_value=0, max_value=1, return_symbol=False):\n return mx.sym.random_uniform(low=min_value, high=max_value, shape=shape) if return_symbol \\\n else np.random.uniform(low=min_value, high=max_value, size=shape)", "title": "" }, { "docid": "4675410f5f48682c65038513b31c2acb", "score": "0.5734081", "text": "def random_point(self):\n return np.array([np.random.uniform(0.0, k) for k in self.size])", "title": "" }, { "docid": "8c4f8b3a36edc062e5673caf14731af3", "score": "0.5734049", "text": "def random(size, iscomplex):\n out = rng.standard_normal(size)\n if iscomplex:\n out = out + 1j * rng.standard_normal(size)\n return out", "title": "" }, { "docid": "d8c04931d4806336c118db809857f8d0", "score": "0.57332224", "text": "def makeRandomMatrix(lines, columns, min = 0.0, max = 1.0):\n matrix = []\n for l in range(lines):\n matrix.append(makeRandomVector(columns, min, max))\n return tuple(matrix)", "title": "" }, { "docid": "324d5d699067a53bac0cecd394a49b11", "score": "0.5727442", "text": "def construct_random_initial(self):\n channel_u = np.random.random([self._shape[-3], self._bound_u]).flatten()\n channel_v = np.random.random([self._shape[-3], self._bound_u, self._bound_v]).flatten()\n return np.concatenate([channel_u, channel_v], axis=0)", "title": "" }, { "docid": "520da0f0eca8eaf3a96b6d1ea615b0c9", "score": "0.5715816", "text": "def randomReal(size):\n sample = np.random.random_sample(size)-.5\n return 
sample/np.linalg.norm(sample)", "title": "" }, { "docid": "714084593ddfd326332dc99bb281c8ce", "score": "0.571363", "text": "def randVertex(a, b, c, d, Verts):\n i = random.randint(1,2)\n A, B, C, D = 0, 0, 0, 0\n if(a==1):\n A, B, C, D = a, b, c, d\n else:\n A, B, C, D = a, d, c, b\n \n i = randnum(0.1, 0.9)\n \n\n vecAB=Verts[B]-Verts[A]\n E=Verts[A]+vecAB*i\n \n vecDC=Verts[C]-Verts[D]\n F=Verts[D]+vecDC*i\n \n i = randnum(0.1, 0.9)\n vecEF=F-E\n \n O=E+vecEF*i\n return O", "title": "" }, { "docid": "ab1b54603f79a7cd1ec22b1e32889276", "score": "0.5713045", "text": "def makeDataValues(size=100, min=0., max=1., nd=3, random=-1):\n data = []\n for d in range(nd):\n datavalues = np.arange(0, size, dtype='float')\n datavalues *= (float(max) - float(min)) / (datavalues.max() - datavalues.min())\n datavalues += min\n if random > 0:\n rng = np.random.RandomState(random)\n randorder = rng.rand(size)\n randind = np.argsort(randorder)\n datavalues = datavalues[randind]\n datavalues = np.array(list(zip(datavalues)), dtype=[('testdata' + '%d' % (d), 'float')])\n data.append(datavalues)\n data = rfn.merge_arrays(data, flatten=True, usemask=False)\n return data", "title": "" }, { "docid": "272a6c3379e4b724cce1c2bf147ba72c", "score": "0.57082736", "text": "def generate_random_matrix(m, n, max_val):\n matrix = []\n for i in range(n):\n \trow = []\n \tfor j in range(m):\n \t\trow.append(rd.randint(0, max_val))\n \tmatrix.append(row)\n return matrix", "title": "" }, { "docid": "d46e837e95a9a6defc9ee45f46b18cb9", "score": "0.57003456", "text": "def random_point(self, n_samples=1, bound=1.0):", "title": "" }, { "docid": "90fb95c5d50563403006deee8540875c", "score": "0.56933635", "text": "def generate_mat(n, seq):\n seq_red = uniq_pivots(seq)\n pivots = gen_pivots(seq_red)\n # generate uniq vector\n res = []\n for k, pos in enumerate(seq_red):\n vect = []\n for i in range(n):\n if (i+1 < pos):\n vect.append(0)\n elif (i+1 == pos):\n vect.append(pivots[k])\n else:\n vect.append(randint(-9,9))\n res.append(vect)\n mat = []\n k = 0\n for p in seq:\n while p > seq_red[k]:\n k += 1\n mat.append(copy(res[k]))\n return mat", "title": "" }, { "docid": "a667ed0f87f1b03a908ac2869d2e6475", "score": "0.56902486", "text": "def rand_arr(size):\n return np.random.normal(size=(size,size))", "title": "" }, { "docid": "9d980696b958853cf62b897b9e45871d", "score": "0.568659", "text": "def generate_random(self, num_points=(5, 25), scale=(10, 15), pos=((0, 0), (0, 0))):\n # Creates a ConvexHull of a random set of points\n random_points = numpy.random.rand(random.randrange(*num_points), 2)\n hull = spatial.ConvexHull(random_points)\n coords = hull.points[hull.vertices]\n # Scales the coords of the convex hull\n coords = coords * random.uniform(*scale)\n # Random position of the coords\n coords = coords + [random.uniform(*yx) for yx in zip(*pos)]\n return coords", "title": "" }, { "docid": "735a2da930748da59a840304f799351e", "score": "0.5676005", "text": "def random_pert():\n v = np.random.randn(6)\n return v / np.linalg.norm(v)", "title": "" }, { "docid": "637d742134bce03cc8646dbeb64ed9a1", "score": "0.5667486", "text": "def draw_samples_random(self, size=None):\n return np.random.uniform(self.lower, self.upper, size)", "title": "" }, { "docid": "7a4fec36903410dce8fbcca61833b6f3", "score": "0.566257", "text": "def generate_random_solution(self):\n pass", "title": "" }, { "docid": "2c5a4b532faf4eca067ef67ecb7552f5", "score": "0.5661589", "text": "def random():\n return _bigrand() / _bigrand_max", "title": "" }, { "docid": 
"30f3d23bdaebc27a4b5800f34f1d22a2", "score": "0.56593466", "text": "def random_grid_point(self, spacing):\n\n vector = []\n for length in self.size:\n\n if length < spacing:\n raise ex.RandomiseFailed('Not enough space in the box')\n\n value = np.random.choice(np.arange(0, length, spacing))\n vector.append(value)\n\n return np.array(vector)", "title": "" }, { "docid": "1ee323110771bda5033153b647537553", "score": "0.56453395", "text": "def generate_random_input(size, elements_range):\n in_arr = []\n for _ in range(size):\n in_arr.append(random.randint(0, elements_range))\n return in_arr", "title": "" }, { "docid": "96fd45cb5ce52bd47cd8d12363ca09e1", "score": "0.56447595", "text": "def _sedov_vector_temp(cls, x):\r\n\r\n lower_length = x[np.where(x < 0.4)].size\r\n upper = x[np.where(x >= 0.4)]\r\n return np.concatenate([np.full(lower_length, 0.4 ** -4.32), upper ** -4.32])", "title": "" }, { "docid": "dcc1d7375f9f1b3a68d32a444e9b54a7", "score": "0.5641124", "text": "async def random_unit_vector(sectype, n):\n await runtime.returnType((sectype, True), n)\n if n == 1:\n return [sectype(1)]\n\n b = n-1\n k = b.bit_length()\n x = runtime.random_bits(sectype, k)\n i = k-1\n u = [x[i], 1 - x[i]]\n while i:\n i -= 1\n if (b >> i) & 1:\n v = runtime.scalar_mul(x[i], u)\n v.extend(runtime.vector_sub(u, v))\n u = v\n elif await runtime.output(u[0] * x[i]): # TODO: mul_public\n # restart, keeping unused secret random bits x[:i]\n x[i:] = runtime.random_bits(sectype, k - i)\n i = k-1\n u = [x[i], 1 - x[i]]\n else:\n v = runtime.scalar_mul(x[i], u[1:])\n v.extend(runtime.vector_sub(u[1:], v))\n u[1:] = v\n return u", "title": "" }, { "docid": "408b5491a71b8c2680ff151f43636dc7", "score": "0.563639", "text": "def random_embedding_vector(embedding_dim, scale=0.6):\n return np.random.normal(scale=scale, size=(embedding_dim,))", "title": "" }, { "docid": "7b8710c3e18eeddf6672671dabfea00c", "score": "0.5627697", "text": "def generate_data(start, end, size):\n return (end - start) * numpy.random.random_sample(size) + start", "title": "" }, { "docid": "1c88837e31a2f23ea2267e257703def1", "score": "0.56251204", "text": "def gen_center():\n v = [random.choice(C),random.choice(C),random.choice(C)]\n return v", "title": "" }, { "docid": "7a9970d613d5e3794e8037fd2ebaeb5d", "score": "0.56235284", "text": "def set_indices(self):\n # In case I wanted a variable blending method\n #indices = random.sample(range(self.vec_length), self.analyzer.num_selections)\n #self.indices = random.sample(range(self.vec_length), self.vec_length/2)\n # I can select/determine a random sequence, and keep it for the iteration\n self.indices = np.arange(start=0, stop=self.vec_length, step=2)\n self.indices = torch.tensor(self.indices).cuda().long()", "title": "" }, { "docid": "68fe50d26b8a66d9019930470f82203b", "score": "0.5616751", "text": "def _get_random_initialization_vector(cls, length=CRYPTO_BLOCK_SIZE):\n return ''.join(random.choice(VECTOR_SPACE) for i in xrange(length))", "title": "" }, { "docid": "78c6c1c2dc6ae7d0efc36624d7f36e5f", "score": "0.56030583", "text": "def get_random_index(self):\n return [np.random.randint(0, dim) for dim in self.size]", "title": "" }, { "docid": "ee4548b52406d06b450c1d330246ae45", "score": "0.55999666", "text": "def generate_random__unit_vector():\n\n theta = np.random.uniform(low=0.0, high=2 * np.pi)\n z = np.random.uniform(low=-1.0, high=1.0)\n u = np.array(\n [np.sqrt(1 - z**2) * np.cos(theta),\n np.sqrt(1 - z**2) * np.sin(theta), z])\n return (u)", "title": "" }, { "docid": 
"edadabf450e99030b3a3aed61d31fbee", "score": "0.559609", "text": "def inicializarW(self, vector_size):\n rgen = numpy.random.RandomState(self.random_state)\n self.w = rgen.normal(loc=0.0, scale=0.01, size=vector_size)", "title": "" }, { "docid": "bfb1b9dd795224468625de41a13c1e6d", "score": "0.55914044", "text": "def generate(h, seed_index, n):\r\n x = np.zeros((vocabulary_size, 1))\r\n x[seed_index] = 1\r\n indexs = []\r\n for t in xrange(n):\r\n h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)\r\n y = np.dot(Why, h) + by\r\n p = np.exp(y) / np.sum(np.exp(y))\r\n index = np.random.choice(range(vocabulary_size), p=p.ravel())\r\n x = np.zeros((vocabulary_size, 1))\r\n x[index] = 1\r\n indexs.append(index)\r\n return indexs", "title": "" }, { "docid": "80517edfdc04c8af656a996b4998c23c", "score": "0.5590245", "text": "def generator_1dspatial(size, x_min, x_max, random=True):\n seg_len = (x_max-x_min) / size\n linspace_lo = x_min + seg_len*0.5\n linspace_hi = x_max - seg_len*0.5\n center = torch.linspace(linspace_lo, linspace_hi, size)\n noise_lo = -seg_len*0.5\n while True:\n if random:\n noise = seg_len*torch.rand(size) + noise_lo\n yield center + noise\n else:\n yield center", "title": "" }, { "docid": "6aa013ce961eee100176605999e587ca", "score": "0.5587947", "text": "def makeTimes(size=100, min=0., max=10., random=-1):\n datavalues = np.arange(0, size, dtype='float')\n datavalues *= (float(max) - float(min)) / (datavalues.max() - datavalues.min())\n datavalues += min\n if random > 0:\n rng = np.random.RandomState(random)\n randorder = rng.rand(size)\n randind = np.argsort(randorder)\n datavalues = datavalues[randind]\n datavalues = np.array(list(zip(datavalues)), dtype=[('times', 'float')])\n return datavalues", "title": "" }, { "docid": "3fa0f992518ae1f88c1a4254e075f35a", "score": "0.5581112", "text": "def sample_vector(dimensions: int, seed: int) -> List[float]:\n st = random.getstate()\n random.seed(seed)\n\n vec = []\n for _ in range(dimensions):\n vec.append(random.uniform(-1, 1)) # noqa: S311\n\n random.setstate(st)\n return vec", "title": "" }, { "docid": "37091530bb6aa71b4df64e6109cb295b", "score": "0.55795705", "text": "def v1(n = 100000):\n\n rand = random.random\n sqrt = math.sqrt\n sm = 0.0\n for i in xrange(n):\n sm += sqrt(1.0-rand()**2)\n return 4.0*sm/n", "title": "" }, { "docid": "2665714ec238f7f8b31d155716d1f544", "score": "0.55794835", "text": "def _get_random_index():\n indexes = tuple()\n for ft in self.feature_dimensions:\n rnd_ind = np.random.randint(0, len(ft.bins) - 1, 1)[0]\n indexes = indexes + (rnd_ind,)\n return indexes", "title": "" }, { "docid": "d6cd8a37dfd7c63af2592e295be8c9d5", "score": "0.55782056", "text": "def _make_values_(self):\n\n self.GRV = self.minimum[0] + self._random_()*(self.maximum[0]-self.minimum[0])\n self.NTG = self.minimum[1] + self._random_()*(self.maximum[1]-self.minimum[1])\n self.fi = self.minimum[2] + self._random_()*(self.maximum[2]-self.minimum[2])\n self.Sw = self.minimum[3] + self._random_()*(self.maximum[3]-self.minimum[3])\n self.Bo = self.minimum[4] + self._random_()*(self.maximum[4]-self.minimum[4])", "title": "" }, { "docid": "0679426ad40632f7d9af03af545f4803", "score": "0.55764496", "text": "def random_vector(environment, num_elements, rng=random):\n dataset = environment.generate_sequence(1, num_elements) \\\n .set_parallelism(1)\n dataset = dataset.map(lambda x: (x, rng.random())).set_parallelism(1)\n return dataset", "title": "" }, { "docid": "d8c918a8240b38c53e3598608c7b9654", "score": "0.557237", "text": "def 
_random_(self, iteration = 1000000):\n return np.random.uniform(0,1,iteration)", "title": "" }, { "docid": "a0600c17c27346bc239c53d65326b23c", "score": "0.5564057", "text": "def _build_partition_vector(n, n_parts, size_bubbles, mu=0.03):\n # initialize all fractions to 0\n result = np.zeros(n)\n # generate n_parts random positions from all available positions\n positions = np.random.choice(np.arange(n), n_parts, replace=False)\n\n bubbles = np.sort(np.random.choice(np.arange(1, size_bubbles), n_parts - 1, replace=False)) / size_bubbles\n _result = np.diff(bubbles, prepend=0, append=1)\n\n # set fractions at selected positions to successive difference between bubbles\n result[positions] = _result\n # for fractions that are within tol of mu, clamp them up to mu\n result[(mu - self.tol <= result) & (result < mu)] = mu\n\n return result", "title": "" }, { "docid": "f1bcd821f2ad302269f0279e34450f82", "score": "0.5562953", "text": "def rand_floats(n, m):\n return rand(n, m)\n #return n * 2 * random.random() + m", "title": "" }, { "docid": "3d3b1bb5d6522558b887ae965c531a12", "score": "0.5562697", "text": "def generate_feature_vector(length, func, features):\n # print(len(features), length)\n feature_vector = [0] * length\n\n for i in range(len(features)):\n pos = int(func(*features[i]))\n feature_vector[pos] += (1/len(features))\n\n # print(feature_vector)\n return feature_vector", "title": "" }, { "docid": "fbbf7c78e364001e325bf95088c4af5b", "score": "0.5549318", "text": "def random_unitary(size, cutoff):\n U = np.identity(cutoff, dtype=np.complex128)\n U[:size, :size] = random_interferometer(size)\n return U", "title": "" }, { "docid": "15d55233e43432eb4846fb340dcae9df", "score": "0.5533064", "text": "def _generate_uniform(\n data: np.ndarray,\n n_obs: int,\n a: float = 0.0,\n b: float = 1.0\n) -> np.ndarray:\n return np.random.uniform(a, b, (n_obs, data.ndim))", "title": "" }, { "docid": "ccc6cbe6a34241a062884569206b4b06", "score": "0.55264807", "text": "def random_point(n, low=-10, high=10):\n return tuple(np.random.uniform(low=low, high=high, size=(n)))", "title": "" }, { "docid": "aa2d104bc056f21f51f261b295267438", "score": "0.5525075", "text": "def selectindex( ):\n return int(np.log(np.random.rand())/np.log(pexp))", "title": "" }, { "docid": "2720c4c4abe22e0f81e0424ba5e16b92", "score": "0.5513888", "text": "def pos_ini(self):\n\n #x0 = ((rand(2) + self.centroide ) - array([0.5, 0.5]))*2*self.l\n x0 = ( rand(2) - array([0.5, 0.5]) )*2*self.l + self.centroide #+ self.centroide\n\n #x0 = rand(2)*2*self.l + self.centroide\n\n while True:\n\n L1 = [] # lista de vectores que van de los vertices al punto x0\n L2 = [] # proyecciones de vect de vert a x0 sobre normal correspondiente\n j = 0\n\n for i in range(self.n):\n a = x0 - self.vertices[i]\n L1.append(a)\n\n b = dot(a, self.normales[i])\n L2.append(b)\n\n L2 = array(L2)\n\n if all(L2 > 0):\n break\n\n x0 = ( rand(2) - array([0.5, 0.5]) )*2*self.l + self.centroide\n #plot(x0[0], x0[1], \"o\")\n\n return x0", "title": "" }, { "docid": "4bd06765c9958f1ed042680a666330ca", "score": "0.55129206", "text": "def array_generator(len):\n return [(int)(-100*random.random()+100) for e in range(len)]", "title": "" }, { "docid": "0a65741b9b696fd5eed2c33760144c36", "score": "0.55002636", "text": "def uniform_sampler(min_val, max_val):\n\n def sample(num_samples):\n return np.random.randint(min_val, max_val + 1, size=(num_samples,))\n\n max_length = max_val\n\n return sample, max_length", "title": "" }, { "docid": "8b01d30de26cf11b89a00435a6496db3", "score": 
"0.54948676", "text": "def getRandomUt(nb_vehicles, nb_tasks, ut_range):\n min_ut, max_ut = ut_range\n\n shapes = getUtilityShape(nb_vehicles,nb_tasks+1)\n\n utilities = np.random.randint(min_ut,max_ut,shapes)\n\n alloc_dim = np.zeros([nb_tasks+1]*(nb_vehicles))\n allAlloc = [x for x,_ in np.ndenumerate(alloc_dim)]\n\n for a in allAlloc:\n for i in range(utilities[a].size):\n if a[i] == 0:\n utilities[a][i] = 0\n\n return utilities", "title": "" }, { "docid": "8420950f23443c1b632740aedf1bab83", "score": "0.54948187", "text": "def create_random_bee(search_space):\r\n return {'vector' : random_vector(search_space)}", "title": "" }, { "docid": "2a21326dd95321111d365bb805f554f5", "score": "0.54947853", "text": "def build_BM(n,sqrt_dt):\r\n W=np.zeros(n+1)\r\n W_ind=np.random.normal(0,sqrt_dt,n)\r\n for k in range(n):\r\n W[k+1]=W[k]+W_ind[k]\r\n return W", "title": "" } ]
a06723196544353c4f3557cc9541c3d1
`without_role_check` returns `False` if `Context.author` has unwanted role.
[ { "docid": "c75b617f8fa485e3043cce60070c8a5d", "score": "0.7814249", "text": "def test_without_role_check_returns_false_with_unwanted_role(self):\n role_id = 42\n self.ctx.author.roles.append(MockRole(id=role_id))\n self.assertFalse(checks.without_role_check(self.ctx, role_id))", "title": "" } ]
[ { "docid": "00cd2fe15e15fb6a12b5a5d2315858b8", "score": "0.7967805", "text": "def test_with_role_check_without_required_roles(self):\n self.ctx.author.roles = []\n self.assertFalse(checks.with_role_check(self.ctx))", "title": "" }, { "docid": "13990780036da3b2db8f1e73dc9e80d5", "score": "0.74591565", "text": "def test_without_role_check_returns_true_without_unwanted_role(self):\n role_id = 42\n self.ctx.author.roles.append(MockRole(id=role_id))\n self.assertTrue(checks.without_role_check(self.ctx, role_id + 10))", "title": "" }, { "docid": "5a8de711c3d920cab9a3f29b742c4533", "score": "0.7119366", "text": "def test_with_role_check_without_guild(self):\n self.ctx.guild = None\n self.assertFalse(checks.with_role_check(self.ctx))", "title": "" }, { "docid": "27db7cc671ed06ef472b2f5aaf62b0f1", "score": "0.6994414", "text": "def test_without_role_check_without_guild(self):\n self.ctx.guild = None\n self.assertFalse(checks.without_role_check(self.ctx))", "title": "" }, { "docid": "f53914ddbae4277adc38041a53a39ae5", "score": "0.6500096", "text": "def check_role(role):\n\n if role.upper() not in config.roles:\n return False\n\n return True", "title": "" }, { "docid": "ad0d8a84b559a5f49eec34985a054653", "score": "0.6372471", "text": "async def require_roles(ctx: Context, roles: Sequence[Role]) -> bool:\n\n if is_in_any_role(ctx.author, roles):\n return True\n\n await react_if_not_help(ctx)\n\n return False", "title": "" }, { "docid": "1d77d86c6f4cb79a057a195382272e1f", "score": "0.63189214", "text": "def has_role(self, role):\n return self.current_user.has_role(role)", "title": "" }, { "docid": "f7fae621404e9d6a9681eb02d0af92be", "score": "0.62527186", "text": "def testForNoRole(self):\n result = profile_logic.isNoRoleEligibleForOrg(self.profile, self.org.key())\n self.assertTrue(result)", "title": "" }, { "docid": "5fd5e14d2d9e07e62b21442d860a14d1", "score": "0.6148728", "text": "def _check_role(self):\n log.debug(\"Checking for roles\")\n log.debug(self.lti_kwargs)\n role = u'any'\n if 'role' in self.lti_kwargs:\n role = self.lti_kwargs['role']\n log.debug(\n \"check_role lti_role=%s decorator_role=%s\", self.role, role\n )\n if not (role == u'any' or self.is_role(role)):\n raise LTIRoleException('Not authorized.')", "title": "" }, { "docid": "a8ddc6cf28f77c14e4aa0f0b9072fa62", "score": "0.60736716", "text": "def test_with_role_check_with_guild_and_required_role(self):\n self.ctx.author.roles.append(MockRole(id=10))\n self.assertTrue(checks.with_role_check(self.ctx, 10))", "title": "" }, { "docid": "6fde6b7758c4c4344607c738b02ec4fa", "score": "0.6067208", "text": "def hasNoGlobalRoles(self):\n return self.id != 'admin' and len(self.getUserRoles()) == 0", "title": "" }, { "docid": "9543ffcef7cc42a2832f5c17f15d8cb6", "score": "0.6039772", "text": "def role_required(role):\n def role_required_decorator(func):\n @wraps(func)\n def role_required_wrapper(*args, **kwargs):\n metadata = get_metadata(current_user)\n roles = (metadata.get('roles', []) or []) if metadata else []\n if current_user.is_authenticated and role in roles:\n return func(*args, **kwargs)\n raise Forbidden\n return role_required_wrapper\n return role_required_decorator", "title": "" }, { "docid": "97ed516f8058fea96be12e785309574f", "score": "0.5988119", "text": "def ok_role(token: str) -> bool:\n\n return Role.get(token) is not None", "title": "" }, { "docid": "8b24bb72ccc0ac7e68d701081b93c432", "score": "0.598717", "text": "def has_role(self, role):\n return role == self.rolename", "title": "" }, { "docid": 
"87baa3b0e39610b0bcf7363a0fa3f28d", "score": "0.5912369", "text": "def has_democraciv_role(role: mk.DemocracivRole):\n\n def predicate(ctx):\n if not isinstance(ctx.channel, discord.abc.GuildChannel):\n raise commands.NoPrivateMessage()\n\n if config.DEMOCRACIV_GUILD_ID != ctx.guild.id:\n raise exceptions.NotDemocracivGuildError()\n\n found = discord.utils.get(ctx.author.roles, id=role.value)\n\n if found is None:\n raise commands.MissingRole(role.printable_name)\n\n return True\n\n return commands.check(predicate)", "title": "" }, { "docid": "35820adb63ef32a635b06daebe811541", "score": "0.5906565", "text": "def _validate_role_against_whitelist(self, role: Role) -> bool:\n # Check the whitelist to make sure we are allowed to add this role\n if role.name not in self.config[\"content\"][\"role_whitelist\"]:\n return False\n return True", "title": "" }, { "docid": "496b8d6a6fdd9ab0593bd8fbbd972a75", "score": "0.5856141", "text": "def userRequestedRole(self):\n return self.user_role == ROLE", "title": "" }, { "docid": "b0828b2dbcfb02db61a44f7c57f154e6", "score": "0.5847325", "text": "def is_admin(ctx, admin_role_name):\n return admin_role_name.lower() in [role.name.lower() for role in ctx.author.roles]", "title": "" }, { "docid": "3bbe170d0fd36eaf925a6d2d3e338b7a", "score": "0.5834166", "text": "def hasNoGlobalRoles(self):\n return False", "title": "" }, { "docid": "a0184d26d21ccab8d0cdf10024ad6a1d", "score": "0.5809232", "text": "def _role(self, omx_role=None):\n if omx_role is None:\n raise Exception(\"No 'omx_role' to check!\")\n elif omx_role in self.roles():\n return True\n else:\n return False", "title": "" }, { "docid": "018777fb175febf7ec502ecd418a7cf1", "score": "0.5791455", "text": "def admin_only():\n def predicate(ctx):\n return str(ctx.author) in ADMINS\n return commands.check(predicate)", "title": "" }, { "docid": "aa70f2b6ec2e652818669bcf098a1879", "score": "0.57905227", "text": "async def cog_check(self, ctx: Context) -> bool:\n return await commands.has_any_role(*constants.MODERATION_ROLES).predicate(ctx)", "title": "" }, { "docid": "0517b7ed64c02c681ac96756276b917b", "score": "0.578054", "text": "def check_self_role(ctx: commands.Context, role_id: int) -> bool:\n\n return any(role.id == role_id for role in ctx.author.roles)", "title": "" }, { "docid": "1f81e7a66368baa381c35ebf6880a4d8", "score": "0.576306", "text": "def is_suitable_role(self, role):\n return role['role']['name'].startswith('Employee Cent')", "title": "" }, { "docid": "047904ba83dd849fe068773f87fef12c", "score": "0.57535404", "text": "def simple_role_check(*roles):\n def wrapper(func):\n \"Wrap view in role check\"\n @wraps(func)\n def decorated_view(*args, **kwargs):\n \"\"\"Decorate view with simple role check\n \"\"\"\n if not roles:\n raise ValueError('Refusing to fake role check with no roles')\n user_roles = getattr(current_user, 'roles', [])\n if not set(user_roles).intersection(roles):\n abort(403, description=(\n 'User %s (roles=%s) lacks accpetable role: %s' % (\n getattr(current_user, 'username', getattr(\n current_user, 'id', 'unknown')),\n str(user_roles), str(roles))))\n return func(*args, **kwargs)\n return decorated_view\n return wrapper", "title": "" }, { "docid": "d56d48f14c496fc670d07d70973f2e71", "score": "0.57208836", "text": "async def remrole(self, ctx, *, role = None):\r\n\t\tblock_list = self.settings.getServerStat(ctx.guild, \"UserRoleBlock\")\r\n\t\tif ctx.author.id in block_list:\r\n\t\t\tawait ctx.send(\"You are currently blocked from using this 
command.\")\r\n\t\t\treturn\r\n\r\n\t\tif role == None:\r\n\t\t\tawait ctx.send(\"Usage: `{}remrole [role name]`\".format(ctx.prefix))\r\n\t\t\treturn\r\n\r\n\t\tserver = ctx.message.guild\r\n\t\tchannel = ctx.message.channel\r\n\r\n\t\tif self.settings.getServerStat(server, \"OnlyOneUserRole\"):\r\n\t\t\tawait ctx.invoke(self.setrole, role=None)\r\n\t\t\treturn\r\n\r\n\t\t# Check if we're suppressing @here and @everyone mentions\r\n\t\tif self.settings.getServerStat(server, \"SuppressMentions\"):\r\n\t\t\tsuppress = True\r\n\t\telse:\r\n\t\t\tsuppress = False\r\n\t\t\r\n\t\t# Get the array\r\n\t\ttry:\r\n\t\t\tpromoArray = self.settings.getServerStat(server, \"UserRoles\")\r\n\t\texcept Exception:\r\n\t\t\tpromoArray = []\r\n\t\tif promoArray == None:\r\n\t\t\tpromoArray = []\r\n\r\n\t\t# Check if role is real\r\n\t\troleCheck = DisplayName.roleForName(role, server)\r\n\t\tif not roleCheck:\r\n\t\t\t# No luck...\r\n\t\t\tmsg = '*{}* not found in list.\\n\\nTo see a list of user roles - run `{}listuserroles`'.format(Nullify.escape_all(role), ctx.prefix)\r\n\t\t\tawait channel.send(msg)\r\n\t\t\treturn\r\n\t\t\r\n\t\t# Got a role - set it\r\n\t\trole = roleCheck\r\n\r\n\t\tremRole = []\r\n\t\tfor arole in promoArray:\r\n\t\t\troleTest = DisplayName.roleForID(arole['ID'], server)\r\n\t\t\tif not roleTest:\r\n\t\t\t\t# Not a real role - skip\r\n\t\t\t\tcontinue\r\n\t\t\tif str(arole['ID']) == str(role.id):\r\n\t\t\t\t# We found it!\r\n\t\t\t\tif roleTest in ctx.author.roles:\r\n\t\t\t\t\t# We have it\r\n\t\t\t\t\tremRole.append(roleTest)\r\n\t\t\t\telse:\r\n\t\t\t\t\t# We don't have it...\r\n\t\t\t\t\tawait ctx.send(\"You don't currently have that role.\")\r\n\t\t\t\t\treturn\r\n\t\t\t\tbreak\r\n\r\n\t\tif not len(remRole):\r\n\t\t\t# We didn't find that role\r\n\t\t\tmsg = '*{}* not found in list.\\n\\nTo see a list of user roles - run `{}listuserroles`'.format(Nullify.escape_all(role.name), ctx.prefix)\r\n\t\t\tawait channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif len(remRole):\r\n\t\t\tself.settings.role.rem_roles(ctx.author, remRole)\r\n\r\n\t\tmsg = '*{}* has been removed from **{}!**'.format(DisplayName.name(ctx.message.author), Nullify.escape_all(role.name))\r\n\t\tawait channel.send(msg)", "title": "" }, { "docid": "06b376f34615e60f7b3de84ba4387160", "score": "0.572087", "text": "def has_role(self, role) -> bool:\n auth = flask.request.authorization\n route_uri = '/authz/users/{user}/{role}'.format(user=auth.username, role=role)\n route_headers = {\n 'Accept': 'application/json'\n }\n\n resp = requests.request(\n method='GET', \n url='{root}{route}'.format(root=self.root_uri, route=route_uri),\n headers=route_headers, \n auth=(self.service_user, self.service_password)\n )\n\n if resp.status_code == requests.codes.ok: # pylint: disable=no-member\n # Dictionary will be empty if the user does not have that role - empty dicts are falsy\n if resp.json():\n return True\n\n return False", "title": "" }, { "docid": "c236251e0e02b1aab810a12f3d6511ee", "score": "0.57171214", "text": "def is_author(self, user):\n return self.user == user", "title": "" }, { "docid": "c236251e0e02b1aab810a12f3d6511ee", "score": "0.57171214", "text": "def is_author(self, user):\n return self.user == user", "title": "" }, { "docid": "64f14cddd3d56ad7c1c7fdf10376a4a7", "score": "0.569854", "text": "def should_ignore(self, msg):\n\n # Check if the author is a bot / system message\n if msg.author.bot or msg.type != discord.MessageType.default:\n logger.info('Author is a bot, ignoring')\n return True\n\n if 
len(msg.content) == 0:\n logger.info('Message has no text, ignoring')\n return True\n\n if is_command(msg.content):\n logger.info('Message appears to be a bot command, ignoring')\n return True\n\n return False", "title": "" }, { "docid": "7e8fa4af9d0e5bd96336025a29a29678", "score": "0.56785756", "text": "def exists(self, role: str) -> bool:\n return role in self.oncreate_for", "title": "" }, { "docid": "6406fb27103eb3063b9d04eb1265886e", "score": "0.5649002", "text": "def has_role(self, role):\n return role in (role.name for role in self.roles)", "title": "" }, { "docid": "242357ca511d3e1c248ce7242df3c82c", "score": "0.56437784", "text": "def publisher_and_admin(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n if author or user_role == 2:\n return fn(*args, **kwargs)\n else:\n message = {\"msg\":\"This feature is available to only the admin and the individual who created it!\"}\n response = Response(json.dumps(message), status=403)\n return response\n return wrapper", "title": "" }, { "docid": "2d5baef94541b07266382a310c933df9", "score": "0.56165695", "text": "def orgOfferedMentorRole(self):\n return self.org_role == MENTOR_ROLE", "title": "" }, { "docid": "508dfaa90cd99bd686c77f184c69b249", "score": "0.56135726", "text": "def author_required(card):\n def decorator(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if g.current_user.kname != card.data.get('author'):\n return forbidden('Not the author of the card.')\n return f(*args, **kwargs)\n return decorated_function\n return decorator", "title": "" }, { "docid": "f309a348dab0b3cd1101bb7c00e15a7c", "score": "0.5603542", "text": "async def setrole(self, ctx, *, role = None):\r\n\t\tblock_list = self.settings.getServerStat(ctx.guild, \"UserRoleBlock\")\r\n\t\tif ctx.author.id in block_list:\r\n\t\t\tawait ctx.send(\"You are currently blocked from using this command.\")\r\n\t\t\treturn\r\n\t\t\r\n\t\tserver = ctx.message.guild\r\n\t\tchannel = ctx.message.channel\r\n\r\n\t\tif not self.settings.getServerStat(server, \"OnlyOneUserRole\"):\r\n\t\t\tawait ctx.invoke(self.addrole, role=role)\r\n\t\t\treturn\r\n\r\n\t\t# Check if we're suppressing @here and @everyone mentions\r\n\t\tif self.settings.getServerStat(server, \"SuppressMentions\"):\r\n\t\t\tsuppress = True\r\n\t\telse:\r\n\t\t\tsuppress = False\r\n\t\t\r\n\t\t# Get the array\r\n\t\ttry:\r\n\t\t\tpromoArray = self.settings.getServerStat(server, \"UserRoles\")\r\n\t\texcept Exception:\r\n\t\t\tpromoArray = []\r\n\t\tif promoArray == None:\r\n\t\t\tpromoArray = []\r\n\r\n\t\tif role == None:\r\n\t\t\t# Remove us from all roles\r\n\t\t\tremRole = []\r\n\t\t\tfor arole in promoArray:\r\n\t\t\t\troleTest = DisplayName.roleForID(arole['ID'], server)\r\n\t\t\t\tif not roleTest:\r\n\t\t\t\t\t# Not a real role - skip\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tif roleTest in ctx.message.author.roles:\r\n\t\t\t\t\t# We have this in our roles - remove it\r\n\t\t\t\t\tremRole.append(roleTest)\r\n\t\t\tif len(remRole):\r\n\t\t\t\tself.settings.role.rem_roles(ctx.author, remRole)\r\n\t\t\t# Give a quick status\r\n\t\t\tmsg = '*{}* has been moved out of all roles in the list!'.format(DisplayName.name(ctx.message.author))\r\n\t\t\tawait channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\t# Check if role is real\r\n\t\troleCheck = DisplayName.roleForName(role, server)\r\n\t\tif not roleCheck:\r\n\t\t\t# No luck...\r\n\t\t\tmsg = '*{}* not found in list.\\n\\nTo see a list of user roles - run `{}listuserroles`'.format(Nullify.escape_all(role), ctx.prefix)\r\n\t\t\tawait 
channel.send(msg)\r\n\t\t\treturn\r\n\t\t\r\n\t\t# Got a role - set it\r\n\t\trole = roleCheck\r\n\r\n\t\taddRole = []\r\n\t\tremRole = []\r\n\t\tfor arole in promoArray:\r\n\t\t\troleTest = DisplayName.roleForID(arole['ID'], server)\r\n\t\t\tif not roleTest:\r\n\t\t\t\t# Not a real role - skip\r\n\t\t\t\tcontinue\r\n\t\t\tif str(arole['ID']) == str(role.id):\r\n\t\t\t\t# We found it!\r\n\t\t\t\taddRole.append(roleTest)\r\n\t\t\telif roleTest in ctx.message.author.roles:\r\n\t\t\t\t# Not our intended role and we have this in our roles - remove it\r\n\t\t\t\tremRole.append(roleTest)\r\n\r\n\t\tif not len(addRole):\r\n\t\t\t# We didn't find that role\r\n\t\t\tmsg = '*{}* not found in list.\\n\\nTo see a list of user roles - run `{}listuserroles`'.format(Nullify.escape_all(role.name), ctx.prefix)\r\n\t\t\tawait channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif len(remRole) or len(addRole):\r\n\t\t\tself.settings.role.change_roles(ctx.author, add_roles=addRole, rem_roles=remRole)\r\n\r\n\t\tmsg = '*{}* has been moved to **{}!**'.format(DisplayName.name(ctx.message.author), Nullify.escape_all(role.name))\r\n\t\tawait channel.send(msg)", "title": "" }, { "docid": "a152faa0b408cd9f08755911355cf788", "score": "0.55912274", "text": "def has_teaparty_role():\n def predicate(ctx):\n return commands.check_any(commands.has_role(TEAPARTY_ROLE_NAME))\n return commands.check(predicate)", "title": "" }, { "docid": "ffa5853e1d419248b064d4dc8109c5f6", "score": "0.5563933", "text": "async def removeRole(args : List[Union[Guild, Role, int]], reactingUser : Union[User, Member] = None) -> bool:\n dcGuild = args[0]\n dcMember = dcGuild.get_member(reactingUser.id)\n role = args[1]\n msgID = args[2]\n\n if role in dcMember.roles:\n await dcMember.remove_roles(role, reason=\"User requested role toggle via BB reaction menu \" + str(msgID))\n return False", "title": "" }, { "docid": "15dd419b2eba499e9bc3c069ab4342e8", "score": "0.5562312", "text": "def is_administrator(ctx):\n return ctx.message.author.permissions_in(ctx.message.channel).administrator", "title": "" }, { "docid": "34c7bc99bc23838efe2fecb6df6b29cf", "score": "0.55352145", "text": "def test_forced_inequality(self):\n assert Role(\"foo\") != Role(\"bar\")", "title": "" }, { "docid": "f2739cf0ed5ae09077eb576de0ea9216", "score": "0.5531027", "text": "def is_staff():\n async def predicate(ctx):\n with open(path.join(path.dirname(__file__), 'permissions.json')) as f:\n permitted_roles = json.load(f)[__name__.split('.')[-1]]\n try:\n user_roles = [role.id for role in ctx.message.author.roles]\n except AttributeError:\n return False\n return any(role in permitted_roles for role in user_roles)\n return commands.check(predicate)", "title": "" }, { "docid": "15737f21272dd0ab98d849ca98a41553", "score": "0.55220103", "text": "def decorated_view(*args, **kwargs):\n if not roles:\n raise ValueError('Refusing to fake role check with no roles')\n user_roles = getattr(current_user, 'roles', [])\n if not set(user_roles).intersection(roles):\n abort(403, description=(\n 'User %s (roles=%s) lacks accpetable role: %s' % (\n getattr(current_user, 'username', getattr(\n current_user, 'id', 'unknown')),\n str(user_roles), str(roles))))\n return func(*args, **kwargs)", "title": "" }, { "docid": "9c7270c5e47e54a690d72fddff221bed", "score": "0.550964", "text": "def has(self, role: str, entity: str) -> bool:\n return self.db.grants.find_one({'role': role, 'entity': entity}) is not None", "title": "" }, { "docid": "3b88a6942e16503b82faec03b09a8bfb", "score": "0.55071825", "text": 
"async def addrole(self, ctx, *, role = None):\r\n\t\tblock_list = self.settings.getServerStat(ctx.guild, \"UserRoleBlock\")\r\n\t\tif ctx.author.id in block_list:\r\n\t\t\tawait ctx.send(\"You are currently blocked from using this command.\")\r\n\t\t\treturn\r\n\t\t\r\n\t\tif role == None:\r\n\t\t\tawait ctx.send(\"Usage: `{}addrole [role name]`\".format(ctx.prefix))\r\n\t\t\treturn\r\n\r\n\t\tserver = ctx.message.guild\r\n\t\tchannel = ctx.message.channel\r\n\r\n\t\tif self.settings.getServerStat(server, \"OnlyOneUserRole\"):\r\n\t\t\tawait ctx.invoke(self.setrole, role=role)\r\n\t\t\treturn\r\n\r\n\t\t# Check if we're suppressing @here and @everyone mentions\r\n\t\tif self.settings.getServerStat(server, \"SuppressMentions\"):\r\n\t\t\tsuppress = True\r\n\t\telse:\r\n\t\t\tsuppress = False\r\n\t\t\r\n\t\t# Get the array\r\n\t\ttry:\r\n\t\t\tpromoArray = self.settings.getServerStat(server, \"UserRoles\")\r\n\t\texcept Exception:\r\n\t\t\tpromoArray = []\r\n\t\tif promoArray == None:\r\n\t\t\tpromoArray = []\r\n\r\n\t\t# Check if role is real\r\n\t\troleCheck = DisplayName.roleForName(role, server)\r\n\t\tif not roleCheck:\r\n\t\t\t# No luck...\r\n\t\t\tmsg = '*{}* not found in list.\\n\\nTo see a list of user roles - run `{}listuserroles`'.format(Nullify.escape_all(role), ctx.prefix)\r\n\t\t\tawait channel.send(msg)\r\n\t\t\treturn\r\n\t\t\r\n\t\t# Got a role - set it\r\n\t\trole = roleCheck\r\n\r\n\t\taddRole = []\r\n\t\tfor arole in promoArray:\r\n\t\t\troleTest = DisplayName.roleForID(arole['ID'], server)\r\n\t\t\tif not roleTest:\r\n\t\t\t\t# Not a real role - skip\r\n\t\t\t\tcontinue\r\n\t\t\tif str(arole['ID']) == str(role.id):\r\n\t\t\t\t# We found it!\r\n\t\t\t\tif roleTest in ctx.author.roles:\r\n\t\t\t\t\t# We already have it\r\n\t\t\t\t\tawait ctx.send(\"You already have that role.\")\r\n\t\t\t\t\treturn\r\n\t\t\t\taddRole.append(roleTest)\r\n\t\t\t\tbreak\r\n\r\n\t\tif not len(addRole):\r\n\t\t\t# We didn't find that role\r\n\t\t\tmsg = '*{}* not found in list.\\n\\nTo see a list of user roles - run `{}listuserroles`'.format(Nullify.escape_all(role.name), ctx.prefix)\r\n\t\t\tawait channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\tif len(addRole):\r\n\t\t\tself.settings.role.add_roles(ctx.author, addRole)\r\n\r\n\t\tmsg = '*{}* has acquired **{}!**'.format(DisplayName.name(ctx.message.author), Nullify.escape_all(role.name))\r\n\t\tawait channel.send(msg)", "title": "" }, { "docid": "3c4ec30bb8f60656179cd1ae5bac2054", "score": "0.5499786", "text": "def test_context_has_role(self, role):\n with mute_signals(post_save):\n profile = ProfileFactory.create()\n Role.objects.create(\n role=role,\n program=self.program_page.program,\n user=profile.user,\n )\n self.client.force_login(profile.user)\n response = self.client.get(self.program_page.url)\n assert response.context['authenticated'] is True\n assert response.context['username'] == profile.user.username\n assert response.context['is_staff'] is True", "title": "" }, { "docid": "64be3723a9f94fd3b8708d685a8f22fa", "score": "0.54997045", "text": "def _isAdmin(self):\n if 'omxware-admin' in self.roles():\n return True\n else:\n return False", "title": "" }, { "docid": "158536aa3f15f1458985a0f2221d3b60", "score": "0.5489829", "text": "def is_chapter_admin(self):\n return self.role.title == 'chapter_admin'", "title": "" }, { "docid": "9ec8b7543e5bda7b3f1736a8e7b032f1", "score": "0.5478492", "text": "def role_required(*role_names):\n return passes_test_decorator(lambda u, *args, **kwargs: has_role(u, role_names, *args, **kwargs),\n 
\"Restricted to role%s: %s\" % (\"s\" if len(role_names) != 1 else \"\", \", \".join(role_names)))", "title": "" }, { "docid": "c4d1665c1ef453011f6c232700e638ec", "score": "0.5473421", "text": "def admin_check():\n def predicate(ctx):\n return str(ctx.author) in ADMINS or len(ops[str(ctx.guild.id)]) == 0\\\n or str(ctx.author) in ops[str(ctx.guild.id)]\n return commands.check(predicate)", "title": "" }, { "docid": "c94fdc531ee19f073afbddf8738d6732", "score": "0.546712", "text": "async def recheck(self, ctx):\n\t\tif not ctx.guild.me.guild_permissions.manage_roles:\n\t\t\treturn await ctx.send('I do not have permission to manage roles in this server.')\n\t\tdata = await self.config.guild(ctx.guild).all()\n\t\troledict = data['roledict']\n\t\tdoAdd = data['doAdd']\n\t\tdoRemove = data['doRemove']\n\t\ttorem = set()\n\t\ttoadd = set()\n\t\tfailed = set()\n\t\tfor role in ctx.author.roles:\n\t\t\tif str(role.id) in roledict:\n\t\t\t\tif ctx.guild.me.top_role > role:\n\t\t\t\t\ttorem.add(role)\n\t\t\t\telse:\n\t\t\t\t\tfailed.add(role) \n\t\tactivities = [a.name for a in ctx.author.activities]\n\t\tfor role in [rid for rid in roledict if any(a in roledict[rid] for a in activities)]:\n\t\t\trole = ctx.guild.get_role(int(role))\n\t\t\tif role is not None and ctx.guild.me.top_role > role:\n\t\t\t\ttoadd.add(role)\n\t\t\telif role:\n\t\t\t\tfailed.add(role)\n\t\tsetsum = torem & toadd\n\t\ttorem -= setsum\n\t\ttoadd -= setsum\n\t\t#Filter out managed roles like Nitro Booster\n\t\ttorem = [r for r in torem if not r.managed]\n\t\ttoadd = [r for r in toadd if not r.managed]\n\t\tif toadd and doAdd:\n\t\t\ttry:\n\t\t\t\tawait ctx.author.add_roles(*toadd, reason='Gameroles')\n\t\t\texcept discord.errors.Forbidden:\n\t\t\t\treturn await ctx.send(\n\t\t\t\t\t'Encountered an unexpected discord.errors.Forbidden adding roles, canceling'\n\t\t\t\t)\n\t\tif torem and doRemove:\n\t\t\ttry:\n\t\t\t\tawait ctx.author.remove_roles(*torem, reason='Gameroles')\n\t\t\texcept discord.errors.Forbidden:\n\t\t\t\treturn await ctx.send(\n\t\t\t\t\t'Encountered an unexpected discord.errors.Forbidden removing roles, canceling'\n\t\t\t\t)\n\t\tif failed:\n\t\t\tawait ctx.send(\n\t\t\t\t'The following roles could not be managed '\n\t\t\t\tf'because they are higher than my highest role:\\n`{humanize_list(list(failed))}`'\n\t\t\t)\n\t\tawait ctx.tick()", "title": "" }, { "docid": "c72f81d70aa8dd792f346dec292079b0", "score": "0.54653704", "text": "def author_role(self,author,role=None):\n\n return self.authorlist.author_role(author,role)", "title": "" }, { "docid": "1e60753a1bee7030521b5d1562186c5a", "score": "0.54592943", "text": "def needs_admin(user_to_check: UserObject):\n if not user_to_check.is_admin:\n print(\"lol\")\n return _insufficient_permissions_response", "title": "" }, { "docid": "107151ae5275c72d514b90033be42725", "score": "0.5443324", "text": "def __is_user_not_authorised(self):\r\n \"\"\"\" Future use \"\"\"\r\n # self._psql_session.execute(\r\n # CHECK_USER_AUTHENTICATION_QUERY.format(self.loggedin_user_details[LOGGEDINUSERID_KEY]))\r\n # result_set = self._psql_session.fetchone()\r\n # if self._psql_session.rowcount and result_set[ROLE_KEY] == ADMIN_VAL:\r\n # return False\r\n # return True\r", "title": "" }, { "docid": "9e277c62148bbf8bd6b13ebc9a9de0f7", "score": "0.5440305", "text": "def check_is_admin(context):\r\n init()\r\n # the target is user-self\r\n credentials = context.to_dict()\r\n target = credentials\r\n # Backward compatibility: if ADMIN_CTX_POLICY is not\r\n # found, default to validating 
role:admin\r\n admin_policy = (ADMIN_CTX_POLICY in policy._rules\r\n and ADMIN_CTX_POLICY or 'role:admin')\r\n return policy.check(admin_policy, target, credentials)", "title": "" }, { "docid": "e2df92613a176313fc991b87dccb96f7", "score": "0.54377854", "text": "def test_role(role: schemas.Role, required: RoleTuple) -> bool:\n if not hasattr(role, required.role):\n raise ValueError(f\"No such role type as '{required.role}'\")\n permission: schemas.Permission = getattr(role, required.role)\n if not hasattr(permission, required.permission):\n raise ValueError(f\"No such permission type as '{required.permission}\")\n return getattr(permission, required.permission)", "title": "" }, { "docid": "97d9565bdd644451b271e7f7c3c004ed", "score": "0.54209864", "text": "def is_owner(ctx: \"CustomContext\") -> bool:\n return ctx.author.id == ctx.bot.config.discord_owner_id", "title": "" }, { "docid": "495a47c193908bc87f0e5bdce1baa593", "score": "0.5414442", "text": "def is_admin():\r\n def predicate(ctx): # Вне is_admin не существует(имя можно использовать вне как угодно)\r\n return int(ctx.message.author.id) in admins\r\n return commands.check(predicate)", "title": "" }, { "docid": "a54fc7a6ec1127399980ccbf104e3e0d", "score": "0.54070807", "text": "def is_admin(self):\n if not self.author:\n return False\n\n return self.author.is_staff is True", "title": "" }, { "docid": "c78c11a4c5341c50b06a09710eaac089", "score": "0.5404345", "text": "async def ignore_all(self, ctx):\n if not await self.bot.has_perm(ctx, admin=True, message_on_fail=False): return\n # TODO: This.", "title": "" }, { "docid": "804448847de293e66adc8f371108d242", "score": "0.53969187", "text": "def iseditable(self):\n currentUser = getSecurityManager().getUser()\n return (currentUser.has_role(MANAGER_ROLE) or\n currentUser.has_role(CZ_ADMIN_ROLE) or\n currentUser.has_role(ZEN_MANAGER_ROLE))", "title": "" }, { "docid": "50d3df74b900fcc51ffaa0603ab1b089", "score": "0.53913784", "text": "async def removeuserrole(self, ctx, *, role = None):\r\n\t\t\r\n\t\tauthor = ctx.message.author\r\n\t\tserver = ctx.message.guild\r\n\t\tchannel = ctx.message.channel\r\n\r\n\t\tusage = 'Usage: `{}removeuserrole [role]`'.format(ctx.prefix)\r\n\r\n\t\tif not await Utils.is_admin_reply(ctx): return\r\n\r\n\t\tif role == None:\r\n\t\t\tawait channel.send(usage)\r\n\t\t\treturn\r\n\r\n\t\trr_list = self.settings.getServerStat(ctx.guild, \"ReactionMessageList\", [])\r\n\r\n\t\tif type(role) is str:\r\n\t\t\tif role == \"everyone\":\r\n\t\t\t\trole = \"@everyone\"\r\n\t\t\t# It' a string - the hope continues\r\n\t\t\t# Let's clear out by name first - then by role id\r\n\t\t\ttry:\r\n\t\t\t\tpromoArray = self.settings.getServerStat(server, \"UserRoles\")\r\n\t\t\texcept Exception:\r\n\t\t\t\tpromoArray = []\r\n\t\t\tif promoArray == None:\r\n\t\t\t\tpromoArray = []\r\n\r\n\t\t\tfor aRole in promoArray:\r\n\t\t\t\t# Get the role that corresponds to the name\r\n\t\t\t\tif aRole['Name'].lower() == role.lower():\r\n\t\t\t\t\t# We found it - let's remove it\r\n\t\t\t\t\tpromoArray.remove(aRole)\r\n\t\t\t\t\t# Also remove it from the rr_list\r\n\t\t\t\t\trr_list = [x for x in rr_list if x[\"role_id\"] != int(aRole[\"ID\"])]\r\n\t\t\t\t\tself.settings.setServerStat(server, \"ReactionMessageList\", rr_list)\r\n\t\t\t\t\tself.settings.setServerStat(server, \"UserRoles\", promoArray)\r\n\t\t\t\t\tmsg = '**{}** removed successfully.'.format(Nullify.escape_all(aRole['Name']))\r\n\t\t\t\t\tawait channel.send(msg)\r\n\t\t\t\t\treturn\r\n\t\t\t# At this point - no name\r\n\t\t\t# 
Let's see if it's a role that's had a name change\r\n\r\n\r\n\t\t\troleCheck = DisplayName.roleForName(role, server)\r\n\t\t\tif roleCheck:\r\n\t\t\t\t# We got a role\r\n\t\t\t\t# If we're here - then the role is an actual role\r\n\t\t\t\ttry:\r\n\t\t\t\t\tpromoArray = self.settings.getServerStat(server, \"UserRoles\")\r\n\t\t\t\texcept Exception:\r\n\t\t\t\t\tpromoArray = []\r\n\t\t\t\tif promoArray == None:\r\n\t\t\t\t\tpromoArray = []\r\n\r\n\t\t\t\tfor aRole in promoArray:\r\n\t\t\t\t\t# Get the role that corresponds to the id\r\n\t\t\t\t\tif str(aRole['ID']) == str(roleCheck.id):\r\n\t\t\t\t\t\t# We found it - let's remove it\r\n\t\t\t\t\t\tpromoArray.remove(aRole)\r\n\t\t\t\t\t\t# Also remove it from the rr_list\r\n\t\t\t\t\t\trr_list = [x for x in rr_list if x[\"role_id\"] != roleCheck.id]\r\n\t\t\t\t\t\tself.settings.setServerStat(server, \"ReactionMessageList\", rr_list)\r\n\t\t\t\t\t\tself.settings.setServerStat(server, \"UserRoles\", promoArray)\r\n\t\t\t\t\t\tmsg = '**{}** removed successfully.'.format(Nullify.escape_all(aRole['Name']))\r\n\t\t\t\t\t\tawait channel.send(msg)\r\n\t\t\t\t\t\treturn\r\n\t\t\t\t\r\n\t\t\t# If we made it this far - then we didn't find it\r\n\t\t\tmsg = '*{}* not found in list.'.format(Nullify.escape_all(roleCheck.name))\r\n\t\t\tawait channel.send(msg)\r\n\t\t\treturn\r\n\r\n\t\t# If we're here - then the role is an actual role - I think?\r\n\t\ttry:\r\n\t\t\tpromoArray = self.settings.getServerStat(server, \"UserRoles\")\r\n\t\texcept Exception:\r\n\t\t\tpromoArray = []\r\n\t\tif promoArray == None:\r\n\t\t\tpromoArray = []\r\n\r\n\t\tfor aRole in promoArray:\r\n\t\t\t# Get the role that corresponds to the id\r\n\t\t\tif str(arole['ID']) == str(role.id):\r\n\t\t\t\t# We found it - let's remove it\r\n\t\t\t\tpromoArray.remove(aRole)\r\n\t\t\t\t# Also remove it from the rr_list\r\n\t\t\t\trr_list = [x for x in rr_list if x[\"role_id\"] != role.id]\r\n\t\t\t\tself.settings.setServerStat(server, \"ReactionMessageList\", rr_list)\r\n\t\t\t\tself.settings.setServerStat(server, \"UserRoles\", promoArray)\r\n\t\t\t\tmsg = '**{}** removed successfully.'.format(Nullify.escape_all(aRole['Name']))\r\n\t\t\t\tawait channel.send(msg)\r\n\t\t\t\treturn\r\n\r\n\t\t# If we made it this far - then we didn't find it\r\n\t\tmsg = '*{}* not found in list.'.format(Nullify.escape_all(role.name))\r\n\t\tawait channel.send(msg)", "title": "" }, { "docid": "2b9574deb8b415c8881ee0e0253ac6c4", "score": "0.5390936", "text": "def user_resource(self):\n return not self.submitter.is_staff", "title": "" }, { "docid": "9214b86c2229b96e156385dbe08ffd2f", "score": "0.5366788", "text": "def check_wrapper(self, cb):\n if not cb():\n if users.get_current_user():\n raise ForbiddenException()\n else:\n raise UnauthorizedException()", "title": "" }, { "docid": "b71ba79480cdf58d729c0cbd8518ea13", "score": "0.5359209", "text": "def rolecheck(member):\n rolelist = []\n for role in member.roles:\n rolelist.append(role.name)\n if 'Admin' in rolelist or 'Mod' in rolelist:\n return True\n else:\n return False", "title": "" }, { "docid": "331dec12692681ae46c30c87cc93de38", "score": "0.5358072", "text": "def is_one(self, role):\r\n\t\treturn role in self._roles", "title": "" }, { "docid": "4026df2e99892eb8686de286fa138c17", "score": "0.53557825", "text": "def __call__(self, target, creds, enforcer):\n\n return self.match.lower() in [x.lower() for x in creds['roles']]", "title": "" }, { "docid": "64ad07cf7107aa66afe2027f0992de46", "score": "0.53488904", "text": "def cog_check(self, ctx):\n\t\treturn 
checks.is_owner_check(ctx.message.author)", "title": "" }, { "docid": "5bdc51b49f13fb04b8fc6ff6ef9b2b3c", "score": "0.5346053", "text": "def role_checker(roles):\n set_supported_roles = {'TOP SOLO','MIDDLE SOLO','BOTTOM DUO_SUPPORT','BOTTOM DUO_CARRY','JUNGLE NONE'}\n if len(set(roles).difference(set_supported_roles)) != 0:\n return False\n else:\n return True", "title": "" }, { "docid": "85b208bce3a23c919ad3f493e32f978d", "score": "0.5344031", "text": "async def deny(self, ctx: commands.Context, target: discord.Member):\n bot = self.bot\n guild = ctx.guild\n author = ctx.author\n applicant = get(guild.roles, name=\"Staff Applicant\")\n if applicant in target.roles:\n await ctx.send(\"Would you like to specify a reason? (yes/no)\")\n pred = MessagePredicate.yes_or_no(ctx)\n try:\n await bot.wait_for(\"message\", timeout=30, check=pred)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n if pred.result is True:\n await ctx.send(\"Please, specify your reason now.\")\n\n def check(m):\n return m.author == author\n\n try:\n reason = await bot.wait_for(\"message\", timeout=120, check=check)\n except asyncio.TimeoutError:\n return await ctx.send(\"You took too long. Try again, please.\")\n await target.send(\n \"Your application in {0} has been denied.\\n*Reason:* {1}\".format(\n guild.name, reason.content\n )\n )\n else:\n await target.send(\n \"Your application in {0} has been denied.\".format(guild.name)\n )\n await target.remove_roles(applicant)\n await ctx.send(\"Denied {0}'s application.\".format(target.mention))\n else:\n await ctx.send(\n \"Uh oh. Looks like {0} hasn't applied for anything.\".format(\n target.mention\n )\n )", "title": "" }, { "docid": "cd8b78bdaa63434680799267e7b5009f", "score": "0.5343538", "text": "async def cog_check(self, ctx: commands.Context):\n return await ctx.bot.is_owner(ctx.author) and ctx.bot.user.bot", "title": "" }, { "docid": "6fd0acfc6a1b841ba564465d0f5d072a", "score": "0.53390586", "text": "def test_func(self):\n\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False", "title": "" }, { "docid": "5952ee99ad1cec14a1bf7b9f2f091615", "score": "0.53362167", "text": "def is_valid_role(role_name):\n return role_name in ROLES", "title": "" }, { "docid": "7d8736a32d3b30bbb92bc7946bb7d7dd", "score": "0.5321969", "text": "def is_valid_entity_for(self, role: str, entity: str) -> bool:\n if role in self.valid_entity_for:\n return self.valid_entity_for[role](entity)\n return True", "title": "" }, { "docid": "bbebf3b7b19bb0336c24acd925d2fe1c", "score": "0.53096974", "text": "def _(self, user, perm, board):\n return board.has_manager(user)", "title": "" }, { "docid": "67e9b48dd1d882e62d40776138b787d7", "score": "0.53032076", "text": "def test_func(self):\n return not self.request.user.is_authenticated", "title": "" }, { "docid": "67e9b48dd1d882e62d40776138b787d7", "score": "0.53032076", "text": "def test_func(self):\n return not self.request.user.is_authenticated", "title": "" }, { "docid": "96227892468b20c49d98fb01709cc434", "score": "0.5301967", "text": "def test_func(self):\r\n\t\tpost = self.get_object()\r\n\t\tif self.request.user == post.author:\r\n\t\t\treturn True\r\n\t\treturn False", "title": "" }, { "docid": "96227892468b20c49d98fb01709cc434", "score": "0.5301967", "text": "def test_func(self):\r\n\t\tpost = self.get_object()\r\n\t\tif self.request.user == post.author:\r\n\t\t\treturn True\r\n\t\treturn False", "title": "" }, { "docid": "889115506f266e8b8f5e1b1375936074", 
"score": "0.52981144", "text": "def has_object_permission(self, request, view, obj):\n return not request.user == obj.offered_by", "title": "" }, { "docid": "df94e23efb13d5fe9317c75dfcacb238", "score": "0.529567", "text": "def is_role(role):\n\n return isinstance(role, BaseRole)", "title": "" }, { "docid": "9bc3b3734c1126b30bdae2e7fa94d2ba", "score": "0.5287125", "text": "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "title": "" }, { "docid": "9bc3b3734c1126b30bdae2e7fa94d2ba", "score": "0.5287125", "text": "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "title": "" }, { "docid": "9bc3b3734c1126b30bdae2e7fa94d2ba", "score": "0.5287125", "text": "def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator", "title": "" }, { "docid": "c79aa845ebd678d96c8b49108e5111b5", "score": "0.52853805", "text": "def has_role(role, roles):\n if role is None or roles is None:\n return False\n return role.lower() in [r.lower() for r in roles]", "title": "" }, { "docid": "390b50569efb92032a076b6c81b1e5d8", "score": "0.52851695", "text": "def check_roles(self, role: Optional[schemas.Role], operation: Optional[str] = None):\n if self.role_name is None:\n logger.warning(\"No target role was provided in role check. Skipping check.\")\n return\n if not (role and operation):\n logger.warning(\"No role was provided in role check or operation does not required. Skipping role check.\")\n return\n if not operation in self.permissions:\n logger.warning(\n \"Operation '%s' is not specified for operator on model '%s'\", operation, self.__class__.__name__\n )\n return\n required_roles = [\n RoleTuple(role=self.role_name, permission=permission) for permission in self.permissions[operation]\n ]\n if not role_conforms(role, required_roles):\n raise exceptions.InsufficientPermissions([str(r) for r in required_roles])\n return", "title": "" }, { "docid": "a6ca186b6c4faf48cec40369230df038", "score": "0.5273022", "text": "def test_forced_identity(self):\n assert Role(\"foo\") is not Role(\"foo\")", "title": "" }, { "docid": "75c07537a957faecc97cc6e18ceb4b1a", "score": "0.5264748", "text": "def __ignore_basic_auth(self, properties):\n if properties is None:\n return False\n\n return properties.get('performsAuthorization') is True", "title": "" }, { "docid": "464363da3d34e8f9a48e8fcfe3a6cbe5", "score": "0.5261933", "text": "def orgOfferedOrgAdminRole(self):\n return self.org_role == ORG_ADMIN_ROLE", "title": "" }, { "docid": "58f4c02035bb358497e164165e6caae1", "score": "0.5249895", "text": "def is_moderator():\n try:\n role = User.query.filter_by(id = g.user.id).first().role\n if role > 0:\n return True\n else:\n return False\n # Catch exception when not logged in\n except AttributeError:\n return False", "title": "" }, { "docid": "7ac2303917cdb34f3c8f4c7e6e4be517", "score": "0.523977", "text": "def check_role(self, role_class, user, obj, new_perm):\n # remove the new permission because the old model doesnt have it\n perm_list = role_class.class_to_permissions[type(obj)]\n if new_perm in perm_list:\n # Make a copy so that we can modify it\n copy_list = perm_list[:]\n copy_list.remove(new_perm)\n\n return user.has_perms(copy_list, obj)", "title": "" }, { "docid": "61fe779fbbde216086b8a60026fb1ca1", "score": "0.52360606", "text": "async def remove(self, ctx, role: discord.Role):\n if role.id in (await self.profiles._get_guild_roles(ctx.guild)).values():\n await self.profiles._remove_guild_role(ctx.guild, role)\n await ctx.send(_(\"Role 
deleted.\"))\n else:\n await ctx.send(_(\"Remove a role from the list.\"))", "title": "" }, { "docid": "613b85d30c5be4430af895ee9551aa4c", "score": "0.52290684", "text": "async def role(self, ctx, *, role: typing.Union[discord.Role, str]):\n public_roles = ctx.bot.roles.get(ctx.guild.id, \"public_roles\", {})\n if isinstance(role, discord.Role):\n roles_by_id = {r[\"id\"]: r for k, r in public_roles.items()}\n # switch public_roles over to a dict with the role id as the key\n public_roles = roles_by_id\n role_key = role.id\n else:\n role_str = role.lower()\n if not role_str or role_str == \"list\":\n lines = [\"I'm aware of the following roles:\", \"\"]\n lines += [\n # \"`{0}`: <@&{1}> -- {2}\".format(k, r[\"id\"], r[\"description\"])\n \"`{0}`: {1} -- {2}\".format(k, r[\"name\"], r[\"description\"])\n for k, r in sorted(public_roles.items())\n ]\n await ctx.send(\n \"\\n\".join(lines),\n delete_after=ROLES_MESSAGE_DELETE_DELAY,\n # allowed_mentions=discord.AllowedMentions(\n # everyone=False, users=False, roles=False\n # ),\n )\n return\n role_key = role_str\n try:\n role_dict = public_roles[role_key]\n except KeyError:\n missingstr = (\n f\"I can't find that role. Try `{ctx.prefix}{ctx.command} list` for a\"\n f\" list of self-assignable roles.\"\n )\n await ctx.send(missingstr, delete_after=ROLES_MESSAGE_DELETE_DELAY)\n return\n member = ctx.message.author\n discord_role = ctx.guild.get_role(role_dict[\"id\"])\n if discord_role in member.roles:\n await member.remove_roles(discord_role)\n else:\n await member.add_roles(discord_role)", "title": "" }, { "docid": "16d0812719c080ec0eea11d89cb489d3", "score": "0.52168554", "text": "def test_handle_missing_authorization(authorizer):\n assert not authorizer.handle_missing_authorization()", "title": "" }, { "docid": "195babe1c6b1498e9846e6455e04a0a5", "score": "0.5213924", "text": "def PublishAddExcludeAuthrole(builder, excludeAuthrole):\n return AddExcludeAuthrole(builder, excludeAuthrole)", "title": "" }, { "docid": "1f24af4c9f29db60e7584a3d0d1bf914", "score": "0.52134967", "text": "def authorized(user):\r\n auth = False\r\n for r in user.roles:\r\n if r.permissions.administrator:\r\n auth = True\r\n\r\n return auth", "title": "" }, { "docid": "c30c7a12dcb24b1f952bb17fe0e15625", "score": "0.52103424", "text": "def not_disabled(self, user):\n\n if user.is_anonymous():\n return False\n else:\n return not user.is_disabled", "title": "" }, { "docid": "e5013631feb684908b0b97466d2246f5", "score": "0.5207754", "text": "def is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "title": "" }, { "docid": "ad5a4d9631438a7bf0eb519d3572b763", "score": "0.52042127", "text": "def store_attendant_authorised(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n role = user_role\n if role != 1:\n message = {\"msg\":\"This feature is available to only the store attendants!\"}\n response = Response(json.dumps(message), status=403)\n return response\n else:\n return fn(*args, **kwargs)\n return wrapper", "title": "" }, { "docid": "b93b430d99cba1a6d186464db169c52c", "score": "0.520329", "text": "def isdemisauce_admin():\n if not self.user:\n return False\n elif self.user.id == 1:\n return True\n return False", "title": "" }, { "docid": "73ddc45e2604fec75ff170a4b0b691a5", "score": "0.5203028", "text": "async def is_admin(self, ctx=None):\n async def predicate(ctx):\n if ctx.message.author.server_permissions.administrator:\n return True\n await 
ctx.send(\"Please have an admin use this command\")\n return False\n return commands.check(predicate)", "title": "" } ]
c732de38d48d22cb8ff99906815730e3
Returns argument at given index, else none.
[ { "docid": "5723224e2b2c339c3814707f91f0d21e", "score": "0.0", "text": "def get(self, x):\r\n try:\r\n return self.all[x]\r\n except IndexError:\r\n return None", "title": "" } ]
[ { "docid": "1b435c63ddb1ed3e184f9ee53599ada4", "score": "0.72445065", "text": "def arg(self, index):\n\n if self.current_node.is_a(lpl.CallExpr):\n return self._with_current_node(\n self.current_node.f_suffix[index].f_expr\n )\n elif self.current_node.is_a(lpl.AndExpr, lpl.OrExpr):\n if index == 0:\n return self._with_current_node(\n self.current_node.f_left\n )\n elif index == 1:\n return self._with_current_node(\n self.current_node.f_right\n )\n\n assert False", "title": "" }, { "docid": "00ef993fe88766c46f97e3a8f47232e5", "score": "0.72230095", "text": "def __call__(self, arguments): # suppress(no-self-use)\n try:\n return arguments[num]\n except IndexError:\n return None", "title": "" }, { "docid": "01c96ef86380e763e1b28f3500553974", "score": "0.72014165", "text": "def get_arg(self, i):\n return self.args[i]", "title": "" }, { "docid": "9eb06c47752c47e25ce752318e2bcdb8", "score": "0.716783", "text": "def get_arg(argument_number=1,default_argument=0):\n try:\n argument=sys.argv[argument_number]\n return argument \n except:\n return default_argument", "title": "" }, { "docid": "9e27ab7392667cb3cef0f582da97ea45", "score": "0.7031297", "text": "def arg (self, index_or_name) :\n if isinstance (index_or_name, pyk.int_types) :\n return self.argv [index_or_name]\n elif isinstance (index_or_name, pyk.string_types) :\n return self.arg_dict [index_or_name].value\n else :\n raise KeyError (index_or_name)", "title": "" }, { "docid": "0fd617c45da1e5a038efc2c64c051a81", "score": "0.68421346", "text": "def get_pos_only_arg(self, pos, name):\n if len(self.args) >= pos:\n arg = self.args[pos - 1]\n elif name not in self.kwargs:\n self.parser.report_error(self.func_name + \" misses argument \" + name)\n else:\n arg = self.kwargs[name]\n\n return arg", "title": "" }, { "docid": "c3f98ddd7143e1ba324456dd05bef333", "score": "0.68296385", "text": "def arg_i(arg, i, default=None):\n if arg is None:\n return default\n return arg[min(i, len(arg) - 1)]", "title": "" }, { "docid": "83223213f166ca6b07c481c6af2f2564", "score": "0.68091714", "text": "def one(idx):\n return idx - 1", "title": "" }, { "docid": "286a0abe07d501ec7601b9060bdf6b61", "score": "0.6768992", "text": "def __call__(self, arguments): # suppress(no-self-use)\n try:\n argument_index = 1\n for argument_index in range(1, len(arguments)):\n if arguments[argument_index - 1].contents == argument:\n break\n\n return arguments[argument_index]\n except IndexError:\n return None", "title": "" }, { "docid": "6d4fd82eef21e580e8e2d8d9b36e59c0", "score": "0.67642564", "text": "def argpos(self, name):\n for argid, arg in enumerate(self):\n if arg.name == name:\n return argid\n raise IndexError(\"Unknown argument: %s\" % name)", "title": "" }, { "docid": "4c9b5d338a615ada1859c742aa56e46d", "score": "0.6482989", "text": "def get_param_i(param, i):\n if len(param) > i:\n return param[i]\n else:\n return param[0]", "title": "" }, { "docid": "ad2d10e69d0cd7e6a846c9f568d0df90", "score": "0.6466943", "text": "def __getitem__(self, index):\n assert isinstance(index, int), \"Only integer input allowed\"\n index = index -1\n if index <0 or index>= self.count:\n raise IndexError\n\n return self.array[index]", "title": "" }, { "docid": "ca79f025de7a2beff10e8779d70a5109", "score": "0.6454986", "text": "def getitem(l, i, default):\n try:\n return l[i]\n except IndexError:\n return default", "title": "" }, { "docid": "cc91a873ee0899c8ba39ffc0786bf10e", "score": "0.6436853", "text": "def get_valued_arg (name):\n arg = '-' + name\n out = None\n if is_arg_passed(name):\n i = 
sys.argv.index(arg)\n if len(sys.argv) > i:\n out = sys.argv[i + 1]\n return out", "title": "" }, { "docid": "aa827cf9cd2e38a913062cc08b6d3635", "score": "0.64010525", "text": "def get_parameter_at_index(self, index):\n if self.size == 0:\n raise Exception(\"Parameter List is empty\")\n if (index < 0) or (index >= self.size):\n raise IndexError(\n \"index out of bounds, must be between 0 and {}\".format(\n self.size - 1))\n return self.parameter_list[index]", "title": "" }, { "docid": "d054da14dcd371a761ae030793737bac", "score": "0.63908285", "text": "def safe_get(list_, idx=0, default=None):\n try:\n return list_[idx]\n except IndexError:\n return default", "title": "" }, { "docid": "b219b4a2c3d5c2ee619602bb4b2d98e9", "score": "0.6383436", "text": "def get_index(self, index):\n if isinstance(index, int) and index >= 0 and index < len(self):\n return self.idxs[index]\n elif isinstance(index, str) and index in self.idxs:\n return index\n return None", "title": "" }, { "docid": "2ec7f5ca800c43eb8e68855e5b71c7d3", "score": "0.637907", "text": "def get_argument(self, name):\n return self.get_arguments()[name]", "title": "" }, { "docid": "4551e03260226e5dfd40011b53a61632", "score": "0.6378856", "text": "def safe_idx(seq, index):\n try:\n return type(seq[index])\n except IndexError:\n return None", "title": "" }, { "docid": "24aeb5c6a7d66ef0947bdbd667786d5b", "score": "0.63568205", "text": "def _maybe_get_index(param, opt, idx, decs):\n if not param:\n return -1\n if idx is not None:\n return idx\n for i, o in enumerate(decs[param].value):\n if str(o) == str(opt):\n return i", "title": "" }, { "docid": "3a048e5e70a91781aa3240166c23b9ce", "score": "0.6353696", "text": "def __call__(self, i):\n return self.__args[i]", "title": "" }, { "docid": "c6cc37b1276190cd5132e4392143ba75", "score": "0.6348913", "text": "def __get(self, nums, index):\n try:\n return nums[index]\n except IndexError:\n return 2147483647", "title": "" }, { "docid": "8b767171378be6d7feb2488061dc127b", "score": "0.63221824", "text": "def __getitem__(self, index: Tuple[int, int]) -> Union[None, bool]:\n _x, _y = index\n if _x > self._size or _x < 0 or _y > self._size or _y < 0:\n raise IndexError(\"Invalid index for ({x}, {y})\".format(x=_x, y=_y))\n return self._board[_x][_y]", "title": "" }, { "docid": "77d069ec5a6ac00a7ac5731897a64d39", "score": "0.631979", "text": "def get_subset(iterable, index):\n try:\n value = iterable[int(index)]\n except IndexError or TypeError:\n if TypeError:\n print(\"index provided is of an invalid type\")\n if IndexError:\n print(\"value does not exist at the index provided\")\n value = None\n\n return value", "title": "" }, { "docid": "b41bd5289be47a05b5ce8eeb0b9b7d0c", "score": "0.6315311", "text": "def getarg(argname, argspec, args, kw):\n argnames = argspec[0]\n try:\n argpos = argnames.index(argname)\n except ValueError:\n argpos = None\n if argpos is not None:\n if len(args) > argpos:\n return args[argpos]\n if argname in kw:\n return kw[argname]\n else:\n raise TypeError(\"could not find key argument %r in %r/%r (%r)\" % (\n argname, args, kw, argpos\n ))", "title": "" }, { "docid": "a0012d6dc331345511f9e3aa15996edf", "score": "0.63079095", "text": "def __getitem__(self, idx):\n return self.eval()[idx]", "title": "" }, { "docid": "e08d638768956663031978e9bb33af35", "score": "0.62990946", "text": "def __getitem__(index):", "title": "" }, { "docid": "11cea47f3c83443c29ca2e066f9f5d7a", "score": "0.6284572", "text": "def safe_list_get(arr: list, index: int, default: any = None):\n try:\n return 
arr[index]\n except IndexError:\n return default", "title": "" }, { "docid": "f2778e33446183ef45ef6c40fbe17489", "score": "0.6281679", "text": "def get_thing(self, idx):\n try:\n idx = int(idx)\n except ValueError:\n return None\n\n if idx < 0 or idx >= len(self.things):\n return None\n\n return self.things[idx]", "title": "" }, { "docid": "550b60e72c9109f4a744f25a7dfa39c9", "score": "0.6264729", "text": "def arg_pos (self, index_or_name) :\n if isinstance (index_or_name, pyk.int_types) :\n return self.argv [index_or_name].pos\n elif isinstance (index_or_name, pyk.string_types) :\n return self.arg_dict [index_or_name].pos\n else :\n raise KeyError (index_or_name)", "title": "" }, { "docid": "03f584b0ac41f05aaecbf3ce3b86746b", "score": "0.61941165", "text": "def __getitem__(self, index: int) -> float:\n if index == 0:\n return self.x\n elif index == 1: \n return self.y\n elif:\n raise IndexError", "title": "" }, { "docid": "4182cba91bc2d9186d188105bd6e5698", "score": "0.61828977", "text": "def __getitem__(self, index):\r\n # NOTE: this automatically supports slicing :-)\r\n return self._main._sequence[index]", "title": "" }, { "docid": "4938c1f170d0ed44b8f6ecc4020364bd", "score": "0.6167297", "text": "def _find_arg_no(num):\n class Finder(mixin, object): # suppress(too-few-public-methods)\n \"\"\"Finder for arguments after num.\"\"\"\n\n def __call__(self, arguments): # suppress(no-self-use)\n \"\"\"Return argument at num.\"\"\"\n try:\n return arguments[num]\n except IndexError:\n return None\n\n return Finder()", "title": "" }, { "docid": "5cdc65577dfed05efa36ca8e9ead39c1", "score": "0.61555254", "text": "def indexof_one(self, member):\n script = self.load_script('deque_indexof')\n r = self._run_lua_script(script, [self._key], [member])\n return None if r[0] == -1 else r[0]", "title": "" }, { "docid": "dec453e0bd92d2d47b9bfb4ad3d0786b", "score": "0.6148281", "text": "def __getitem__(self, item: int) -> int:\n if item == 0:\n return self.x\n if item == 1:\n return self.y\n if item == 2:\n return self.z\n raise IndexError", "title": "" }, { "docid": "04b30a07af97ee35a60b8c894e461429", "score": "0.61445487", "text": "def lget(lst, idx, default=None):\n try:\n return lst[idx]\n except IndexError:\n return default", "title": "" }, { "docid": "ce3372b950b5847bfe10b688d60055d5", "score": "0.6140189", "text": "def get(self, idx):\n return self.lst[idx] # Subject to range exception", "title": "" }, { "docid": "bfd238af1d519bab9d1e8cc668490a2c", "score": "0.6121619", "text": "def __getitem__(self, index):\n\n if index < 0:\n raise IndexError(f'negative index: {index}')\n\n i = index\n for value in self: # May raise ValueError at some point if @self is not a list\n if i == 0:\n return value\n i -= 1\n \n raise IndexError(f'index too large: {index}')", "title": "" }, { "docid": "27bf55c81570933ce8a24e940e8241f2", "score": "0.6118458", "text": "def ifNotEmptyGetIndex(self,somelist,index=0):\n if somelist: \n return somelist[index]\n else:\n return ''", "title": "" }, { "docid": "2d52584f53107a398bc7086d5a332ac8", "score": "0.61050767", "text": "def peek(ind: int = 0) -> Any:\n\n print(ind)\n return None if ind >= len(my_gueue) else my_gueue[ind]", "title": "" }, { "docid": "2d89a3382d8efba43d35d1291575ab9d", "score": "0.60997826", "text": "def __getitem__(self, i):\n if isinstance(i, TypedExpr):\n return TupleIndex(self, i)\n else:\n return self.args[i]", "title": "" }, { "docid": "7f1a696bb42bbe133a676ca60b0e2903", "score": "0.6098257", "text": "def maybe_get(self, condition, index):\n return 
self[condition * index].zero_if_not(condition)", "title": "" }, { "docid": "b1b9ed4930dca684fba92365e87b5864", "score": "0.6090736", "text": "def __getitem__(self, index: int | slice) -> int:\n if isinstance(index, slice):\n return self.__getslice__(index)\n return (self.__value >> index) & 1", "title": "" }, { "docid": "7a3ed77078d9df4bec09c85f78e1fbcd", "score": "0.6081032", "text": "def get_argument(self, name, default=..., strip=...):\n ...", "title": "" }, { "docid": "cb04ebdf0d19e7765fc1a174d69377ec", "score": "0.6055748", "text": "def __getitem__(self, index):\r\n index, item = self._list.__getitem__(index)\r\n return item", "title": "" }, { "docid": "d2af53d1538dc1a41d82917280abd536", "score": "0.6053593", "text": "def index(self, x):\n return None", "title": "" }, { "docid": "0057435041de9b803816d19417aa556e", "score": "0.605194", "text": "def getByIndex(self, index):\n\t\treturn self.__queue.get(index)[1]", "title": "" }, { "docid": "d813849cd3850ed07d325576f47dad81", "score": "0.60506374", "text": "def __getitem__(self, i):\n result = super().__getitem__(i)\n if self.mode == ArgMode.DESC:\n return result\n assert self.mode == ArgMode.USE\n return result.value", "title": "" }, { "docid": "c6c06445d907ac1c64fe87371adb5a0e", "score": "0.60445076", "text": "def __getitem__(self, index):\n assert 0 <= index < self._n, 'Invalid Index'\n return self._array[index]", "title": "" }, { "docid": "01a01b24b11f60206d8f5504e6abbe58", "score": "0.60128987", "text": "def item(cls, index):\n return operator.itemgetter(index)", "title": "" }, { "docid": "e5d82a1796ea5974bd5a4cbb29c78782", "score": "0.6012343", "text": "def index_index_value(nums):\n if nums[-1] >= len(nums):\n return -1\n return -2 if nums[nums[-1]] >= len(nums) else nums[nums[nums[-1]]]", "title": "" }, { "docid": "fe66e6f4ad7c886e8841a7e3f45582b5", "score": "0.6008781", "text": "def get_dataloader_item(dataloader,indx):\n etl = enumerate(dataloader)\n found = False\n for idx, inputs in etl:\n if idx==indx:\n return inputs\n if found == False:\n return \"Index not found\"", "title": "" }, { "docid": "b5c9c09ac3af1886e67ea1750bff02d9", "score": "0.60063475", "text": "def lget(l, i, default=None):\n\n try:\n return l[i]\n except IndexError:\n return default", "title": "" }, { "docid": "f08298933fa8334e501b693ebc0906db", "score": "0.5984481", "text": "def findArg(self, name, pars):\n ii = 0\n while ((ii < pars.__len__()) and (type(pars[ii]).__name__ == 'tuple')):\n if (pars[ii][0] == name):\n return ii\n ii += 1\n return -1", "title": "" }, { "docid": "b30b79c6bc0ba4d9675d17d89a87f924", "score": "0.5978397", "text": "def at(self, index):\n if not 0 <= index < self.SIZE:\n raise IndexError(\"Index out of range!!\")\n return self.arr[index]", "title": "" }, { "docid": "2cc45b014ff9ffcabc9a3aaa3dcea6af", "score": "0.59749025", "text": "def __getitem__(self, index):\n actual = getattr(self._sequence, \"actual_result_count\", None)\n if actual is not None and actual != len(self._sequence) and index < self.length:\n # optimized batch that contains only the wanted items in the\n # sequence\n return self._sequence[index]\n if index < 0:\n if index + self.end < self.first:\n raise IndexError(index)\n return self._sequence[index + self.end]\n if index >= self.length:\n raise IndexError(index)\n return self._sequence[index + self.first]", "title": "" }, { "docid": "4d15a7b89b413e7b6299969ccd715159", "score": "0.59705377", "text": "def __getitem__(self, index):\n\t\tif not isinstance(index, int) or index < 0 or index >= self.dim:\n\t\t\traise 
IndexError(\"Invalid Index#: \" + str(index))\n\t\treturn self.data[index]", "title": "" }, { "docid": "0c11175c4537c5e9f058a835f8d94592", "score": "0.5961961", "text": "def get_arg(label):\n if label in sys.argv:\n return sys.argv[sys.argv.index(label) + 1]\n return None", "title": "" }, { "docid": "2396e49b34f9a19a23c76c54f6fdc228", "score": "0.59523696", "text": "def __getitem__(self, index):\n if type(index) is not int:\n raise TypeError\n elif index >= len(self) or index < -len(self):\n raise IndexError\n else:\n return self._convo[index] if index >= 0 else self._convo[len(self) + index]", "title": "" }, { "docid": "60008fb035f76bf97f707abd5f7640fc", "score": "0.5950407", "text": "def getNumberAtIndex(line : str, index : int) -> str:\n try:\n if index < 0:\n return numericExpression.findall(line)[index]\n else:\n for idx, word in enumerate(numericExpression.finditer(line)):\n if idx == index:\n return word.group()\n else:\n return None\n except IndexError:\n return None", "title": "" }, { "docid": "18c51f5686240c610af7ff6b1659e707", "score": "0.5940959", "text": "def choice_return_first(*args, **kwargs):\n return args[0][0]", "title": "" }, { "docid": "71ee18676acb6b4c1d7aab93ad18f624", "score": "0.5929344", "text": "def __getitem__(self, index):\r\n return self.val[index]", "title": "" }, { "docid": "03622356ceff0773a7131ec2c968e8bf", "score": "0.5925957", "text": "def get_string_idx(self, idx):\n if idx < len(self.strings):\n return self.strings[idx][:-1]\n return None", "title": "" }, { "docid": "298cb2165587b419d3d4ca4efc0ec39c", "score": "0.59218", "text": "def getWordAtIndex(line : str, index : int) -> str:\n if index < 0 or useStringSplit:\n try:\n return line.split()[index]\n except:\n return None\n else:\n for idx, word in enumerate(wordExpression.finditer(line)):\n if idx == index:\n return word.group()\n return None", "title": "" }, { "docid": "786aab42e392bb4ab1f718928d4df617", "score": "0.59211046", "text": "def __getitem__(self,i) :\n s = self.__list__()\n if type(i) == int :\n if i == 0 : return s[0]\n elif i == 1 : return s[1]\n else : raise IndexError\n if type(i) == tuple :\n if (len(i) != 2) or (type(i[0]) != int ) : raise IndexError\n return s[i[0]][i[1]]", "title": "" }, { "docid": "b038745eb52dd7ccee780e65638de250", "score": "0.5915314", "text": "def pop(self, x):\r\n try:\r\n return self._args.pop(x)\r\n except IndexError:\r\n return None", "title": "" }, { "docid": "d23ace5ceea8dcd57253eb1217a0f674", "score": "0.587823", "text": "def first_exists(*args):\n if not args:\n raise ValueError('Requires one or more arguments')\n\n return next((item for item in args if item),\n args[-1])", "title": "" }, { "docid": "806ffe54261ec7690a56f72fa6850ed9", "score": "0.58678406", "text": "def __getitem__(self, index):\n if not isinstance(index, int):\n raise TypeError(\"index must be an integer\")\n return self._entries[index]", "title": "" }, { "docid": "91b420293cffe78e0cbbf91aeb0b41cd", "score": "0.5865526", "text": "def arg_keyword(self, index):\n assert self.current_node.is_a(lpl.CallExpr)\n kw = self.current_node.f_suffix[index].f_name\n return kw.text if kw is not None else None", "title": "" }, { "docid": "2ed764d177050be35fc78bae22ca6f4b", "score": "0.58562165", "text": "def get_attribute_value(self, index):\n assert self._as_parameter_\n return zzub_plugin_get_attribute_value(self, index)", "title": "" }, { "docid": "c8d98cddace7d96fb40e2b3e17fc07b2", "score": "0.58528864", "text": "def __getitem__(self, index):\n if index == 0:\n return self._lep\n if index == 
1:\n return self._rep", "title": "" }, { "docid": "adec11fa007cd2394f8bc14ddb0c735a", "score": "0.5850454", "text": "def __getitem__(self, index: int) -> int:\n ...", "title": "" }, { "docid": "40cdc7594bb448744b2be9c3eefb3ddd", "score": "0.5848692", "text": "def get(self, index: int | pli.Series | list[int]) -> pli.Series:", "title": "" }, { "docid": "3311a4d66bd45c6c73537a9f4e437c7e", "score": "0.5848453", "text": "def arg(self):\n return self.args[0]", "title": "" }, { "docid": "bea90985cabfad78377efe704983b7a7", "score": "0.58473134", "text": "def yank_nth_arg(event: E) -> None:\n n = event.arg if event.arg_present else None\n event.current_buffer.yank_nth_arg(n)", "title": "" }, { "docid": "cd1df8f3f8919a65f84f2579a7ea9ba3", "score": "0.58461046", "text": "def attrib_by_index(self, index):\n if self.attributes is None:\n return None\n str_index = str(index)\n for attrib in self.attributes:\n if attrib.index == str_index:\n return attrib\n return None", "title": "" }, { "docid": "171ba73f009c0b79b87dff7b73968a5c", "score": "0.5845537", "text": "def get_kwarg(self, pos, name, default):\n if len(self.args) >= pos:\n arg = self.args[pos - 1]\n elif name in self.kwargs:\n arg = self.kwargs[name]\n else:\n return default\n\n return arg", "title": "" }, { "docid": "49561c404bd63df9c759457731bda918", "score": "0.5837347", "text": "def get_arg(key, default=None):\n # type: (str, Any) -> Any\n if default is None:\n default = \"\"\n return plugin.args.get(key, [default])[0]", "title": "" }, { "docid": "fb93ed64cbc9b87ef666762bdb83c015", "score": "0.5829505", "text": "def get(self, index):\n \n for begin, end, value in self.values:\n if begin <= index < end:\n return value\n raise IndexError('list idnex out of range')", "title": "" }, { "docid": "ffe1f6c939760b67a2cf083294adda85", "score": "0.5826634", "text": "def repeater(index):\n global __last_commands__\n try:\n call_data = __last_commands__[index]\n return call_data[0](*call_data[1], **call_data[2])\n except IndexError:\n return None", "title": "" }, { "docid": "22f827aaf08b84c12f86009e15d22289", "score": "0.58199567", "text": "def _get_column_by_idx(self, idx):\n if idx is None or idx < 0 or idx >= len(self._list_columns):\n return None\n\n return self._list_columns[idx]", "title": "" }, { "docid": "7e40807984fbe9c5d6ff7e55db25c73f", "score": "0.5816621", "text": "def get(self, index):\n return self._apply(weldstr.get, index)", "title": "" }, { "docid": "8fea397ae6434ae256b238f3c5e09cb5", "score": "0.58132523", "text": "def get_first_arg_index(mat: torch.Tensor, label: int) -> int:\n return int(torch.where(mat == label)[0][0])", "title": "" }, { "docid": "df854bfa593a2aa602471f6ca814b347", "score": "0.58001286", "text": "def if_(*args):\n assert len(args) >= 2\n for i in range(0, len(args) - 1, 2):\n if args[i]:\n return args[i + 1]\n if len(args) % 2:\n return args[-1]\n else:\n return None", "title": "" }, { "docid": "724494bb8646f4b2a913799a9795863d", "score": "0.57978064", "text": "def index0(x: iter):\n return x[0]", "title": "" }, { "docid": "e90f21127f523747f71ceab99323c1d4", "score": "0.5794962", "text": "def value_at(head: Optional[Node], index: int) -> Optional[int]:\n if head is None:\n return None\n else:\n if index == 0:\n return head.data\n else:\n return value_at(head.next, index - 1)", "title": "" }, { "docid": "ab5d54d38fdce1504b91289af9e03187", "score": "0.5792284", "text": "def get(self, index: int) -> int:\n if index < 0 or index > len(self.deque) - 1:\n return -1\n return self.deque[index].val", "title": "" }, { 
"docid": "6a4ffcb41eecb15bf394c7127b86288f", "score": "0.5788428", "text": "def __getitem__(self, index):\n return self._array[index]", "title": "" }, { "docid": "e9d1fd47c110ac875abf388ca1dfe205", "score": "0.5785478", "text": "def get(self, idx):\n try:\n retval = self.msg[idx]\n except:\n retval = False\n print \"Index out of range\"\n return retval", "title": "" }, { "docid": "2c991e02e1a0f19dabf1c84fb6b8c6f7", "score": "0.57845616", "text": "def index(IndexAble, i):\n return IndexAble[i-1]", "title": "" }, { "docid": "2e4c18be7bf742623cd69048f2d7442d", "score": "0.57834125", "text": "def __getitem__(self, index):\n return self.seqlist[index]", "title": "" }, { "docid": "834cbb4c39df1f04a6132dfcfab75e3a", "score": "0.57772624", "text": "def pop(self, index: int = None):\n if None:\n return super().pop(randint(0, len(self)-1))\n else:\n return super().pop(-1)", "title": "" }, { "docid": "d5fc9b6e55b2bc4bbd56d4da3be9dff4", "score": "0.5775966", "text": "def index1(x: iter):\n return x[1]", "title": "" }, { "docid": "f9bdd80fa8654abed8fc9e97823679ba", "score": "0.57716995", "text": "def __getitem__(self, index):\n return self.getPixel(index[0], index[1])", "title": "" }, { "docid": "9545ddc2a15e4beef69c9580414b152c", "score": "0.57640105", "text": "def findElementByIndex(self, tag, indexNum, filter=None, elementList=None): \r\n try:\r\n myElements = self.getElementsList(tag, filter=None, elementList=None)\r\n return myElements[indexNum]\r\n except:\r\n (ErrorType,ErrorValue,ErrorTB)=sys.exc_info()\r\n print (sys.exc_info())\r\n traceback.print_exc(ErrorTB)\r\n return None\r\n else:\r\n return None", "title": "" }, { "docid": "d5c71b2a4b6377e20777ceebc433a0ab", "score": "0.57626706", "text": "def __getitem__(self,idx):\n try:\n return self.parent[self.full_index(idx)]\n except Exception as ex:\n return None", "title": "" }, { "docid": "ab3d001b980400a4743f3efbbe0b75ab", "score": "0.57622576", "text": "def _arg_index_of(func, name):\n argspec = inspect.getargspec(func)\n for i in range(len(argspec[0])):\n if (argspec[0][i] == name):\n logger.debug(\"argspec[0][{0}]=={1}\".format(i, name))\n return i\n return -1", "title": "" }, { "docid": "6cbf4a60e8711fb17966ca6fb7b48248", "score": "0.5760603", "text": "def get_chosen_item (index_label, item_index, items):\n\n for item in items: \n if item.get(\"{0}\".format(index_label)) == item_index:\n return item", "title": "" }, { "docid": "fe528b1114620a01af7bc390f00755da", "score": "0.57564306", "text": "def __getitem__(self, i):\n return self.get(i, i + 1)", "title": "" }, { "docid": "791da9c2b7dcfe394fcc4669528f8e62", "score": "0.57486445", "text": "def __getitem__(self, index):\r\n pointer = self._getnode(index)\r\n \r\n if pointer is not None:\r\n return pointer.data\r\n \r\n raise IndexError('list index out of range')", "title": "" }, { "docid": "d335a4f3eaa17dba82ef848cc9d019f4", "score": "0.5748025", "text": "def getindex(arr, val, default=-1):\n return arr.index(val) if val in arr else default", "title": "" } ]
b5d32bc7d4481a83810dc500fab61f8f
Render the output into the proper format.
[ { "docid": "75e6d2cf5d65171f1b0fe0b14321f787", "score": "0.0", "text": "def process_result(results, output_format, **kwargs):\n module_name = 'monitorstack.common.formatters'\n method_name = 'write_{}'.format(output_format.replace('-', '_'))\n output_formatter = getattr(\n importlib.import_module(module_name),\n method_name\n )\n\n # Force the output formatter into a list\n if not isinstance(results, list): # pragma: no cover\n results = [results]\n\n exit_code = 0\n for result in results:\n output_formatter(result)\n if result['exit_code'] != 0:\n exit_code = result['exit_code']\n else:\n sys.exit(exit_code)", "title": "" } ]
[ { "docid": "c25af8348f8522a3cff527bba3ff3fa4", "score": "0.72547436", "text": "def render():", "title": "" }, { "docid": "5ef62b7811a31f1f104641cb0374764d", "score": "0.68567944", "text": "def render(self):\n pass", "title": "" }, { "docid": "5ef62b7811a31f1f104641cb0374764d", "score": "0.68567944", "text": "def render(self):\n pass", "title": "" }, { "docid": "5ef62b7811a31f1f104641cb0374764d", "score": "0.68567944", "text": "def render(self):\n pass", "title": "" }, { "docid": "036a62818d09c68a1dce4859084ca84e", "score": "0.6747799", "text": "def render(self, mode='human'):", "title": "" }, { "docid": "036a62818d09c68a1dce4859084ca84e", "score": "0.6747799", "text": "def render(self, mode='human'):", "title": "" }, { "docid": "13cc7edd25602ffedbb108324727c66b", "score": "0.67007977", "text": "def render(self):\n raise NotImplementedError", "title": "" }, { "docid": "7160846a797db8b8dcf24eb398730e74", "score": "0.66916245", "text": "def render(self, mode='human'):\n pass", "title": "" }, { "docid": "50eadfa6e28b8bf7db05aa517d4d17be", "score": "0.665088", "text": "def output(self):\n pass", "title": "" }, { "docid": "50eadfa6e28b8bf7db05aa517d4d17be", "score": "0.665088", "text": "def output(self):\n pass", "title": "" }, { "docid": "68665f3266468c7b5116c79198fcea34", "score": "0.66461754", "text": "def __call__(self):\n return self.render()", "title": "" }, { "docid": "d0e0bbc206780765c61c831b90bd7e2b", "score": "0.66406584", "text": "def output(self):\r\n pass", "title": "" }, { "docid": "d0e0bbc206780765c61c831b90bd7e2b", "score": "0.66406584", "text": "def output(self):\r\n pass", "title": "" }, { "docid": "185ccf2e06b1c997441b93929b9096a2", "score": "0.6615272", "text": "def render(self):\n return self.renderer.render(self.parsed, self.keys)", "title": "" }, { "docid": "bda54c533cf9c2250b0bfa8c0ab0cfa8", "score": "0.658594", "text": "def _format_output(self, output):\n return list(self.models.values())[0]._format_output(output)", "title": "" }, { "docid": "e50f3cd1773f8786063a5f652e5b5684", "score": "0.6580442", "text": "def __call__(self):\r\n return self.render()", "title": "" }, { "docid": "1089f58ca7306e6ddf5fc041b2002945", "score": "0.6530993", "text": "def render(self, console):\n pass", "title": "" }, { "docid": "0455532573c58b6b53811f7a1066cf0a", "score": "0.65225536", "text": "def render(self, output):\n self._table._output = output\n\n return self._table.render()", "title": "" }, { "docid": "c29fa1bc9a701c00ef8f856e6905fa7f", "score": "0.65222025", "text": "def render(self):\n raise NotImplementedError()", "title": "" }, { "docid": "e725fa10e7516459fe20836a56dfc3c3", "score": "0.6486291", "text": "def write_output(self) -> None:", "title": "" }, { "docid": "2e23bd16a286b993c3dfdd6294763054", "score": "0.6429051", "text": "def render(self, context, output_format):\n\n tpl = self.templates.get(output_format, None)\n if tpl is None:\n raise Exception('cannot found {0} template, please check.'.format(output_format))\n content = tpl.render(context)\n\n if output_format == 'FHIR-XML':\n node_str_arr = ['<?xml version=\"1.0\" encoding=\"UTF-8\"?>', ]\n process_node(node_str_arr, json.loads(content), 0)\n content = '\\n'.join(node_str_arr)\n elif output_format == 'FHIR-JSON':\n content = json.dumps(json.loads(content), indent=2)\n return content", "title": "" }, { "docid": "6d1824ff861efc1ea381d954e409cfb9", "score": "0.6400087", "text": "def render(self):\n result = self.execute()\n\n if result is None or len(result) == 0:\n print(self.color_text(self.ICON, self.ICON_COLOR))\n 
return\n\n text = ''\n if type(result) is list:\n text = ' '.join(result)\n else:\n text = result\n\n print('{icon} {text}'.format(\n icon=self.color_text(self.ICON, self.ICON_COLOR),\n text=text,\n ))", "title": "" }, { "docid": "b8cd5a0e99dbd96044367211044c6c88", "score": "0.63968295", "text": "def _render(self, mode='human', close=False):\n s = \"state: {:2d} reward: {:2d} info: {}\"\n print(s.format(self.state, self.reward, self.info))", "title": "" }, { "docid": "dd7f8321163c1db6a1f9cfe125a073b4", "score": "0.63952184", "text": "def render():\n\tglobal hol, tolapi\n\n\t# We walk the curr_render dictionary, and render each entry to the buffer\n\t#\n\tfor a_shape in curr_render:\n\t\t((shape_data, colorval, user), timestamp) = curr_render[a_shape]\n\n\t\t# Now render the colors onto those holidays\n\t\trgb = makeRGB(colorval)\n\t\tfor string_number in shape_data[1]:\n\t\t\tlogging.debug(\"Writing to string %d\" % string_number)\n\t\t\thol.fill(string_number, rgb[0], rgb[1], rgb[2])\n\n\t# Now that we have everything, render to a byte array and send it out\n\tpkt = hol.render()\n\ttolapi.transmit(pkt)\n\treturn", "title": "" }, { "docid": "e0411b41171c2d4a1480453a197c75f4", "score": "0.638653", "text": "def render(self, **args):\r\n stdout = []\r\n self.execute(stdout, **args)\r\n return stdout", "title": "" }, { "docid": "c5a33d99ac5053583119f3d6ccb0c5e6", "score": "0.63787407", "text": "def render(self, compress=False):\n raise NotImplementedError", "title": "" }, { "docid": "f0015b73c5577161cdb7f970b87e66b7", "score": "0.63461816", "text": "def render(self, file_out, cur_ind=\"\"):\n pass", "title": "" }, { "docid": "512d18ff26f9bf8bf03cc6bd791d3b5f", "score": "0.6337742", "text": "def render(self, data):\n pass", "title": "" }, { "docid": "d58836b3cdb2923fc87f1196eb2fff0b", "score": "0.63364995", "text": "def format(self, result, ostream):\n raise NotImplementedError", "title": "" }, { "docid": "2a019f959ac0c34ec16cb67abf55b784", "score": "0.6316891", "text": "def render(self):\n unique_words, vectors200 = self.run_word2vec()\n self.create_dirs()\n\n print \"Writing data to \" + \"\\033[1m\" + self.dst_v200 + \"\\033[0m\"\n with open(self.dst_v200, 'w+') as f_v200:\n f_v200.write(str(len(unique_words)) + \" \" + \"200\" + \"\\n\")\n for word, vector in zip(unique_words, vectors200):\n f_v200.write( word + \" \")\n for element in vector:\n f_v200.write(str(element) + ' ')\n f_v200.write(\"\\n\")\n\n print \"-\"*80", "title": "" }, { "docid": "d7dd163e501842642e79f40085ff5292", "score": "0.6264013", "text": "def _render_output_html(self):\n enl = self.getNewsletter()\n # get out_template from ENL object and render it in context of issue\n out_template_pt_field = enl.getField('out_template_pt')\n ObjectField.set(out_template_pt_field, self, ZopePageTemplate(\n out_template_pt_field.getName(),\n enl.getRawOut_template_pt()))\n output_html = safe_portal_encoding(self.out_template_pt.pt_render())\n output_html = compactify(output_html, filter_tags=False)\n return output_html", "title": "" }, { "docid": "d1ea0e0a27ac2d8ac24d52f8c332ee61", "score": "0.6255284", "text": "def render(self):\n self.__exit__(None, None, None)", "title": "" }, { "docid": "d1ea0e0a27ac2d8ac24d52f8c332ee61", "score": "0.6255284", "text": "def render(self):\n self.__exit__(None, None, None)", "title": "" }, { "docid": "cd406e35d4f2c7480ec9818c4e5bc69b", "score": "0.6248067", "text": "def render(self, *_args):\n task_chart.render(self.entries)\n entry_table.render(self.entries)\n if self.tracking:\n 
timer_display.render(time_to_render=self.time_delta)\n else:\n timer_display.render(time_to_render=datetime.datetime.now())", "title": "" }, { "docid": "38f50c927d0ace9b452886cc48ca7dcf", "score": "0.62261516", "text": "def output(self):\n if not os.path.exists(\"output\"):\n os.makedirs(\"output\")\n if self.method == \"embed\":\n self.output_embedded_image()\n else:\n self.output_decoded_text()", "title": "" }, { "docid": "b411578a9020ebde943f2fc5aff58dd0", "score": "0.62246966", "text": "def output_results(self):\n self.__count += 1 # new line of output\n # 1. retrieve and store the values from model variables\n if self.bdailyrun:\n self.output_dailyrun(False)\n else:\n self.output_hourlyrun(False)\n # 2. output daily results:\n output = self.__str[0].format(*self.out.values())\n self.fout.write(output + '\\n')\n # 3. output any auxiliaries:\n self.output_auxiliary()\n # 4. show progress of run\n if self.bdailyrun:\n self.progbar.update(self.__count)", "title": "" }, { "docid": "a1290fda2f95a69769ca82550c588ec5", "score": "0.6217824", "text": "def render_impl(self):\n raise NotImplementedError", "title": "" }, { "docid": "4ca4e941d7db196dd76e9f34909df899", "score": "0.6211879", "text": "def format(self, content):\n\n if self.which == \"xml\":\n content = self.renderXml(content).encode(\"utf-8\")\n elif self.which == \"johnxml\":\n content = self.renderJohnXml(content).encode(\"utf-8\")\n elif self.which == \"oldxml\":\n content = self.renderOldXml(content).encode(\"utf-8\")\n\n return content", "title": "" }, { "docid": "f47f587f77375e27ab1c568a2e25503c", "score": "0.61825514", "text": "def output(self):\n\t\tself.normalize()\n\t\tself.order()\n\t\tdata = [self.fields]\n\t\tfor instance in self.normalized_instances:\n\t\t\trow = [instance[key] for key in self.fields] \n\t\t\tdata.append(row)\n\t\ttable = AsciiTable(data)\n\t\tprint table.table", "title": "" }, { "docid": "f66da46e58302d8fd009441bb09fc187", "score": "0.617808", "text": "def _render_buffer(self, buffer: Iterable[Segment]) -> str:\n output: List[str] = []\n append = output.append\n color_system = self._color_system\n legacy_windows = self.legacy_windows\n not_terminal = not self.is_terminal\n if self.no_color and color_system:\n buffer = Segment.remove_color(buffer)\n for text, style, control in buffer:\n if style:\n append(\n style.render(\n text,\n color_system=color_system,\n legacy_windows=legacy_windows,\n mxp=self._mxp,\n pueblo=self._pueblo,\n links=self._links,\n )\n )\n elif not (not_terminal and control):\n append(text)\n\n rendered = \"\".join(output)\n return rendered", "title": "" }, { "docid": "31ad551df59d785b35ee251c249fe628", "score": "0.61696064", "text": "def display(self):\r\n if self.formatter and self.verbose > 0:\r\n res = self.results\r\n if res:\r\n print >> self.out, \"\"\r\n for a in self.formatter(res, map(lambda x: x[0], self.headers)):\r\n print >> self.out, a\r\n print >> self.out, \"\"", "title": "" }, { "docid": "57303d5799caa1495f64a01a8d699382", "score": "0.61434746", "text": "def render(self, output='str'):\n if output not in ['str', 'list']:\n raise ValueError('output should be list or str')\n\n rendered_rows = []\n\n horizontal_splitter = self.renderHorizontalSplitter(self.column_widths)\n\n # step 1\n rendered_rows.append(horizontal_splitter)\n\n # step 2\n rendered_rows.append(self.renderRow(0))\n\n # step 3\n rendered_rows.append(horizontal_splitter)\n\n if self.row_count > 1:\n # step 4\n for i in range(self.row_count)[1:]:\n rendered_rows.append(self.renderRow(i))\n\n # step 5\n 
rendered_rows.append(horizontal_splitter)\n\n if output == 'str':\n return '\\n'.join(rendered_rows)\n else:\n return rendered_rows", "title": "" }, { "docid": "b511d0392bd4e8721d999bda807b6d29", "score": "0.6139907", "text": "def index(self):\n rstfile, htmlfile, newfile, mld, rstdata, settings = self.state.index()\n return format_output(rstfile, htmlfile, newfile, mld, rstdata, settings, self.state)", "title": "" }, { "docid": "e4831c7b42a45fac61950c7288be6917", "score": "0.6117997", "text": "def render(self):\n r = TemplateIO(html=True)\n r += self._render_start()\n r += self._render_body()\n r += self._render_finish()\n return r.getvalue()", "title": "" }, { "docid": "fdb2e3c7e635d1117b4dd01db9793800", "score": "0.6100429", "text": "def render(self, **args):\r\n raise NotImplementedError", "title": "" }, { "docid": "57b797be06a43b27c3977f85a057130c", "score": "0.60978997", "text": "def render(self):\n self.env.render()", "title": "" }, { "docid": "57b797be06a43b27c3977f85a057130c", "score": "0.60978997", "text": "def render(self):\n self.env.render()", "title": "" }, { "docid": "57b797be06a43b27c3977f85a057130c", "score": "0.60978997", "text": "def render(self):\n self.env.render()", "title": "" }, { "docid": "d43cd77a5b73e1e0ef5b61542ce8da43", "score": "0.6079293", "text": "def render(self):\r\n return None", "title": "" }, { "docid": "4f13788e1c98156df52d5acdc5f64e9c", "score": "0.6063218", "text": "def render(self, file_out, cur_ind=\"\"):\n file_out.write('{}<{}>'.format(cur_ind, self.tag))\n for con in self.content:\n file_out.write(con)\n file_out.write('</{}>\\n'.format(self.tag))", "title": "" }, { "docid": "05302b49ed835f48484ed90db64fe5d1", "score": "0.6054268", "text": "def render(self, mode='human', close=False):\n pass", "title": "" }, { "docid": "d229771c00219a4742952bcbec52d33a", "score": "0.60503936", "text": "def render(self, format_string=None):\n return \"\"", "title": "" }, { "docid": "4962f8a85557efa8546080f9e55af4fa", "score": "0.6050227", "text": "def render(self, *args) -> \"void\":\n return _coin.SoRenderManager_render(self, *args)", "title": "" }, { "docid": "35a2771fad4ef7fd79ac6eedd55910c7", "score": "0.6047248", "text": "def process_output(self, ctx):", "title": "" }, { "docid": "f1c8aa5a192c0e2e037fafb17c9d9996", "score": "0.6034899", "text": "def render(self):\n chart_dir = self.config['chart_dir']\n if not os.path.exists(chart_dir):\n os.mkdir(chart_dir)\n\n template = self.env.get_template(\"chart.html\")\n formatted_rows = []\n for row in self.chart['data_rows']:\n formatted_rows.append(self._format_row(row))\n self.chart['data_rows'] = formatted_rows\n context = {\n 'chart': self.chart,\n 'base_currency': self.config['base_currency'],\n }\n html = template.render(context)\n logging.debug(\"html=\\n{}\".format(html))\n\n filepath = self._get_filepath(chart_dir)\n logging.debug(\"filepath={}\".format(filepath))\n\n open(filepath, \"w\").write(html)\n logging.info(\"{} generated\".format(filepath))\n\n return 0", "title": "" }, { "docid": "d25d4afbd359be10bed29ed7699895b9", "score": "0.6031096", "text": "def render(self):\n return self.child().render()", "title": "" }, { "docid": "881340f3ed1ba6ca240a080a3dc774b2", "score": "0.6025321", "text": "def createOutput(self):\n from rmgpy.chemkin import save_html_file\n if self.foreign:\n # Chemkin file was not from RMG, do not parse the comments when visualizing the file.\n save_html_file(self.path, read_comments=False)\n else:\n save_html_file(self.path)", "title": "" }, { "docid": 
"9d8a1f24dd772af88e4960919a0f86cc", "score": "0.6016343", "text": "def report(self):\r\n global HTML_START, HTML_END\r\n \r\n html_report = self.process_results()\r\n return HTML_START + html_report + HTML_END", "title": "" }, { "docid": "3ab61f4887eca30018ff6d1b843d8623", "score": "0.6013244", "text": "def render(self, file_out, cur_ind=''):\n file_out.write(f'{cur_ind}<{self.tag}{self.fmt_attrs()}>\\n')\n\n for item in self.content:\n if hasattr(item, 'render'):\n item.render(file_out, cur_ind + self.indent)\n else:\n file_out.write(cur_ind + self.indent + str(item) + '\\n')\n\n file_out.write(cur_ind + f'</{self.tag}>\\n')", "title": "" }, { "docid": "1d48bad5f8276b69b2e3b753789e60c0", "score": "0.6011203", "text": "def output(self):\n self.logic()\n return self.output", "title": "" }, { "docid": "cc2a40dc10538dcc17463b4fe13d717d", "score": "0.60079914", "text": "def render(self, file_out, cur_ind=\"\"):\n file_out.write(\"{}<{}>\\n\".format(cur_ind, self.tag))\n for con in self.content:\n if hasattr(con, 'render'):\n con.render(file_out, (cur_ind + self.indent))\n else:\n file_out.write((cur_ind + self.indent) + con + '\\n')\n file_out.write(\"{}</{}>\".format(cur_ind, self.tag))", "title": "" }, { "docid": "f160f693600f614972b1bb7e81b33af7", "score": "0.60046965", "text": "def _render_report(self, file, output=None):\n if output is None:\n report = self._reports.get(file)\n if report is not None:\n output = report.render_dir\n if output is None:\n name = osp.splitext(osp.basename(file))[0]\n id_ = to_text_string(uuid.uuid4())\n output = osp.join(REPORTS_TEMPDIR, id_, '{}.html'.format(name))\n self._reports[file].render_dir = output\n\n folder = osp.split(output)[0]\n self.check_create_tmp_dir(folder)\n doc = Pweb(file, output=output)\n\n # TODO Add more formats support\n if doc.file_ext == '.mdw':\n _format = 'md2html'\n elif doc.file_ext == '.md':\n _format = 'pandoc2html'\n else:\n raise Exception(\"Format not supported ({})\".format(doc.file_ext))\n\n f = CaptureStdOutput(self.sig_render_progress)\n\n if pweave_version.startswith('0.3'):\n with redirect_stdout(f):\n self.sig_render_progress.emit(\"Readign\")\n doc.read()\n self.sig_render_progress.emit(\"Running\")\n doc.run()\n self.sig_render_progress.emit(\"Formating\")\n doc.format(doctype=_format)\n self.sig_render_progress.emit(\"Writing\")\n doc.write()\n return doc.sink\n else:\n with redirect_stdout(f):\n doc.setformat(_format)\n doc.detect_reader()\n doc.parse()\n doc.run()\n doc.format()\n doc.write()\n return doc.sink", "title": "" }, { "docid": "5ac8d42eeefc2dc377aceb7411c5a65f", "score": "0.6003891", "text": "def render(self) -> None:\n\n # By default, do nothing.\n pass", "title": "" }, { "docid": "5a66fbc225be37fc1873f65e7610eddd", "score": "0.600155", "text": "def show_results(self):\n self.render(\"testreport.html\")", "title": "" }, { "docid": "9ee43ff18afc4071a571a1fac5e7e536", "score": "0.5997465", "text": "def render(self, file_out, cur_ind=''):\n file_out.write(f'{cur_ind}<{self.tag}{self.fmt_attrs()}> ')\n\n for item in self.content:\n if hasattr(item, 'render'):\n item.render(file_out, cur_ind + self.indent)\n else:\n file_out.write(str(item) + ' ')\n\n file_out.write(f'</{self.tag}>\\n')", "title": "" }, { "docid": "f8b6049c2874e62088a19871469e96d8", "score": "0.5989644", "text": "def render(self, context):\n pass", "title": "" }, { "docid": "0ea8db7f61e65e5051cb98e9558bb807", "score": "0.597957", "text": "def createOutput(self):\n import rmgpy.tools.diffmodels as diff_models\n\n kwargs = {\n 'web': 
True,\n 'wd': self.path,\n }\n diff_models.execute(\n self.chemkin1, self.dict1, None,\n self.chemkin2, self.dict2, None,\n **kwargs\n )", "title": "" }, { "docid": "05f6f1656e5333ba20eccfcd0ff6f556", "score": "0.597521", "text": "def write_rendered_file(self, filename):\n try:\n with open(filename, 'w') as out_file:\n for item in self.rendered_:\n out_file.write(item + '\\n')\n # todo could this addition be removed?\n # if self.rendered_:\n # out_file.write('\\n')\n\n except IOError:\n self.log_error(\"Prb while opening output file {}\".format(filename))\n return False\n return True", "title": "" }, { "docid": "fcd09ebf86862c43686adb0a13249e84", "score": "0.59751207", "text": "def render(self, file_out, cur_ind=''):\n self.g_render(file_out, cur_ind,\n cur_ind + '<' + self.tag,\n '>\\n',\n cur_ind + '</' + self.tag + '>')", "title": "" }, { "docid": "0f1a68e49d8d0846f223e13a8132dc70", "score": "0.5967545", "text": "def _render_frame(self):\r\n\r\n if not self.enabled:\r\n # in case we're disabled or stream is closed while still rendering,\r\n # we render the frame and increment the frame index, so the proper\r\n # frame is rendered if we're reenabled or the stream opens again.\r\n return\r\n\r\n self.clear()\r\n frame = self.frame()\r\n # output = '\\r{}'.format(frame)\r\n output = frame\r\n\r\n try:\r\n self._write(output)\r\n except UnicodeEncodeError:\r\n self._write(encode_utf_8_text(output))", "title": "" }, { "docid": "19cfccc20b1eeedd1d4f80aa7376821f", "score": "0.59409595", "text": "def render(self, **kwargs):\n pass", "title": "" }, { "docid": "a929b00f6a0faa3d6eee8b6d87dee6e1", "score": "0.59365594", "text": "def render(self) -> None:\n self.env.render()", "title": "" }, { "docid": "9d959b8e52471f5575ea56be687b0cb1", "score": "0.5936295", "text": "def _render(self, indices, mode=\"human\"):\n raise NotImplementedError", "title": "" }, { "docid": "dbabf0233b095893b8f713bb6e1357d0", "score": "0.59228677", "text": "def _output_to_console(self):\n output_text = self._get_output()\n\n if self.settings.get_bool('options/format_text', True):\n use_unicode = self.settings.get_bool('options/use_unicode', True)\n output_text = _format_output(output_text, self._file, use_unicode)\n\n if self.settings.get_bool('options/clear_output', True):\n self._clear_list()\n self.output_widget.setPlainText(output_text)\n else:\n self._append_output(output_text)\n self.output_widget.setPlainText(''.join(self.lines))\n return\n\n self._clear_list()", "title": "" }, { "docid": "a8590ecb42412b8cbe65ab95026c0045", "score": "0.5881268", "text": "def pydantic_output(output_data: Any) -> None:\n\n OutputUI(output_data).render_ui()", "title": "" }, { "docid": "11709e1f71e9727a58c897f81b4ce89c", "score": "0.5876766", "text": "def _stringify_result(self):\r\n \r\n \r\n # Empty outputs. 
False is probably a good output value \r\n if self._result != False and not self._result:\r\n self._output = ''\r\n # List outputs.\r\n elif isinstance(self._result, ListType):\r\n \r\n if len(self._result) > 0:\r\n \r\n columns_num = 1\r\n if isinstance(self._result[0], ListType):\r\n columns_num = len(self._result[0])\r\n \r\n table = PrettyTable(['']*(columns_num))\r\n table.align = 'l'\r\n table.header = False\r\n \r\n for row in self._result:\r\n if isinstance(row, ListType):\r\n table.add_row(row)\r\n else:\r\n table.add_row([ row ])\r\n \r\n self._output = table.get_string()\r\n \r\n # Dict outputs are display as tables\r\n elif isinstance(self._result, DictType) and self._result:\r\n\r\n # Populate the rows\r\n randomitem = next(self._result.itervalues())\r\n if isinstance(randomitem, ListType):\r\n table = PrettyTable(['']*(len(randomitem)+1))\r\n table.align = 'l'\r\n table.header = False\r\n \r\n for field in self._result:\r\n table.add_row([field] + self._result[field])\r\n \r\n else:\r\n table = PrettyTable(['']*2)\r\n table.align = 'l'\r\n table.header = False\r\n \r\n for field in self._result:\r\n table.add_row([field, str(self._result[field])])\r\n \r\n\r\n self._output = table.get_string()\r\n # Else, try to stringify\r\n else:\r\n self._output = str(self._result)", "title": "" }, { "docid": "2c3a625f975f27d84302bbb84840638b", "score": "0.5871555", "text": "def pipe(self):\n print(self.name)\n print(str(self.scale))\n\n for i in self.trans:\n for j in i:\n print('\\t{:<1.10f}'.format(j), end = \"\")\n print()\n for atom in self.atoms:\n print(\" {:>2}\".format(atom), end=\"\")\n\n print()\n\n for num in self.nums:\n print(\" {:>2d}\".format(num), end=\"\")\n\n print()\n if self.selective:\n print(\"Selective dynamics\")\n print(self.type)\n if self.selective:\n fix = [['T' if truth else 'F' for truth in line] for line in self.fix]\n for i, coord in enumerate(self.coords):\n print(f\" {coord[0]:>12.9f} {coord[1]:>12.9f} {coord[2]:>12.9f} {' '.join(map(str,fix[i]))}\")\n else:\n for coord in self.coords:\n print(f\" {coord[0]:>12.9f} {coord[1]:>12.9f} {coord[2]:>12.9f}\")", "title": "" }, { "docid": "2c206a62f868fc68815db5e0b5c2e374", "score": "0.5871205", "text": "def print(self):\n pipeline_name = self.pipeline_context.pipeline_name\n main_panel_title = Text(f\"{pipeline_name.upper()} - {self.name}\")\n main_panel_title.stylize(Style(color=\"blue\", bold=True))\n duration_subtitle = Text(f\"⏲️ Total pipeline duration for {pipeline_name}: {format_duration(self.run_duration)}\")\n step_results_table = Table(title=\"Steps results\")\n step_results_table.add_column(\"Step\")\n step_results_table.add_column(\"Result\")\n step_results_table.add_column(\"Finished after\")\n\n for step_result in self.steps_results:\n step = Text(step_result.step.title)\n step.stylize(step_result.status.get_rich_style())\n result = Text(step_result.status.value)\n result.stylize(step_result.status.get_rich_style())\n\n if step_result.status is StepStatus.SKIPPED:\n step_results_table.add_row(step, result, \"N/A\")\n else:\n run_time = format_duration((step_result.created_at - step_result.step.started_at))\n step_results_table.add_row(step, result, run_time)\n\n to_render = [step_results_table]\n if self.failed_steps:\n sub_panels = []\n for failed_step in self.failed_steps:\n errors = Text(failed_step.stderr)\n panel_title = Text(f\"{pipeline_name} {failed_step.step.title.lower()} failures\")\n panel_title.stylize(Style(color=\"red\", bold=True))\n sub_panel = Panel(errors, title=panel_title)\n 
sub_panels.append(sub_panel)\n failures_group = Group(*sub_panels)\n to_render.append(failures_group)\n\n if self.pipeline_context.dagger_cloud_url:\n self.pipeline_context.logger.info(f\"🔗 View runs for commit in Dagger Cloud: {self.pipeline_context.dagger_cloud_url}\")\n\n main_panel = Panel(Group(*to_render), title=main_panel_title, subtitle=duration_subtitle)\n console.print(main_panel)", "title": "" }, { "docid": "5adc97ec59b2784c95445cee4a1e198c", "score": "0.58473814", "text": "def __str__(self):\n rv = self.render()\n if rv is None:\n return ''\n return rv", "title": "" }, { "docid": "ee20d2605cd6f1372a52f163505cd0a4", "score": "0.5847312", "text": "def _init_output(self):\n # dictionary keys are the parameters names and used as headers\n # model parameters will appear in the output file in the order they are defined here\n self.__count = 0 # reset counter\n self.close_files() # close all previous files if they are opened\n self.fout = open(self.fname_out, 'wt') # file handle for daily model output\n if self.fname_aux is not None:\n self.faux = open(self.fname_aux, 'wt') # file handle for auxiliary model output\n # prepare the output list:\n if self.bdailyrun:\n self.output_dailyrun(True)\n else:\n self.output_hourlyrun(True)\n # now format the output:\n nspan = self.__nspan\n n = 0\n self.__str = ['', '']\n for v in self.out.values():\n if n > 0:\n self.__str[0] += ','\n self.__str[1] += ','\n if isinstance(v, float):\n self.__str[0] += '{:>' + str(nspan) + '.5f}' # 3 decimal places\n else:\n self.__str[0] += '{:>' + str(nspan) + '}'\n self.__str[1] += '{:>' + str(nspan) + '}'\n n += 1", "title": "" }, { "docid": "f3d92c720d226c2d4ad3fcfb2c0dbbe0", "score": "0.5844782", "text": "def to_stdout(self):\n my_template = self.load_template_from_file()\n return my_template.render(self.data)", "title": "" }, { "docid": "b244d499ec717703a1cd9007758dc63f", "score": "0.5843539", "text": "def print_output(self, fileName, output):\n result = sorted(output, key=lambda o: o.cycles[STAGES[0]])\n result[len(result) - 1].cycles[STAGES[1]] = 0\n\n string = 'instruction'.ljust(INSTRUCTION_LEFT_JUSTIFY)\n for stage in STAGES:\n string += '\\t' + stage + '\\t'\n for hazard in HAZARDS:\n string += '\\t' + hazard + '\\t'\n string += '\\n'\n\n for i in range(len(result)):\n string += str(result[i]) + '\\n'\n\n string += '\\nTotal number of requests to instruction cache ' + str(Instruction_Cache.requests)\n string += '\\nTotal number of instruction cache hit ' + str(Instruction_Cache.hits)\n string += '\\nTotal number of requests to data cache ' + str(Data_Cache.requests)\n string += '\\nTotal number of data cache hit ' + str(Data_Cache.hits)\n\n writeString(fileName, string)\n #print string", "title": "" }, { "docid": "0625acc204f3cbeb3ef8942e4c6460a7", "score": "0.5836006", "text": "def format_output_data(data):\n return data", "title": "" }, { "docid": "334bc2d745cfc133c89a6a038ea76e3b", "score": "0.5833567", "text": "def render_content(self, inpath, outpath):\n logging.warn(\"render_content not defined for base class\")", "title": "" }, { "docid": "8a3a2173df8611fb872f641d781a062e", "score": "0.58323866", "text": "def html(self):\n pass", "title": "" }, { "docid": "8116502d410a44d15e1c1c97a76b7ecb", "score": "0.58204156", "text": "def output(self, run_settings):\n pass", "title": "" }, { "docid": "1caaa6b173c7dea1e5e62677992fda36", "score": "0.5819392", "text": "def output(self, attrs,header):\n\t\tpass", "title": "" }, { "docid": "a3dcc85075b2b899d96efdcac3a57cf4", "score": "0.5817025", "text": "def 
process_results(self):\r\n _html = \" \"\r\n for result_type, results_list in self._run_results.items():\r\n for result_list in results_list:\r\n _li = BLANK_STR\r\n for result in result_list:\r\n _list = []\r\n _string = BLANK_STR\r\n if isinstance(result, dict) or isinstance(result, list):\r\n _list = list(flatten_iterable(result))\r\n _string = concatenate_list_to_string(_list)\r\n elif (isinstance(result, text_type) and result != BLANK_STR):\r\n _string = result\r\n else:\r\n _string = enquote(u(\"API returns empty result for this type of call.\"))\r\n \r\n _li += _string\r\n _html += u(\"<li class='\") + result_type + u(\"'>\") + _li + u(\"</li>\")\r\n return _html", "title": "" }, { "docid": "158fb444143869278894f8f2719e6f93", "score": "0.58095896", "text": "def get_output(self):\n pass", "title": "" }, { "docid": "158fb444143869278894f8f2719e6f93", "score": "0.58095896", "text": "def get_output(self):\n pass", "title": "" }, { "docid": "158fb444143869278894f8f2719e6f93", "score": "0.58095896", "text": "def get_output(self):\n pass", "title": "" }, { "docid": "3a232d86fa43b4e9e5fabd51c522953e", "score": "0.57964784", "text": "def format_output(raw_data):\n for i in range(len(raw_data)):\n print(raw_data[i][0] + ' -- ' + str(raw_data[i][1]) + ' views')", "title": "" }, { "docid": "e449adb18195960aca6c76cbe645cda3", "score": "0.5787087", "text": "def formatReport():", "title": "" }, { "docid": "cb7190e4e257f57eeb7169bc3e5658ae", "score": "0.57852864", "text": "def render(self):\n\n text = self._pre_render()\n\n if self.render_children:\n text += self._render_children()\n \n text += self._post_render()\n\n return text", "title": "" }, { "docid": "ab79ca7dda486822c5fd935c33d68c68", "score": "0.5781533", "text": "def process_output_as_str(self) -> str:\n return '\\n'.join(self.process_output_chunks)", "title": "" }, { "docid": "1517b296fe06a45bd44a9755371b4849", "score": "0.5779777", "text": "def visualize_result(self, result_dir, output_type, **kwargs):\n pass", "title": "" }, { "docid": "e766e7bc1ed54b56bafa25a96f64fa89", "score": "0.57797503", "text": "def render(self, mode='human', close=False):\n raise NotImplementedError", "title": "" }, { "docid": "6e1ecbb59dcb979013c4d6296557780c", "score": "0.577275", "text": "def format(self):\n # store line offsets in self.lines\n self.lines = [0, 0]\n pos = 0\n while 1:\n pos = string.find(self.raw, '\\n', pos) + 1\n if not pos: break\n self.lines.append(pos)\n self.lines.append(len(self.raw))\n\n # parse the source and write it\n self.pos = 0\n text = StringIO(self.raw)\n self.out.write('<pre class=\"python\">\\n')\n try:\n tokenize.tokenize(text.readline, self)\n except tokenize.TokenError, ex:\n msg = ex[0]\n line = ex[1][0]\n self.out.write(\"<h5 class='error>'ERROR: %s%s</h5>\" % (\n msg, self.raw[self.lines[line]:]))\n self.out.write('\\n</pre>\\n')", "title": "" }, { "docid": "6725b08c795c5d9197182e587a82e9a1", "score": "0.57717365", "text": "def create_output_page(model, submodel, data, input):\n html = render_to_string('01hms_output_basic_header.html', {\n 'SITE_SKIN': os.environ['SITE_SKIN'],\n 'TITLE': \"HMS \" + model,\n 'LABEL': submodel\n })\n # HMS water quality imports html\n html += render_to_string('03hms_waterquality_imports.html', {})\n\n html += render_to_string('02epa_drupal_header_bluestripe_onesidebar.html', {})\n html += render_to_string('03epa_drupal_section_title.html', {})\n\n\n # Generates html for metadata and data tables.\n \"\"\"\n Columns setup and ordering logic. 
Keys are checked for numerical value, which is used as the new key for the dict.\n \"\"\"\n try:\n dt_day_table = data[\"dtDay\"]\n dt_kl_table = data[\"dtKL\"]\n # -------------------------- #\n\n html += render_to_string('04hms_waterquality_output.html', {\n 'MODEL': model,\n 'SUBMODEL': submodel.title(),\n 'TITLE': \"HMS \" + submodel.replace('_', ' ').title() + \" Data\",\n 'INPUT_DATA': input,\n 'DAYTABLE': dt_day_table,\n 'KLTABLE': dt_kl_table\n })\n except Exception as ex:\n print(\"ERROR: Unable to construct output tables.\")\n return redirect('/hms/' + model + '/' + submodel + '/')\n # Generates html for links left\n html += render_to_string('07ubertext_end_drupal.html', {})\n html += links_left.ordered_list(model, submodel)\n\n html += render_to_string('09epa_drupal_ubertool_css.html', {})\n html += render_to_string('10epa_drupal_footer.html', {})\n return html", "title": "" }, { "docid": "ca246f07f181f9a43b9c7b1cfd2662c7", "score": "0.57641304", "text": "def render(self, mode='human', close=False):\n raise NotImplementedError()", "title": "" }, { "docid": "3467942833a28e98a990db4dbbe8b679", "score": "0.5760095", "text": "def render(self, formatter, obj):\n return AnsiString('')", "title": "" } ]
2d51276f3ae132f7a728c654f3f18324
Allows you to retry opening the file if theres an IOError so that 2 hour processing run isn't wasted.
[ { "docid": "55ab7254fefe31bfd1a63a5dbfd1a2be", "score": "0.630632", "text": "def retry_open(fl, *args, **kwargs):\n while True:\n try:\n return codecs.open(fl, *args, **kwargs)\n except IOError as ex:\n sys.stdout.write(\"{0} when opening {1}\\n\".format(ex, fl))\n sys.stdout.write(\"<Enter> to retry, s to skip: \")\n if 's' not in sys.stdin.readline().lower().strip():\n continue", "title": "" } ]
[ { "docid": "ea3e0d12da9b85a41269447fc1a7a0fd", "score": "0.6223054", "text": "def test_lock_file_error(self):\n with tempfile.TemporaryFile() as fp:\n fp.close()\n with pytest.raises(ValueError):\n with lock_file(fp):\n pass", "title": "" }, { "docid": "6097ec642b44eb752ebab5c10a50d567", "score": "0.6206845", "text": "def open_file(file_name,mode):\r\n try:\r\n file = open(\"assets/test_files/\"+file_name,mode)\r\n except IOError as e:\r\n print(\"unable to open the file\", file_name, \"ending program.\\n\", e)\r\n try:\r\n file = open(\"assets/errors/errors_log.txt\",\"a+\")\r\n time = datetime.now()\r\n error_time = time.strftime(\"%m/%d/%y %H:%M:%S\")\r\n file.write(str(e)+\" \"+str(error_time)+\"\\n\")\r\n input(\"\\n\\npress the enter key to exit.\")\r\n sys.exit()\r\n except:\r\n sys.exit()\r\n else:\r\n return file", "title": "" }, { "docid": "6a9de93368bb01275c1963c35cf931a1", "score": "0.601808", "text": "def try_open (*args, **kwargs):\n try:\n return io.open (*args, **kwargs)\n except IOError as e:\n if e.errno == 2:\n return None\n raise", "title": "" }, { "docid": "6a9de93368bb01275c1963c35cf931a1", "score": "0.601808", "text": "def try_open (*args, **kwargs):\n try:\n return io.open (*args, **kwargs)\n except IOError as e:\n if e.errno == 2:\n return None\n raise", "title": "" }, { "docid": "400d83528310296ee416bb55eacebc38", "score": "0.5983093", "text": "def retry_on_eintr(function, *args, **kw):\r\n while True:\r\n try:\r\n return function(*args, **kw)\r\n except IOError as e:\r\n if e.errno != errno.EINTR:\r\n raise", "title": "" }, { "docid": "7b72a925a0164903e9d8188e4c651174", "score": "0.5941306", "text": "def test_harvester_new_file_exception(self):\n # create the file so that it is unreadable\n self.create_sample_data_set_dir(\"node59p1_step1.dat\", TELEM_DIR, F_NAME,\n mode=000, copy_metadata=False)\n\n # Start sampling and watch for an exception\n self.driver.start_sampling()\n\n self.assert_exception(ValueError)\n\n # At this point the harvester thread is dead. 
The agent\n # exception handle should handle this case.", "title": "" }, { "docid": "b25084fc89dd7371e70dcadcb12cb06d", "score": "0.5874291", "text": "def restartFailed(self):\r\n self.core.files.restartFailed()", "title": "" }, { "docid": "466ef3d30361a21360a15542f2d1b6f0", "score": "0.58498734", "text": "def try_open_file(file_path, key):\n file = MyFile(file_path, key)\n return file", "title": "" }, { "docid": "0eb5a9e6c78a457845637a19e2bd3dfb", "score": "0.57852393", "text": "def test_exhaustTriggersEMFILE(self):\n self.addCleanup(self.exhauster.release)\n self.exhauster.exhaust()\n exception = self.assertRaises(IOError, self.openAFile)\n self.assertEqual(exception.errno, errno.EMFILE)", "title": "" }, { "docid": "3bd93335821ef64360605f5115da4888", "score": "0.5781258", "text": "def open_error_file(fn, mode):\r\n\r\n try:\r\n f = file(os.path.join(renpy.config.logdir, fn), mode)\r\n return f, fn\r\n except:\r\n pass\r\n\r\n try:\r\n f = file(fn, mode)\r\n return f, fn\r\n except:\r\n pass\r\n\r\n import tempfile\r\n\r\n fn = os.path.join(tempfile.gettempdir(), \"renpy-\" + fn)\r\n return file(fn, mode), fn", "title": "" }, { "docid": "488b47a5c9352a4f6f5828483b6ef929", "score": "0.5752766", "text": "def retry_reads(self):\n ...", "title": "" }, { "docid": "006825a2d93b52f7844cbf463bd15aa2", "score": "0.5725639", "text": "def retry_writes(self):\n ...", "title": "" }, { "docid": "f0547ce6d1256f39efff53c9f77c8696", "score": "0.57227445", "text": "def test_open_file(self):\n test_file = tempfile.mkstemp()\n f = open_file(test_file[1])\n self.assertTrue(f.readable())\n f.close()\n os.close(test_file[0])\n os.remove(test_file[1])\n\n test_file = ''\n with self.assertRaises(FileNotFoundError):\n open_file(test_file)", "title": "" }, { "docid": "8c3b36646bb7144dca970239ffe39b40", "score": "0.5707297", "text": "def exclusive_open(filename, *args, timeout=3, retry_time=0.05, **kwargs):\n _name = kwargs[\"foo\"]\n del kwargs[\"foo\"]\n lockfile = filename + \".lock\"\n if timeout is not None:\n deadline = datetime.now() + timedelta(seconds=timeout)\n else:\n deadline = None\n while True:\n try:\n fd = os.open(lockfile, os.O_CREAT|os.O_EXCL)\n break\n except (FileExistsError, PermissionError):\n if timeout is not None and datetime.now() >= deadline:\n raise\n print(f\"Z_{_name}_Z\", end=\" \", flush=True)\n sleep(retry_time)\n try:\n print(_name, \"opening\", filename)\n with open(filename, *args, **kwargs) as f:\n yield f\n finally:\n try:\n os.close(fd)\n finally:\n os.unlink(lockfile)", "title": "" }, { "docid": "1d32dfaaf473d23b23d3c1481a9ecc05", "score": "0.56918484", "text": "def open_file():#open file function\n while True:#infinite loop till the user enters correct file name\n try: #try-except feature to account for errors\n file_name=input(\"Enter a file name: \")#input file name\n fp=open(file_name)#file open function using file object fp\n return fp#end of funtion\n except FileNotFoundError:#error case\n print(\"Error. 
Please try again\")#displays error", "title": "" }, { "docid": "e4e883b487dded4df924fc51d782638f", "score": "0.56675655", "text": "def test_load_shared_data_open_error(self, exists: MagicMock):\n\n exists.return_value = True\n\n with self.assertRaises(IOError):\n invoker.load_shared_data('fakepaththatexists')", "title": "" }, { "docid": "3d5a6ce6553bba82ea8e0288bd2f3c5b", "score": "0.5667051", "text": "def lock_file(fin, max_tries=100, retry_interval=0.5):\n count = 0\n while True:\n try:\n fcntl.lockf(fin, fcntl.LOCK_EX | fcntl.LOCK_NB)\n break\n except IOError as e:\n if e.errno != errno.EAGAIN:\n raise LockError('Failed to lock file: %' % fin)\n else:\n time.sleep(retry_interval)\n count += 1\n if count > max_tries:\n raise LockError(35, 'Tried too many times to lock file: %s' % fin)", "title": "" }, { "docid": "8600e3e12dccd68390cb1afd9918adaf", "score": "0.56645244", "text": "def test_non_existent_file(self):\r\n with self.assertRaises(SystemExit):\r\n readfile(\"non existent file\")", "title": "" }, { "docid": "b24dad88a110def5d06daf76961762a6", "score": "0.56530637", "text": "def unreadable_file_error_message(context):\n assert_in(\"next-action: error: can't open file: \", context.next_action())", "title": "" }, { "docid": "f830db8915982bf1c9c6c7c0ac665dd7", "score": "0.56445867", "text": "async def _receive_file_retries(\n self,\n file_ptr: BufferedRandom,\n peer: StreamPair,\n chunk: File.Chunk):\n read_size = False\n num_tries = 0\n\n async def file_receiver(reader: aio.StreamReader):\n \"\"\" Used by the client to receive file content \"\"\"\n file_data = await Message[bytes].read(reader) # potential stack issue\n file_ptr.seek(chunk.offset)\n amt_got = file_ptr.write(file_data)\n return amt_got\n\n while not read_size and num_tries < 5:\n # TODO: make tries param\n amt_read = await peer.request(\n Request.DOWNLOAD,\n chunk = chunk,\n receiver = file_receiver )\n\n read_size = amt_read == chunk.size\n num_tries += 1", "title": "" }, { "docid": "dd7052450884ea0b031d52081ebff09b", "score": "0.56396854", "text": "def onerror(func, path, exc_info):\r\n import stat\r\n if not os.access(path, os.W_OK):\r\n # Is the error an access error ?\r\n os.chmod(path, stat.S_IWUSR)\r\n func(path)\r\n else:\r\n raise", "title": "" }, { "docid": "745b0b01731b86f996607802cf537e21", "score": "0.55805373", "text": "def __io_error(self, ex_message, filename):\r\n\t\tprint 'Error: open %s failed.\\n%s' % (filename, ex_message)\r\n\t\tif self.in_vcf_fh:\r\n\t\t\tself.in_vcf_fh.close()\r\n\t\tif self.out_vcf_fh:\r\n\t\t\tself.out_vcf_fh.close()\r\n\t\tsys.exit(1)", "title": "" }, { "docid": "0123c60f22d0ee22fb6d122d4d39d391", "score": "0.557356", "text": "def http_error_default(self, url, fp, errcode, errmsg, headers):\r\n # The following two lines are copied from urllib.URLopener's\r\n # implementation of http_error_default\r\n void = fp.read()\r\n fp.close()\r\n raise gPodderDownloadHTTPError(url, errcode, errmsg)", "title": "" }, { "docid": "9c6928e0e252d660e633e1d19e64f572", "score": "0.5558335", "text": "def unlink_file(filename):\n NUM_RETRIES = 10\n for retry_no in range(1, NUM_RETRIES + 1):\n try:\n os.unlink(filename)\n break\n except PermissionError:\n util.debug(\n '[ResourceTracker] tried to unlink {}, got '\n 'PermissionError'.format(filename)\n )\n if retry_no == NUM_RETRIES:\n raise\n else:\n time.sleep(.2)\n except FileNotFoundError:\n # In case of a race condition when deleting the temporary folder,\n # avoid noisy FileNotFoundError exception in the resource tracker.\n pass", "title": "" 
}, { "docid": "b14f2a5cc17bf7daf396526fe822f0c2", "score": "0.55530417", "text": "def test_download_retry_exceeded_error(caplog):\n caplog.set_level(logging.DEBUG, \"snowflake.connector\")\n mock_resource = MagicMock()\n mock_resource.download_file.side_effect = RetriesExceededError(Boto3Error())\n client_meta = {\n \"cloud_client\": mock_resource,\n \"stage_info\": {\"location\": \"loc\"},\n }\n meta = {\n \"name\": \"f\",\n \"src_file_name\": \"f\",\n \"stage_location_type\": \"S3\",\n \"client_meta\": SFResourceMeta(**client_meta),\n \"sha256_digest\": \"asd\",\n \"src_file_size\": 99,\n \"get_callback_output_stream\": None,\n \"show_progress_bar\": False,\n \"get_callback\": None,\n }\n meta = SnowflakeFileMeta(**meta)\n with mock.patch(\n \"snowflake.connector.s3_util.SnowflakeS3Util._get_s3_object\",\n return_value=mock_resource,\n ):\n SnowflakeS3Util._native_download_file(meta, \"f\", 4)\n assert meta.last_error is mock_resource.download_file.side_effect\n assert meta.result_status == ResultStatus.NEED_RETRY", "title": "" }, { "docid": "6ed9b92e9546cffcac90786465db2f4c", "score": "0.55504125", "text": "def broken(to_path):", "title": "" }, { "docid": "424b9b1ff310a744199d8563f387632b", "score": "0.55405045", "text": "def _try_with_ioerror(callee, *args, **kwargs):\n try:\n return callee(*args, **kwargs)\n except ftplib.all_errors:\n ftp_error = sys.exc_info()[1]\n raise FTPIOError(ftp_error)", "title": "" }, { "docid": "a8805c42d3f3fb231e0fb1c296991fb1", "score": "0.5539635", "text": "def halt_on_file_error(self) -> bool:\n return self._halt_on_file_error", "title": "" }, { "docid": "7848e0562fedb167bc8cbfc3225c156e", "score": "0.55366886", "text": "def _reopen(self, start):\n if self.file:\n self.file.close()\n self.file = open(self.fname, *self.open_args)\n self.stat = os.fstat(self.file.fileno())\n self.stat_time = time.time()\n if start:\n # the beginning: a very good place to start\n self.pos = 0\n else:\n # skip to the end. 
I always do....\n self.pos = self.stat.st_size", "title": "" }, { "docid": "62a6d5b521f0b01d32a247953f49d362", "score": "0.552888", "text": "def rescue(self, path):\n whole_path = path + '/' + self.file_name\n self.volume.seek_to_cluster(self.first_cluster)\n LOG.info('Recovering: %r', whole_path)\n if self.is_directory():\n if not os.path.exists(whole_path):\n try:\n os.makedirs(whole_path)\n except (OSError, IOError):\n LOG.exception('Failed to create directory: %s', whole_path)\n return\n for dirent in self.children:\n dirent.rescue(whole_path)\n else:\n try:\n bufsize = 0x100000\n remains = self.file_size\n\n with open(whole_path, 'wb') as f:\n while remains > 0:\n read = min(remains, bufsize)\n remains -= read\n buf = self.volume.infile.read(read)\n f.write(buf)\n except (OSError, IOError, OverflowError):\n LOG.exception('Failed to create file: %s', whole_path)", "title": "" }, { "docid": "3f23e3d67cf6e61a4e7984f007467fb0", "score": "0.551724", "text": "def test_opening_resource_bad_open_timeout(self):\n rname = list(RESOURCE_ADDRESSES.values())[0]\n\n with self.assertRaises(ValueError) as cm:\n rsc = self.rm.open_resource(rname,\n open_timeout=\"\")\n\n self.assertIn(\"integer (or compatible type)\", str(cm.exception))", "title": "" }, { "docid": "6844f3267d6de8c1b76a925755b2fee2", "score": "0.550726", "text": "def _open(self, filename):\n self._close()\n\n try:\n self.fid = self.cls(filename, **self.cls_kwargs)\n except IOError:\n self.fid = None\n if self.err:\n raise\n else:\n warnings.warn(f\"IOError: {filename}\")", "title": "" }, { "docid": "176e2dfa104efb6f77cb0d7d0109e8b4", "score": "0.55065", "text": "def test_read_file_raises_exception_when_wrong_filename(self):\n with self.assertRaises(IOError):\n self.cr.read_file('non_existing_file.ini')", "title": "" }, { "docid": "6c6d185cf2e08c9f9e304d0edee02174", "score": "0.5501081", "text": "def _onerror(func, path, exc_info):\n if salt.utils.platform.is_windows() and not os.access(path, os.W_OK):\n # Is the error an access error ?\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n raise # pylint: disable=E0704", "title": "" }, { "docid": "e3891b3c074b04664fb0c72f4d1610c4", "score": "0.5484524", "text": "def _in_use(path):\n try:\n with open(path, 'r+'):\n return False\n except IOError:\n return True", "title": "" }, { "docid": "919fe35cd52ba8bf5ebad4b2df76a3e7", "score": "0.54744893", "text": "def onerror(func, path, exc_info):\n import stat\n if not os.access(path, os.W_OK):\n # Is the error an access error ?\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n raise", "title": "" }, { "docid": "7f0357ed43e5bf7e6676df151edd8ff6", "score": "0.5454983", "text": "def halt_on_file_error(self) -> bool:\n return self._halt_on_file_error or False", "title": "" }, { "docid": "7f0e1e524b4b53e595364c1b53621aea", "score": "0.5445205", "text": "def retry_on_httperror(exc):\n\tglobal headers\n\tsession = get_session()\n\theaders['session'] = session\n\treturn isinstance(exc, requests.HTTPError)", "title": "" }, { "docid": "089f453bfd41305c0c8123943a74ae11", "score": "0.5439881", "text": "def reload_file(self):\r\n err_msg = None\r\n if (not hasattr(self, '_filename') \r\n or self._filename is None):\r\n err_msg = \"No file specified\"\r\n elif not os.path.isfile(self._filename):\r\n err_msg = \"File not found. 
({})\".format(self._filename)\r\n \r\n if err_msg:\r\n raise IOError(err_msg)\r\n else:\r\n self._load_file()", "title": "" }, { "docid": "edabc377df441672aefa4b748e16dbc7", "score": "0.5429339", "text": "def retry_urlopen(url, attempts=50, *args, **kwargs):\n for attempt in range(attempts):\n try:\n return urllib2.urlopen(url, *args, **kwargs)\n except urllib2.URLError as ex:\n log.error(\"Caught {0} in GetPage, retry number {1}.\"\n .format(ex, attempt))\n if \"590\" in str(ex):\n log.error(\"Getting new cookie\")\n GetCookie()\n continue\n\n log.error(\"Retried {0} times with no valid response.\".format(attempts))\n raise urllib2.URLError(\"Open retried {0} times!\".format(attempts))", "title": "" }, { "docid": "10f54b42ea35b380328dcbc67c8119aa", "score": "0.54278344", "text": "def _open_file(file_path, type='r'):\n try:\n fh = open(file_path, type)\n except IOError:\n logger.error('Unable to open {}'.format(file_path))\n else:\n return fh", "title": "" }, { "docid": "e5097d455e7f712fbb7ebadce80081cc", "score": "0.5424614", "text": "def _preread(self):\n if not self.file:\n self._reopen(False)\n return\n now = time.time()\n if now >= self.stat_time + self.stat_time_min:\n nstat = os.stat(self.fname)\n self.stat_time = now\n if nstat.st_dev != self.stat.st_dev or \\\n nstat.st_ino != self.stat.st_ino:\n # start at top of new file\n self._reopen(True)\n return\n\n # should clear previous EOF condition\n self.file.seek(self.pos)", "title": "" }, { "docid": "32c3d9619ac9d5239954386cc999f2b1", "score": "0.54188836", "text": "def __retry_on_error(self, fn, *args, **kwargs):\n\n try:\n return fn(*args, **kwargs)\n except ConnectionFailure:\n self.logger.debug('experienced network error- retrying')\n try:\n return fn(*args, **kwargs)\n # the only way for a duplicate key error to happen here is if the\n # first insert succeeded\n except DuplicateKeyError:\n self.logger.debug('retry resolved network error')\n except Exception:\n self.logger.error('could not resolve with retry')\n raise\n # catching all non-network errors to raise and log.\n except Exception as e:\n self.logger.error('error is not retryable: {!s}'.format(e))\n raise", "title": "" }, { "docid": "5407947524374bd6dfbb648ce0a9e67d", "score": "0.5410458", "text": "def test_read_w_file_unc_exception(self, mock_isfile, mock_json_loads, mock_validation):\n sis = SessionStore(file_path=\"/tmp/.session_id_store\", session_timeout=600)\n\n mock_isfile.return_value = True\n mock_validation.return_value = True\n mock_json_loads.side_effect = Exception(\"Some exception\")\n r_data = 'invalid_json'\n\n with mock.patch('pyotrs.lib.open',\n mock.mock_open(read_data=r_data)) as m:\n self.assertRaisesRegex(Exception,\n 'Exception Type',\n sis.read)\n\n m.assert_called_once_with('/tmp/.session_id_store', 'r')", "title": "" }, { "docid": "a21bd5fb2cdcba79baea386e1fbfc2ea", "score": "0.5403063", "text": "def try_read ( self, *args, **kwargs ):\n ret = None\n try:\n self.read ( *args, **kwargs )\n except IOError as ioerr:\n if ioerr.errno == errno.ENOENT:\n ret = False\n else:\n raise\n else:\n ret = True\n\n return ret", "title": "" }, { "docid": "b075c04b7966ae0af6483d2e359d9fe2", "score": "0.54011923", "text": "def onerror(func, path, exc_info):\n if not os.access(path, os.W_OK):\n # Is the error an access error ?\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n raise", "title": "" }, { "docid": "b90be67043760e6443c54998259362c7", "score": "0.5396067", "text": "def _download_file(self, download_path, dest_path, num_attempts=2):\n for attempt in 
range(1,num_attempts+1):\n try:\n urllib.request.urlretrieve(download_path, dest_path)\n except:\n if attempt==num_attempts:\n raise\n else:\n time.sleep(30)\n continue", "title": "" }, { "docid": "4e99559c4f9c8b7e73450c8795f6a72b", "score": "0.53904605", "text": "def on_error(self, e):\n\n print(\"Initial Error: {}\\n Cause: {}\\nContext: {}\\n\"\n \"- attempting to recover.\".format(e, e.__cause__, e.__context__))\n\n try:\n self.close()\n except Exception as e:\n print(\"Ignored closing error: {}\".format(e))\n pass\n\n sleep(0.5)\n\n self.start()", "title": "" }, { "docid": "4458b4735c33c17aa6d85631ad6b0d14", "score": "0.53815645", "text": "def _retry_request(self, rsrc_id):\n # XXX(boysson): Duplicate of _base_service.clt_update_request\n request_lnk = os.path.join(self._service_rsrc_dir, rsrc_id)\n _LOGGER.debug('Updating %r', rsrc_id)\n # NOTE(boysson): This does the equivalent of a touch on the symlink\n try:\n os.lchown(\n request_lnk,\n os.getuid(),\n os.getgid()\n )\n except OSError as err:\n if err.errno != errno.ENOENT:\n raise", "title": "" }, { "docid": "46b00975d55b5b1c00ea650f7d9ce455", "score": "0.53651065", "text": "def test_download_ioerror(self):\n self.get().iter_content.return_value = ['<content>']\n self.open().__enter__().write.side_effect = IOError\n self.assertRaises(CloudifyCliError, download_file, 'some_url')", "title": "" }, { "docid": "3f36ba17da66b0c51b14c2cf52d1fb96", "score": "0.53557026", "text": "def test_retry_option_eventual_success(state, state_tree, tmp_path):\n testfile1 = tmp_path / \"testfile-1\"\n testfile2 = tmp_path / \"testfile-2\"\n\n def create_testfile(testfile1, testfile2):\n while True:\n if testfile1.exists():\n break\n time.sleep(2)\n testfile2.touch()\n\n thread = threading.Thread(target=create_testfile, args=(testfile1, testfile2))\n sls_contents = \"\"\"\n file_test_a:\n file.managed:\n - name: {}\n - content: 'a'\n\n file_test:\n file.exists:\n - name: {}\n - retry:\n until: True\n attempts: 5\n interval: 2\n splay: 0\n - require:\n - file_test_a\n \"\"\".format(\n testfile1, testfile2\n )\n with pytest.helpers.temp_file(\"retry.sls\", sls_contents, state_tree):\n thread.start()\n ret = state.sls(\"retry\")\n for state_return in ret:\n assert state_return.result is True\n assert state_return.full_return[\"duration\"] > 4\n # It should not take 5 attempts\n assert \"Attempt 5\" not in state_return.comment", "title": "" }, { "docid": "d3aa7981b419044f467e8a533cf21647", "score": "0.5355312", "text": "def on_error(func, path, exc_info):\n if not os.access(path, os.W_OK):\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n pass", "title": "" }, { "docid": "d19c999b7a729438cd4039b62d09b196", "score": "0.53541124", "text": "def try_touch(fname, times=None):\n try:\n touch(fname, times=times)\n except:\n pass", "title": "" }, { "docid": "7c986d1a8ece6f9937c1ecf6fe0b5b6c", "score": "0.5352609", "text": "def closeonerror(read_meth):\r\n def new_read_meth(inst):\r\n try:\r\n return read_meth(inst)\r\n except httplib.HTTPException:\r\n inst.close()\r\n raise\r\n return new_read_meth", "title": "" }, { "docid": "a159ed7894e5edd6f449400f6b3151cf", "score": "0.5350608", "text": "def ResetErrno(self):\n self.last_errno = False", "title": "" }, { "docid": "d2a7abeb5e3156ce558432e57d0da4a2", "score": "0.53498656", "text": "def retry(f):\n @wraps(f)\n def wrapper(self, *args, **kwds):\n max_tries = getattr(self, 'call_retry_count', 1)\n for i in range(max_tries):\n try:\n return f(self, *args, **kwds)\n except Exception as e:\n logging.warning(\n 
'pymc operation failed: {0} (try={1})'.format(e, i+1))\n if max_tries-1 == i:\n raise e\n\n return wrapper", "title": "" }, { "docid": "e185b89f185fd04fc5653e1e80657ea8", "score": "0.53208077", "text": "def try_again():\n seconds = self._config[CONF_SCAN_INTERVAL] * 2\n _LOGGER.error(\"Retrying in %i seconds\", seconds)\n async_call_later(self.hass, seconds, self.fetching_data)", "title": "" }, { "docid": "2accc5f6d7bf0d7db9ed357546f7926a", "score": "0.53148216", "text": "def lock():\n loopcount = 0\n while os.path.isfile(lockPath):\n if loopcount > 15:\n sys.stdout.write(\"lockfile_timeout\")\n exit()\n time.sleep(1)\n loopcount += 1\n\n open(lockPath,\"w\").close()", "title": "" }, { "docid": "355739e68e460360dc2eb5286a10e2c8", "score": "0.53141433", "text": "def onerror(func, path, exc_info):\n import stat\n if not os.access(path, os.W_OK):\n # Is the error an access error ?\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n raise", "title": "" }, { "docid": "355739e68e460360dc2eb5286a10e2c8", "score": "0.53141433", "text": "def onerror(func, path, exc_info):\n import stat\n if not os.access(path, os.W_OK):\n # Is the error an access error ?\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n raise", "title": "" }, { "docid": "355739e68e460360dc2eb5286a10e2c8", "score": "0.53141433", "text": "def onerror(func, path, exc_info):\n import stat\n if not os.access(path, os.W_OK):\n # Is the error an access error ?\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n raise", "title": "" }, { "docid": "d4b2859da012116cd46b61a6c373a5e8", "score": "0.5308402", "text": "def test_read_from_file_fail(self):\n with self.assertRaises(ValueError):\n Data.open(\"tests/test_data/bad_test.conf\")", "title": "" }, { "docid": "e9c3b978ebd5a7c83e4ab5c3ead3532c", "score": "0.5285584", "text": "def upload_retry(func):\n\n def inner(*args, **kwargs):\n \"\"\"Handle.\"\"\"\n time_interval = 5\n times = 0\n logger = logging.getLogger(__name__)\n while True:\n result = func(*args, **kwargs)\n if result == 0:\n return True\n if times > 10:\n if result == 11:\n raise RayvisionError(200025, \"There are files that cannot\"\n \"be uploaded, please check\")\n if result == 10:\n time.sleep(time_interval)\n time_interval += 10\n if time_interval > 600:\n time_interval = 600\n logger.info(\"Retrying upload......\")\n elif result == 9:\n raise Exception(\"Parameter or domain name resolution\"\n \"error.\")\n else:\n times += 1\n logger.info(\"Retrying upload......\")\n\n return inner", "title": "" }, { "docid": "c54df501208c6ffc8c5a9ca1eeb00398", "score": "0.52850586", "text": "def cache_get_file( file_path, max_age, connect_args, job_attrs, squid_port, http_port ):\n \n # get the file from squid\n global tmp_connect_args\n global tmp_job_attrs\n try:\n if tmp_job_attrs == None:\n tmp_job_attrs = {}\n if tmp_connect_args == None:\n tmp_connect_args = {}\n \n tmp_connect_args[os.path.basename(file_path)] = copy.deepcopy(connect_args)\n tmp_job_attrs[os.path.basename(file_path)] = iftfile.iftjob.get_attrs_copy(job_attrs)\n \n proxy_handler = urllib2.ProxyHandler( {'http': 'http://127.0.0.1:' + str(squid_port)} )\n opener = urllib2.build_opener( proxy_handler )\n \n if max_age > 0:\n opener.addheaders = [(\"Cache-Control\",\"max-age=\" + str(max_age))]\n \n cached_file_fd = opener.open( \"http://127.0.0.1:\" + str(http_port) + os.path.abspath( file_path ) )\n return cached_file_fd\n \n except urllib2.HTTPError, inst:\n iftlog.log(1, \"iftcache.cache_get_file: file not in cache\")\n 
tmp_connect_args[os.path.basename(file_path)] = None\n tmp_job_attrs[os.path.basename(file_path)] = None\n return None\n \n except socket.error, inst:\n # if the connection was simply refused, then just fail\n if inst.args[0] == 111:\n # receive the file with any protocol\n iftlog.log(5, \"iftcache.cache_get_file: could not query cache on port \" + str(http_port) + \", 111 connection refused\" )\n return None\n \n else:\n iftlog.exception( \"iftcache.cache_get_file: socket error\", inst)\n return None # some other error\n \n \n except Exception, inst:\n iftlog.exception( \"iftcache.cache_get_file: could not query cache on port \" + str(http_port), inst)\n tmp_connect_args[os.path.basename(file_path)] = None\n tmp_job_attrs[os.path.basename(file_path)] = None\n return None", "title": "" }, { "docid": "8f2d7942bbdd9f3cad5b8a12a5963103", "score": "0.5283778", "text": "def open_and_lock(self, timeout, delay):\r\n if self._locked:\r\n raise AlreadyLockedException('File %s is already locked' %\r\n self._filename)\r\n start_time = time.time()\r\n\r\n validate_file(self._filename)\r\n try:\r\n self._fh = open(self._filename, self._mode)\r\n except IOError, e:\r\n # If we can't access with _mode, try _fallback_mode and don't lock.\r\n if e.errno == errno.EACCES:\r\n self._fh = open(self._filename, self._fallback_mode)\r\n return\r\n\r\n # We opened in _mode, try to lock the file.\r\n while True:\r\n try:\r\n fcntl.lockf(self._fh.fileno(), fcntl.LOCK_EX)\r\n self._locked = True\r\n return\r\n except IOError, e:\r\n # If not retrying, then just pass on the error.\r\n if timeout == 0:\r\n raise e\r\n if e.errno != errno.EACCES:\r\n raise e\r\n # We could not acquire the lock. Try again.\r\n if (time.time() - start_time) >= timeout:\r\n logger.warn('Could not lock %s in %s seconds' % (\r\n self._filename, timeout))\r\n if self._fh:\r\n self._fh.close()\r\n self._fh = open(self._filename, self._fallback_mode)\r\n return\r\n time.sleep(delay)", "title": "" }, { "docid": "4babf95f81bb4b5e042524a9da3a4964", "score": "0.5269822", "text": "def handle_ioexception(self, exception):", "title": "" }, { "docid": "ec0e2f303167bf9b58e1581ca5378ad9", "score": "0.52630764", "text": "def test_exitOpensFileOnException(self):\n\n class TestException(Exception):\n \"\"\"\n An exception only used by this test.\n \"\"\"\n\n self.reservedFD.reserve()\n with self.assertRaises(TestException):\n with self.reservedFD:\n raise TestException()", "title": "" }, { "docid": "f962098c8f82ca6cee4edce556ad6cf6", "score": "0.5243935", "text": "def _get_file_creation_time_raises_exception_when_file_creation_time_is_invalid(description,\n file_path, ctime,\n mock_getctime):\n\n mock_getctime.return_value = ctime\n\n try:\n _get_file_creation_time(file_path)\n except FileTimeOutOfRangeException:\n pass\n except Exception as exception: # pylint: disable=locally-disabled, broad-except\n template = \"Test case '{0}' failed: An unexpected {1} exception was raised.\"\n ok_(False, template.format(description, type(exception).__name__))\n else:\n template = \"Test case '{0}' failed: An exception was expected but was not raised.\"\n ok_(False, template.format(description))\n\n mock_getctime.assert_called_once_with(file_path)", "title": "" }, { "docid": "72dea6072c42521597aa7481f7d7cf2d", "score": "0.5218542", "text": "def open_and_lock(self, timeout, delay):\r\n if self._locked:\r\n raise AlreadyLockedException('File %s is already locked' %\r\n self._filename)\r\n self._locked = False\r\n\r\n validate_file(self._filename)\r\n try:\r\n self._fh = 
open(self._filename, self._mode)\r\n except IOError, e:\r\n # If we can't access with _mode, try _fallback_mode and don't lock.\r\n if e.errno == errno.EACCES:\r\n self._fh = open(self._filename, self._fallback_mode)\r\n return\r\n\r\n lock_filename = self._posix_lockfile(self._filename)\r\n start_time = time.time()\r\n while True:\r\n try:\r\n self._lock_fd = os.open(lock_filename,\r\n os.O_CREAT|os.O_EXCL|os.O_RDWR)\r\n self._locked = True\r\n break\r\n\r\n except OSError, e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n if (time.time() - start_time) >= timeout:\r\n logger.warn('Could not acquire lock %s in %s seconds' % (\r\n lock_filename, timeout))\r\n # Close the file and open in fallback_mode.\r\n if self._fh:\r\n self._fh.close()\r\n self._fh = open(self._filename, self._fallback_mode)\r\n return\r\n time.sleep(delay)", "title": "" }, { "docid": "4bf89ff2567e9e48b0f812dad5b90280", "score": "0.5211121", "text": "def reopen(self, mode):\n\t\tif not self.file.closed:\n\t\t\tself.file.close()\n\t\tself.file = open(self.file.name, mode)", "title": "" }, { "docid": "78db649548ca820810d083b7ca4f0db1", "score": "0.5210911", "text": "def file_access_errors(df_strace):\n file_access(df_strace, filter_ret_int=\"^-1$\")", "title": "" }, { "docid": "da2ffdc7d3f6f74a6e80282fd858111c", "score": "0.5204986", "text": "def test_not_found_error(self):\n filename = str(hashlib.sha256(str(time.time()).encode(\"utf-8\")).\n hexdigest())\n with self.assertRaises(FileNotFoundError):\n zhmc_prometheus_exporter.parse_yaml_file(filename)", "title": "" }, { "docid": "cdcd2e64fd6c511116a8e7c3cdf515d1", "score": "0.519963", "text": "def _FileFetchFailed(self, index: int,\n status: Optional[rdf_flow_objects.FlowStatus]):\n\n pathspec, request_data = self._RemoveCompletedPathspec(index)\n if pathspec is None:\n # This was already reported as failed.\n return\n\n self.state.files_failed += 1\n\n # Report the request_data for this flow's caller.\n self.FileFetchFailed(pathspec, request_data=request_data, status=status)", "title": "" }, { "docid": "64b7c13675a78646036c5be151004ae2", "score": "0.51841146", "text": "def test_nonexistent_file(self):\n self.assertRaises(IOError, FileOnDisk, \"/foo\", \"bar.txt\")", "title": "" }, { "docid": "e0e696135bebfcee876663f5030e7b8b", "score": "0.5175287", "text": "def test_second_attempt_succeeds(self):\n\n pyega3.TEMPORARY_FILES_SHOULD_BE_DELETED = False\n\n file_size_without_iv = 92700\n file_size_with_iv = file_size_without_iv + 16\n\n self.server_config_file_loaded_successfully()\n self.user_has_authenticated_successfully()\n\n amount_of_missing_bytes = 123\n file_size_with_missing_bytes = file_size_without_iv - amount_of_missing_bytes\n input_file_with_few_bytes_missing = bytearray(os.urandom(file_size_with_missing_bytes))\n self.file_can_be_downloaded(input_file_with_few_bytes_missing)\n\n rest_of_the_input_file = bytearray(os.urandom(amount_of_missing_bytes))\n self.file_can_be_downloaded(rest_of_the_input_file)\n\n output_file_path = self.create_output_file_path()\n\n pyega3.download_file_retry(('', ''), 'test_file_id1', output_file_path, output_file_path,\n file_size_with_iv, 'check_sum', 1, None, output_file_path, None, 2, 0.1)\n\n temp_file = pyega3.TEMPORARY_FILES.pop()\n # The temporary file should not exist because everything went fine,\n # and it was deleted automatically:\n self.assertFalse(os.path.exists(temp_file))\n\n self.assertEqual(responses.calls[1].request.headers.get('Range'), 'bytes=0-92699')\n 
self.assertEqual(responses.calls[2].request.headers.get('Range'), 'bytes=92577-92699')\n self.assertEqual(responses.calls[2].request.headers.get('Range'), 'bytes={}-92699'\n .format(file_size_with_missing_bytes))\n\n self.assertTrue(os.path.exists(output_file_path))\n output_file_size = os.stat(output_file_path).st_size\n self.assertEqual(output_file_size, file_size_without_iv)\n os.remove(output_file_path)", "title": "" }, { "docid": "e2f34ea55eafaa67fa006606b56a112a", "score": "0.5164844", "text": "def test_check_file_size_over(self):\n with self.assertRaises(FileTooLargeError):\n with open(\"kitsune/upload/tests/media/test.jpg\", \"rb\") as f:\n up_file = File(f)\n # This should raise\n check_file_size(up_file, 0)", "title": "" }, { "docid": "e914723aefc4dd4aec9703c3c8efbaa5", "score": "0.5162419", "text": "def test_loaderException(self):\n counter = count()\n\n def _loadMe(path, crashMe=False):\n if crashMe:\n raise Exception('It is an exception!')\n return next(counter)\n\n cf = CachedFile(self.testFile, _loadMe)\n\n # Can we still load after the first attempt raises an exception?\n self.assertRaises(Exception, cf.load, True)\n self.assertEqual(cf.load(), 0)\n\n # Cache should be valid now, so a broken loader shouldn't matter\n self.assertEqual(cf.load(True), 0)\n\n # A second broken load\n cf.invalidate()\n\n self.assertRaises(Exception, cf.load, True)\n self.assertEqual(cf.load(), 1)", "title": "" }, { "docid": "5b904299858d8f84f092bf798f03c337", "score": "0.51484394", "text": "def ebOpened(err):\n if isinstance(err.value, FTPCmdError):\n return (err.value.errorCode, path)\n log.err(err, \"Unexpected error received while opening file:\")\n self.l.response(FTPService._name, self.transport.getPeer().host, FTPService._port, RESPONSE.get(FILE_NOT_FOUND),\n self.user, \"STOR\")\n return FILE_NOT_FOUND, path", "title": "" }, { "docid": "324415ca327a4811854dd96eab3fdd0a", "score": "0.514601", "text": "def test_csv_files_read_file_filename_invalid(self):\n self.assertRaises(IOError, read_csv_file, filename='error.csv')", "title": "" }, { "docid": "9e4db726737c0b3b37815d256e4e6cdc", "score": "0.5127637", "text": "def open_file(self, path):\n return None", "title": "" }, { "docid": "1dbe0bb25fb1e3d2440b2cf9fbc7e4f3", "score": "0.5124724", "text": "def My_open_file(filename, mode):\n try:\n fp=open( filename, mode)\n except FileNotFoundError:\n print('File <%s> not found'%(filename))\n current_location=os.getcwd() \n # gets the directory where the program is executing \n print(\"executing program in directory: \"+current_location) \n sys.exit(2) # exits the program, returning 2 to the operating system \n return(fp)", "title": "" }, { "docid": "6bec0e99818d9853d358df4e66b90122", "score": "0.51227343", "text": "def close_on_error(read_meth):\n def new_read_meth(inst):\n try:\n return read_meth(inst)\n except httplib.HTTPException:\n inst.close()\n raise\n return new_read_meth", "title": "" }, { "docid": "ec958faa408080299232eee23ff635dd", "score": "0.5121937", "text": "def open_file(self,filename): \n try:\n name = self.resolve( self.propDict['TODO_DIR'] + \"/\" + filename )\n self.taskFile = open( name, 'U' )\n except IOError:\n raise TaskFileNotFoundError", "title": "" }, { "docid": "98ecc48f358deb7798529dcc364640d1", "score": "0.5120445", "text": "def acquire(self) -> None:\n if self.lock_counter > 0:\n # Dude, you already locked the file...\n self.lock_counter += 1\n return\n\n start_time = time.time()\n while True:\n try:\n self.fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | 
os.O_RDWR)\n self.lock_counter += 1\n break\n except OSError as oserr:\n import errno\n if oserr.errno != errno.EEXIST:\n raise\n if (time.time() - start_time) >= self.timeout:\n raise FileLock.TimeoutError(\"Timeout occurred\")\n time.sleep(self.delay)", "title": "" }, { "docid": "8a71f57c82c041e043ba8877f92bf9b5", "score": "0.5115005", "text": "def test_lock_file_ok(self):\n with tempfile.TemporaryFile() as fp:\n with lock_file(fp):\n pass", "title": "" }, { "docid": "66761e3f7f8777d39cbb701ed1b19a46", "score": "0.5114575", "text": "def handle_repair(self):\n self.__filelock.acquire()\n repair()", "title": "" }, { "docid": "a1fc00d1fd428d3d1c617e742b41480e", "score": "0.51085734", "text": "def test_reserveOpensFileOnce(self):\n self.assertEqual(len(self.reservedFileObjects), 0)\n\n for _ in range(10):\n self.reservedFD.reserve()\n self.assertEqual(len(self.reservedFileObjects), 1)\n self.assertFalse(self.reservedFileObjects[0].closed)", "title": "" }, { "docid": "dfe6d7bf61ae2dc1e71d01c5aae4e908", "score": "0.5107998", "text": "def _try_repeatedly(self, method, max_retries):\n retries = 0\n\n while True:\n try:\n return method()\n except warthog.exceptions.WarthogApiError as e:\n if e.api_code not in warthog.core.TRANSIENT_ERRORS or retries >= max_retries:\n raise\n self._logger.debug(\n \"Encountered transient error %s - %s, retrying... \", e.api_code, e.api_msg)\n time.sleep(self._interval)\n retries += 1", "title": "" }, { "docid": "496c8d18ddac83411a1b3f9d405146bb", "score": "0.51065177", "text": "def test_write_mock_open_file_not_exist_v_nok(self, mock_isfile, mock_validation, mock_chmod):\n test_path = \"/tmp/mocked_wr2\"\n sis = SessionStore(file_path=test_path, session_timeout=600)\n sis.value = \"some_session_id_value1\"\n\n mock_isfile.return_value = False\n mock_validation.return_value = False\n\n with mock.patch('pyotrs.lib.open', mock.mock_open()) as m:\n self.assertRaisesRegex(IOError,\n 'Race condition.*',\n sis.write,\n 'foobar12')\n\n self.assertEqual(mock_isfile.call_count, 1)\n self.assertEqual(mock_validation.call_count, 1)\n self.assertEqual(mock_chmod.call_count, 1)\n m.assert_called_once_with(test_path, 'w')", "title": "" }, { "docid": "1689c79d16be38466933456ca4fd0a85", "score": "0.510647", "text": "def _open_file(fhir_file: Path) -> io.TextIOWrapper:\n if fhir_file.name in NOT_WORKING:\n pytest.skip(\"test disabled\")\n\n return fhir_file.open()", "title": "" }, { "docid": "63ef3f7181ce28ccfc5bcf336e0aa72a", "score": "0.5105689", "text": "def catch_ioerror(meth):\r\n @wraps(meth)\r\n def wrapper(self, *args, **kwargs):\r\n try:\r\n return meth(self, *args, **kwargs)\r\n except IOError as (errno, strerror):\r\n if errno == ENOSPC:\r\n msg = 'No space left on device'\r\n raise ScanMustStopByKnownReasonExc(msg)\r\n\r\n return wrapper", "title": "" }, { "docid": "99898765dbff89770842397ebeea9d77", "score": "0.510453", "text": "def test_queries_files_read_file_filename_invalid(self):\n self.assertRaises(IOError, read_queries_file, filename='error.txt')", "title": "" }, { "docid": "2c84b915db5bf34044c7aa521b44fe81", "score": "0.5088615", "text": "def test_fileDescriptorsReleasedOnFailure(self):\n fileDescriptors = []\n\n def failsAfterThree():\n if len(fileDescriptors) == 3:\n raise ValueError(\n \"test_fileDescriptorsReleasedOnFailure\" \" fake open exception\"\n )\n else:\n fd = os.dup(0)\n fileDescriptors.append(fd)\n return fd\n\n exhauster = _ExhaustsFileDescriptors(failsAfterThree)\n self.addCleanup(exhauster.release)\n\n self.assertRaises(ValueError, 
exhauster.exhaust)\n self.assertEqual(len(fileDescriptors), 3)\n self.assertEqual(exhauster.count(), 0)\n\n for fd in fileDescriptors:\n exception = self.assertRaises(OSError, os.fstat, fd)\n self.assertEqual(exception.errno, errno.EBADF)", "title": "" }, { "docid": "7582cc7ee28759ed4148e5c4b1c7ccbe", "score": "0.5086147", "text": "def open_and_lock(self, timeout, delay):\r\n if self._locked:\r\n raise AlreadyLockedException('File %s is already locked' %\r\n self._filename)\r\n start_time = time.time()\r\n\r\n validate_file(self._filename)\r\n try:\r\n self._fh = open(self._filename, self._mode)\r\n except IOError, e:\r\n # If we can't access with _mode, try _fallback_mode and don't lock.\r\n if e.errno == errno.EACCES:\r\n self._fh = open(self._filename, self._fallback_mode)\r\n return\r\n\r\n # We opened in _mode, try to lock the file.\r\n while True:\r\n try:\r\n hfile = win32file._get_osfhandle(self._fh.fileno())\r\n win32file.LockFileEx(\r\n hfile,\r\n (win32con.LOCKFILE_FAIL_IMMEDIATELY|\r\n win32con.LOCKFILE_EXCLUSIVE_LOCK), 0, -0x10000,\r\n pywintypes.OVERLAPPED())\r\n self._locked = True\r\n return\r\n except pywintypes.error, e:\r\n if timeout == 0:\r\n raise e\r\n\r\n # If the error is not that the file is already in use, raise.\r\n if e[0] != _Win32Opener.FILE_IN_USE_ERROR:\r\n raise\r\n\r\n # We could not acquire the lock. Try again.\r\n if (time.time() - start_time) >= timeout:\r\n logger.warn('Could not lock %s in %s seconds' % (\r\n self._filename, timeout))\r\n if self._fh:\r\n self._fh.close()\r\n self._fh = open(self._filename, self._fallback_mode)\r\n return\r\n time.sleep(delay)", "title": "" }, { "docid": "358b6799cd6ff9bf19428f1840481532", "score": "0.5084627", "text": "def CheckFile(filename):\n if not os.path.exists(filename):\n raise FileNotFoundError('%s: file not found' % filename)\n elif not os.access(filename, os.R_OK):\n raise FileNotReadableError('%s: file not readable' % filename)", "title": "" }, { "docid": "903976667975809c4b259b290d89857b", "score": "0.5077576", "text": "def test_retry_option(state, state_tree):\n sls_contents = \"\"\"\n file_test:\n file.exists:\n - name: /path/to/a/non-existent/file.txt\n - retry:\n until: True\n attempts: 3\n interval: 1\n splay: 0\n \"\"\"\n expected_comment = (\n 'Attempt 1: Returned a result of \"False\", with the following '\n 'comment: \"Specified path /path/to/a/non-existent/file.txt does not exist\"'\n )\n with pytest.helpers.temp_file(\"retry.sls\", sls_contents, state_tree):\n ret = state.sls(\"retry\")\n for state_return in ret:\n assert state_return.result is False\n assert expected_comment in state_return.comment\n assert state_return.full_return[\"duration\"] >= 3", "title": "" }, { "docid": "96127b1dff235b4d4c2923f8b05bdb0d", "score": "0.5075218", "text": "def claim_file(self):\n while True:\n file_path = self.find_file()\n if not file_path:\n break\n os.makedirs(os.path.join(self.doing_dir, os.path.dirname(file_path)))\n try:\n # The following is an atomic operation.\n os.rename(os.path.join(self.todo_dir, file_path), os.path.join(self.doing_dir, file_path))\n return file_path\n except OSError as err:\n if err.errno in (errno.ENOENT, ):\n # Another process has snatched this file away from us.\n pass\n else:\n raise", "title": "" }, { "docid": "aa6cc2e433984d295267f35875762a8d", "score": "0.50697535", "text": "def catch_ioerror(meth):\n @wraps(meth)\n def wrapper(self, *args, **kwargs):\n try:\n return meth(self, *args, **kwargs)\n except IOError as (errno, strerror):\n if errno == ENOSPC:\n msg = 'No 
space left on device'\n raise ScanMustStopByKnownReasonExc(msg)\n\n return wrapper", "title": "" }, { "docid": "c6f3ac5ade3d4d72d54f88ff0dedbd25", "score": "0.50652796", "text": "def reopen_files(self):\r\n for log in (self.error_log, self.access_log):\r\n for h in log.handlers:\r\n if isinstance(h, logging.FileHandler):\r\n h.acquire()\r\n h.stream.close()\r\n h.stream = open(h.baseFilename, h.mode)\r\n h.release()", "title": "" }, { "docid": "3c000215aba2d30253299d1b5bb4fa17", "score": "0.50562423", "text": "def test_get_file_http_with_http_error_404(self):\n self.spy_on(urlopen,\n op=kgb.SpyOpRaise(HTTPError(url='https://example.com',\n code=404,\n msg=None,\n hdrs=None,\n fp=None)))\n\n client = SCMClient(path='/path/to/repo')\n\n with self.assertRaises(FileNotFoundError) as ctx:\n client.get_file_http('https://example.com',\n path='/path/to/file',\n revision='abc123')\n\n e = ctx.exception\n self.assertEqual(e.path, '/path/to/file')\n self.assertEqual(e.revision, 'abc123')", "title": "" } ]
d80eaebe061dba2fec374effb1258b7b
Gets the query interface for a parameter.
[ { "docid": "40b48b8c6bd432aa87c4cb285b04c1ec", "score": "0.7126727", "text": "def get_parameter_query(self):\n return # osid.configuration.ParameterQuery", "title": "" } ]
[ { "docid": "4b7819f700261f5e24fb35c743438eb6", "score": "0.6674734", "text": "def query_parameter(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"query_parameter\")", "title": "" }, { "docid": "27054f178c9db5c9c41275d118edd515", "score": "0.62531286", "text": "def query(self, param):\n pass", "title": "" }, { "docid": "6358e0ff7ac2406890ef6c874e1ef880", "score": "0.6038211", "text": "def get_parameter_query_record(self, parameter_record_type):\n return # osid.configuration.records.ParameterQueryRecord", "title": "" }, { "docid": "af55d2cdeb9a32dd84e101b9ef1f85ed", "score": "0.59388274", "text": "def get_query():\n return BridgeQuery", "title": "" }, { "docid": "7f15a883e4b4f5827a8f06479e8c6b34", "score": "0.59099", "text": "def query(self) -> Optional[str]:\n return pulumi.get(self, \"query\")", "title": "" }, { "docid": "1e3c539b6185778255d79c58631ced05", "score": "0.5843162", "text": "def get_parameter(self, parameter):\n\n return self.parameters[parameter]", "title": "" }, { "docid": "2a9f5cf67d306135d65a9e6f9ab5ba47", "score": "0.5842759", "text": "def queryAdapter(object, interface, name, default=None):", "title": "" }, { "docid": "436977b69986df8f4e8e7f7e0f5ec59a", "score": "0.5756555", "text": "def dlgflow_get_parameter(data, parameter: str):\r\n return data['queryResult']['parameters'].get(parameter, None)", "title": "" }, { "docid": "68e140ba32c778b8c11937064e777186", "score": "0.5741", "text": "def _query(self, variablename: str):\n raise NotImplementedError", "title": "" }, { "docid": "a6933ec847d408ab7038ca043b9ac9c8", "score": "0.57195246", "text": "def get_query_parm(event, query_parm_name):\n try:\n return event['queryStringParameters'][query_parm_name]\n except:\n return None", "title": "" }, { "docid": "b974cc34127b22e725dff6e57238c5c3", "score": "0.57120806", "text": "def get_query(self, key: str, default: Any = _DEFAULT_GET) -> ParameterValue:\n try:\n return deepcopy(self._query_dict[key])\n except KeyError:\n if default is _DEFAULT_GET:\n raise\n return default", "title": "" }, { "docid": "1118bb7a8aa8293e89af180e047cfe3b", "score": "0.5704343", "text": "def token_query_parameter(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_query_parameter\")", "title": "" }, { "docid": "1118bb7a8aa8293e89af180e047cfe3b", "score": "0.5704343", "text": "def token_query_parameter(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"token_query_parameter\")", "title": "" }, { "docid": "d05496d53aba122a685df2d59caf73e1", "score": "0.5699936", "text": "def find_parameter(self, path) -> BaseParameter:\n return self._params[path]", "title": "" }, { "docid": "da6e683cb9d1c829062e7c75047737a5", "score": "0.5674833", "text": "def query(self) -> str:\n return pulumi.get(self, \"query\")", "title": "" }, { "docid": "da6e683cb9d1c829062e7c75047737a5", "score": "0.5674833", "text": "def query(self) -> str:\n return pulumi.get(self, \"query\")", "title": "" }, { "docid": "da6e683cb9d1c829062e7c75047737a5", "score": "0.5674833", "text": "def query(self) -> str:\n return pulumi.get(self, \"query\")", "title": "" }, { "docid": "00c11d2fe00bff36e47bc542c3b5ecd7", "score": "0.56260556", "text": "def query_parameters(self) -> Optional[Sequence['outputs.ApiOperationRequestQueryParameter']]:\n return pulumi.get(self, \"query_parameters\")", "title": "" }, { "docid": "a3faf0e6e5274b098c23173dd8b15e6f", "score": "0.5596269", "text": "def query(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"query\")", "title": "" }, { "docid": 
"dca4b9f17142c1ad92aa58fa9aada88a", "score": "0.55834836", "text": "def query(self, param: str = '') -> list:\n return []", "title": "" }, { "docid": "1fa1f3fac693df10d0d187cd9e53ae8c", "score": "0.55738884", "text": "def _get_parameter(self):\n return self.__parameter", "title": "" }, { "docid": "1fa1f3fac693df10d0d187cd9e53ae8c", "score": "0.55738884", "text": "def _get_parameter(self):\n return self.__parameter", "title": "" }, { "docid": "1fa1f3fac693df10d0d187cd9e53ae8c", "score": "0.55738884", "text": "def _get_parameter(self):\n return self.__parameter", "title": "" }, { "docid": "1fa1f3fac693df10d0d187cd9e53ae8c", "score": "0.55738884", "text": "def _get_parameter(self):\n return self.__parameter", "title": "" }, { "docid": "1fa1f3fac693df10d0d187cd9e53ae8c", "score": "0.5573174", "text": "def _get_parameter(self):\n return self.__parameter", "title": "" }, { "docid": "b8c199f5c75bc5ecd2c8eed5222482e6", "score": "0.55717045", "text": "def parameter(self) -> Optional[str]:\n return pulumi.get(self, \"parameter\")", "title": "" }, { "docid": "05b7ed414fcba9f49aac5ee3d0a6046b", "score": "0.55680805", "text": "def get_query(self):\n return self.query_data", "title": "" }, { "docid": "05b7ed414fcba9f49aac5ee3d0a6046b", "score": "0.55680805", "text": "def get_query(self):\n return self.query_data", "title": "" }, { "docid": "a19e1d8e4b1c5fdd472dfc3dec7a92c3", "score": "0.5566937", "text": "def get_query_arg(self, key):\n return self.query_dict[key]", "title": "" }, { "docid": "c43ee1f5f3e9da5982330a9ee4c4af30", "score": "0.5556744", "text": "def get_typed_parameter(self):\n try:\n if self.type == self.BOOLEAN_TYPE:\n return BooleanParameter.objects.get(base=self)\n elif self.type == self.STATIC_TYPE:\n return StaticParameter.objects.get(base=self)\n elif self.type == self.STRING_TYPE:\n return StringParameter.objects.get(base=self)\n elif self.type == self.CHOICE_TYPE:\n return ChoiceParameter.objects.get(base=self)\n elif self.type == self.TEXT_TYPE:\n return TextParameter.objects.get(base=self)\n elif self.type == self.INTEGER_TYPE:\n return IntegerParameter.objects.get(base=self)\n elif self.type == self.FLOAT_TYPE:\n return FloatParameter.objects.get(base=self)\n else:\n return None\n except (\n BooleanParameter.DoesNotExist,\n StaticParameter.DoesNotExist,\n StringParameter.DoesNotExist,\n ChoiceParameter.DoesNotExist,\n TextParameter.DoesNotExist,\n IntegerParameter.DoesNotExist,\n FloatParameter.DoesNotExist,\n ) as e:\n return None", "title": "" }, { "docid": "98634ee6eba77913bf8045390dfaecd5", "score": "0.5553962", "text": "def getParameter(self, name):\n value = None\n if parameterTable != None:\n value = parameterTable.get(name.lower())\n return value", "title": "" }, { "docid": "17c054ef968a267d5b7b4e2f328f8bb6", "score": "0.55252516", "text": "def queryUtility(interface, name='', default=None):", "title": "" }, { "docid": "4bda3eb406f74e958cfd0931bbd964dc", "score": "0.5523067", "text": "def _GetQueryImplementation(name):\n try:\n return _QUERY_IMPL[name]\n except KeyError:\n raise errors.OpPrereqError(\"Unknown query resource '%s'\" % name,\n errors.ECODE_INVAL)", "title": "" }, { "docid": "5d600781605a0f3f77c1d9c7d8b5b77a", "score": "0.55165964", "text": "def get_experiment_param(self, instr, exper, param=None):\n \n cursor = self._conn.cursor()\n if param:\n\n # parameter name is specified, find that param only\n q = \"\"\"SELECT p.val FROM experiment_param p, experiment e, instrument i\n WHERE p.exper_id = e.id AND e.instr_id = i.id \n AND i.name=%s AND e.name=%s AND 
p.param=%s\"\"\"\n cursor.execute(q, (instr, exper, param))\n\n rows = cursor.fetchall()\n if not rows: return None\n return rows[0][0]\n \n else:\n \n # get all parameters\n q = \"\"\"SELECT p.param, p.val, p.descr \n FROM experiment_param p, experiment e, instrument i\n WHERE p.exper_id = e.id AND e.instr_id = i.id AND i.name=%s AND e.name=%s\"\"\"\n cursor.execute(q, (instr, exper))\n \n return list(map(tuple, cursor.fetchall()))", "title": "" }, { "docid": "7202886f6a0fcae5ad00918ad007209c", "score": "0.5506992", "text": "def search_query_param(self):\n return self._search_query_param", "title": "" }, { "docid": "58285a7f47a91da27cc46656d0c8125b", "score": "0.54631895", "text": "def get_query(self, query=None):\n if query is None:\n return self._build_query()\n return self._queries[query]", "title": "" }, { "docid": "8758003e906a4a9f38f7fc6804b42fd9", "score": "0.54441595", "text": "def get_param(self, name, default=None):\n try:\n return self.query_params[name]\n except KeyError:\n return default", "title": "" }, { "docid": "025b4f8128b88deb0306014dd0aa6f18", "score": "0.54427135", "text": "def find_parameter(projektnavn: str, parameter: str) -> str:\n param = find_faneblad(projektnavn, \"Parametre\", ARKDEF_PARAM)\n if parameter not in list(param[\"Navn\"]):\n fire.cli.print(f\"FEJL: '{parameter}' ikke angivet under fanebladet 'Parametre'\")\n sys.exit(1)\n\n return param.loc[param[\"Navn\"] == \"Database\"][\"Værdi\"].to_string(index=False)", "title": "" }, { "docid": "751f41830ab210673da2be1150bea940", "score": "0.54342335", "text": "def query_plugin( pa, interface, name, *args, **kwargs ):\n from pluggdapps.plugin import PluginMeta, ISettings\n if isinstance(interface, str) :\n intrf = interface.lower()\n interface = PluginMeta._interfmap.get(intrf, {}).get('cls', None)\n cls = PluginMeta._implementers.get(interface, {}).get(name.lower(), None)\n return cls( pa, *args, **kwargs ) if cls else None", "title": "" }, { "docid": "72415898ee8dc68975968a2ba03ace1a", "score": "0.5431948", "text": "def get_query(self, id):\n try:\n return PeriscopeQuery(id)\n\n except KeyError as err:\n print(err.args[0], \"is invalid ID...\")\n return None", "title": "" }, { "docid": "6b27f5faa19db8b57c00b3087b41592c", "score": "0.54300034", "text": "def get_query_parameter(self,original_name: Optional[str] = None) -> str:\n if not original_name:\n raise TypeError(\"original_name cannot be null.\")\n if original_name == \"filter\":\n return \"%24filter\"\n if original_name == \"search\":\n return \"%24search\"\n return original_name", "title": "" }, { "docid": "c5bfa7fdaef9b64af7bd8fb904954912", "score": "0.5422422", "text": "def query_type(self) -> Optional[str]:\n return pulumi.get(self, \"query_type\")", "title": "" }, { "docid": "e310994ab451fb5d68bee560cf8c5a69", "score": "0.5401807", "text": "def get_query():\n return QueDoidura.query()", "title": "" }, { "docid": "efe50d8df78030d9ab954fba8711093f", "score": "0.53882724", "text": "def queryResource(name, request, default=None, providing=Interface,\n context=None):", "title": "" }, { "docid": "f2008f9c4afc4bec27ea458a6989e2f1", "score": "0.5378236", "text": "def get_query_parameter(self,original_name: Optional[str] = None) -> str:\n if not original_name:\n raise TypeError(\"original_name cannot be null.\")\n if original_name == \"count\":\n return \"%24count\"\n if original_name == \"expand\":\n return \"%24expand\"\n if original_name == \"filter\":\n return \"%24filter\"\n if original_name == \"orderby\":\n return \"%24orderby\"\n if original_name 
== \"search\":\n return \"%24search\"\n if original_name == \"select\":\n return \"%24select\"\n if original_name == \"skip\":\n return \"%24skip\"\n if original_name == \"top\":\n return \"%24top\"\n return original_name", "title": "" }, { "docid": "f2008f9c4afc4bec27ea458a6989e2f1", "score": "0.5378236", "text": "def get_query_parameter(self,original_name: Optional[str] = None) -> str:\n if not original_name:\n raise TypeError(\"original_name cannot be null.\")\n if original_name == \"count\":\n return \"%24count\"\n if original_name == \"expand\":\n return \"%24expand\"\n if original_name == \"filter\":\n return \"%24filter\"\n if original_name == \"orderby\":\n return \"%24orderby\"\n if original_name == \"search\":\n return \"%24search\"\n if original_name == \"select\":\n return \"%24select\"\n if original_name == \"skip\":\n return \"%24skip\"\n if original_name == \"top\":\n return \"%24top\"\n return original_name", "title": "" }, { "docid": "f2008f9c4afc4bec27ea458a6989e2f1", "score": "0.5378236", "text": "def get_query_parameter(self,original_name: Optional[str] = None) -> str:\n if not original_name:\n raise TypeError(\"original_name cannot be null.\")\n if original_name == \"count\":\n return \"%24count\"\n if original_name == \"expand\":\n return \"%24expand\"\n if original_name == \"filter\":\n return \"%24filter\"\n if original_name == \"orderby\":\n return \"%24orderby\"\n if original_name == \"search\":\n return \"%24search\"\n if original_name == \"select\":\n return \"%24select\"\n if original_name == \"skip\":\n return \"%24skip\"\n if original_name == \"top\":\n return \"%24top\"\n return original_name", "title": "" }, { "docid": "34aedc72726db7b783398ce4db52967b", "score": "0.5355816", "text": "def sql_query(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"sql_query\")", "title": "" }, { "docid": "03498e2390307086e5eb6cbec66644e1", "score": "0.53476095", "text": "def get_query_parameter(self,original_name: Optional[str] = None) -> str:\n if not original_name:\n raise TypeError(\"original_name cannot be null.\")\n if original_name == \"count\":\n return \"%24count\"\n if original_name == \"filter\":\n return \"%24filter\"\n if original_name == \"orderby\":\n return \"%24orderby\"\n if original_name == \"select\":\n return \"%24select\"\n if original_name == \"skip\":\n return \"%24skip\"\n if original_name == \"top\":\n return \"%24top\"\n return original_name", "title": "" }, { "docid": "09ae8fb01a55bbca6d78397cb8f8d5ae", "score": "0.5344779", "text": "def query(self):\n return self._query", "title": "" }, { "docid": "09ae8fb01a55bbca6d78397cb8f8d5ae", "score": "0.5344779", "text": "def query(self):\n return self._query", "title": "" }, { "docid": "09ae8fb01a55bbca6d78397cb8f8d5ae", "score": "0.5344779", "text": "def query(self):\n return self._query", "title": "" }, { "docid": "09ae8fb01a55bbca6d78397cb8f8d5ae", "score": "0.5344779", "text": "def query(self):\n return self._query", "title": "" }, { "docid": "09ae8fb01a55bbca6d78397cb8f8d5ae", "score": "0.5344779", "text": "def query(self):\n return self._query", "title": "" }, { "docid": "09ae8fb01a55bbca6d78397cb8f8d5ae", "score": "0.5344779", "text": "def query(self):\n return self._query", "title": "" }, { "docid": "4c41714a645d4cec8b35a6a6b93c8921", "score": "0.53226024", "text": "def parameter(self):\n return self._parameter", "title": "" }, { "docid": "9f8975b6aca1f1e27e8298a9ac01c13d", "score": "0.53187233", "text": "def getquery(qnum):\n return 
QUERIES['parameters']['query'][qnum-1]['text']", "title": "" }, { "docid": "db8eb5a8d5b3553fc28ba7f05b092db9", "score": "0.5317693", "text": "def get_parameter_query_inspector_record(self, parameter_record_type):\n return # osid.configuration.records.ParameterQueryInspectorRecord", "title": "" }, { "docid": "9f9eacc7a788ff4d182004ea553c0f06", "score": "0.53174", "text": "def query_params(self) -> Optional[Sequence['outputs.DiagnosticBackendResponseDataMaskingQueryParam']]:\n return pulumi.get(self, \"query_params\")", "title": "" }, { "docid": "ece811f6cae74e605bc60391a03755e3", "score": "0.53161997", "text": "def get(self, parameter):\n return getattr(self, parameter)", "title": "" }, { "docid": "cd33755eeee67a51fe46c6bef518c277", "score": "0.5315046", "text": "def get_query_parameter(self,original_name: Optional[str] = None) -> str:\n if not original_name:\n raise TypeError(\"original_name cannot be null.\")\n if original_name == \"expand\":\n return \"%24expand\"\n if original_name == \"select\":\n return \"%24select\"\n return original_name", "title": "" }, { "docid": "cd33755eeee67a51fe46c6bef518c277", "score": "0.5315046", "text": "def get_query_parameter(self,original_name: Optional[str] = None) -> str:\n if not original_name:\n raise TypeError(\"original_name cannot be null.\")\n if original_name == \"expand\":\n return \"%24expand\"\n if original_name == \"select\":\n return \"%24select\"\n return original_name", "title": "" }, { "docid": "cd33755eeee67a51fe46c6bef518c277", "score": "0.5315046", "text": "def get_query_parameter(self,original_name: Optional[str] = None) -> str:\n if not original_name:\n raise TypeError(\"original_name cannot be null.\")\n if original_name == \"expand\":\n return \"%24expand\"\n if original_name == \"select\":\n return \"%24select\"\n return original_name", "title": "" }, { "docid": "4fad4d6b8e20e8a9a6cb2b330ea0374b", "score": "0.53138906", "text": "def query_params(self) -> Optional[Sequence['outputs.ApiDiagnosticBackendResponseDataMaskingQueryParam']]:\n return pulumi.get(self, \"query_params\")", "title": "" }, { "docid": "86e3da7f5f9c5eb5d5b7bba38523e916", "score": "0.5302493", "text": "def get_parameter(self, param_name: str) -> Parameter:\r\n for p in self.parameters:\r\n if p.name == param_name:\r\n return p\r\n raise SDKException(ErrorCode.param_err('get parameter failed.'))", "title": "" }, { "docid": "aca7159786beca56195f538ee01cea4b", "score": "0.5275483", "text": "def get_parameter(self, name):\n return self.parameter_dict[name]", "title": "" }, { "docid": "aca7159786beca56195f538ee01cea4b", "score": "0.5275483", "text": "def get_parameter(self, name):\n return self.parameter_dict[name]", "title": "" }, { "docid": "515d8f61db0df0efa0ffdecb0d6f9a46", "score": "0.52507865", "text": "def getQuery(spec, name, args, kwargs):\n\n if 'query' not in spec:\n msg = \"Query `{}` contains no `query` field.\"\n raise QueryError(msg.format(name))\n\n try:\n # render the query template using the args and kwargs\n query_template = spec['query']\n query = callQuery(query_template, args, kwargs)\n except TemplateVariableError as e:\n # the args and kwargs were not sufficient to satisfy all\n # required parameters of the query\n msg = \"Query `{}` requires missing `{}` parameter.\"\n raise QueryError(msg.format(name, e.variable))\n return query", "title": "" }, { "docid": "b762773cbc25ed41856c40c079723afc", "score": "0.52379686", "text": "def query_plugin( pa, webapp, interface, name, *args, **kwargs ):\n from pluggdapps.plugin import PluginMeta\n if 
isinstance(interface, str) :\n intrf = interface.lower()\n interface = PluginMeta._interfmap.get(intrf, {}).get('cls', None)\n cls = PluginMeta._implementers.get(interface, {}).get(name.lower(), None)\n return cls( pa, webapp, *args, **kwargs ) if cls else None", "title": "" }, { "docid": "868f060fe34d121856c1866b52b9cbfc", "score": "0.52371657", "text": "def get_parameter(self, field: Union[str, Field]):\n # Normalise a field name to a field reference\n if isinstance(field, str):\n if field in self.fields:\n field = self.fields[field]\n else:\n return None\n\n # Return the value stored against the field,\n # or None if there isn't one\n if field in self.params:\n return self.params[field]\n else:\n return None", "title": "" }, { "docid": "045d5bf20e9472103997da60431b2669", "score": "0.5227112", "text": "def query_params(self) -> Optional[Sequence['outputs.ApiDiagnosticBackendRequestDataMaskingQueryParam']]:\n return pulumi.get(self, \"query_params\")", "title": "" }, { "docid": "2c9e15067a4aae4b1c5131b69829471b", "score": "0.5226343", "text": "def query(self):\n return '&'.join('{}={}'.format(key, value) for key, value in sorted(self.parameters.iteritems()))", "title": "" }, { "docid": "89dc84f6f4279ccf68ab2da22c1f450e", "score": "0.5224622", "text": "def query_params(self) -> Optional[Sequence['outputs.ApiDiagnosticFrontendResponseDataMaskingQueryParam']]:\n return pulumi.get(self, \"query_params\")", "title": "" }, { "docid": "7bb511cfdd2d367665c03cba4616a530", "score": "0.5217329", "text": "def query_params(self) -> Optional[Sequence['outputs.DiagnosticBackendRequestDataMaskingQueryParam']]:\n return pulumi.get(self, \"query_params\")", "title": "" }, { "docid": "ba250978f9d3879e70e8720adae1cc82", "score": "0.5216417", "text": "def query_params(self) -> Optional[Sequence['outputs.DiagnosticFrontendResponseDataMaskingQueryParam']]:\n return pulumi.get(self, \"query_params\")", "title": "" }, { "docid": "3711d5a8580d8b71296f7c2e00593a10", "score": "0.52121717", "text": "def get_query(*args):\n return _xapian.Enquire_get_query(*args)", "title": "" }, { "docid": "e5945f5df0f7bed29e588d308ffbd328", "score": "0.5200163", "text": "def get_query(self):\r\n return input(\"Enter your query: \")", "title": "" }, { "docid": "11560ffd3947a173cfe0e635c523fddf", "score": "0.5184068", "text": "def P(name=None):\n return sql_dialects.ast.Parameter(name)", "title": "" }, { "docid": "3e6dc293608ed37a9123b8a77924690c", "score": "0.51710457", "text": "def get_query(self, query):\n url = self._base[:-1] if self._base[-1] == '/' else self._base\n return self.get(url, query)", "title": "" }, { "docid": "720bec68f7c5637e88fb509eb5b129bf", "score": "0.5154815", "text": "def qparam( parameters ):\n try :\n for attr, value in parameter :\n if attr == b'q' : return float( value )\n else :\n return None\n except :\n return None", "title": "" }, { "docid": "556025de489bdbdbeadff5a311c61cbd", "score": "0.51525074", "text": "def query_params(self) -> Optional[Sequence['outputs.ApiDiagnosticFrontendRequestDataMaskingQueryParam']]:\n return pulumi.get(self, \"query_params\")", "title": "" }, { "docid": "0d1ad0da3b6b5b1b90531ca7ca9cca32", "score": "0.5151356", "text": "def general_api_query(self, route):\n data = self._http_request(\n method='GET',\n verify=False,\n url_suffix=route\n )\n\n return data.get('result')", "title": "" }, { "docid": "a65e7eb26cf592bb14f08da04bf1a27c", "score": "0.51280814", "text": "def query_params(self) -> Optional[Sequence['outputs.DiagnosticFrontendRequestDataMaskingQueryParam']]:\n return 
pulumi.get(self, \"query_params\")", "title": "" }, { "docid": "f2249efa6d71115c1c119411f7a5f295", "score": "0.5118213", "text": "def param(self, name):\n return self.parameters.value(name)", "title": "" }, { "docid": "cbe4bec8326dbe2f99f51c9539c804af", "score": "0.50967056", "text": "def get_query_argument(self, name: str, default: Any = None):\n return self.request.query.get(name, default)", "title": "" }, { "docid": "3daabff7720c625a0f28735faa326fcc", "score": "0.50814617", "text": "def query(self) -> Query:", "title": "" }, { "docid": "572a3f10ea773a3606ecc2cf0ecfb15d", "score": "0.50590533", "text": "def get_query(self):\n query = request.GET.get(\"query\")\n return bson_loads(query) if query else {}", "title": "" }, { "docid": "01a52c7655548130698ab57e738c2b12", "score": "0.50545776", "text": "def get_parameter(self, parameter: str):\n\n # We assume that all input is valid, so do not use this if you don't know what you're doing\n self.execute_command(\"@\" + str(self.notify_node_nr) + \" get :\" + parameter)\n\n # We return whatever is printed to our notification node\n return self.get_notifications()", "title": "" }, { "docid": "021e588f80aa321224d37948decf67f7", "score": "0.5048619", "text": "def query(self):\n return DataQuery()", "title": "" }, { "docid": "280c552eb4796992f0a2f6115d638259", "score": "0.504772", "text": "def get_param(self, parameter):\n if not self.InterfaceStatisticsParameter._is_valid(parameter):\n raise OnepIllegalArgumentException(parameter, 'Invalid Value')\n try:\n param = self.stats_list[parameter]\n except IndexError:\n raise OnepNotSupportedException('%s is not supported.' % self.InterfaceStatisticsParameter.enumval(parameter))\n if param.retcode == OnepStatus.ONEP_OK:\n return param.stats\n raise OnepException('Error', ExceptionIDL(param.retcode))", "title": "" }, { "docid": "3afab9ab41c34170d00fc20070067f37", "score": "0.5037319", "text": "def supports_parameter_query(self):\n return # boolean", "title": "" }, { "docid": "3afab9ab41c34170d00fc20070067f37", "score": "0.5037319", "text": "def supports_parameter_query(self):\n return # boolean", "title": "" }, { "docid": "52ea341a511cb79d6021fec52f8f9870", "score": "0.5025326", "text": "def _get_param(pattern: str) -> RegisteredParameter:\n p = pattern.lower()\n if p in _parameter_registry:\n return _parameter_registry[p]\n raise ParameterError(f\"'{pattern}' is not a parameter\")", "title": "" }, { "docid": "b378e32c5620c6106bcb44783af03cf5", "score": "0.5003257", "text": "def parameter(self):\n raise NotImplementedError('Abstract method \"parameter\" is not '\n 'implemented!')", "title": "" }, { "docid": "db81cb6259b2b5cc6163ccfe38f14818", "score": "0.50007105", "text": "def query(self):\n return self.redshift_options.query", "title": "" }, { "docid": "b6e3cb829a64a81d1a60403ec8cac875", "score": "0.49933928", "text": "def search_Parameter_Stmt(stmt, node, gentype=None):\n get_name_or_defer(stmt, node.items[1], res_value)", "title": "" }, { "docid": "6b94dfa5695323a67a3ed20d4f1bdbf6", "score": "0.4981768", "text": "def param(self, param):\n if param in self._param_vals.keys():\n return self._param_vals[param]\n else:\n return self._cam_param(param)", "title": "" }, { "docid": "96220dd745a1fac08381d90328434018", "score": "0.49702138", "text": "def query(default=NO_VALUE):", "title": "" }, { "docid": "3286f1de22f22810f4e9c482539e3a82", "score": "0.49596173", "text": "def search(self, parameter, context=None):\n return self._client.call_method(\n 'jgi_gateway.search',\n [parameter], self._service_ver, 
context)", "title": "" } ]
3f54b1b2a4c1f7c4ea0c9dee34adadc7
kk is a vector of k values, zval is a scalar. Returns P(kk,zval)
[ { "docid": "3a3e93bff869cb81a5562e778ece9ada", "score": "0.7042401", "text": "def interpolate(self,kk,zval):\n pofz = np.dot(self.pktable,self.lagrange_spam(zval))\n pk = np.interp(kk,self.kk,pofz)\n return(pk)\n #", "title": "" } ]
[ { "docid": "f767fef19dc479ea256fa605aad67d44", "score": "0.6809561", "text": "def __call__(self,z=1.0):\n pofz = np.dot(self.pktable,self.lagrange_spam(z))\n return((self.kk,pofz))\n #", "title": "" }, { "docid": "f767fef19dc479ea256fa605aad67d44", "score": "0.6809561", "text": "def __call__(self,z=1.0):\n pofz = np.dot(self.pktable,self.lagrange_spam(z))\n return((self.kk,pofz))\n #", "title": "" }, { "docid": "0de13dff335481b99034dbf58c3138bb", "score": "0.6147657", "text": "def get_k(visc, c_p, Prt):\n #\n kk = numpy.zeros_like(visc)\n kk[:,:] = visc[:,:]*c_p/Prt\n #\n return kk", "title": "" }, { "docid": "46d3a121cd2da10a75afa20120f67e99", "score": "0.5945165", "text": "def valuePt(self, *args):\n return _MontePython_cxx.PollsKd_valuePt(self, *args)", "title": "" }, { "docid": "bc849932f9534139900a6c342d564bb1", "score": "0.57819426", "text": "def __call__(self, u):\r\n s = len([uk for uk in self.knots if uk == u])\r\n for k, uk in enumerate(self.knots):\r\n if uk >= u:\r\n break\r\n if s == 0:\r\n k -= 1\r\n if self.degree == 0:\r\n if k == len(self.points):\r\n k -= 1\r\n return self.points[k]\r\n ps = [dict(zip(range(k - self.degree, k - s + 1),\r\n self.points[k - self.degree:k - s + 1]))]\r\n\r\n for r in range(1, self.degree - s + 1):\r\n ps.append({})\r\n for i in range(k - self.degree + r, k - s + 1):\r\n a = (u - self.knots[i]) / (self.knots[i + self.degree - r + 1]\r\n - self.knots[i])\r\n ps[r][i] = (1 - a) * ps[r - 1][i - 1] + a * ps[r - 1][i]\r\n return ps[-1][k - s]", "title": "" }, { "docid": "d4344dca5d7c37659332d17a875bedf5", "score": "0.5726942", "text": "def z(tau, zs):\n\n def l(j,t):\n \"\"\"\n Intermediate values for polynomial interpolation\n \"\"\"\n tau = self.NLPdata['collocation_points']\\\n [self.NLPdata['cp']][self.NLPdata['DEG']]\n return np.prod(np.array([ \n (t - tau[k])/(tau[j] - tau[k]) \n for k in xrange(0,self.NLPdata['DEG']+1) if k is not j]))\n\n \n interp_vector = []\n for i in xrange(self.NEQ):\n interp_vector += [np.sum(np.array([l(j, tau)*zs[j,i]\n for j in xrange(0, self.NLPdata['DEG']+1)]))]\n return interp_vector", "title": "" }, { "docid": "e68318b4bd5b28198c1b293d1321c119", "score": "0.5702241", "text": "def vjp(self, vector):\n raise NotImplementedError", "title": "" }, { "docid": "38645e49b1f7c2d040ef6a64e790f0f0", "score": "0.568116", "text": "def vval(d, k):\n try:\n return val(d, k)\n except AttributeError:\n return d[k].magnitude", "title": "" }, { "docid": "cc616ecebf48e726a06f7c89b6805928", "score": "0.5637379", "text": "def f(k, *p):\n\t\treturn 0.", "title": "" }, { "docid": "36e042e7f941d81972325c14f6aa8404", "score": "0.5617754", "text": "def _zpkbilinear(z, p, k, fs):\n z = atleast_1d(z)\n p = atleast_1d(p)\n\n degree = _relative_degree(z, p)\n\n fs2 = 2.0*fs\n\n # Bilinear transform the poles and zeros\n z_z = (fs2 + z) / (fs2 - z)\n p_z = (fs2 + p) / (fs2 - p)\n\n # Any zeros that were at infinity get moved to the Nyquist frequency\n z_z = append(z_z, -ones(degree))\n\n # Compensate for gain change\n k_z = k * real(prod(fs2 - z) / prod(fs2 - p))\n\n return z_z, p_z, k_z", "title": "" }, { "docid": "9d8a91ec9a249d5fc34e2cdc9a0dcfbe", "score": "0.5600237", "text": "def Kx_p(self):\n return np.diag([self.kpz, self.kpphi, self.kptheta, self.kppsi])", "title": "" }, { "docid": "0bdbc02e15893a7ae530d3ed46023bbe", "score": "0.55806327", "text": "def polyEval(coefs, k):\n assert k == len(coefs) + 1\n\n value = 0\n degrees = range(len(coefs) - 1, -1, -1)\n for coef, deg in zip(coefs, degrees):\n value += coef*(k**deg)\n\n return 
value", "title": "" }, { "docid": "8fcdb9052755165c4618a3279e215b41", "score": "0.5538348", "text": "def lp2lp_zpk(z, p, k, wo=1.0):\n z = atleast_1d(z)\n p = atleast_1d(p)\n wo = float(wo) # Avoid int wraparound\n\n degree = _relative_degree(z, p)\n\n # Scale all points radially from origin to shift cutoff frequency\n z_lp = wo * z\n p_lp = wo * p\n\n # Each shifted pole decreases gain by wo, each shifted zero increases it.\n # Cancel out the net change to keep overall gain the same\n k_lp = k * wo**degree\n\n return z_lp, p_lp, k_lp", "title": "" }, { "docid": "55b51a3ec9e3556497fbdfd71f28861f", "score": "0.55254513", "text": "def make_kvs(k):\n return reduce(make_kvs_two, k)", "title": "" }, { "docid": "584c18e976da35d932719cb5e2caa411", "score": "0.55241287", "text": "def multiply_function_by_k(self, k):\n\n p = Polynomial(self.coeff[:])\n for (index, value) in enumerate(p.coeff):\n p.coeff[index] = k * value\n\n return p", "title": "" }, { "docid": "25a6677215a04225ddd9ac264ca5e1d1", "score": "0.5512778", "text": "def zpk2tf(z, p, k):\n z = atleast_1d(z)\n k = atleast_1d(k)\n if len(z.shape) > 1:\n temp = poly(z[0])\n b = zeros((z.shape[0], z.shape[1]+1), temp.dtype.char)\n if len(k) == 1:\n k = [k[0]]*z.shape[0]\n for i in range(z.shape[0]):\n b[i] = k[i] * poly(z[i])\n else:\n b = k * poly(z)\n a = poly(p)\n return b, a", "title": "" }, { "docid": "7e17b4aaf82124fdc14e5d3bff579837", "score": "0.5505581", "text": "def k_v(self) -> float:\n return self._k_v", "title": "" }, { "docid": "ac00a1fb5be9e6e891341b8462ac2e9c", "score": "0.54963124", "text": "def __calc_Ikk__(self):\n for i in range(self.noPoints):\n self.__bias__ = self.__vdc__[i]\n self.__ikk__[i] = self.__Int__.integrate(self.__KK_integrand__, \\\n 0.0, self.__KK_vMax__)/pi\n print self.__bias__, \":\", self.__ikk__[i]", "title": "" }, { "docid": "cd5ad3936df2f1baa96ec8f5ec31b2d3", "score": "0.54487735", "text": "def zpk2tf(z, p, k):\n z = atleast_1d(z)\n k = atleast_1d(k)\n if len(z.shape) > 1:\n temp = poly(z[0])\n b = np.empty((z.shape[0], z.shape[1] + 1), temp.dtype.char)\n if len(k) == 1:\n k = [k[0]] * z.shape[0]\n for i in range(z.shape[0]):\n b[i] = k[i] * poly(z[i])\n else:\n b = k * poly(z)\n a = atleast_1d(poly(p))\n\n # Use real output if possible. 
Copied from numpy.poly, since\n # we can't depend on a specific version of numpy.\n if issubclass(b.dtype.type, numpy.complexfloating):\n # if complex roots are all complex conjugates, the roots are real.\n roots = numpy.asarray(z, complex)\n pos_roots = numpy.compress(roots.imag > 0, roots)\n neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))\n if len(pos_roots) == len(neg_roots):\n if numpy.all(numpy.sort_complex(neg_roots) ==\n numpy.sort_complex(pos_roots)):\n b = b.real.copy()\n\n if issubclass(a.dtype.type, numpy.complexfloating):\n # if complex roots are all complex conjugates, the roots are real.\n roots = numpy.asarray(p, complex)\n pos_roots = numpy.compress(roots.imag > 0, roots)\n neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))\n if len(pos_roots) == len(neg_roots):\n if numpy.all(numpy.sort_complex(neg_roots) ==\n numpy.sort_complex(pos_roots)):\n a = a.real.copy()\n\n return b, a", "title": "" }, { "docid": "041e86a741601b84b4778b4b2821bfe4", "score": "0.54375386", "text": "def C_v(self, name, *, tmpr_K):\n return self.C_p(name, tmpr_K=tmpr_K) - const.R", "title": "" }, { "docid": "f5986ac20e8b586803389220a6f4e373", "score": "0.5433672", "text": "def Vk(k):\n with np.errstate(divide='ignore'):\n return np.where(k==0.0, 0.0, np.divide(4.0*np.pi, k**2))", "title": "" }, { "docid": "f13451411973ba1e59babb00248d4934", "score": "0.54329437", "text": "def V_2N(zs, Ks):\n z1, z2 = zs\n K1, K2 = Ks\n return (-K1*z1 - K2*z2 + z1 + z2)/(K1*K2*z1 + K1*K2 *\n z2 - K1*z1 - K1*z2\n - K2*z1 - K2*z2 + z1 + z2)", "title": "" }, { "docid": "2f7c76cdc0c5ac579bfd3b451e914936", "score": "0.54308856", "text": "def bilinear_zpk(z, p, k, fs):\n z = atleast_1d(z)\n p = atleast_1d(p)\n\n degree = _relative_degree(z, p)\n\n fs2 = 2.0*fs\n\n # Bilinear transform the poles and zeros\n z_z = (fs2 + z) / (fs2 - z)\n p_z = (fs2 + p) / (fs2 - p)\n\n # Any zeros that were at infinity get moved to the Nyquist frequency\n z_z = append(z_z, -ones(degree))\n\n # Compensate for gain change\n k_z = k * real(prod(fs2 - z) / prod(fs2 - p))\n\n return z_z, p_z, k_z", "title": "" }, { "docid": "ce0dcc524eac166440e568981c9a3911", "score": "0.5426405", "text": "def kthvalue(self, k, dim=None, keepdim=False): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "9efc70e676514f90d449fa36a86df8bc", "score": "0.54226995", "text": "def get_p_values(z_scores_kmers):\n # initialize the container\n p_vals = defaultdict(float)\n # iterates through the kmer keys\n for kmer in z_scores_kmers:\n # calculates the p values to under represented\n # kmers (negative z scores)\n # add the kmer and p values to the container\n if z_scores_kmers[kmer] < 0.0:\n under = math.erfc(-z_scores_kmers[kmer] / math.sqrt(2)) / 2\n p_vals[kmer] = p_vals.get(kmer, 0.0) + under\n else:\n # add the kmer and p values to the container to over represented\n # and all other kmers\n others = math.erfc(z_scores_kmers[kmer] / math.sqrt(2)) / 2\n p_vals[kmer] = p_vals.get(kmer, 0.0) + others\n return p_vals", "title": "" }, { "docid": "9289ba6d8ed9c3b71b10fc30c42fc142", "score": "0.5418374", "text": "def rv_pqw(k, p, ecc, nu):\n pqw = np.array(\n [[cos(nu), sin(nu), 0], [-sin(nu), ecc + cos(nu), 0]]\n ) * np.array([[p / (1 + ecc * cos(nu))], [sqrt(k / p)]])\n return pqw", "title": "" }, { "docid": "556d6f203c5c69f56f29b7e72c989388", "score": "0.541355", "text": "def V_3N(zs, Ks):\n z1, z2, z3 = zs\n K1, K2, K3 = Ks\n return (-K1*K2*z1/2 - K1*K2*z2/2 - K1*K3*z1/2 - K1*K3*z3/2 + K1*z1 + K1*z2/2 + 
K1*z3/2 - K2*K3*z2/2 - K2*K3*z3/2 + K2*z1/2 + K2*z2 + K2*z3/2 + K3*z1/2 + K3*z2/2 + K3*z3 - z1 - z2 - z3 - (K1**2*K2**2*z1**2 + 2*K1**2*K2**2*z1*z2 + K1**2*K2**2*z2**2 - 2*K1**2*K2*K3*z1**2 - 2*K1**2*K2*K3*z1*z2 - 2*K1**2*K2*K3*z1*z3 + 2*K1**2*K2*K3*z2*z3 - 2*K1**2*K2*z1*z2 + 2*K1**2*K2*z1*z3 - 2*K1**2*K2*z2**2 - 2*K1**2*K2*z2*z3 + K1**2*K3**2*z1**2 + 2*K1**2*K3**2*z1*z3 + K1**2*K3**2*z3**2 + 2*K1**2*K3*z1*z2 - 2*K1**2*K3*z1*z3 - 2*K1**2*K3*z2*z3 - 2*K1**2*K3*z3**2 + K1**2*z2**2 + 2*K1**2*z2*z3 + K1**2*z3**2 - 2*K1*K2**2*K3*z1*z2 + 2*K1*K2**2*K3*z1*z3 - 2*K1*K2**2*K3*z2**2 - 2*K1*K2**2*K3*z2*z3 - 2*K1*K2**2*z1**2 - 2*K1*K2**2*z1*z2 - 2*K1*K2**2*z1*z3 + 2*K1*K2**2*z2*z3 + 2*K1*K2*K3**2*z1*z2 - 2*K1*K2*K3**2*z1*z3 - 2*K1*K2*K3**2*z2*z3 - 2*K1*K2*K3**2*z3**2 + 4*K1*K2*K3*z1**2 + 4*K1*K2*K3*z1*z2 + 4*K1*K2*K3*z1*z3 + 4*K1*K2*K3*z2**2 + 4*K1*K2*K3*z2*z3 + 4*K1*K2*K3*z3**2 + 2*K1*K2*z1*z2 - 2*K1*K2*z1*z3 - 2*K1*K2*z2*z3 - 2*K1*K2*z3**2 - 2*K1*K3**2*z1**2 - 2*K1*K3**2*z1*z2 - 2*K1*K3**2*z1*z3 + 2*K1*K3**2*z2*z3 - 2*K1*K3*z1*z2 + 2*K1*K3*z1*z3 - 2*K1*K3*z2**2 - 2*K1*K3*z2*z3 + K2**2*K3**2*z2**2 + 2*K2**2*K3**2*z2*z3 + K2**2*K3**2*z3**2 + 2*K2**2*K3*z1*z2 - 2*K2**2*K3*z1*z3 - 2*K2**2*K3*z2*z3 - 2*K2**2*K3*z3**2 + K2**2*z1**2 + 2*K2**2*z1*z3 + K2**2*z3**2 - 2*K2*K3**2*z1*z2 + 2*K2*K3**2*z1*z3 - 2*K2*K3**2*z2**2 - 2*K2*K3**2*z2*z3 - 2*K2*K3*z1**2 - 2*K2*K3*z1*z2 - 2*K2*K3*z1*z3 + 2*K2*K3*z2*z3 + K3**2*z1**2 + 2*K3**2*z1*z2 + K3**2*z2**2)**0.5/2)/(K1*K2*K3*z1 + K1*K2*K3*z2 + K1*K2*K3*z3 - K1*K2*z1 - K1*K2*z2 - K1*K2*z3 - K1*K3*z1 - K1*K3*z2 - K1*K3*z3 + K1*z1 + K1*z2 + K1*z3 - K2*K3*z1 - K2*K3*z2 - K2*K3*z3 + K2*z1 + K2*z2 + K2*z3 + K3*z1 + K3*z2 + K3*z3 - z1 - z2 - z3)", "title": "" }, { "docid": "52bfaccf280790cc7fb68104a5130f52", "score": "0.5411632", "text": "def kPar(k):\r\n \r\n k_par=ka*np.sqrt(1+(k/ks)**2)\r\n \r\n return k_par", "title": "" }, { "docid": "36d974f94a3863cf6e260e5f24037451", "score": "0.5377174", "text": "def _zpklp2lp(z, p, k, wo=1.0):\n z = atleast_1d(z)\n p = atleast_1d(p)\n wo = float(wo) # Avoid int wraparound\n\n degree = _relative_degree(z, p)\n\n # Scale all points radially from origin to shift cutoff frequency\n z_lp = wo * z\n p_lp = wo * p\n\n # Each shifted pole decreases gain by wo, each shifted zero increases it.\n # Cancel out the net change to keep overall gain the same\n k_lp = k * wo**degree\n\n return z_lp, p_lp, k_lp", "title": "" }, { "docid": "4ff5a8332653294ea7969f4027397886", "score": "0.5375248", "text": "def crossValidate(val_ids, q, K):\n\tresults={}\n\torig={}\n\tfor v in range(q.shape[0]):\n\t\tif v in val_ids:\n\t\t\t# hold aside v from q\n\t\t\tqprime = array(q)\n\t\t\tqprime[v]=0\n\t\t\tval = dot(K[v],qprime)\n\t\t\tresults[v]=val\n\t\t\toval = val + K[v][v]*q[v]\n\t\t\torig[v]=oval\n\t\t\t#print v, val, K[v][v], oval\n\t\telse:\n\t\t\t#print d\n\t\t\toval = dot(K[v],array(q))\n\t\t\torig[v]=oval\t\t\n\treturn (results, orig)", "title": "" }, { "docid": "70903d5be5c3f84311d26115d884367a", "score": "0.53614986", "text": "def from_pair_test(j,k):\n pass", "title": "" }, { "docid": "7633f0c1ed4d29ab94081c93f7078093", "score": "0.53370583", "text": "def get_basis_dk_vector(u, u_list, p, k):\n u[-1] -= EPSILON # for numeric stability\n n = len(u_list) - p - 1\n basis_dk_v = np.empty((n, len(u)))\n for i in range(n):\n basis_dk_v[i, :] = basis_dk_vector(k, i, p, u, u_list=u_list)\n return basis_dk_v", "title": "" }, { "docid": "02ded7470180accdf91032bbee7133fb", "score": "0.5315989", "text": "def get_scipy_p_values(z_scores_kmers):\n # initialize the 
container\n p_vals = defaultdict(float)\n # iterates through the kmer keys\n for kmer in z_scores_kmers:\n # calculates the p values to under represented\n # kmers (negative z scores)\n # add the kmer and p values to the container\n if z_scores_kmers[kmer] < 0.0:\n p_vals[kmer] = p_vals.get(kmer, 0.0) + norm.sf(abs(-z_scores_kmers[kmer]))\n else:\n # add the kmer and p values to the container to over represented\n # and all other kmers\n p_vals[kmer] = p_vals.get(kmer, 0.0) + norm.sf(abs(z_scores_kmers[kmer]))\n return p_vals", "title": "" }, { "docid": "84b58d9c16e352540dea62c0749b7922", "score": "0.53154546", "text": "def jvp(self, vector):\n raise NotImplementedError", "title": "" }, { "docid": "2eff2aa2daee84c21111fee3005773a8", "score": "0.5310939", "text": "def E(self,t,z,p,v):\n return z", "title": "" }, { "docid": "ce3fdb481ae8ec259b1158bae5cb913d", "score": "0.5307158", "text": "def implied_r(self,k,Z):\n\n par = self.par\n r = Z*par.alpha*k**(par.alpha-1)-par.delta\n return r", "title": "" }, { "docid": "70a4d6c4f991e87fc124053e8d7ceb11", "score": "0.53045976", "text": "def lp2hp_zpk(z, p, k, wo=1.0):\n z = atleast_1d(z)\n p = atleast_1d(p)\n wo = float(wo)\n\n degree = _relative_degree(z, p)\n\n # Invert positions radially about unit circle to convert LPF to HPF\n # Scale all points radially from origin to shift cutoff frequency\n z_hp = wo / z\n p_hp = wo / p\n\n # If lowpass had zeros at infinity, inverting moves them to origin.\n z_hp = append(z_hp, zeros(degree))\n\n # Cancel out gain change caused by inversion\n k_hp = k * real(prod(-z) / prod(-p))\n\n return z_hp, p_hp, k_hp", "title": "" }, { "docid": "a64143ae091bcacb8455c170a6820c52", "score": "0.52907693", "text": "def oh(x, k):\n vec = np.zeros(k)\n vec[x] = 1\n return vec.reshape((-1, 1))", "title": "" }, { "docid": "976acdebe51fd6a0c8c5e8333ac0ed2d", "score": "0.52889323", "text": "def eval_pck(prd_kps, data, idx):\n pckthres = data['pckthres'][idx]\n ncorrt = correct_kps(data['trg_kps'][idx][:, :data['valid_kps_num'][idx]].cuda(\n ), prd_kps, pckthres, data['alpha'])\n pair_pck = int(ncorrt) / int(data['valid_kps_num'][idx])\n\n return pair_pck", "title": "" }, { "docid": "2774a3f98bfe93e7c088d8aae8338b1a", "score": "0.5238181", "text": "def collatz_eval (i, j) :\n assert i > 0\n assert j > 0\n # <your code>\n v = 1\n assert v > 0\n return v", "title": "" }, { "docid": "b0bddeadd147cb03501d44b384153294", "score": "0.52361536", "text": "def mk(y, y_):\n return ppv(y, y_) + npv(y, y_) - 1", "title": "" }, { "docid": "0015aead22f833f09000baae2f43a9d5", "score": "0.52298373", "text": "def project_points(K, xyz):\n uv = np.matmul(K, xyz.T).T\n return uv[:, :2] / uv[:, -1:]", "title": "" }, { "docid": "d37934fae8545fb29c74ca31c5f0df5f", "score": "0.52180016", "text": "def dot(l, k):\n \n if len(k) == 0 or len(l) == 0:\n return 0.0\n elif len(k) == len(l):\n vermeenigvuldig = dot(l[1:], k[1:]) + mult(l[0],k[0]) \n return vermeenigvuldig", "title": "" }, { "docid": "ad7782c949c71bcf4ce4c9c31010f315", "score": "0.5215727", "text": "def output(t, k, params):\n # extract params\n alpha = params['alpha']\n\n # intensive form for output \n y = k**alpha\n \n return y", "title": "" }, { "docid": "4d160771987506424640ad9fdf31f2c1", "score": "0.520803", "text": "def _zpklp2hp(z, p, k, wo=1.0):\n z = atleast_1d(z)\n p = atleast_1d(p)\n wo = float(wo)\n\n degree = _relative_degree(z, p)\n\n # Invert positions radially about unit circle to convert LPF to HPF\n # Scale all points radially from origin to shift cutoff frequency\n z_hp = 
wo / z\n p_hp = wo / p\n\n # If lowpass had zeros at infinity, inverting moves them to origin.\n z_hp = append(z_hp, zeros(degree))\n\n # Cancel out gain change caused by inversion\n k_hp = k * real(prod(-z) / prod(-p))\n\n return z_hp, p_hp, k_hp", "title": "" }, { "docid": "a67113c939645252ea8edd3d98cf9db1", "score": "0.52028924", "text": "def get_zz(xx,nz):\n\n ij = (xx*nz).astype('int')\n zz = ij[:,0]*(nz+2) + ij[:,1]+1\n return zz", "title": "" }, { "docid": "ec1db15617b598e0938931dcf5c6610a", "score": "0.51955295", "text": "def func(p, x):\n k, b = p\n return k * x + b", "title": "" }, { "docid": "2532df14c1d19662132a4cb62540b549", "score": "0.51850396", "text": "def __call__(self, z):\n return interp(z, self.grid, self.vals)", "title": "" }, { "docid": "ea878584ebe2f977b847083e592f6508", "score": "0.5173247", "text": "def map(self, z, k=-1):\n zk = self.prevertex\n if k<0:\n d = np.abs(z - zk)\n d[np.isinf(self.vertex)] = np.inf\n k = np.argmin(d)\n\n return self.vertex[k] + self.C * self.zquad(zk[k],z,k)", "title": "" }, { "docid": "6264fb0b6eba1292cc3b4491cc71065a", "score": "0.5164847", "text": "def __call__(self,val):\n if val<=self.points[0][0]:\n return self.points[0][1]\n if val>=self.points[-1][0]:\n return self.points[-1][1]\n for i in range(len(self.points)-1):\n if val == self.points[i][0]:\n return self.points[i][1]\n if val < self.points[i+1][0]:\n val -= self.points[i][0]\n val /= self.points[i+1][0]-self.points[i][0]\n y = [None,self.points[i][1],self.points[i+1][1],None]\n if i==0:\n y[0] = self.points[i][1]\n else:\n y[0] = self.points[i-1][1]\n if i+2==len(self.points):\n y[3] = self.points[i+1][1]\n else:\n y[3] = self.points[i+2][1]\n\n a = list()\n a.append(y[3]-y[2]-y[0]+y[1])\n a.append(y[0]-y[1]-a[0])\n a.append(y[2]-y[0])\n a.append(y[1])\n return a[0]*val**3+a[1]*val**2+a[2]*val+a[3]\n return None", "title": "" }, { "docid": "baf628cfc644b9013aca764d1b61e358", "score": "0.51578027", "text": "def k(self, i ,j):\n x = self.data[i,1:]\n y = self.data[j,1:]\n return exp(-((np.linalg.norm(x-y))**2)/self.eps)", "title": "" }, { "docid": "0c4207bad66bbf7c3fffdf7a09ca5aa8", "score": "0.51443344", "text": "def term_val(val, pgr, ke):\n\n return (val*(1+pgr)) / (ke - pgr)", "title": "" }, { "docid": "3b040ea4bcace2d7d4d8556a0d1ca3c2", "score": "0.51426697", "text": "def polyval(p, x):\n ...", "title": "" }, { "docid": "b5efaad81af04316f2ca105a34d37a04", "score": "0.51426476", "text": "def bprop_J(x, dz):\n return (Jinv(dz),)", "title": "" }, { "docid": "df9d86a03e7f99e8e316f906fea1095d", "score": "0.51326853", "text": "def dkl(P:List[float], Q:List[float]) -> float:", "title": "" }, { "docid": "0bf32fe826a454afaeff4d077c30bf88", "score": "0.5122246", "text": "def K(self):\n self._updateL()\n RV = SP.dot(self.L,self.L.T)\n return RV", "title": "" }, { "docid": "1fbdfcdf9d369138141956ae25408148", "score": "0.51152414", "text": "def pair_eval(self, X, Y):\n (n1, d1) = X.shape\n (n2, d2) = Y.shape\n assert d1==1, 'd1 must be 1'\n assert d2==1, 'd2 must be 1'\n diff = (X-Y)/self.width\n Kvec = sig.bspline( diff , 1)\n return Kvec", "title": "" }, { "docid": "d149ca9f7fb7f34609ada575b1d950eb", "score": "0.511486", "text": "def freqz_zpk(z, p, k, worN=512, whole=False, fs=2*pi):\n z, p = map(atleast_1d, (z, p))\n\n if whole:\n lastpoint = 2 * pi\n else:\n lastpoint = pi\n\n if worN is None:\n # For backwards compatibility\n w = numpy.linspace(0, lastpoint, 512, endpoint=False)\n elif _is_int_type(worN):\n w = numpy.linspace(0, lastpoint, worN, endpoint=False)\n else:\n w = 
atleast_1d(worN)\n w = 2*pi*w/fs\n\n zm1 = exp(1j * w)\n h = k * polyvalfromroots(zm1, z) / polyvalfromroots(zm1, p)\n\n w = w*fs/(2*pi)\n\n return w, h", "title": "" }, { "docid": "34fb492bb58820f8e41433739963f61b", "score": "0.5110587", "text": "def from_pair(j,k):\n pass", "title": "" }, { "docid": "da00068a27e8e3c4907715a5ab016dd3", "score": "0.51101947", "text": "def makeKPT(params):\n import math as m\n import numpy as np\n\n recipcell,kpts = makeAtoms(params).get_reciprocal_cell(),[]\n for i in range(3):\n k = 2 * 3.14159 * m.sqrt((recipcell[i]**2).sum()) * params['kptden'] \n kpts.append(2 * int(np.ceil(k / 2)))\n\n kind = params['kind']\n if kind=='surface': return np.array(kpts[:2]+[1])\n elif kind=='molecule': return np.array([1,1,1])\n else: return np.array(kpts)", "title": "" }, { "docid": "062a236c4f67abc3a91417d8f488e230", "score": "0.51082695", "text": "def knotInV(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "5a87835269d55d0213b068e68d81a0c1", "score": "0.51007193", "text": "def _vjp_func_jacobian(\n self, values: VariableData\n ) -> Tuple[FactorValue, \"VectorJacobianProduct\"]:\n from autofit.graphical.factor_graphs.jacobians import (\n VectorJacobianProduct,\n )\n raw_fval, fvjp = self._factor_vjp(*self.resolve_args(values))\n fval = self._factor_value(raw_fval)\n\n fvjp_op = VectorJacobianProduct(\n self.factor_out,\n fvjp,\n *self.args,\n out_shapes=fval.to_dict().shapes,\n )\n return fval, fvjp_op", "title": "" }, { "docid": "8ccc2ea00a6ef2695c1623ad0cade38b", "score": "0.50897676", "text": "def bypass_kp(xyz):\n\n # we always expect a dictionary as return value to be more explicit\n res = {}\n\n # Bypass for xy coordiantes\n res[\"xyz\"] = xyz\n\n # Bypass for score (a dummy)\n res[\"score\"] = xyz[:, 0]\n\n return res", "title": "" }, { "docid": "d8bc6cc0e3567f50797fc342ce02a623", "score": "0.508157", "text": "def tk(self, k, x):\n weights = np.diag(np.ones(k+1))[k]\n return np.polynomial.chebyshev.chebval(self._x2c(x), weights)", "title": "" }, { "docid": "e0241548e770d450a172297ab5b6739b", "score": "0.5078671", "text": "def psv(karray):\n k = (karray**2).sum(axis=3)**0.5\n return self.ps_vv(k) * self.velocity_damping(karray[..., 0])", "title": "" }, { "docid": "e4a21da45d428bb45887f42973965c67", "score": "0.5071999", "text": "def D_v_hMpc(k_hMpc,mu,kvav=0.48,av=0.156,bv=1.57):\n\treturn 1-(k_hMpc**av/kvav) * mu**bv", "title": "" }, { "docid": "d303dee308afc0f123b1fea490817668", "score": "0.5070388", "text": "def multiply(self, k):\n self.x = self.x * k\n self.y = self.y * k\n return Vector(self.x, self.y)", "title": "" }, { "docid": "a1cf33334d4567f9428006a299767f1e", "score": "0.50676876", "text": "def findPr(Z,K2,La,R,R1=None,Pt=None,b=None,Z1=None): \n ### BEGIN SOLUTION\n if Z1 is None:\n Z1=1.\n Pr=Pt*b*K2/La**2.*(R1/R)**2.*(Z/Z1)\n return Pr\n\n ### END SOLUTION", "title": "" }, { "docid": "6d2129d9616b23f2cc7a7aec87e3b4b6", "score": "0.5061766", "text": "def get_z(x,nz):\n\n i = 1+int(x[0]*nz)\n j = 1+int(x[1]*nz)\n z = i*nz+j\n return z", "title": "" }, { "docid": "8c0c8c36ea236152d37b59cf0a0cc29e", "score": "0.50523555", "text": "def valuePt(self, *args):\n return _MontePython_cxx.DuBoisJastrow_valuePt(self, *args)", "title": "" }, { "docid": "b535c5a84b9dc87fb994e1e277cae935", "score": "0.50510347", "text": "def rk2(xn: numpy.array, vn: numpy.array, dt: float, m: float):\r\n phin = numpy.concatenate((xn, vn))\r\n\r\n k1 = f(phin) * dt\r\n k2 = f(phin + k1 / 2) * dt\r\n phinp1 = phin + k2\r\n\r\n xnp1 = phinp1[0:3]\r\n vnp1 = 
phinp1[3:6]\r\n\r\n return xnp1, vnp1", "title": "" }, { "docid": "766ab1d978573576e80b7dde9416e586", "score": "0.5041864", "text": "def zpv():\n pv = zeros(shape=(2,3),dtype = float)\n _sofa.iauZpv(pv)\n return pv", "title": "" }, { "docid": "acedd749856653c9535ee8d0d5490686", "score": "0.5035245", "text": "def knotsInV(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "9d221f435497ca9ed515562e4a677a83", "score": "0.50337386", "text": "def _hh(self, k):\n # homogeneous indexed by an integer k (positive or negative)\n # if k is less than 0, result is 0\n # if k == 0, result is s([])\n # if k > 0, then the result is s([k])\n sym = self.sym\n s = sym.s()\n if k == 0:\n return s.one()\n elif k < 0:\n # 0, but as a sym func\n return 0 * s.one()\n elif k > 0:\n return s([k])\n else:\n raise ValueError", "title": "" }, { "docid": "8ae141623be0d0a0954f26b6ce67458c", "score": "0.50336736", "text": "def RHS(self, z, Gamma):\n z = zip(*[iter(z)]*2)\n z = list(enumerate(zip(z, Gamma)))\n return np.array([Gamma_j * self.domain.JGradh(z_j) + 2 *\n sum(Gamma_i * self.domain.JGradG(z_j, z_i)\n for (i, (z_i, Gamma_i)) in z if i != j)\n for (j, (z_j, Gamma_j)) in z]).flatten()", "title": "" }, { "docid": "fa479d89af37d2cb7c3b14de1b3a0f52", "score": "0.50312775", "text": "def __mul__(self, pVal):\n return _almathswig.Rotation3D___mul__(self, pVal)", "title": "" }, { "docid": "51a4c19bbc43558aaab3faeeb8ab69f0", "score": "0.50310344", "text": "def get_kpis(self):", "title": "" }, { "docid": "2a22545e82fee234dac8427587c55a94", "score": "0.5026321", "text": "def wklobjective0(plan, p, q, K, epsilon, gamma):\n f = epsilon * kl(plan, K)\n margs = kl(plan.sum(axis=1), p) + kl(plan.sum(axis=0), q)\n f += gamma * margs\n\n return f", "title": "" }, { "docid": "536b6442352fa2c84b4cb10038f6b97d", "score": "0.50244063", "text": "def get_fvk(fout):\n vkx, vky = get_vk(fout)\n tck = interp.splrep(vkx, vky)\n fvk = lambda k:interp.splev(k, tck)\n return fvk", "title": "" }, { "docid": "8a00b2fd9768423c94ada1e674f5329b", "score": "0.5010669", "text": "def __mul__(self, pVal):\n return _almathswig.Velocity3D___mul__(self, pVal)", "title": "" }, { "docid": "d3b51f2a01408aa9507a1a7885a9bdd4", "score": "0.5006632", "text": "def x(z):\n\treturn z/L", "title": "" }, { "docid": "7d535cc3427229c46f9880a108603d05", "score": "0.500589", "text": "def K_z(self, measure):\n try:\n return self._K_z[id(measure)]\n except KeyError:\n self._compute(measure)\n return self._K_z[id(measure)]", "title": "" }, { "docid": "3c7ce1da79f3e65507fef967bdb185f8", "score": "0.5001759", "text": "def get_vk(fout):\n data = get_data_block(fout, 'VK')\n vkx, vky = data.T\n\n # QMCPACK vk is divided by volume, undo!\n omega = get_volume(fout)\n vky *= omega\n\n return vkx, vky", "title": "" }, { "docid": "2786e1b860ae01abf3b5f835e5f12b7d", "score": "0.4996806", "text": "def create_kp_mat(kp):\n kp_mat = np.empty((len(kp),2),dtype=np.double)\n\n for i,tmp_kp in enumerate(kp):\n kp_mat[i] = tmp_kp.pt\n\n return kp_mat", "title": "" }, { "docid": "005b25d5e8e3d4fda2ca4f2b042c8077", "score": "0.49938032", "text": "def K_vec(u, t, L=None, beta=None, x=None):\n N = len(u) - 1\n dx = x[1] - x[0]\n K = zeros((N+1,N+1))\n K[0,0] = 0\n K[1:N-1] = beta/dx**2\n K[1:N] = -2*beta/dx**2\n K[2:N+1] = beta/dx**2\n K[N,N-1] = (beta/dx**2)*2\n K[N,N] = (beta/dx**2)*(-2)\n return K", "title": "" }, { "docid": "663581ab06bdaa3521db9c3c5085e809", "score": "0.49892807", "text": "def z(self):\n return self.__values[2]", "title": "" }, { "docid": 
"1588e9e5c24e464f9e1053f46ae40bea", "score": "0.49882033", "text": "def __call__(self, *args):\n return _MontePython_cxx.PollsKd___call__(self, *args)", "title": "" }, { "docid": "a3e7fb932fcdfc473bbd4c85ffb9f640", "score": "0.49795985", "text": "def setKnotInV(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "3ccfbb0bc6d89fb5bc18fd6a9c952ec8", "score": "0.49768814", "text": "def additional_derivatives(self, vec_z, k):\n return", "title": "" }, { "docid": "bc1781dbefdf270e6757cb92d4f55e18", "score": "0.4967767", "text": "def Nzp(self, zz, wvl):\n p = (wvl * zz) / np.sqrt(4 * self.ps * self.ps - wvl * wvl)\n return np.int((1 / self.ps) * (self.Wr/2 -self.Ws/2 + p) + 1)", "title": "" }, { "docid": "7e0a4b5971a784f8501e773b3e7293c0", "score": "0.49675673", "text": "def dot(L, K):\n if L == [] or K == []:\n return 0.0\n return L[0] * K[0] + dot(L[1:], K[1:])", "title": "" }, { "docid": "fd9054fd389375fbee017d31024cbdfe", "score": "0.49630347", "text": "def sigma_v(self, z):\n print \"using sigma_v (km/s): \" + repr(self._sigma_v)\n sigma_v_hinvMpc = (self._sigma_v / 100.)\n return np.ones_like(z) * sigma_v_hinvMpc", "title": "" }, { "docid": "4dd90036192b5cc29ad6e82c76f20e61", "score": "0.49608293", "text": "def setKnotsInV(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "21c59eccb7e810f2878f26499694e0ab", "score": "0.4958897", "text": "def valuePt(self, *args):\n return _MontePython_cxx.MateuszWaveAll_valuePt(self, *args)", "title": "" }, { "docid": "008a492ac61cd83ad762a7d19f95202c", "score": "0.4955211", "text": "def elem_sym_poly(lambda_value, k):\n N = lambda_value.shape[0]\n E = np.zeros((k + 1, N + 1))\n E[0, :] = 1\n\n for i in range(1, k+1):\n for j in range(1, N+1):\n E[i, j] = E[i, j - 1] + lambda_value[j-1] * E[i - 1, j - 1]\n\n return E", "title": "" }, { "docid": "f3c5e1998e14c62292c08c1568ad9244", "score": "0.4954975", "text": "def pz(self, value):\n self.__vector3d.z = value", "title": "" }, { "docid": "692f52fb4faef1d108f29d89ac502538", "score": "0.49540603", "text": "def bprop_Jinv(x, dz):\n return (J(dz),)", "title": "" }, { "docid": "86a4c10b4e10033d3f60d191203af2fa", "score": "0.49529544", "text": "def polyval(p,x):\n \n \n return ndarray()", "title": "" }, { "docid": "2c03558cd8128ba7ed1dd7ef77b46b70", "score": "0.49515352", "text": "def F(self,t,z,p):\n return 0.*z", "title": "" } ]
3d6427bc986034793e2e2844a5d522cb
Get sensor temperature value from dictionary list.
[ { "docid": "186790b4769487ff6bb59a48265a5e30", "score": "0.0", "text": "def read():\n\n # Check if temperature list is empty\n if not global_dict['temperature']:\n temp = {}\n # Get first element from temperature list, and then pop it from list\n else:\n temp = [t for t in global_dict['temperature']][0]\n global_dict['temperature'].pop(0)\n \n data = {\n 'timestamp': now(),\n 'temperature': temp\n }\n resp = jsonify(data)\n resp.status_code = 200\n return resp", "title": "" } ]
[ { "docid": "e08185ab808b9ad7dbaecaeb1dc17c9e", "score": "0.6676416", "text": "def get_temperature(self):\n return self.result['current_conditions']['temp_c']", "title": "" }, { "docid": "c17f85daa53c33a28dcecaf9d7f69ea7", "score": "0.6675716", "text": "def get_sensor_temperature(self, n):\n node = self._lookup_sensor_node(n)\n\n # <http://www.openzwave.com/dev/classOpenZWave_1_1SensorMultilevel.html>\n values = node.get_values(\n class_id=0x31, # COMMAND_CLASS_SENSOR_MULTILEVEL\n genre=\"User\", readonly=True, writeonly=False,\n label=self._labels_xref['temperature']\n )\n\n if not values:\n raise RuntimeError(\"Temperature: label not found. Is this a sensor?\")\n\n # who cares if more => bug??? Check the get_values() above\n if len(values) > 1:\n self.logger.warning(\n \"Node {}: get_values(Temperature) returned more than one value!?\".format(\n node.node_id\n )\n )\n\n # [BUG] WTF, value is °F?? <https://aeotec.freshdesk.com/support/solutions/articles/6000036562-multisensor-6-firmware-update-6-17-2016->\n value = [\n v.data if v.units == 'C' else f_to_c(v.data)\n for k,v in values.items()\n ][0]\n\n return {\n \"controller\": self.controller_name,\n \"sensor\": node.node_id,\n \"location\": node.location,\n \"type\": 'temperature',\n \"updateTime\": self._get_node_timestamp(node),\n \"value\": value\n }", "title": "" }, { "docid": "59dbdbe39bafbc4fedaf28a145c5eebc", "score": "0.66063887", "text": "def get_sensors_temperature(self, sensors):\n logger.debug(\"sensors[0].get_cache(index=0) : %s\"%sensors[0].get_cache(index=0))\n return sensors[0].get_cache(index=0)", "title": "" }, { "docid": "13744fd2576680b9dd0e2368a571862a", "score": "0.645974", "text": "async def get_temperatures(self, **kwargs: Any) -> Dict[str, float]:\n\n # get all temperatures\n temps = {}\n for name, var in self._temperatures.items():\n temps[name] = self._status[var]\n\n # return it\n return temps", "title": "" }, { "docid": "457c46b74f29aabdba54b626812707b9", "score": "0.6396818", "text": "def get_temperature(self):\n log.debug(\"Get Thermocouple %d temperature -> %f\"\n % (self.id_, self.temperature_))\n self.temperature_ = self.status['Thermocouples'][self.id_]['temperature']\n return self.temperature_ # Checks temp and returns value", "title": "" }, { "docid": "4a70dd0f78359688fe5dcff51f13db15", "score": "0.63583636", "text": "def temperature(self):\n return self.measurements[0]", "title": "" }, { "docid": "7deea170c9e7ef9211acc1bf76ecfa36", "score": "0.6321188", "text": "def get_temperature(self):\n log.debug(\"Get AuxThermocouple %d temperature -> %f\"\n % (self.id_, self.temperature_))\n self.temperature_ = self.status['AuxThermocouples'][self.id_]['temperature']\n return self.temperature_ # Checks temp and returns value", "title": "" }, { "docid": "a6c1510d8d21c711f01344f86fe510d5", "score": "0.6288386", "text": "def temperature(self):\n return self.read(\"present_temperature\")", "title": "" }, { "docid": "efb7aad6926d0d118ea4497c7ba9cab1", "score": "0.62467116", "text": "def get_sensor_temp(self):\n return psutil.sensors_temperatures()", "title": "" }, { "docid": "d594df2a5e2655580b93785ad872599f", "score": "0.62387127", "text": "def get_sensor_value(self, keys):\n\n # Point to the device on the sensors dictionary based on the keys\n d = self.sensors\n for k in keys:\n d = d[k]\n\n # If the value is a float, truncate it to 2 decimals\n if type(d['value']) is float:\n return round(d['value'], 2)\n else:\n return d['value']", "title": "" }, { "docid": "833e20a9f0eea230694ace578f0dfe80", "score": 
"0.6179427", "text": "def get_temp(device):\n # Not all devices export this file. On other devices, the only real way to\n # read it is via Java\n # developer.android.com/guide/topics/sensors/sensors_environment.html\n temps = []\n for i in xrange(2):\n out, _ = device.shell('cat /sys/class/thermal/thermal_zone%d/temp' % i)\n if out:\n temps.append(int(out))\n return temps", "title": "" }, { "docid": "5a99b50f3dbcb4c43d32cbeed2cc1b19", "score": "0.6156568", "text": "async def get_temperature(self) -> float: # type: ignore\n ...", "title": "" }, { "docid": "7e64a0f41fe100b62ad47014f92b5a01", "score": "0.6153637", "text": "def GetTemperatures(self):\n # Not all devices export these files. On other devices, the only real way to\n # read it is via Java\n # developer.android.com/guide/topics/sensors/sensors_environment.html\n out = {}\n for sensor in self.List('/sys/class/thermal') or []:\n if sensor.filename in ('.', '..'):\n continue\n if not sensor.filename.startswith('thermal_zone'):\n continue\n path = '/sys/class/thermal/' + sensor.filename\n # Expected files:\n # - mode: enabled or disabled.\n # - temp: temperature as reported by the sensor, generally in C or mC.\n # - type: driver name.\n # - power/\n # - trip_point_0_temp\n # - trip_point_0_type\n # - trip_point_1_temp\n # - trip_point_1_type\n # - subsystem/ -> link back to ../../../../class/thermal\n # - policy\n # - uevent\n # - passive\n temp = self.PullContent(path + '/temp')\n if not temp:\n continue\n # Assumes it's in °C.\n value = float(temp)\n if value > 1000:\n # Then assumes it's in m°C.\n # TODO(maruel): Discern near cold temperature, e.g. 0.1°C.\n value = value / 1000.\n if value <= 0.:\n # TODO(maruel): Support cold temperatures below 0°C.\n continue\n sensor_type = self.PullContent(path + '/type')\n if sensor_type:\n out[sensor_type.strip()] = value\n # Filter out unnecessary stuff.\n return out", "title": "" }, { "docid": "de636ff484f89001eced7b249aedbf33", "score": "0.6134211", "text": "def getTemperature(self):\n \n retVal = {'temp': self.__tempC, 'unit': 'c'}\n \n return retVal", "title": "" }, { "docid": "f5d2482dd7245ebb3d2e152a6e065bba", "score": "0.60949475", "text": "def getTempValue():\n # Where the initial value stored at.\n # often the file is located under /sys/bus/w1/[\"a string you need to obtain from the system\"]/w1_slave\n tfile = open(\"/sys/bus/w1/devices/28-0300a279d1d3/w1_slave\")\n text = tfile.read()\n tfile.close()\n\n #parse the temperature value\n secondline = text.split(\"\\n\")[1]\n tpd = secondline.split(\" \")[9]\n tmp = float(tpd[2:])\n tmp = tmp / 1000\n return tmp", "title": "" }, { "docid": "300a357de081ab18e9bd2d1951fdb49c", "score": "0.6085144", "text": "def temperature(self):\n return float(self.read_temperature_register()) / 65536 * 175.72 - 46.85", "title": "" }, { "docid": "2139f5f777e8e9d27c2280a689e306ba", "score": "0.60524964", "text": "def getTemperature(self):\r\n return self.temperature", "title": "" }, { "docid": "b9368da9034bc51d2ed416102e8bdae2", "score": "0.604688", "text": "def temperature(self):\n return self.zamg_data.get_data(ATTR_WEATHER_TEMPERATURE)", "title": "" }, { "docid": "7fdc4aff99e3ff0a05f0e277b9274113", "score": "0.60027194", "text": "def get_temperature(self):\n return self.temperature.get_temperature(self)", "title": "" }, { "docid": "31d6494f798b9fa205b919dae47e824f", "score": "0.598451", "text": "def getTemperature(self):\n return self.__dht11Temp.getTemperature()", "title": "" }, { "docid": "76da5e69493f8108cb3e121039fca329", "score": "0.59787905", 
"text": "def get_temperature(self) -> float:\n temperature, humidity, last_change = self.bricklet.get_sensor_data(83)\n return temperature / 10", "title": "" }, { "docid": "c4b4e81bc451debb1626bfe8376b13d9", "score": "0.596506", "text": "def _get_temperature(self):\n return self.__temperature", "title": "" }, { "docid": "c4b4e81bc451debb1626bfe8376b13d9", "score": "0.596506", "text": "def _get_temperature(self):\n return self.__temperature", "title": "" }, { "docid": "c4b4e81bc451debb1626bfe8376b13d9", "score": "0.596506", "text": "def _get_temperature(self):\n return self.__temperature", "title": "" }, { "docid": "88abd4f0315f3b13b78b50e7aa358f5a", "score": "0.5960304", "text": "def get_temperature(self):\n\n return self.sensor.get_temperature()", "title": "" }, { "docid": "3719da94d028c822a09388f45f8a9523", "score": "0.5958551", "text": "def get_target_temp(self):\n if self._update_on_read:\n self.update()\n return self.get_conv_val('ROOM_TEMP_HEAT_DAY_HC1')", "title": "" }, { "docid": "30a765f8b0804560eb4ff9bb1d9a4a7c", "score": "0.59540886", "text": "def read_temp(self):\r\n\t\tdata = bus.read_i2c_block_data(TMP112_DEFAULT_ADDRESS, TMP112_REG_TEMP, 2)\r\n\t\t\r\n\t\t# Convert the data to 12-bits\r\n\t\ttemp =(data[0] * 256 + data[1]) / 16\r\n\t\tif temp > 2047 :\r\n\t\t\ttemp -= 4096\r\n\t\tcTemp = temp * 0.0625\r\n\t\tfTemp = cTemp * 1.8 + 32\r\n\t\t\r\n\t\treturn {'c' : cTemp, 'f' : fTemp}", "title": "" }, { "docid": "d48008799cde022ea4f794869159662b", "score": "0.5939954", "text": "def temperature(self):\n return self._get_iio_attr(\"temp0\", \"raw\", False, self._ctrl)", "title": "" }, { "docid": "c7089a456e87bd1782fbe49200b33dbd", "score": "0.5928779", "text": "def get_temperature(self):\n return self._fritz.get_temperature(self.ain)", "title": "" }, { "docid": "33136e77c8a58040dc0a70712a2001bc", "score": "0.58884937", "text": "def read(self):\n self.read_temperature() \n return (self.temperature)", "title": "" }, { "docid": "080c6178026b1917cb19d738f78ed0c7", "score": "0.5882742", "text": "def temperature(self):\n return self._temperature", "title": "" }, { "docid": "cb392a347b3e3b1b59bb02cee6f3b7e1", "score": "0.58739245", "text": "def get_temperature(self, unit='kelvin'):\n # This is due to the fact that the OWM Weather API responses are mixing\n # absolute temperatures and temperature deltas together\n to_be_converted = dict()\n not_to_be_converted = dict()\n for label, temp in self._temperature.items():\n if temp is None or temp < 0:\n not_to_be_converted[label] = temp\n else:\n to_be_converted[label] = temp\n converted = temputils.kelvin_dict_to(to_be_converted, unit)\n return dict(list(converted.items()) + \\\n list(not_to_be_converted.items()))", "title": "" }, { "docid": "8ee115076bdb9ab63d2b3cf45dd2099a", "score": "0.5838656", "text": "def get_reading(sensor):\n logging.debug(\"Get temprature for sensor \"+str(sensor))\n temperature = []\n IDs = []\n \n for filename in os.listdir(\"/sys/bus/w1/devices\"):\n time.sleep(0.3)\n if fnmatch.fnmatch(filename, sensor):\n with open(\"/sys/bus/w1/devices/\" + filename + \"/w1_slave\") as fileobj:\n lines = fileobj.readlines()\n if lines[0].find(\"YES\"):\n pok = lines[1].find('=')\n temperature.append(float(lines[1][pok+1:pok+6])/1000)\n IDs.append(filename)\n else:\n logger.error(\"Error reading sensor with ID: %s\" % (filename))\n\n if (len(temperature)>0):\n # insertDB(IDs, temperature)\n return (IDs[0],time.strftime(\"%Y-%m-%d\"), time.strftime(\"%H:%M\"), temperature[0])", "title": "" }, { "docid": 
"2f43925d1b7a762129784f7c681efe0c", "score": "0.5835962", "text": "def read_temps(self):\n disks = {\n i['serial']: i['name'] for i in self.middleware.call_sync('datastore.query', 'storage.disk', [\n ['serial', '!=', ''],\n ['togglesmart', '=', True],\n ['hddstandby', '=', 'Always On'],\n ], {'prefix': 'disk_'})\n }\n temps = self.read_sata_or_sas_disk_temps(disks)\n temps.update(self.read_nvme_temps(disks))\n\n for disk in set(disks.values()) - set(temps.keys()):\n # try to subprocess and run smartctl for any disk that we didn't get the temp\n # for using the much quicker methods\n temps.update({disk: self.middleware.call_sync('disk.temperature_uncached', disk, 'never')})\n\n return temps", "title": "" }, { "docid": "176588b790281065f170499d848c7e72", "score": "0.5817111", "text": "async def read_temperature(self):\n\n return await self._read(\"T\")", "title": "" }, { "docid": "a5951c4ba15a486b8d709bef1656f50b", "score": "0.5815056", "text": "def read_temp(self):\n temp_f = 0.0\n try:\n lines = self.read_file(self.sensor_file)\n while not re.search(r'YES', lines):\n time.sleep(0.2)\n lines = self.read_file(self.sensor_file)\n temp_string = re.search('t=([-]?\\d+)', lines).group(1)\n temp_c = float(temp_string) / 1000.0\n temp_f = temp_c * 9.0 / 5.0 + 32.0\n except Exception as e:\n print('Failed to read temperature sensor value: ' + str(e))\n print('Check that your temperature sensor is plugged in correctly.')\n return temp_f", "title": "" }, { "docid": "ea5f8a939ca33a842d9c0ecf8490bdc1", "score": "0.5812518", "text": "def Temperature(self):\n try:\n ret = self._TemperatureDict['sum'] / self._TemperatureDict['number']\n except ZeroDivisionError as e:\n ret = 0\n return ret", "title": "" }, { "docid": "7a2177e04f0a9c969467b8d6dbe6a824", "score": "0.5807292", "text": "def _get_temperature(self, data):\n\n # The temperature is in two fields, one for the integer part,\n # one for the fraction\n #\n # The integer part was decoded as a signed two's complement number,\n # but this isn't how it's really stored. 
The MSB is a sign, the lower\n # 7 bits are the unsigned temperature value.\n #\n # To convert from the decoded value we have to add 128 and then negate,\n # if the decoded value was negative\n frac = data[3] / 100\n if data[2] < 0:\n return -(data[2] + 128 + frac)\n else:\n return data[2] + frac", "title": "" }, { "docid": "02d0acbf685709c7117f34b75500c0ae", "score": "0.5801608", "text": "def get_temperature(self):\n\n return self.get_temperature_from_humidity()", "title": "" }, { "docid": "240cb3947c6fed2b503828bfa6853c5a", "score": "0.57664776", "text": "async def temperature(self, publish=True):\n return await self._ds.temperature(self._r, publish=publish)", "title": "" }, { "docid": "d0db7505331082786dfb78677e216b23", "score": "0.5759943", "text": "def get_temp(self, key):\n\t\treturn frappe.cache().hget(\"temp\", key)", "title": "" }, { "docid": "4826bc263d92d43cad6386192c4bf806", "score": "0.57513636", "text": "def get_temp():\n with open('/sys/class/thermal/thermal_zone0/temp', 'r') as infile:\n return float(infile.read()) * 0.001", "title": "" }, { "docid": "299599ad72c670eef92cb499efc36323", "score": "0.5748098", "text": "def current_temperature(self):\n value = self.thermostat.uhome_thermostat_keys['room_temperature']['value']\n return value", "title": "" }, { "docid": "79aa1894449a3c638db79e62c4b58f5a", "score": "0.5747888", "text": "def read_temperature(self):\n with open(self.path, 'r') as file:\n lines = file.readlines() \n line=lines[1].find('t=')\n t_string=lines[1][line+2:]\n self.temperature=(float(t_string)/(1000.0))", "title": "" }, { "docid": "a143d8544aac402e49564de37d3e27e7", "score": "0.5740222", "text": "def temperature(self):\n return self._temperature", "title": "" }, { "docid": "cb54a187a20f730860dcbd9ab6fd9c02", "score": "0.57306176", "text": "def get_temperature_song(city):\n current_temp = int(temperature(city))\n print(f\"TEMP:{current_temp}\")\n if (current_temp > 60):\n return random.choice(warm_list)\n elif (current_temp > 32):\n return random.choice(cool_list)\n else:\n return random.choice(freezing_list)", "title": "" }, { "docid": "38a6378dcfe674c7cb0af68334d3f53c", "score": "0.57288593", "text": "def get_temperature(self):\n T = self.IOP.get_temperature()\n return T", "title": "" }, { "docid": "72c11634e168168d616846da3f638916", "score": "0.5715457", "text": "def get_temperature_from_humidity(self):\n\n self._init_humidity() # Ensure humidity sensor is initialised\n temp = 0\n data = self._humidity.humidityRead()\n if (data[2]): # Temp valid\n temp = data[3]\n return temp", "title": "" }, { "docid": "34b7c102966eff70d9b04b174dd031e0", "score": "0.57137555", "text": "def forecast_weather(self):\n\n list=json_parsed['list']\n \n i=0\n length=len(list)\n while i < length:\n\n \t first = list[i]\n main=first['main']\n temp = main['temp']\n temp_min = main['temp_min']\n temp_max = main['temp_max']\n humidity = main['humidity']\n pressure = main['pressure']\n weather = first['weather']\n weather_desc = weather[0]\n description = weather_desc['description']\n mainDescription = weather_desc['main']\n wind = first['wind']\n speed = wind['speed']\n deg = wind['deg']\n cloud=first['clouds']\n cloudiness=cloud['all']\n dt_txt = first['dt_txt']\n i+=1 \n #print(dt_txt) ", "title": "" }, { "docid": "aebe5f30923b402b31d551346baac8f6", "score": "0.5712046", "text": "def get_temp(forecast, fahrenheit=True):\n precipitation_idx = [child.tag for child in forecast].index('temperature')\n precipitation = forecast[precipitation_idx]\n if fahrenheit:\n multiplier = 9/5.\n 
offset = 32.\n else:\n multiplier = 1.\n offset = 0\n\n return float(precipitation.attrib['value']) * multiplier + offset # convert to Fahrenheit", "title": "" }, { "docid": "f606cc1339de298f56a818ea4e1af5d5", "score": "0.5709872", "text": "def get_setpoint_temperature(self):\n return float(self.TED4015.query('SOUR:TEMP?'))", "title": "" }, { "docid": "80a29c665516a65b035e2e99f2abc43d", "score": "0.5705426", "text": "def temperature(self) -> float:\n # Read the temperature reference from register 0x24\n treference = self.read_reg(0x24)\n\n # Value taken from maximum time of temperature conversion on the datasheet section 12.\n # maximum time for temperature conversion = 1603 us\n delay = 0.1\n\n # Set the device to single measurement mode\n self._transceive(bytes([_CMD_SM | _CMD_TEMP]))\n\n time.sleep(delay)\n\n # Read the 'temp' data\n data = self._transceive(bytes([_CMD_RM | _CMD_TEMP]), 2)\n\n # Unpack status and raw int values\n self._status_last = data[0]\n\n # from https://www.melexis.com/-/media/files/documents/\n # application-notes/mlx90393-temperature-compensation-application-note-melexis.pdf\n tvalue = struct.unpack(\">H\", data[1:3])[0]\n # See previous link for conversion formula\n\n return 35 + ((tvalue - treference) / 45.2)", "title": "" }, { "docid": "a7995f72ab0fdeac9edc7350526d5d6d", "score": "0.5693162", "text": "def getTemperature(retries = 3):\n for retry in range(retries):\n try:\n time.sleep(1)\n rtn = snd('r10')\n tokens = rtn.split()\n for token in tokens:\n if token.count('e'):\n return float(token)\n except KeyboardInterrupt:\n print 'keyboard interrupt, returning'\n return\n except:\n continue\n traceback.print_exc()\n raise Exception('failed to gather the measured temperature of the pond')", "title": "" }, { "docid": "421bc8b6d3f45341b92414f002f33cf1", "score": "0.56906295", "text": "def dew_point_temperature(self):\n return self._get_data_by_field(7)", "title": "" }, { "docid": "7b186ba2f40e5f6b87fba21788792809", "score": "0.56898206", "text": "def temperature(self) -> float:\n self.start_measurement(TEMPERATURE)\n value = self._data()\n self._measurement = 0\n return value * 175.72 / 65536.0 - 46.85", "title": "" }, { "docid": "0cdc2b969a9068e988d688b04da15e9c", "score": "0.5677471", "text": "def get_temperature_from_sensor(sensor_name):\n sensor_file = os.path.join(\n \"/sys/bus/w1/devices/\",\n sensor_name,\n \"w1_slave\")\n\n timestamp = int(time.time())\n with open(sensor_file, 'r') as f:\n data = f.read()\n\n found = DS18B20_TEMP_RE.search(data)\n if found is None:\n return (timestamp, None)\n return (timestamp, int(found.group('temperature')) / 1000)", "title": "" }, { "docid": "79ba1952a0a146857d230a2f0fff474a", "score": "0.5668948", "text": "def temperature(self):\n rawdata = self.read_data(self.TEMPERATURE, 2)\n data = unpack('>H', rawdata)[0]\n\n # Check if negative value bit set\n if data & (1 << 15):\n data &= ((1 << 15) - 1) # clear negative bit\n data = -data\n\n # Temperature is stored as int with one decimal place\n return data / 10", "title": "" }, { "docid": "2600de0c62811ec5c572c261c5adf0b7", "score": "0.56605536", "text": "def getTemp(self):\n # 20037 is NotReached\n # 20035 is NotStabalized\n # 20036 is Stabalized\n # 20034 is Off\n result = andor.GetTemperatureStatus()\n mode = andor.GetTemperatureF()\n txt = \"\" + str(mode[0])\n logger.debug(str(result))\n for e in result:\n txt = txt + \",\" + str(e)\n return \"temp \" + txt", "title": "" }, { "docid": "eefd558cad3add07f9c1f752189ba03c", "score": "0.5651442", "text": "def 
get_cpu_temperature(admin_id):\n\n if is_admin(admin_id):\n cpu = CPUTemperature()\n return {\"cpu_temperature\": cpu.temperature}\n else:\n return {\"cpu_temperature\": None}", "title": "" }, { "docid": "16b69aea66ec062b1fccd8598b028440", "score": "0.56387043", "text": "def target_temperature(self):\n return float(self.channel_data.get(\"value\") / 10.0)", "title": "" }, { "docid": "05b4ebc5a387887129eddbb2df52f5cc", "score": "0.56279534", "text": "def current_temperature(self) -> float | None:\n return self._heater_data.get(\"temperature\")", "title": "" }, { "docid": "1eb6717e012b992f25ed6c4b76719f00", "score": "0.5626476", "text": "def publishTemperature(self):\r\n payload = self.sensor.read_temps(self.fahrenheit)\r\n \"\"\"\r\n\tif self.fahrenheit:\r\n\t\tpayload['unit'] = 'F'\r\n\telse:\r\n\t\tpayload['unit'] = 'C'\r\n \"\"\"\r\n for sen in payload:\r\n self.client.publish(self.topic+str(sen)+\"/temperature\", str(payload[sen]))\r\n print(str(sen)+\" \"+str(payload[sen]))", "title": "" }, { "docid": "c1a4bddd320be28b66e2d22c6bec7e04", "score": "0.5610856", "text": "def current_temperature(self):\n status = self.hass.data[UPDATED_DATA][self._peripheral.name]\n if status:\n return status.indoor_temp\n return None", "title": "" }, { "docid": "02560c393f9e7725f581ff7e0930ac88", "score": "0.56083816", "text": "def native_temperature(self) -> float | None:\n if self.observation:\n return self.observation.get(\"temperature\")\n return None", "title": "" }, { "docid": "c65b0fa657ddc30bb03efcb75b7a7930", "score": "0.560173", "text": "def _value_as_temperature(self):\n return round(float(self._value), 1)", "title": "" }, { "docid": "6aadecc103e229f58b1d29b737bb3c86", "score": "0.5599835", "text": "def get_temperature():\n return random() * 20", "title": "" }, { "docid": "a7b5ed922bfb55235eea71625824358b", "score": "0.5599748", "text": "def get_cpu_temperature(is_admin):\n\n if is_admin:\n cpu = CPUTemperature()\n return {\"cpu_temperature\": cpu.temperature}\n else:\n return {\"cpu_temperature\": None}", "title": "" }, { "docid": "359e76b0a03c86ceccb6c18502c84bb8", "score": "0.55988497", "text": "def _get_temperature():\n\n # If running on real hardware, read from onboard temperature sensor\n # otherwise return a simulated value.\n if platform.machine() == 'armv5tejl':\n return float(os.popen(\"tshwctl --cputemp|grep external|cut -f2 -d'='\").read().rstrip())\n else:\n return 30 + random.uniform(0, 5)", "title": "" }, { "docid": "752eebacf98cb930e4bece18c092cb16", "score": "0.5582737", "text": "def temperatures(self) -> Tuple[float, float]:\n return self.temperature_ch1, self.temperature_ch2", "title": "" }, { "docid": "3c959b6e3d30f1776cda1881c61f30b3", "score": "0.55788696", "text": "def temperature(self):\n return self._read() / 4.0", "title": "" }, { "docid": "44989e90e265c75c8278178b337dde66", "score": "0.556674", "text": "def get_current_temp(self):\n if self._update_on_read:\n self.update()\n return self.get_conv_val('ACTUAL_ROOM_TEMPERATURE_HC1')", "title": "" }, { "docid": "f30caca05bd475d501556d69b4bf951d", "score": "0.5557684", "text": "def get_self_cal_temperature(self):\n\n response = float(self.device.query(f'SOURce{self.module_number}:SCALibration:TEMP?'))\n return response", "title": "" }, { "docid": "eae3f9886e41c9362a592eeb15bdec82", "score": "0.5554129", "text": "def get_temp(self):\n self._logger.info(\"Reading temperture...\")\n\n def temp_raw(filename):\n with open(filename,'r') as f:\n lines=f.readlines()\n return(lines)\n\n def read_temp(device_name):\n max_attempts = 
10\n attempt = 0\n filename = os.path.join('/sys/bus/w1/devices',device_name,'w1_slave')\n try:\n lines = temp_raw(filename)\n except IOError:\n temp = -99\n else:\n while lines[0].strip()[-3:] != 'YES' and attempt < max_attempts:\n time.sleep(0.5)\n lines = temp_raw(filename)\n attempt += 1\n match_obj=re.match('.*t=([0-9]*)',lines[1])\n if match_obj is not None and attempt < max_attempts:\n temp = float(match_obj.group(1))/1000.0\n else:\n temp = -99\n return(temp)\n\n self._logger.info(\"self._identifier = {}\".format(self._identifier))\n device_name = self._settings.get([\"sensor_name\"])\n self._logger.info(\"sensor_name = {}\".format(device_name))\n temp = read_temp(device_name)\n self._logger.info(\"enclosure temp = {}oC\".format(temp))\n self.current_temp = temp\n self._plugin_manager.send_plugin_message(self._identifier, dict(enclosureTemp=self.current_temp))", "title": "" }, { "docid": "bd4449ca9940842c4565d1bba8d23ce3", "score": "0.5549259", "text": "def temperature(self) -> float:\n raw_temp = Bits(uint=self.registers.read_word(self.TEMP_OUT0), length=16).int\n actual_temp = (raw_temp / 340.0) + 36.53\n return actual_temp", "title": "" }, { "docid": "9aaae65f5b52f17a5bc90f29e0d46345", "score": "0.5545238", "text": "def get_temperatures(self):\n print('RUNNING {0} UNDER {1}').format(\"get_gps_datum\", self.serial_num)\n curs = self.conn.cursor()\n dbstr = '''SELECT Id, SensorReading, Date, DeviceId FROM Temperatures ORDER BY Date DESC;'''\n curs.execute(dbstr)\n rows = curs.fetchall()\n return rows", "title": "" }, { "docid": "21e0775a7bcb0bdca5ae7fc2f8a65211", "score": "0.5544107", "text": "def getSensorData():\n # get random value for now. to simulate getting value from real sensors\n ret_val = random.randrange(0, 10)\n return ret_val", "title": "" }, { "docid": "ee436d8b26ad2c52ff055a01447ff70d", "score": "0.55366313", "text": "def target_temperature(self):\n return self.thermostat.uhome_thermostat_keys['room_setpoint']['value']", "title": "" }, { "docid": "037e8a340f0159a0ceeeae55d00df2b4", "score": "0.55325246", "text": "def convert_temperature(ds):\n # Temperature is in Kelvin\n return ds[\"temperature\"] - 273.15", "title": "" }, { "docid": "9d9294fdaf39f273fa7edbbf074b77a7", "score": "0.55316716", "text": "def temperature(self):\n return self._driver.temperature", "title": "" }, { "docid": "e18b248805236456f7ec62578a6b5fa1", "score": "0.55270416", "text": "def read_values(self):\r\n avg_humid_percentage, time = self.take_soil_values()\r\n temperature, humidity = self.take_ambient_values()\r\n data = dict(\r\n measured_at=datetime.fromtimestamp(float(time)),\r\n name=time,\r\n ambient_temperature=float(temperature) if temperature else None,\r\n ambient_humidity=float(humidity) if humidity else None,\r\n soil_humidity=float(avg_humid_percentage)\r\n )\r\n self.process_item(data)", "title": "" }, { "docid": "e2443b891e5cb53a839181d68113b94c", "score": "0.5518582", "text": "def get_solvent_temp_list():\n database.load('solvation', '')\n solvent_temp_list = []\n solvent_list = getSolventList()\n for label, index in solvent_list:\n solvent_data = database.solvation.get_solvent_data(label)\n if solvent_data.name_in_coolprop != None:\n Tc = \"%.2f\" % (solvent_data.get_solvent_critical_temperature() - 0.01) # 0.01 is subtracted because Tc is not inclusive\n solvent_temp_list.append((label, index + \": 280 K - \" + str(Tc) + \" K\"))\n return solvent_temp_list", "title": "" }, { "docid": "eae14c11f2a844446d55c66928919f37", "score": "0.5516758", "text": "def myHumiTemp():\n sensor 
= Adafruit_DHT.DHT11\n pin = 17\n ret = []\n try: \n humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)\n if humidity is not None and temperature is not None:\n ret.append(int(humidity))\n ret.append(int(temperature))\n return ret\n except:\n # drop in some error values so we can filter out at the sql level\n ret = [-999,-999]\n return ret", "title": "" }, { "docid": "784a6e7e9598aa4bbc421748128f9f47", "score": "0.55164546", "text": "def current_temperature(self):\n return self.uhome.uhome_module_keys['average_room_temperature']['value']", "title": "" }, { "docid": "0b61b95c54a10fb3b1709812da227411", "score": "0.5513178", "text": "def check_sensors(self) -> dict:\n resp = self._send(0x1)\n temperature = struct.unpack(\"<bb\", resp[:0x2])\n temperature = temperature[0x0] + temperature[0x1] / 10.0\n return {\"temperature\": temperature}", "title": "" }, { "docid": "b36b721d2be7a3495c9c4e6a4235b676", "score": "0.5505357", "text": "def temperature(self) -> Optional[float]:\n if self.data[\"temp_dec\"] is not None:\n return self.data[\"temp_dec\"] / 10.0\n\n return None", "title": "" }, { "docid": "1bc17688c3e49bfcfd83c882ebcf64ac", "score": "0.5500298", "text": "def get_data():\n global turbidity_sensor\n\n # Date (DD-MM-YYY) and time (HH:MM:SS)\n d = datetime.now()\n time = '{:%H:%M:%S}'.format(d)\n date = '{:%d-%m-%Y}'.format(d)\n\n # (DS18B) Water temperature\n try:\n w = W1ThermSensor()\n water_temp = str(w.get_temperature())\n except:\n water_temp = '0'\n\n # (BMP180) Air temperature + pressure\n try:\n b = BMP085()\n air_temp = str(b.read_temperature())\n air_pressure = str(b.read_pressure())\n except:\n air_temp = '0'\n air_pressure = '0'\n\n # Turbidity of the water\n turb = turbidity_sensor.read_turbidity()\n if turb > 1023:\n turb = 0\n\n return {\n 'time' : time,\n 'date' : date,\n 'air_temp' : air_temp,\n 'air_pressure' : air_pressure,\n 'water_temp' : water_temp,\n 'turb' : turb\n }", "title": "" }, { "docid": "b6729d1086d2c80fb51febd72d1f3d57", "score": "0.54981697", "text": "def get_temp_hum_config_value(self, th_sample_rate, th_repeatability):\n\n # Set up sample rate dictionary\n sr_dict = {\n 0x20: '0.5 Hz',\n 0X21: '1 Hz',\n 0x22: '2 Hz',\n 0x23: '4 Hz',\n 0x27: '10 Hz'\n }\n\n # Set up repeatability dictionary\n rep_dict = {\n 0x20: {\n 0x2F: 'Low',\n 0x24: 'Med',\n 0x32: 'High'\n },\n 0x21: {\n 0x2d: 'Low',\n 0x26: 'Med',\n 0x30: 'High'\n },\n 0x22: {\n 0x2B: 'Low',\n 0x20: 'Med',\n 0x36: 'High'\n },\n 0x23: {\n 0x29: 'Low',\n 0x22: 'Med',\n 0x34: 'High'\n },\n 0x27: {\n 0x2A: 'Low',\n 0x21: 'Med',\n 0x37: 'High'\n }\n }\n # Return dictionary values\n return sr_dict[th_sample_rate], rep_dict[th_sample_rate][th_repeatability]", "title": "" }, { "docid": "4ebba33277132741ba69415ed8adce8e", "score": "0.5496477", "text": "def read_temperature(self):\n return self.BNO055.read_temp()", "title": "" }, { "docid": "68c8b427b5dcc6c0dd25709c103ccdc5", "score": "0.5494471", "text": "def get_sensor_data(self, should_update=False):\n # All is comment in this dict because some agent could not access to these or are not numbers.\n # It then lead to issue in influxDB.\n # some can just be not callable, as sensor temp that lead to error\n sensor_data = {\n # \"sensor_temp\": self.get_sensor_temp(),\n # \"sensor_fans\": self.get_sensor_fans(),\n # \"sensor_battery_percent\": self.get_sensor_battery().percent,\n # \"sensor_battery_secsleft\": self.get_sensor_battery().secsleft,\n # \"sensor_battery_power_plugged\": self.get_sensor_battery().power_plugged,\n }\n\n return 
self.handle_get_data(sensor_data, should_update)", "title": "" }, { "docid": "6bc2efe6e8d0e97a6452d003bedb63a4", "score": "0.54920286", "text": "def get_temp(self):\n raw_temp = self.read_i2c_word_data(self.TEMP_OUT0)\n\n # Get the actual temperature using the formule given in the\n # MPU-6050 Register Map and Descriptions revision 4.2, page 30\n actual_temp = (raw_temp / 340.0) + 36.53\n\n return actual_temp", "title": "" }, { "docid": "8f39353b1c00a8c58a6281f6bd21f057", "score": "0.54880804", "text": "def target_temps(self) -> FridgeSetPoints:\n return self.appliance.get_erd_value(ErdCode.TEMPERATURE_SETTING)", "title": "" }, { "docid": "6d98497a7135c1b6ce099e048cbcfa53", "score": "0.5474027", "text": "def get_temperature(self, recordid):\n print('RUNNING {0} UNDER {1}').format(\"get_gps_data\", self.serial_num)\n\n curs = self.conn.cursor()\n dbstr = '''SELECT Id, SensorReading, Date, DeviceId FROM Temperatures WHERE Id = {0};'''.format(recordid)\n curs.execute(dbstr)\n rows = curs.fetchall()\n return rows", "title": "" }, { "docid": "aa8e73c06442ee840860a797ebf81b95", "score": "0.54591364", "text": "def target_temperature(self) -> int:\n return getattr(self.target_temps, self.heater_type)", "title": "" }, { "docid": "c04de01457675be2294013ce11bf1917", "score": "0.5453177", "text": "def get_value(self, unit=None):\n if unit is None:\n unit = self._unit\n raw_temp = self.i2c.read_word(self.TEMP_OUT0)\n # Get the actual temperature using the formule given in the\n # MPU-6050 Register Map and Descriptions revision 4.2, page 30\n actual_temp = (raw_temp / 340.0) + 36.53 # in celcius\n if unit == self.CELSIUS:\n return actual_temp\n elif unit == self.KELVIN:\n return actual_temp + 273.15\n elif unit == self.FAHRENHEIT:\n return actual_temp * 1.8 + 32", "title": "" }, { "docid": "8967f07548965d0f819037099cc1592c", "score": "0.5452468", "text": "def current_temperature(self):\n return float(int(self.channel_data.get(\"temperature\")) / 10.0)", "title": "" }, { "docid": "effb3c69d72459835831d56379fc3e6a", "score": "0.5452152", "text": "def get_temp(self):\n raw_temp = self.read_i2c_word(self.TEMP_OUT0)\n\n # Get the actual temperature using the formule given in the\n # MPU-6050 Register Map and Descriptions revision 4.2, page 30\n actual_temp = (raw_temp / 340.0) + 36.53\n\n return actual_temp", "title": "" }, { "docid": "f0242a2812d7e15c22d26da059afb40b", "score": "0.5449852", "text": "def get_temperature_from_pressure(self):\n\n self._init_pressure() # Ensure pressure sensor is initialised\n temp = 0\n data = self._pressure.pressureRead()\n if (data[2]): # Temp valid\n temp = data[3]\n return temp", "title": "" }, { "docid": "c6dce3751768da2fb87ad0ed4f707f5a", "score": "0.54486334", "text": "def value_single(self):\n return {\n \"type\": str(self.sensor_name),\n \"value\": self._random_num(),\n }", "title": "" }, { "docid": "afdbca78d2ad1b13c6c8845d0ed6dac3", "score": "0.5441813", "text": "def current_temperature(self) -> int:\n current_temps = self.appliance.get_erd_value(ErdCode.CURRENT_TEMPERATURE)\n current_temp = getattr(current_temps, self.heater_type)\n if current_temp is None:\n _LOGGER.exception(f\"{self.name} has None for current_temperature (available: {self.available})!\")\n return current_temp", "title": "" }, { "docid": "2cd5c4b2f9b1a3578571d6970c6bee2c", "score": "0.54412764", "text": "def target_temperature(self):\n status = self.hass.data[UPDATED_DATA][self._peripheral.name]\n if status:\n return status.target_temp\n return None", "title": "" }, { "docid": 
"c03b64078ea52bd2bc0fe2ee0b42baa3", "score": "0.5439738", "text": "def target_temperature(self):\n return self.uhome.uhome_module_keys['holiday_setpoint']['value']", "title": "" } ]
9a47bf42baf67f0fb9006cccbb52312e
Method to find the comfort zone for the robot to begin measuring ambience
[ { "docid": "6a104ff13eeccbb52661329a21970444", "score": "0.57630306", "text": "def comfortZone():\n # If the robot is too close with a wall in front, or if it is touching the wall\n if robot.readDistance() < DISTANCE_TO_WALL or robot.readTouch()[0] == 1:\n robot.backward(SPEED_MOVEMENT, 0.5)\n return False\n\n # Check whether the robot is too close to a wall on the left\n robot.pointerLeft(time=0.1)\n if robot.readDistance() < DISTANCE_TO_WALL:\n robot.turnRight(SPEED_MOVEMENT, 0.2)\n robot.forward(SPEED_MOVEMENT, 0.5)\n return False\n\n # Check whether the robot is too close to a wall on the right\n robot.zeroPointer()\n robot.pointerTo(90)\n if robot.readDistance() < DISTANCE_TO_WALL:\n robot.turnLeft(SPEED_MOVEMENT, 0.2)\n robot.forward(SPEED_MOVEMENT, 0.5)\n return False\n\n robot.zeroPointer()\n return True", "title": "" } ]
[ { "docid": "30982d4ec599f57101bd074c18227ad4", "score": "0.57420546", "text": "def antenna():", "title": "" }, { "docid": "7e0c4028b64b6011945b4d82211ea695", "score": "0.5630881", "text": "def atgoal(self):\n\n return np.sqrt(np.sum((self.position[:, self.t] - self.platform_location)**2)) <= (self.platform_radius + 1)", "title": "" }, { "docid": "f4090757c2506dee82068f93c24f4089", "score": "0.5581201", "text": "def get_focus_zone(self, body):\n\n def zone_from_center_size(x, y, size):\n \"\"\"\n Return zone [left, top, right, bottom] \n from zone center (x,y) and zone size (the zone is square).\n \"\"\"\n half_size = size // 2\n size = half_size * 2\n if size > self.img_w:\n x = self.img_w // 2\n x1 = x - half_size\n if x1 < -self.pad_w:\n x1 = -self.pad_w\n elif x1 + size > self.img_w + self.pad_w:\n x1 = self.img_w + self.pad_w - size\n x2 = x1 + size\n if size > self.img_h: \n y = self.img_h // 2\n y1 = y - half_size\n if y1 < -self.pad_h: \n y1 = -self.pad_h\n elif y1 + size > self.img_h + self.pad_h:\n y1 = self.img_h + self.pad_h - size\n y2 = y1 + size\n return [x1, y1, x2, y2]\n \n\n def get_one_hand_zone(hand_label):\n \"\"\"\n Return the zone [left, top, right, bottom] around the hand given by its label \"hand_label\" (\"left\" or \"right\")\n Values are expressed in pixels in the source image C.S.\n If the wrist keypoint is not visible, return None.\n If self.hands_up_only is True, return None if wrist keypoint is below elbow keypoint.\n \"\"\"\n wrist_kp = hand_label + \"_wrist\"\n wrist_score = body.scores[BODY_KP[wrist_kp]]\n if wrist_score < self.score_thresh: \n return None\n x, y = body.keypoints[BODY_KP[wrist_kp]]\n if self.hands_up_only:\n # We want to detect only hands where the wrist is above the elbow (when visible)\n elbow_kp = hand_label + \"_elbow\"\n if body.scores[BODY_KP[elbow_kp]] > self.score_thresh and \\\n body.keypoints[BODY_KP[elbow_kp]][1] < body.keypoints[BODY_KP[wrist_kp]][1]:\n return None\n # Let's evaluate the size of the focus zone\n size = self.estimate_focus_zone_size(body)\n if size == 0: return [-self.pad_w, -self.pad_h, self.frame_size-self.pad_w, self.frame_size-self.pad_h] # The hand is too close. 
No need to focus\n return zone_from_center_size(x, y, size)\n\n if self.mode == \"group\":\n zonel = get_one_hand_zone(\"left\")\n if zonel:\n zoner = get_one_hand_zone(\"right\")\n if zoner:\n xl1, yl1, xl2, yl2 = zonel\n xr1, yr1, xr2, yr2 = zoner\n x1 = min(xl1, xr1)\n y1 = min(yl1, yr1)\n x2 = max(xl2, xr2)\n y2 = max(yl2, yr2)\n # Global zone center (x,y)\n x = int((x1+x2)/2)\n y = int((y1+y2)/2)\n size_x = x2-x1\n size_y = y2-y1\n size = 2 * (max(size_x, size_y) // 2)\n return (zone_from_center_size(x, y, size), \"group\")\n else:\n return (zonel, \"left\")\n else:\n return (get_one_hand_zone(\"right\"), \"right\")\n elif self.mode == \"higher\":\n if body.scores[BODY_KP[\"left_wrist\"]] > self.score_thresh:\n if body.scores[BODY_KP[\"right_wrist\"]] > self.score_thresh:\n if body.keypoints[BODY_KP[\"left_wrist\"]][1] > body.keypoints[BODY_KP[\"right_wrist\"]][1]:\n hand_label = \"right\"\n else:\n hand_label = \"left\"\n else: \n hand_label = \"left\"\n else:\n if body.scores[BODY_KP[\"right_wrist\"]] > self.score_thresh:\n hand_label = \"right\"\n else:\n return (None, None)\n return (get_one_hand_zone(hand_label), hand_label)\n else: # \"left\" or \"right\"\n return (get_one_hand_zone(self.mode), self.mode)", "title": "" }, { "docid": "0eaad14827579dec7bfe600da4bcd511", "score": "0.55643475", "text": "def get_manager_ac_space(ob_space,\n relative_goals,\n env_name,\n use_fingerprints,\n fingerprint_dim):\n if env_name in [\"AntMaze\", \"AntPush\", \"AntFall\", \"AntGather\",\n \"AntFourRooms\"]:\n manager_ac_space = Box(\n low=np.array([-10, -10, -0.5, -1, -1, -1, -1, -0.5, -0.3, -0.5,\n -0.3, -0.5, -0.3, -0.5, -0.3]),\n high=np.array([10, 10, 0.5, 1, 1, 1, 1, 0.5, 0.3, 0.5, 0.3,\n 0.5, 0.3, 0.5, 0.3]),\n dtype=np.float32,\n )\n elif env_name == \"UR5\":\n manager_ac_space = Box(\n low=np.array([-2 * np.pi, -2 * np.pi, -2 * np.pi, -4, -4, -4]),\n high=np.array([2 * np.pi, 2 * np.pi, 2 * np.pi, 4, 4, 4]),\n dtype=np.float32,\n )\n elif env_name == \"Pendulum\":\n manager_ac_space = Box(\n low=np.array([-np.pi, -15]),\n high=np.array([np.pi, 15]),\n dtype=np.float32\n )\n elif env_name in [\"ring0\", \"ring1\"]:\n if relative_goals:\n manager_ac_space = Box(-.5, .5, shape=(1,), dtype=np.float32)\n else:\n manager_ac_space = Box(0, 1, shape=(1,), dtype=np.float32)\n elif env_name == \"figureeight0\":\n if relative_goals:\n manager_ac_space = Box(-.5, .5, shape=(1,), dtype=np.float32)\n else:\n manager_ac_space = Box(0, 1, shape=(1,), dtype=np.float32)\n elif env_name == \"figureeight1\":\n if relative_goals:\n manager_ac_space = Box(-.5, .5, shape=(7,), dtype=np.float32)\n else:\n manager_ac_space = Box(0, 1, shape=(7,), dtype=np.float32)\n elif env_name == \"figureeight2\":\n if relative_goals:\n manager_ac_space = Box(-.5, .5, shape=(14,), dtype=np.float32)\n else:\n manager_ac_space = Box(0, 1, shape=(14,), dtype=np.float32)\n elif env_name == \"merge0\":\n if relative_goals:\n manager_ac_space = Box(-.5, .5, shape=(5,), dtype=np.float32)\n else:\n manager_ac_space = Box(0, 1, shape=(5,), dtype=np.float32)\n elif env_name == \"merge1\":\n if relative_goals:\n manager_ac_space = Box(-.5, .5, shape=(13,), dtype=np.float32)\n else:\n manager_ac_space = Box(0, 1, shape=(13,), dtype=np.float32)\n elif env_name == \"merge2\":\n if relative_goals:\n manager_ac_space = Box(-.5, .5, shape=(17,), dtype=np.float32)\n else:\n manager_ac_space = Box(0, 1, shape=(17,), dtype=np.float32)\n elif env_name == \"PD-Biped3D-HLC-Soccer-v1\":\n manager_ac_space = Box(\n low=np.array([0, -1, -1, -1, -1, 
-2, -2, -2, -2, -2, -2, -2, -1,\n -2]),\n high=np.array([1.5, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 1, 2]),\n dtype=np.float32\n )\n else:\n if use_fingerprints:\n low = np.array(ob_space.low)[:-fingerprint_dim[0]]\n high = ob_space.high[:-fingerprint_dim[0]]\n manager_ac_space = Box(low=low, high=high, dtype=np.float32)\n else:\n manager_ac_space = ob_space\n\n return manager_ac_space", "title": "" }, { "docid": "24ad48558366d28e65a01ab200a3909f", "score": "0.5562395", "text": "def find_zone(self, context, criterion):", "title": "" }, { "docid": "08d8af3fb5597796a578dc420bb57efc", "score": "0.5426856", "text": "def calculate_curtains_steps(self):\n\n telescope = self.telescope\n steps = {}\n Logger.getLogger().debug(\"Telescope status %s\", telescope.status)\n # TODO verify tele height:\n # if less than east_min_height e ovest_min_height\n if telescope.status is TelescopeStatus.LOST or telescope.status is TelescopeStatus.ERROR:\n steps[\"west\"] = self.curtain_west.steps()\n steps[\"east\"] = self.curtain_east.steps()\n\n if telescope.is_below_curtains_area():\n # keep both curtains to 0\n steps[\"west\"] = 0\n steps[\"east\"] = 0\n\n # else if higher to east_max_height e ovest_max_height\n elif telescope.is_above_curtains_area(self.alt_max_tend_e, self.alt_max_tend_w) or not telescope.is_within_curtains_area():\n # move both curtains max open\n steps[\"west\"] = self.n_step_corsa\n steps[\"east\"] = self.n_step_corsa\n\n # else if higher to ovest_min_height and Az tele to west\n elif telescope.status == TelescopeStatus.WEST:\n Logger.getLogger().debug(\"inside west status\")\n # move curtain east max open\n steps[\"east\"] = self.n_step_corsa\n # move curtain west to f(Alt telescope - x)\n steps[\"west\"] = round((telescope.coords[\"alt\"]-self.alt_min_tend_w)/self.increm_w)\n\n # else if higher to ovest_min_height and Az tele to est\n elif telescope.status == TelescopeStatus.EAST:\n Logger.getLogger().debug(\"inside east status\")\n # move curtian west max open\n steps[\"west\"] = self.n_step_corsa\n # if inferior to est_min_height\n # move curtain east to f(Alt tele - x)\n steps[\"east\"] = round((telescope.coords[\"alt\"]-self.alt_min_tend_e)/self.increm_e)\n\n Logger.getLogger().debug(\"calculatd curtain steps %s\", steps)\n\n return steps", "title": "" }, { "docid": "181f5cfebb04cf6b2e138b717b3d4f73", "score": "0.5343166", "text": "def __searchObstacle(self):\n\n \"\"\"tira uma foto da tela e o converte numa matrix\"\"\"\n img = pyautogui.screenshot(region=(self.__dinoFront[0], self.__dinoPosition['top'] - 40, 400, 200))\n img = np.array(img)\n\n scan = Scan(len(img[0]), len(img))\n\n end = 140 # limite da tela\n con = 200 # limite de confirmação\n\n \"\"\"\n Busca a distancia do Obstaculo\n \n 'x' percorre a tela da esquerda para a direita\n 'y' percorre a tela de cima para baixo\n \n 'x' e 'y' são as coord do Obstaculo do seu lado esquerdo\n 'x' é a distancia do Dino até o Obstaculo\n \n se não encontrar redefine os atributos do Obstaculo\n \"\"\"\n for x in range(len(img[0])):\n y = scan.scanArray((0, end, 2), img, x, 0)\n if y:\n self.__obstacle['dino_distance'] = x\n\n \"\"\"\n Busca a largura do Obstaculo\n \n 'x_reverse' percorre a tela da direita para a esquerda\n y_reverse percorre a tela de cima para baixo\n \n 'x_reverse' e 'y_reverse' são as coord do Obstaculo do seu lado direito\n a largura do Obstaculo é 'x - x_reverse'\n \"\"\"\n for x_reverse in range(len(img[0])-1, -1, -1):\n y_reverse = scan.scanArray((0, end, 2), img, x_reverse, 0)\n if y_reverse:\n 
self.__obstacle['width'] = x_reverse - x\n\n \"\"\"\n Busca a altura do Obstaculo\n\n 'y_height' percorre a tela de cima para baixo\n 'x_height' percorre a tela da esquerda para a direita\n\n 'x_height' e 'y_height' são as coord da parte de cima do Obstaculo\n \n 'height' percorre o obstaculo de cima para baixo até não detectar o Obstaculo\n 'height' é a altura do Obstaculo\n \"\"\"\n for y_height in range(end):\n x_height = scan.scanArray((0, x_reverse, 2), img, x, y_height, False)\n if x_height:\n height = scan.notScanArray((0, con, 1), img, x + x_height, y_height)\n if height and height > 20:\n self.__obstacle['height'] = height\n\n \"\"\"\n Busca a altura do Obstaculo em relação ao chão\n \n 'floor' percorre da parte de baixo do obstaculo até o chão\n \n se 'floor' for diferente de None, 'height' é multiplicado por 3 e percorre 'floor' novamente\n 'floor' é a distancia do Obstaculo em relação ao chão\n \n se 'floor' for None, a distancia do Obstaculo em relação ao chão é 0\n \"\"\"\n floor = scan.scanArray((0, con, 1), img, x + x_height, y_height + height)\n if floor:\n self.__obstacle['height'] *= 3\n floor = scan.scanArray((0, con, 1), img, x + x_height, y_height + self.__obstacle['height'])\n if floor:\n self.__obstacle['floor_distance'] = floor\n return True\n self.__obstacle['floor_distance'] = 0\n return True\n self.__obstacle = {\n 'width': -1,\n 'height': -1,\n 'dino_distance': -1,\n 'floor_distance': -1,\n 'velocity': self.__velocity\n }\n return False", "title": "" }, { "docid": "201560c950258ada701df6c1c5db2bfa", "score": "0.5332081", "text": "def get_sim_location(self):\n\n agent_state = super().habitat_env.sim.get_agent_state(0)\n x = -agent_state.position[2]\n y = -agent_state.position[0]\n axis = quaternion.as_euler_angles(agent_state.rotation)[0]\n if (axis % (2 * np.pi)) < 0.1 or (axis %\n (2 * np.pi)) > 2 * np.pi - 0.1:\n o = quaternion.as_euler_angles(agent_state.rotation)[1]\n else:\n o = 2 * np.pi - quaternion.as_euler_angles(agent_state.rotation)[1]\n if o > np.pi:\n o -= 2 * np.pi\n return x, y, o", "title": "" }, { "docid": "ce56785393018a34fae93ac58e7d1e9d", "score": "0.5328771", "text": "def observation_characterization(self, sInd, mode):\r\n \r\n OS = self.OpticalSystem\r\n ZL = self.ZodiacalLight\r\n TL = self.TargetList\r\n SU = self.SimulatedUniverse\r\n Obs = self.Observatory\r\n TK = self.TimeKeeping\r\n \r\n # find indices of planets around the target\r\n pInds = np.where(SU.plan2star == sInd)[0]\r\n \r\n # get the detected status, and check if there was a FA\r\n det = self.lastDetected[sInd,0]\r\n FA = (len(det) == len(pInds) + 1)\r\n if FA == True:\r\n pIndsDet = np.append(pInds, -1)[det]\r\n else:\r\n pIndsDet = pInds[det]\r\n \r\n # initialize outputs, and check if there's anything (planet or FA) to characterize\r\n characterized = np.zeros(len(det), dtype=int)\r\n fZ = 0./u.arcsec**2\r\n systemParams = SU.dump_system_params(sInd) # write current system params by default\r\n SNR = np.zeros(len(det))\r\n intTime = None\r\n if len(det) == 0: # nothing to characterize\r\n return characterized, fZ, systemParams, SNR, intTime\r\n \r\n # look for last detected planets that have not been fully characterized\r\n if (FA == False): # only true planets, no FA\r\n tochar = (self.fullSpectra[pIndsDet] == 0)\r\n else: # mix of planets and a FA\r\n truePlans = pIndsDet[:-1]\r\n tochar = np.append((self.fullSpectra[truePlans] == 0), True)\r\n \r\n # 1/ find spacecraft orbital START position including overhead time,\r\n # and check keepout angle\r\n if 
np.any(tochar):\r\n # start times\r\n startTime = TK.currentTimeAbs + mode['syst']['ohTime']\r\n startTimeNorm = TK.currentTimeNorm + mode['syst']['ohTime']\r\n # planets to characterize\r\n tochar[tochar] = Obs.keepout(TL, sInd, startTime, mode)\r\n \r\n # 2/ if any planet to characterize, find the characterization times\r\n # at the detected fEZ, dMag, and WA\r\n if np.any(tochar):\r\n fZ = ZL.fZ(Obs, TL, sInd, startTime, mode)\r\n fEZ = self.lastDetected[sInd,1][det][tochar]/u.arcsec**2\r\n dMag = self.lastDetected[sInd,2][det][tochar]\r\n WA = self.lastDetected[sInd,3][det][tochar]*u.arcsec\r\n intTimes = np.zeros(len(tochar))*u.day\r\n intTimes[tochar] = OS.calc_intTime(TL, sInd, fZ, fEZ, dMag, WA, mode)\r\n # add a predetermined margin to the integration times\r\n intTimes = intTimes*(1 + self.charMargin)\r\n # apply time multiplier\r\n totTimes = intTimes*(mode['timeMultiplier'])\r\n # end times\r\n endTimes = startTime + totTimes\r\n endTimesNorm = startTimeNorm + totTimes\r\n # planets to characterize\r\n tochar = ((totTimes > 0) & (totTimes <= OS.intCutoff) & \r\n (endTimesNorm <= TK.OBendTimes[TK.OBnumber]))\r\n \r\n # 3/ is target still observable at the end of any char time?\r\n if np.any(tochar) and Obs.checkKeepoutEnd:\r\n tochar[tochar] = Obs.keepout(TL, sInd, endTimes[tochar], mode)\r\n \r\n # 4/ if yes, allocate the overhead time, and perform the characterization \r\n # for the maximum char time\r\n if np.any(tochar):\r\n TK.allocate_time(mode['syst']['ohTime'])\r\n intTime = np.max(intTimes[tochar])\r\n pIndsChar = pIndsDet[tochar]\r\n log_char = ' - Charact. planet inds %s (%s/%s detected)'%(pIndsChar, \r\n len(pIndsChar), len(pIndsDet))\r\n self.logger.info(log_char)\r\n self.vprint(log_char)\r\n \r\n # SNR CALCULATION:\r\n # first, calculate SNR for observable planets (without false alarm)\r\n planinds = pIndsChar[:-1] if pIndsChar[-1] == -1 else pIndsChar\r\n SNRplans = np.zeros(len(planinds))\r\n if len(planinds) > 0:\r\n # initialize arrays for SNR integration\r\n fZs = np.zeros(self.ntFlux)/u.arcsec**2\r\n systemParamss = np.empty(self.ntFlux, dtype='object')\r\n Ss = np.zeros((self.ntFlux, len(planinds)))\r\n Ns = np.zeros((self.ntFlux, len(planinds)))\r\n # integrate the signal (planet flux) and noise\r\n dt = intTime/self.ntFlux\r\n for i in range(self.ntFlux):\r\n # allocate first half of dt\r\n TK.allocate_time(dt/2.)\r\n # calculate current zodiacal light brightness\r\n fZs[i] = ZL.fZ(Obs, TL, sInd, TK.currentTimeAbs, mode)[0]\r\n # propagate the system to match up with current time\r\n SU.propag_system(sInd, TK.currentTimeNorm - self.propagTimes[sInd])\r\n self.propagTimes[sInd] = TK.currentTimeNorm\r\n # save planet parameters\r\n systemParamss[i] = SU.dump_system_params(sInd)\r\n # calculate signal and noise (electron count rates)\r\n Ss[i,:], Ns[i,:] = self.calc_signal_noise(sInd, planinds, dt, mode, \r\n fZ=fZs[i])\r\n # allocate second half of dt\r\n TK.allocate_time(dt/2.)\r\n \r\n # average output parameters\r\n fZ = np.mean(fZs)\r\n systemParams = {key: sum([systemParamss[x][key]\r\n for x in range(self.ntFlux)])/float(self.ntFlux)\r\n for key in sorted(systemParamss[0])}\r\n # calculate planets SNR\r\n S = Ss.sum(0)\r\n N = Ns.sum(0)\r\n SNRplans[N > 0] = S[N > 0]/N[N > 0]\r\n # allocate extra time for timeMultiplier\r\n extraTime = intTime*(mode['timeMultiplier'] - 1)\r\n TK.allocate_time(extraTime)\r\n \r\n # if only a FA, just save zodiacal brightness in the middle of the integration\r\n else:\r\n totTime = intTime*(mode['timeMultiplier'])\r\n 
TK.allocate_time(totTime/2.)\r\n fZ = ZL.fZ(Obs, TL, sInd, TK.currentTimeAbs, mode)[0]\r\n TK.allocate_time(totTime/2.)\r\n \r\n # calculate the false alarm SNR (if any)\r\n SNRfa = []\r\n if pIndsChar[-1] == -1:\r\n fEZ = self.lastDetected[sInd,1][-1]/u.arcsec**2\r\n dMag = self.lastDetected[sInd,2][-1]\r\n WA = self.lastDetected[sInd,3][-1]*u.arcsec\r\n C_p, C_b, C_sp = OS.Cp_Cb_Csp(TL, sInd, fZ, fEZ, dMag, WA, mode)\r\n S = (C_p*intTime).decompose().value\r\n N = np.sqrt((C_b*intTime + (C_sp*intTime)**2).decompose().value)\r\n SNRfa = S/N if N > 0 else 0.\r\n \r\n # save all SNRs (planets and FA) to one array\r\n SNRinds = np.where(det)[0][tochar]\r\n SNR[SNRinds] = np.append(SNRplans, SNRfa)\r\n \r\n # now, store characterization status: 1 for full spectrum, \r\n # -1 for partial spectrum, 0 for not characterized\r\n char = (SNR >= mode['SNR'])\r\n # initialize with full spectra\r\n characterized = char.astype(int)\r\n WAchar = self.lastDetected[sInd,3][char]*u.arcsec\r\n # find the current WAs of characterized planets\r\n WAs = systemParams['WA']\r\n if FA:\r\n WAs = np.append(WAs, self.lastDetected[sInd,3][-1]*u.arcsec)\r\n # check for partial spectra\r\n IWA_max = mode['IWA']*(1 + mode['BW']/2.)\r\n OWA_min = mode['OWA']*(1 - mode['BW']/2.)\r\n char[char] = (WAchar < IWA_max) | (WAchar > OWA_min)\r\n characterized[char] = -1\r\n # encode results in spectra lists (only for planets, not FA)\r\n charplans = characterized[:-1] if FA else characterized\r\n self.fullSpectra[pInds[charplans == 1]] += 1\r\n self.partialSpectra[pInds[charplans == -1]] += 1\r\n \r\n return characterized.astype(int), fZ, systemParams, SNR, intTime", "title": "" }, { "docid": "2ce30ca152a1c3aa48159313698c8aaa", "score": "0.52701706", "text": "def antennalist():", "title": "" }, { "docid": "5a84338260643bbace12398c57636484", "score": "0.5225109", "text": "def get_initial_guess(self, x_p1, p_goal, p_puck, obstacles):\n hit_dir = p_goal - p_puck\n hit_dir = 6.0 * hit_dir / np.linalg.norm(hit_dir)\n x_des = np.array([p_puck[0], p_puck[1], hit_dir[0], hit_dir[1]])\n #x_des = np.array([1.0, 1.0, 0, 0]) \n print(\"x_des: {}, {}\".format(x_des[0], x_des[1]))\n print(\"x_des shape\", x_des.shape)\n print(\"zeros.shape\", np.zeros(4).shape)\n print(\"p_player\", x_p1[0:2])\n print(\"p_puck {}, {}\".format(p_puck[0], p_puck[1]))\n print(\"p_goal\", p_goal)\n prog = DirectCollocation(self.mpc_params.sys_c, self.mpc_params.sys_c.CreateDefaultContext(), self.mpc_params.N+1,\n minimum_timestep=self.mpc_params.minT, maximum_timestep=self.mpc_params.maxT)\n\n prog.AddBoundingBoxConstraint(x_p1, x_p1, prog.initial_state())\n prog.AddQuadraticErrorCost(Q=self.mpc_params.Omega_N_max, x_desired=x_des, vars=prog.final_state())\n\n prog.AddEqualTimeIntervalsConstraints()\n\n # generate trajectory non in collision with puck \n #for n in range(self.mpc_params.N):\n # x = prog.state()\n # eps = 0.1\n # obs_pos = p_puck[0:2]\n # prog.AddConstraintToAllKnotPoints((x[0:2]-obs_pos).dot(x[0:2]-obs_pos) >= (self.sim_params.player_radius + self.sim_params.puck_radius - eps)**2)\n\n for obs_pos in obstacles:\n for n in range(self.mpc_params.N):\n x = prog.state()\n prog.AddConstraintToAllKnotPoints((x[0:2]-obs_pos).dot(x[0:2]-obs_pos) >= (2.0*self.sim_params.player_radius)**2)\n\n \n prog.AddConstraintToAllKnotPoints(prog.input()[0] <= self.sim_params.input_limit)\n prog.AddConstraintToAllKnotPoints(prog.input()[0] >= -self.sim_params.input_limit)\n prog.AddConstraintToAllKnotPoints(prog.input()[1] <= self.sim_params.input_limit)\n 
prog.AddConstraintToAllKnotPoints(prog.input()[1] >= -self.sim_params.input_limit)\n\n r = self.sim_params.player_radius\n prog.AddConstraintToAllKnotPoints(prog.state()[0] + r <= self.sim_params.arena_limits_x / 2.0)\n prog.AddConstraintToAllKnotPoints(prog.state()[0] - r >= -self.sim_params.arena_limits_x / 2.0)\n prog.AddConstraintToAllKnotPoints(prog.state()[1] + r <= self.sim_params.arena_limits_y / 2.0)\n prog.AddConstraintToAllKnotPoints(prog.state()[1] - r >= -self.sim_params.arena_limits_y / 2.0)\n\n prog.AddFinalCost(prog.time())\n\n if not self.prev_u is None and not self.prev_x is None:\n prog.SetInitialTrajectory(traj_init_u=self.prev_u, traj_init_x=self.prev_x)\n\n solver = SnoptSolver()\n result = solver.Solve(prog)\n\n u_traj = prog.ReconstructInputTrajectory(result)\n x_traj = prog.ReconstructStateTrajectory(result)\n\n self.prev_u = u_traj\n self.prev_x = x_traj\n\n u_vals = u_traj.vector_values(u_traj.get_segment_times())\n x_vals = x_traj.vector_values(x_traj.get_segment_times())\n print(u_vals)\n print(u_vals[:,0])\n return u_vals[:,0]", "title": "" }, { "docid": "bd1bf7ce1a99502dac7845760750d38e", "score": "0.5218086", "text": "def obstacle_detection(self):\n #saving the acelerations\n x_accel = self._imudata.linear_acceleration.x\n y_accel = self._imudata.linear_acceleration.y\n z_accel = self._imudata.linear_acceleration.z\n \n axis_list = [x_accel, y_accel, z_accel]\n \n #looking for the major measure\n max_axis_index = axis_list.index(max(axis_list))\n #if that measure is positive or not\n positive = axis_list[max_axis_index] >= 0\n \n #if value is > than 7 then True\n significative_value = axis_list[max_axis_index] > self._threshold\n \n message = \"\"\n \n if significative_value:\n if max_axis_index == 0:\n # Winner is in the x axis, therefore its a side crash left/right\n rospy.logwarn(\"[X=\"+str(x_accel))\n rospy.loginfo(\"Y=\"+str(y_accel)+\", Z=\"+str(z_accel)+\"]\")\n if positive:\n message = \"right\"\n else:\n message = \"left\"\n \n elif max_axis_index == 1:\n # Winner is the Y axis, therefore its a forn/back crash\n rospy.logwarn(\"[Y=\"+str(y_accel))\n rospy.loginfo(\"X=\"+str(x_accel)+\", Z=\"+str(z_accel)+\"]\")\n if positive:\n message = \"front\"\n else:\n message = \"back\"\n elif max_axis_index == 2:\n # Z Axis is the winner, therefore its a crash that made it jump\n rospy.logwarn(\"[Z=\"+str(z_accel))\n rospy.loginfo(\"X=\"+str(x_accel)+\", Y=\"+str(y_accel)+\"]\")\n \n if positive:\n message = \"up\"\n else:\n message = \"down\"\n else:\n message = \"unknown_direction\"\n else:\n rospy.loginfo(\"X=\"+str(x_accel)+\"Y=\"+str(y_accel)+\", Z=\"+str(z_accel)+\"]\")\n message = \"nothing\"\n \n return self.convert_to_dict(message)", "title": "" }, { "docid": "ce79f10b09c69e1be3df5447d882bd61", "score": "0.52171683", "text": "def get_goal(self):\n if self.stationary:\n self.head = (self.loc[0], 0)\n else:\n self.head = self.porch.get_entrance()\n return self.head", "title": "" }, { "docid": "b143b2615492cb4e57c127ad86657be9", "score": "0.52151763", "text": "def process_measurements(self):\n\t# Check samples from https://github.com/markwsilliman/turtlebot/blob/master/goforward_and_avoid_obstacle.py\n\n sonar_meas = self.ros_interface.get_range()\n imu_meas = self.ros_interface.get_imu()\n\n if(sonar_meas == None):\n return\n elif(sonar_meas <= 30):\n pose = np.array([0,0,math.pi/4]) # emulate\n goal = np.array([0,0])\n else:\n pose = np.array([0,0,0])\n goal = np.array([1,0])\n\n vel = self.diff_drive_controller.compute_vel(pose, goal)\n self.vel = 
vel[0:2];\n self.ros_interface.command_velocity(vel[0], vel[1])\n rospy.loginfo_throttle(5, \"robot_control velocity:\"+ str(self.vel)+\" sonar:\"+str(sonar_meas))#+\" imu:\"+str(imu_meas))\n #rospy.loginfo(\"robot_control velocity:\"+ str(self.vel)+\" sonar:\"+str(sonar_meas))#+\" imu:\"+str(imu_meas))\n return", "title": "" }, { "docid": "e0b100604e6f84092811f07c91477a5d", "score": "0.5214432", "text": "def _get_current_zone(self, devicename, latitude, longitude):\n\n current_zone_list = active_zone(self.hass, latitude, longitude)\n\n #Example current_zone:\n #<state zone.home=zoning; hidden=True, latitude=27.726639,\n #longitude=-80.3904565, radius=40.0, friendly_name=Home, icon=mdi:home,\n #beacon=uuid=9AC56DEE-E6F3-4446-A2BC-9A68D06BC0BB, major=1, minor=1\n\n log_msg = (\"►►GET CURRENT ZONE BEG, Zone={}, Lat={}, Long={}, \"\n \"CurrentStateTable={}\").format(\n current_zone_list, latitude, longitude,\n self.current_state.get(devicename)) \n self._LOGGER_debug_msg(log_msg)\n\n if current_zone_list:\n # current_zone = current_zone.attributes.get('friendly_name')\n current_zone = str(current_zone_list)\n current_zone = current_zone.split('=') #=<state zone.home\n current_zone = current_zone[0].split('.') #=home\n current_zone = current_zone[1]\n \n #Override 'NearZone' zone name, will be reset later to not_home\n if 'nearzone' in current_zone:\n current_zone = 'near_zone'\n else:\n current_zone = 'not_home'\n\n log_msg = (\"►►GET CURRENT ZONE END, Zone={}, Lat={}, Long={}, \"\n \"TriggerState={}\").format(\n current_zone, latitude, longitude,\n self.current_state.get(devicename))\n self._LOGGER_debug_msg(log_msg)\n\n return current_zone #.lower()", "title": "" }, { "docid": "6c293626f67f405cb352c65789c67b10", "score": "0.51940376", "text": "def t_atm():", "title": "" }, { "docid": "c3bf636eec87b6209f3ab0ba829e0f19", "score": "0.51474625", "text": "def main():\n rospy.init_node(\"ik_pick_and_place_demo\")\n # Load Gazebo Models via Spawning Services\n # Note that the models reference is the /world frame\n # and the IK operates with respect to the /base frame\n load_gazebo_models()\n # Remove models from the scene on shutdown\n rospy.on_shutdown(delete_gazebo_models)\n\n # Wait for the All Clear from emulator startup\n rospy.wait_for_message(\"/robot/sim/started\", Empty)\n\n limb = 'left'\n # Starting Joint angles for left arm\n starting_joint_angles = {'left_w0': 0.6699952259595108,\n 'left_w1': 1.030009435085784,\n 'left_w2': -0.4999997247485215,\n 'left_e0': -1.189968899785275,\n 'left_e1': 1.9400238130755056,\n 'left_s0': -0.08000397926829805,\n 'left_s1': -0.9999781166910306}\n pnp = Pose_with_shots(limb)\n # An orientation for gripper fingers to be overhead and parallel to the obj\n overhead_orientation = Quaternion(\n x=-0.0249590815779,\n y=0.999649402929,\n z=0.00737916180073,\n w=0.00486450832011)\n block_poses = list()\n # The Pose of the block in its initial location.\n # You may wish to replace these poses with estimates\n # from a perception node.\n block_poses.append(Pose(\n position=Point(x=0.7, y=0.15, z=-0.1),\n orientation=overhead_orientation))\n block_poses.append(Pose(\n position=Point(x=0.7, y=0.15, z=0.0),\n orientation=overhead_orientation))\n block_poses.append(Pose(\n position=Point(x=0.7, y=0.15, z=0.1),\n orientation=overhead_orientation))\n block_poses.append(Pose(\n position=Point(x=0.7, y=0.15, z=0.2),\n orientation=overhead_orientation))\n block_poses.append(Pose(\n position=Point(x=0.7, y=0.15, z=0.3),\n orientation=overhead_orientation))\n 
block_poses.append(Pose(\n position=Point(x=0.7, y=0.15, z=0.4),\n orientation=overhead_orientation))\n block_poses.append(Pose(\n position=Point(x=0.7, y=0.35, z=0.4),\n orientation=overhead_orientation))\n block_poses.append(Pose(\n position=Point(x=0.7, y=0.35, z=0.3),\n orientation=overhead_orientation))\n # Move to the desired starting angles\n pnp.move_to_start(starting_joint_angles)\n idx = 0\n for pose in block_poses:\n pnp.servo_to_pose(pose)\n return 0", "title": "" }, { "docid": "9d3d2c18359d9bd4b5e3ad429d446750", "score": "0.51426506", "text": "def update_occupancy(self):\n self.x, self.y, self.z = self.pc.get_position()\n\n if self.multiranger.front < self.DETECTION_THRESHOLD_SIDEWAY:\n self.fill_dynamic_occupancy(\n self.x+self.multiranger.front, self.y, self.TILE_OBSTACLE)\n\n if self.multiranger.left < self.DETECTION_THRESHOLD_SIDEWAY:\n self.fill_dynamic_occupancy(\n self.x, self.y+self.multiranger.left, self.TILE_OBSTACLE)\n\n if self.multiranger.back < self.DETECTION_THRESHOLD_SIDEWAY:\n self.fill_dynamic_occupancy(\n self.x-self.multiranger.back, self.y, self.TILE_OBSTACLE)\n\n if self.multiranger.right < self.DETECTION_THRESHOLD_SIDEWAY:\n self.fill_dynamic_occupancy(\n self.x, self.y-self.multiranger.right, self.TILE_OBSTACLE)\n\n if self.multiranger.down < self.DETECTION_THRESHOLD_Z:\n if self.x <= self.TAKEOFF_REGION_X[0] and self.x >= self.TAKEOFF_REGION_X[1]:\n self.fill_dynamic_occupancy(self.x, self.y, self.TILE_TAKEOFF)\n elif self.x <= self.LANDING_REGION_X[0] and self.x >= self.LANDING_REGION_X[1]:\n self.fill_dynamic_occupancy(self.x, self.y, self.TILE_LANDING)\n else:\n self.fill_dynamic_occupancy(self.x, self.y, self.TILE_FREE)", "title": "" }, { "docid": "6a1bdb3b9d5ed4c00d362233733fb0cf", "score": "0.51417774", "text": "def cozmoBehavior(robot: cozmo.robot.Robot):\r\n \r\n global grid, stopevent\r\n \r\n center = (grid.width/2, grid.height/2)\r\n goal_grid = center\r\n grid.addGoal(center)\r\n goal_angle = 0\r\n path = None\r\n grid_frame_pose = Pose(robot.pose.position.x, robot.pose.position.y, 0, angle_z = robot.pose.rotation.angle_z)\r\n current_grid = (0, 0)\r\n state = 'INIT'\r\n current_angle = 0\r\n cube2_seen = False\r\n cube3_seen = False\r\n \r\n robot.move_lift(-3)\r\n robot.set_head_angle(degrees(0)).wait_for_completed()\r\n \r\n while not stopevent.is_set():\r\n cube1 = robot.world.get_light_cube(1)\r\n cube2 = robot.world.get_light_cube(2)\r\n cube3 = robot.world.get_light_cube(3)\r\n\r\n if cube1.is_visible:\r\n if state == 'INIT' or state == 'NO_GOAL':\r\n print('Cube 1 seen, add and go to goal')\r\n # setup goal and calculate path\r\n coord_x, coord_y, goal_angle = get_relative_coord(cube1, grid_frame_pose)\r\n if coord_x < 0 or coord_y < 0:\r\n print(\"\")\r\n print(\"============== ERROR ==============\")\r\n print(\"A cube is placed where cozmo's target position is out of grid bound. 
Please turn the cube or place it closer to the center of the grid\")\r\n print(\"\")\r\n sys.exit()\r\n goal_angle = update_grid_with_goal(coord_x, coord_y, goal_angle)\r\n path = calculate_path(current_grid)\r\n state = 'FOUND_GOAL'\r\n elif state == 'TURN_IN_PLACE':\r\n print('TURNING: Cube 1 seen, add and go to goal')\r\n # recalibrate current position\r\n robot_x, robot_y, robot_angle = get_relative_coord(robot, grid_frame_pose)\r\n current_grid = (robot_x, robot_y)\r\n current_angle = robot_angle\r\n # setup goal and calculate path\r\n coord_x, coord_y, goal_angle = get_relative_coord(cube1, grid_frame_pose)\r\n goal_angle = update_grid_with_goal(coord_x, coord_y, goal_angle)\r\n path = calculate_path(current_grid)\r\n state = 'FOUND_GOAL'\r\n \r\n if cube2.is_visible:\r\n if state == 'INIT':\r\n state = 'NO_GOAL'\r\n print(\"Cube 2 seen\")\r\n if not cube2_seen:\r\n # mark it as seen\r\n cube2_seen = True\r\n # add obstacle to grid\r\n coord_x, coord_y, goal_angle = get_relative_coord(cube2, grid_frame_pose)\r\n obstacles = get_obstacles_around_coord(coord_x, coord_y)\r\n grid.addObstacles(obstacles)\r\n if state != 'TURN_IN_PLACE':\r\n path = calculate_path(current_grid)\r\n else:\r\n robot.turn_in_place(degrees(10), speed=degrees(20), in_parallel=False).wait_for_completed()\r\n \r\n if cube3.is_visible:\r\n if state == 'INIT':\r\n state = 'NO_GOAL'\r\n print(\"Cube 3 seen\")\r\n if not cube3_seen:\r\n # mark it as seen\r\n cube3_seen = True\r\n # add obstacle to grid\r\n coord_x, coord_y, goal_angle = get_relative_coord(cube3, grid_frame_pose)\r\n obstacles = get_obstacles_around_coord(coord_x, coord_y)\r\n grid.addObstacles(obstacles)\r\n if state != 'TURN_IN_PLACE':\r\n path = calculate_path(current_grid)\r\n else:\r\n robot.turn_in_place(degrees(10), speed=degrees(20), in_parallel=False).wait_for_completed()\r\n\r\n if not (cube1.is_visible or cube2.is_visible or cube3.is_visible):\r\n if state == 'INIT':\r\n path = calculate_path(current_grid)\r\n state = 'NO_GOAL'\r\n if state == 'TURN_IN_PLACE':\r\n robot.turn_in_place(degrees(10), speed=degrees(20), in_parallel=False).wait_for_completed()\r\n \r\n if path is not None and len(path) > 0:\r\n # figure out the pose angle. should keep the angle of the path traveled\r\n next_grid = path.pop(0)\r\n angle = get_step_angle(current_grid, next_grid, current_angle)\r\n robot.turn_in_place(degrees(angle), speed=degrees(45), is_absolute=False).wait_for_completed()\r\n time.sleep(0.2)\r\n current_angle += angle\r\n if round(current_angle) % 90 == 0:\r\n dist = grid.scale\r\n else:\r\n dist = grid.scale*1.4142\r\n robot.drive_straight(distance_mm(dist), speed_mmps(25)).wait_for_completed()\r\n time.sleep(0.2)\r\n current_grid = next_grid\r\n grid.setStart(current_grid)\r\n if len(path) == 0:\r\n if state == 'FOUND_GOAL':\r\n robot.turn_in_place(degrees(goal_angle), speed=degrees(45), is_absolute=True).wait_for_completed()\r\n break\r\n else:\r\n state = 'TURN_IN_PLACE'", "title": "" }, { "docid": "810eb7f7aef89aa9e45d1f4783ab3dac", "score": "0.5132676", "text": "def test_lookat(self):\r\n\r\n with Morse() as morse:\r\n\r\n #TODO: Stupid duplication of SetUpEnv values. Could not find a way\r\n #to share the value. 
Class variables does not seem to work here.\r\n ptu_x = 0.2020\r\n ptu_z = 1.4400 + .1 # 0.1 -> height of ATRV center\r\n\r\n precision = 0.02\r\n\r\n res = morse.rpc('robot.ptu', 'look_at_point', 1 ,0 ,ptu_z)\r\n res = morse.rpc('robot.ptu', 'get_pan_tilt')\r\n self.assertAlmostEqual(res[0], 0.0, delta=precision)\r\n self.assertAlmostEqual(res[1], 0.0, delta=precision)\r\n\r\n\r\n\r\n res = morse.rpc('robot.ptu', 'look_at_point', -1 ,0 ,ptu_z)\r\n res = morse.rpc('robot.ptu', 'get_pan_tilt')\r\n# self.assertAlmostEqual(res[0], math.radians(180.0), delta=precision)\r\n self.assertAlmostEqual(res[1], 0.0, delta=precision)\r\n\r\n\r\n\r\n res = morse.rpc('robot.ptu', 'look_at_point', ptu_x,1,ptu_z)\r\n res = morse.rpc('robot.ptu', 'get_pan_tilt')\r\n self.assertAlmostEqual(res[0], math.radians(90), delta=precision)\r\n self.assertAlmostEqual(res[1], 0.0, delta=precision)\r\n\r\n\r\n\r\n res = morse.rpc('robot.ptu', 'look_at_point', ptu_x, -1, ptu_z)\r\n res = morse.rpc('robot.ptu', 'get_pan_tilt')\r\n self.assertAlmostEqual(res[0], math.radians(-90), delta=precision)\r\n self.assertAlmostEqual(res[1], 0.0, delta=precision)\r\n\r\n \r\n \r\n res = morse.rpc('robot.ptu', 'look_at_point', ptu_x,0,10)\r\n res = morse.rpc('robot.ptu', 'get_pan_tilt')\r\n self.assertAlmostEqual(res[1], math.radians(-90), delta=precision)\r\n # Reset position\r\n morse.rpc('robot.ptu', 'set_pan_tilt', 0.0, 0.0)\r\n\r\n res = morse.rpc('robot.ptu', 'look_at_object', 'chair')\r\n res = morse.rpc('robot.ptu', 'get_pan_tilt')\r\n self.assertAlmostEqual(res[0], math.radians(90), delta=precision)\r\n self.assertAlmostEqual(res[1], 0.466, delta=precision)", "title": "" }, { "docid": "bf89d2ab5ff4a592ad4d7c31cb0bb3a9", "score": "0.5096512", "text": "def exec(self) -> None:\n\n steps = self.calculate_curtains_steps()\n Logger.getLogger().debug(\"calculated steps %s\", steps)\n self.crac_status.curtain_east_status = self.curtain_east.read()\n self.crac_status.curtain_east_steps = self.curtain_east.steps()\n self.crac_status.curtain_west_status = self.curtain_west.read()\n self.crac_status.curtain_west_steps = self.curtain_west.steps()\n Logger.getLogger().debug(\"curtain_east_steps %s\", self.curtain_east.steps())\n Logger.getLogger().debug(\"curtain_east_status %s\", self.curtain_east.read())\n Logger.getLogger().debug(\"curtain_west_steps %s\", self.curtain_west.steps())\n Logger.getLogger().debug(\"curtain_west_status %s\", self.curtain_west.read())\n self.read_altaz_mount_coordinate()\n\n if self.telescope.status not in [TelescopeStatus.FLATTER, TelescopeStatus.SECURE]:\n self.panel_off()\n\n if not self.started:\n self.park_curtains()\n self.curtain_east.is_disabled = True\n self.curtain_west.is_disabled = True\n return\n\n self.curtain_east.is_disabled = False\n self.curtain_west.is_disabled = False\n prevSteps = {\"east\": self.curtain_east.steps(), \"west\": self.curtain_west.steps()}\n if self.is_diff_steps(steps, prevSteps):\n Logger.getLogger().debug(\"Differenza steps sufficienti\")\n self.move_curtains_steps(steps)\n # solo se la differenza è misurabile imposto le coordinate\n # precedenti uguali a quelle attuali altrimenti muovendosi\n # a piccoli movimenti le tende non verrebbero mai spostate", "title": "" }, { "docid": "9d895aa94c9803e785130b1fedba0b8d", "score": "0.5093552", "text": "def home(self):\n # make sure the camera is past the cannula and tool vertical\n print(\"starting home\")\n self.arm.home()\n self.arm.close_jaw()\n\n if self.arm.get_current_joint_position()[2] > 0.12:\n # Already past 
cannula\n carte_goal = self.arm.get_current_position().p\n carte_goal[2] += 0.04\n self.arm.move(carte_goal)\n\n goal = np.zeros(6)\n\n if ((self.arm.name() == 'PSM1') or (self.arm.name() == 'PSM2') or\n (self.arm.name() == 'PSM3') or (self.arm.name() == 'ECM')):\n # set in position joint mode\n goal[2] = 0.08\n self.arm.move_joint(goal)\n self.arm.move(self.ROT_MATRIX)", "title": "" }, { "docid": "6d065ac6573b153a5aa0b234800e7e29", "score": "0.508981", "text": "def calc_pos_obstacle(self):\n open_pos = random.randint(0, 3)\n if open_pos == 0:\n pos_x_1, pos_y_1 = self.pos_agent[0] + 1, self.pos_agent[1]\n pos_x_2, pos_y_2 = self.pos_agent[0], self.pos_agent[1] + 1\n pos_x_3, pos_y_3 = self.pos_agent[0] - 1, self.pos_agent[1]\n elif open_pos == 1:\n pos_x_1, pos_y_1 = self.pos_agent[0], self.pos_agent[1] + 1\n pos_x_2, pos_y_2 = self.pos_agent[0] - 1, self.pos_agent[1]\n pos_x_3, pos_y_3 = self.pos_agent[0], self.pos_agent[1] - 1\n elif open_pos == 2:\n pos_x_1, pos_y_1 = self.pos_agent[0] - 1, self.pos_agent[1]\n pos_x_2, pos_y_2 = self.pos_agent[0], self.pos_agent[1] - 1\n pos_x_3, pos_y_3 = self.pos_agent[0] + 1, self.pos_agent[1]\n elif open_pos == 3:\n pos_x_1, pos_y_1 = self.pos_agent[0], self.pos_agent[1] - 1\n pos_x_2, pos_y_2 = self.pos_agent[0] + 1, self.pos_agent[1]\n pos_x_3, pos_y_3 = self.pos_agent[0], self.pos_agent[1] + 1\n return pos_x_1, pos_y_1, pos_x_2, pos_y_2, pos_x_3, pos_y_3", "title": "" }, { "docid": "909ac8330a9bded23668abc325a5c7bb", "score": "0.50755465", "text": "def observation_characterization(self, sInd, mode):\n\n OS = self.OpticalSystem\n ZL = self.ZodiacalLight\n TL = self.TargetList\n SU = self.SimulatedUniverse\n Obs = self.Observatory\n TK = self.TimeKeeping\n\n # selecting appropriate koMap\n koMap = self.koMaps[mode[\"syst\"][\"name\"]]\n\n # find indices of planets around the target\n pInds = np.where(SU.plan2star == sInd)[0]\n # get the last detected planets, and check if there was a FA\n # det = self.lastDetected[sInd,0]\n det = np.ones(pInds.size, dtype=bool)\n fEZs = SU.fEZ[pInds].to(\"1/arcsec2\").value\n dMags = SU.dMag[pInds]\n WAs = SU.WA[pInds].to(\"arcsec\").value\n\n FA = det.size == pInds.size + 1\n if FA:\n pIndsDet = np.append(pInds, -1)[det]\n else:\n pIndsDet = pInds[det]\n\n # initialize outputs, and check if any planet to characterize\n characterized = np.zeros(det.size, dtype=int)\n fZ = 0.0 / u.arcsec**2\n systemParams = SU.dump_system_params(\n sInd\n ) # write current system params by default\n SNR = np.zeros(len(det))\n intTime = None\n if len(det) == 0: # nothing to characterize\n return characterized, fZ, systemParams, SNR, intTime\n\n # look for last detected planets that have not been fully characterized\n if not (FA): # only true planets, no FA\n tochar = self.fullSpectra[pIndsDet] == 0\n else: # mix of planets and a FA\n truePlans = pIndsDet[:-1]\n tochar = np.append((self.fullSpectra[truePlans] == 0), True)\n\n # look for last detected planets that have not been fully characterized\n tochar = np.zeros(len(det), dtype=bool)\n if not (FA):\n tochar[det] = self.fullSpectra[pInds[det]] != 1\n elif pInds[det].size > 1:\n tochar[det] = np.append((self.fullSpectra[pInds[det][:-1]] != 1), True)\n else:\n tochar[det] = np.array([True])\n\n # 1/ find spacecraft orbital START position and check keepout angle\n if np.any(tochar):\n # start times\n startTime = TK.currentTimeAbs\n startTimeNorm = TK.currentTimeNorm\n # planets to characterize\n koTimeInd = np.where(np.round(startTime.value) - self.koTimes.value == 0)[\n 0\n ][\n 0\n 
] # find indice where koTime is startTime[0]\n # wherever koMap is 1, the target is observable\n tochar[tochar] = koMap[sInd][koTimeInd]\n\n # 2/ if any planet to characterize, find the characterization times\n if np.any(tochar):\n # propagate the whole system to match up with current time\n # calculate characterization times at the detected fEZ, dMag, and WA\n fZ = ZL.fZ(Obs, TL, sInd, startTime, mode)\n # fEZ = self.lastDetected[sInd,1][tochar]/u.arcsec**2\n # dMag = self.lastDetected[sInd,2][tochar]\n # WA = self.lastDetected[sInd,3][tochar]*u.mas\n fEZ = fEZs[tochar] / u.arcsec**2\n dMag = dMags[tochar]\n WAp = WAs[tochar] * u.arcsec\n\n intTimes = np.zeros(len(pInds)) * u.d\n intTimes[tochar] = OS.calc_intTime(TL, sInd, fZ, fEZ, dMag, WAp, mode)\n intTimes[~np.isfinite(intTimes)] = 0 * u.d\n # add a predetermined margin to the integration times\n intTimes = intTimes * (1 + self.charMargin)\n # apply time multiplier\n totTimes = intTimes * (mode[\"timeMultiplier\"])\n # end times\n endTimes = startTime + totTimes\n endTimesNorm = startTimeNorm + totTimes\n # planets to characterize\n tochar = (\n (totTimes > 0)\n & (totTimes <= OS.intCutoff)\n & (endTimesNorm <= TK.OBendTimes[TK.OBnumber])\n )\n\n # 3/ is target still observable at the end of any char time?\n if np.any(tochar) and Obs.checkKeepoutEnd:\n koTimeInds = np.zeros(len(endTimes.value[tochar]), dtype=int)\n # find index in koMap where each endTime is closest to koTimes\n for t, endTime in enumerate(endTimes.value[tochar]):\n if endTime > self.koTimes.value[-1]:\n # case where endTime exceeds largest koTimes element\n endTimeInBounds = np.where(\n np.floor(endTime) - self.koTimes.value == 0\n )[0]\n koTimeInds[t] = (\n endTimeInBounds[0] if endTimeInBounds.size != 0 else -1\n )\n else:\n koTimeInds[t] = np.where(\n np.round(endTime) - self.koTimes.value == 0\n )[0][\n 0\n ] # find indice where koTime is endTimes[0]\n tochar[tochar] = [koMap[sInd][koT] if koT >= 0 else 0 for koT in koTimeInds]\n\n # 4/ if yes, perform the characterization for the maximum char time\n if np.any(tochar):\n intTime = np.max(intTimes[tochar])\n pIndsChar = pIndsDet[tochar]\n log_char = \" - Charact. 
planet(s) %s (%s/%s detected)\" % (\n pIndsChar,\n len(pIndsChar),\n len(pIndsDet),\n )\n self.logger.info(log_char)\n\n self.vprint(log_char)\n\n # SNR CALCULATION:\n # first, calculate SNR for observable planets (without false alarm)\n planinds = pIndsChar[:-1] if pIndsChar[-1] == -1 else pIndsChar\n SNRplans = np.zeros(len(planinds))\n if len(planinds) > 0:\n # initialize arrays for SNR integration\n fZs = np.zeros(self.ntFlux) / u.arcsec**2\n systemParamss = np.empty(self.ntFlux, dtype=\"object\")\n Ss = np.zeros((self.ntFlux, len(planinds)))\n Ns = np.zeros((self.ntFlux, len(planinds)))\n # integrate the signal (planet flux) and noise\n dt = intTime / self.ntFlux\n for i in range(self.ntFlux):\n # allocate first half of dt\n TK.allocate_time(dt / 2.0)\n # calculate current zodiacal light brightness\n fZs[i] = ZL.fZ(Obs, TL, sInd, TK.currentTimeAbs, mode)[0]\n # propagate the system to match up with current time\n SU.propag_system(sInd, TK.currentTimeNorm - self.propagTimes[sInd])\n self.propagTimes[sInd] = TK.currentTimeNorm\n # save planet parameters\n systemParamss[i] = SU.dump_system_params(sInd)\n # calculate signal and noise (electron count rates)\n Ss[i, :], Ns[i, :] = self.calc_signal_noise(\n sInd, planinds, dt, mode, fZ=fZs[i]\n )\n # allocate second half of dt\n TK.allocate_time(dt / 2.0)\n\n # average output parameters\n fZ = np.mean(fZs)\n systemParams = {\n key: sum([systemParamss[x][key] for x in range(self.ntFlux)])\n / float(self.ntFlux)\n for key in sorted(systemParamss[0])\n }\n # calculate planets SNR\n S = Ss.sum(0)\n N = Ns.sum(0)\n SNRplans[N > 0] = S[N > 0] / N[N > 0]\n # allocate extra time for timeMultiplier\n extraTime = intTime * (mode[\"timeMultiplier\"] - 1)\n TK.allocate_time(extraTime)\n\n # if only a FA, just save zodiacal brightness in the middle of the\n # integration\n else:\n totTime = intTime * (mode[\"timeMultiplier\"])\n TK.allocate_time(totTime / 2.0)\n fZ = ZL.fZ(Obs, TL, sInd, TK.currentTimeAbs, mode)[0]\n TK.allocate_time(totTime / 2.0)\n\n # calculate the false alarm SNR (if any)\n SNRfa = []\n if pIndsChar[-1] == -1:\n fEZ = fEZs[-1] / u.arcsec**2\n dMag = dMags[-1]\n WA = WAs[-1] * u.arcsec\n C_p, C_b, C_sp = OS.Cp_Cb_Csp(TL, sInd, fZ, fEZ, dMag, WA, mode)\n S = (C_p * intTime).decompose().value\n N = np.sqrt((C_b * intTime + (C_sp * intTime) ** 2).decompose().value)\n SNRfa = S / N if N > 0 else 0.0\n\n # save all SNRs (planets and FA) to one array\n SNRinds = np.where(det)[0][tochar]\n SNR[SNRinds] = np.append(SNRplans, SNRfa)\n\n # now, store characterization status: 1 for full spectrum,\n # -1 for partial spectrum, 0 for not characterized\n char = SNR >= mode[\"SNR\"]\n # initialize with full spectra\n characterized = char.astype(int)\n WAchar = WAs[char] * u.arcsec\n # find the current WAs of characterized planets\n WA = WAs * u.arcsec\n if FA:\n WAs = np.append(WAs, WAs[-1] * u.arcsec)\n # check for partial spectra\n IWA_max = mode[\"IWA\"] * (1 + mode[\"BW\"] / 2.0)\n OWA_min = mode[\"OWA\"] * (1 - mode[\"BW\"] / 2.0)\n char[char] = (WAchar < IWA_max) | (WAchar > OWA_min)\n characterized[char] = -1\n # encode results in spectra lists (only for planets, not FA)\n charplans = characterized[:-1] if FA else characterized\n self.fullSpectra[pInds[charplans == 1]] += 1\n self.partialSpectra[pInds[charplans == -1]] += 1\n\n return characterized.astype(int), fZ, systemParams, SNR, intTime", "title": "" }, { "docid": "2b68fd7cfc709465444d4ba6f786a9ca", "score": "0.5064862", "text": "def calc_goal_joint_pose(arm_L, arm_speed, table_L,\n 
theta0, theta1, goal_x, goal_y):\n theta_g = math.atan2(L - goal_y, goal_x)\n arm_vel_g = np.array([\n [arm_speed * math.cos(theta_g)],\n [arm_speed * math.sin(theta_g)]\n ])\n theta0_g, theta1_g = calc_joints_from_pos(arm_L, goal_x, goal_y)\n\n # determine angular velocity of joints\n jacobian = np.array([\n [-arm_L * math.sin(theta1) - arm_L * math.sin(theta0 + theta1),\n -arm_L * math.sin(theta0 + theta1)],\n [arm_L * math.cos(theta1) + arm_L * math.cos(theta0 + theta1),\n arm_L * math.cos(theta0 + theta1)]\n ])\n inv_jacobian = np.linalg.inv(jacobian)\n omega = np.matmul(inv_jacobian, arm_vel_g)\n del_theta0 = theta0_g - theta0\n del_theta1 = theta1_g - theta1\n alpha0 = omega[0]**2 / (2 * del_theta0)\n alpha1 = omega[1]**2 / (2 * del_theta1)\n\n\n # HAVE SOME WHILE LOOP THAT CONSTANTLY BRINGS COLLISION POINT CLOSER\n # UNTIL DESIRED OMEGA AND ALPHA ARE WITHIN LIMITS OF MOTORS B/C\n # IF TRY TO SET TOO HIGH VEL OR ACC, ARM WILL JUST MISS PUCK\n\n\n return omega[0], omega[1], alpha0, alpha1", "title": "" }, { "docid": "5f75daae031902ae27bd78189aad7b85", "score": "0.5060235", "text": "def get_action_space(self):\n control_mode = self.cfg[\"CONTROL_MODE\"]\n\n if control_mode == \"position\":\n joint_limits = self.cfg[\"JOINT_LIMITS\"]\n\n lower_bounds = np.array(\n [\n joint_limits[\"LOW\"][\"J1\"],\n joint_limits[\"LOW\"][\"J2\"],\n joint_limits[\"LOW\"][\"J3\"],\n joint_limits[\"LOW\"][\"J4\"],\n joint_limits[\"LOW\"][\"GRIP\"],\n ]\n )\n upper_bounds = np.array(\n [\n joint_limits[\"HIGH\"][\"J1\"],\n joint_limits[\"HIGH\"][\"J2\"],\n joint_limits[\"HIGH\"][\"J3\"],\n joint_limits[\"HIGH\"][\"J4\"],\n joint_limits[\"HIGH\"][\"GRIP\"],\n ]\n )\n elif control_mode == \"velocity\":\n raise NotImplementedError(\n \"Control mode %s is not implemented yet.\" % control_mode\n )\n\n elif control_mode == \"effort\":\n raise NotImplementedError(\n \"Control mode %s is not implemented yet.\" % control_mode\n )\n else:\n raise ValueError(\"Control mode %s is not known!\" % control_mode)\n print(lower_bounds, upper_bounds, self.cfg[\"ACTION_DIM\"])\n return gym.spaces.Box(low=lower_bounds, high=upper_bounds, dtype=np.float32)", "title": "" }, { "docid": "9437f10a46917dc41737231a8525a0d8", "score": "0.50555265", "text": "def getObservation(self):\r\n res = zeros(5)\r\n if self.env.perseus == self.env.goal:\r\n res[4] = 1\r\n elif self.env.perseus == self.env.initPos[0]:\r\n res[0] = 1\r\n elif self.env.perseus[1] == 3:\r\n if random() > 0.7:\r\n res[self.env.perseusDir] = 1\r\n else:\r\n res[(self.env.perseusDir + 2) % 4] = 1\r\n else:\r\n res[(self.env.perseusDir + 2) % 4] = 1\r\n return res", "title": "" }, { "docid": "1a2f8db380976492f51ff81e0646771b", "score": "0.50536174", "text": "def pick_locations_to_evaluate(self, method=\"contour\"):\n current_cell = self.world.cell_corresponding_to_gps(self.current_gps[0], self.current_gps[1])\n current_pose_utm = convert_gps_to_utm(self.current_gps[0], self.current_gps[1])\n if method == \"fixed\":\n self.fixed_window(current_cell)\n elif method == \"contour\":\n # Create mesh TODO move to constructor.\n\n # Predict for the whole world.\n locations, mean, variances = self.world.predict(self.world.cell_locations)\n L = numpy.array(locations)\n #X_utm = L[:,0].reshape(self.world.width, self.world.height)\n #Y_utm = L[:,1].reshape(self.world.width, self.world.height)\n #mean = mean.reshape(self.world.width, self.world.height)\n #V = variances.reshape(self.world.width, self.world.height)\n X_utm = L[:,0].reshape(self.world.height, self.world.width)\n Y_utm 
= L[:,1].reshape(self.world.height, self.world.width)\n mean = mean.reshape(self.world.height, self.world.width)\n V = variances.reshape(self.world.height, self.world.width)\n # Find contours.\n '''\n plt.figure()\n plt.contourf(X_utm, Y_utm, mean)\n plt.colorbar()\n plt.draw() # TODO have a debug flag.\n plt.pause(0.001)\n plt.figure()\n plt.contourf(X_utm, Y_utm, V)\n plt.colorbar()\n '''\n try:\n C = plt.contour(X_utm, Y_utm, V, 8, colors='black', linewidth=.5)\n contour_with_robot = None\n # TODO not in try block.\n \"\"\"\n # Find frontier.\n for c in C.collections:\n #if c.get_paths()[0].contains_point((current_pose_utm.x, current_pose_utm.y)):\n #p = Polygon(*c.get_paths()[0].to_polygons()[0])\n p = c.get_paths()[0].to_polygons()[0]\n p = numpy.array(p)\n #print \"p\"\n #print p\n #print \"contour_with_robot\"\n #print contour_with_robot\n if contour_with_robot is not None:\n if polygon_area(p[:,0], p[:,1]) > polygon_area(contour_with_robot[:,0], contour_with_robot[:,1]):\n contour_with_robot = p\n contour_obj = c\n else:\n contour_with_robot = p\n contour_obj = c\n \"\"\"\n\n contour_with_robot = numpy.array(C.collections[-1].get_paths()[0].to_polygons()[0])\n contour_obj = C.collections[-1]\n # Find locations on the frontier.\n self.candidate_list = []\n if contour_with_robot is not None:\n contour_obj.set_linewidth(10)\n #plt.plot(current_pose_utm.x, current_pose_utm.y, '*')\n #plt.draw()\n #plt.pause(0.001)\n\n #contour_with_robot = Polygon(*contour_with_robot)\n for i in xrange(len(contour_with_robot)-1):\n #for s in contour_with_robot.sides:\n s = Segment(contour_with_robot[i], contour_with_robot[i+1])\n step_size = s.length / self.world.spacing\n step_size = 1.0 / step_size.evalf()\n t = Symbol('t', real=True)\n #print s\n #print contour_with_robot\n #print \"The vallue of S = \"\n #print s\n point_t = s.arbitrary_point()\n\n for step in numpy.arange(0.0, 1.0000001, step_size):\n p = Point(point_t.x.subs(t, step), point_t.y.subs(t, step))\n cell = self.world.cell_corresponding_to_gps(p.x.evalf(), p.y.evalf(), utm=True)\n if cell not in self.candidate_list:\n self.candidate_list.append(cell)\n\n except ValueError:\n # If no contour is found.\n rospy.logerr(\"No contour found, fixed window used.\")\n self.fixed_window(current_cell)\n \n print \"current cell\", current_cell\n print \"current gps\", self.current_gps\n \n #print \"Candidate list\", self.candidate_list\n return self.candidate_list\n #candidate_locations = [x_values, y_values]\n #return list(itertools.product(*candidate_locations))", "title": "" }, { "docid": "10584f5094c74059701b1fa45eb0dc23", "score": "0.5047193", "text": "def what_zone(coords):\n x = coords[0]\n y = coords[1]\n if y < 2:\n if x < 2:\n return \"R\"\n if x >= 2:\n return \"G\"\n elif y > 2:\n if x < 1:\n return \"Y\"\n if x > 2:\n return \"B\"\n else:\n return None", "title": "" }, { "docid": "6395997f4270570b6b5f0315ec3cc0b5", "score": "0.504529", "text": "def getCOG(self):\n #raise Exception(\"abtract class %s has no center of gravity.\" % self.__class__.__name__)\n return 0. 
# XXX", "title": "" }, { "docid": "d8aee7ef24e6a4177a6d02fb30469cb4", "score": "0.503883", "text": "def robotInit(self):\n\n # Left motor \n self.frontleft_motor = ctre.WPI_TalonSRX(0)\n self.backleft_motor = ctre.WPI_TalonSRX(1) \n self.leftdrive = wpilib.SpeedControllerGroup(self.frontleft_motor, self.backleft_motor)\n # Right motor\n self.frontright_motor = ctre.WPI_TalonSRX(2)\n self.backright_motor = ctre.WPI_TalonSRX(3)\n self.rightdrive = wpilib.SpeedControllerGroup(self.frontright_motor, self.backright_motor)\n # Drive - combining left + right\n self.drive = wpilib.drive.DifferentialDrive(self.leftdrive, self.rightdrive)\n\n # Elevator Rev through CAN(lift)+\n self.eleLeft = rev.CANSparkMax(10, rev.MotorType.kBrushless)\n self.eleRight = rev.CANSparkMax(11, rev.MotorType.kBrushless)\n if wpilib.RobotBase.isSimulation():\n self.climber = rev.CANSparkMax(12, rev.MotorType.kBrushless)\n else: \n self.eleRight.follow(self.eleLeft, invert = True)\n\n # Thor's Stabilizer\n #self.StaLeft = rev.CANSparkMax(12, rev.MotorType.kBrushless)\n #self.StaRight = rev.CANSparkMax(14, rev.MotorType.kBrushless)\n #self.Lift = rev.CANSparkMax(152, rev.MotorType.kBrushless)\n \n # intake motors\n self.left_motor = ctre.WPI_VictorSPX(6)\n self.right_motor = ctre.WPI_VictorSPX(7)\n # intake angle\n self.intake_angle = ctre.WPI_TalonSRX(5)\n \n # intake stick & timer\n # elevator timer\n self.timer = wpilib.Timer()\n # pneumatics joystick\n self.stick = wpilib.XboxController(0)\n #construct Shuffleboard\n self.sd = NetworkTables.getTable('SmartDashboard') \n # pneumatics solenoids\n self.hatchcover = wpilib.DoubleSolenoid(0,1)\n self.doubleSolenoid = wpilib.DoubleSolenoid(2,3)\n self.stick2 = wpilib.XboxController(1)\n \n # Construct Camera\n wpilib.CameraServer.launch()\n\n #construct Shuffleboard\n self.sd = NetworkTables.getTable('SmartDashboard')\n\n #Put items on Shuffleboard\n \n self.sd.putNumber('liftLimit', .4)\n self.sd.putBoolean('LiftLimit', 0)\n self.sd.putNumber('Drive Limit', .5)\n \n\n\n #Get items from Shuffeboard\n self.driveLimit = self.sd.getNumber('Drive Limit',.5)\n #Construct the Pigeon\n self.pigeon = ctre.pigeonimu.PigeonIMU(self.frontleft_motor)\n self.yaw = lambda: self.pigeon.getYawPitchRoll()[0]\n #Construct communication with the pi and the Hephestus output\n self.hephestus=NetworkTables.getTable('hephestus')\n\n #Consruct the line tracker inputs\n self.leftLine=wpilib.AnalogInput(0)\n self.centerLeftLine=wpilib.AnalogInput(1)\n self.centerRightLine=wpilib.AnalogInput(2)\n self.lineRightLine=wpilib.AnalogInput(3)\n\n #Construct the trun controller...\n turnController = wpilib.PIDController(\n self.kP, self.kI, self.kD, self.kF,self.yaw , output=self\n )\n \n turnController.setInputRange(-180.0, 180.0)\n turnController.setOutputRange(-.5, .5)\n turnController.setAbsoluteTolerance(self.kToleranceDegrees)\n turnController.setContinuous(True)\n\n self.turnController = turnController\n self.rotateToAngleRate = 0\n\n\n\n # Rip auto\n self.autonomousInit = self.teleopInit\n self.autonomousPeriodic = self.teleopPeriodic", "title": "" }, { "docid": "c77beb1d8663fb3fafd2c303150c63f9", "score": "0.5035031", "text": "def controller(q):\n\t# Find PC.\n\tprint(\"Searching...\")\n\twhile(field.assignExternalUuid(P_NODE_EXTERN)==False):\n\t\tt.sleep(0.1)\n\tprint(\"External PC found!\")\n\t\n\t# Start-up indicator.\n\tfor i in range(0,5):\n\t\tprint(5-i)\n\t\tt.sleep(1)\n\t\n\t# Running.\n\twhile running:\n\t\t# Mode.\n\t\tballbot.set_velocity_mode()\n\t\tfield.whisperExternalUuid(\"VELOCITY 
MODE!\")\n\t\t\n\t\t# Get from queue.\n\t\t[posXCam, posYCam, yawCam] = q.get()\n\t\t#[posXCam, posYCam, yawCam, bb_x, bb_y, bb_z, bb_roll, bb_pitch, bb_yaw] = q.get()\n\t\t\n\t\t# Setup robot.\t\t\n\t\t#while(field.receiveEndpointExternalUuid()==False):\n\t\t#\tt.sleep(0.1)\t\n\t\t#[posXEnd, posYEnd] = field.getEndpoint()\n\t\tposXEnd = 1.0\n\t\tposYEnd = 1.0\n\t\tfield.whisperExternalUuid(\"BALLBOT AT X:%s Y:%s\" % (posXCam, posYCam))\n\t\tfield.whisperExternalUuid(\"GOAL AT X:%s Y:%s\" % (posXEnd, posYEnd))\n\t\t\n\t\t# Setup and solve optimization problem.\t\t\n\t\tsolver.setEnvironment(S_ROOM_WIDTH, S_ROOM_HEIGHT)\n\t\tsolver.addCircle(2.5, 1.5, 0.5)\n\t\tsolver.setRobot([posXCam, posYCam],\n\t\t\t\t[posXEnd, posYEnd],\n\t\t\t\tS_SAFETY_MARGIN,\n\t\t\t\tC_FF_VMAX_TOT,\n\t\t\t\tC_FF_AMAX_TOT)\n\t\t\n\t\tsolver.solve()\n\t\t\n\t\tposXPath, posYPath, velXPath, velYPath, time = solver.getSolution()\n\t\t\n\t\t# Start cmd.\n\t\t#while(field.receiveStartExternalUuid()==False):\n\t\t#\tt.sleep(0.5)\n\t\t\n\t\t# Empty queue.\n\t\tflush(q)\n\t\t\n\t\t# Bug fix.\n\t\tgammaOld = 0.0\n\t\tend = round(0.8*len(time))\n\t\tvelXPathRot = 0.0\n\t\t\n\t\t# Loop\n\t\tfor i in xrange(1, len(time)):\n\t\t\t# Watchdog\n\t\t\twatchdog.start() \n\t\t\t\n\t\t\t# Update from queue.\n\t\t\t[posXCam, posYCam, yawCam] = q.get()\n\t\t\t#[posXCam, posYCam, yawCam, bb_x, bb_y, bb_z, bb_roll, bb_pitch, bb_yaw] = q.get()\n\t\t\t\n\t\t\t# Calculate angles.\n\t\t\tgamma = atan2(velYPath[i], velXPath[i])\n\t\t\tif(i > end and abs(velXPathRot) < 0.01):\n\t\t\t\tgamma = gammaOld\n\t\t\t\n\t\t\talpha = gamma - yawCam\n\t\t\t#print(gamma*180/pi, yawCam*180/pi, alpha*180/pi)\n\t\t\t\n\t\t\t# Rotate to path frame.\n\t\t\t#[posXCamRot, posYCamRot] = ballbot.rotate(posXCam, posYCam, gamma)\n\t\t\te_x_g = posXPath[i] - posXCam\n\t\t\te_y_g = posYPath[i] - posYCam\n\t\t\t[e_x, e_y] = ballbot.rotate(e_x_g, e_y_g, -gamma)\n\t\t\t[velXPathRot, velYPathRot] = ballbot.rotate(velXPath[i], velYPath[i], -gamma)\n\t\t\t\n\t\t\t# Correct position.\n\t\t\tvelXCorr = pidPosX.calculate(0.0, e_x)\n\t\t\tvelYCorr = pidPosY.calculate(0.0, e_y)\n\t\t\t\n\t\t\t# Correct velocity gain.\n\t\t\tkp = solver.getFeedforwardGain(velXPathRot, velYPathRot, C_FF_KP_VEL, C_FF_VMAX_TOT)\n\t\t\tif(i > end):\n\t\t\t\tkp = 1.0\n\t\t\t\n\t\t\t# Feed forward.\n\t\t\tvelXCmd = velXPathRot*kp + velXCorr\n\t\t\tvelYCmd = velYPathRot*kp + velYCorr\n\t\t\t\n\t\t\t# Velocity command.\t\t\t\n\t\t\t[velXCmdRob, velYCmdRob] = ballbot.rotate(velXCmd, velYCmd, alpha)\n\t\t\tballbot.set_velocity_cmd(velXCmdRob, velYCmdRob, 0)\n\t\t\tfield.whisperExternalUuid(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\" % (posXCam, posYCam, posXPath[i], posYPath[i], velXPath[i], velYPath[i], velXCorr, velYCorr, velXPathRot, velYPathRot, velXCmd, velYCmd, e_x, e_y))\n\t\t\t#field.whisperExternalUuid(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\") % (posXCam, posYCam, posXPath[i], posYPath[i], velXPath[i], velYPath[i], velXCorr, velYCorr, velXPathRot, velYPathRot, velXCmd, velYCmd, e_x, e_y, bb_roll, bb_pitch, bb_yaw\"))\n\t\t\t\n\t\t\t# Hold gamma.\n\t\t\tgammaOld = gamma\n\t\t\t\n\t\t\t# Maintain loop time.\n\t\t\twatchdog.hold()\n\t\t\n\t\t# MODE.\n\t\tfield.whisperExternalUuid(\"END\")\n\t\t\n\t\t# Empty the queue.\n\t\tflush(q)", "title": "" }, { "docid": "067fc8d122c1eccc118380e454f92396", "score": "0.503354", "text": "def coast_until_pos_vel_misalign(self):\n\n self.vesGuard.control.forward = 0\n self.logger.debug(\"Coast: Coasting toward Bandit...\")\n # loop_start_time = 
time.time()\n while True:\n\n # compute angle between relative velocity and relative position\n # vectors for the Bandit-Guard system\n vel_vesB_vesG__lhgntw = self.vesBandit.velocity(self.vesGuard.orbital_reference_frame)\n pos_vesB_vesG__lhgntw = self.vesBandit.position(self.vesGuard.orbital_reference_frame)\n ang_vel_pos_rad = U.angle_between(\n -np.array(vel_vesB_vesG__lhgntw), \n np.array(pos_vesB_vesG__lhgntw))\n\n self.logger.debug(\"Coast: Bandit-Guard vel-pos vector angle = {}\".format(ang_vel_pos_rad))\n\n # check for coast breakout condition to repeat process\n if ang_vel_pos_rad > self.ang_vel_pos_thresh or \\\n self.stop_bot_thread:\n # time.time() - loop_start_time > LBG1_LG1_ParentEnv.LOOP_TIMEOUT:\n break\n\n # re-orient along velocity vector to make smoother transition to zeroing phase\n self.vesGuard.auto_pilot.target_direction = -np.array(vel_vesB_vesG__lhgntw)", "title": "" }, { "docid": "9dba9a39fbd39fc73551be0e8cf8dcba", "score": "0.5032063", "text": "def detect_robot(frame, scale=1):\r\n blue_low = np.array(BLUE_LOW, np.uint8)\r\n blue_high = np.array(BLUE_HIGH, np.uint8)\r\n frame = frame.copy()\r\n \r\n clean_contours = find_color(frame, blue_low, blue_high)\r\n \r\n good_cnt = []\r\n \r\n for cnt in clean_contours:\r\n if(len(cnt) == NB_VERTEX_TRIANGLE):\r\n K = TRIANGLE_LONG_SHORT_RATIO\r\n A = 0\r\n B = 0\r\n C = 0\r\n dAB = 0\r\n dBC = 0\r\n dCA = 0\r\n p1 = cnt[FIRST][0]\r\n p2 = cnt[SECOND][0]\r\n p3 = cnt[THIRD][0]\r\n d1 = np.linalg.norm(p2-p1)\r\n d2 = np.linalg.norm(p3-p2)\r\n d3 = np.linalg.norm(p1-p3)\r\n min_ix = np.argmin([d1, d2, d3])\r\n if(min_ix == FIRST):\r\n A = p3\r\n B = p2\r\n C = p1\r\n dAB = d2\r\n dBC = d1\r\n dCA = d3\r\n elif(min_ix == SECOND):\r\n A = p1\r\n B = p3\r\n C = p2\r\n dAB = d3\r\n dBC = d2\r\n dCA = d1\r\n else:\r\n A = p2\r\n B = p3\r\n C = p1\r\n dAB = d2\r\n dBC = d3\r\n dCA = d1\r\n score = abs(dAB-dCA)+abs(K*dBC - dAB)+abs(K*dBC - dCA)/np.linalg.norm(dAB)\r\n good_cnt.append([A, B, C, score])\r\n \r\n good_cnt = sorted(good_cnt, key = lambda x: x[3])\r\n \r\n robot_pos = [np.array([0, 0]), 0, False, 0]\r\n \r\n if(len(good_cnt) > 0):\r\n robot_visible = True\r\n A = good_cnt[0][FIRST]\r\n B = good_cnt[0][SECOND]\r\n C = good_cnt[0][THIRD]\r\n D = (np.mean([[B, C]], axis=IY))[0]\r\n \r\n \r\n Center = (np.mean([[A, B, C]], axis=IY))[0]\r\n\r\n direction = A - D\r\n \r\n size = np.linalg.norm(direction)\r\n \r\n angle = np.arctan2(direction[IY], direction[IX])\r\n \r\n frame = cv2.line(frame, (int(D[IX]), int(D[IY])), (int(A[IX]), int(A[IY])), color=RED, thickness=1)\r\n frame = cv2.circle(frame, (int(Center[IX]), int(Center[IY])), radius=5, color=RED, thickness=-1)\r\n Center = np.multiply(Center, scale).astype(int)\r\n text = \"position: ({:0.2f}, {:0.2f}) angle: {:0.4f}\".format(Center[IX], Center[IY], angle)\r\n font = cv2.FONT_HERSHEY_SIMPLEX \r\n cv2.putText(frame, text, (10, 50), font, 0.5, GREEN, 1, cv2.LINE_AA)\r\n \r\n \r\n robot_pos = [Center, angle, True, size]\r\n \r\n return robot_pos, frame", "title": "" }, { "docid": "a8174fe88e7f9d79da82add604a2fe9d", "score": "0.5023404", "text": "def __init__(self, granularity = 1):\n MazeEnvironment.__init__(self)\n action_info = FeatureVectorInfo() # describes the actions\n observation_info = FeatureVectorInfo() # describes the observations\n reward_info = FeatureVectorInfo() # describes the rewards\n action_info.add_discrete(0, CONT_MAZE_N_ACTIONS-1) # action\n ( (xmin, ymin), (xmax, ymax) ) = MazeEnvironment.maze.xy_limits()\n print 'MAZE LIMITS', ( (xmin, ymin), 
(xmax, ymax) )\n observation_info.add_continuous(xmin, xmax) # x-coord\n observation_info.add_continuous(ymin, ymax) # y-coord\n observation_info.add_continuous(0, CONT_MAZE_MAX_DISTANCE ) # distance to target\n observation_info.add_continuous(-180, 180) # angle to target\n for i in range(CONT_MAZE_N_RAYS):\n observation_info.add_continuous(0,1) # ray sensor\n reward_info.add_continuous(-100,100)\n self.agent_info = AgentInitInfo(observation_info, action_info, reward_info)\n self.granularity = granularity\n self.max_steps = MAX_STEPS * 15 * self.granularity # allow 15 * g actions per cell\n print 'Initialized EgocentricMazeEnvironment'", "title": "" }, { "docid": "7a2854a0d479ad37e0a34be97a3d2306", "score": "0.5019608", "text": "def get_observation(self):\n\n \"\"\"To avoid dealing with collisions, we want the robot to reach\n for a target above the object position in simulation.\n \"\"\"\n self.update_goal_position()\n\n current_ee_pose = self.robot_interface.ee_pose\n delta = self.goal_position - self.robot_interface.ee_position\n\n camera_img = self.camera_interface.frames()\n observation = (delta, current_ee_pose, camera_img.get('image'))\n\n return observation", "title": "" }, { "docid": "a4531edc0c3cf05ccf9cbf6baf6f9c6c", "score": "0.50193727", "text": "def coord_defined():\n\treturn dsslib.BUSI(ctypes.c_int32(2), ctypes.c_int32(0))", "title": "" }, { "docid": "00ec6e3e692885f678c42f7aad5175f1", "score": "0.5016273", "text": "def __init__(self, dx=0.0, dy=0.0, dyaw=0.0, occupancy=[], prev_grid=[]):\n\n self.origin_x = -15.0\n self.origin_y = -15.0\n self.resolution = 0.3\n self.width = 100 \n self.height = 150 \n self.grid = np.zeros((height, width))\n\n for n in occupancy:\n \n translate_vect = np.array((-dx, -dy))\n\n c = np.cos(self.dyaw)\n s = np.sin(self.dyaw)\n R = np.array(((c, -s), (s, c))) \n\n \n\n\n self.grid = prev_grid[n + translate_vect]\n\n \n lidar_vect = np.array(((lidar_x), (lidar_y)))\n\n # Creates rotation matrix given theta\n c = np.cos(self.yaw)\n s = np.sin(self.yaw)\n R = np.array(((c, -s), (s, c))) \n\n # Vector of point in vehicle frame\n vp = np.array(((point_x), (point_y)))\n\n rotate = R.dot(vp) # rotation to allign with global frame\n transform = rotate + lidar_vect # translates point to global frame", "title": "" }, { "docid": "b1bba3bb7c4bc6efcd1e29d615c238f2", "score": "0.50095695", "text": "def transformToMaze(arm, goals, obstacles, window, granularity):\n dim = len(arm.getArmAngle())\n if (dim==1):\n alpha = arm.getArmAngle()[0]\n beta = 0\n gamma =0\n alimit= arm.getArmLimit()[0]\n blimit=(0,0)\n hlimit=(0,0)\n row_num = int((alimit[1]-alimit[0])/granularity+1)\n col_num =1\n hei_num =1\n cur_r,cur_c,cur_h = angleToIdx((alpha,beta,gamma),(alimit[0],blimit[0],hlimit[0]),granularity)\n if(dim==3):\n alpha,beta,gamma = arm.getArmAngle()\n alimit,blimit,hlimit= arm.getArmLimit()\n row_num = int((alimit[1]-alimit[0])/granularity+1)\n col_num = int((blimit[1]-blimit[0])/granularity+1)\n hei_num = int((hlimit[1]-hlimit[0])/granularity+1)\n cur_r,cur_c,cur_h = angleToIdx((alpha,beta,gamma),(alimit[0],blimit[0],hlimit[0]),granularity)\n\n # print(cur_r,cur_c)\n map = [[[SPACE_CHAR for i in range(hei_num)] for j in range(col_num)]for k in range(row_num)]\n\n # go through map\n # print(obstacles)\n for i in range(row_num):\n for j in range(col_num):\n flag = False\n for k in range(hei_num):\n # if(flag):\n # map[i][j][k] = WALL_CHAR\n # continue\n cur_alpha,cur_beta,cur_gamma = idxToAngle((i,j,k),(alimit[0],blimit[0],hlimit[0]),granularity)\n 
arm.setArmAngle([cur_alpha,cur_beta,cur_gamma])\n if (not isArmWithinWindow(arm.getArmPos(),window)):\n map[i][j][k] = WALL_CHAR\n elif(doesArmTouchObjects(arm.getArmPosDist(),obstacles,False)): \n map[i][j][k] = WALL_CHAR\n elif(i==cur_r and j == cur_c and k == cur_h):\n map[i][j][k] = START_CHAR\n elif(dim==1 and i==cur_r):\n map[i][j][k] = START_CHAR\n elif(doesArmTipTouchGoals(arm.getEnd(),goals)):\n map[i][j][k] = OBJECTIVE_CHAR\n # flag=True\n elif(doesArmTouchObjects(arm.getArmPosDist(),goals,True)): \n map[i][j][k] = WALL_CHAR\n\n return Maze(map,[alimit[0],blimit[0],hlimit[0]],granularity)", "title": "" }, { "docid": "97ccaa35e1da6e4a46addc4478eb8351", "score": "0.49981672", "text": "def point_at_azel(self, az, el):\n self.ephemeris_cmd_location = None\n self.rotor_offsets = (0.0, 0.0)\n self.radio_queue.put((\"soutrack\", f\"azel_{az}_{el}\"))\n new_rotor_destination = (az, el)\n new_rotor_cmd_location = new_rotor_destination\n if self.rotor.angles_within_bounds(*new_rotor_cmd_location):\n self.rotor_destination = new_rotor_destination\n self.rotor_cmd_location = new_rotor_cmd_location\n while not azel_within_range(self.rotor_location, self.rotor_cmd_location):\n sleep(0.1)\n else:\n self.log_message(f\"Object at {new_rotor_cmd_location} Not in Motor Bounds\")", "title": "" }, { "docid": "8eae9d1cc9ede171c34e39e3d75fdad1", "score": "0.49961495", "text": "def get_reward(self):\n # task.sim.pose (the position of the quadcopter in (x,y,z)\n # task.sim.v (the velocity of the quadcopter in (x,y,z) dimension\n # task.sim.angular_v (radians/seconds for each euler angle)\n # Reward for landing softly.\n \n # Lets make problem in terms of z axis first.\n #print(self.sim.pose[2])\n \n #reward = -(abs((self.sim.pose[2]+self.sim.v[2]+(abs(self.sim.pose[2]-self.temp)**2))-3*abs(np.min((self.sim.pose[2]- 5),0)) *self.sim.pose[2] )/(self.sim.runtime))\n #reward = -(abs((self.sim.pose[2]+7*abs(self.sim.v[2])+(6*abs(self.sim.pose[2]-self.temp))-3*abs(np.min((self.sim.pose[2]- 5),0))*self.sim.pose[2])))+self.sim.time\n #reward = np.clip(-(abs((self.sim.pose[2]+abs(self.sim.v[2])+(abs(self.sim.pose[2]-self.temp))-3*abs(np.min((self.sim.pose[2]- 5),0))*self.sim.pose[2])))+self.sim.time,-1,1)\n #else :\n #reward = -np.clip( ((self.sim.v[2] + self.sim.pose[2]*0.0001)/(2*self.sim.runtime)),0,1.0)\n #reward = 1.0 - np.clip((0.4*abs((0.2*(self.sim.pose[2])/(0.8*((abs(self.sim.pose[2]-self.temp)))+0.001) + abs(self.sim.pose[0])+abs(self.sim.pose[1])))) + 1.5*(self.sim.runtime),-1.0,1.0)\n #reward =np.tanh(self.sim.pose[2] -0.7*(abs(self.sim.pose[2]-self.target_pos[2])**2)) \n \n reward =10*self.sim.v[2]**3+0.7*((self.sim.pose[2]-self.target_pos[2])) +(self.temp - self.sim.pose[2])\n #reward = np.clip(reward,-1,1)\n \n #print('t',self.temp)\n #print('p',self.sim.pose[2])\n self.temp = self.sim.pose[2]\n reward = np.tanh(reward)\n \n #print(self.sim.v[2])\n \n\n \n \n #print(reward)\n #reward = 1.-.3*(abs(self.sim.pose[:3] - self.target_pos)).sum()\n return reward", "title": "" }, { "docid": "51f75210e718494c859d17bac0b9a30e", "score": "0.49859318", "text": "def _space(self):\n K = self.base_ring()\n return self.cartan_type()._type.root_system().ambient_space(K)", "title": "" }, { "docid": "b4cb8119649ad02fc60f02510c3c0e55", "score": "0.49701813", "text": "def clarke_error_zone_detailed(act, pred):\n # Zone A\n if (act < 70 and pred < 70) or abs(act - pred) < 0.2 * act:\n return 0\n # Zone E - left upper\n if act <= 70 and pred >= 180:\n return 8\n # Zone E - right lower\n if act >= 180 and pred <= 70:\n 
return 7\n # Zone D - right\n if act >= 240 and 70 <= pred <= 180:\n return 6\n # Zone D - left\n if act <= 70 <= pred <= 180:\n return 5\n # Zone C - upper\n if 70 <= act <= 290 and pred >= act + 110:\n return 4\n # Zone C - lower\n if 130 <= act <= 180 and pred <= (7/5) * act - 182:\n return 3\n # Zone B - upper\n if act < pred:\n return 2\n # Zone B - lower\n return 1", "title": "" }, { "docid": "9603fed9fa173cb74833a2069c426f97", "score": "0.4968276", "text": "def measure(self, i, robot_measurement):\n landmark_barcode = robot_measurement[0]\n\n landmark_id = self.barcode_dict[landmark_barcode]\n # landmark[landmark_gt_x, landmark_gt_y]\n landmark = self.landmark_gt_dict[landmark_id]\n landmark_x = landmark[0]\n landmark_y = landmark[1]\n\n pos = [self.particles[i].x, self.particles[i].y, self.particles[i].theta]\n particle_landmark_range = np.sqrt((landmark_x - pos[0])**2 + (landmark_y - pos[1])**2)\n particle_landmark_bearing = np.arctan2((landmark_y - pos[1]), (landmark_x - pos[0])) - pos[2]\n\n diff_range = robot_measurement[1] - particle_landmark_range\n # diff_bearing = np.abs(particle_landmark_bearing - robot_measurement[2])\n diff_bearing = np.arccos(np.cos(particle_landmark_bearing) * np.cos(robot_measurement[2]) \\\n + np.sin(particle_landmark_bearing) * np.sin(robot_measurement[2]))\n # print(\"diff_range\",diff_range)\n # print(\"diff_bearing\",diff_bearing)\n\n l_range = self.norm_pdf(diff_range,2+6)\n l_bearing = self.norm_pdf(diff_bearing,np.pi/4+ np.pi/4)\n l = l_range * l_bearing\n\n # l = np.exp(-diff_range) + np.exp(-diff_bearing)\n # print(\"l_range\",l_range)\n # print(\"l_bearing\",l_bearing)\n # print(\"l\",l)\n return l", "title": "" }, { "docid": "76e6df5c93b1d52aedc054cb5f6c31f6", "score": "0.49680388", "text": "def load_arena():\n # arena = np.random.randint(low = 0, high = 6, size=(9, 9))\n # Arena reference\n # 0 -> black base\n # 1 -> light green base\n # 2 -> blue base\n # 3 -> red base\n # 4 -> purple base\n # 5 -> yellow base\n # 6 -> white base\n # 7 -> dark green base\n arena = np.array([\n [6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],\n [6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],\n [6, 6, 5, 5, 6, 6, 5, 5, 6, 6, 5, 5, 6, 6],\n [6, 6, 5, 5, 6, 6, 5, 5, 6, 6, 5, 5, 6, 6],\n [6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],\n [6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],\n [6, 6, 5, 5, 6, 6, 5, 5, 6, 6, 5, 5, 6, 6],\n [6, 6, 5, 5, 6, 6, 5, 5, 6, 6, 5, 5, 6, 6],\n [6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],\n [6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],\n [6, 6, 5, 5, 6, 6, 5, 5, 6, 6, 5, 5, 6, 6],\n [6, 6, 5, 5, 6, 6, 5, 5, 6, 6, 5, 5, 6, 6],\n [6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],\n [6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6],\n ])\n\n base_plate_dict = {\n 1: 'rsc/base plate/base plate green.urdf',\n 2: 'rsc/base plate/base plate cyan.urdf',\n 3: 'rsc/base plate/base plate red.urdf',\n 4: 'rsc/base plate/base plate purple.urdf',\n 5: 'rsc/base plate/base plate yellow.urdf',\n 6: 'rsc/base plate/base plate white.urdf',\n 7: 'rsc/base plate/base plate darkgreen.urdf',\n # 1: 'rsc/base plate/base plate blue.urdf',\n }\n\n def get_base_plate_position(i, j):\n return [(arena_size / 2 - 0.5)-i*1,(arena_size / 2 - 0.5)-j*1,0]\n\n for i in range(arena_size):\n for j in range(arena_size):\n if arena[i, j] == 0:\n continue\n p.loadURDF(base_plate_dict[arena[i, j]], get_base_plate_position(i, j), p.getQuaternionFromEuler([0, 0, 0]), useFixedBase=1)\n for i in (-1,0,1):\n for j in (-1,0,1):\n p.loadURDF('rsc/boundary.urdf',[4*i,4*j,-0.1], useFixedBase=1)\n 
print(\"Namaste\")", "title": "" }, { "docid": "edd96b1d25575302fe7a795fcc89a5f4", "score": "0.49664378", "text": "def findBarrierLocation(self):\n phi_tol = abs(self.phi_metaMin - self.phi_absMin) * 1e-12\n V_phimeta = self.V(self.phi_metaMin)\n phi1 = self.phi_metaMin\n phi2 = self.phi_absMin\n phi0 = 0.5 * (phi1+phi2)\n\n # Do a very simple binary search to narrow down on the right answer.\n while abs(phi1-phi2) > phi_tol:\n V0 = self.V(phi0)\n if V0 > V_phimeta:\n phi1 = phi0\n else:\n phi2 = phi0\n phi0 = 0.5 * (phi1+phi2)\n return phi0", "title": "" }, { "docid": "f5da99907d0ef2b8d5aa0cf2a42a3382", "score": "0.4955862", "text": "def getCurrentLandUseSuitability(self):\n variableDict = self.variableDict.get(9) \n current = self.nullMask\n for aKey in variableDict.keys():\n current = ifthenelse(pcreq(self.environment, aKey), \\\n variableDict.get(aKey), current)\n currentLandUseSuitbaility = self.normalizeMap(current)\n return currentLandUseSuitbaility", "title": "" }, { "docid": "6d233fc98e8a90f4b997c0ca3c917e63", "score": "0.49537125", "text": "def detect_lane_obstacle(actor, extension_factor=3, margin=1.02):\n world = CarlaDataProvider.get_world()\n world_actors = world.get_actors().filter('vehicle.*')\n actor_bbox = actor.bounding_box\n actor_transform = actor.get_transform()\n actor_location = actor_transform.location\n actor_vector = actor_transform.rotation.get_forward_vector()\n actor_vector = np.array([actor_vector.x, actor_vector.y])\n actor_vector = actor_vector / np.linalg.norm(actor_vector)\n actor_vector = actor_vector * (extension_factor - 1) * actor_bbox.extent.x\n actor_location = actor_location + carla.Location(actor_vector[0], actor_vector[1])\n actor_yaw = actor_transform.rotation.yaw\n\n is_hazard = False\n for adversary in world_actors:\n if adversary.id != actor.id and \\\n actor_transform.location.distance(adversary.get_location()) < 50:\n adversary_bbox = adversary.bounding_box\n adversary_transform = adversary.get_transform()\n adversary_loc = adversary_transform.location\n adversary_yaw = adversary_transform.rotation.yaw\n overlap_adversary = RotatedRectangle(\n adversary_loc.x, adversary_loc.y,\n 2 * margin * adversary_bbox.extent.x, 2 * margin * adversary_bbox.extent.y, adversary_yaw)\n overlap_actor = RotatedRectangle(\n actor_location.x, actor_location.y,\n 2 * margin * actor_bbox.extent.x * extension_factor, 2 * margin * actor_bbox.extent.y, actor_yaw)\n overlap_area = overlap_adversary.intersection(overlap_actor).area\n if overlap_area > 0:\n is_hazard = True\n break\n\n return is_hazard", "title": "" }, { "docid": "58e9e02a6f9160e5be060e8cd1fe8f8a", "score": "0.49527997", "text": "def _get_achieved_goal(self):\n\n cube_qpos = self.sim.data.get_joint_qpos('object:joint')\n assert cube_qpos.shape == self.CUBE_CONFIGURATION_SHAPE\n\n cube_right_qpos = self.sim.data.get_joint_qpos('object_right:joint')\n assert cube_right_qpos.shape == self.CUBE_CONFIGURATION_SHAPE\n\n return np.concatenate([cube_qpos, cube_right_qpos])", "title": "" }, { "docid": "60b16c9bb66f49c3f3669ced9bbefa38", "score": "0.4951658", "text": "def t_ground():", "title": "" }, { "docid": "09bf0fb91bcfe19f39587dea013a7523", "score": "0.49516237", "text": "def add_obstacles(trial):\n return", "title": "" }, { "docid": "b0a893f6a7e2e8001047016f66130374", "score": "0.49476954", "text": "def GetArenaTheta():\n return _GetArenaHelper(_wlbt.Walabot_GetArenaTheta)", "title": "" }, { "docid": "6b0cd246f1e71d517cc72b63a2a30fce", "score": "0.49460945", "text": "def tractorBeam2(startX , startY , 
maxX, maxY):\r\n\r\n coordinateSystem = {}\r\n\r\n for x in range(startX, maxX):\r\n for y in range(startY, maxY):\r\n incodeProgram = IncodeComputer(instructionList, [x,y] )\r\n output = incodeProgram.defaultRunOfProgram()\r\n coordinateSystem[(x,y)] = output[0]\r\n #print(f\"output: {output}\")\r\n\r\n if coordinateSystem.get((x - 100, y) == 1) and output[0] == 1:\r\n print(f\"Hope: {(x,y)}\")\r\n\r\n if coordinateSystem.get((x - 100, y) == 1) and coordinateSystem.get((x, y - 100) == 1) and output[0] == 1:\r\n print((x,y))\r\n break\r\n\r\n with open(\"output.txt\", 'w') as file:\r\n for x in range(startX, maxX):\r\n for y in range(startY, maxY):\r\n file.write(str(coordinateSystem[(x,y)]))\r\n\r\n if y == maxY - 1 :\r\n file.write(\"\\n\")\r\n\r\n return list(coordinateSystem.values()).count(1)", "title": "" }, { "docid": "227c06dc966b6c29e557fb9f9fda08e9", "score": "0.493245", "text": "def get_four_sector_detection(self):\n x_accel = self._imu.linear_acceleration.x\n y_accel = self._imu.linear_acceleration.y\n z_accel = self._imu.linear_acceleration.z\n\n axis_list = [x_accel, y_accel, z_accel]\n max_axis_index = axis_list.index(max(axis_list))\n positive = axis_list[max_axis_index] >= 0\n significative_val = abs(axis_list[max_axis_index]) > self._threshold\n\n if significative_val:\n if max_axis_index == 0:\n # Winner is in the x-axis\n rospy.logwarn(\"[X=\"+str(x_accel)+\"]\")\n rospy.loginfo(\"[Y=[str\"+str(y_accel)+\", Z=\"+str(z_accel)+\"]\")\n if positive:\n #should turn left\n message = \"right\"\n else:\n message = \"left\"\n elif max_axis_index == 1:\n #winner is in the y-axis\n rospy.logwarn(\"[Y=\"+str(y_accel)+\"]\")\n rospy.loginfo(\"[X=\"+str(x_accel+\", Z=\"+str(z_accel)+\"]\"))\n if positive:\n message = \"front\"\n else:\n message = \"back\"\n elif max_axis_index == 2:\n #winner is in the z-axis\n rospy.logwarn(\"[Z=\"+str(z_accel)+\"]\")\n rospy.loginfo(\"[X=\"+str(x_accel)+\", Y=\"+str(y_accel)+\"]\")\n if positive:\n message = \"up\"\n else:\n message = \"down\"\n else:\n message = \"unknown_direction\"\n else:\n message = \"nothing\"\n rospy.loginfo(\"[X=\"+str(x_accel)+\", Y=\"+str(y_accel)+\", Z=\"+str(z_accel)+\"]\")\n return self.convert_to_dict(message)", "title": "" }, { "docid": "bb5effcdd87d6a71161f42e980af4157", "score": "0.4924573", "text": "def process_observation( self, observation):\n # returns 6 blocks right in front of the agent, what it is staring at, item_count and pitch status\n # get yaw, and depending upon the yaw make current state out of the 9 blocks right in front\n self.logger.debug(observation)\n direction = {'left':90.0,'right':270,'forward':180.0,'backward':0.0}\n yaw = observation.get(u'Yaw')\n if yaw is None:\n print \"Incomplete Observation:\"\n print(observation)\n exit(1)\n if observation.has_key(u'LineOfSight'):\n los = observation.get(u'LineOfSight')\n block_type = los[u'type']\n in_range = los[u'inRange']\n else:\n block_type = 'undefined'\n in_range = False\n # extract grid from observation\n grid = observation.get(u'around9x9', 0)\n if grid is None:\n print \"Incomplete Observation: \" + observation\n exit(1)\n self.logger.debug(grid)\n flag = 0\n item_count = 0\n # format \"front\" depending upon Yaw of the agent\n if yaw == direction['left']:\n self.logger.debug(\"%%Facing left%%\")\n front_idx = range(5*3+1,5*1+0,-5) + range(5*3+26, 5*1+25, -5) + range(5*3+51, 5*1+50, -5)\n elif yaw == direction['right']:\n self.logger.debug(\"%%Facing right%%\")\n front_idx = range(5*1+3,5*3+4,5) + range(5*1+28, 5*3+29, 5) + 
range(5*1+53,5*3+54,5)\n elif yaw == direction['forward']:\n self.logger.debug(\"%%Facing forward%%\")\n front_idx = range(6,9) + range(6+25, 9+25) + range(6+50,9+50)\n else:\n self.logger.debug(\"%%Facing backward%%\")\n front_idx = range(18,15,-1) + range(18+25,15+25,-1) + range(18+50,15+50,-1)\n front = [grid[block_idx] for block_idx in range(len(grid)) if block_idx in front_idx]\n self.logger.debug(front_idx)\n self.logger.debug(front)\n # check if relevant items are on the myopic horizon\n for item in self.relevant_items:\n if item in grid:\n flag = 1\n # get count for item\n item_count = grid.count(item)\n break\n current_s = (front[0],\n front[1],\n front[2],\n front[3],\n front[4],\n front[5],\n front[6],\n front[7],\n front[8],\n block_type,\n in_range,\n item_count,\n self.pitch_count,\n self.prev_a,\n )\n return current_s", "title": "" }, { "docid": "46d790b1d1c40485ce81dabb27d45b6e", "score": "0.49083832", "text": "def GetArenaZ():\n\n return _GetArenaHelper(_wlbt.Walabot_GetArenaZ)", "title": "" }, { "docid": "772db5c3310d7805e566c29f53baf6b4", "score": "0.49066034", "text": "def check_sector(self):\n # figure out which sector we're in\n map_center = np.asarray(\n [x.interior_knots[7] for x in self.model.splines])\n sector_width = np.asarray(\n [(x.interior_knots[-1]-x.interior_knots[0])/3.0 \n for x in self.model.splines])\n sector = tuple(np.floor((self.ac_ned[0:2] - map_center\n + np.asarray([25,25]))/sector_width))\n return sector", "title": "" }, { "docid": "baaf4857149763de428b1aed030204fc", "score": "0.49062628", "text": "def boundary_conditions(self):\r\n global_indeces_1=self.patch_data.patch_global_2D(0)\r\n global_indeces_2=self.patch_data.patch_global_2D(1)\r\n global_indeces_3=self.patch_data.patch_global_2D(2)\r\n global_indeces_4=self.patch_data.patch_global_2D(3)\r\n global_indeces_5=self.patch_data.patch_global_2D(4)\r\n global_indeces_6=self.patch_data.patch_global_2D(5)\r\n global_indeces_7=self.patch_data.patch_global_2D(6)\r\n global_indeces_8=self.patch_data.patch_global_2D(7)\r\n \r\n# global_indeces=self.patch_data.patch_global_2D(self.ess_data[0],self.ess_data[1])\r\n \"\"\" \r\n location of corners\r\n \"\"\"\r\n br_corner=global_indeces_2[0,-1]\r\n bl_corner=global_indeces_1[0,0]\r\n tl_corner=global_indeces_7[-1,0]\r\n tr_corner=global_indeces_8[-1,-1]\r\n tm_point=global_indeces_8[-1,0]\r\n bm_point=global_indeces_1[0,-1]\r\n lm_point=global_indeces_3[-1,0]\r\n rm_point=global_indeces_4[-1,-1]\r\n \r\n# \r\n# boundary_pts_nat=np.hstack((bl_corner,tr_corner)) \r\n# flux_corners=np.array([1,-1])*10E-6\r\n \r\n# boundary_pts_nat=np.hstack((bl_corner,br_corner,tr_corner,tl_corner)) \r\n# flux_corners=np.array([1,1,1,1])*-10E-6\r\n \"\"\"\r\n Laplace verification\r\n \"\"\"\r\n# self.boundary_pts_ess=np.unique(np.hstack((global_indeces_1[0,:],global_indeces_1[:,0],global_indeces_1[:,-1]))) \r\n# boundary_pts_ess=self.boundary_pts_ess\r\n# u_hat_corners=np.zeros(len(boundary_pts_ess))\r\n \r\n \"\"\"\r\n 5 spot\r\n \"\"\"\r\n# self.boundary_pts_ess=np.hstack((br_corner,tl_corner)) \r\n# boundary_pts_ess=np.hstack((br_corner,tl_corner)) \r\n# u_hat_corners=np.array([1,1])*0\r\n\r\n \"\"\"\r\n Standard problem\r\n \"\"\"\r\n# global_indeces_scurve=np.hstack((self.patch_data.patch_global_2D(2)[:,-1],self.patch_data.patch_global_2D(4)[:,-1]))\r\n self.boundary_pts_ess=np.hstack((bl_corner,br_corner,tr_corner,tl_corner)) \r\n boundary_pts_ess= self.boundary_pts_ess \r\n u_hat_corners=np.ones(len(boundary_pts_ess))*40.*10**6\r\n\r\n \"\"\"\r\n Example 
problem\r\n \"\"\"\r\n# global_indeces_scurve=np.hstack((self.patch_data.patch_global_2D(2)[:,-1],self.patch_data.patch_global_2D(4)[:,-1]))\r\n# self.boundary_pts_ess=global_indeces_1[:,0] \r\n# boundary_pts_ess= self.boundary_pts_ess \r\n# u_hat_corners=np.ones(len(boundary_pts_ess))*40.*10**6\r\n# u_hat_corners=np.ones(len(boundary_pts_ess))*0\r\n \r\n \r\n \r\n \"\"\"\r\n Applies flux to edge\r\n \"\"\"\r\n for i in range(len(self.nuemann_bound)):\r\n if self.nuemann_bound[i,0]==-1:\r\n continue\r\n direction=self.nuemann_bound[i,0]; boundary=self.nuemann_bound[i,1]; patch=self.nuemann_bound[i,2]\r\n flux=self.flux[i]\r\n self.edge_integral(direction,boundary,patch,flux)\r\n \r\n \"\"\"\r\n Applying bcs to non modified matrix\r\n \"\"\"\r\n \r\n self.F+= -np.einsum('ij,j',self.K[:,boundary_pts_ess],u_hat_corners)\r\n self.K[:,boundary_pts_ess]=0\r\n self.K[boundary_pts_ess,:]=0\r\n self.K[boundary_pts_ess,boundary_pts_ess]=1.\r\n self.F[boundary_pts_ess]=u_hat_corners\r\n\r\n# \r\n \"\"\"\r\n Applies flux to points\r\n \"\"\"\r\n# boundary_pts_nat=np.hstack((bl_corner,br_corner,tr_corner,tl_corner)) \r\n# boundary_pts_nat=np.hstack((bl_corner,tr_corner)) \r\n#\r\n# boundary_pts_nat=np.hstack((tm_point,bm_point,lm_point,rm_point)) \r\n# boundary_pts_nat=np.hstack((tm_point,bm_point,bl_corner,tl_corner,br_corner,tr_corner,lm_point,rm_point)) \r\n# boundary_pts_nat=np.hstack((global_indeces_3[:,-1],global_indeces_5[:,-1]))\r\n# flux_corners=np.array([1,-1])*self.source_strength \r\n##\r\n## \r\n# self.F[boundary_pts_nat]+=flux_corners\r\n# \r\n \r\n \"\"\"\r\n Psuedocode for L.M. method\r\n \"\"\"", "title": "" }, { "docid": "de42eaec6c23445a3c1603bfe3b63ed5", "score": "0.49062467", "text": "def detect_obstacles_while_moving(self):\n if self.state == self.STATE_EXPLORATION_RIGHT:\n if any(np.array(self.front_list < self.SCANNER)):\n\n # Get ids in list where obstacle near\n idx_list = np.where(np.array(self.front_list) < self.SCANNER)\n # convert ids into y coordinate\n y_obstacles = self.y_before_step - idx_list[0]*9/100*self.STEP\n scan = np.array(self.front_list)\n # calculate x coordinate of obstacles basted on sensor value\n x_obstacles = self.x + scan[idx_list[0]]\n\n return y_obstacles, x_obstacles\n\n elif self.state == self.STATE_EXPLORATION_LEFT:\n if any(np.array(self.front_list < self.SCANNER)):\n\n idx_list = np.where(np.array(self.front_list) < self.SCANNER)\n y_obstacles = self.y_before_step + idx_list[0]*9/100*self.STEP\n scan = np.array(self.front_list)\n x_obstacles = self.x + scan[idx_list[0]]\n\n return y_obstacles, x_obstacles\n\n elif self.state == self.STATE_FORWARD:\n if any(np.array(self.left_list < self.SCANNER)):\n\n idx_list = np.where(np.array(self.front_list) < self.SCANNER)\n x_obstacles = self.x_before_step + idx_list[0] * 9 / 100 * self.STEP # step when going forward\n scan = np.array(self.front_list)\n y_obstacles = self.x + scan[idx_list[0]]\n\n return y_obstacles, x_obstacles\n\n if any(np.array(self.right_list < self.SCANNER)):\n\n idx_list = np.where(np.array(self.front_list) < self.SCANNER)\n x_obstacles = self.x_before_step + idx_list[0] * 9 / 100 * self.STEP # step when going forward\n scan = np.array(self.front_list)\n y_obstacles = self.x - scan[idx_list[0]]\n\n return y_obstacles, x_obstacles\n else:\n pass", "title": "" }, { "docid": "2377e9ae7f380ed353c3b32f5cbdefc5", "score": "0.49051", "text": "def main(self):\n\n self.start_sim()\n\n \"\"\"物体に当たるまで直進\"\"\"\n while True:\n if self.force == 0:\n print \"-----not contact-----\"\n\n 
self.position.data[2] -= 0.004\n self.publisher_position(self.position)\n\n elif self.force == 1:\n print \"-----contact-----\"\n break\n rospy.sleep(0.1)\n\n # self.orbit_position[:, 2] = self.arm_position[2]\n\n # print self.position.data[2]\n print \"-----start serching-----\"\n rospy.sleep(1)\n\n\n step = 1\n for i in tqdm(range(self.orbit_position.shape[0]-1)):\n print \"\\n\"\n print \"step :\" , step\n\n\n const = 1\n while const < 100:\n print \"---------------------------------------------------\"\n current_position = np.array(self.arm_position)\n distance = np.linalg.norm(current_position[0:2] - self.orbit_position[i+1][0:2], ord=2)\n\n print \"current---\", current_position\n # print \"position-----------\", self.orbit_position[i+1][0:2]\n\n print \"distance: \", distance\n\n if distance < self.b / 2:\n break\n\n\n w = 10\n x = np.linspace(current_position[0] - 0.007, current_position[0] + 0.007, w)\n y = np.linspace(current_position[1] - 0.007, current_position[1] + 0.007, w)\n z = np.linspace(current_position[2] - 0.007, current_position[2] + 0.007, w)\n sample = np.array([x,y,z])\n # print sample\n\n\n \"\"\"current_positionの近くのデータ点を収集\"\"\"\n X_ = self.X[np.where((self.X[:, 0] > current_position[0]-0.008) & (self.X[:, 0] < current_position[0]+0.008)\\\n & (self.X[:, 1] > current_position[1]-0.008) & (self.X[:, 1] < current_position[1]+0.008))]\n Y_ = self.Y[np.where((self.X[:, 0] > current_position[0]-0.008) & (self.X[:, 0] < current_position[0]+0.008)\\\n & (self.X[:, 1] > current_position[1]-0.008) & (self.X[:, 1] < current_position[1]+0.008))]\n\n # print \"XXXXXXXXXXX:\",X_\n\n # fig = plt.figure()\n # ax = fig.add_subplot(111, projection='3d')\n\n # ax.set_xlabel(\"X-axis\")\n # ax.set_ylabel(\"Y-axis\")\n # ax.set_zlabel(\"Z-axis\")\n # ax.scatter(current_position[0], current_position[1], current_position[2], s = 100)\n # ax.scatter(self.X[:, 0], self.X[:, 1], self.X[:, 2], alpha = 0.5, color = \"red\")\n # ax.scatter(X_[:, 0], X_[:, 1], X_[:, 2], color = \"black\")\n # plt.show()\n\n print X_.shape\n\n fp = Path_Planning(X_, Y_)\n\n # 凹凸判定\n la = fp.decision_func(sample)\n\n # 法線と方向の決定\n pred = fp.direction_func(current_position, self.orbit_position[i+1])\n pred_normal = pred[0]\n pred_direction = pred[1]\n print \"normal: \", pred_normal\n print \"direction: \", pred_direction\n\n if la == 1: # When convex\n\n print \"-----shape is convex-----\"\n pred_position = current_position + self.a * pred_direction\n self.position.data = pred_position\n\n # self.orientation.data = self.create_orientation(-pred_normal)\n # self.publisher_orientation(self.orientation)\n # rospy.sleep(1)\n\n self.publisher_position(self.position)\n rospy.sleep(0.1)\n\n if self.force == 0:\n # self.X = np.vstack((self.X, self.arm_position))\n # self.Y = np.vstack((self.Y, [-1]))\n\n n = 1\n while n < 10:\n # normal_position = pred_position - n * np.array([0, 0, 0.001])\n current_position = np.array(self.arm_position)\n\n self.position.data = current_position - 0.0001 * pred_normal\n self.publisher_position(self.position)\n\n if self.force == 1:\n break\n rospy.sleep(0.1)\n n += 1\n\n # else:\n # self.X = np.vstack((self.X, self.arm_position))\n # self.Y = np.vstack((self.Y, [0]))\n\n\n\n elif la == 0: # When concave\n\n print \"-----shape is concave-----\"\n # self.orientation.data = self.create_orientation(pred_normal)\n # self.publisher_orientation(self.orientation)\n # rospy.sleep(0.1)\n\n # normal_position = current_position + np.array([0, 0, 0.001])\n # self.position.data = normal_position\n 
print self.position.data\n\n self.position.data = current_position - 0.003 * pred_normal\n self.publisher_position(self.position)\n # print self.position.data\n rospy.sleep(0.1)\n\n n = 1\n while n < 10:\n current_position = np.array(self.arm_position)\n new_position = current_position + n * self.a / 10 * pred_direction\n self.position.data = new_position\n # print self.position.data\n self.publisher_position(self.position)\n\n if self.force == 1:\n break\n\n rospy.sleep(0.1)\n\n n += 1\n\n if self.force == 0:\n # self.X = np.vstack((self.X, self.arm_position))\n # self.Y = np.vstack((self.Y, [-1]))\n\n n = 1\n while n < 100:\n # normal_position = new_position - n * np.array([0, 0, 0.001])\n current_position = np.array(self.arm_position)\n self.position.data = current_position + 0.001 * pred_normal\n self.publisher_position(self.position)\n\n if self.force == 1:\n break\n\n rospy.sleep(0.1)\n n += 1\n\n # else:\n # self.X = np.vstack((self.X, self.arm_position))\n # self.Y = np.vstack((self.Y, [0]))\n\n\n const += 1\n # time.sleep(0.01)\n rospy.sleep(1)\n\n step += 1\n\n\n # # np.save(\"X\", self.X)\n # # np.save(\"Y\", self.Y)\n #\n self.stop_sim()", "title": "" }, { "docid": "09cbae7c223ca7054ad052498ebd7fdc", "score": "0.49028695", "text": "def behave_explore(self):\n # storing the robot's position before movement\n if self.state != self.STATE_FORWARD_LAND and self.state != self.STATE_LEFT_LAND and self.state != self.STATE_RIGHT_LAND:\n self.x_before_step, self.y_before_step, self.z_before_step = self.pc.get_position()\n\n if self.x >= self.LANDING_REGION_X[0] and self.x <= self.LANDING_REGION_X[1]:\n self.DETECTION_THRESHOLD_SIDEWAY = 4 * self.GRID_PRECISION\n self.OBSTACLE_AVOIDANCE_THRESHOLD = 4* self.GRID_PRECISION\n self.STEP = 2* self.GRID_PRECISION\n else:\n self.DETECTION_THRESHOLD_SIDEWAY = 8 * self.GRID_PRECISION\n self.OBSTACLE_AVOIDANCE_THRESHOLD = 8 * self.GRID_PRECISION\n self.STEP = 6 * self.GRID_PRECISION\n \n self.x, self.y, self.z = self.pc.get_position()\n print(\"state: \", self.state, \" position: {0} {1} {2}\".format(self.x,self.y,self.z), \" left: {0}, right {1}\".format(self.multiranger.left, self.multiranger.right))\n self.down_list = []\n self.up_list = []\n # Handling state when the robot is the exploring to the right.\n if self.state == self.STATE_EXPLORATION_RIGHT:\n # map limit reached or obstacle on right sensor\n try:\n obstacle_ahead = self.dynamic_occupancy.at[self.x,self.y-self.OBSTACLE_AVOIDANCE_THRESHOLD] == self.TILE_OBSTACLE\n except:\n obstacle_ahead = False\n if self.y < self.Y_MIN or self.multiranger.right < self.OBSTACLE_AVOIDANCE_THRESHOLD or obstacle_ahead:\n if self.multiranger.front < self.OBSTACLE_AVOIDANCE_THRESHOLD: # if obstacle on front sensor, go left\n self.state = self.STATE_EXPLORATION_RIGHT_BACK\n else:\n # go forward and switch to left exploration\n self.up_list, self.down_list, self.front_list, self.back_list, self.left_list, self.right_list = self.pc.forward(self.STEP)\n self.edge_displacement = self.detect_edge()\n if self.edge_displacement is not None:\n self.state = self.STATE_FORWARD_LAND\n else:\n self.state = self.STATE_EXPLORATION_LEFT\n\n else:\n self.up_list, self.down_list, self.front_list, self.back_list, self.left_list, self.right_list = self.pc.right(self.STEP)\n self.edge_displacement = self.detect_edge()\n if self.edge_displacement is not None:\n self.state = self.STATE_RIGHT_LAND\n\n # Handling state when the robot is backing up to the left after detecting an obstacle from the front sensor.\n elif self.state == 
self.STATE_EXPLORATION_RIGHT_BACK:\n if self.multiranger.front > self.OBSTACLE_AVOIDANCE_THRESHOLD: # no obstacle on front sensor\n # go forward and switch to left exploration\n self.up_list, self.down_list, self.front_list, self.back_list, self.left_list, self.right_list = self.pc.forward(self.STEP)\n self.edge_displacement = self.detect_edge()\n if self.edge_displacement is not None:\n self.state = self.STATE_FORWARD_LAND\n else:\n self.state = self.STATE_EXPLORATION_LEFT\n self.state = self.STATE_EXPLORATION_LEFT\n else:\n self.up_list, self.down_list, self.front_list, self.back_list, self.left_list, self.right_list = self.pc.left(self.STEP)\n self.edge_displacement = self.detect_edge()\n if self.edge_displacement is not None:\n self.state = self.STATE_LEFT_LAND\n # Handling state when the robot is the exploring to the left.\n if self.state == self.STATE_EXPLORATION_LEFT:\n # map limit reached or obstacle on left sensor\n try:\n obstacle_ahead = self.dynamic_occupancy.at[self.x,self.y+self.OBSTACLE_AVOIDANCE_THRESHOLD] == self.TILE_OBSTACLE\n except:\n obstacle_ahead = False\n if self.y > self.Y_MAX or self.multiranger.left < self.OBSTACLE_AVOIDANCE_THRESHOLD or obstacle_ahead:\n if self.multiranger.front < self.OBSTACLE_AVOIDANCE_THRESHOLD: # if obstacle on front sensor, go right\n self.state = self.STATE_EXPLORATION_LEFT_BACK\n else:\n # go forward and switch to right exploration\n self.up_list, self.down_list, self.front_list, self.back_list, self.left_list, self.right_list = self.pc.forward(self.STEP)\n self.edge_displacement = self.detect_edge()\n if self.edge_displacement is not None:\n self.state = self.STATE_FORWARD_LAND\n else:\n self.state = self.STATE_EXPLORATION_RIGHT\n else:\n self.up_list, self.down_list, self.front_list, self.back_list, self.left_list, self.right_list = self.pc.left(self.STEP)\n self.edge_displacement = self.detect_edge()\n if self.edge_displacement is not None:\n self.state = self.STATE_LEFT_LAND\n # Handling state when the robot is backing up to the right after detecting an obstacle from the front sensor.\n elif self.state == self.STATE_EXPLORATION_LEFT_BACK:\n if self.multiranger.front > self.OBSTACLE_AVOIDANCE_THRESHOLD: # no obstacle on front sensor\n # go forward and switch to right exploration\n self.up_list, self.down_list, self.front_list, self.back_list, self.left_list, self.right_list = self.pc.forward(self.STEP)\n self.edge_displacement = self.detect_edge()\n if self.edge_displacement is not None:\n self.state = self.STATE_FORWARD_LAND\n else:\n self.state = self.STATE_EXPLORATION_RIGHT\n else:\n self.up_list, self.down_list, self.front_list, self.back_list, self.left_list, self.right_list = self.pc.right(self.STEP)\n self.edge_displacement = self.detect_edge()\n if self.edge_displacement is not None:\n self.state = self.STATE_RIGHT_LAND\n # Handling state when the robot detected the landing pad while moving forward.\n elif self.state == self.STATE_FORWARD_LAND:\n self.x, self.y, self.z = self.pc.get_position()\n self.pc.go_to(self.x_before_step + self.edge_displacement + PLATFORM_SIDE/2, self.y + 0.5)\n self.state = self.LANDING_MANEUVER_RIGHT\n self.edge_displacement = None\n # Handling state when the robot detected the landing pad while moving right.\n elif self.state == self.STATE_RIGHT_LAND:\n self.x, self.y, self.z = self.pc.get_position()\n self.pc.go_to(self.x + 0.5, self.y_before_step - self.edge_displacement - PLATFORM_SIDE/2)\n self.state = self.LANDING_MANEUVER_BACK\n self.edge_displacement = None\n # Handling state when the robot 
detected the landing pad while moving left.\n elif self.state == self.STATE_LEFT_LAND:\n self.x, self.y, self.z = self.pc.get_position()\n self.pc.go_to(self.x + 0.5, self.y_before_step + self.edge_displacement + PLATFORM_SIDE/2)\n self.state = self.LANDING_MANEUVER_BACK\n self.edge_displacement = None\n # Handling state when the robot is backing up, looking for the landing pad after having detected an edge while approaching\n # from the right or left\n elif self.state == self.LANDING_MANEUVER_BACK:\n self.pc.set_default_velocity(0.1)\n self.STEP = 0.15\n self.up_list, self.down_list, self.front_list, self.back_list, self.left_list, self.right_list = self.pc.back(self.STEP)\n LAND_THRESHOLD = 0.065\n self.edge_displacement = self.detect_edge()\n LAND_THRESHOLD = 0.09\n if self.edge_displacement is not None:\n self.x, self.y, self.z = self.pc.get_position()\n self.pc.go_to(self.x_before_step - self.edge_displacement - PLATFORM_SIDE/2, self.y_before_step )\n self.state = self.STATE_LANDING\n self.edge_displacement = None\n # Handling state when the robot is backing up from the right, looking for the landing pad after having detected an edge while approaching\n # from the front\n elif self.state == self.LANDING_MANEUVER_RIGHT:\n self.pc.set_default_velocity(0.1)\n self.STEP = 0.15\n self.up_list, self.down_list, self.front_list, self.back_list, self.left_list, self.right_list = self.pc.right(self.STEP)\n LAND_THRESHOLD = 0.065\n self.edge_displacement = self.detect_edge()\n LAND_THRESHOLD = 0.09\n if self.edge_displacement is not None:\n self.x, self.y, self.z = self.pc.get_position()\n self.pc.go_to(self.x_before_step, self.y_before_step - self.edge_displacement - PLATFORM_SIDE/2 )\n self.state = self.STATE_LANDING\n self.edge_displacement = None\n # Handling state of landing, storing landing coordinates\n elif self.state == self.STATE_LANDING:\n self.x, self.y, self.z = self.pc.get_position()\n self.landing_position = [self.x, self.y]\n return False\n \n self.x, self.y, self.z = self.pc.get_position()\n # always return true before the landing\n return True", "title": "" }, { "docid": "b1c24f21fae580b82e50c985ff1c377c", "score": "0.48960453", "text": "def assign_to_manikins(self):\r\n interface = [\"grasp\", \"push\", \"toggle\", \"boot\"]\r\n\r\n for man in self.occupants:\r\n for dev in self.devices:\r\n dev_interface = dev.device_interface\r\n if dev_interface != \"none\" and dev_interface != \"seat\":\r\n visible = [dev.class_name, False]\r\n for inter in interface:\r\n if inter in dev_interface:\r\n if inter == \"grasp\":\r\n man_inv = man.grasp_left_veh_inv\r\n man_inv_rt = man.grasp_right_veh_inv\r\n veh_trans = dev.device_trans\r\n max_dist = 0.45\r\n elif inter == \"push\":\r\n man_inv = man.push_left_veh_inv\r\n man_inv_rt = man.push_right_veh_inv\r\n veh_trans = dev.device_trans\r\n max_dist = 0.68\r\n elif inter == \"toggle\":\r\n man_inv = man.toggle_left_veh_inv\r\n man_inv_rt = man.toggle_right_veh_inv\r\n veh_trans = dev.device_trans\r\n max_dist = 0.55\r\n elif inter == \"boot\":\r\n man_inv = man.boot_left_veh_inv\r\n man_inv_rt = man.boot_right_veh_inv\r\n veh_trans = dev.device_trans\r\n max_dist = 0.1\r\n else:\r\n raise ValueError(\"Unrecognized interface type specified\")\r\n\r\n mat_human_to_device = np.dot(man_inv, veh_trans)\r\n trans_human_to_device = mat_human_to_device[:3, 3]\r\n reach_distance = np.sqrt(np.dot(trans_human_to_device,\r\n trans_human_to_device))\r\n # TODO: why convert theta to deg and immediately back to rad for use??\r\n theta = 
np.arccos(trans_human_to_device[2] / reach_distance) * \\\r\n (180. / np.pi)\r\n res_z_line = reach_distance * np.sin(theta * np.pi / 180)\r\n phi = np.arccos(trans_human_to_device[0] / res_z_line) * (180. / np.pi)\r\n\r\n ###right hand zones\r\n mat_human_to_device_rt = np.dot(man_inv_rt, veh_trans)\r\n trans_human_to_device_rt = mat_human_to_device_rt[:3, 3]\r\n reach_distance_rt = np.sqrt(np.dot(trans_human_to_device_rt,\r\n trans_human_to_device_rt))\r\n theta_rt = np.arccos(trans_human_to_device[2] / reach_distance) * \\\r\n 180. / np.pi\r\n res_z_line_rt = reach_distance * np.sin(theta * np.pi / 180)\r\n phi_rt = np.arccos(trans_human_to_device[0] / res_z_line_rt) * \\\r\n 180. / np.pi\r\n\r\n if reach_distance <= max_dist and theta <= 180.0 and \\\r\n phi <= 180.0 or reach_distance_rt <= max_dist and \\\r\n theta_rt <= 180.0 and phi_rt <= 180.0:\r\n visible = [dev.class_name, True]\r\n break\r\n man.ergo_devices.append(visible)\r\n\r\n msg = \"\\nManikin {} can use these devices:\\n\".format(man.name)\r\n for p in man.ergo_devices:\r\n msg += \"\\t{}\\n\".format(p)\r\n logging.info(msg)", "title": "" }, { "docid": "15176ddd85d1879f06d75365dfe27b7f", "score": "0.48908404", "text": "def getLoc():\n return (\"Still in the makes\")", "title": "" }, { "docid": "6aa483e780e981f5352a2cc418fed6bc", "score": "0.48852456", "text": "def run(self):\n\n if self.vehicle.mode.name != \"BRAKE\" and self.vehicle.mode.name != \"ALT_HOLD\" and self.vehicle.mode.name != \"LAND\" and self.vehicle.mode.name != \"RTL\":\n gps_precision_in_decimal_degrees = Polygonal.centimetersToDecimalDegrees(self.vehicle.gps_0.eph)\n probable_drone_locations = Polygonal.random_coordinates((self.vehicle.location.lat, self.vehicle.location.lon), gps_precision_in_decimal_degrees, self.precision)\n\n # Adaptive fence, predicts the location of the drone according to its velocity, after 1 second\n if self.adaptive_fence:\n def add_velocity(x): return (x[0] + self.vehicle.velocity[0], x[1] + self.vehicle.velocity[1])\n probable_drone_locations = map(add_velocity, probable_drone_locations)\n\n if Polygonal.points_in_poly(probable_drone_locations, self.fence) is False:\n print \"Broke circular fence.\"\n print self.vehicle.location\n print gps_precision_in_decimal_degrees\n\n return SafeBehaviour.SafeBehaviour.halt\n\n if self.vehicle.location.alt >= self.maximum_altitude or self.vehicle.location.alt <= self.minimum_altitude:\n print \"Broke altitude geo-fence.\"\n print self.vehicle.location\n print gps_precision_in_decimal_degrees\n\n return SafeBehaviour.SafeBehaviour.halt\n\n return SafeBehaviour.SafeBehaviour.do_nothing", "title": "" }, { "docid": "3a61d33f7b31a521f22ef3e4141a7042", "score": "0.48851964", "text": "def task4x4(self):\n self.grid = np.zeros([4,4])\n #starting location of an agent\n self.agent = np.array([0,0])\n #left, right, up, down\n self.action_space = np.array([0,1,2,3])\n self.actual_move = np.array([[-1,0],[1,0],[0,-1],[0,1]])", "title": "" }, { "docid": "79dcb3e5a41efe83f3fd58d17fa7a943", "score": "0.4868996", "text": "def find_direction(self):\n _direction = Direction('NULL').to_string()\n\n if self._board.is_my_bomberman_dead():\n print(\"Bomberman is dead. 
Sending 'NULL' command...\")\n return _direction\n\n # here's how we find the current Point of our bomberman\n _bm = self._board.get_bomberman()\n _bm_x, _bm_y = _bm.get_x(), _bm.get_y()\n print(\"Found your Bomberman at {}\".format(_bm))\n # Let's check whether our bomberman is not surrounded by walls\n if 4 == self._board.count_near(_bm_x, _bm_y, Element('DESTROY_WALL')):\n print(\"It seems like walls surround you. Self-destroying.\")\n return Direction('ACT').to_string() # Let's drop a bomb then\n\n # Getting alive bombermans to chaise\n enemies = self._board.get_other_alive_bombermans()\n\n # All walls in list\n walls = self._board.get_walls()\n walls.extend(self._board.get_destroy_walls())\n\n ####\n cf = {}\n for alien in enemies:\n # Getting closest enemy\n diagram = GridWithWeights(self._board.len, self._board.len)\n diagram.walls = [(i.get_x(), i.get_y()) for i in walls]\n\n player_point = Point(_bm_x, _bm_y)\n s = alien\n came_from, cost_so_far = a_star_search(diagram,\n (s.get_x(), s.get_y()),\n (player_point.get_x(), player_point.get_y()))\n\n if (_bm_x, _bm_y) in came_from:\n # Looking for closest enemy\n if len(came_from) < len(cf) or not cf:\n cf = came_from # Path here\n csf = cost_so_far # Costs here\n\n if cf:\n # If we found a player to chaise\n print(\"Alien at {}\".format(s))\n # Just debug\n print(\"CSF: {}\".format(csf))\n\n # Next location to move\n nx, ny = cf[(_bm_x, _bm_y)]\n if nx - _bm_x == 1:\n __dir = Direction('RIGHT')\n elif nx - _bm_x == -1:\n __dir = Direction('LEFT')\n elif ny - _bm_y == 1:\n __dir = Direction('DOWN')\n elif ny - _bm_y == -1:\n __dir = Direction('UP')\n\n # If something on the way - invert direction\n if self._board.is_barrier_at(nx, ny):\n __dir.inverted()\n\n if sum(csf.values()) < len(csf):\n # We have reached enemy - plant a bomb and act like kamikadze !! :)\n __dir = Direction('ACT')\n else:\n # Do random choice\n __dir = Direction(choice(('LEFT', 'RIGHT', 'DOWN', 'UP')))\n\n _deadline = time() + 5\n while time() < _deadline:\n # now we calculate the coordinates of potential point to go\n _x, _y = __dir.change_x(_bm.get_x()), __dir.change_y(_bm.get_y())\n # if there's no barrier at random point\n if not self._board.is_barrier_at(_x, _y):\n # here we count the attempt to choose the way\n self._count += 1\n # and check whether it's not the one we just came from\n if not self._last == (_x, _y) or self._count > 5:\n # but we will go back if there were no others twice\n _direction = __dir.to_string()\n self._last = _bm.get_x(), _bm.get_y()\n self._count = 0\n break\n else:\n __dir.inverted()\n _direction = __dir.to_string()\n else: # it seem that we are surrounded\n print(\"It's long time passed. 
Let's drop a bomb\")\n _direction = Direction('ACT').to_string() # let's drop a bomb :)\n\n return _direction", "title": "" }, { "docid": "444dc1659ab7be104efdfb5add003fe4", "score": "0.486718", "text": "def observation(self):\n vip_agent = self.scenario.vip_agent\n bystander_p_pos = np.asarray([bystander.state.p_pos for bystander in self.scenario.bystanders])\n nearest_crowd, crowd_idx = find_nearest_suspected_crowd_members(bystander_p_pos , vip_agent.state.p_pos, 5)\n other_vel = []\n for idx in crowd_idx:\n other_vel.append(self.scenario.bystanders[idx].distance(vip_agent))\n comm = []\n other_agents = []\n if self.scenario.communication:\n for other in self.scenario.bodyguards:\n if other is self: continue\n comm.append(other.state.c)\n other_agents.append(vip_agent.state.p_pos-other.state.p_pos)\n return np.concatenate([self.state.p_vel] + [(vip_agent.state.p_pos-self.state.p_pos)] + [(nearest_crowd - self.state.p_pos).flatten()]+ [other_vel] + other_agents + comm )", "title": "" }, { "docid": "8caeb06eda87f313fec8ecd03e758bb1", "score": "0.48630464", "text": "def GetCurrent(phi, x, y, j1, j2, j3, j4, j5):\n theta_list = np.array([j1, j2, j3, j4, j5]) #list of arm joint angles\n\n T_chassis = np.array([[math.cos(phi), -math.sin(phi), 0, x], #se(3) of chassis configuration\n [math.sin(phi), math.cos(phi), 0, y],\n [0, 0, 1, 0.0963],\n [0, 0, 0, 1]])\n\n J_arm = mr.JacobianBody(arm_screws, theta_list) #body jacobian of arm\n\n T_0e = mr.FKinBody(M_0e, arm_screws, theta_list) #end-effector se(3) at current joint angles\n\n X = np.dot(np.dot(T_chassis, T_b0), T_0e) #current end-effector se(3) in world frame\n\n J_body = np.dot(mr.Adjoint(np.dot(np.linalg.inv(T_0e),T_b0)), F6) #chassis body jacobian\n\n return X, J_arm, J_body", "title": "" }, { "docid": "10b414065dbf88d31ecb70fa0933397b", "score": "0.48619035", "text": "def my_go_to_pose2(robot, x, y, angle_z):\n # ####\n # TODO: Implement a function that makes the robot move to a desired pose\n # using the robot.drive_wheels() function to jointly move and rotate the \n # robot to reduce distance between current and desired pose (Approach 2).\n # ####\n \n \n ####\n #\n # We need to find the differential speeds of left and right wheel and the time \n # duration so that robot will go around some point i.e. 
it will follow the arc by moving\n # and rotating.\n #\n #\n #\n # cos(theta) - sin(theta) 0 \n # rotation component = sin(theta) cos(theta) 0 \n # 0 0 1\n #\n # r*phi_left/2 + r*phi_right/2 \n # translation component = 0\n # r*phi_right/d - r*phi_left/d\n #\n #\n # x\n # y = rotation component*translation component\n # thetatarget\n #\n ####\n \n #####\n #\n # Solving above equations by inverse (from Correll book, equation 3.64) \n #\n # phi_left = (2*xR - thetatarget*d)/(2*r)\n # \n # phi_right = (2*xR + thetatarget*d)/(2*r)\n # \n #\n #####\n \n d = 49 # get_distance_between_wheels(robot)\n \n r = 13 # get_front_wheel_radius(robot)\n \n thetatarget = (angle_z*2*math.pi)/360\n \n xR = ((x)*math.cos(thetatarget)) + ((y)*math.sin(thetatarget))\n\n \n phi_left = (2*xR - thetatarget*d)/(2*r)\n phi_right = (2*xR + thetatarget*d)/(2*r)\n \n t = 5\n \n angularspeed_left = phi_left/t\n angularspeed_right = phi_right/t\n \n linearspeed_left = angularspeed_left*r\n linearspeed_right = angularspeed_right*r\n \n print(\"xR = \" + str(xR))\n print(\"phi_left = \" + str(phi_left))\n print(\"phi_right = \" + str(phi_right))\n print(\"angularspeed_left = \" + str(angularspeed_left))\n print(\"angularspeed_right = \" + str(angularspeed_right))\n \n robot.drive_wheels(linearspeed_left, linearspeed_right, None, None, t)", "title": "" }, { "docid": "dd8126596d269b594543a082b53c934e", "score": "0.48612747", "text": "def main_sequence():\n robot = Robot(None, None, 0, 0, 0)\n\n # Forward run\n with SyncCrazyflie(uri, cf=Crazyflie(rw_cache='./cache')) as scf:\n time.sleep(0.5)\n scf.cf.param.set_value('kalman.resetEstimation','1') \n time.sleep(0.1)\n scf.cf.param.set_value('kalman.resetEstimation','0')\n time.sleep(0.1)\n scf.cf.param.set_value('kalman.resetEstimation','1') \n time.sleep(0.1)\n scf.cf.param.set_value('kalman.resetEstimation','0')\n\n with CustomMultiranger(scf, rate_ms=50) as multiranger:\n with CustomPositionHlCommander(\n scf,\n MotionCommander(scf, DEFAULT_HEIGHT),\n multiranger,\n x=0.0, y=0.0, z=0.0, \n landing_yaw = math.pi,\n default_velocity=DEFAULT_HEIGHT,\n default_height=DEFAULT_VELOCITY,\n controller=CustomPositionHlCommander.CONTROLLER_MELLINGER) as pc:\n\n time.sleep(1)\n \n pc.set_default_velocity(0.1)\n time.sleep(1)\n \n robot.pc = pc\n robot.multiranger = multiranger\n x, y, z = pc.get_position()\n robot.x = x \n robot.y = y\n robot.z = z\n \n while robot.behave_explore() == True:\n robot.update_occupancy()\n time.sleep(2)\n pc._hl_commander.go_to(pc._x, pc._y, DEFAULT_HEIGHT, math.pi, 2)\n time.sleep(3)\n \n landing_yaw = multiranger.state_estimate_yaw\n # build occupancy map \n time.sleep(2)\n robot.state = robot.STATE_EXPLORATION_RIGHT\n robot.x, robot.y = 0,0\n robot.change_dynamic_occupancy_referential()\n robot.build_map()\n # return run.\n with SyncCrazyflie(uri, cf=Crazyflie(rw_cache='./cache')) as scf:\n time.sleep(0.5)\n scf.cf.param.set_value('kalman.resetEstimation','1') \n time.sleep(0.1)\n scf.cf.param.set_value('kalman.resetEstimation','0')\n time.sleep(0.1)\n scf.cf.param.set_value('kalman.resetEstimation','1') \n time.sleep(0.1)\n scf.cf.param.set_value('kalman.resetEstimation','0')\n\n scf.cf.param.set_value('kalman.initialX',str(robot.x))\n scf.cf.param.set_value('kalman.initialY',str(robot.y))\n scf.cf.param.set_value('kalman.initialYaw',str(landing_yaw - math.pi))\n time.sleep(0.1)\n with CustomMultiranger(scf, rate_ms=50) as multiranger:\n with CustomPositionHlCommander(\n scf,\n MotionCommander(scf, DEFAULT_HEIGHT),\n multiranger,\n x=0, y=0, z=0.0, \n 
landing_yaw = 0,\n default_velocity=DEFAULT_HEIGHT,\n default_height=DEFAULT_VELOCITY,\n controller=CustomPositionHlCommander.CONTROLLER_MELLINGER) as pc:\n\n time.sleep(2)\n \n pc.set_default_velocity(0.1)\n\n robot.pc = pc\n robot.multiranger = multiranger\n x, y, z = pc.get_position()\n robot.x = x \n robot.y = y\n robot.z = z\n while robot.behave_return() == True:\n robot.update_occupancy()\n # To compensate for estimation drift, start looking again for takeoff pad\n robot.pc.back(2*robot.GRID_PRECISION)\n while robot.behave_explore() == True:\n robot.update_occupancy()", "title": "" }, { "docid": "c6188034af807994d00e86348a703e43", "score": "0.48606128", "text": "def trajectory_2(robot):\n d1 = 200 # mm, media baldosa\n d2 = 400 # mm, baldosa\n alpha = 0.2617 # 15º\n stop = False\n estado = 0\n\n while not stop:\n\n # Leer coordenadas del robot\n robot.lock_odometry.acquire()\n x, y, th = robot.readOdometry()\n robot.lock_odometry.release()\n\n if estado == 0:\n estado = 1\n # Actualizar velocidad\n robot.setSpeed(0., math.pi / 2.)\n\n elif estado == 1:\n # estado 1, empieza la trayectoria\n if (-5 <= x <= 5) and (-5 <= y <= 5) and (math.pi/2. - 0.01 <= th <= math.pi/2. + 0.01):\n estado = 2\n # Actualizar velocidad\n robot.setSpeed(50*math.pi, -math.pi/4)\n\n elif estado == 2:\n # estado 2, semicícurlo\n if (alpha - 0.01 <= th <= alpha + 0.015):\n estado = 3\n # Actualizar velocidad\n robot.setSpeed((2*d2 + d1) / 5., 0.)\n\n elif estado == 3:\n # estado 3, reposicionamiento\n if (2*d2 + d1 - 25 <= x <= 2*d2 + d1 + 25) and (d2 - 25 <= y <= d2 + 25) and (alpha - 0.04 <= th <= alpha + 0.04):\n estado = 4\n # Actualizar velocidad\n robot.setSpeed(0., -alpha * 0.8)\n\n elif estado == 4:\n # estado 4, línea recta\n if (2*d2 + d1 -25 <= x <= 2*d2 + d1 + 25) and (d2 - 30 <= y <= d2 + 30) and (-0.002 <= th <= 0.002):\n estado = 5\n # Actualizar velocidad\n robot.setSpeed(100*math.pi, -math.pi/4)\n\n elif estado == 5:\n # estado 5, segundo semicírculo\n if (math.pi -alpha/1.5 <= th <= math.pi-alpha/1.5 + 0.02):\n estado = 6\n # Actualizar velocidad\n # robot.setSpeed(0., -alpha * 0.5)\n robot.setSpeed((2*d2 + d1) / 5., 0)\n \n elif estado == 6:\n # estado 6, vuelve al origen\n if (d1 - 30 <= x <= d1 - 25):\n estado = 7\n # Actualizar velocidad\n robot.setSpeed(50*math.pi, -math.pi/4)\n\n elif estado == 7:\n # estado 7, vuelve a posición inicial\n if (-10 <= x <= 10) and (-10 <= y <= 10):\n stop = True\n \n\n time.sleep(0.005)\n return", "title": "" }, { "docid": "65223d3214bd82e07fc1559a15078a89", "score": "0.48600715", "text": "def plants_eia860():\n return # PLANTS_1", "title": "" }, { "docid": "382d28467c269eddf803a342a3b9009a", "score": "0.4859536", "text": "def get_intersection_region(ball_position: np.array, player_position: np.array, kick_slope: float):\n d = np.hypot(*(ball_position-player_position)) * TeamMasterSupporting.max_robot_speed_optimistic\n d /= TeamMasterSupporting.kick_speed\n n = TeamMasterSupporting.get_point_on_vector(player_position, kick_slope, d)\n n_ = TeamMasterSupporting.get_point_on_vector(player_position, kick_slope, -d)\n a = np.array([ball_position, player_position, n, n_])\n a_vis = np.array([ball_position, n, n_, ball_position, player_position]) # a[:, 0] - x coord, a[:, 1] - y coord\n return a, a_vis", "title": "" }, { "docid": "6dc09de1988940cf2f1e847c9731cd85", "score": "0.48562777", "text": "def get_observation(self):\n images = self.camera.frames()\n image = images['rgb']\n depth = images['depth']\n point_cloud = 
self.camera.deproject_depth_image(depth)\n\n # Crop.\n if self.crop_max is not None and self.crop_min is not None:\n crop_mask = np.logical_and(\n np.all(point_cloud >= self.crop_min, axis=-1),\n np.all(point_cloud <= self.crop_max, axis=-1))\n point_cloud = point_cloud[crop_mask]\n\n # Segment.\n if self.env.simulator:\n segmask = images['segmask']\n segmask = segmask.flatten()\n segmask = pc_utils.convert_segment_ids(segmask, self.body_ids)\n point_cloud = pc_utils.group_by_labels(\n point_cloud, segmask, self.num_bodies, self.num_points)\n else:\n point_cloud = pc_utils.remove_table(point_cloud)\n segmask = pc_utils.cluster(\n point_cloud, num_clusters=self.num_bodies, method='dbscan')\n point_cloud = point_cloud[segmask != -1]\n segmask = pc_utils.cluster(\n point_cloud, num_clusters=self.num_bodies)\n point_cloud = pc_utils.group_by_labels(\n point_cloud, segmask, self.num_bodies, self.num_points)\n\n # Confirm target.\n if self.confirm_target:\n # Click the target position.\n self.target_position = None\n self.depth = depth\n self.ax.cla()\n self.ax.imshow(image)\n logger.info('Please click the target object...')\n while self.target_position is None:\n plt.pause(1e-3)\n logger.info('Target Position: %r', self.target_position)\n\n # Exchange the target object with the first object.\n centers = np.mean(point_cloud, axis=1)\n dists = np.linalg.norm(\n centers - self.target_position[np.newaxis, :], axis=-1)\n target_id = dists.argmin()\n if target_id != 0:\n tmp = copy.deepcopy(point_cloud)\n point_cloud[0, :] = tmp[target_id, :]\n point_cloud[target_id, :] = tmp[0, :]\n\n # Show the segmented point cloud.\n pc_utils.show2d(point_cloud, self.camera, self.ax, image=image)\n\n return point_cloud", "title": "" }, { "docid": "bd383d2643adf149abd6b90634ed9966", "score": "0.48557466", "text": "def get_state(self, rocket, asteroids):\n\n \"\"\"\n Inputs:\n 1. Rocket x Position\n 2. Rocket y Position\n 3. Rocket Acceleration\n 4. Rocket Speed\n 5. Rocket Velocity (direction)\n 6. Rocket shooter countdown\n 7. Rocket Direction\n\n - Inputs from 4 closest astroids\n\n 8. Asteroid x Position\n 9. Asteroid y Position\n 10. Asteroid Speed\n 11. Asteroid Direction\n 12. Distance from Asteroid (edge of polygon) to Rocket\n 12. (distance from edge to player)\n \n repeat asteroid 4x\n\n total inputs 27. 
+1 for a bias.\n \"\"\"\n try:\n asteroids = asteroids[:4]\n except:\n pass\n\n input_vector = [-1 for i in range(27)]\n input_vector[0] = rocket.pos.x/WIDTH\n input_vector[1] = rocket.pos.y/HEIGHT\n input_vector[2] = rocket.acceleration/Rocket.ACCELERATION\n input_vector[3] = rocket.velocity.magnitude() / Rocket.MAX_SPEED\n input_vector[4] = math.atan2(rocket.velocity.x, rocket.velocity.y) / (2*math.pi)\n input_vector[5] = rocket.shoot_countdown / Rocket.SHOOTER_DELAY\n input_vector[6] = (rocket.direction%(2*math.pi)) / math.pi\n\n index = 7\n for i, ast in enumerate(asteroids):\n input_vector[index] = ast.pos.x / WIDTH\n input_vector[index+1] = ast.pos.y / HEIGHT\n # change asteroid max speed values to fit such\n input_vector[index+2] = ast.speed / 3\n input_vector[index+3] = (ast.direction%(2*math.pi)) / (2*math.pi)\n input_vector[index+4] = (math.hypot(rocket.pos.x-ast.pos.x, rocket.pos.y-ast.pos.y)-ast.size) / math.hypot(WIDTH, HEIGHT)\n\n index += 5\n\n input_vector = list(np.array(input_vector)) + [1]\n\n return np.array(input_vector)", "title": "" }, { "docid": "74d1869550c2ceeb24977a200a7bdbae", "score": "0.48546192", "text": "def get_obstacles(self):\n return np.argwhere(self.obstacles == True) * self.config.grid_reso", "title": "" }, { "docid": "7dbf90912cdf49ba738c8a76af9924a7", "score": "0.48503548", "text": "def simulate_zone_occupancy_default(step_time):\n\t# Should return two lists\n\tctime = secs2datetime(step_time)\n\thour, _ = ctime.hour, ctime.weekday()\n\n\t# Set the default and extraneous rules\n\tif 6<hour<19:\n\t\tocc_list = [True,True,True,True,True]\n\t\ttNexOccAll = [0.0]\n\telse:\n\t\tocc_list = [False,False,False,False,False]\n\t\ttNexOccAll = [86400.0]\n\n\treturn occ_list, tNexOccAll", "title": "" }, { "docid": "f11c22bfa79c3ef6826dd69a27b640ab", "score": "0.4847432", "text": "def evaluateCSPadCenterGlobal(self) :\n offset = self.calibpars.getCalibPars ('offset')\n offset_corr = self.calibpars.getCalibPars ('offset_corr')\n marg_gap_shift = self.calibpars.getCalibPars ('marg_gap_shift')\n quad_rotation = self.calibpars.getCalibPars ('quad_rotation')\n quad_tilt = self.calibpars.getCalibPars ('quad_tilt')\n\n #quadMargX, margX, gapX, shiftX = marg_gap_shift[0,:]\n #quadMargY, margY, gapY, shiftY = marg_gap_shift[1,:]\n #quadMargZ, margZ, gapZ, shiftZ = marg_gap_shift[2,:]\n\n margX, margY, margZ = marg_gap_shift[:,1]\n gapX, gapY, gapZ = marg_gap_shift[:,2]\n shiftX, shiftY, shiftZ = marg_gap_shift[:,3]\n\n dx = np.array([margX-gapX+shiftX, margX-gapX-shiftX, margX+gapX-shiftX, margX+gapX+shiftX])\n dy = np.array([margY-gapY-shiftY, margY+gapY-shiftY, margY+gapY+shiftY, margY-gapY+shiftY])\n dz = np.array([0, 0, 0, 0])\n\n xmin_quad = offset[0] + offset_corr[0] + dx \n ymin_quad = offset[1] + offset_corr[1] + dy\n zmin_quad = offset[2] + offset_corr[2] + dz\n\n self.fill2x1CentersInQuads()\n\n xc_glob = np.zeros( (4,8), dtype=np.float32 )\n yc_glob = np.zeros( (4,8), dtype=np.float32 )\n zc_glob = np.zeros( (4,8), dtype=np.float32 )\n\n quad_rotation = np.array([180, 90, 0, 270]) # Rotation of quads in MATRIX coordinate system\n #quad_rotation = np.array([90, 0, 270, 180]) # Rotation of quads in OPTICAL coordinate system\n #print 'quad_rotation', quad_rotation\n #print 'offset\\n', offset\n\n for quad in range(4) :\n\n coords_in_quad = self.get2x1CentersInQuadForRotN90(quad, quad_rotation[quad])\n\n xc_glob[quad] = coords_in_quad[0] + xmin_quad[quad]\n yc_glob[quad] = coords_in_quad[1] + ymin_quad[quad]\n zc_glob[quad] = coords_in_quad[2] + zmin_quad[quad]\n\n 
xc_glob, yc_glob, zc_glob = self.get2x1CentersInDetForRot270(xc_glob, yc_glob, zc_glob) # Transformation from MATRIX to OPTICAL coordinate system\n\n self.center_global_evaluated = np.array([ xc_glob, yc_glob, zc_glob])\n\n #print 'center_global_evaluated =\\n', self.center_global_evaluated\n #return self.evalpars['center_global']\n return self.center_global_evaluated", "title": "" }, { "docid": "fad3ffdf112d29790a5b1a63f2415ef0", "score": "0.4843545", "text": "def test_costmap_no_obstacles(self):\n cml = JPTCostmapLocation(self.milk, reachable_for=self.robot, model=self.model)\n sample = next(iter(cml))\n\n with simulated_robot:\n action_designator.NavigateAction.Action(sample.pose).perform()\n action_designator.MoveTorsoAction.Action(sample.torso_height).perform()\n action_designator.PickUpAction.Action(\n object_designator.ObjectDesignatorDescription(types=[\"milk\"]).resolve(),\n arm=sample.reachable_arm, grasp=sample.grasp).perform()", "title": "" }, { "docid": "a907fd9052567324e4a3a8cb0b1b80c5", "score": "0.4842324", "text": "def look_and_move(self, center, vision) -> Tuple[int, int]:\n locs = find_visible_locs(vision)\n locs = (locs + center) % self.n\n locs = [tuple(loc) for loc in locs]\n empty_locs = [loc for loc in locs if loc not in self.occupied]\n \n if len(empty_locs) == 0:\n return center\n \n t = [self.grid[loc] for loc in empty_locs]\n \n closest_i = np.argmax(t)\n return empty_locs[closest_i]", "title": "" }, { "docid": "46f5c64097e096b7cf4255aaac3c3a2f", "score": "0.4840711", "text": "def required_horiz_joint_angles (self, x,y, rot_rel_base):\n \n max_reach_fwd = L1 + L2 \n max_reach_bck = L1 - L2 \n if (isclose(x,0, abs_tol=0.000002) and isclose(y,0.215, abs_tol=0.000002)):\n return (np.pi/2,0,0)\n #Checks if x,y is close to home position\n if( np.sqrt(x**2 + y**2) < max_reach_bck or np.sqrt(x**2 + y**2) > max_reach_fwd ):\n #Checks if x,y is withing robots workspace \n return (0,0,0)\n \n else:\n #Computes maths for IK\n beta = np.arccos((L1**2 + L2**2 - x**2 -y**2)/(2*L1*L2))\n alpha = np.arccos((x**2 + y**2 + L1**2 -L2**2)/(2*L1*np.sqrt(x**2 + y**2)))\n gamma = np.arctan2(y,x)\n\n theta1 = gamma - alpha\n theta2 = np.pi - beta\n \n theta2 = -theta2 #Angle is negated as dynamixel motor is upsidedown\n\n if theta1 > 0 and theta2 < 0:\n theta3 = rot_rel_base + (-theta2 - theta1)\n else:\n theta3 = rot_rel_base + (-theta2 - (-theta1))\n\n\n if theta1<-1:\n theta1+=2*np.pi\n if theta3 < -2:\n theta3+=np.pi\n \n theta3=-theta3 #Angle is negated as dynamixel motor is upside down\n\n return (theta1,theta2,theta3)", "title": "" }, { "docid": "90b6215dabb38cb90f0911a1ad4e7cae", "score": "0.48355192", "text": "def oracle(self, env):\n OracleAgent = collections.namedtuple('OracleAgent', ['act'])\n\n def act(obs, info):\n act = {'camera_config': self.camera_config, 'primitive': None}\n if not obs or self.done():\n return act\n\n # Oracle uses ground truth object segmentation masks.\n colormap, heightmap, object_mask = self.get_object_masks(env)\n\n if (isinstance(self, tasks.names['cable-ring']) or\n isinstance(self, tasks.names['cable-ring-notarget'])):\n # ------------------------------------------------------------ #\n # Instead of assigning fixed target zones, pick the closest\n # circle to target mapping, then correct the largest\n # discrepancy. Mostly the same as bag-alone-open; keeping these\n # separate for now just in case we change the envs. Only care\n # about positions here; orientation doesn't matter. 
Upon\n # inspection, I think we could fine-tune this a bit more by\n # avoiding any pull that makes the bead cross over itself. Not\n # sure how to precisely do that.\n # ------------------------------------------------------------ #\n vert_pos_l = []\n for bead_ID in self.cable_bead_IDs:\n bead_position = p.getBasePositionAndOrientation(bead_ID)[0]\n vert_pos_l.append(bead_position)\n vertpos_xyz_np = np.array(vert_pos_l)\n\n targets_l = []\n for bead_ID in self.goal['places']:\n target_position, _ = self.goal['places'][bead_ID]\n targets_l.append(target_position)\n targets_xyz_np = np.array(targets_l)\n\n assert vertpos_xyz_np.shape == targets_xyz_np.shape\n nb_maps = len(self.cable_bead_IDs)\n min_dist = np.float('inf')\n vertex_pos, target_pos = None, None\n\n # Different (but 'rotationally consistent') ordering of beads.\n for a in range(nb_maps * 2):\n if a < nb_maps:\n # mapping = [a, a+1, ..., nb_maps-1, 0, 1, ..., a-1]\n mapping = [i for i in range(a, nb_maps)] + [i for i in range(0, a)]\n else:\n # Same as above but reverse it (to handle flipped ring).\n a -= nb_maps\n mapping = [i for i in range(a, nb_maps)] + [i for i in range(0, a)]\n mapping = mapping[::-1]\n differences = targets_xyz_np - vertpos_xyz_np[mapping]\n distances = np.linalg.norm(differences, axis=1)\n average_distance = np.mean(distances)\n\n if average_distance < min_dist:\n # Index of the largest distance among vertex + target.\n max_idx = np.argmax(distances)\n vertex_pos = vertpos_xyz_np[mapping][max_idx]\n target_pos = targets_xyz_np[max_idx]\n min_dist = average_distance\n\n overshoot = 0.0\n act['params'] = self.params_no_rots(vertex_pos, target_pos, overshoot)\n\n elif (isinstance(self, tasks.names['cloth-flat']) or\n isinstance(self, tasks.names['cloth-flat-notarget'])):\n # ------------------------------------------------------------ #\n # Hopefully the easiest of all the cloth environments. Since\n # the cloth is always in the same side up, we can assume a\n # clockwise ordering of cloth corners to zone corners. 
The\n # first action is key and it should grip the cloth corner\n # closest to the zone and pull it to the furthest zone corner.\n # See: https://github.com/DanielTakeshi/pybullet-def-envs/pull/6\n # ------------------------------------------------------------ #\n mappings = [[0, 1, 2, 3],\n [3, 0, 1, 2],\n [2, 3, 0, 1],\n [1, 2, 3, 0],]\n\n # Get cloth mesh data and info about cloth/zone corners.\n _, vert_pos_l = p.getMeshData(self.cloth_id, -1, flags=p.MESH_DATA_SIMULATION_MESH)\n corner_idx_np = np.array(self.corner_indices)\n targets_xy_np = self.corner_targets_xy\n vertpos_xy_np = np.array(vert_pos_l)[:, :2]\n min_dist = np.float('inf')\n min_std = np.float('inf')\n vertex_pos = None\n target_pos = None\n\n # Iterate through corner assignments.\n for mapping in mappings:\n corners = corner_idx_np[mapping]\n differences = targets_xy_np - vertpos_xy_np[corners]\n distances = np.linalg.norm(differences, axis=1)\n avg_dist = np.min(distances)\n avg_std = np.std(distances)\n\n if (self.t == 0) and (avg_std <= min_std):\n # Pick cloth corner closest to _zone_ center.\n min_std = avg_std\n zone_xy = np.array(self.zone_pose[0][:2]).reshape(1,2)\n differences = zone_xy - vertpos_xy_np[corners]\n distances = np.linalg.norm(differences, axis=1)\n idx = np.argmin(distances)\n vertex_pos = vertpos_xy_np[corners][idx]\n target_pos = targets_xy_np[idx]\n elif (self.t != 0) and (avg_dist <= min_dist):\n # Otherwise, consider largest discrepancy in match.\n min_dist = avg_dist\n idx = np.argmax(distances)\n vertex_pos = vertpos_xy_np[corners][idx]\n target_pos = targets_xy_np[idx]\n else:\n # If the above don't apply, DON'T update positions.\n pass\n self.t += 1\n\n # Currently overshooting slightly due to cloth physics.\n overshoot = 0.03\n act['params'] = self.params_no_rots(vertex_pos, target_pos, overshoot)\n\n elif isinstance(self, tasks.names['cloth-cover']):\n # ------------------------------------------------------------ #\n # Hopefully a straightforward one: put item on the center of\n # the cloth, then adjust cloth so it covers it using a\n # triangular fold. TODO: should we have a guard against failure\n # cases? I'm using task stages here instead of t=0 vs t=1 just\n # in case the first pick and place missed the cube, or in case\n # we have multiple cubes.\n # ------------------------------------------------------------ #\n assert len(self.block_IDs) == 1 # For now\n\n # Get cloth mesh data and info about cloth/zone corners.\n _, vert_pos_l = p.getMeshData(self.cloth_id, -1, flags=p.MESH_DATA_SIMULATION_MESH)\n corner_idx_np = np.array(self.corner_indices)\n vertpos_xy_np = np.array(vert_pos_l)[:, :2]\n cloth_center_xy = np.mean(vertpos_xy_np[corner_idx_np], axis=0)\n\n # Compute task stage. TODO this is a really bad hack.\n if self.t > 0:\n self.task_stage = 2\n self.t += 1\n\n if self.task_stage == 1:\n # Put a cube on the center of the cloth.\n block_id = self.block_IDs[0]\n vertex_pos = p.getBasePositionAndOrientation(block_id)[0]\n target_pos = cloth_center_xy\n overshoot = 0.0\n\n elif self.task_stage == 2:\n # Fold the cloth. 
Must pick one of the four directions.\n direction = np.random.randint(4)\n if direction == 0:\n source = corner_idx_np[0]\n target = corner_idx_np[2]\n elif direction == 1:\n source = corner_idx_np[1]\n target = corner_idx_np[3]\n elif direction == 2:\n source = corner_idx_np[2]\n target = corner_idx_np[0]\n elif direction == 3:\n source = corner_idx_np[3]\n target = corner_idx_np[1]\n vertex_pos = vertpos_xy_np[source]\n target_pos = vertpos_xy_np[target]\n overshoot = 0.03\n\n # We adjusted overshooting earlier based on the task stage.\n act['params'] = self.params_no_rots(vertex_pos, target_pos, overshoot)\n\n elif isinstance(self, tasks.names['bag-alone-open']):\n # ------------------------------------------------------------ #\n # We have a circular ring of targets on the 2D plane, and want\n # to maximize bag opening area. If we have 32 'top ring\n # indices' then there are 32 targets. But due to rotations,\n # they can rotate, so there's 32 different 'assignments'. We\n # want to pick the one that is closest w.r.t. the xy plane.\n # THEN find the single bag top ring vertex and corner target\n # that's furthest. Somewhat naive but it's a start.\n # ------------------------------------------------------------ #\n # The main failure case is likely with the bag hiding its own\n # top ring, so I'm going to carefully set the starting state.\n # ------------------------------------------------------------ #\n # Unlike cable-ring envs, we do NOT have to consider \"flipped\n # beads\" in the rotation, due to the way we sample the bag.\n # ------------------------------------------------------------ #\n\n # Check object_mask for all IDs that correspond to the cable ring.\n cable_IDs = np.array(self.cable_bead_IDs)\n bead_mask = np.isin(object_mask, test_elements=cable_IDs) # :D\n\n # Detect visible beads. Is there a faster way to do this?\n visible_beads = []\n for bead in self.cable_bead_IDs:\n if bead in object_mask:\n visible_beads.append(bead)\n frac_visible = len(visible_beads) / len(self.cable_bead_IDs)\n\n # If only a few beads are visible, exit early.\n if frac_visible <= BEAD_THRESH:\n self.exit_gracefully = True\n print(f'WARNING: fraction of visible beads: {frac_visible}')\n act['params'] = IDENTITY\n act['primitive'] = self.primitive\n return act\n\n # Now do bag-opening. 
TODO: only pick at VISIBLE beads?\n vert_pos_l = []\n for bead_ID in self.cable_bead_IDs:\n bead_position = p.getBasePositionAndOrientation(bead_ID)[0]\n vert_pos_l.append(bead_position)\n vertpos_xyz_np = np.array(vert_pos_l)\n\n targets_l = self.circle_target_positions\n targets_xyz_np = np.array( [[p[0],p[1],p[2]] for p in targets_l] )\n\n assert vertpos_xyz_np.shape == targets_xyz_np.shape\n assert len(self.top_ring_idxs) == len(self.cable_bead_IDs)\n nb_maps = len(self.top_ring_idxs)\n min_dist = np.float('inf')\n vertex_pos, target_pos = None, None\n\n for a in range(nb_maps):\n # mapping = [a, a+1, ..., nb_maps-1, 0, 1, ..., a-1]\n mapping = [i for i in range(a, nb_maps)] + [i for i in range(0, a)]\n differences = targets_xyz_np - vertpos_xyz_np[mapping]\n distances = np.linalg.norm(differences, axis=1)\n average_distance = np.mean(distances)\n\n if average_distance < min_dist:\n # Index of the largest distance among vertex + target.\n max_idx = np.argmax(distances)\n vertex_pos = vertpos_xyz_np[mapping][max_idx]\n target_pos = targets_xyz_np[max_idx]\n min_dist = average_distance\n\n # Make the robot 'overshoot' slightly towards the target position.\n overshoot = 0.02\n act['params'] = self.params_no_rots(vertex_pos, target_pos, overshoot)\n\n elif (isinstance(self, tasks.names['bag-items-easy']) or\n isinstance(self, tasks.names['bag-items-hard'])):\n # ------------------------------------------------------------ #\n # Hard-coding several stages for the task. We allocate task\n # selection to the task to enable both oracle and learned\n # policy adjust action parameters based on the task stage.\n # ------------------------------------------------------------ #\n # (1) Do what we did for bag-alone-open and naively ignore z\n # coordinate, to get the area of the convex hull.\n # ------------------------------------------------------------ #\n # (2) For this stage, assume we were successful, because it\n # almost always works, and we have code that supports dropping\n # a rigid item onto a deformable. Refer to `self.item_IDs` for\n # items we have to insert. Use the segmentation mask to figure\n # out a good placing location.\n # ------------------------------------------------------------ #\n # (3) Move the bag with the item(s) by picking at a visible bead.\n # ------------------------------------------------------------ #\n\n # Detect visible beads. Is there a faster way to do this?\n visible_beads = []\n for bead in self.cable_bead_IDs:\n if bead in object_mask:\n visible_beads.append(bead)\n\n # Allocate the stage selection to the task-specific method.\n success, place_pixels_eroded = self.determine_task_stage(\n colormap=colormap,\n heightmap=heightmap,\n object_mask=object_mask,\n visible_beads=visible_beads,\n )\n\n # Exit gracefully if no success OR if we determined we should in reset().\n if (not success) or (self.exit_gracefully):\n self.exit_gracefully = True # adding here to handle `not success`\n act['params'] = IDENTITY\n act['primitive'] = self.primitive\n return act\n\n # ------------------------------ #\n # Perform task-dependent actions #\n # ------------------------------ #\n\n if self.task_stage == 1:\n # Copy bag-alone-open. 
TODO: only pick at VISIBLE beads?\n vert_pos_l = []\n for bead_ID in self.cable_bead_IDs:\n bead_position = p.getBasePositionAndOrientation(bead_ID)[0]\n vert_pos_l.append(bead_position)\n vertpos_xyz_np = np.array(vert_pos_l)\n\n targets_l = self.circle_target_positions\n targets_xyz_np = np.array( [[p[0],p[1],p[2]] for p in targets_l] )\n\n assert vertpos_xyz_np.shape == targets_xyz_np.shape\n assert len(self.top_ring_idxs) == len(self.cable_bead_IDs)\n nb_maps = len(self.top_ring_idxs)\n min_dist = np.float('inf')\n vertex_pos, target_pos = None, None\n\n for a in range(nb_maps):\n # mapping = [a, a+1, ..., nb_maps-1, 0, 1, ..., a-1]\n mapping = [i for i in range(a, nb_maps)] + [i for i in range(0, a)]\n differences = targets_xyz_np - vertpos_xyz_np[mapping]\n distances = np.linalg.norm(differences, axis=1)\n average_distance = np.mean(distances)\n\n if average_distance < min_dist:\n # Index of the largest distance among vertex + target.\n max_idx = np.argmax(distances)\n vertex_pos = vertpos_xyz_np[mapping][max_idx]\n target_pos = targets_xyz_np[max_idx]\n min_dist = average_distance\n\n # Make the robot 'overshoot' slightly towards the target position.\n overshoot = 0.02\n act['params'] = self.params_no_rots(vertex_pos, target_pos, overshoot)\n\n elif self.task_stage == 2:\n # I ran into this failure. Does it only happen with 3.0.4 w/out disp?\n if np.sum(np.float32(place_pixels_eroded)) == 0:\n print('Warning, place_pixels_eroded has no pixels left.')\n self.save_images(colormap, heightmap, object_mask)\n self.exit_gracefully = True\n act['params'] = IDENTITY\n act['primitive'] = self.primitive\n return act\n\n # Identify an item that is not within the bag.\n item = None\n for ID in self.item_IDs:\n if ID not in self.items_in_bag_IDs:\n item = ID\n pick_mask = np.uint8(object_mask == item)\n pick_mask = cv2.erode(pick_mask, np.ones((3, 3), np.uint8))\n\n # This should not happen, so let's exit gracefully if it does.\n # Or does it only apply if using PyBullet 3.0.4?\n if np.sum(pick_mask) == 0:\n print('Something bad happened, pick mask sum is 0?')\n self.save_images(colormap, heightmap, object_mask, pick_mask)\n self.exit_gracefully = True\n act['params'] = IDENTITY\n act['primitive'] = self.primitive\n return act\n break\n assert item is not None\n\n # Key assumption: we assume the placing will be successful.\n self.items_in_bag_IDs.append(item)\n\n # Compute picking point. Sample anywhere on item's eroded area.\n pick_prob = np.float32(pick_mask)\n pick_pixel = utils.sample_distribution(pick_prob)\n pick_position = utils.pixel_to_position(\n pick_pixel, heightmap, self.bounds, self.pixel_size)\n p0 = pick_position\n\n # Placing position. Sample anywhere on the open eroded area.\n place_prob = np.float32(place_pixels_eroded)\n place_pixel = utils.sample_distribution(place_prob)\n place_position = utils.pixel_to_position(\n place_pixel, heightmap, self.bounds, self.pixel_size)\n p1 = place_position\n\n # Get the usual action parameter, without overshooting.\n act['params'] = self.params_no_rots(p0, p1, overshoot=0.0)\n\n if isinstance(self, tasks.names['bag-items-hard']):\n new_pose1 = act['params']['pose1']\n\n # But now sample the rotation, assuming we use 24 (TODO: make more robust?).\n num_rots = 24\n _rots = [i * 2 * np.pi / num_rots for i in range(num_rots)]\n _rot = np.random.choice(_rots)\n _rot_deg = _rot*180/np.pi\n print(f'Sampled rotation: {_rot_deg:0.4f}')\n\n # Assign placing pose. 
Picking still uses identity rotation.\n new_rot1 = p.getQuaternionFromEuler((0, 0, _rot))\n new_pose1 = (new_pose1[0], new_rot1)\n act['params']['pose1'] = new_pose1\n\n elif self.task_stage == 3:\n # Bag gripping + depositing. Currently considering any VISIBLE bead as\n # pick points at random. If we filter data, hopefully a pattern appears.\n p0 = None\n p1 = self.zone_pose[0]\n\n # This should never happen but just in case ...\n if len(visible_beads) == 0:\n print(f'WARNING: no visible beads in task stage 3??')\n visible_beads.append(self.cable_bead_IDs[0])\n\n bead_ID = np.random.choice(visible_beads)\n p0 = p.getBasePositionAndOrientation(bead_ID)[0]\n act['params'] = self.params_no_rots(p0, p1, overshoot=0.0)\n\n elif isinstance(self, tasks.names['bag-color-goal']):\n # ------------------------------------------------------------ #\n # Open a target color bag and move the item to the target.\n # NOTE: use cable_bead_target_bag_IDs, not cable_bead_IDs\n # ------------------------------------------------------------ #\n\n # Detect visible beads. Is there a faster way to do this?\n visible_beads = []\n for bead in self.cable_bead_target_bag_IDs:\n if bead in object_mask:\n visible_beads.append(bead)\n\n # Allocate the stage selection to the task-specific method.\n success, place_pixels_eroded = self.determine_task_stage(\n colormap=colormap,\n heightmap=heightmap,\n object_mask=object_mask,\n visible_beads=visible_beads,\n )\n\n # Exit gracefully if no success OR if we determined we should in reset().\n if (not success) or (self.exit_gracefully):\n self.exit_gracefully = True # adding here to handle `not success`\n act['params'] = IDENTITY\n act['primitive'] = self.primitive\n return act\n\n # ------------------------------ #\n # Perform task-dependent actions #\n # ------------------------------ #\n\n if self.task_stage == 1:\n # Copy bag-alone-open. TODO: only pick at VISIBLE beads?\n vert_pos_l = []\n for bead_ID in self.cable_bead_target_bag_IDs:\n bead_position = p.getBasePositionAndOrientation(bead_ID)[0]\n vert_pos_l.append(bead_position)\n vertpos_xyz_np = np.array(vert_pos_l)\n\n targets_l = self.circle_target_positions\n targets_xyz_np = np.array( [[p[0],p[1],p[2]] for p in targets_l] )\n\n assert vertpos_xyz_np.shape == targets_xyz_np.shape\n assert len(self.top_ring_idxs) == len(self.cable_bead_target_bag_IDs)\n nb_maps = len(self.top_ring_idxs)\n min_dist = np.float('inf')\n vertex_pos, target_pos = None, None\n\n for a in range(nb_maps):\n # mapping = [a, a+1, ..., nb_maps-1, 0, 1, ..., a-1]\n mapping = [i for i in range(a, nb_maps)] + [i for i in range(0, a)]\n differences = targets_xyz_np - vertpos_xyz_np[mapping]\n distances = np.linalg.norm(differences, axis=1)\n average_distance = np.mean(distances)\n\n if average_distance < min_dist:\n # Index of the largest distance among vertex + target.\n max_idx = np.argmax(distances)\n vertex_pos = vertpos_xyz_np[mapping][max_idx]\n target_pos = targets_xyz_np[max_idx]\n min_dist = average_distance\n\n # Make the robot 'overshoot' slightly towards the target position.\n overshoot = 0.02\n act['params'] = self.params_no_rots(vertex_pos, target_pos, overshoot)\n\n elif self.task_stage == 2:\n # I ran into this failure. 
Does it only happen with 3.0.4 w/out disp?\n if (place_pixels_eroded is None) or (np.sum(np.float32(place_pixels_eroded)) == 0):\n print('Warning, place_pixels_eroded has no pixels left or is None.')\n self.save_images(colormap, heightmap, object_mask)\n self.exit_gracefully = True\n act['params'] = IDENTITY\n act['primitive'] = self.primitive\n return act\n\n # ONLY do the target item.\n item = self.single_block_ID\n pick_mask = np.uint8(object_mask == item)\n pick_mask = cv2.erode(pick_mask, np.ones((3, 3), np.uint8))\n\n # This should not happen, so let's exit gracefully if it does.\n # Or does it only apply if using PyBullet 3.0.4?\n if np.sum(pick_mask) == 0:\n print('Something bad happened, pick mask sum is 0?')\n self.save_images(colormap, heightmap, object_mask, pick_mask)\n self.exit_gracefully = True\n act['params'] = IDENTITY\n act['primitive'] = self.primitive\n return act\n\n # Compute picking point. Sample anywhere on item's eroded area.\n pick_prob = np.float32(pick_mask)\n pick_pixel = utils.sample_distribution(pick_prob)\n pick_position = utils.pixel_to_position(\n pick_pixel, heightmap, self.bounds, self.pixel_size)\n p0 = pick_position\n\n # Placing position. Sample anywhere on the open eroded area.\n place_prob = np.float32(place_pixels_eroded)\n place_pixel = utils.sample_distribution(place_prob)\n place_position = utils.pixel_to_position(\n place_pixel, heightmap, self.bounds, self.pixel_size)\n p1 = place_position\n\n # Get the usual action parameter, without overshooting.\n act['params'] = self.params_no_rots(p0, p1, overshoot=0.0)\n new_pose1 = act['params']['pose1']\n\n # Assign placing pose. Picking still uses identity rotation.\n new_rot1 = p.getQuaternionFromEuler((0, 0, 0))\n new_pose1 = (new_pose1[0], new_rot1)\n act['params']['pose1'] = new_pose1\n\n elif self.primitive == 'pick_place':\n # Trigger reset if no ground truth steps are available.\n if len(self.goal['steps']) == 0:\n self.goal['steps'] = [] # trigger done then reset\n return act\n\n # Get possible picking locations (prioritize furthest).\n next_step = self.goal['steps'][0]\n possible_objects = np.int32(list(next_step.keys())).copy()\n distances = []\n for object_id in possible_objects:\n position = p.getBasePositionAndOrientation(object_id)[0]\n targets = next_step[object_id][1]\n targets = [t for t in targets if t in self.goal['places']]\n places = [self.goal['places'][t][0] for t in targets]\n d = np.float32(places) - np.float32(position).reshape(1, 3)\n distances.append(np.min(np.linalg.norm(d, axis=1)))\n\n distances_sort = np.argsort(distances)[::-1]\n possible_objects = possible_objects[distances_sort]\n for object_id in possible_objects:\n pick_mask = np.uint8(object_mask == object_id)\n pick_mask = cv2.erode(pick_mask, np.ones((3, 3), np.uint8))\n if np.sum(pick_mask) > 0:\n break\n\n # Trigger task reset if no object is visible.\n if np.sum(pick_mask) == 0:\n self.goal['steps'] = [] # trigger done then reset\n return act\n\n # Compute picking pose.\n pick_prob = np.float32(pick_mask)\n pick_pixel = utils.sample_distribution(pick_prob)\n pick_position = utils.pixel_to_position(\n pick_pixel, heightmap, self.bounds, self.pixel_size)\n pick_rotation = p.getQuaternionFromEuler((0, 0, 0))\n pick_pose = (pick_position, pick_rotation)\n\n # Get candidate target placing poses.\n targets = next_step[object_id][1]\n targets = [pi for pi in targets if pi in self.goal['places']]\n i = np.random.randint(0, len(targets))\n true_pose = self.goal['places'][targets[i]]\n\n # Compute placing pose.\n 
object_pose = p.getBasePositionAndOrientation(object_id)\n world_to_pick = self.invert(pick_pose)\n object_to_pick = self.multiply(world_to_pick, object_pose)\n pick_to_object = self.invert(object_to_pick)\n place_pose = self.multiply(true_pose, pick_to_object)\n\n # For various cable tasks, we don't want to apply rotations.\n if (isinstance(self, tasks.names['cable']) or\n isinstance(self, tasks.names['cable-shape']) or\n isinstance(self, tasks.names['cable-shape-notarget']) or\n isinstance(self, tasks.names['cable-line-notarget'])):\n place_pose = (place_pose[0], (0, 0, 0, 1))\n\n params = {'pose0': pick_pose, 'pose1': place_pose}\n act['params'] = params\n\n elif isinstance(self, tasks.names['sweeping']):\n p0 = None\n p1 = self.zone_pose[0]\n\n # Set farthest object position as start position.\n for object_id in self.object_points:\n object_pose = p.getBasePositionAndOrientation(object_id)\n position = self.object_points[object_id].squeeze()\n position = self.apply(object_pose, position)\n d = np.linalg.norm(np.float32(position) - np.float32(p1))\n if (p0 is None) or (d > threshold):\n p0 = position\n threshold = d\n\n # Adjust start and end positions.\n p0 = (p0[0], p0[1], 0.001)\n p1 = (p1[0], p1[1], 0.001)\n rotation = p.getQuaternionFromEuler((0, 0, 0))\n direction = np.float32(p0) - np.float32(p1)\n length = np.linalg.norm(direction)\n direction = direction / length\n new_p0 = np.float32(p1) + direction * (length + 0.02)\n new_p1 = np.float32(p0) - direction * (length - 0.05)\n p0, p1 = tuple(new_p0), tuple(new_p1)\n params = {'pose0': (p0, rotation), 'pose1': (p1, rotation)}\n act['params'] = params\n\n elif isinstance(self, tasks.names['pushing']):\n # Get start position.\n p0 = np.float32(p.getLinkState(env.ur5, env.ee_tip_link)[0])\n rotation = p.getQuaternionFromEuler((0, 0, 0))\n\n # Compute end position.\n goal_position = np.array([0.5, -0.5, 0])\n object_id = env.objects[0]\n object_pose = p.getBasePositionAndOrientation(object_id)\n world_to_object = self.invert(object_pose)\n goal_position = self.apply(world_to_object, goal_position)\n p1_object = np.float32(goal_position)\n p1_object[0] = -p1_object[0] * 2\n p1 = self.apply(object_pose, p1_object)\n push_direction = (p1 - p0) / np.linalg.norm((p1 - p0))\n p1 = p0 + push_direction * 0.01\n params = {'pose0': (p0, rotation), 'pose1': (p1, rotation)}\n act['params'] = params\n else:\n raise ValueError('Tried to define action, but task {} is not '\n 'supported:\\n{}'.format(self, tasks.names))\n\n act['primitive'] = self.primitive\n return act\n\n return OracleAgent(act)", "title": "" }, { "docid": "647b1a9933bf9cb84943fcaba729e701", "score": "0.48313004", "text": "def set_home(self):\n # if simulating exit function\n if self.simulate == True:\n return (0,0)\n else:\n m0_pos, m1_pos = self.get_joint_pos()\n print('set_home',m0_pos, m1_pos)\n return(m0_pos,m1_pos) # the home is (0,0) but we will put the two legs as (0,pi) deg at the beginning", "title": "" }, { "docid": "7dde6a6b8b1bfb0453292d9c484cdc63", "score": "0.4828109", "text": "def test_get_ObsDetectionMaxIntTime(self):\n life = 1.0 * u.year\n tk = self.fixture(missionLife=life.to(u.year).value, missionPortion=1.0)\n sim = self.allmods[0](scriptfile=self.script1)\n allModes = sim.OpticalSystem.observingModes\n Obs = sim.Observatory\n mode = list(filter(lambda mode: mode[\"detectionMode\"], allModes))[0]\n\n # 1) Does returned times enable allocation to succeed\n tk.currentTimeNorm = 0 * u.d\n tk.currentTimeAbs = tk.missionStart\n tk.exoplanetObsTime = 0 * u.d\n 
tk.OBendTimes = [1.0] * u.year\n tk.OBstartTimes = [0] * u.d\n MITOBT, MITEOT, MITML = tk.get_ObsDetectionMaxIntTime(Obs, mode)\n intTime = min([MITOBT, MITEOT, MITML])\n extraTime = intTime * (mode[\"timeMultiplier\"] - 1)\n self.assertTrue(\n tk.allocate_time(\n intTime + extraTime + Obs.settlingTime + mode[\"syst\"][\"ohTime\"], True\n )\n )\n\n # 2) Returned time ends mission at exoplanetObsTime = missionLife*missionPortion\n tk.missionLife = 1 * u.year\n tk.missionPortion = 0.1\n tk.currentTimeNorm = 0 * u.d\n tk.currentTimeAbs = tk.missionStart\n tk.exoplanetObsTime = 0 * u.d\n tk.OBendTimes = [1.0] * u.year\n tk.OBstartTimes = [0] * u.d\n MITOBT, MITEOT, MITML = tk.get_ObsDetectionMaxIntTime(Obs, mode)\n intTime = min([MITOBT, MITEOT, MITML])\n extraTime = intTime * (mode[\"timeMultiplier\"] - 1)\n self.assertTrue(\n tk.allocate_time(\n intTime + extraTime + Obs.settlingTime + mode[\"syst\"][\"ohTime\"], True\n )\n ) # was allocation successful\n self.assertTrue(\n tk.exoplanetObsTime == tk.missionLife.to(\"day\") * tk.missionPortion\n )\n\n # 3) Returned time ends mission at missionLife\n tk.missionLife = 1 * u.year\n tk.missionPortion = 1\n tk.currentTimeNorm = 0 * u.d\n tk.currentTimeAbs = tk.missionStart\n tk.exoplanetObsTime = 0 * u.d\n tk.OBendTimes = [1.1] * u.year\n tk.OBstartTimes = [0] * u.d\n MITOBT, MITEOT, MITML = tk.get_ObsDetectionMaxIntTime(Obs, mode)\n intTime = min([MITOBT, MITML])\n extraTime = intTime * (mode[\"timeMultiplier\"] - 1)\n self.assertTrue(\n tk.allocate_time(\n intTime + extraTime + Obs.settlingTime + mode[\"syst\"][\"ohTime\"], False\n )\n ) # was allocation successful\n self.assertTrue(tk.missionLife.to(\"day\") == tk.currentTimeNorm)\n self.assertTrue(tk.missionLife.to(\"day\") == tk.currentTimeAbs - tk.missionStart)\n\n # 4) Returned time ends mission at OBendTime\n tk.missionLife = 1 * u.year\n tk.missionPortion = 1\n tk.currentTimeNorm = 0 * u.d\n tk.currentTimeAbs = tk.missionStart\n tk.exoplanetObsTime = 0 * u.d\n tk.OBendTimes = [0.5] * u.year\n tk.OBstartTimes = [0] * u.d\n MITOBT, MITEOT, MITML = tk.get_ObsDetectionMaxIntTime(Obs, mode)\n intTime = min([MITOBT, MITEOT, MITML])\n extraTime = intTime * (mode[\"timeMultiplier\"] - 1)\n self.assertTrue(\n tk.allocate_time(\n intTime + extraTime + Obs.settlingTime + mode[\"syst\"][\"ohTime\"], True\n )\n ) # was allocation successful\n self.assertTrue(tk.OBendTimes[tk.OBnumber] == tk.currentTimeNorm)\n self.assertTrue(\n tk.OBendTimes[tk.OBnumber] == tk.currentTimeAbs - tk.missionStart\n )", "title": "" }, { "docid": "5b65151065f12141ac22249aedc16673", "score": "0.48206154", "text": "def get_space_upfront(self, x, y):\n space = -self.model.space_between_vehicles\n for i in range(1, self.model.road_length - x):\n if self.model.grid.is_cell_empty((x + i, y)):\n space += 1\n if space >= self.max_vehicle_speed:\n break\n else:\n break\n if space > 0:\n return space\n return 0", "title": "" }, { "docid": "8b354ee8c14be5f814f568d4f719309b", "score": "0.48175433", "text": "def movement(self):\n conteract = False\n anie_x = self.anie.rect.x\n anie_y = self.anie.rect.y\n anie_ds = self.anie.down_speed\n anie_height = self.anie.rect.height\n #print \"%f y, %f target, %s\" % (anie_y, self.jump_target,self.anie.jumping)\n if self.anie.rect.move(0, anie_ds).collidelist(self.current_level) != -1:\n wall = self.current_level[self.anie.rect.move(0, anie_ds) \\\n .collidelist(self.current_level)]\n self.anie.rect.move_ip(0, math.fabs((self.anie.rect.y + self.anie.rect.height) - wall.y))\n self.anie.down_speed = 
0\n self.anie.jump_frames = 0\n else:\n self.anie.rect.move_ip(0, self.anie.down_speed)\n self.anie.down_speed = min(TERMINAL_VELOCITY, (self.anie.down_speed + GRAVITY))\n\n if self.anie.jump_frames <= 0:\n self.anie.jumping = False\n\n if self.m_left and self.m_right: # cannot move left and right at the same time.\n pass\n\n elif self.m_left:\n if self.anie.rect.move(-self.anie.speed, 0).collidelist(self.current_level) != -1:\n pass\n # wall = self.current_level[self.anie.rect.move(-self.anie.speed,0)\\\n # .collidelist(self.current_level)]\n # self.anie.rect.move_ip(-math.fabs(anie_x-self.anie.speed+wall.x),0)\n # print wall.x,(anie_x-self.anie.speed+wall.x)\n else:\n self.anie.rect.move_ip(-self.anie.speed, 0)\n self.pan_camera_l()\n\n elif self.m_right:\n if self.anie.rect.move(self.anie.speed, 0).collidelist(self.current_level) != -1:\n pass\n # wall = self.current_level[self.anie.rect.move(self.anie.speed,0)\\\n # .collidelist(self.current_level)]\n # self.anie.rect.move_ip(math.fabs(self.anie.speed - \\\n # (anie_x + self.anie.speed)-wall.x),0)\n # print wall.x,anie_x\n\n else:\n self.anie.rect.move_ip(self.anie.speed, 0)\n self.pan_camera_r()\n\n if self.anie.jumping:\n self.anie.jump_frames -= 1\n self.anie.up_speed = min((math.fabs(self.anie.up_speed) + math.fabs(self.anie.jump_accel) \\\n , TERMINAL_VELOCITY))\n if self.anie.rect.move(0, -self.anie.up_speed).collidelist(self.current_level) != -1:\n wall = self.current_level[self.anie.rect.move(0, -self.anie.up_speed) \\\n .collidelist(self.current_level)]\n self.anie.rect.move_ip(0, -math.fabs(self.anie.rect.y - (wall.y + wall.height)))\n self.anie.jumping = False\n self.anie.down_speed = GRAVITY\n else:\n self.anie.rect.move_ip(0, -self.anie.up_speed)", "title": "" }, { "docid": "5d3f4af0e2bf3984dbee732aa9d95931", "score": "0.4816177", "text": "def acaratio():", "title": "" }, { "docid": "5c23940d7f1f5f0fbe95ae1dafccfefc", "score": "0.481269", "text": "def findLight():\n # Set the current heaiding of the robot\n robot.setHeading()\n # ------ Variables to store the maximum light intensity and its angle\n max_ambience_intensity = -1\n max_ambience_turn_count = 0\n\n # Turn around and gather the ambient light intensity\n turn_count = 0\n robot.readAmbient() # This call is used to set the mode of color sensor\n heading = robot.readHeading()\n print(\"HEADING BEFORE: \", heading)\n while heading <= 330:\n # Check to see whether there is a new max ambience intensity found\n current_ambience_intensity = robot.readAmbient()\n print(current_ambience_intensity)\n if current_ambience_intensity > max_ambience_intensity:\n max_ambience_intensity = current_ambience_intensity\n max_ambience_turn_count = turn_count\n\n # Keep turning\n robot.turnRight(SPEED_TURN, 0.2)\n turn_count += 1\n heading = robot.readHeading()\n\n # After turn 1 circle, stop the robot, set the heading to current heading\n robot.stop()\n robot.setHeading()\n print(\"===> Set heading: \", robot.readHeading())\n\n # Turn the robot back to the location where it found the max ambience\n # Because the robot stops at 330 degree, we turn 3 more times to make it back to origin\n # Each of our turn is approximately 10 degrees\n for i in range(max_ambience_turn_count + 3):\n robot.turnRight(SPEED_TURN, 0.2)\n\n robot.stop()\n # 6 is the thresh hold of the exit lighting\n if max_ambience_intensity >= 6:\n robot.forward(SPEED_MOVEMENT, 3)\n return True\n else:\n robot.forward(SPEED_MOVEMENT, 0.5)\n return False", "title": "" }, { "docid": "53bd46a8099e21c3600566f15454c5e3", "score": 
"0.4812509", "text": "def health_system_colapse_identifier(Hi, Hj, Ui, Uj, dp, mp):\n H = Hi + Hj\n U = Ui + Uj\n\n capacidade_leitos = dp.bed_ward\n capacidade_UTIs = dp.bed_icu\n\n lotacao = mp.lotation\n\n t_max = mp.t_max\n\n # IDENTIFICADOR DE DIAS DE COLAPSOS\n # Dia em que colapsa o sistema de saude: 30, 50, 80, 100% capacidade\n dia_colapso_leitos_30 = np.min(np.where(H > capacidade_leitos*lotacao[0]))\n dia_colapso_leitos_50 = np.min(np.where(H > capacidade_leitos*lotacao[1]))\n dia_colapso_leitos_80 = np.min(np.where(H > capacidade_leitos*lotacao[2]))\n dia_colapso_leitos_100 = np.min(np.where(H > capacidade_leitos*lotacao[3]))\n dia_colapso_leitos = (dia_colapso_leitos_30, dia_colapso_leitos_50,\n dia_colapso_leitos_80, dia_colapso_leitos_100)\n print(dia_colapso_leitos)\n\n dia_colapso_UTIs_30 = np.min(np.where(U > capacidade_UTIs*lotacao[0]))\n dia_colapso_UTIs_50 = np.min(np.where(U > capacidade_UTIs*lotacao[1]))\n dia_colapso_UTIs_80 = np.min(np.where(U > capacidade_UTIs*lotacao[2]))\n dia_colapso_UTIs_100 = np.min(np.where(U > capacidade_UTIs*lotacao[3]))\n dia_colapso_UTIs = (dia_colapso_UTIs_30, dia_colapso_UTIs_50,\n dia_colapso_UTIs_80,dia_colapso_UTIs_100)\n print(dia_colapso_UTIs)\n\n # TimeSeries\n datelist = [d.strftime('%d/%m/%Y')\n for d in pd.date_range(datetime.today(), periods = t_max)]\n\n print('Dia em que colapsa o sistema de saude (leitos comuns): 30, 50, 80, 100% capacidade')\n\n print(datelist[dia_colapso_leitos[0]])\n print(datelist[dia_colapso_leitos[1]])\n print(datelist[dia_colapso_leitos[2]])\n print(datelist[dia_colapso_leitos[3]])\n\n print('Dia em que colapsa o sistema de saude (UTI): 30, 50, 80, 100% capacidade')\n\n print(datelist[dia_colapso_UTIs[0]])\n print(datelist[dia_colapso_UTIs[1]])\n print(datelist[dia_colapso_UTIs[2]])\n print(datelist[dia_colapso_UTIs[3]])", "title": "" }, { "docid": "b09a4ff9e6411f8b4095936eda11e1ff", "score": "0.4812283", "text": "def initial_scan():\r\n state = 0\r\n angleSearch = 60*math.pi/180\r\n ang = 100\r\n\r\n motion.moveInit()\r\n camIndex = 0 # Starts with upper camera\r\n\r\n state = 0\r\n while ang == 100:\r\n [CC, AA] = scan_area(angleSearch, camIndex)\r\n print CC\r\n ang, found = rotate_center_head(CC, AA)\r\n if found == 0 and camIndex == 1:\r\n state = state + 1\r\n camIndex = 0\r\n motion.moveTo(0, 0, 2*math.pi/3)\r\n elif found == 0 and camIndex == 0:\r\n camIndex = 1\r\n if state == 3:\r\n tts.say('I need to move to find the ball')\r\n motion.moveTo(0.3, 0, 0)\r\n state = 0\r\n\r\n else:\r\n motion.moveTo(0, 0, ang*7/6)\r\n pic(path + \"ball_likely.png\",0)\r\n [CC1, AA1] = take_pics(math.pi/9, camIndex)\r\n print \"Centers of ball_likely:\" + str(CC1)\r\n print\r\n [ang, X, delta] = locate_ball(CC1, AA1)\r\n if ang == 100:\r\n camIndex = 2\r\n print 'Delta', delta\r\n print 'Ang', ang\r\n motion.moveTo(0, 0, ang*7/6)\r\n img=cv2.imread(path + \"ball_likely.png\")\r\n CM=CenterOfMassUp(img, 10)\r\n \r\n return CM, delta, camIndex", "title": "" }, { "docid": "3bbc44f047456fc6d846b813532e09bc", "score": "0.48104572", "text": "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "title": "" }, { "docid": "3bbc44f047456fc6d846b813532e09bc", "score": "0.48104572", "text": "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "title": "" }, { "docid": "3bbc44f047456fc6d846b813532e09bc", "score": "0.48104572", "text": "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "title": "" }, { "docid": 
"3bbc44f047456fc6d846b813532e09bc", "score": "0.48104572", "text": "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "title": "" }, { "docid": "3bbc44f047456fc6d846b813532e09bc", "score": "0.48104572", "text": "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "title": "" }, { "docid": "3bbc44f047456fc6d846b813532e09bc", "score": "0.48104572", "text": "def zone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"zone\")", "title": "" }, { "docid": "899701540d7f6f6c1e20a20845e6c483", "score": "0.48088658", "text": "def __init__(self, porch, location, stationary=True):\n self.porch = porch\n self.stationary = stationary\n self.sidling = False # something only stationary bees do\n self.img = img.new_bee_img()\n self.loc = location\n self.get_goal() # set self.head", "title": "" }, { "docid": "0c8909eaa6bda5163e7665291548cdfe", "score": "0.48045352", "text": "def Wing_Geo_Additional(Aircraft):\n layout = Aircraft.ParLayoutConfig\n afnp = Aircraft.ParAnFP\n \n #get parameters\n y_mac = afnp.y_MAC\n Sweep_LE = afnp.Sweep_LE\n #calculate new things\n x_LE_root = layout.x_lemac - y_mac*np.tan(Sweep_LE)\n \n return(x_LE_root)", "title": "" }, { "docid": "4d6be285f4554e8e760150602d076189", "score": "0.48035252", "text": "def RA1(agent):\n pop = get_pop(agent)\n env = p.get_env(pop)\n env_size = e.size(env)\n l, l_pos = potential_places(pop, agent, env, env_size, get_vision(agent), l = [], l_pos = []) \n pos_cell = get_pos(agent)\n if len(l_pos) > 0:\n pos_cell = max_ressource_cell_position(l, l_pos)\n cell = e.get_cell(env, pos_cell)\n eat_and_consumpt_sugar(agent, cell)\n deplace_or_remove(env, pop, agent, cell, pos_cell)", "title": "" }, { "docid": "6b0f1557570be43106d2c4af38032aa2", "score": "0.47998402", "text": "def _sample_frame_get_pose(robot_name, frame):\n # Set the time\n # TODO: Implement this in parent function\n pm.currentTime(frame)\n\n # tool_name = get_tool_name(robot_name)\n tool_name = mimic_utils.get_tool_ctrl_path(robot_name)\n try: # Try to grab the named tool\n tool_object = pm.ls(tool_name)[0] # Try to get tool, may raise an exception\n except IndexError: # No tool attached, use flange\n tool_name = mimic_utils.get_tcp_hdl_path(robot_name)\n\n # Local Base Frame controller (circle control at base of the robot).\n base_name = pm.ls(mimic_utils.get_local_ctrl_path(robot_name))[0]\n\n # Get name of the tcp and base\n world_matrix = '.worldMatrix'\n tcp_name_world_matrix = tool_name + world_matrix\n base_name_world_matrix = base_name + world_matrix\n\n # TRANSLATIONS\n\n # Get translation with respect to Maya's world frame\n tcp_translation = pm.xform(tool_name, query=True, rp=True, ws=True)\n base_translation = pm.xform(base_name, query=True, rp=True, ws=True)\n\n # ROTATIONS\n\n # Get TCP rotation with respect to Maya's world frame\n _tcp_matrix = pm.getAttr(tcp_name_world_matrix, time=frame)\n tcp_rotation = general_utils.matrix_get_rotations(_tcp_matrix)\n\n # Get Base rotation with respect to Maya's world frame\n _base_matrix = pm.getAttr(base_name_world_matrix, time=frame)\n base_rotation = general_utils.matrix_get_rotations(_base_matrix)\n\n # TRANSFORMATIONS\n\n # Compose 4x4 matrices using the rotation and translation from the above\n tcp_matrix_4x4 = general_utils.matrix_compose_4x4(tcp_rotation, tcp_translation)\n base_matrix_4x4 = general_utils.matrix_compose_4x4(base_rotation, base_translation)\n\n # Invert the base matrix\n base_matrix_4x4 = 
general_utils.matrix_get_inverse(base_matrix_4x4)\n\n # Get pose itself\n initial_pose_matrix = general_utils.matrix_multiply(base_matrix_4x4, tcp_matrix_4x4)\n\n # CONVERSIONS\n\n # Decompose the initial pose matrix\n initial_translation = general_utils.matrix_get_translation(initial_pose_matrix)\n initial_rotations = general_utils.matrix_get_rotations(initial_pose_matrix)\n\n # Rearrange from Maya CS (mcs) to Robot CS (rcs)\n indices = [2, 0, 1]\n new_translation = [initial_translation[i] * 10 for i in indices] # cm to mm\n new_rotation = [[initial_rotations[i][j] for j in indices] for i in indices]\n\n # Define rotation matrix and convert the rotations based robot type\n # Based on the orientation of the coordinate frame of the mounting flange\n # TODO: Integrate this with rigs, unclear and shouldn't be hardcoded\n robot_type = mimic_utils.get_robot_type(robot_name)\n if robot_type == 'ABB':\n conversion_rotation = [\n [0, 0, -1],\n [0, 1, 0],\n [1, 0, 0]\n ]\n # elif robot_type == 'KUKA':\n # conversion_rotation = [\n # [0, -1, 0],\n # [0, 0, 1],\n # [-1, 0, 0]\n # ]\n else:\n raise Exception('Robot type not supported for Pose movement')\n\n # Perform the conversion operation itself\n converted_rotation = general_utils.matrix_multiply(conversion_rotation, new_rotation)\n\n # Compose pose\n pose_matrix = general_utils.matrix_compose_4x4(converted_rotation, new_translation)\n\n # Decompose pose as expected for output\n pose = general_utils.matrix_decompose_4x4_completely(pose_matrix)\n return pose", "title": "" } ]
d92a22a0968fefe94ea8f346ec69e7e8
Returns artifacts with spans in [start_span, end_span] inclusive. This resolver function is based on the span-version semantics, which only considers the latest version of each span. If you want to keep all versions, then set keep_all_versions=True. Input artifacts must have both a "span" int property and a "version" int property. Please note that the spans in exclude_span_numbers are excluded AFTER getting the artifacts with spans in the range. If there are fewer than min_spans unique spans present in the resolved artifacts, then the component execution will be skipped. Corresponds to StaticRange in TFX.
[ { "docid": "375e6a0514b8a2cbb01e2ba741ae8d7f", "score": "0.79851294", "text": "def static_range(artifacts,\n *,\n start_span_number: int = -1,\n end_span_number: int = -1,\n keep_all_versions: bool = False,\n exclude_span_numbers: Sequence[int] = (),\n min_spans: Optional[int] = None):\n resolved_artifacts = ops.StaticSpanRange(\n artifacts,\n start_span=start_span_number,\n end_span=end_span_number,\n keep_all_versions=keep_all_versions)\n if exclude_span_numbers:\n resolved_artifacts = ops.ExcludeSpans(\n resolved_artifacts, denylist=exclude_span_numbers)\n\n if min_spans is None:\n # We check that start_span_number and end_span_number are positive to ensure\n # min_spans is well defined. Else, it is set to -1, meaning all the unique\n # spans will be considered.\n if start_span_number >= 0 and end_span_number >= 0:\n min_spans = end_span_number - start_span_number + 1\n logging.warning(\n 'min_spans for static_range(...) was not set and is being set to '\n 'end_span_number - start_span_number + 1 = %s - %s + 1 = %s.',\n end_span_number,\n start_span_number,\n min_spans,\n )\n else:\n min_spans = -1\n logging.warning(\n 'min_spans for static_range(...) was not set and is being set to -1, '\n 'meaning static_range(...) will never throw a SkipSignal.'\n )\n\n return ops.SkipIfLessThanNSpans(resolved_artifacts, n=min_spans)", "title": "" } ]
[ { "docid": "db98839d51840100dbe276ad0430e33e", "score": "0.6690784", "text": "def rolling_range(artifacts,\n *,\n start_span_number: int = 0,\n num_spans: int = 1,\n skip_num_recent_spans: int = 0,\n keep_all_versions: bool = False,\n exclude_span_numbers: Sequence[int] = (),\n min_spans: Optional[int] = None):\n resolved_artifacts = ops.LatestSpan(\n artifacts,\n min_span=start_span_number,\n n=num_spans,\n skip_last_n=skip_num_recent_spans,\n keep_all_versions=keep_all_versions)\n if exclude_span_numbers:\n resolved_artifacts = ops.ExcludeSpans(\n resolved_artifacts, denylist=exclude_span_numbers)\n\n if min_spans is None:\n logging.warning(\n 'min_spans for rolling_range(...) was not set, so it is defaulting to '\n 'num_spans = %s. If skip_num_recent_spans is set, this may delay '\n 'the component triggering on the first run until sufficient Examples '\n 'artifacts are available.',\n num_spans,\n )\n min_spans = num_spans\n\n return ops.SkipIfLessThanNSpans(resolved_artifacts, n=min_spans)", "title": "" }, { "docid": "67b0b368ac82f576dd683a4f8f405114", "score": "0.64668304", "text": "def sequential_rolling_range(artifacts,\n *,\n start_span_number: Optional[int] = None,\n num_spans: int = 1,\n skip_num_recent_spans: int = 0,\n keep_all_versions: bool = False,\n exclude_span_numbers: Sequence[int] = ()):\n resolved_artifacts = ops.ConsecutiveSpans(\n artifacts,\n first_span=start_span_number if start_span_number is not None else -1,\n skip_last_n=skip_num_recent_spans,\n keep_all_versions=keep_all_versions,\n denylist=exclude_span_numbers)\n\n return ops.SlidingWindow(resolved_artifacts, window_size=num_spans)", "title": "" }, { "docid": "20baba72e9686fdabb310b868461bb82", "score": "0.5914835", "text": "def _build_span_index(self, spans: List[Span]) -> IntervalTree:\n index = IntervalTree()\n for span in spans:\n # constraint - all spans disjoint\n existing = index[span.start:span.end]\n if existing:\n raise ValueError(f'Existing {existing} when attempting index {span}')\n # add to index\n index[span.start:span.end] = span\n return index", "title": "" }, { "docid": "491bf9cf76152dfb0d6eb72815a52624", "score": "0.58177626", "text": "def all_spans(artifacts):\n return ops.AllSpans(artifacts)", "title": "" }, { "docid": "d9254802978fa97ecb0f75964a4a56f8", "score": "0.55618674", "text": "def get_spans(span, samples, weights=None):\n ndim = len(samples)\n if span is None:\n span = [0.999999426697 for i in range(ndim)]\n span = list(span)\n if len(span) != len(samples):\n raise ValueError(\"Dimension mismatch between samples and span.\")\n for i, _ in enumerate(span):\n try:\n xmin, xmax = span[i]\n except(TypeError):\n q = [0.5 - 0.5 * span[i], 0.5 + 0.5 * span[i]]\n span[i] = _quantile(samples[i], q, weights=weights)\n return span", "title": "" }, { "docid": "0736dcf618b5c6a25a36282b2c7c70d8", "score": "0.53980535", "text": "def get_span(self, start, end):\n if start.year == end.year and start.month == end.month:\n return [log for log in self.get_month(start.year, start.month)\n if log['start'].date() >= start\n and log['start'].date() <= end]\n\n logs = [log for log in self.get_month(start.year, start.month)\n if log['start'].date() >= start]\n\n year, month = int(start.year), int(start.month)\n while True:\n month += 1\n if month > 12:\n month = 1\n year += 1\n if year == end.year and month == end.month:\n break\n logs.extend(self.get_month(year, month))\n\n logs.extend([log for log in self.get_month(end.year, end.month)\n if log['start'].date() <= end])\n\n return logs", "title": "" }, 
{ "docid": "d1e8d68e47b74e67da558f493a7f74a5", "score": "0.53014404", "text": "def _get_extent_from_spans(rupture, spans=[]):\n (clon, clat) = _rupture_center(rupture)\n mag = rupture.getOrigin().mag\n xmin = None\n xmax = None\n ymin = None\n ymax = None\n for spankey, span in spans.items():\n if mag > span[0] and mag <= span[1]:\n ymin = clat - span[2]/2\n ymax = clat + span[2]/2\n xmin = clon - span[3]/2\n xmax = clon + span[3]/2\n break\n if xmin is not None:\n return (xmin, xmax, ymin, ymax)\n return None", "title": "" }, { "docid": "6bb369f23a2b42f3e0806cfedbd9a127", "score": "0.52477807", "text": "def alive_in_range(asset, start, end, include_asset_start_date=False):\n if include_asset_start_date:\n asset_start = asset.start_date\n else:\n asset_start = asset.start_date + pd.Timedelta('1 day')\n return intervals_overlap((asset_start, asset.end_date), (start, end))", "title": "" }, { "docid": "57a088b130b269e7dbdea89e93347662", "score": "0.5196654", "text": "def split_interval_at_values(start: T, end: T, offsets: Sequence[T]\n ) -> list[tuple[T, T]]:\n assert end > start\n assert offsets\n\n if offsets[0] > end or offsets[-1] < start:\n # no intersection, return the original time range\n return [(start, end)]\n\n out = []\n for offset in offsets:\n if offset >= end:\n break\n if start < offset:\n out.append((start, offset))\n start = offset\n if start != end:\n out.append((start, end))\n\n assert len(out) >= 1\n return out", "title": "" }, { "docid": "068df3583efd44da4db4ba9048c6d30c", "score": "0.49319845", "text": "def combined_adjacent_spans(self, max_dist=1):\n prev_span = None\n span_groups = []\n span_group = None\n for span in self:\n if not prev_span:\n span_group = [span]\n elif prev_span.end + max_dist >= span.start:\n span_group.append(span)\n else:\n span_groups.append(SpanGroup(span_group))\n span_group = [span]\n prev_span = span\n if span_group:\n span_groups.append(SpanGroup(span_group))\n return AnnoTier(span_groups)", "title": "" }, { "docid": "b4acb1e3fba01ad268c4df708c1a39b6", "score": "0.48873115", "text": "def remove_overlapping(sorted_spans):\n overlap = lambda s1, s2: s1.i1 < s2.i1 <= s1.i2 < s2.i2\n \n accepted = []\n for s1 in sorted_spans: # for every combination of spans with already accepted spans\n flag = True\n for s2 in accepted:\n if overlap(s1, s2) or overlap(s2, s1): # if i overlaps j or vice versa\n flag = False # let the function know not to accept this span\n break # break this loop, since we will not accept span i\n\n if flag: # if span i does not overlap with any previous spans\n accepted.append(s1) # accept it\n\n return accepted", "title": "" }, { "docid": "771d04a6d31181df450c8b7750389515", "score": "0.48725882", "text": "def _span(r):\n return abs(r.end - r.start)", "title": "" }, { "docid": "9bf8d714b11f5d2f4dcd36d60561109c", "score": "0.48677772", "text": "def _get_span(self, sents, soup_text):\n\n spans = shared.find_sub_lists(self.tokens, sents)\n n_matches = len(spans)\n # Case 1: If can't match the span, record and return. 
This doesn't happen\n # much.\n if n_matches == 0:\n stats[\"no_matches\"] += 1\n return None\n # Case 2: IF there are multiple span matches, go back and look the original\n # XML tag to determine which match we want.\n elif n_matches > 1:\n xml_tag = self.xml.__repr__()\n tmp_ixs = shared.find_sub_lists(list(self.text), list(soup_text))\n text_ixs = []\n # Last character of the match must be a dash or char after must be an\n # escape, else we're not at end of token.\n text_ixs = [ixs for ixs in tmp_ixs if\n soup_text[ixs[1] + 1] in '([ -/,.+])<' or soup_text[ixs[1]] == \"-\"]\n if len(text_ixs) != n_matches:\n # If the number of xml tag matches doesn't equal the number of span\n # matches, record and return.\n stats[\"different_num_matches\"] += 1\n return None\n tag_ix = shared.find_sub_lists(list(xml_tag), list(soup_text))\n assert len(tag_ix) == 1\n tag_ix = tag_ix[0]\n text_inside = [x[0] >= tag_ix[0] and x[1] <= tag_ix[1] for x in text_ixs]\n assert sum(text_inside) == 1\n match_ix = text_inside.index(True)\n else:\n match_ix = 0\n stats[\"successful_matches\"] += 1\n span = spans[match_ix]\n return span", "title": "" }, { "docid": "5460aa2643923cab497c88cfe2bca273", "score": "0.4859204", "text": "def getSpan(self, *args):\n return _coin.SbBox3i32_getSpan(self, *args)", "title": "" }, { "docid": "1427750a7802d33fb8b2adccbe7b40e9", "score": "0.47833663", "text": "def sort_spans(self):\n self.spans = sorted(self.spans, key=lambda span: span.get_start_offset() )", "title": "" }, { "docid": "2aa739b59240a8fb080f002113a0c1dc", "score": "0.4767171", "text": "def getSpan(self, *args):\n return _coin.SbBox3d_getSpan(self, *args)", "title": "" }, { "docid": "47929560edd89c0768a960f8a81b1362", "score": "0.47669604", "text": "def getSpan(self, start, bound, err=\"unexpected end of file\"):\n if self.isEndOfFile():\n raise ValueError(err)\n return twineSlice((self._currentLine, self._currentSpan), start, bound)", "title": "" }, { "docid": "583c97e2fa5b8444a841beffc6103dc5", "score": "0.47559726", "text": "def get_ingested_spans(\n self,\n start_hr: datetime,\n *,\n end_hr: Union[datetime, UnsetType] = unset,\n ) -> UsageIngestedSpansResponse:\n kwargs: Dict[str, Any] = {}\n kwargs[\"start_hr\"] = start_hr\n\n if end_hr is not unset:\n kwargs[\"end_hr\"] = end_hr\n\n return self._get_ingested_spans_endpoint.call_with_http_info(**kwargs)", "title": "" }, { "docid": "89b11a713e4ebff7f7099ba55f01bcfd", "score": "0.47449145", "text": "def get_usage_indexed_spans(\n self,\n start_hr: datetime,\n *,\n end_hr: Union[datetime, UnsetType] = unset,\n ) -> UsageIndexedSpansResponse:\n kwargs: Dict[str, Any] = {}\n kwargs[\"start_hr\"] = start_hr\n\n if end_hr is not unset:\n kwargs[\"end_hr\"] = end_hr\n\n return self._get_usage_indexed_spans_endpoint.call_with_http_info(**kwargs)", "title": "" }, { "docid": "6a5db296b90e55af7d9a96594dbabe31", "score": "0.47324225", "text": "def _get_h3_range_lst(h3_min, h3_max):\n return list(range(h3_min, h3_max + 1))", "title": "" }, { "docid": "f45064e5b0a723498b6084a7f8c2c681", "score": "0.47219443", "text": "def get_chunk_range(selection, chunks):\n chunk_range = [range(s.start//l, int(np.ceil(s.stop/l)))\n if isinstance(s, slice)\n else range(s//l, (s//l)+1)\n for s, l in zip(selection, chunks)]\n return chunk_range", "title": "" }, { "docid": "7579ea9172579b2a1868938a6cee0765", "score": "0.47215998", "text": "def add_start(self, span: Span):\n idx = 0\n for existing in self.starts:\n if existing.end < span.end:\n break\n idx += 1\n self.starts.insert(idx, span)", 
"title": "" }, { "docid": "0dd03dfa10611b51501231b5020937df", "score": "0.47020164", "text": "def build_vrange(start, end):\n return ValidDuringRange({\n 'start_date': start.isoformat() if start else None,\n 'end_date': end.isoformat() if end else None,\n }, None)", "title": "" }, { "docid": "322f333a1aa8d85320767367669d053e", "score": "0.4699885", "text": "def exon_slice(self, start=None, end=None):\n idx = 0\n start_index = start or 0\n end_index = end or len(self)-1\n start_offset = start_index\n end_offset = end_index\n subseqs = []\n started = False\n ended = False\n for s in self._seqs:\n if started:\n start_base = s.base(0)\n elif start_index < idx + len(s):\n started = True\n start_base = s.base(start_offset)\n\n if started and end_index < idx + len(s):\n end_base = s.base(end_offset)\n ended = True\n else:\n end_base = s.base(len(s)-1)\n\n if started:\n subseqs.append((start_base,end_base))\n if ended:\n break\n else:\n start_offset = start_offset - len(s)\n end_offset = end_offset - len(s)\n idx = idx + len(s)\n\n if self.strand == '-':\n return [(end, start) for start, end in subseqs]\n else:\n return subseqs", "title": "" }, { "docid": "9610e679774815e8a5a7ec977cf67f28", "score": "0.46871275", "text": "def _get_ui_revisions(self, revspan, revrange):\n (revmin, revmax) = revspan\n allrevisions = range(revmin, revmax+1)\n allrevisions.sort()\n revs = [c for c in allrevisions]\n revs.reverse()\n revisions = []\n for rev in revs:\n if len(revisions) > 40:\n if int(rev)%20 and (rev not in revrange):\n continue\n elif len(revisions) > 10:\n if int(rev)%10 and (rev not in revrange):\n continue\n revisions.append(str(rev))\n if revisions[-1] != str(revmin):\n revisions.append(str(revmin))\n return revisions", "title": "" }, { "docid": "96f941b437d4c69c86c26377680c9efc", "score": "0.46767268", "text": "def split_span(s, offset=0, pattern=pattern):\n for match in re.finditer(pattern, s):\n span = match.span()\n yield match.group(0), span[0]+offset, span[1]+offset", "title": "" }, { "docid": "c2ec7687ff267a3ca2718da247766aa0", "score": "0.4671601", "text": "def get_range(self, start_id=None, include_start_object=True,\n limit=100, ids_only=False, deadline=None, **kwargs):\n return self.get_range_async(\n start_id, include_start_object, limit, ids_only, deadline=deadline,\n **kwargs).get_result()", "title": "" }, { "docid": "89a286502047065dc29b64e1aa71fa57", "score": "0.4666122", "text": "def spans_overlapped_by_span(self, selector_span):\n return(\n AnnoTier([span for span in self if selector_span.overlaps(span)])\n )", "title": "" }, { "docid": "716904dd9739ad989177871392bc2346", "score": "0.46285164", "text": "def bridge_span(unformat_span: str) -> List[float]:\n lst_spans = []\n indices = []\n i = 6\n while len(indices) < unformat_span.count('=') - 1:\n j = unformat_span.find('=', i)\n indices.append(unformat_span[unformat_span.find('=', j) + 1\n :unformat_span.find(';', j)])\n i = j + 1\n for index in indices:\n lst_spans.append(float(index))\n return lst_spans", "title": "" }, { "docid": "f11df23559eea25d09e5051eeb264bc7", "score": "0.46242285", "text": "def irange(self, minimum=None, maximum=None, inclusive=(True, True),\n reverse=False):\n _maxes = self._maxes\n\n if not _maxes:\n return iter(())\n\n _lists = self._lists\n\n # Calculate the minimum (pos, idx) pair. 
By default this location\n # will be inclusive in our calculation.\n\n if minimum is None:\n min_pos = 0\n min_idx = 0\n else:\n if inclusive[0]:\n min_pos = bisect_left(_maxes, minimum)\n\n if min_pos == len(_maxes):\n return iter(())\n\n min_idx = bisect_left(_lists[min_pos], minimum)\n else:\n min_pos = bisect_right(_maxes, minimum)\n\n if min_pos == len(_maxes):\n return iter(())\n\n min_idx = bisect_right(_lists[min_pos], minimum)\n\n # Calculate the maximum (pos, idx) pair. By default this location\n # will be exclusive in our calculation.\n\n if maximum is None:\n max_pos = len(_maxes) - 1\n max_idx = len(_lists[max_pos])\n else:\n if inclusive[1]:\n max_pos = bisect_right(_maxes, maximum)\n\n if max_pos == len(_maxes):\n max_pos -= 1\n max_idx = len(_lists[max_pos])\n else:\n max_idx = bisect_right(_lists[max_pos], maximum)\n else:\n max_pos = bisect_left(_maxes, maximum)\n\n if max_pos == len(_maxes):\n max_pos -= 1\n max_idx = len(_lists[max_pos])\n else:\n max_idx = bisect_left(_lists[max_pos], maximum)\n\n return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)", "title": "" }, { "docid": "deb01dd3e3060574acedadc5150ff75f", "score": "0.46074483", "text": "def test_deserialize_span_groups_compat(\n en_tokenizer, spans_bytes, doc_text, expected_spangroups, expected_warning\n):\n doc = en_tokenizer(doc_text)\n\n if expected_warning:\n with pytest.warns(UserWarning):\n doc.spans.from_bytes(spans_bytes)\n else:\n # TODO: explicitly check for lack of a warning\n doc.spans.from_bytes(spans_bytes)\n\n assert doc.spans.keys() == expected_spangroups.keys()\n for name, spangroup_args in expected_spangroups.items():\n assert doc.spans[name].name == spangroup_args[\"name\"]\n spans = [Span(doc, start, end) for start, end in spangroup_args[\"spans\"]]\n assert list(doc.spans[name]) == spans", "title": "" }, { "docid": "97316db9038a06f55a850c2bdd74ae9d", "score": "0.45970547", "text": "def sub_spans_and_update_indices(\n spans_to_replace: List[Tuple[int, int, str, str]],\n full_string: str\n) -> Tuple[str, List]:\n # TODO: check no spans overlapping\n # TODO: check all spans well-formed\n\n # assert all spans are equal to full_text span\n assert all([full_string[start:end] == token for start, end, token, _ in spans_to_replace])\n\n # assert none of the spans start with the same start ind\n start_inds = [rep[0] for rep in spans_to_replace]\n assert len(set(start_inds)) == len(start_inds)\n\n # sort by start index\n spans_to_replace.sort(key=lambda x: x[0])\n\n # compute offsets for each span\n new_spans = [[start, end, token, surface, 0] for start, end, token, surface in spans_to_replace]\n for i, entry in enumerate(spans_to_replace):\n start, end, token, surface = entry\n new_end = start + len(surface)\n offset = new_end - end\n new_spans[i][1] += offset\n for new_span_entry in new_spans[i+1:]:\n new_span_entry[4] += offset\n\n # generate new text and create final spans\n new_text = replace_refspans(spans_to_replace, full_string, btwn_padding=\"\")\n new_spans = [(start + offset, end + offset, token, surface) for start, end, token, surface, offset in new_spans]\n\n return new_text, new_spans", "title": "" }, { "docid": "f22a590babf82985a6b185b86de24565", "score": "0.45950842", "text": "def getSpan(self, *args):\n return _coin.SbBox3f_getSpan(self, *args)", "title": "" }, { "docid": "11ff1360142ba87ed2e6bbb5ee72d059", "score": "0.45887744", "text": "def convert_range(ranges, index_in = 0, index_out = 1, start_incl_in = True, start_incl_out = True,\n end_incl_in = False, end_incl_out = 
True):\n ## compensate index\n index_offset = index_out - index_in\n start_offset = int(start_incl_out) - int(start_incl_in)\n end_offset = int(end_incl_in) - int(end_incl_out)\n if is_range(ranges):\n start, end = ranges\n return (start + index_offset + start_offset, end + index_offset + end_offset)\n else:\n return [convert_range(r, index_in = index_in, index_out = index_out,\n start_incl_in = start_incl_in, start_incl_out = start_incl_out,\n end_incl_in = end_incl_in, end_incl_out = end_incl_out) for r in ranges]", "title": "" }, { "docid": "504676dbeee82aec62b8a842ef3d436f", "score": "0.45867518", "text": "def span_before(self, target_span, allow_overlap=True):\n closest_span = None\n for span in self:\n if span.start >= target_span.start:\n break\n if not allow_overlap and span.end > target_span.start:\n break\n closest_span = span\n return closest_span", "title": "" }, { "docid": "8541bce479cb2df302016e1437566a29", "score": "0.45756525", "text": "def _get_w_indices(a_span):\n ret = []\n # split span on commas\n spans = a_span.split(COMMA_SEP)\n for s in spans:\n if WSPAN.match(s):\n ret.append(s)\n else:\n mobj = WMULTISPAN.match(s)\n if mobj:\n start, end = int(mobj.group(1)), int(mobj.group(2)) + 1\n ret += [(WSPAN_PREFIX + str(w_id)) for w_id in xrange(start, end)]\n else:\n raise ValueError(\"Unrecognized span format: {:s}\".format(a_span))\n return ret", "title": "" }, { "docid": "0988c13ef2377a0addab808086f6809b", "score": "0.45736367", "text": "def parse_spans(span_string):\n spans = []\n for span in span_string.split(';'):\n start, end = span.split(' ')\n spans.append((int(start), int(end)))\n return spans", "title": "" }, { "docid": "5d411d9e8c361127b2b69a5551bac22b", "score": "0.45728403", "text": "def spans_contained_by_span(self, selector_span):\n return(\n AnnoTier([span for span in self if selector_span.contains(span)])\n )", "title": "" }, { "docid": "92313528041ed498cd49d59d2f91ef18", "score": "0.4570739", "text": "def locate_in(self, spans: Iterable[Span]) -> Optional[int]:\n for i, span in enumerate(spans):\n # The starts may coincide and the ends may coincide.\n if (span.start <= self.start and self.start < span.end and\n span.start < self.end and self.end <= span.end):\n return i\n return None", "title": "" }, { "docid": "291248275aaa43f56c50efd7f0255d51", "score": "0.4565741", "text": "def location_range(start: int, end: int) -> Iterable[int]:\n step = 1\n if start > end:\n step = -1\n\n return range(start, end + step, step)", "title": "" }, { "docid": "9ca2e00dfdd5a14112958e341c66f906", "score": "0.45578918", "text": "def find_spans(self, doc: Doc) -> Iterable[Tuple[int, int, str]]:\n\n # We search for the majority label for each entity string\n majority_labels = self.get_majority_labels(doc)\n\n # we build trie to easily search for these entities in the text\n tries = {label: gazetteers.Trie()\n for label in set(majority_labels.values())}\n for ent_tokens, label in majority_labels.items():\n tries[label].add(list(ent_tokens))\n\n gazetteer = GazetteerAnnotator(self.name, tries, self.case_sensitive,\n additional_checks=not self.case_sensitive)\n for start, end, label in gazetteer.find_spans(doc):\n yield start, end, label", "title": "" }, { "docid": "3160faf7c4161e9c20f06b6803d18d25", "score": "0.45520505", "text": "def get_node_ids_in_range(adj_list, elevs, min, max):\n return [node for node in adj_list if min<=elevs[node]<=max]", "title": "" }, { "docid": "e18212abe6ebd9008afce12f7ddbb82c", "score": "0.45340955", "text": "def get_spans(output, type:str):\n 
output_spans = set()\n start = -1\n for i in range(len(output)):\n if output[i].startswith(\"B-\") and type in output[i]:\n start = i\n if output[i].startswith(\"E-\") and type in output[i]:\n end = i\n output_spans.add(Span(start, end, output[i][2:]))\n if output[i].startswith(\"S-\") and type in output[i]:\n output_spans.add(Span(i, i, output[i][2:]))\n return output_spans", "title": "" }, { "docid": "16cfb29758a5d6a53bbc1b83dad97216", "score": "0.4529438", "text": "def ragged_range(starts, limits, deltas, Tsplits=_dtypes.int64, name=None):\n _ctx = _context._context or _context.context()\n tld = _ctx._thread_local_data\n if tld.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, tld.device_name, \"RaggedRange\", name,\n tld.op_callbacks, starts, limits, deltas, \"Tsplits\", Tsplits)\n _result = _RaggedRangeOutput._make(_result)\n return _result\n except _core._FallbackException:\n try:\n return ragged_range_eager_fallback(\n starts, limits, deltas, Tsplits=Tsplits, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n _ops.raise_from_not_ok_status(e, name)\n # Add nodes to the TensorFlow graph.\n if Tsplits is None:\n Tsplits = _dtypes.int64\n Tsplits = _execute.make_type(Tsplits, \"Tsplits\")\n _, _, _op, _outputs = _op_def_library._apply_op_helper(\n \"RaggedRange\", starts=starts, limits=limits, deltas=deltas,\n Tsplits=Tsplits, name=name)\n _result = _outputs[:]\n if _execute.must_record_gradient():\n _attrs = (\"T\", _op._get_attr_type(\"T\"), \"Tsplits\",\n _op._get_attr_type(\"Tsplits\"))\n _inputs_flat = _op.inputs\n _execute.record_gradient(\n \"RaggedRange\", _inputs_flat, _attrs, _result)\n _result = _RaggedRangeOutput._make(_result)\n return _result", "title": "" }, { "docid": "5986874a26203b25899863d095de3c5a", "score": "0.45012787", "text": "def range_filter(reference_genome, variants, reference_name, start, end):\n if 'chr' not in reference_name:\n reference_name = 'chr' + reference_name\n variants = variants.filter(Reference_Name=reference_name)\n if reference_genome == 'hg36':\n variants = variants.order_by('Hg36_Start')\n variants = variants.filter(Hg36_Start__lt=end, Hg36_End__gt=start)\n elif reference_genome == 'hg37':\n variants = variants.order_by('Hg37_Start')\n variants = variants.filter(Hg37_Start__lt=end, Hg37_End__gt=start)\n elif reference_genome == 'hg38':\n variants = variants.order_by('Hg38_Start')\n variants = variants.filter(Hg38_Start__lt=end, Hg38_End__gt=start)\n\n return variants", "title": "" }, { "docid": "6a6706ec37072deeada8a70ed06bfe8e", "score": "0.44963202", "text": "def build_calling_regions(contigs, regions_to_include, regions_to_exclude):\n # Initially we are going to call everything in the reference.\n regions = ranges.RangeSet.from_contigs(contigs)\n\n # If we provided a regions to include, intersect it with all of the regions,\n # producing a common set of regions between the reference and the provided\n # calling regions.\n contig_dict = ranges.contigs_dict(contigs)\n if regions_to_include:\n regions = regions.intersection(\n ranges.RangeSet.from_regions(regions_to_include, contig_dict))\n\n # If we provided regions to exclude, intersect those with the existing calling\n # regions to further refine our set of contigs to process.\n if regions_to_exclude:\n # exclude_regions mutates regions.\n regions.exclude_regions(\n ranges.RangeSet.from_regions(regions_to_exclude, contig_dict))\n\n return regions", 
"title": "" }, { "docid": "e31cb049c5cd62970224d7a3757f9b96", "score": "0.4493971", "text": "def document_spans(self, doc_id):\n spans = []\n while self.current_doc_id() == doc_id:\n try:\n line = self.iter.lookahead.rstrip('\\n')\n span = parse_stringdb_span_line(\n line,\n source=self.source,\n no_type_mapping=self.no_type_mapping\n )\n span.line_no = self.iter.index\n spans.append(span)\n except Exception as e:\n self.errors += 1\n print(f'error parsing {self.stream.name} line '\n f'{self.iter.index}: {e}: {line}', file=sys.stderr)\n if self.raise_on_error:\n raise\n next(self.iter)\n return spans", "title": "" }, { "docid": "f5cb551113651a48226a2f1be4edd593", "score": "0.4490083", "text": "def getSpan(self, *args):\n return _coin.SbXfBox3f_getSpan(self, *args)", "title": "" }, { "docid": "15c977a45cf7fe68efe9e2cfb5d6ec53", "score": "0.44883025", "text": "def Range(start=None, end=None):\n positions = position_model.PositionRange(start=start, end=end)\n positions = utils.RowsAsDicts(positions)\n positions.sort(key=lambda p: p['epoch'])\n TagShortStops(positions)\n # TODO: Update skip fields\n\n posts = _GetPosts(start, end)\n positions = list(_FilterPositions(positions, posts))\n combined = positions + posts\n combined.sort(key=lambda p: p['epoch'])\n return combined", "title": "" }, { "docid": "ec404bc470876cc36cf3993b967a9a47", "score": "0.44861975", "text": "def extract_spans(in_file, out_file, kb = None):\n if kb is not None:\n kb = load_embedding(kb)\n\n counter = 0\n flag = False\n spans = []\n with open(in_file) as handle:\n for line in handle:\n line = line.strip()\n if line.startswith(\"DOCSTART\") or line.startswith(\"DOCEND\") or line == \"*NL*\" or len(line) == 0:\n continue\n elif line.startswith(\"MMSTART\"):\n assert not flag\n entity = line.strip().split()[-1]\n\n if kb is not None:\n entity = normalize_entity(kb, entity, kb.prefix) \n \n spans.append([entity, [], counter, counter-1])\n flag = True\n elif line.startswith(\"MMEND\"):\n flag = False\n elif flag:\n spans[-1][-1] += 1\n spans[-1][1].append(line)\n counter += 1\n else:\n counter += 1\n\n spans.sort(key = lambda x:(x[-2], x[-1]))\n\n with open(out_file, \"w\") as whandle:\n for entity, surface, start, end in spans:\n surface = \" \".join(surface)\n whandle.write(f\"{start}\\t{end}\\t{entity}\\t{surface}\\n\")", "title": "" }, { "docid": "1e695e2a0681776a2f0a4e1c883b4b7d", "score": "0.44827375", "text": "def get_range_async(self, start_id=None, include_start_object=True,\n limit=100, ids_only=False, deadline=None, **kwargs):\n\n app_id = kwargs.pop('app_id', None)\n if kwargs:\n raise TypeError('Invalid arguments: %s' % ', '.join(kwargs))\n request = search_service_pb.ListDocumentsRequest()\n if app_id:\n request.set_app_id(app_id)\n\n params = request.mutable_params()\n _CopyMetadataToProtocolBuffer(self, params.mutable_index_spec())\n\n if start_id:\n params.set_start_doc_id(start_id)\n params.set_include_start_doc(include_start_object)\n\n params.set_limit(_CheckInteger(\n limit, 'limit', zero_ok=False,\n upper_bound=MAXIMUM_DOCUMENTS_RETURNED_PER_SEARCH))\n params.set_keys_only(ids_only)\n\n response = search_service_pb.ListDocumentsResponse()\n def hook():\n _CheckStatus(response.status())\n return self._NewGetResponse(response)\n return _RpcOperationFuture(\n 'ListDocuments', request, response, deadline, hook)", "title": "" }, { "docid": "e7f2942e4b3166631235e0ba6730f362", "score": "0.44728863", "text": "def timespan(self, from_date, to_date=None, span=None, current=False):\r\n if span and not 
to_date:\r\n diff = None\r\n if span == 'month':\r\n diff = relativedelta(months=1)\r\n elif span == 'week':\r\n diff = relativedelta(days=7)\r\n elif span == 'day':\r\n diff = relativedelta(days=1)\r\n if diff is not None:\r\n to_date = from_date + diff\r\n datesQ = Q(end_time__gte=from_date)\r\n datesQ &= Q(end_time__lt=to_date) if to_date else Q()\r\n datesQ |= Q(end_time__isnull=True) if current else Q()\r\n return self.filter(datesQ)", "title": "" }, { "docid": "0ebe57a37b6ae0f3ce366bc555579bab", "score": "0.4464451", "text": "def _GetRevisionsInRange(sub_ranges):\n return [\n revision for sub_range in sub_ranges for revision in sub_range if revision\n ]", "title": "" }, { "docid": "d53fc454a905cab7a99668545848b052", "score": "0.44590396", "text": "def get_consensus_range(block_id_start, block_id_end, proxy=None):\n if proxy is None:\n proxy = get_default_proxy()\n\n resp = proxy.get_consensus_range(block_id_start, block_id_end)\n return resp", "title": "" }, { "docid": "7d3243397e4106feef93193423226ce1", "score": "0.4456184", "text": "def simple_limit_end_index(starts, ends, limit_unit):\n new_starts_index, new_ends_index = [], []\n for s, e in zip(starts, ends):\n new_starts_index.append(s)\n new_end = min(s + limit_unit, e)\n new_ends_index.append(new_end)\n return new_starts_index, new_ends_index", "title": "" }, { "docid": "e24e0b5877c114421b1e87757f1afdb2", "score": "0.44560114", "text": "def expand_str_range(str_ranges):\n out_list = []\n for r in str_ranges:\n if '-' in r:\n start, end = r.split('-')\n out_list.extend(range(int(start), int(end) + 1))\n else:\n out_list.append(int(r))\n\n return out_list", "title": "" }, { "docid": "c84ed95774417f90d52d655cab366f9a", "score": "0.44515133", "text": "def group_spans_by_containing_span(self,\n other_tier,\n allow_partial_containment=False):\n if isinstance(other_tier, AnnoTier):\n other_spans = other_tier.spans\n else:\n other_spans = sorted(other_tier)\n other_spans_idx = 0\n for span in self.spans:\n span_group = []\n # iterate over the other spans that come before this span.\n while other_spans_idx < len(other_spans):\n if allow_partial_containment:\n if other_spans[other_spans_idx].end > span.start:\n break\n else:\n if other_spans[other_spans_idx].start >= span.start:\n break\n other_spans_idx += 1\n other_span_idx_2 = other_spans_idx\n while other_span_idx_2 < len(other_spans):\n if other_spans[other_span_idx_2].start >= span.end:\n break\n if not allow_partial_containment:\n # Skip the other span if it is not contained by this span.\n # It is possible there is another shorter span that starts\n # after it and is fully contained by this span.\n if other_spans[other_span_idx_2].end > span.end:\n other_span_idx_2 += 1\n continue\n span_group.append(other_spans[other_span_idx_2])\n other_span_idx_2 += 1\n yield span, span_group", "title": "" }, { "docid": "87817dd3ef5ace1fd65c063e65f9a3e0", "score": "0.44384313", "text": "def find_spans(self, doc: Doc) -> Iterable[Tuple[int, int, str]]:\n\n # Extract the first mentions of each entity\n first_observed = self.get_first_mentions(doc)\n\n # We construct tries based on the first mentions\n tries = {label: gazetteers.Trie() for label in self.labels}\n first_observed_bounds = set()\n for tokens, span in first_observed.items():\n tries[span.label_].add(tokens)\n first_observed_bounds.add((span.start, span.end))\n\n gazetteer = GazetteerAnnotator(self.name, tries, case_sensitive=self.case_sensitive,\n additional_checks=not self.case_sensitive)\n\n for start, end, label in 
gazetteer.find_spans(doc):\n if (start, end) not in first_observed_bounds:\n yield start, end, label\n\n return doc", "title": "" }, { "docid": "7495ad63437b0b4bd3adaa71734bcc0e", "score": "0.44365188", "text": "def get_free_ranges(self, min_duration=None):\n if not self.records:\n if not min_duration or min_duration <= self.range.get_timedelta_second():\n return [self.range]\n return []\n\n result = []\n if self.records[0].get('range').start_datetime != self.range.start_datetime:\n result.append(DateTimeRange(self.range.start_datetime,\n self.records[0].get('range').start_datetime))\n if self.records[-1].get('range').end_datetime != self.range.end_datetime:\n result.append(DateTimeRange(\n self.records[-1].get('range').end_datetime, self.range.end_datetime))\n\n for i in range(0, len(self.records) - 1):\n rec_1_end = self.records[i].get('range').end_datetime\n rec_2__start = self.records[i + 1].get('range').start_datetime\n if rec_1_end != rec_2__start:\n result.append(DateTimeRange(rec_1_end, rec_2__start))\n result.sort(key=lambda _record: _record.end_datetime)\n if min_duration:\n result = list(\n filter(lambda x: x.get_timedelta_second() >= min_duration, result))\n return result", "title": "" }, { "docid": "77219c1a1aa16514b8257f92c3123be0", "score": "0.44354022", "text": "def axvspan(self, xmin, xmax, ymin=0, ymax=1, lc=None):\n self._spans.append(Span.create(xmin, xmax, ymin, ymax, lc))", "title": "" }, { "docid": "8480a41a0decfc8cb7307d59d9096da2", "score": "0.44286212", "text": "def time_span(self):\n s = self['start'][0]\n e = self['end'].iloc[-1]\n return IntervalSet(s, e)", "title": "" }, { "docid": "de2c358b597ee7a75719cadcfa5466b7", "score": "0.44251376", "text": "def get_sentence_indices(range_param, src_sentences):\n ids = []\n if args.range:\n try:\n if \":\" in args.range:\n from_idx,to_idx = args.range.split(\":\")\n else:\n from_idx = int(args.range)\n to_idx = from_idx\n ids = range(int(from_idx)-1, int(to_idx))\n except Exception as e:\n logging.info(\"The --range does not seem to specify a numerical \"\n \"range (%s). Interpreting as file name..\" % e)\n tmp_path = \"%s/sgnmt-tmp.%s\" % (os.path.dirname(args.range), \n uuid.uuid4())\n logging.debug(\"Temporary range file: %s\" % tmp_path)\n while True:\n try:\n os.rename(args.range, tmp_path)\n with open(tmp_path) as tmp_f:\n all_ids = [i.strip() for i in tmp_f]\n next_id = None\n if all_ids:\n next_id = all_ids[0]\n all_ids = all_ids[1:]\n with open(tmp_path, \"w\") as tmp_f:\n tmp_f.write(\"\\n\".join(all_ids))\n os.rename(tmp_path, args.range)\n if next_id is None:\n return\n logging.debug(\"Fetched ID %s and updated %s\"\n % (next_id, args.range))\n yield int(next_id)-1\n except Exception as e:\n logging.debug(\"Could not fetch sentence ID from %s (%s). \"\n \"Trying again in 2 seconds...\" \n % (args.range, e))\n time.sleep(2)\n else:\n if src_sentences is False:\n logging.fatal(\"Input method dummy requires --range\")\n else:\n ids = range(len(src_sentences))\n for i in ids:\n yield i", "title": "" }, { "docid": "c49515fae40e6e090722fec98715a173", "score": "0.4418353", "text": "def get_range_spec(self, commit_a, commit_b):\n raise NotImplementedError()", "title": "" }, { "docid": "a73803a30954db8d31b1d2595671b0b3", "score": "0.44142994", "text": "def span_equality(self, span=10):\n\n # how far away are we from average?? 
NO 100 39 29 10 178/4 45\n # 39 43 40 28 #compactness #sort them and get the median\n if len(self.angle_list) == 4: #terms\n angle_max = max(self.angle_spans)\n\n if angle_max <= 35: #24 for flange\n ordered_spans = sorted(self.angle_spans)\n mid_one, mid_two = ordered_spans[1:3]\n median = int((mid_one + mid_two)/float(2))\n\n span_difs = [abs(suk - median) for suk in self.angle_spans]\n spans_over_threshold = [tuk for tuk in span_difs if tuk >= span]\n\n if spans_over_threshold:\n return 0\n else:\n return 1\n else:\n return 0", "title": "" }, { "docid": "18f9d5f6084efadf8db3e52b0274916d", "score": "0.44004512", "text": "def num_spans(x):\n if isinstance(x, SpanGroup):\n return len(set(x.iterate_leaf_base_spans()))\n else:\n return 1", "title": "" }, { "docid": "dc364d6ea916d8616c8b0344551ea546", "score": "0.43953955", "text": "def span_after(self, target_span):\n span = None\n for span in self:\n if span.start >= target_span.end:\n break\n return span", "title": "" }, { "docid": "41051a5fa3637b11866fc1c0e33906ff", "score": "0.43785033", "text": "def get_range(self, start: int=0, stop: int=-1, encoding='utf-8') -> Awaitable[List]:\n\n return self.get_connection().lrange(self._key, start, stop, encoding=encoding)", "title": "" }, { "docid": "c1d817d11c40336a1ccbd33f1c697b4e", "score": "0.43736947", "text": "def _get_range_overlaps(ranges: List[range]) -> List[Tuple[int, int]]:\n overlaps = []\n\n for i, current_range in enumerate(ranges):\n for j, next_range in enumerate(ranges[i + 1 :]): # noqa: E203\n if (\n current_range.start < next_range.start\n and current_range.stop < next_range.stop\n and current_range.stop >= next_range.start\n ):\n overlaps.append((i, j + i + 1))\n\n return overlaps", "title": "" }, { "docid": "9939bc3fd93eddd165cf6b629d53b8db", "score": "0.43684417", "text": "def inclusive_list(start, stop):\n return list(range(start, stop + 1))", "title": "" }, { "docid": "548e3909b502da20f5468217f960f845", "score": "0.4348357", "text": "def set_range(x):\n xs = sorted(x)\n n = len(xs)\n low = xs[int(0.005*n)]\n high = xs[int(0.995*n)]\n span = high-low\n return [low - 0.3*span, high + 0.3*span]", "title": "" }, { "docid": "548e3909b502da20f5468217f960f845", "score": "0.4348357", "text": "def set_range(x):\n xs = sorted(x)\n n = len(xs)\n low = xs[int(0.005*n)]\n high = xs[int(0.995*n)]\n span = high-low\n return [low - 0.3*span, high + 0.3*span]", "title": "" }, { "docid": "44b5133f666be4d22a1296cfd82d9371", "score": "0.4348245", "text": "def pos_to_span(pos: List[str]) -> Span:\n (start, path) = pos[0].split(\":\")\n (end, _) = pos[-1].split(\":\")\n return Span(int(start), int(end), path)", "title": "" }, { "docid": "0da9a5e273cac82b4d292a55a333c2ad", "score": "0.4342695", "text": "def test_search_variants_requested_range_present(self):\n start = 41222970\n end = 41247374\n request = self.factory.post(\"/data/ga4gh/variants/search\",\n json.dumps({\"end\": end,\n \"referenceName\": \"chr17\",\n \"variantSetId\": \"brca-hg37\",\n \"start\": start}),\n content_type=\"application/json\")\n response = views.search_variants(request)\n jresp = json.loads(response.content)\n self.assertGreater(len(jresp[\"variants\"]), 0)\n for variant in jresp[\"variants\"]:\n self.assertTrue(long(variant[\"end\"]) > start)\n self.assertTrue(long(variant[\"start\"]) < end,\n \"Should be in range\"\n \" v.start {} r.end {}\".format(variant['start'], end))\n self.assertTrue(\n variant[\"referenceName\"] == \"chr17\" or variant[\"referenceName\"] == \"17\",\n \"Searched for chr17 and got 
{}\".format(variant[\"referenceName\"]))\n request = self.factory.post(\"/data/ga4gh/variants/search\",\n json.dumps({\"end\": end,\n \"referenceName\": \"17\",\n \"variantSetId\": \"brca-hg37\",\n \"start\": start}),\n content_type=\"application/json\")\n response1 = views.search_variants(request)\n self.assertEqual(response.content, response1.content,\n \"Both 17 and chr17 return the same results\")", "title": "" }, { "docid": "e3703e1f5aaf3382d75cb0840438739a", "score": "0.43423903", "text": "def do_span_inference(self, hps, tree_inference_inputs, span_scores):\n\n # start from the bottom\n init_span_beliefs = tf.exp(span_scores)\n\n max_depth = len(hps.tree_widths_at_level)\n\n # add extra indices for gather_nd-ing and tile out samples\n span_off_to_node_msg = tree_inference_inputs.span_off_to_node_msg\n span_on_to_node_msg = tree_inference_inputs.span_on_to_node_msg\n span_belief_to_node_idx = add_all_leading_idx_1(\n to_int32(tree_inference_inputs.span_belief_to_node_idx))\n\n nodes_up_to_sum_tree_idx = add_all_leading_idx_2(\n to_int32(tree_inference_inputs.nodes_up_to_sum_tree_idx))\n nodes_up_to_sum_tree_log_z_idx = add_all_leading_idx_2(\n to_int32(tree_inference_inputs.nodes_up_to_sum_tree_log_z_idx))\n sum_tree_msg_start_depths = to_int32(\n tree_inference_inputs.sum_tree_msg_start_depths)\n sum_tree_msg_end_depths = to_int32(\n tree_inference_inputs.sum_tree_msg_end_depths)\n sum_tree_up_to_parent_idx = add_all_leading_idx_2(\n to_int32(tree_inference_inputs.sum_tree_up_to_parent_idx))\n sum_tree_up_to_parent_log_z_idx = add_all_leading_idx_2(\n to_int32(tree_inference_inputs.sum_tree_up_to_parent_log_z_idx))\n\n sum_tree_down_to_nodes_idx = add_all_leading_idx_2(\n to_int32(tree_inference_inputs.sum_tree_down_to_nodes_idx))\n node_to_span_off_belief_idx = add_all_leading_idx_2(\n to_int32(tree_inference_inputs.node_to_span_off_belief_idx))\n\n node_to_span_on_belief_range_idx = to_int32(\n tree_inference_inputs.node_to_span_on_belief_range_idx)\n\n node_to_span_on_belief_start_idx = []\n node_to_span_on_belief_end_idx = []\n for nsbrgi in node_to_span_on_belief_range_idx:\n starts, ends = tf.split(3, 2, nsbrgi)\n node_to_span_on_belief_start_idx.append(starts)\n node_to_span_on_belief_end_idx.append(ends)\n\n node_to_span_on_belief_start_idx = add_all_leading_idx_2(\n node_to_span_on_belief_start_idx)\n node_to_span_on_belief_end_idx = add_all_leading_idx_2(\n node_to_span_on_belief_end_idx)\n\n parent_on_down_to_sum_tree_idx = add_all_leading_idx_2(\n to_int32(tree_inference_inputs.parent_on_down_to_sum_tree_idx))\n parent_off_down_to_sum_tree_idx = add_all_leading_idx_2(\n to_int32(tree_inference_inputs.parent_off_down_to_sum_tree_idx))\n\n global_sum_tree_msg_start_depths = to_int32(\n tree_inference_inputs.global_sum_tree_msg_start_depths)\n global_sum_tree_msg_end_depths = to_int32(\n tree_inference_inputs.global_sum_tree_msg_end_depths)\n\n node_up_to_global_idx = add_leading_idx_1(\n to_int32(tree_inference_inputs.node_up_to_global_idx))\n node_up_to_global_log_z_idx = add_leading_idx_1(\n to_int32(tree_inference_inputs.node_up_to_global_log_z_idx))\n global_down_to_node_idx = add_leading_idx_1(\n to_int32(tree_inference_inputs.global_down_to_node_idx))\n\n span_off_belief_to_span_off_marginal_idx = add_leading_idx_1(\n to_int32(\n tree_inference_inputs.span_off_belief_to_span_off_marginal_idx))\n\n node_sample_to_span_off_belief_sample_idx = add_all_leading_idx_3(\n replicate_all_samples_3(\n to_int32(tree_inference_inputs.node_to_span_off_belief_idx),\n 
hps.num_samples))\n parent_on_sample_down_to_sum_tree_idx = add_all_leading_idx_3(\n replicate_all_samples_3(\n to_int32(tree_inference_inputs.parent_on_down_to_sum_tree_idx),\n hps.num_samples))\n parent_off_sample_down_to_sum_tree_idx = add_all_leading_idx_3(\n replicate_all_samples_3(\n to_int32(tree_inference_inputs.parent_off_down_to_sum_tree_idx),\n hps.num_samples))\n sum_tree_sample_down_to_nodes_idx = add_all_leading_idx_3(\n replicate_all_samples_3(\n to_int32(tree_inference_inputs.sum_tree_down_to_nodes_idx),\n hps.num_samples))\n\n global_down_to_node_sample_idx = add_leading_idx_2(\n replicate_samples_2(\n to_int32(tree_inference_inputs.global_down_to_node_idx),\n hps.num_samples))\n\n span_sample_off_belief_to_span_sample_off_marginal_idx = add_leading_idx_2(\n replicate_samples_2(\n to_int32(\n tree_inference_inputs.span_off_belief_to_span_off_marginal_idx),\n hps.num_samples))\n\n # Handle base case of forward inference pass\n\n init_node_beliefs = (\n span_off_to_node_msg[0] + span_on_to_node_msg[0] * padded_gather_nd(\n init_span_beliefs, span_belief_to_node_idx[0], 2, 4))\n\n cur_node_out_up_msg = init_node_beliefs\n\n cur_node_out_log_zs = tf.to_float(tf.zeros_like(cur_node_out_up_msg))\n\n sum_tree_layers = []\n parent_constraint_layers = []\n\n for d in xrange(1, max_depth):\n\n # Sum up siblings at current level\n\n cur_fft_tree_width = hps.fft_tree_widths_at_level[max_depth - d - 1]\n cur_sum_tree_msg_start_depths = sum_tree_msg_start_depths[d - 1]\n cur_sum_tree_msg_end_depths = sum_tree_msg_end_depths[d - 1]\n\n cur_sum_tree_layer = SiblingSumTreeLayer(\n hps.batch_size,\n hps.max_num_sentences,\n hps.num_samples,\n cur_fft_tree_width,\n cur_sum_tree_msg_start_depths,\n cur_sum_tree_msg_end_depths,\n layer_depth=str(d),\n message_damp_lambda=hps.fft_tree_msg_damp_lambdas[max_depth - d - 1])\n\n cur_sum_tree_inc_up_msg = padded_gather_nd(\n cur_node_out_up_msg, nodes_up_to_sum_tree_idx[d - 1], 3, 4)\n cur_sum_tree_inc_log_zs = padded_gather_nd(\n cur_node_out_log_zs, nodes_up_to_sum_tree_log_z_idx[d - 1], 3, 4)\n\n # pylint: disable=line-too-long\n sum_tree_out_up_msg, sum_tree_out_log_zs = cur_sum_tree_layer.compute_up_msg(\n cur_sum_tree_inc_up_msg, cur_sum_tree_inc_log_zs)\n # pylint: enable=line-too-long\n\n sum_tree_layers.append(cur_sum_tree_layer)\n\n # Apply parent constraints at current level\n\n cur_parent_constraint_layer = ParentConstraintLayer(\n init_span_beliefs, layer_depth=str(d))\n\n parent_constraint_layers.append(cur_parent_constraint_layer)\n\n # pylint: disable=line-too-long\n cur_node_out_up_msg, cur_node_out_log_zs = cur_parent_constraint_layer.compute_up_msg(\n sum_tree_out_up_msg, sum_tree_up_to_parent_idx[d - 1],\n sum_tree_up_to_parent_log_z_idx[d - 1], span_belief_to_node_idx[d],\n span_off_to_node_msg[d], span_on_to_node_msg[d], sum_tree_out_log_zs)\n # pylint: enable=line-too-long\n\n # Sentence-level forward pass done\n # now we have the upward messages for each sentence\n up_msg_by_sentence = cur_node_out_up_msg\n up_log_zs_by_sentence = cur_node_out_log_zs\n\n # Sum up the running log z across sentences and give an extra dim for\n # the global \"sentence\"\n # Since sum-trees expect a \"sentence\" dimension\n global_up_sum_tree_inc_log_zs = padded_gather_nd(\n up_log_zs_by_sentence, node_up_to_global_log_z_idx, 3, 3)\n\n global_up_sum_tree_inc_msg = padded_gather_nd(up_msg_by_sentence,\n node_up_to_global_idx, 3, 3)\n\n if not hps.single_sentence_concat:\n global_sum_tree_layer = SiblingSumTreeLayer(\n hps.batch_size,\n 1,\n 
hps.num_samples,\n hps.global_fft_tree_width,\n global_sum_tree_msg_start_depths,\n global_sum_tree_msg_end_depths,\n layer_depth=\"global\")\n\n # pylint: disable=line-too-long\n global_sum_tree_out_up_msg, global_sum_tree_out_up_log_zs = global_sum_tree_layer.compute_up_msg(\n global_up_sum_tree_inc_msg, global_up_sum_tree_inc_log_zs)\n # pylint: enable=line-too-long\n\n global_sum_tree_out_up_msg = tf.reshape(global_sum_tree_out_up_msg,\n [hps.batch_size, -1])\n\n running_log_zs = tf.reshape(global_sum_tree_out_up_log_zs,\n [hps.batch_size, -1])\n else:\n running_log_zs = tf.reshape(global_up_sum_tree_inc_log_zs,\n [hps.batch_size, -1])\n global_sum_tree_out_up_msg = global_up_sum_tree_inc_msg\n\n running_log_z = tf.reshape(running_log_zs, [hps.batch_size, -1])\n running_log_z = tf.reshape(\n tf.slice(running_log_z, [0, 0], [hps.batch_size, 1]),\n [hps.batch_size, 1])\n\n with tf.name_scope(\"k_beliefs\"):\n\n # TODO(lvilnis@) insert different type of cardinality potential here!\n # this might not encourage long enough summaries.\n if hps.single_sentence_concat:\n k_pot_msg = su.create_mask(tree_inference_inputs.abstract_len + 1,\n hps.global_fft_tree_width)\n k_pot_msg -= su.create_mask(tree_inference_inputs.abstract_len * 0 + 1,\n hps.global_fft_tree_width)\n else:\n k_pot_msg = su.create_mask(tree_inference_inputs.abstract_len,\n hps.global_fft_tree_width)\n\n k_belief, _, log_k_z = su.normalize_and_log(k_pot_msg *\n global_sum_tree_out_up_msg)\n\n running_log_z += log_k_z\n\n running_log_z = tf.reshape(running_log_z, [hps.batch_size])\n\n # with cardinality beliefs, start downward message passing\n with tf.name_scope(\"k_samples\"):\n\n rep_k_b = su.repeat(hps.num_samples, k_belief)\n rep_k_b = tf.reshape(rep_k_b, [hps.num_samples, hps.batch_size, -1])\n rep_k_b = tf.transpose(rep_k_b, [1, 0, 2])\n rep_k_b = tf.reshape(rep_k_b, [hps.batch_size * hps.num_samples, -1])\n k_samples, _ = su.sample_categorical(tf.log(rep_k_b))\n k_samples = tf.reshape(k_samples, [hps.batch_size, hps.num_samples, -1])\n\n # compute the global samples and global downward messages\n\n self.k_samples = k_samples\n\n if not hps.single_sentence_concat:\n k_pot_msg = tf.reshape(k_pot_msg, [hps.batch_size, 1, -1])\n k_samples = tf.reshape(k_samples,\n [hps.batch_size, 1, hps.num_samples, -1])\n\n global_sum_tree_down_msgs, _ = global_sum_tree_layer.compute_down_msg(\n k_pot_msg)\n global_sum_tree_down_samples = global_sum_tree_layer.compute_down_samples(\n k_samples)\n\n global_sum_tree_down_msgs = tf.reshape(global_sum_tree_down_msgs,\n [hps.batch_size, -1])\n global_sum_tree_down_samples = tf.reshape(\n global_sum_tree_down_samples, [hps.batch_size, hps.num_samples, -1])\n\n else:\n global_sum_tree_down_msgs = k_pot_msg\n global_sum_tree_down_samples = k_samples\n\n # now we gather down to per-sentence messages and samples\n\n cur_node_inc_down_msg = padded_gather_nd(global_sum_tree_down_msgs,\n global_down_to_node_idx, 2, 4)\n\n cur_node_inc_down_samples = padded_gather_nd(global_sum_tree_down_samples,\n global_down_to_node_sample_idx,\n 3, 5)\n\n # This is stored as [batch,sample,sentence,node] so we need to transpose\n\n cur_node_inc_down_samples = tf.transpose(cur_node_inc_down_samples,\n [0, 2, 1, 3])\n\n # stores [batch,sentence,width] span_off_marginals for each depth\n all_span_off_marginals = []\n # stores [batch,sentence,sample,width] span_off_samples for each depth\n all_span_off_samples = []\n\n for d in reversed(xrange(1, max_depth)):\n\n cur_parent_constraint_layer = parent_constraint_layers[d - 
1]\n cur_sum_tree_layer = sum_tree_layers[d - 1]\n\n # pylint: disable=line-too-long\n cur_span_off_marginals, sum_tree_out_down_msg = cur_parent_constraint_layer.compute_down_msg(\n cur_node_inc_down_msg, node_to_span_off_belief_idx[max_depth - d - 1],\n node_to_span_on_belief_start_idx[max_depth - d - 1],\n node_to_span_on_belief_end_idx[max_depth - d - 1],\n parent_on_down_to_sum_tree_idx[max_depth - d - 1],\n parent_off_down_to_sum_tree_idx[max_depth - d - 1])\n\n cur_span_off_samples, sum_tree_out_down_samples = cur_parent_constraint_layer.compute_down_samples(\n cur_node_inc_down_samples,\n node_sample_to_span_off_belief_sample_idx[max_depth - d - 1],\n parent_on_sample_down_to_sum_tree_idx[max_depth - d - 1],\n parent_off_sample_down_to_sum_tree_idx[max_depth - d - 1])\n # pylint: enable=line-too-long\n\n all_span_off_marginals.append(cur_span_off_marginals)\n all_span_off_samples.append(cur_span_off_samples)\n\n cur_sum_tree_inc_down_msg, _ = cur_sum_tree_layer.compute_down_msg(\n sum_tree_out_down_msg)\n\n cur_sum_tree_inc_down_samples = cur_sum_tree_layer.compute_down_samples(\n sum_tree_out_down_samples)\n\n cur_node_inc_down_msg = padded_gather_nd(\n cur_sum_tree_inc_down_msg, sum_tree_down_to_nodes_idx[max_depth - d],\n 3, 4)\n\n cur_node_inc_down_samples = padded_gather_nd(\n cur_sum_tree_inc_down_samples,\n sum_tree_sample_down_to_nodes_idx[max_depth - d], 4, 5)\n\n # Handle the base case for the final part of sampling\n\n bottom_span_off_samples = padded_gather_nd(\n cur_node_inc_down_samples,\n node_sample_to_span_off_belief_sample_idx[-1], 4, 5)\n\n all_span_off_samples.append(bottom_span_off_samples)\n\n # Handle the base case for the final part of message passing\n\n bottom_node_beliefs = cur_node_inc_down_msg * init_node_beliefs\n\n bottom_span_off_beliefs = padded_gather_nd(bottom_node_beliefs,\n node_to_span_off_belief_idx[-1],\n 3, 4)\n\n bottom_span_off_marginals = bottom_span_off_beliefs\n\n # Get \"integrated\" beliefs for easy sums over spans\n integrated_bottom_beliefs = tf.cumsum(bottom_node_beliefs, 2)\n\n span_on_start_cumulative_belief = padded_gather_nd(\n integrated_bottom_beliefs, node_to_span_on_belief_start_idx[-1], 3, 4)\n span_on_end_cumulative_belief = padded_gather_nd(\n integrated_bottom_beliefs, node_to_span_on_belief_end_idx[-1], 3, 4)\n\n bottom_span_on_beliefs = (\n span_on_end_cumulative_belief - span_on_start_cumulative_belief)\n\n bottom_span_belief_normalizer = (\n bottom_span_on_beliefs + bottom_span_off_beliefs)\n\n bottom_span_off_marginals = su.safe_divide(bottom_span_off_beliefs,\n bottom_span_belief_normalizer)\n\n all_span_off_marginals.append(bottom_span_off_marginals)\n\n # Gather back out to the (batch,span_id) format\n\n tree_marg_tensor = tf.concat(\n 2, [tf.expand_dims(ss, 2) for ss in all_span_off_marginals])\n\n # switch off -> on\n\n span_marginals = 1.0 - padded_gather_nd(\n tree_marg_tensor, span_off_belief_to_span_off_marginal_idx, 4, 3)\n\n tree_sample_tensor = tf.concat(\n 3, [tf.expand_dims(ss, 3) for ss in all_span_off_samples])\n\n # We have [batch,sample,sent,depth,width] indices\n # in the gather indices\n # but tree_sample_tensor is [batch,sent,sample,depth,width]\n # so we have to transpose\n tree_sample_tensor = tf.transpose(tree_sample_tensor, [0, 2, 1, 3, 4])\n\n # switch off -> on\n\n span_samples = 1.0 - padded_gather_nd(\n tree_sample_tensor,\n span_sample_off_belief_to_span_sample_off_marginal_idx, 5, 4)\n\n return span_marginals, span_samples, running_log_z", "title": "" }, { "docid": 
"dc4d31a0b91778902a99a80818378596", "score": "0.43410257", "text": "def get_node_slice_by_idx(self, start: int, end: int) -> List[Node]:\n return self._node_list[start:end]", "title": "" }, { "docid": "09771bae3513cc9544486d33ce84200a", "score": "0.43387803", "text": "def subrange(self, sub_start, sub_end):\n assert 0 <= sub_start <= sub_end < self.size()\n return self.__class__(self.start + sub_start, self.start + sub_end)", "title": "" }, { "docid": "b4bcb8120f5e9eae93178b4fae3a785a", "score": "0.43363887", "text": "def extract_slices_range(split, next_read_index, Y_size, Z_size):\n indexes = []\n x_index_min = -1\n read_start, read_end = next_read_index\n for i in range(0, split.split_x):\n index = int(split.split_pos[-3]) + (int(split.split_pos[-2])) * Y_size + (int(\n split.split_pos[-1]) + i) * Y_size * Z_size\n # if split's one row is in the write range.\n if index >= read_start and index <= read_end:\n if len(indexes) == 0:\n x_index_min = i\n indexes.append(index)\n else:\n continue\n\n X_index_min = index_to_voxel(min(indexes), Y_size, Z_size)[2]\n X_index_max = index_to_voxel(max(indexes), Y_size, Z_size)[2]\n x_index_max = x_index_min + (X_index_max - X_index_min)\n\n return (X_index_min, X_index_max, x_index_min, x_index_max)", "title": "" }, { "docid": "0d1d453c60e90506d8892701891a65c5", "score": "0.43341306", "text": "def get_range(count, offset=0):\n high, low = _count_and_offset_to_high_and_low(count, offset)\n return '%s to %s' % (low, high)", "title": "" }, { "docid": "c869b32e3faac2c64ece9741a1a4979d", "score": "0.43273094", "text": "def find_fragment_intervals(digest_param: str, seq: str) -> List[int]:\n enz = getattr(Restriction, digest_param, None)\n if enz is None:\n raise ValueError(\"Enzyme not found: {}\".format(digest_param))\n \n s = Seq(seq)\n positions = [_ - 1 for _ in enz.search(s)]\n intervals = to_intervals(positions, len(seq))\n return intervals", "title": "" }, { "docid": "8076eeef0fa284998da56485b333fcc9", "score": "0.43256515", "text": "def with_contained_spans_from(self, other_tier, allow_partial_containment=False):\n span_groups = self.group_spans_by_containing_span(other_tier,\n allow_partial_containment=allow_partial_containment)\n result = []\n for span, group in span_groups:\n for other_span in group:\n result.append(SpanGroup([span, other_span]))\n return AnnoTier(result)", "title": "" }, { "docid": "f8ea7f1a465209445d12726670a32c9d", "score": "0.43234244", "text": "def get_range(self, start_addr, end_addr, fields=None):\n frag = \"range?start_addr={0}&end_addr={1}\".format(start_addr, end_addr)\n if fields:\n frag += \"&_return_fields=\" + fields\n return self._get(frag)", "title": "" }, { "docid": "29dd04d845c302d4c8bfac074ccb25ca", "score": "0.43182838", "text": "def get_reads_in_interval(sam, chrom, start, end):\n iterator = sam.fetch(chrom, start, end)\n reads = [ x for x in iterator ]\n return reads", "title": "" }, { "docid": "061441dbff3d5e763432dedc86131ea2", "score": "0.4317306", "text": "def range(start, limit=None, delta=1, dtype='int64', **kwargs):\n args = ArgHelper.parse(locals())\n args['dtype'] = args['dtype'].lower()\n if limit is None:\n args['slice'] = (float(start), float(delta))\n else:\n args['slice'] = (float(start), float(limit), float(delta))\n args.pop('start')\n args.pop('limit')\n args.pop('delta')\n op_lib = array_ops_lib.Range\n trainable = args.pop('trainable') if 'trainable' in args else False\n if context.executing_eagerly():\n return op_lib.instantiate(\n num_args=len(args['slice']),\n dtype=dtype,\n 
).apply(args['slice'], trainable=trainable)\n else:\n return op_lib.blend(**args)", "title": "" }, { "docid": "4ac59d41a7097cb2c2228e2289c78c5f", "score": "0.43170267", "text": "def splitrange(raw_range):\n\n result = []\n\n if re.search(r'^(\\d+)\\-(\\d+)$', raw_range):\n match = re.search(r'^(\\d+)\\-(\\d+)$', raw_range)\n first = int(format(match.group(1)))\n last = int(format(match.group(2)))\n for i in range(first, last+1):\n result.append(str(i))\n return result", "title": "" }, { "docid": "4ac59d41a7097cb2c2228e2289c78c5f", "score": "0.43170267", "text": "def splitrange(raw_range):\n\n result = []\n\n if re.search(r'^(\\d+)\\-(\\d+)$', raw_range):\n match = re.search(r'^(\\d+)\\-(\\d+)$', raw_range)\n first = int(format(match.group(1)))\n last = int(format(match.group(2)))\n for i in range(first, last+1):\n result.append(str(i))\n return result", "title": "" }, { "docid": "1d2bb929256bd06ef1fec74dbf07fc18", "score": "0.43169934", "text": "def analyse_slice(aslice, upper_bound):\n if aslice.start is None:\n start = 0\n else:\n start = max(aslice.start, 0)\n if aslice.stop is None:\n stop = upper_bound\n else:\n stop = min(aslice.stop, upper_bound)\n if start > stop:\n raise ValueError\n elif start < stop:\n if aslice.step:\n my_range = range(start, stop, aslice.step)\n else:\n my_range = range(start, stop)\n if not PY2:\n # for py3, my_range is a range object\n my_range = list(my_range)\n else:\n my_range = [start]\n return my_range", "title": "" }, { "docid": "6fb41caa2834107fa965a86cca6db7b8", "score": "0.4316206", "text": "def range_n(n, start, end, exclude=False):\n if exclude:\n return range(start, n) + range(n+1, end)\n else:\n return range(start, end)", "title": "" }, { "docid": "a6e1fe08cb48ead02808986ec6e00cc8", "score": "0.43139184", "text": "def makespan(sol={}):\n return sol[\"makespan\"]", "title": "" }, { "docid": "a74aebd93ba566a7ff9c1ac05dbd200e", "score": "0.4313173", "text": "def get_proper_tls_version_span(tls_version_min, tls_version_max):\n min_allowed_idx = TLS_VERSIONS.index(TLS_VERSION_MINIMAL)\n\n try:\n min_version_idx = TLS_VERSIONS.index(tls_version_min)\n except ValueError:\n raise ValueError(\"tls_version_min ('{val}') is not a known \"\n \"TLS version.\".format(val=tls_version_min))\n\n try:\n max_version_idx = TLS_VERSIONS.index(tls_version_max)\n except ValueError:\n raise ValueError(\"tls_version_max ('{val}') is not a known \"\n \"TLS version.\".format(val=tls_version_max))\n\n if min_version_idx > max_version_idx:\n raise ValueError(\"tls_version_min is higher than \"\n \"tls_version_max.\")\n\n if min_version_idx < min_allowed_idx:\n min_version_idx = min_allowed_idx\n logger.warning(\"tls_version_min set too low ('%s'),using '%s' instead\",\n tls_version_min, TLS_VERSIONS[min_version_idx])\n\n if max_version_idx < min_allowed_idx:\n max_version_idx = min_version_idx\n logger.warning(\"tls_version_max set too low ('%s'),using '%s' instead\",\n tls_version_max, TLS_VERSIONS[max_version_idx])\n return TLS_VERSIONS[min_version_idx:max_version_idx+1]", "title": "" }, { "docid": "4f150b4b181012783e95533d4fd21ea6", "score": "0.4299693", "text": "def get_bounds(cls, placements: Iterable[WidgetPlacement]) -> Region:\n bounding_region = Region.from_union(\n [placement.region.grow(placement.margin) for placement in placements]\n )\n return bounding_region", "title": "" }, { "docid": "3caa02a76ba655f2548993dc46ea94d2", "score": "0.42987713", "text": "def get_slice_intersect_coord(self, slice_start_wrt_ref_1based, slice_end_wrt_ref_1based):", "title": "" }, { 
"docid": "e0658abccb3e6acc442977e38e422050", "score": "0.4297281", "text": "def range_filter(self, chrom, start, end, contained=False, fraction_query=False, fraction_subject=False):\r\n if contained!=False: #the interval must be completely contained within the query\r\n bytes = [self.coord_to_index[a,b,c] for a,b,c in self.coord_to_index if \r\n a==chrom and int(b)>=int(start) and int(c)<=int(end)]\r\n elif fraction_query!=False: #the fraction of overlap/query length must be greater than parameter\r\n bytes = [self.coord_to_index[a,b,c] for a,b,c in self.coord_to_index if a==chrom and \r\n (getOverlap((float(start),float(end)), \r\n (float(b),float(c))) / (float(end)-float(start))) >= float(fraction_query)]\r\n elif fraction_subject!=False: #the fraction of overlap/gelist interval length must be greater than parameter\r\n bytes = [self.coord_to_index[a,b,c] for a,b,c in self.coord_to_index if a==chrom and \r\n (getOverlap((float(start),float(end)), \r\n (float(b),float(c))) / (float(c)-float(b))) >= float(fraction_subject)] \r\n else: #default; there must be at least 1 bp overlap\r\n bytes = [self.coord_to_index[a,b,c] for a,b,c in self.coord_to_index if \r\n a==chrom and (int(b)>=int(start) or int(c)<=int(end))]\r\n\r\n #bytes is a lists of lists of bytes. Each list should contain 1 byte, but there may be more \r\n for byte in sorted(bytes):\r\n if len(byte)==1:\r\n self.file.seek(byte[0])\r\n line = self.file.readline()\r\n if not line:\r\n raise IndexError\r\n else:\r\n yield parse_gff_line(line, format=self.format) \r\n else:\r\n for b in byte:\r\n self.file.seek(b)\r\n line = self.file.readline()\r\n if not line:\r\n raise IndexError\r\n else:\r\n yield parse_gff_line(line, format=self.format)", "title": "" }, { "docid": "88fec8c1c507ca9f2e5182cd12cdf965", "score": "0.4293684", "text": "def _valid_range(self, spec):\n valid = None\n for key, dim in spec.items():\n assert key in self.data\n v = self.data[key].valid_range(dim[-3:])\n if v is None:\n raise Dataset.OutOfRangeError()\n valid = v if valid is None else valid.intersect(v)\n assert valid is not None\n return valid", "title": "" }, { "docid": "fa1f8c1cfe688a2aeca5c51427c20190", "score": "0.429209", "text": "def sa_range(start: int, end: int) -> StaticArray:\n if start > end:\n size = start - end + 1\n new_arr = StaticArray(size)\n for index in range(size):\n new_arr[index] = start\n start -= 1\n\n elif start < end:\n size = end - start + 1\n new_arr = StaticArray(size)\n for index in range(size):\n new_arr[index] = start\n start += 1\n\n else:\n new_arr = StaticArray(1)\n new_arr[0] = start\n\n return new_arr", "title": "" }, { "docid": "a4ec1eba642f370d1d119ee4d70a51fb", "score": "0.42881706", "text": "def range_check(arr, sensor_span, user_span=None):\n flag_arr = np.ones_like(arr, dtype='uint8')\n if len(sensor_span) != 2:\n raise ValueError(\"Sensor range extent must be size two.\")\n # Ensure coordinates are in proper order.\n s_span_sorted = sorted(sensor_span)\n if user_span is not None:\n if len(user_span) != 2:\n raise ValueError(\"User defined range extent must be size two.\")\n u_span_sorted = sorted(user_span)\n if (u_span_sorted[0] < s_span_sorted[0] or\n u_span_sorted[1] > s_span_sorted[1]):\n raise ValueError(\"User span range may not exceed sensor bounds.\")\n # Test timing.\n flag_arr[(arr < u_span_sorted[0]) |\n (arr > u_span_sorted[1])] = QCFlags.SUSPECT\n flag_arr[(arr < s_span_sorted[0]) |\n (arr > s_span_sorted[1])] = QCFlags.BAD_DATA\n return flag_arr", "title": "" }, { "docid": 
"b9f4ff2e63d85fe0106749fa16bcce94", "score": "0.42803988", "text": "def get_range_slice(self, *args, **kwargs):\r\n with self.get_client() as client:\r\n return client.get_range_slice(*args, **kwargs)", "title": "" }, { "docid": "02c25209e8df55e34a0c69d4bb894463", "score": "0.4272854", "text": "def offset(self, offset):\n\n span = self\n if offset > 0:\n for i in range(offset):\n span = span.next_period()\n elif offset < 0:\n for i in range(-offset):\n span = span.prev_period()\n return span", "title": "" }, { "docid": "49067883574936e900307878f637eb5e", "score": "0.42692095", "text": "def labels_in_range(self, start, end, fully_included=False):\n\n if fully_included:\n intervals = self.label_tree.envelop(start, end)\n else:\n intervals = self.label_tree.overlap(start, end)\n\n return [iv.data for iv in intervals]", "title": "" }, { "docid": "710adb944319984df6bd438ff34c9ef2", "score": "0.42672583", "text": "def _fetch_range(self, start, end):\n raise NotImplementedError", "title": "" }, { "docid": "855078119e03b52db7047b5203d4f3b1", "score": "0.42457885", "text": "def interpolated(self, time_range, span, as_df=True, numeric=True):\n pass", "title": "" } ]
99f78cd801828c47c468dde22fed4952
plots the final result of the trained model on the input data
[ { "docid": "39189d822a479cbe3249845e807a280d", "score": "0.0", "text": "def plot_3d_mlp(epoch=100, nand=False):\n if not nand:\n inputs, labels = dataset(2, 200)\n else:\n inputs, labels = dataset_nand(2, 200)\n\n mlp = MLP(2, 2, epoch)\n mlp.train(inputs, labels)\n x_1 = inputs[:, 0]\n x_2 = inputs[:, 1]\n\n def z_function(x, y):\n Z = np.zeros(len(x))\n for x, y, n in zip(x, y, range(len(x))):\n x = x.reshape(1)\n y = y.reshape(1)\n Z[n] = mlp.test([x, y])\n return Z\n\n z = z_function(x_1, x_2)\n ax = plt.axes(projection=\"3d\")\n ax.scatter3D(x_1, x_2, labels, color=\"orange\", label=\"label\")\n ax.scatter3D(x_1, x_2, z, color=\"blue\", label=\"predicted\")\n ax.set_xlabel('x_1')\n ax.set_ylabel('x_2')\n ax.set_zlabel('z')\n plt.legend()\n plt.show()", "title": "" } ]
[ { "docid": "6dd78742d784ef5ffeae68abe49fe372", "score": "0.7612694", "text": "def visualize_model():\n sns.set_style(\"darkgrid\")\n plt.scatter(x=y_test, y=y_predict, alpha=0.4)\n\n plt.xlabel(\"Actual prices\")\n plt.ylabel(\"Predicted prices\")\n plt.title(\"Actual vs Predicted prices\")\n\n plt.show()", "title": "" }, { "docid": "d840a6375fcd834ef03d671d23595e97", "score": "0.741404", "text": "def __plot(self) -> None:\n\n #region - parameters retrieval\n fields = self.model_name.split('-')\n batch_size = int(fields[1][:-1])\n epochs = int(fields[2][:-1])\n lr = float(fields[3][:-1])\n #endregion\n\n #region - plotting\n x = list(range(1, epochs + 1))\n plt.plot(x, self.epochs_training_accuracy_list, label='Training')\n plt.plot(x, self.epochs_validation_accuracy_list, label='Validation')\n plt.grid(True)\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy %')\n title = 'data_augmentation={}, batch_size={}, epochs={}, lr={}'.format(self.data_augmentation, batch_size, epochs, lr)\n plt.title(title)\n plt.legend()\n #endregion\n\n #region - save plot\n basedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n folder = os.path.join(basedir, 'results/')\n if not os.path.exists(folder): \n os.makedirs(folder) \n filepath = os.path.join(folder, self.model_name)\n plt.savefig(\"{}.png\".format(filepath), dpi=1000)\n #endregion", "title": "" }, { "docid": "f772f959a787eb358442662e92096fd8", "score": "0.73529005", "text": "def plot_results(models,\n data,\n batch_size=128,\n model_name=\"vae_mnist\"):\n\n encoder, decoder = models\n x_test, y_test = data\n os.makedirs(model_name, exist_ok=True)\n\n filename = os.path.join(model_name, \"vae_mean.png\")\n # display a 2D plot of the digit classes in the latent space\n z_mean, _, _ = encoder.predict(x_test,\n batch_size=batch_size)\n plt.figure(figsize=(12, 10))\n\n plt.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test==0)\n plt.colorbar()\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n plt.savefig(filename)\n #print(len(z_mean))\n nb_elem_per_class = dataset_size*test_size\n\n\n\n to_decode = np.array([[0.5, 0], [1.8, 1]], dtype=np.float32)\n final = decoder.predict(to_decode)\n print(final )\n\n # print(\"ICI \", file_shuffle[:int(dataset_size * test_size)])\n #for i, txt in enumerate(file_shuffle[ :int(dataset_size*2 * test_size)]):\n #print(\"i \", i)\n #plt.annotate(txt,(z_mean[i,0], z_mean[i,1]))\n\n\n plt.show()", "title": "" }, { "docid": "5de3e5b8a779841756a827749d20c47e", "score": "0.732496", "text": "def plot(self, data: pd.DataFrame, predicted_loads: pd.DataFrame, estimator_name: str) -> None:", "title": "" }, { "docid": "7368560051f11b436338835cdd1eb4b0", "score": "0.7141638", "text": "def plot(model_generated, predictions, titles, nums):\n\n figure = plt.figure(1)\n for index in range(len(model_generated)):\n figure.add_subplot(nums, 1, index + 1)\n print(index)\n plt.plot(\n predictions[index],\n linewidth=0.5,\n linestyle=\"-\",\n color='black')\n plt.title(titles[index], fontsize=10)\n figure.subplots_adjust(hspace=0.5)\n plt.show()", "title": "" }, { "docid": "a88fc3d29eb351c048e39cb282bc9b3a", "score": "0.709263", "text": "def plot_results():\n plt.subplot(211)\n time = data_plot[:, 0]\n q = data_plot[:, 1:number_of_cars + 1]\n v = data_plot[:, number_of_cars + 1:]\n plt.plot(time, q, label='u')\n plt.ylabel('u')\n plt.subplot(212)\n plt.plot(time, v, label='v')\n plt.ylabel('v')\n plt.xlabel('time')\n plt.savefig('train_brakes.png')", "title": "" }, { "docid": "c09289dd0f39929a78e8c2f0e1f874b5", "score": "0.7068866", 
"text": "def display_results(x: numpy.ndarray, y: numpy.ndarray, model: LinearRegression):\n # plot the data\n pyplot.scatter(x, y, color='black')\n # plot the trained model as a line\n pyplot.plot(x, model.predict(x),\n color='blue', linewidth=3)", "title": "" }, { "docid": "6e44011c20c14e745b8d17714ab28e36", "score": "0.6979469", "text": "def plot(self):\n from IPython.display import clear_output\n import matplotlib.pyplot as plt\n\n clear_output(True)\n plt.figure(figsize=(16, 5))\n\n plt.subplot(131)\n plt.title('Loss')\n plt.plot(self.history['loss'], alpha=0.5)\n plt.plot(self.history['loss_smooth'])\n plt.xlabel('Updating steps')\n\n plt.subplot(132)\n plt.plot(self.history['target_values'], self.history['predicted_values'],\n 'o', label='predicted', alpha=0.8)\n xmin, xmax = [f(self.history['target_values']) for f in [min, max]]\n ymin, ymax = [f(self.history['predicted_values']) for f in [min, max]]\n plt.plot([xmin,xmax], [ymin,ymax])\n plt.plot(self.history['target_values'], self.history['rewards'],\n 'o', label='rewards', alpha=0.8)\n plt.xlabel('target_values')\n plt.legend()\n\n plt.subplot(133)\n plt.imshow(self.sample_values, origin='lower')\n plt.show()\n #plt.close()", "title": "" }, { "docid": "66e7f535a8152d781cfb351f9e0279ae", "score": "0.69347215", "text": "def plot_results(self, models,\n data,\n batch_size=128,\n model_name=\"vae_gym\"):\n\n encoder, decoder = models\n x_test, y_test = data\n # os.makedirs(model_name, exist_ok=True)\n\n filename = os.path.join(\"vae_mean.png\")\n # display a 2D plot of the digit classes in the latent space\n z_mean, _, _ = encoder.predict(x_test,\n batch_size=self.batch_size)\n plt.figure(figsize=(12, 10))\n plt.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test)\n plt.colorbar()\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n plt.savefig(filename)\n plt.show()\n\n filename = os.path.join(\"digits_over_latent.png\")\n # display a 30x30 2D manifold of digits\n n = 30\n digit_size = 28\n figure = np.zeros((digit_size * n, digit_size * n))\n # linearly spaced coordinates corresponding to the 2D plot\n # of digit classes in the latent space\n grid_x = np.linspace(-4, 4, n)\n grid_y = np.linspace(-4, 4, n)[::-1]\n\n for i, yi in enumerate(grid_y):\n for j, xi in enumerate(grid_x):\n z_sample = np.array([[xi, yi]])\n x_decoded = decoder.predict(z_sample)\n digit = x_decoded[0].reshape(digit_size, digit_size)\n figure[i * digit_size: (i + 1) * digit_size,\n j * digit_size: (j + 1) * digit_size] = digit\n\n plt.figure(figsize=(10, 10))\n start_range = digit_size // 2\n end_range = n * digit_size + start_range + 1\n pixel_range = np.arange(start_range, end_range, digit_size)\n sample_range_x = np.round(grid_x, 1)\n sample_range_y = np.round(grid_y, 1)\n plt.xticks(pixel_range, sample_range_x)\n plt.yticks(pixel_range, sample_range_y)\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n plt.imshow(figure, cmap='Greys_r')\n plt.savefig(filename)\n plt.show()", "title": "" }, { "docid": "43a928d57126cd566bbe619b78196059", "score": "0.6871092", "text": "def __display_results(self, history):\n accuracy = history.history['accuracy']\n value_accuracy = history.history['val_accuracy']\n loss = history.history['loss']\n value_loss = history.history['val_loss']\n epochs_range = range(15)\n\n plt.figure(figsize=(8, 8))\n plt.subplot(1, 2, 1)\n plt.plot(epochs_range, accuracy, label='Training Accuracy')\n plt.plot(epochs_range, value_accuracy, label='Validation Accuracy')\n plt.legend(loc='lower right')\n plt.title('Training and Validation Accuracy')\n\n 
plt.subplot(1, 2, 2)\n plt.plot(epochs_range, loss, label='Training Loss')\n plt.plot(epochs_range, value_loss, label='Validation Loss')\n plt.legend('upper right')\n plt.title('Training and Validation Loss')\n plt.show()", "title": "" }, { "docid": "b2022255db4a0ce690d2b7bd6d6b59c5", "score": "0.68540174", "text": "def plot_results(self):\n pass", "title": "" }, { "docid": "dc4b48da93d1f12c8832bf3851891fbc", "score": "0.6833333", "text": "def viz_resids(model_title: str, X, y, random_state_number=42) -> None:\n\n # HANDLING DATA\n # train/test split\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=random_state_number)\n\n # instatiate model\n lr = LinearRegression()\n # fit model\n lr.fit(X_train, y_train)\n\n preds = lr.predict(X_test)\n resids = y_test - preds\n target_name = y.name.capitalize()\n\n # HANDLING SUBPLOTS\n fig, axes = plt.subplots(2, 2, figsize=(12,10)) # 2 row x 2 columns\n fig.suptitle(f\"{model_title}: $R^2$ test ={lr.score(X_test, y_test):2.2%}\", fontsize = 24, y = 1.05)\n\n ax_1 = axes[0][0]\n ax_2 = axes[0][1]\n ax_3 = axes[1][0]\n\n subplot_title_size = 18\n subplot_label_size = 14\n \n # 1ST PLOT - y_true vs. y_pred\n ax_1.set_title(\"True Values ($y$) vs. Predictions ($\\hat{y}$)\", fontsize = subplot_title_size, pad = 10)\n maxDist = max(max(preds),max(y)) # maxiumum value used to determin x_lim and y_lim\n minDist = min(min(preds),min(y)) # maxiumum value used to determin x_lim and y_lim\n # 45deg line, signifying prediction == true value\n ax_1.plot((minDist,maxDist),(minDist,maxDist), c = \"r\", alpha = .7);\n \n sns.scatterplot(ax = ax_1, x = y_test, y = preds, alpha = .5)\n ax_1.set_xlabel(\"True Values ($y$)\", fontsize = subplot_label_size, labelpad = 10)\n ax_1.set_ylabel(\"Predictions ($\\hat{y}$)\", fontsize = subplot_label_size, labelpad = 10)\n\n # 2ND PLOT - residuals\n ax_2.set_title(\"Residuals\", fontsize = subplot_title_size)\n sns.scatterplot(ax = ax_2, x = range(len(resids)),y = resids, alpha = .5)\n ax_2.set_ylabel(target_name, fontsize = subplot_label_size)\n ax_2.axhline(0, c = \"r\", alpha = .7);\n\n # 3RD PLOT - residuals histogram\n ax_3.set_title(\"Histogram of residuals\", fontsize = subplot_title_size)\n sns.distplot(resids, ax = ax_3, kde = False);\n ax_3.set_xlabel(target_name, fontsize = subplot_label_size)\n ax_3.set_ylabel(\"Frequency\", fontsize = subplot_label_size)\n\n plt.tight_layout() # handles most overlaping and spacing issues", "title": "" }, { "docid": "717b6fbb84e40c8caa8dde927f16f0f0", "score": "0.68246317", "text": "def save_predictions_overview(self):\n # Predict each Batch in the File===============\n predictions, actuals = ([], [])\n #for coord in self.valid_data:\n #for batch in coord:\n for batch in self.valid_data:\n predictions.extend(self.model.predict(batch[0], batch_size=self.batch_size)[:, -1])\n actuals.extend(batch[1].reshape(batch[1].shape[0]))\n\n # Organize Dataframe =========================\n tpl_stats = tuple(map(lambda x: x[self.output_column].values[0], self.stats))\n df_pred = pd.DataFrame({'Predicted': predictions, 'Actual': actuals})\n #df_pred = utils.denormalize(df_pred, tpl_stats)\n #df_pred['\\u0394 Predicted'] = df_pred['Predicted'] - df_pred['Actual']\n\n #Save Plot Figure =============================\n #max_v, min_v= (tpl_stats[0]+2*tpl_stats[1], tpl_stats[0]-2*tpl_stats[1])\n max_actual, min_actual = df_pred['Actual'].max(), df_pred['Actual'].min()\n max_pred, min_pred = df_pred['Predicted'].max(), df_pred['Predicted'].min()\n \n plt.figure()\n df_pred.plot(\n 
x='Predicted',\n y='Actual',\n figsize=(22, 10),\n title=f'Predictions do Modelo: {self.model_name}',\n xlabel='Batch item',\n ylabel='Normalized Prediction (non-dimensional)',\n kind='scatter',\n ylim=(min_actual, max_actual),\n xlim=(min_pred, max_pred),\n ) \n min_linha, max_linha = min(min_actual, min_pred), max(max_actual, max_pred)\n plt.plot([min_linha, max_linha], [min_linha, max_linha], color='red')\n plt.savefig(self.model_folder+\"Predictions_Batch_Chart.png\")\n print(f'Predictions Overview Chart Created and Saved to {self.model_folder+\"Predictions_Batch_Chart.png\"}')", "title": "" }, { "docid": "a6e3ed7cef7a877ee45ce7f5cccc2d2a", "score": "0.6818909", "text": "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "title": "" }, { "docid": "508937f4ad380cd0fef57a0b671a844e", "score": "0.68137157", "text": "def viz_clf(x_train, x_test, y_train, y_test, mean_func, var_func, number, mlp):\n x = np.concatenate((x_train, x_test))\n y = np.concatenate((y_train, y_test))\n color = ['r']*len(x_train)+['b']*len(y_train)\n plt.scatter(x, y, c=color, alpha=0.5)\n plt.xlabel('X')\n plt.ylabel('Y')\n plt.title('Data fitted with MLP parametric variance')\n\n #visualize the model in plot\n #plot mean line\n x_c = np.array(range(500))/500.\n y_c = mlp.output(expand(x_c,1,number).T).eval()\n #print y_c\n mean_c = y_c[0]\n variance_c = np.exp(y_c[1])\n\n #plot variance line\n plt.plot(x_c, mean_c, 'b')\n plt.plot(x_c, mean_c+np.sqrt(variance_c),'r')\n plt.plot(x_c, mean_c-np.sqrt(variance_c), 'r')\n plt.show()\n\n\n #visualize par\n mean_x = mean_func(x_c)\n var_x = var_func(x_c)\n y_pred = mlp.output(expand(x_c,1,number).T).eval()\n mean_test, var_test = y_pred[0], np.exp(y_pred[1])\n\n plt.plot(x_c, mean_x, 'b')\n plt.plot(x_c, mean_test, 'b--')\n plt.plot(x_c, var_x, 'r')\n plt.plot(x_c, var_test, 'r--')\n plt.xlabel('x')\n plt.ylabel('parameters')\n plt.legend(['mean(x)', 'Prediction of mean', 'std_err(x)', 'Prediction of std_err'], loc=3)\n plt.show()\n\n #nll = np.sum(compute_neglog(y_test[i], mean_test[i], var_test[i]) for i in range(len(y_test)))\n #print \"NNL on test data: %r\" %(nll)", "title": "" }, { "docid": "345f7f6898750de8fe3bd619a965e4aa", "score": "0.67996377", "text": "def plot_results(models,\n data,\n batch_size=128,\n model_name=\"vae_mnist\"):\n\n encoder, decoder = models\n x_test, y_test = data\n os.makedirs(model_name, exist_ok=True)\n\n filename = os.path.join(model_name, \"vae_mean.png\")\n # display a 2D plot of the digit classes in the latent space\n z_mean, _, _ = encoder.predict(x_test,\n batch_size=batch_size)\n pca = PCA(n_components=2)\n z_mean_pc2 = pca.fit_transform(z_mean)\n # print(\"z_mean shape: \", z_mean.shape, \"z_mean_pc2 shape: \", z_mean_pc2.shape)\n plt.figure(figsize=(12, 10))\n plt.scatter(z_mean_pc2[:, 0], z_mean_pc2[:, 1], c=y_test)\n plt.colorbar()\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n plt.savefig(filename)\n plt.show()", "title": "" }, { "docid": "92a046c9f525c19ddda16b2826ff897a", "score": 
"0.67941374", "text": "def inspect(self, X):\n if not self.training_results:\n print(\"Error: No training results available\")\n else:\n print(\"printing training results..\")\n for _label, key in self.training_results.items():\n for label, result in key.items():\n plt.plot(result,label=_label+\" \"+label)\n plt.title(\"Training results\")\n plt.legend()\n plt.show()\n\n if not self.model:\n print(\"Error: No model available\")\n else:\n print(\"printing feature importance..\")\n f=lgb.plot_importance(self.model)\n f.figure.set_size_inches(10, 30) \n plt.show()", "title": "" }, { "docid": "738ce7444010efdbd6c0820fceb23c95", "score": "0.6767977", "text": "def drawLossAcc(plot_result,plot_title,path):\n plt.figure()\n #plt.subplot(2,2,1)\n #plt.cla()\n #plt.plot(plot_result[0], color='#1a53ff')\n #plt.ylabel('loss train')\n #plt.subplot(2,2,2)\n #plt.plot(plot_result[1], color='#1a53ff')\n #plt.ylabel('accuracy train')\n #plt.subplot(2,2,3)\n #plt.plot(plot_result[2], color='#1a53ff')\n #plt.ylabel('loss val')\n #plt.xlabel('Epoch')\n plt.subplot(2, 2, 4)\n plt.plot(plot_result[3], color='#1a53ff')\n plt.ylabel('accuracy val')\n plt.xlabel('Epoch')\n plt.tight_layout()\n plot_path = path + \"/fig_\" + plot_title + \"_target_model.png\"\n plt.savefig(plot_path)", "title": "" }, { "docid": "874cf54f493c776e2edd706c8417bf89", "score": "0.6736981", "text": "def plot_results(models,\n data,\n batch_size=25 ,\n model_name=\"vae_mnist\"):\n\n encoder, decoder = models\n x_test, y_test = data\n os.makedirs(model_name)\n\n filename = os.path.join(model_name, \"vae_mean.png\")\n # display a 2D plot of the digit classes in the latent space\n z_mean, _, _ = encoder.predict(x_test,\n batch_size=batch_size)\n plt.figure(figsize=(12, 10))\n plt.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test)\n plt.colorbar()\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n plt.savefig(filename)\n plt.show()\n\n filename = os.path.join(model_name, \"digits_over_latent.png\")\n # display a 30x30 2D manifold of digits\n n = 30\n digit_size = 28\n figure = np.zeros((digit_size * n, digit_size * n))\n # linearly spaced coordinates corresponding to the 2D plot\n # of digit classes in the latent space\n grid_x = np.linspace(-4, 4, n)\n grid_y = np.linspace(-4, 4, n)[::-1]\n\n for i, yi in enumerate(grid_y):\n for j, xi in enumerate(grid_x):\n z_sample = np.array([[xi, yi]])\n x_decoded = decoder.predict(z_sample)\n digit = x_decoded[0].reshape(digit_size, digit_size)\n figure[i * digit_size: (i + 1) * digit_size,\n j * digit_size: (j + 1) * digit_size] = digit\n\n plt.figure(figsize=(10, 10))\n start_range = digit_size // 2\n end_range = n * digit_size + start_range + 1\n pixel_range = np.arange(start_range, end_range, digit_size)\n sample_range_x = np.round(grid_x, 1)\n sample_range_y = np.round(grid_y, 1)\n plt.xticks(pixel_range, sample_range_x)\n plt.yticks(pixel_range, sample_range_y)\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n plt.imshow(figure, cmap='Greys_r')\n plt.savefig(filename)\n plt.show()", "title": "" }, { "docid": "c1ac09914f18c86c59d8688dc931deff", "score": "0.6733383", "text": "def plot_results(models,\n data,\n batch_size=128,\n model_name=\"vae_mnist\"):\n\n encoder, decoder = models\n x_test, y_test = data\n os.makedirs(model_name, exist_ok=True)\n\n filename = os.path.join(model_name, \"vae_mean.png\")\n # display a 2D plot of the digit classes in the latent space\n z_mean, _, _ = encoder.predict(x_test,\n batch_size=batch_size)\n plt.figure(figsize=(12, 10))\n plt.scatter(z_mean[:, 0], z_mean[:, 1], 
c=y_test)\n plt.colorbar()\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n plt.savefig(filename)\n plt.show()\n\n filename = os.path.join(model_name, \"digits_over_latent.png\")\n # display a 30x30 2D manifold of digits\n n = 30\n digit_size = 28\n figure = np.zeros((digit_size * n, digit_size * n))\n # linearly spaced coordinates corresponding to the 2D plot\n # of digit classes in the latent space\n grid_x = np.linspace(-4, 4, n)\n grid_y = np.linspace(-4, 4, n)[::-1]\n\n for i, yi in enumerate(grid_y):\n for j, xi in enumerate(grid_x):\n z_sample = np.array([[xi, yi]])\n x_decoded = decoder.predict(z_sample)\n digit = x_decoded[0].reshape(digit_size, digit_size)\n figure[i * digit_size: (i + 1) * digit_size,\n j * digit_size: (j + 1) * digit_size] = digit\n\n plt.figure(figsize=(10, 10))\n start_range = digit_size // 2\n end_range = n * digit_size + start_range + 1\n pixel_range = np.arange(start_range, end_range, digit_size)\n sample_range_x = np.round(grid_x, 1)\n sample_range_y = np.round(grid_y, 1)\n plt.xticks(pixel_range, sample_range_x)\n plt.yticks(pixel_range, sample_range_y)\n plt.xlabel(\"z[0]\")\n plt.ylabel(\"z[1]\")\n plt.imshow(figure, cmap='Greys_r')\n plt.savefig(filename)\n plt.show()", "title": "" }, { "docid": "84c2a25f28e1de63d428b4bdb522633e", "score": "0.6719283", "text": "def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc):\r\n\r\n blue= '#34495E'\r\n green = '#2ECC71'\r\n orange = '#E23B13'\r\n\r\n # plot model loss\r\n #fig, (ax1, ax2) = plt.subplots(2, figsize=(9, 7))\r\n ax1=plt\r\n ax1.plot(range(1, len(train_loss) + 1), train_loss, blue, linewidth=5, label='training')\r\n ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, green, linewidth=5, label='validation')\r\n ax1.xlabel('# epoch')\r\n ax1.ylabel('loss')\r\n # ax1.tick_params('y')\r\n ax1.grid(True)\r\n ax1.legend(loc='upper right', shadow=False)\r\n ax1.title('Model loss through #epochs', color=orange, fontweight='bold')\r\n ax1.show() \r\n # plot model accuracy\r\n ax2=plt\r\n ax2.plot(range(1, len(train_acc) + 1), train_acc, blue, linewidth=5, label='training')\r\n ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, green, linewidth=5, label='validation')\r\n ax2.xlabel('# epoch')\r\n ax2.ylabel('accuracy')\r\n #ax2.tick_params('y')\r\n ax2.grid(True)\r\n ax2.legend(loc='lower right', shadow=False)\r\n ax2.title('Model accuracy through #epochs', color=orange, fontweight='bold')\r\n ax2.show()", "title": "" }, { "docid": "6e79b32e3fac17b6b55e57e2d773358e", "score": "0.670359", "text": "def plot_predictions(self, save_fig = 'no', path = ''):\n axis_x = [r'$\\mathit{f}_{\\ast,true}$', r'$V_{circ,true}$',r'$\\mathit{f}_{X,true}$', r'$\\tau_{CMB,true}$' ]\n axis_y = [r'$\\mathit{f}_{\\ast,pred}$', r'$V_{circ,pred}$',r'$\\mathit{f}_{X,pred}$', r'$\\tau_{CMB,pred}$' ]\n\n #axis_x = [r'$\\mathit{k1}_{true}$', r'$k2_{true}$',r'$\\mathit{k3}_{true}$']\n #axis_y = [r'$\\mathit{k1}_{pred}$', r'$k2_{pred}$',r'$\\mathit{k3}_{pred}$']\n\n fig = plt.figure(figsize = (800/50, 800/96), dpi = 96)\n\n for i in range(len(self.expected_outputs[0])):\n\n x = list(map(list, zip(*self.expected_outputs)))[i]\n y = list(map(list, zip(*self.predictions)))[i]\n\n ax = plt.subplot(2,2,i+1)\n\n if (i == 1) or (i==0) or (i ==2): #or (i ==3):\n #if (i ==3):\n\n ax.loglog( x, y, '+')\n\n ax.set_xlim(min(min(x) - 0.1*min(x), min(y) - 0.1*min(y)),\n max(max(x) + 0.4*max(x), max(y) + 0.4*max(y)))\n ax.set_ylim(min(min(x) - 0.1*min(x), min(y) - 0.1*min(y)),\n max(max(x) + 0.4*max(x), max(y) + 
0.4*max(y)))\n\n x_vals = np.array(plt.xlim())\n y_vals = x_vals\n ax.loglog(x_vals, y_vals, '--', color = 'k', linewidth = 1.0)\n\n else:\n ax.plot( x, y, '+')\n ax.set_xlim(min(min(x) - 0.1*min(x), min(y) - 0.1*min(y)),\n max(max(x) + 0.1*max(x), max(y) + 0.1*max(y)))\n ax.set_ylim(min(min(x) - 0.1*min(x), min(y) - 0.1*min(y)),\n max(max(x) + 0.1*max(x), max(y) + 0.1*max(y)))\n x_vals = np.array(plt.xlim())\n y_vals = x_vals\n ax.plot(x_vals, y_vals, '--', color = 'k', linewidth = 1.0)\n\n ax.grid()\n ax.set_xlabel(axis_x[i], size = 13)\n ax.set_ylabel(axis_y[i], size = 13)\n\n fig.subplots_adjust(top = 0.9, bottom = 0.1, hspace = 0.3, wspace = 0.25, right = 0.93, left = 0.08)\n #plt.show()\n\n if (save_fig == 'yes'):\n fig.savefig(path + self.model_datetime +'/predictions.png', dpi = 100)\n\n return", "title": "" }, { "docid": "c7739aec2aa09b73fa572efd084eda10", "score": "0.66924506", "text": "def plot(self):\n # return None, None\n plot_step = 0.05\n colors = [\"r\", \"y\", \"b\"]\n # The 4 lines below create the input dataframe consisting of points in R2\n x_low, x_high = self.X.loc[:,0].min()-1, self.X.loc[:,0].max()+1\n y_low, y_high = self.X.loc[:,1].min()-1, self.X.loc[:,1].max()+1\n x_axis, y_axis = np.meshgrid(np.arange(x_low, x_high, plot_step),np.arange(y_low, y_high, plot_step))\n X = pd.DataFrame(np.c_[x_axis.ravel(), y_axis.ravel()])\n\n\n fig, ax = plt.subplots(1,self.n_estimators)\n for i in range(self.n_estimators):\n # Plotting the boundary for all the learnt trees\n ax[i].set_title(\"Iteration : \"+str(i))\n y = self.models[i].predict(X)\n y = np.array(y).reshape(y_axis.shape)\n # Below line plots the prediction to the dataframe\n ax[i].contourf(x_axis, y_axis, y, cmap = plt.get_cmap(\"RdBu\"))\n for row in range(len(self.X)):\n # Below line plots the training points\n ax[i].scatter(self.X.loc[row][0], self.X.loc[row][1], color = colors[self.y[row]])\n fig1 = fig\n plt.show()\n\n # Plotting the boundary for the overall classifier\n fig, ax = plt.subplots(1,1)\n fig.suptitle(\"Random Forest Learnt Boundary\")\n y = self.predict(X)\n y = np.array(y).reshape(y_axis.shape)\n ax.contourf(x_axis, y_axis, y, cmap = plt.get_cmap(\"RdBu\"))\n for row in range(len(self.X)):\n ax.scatter(self.X.loc[row][0], self.X.loc[row][1], color = colors[self.y[row]])\n fig2 = fig\n plt.show()\n return fig1,fig2", "title": "" }, { "docid": "cb8d73aa03a7522e5a56e8b93b006cf6", "score": "0.66895324", "text": "def plot_train_result(history, best_epoch=None, save_path=None):\n fig = plt.figure(figsize=(15, 20))\n gs = GridSpec(2, 2, figure=fig)\n\n ax1 = fig.add_subplot(gs[0, :]) # 1st row, entire row : global VAE loss\n ax2 = fig.add_subplot(gs[1, :]) # 2ndst row, entire row on 2 * 2 grid: reconstruction\n\n # ax3 = fig.add_subplot(gs[2, 0]) # top left on a 4x4 grid: KL divergence\n # ax4 = fig.add_subplot(gs[2, 1]) # bottom right on a 4x4 grid: MI\n\n\n # plot the overall loss\n ax1.set_title('Loss')\n ax1.plot(history['train_loss'], color='dodgerblue', label='train')\n ax1.plot(history['test_loss'], linestyle='--', color='lightsalmon', label='test')\n if best_epoch:\n ax1.axvline(best_epoch, linestyle='--', color='r', label='Early stopping')\n\n ax2.set_title('Accuracy')\n ax2.plot(history['train_acc'], color='dodgerblue', label='train')\n ax2.plot(history['test_acc'], linestyle='--', color='lightsalmon', label='test')\n\n ax1.legend()\n ax2.legend()\n\n if save_path:\n plt.savefig(os.path.join(save_path, 'loss_eval.png'))\n\n plt.show()\n\n return fig", "title": "" }, { "docid": 
"cf8a35e9ee0cd1644755ccffa5df0a96", "score": "0.668359", "text": "def plot_results(models, data, batch_size):\r\n\r\n encoder, decoder = models\r\n x_test, y_test = data\r\n\r\n filename = 'vae_mean.png'\r\n # display a 2D plot of the digit classes in the latent space\r\n z_mean, _, _ = encoder.predict(x_test, batch_size=batch_size)\r\n plt.figure(figsize=(12, 10))\r\n plt.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test)\r\n plt.colorbar()\r\n plt.xlabel(\"z[0]\")\r\n plt.ylabel(\"z[1]\")\r\n plt.tight_layout()\r\n plt.savefig(filename)\r\n plt.show()\r\n\r\n filename = 'digits_over_latent.png'\r\n # display a 30x30 2D manifold of digits\r\n n = 30\r\n digit_size = 28\r\n figure = np.zeros((digit_size * n, digit_size * n))\r\n # linearly spaced coordinates corresponding to the 2D plot\r\n # of digit classes in the latent space\r\n grid_x = np.linspace(-4, 4, n)\r\n grid_y = np.linspace(-4, 4, n)[::-1]\r\n\r\n for i, yi in enumerate(grid_y):\r\n for j, xi in enumerate(grid_x):\r\n z_sample = np.array([[xi, yi]])\r\n x_decoded = decoder.predict(z_sample)\r\n digit = x_decoded[0].reshape(digit_size, digit_size)\r\n figure[i * digit_size: (i + 1) * digit_size,\r\n j * digit_size: (j + 1) * digit_size] = digit\r\n\r\n plt.figure(figsize=(10, 10))\r\n start_range = digit_size // 2\r\n end_range = n * digit_size + start_range + 1\r\n pixel_range = np.arange(start_range, end_range, digit_size)\r\n sample_range_x = np.round(grid_x, 1)\r\n sample_range_y = np.round(grid_y, 1)\r\n plt.xticks(pixel_range, sample_range_x)\r\n plt.yticks(pixel_range, sample_range_y)\r\n plt.xlabel(\"z[0]\")\r\n plt.ylabel(\"z[1]\")\r\n plt.tight_layout()\r\n plt.imshow(figure, cmap='Greys_r')\r\n plt.savefig(filename)\r\n plt.show()", "title": "" }, { "docid": "89de2a2bd4064539a11443a30fbef2ad", "score": "0.6667843", "text": "def plot_keras_history(history):\n \"\"\" history: keras result form model.fit \"\"\"\n plt.clf()\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()", "title": "" }, { "docid": "d3bccd6f012da3306dbe8720537d6e56", "score": "0.6665484", "text": "def plot_results(models, data, session):\r\n \r\n encoder, decoder = models\r\n x_test, y_test = data\r\n sess = session\r\n \r\n filename = 'vae_mean.png'\r\n # display a 2D plot of the digit classes in the latent space\r\n z_mean, _, _ = sess.run(encoder, feed_dict={X: x_test})\r\n plt.figure(figsize=(12, 10))\r\n plt.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test)\r\n plt.colorbar()\r\n plt.xlabel(\"z[0]\")\r\n plt.ylabel(\"z[1]\")\r\n plt.tight_layout()\r\n plt.savefig(filename)\r\n plt.show()\r\n\r\n filename = 'digits_over_latent.png'\r\n # display a 30x30 2D manifold of digits\r\n n = 30\r\n digit_size = 28\r\n figure = np.zeros((digit_size * n, digit_size * n))\r\n # linearly spaced coordinates corresponding to the 2D plot\r\n # of digit classes in the latent space\r\n grid_x = np.linspace(-4, 4, n)\r\n grid_y = np.linspace(-4, 4, n)[::-1]\r\n\r\n for i, yi in enumerate(grid_y):\r\n for j, xi in enumerate(grid_x):\r\n z_sample = np.array([[xi, yi]])\r\n x_decoded = sess.run(decoder, feed_dict={Z: z_sample})\r\n digit = x_decoded[0].reshape(digit_size, digit_size)\r\n figure[i * digit_size: (i 
+ 1) * digit_size,\r\n j * digit_size: (j + 1) * digit_size] = digit\r\n\r\n plt.figure(figsize=(10, 10))\r\n start_range = digit_size // 2\r\n end_range = n * digit_size + start_range + 1\r\n pixel_range = np.arange(start_range, end_range, digit_size)\r\n sample_range_x = np.round(grid_x, 1)\r\n sample_range_y = np.round(grid_y, 1)\r\n plt.xticks(pixel_range, sample_range_x)\r\n plt.yticks(pixel_range, sample_range_y)\r\n plt.xlabel(\"z[0]\")\r\n plt.ylabel(\"z[1]\")\r\n plt.tight_layout()\r\n plt.imshow(figure, cmap='Greys_r')\r\n plt.savefig(filename)\r\n plt.show()", "title": "" }, { "docid": "1ee6152ce48b7d71e028dd3859052f4e", "score": "0.664598", "text": "def plot_lr(self):\n if not self.train_outputs and not self.test_outputs:\n print(\"You need to fetch data first using fetch_range() or fetch_last().\")\n return\n\n f = plt.figure()\n plt.plot(self.cache_log_train[:, 0], self.cache_log_train[:, 1], label=r'lr')\n plt.xlabel('iteration')\n plt.legend(loc='best')\n plt.grid()", "title": "" }, { "docid": "aac22337d11ab180cd72f3d75222bf20", "score": "0.6641936", "text": "def draw_graph(self):\n if self.history != None:\n # plot accuracy during training\n # judge overfit/early stop\n plt.plot(self.history['accuracy'])\n plt.plot(self.history['val_accuracy'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n plt.show()\n # plot loss during training\n plt.plot(self.history['loss'])\n plt.plot(self.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n plt.show()\n else:\n print(\"You can't draw yet... maybe load or train a model first!\")", "title": "" }, { "docid": "6b025f98fadd00506df06bf8fd685687", "score": "0.661944", "text": "def plot_training(self):\n fig, axs = plt.subplots(1,3, figsize=(15, 4))\n fig.subplots_adjust(hspace = .5, wspace=0.3)\n axs = axs.ravel()\n axs[0].plot(self.train_cost_log,label='train cost')\n axs[0].plot(self.val_cost_log,label='val cost')\n axs[0].legend(loc='best')\n axs[0].set_title('Cost function')\n axs[0].set_xlabel('epochs', fontsize=10)\n axs[0].grid()\n axs[1].plot(self.train_loss_log,label='train loss')\n axs[1].plot(self.val_loss_log,label='val loss')\n axs[1].legend(loc='best')\n axs[1].set_title('loss function')\n axs[1].set_xlabel('epochs', fontsize=10)\n axs[1].grid()\n axs[2].plot(self.train_acc_log,label='train accuracy')\n axs[2].plot(self.val_acc_log,label='val accuracy')\n axs[2].legend(loc='best')\n axs[2].set_title('Accuracy')\n axs[2].set_xlabel('epochs', fontsize=10)\n axs[2].grid()\n plt.show()", "title": "" }, { "docid": "54d341932b7142d9bc9ae0f0cbd84f28", "score": "0.6604968", "text": "def evaluate_model_plot_history(model, train_generator, test_generator,as_df=False, plot=True):\n from IPython.display import display\n import pandas as pd\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n import functions_combined_BEST as ji\n print('\\n')\n print('---'*28)\n print('\\tEVALUATE MODEL:')\n print('---'*28)\n # duration = print(clock._lap_duration_)\n model_results = model.history.history\n \n if plot==True and len(model.history.epoch)>1:\n\n # ji.plot_keras_history()\n fig, ax = plt.subplots(figsize=(6,3))\n\n for k,v in model_results.items():\n ax.plot(range(len(v)),v, label=k)\n \n plt.title('Model Training History')\n ax.set_xlabel('Epoch #',**{'size':12,'weight':70})\n ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(integer=True))\n\n plt.legend()\n 
plt.show()\n\n\n # # EVALUATE MODEL PREDICTIONS FROM GENERATOR \n print('Evaluating Train Generator:')\n model_metrics_train = model.evaluate_generator(train_generator,verbose=1)\n print('Evaluating Test Generator:')\n model_metrics_test = model.evaluate_generator(test_generator,verbose=1)\n # print(model_metrics_test)\n\n eval_gen_dict = {}\n eval_gen_dict['Train Data'] = dict(zip(model.metrics_names,model_metrics_train))\n eval_gen_dict['Test Data'] = dict(zip(model.metrics_names,model_metrics_test))\n df_eval = pd.DataFrame(eval_gen_dict).round(4).T\n display(df_eval.style.set_caption('Model Evaluation Results'))\n\n if as_df:\n return df_eval\n else:\n return eval_gen_dict", "title": "" }, { "docid": "0966ff6a9bd0ebad69028bee871fa3a9", "score": "0.6601335", "text": "def plot_history (self):\n if not self.loaded_trained: \n raise ValueError(\"Train or load a model beforehand\")\n if self.history is None: \n raise ValueError(\"History unavailable\")\n # list all data in history\n print(self.history.history.keys())\n # summarize history for accuracy\n fig, axs = plt.subplots(2, 2)\n axs[0, 0].plot(history.history['acc'])\n axs[0, 0].plot(history.history['val_accuracy'])\n axs[0, 0].title('model accuracy')\n axs[0, 0].ylabel('accuracy')\n axs[0, 0].xlabel('epoch')\n axs[0, 0].legend(['train', 'validation'], loc='upper left')\n # summarize history for f1 score\n axs[0, 1].plot(history.history['f1_m'])\n axs[0, 1].plot(history.history['val_f1_m'])\n axs[0, 1].title('model f1 score')\n axs[0, 1].ylabel('f1 score')\n axs[0, 1].xlabel('epoch')\n axs[0, 1].legend(['train', 'validation'], loc='upper left')\n # summarize history for precision\n axs[1, 0].plot(history.history['precision_m'])\n axs[1, 0].plot(history.history['val_precision_m'])\n axs[1, 0].title('model precision')\n axs[1, 0].ylabel('precision')\n axs[1, 0].xlabel('epoch')\n axs[1, 0].legend(['train', 'validation'], loc='upper left')\n # summarize history for recall \n axs[1, 0].plot(history.history['recall_m'])\n axs[1, 0].plot(history.history['val_recall_m'])\n axs[1, 0].title('model recall')\n axs[1, 0].ylabel('recall')\n axs[1, 0].xlabel('epoch')\n axs[1, 0].legend(['train', 'validation'], loc='upper left')\n plt.show()", "title": "" }, { "docid": "4783abe3a9d411e61a28b6f99229cabe", "score": "0.6594694", "text": "def CreatePlots(self):\n AliMLModelResultsBase.CreatePlots(self)\n\n # Test dataset and evaluate scores\n labels = ['Class 1', 'Class 2']\n\n ##### Save histograms/plots\n epochs = range(len(self.fHistoryNumberEvents))\n\n # Accuracy\n AliMLHelpers.SavePlot('./Results/{:s}-Accuracy.png'.format(self.fModelName), 'Accuracy function', x=epochs, y=(self.fHistoryAccuracyTraining, self.fHistoryAccuracyValidation), functionlabels=('Training', 'Validation'), legendloc='lower right')\n\n # Score\n AliMLHelpers.SaveHistogram('./Results/{:s}-Scores.png'.format(self.fModelName), 'Scores on validation data', tuple(self.fCurrentTestScores), tuple(labels), rangex=(0,1), logY=True)\n # AUC\n AliMLHelpers.SavePlot('./Results/{:s}-AUC.png'.format(self.fModelName), 'AUC values', x=self.fHistoryNumberEvents, y=(self.fHistoryAUC,), functionlabels=('AUC',), legendloc='lower right')\n # ROC\n AliMLHelpers.SavePlot('./Results/{:s}-ROC.png'.format(self.fModelName), 'ROC curve', x=self.fCurrentROCy, y=(self.fCurrentROCx,self.fCurrentROCy), functionlabels=('(AUC={0:.3f})'.format(self.fCurrentAUC),'Guess ROC'), rangex=(0,1.1), legendloc='lower right', axislabels=('False Positive Rate', 'True Positive Rate') )", "title": "" }, { "docid": 
"71fe16781c43d15b155fe78f5b8f797c", "score": "0.6591169", "text": "def plot_metrics(self):\n fig = plt.figure()\n ax = plt.subplot(111)\n # ax.plot(self.early_history.history['val_loss'], label='val_loss')\n # ax.plot(self.early_history.history['mae'], label='mae')\n ax.plot(self.early_history.history[\"mse\"], label=\"mse\")\n plt.xlabel(\"Epochs\")\n ax.legend()\n plt.show()", "title": "" }, { "docid": "5822e7dc025245449bd386c2248c6b09", "score": "0.6581818", "text": "def plotHistory(history):\n pd.DataFrame(history).plot(figsize=(8, 5))\n plt.grid(True)\n plt.title(\"Training Curves of Traditional Model\")\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Loss and Accuracy\")\n plt.gca().set_ylim(0, 1)\n\n plt.show()", "title": "" }, { "docid": "ab542a45258d767364e750fbdcec0f3d", "score": "0.65648776", "text": "def plot_gam_by_predictor(\n model_dict, model_index, X_data, y_data, dataset=\"train\", suptitle_y=1\n):\n # reset indices to prevent index match errors\n X_data = X_data.copy().reset_index(drop=True)\n y_data = y_data.copy().reset_index(drop=True)\n\n idx = model_index\n model = model_dict[\"model\"][idx]\n X_varnames = X_data.columns\n y_varname = model_dict[\"y_variables\"][idx].replace(\"_\", \" \")\n model_desc = model_dict[\"description\"]\n\n n_X_vars = len(X_varnames)\n n_rows = np.ceil(n_X_vars / 2).astype(int)\n\n # generate deviance residuals\n res = model.deviance_residuals(X_data, y_data.iloc[:, idx])\n\n # plot all predictors against price to visualize relationship\n fig, axes = plt.subplots(n_rows, 2, sharey=False, figsize=(12, 4 * n_rows))\n\n plt.suptitle(\n \"{} predictions:\\nContribution of each predictor to overall function \"\n \"(partial dependence and 95% CI)\\n{}\\n\"\n \"Illustrated with {} observations\".format(\n y_varname.upper(),\n model_desc,\n \"training\" if dataset == \"train\" else \"TEST\",\n ),\n fontsize=18,\n y=suptitle_y,\n )\n\n for (i, ax), term in zip(enumerate(axes.flat), model.terms):\n if term.isintercept:\n continue\n\n XX = model.generate_X_grid(term=i)\n pdep, confi = model.partial_dependence(term=i, X=XX, width=0.95)\n pdep2, _ = model.partial_dependence(term=i, X=X_data, width=0.95)\n\n ax.scatter(\n X_data.iloc[:, term.feature], pdep2 + res, color=\"silver\", alpha=1,\n )\n ax.plot(XX[:, term.feature], pdep, \"k-\")\n ax.plot(XX[:, term.feature], confi, c=\"k\", ls=\"--\")\n\n ax.set_title(X_varnames[i], fontsize=14)\n ax.set_xlabel(\"observed values\", fontsize=12)\n ax.grid(\":\", alpha=0.4)\n\n if i % 2 == 0:\n ax.set_ylabel(\"partial dependence\", fontsize=12)\n\n # hide all markings for final missing axes in odd number predictors\n n_fewer = n_X_vars % 2\n if n_fewer != 0:\n for pos in [\"right\", \"top\", \"bottom\", \"left\"]:\n axes[n_rows - 1, -n_fewer].spines[pos].set_visible(False)\n axes[n_rows - 1, -n_fewer].tick_params(\n axis=\"x\", which=\"both\", bottom=False, top=False, labelbottom=False\n )\n axes[n_rows - 1, -n_fewer].tick_params(\n axis=\"y\", which=\"both\", right=False, left=False, labelleft=False\n )\n\n plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "c693e5d18325873815f1985209ac4ee5", "score": "0.6564375", "text": "def plot_map_stage(data,NeuralNet,loss_hist,datapath,k,img_size=50,channel_stride=1,sample_stride=10,overwrite=False,savefig=False):\n model = NeuralNet\n state_dict = torch.load('%s/model%02i.pt'%(datapath,k))\n model.load_state_dict(state_dict)\n model.eval()\n if os.path.exists('%s/model%02i.npy'%(datapath,k)) and overwrite==False:\n prob_map = 
numpy.load('%s/model%02i.npy'%(datapath,k))\n else:\n prob_map = extract_prob_map(data,model,img_size,channel_stride,sample_stride)\n numpy.save('%s/model%02i'%(datapath,k),prob_map)\n plt.style.use('seaborn')\n fig,ax = plt.subplots(2,2,figsize=(18,10),dpi=80)\n ax[0][0].plot(loss_hist[:,3]/loss_hist[:,2],label='Training loss')\n ax[0][0].plot(loss_hist[:,6]/loss_hist[:,5],label='Validation loss')\n ax[0][0].axvline(k,color='black',lw=0.8)\n ax[0][0].scatter([k,k],[loss_hist[k,3]/loss_hist[k,2],loss_hist[k,6]/loss_hist[k,5]],color='black',zorder=3)\n ax[0][0].set_xlabel('Batch Iterations')\n ax[0][0].set_ylabel('Loss')\n ax[0][0].legend(frameon=False)\n ax[0][1].plot(100*loss_hist[:,4]/loss_hist[:,2],label='Training accuracy')\n ax[0][1].plot(100*loss_hist[:,7]/loss_hist[:,5],label='Validation accuracy')\n ax[0][1].axvline(k,color='black',lw=0.8)\n ax[0][1].scatter([k,k],[100*loss_hist[k,4]/loss_hist[k,2],100*loss_hist[k,7]/loss_hist[k,5]],color='black',zorder=3)\n ax[0][1].set_xlabel('Batch Iterations')\n ax[0][1].set_ylabel('Accuracy')\n ax[0][1].legend(frameon=False)\n ax[1][0].imshow(data,aspect='auto',cmap='inferno')\n ax[1][0].set_title('Raw strain measurements')\n ax[1][0].set_xlabel('Samples')\n ax[1][0].set_ylabel('Channels')\n ax[1][1].imshow(prob_map,aspect='auto',cmap='jet',vmin=0,vmax=1)\n ax[1][1].set_title('Probability map')\n ax[1][1].set_xlabel('Samples')\n plt.tight_layout()\n if savefig:\n plt.savefig('%s/model%02i.png'%(datapath,k),dpi=200)\n plt.close()\n else:\n plt.show()", "title": "" }, { "docid": "aaa984a290d8a096c6ff9f14e235b236", "score": "0.6562105", "text": "def plot(self, history: dict):\n\n # Plot training & validation accuracy values\n fig, ax = plt.subplots(1, 2)\n ax[0].plot(history.history[\"accuracy\"], label=\"Train\")\n ax[0].plot(history.history[\"val_accuracy\"], label=\"Test\")\n ax[0].set_title(\"Model accuracy\")\n ax[0].set_ylabel(\"Accuracy\")\n ax[0].set_xlabel(\"Epoch\")\n ax[0].set_ylim(0, 1)\n # Plot training & validation loss values\n ax[1].plot(history.history[\"loss\"], label=\"Train\")\n ax[1].plot(history.history[\"val_loss\"], label=\"Test\")\n ax[1].set_title(\"Model loss\")\n ax[1].set_ylabel(\"Loss\")\n ax[1].set_xlabel(\"Epoch\")\n ax[1].set_ylim(0, 2)\n plt.legend(loc=\"upper left\")\n plt.show()", "title": "" }, { "docid": "bcb468ee460f38b3d5d8b16f56761cef", "score": "0.65550065", "text": "def plot_history(history_obj):\n plt.plot(history_obj.history['loss'])\n plt.plot(history_obj.history['val_loss'])\n plt.title('model mean squared error loss')\n plt.ylabel('mean squared error loss')\n plt.xlabel('epoch')\n plt.legend(['training set', 'validation set'], loc='upper right')\n plt.show()", "title": "" }, { "docid": "bb2849cf6c05ac7ec9f793dec674daa5", "score": "0.6553949", "text": "def refresh_plot(self):\n P = self.model.predict(x=self.X)\n P = np.reshape(P, [-1, 512, 512, 2])\n for i in range(0,self.numviz):\n self.axes[i,1].imshow(P[i,:,:,0],cmap='gray')\n plt.pause(0.0001)", "title": "" }, { "docid": "1b19aa653e328524547b718c6afdcbe5", "score": "0.65429944", "text": "def plot(self):\n fig, ax = plt.subplots(figsize=(15, 9))\n ax.plot(self.best_y[:, 0], color='blue', label=\"Target\")\n ax.plot(self.best_pred[:, 0], color='red', label=\"Prediction\")\n ax.legend()\n ax.set_title(\"ShipmentCases Forecast vs Target for {}\".format(self.name))\n ax.grid(True)\n fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n fig.savefig(\"{}_{}_step.png\".format(self.name, self.best_y.size(0)))", "title": "" }, { "docid": 
"ecdd297b31282c4bf3e75a003953327a", "score": "0.6534831", "text": "def plot_result_data(self, digits, predictions):\n digits_and_predictions = list(zip(digits, predictions))\n for index, (image, label) in enumerate(digits_and_predictions):\n plt.subplot(2, 4, index + 5)\n plt.axis('off')\n plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')\n plt.title('Prediction: %i' % label)\n plt.show()", "title": "" }, { "docid": "08a6dd55075cc694790dfa828930f52b", "score": "0.6531651", "text": "def plot_bad_predictions(normalized_train, y_train, y_pred, classifier=\"KNN\", resolution='8x8'):\n\n \n a = int(input(\"Bad Prediction 1: \"))\n b = int(input(\"Bad Prediction 2: \"))\n \n y_train_ints = y_train.astype(int)\n y_pred_ints = y_pred.astype(int)\n \n X_aa = normalized_train[(y_train_ints == a) & (y_pred_ints == a)]\n X_ab = normalized_train[(y_train_ints == a) & (y_pred_ints == b)]\n X_ba = normalized_train[(y_train_ints == b) & (y_pred_ints == a)]\n X_bb = normalized_train[(y_train_ints == b) & (y_pred_ints == b)]\n\n fig = plt.figure(figsize=(8,8))\n ax1 = fig.add_subplot(221); show_digits(X_aa[:25], images_per_row=5)\n ax2 = fig.add_subplot(222); show_digits(X_ab[:25], images_per_row=5)\n ax3 = fig.add_subplot(223); show_digits(X_ba[:25], images_per_row=5)\n ax4 = fig.add_subplot(224); show_digits(X_bb[:25], images_per_row=5)\n \n ax1.title.set_text('Correctly Classified as ' + str(a))\n ax2.title.set_text('Incorrectly Classified as ' + str(b)) \n ax3.title.set_text('Incorrectly Classified as ' + str(a))\n ax4.title.set_text('Correctly Classified as ' + str(b))\n \n plt.savefig('figs/{} Bad Predictions for ({} pixels)'.format(classifier, resolution))\n plt.show(block=False)\n plt.pause(3)\n plt.close()", "title": "" }, { "docid": "0ee15fbd378910a867f2646d75fe2875", "score": "0.65228784", "text": "def plot(self):\n if not self.train_outputs and not self.test_outputs:\n print(\"You need to fetch data first using fetch_range() or fetch_last().\")\n return\n num_graphs = len(self.train_outputs + self.test_outputs)\n colormap = cm.winter(np.linspace(0, 1, num_graphs))\n graph_count = 0\n f = plt.figure()\n iterations = self.cache_log_train[:, 0].astype(int)\n for train_output in self.train_outputs:\n plt.plot(iterations, [x[train_output] for x in self.cache_log_train[:, 2]],\n label=train_output, color=colormap[graph_count])\n graph_count += 1\n\n iterations = self.cache_log_test[:, 0].astype(int)\n for test_output in self.test_outputs:\n plt.plot(iterations, [x[test_output] for x in self.cache_log_test[:, 2]], label=test_output,\n color=colormap[graph_count])\n graph_count += 1\n plt.xlabel('iteration')\n plt.legend(loc='best')\n plt.grid()", "title": "" }, { "docid": "3c364a6a9f8eb28ee004ef902d904b0c", "score": "0.6513268", "text": "def plotting(data,rmse,parameters):\n no_points = parameters[0]\n layer_no = parameters[5]\n option = parameters[1]\n# single_input = parameters[2]\n model = parameters[3]\n Xtrain = data[0]\n Ytrain = data[1]\n Xtest = data[2]\n Ytest = data[3]\n post_mean = data[4]\n post_var = data[5]\n # temp1 = Xtest[no_points:]\n # Xplot = np.concatenate((Xtrain,temp1),axis=0)\n s = np.sqrt(post_var.flatten())\n mu = post_mean.flatten()\n plt.figure(1)\n plt.clf()\n plt.hold(True)\n \n if model == 'Aug_Duvenaud' or model == 'Aug_inputs':\n plt.plot(Xtrain[:,1].flatten(),Ytrain.flatten(), 'r.',label = 'training data')\n elif model == 'Single_inputs':\n plt.plot(Xtrain.flatten(),Ytrain.flatten(), 'r.',label = 'training data')\n \n 
plt.plot(Xtest[no_points:,0].flatten(), Ytest[no_points:].flatten(), 'b.', label='test data')\n plt.fill_between(Xtest[:,0].flatten(), mu-2*s, mu+2*s, color=\"#C0C0C0\", label = 'mu +/- 2sd')\n plt.plot(Xtest[:,0].flatten(), mu, 'w-', lw=1, label = 'Prediction')\n\n plt.legend(loc='upper right')\n plt.title(\"This is layer \"+str(layer_no)+\" with NRMSE \" + str(rmse))\n name_of_plot = 'Prediction_in_layer_' + str(layer_no)\n PATH = '../Plots/'+ model + '/'+ option\n os.makedirs(PATH,exist_ok = True)\n file = PATH + '/_' + name_of_plot +'_'+str(parameters[0]) +'.png'\n plt.savefig(file, format = 'png',dpi=600)", "title": "" }, { "docid": "aae9febe07fac9073f647d5b84fe6b8c", "score": "0.650203", "text": "def plotPrediction(self, fit, ax):\n\n sub = self.sub\n if len(sub) == 0:\n sub = X.index\n Xout = self.X.ix[-self.X.index.isin(sub)]\n yout = self.y.ix[-self.y.index.isin(sub)]\n ypred = fit.predict(Xout)\n ax.scatter(yout, ypred, alpha=0.6, edgecolor='black',\n color='blue', lw=0.5, label='fit')\n ax.plot(ax.get_xlim(), ax.get_xlim(), ls=\"--\", lw=2, c=\".2\")\n ax.set_xlabel('test')\n ax.set_ylabel('predicted')\n ax.set_title('predicted vs test data')\n import statsmodels.tools.eval_measures as em\n yt = yout.squeeze().values\n rmse = em.rmse(yt, ypred)\n ax.text(0.9,0.1,'rmse: '+ str(round(rmse,3)),ha='right',\n va='top', transform=ax.transAxes)\n return", "title": "" }, { "docid": "c498f016e0dcd86bd9b2d5d3f586463a", "score": "0.64968574", "text": "def plot_model(model, test_data, save=False):\n\n if test_data.multiple_trajectories:\n i = randrange(len(test_data.x))\n test_data.x = test_data.x[i]\n test_data.x_dot = test_data.x_dot[i]\n test_data.u = test_data.u[i]\n test_data.t = test_data.t[i]\n\n u_interp = interp1d(test_data.t, test_data.u, axis=0, kind='cubic',fill_value=\"extrapolate\")\n x_predicted = model.simulate(test_data.x[0], test_data.t, u=u_interp)\n \n fig, axs = plt.subplots(len(FEATURES)+1, 1, figsize=(9, 16))\n\n axs[0].plot(test_data.x[:,0], test_data.x[:,1], 'g+', label='simulation (ground truth)')\n axs[0].plot(x_predicted[:,0], x_predicted[:, 1], 'r+', label='model')\n axs[0].invert_yaxis()\n axs[0].legend()\n axs[0].set(title='Car path', xlabel=r'$x$', ylabel=r'$y$')\n\n for i, feature in enumerate(FEATURES):\n axs[i+1].plot(test_data.t, test_data.x[:,i], 'k', label='true simulation')\n axs[i+1].plot(test_data.t, x_predicted[:,i], 'r--', label='model simulation')\n axs[i+1].legend()\n axs[i+1].set(title=feature, xlabel=r'$t\\ [s]$', ylabel=feature)\n\n plt.tight_layout()\n plt.show()\n\n if save: plt.savefig('plot.png')", "title": "" }, { "docid": "b96c183da1f6dcf9cc9c444a60c2f44e", "score": "0.6491601", "text": "def visualize(self, data, name):\n # Figure / axis set up\n fig, ax = plt.subplots()\n\n # We'll plot the list of params and their accuracy\n ax.plot(data.keys(), data.values())\n\n # Title\n ax.set_title(rf'{self.data_name} {name} Tune Results')\n\n # X axis\n ax.set_xlabel(name)\n ax.set_xlim(min(data.keys()), max(data.keys()))\n ax.set_xticks(list(data.keys()))\n ax.set_xticklabels(list(data.keys()), rotation=45, fontsize=6)\n\n # Y axis\n ax.set_ylabel('Target')\n\n # Saving\n plt.savefig(f'output_{self.data_name}\\\\{self.data_name}_{self.hidden_layers_count}_layers_tune_{name}.jpg')", "title": "" }, { "docid": "863e985cd79ec74e443a6d4999108b86", "score": "0.6489213", "text": "def validation_plot(self, history, metric):\r\n plt.plot(history.history[metric])\r\n plt.xlabel(\"Epochs\")\r\n plt.ylabel(metric)\r\n plt.show()", "title": "" }, { "docid": 
"3382722c38f9917ed96b8faaf82c0fe9", "score": "0.6483504", "text": "def visualise(history):\r\n plt.subplot(211)\r\n plt.title('Accuracy')\r\n plt.plot(history.history['val_sparse_categorical_accuracy'],\r\n color='r', label='Train')\r\n plt.show()", "title": "" }, { "docid": "14a66ff066c21c118cc47efceedb2b7b", "score": "0.6478059", "text": "def plot_results(results_dir, results, model):\n results_df = df.from_dict(results)\n\n fig, ax1 = plt.subplots()\n ax1.plot(results_df['epoch'], results_df['val_accuracy'], color='tab:orange', marker='o', label='Val accuracy')\n ax1.plot(results_df['epoch'], results_df['train_accuracy'], color='tab:red', marker='o', label='Train accuracy')\n ax1.set_xlabel('Epoch')\n ax1.set_ylabel('Accuracy')\n ax1.tick_params('y')\n ax1.set_ylim([0, 1])\n\n ax2 = ax1.twinx()\n ax2.plot(results_df['epoch'], results_df['val_loss'], color='tab:blue', marker='o', label='Val loss')\n ax2.plot(results_df['epoch'], results_df['train_loss'], color='tab:green', marker='o', label='Train loss')\n ax2.set_ylabel('Loss')\n ax2.tick_params('y')\n # ax2.set_ylim([0, 5])\n\n h1, l1 = ax1.get_legend_handles_labels()\n h2, l2 = ax2.get_legend_handles_labels()\n ax1.legend(h1+h2, l1+l2, loc=0)\n plt.tight_layout()\n plot_path = results_dir / f'{model.__class__.__name__}_accuracy_loss_curves.png'\n plt.savefig(plot_path)\n plt.show()", "title": "" }, { "docid": "6a700a4f19719277e6035bf9bb94efaa", "score": "0.64709663", "text": "def on_train_begin(self, logs={}):\n fig, axes = plt.subplots(self.numviz, 3)\n P = np.zeros(self.Y.shape)\n for i in range(0,self.numviz):\n axes[i,0].imshow(self.X[i,:,:,0],cmap='gray')\n plt.pause(0.0001)\n axes[i,1].imshow(P[i,:,:,0],cmap='gray')\n plt.pause(0.0001)\n axes[i,2].imshow(self.Y[i,:,:,0],cmap='gray')\n plt.pause(0.0001)\n for j in range(0,3):\n axes[i,j].xaxis.set_visible(False)\n axes[i,j].yaxis.set_visible(False)\n plt.tight_layout()\n plt.ion()\n plt.show()\n self.axes = axes\n self.fig = fig", "title": "" }, { "docid": "00b616c70c89287a224363de08bf543d", "score": "0.64704514", "text": "def plot_data(self, results): # Ok!\r\n\r\n# self.save_latex(results)\r\n\r\n training_df, prob_training_clip, prob_testing_clip, \\\r\n prob_testing_clip_marginalized, prob_testing_clip_category, avg_NE_itr,\\\r\n W_in, P, Tau = results\r\n\r\n plot_blocks = self.environment.plot_blocks\r\n plot_blocks_ID = self.environment.plot_blocks_ID\r\n\r\n show = []\r\n result = {}\r\n\r\n show.append(('W_in' , 'heatmap'))\r\n result['W_in'] = W_in\r\n\r\n show.append(('P matrix' , 'heatmap'))\r\n result['P matrix'] = P\r\n\r\n show.append(('Tau matrix' , 'heatmap'))\r\n result['Tau matrix'] = Tau\r\n\r\n result_1 = prob_testing_clip.copy()\r\n show.append(('General Pairwise probability', 'heatmap'))\r\n result['General Pairwise probability'] = result_1\r\n\r\n result_2 = prob_testing_clip_marginalized\r\n show.append(('Within category probability', 'heatmap'))\r\n result['Within category probability'] = result_2\r\n\r\n result_3 = prob_testing_clip_category\r\n show.append(('Category-to-category probability', 'heatmap'))\r\n result['Category-to-category probability'] = result_3\r\n\r\n test_prob_dict = self.df_to_dict(prob_testing_clip_category)\r\n index = sorted(test_prob_dict.keys())\r\n agn_1 = [test_prob_dict[k] for k in index]\r\n result_4 = pd.DataFrame({'Connection Probabilities': agn_1}, index = index)\r\n\r\n show.append(('Relation results', 'bar'))\r\n result['Relation results'] = result_4\r\n\r\n if bool(plot_blocks): # returns True if the dict is not 
empty\r\n for k_ , v_ in plot_blocks.items():\r\n agn_prob = self.agent_results_avg(v_, test_prob_dict)\r\n index = sorted(plot_blocks[k_].keys())\r\n index = plot_blocks_ID[k_]\r\n agn_2 = [agn_prob[k] for k in index]\r\n result_ = pd.DataFrame({'Connection Probabilities': agn_2},\r\n index = index)\r\n show.append((k_, 'bar'))\r\n result[k_] = result_\r\n\r\n return show, result", "title": "" }, { "docid": "84eecf6221c09417a8f6621ac6bb1d78", "score": "0.6469754", "text": "def report_outputs(self, ax, outputs):\n layers.check_order(outputs, \"BDL\")\n pred = outputs[0].detach().cpu().softmax(0).numpy()\n for i in range(pred.shape[0]):\n ax.plot(pred[i])", "title": "" }, { "docid": "ba515face994b4ce7565c51a73e26bb0", "score": "0.6469471", "text": "def __graph1(self,H, plot_path):\r\n plt.style.use(\"ggplot\")\r\n plt.figure()\r\n N = self.__EPOCHS\r\n plt.plot(np.arange(0, N), H.history[\"loss\"], label=\"train_loss\") #plot the training loss\r\n plt.plot(np.arange(0, N), H.history[\"accuracy\"], label=\"train_accuracy\") #plot the training accuracy\r\n plt.title(\"Training Loss and Accuracy\")\r\n plt.xlabel(\"Epoch #\")\r\n plt.ylabel(\"Loss/Accuracy\")\r\n plt.legend(loc=\"upper left\")\r\n plt.savefig(plot_path) #save the praph to file on the path that the user chose to save the results\r\n plt.show() #show the graph on the screen\r\n plt.close()", "title": "" }, { "docid": "85f2b9dbc7def6b6c012c29b30f7f4dc", "score": "0.64684665", "text": "def plot_inference(predictions, predict_seq, gt_data, time_points):\n if(predict_seq is False):\n # Flatten the predictions\n # predictions[0] is (output_feature_dim, batch_size)\n preds = predictions[0]\n for i in range(1, len(predictions)):\n preds = np.concatenate((preds, predictions[i]), axis=1)\n\n # Reshape the prediction axes to batch_axis (samples), feature_axis\n preds = np.swapaxes(preds, 0, 1)\n\n else:\n # Flatten the predictions\n # predictions[0] is (output_feature_dim, seq_len, batch_size)\n preds = predictions[0]\n for i in range(1, len(predictions)):\n preds = np.concatenate((preds, predictions[i]), axis=2)\n\n # Reshape the prediction axes to batch_axis (samples), time_axis, feature_axis\n preds = np.swapaxes(preds, 0, 2)\n\n # Reshape so that samples are concatenated at the end of each other\n # (time_axis, feature_axis)\n preds = preds.reshape((preds.shape[0] * preds.shape[1], preds.shape[2]))\n\n # If matplotlib is available, plot the results\n # Get ground truth values\n gt_vals = gt_data.test['y']['data'].reshape(preds.shape)\n\n # Take only up to 8 cycles\n # time_points = min(8 * no_points, preds.shape[0])\n preds = preds[:time_points, ...]\n gt_vals = gt_vals[:time_points, ...]\n\n # Plot predictions across time\n fig, ax = plt.subplots(figsize=(20, 10))\n ax.plot(range(preds.shape[0]), preds[:, 0],\n linestyle=':',\n marker='s', label='predicted_x')\n ax.plot(range(preds.shape[0]), preds[:, 1],\n linestyle=':',\n marker='o', label='predicted_y')\n ax.plot(range(preds.shape[0]), gt_vals[:, 0],\n linestyle=':',\n marker='d', label='gt_x')\n ax.plot(range(preds.shape[0]), gt_vals[:, 1],\n linestyle=':',\n marker='D', label='gt_y')\n ax.legend()\n ax.grid()\n title = 'Lissajous Curve Predictions and Ground Truth, Predict Sequence:%s' % predict_seq\n ax.set_title(title)\n fig.savefig('PredictedCurve_Time_PredictSeq_%s.png' % predict_seq, dpi=128)\n plt.clf()\n\n # Plot one feature in x, the other in y axis\n fig, ax = plt.subplots(figsize=(20, 10))\n ax.plot(preds[:, 0], preds[:, 1],\n linestyle=':',\n marker='s', 
label='predicted')\n ax.plot(gt_vals[:, 0], gt_vals[:, 1],\n linestyle=':',\n marker='o', label='ground truth')\n title = 'Lissajous Curve Predictions and Ground Truth,\\n' \\\n '2D Time Series, Predict Sequence:%s' % predict_seq\n ax.set_title(title)\n ax.legend()\n ax.grid()\n fig.savefig('PredictedCurve_2D_PredictSeq_%s.png' % predict_seq, dpi=128)", "title": "" }, { "docid": "3cdd650fcae13603d2fa74fd244180e6", "score": "0.6463765", "text": "def plot_history(history):\r\n plt.plot(history.history['acc'])\r\n plt.plot(history.history['val_acc'])\r\n plt.rcParams[\"font.size\"] = 18\r\n plt.title('model accuracy')\r\n plt.ylabel('accuracy')\r\n plt.xlabel('epoch')\r\n plt.legend(['train', 'test'], loc='lower right')\r\n plt.show()", "title": "" }, { "docid": "7001399636102b50aeeb61501ed27cf3", "score": "0.6454435", "text": "def plot_history(history: dict) -> None:\n\n ax = plt.figure().gca()\n ax.plot(history['train'])\n ax.plot(history['val'])\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['train', 'validation'])\n plt.title('Loss over training epochs')\n plt.show()", "title": "" }, { "docid": "3041d00a1a9bf1d90cb076a5b2129af9", "score": "0.64484566", "text": "def plot_rets_v_acc(return_df):\n fig = plt.figure(figsize=(20,15))\n models = ['LogisticRegression','RandomForestClassifier','GradientBoostingClassifier']\n colors = ['blue','green','red']\n ax1 = fig.add_subplot(1,1,1)\n for i in range(len(models)):\n ax1.scatter(return_df[return_df['model']==models[i]]['Accuracy'],return_df[return_df['model']==models[i]]['Returns'],c=colors[i],label=models[i])\n plt.legend(prop={'size': 35})\n ax1.tick_params('x',labelsize=25)\n ax1.tick_params('y',labelsize=25)\n plt.ylabel('Returns (%)',fontsize=30,fontweight='bold')\n plt.xlabel('Accuracy',fontsize=30,fontweight='bold')\n plt.title('Blended Returns vs. 
Accuracy',fontsize=40,fontweight='bold')\n plt.savefig('images/returns_v_acc.png')\n plt.show()", "title": "" }, { "docid": "771fc73041da05a77f579f2f2bb1cd90", "score": "0.64380616", "text": "def plot_examples(model_path, model_name, mode, batch_id, num_examples, dataloaer):\n model = torch.load(os.path.join(model_path, model_name))\n\n class_colormap = pd.read_csv(\"class_dict.csv\")\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n # variable for legend\n category_and_rgb = [[category, (r,g,b)] for idx, (category, r, g, b) in enumerate(class_colormap.values)]\n legend_elements = [Patch(facecolor=webcolors.rgb_to_hex(rgb), \n edgecolor=webcolors.rgb_to_hex(rgb), \n label=category) for category, rgb in category_and_rgb]\n \n # test / validation set에 대한 시각화\n if (mode in ('train', 'val')):\n with torch.no_grad():\n for index, (imgs, masks, image_infos) in enumerate(dataloaer):\n if index == batch_id:\n image_infos = image_infos\n temp_images = imgs\n temp_masks = masks\n\n model.eval()\n # inference\n outs = model(torch.stack(temp_images).to(device))\n oms = torch.argmax(outs, dim=1).detach().cpu().numpy()\n\n break\n else:\n continue\n \n fig, ax = plt.subplots(nrows=num_examples, ncols=3, figsize=(12, 4*num_examples), constrained_layout=True)\n fig.tight_layout()\n \n for row_num in range(num_examples):\n # Original Image\n ax[row_num][0].imshow(temp_images[row_num].permute([1,2,0]))\n ax[row_num][0].set_title(f\"Orignal Image : {image_infos[row_num]['file_name']}\")\n # Groud Truth\n ax[row_num][1].imshow(label_to_color_image(masks[row_num].detach().cpu().numpy()))\n ax[row_num][1].set_title(f\"Groud Truth : {image_infos[row_num]['file_name']}\")\n # Pred Mask\n ax[row_num][2].imshow(label_to_color_image(oms[row_num]))\n ax[row_num][2].set_title(f\"Pred Mask : {image_infos[row_num]['file_name']}\")\n ax[row_num][2].legend(handles=legend_elements, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)\n plt.show()\n \n # test set에 대한 시각화\n else :\n with torch.no_grad():\n for index, (imgs, image_infos) in enumerate(dataloaer):\n if index == batch_id:\n image_infos = image_infos\n temp_images = imgs\n\n model.eval()\n \n # inference\n outs = model(torch.stack(temp_images).to(device))\n oms = torch.argmax(outs, dim=1).detach().cpu().numpy()\n break\n else:\n continue\n \n fig, ax = plt.subplots(nrows=num_examples, ncols=2, figsize=(10, 4*num_examples), constrained_layout=True)\n\n for row_num in range(num_examples):\n # Original Image\n ax[row_num][0].imshow(temp_images[row_num].permute([1,2,0]))\n ax[row_num][0].set_title(f\"Orignal Image : {image_infos[row_num]['file_name']}\")\n # Pred Mask\n ax[row_num][1].imshow(label_to_color_image(oms[row_num]))\n ax[row_num][1].set_title(f\"Pred Mask : {image_infos[row_num]['file_name']}\")\n ax[row_num][1].legend(handles=legend_elements, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0)\n \n plt.show()", "title": "" }, { "docid": "85da549ef36704e017c3600d2d352588", "score": "0.643711", "text": "def plot(self):\n\n plt.show()", "title": "" }, { "docid": "4b9594ac0a56574b66e5b65aba147f5e", "score": "0.6436407", "text": "def plot_part1(iterations,train_perf,valid_perf,test_perf):\n make_plot(0,iterations,train_perf,\"Iterations\",\"Correct classification of training set\",\"training_learn_rate.png\") \n make_plot(1,iterations,test_perf,\"Iterations\",\"Correct classification of test set\",\"test_learn_rate.png\") \n make_plot(2,iterations,valid_perf,\"Iterations\",\"Correct classification of training set\",\"valid_learn_rate.png\") \n\n 
return", "title": "" }, { "docid": "5ee2996b3bb28f85fdf92529063f742d", "score": "0.6429877", "text": "def __graph3(self, history, plot_path):\r\n\t # plot loss\r\n plt.subplot(211)\r\n plt.title('Model Loss')\r\n plt.plot(history.history['loss'], color='green', label='train') #plot the training loss\r\n plt.plot(history.history['val_loss'], color='orange', label='validation') #plot the validation loss\r\n plt.xlabel(\"Epoch #\")\r\n plt.ylabel(\"Loss\")\r\n plt.legend(loc=\"upper right\")\r\n\t # plot accuracy\r\n plt.subplot(212)\r\n plt.title('Model Accuracy')\r\n plt.plot(history.history['accuracy'], color='green', label='train') #plot the training accuracy\r\n plt.plot(history.history['val_accuracy'], color='orange', label='validation') #plot the validation accuracy\r\n plt.xlabel(\"Epoch #\")\r\n plt.ylabel(\"Accuracy\")\r\n plt.legend(loc=\"upper right\")\r\n plt.savefig(plot_path) #save the praph to file on the path that the user chose to save the results\r\n plt.show() #show the graph on the screen\r\n plt.close()", "title": "" }, { "docid": "0310de50b5397aa88eb8ac61e1aaad18", "score": "0.6426437", "text": "def create_plot(self):\n results = [item[\"result\"] for item in self.game_history]\n avg_results = []\n for i in range(1, len(results)):\n avg_results.append(np.mean(results[:i]))\n\n num_games = range(1, len(self.game_history))\n plt.figure()\n plt.title(self.__str__() + \" results\")\n plt.xlabel(\"Number of games\")\n plt.ylabel(\"Avg score\")\n plt.ylim((0, 2))\n plt.plot(num_games, avg_results)\n plt.show()", "title": "" }, { "docid": "d71c6e3c1eff92bb67e23dd0c66f4155", "score": "0.6425112", "text": "def plot_data(data_training, data_validation, xaxis, yaxis):\n \n pos_train, neg_train = get_splitted_dataset(data_training, xaxis, yaxis)\n\n plt.scatter(neg_train[[xaxis]], neg_train[[yaxis]], color='g', label=\"neg\")\n plt.scatter(pos_train[[xaxis]], pos_train[[yaxis]], color='r', label=\"pos\")\n\n plt.xlabel(xaxis)\n plt.ylabel(yaxis)\n plt.title(\"Data Visualizing\")\n plt.legend(loc=\"upper right\", title=\"diabetes\")\n plt.show()", "title": "" }, { "docid": "c5b414c13dad5a7b23e1ef49f2569ae4", "score": "0.6404838", "text": "def plot_task(task): \n num_train = len(task['train'])\n num_test = len(task['test'])\n num_tot = num_train + num_test\n fig, axs = plt.subplots(2, num_tot, figsize=(3*num_tot,3*2))\n for i in range(num_train):\n plot_one(axs[0,i],task['train'][i]['input'],'train input')\n plot_one(axs[1,i],task['train'][i]['output'],'train output')\n i+=1\n for j in range(num_test):\n plot_one(axs[0,i+j],task['test'][j]['input'],'test input')\n plot_one(axs[1,i+j],task['test'][j]['output'],'test output') \n plt.tight_layout()\n plt.show()", "title": "" }, { "docid": "ab9c5c78b5bac8bcf085fa9110d0ed4d", "score": "0.64018166", "text": "def forecast_result_chart_model(self, data, model: Model, file_path: str, model_name: str = \"model\", xlabel='date', ylabel='price'):\n try:\n plt.figure(figsize=self.__figsize)\n plt.title(\"Forecast Result Model (\" + model_name + \"): \" + self.__title_times_data, fontsize=self.__fontsize_title)\n plt.plot(data, color='lightgray', label='actul')\n plt.plot(model.train_predict, label=\"train predict\")\n plt.plot(model.test_predict, color=\"red\", label=\"test predict\")\n plt.xlabel(xlabel, fontsize=self.__fontsize_x_y_label)\n plt.ylabel(ylabel, fontsize=self.__fontsize_x_y_label)\n x_site = min(data)\n y_site = min(data)\n plt.text(x_site*0.05, y_site, r'$\\mathrm{train} : $' + '\\n\\n' +\n r'$\\mathrm{MSE} = $' + 
str(round(model.train_error[0], 4)) + '\\n' +\n r'$\\mathrm{MAE} = $' + str(round(model.train_error[1], 4)) + '\\n' +\n r'$\\mathrm{RMSE} = $' + str(round(model.train_error[2], 4)) + '\\n' +\n r'$\\mathrm{NRMSE} = $' + str(round(model.train_error[3], 4)) \n + '\\n\\n' + r'$\\mathrm{test} : $' + '\\n\\n' + r'$\\mathrm{MSE} = $' + str(round(model.test_error[0], 4)) \n +'\\n' + r'$\\mathrm{MAE} = $' + str(round(model.test_error[1], 4)) + '\\n' + r'$\\mathrm{RMSE} = $' + str(round(model.test_error[2], 4)) \n + '\\n' + r'$\\mathrm{NRMSE} = $' + str(round(model.test_error[3], 4)),\n bbox=dict( boxstyle=\"square\", ec=(1., 0.5, 0.5), fc=(1., 0.9, 0.9), ), fontsize=self.__fontsize_x_y)\n plt.xticks(np.linspace(0, len(data), 12), rotation=15)\n plt.tick_params(axis='both', labelsize=self.__fontsize_x_y)\n plt.legend(loc='upper left')\n plt.tight_layout()\n if not os.path.isdir(file_path + '/'):\n os.mkdir(file_path)\n plt.savefig(file_path + \"/Forecast_Result_Model_\"+ model_name +\"_\" + self.__title_times_data + \".png\")\n plt.show()\n print('Saved successfully (forecast result model '+ model_name +'). File path = ' + file_path +\n \"/Forecast_Result_Model\" + model_name + \"_\" + self.__title_times_data + \".png\")\n except TypeError as err:\n raise TypeError(err)", "title": "" }, { "docid": "210193749f2b1c9fb4d5e658fbdd382f", "score": "0.640165", "text": "def plot_evaluation(viz, results, env_name):\n\n def get_Y_legend(key, v_train, v_valid):\n Y = []\n legend = []\n\n Y.append(np.array(v_train))\n if v_valid is not None:\n Y.append(np.array(v_valid))\n legend.append('{} (train)'.format(key))\n legend.append('{} (test)'.format(key))\n else:\n legend.append(key)\n\n return Y, legend\n\n train_summary = results['train']\n valid_summary = results['valid']\n for k in train_summary.keys():\n v_train = train_summary[k]\n v_valid = valid_summary[k] if k in valid_summary.keys() else None\n if isinstance(v_train, dict):\n Y = []\n legend = []\n for k_ in v_train:\n vt = v_valid.get(k_) if v_valid is not None else None\n Y_, legend_ = get_Y_legend(k_, v_train[k_], vt)\n Y += Y_\n legend += legend_\n else:\n Y, legend = get_Y_legend(k, v_train, v_valid)\n\n opts = dict(\n xlabel='epochs',\n legend=legend,\n ylabel=k,\n title=k)\n\n if len(Y) == 1:\n Y = Y[0]\n X = np.arange(Y.shape[0])\n else:\n Y = np.column_stack(Y)\n X = np.column_stack([np.arange(Y.shape[0])] * Y.shape[1])\n\n viz.line(\n Y=Y,\n X=X,\n env=env_name,\n opts=opts,\n win='line_{}'.format(k))", "title": "" }, { "docid": "a4240a5aadf84ac63cc47713c81c8741", "score": "0.6392306", "text": "def plot(self):\n pass", "title": "" }, { "docid": "73a8157b07865b7aed5c837010dd2efd", "score": "0.6383978", "text": "def plot_model_distribution(m, submodel, future):\n submodel = submodel.copy()\n fig, ax = plt.subplots(2, 1, facecolor='w', figsize=(12, 10), dpi=100)\n\n proj_prediction = submodel['prediction_df']\n ref_prediction = m.predict(future)\n ax[0].plot(proj_prediction['ds'].values,\n proj_prediction['yhat'],\n label='Mean prediction',\n ls='-',\n color='red')\n\n ax[0].fill_between(proj_prediction['ds'].values,\n proj_prediction['yhat_lower'],\n proj_prediction['yhat_upper'],\n color='#910078', alpha=0.2,\n label='Uncertainty interval')\n\n test_indices = future.index.values > m.history.index.max()\n train_indices = future.index.values <= m.history.index.max()\n\n ax[1].plot(future['ds'].values,\n ref_prediction['yhat'],\n label=\"Reference model\",\n ls='-')\n\n ax[1].fill_between(proj_prediction['ds'].values,\n 
ref_prediction['yhat_lower'],\n ref_prediction['yhat_upper'],\n color='#0072B2', alpha=0.2,\n label='Uncertainty interval')\n for pred_ax in ax:\n pred_ax.plot(future.loc[train_indices, 'ds'].values,\n future.loc[train_indices, 'y'], 'k.',\n label=\"Data\")\n pred_ax.plot(future.loc[test_indices, 'ds'].values,\n future.loc[test_indices, 'y'], 'k.')\n pred_ax.axvline(pd.to_datetime('2017-05-01'), color='gray', ls='--',\n label='Beginning of test set')\n pred_ax.legend(loc='lower left')\n pred_ax.set_ylabel('log(total sales)')\n pred_ax.set_xlabel('date')\n pred_ax.set_xlim([pd.to_datetime('2017-03-01'), future['ds'].tail(1)])\n plt.show()", "title": "" }, { "docid": "42b32015e65c184374616a8b30efce65", "score": "0.63838476", "text": "def plot(self):\n\t\tpass", "title": "" }, { "docid": "5d889a94c4a5850abedf75e27d88cd49", "score": "0.63711256", "text": "def myplot(history, name):\r\n epoch = range(1, EPOCH_SIZE + 1)\r\n _, axes = plt.subplots(nrows = 2, ncols = 1, figsize = (12, 12))\r\n axes[0].plot(epoch, history.history['acc'], label = 'train')\r\n axes[0].plot(epoch, history.history['val_acc'], label = 'validation')\r\n axes[0].legend()\r\n axes[0].grid(axis = 'y')\r\n axes[0].set_xlabel(xlabel = 'epoch')\r\n axes[0].set_ylabel(ylabel = 'accuracy')\r\n axes[0].set_title(label = 'Accuracy')\r\n axes[1].plot(epoch, history.history['loss'], label = 'train')\r\n axes[1].plot(epoch, history.history['val_loss'], label = 'validation')\r\n axes[1].legend()\r\n axes[1].grid(axis = 'y')\r\n axes[1].set_xlabel(xlabel = 'epoch')\r\n axes[1].set_ylabel(ylabel = 'loss')\r\n axes[1].set_title(label = 'Loss')\r\n plt.savefig(FIGURE_PATH+name+'.analysis.png')", "title": "" }, { "docid": "5b2cbcbc5710473dba3ef0dc91926c37", "score": "0.63508946", "text": "def plot_train_validation_curve(history):\n fig = plt.figure(figsize=(14, 8), dpi=200)\n plt.plot(history.history['accuracy'])\n plt.plot(history.history['val_accuracy'])\n plt.title('FC Training Model accuracy')\n plt.ylabel('Accuracy')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Test'], loc='upper left')\n plt.show()", "title": "" }, { "docid": "098ef9307beb6995d25dc9979983782e", "score": "0.6336319", "text": "def make_plot(ss):\n nrows, ncols = 2, 2\n fig, ax = plt.subplots(nrows, ncols, figsize=(8*nrows,6*ncols),\n sharey=True)\n\n all_train = np.array(ss['train'])\n all_test = np.array(ss['test'])\n all_raw_test = np.array(ss['raw_test'])\n all_lrates = np.array(ss['lrates'])\n epochs = ss['epoch']\n \n # {train, test, raw_test} = shape (K,N) where N is how often we recorded it.\n # For plots, ideally N = E, where E is number of epochs, but usually N > E.\n # For all cross validation folds, we must have N be the same.\n assert all_test.shape == all_raw_test.shape\n K, N = all_train.shape\n print(\"\\nall_train.shape: {}\".format(all_train.shape))\n print(\"all_test.shape: {}\".format(all_test.shape))\n print(\"all_lrates.shape: {}\".format(all_lrates.shape))\n print(\"epoch: {}\\n\".format(epochs))\n\n # Since N != E in general, try to get epochs to line up.\n xs = np.arange(N) * (epochs[0] / float(N))\n\n # Plot losses!\n for cv in range(K):\n ax[0,0].plot(xs, all_train[cv,:], label='cv_{}'.format(cv))\n ax[0,1].plot(xs, all_test[cv,:], label='cv_{}'.format(cv))\n\n mean_0 = np.mean(all_train, axis=0)\n std_0 = np.std(all_train, axis=0)\n mean_1 = np.mean(all_test, axis=0)\n std_1 = np.std(all_test, axis=0)\n\n ax[1,0].plot(xs, mean_0, lw=2, label='train_losses')\n ax[1,1].plot(xs, mean_1, lw=2, label='test_losses')\n ax[1,0].fill_between(xs, 
mean_0-std_0, mean_0+std_0, alpha=error_alpha, facecolor=error_fc)\n ax[1,1].fill_between(xs, mean_1-std_1, mean_1+std_1, alpha=error_alpha, facecolor=error_fc)\n\n # Titles\n ax[0,0].set_title(\"CV Train Losses\", fontsize=tsize)\n ax[0,1].set_title(\"CV Test Losses\", fontsize=tsize)\n ax[1,0].set_title(\"CV Train Losses\", fontsize=tsize)\n ax[1,1].set_title(\"CV Test Losses\", fontsize=tsize)\n\n # Bells and whistles\n for i in range(nrows):\n for j in range(ncols):\n ax[i,j].legend(loc=\"best\", ncol=2, prop={'size':legend_size})\n ax[i,j].set_xlabel('Epoch', fontsize=xsize)\n ax[i,j].set_ylabel('Average L2 Loss', fontsize=ysize)\n ax[i,j].tick_params(axis='x', labelsize=tick_size)\n ax[i,j].tick_params(axis='y', labelsize=tick_size)\n \n plt.tight_layout()\n figname = osp.join(OUTPUT_PATH,\"check_stats.png\")\n plt.savefig(figname)\n print(\"Look at this figure:\\n{}\".format(figname))", "title": "" }, { "docid": "b0d7ae2dfb20f52cf074267a53b8d240", "score": "0.6314435", "text": "def training_eval(results, title='resnet'):\n \n # summarize history for accuracy\n plt.clf()\n plt.plot(results.history['acc'])\n plt.plot(results.history['val_acc'])\n plt.title(title + ' training accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['training', 'validation'], loc='upper left')\n plt.savefig('output/'+title+'_training_accuracy.png', bbox_inches='tight')\n plt.savefig('output/'+title+'_training_accuracy.pdf', bbox_inches='tight')\n \n # summarize history for loss\n plt.clf()\n plt.plot(results.history['loss'])\n plt.plot(results.history['val_loss'])\n plt.title(title+' training loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['training', 'validation'], loc='upper left')\n plt.savefig('output/'+title+'_training_loss.png', bbox_inches='tight')\n plt.savefig('output/'+title+'_training_loss.pdf', bbox_inches='tight')\n\n mean_val_loss = '%.4f' % np.mean(results.history['val_loss'])\n print('Mean validation loss after training ' + mean_val_loss)\n with open('output/'+title+'val_loss.txt','w') as f:\n f.write(mean_val_loss)", "title": "" }, { "docid": "be2b1b69201fbfc7eac1521386af1253", "score": "0.6310921", "text": "def plot_history_classify(self):\n hist = pd.DataFrame(self.history.history)\n hist['epoch'] = self.history.epoch\n\n # ****loss plost *******************\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('loss')\n plt.plot(hist['epoch'], hist['loss'],\n label='Train loss')\n plt.plot(hist['epoch'], hist['val_loss'],\n label='Val loss')\n plt.ylim([0, 3])\n plt.legend()\n plt.savefig(f'04_images/{self.NAME_STR}_loss.png', dpi=150, format='png')\n # plt.savefig(f'/googledrive/MyDrive/04_images/{self.NAME_STR}_loss.png', dpi=150, format='png')\n\n # ****************accuracy plot******************\n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.plot(hist['epoch'], hist['accuracy'],\n label='Train Accuracy')\n plt.plot(hist['epoch'], hist['val_accuracy'],\n label='Val Accuracy')\n plt.ylim([0, 1])\n plt.legend()\n plt.savefig(f'04_images/{self.NAME_STR}_acc.png', dpi=150, format='png')\n # plt.savefig(f'/googledrive/MyDrive/04_images/{self.NAME_STR}_acc.png', dpi=150, format='png')\n plt.show()\n return hist", "title": "" }, { "docid": "fa1de7137b28111f1c956a7b19d111c2", "score": "0.63066125", "text": "def plot_learning(self):\n plt.plot([i for i in range(len(self.fitness_list))], self.fitness_list)\n plt.ylabel('Fitness')\n plt.xlabel('Iteration')\n plt.show()", "title": "" }, { "docid": "b07067a27f2c1451e17535000f14ba9e", "score": 
"0.6301739", "text": "def __graph2(self,H, plot_path):\r\n plt.style.use(\"ggplot\")\r\n plt.figure()\r\n N = self.__EPOCHS\r\n plt.plot(np.arange(0, N), H.history[\"val_loss\"], label=\"val_loss\") #plot the validation loss\r\n plt.plot(np.arange(0, N), H.history[\"val_accuracy\"], label=\"val_accuracy\") #plot the validation accuracy\r\n plt.title(\"Validation Loss and Accuracy\")\r\n plt.xlabel(\"Epoch #\")\r\n plt.ylabel(\"Loss/Accuracy\")\r\n plt.legend(loc=\"upper left\")\r\n plt.savefig(plot_path) #save the praph to file on the path that the user chose to save the results\r\n plt.show()\r\n plt.close()", "title": "" }, { "docid": "74ffb9eb186f256985b34a991fc4f3d5", "score": "0.6299238", "text": "def features(plt, data, response, model_class):\n # split dependent and independent variables\n feature = (set(data.columns) - set([response])).pop()\n x = data[feature]\n y = data[response]\n\n # create model\n x_line = x\n model = model_class(x, y)\n y_line = model.predict(x)\n\n # draw feature values as points\n plt.plot(x, y,'ob', markersize=3)\n\n # draw model values as line\n plt.plot(x_line, y_line,'-r')\n\n # return plot\n plt.ylabel(response)\n plt.xlabel(feature)\n return plt", "title": "" }, { "docid": "e307f6234fae5e9bb7d92d0b9346dd56", "score": "0.62864804", "text": "def plot(self):\n\n self.figure.clear()\n ax = self.figure.add_subplot(111)\n\n x = [item for item in ClassEstimationWindow.result_dict]\n y = [ClassEstimationWindow.result_dict[item] for item in ClassEstimationWindow.result_dict]\n ax.bar(x, y)\n\n ax.set_title('Profit prediction')\n ax.set_xlabel('Product')\n ax.set_ylabel('Profit in ₹')\n ax.set_xticklabels(labels=[item for item in ClassEstimationWindow.result_dict], rotation=90) # to rotate ticks of X-axis for readability\n\n # when we want to set fontsize according to requirement\n for tick in ax.xaxis.get_major_ticks():\n tick.label.set_fontsize(4.5)\n\n self.canvas.draw()", "title": "" }, { "docid": "da9eefb069878bdcca6b6567e46d6810", "score": "0.6280102", "text": "def test(index, dataset, model):\n\n # Load raw and segmented images\n img, seg = dataset[index]\n class_ids = dataset.class_ids\n\n # Run through model\n img = img.unsqueeze(0)\n model.eval()\n with torch.no_grad():\n yhat = model(img).squeeze(0) # dim (n_classes, height, width)\n prediction = yhat.argmax(axis=0) # dim (height, width)\n\n # Have to trim edges off image, to match size of prediction\n img = centercrop(img, size=prediction.shape)\n img = img.squeeze(0)\n img = np.transpose(np.array(img), axes=(1, 2, 0))\n pred = np.array(prediction.cpu())\n seg = seg.cpu().numpy()\n\n # Plot raw and segmented images stacked vertically\n fig, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)\n plt.subplots_adjust(hspace=0)\n plt1 = ax1.imshow(img)\n plt2 = ax2.imshow(seg, cmap='jet', vmin=0, vmax=33)\n # ax2.text(200, 100, 'test', color='white')\n plt3 = ax3.imshow(pred, cmap='jet', vmin=0, vmax=33)\n ax1.set_yticklabels([])\n ax2.set_yticklabels([])\n ax3.set_yticklabels([])\n ax3.set_xticklabels([])\n ax1.set_ylabel('Image')\n ax2.set_ylabel('Ground truth')\n ax3.set_ylabel('Prediction')\n\n # Add a colorbar with corresponding seg labels\n if class_ids is not None:\n seg_ids = np.unique(seg)\n cbar = fig.colorbar(plt2, ax=[ax1, ax2, ax3])\n labels = [class_ids[i] for i in seg_ids]\n cbar.set_ticks(seg_ids)\n cbar.set_ticklabels(labels)", "title": "" }, { "docid": "47f50bf5f1e25b1722cfa3f4cf2c54bc", "score": "0.62780225", "text": "def plotResults(predicted, expected, output):\n var = 
metrics.explained_variance_score(expected, predicted)\n mae = metrics.mean_absolute_error(expected, predicted)\n mse = metrics.mean_squared_error(expected, predicted)\n r2 = metrics.r2_score(expected, predicted)\n rms = np.sqrt(np.mean((expected - predicted) ** 2))\n\n print output\n print 'Explained variance (best possible score is 1.0, lower values are worse):', var\n print 'Mean Absolute Error (best is 0.0):', mae\n print 'Mean Squred Error (best is 0.0):', mse\n print 'R2 score (best is 1.0):', r2\n print 'RMS:', rms\n print '\\n\\n\\n'\n\n title = 'RMS=%.4f, MSE=%.4f, R2=%.3f' % (rms, mse, r2)\n\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n plt.title(title)\n ax1.scatter(expected, predicted, alpha=0.2, s=5)\n ax1.set_xlabel(\"Spectroscopic Redshift\")\n ax1.set_ylabel(\"Photo-z\")\n ax1.plot([0, 8], [0, 8], '-r')\n ax1.set_xlim(0, 1.1*expected.max())\n ax1.set_ylim(0, 1.1*expected.max())\n plt.savefig(output+'Results.pdf')\n plt.close()", "title": "" }, { "docid": "bad9662b9ddcb122cef87ea949c9a024", "score": "0.62708586", "text": "def save_resid_plot(model, x_train, y_train, x_test, y_test, title, file_name):\n y_pred_train = model.predict(x_train)\n y_pred_test = model.predict(x_test)\n plt.scatter(y_pred_train, y_pred_train - y_train, c='b', s=40, alpha=0.5)\n plt.scatter(y_pred_test, y_pred_test - y_test, c='r', s=40, alpha=0.5)\n plt.hlines(y=0, xmin=min(min(y_pred_train), min(y_pred_test)), xmax=max(max(y_pred_train), max(y_pred_test)))\n plt.ylabel('Residuals')\n plt.xlabel('Predicted log Ki')\n plt.title(title)\n handles = [mpatches.Patch(color='blue', label='Training'),\n mpatches.Patch(color='red', label='Test')]\n plt.legend(handles=handles)\n plt.savefig(file_name)\n plt.gcf().clear()\n\n return", "title": "" }, { "docid": "e95d0bd36fcb1cb8bb3ee1c99ecd3ee8", "score": "0.6270621", "text": "def plot_pred(records, predictions, fig_savepath, log_reconvert=True):\n\n length = records.size\n t = np.linspace(start=1, stop=length, num=length)\n\n if log_reconvert:\n records = np.power(10, records / 2.3) - 1\n predictions = np.array(list(predictions))\n predictions = np.power(10, predictions / 2.3) - 1\n else:\n predictions = np.array(list(predictions))\n\n plt.figure(figsize=(16, 9))\n plt.subplots_adjust(\n left=0.1, bottom=0.1, right=0.9, top=0.9, hspace=0.2, wspace=0.3)\n # plt.title('flow prediction based on DNN')\n plt.xticks(fontsize=16)\n plt.yticks(fontsize=16)\n plt.xlabel('Time(d)', fontsize=18)\n plt.ylabel(\"flow(\" + r\"$m^3$\" + \"/s)\", fontsize=18)\n plt.plot(t, records, '-', color='blue', label='records')\n plt.plot(t, predictions, '--', color='red', label='predictions')\n plt.legend(loc='upper left', shadow=True, fontsize=18)\n plt.savefig(fig_savepath, format='png', dpi=1000)\n plt.show()", "title": "" }, { "docid": "5f3882164d44682bb225a611e75abb1a", "score": "0.6268838", "text": "def PredictionPlot(y_test,y_pred_test,lons,lats, filenames_train, filenames_test, savefolder,rmses):\n now = datetime.datetime.now()\n imshape = (100, 9)\n print(now)\n timestamp = datetime.datetime.strftime(now,\"%d-%m_%H%M\")\n\n plt.rcParams['font.size'] = 8\n for t in range(y_pred_test.shape[0]):\n runname = filenames_test[t]\n print(t,runname)\n rmse = rmses[t]\n\n nlon,nlat = len(lons),len(lats)\n\n y_reshaped = y_test[t,:].reshape((nlat,nlon))\n y_pred_reshaped = y_pred_test[t,:].reshape((nlat,nlon))\n \n absmax = np.max(np.abs(y_reshaped))\n yplot_pred = y_pred_reshaped\n maxlvl = np.ceil(absmax*0.8)\n\n levels = np.arange(-maxlvl,maxlvl+0.01,0.2)\n plt.clf()\n fig = 
plt.figure(figsize=(11,5))\n\n\n ax2 = plt.subplot2grid((2, 2), (0, 0))\n ax3 = plt.subplot2grid((2, 2), (0, 1))\n ax4 = plt.subplot2grid((2, 2), (1, 0))\n ax5 = plt.subplot2grid((2, 2), (1, 1))\n\n\n plt.sca(ax2)\n plotmap(lons,lats,y_pred_reshaped,cmap='RdBu_r',\n levels=levels, variable_label='Temperature Response (K)',\n plottitle='Predicted',\n plotaxis=ax2)\n\n plt.sca(ax3)\n absmax = np.max(np.abs(y_reshaped))\n yplot = y_reshaped \n plotmap(lons,lats,y_reshaped,cmap='RdBu_r',levels=levels,\n variable_label='Temperature Response (K)',\n plottitle='True Model',\n plotaxis=ax3)\n \n plt.sca(ax4)\n plotmap(lons,lats,yplot_pred-yplot,cmap='RdBu_r',\n levels=np.arange(-2.,2.1,0.1),variable_label='Temperature Difference (K)',\n plottitle='Absolute Difference: (Predicted - True)',\n plotaxis=ax4)\n\n plt.sca(ax5)\n divide = (yplot_pred - yplot)/np.abs(yplot)\n plotmap(lons,lats,divide,cmap = 'RdBu_r', \n plottitle='Fractional Difference: (Prediction - True)/|True| ',variable_label='Fractional Difference',\n levels=np.arange(-3.0,3.1,0.2),plotaxis=ax5)\n\n\n fig.suptitle(runname)\n savefileas = savefolder+'%s_%s.png'%(runname.replace(' ','_'),timestamp)\n plt.savefig(savefileas,bbox_inches='tight')\n print(\"Saved as \",savefileas)\n plt.close()", "title": "" }, { "docid": "9acc3b438400a1b4efc73ac1be1608c5", "score": "0.62602735", "text": "def plot_results(self):\r\n\t#plt.plot(self.evaluated_time,self.evaluated_tau,marker='o',ms=4,lw=0.0)\r\n\tplt.plot(self.evaluated_time,self.smoothed_tau,marker='x',ms=0.5,lw=3.0,mfc='r')\r\n\tplt.minorticks_on()\r\n\tplt.grid()\r\n\tplt.xlim(0.0,max(self.evaluated_time))\r\n\tplt.xticks(size=20)\r\n\tplt.yticks(size=20)\r\n\tplt.xlabel(r'Time, $t$',size=20)\r\n\tplt.ylabel(r'Viscosity, $\\eta(\\mathrm{Pa.s})$',size=20)\r\n\tplt.yscale('log')\r\n\tplt.show()", "title": "" }, { "docid": "b6947415b67c04e531ee5b4c39f5317e", "score": "0.62577593", "text": "def plot_learning_curve(training_losses): \n plt.ylabel('Loss')\n plt.xlabel('Training Steps')\n plt.plot(training_losses)", "title": "" }, { "docid": "b6947415b67c04e531ee5b4c39f5317e", "score": "0.62577593", "text": "def plot_learning_curve(training_losses): \n plt.ylabel('Loss')\n plt.xlabel('Training Steps')\n plt.plot(training_losses)", "title": "" }, { "docid": "1ebf8f4a1054a3d0fa25be94ae073098", "score": "0.62425953", "text": "def plot_prediction(predictions, labels, num_plot=200):\n import matplotlib.pyplot as plt\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.plot(labels[:num_plot], c='b', marker=\"^\", ls='--', label='Ground True', fillstyle='none')\n ax.plot(predictions[:num_plot], c='k', marker=\"v\", ls='-', label='DA-RNN')\n\n plt.legend(loc=2)\n\n plt.xlabel('Time axis')\n plt.show()\n\n return", "title": "" }, { "docid": "5be41fa777a0d1636697f5aa77676046", "score": "0.62392336", "text": "def forecast_result_chart_predict(self, model: Model, file_path: str, model_name: str = \"model\", xlabel='date', ylabel='price'):\n try:\n plt.figure(figsize=self.__figsize)\n plt.title(\"Forecast Result \" + model_name + \" : \" + self.__title_times_data, fontsize=self.__fontsize_title)\n plt.plot(model.test_data, color='lightgray', label='actul')\n plt.plot(model.test_predict, color=\"red\", label=\"test predict\")\n plt.xlabel(xlabel, fontsize=self.__fontsize_x_y_label)\n plt.ylabel(ylabel, fontsize=self.__fontsize_x_y_label)\n x_site = len(model.test_data)\n y_site = min(model.test_data)\n plt.text(x_site*0.05, y_site, r'$\\mathrm{test} : $' + '\\n\\n' + r'$\\mathrm{MSE} = $' + 
str(round(model.test_error[0], 4)) \n +'\\n' + r'$\\mathrm{MAE} = $' + str(round(model.test_error[1], 4)) + '\\n' + r'$\\mathrm{RMSE} = $' + str(round(model.test_error[2], 4)) \n + '\\n' + r'$\\mathrm{NRMSE} = $' + str(round(model.test_error[3], 4)),\n bbox=dict( boxstyle=\"square\", ec=(1., 0.5, 0.5), fc=(1., 0.9, 0.9), ), fontsize=self.__fontsize_x_y)\n plt.xticks(np.linspace(0, len(model.test_data), 12), rotation=15)\n plt.tick_params(axis='both', labelsize=self.__fontsize_x_y)\n plt.legend(loc='best')\n plt.tight_layout()\n if not os.path.isdir(file_path + '/'):\n os.mkdir(file_path)\n plt.savefig(file_path + \"/Forecast_Result_predict_\"+ model_name +\"_\" + self.__title_times_data + \".png\")\n plt.show()\n print('Saved successfully (forecast result presict '+ model_name +'). File path = ' + file_path +\n \"/Forecast_Result_predict_\" + model_name + \"_\" + self.__title_times_data + \".png\")\n except TypeError as err:\n raise TypeError(err)", "title": "" }, { "docid": "7d28d0b5e2b02e3a5389fbd01c4626bf", "score": "0.6235628", "text": "def plot_train_history(hist, name: str = None, val: bool = True) -> None:\n plt.subplots()\n plt.plot(hist.history['loss'])\n if val:\n plt.plot(hist.history['val_loss'])\n plt.yscale('log')\n plt.title('Model loss')\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n leg = ['Training']\n if val:\n leg += ['Validation']\n plt.legend(leg, loc='upper right')\n if name is not None:\n save_figure(name, False)\n else:\n plt.show()", "title": "" }, { "docid": "1f5f2de146afd1e7e6782be3ddc3daf3", "score": "0.62353086", "text": "def plot(self):\n\n self._predict()\n\n figures = PrecisionRecallComparisonDisplay(\n self.precisions, self.recalls, self.model_names)\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n # plt.legend()\n figures.plot()", "title": "" }, { "docid": "434059d769e4d37e7c789dc7be6ea581", "score": "0.62339157", "text": "def plot_history(H, epochs, num_units, dropout, optimizer):\n # name for saving output\n figure_name = f\"model_history_{num_units}_{dropout}_{optimizer}.png\"\n figure_path = os.path.join(\"..\", \"out\", \"age_detection\", figure_name)\n # Visualize performance\n plt.style.use(\"fivethirtyeight\")\n plt.figure()\n plt.plot(np.arange(0, epochs), H.history[\"loss\"], label=\"train_loss\")\n plt.plot(np.arange(0, epochs), H.history[\"val_loss\"], label=\"val_loss\")\n plt.plot(np.arange(0, epochs), H.history[\"accuracy\"], label=\"train_acc\")\n plt.plot(np.arange(0, epochs), H.history[\"val_accuracy\"], label=\"val_acc\")\n plt.title(\"Training Loss and Accuracy\")\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend()\n plt.tight_layout()\n plt.savefig(figure_path)\n \n print(f\"\\n[INFO] Loss and accuracy across on training and validation is saved as '{figure_path}'.\")", "title": "" }, { "docid": "34e736966ac08de2ea8ae844fd181097", "score": "0.62210846", "text": "def plot(train_history, run, save_dir='plots/'):\n dy = train_history['discriminator_loss']\n gy = train_history['generator_loss']\n aucy = train_history['auc']\n x = np.linspace(1, len(dy), len(dy))\n\n plt.plot(x, dy, color='blue', label='Discriminator Loss')\n plt.plot(x, gy, color='red', label='Generator Loss')\n plt.plot(x, aucy, color='yellow', linewidth='3', label='ROC, AUC={}'.format(round(aucy[-1], 4)))\n\n plt.legend(loc='best')\n plt.savefig(save_dir + str(run) + '.png')\n plt.show()", "title": "" }, { "docid": "b569d148398df91f34bfceccc0ec31fb", "score": "0.6220569", "text": "def plot_training_loss(self):\n 
plt.plot(self.__results[\"train\"].keys(), self.__results[\"train\"].values())\n plt.xlabel('epochs')\n plt.ylabel('Training Loss')", "title": "" }, { "docid": "e18f9a57e921e4470f06bfc0c53adaeb", "score": "0.6212462", "text": "def plot_model_results(train_loader, test_loader, model, optimizer, loss_fn, epochs, device,\r\n figure_number_0=None,\r\n figure_number_1=None, plot_accuracy_and_loss_on_same_figure=True, plt_show=True,\r\n random_colors=False, label_prefix=''):\r\n epoch_train_accuracies = []\r\n epoch_train_losses = []\r\n epoch_test_accuracies = []\r\n epoch_test_losses = []\r\n\r\n for t in range(epochs):\r\n train_accuracy, train_loss = train_loop(train_loader, model, loss_fn, optimizer, device)\r\n test_accuracy, test_loss = test_loop(test_loader, model, loss_fn, device)\r\n epoch_train_accuracies.append(train_accuracy)\r\n epoch_train_losses.append(train_loss)\r\n epoch_test_accuracies.append(test_accuracy)\r\n epoch_test_losses.append(test_loss)\r\n\r\n if t % SHOW_RESULTS_EVERY_EPOCHS == 0:\r\n print(\r\n f'Epoch {t} results: Train Accuracy = {train_accuracy}, Train Loss = {train_loss}, Test Accuracy = {test_accuracy}, Test Loss = {test_loss}')\r\n\r\n\r\n # Remove Epoch 0 Results\r\n del epoch_train_accuracies[0]\r\n del epoch_train_losses[0]\r\n del epoch_test_accuracies[0]\r\n del epoch_test_losses[0]\r\n epochs -= 1\r\n\r\n if figure_number_0 is None:\r\n plt.figure()\r\n else:\r\n plt.figure(figure_number_0)\r\n if plot_accuracy_and_loss_on_same_figure:\r\n plt.title('Accuracy/Loss')\r\n else:\r\n plt.title('Accuracy')\r\n plt.xlabel('Epoch')\r\n\r\n train_accuracy_color = 'green' if not random_colors else np.random.rand(3, )\r\n test_accuracy_color = 'blue' if not random_colors else np.random.rand(3, )\r\n train_loss_color = 'red' if not random_colors else train_accuracy_color\r\n test_loss_color = 'yellow' if not random_colors else test_accuracy_color\r\n\r\n plt.plot(range(epochs), epoch_train_accuracies, color=train_accuracy_color,\r\n label=(label_prefix + '. ' if label_prefix != '' else '') + 'Train Accuracy')\r\n plt.plot(range(epochs), epoch_test_accuracies, color=test_accuracy_color,\r\n label=(label_prefix + '. ' if label_prefix != '' else '') + 'Test Accuracy')\r\n if not plot_accuracy_and_loss_on_same_figure:\r\n plt.legend(loc='lower left')\r\n if plt_show:\r\n plt.show()\r\n if figure_number_1 is None:\r\n plt.figure()\r\n else:\r\n plt.figure(figure_number_1)\r\n plt.xlabel('Epoch')\r\n plt.title('Loss')\r\n plt.plot(range(epochs), epoch_train_losses, color=train_loss_color,\r\n label=(label_prefix + '. ' if label_prefix != '' else '') + 'Train Loss')\r\n plt.plot(range(epochs), epoch_test_losses, color=test_loss_color,\r\n label=(label_prefix + '. 
' if label_prefix != '' else '') + 'Test Loss')\r\n plt.legend(loc='lower left')\r\n if plt_show:\r\n plt.show()\r\n print(f'Model Accuracy: {epoch_test_accuracies[-1]}, Model Loss: {epoch_test_losses[-1]}')\r\n return epoch_test_accuracies[-1], epoch_test_losses[-1]", "title": "" }, { "docid": "cf99ad3a2d6d4b7489bf00ae0150824a", "score": "0.62115777", "text": "def plot_hist(output=\"predict_hist\"):\n d = Data(default_config)\n finput = os.path.split(default_config[\"data_path\"])[-1]\n price_list, predict_list = d.get_data()\n predict_list.hist()\n plt.title(\"input_file: %s\" % finput)\n plt.xlabel(\"predict value\")\n plt.ylabel(\"count\")\n foutput = os.path.join(\"..\", \"regTest_output\", output)\n plt.savefig(output)", "title": "" }, { "docid": "38160be83f2eec69f8b00af25c49c9cb", "score": "0.62071127", "text": "def plot_training(plotting_history, plot_filename):\n figure, (axis_loss, axis_accuracy) = pyplot.subplots(2, 1, sharex=True, figsize=(8, 8))\n pyplot.subplots_adjust(hspace=0.00)\n\n _prepare_graph(axis_loss, plotting_history, True)\n _prepare_graph(axis_accuracy, plotting_history, False)\n\n figure.savefig(plot_filename)", "title": "" }, { "docid": "502147572c13556e16e397192dbbb7ba", "score": "0.62038356", "text": "def visualize(self, data):\n scatter_matrix(data)\n plot.show()", "title": "" }, { "docid": "6b734e82c4141e93a839427c7bb55d01", "score": "0.6195077", "text": "def make_plots(self, path, input_slice, semantic_output, semantic_target, semantic_predicted, stem_keypoint_output, stem_offset_output, stem_position_output, stem_position_target, test_run):\n image_bgr = visualization.tensor_to_bgr(input_slice[:3], **self.dataset_val.normalization_rgb_dict)\n image_nir = visualization.tensor_to_single_channel(input_slice[3], **self.dataset_val.normalization_nir_dict)\n image_false_color = visualization.tensor_to_false_color(input_slice[:3], input_slice[3],\n **self.dataset_val.normalization_rgb_dict, **self.dataset_val.normalization_nir_dict)\n\n plot_semantics = visualization.make_plot_from_semantic_output(input_rgb=input_slice[:3],\n input_nir=input_slice[3],\n semantic_output=semantic_output,\n semantic_target=None,\n apply_softmax=False,\n **self.dataset_val.normalization_rgb_dict,\n **self.dataset_val.normalization_nir_dict)\n\n plot_semantics_target_labels = visualization.make_plot_from_semantic_labels(input_rgb=input_slice[:3],\n input_nir=input_slice[3],\n semantic_labels=semantic_target,\n **self.dataset_val.normalization_rgb_dict,\n **self.dataset_val.normalization_nir_dict)\n\n plot_semantics_predicted_labels = visualization.make_plot_from_semantic_labels(input_rgb=input_slice[:3],\n input_nir=input_slice[3],\n semantic_labels=semantic_predicted,\n **self.dataset_val.normalization_rgb_dict,\n **self.dataset_val.normalization_nir_dict)\n\n plot_stems_keypoint_offset = visualization.make_plot_from_stem_keypoint_offset_output(input_rgb=input_slice[:3],\n input_nir=input_slice[3],\n stem_keypoint_output=stem_keypoint_output,\n stem_offset_output=stem_offset_output,\n keypoint_radius=self.keypoint_radius,\n apply_sigmoid=False,\n apply_tanh=False,\n **self.dataset_val.normalization_rgb_dict,\n **self.dataset_val.normalization_nir_dict)\n\n plot_stems = visualization.make_plot_from_stem_output(input_rgb=input_slice[:3],\n input_nir=input_slice[3],\n stem_position_output=stem_position_output,\n stem_position_target=stem_position_target,\n keypoint_radius=self.keypoint_radius,\n target_width=stem_keypoint_output.shape[-1],\n 
target_height=stem_keypoint_output.shape[-2],\n **self.dataset_val.normalization_rgb_dict,\n **self.dataset_val.normalization_nir_dict)\n\n path_rgb = path.parent/(path.name+'_rgb.jpg')\n path_nir = path.parent/(path.name+'_nir.jpg')\n path_false_color = path.parent/(path.name+'_false_color.jpg')\n path_semantics = path.parent/(path.name+'_semantics.jpg')\n path_semantics_target_labels = path.parent/(path.name+'_semantics_target_labels.jpg')\n path_semantics_predicted_labels = path.parent/(path.name+'_semantics_predicted_labels.jpg')\n path_stems_keypoint_offset = path.parent/(path.name+'_stems_keypoint_offset.jpg')\n path_stems = path.parent/(path.name+'_stems.jpg')\n\n if test_run:\n cv2.imshow('semantics', plot_semantics)\n cv2.imshow('semantics_target_labels', plot_semantics_target_labels)\n cv2.imshow('semantics_predicted_labels', plot_semantics_predicted_labels)\n cv2.imshow('stems_keypoint_offset', plot_stems_keypoint_offset)\n cv2.imshow('stems', plot_stems)\n # cv2.imshow('image_bgr', image_bgr)\n # cv2.imshow('image_nir', image_nir)\n # cv2.imshow('image_false_color', image_false_color)\n cv2.waitKey(0)\n\n cv2.imwrite(str(path_rgb), (255.0*image_bgr).astype(np.uint8))\n cv2.imwrite(str(path_nir), (255.0*image_nir).astype(np.uint8))\n cv2.imwrite(str(path_false_color), (255.0*image_false_color).astype(np.uint8))\n cv2.imwrite(str(path_semantics), (255.0*plot_semantics).astype(np.uint8))\n cv2.imwrite(str(path_semantics_target_labels), (255.0*plot_semantics_target_labels).astype(np.uint8))\n cv2.imwrite(str(path_semantics_predicted_labels), (255.0*plot_semantics_predicted_labels).astype(np.uint8))\n cv2.imwrite(str(path_stems_keypoint_offset), (255.0*plot_stems_keypoint_offset).astype(np.uint8))\n cv2.imwrite(str(path_stems), (255.0*plot_stems).astype(np.uint8))", "title": "" }, { "docid": "8d6f76dd4b5fe5da243f91c1355282f3", "score": "0.61896753", "text": "def plot_bunch_results():\n for img in images_gen:\n # Non-targeted\n adv_img, noise, losses = run_non_targeted_attack(image=img, model=model, **kwargs)\n fig, orig_label, adversarial_label = draw_result(img, noise, adv_img, model=model)\n plt.savefig('out/non_targeted/orig_label={},adversarial_label={}.png'.format(orig_label, adversarial_label))\n plt.close(fig)\n\n plt.plot(losses)\n plt.title('Cross Entropy Loss\\norig_label={},adversarial_label={}'.format(orig_label, adversarial_label))\n plt.ylabel('loss')\n plt.xlabel('n_iterations')\n plt.savefig(\n 'out/non_targeted/loss_orig_label={},adversarial_label={}.png'.format(orig_label, adversarial_label))\n plt.close()\n\n # Targeted\n adv_img, noise, losses = run_targeted_attack(image=img, label=823, model=model, **kwargs)\n fig, orig_label, adversarial_label = draw_result(img, noise, adv_img, model=model)\n plt.savefig('out/targeted/orig_label={},adversarial_label={}.png'.format(orig_label, adversarial_label))\n plt.close(fig)\n\n plt.plot(losses)\n plt.title('Cross Entropy Loss\\norig_label={},adversarial_label={}'.format(orig_label, adversarial_label))\n plt.ylabel('loss')\n plt.xlabel('n_iterations')\n plt.savefig('out/targeted/loss_orig_label={},adversarial_label={}.png'.format(orig_label, adversarial_label))\n plt.close()", "title": "" } ]
13aa2d7782d9b5810a055a80e3e9e114
Find application and its corresponding versions
[ { "docid": "d47acf5dcd33af52c89b45a996862281", "score": "0.0", "text": "def write_to_csv_check_application_versions():\n process_list = backdoor.check_application_version()\n\n if process_list:\n # Write to CSV file\n final_file_path = read_configure_file('file_location', value='application_and_versions.csv')\n backdoor.convert_to_csv(final_file_path, process_list)\n print(\"Application and versions are written in application_and_versions.csv\")\n\n # Write the CSV file to JSON\n backdoor.convert_csv_to_json(final_file_path)\n print(\"Application and versions are written in application_and_versions.json\")", "title": "" } ]
[ { "docid": "9aecd4f08ea2caedf7097609f00b2eb9", "score": "0.7185892", "text": "def versions_for_app(app_path):\n assert(os.path.isdir(app_path))\n\n versions = set()\n\n for potential_version in os.listdir(app_path):\n # Skip invalid entries\n if potential_version.startswith(\".\"):\n continue\n\n versions.add(potential_version)\n\n assert(len(versions) != 0)\n\n return versions", "title": "" }, { "docid": "6ee6fad2bb7e678b8f56266d62fe009c", "score": "0.67797", "text": "def _discoverApplications(self):\n applications = []\n\n if sys.platform == \"darwin\":\n prefix = [\"/\", \"Applications\"]\n\n applications.extend(self._searchFilesystem(\n versionExpression=r\"Hiero(?P<version>.*)\\/.+$\",\n expression=prefix + [\"Hiero\\d.+\", \"Hiero\\d.+.app\"],\n label=\"Hiero\",\n variant=\"{version}\",\n applicationIdentifier=\"hiero_{version}\",\n icon=\"hiero\"\n ))\n\n applications.extend(self._searchFilesystem(\n versionExpression=r\"Nuke(?P<version>.*)\\/.+$\",\n expression=prefix + [\"Nuke.*\", \"Hiero\\d[\\w.]+.app\"],\n label=\"Hiero\",\n variant=\"{version}\",\n applicationIdentifier=\"hiero_{version}\",\n icon=\"hiero\"\n ))\n\n elif sys.platform == \"win32\":\n prefix = [\"C:\\\\\", \"Program Files.*\"]\n\n applications.extend(self._searchFilesystem(\n expression=prefix + [\"Hiero\\d.+\", \"hiero.exe\"],\n label=\"Hiero\",\n variant=\"{version}\",\n applicationIdentifier=\"hiero_{version}\",\n icon=\"hiero\"\n ))\n\n # Somewhere along the way The Foundry changed the default install\n # directory.\n # Add the old directory as expression to find old installations of\n # Hiero as well.\n applications.extend(self._searchFilesystem(\n expression=prefix + [\"The Foundry\", \"Hiero\\d.+\", \"hiero.exe\"],\n label=\"Hiero\",\n variant=\"{version}\",\n applicationIdentifier=\"hiero_{version}\",\n icon=\"hiero\"\n ))\n\n version_expression = re.compile(\n r\"Nuke(?P<version>[\\d.]+[\\w\\d.]*)\"\n )\n\n applications.extend(self._searchFilesystem(\n expression=prefix + [\"Nuke.*\", \"Nuke\\d.+.exe\"],\n versionExpression=version_expression,\n label=\"Hiero\",\n variant=\"{version}\",\n applicationIdentifier=\"hiero_{version}\",\n icon=\"hiero\",\n launchArguments=[\"--hiero\"]\n ))\n\n elif sys.platform == \"linux2\":\n applications.extend(self._searchFilesystem(\n versionExpression=r\"Hiero(?P<version>.*)\\/.+\\/.+$\",\n expression=[\n \"/\", \"usr\", \"local\", \"Hiero.*\", \"bin\", \"Hiero\\d.+\"\n ],\n label=\"Hiero\",\n variant=\"{version}\",\n applicationIdentifier=\"hiero_{version}\",\n icon=\"hiero\"\n ))\n\n applications.extend(self._searchFilesystem(\n expression=[\"/\", \"usr\", \"local\", \"Nuke.*\", \"Nuke\\d.+\"],\n label=\"Hiero\",\n variant=\"{version}\",\n applicationIdentifier=\"hiero_{version}\",\n icon=\"hiero\",\n launchArguments=[\"--hiero\"]\n ))\n\n self.logger.debug(\n \"Discovered applications:\\n{0}\".format(\n pprint.pformat(applications)\n )\n )\n\n return applications", "title": "" }, { "docid": "e65afdf9ce33651656a7922db22a96da", "score": "0.6393468", "text": "def get_app_bin(self, app, version=None, system=None):\n global cache\n if cache.get('project_aps'):\n if self.code in cache.get('project_aps'):\n if app in cache.get('project_aps').get(self.code):\n return cache.get('project_aps').get(self.code).get(app)\n system = system or os.name\n result_app = pipeline_app = app\n binary_names = dict(\n hython=('houdini', 'hython'),\n mayapy=('maya','mayapy'),\n nython=('nuke','python'),\n )\n if pipeline_app in binary_names:\n result_app = binary_names[pipeline_app][1]\n 
pipeline_app = binary_names[pipeline_app][0]\n\n suff = {'nt':['windows', 'win'],'posix':['linux'],'os2': ['mac']}.get(system)\n if suff:\n path = [x for x in [self.software_paths().get('_'.join([pipeline_app, suf])) for suf in suff] if x]\n if not path:\n print 'Not found'\n return\n path = path[0]\n if not pipeline_app == result_app:\n dir = os.path.dirname(path)\n name, ext = os.path.splitext(os.path.basename(path))\n path = '/'.join([dir,result_app+ext])\n\n if '{version}' in path:\n versions = self.app_versions(pipeline_app)\n if not versions:\n raise Exception('Version list not found in config')\n if version:\n if not version in versions:\n raise Exception('Requested version not found: %s (%s)' % (version, versions))\n path = path.replace('{version}', str(version))\n else:\n version = sorted(versions)[-1]\n path = path.replace('{version}', str(version))\n path = path.replace('\\\\','/')\n # if not os.path.exists(path):\n # print '>>> WARNING: File not exists on this machine'\n if cache.get('project_aps'):\n if cache['project_aps'].get(self.code):\n cache['project_aps'][self.code][app] = path\n else:\n cache['project_aps'][self.code] = {app: path}\n else:\n cache['project_aps'] = {self.code:{app: path}}\n return path", "title": "" }, { "docid": "b3738350c48e5a99b0a8713eaca62826", "score": "0.6350528", "text": "def get_app_versions(self, add_on_key, application=None):\n params = {}\n if application:\n params[\"application\"] = application\n url = \"rest/2/addons/{addonKey}/versions\".format(addonKey=add_on_key)\n return self.get(url, params=params)", "title": "" }, { "docid": "653527af2bef91bd0163c1a0374b0d78", "score": "0.62919384", "text": "def get_app_versions(self, app_id):\n\n resp = self.http.get(url_maker(\"/apps\", app_id, \"versions\"))\n\n return self.process_data(resp)", "title": "" }, { "docid": "52a0dc0e2a6c746a47798ab767a809c1", "score": "0.62910664", "text": "def _find_exe_version(self):\n pass", "title": "" }, { "docid": "db5e13d496d84b2ce113374eda49a6e0", "score": "0.6181091", "text": "def apps(self) -> Tuple[Application, ...]:", "title": "" }, { "docid": "98f07cd20c110f98e387e92af82dfc93", "score": "0.61162776", "text": "def get_version():\n # Create the list of versions from our data\n return [RELEASE_VERSIONS[key] for key in sorted(RELEASE_VERSIONS.keys())]", "title": "" }, { "docid": "00391dde24acbe0d446cbe0eb8aecad7", "score": "0.5999538", "text": "def find_programdata_vs_vers(self): # -> dict[Unknown, Unknown]:\n ...", "title": "" }, { "docid": "abe9558ef02c457ac29d948d7d855106", "score": "0.59892696", "text": "def find_matching_pkginfo(self, pkginfo):\n if not pkginfo.get(\"installer_item_hash\"):\n return None\n\n pkgdb = self._make_catalog_db()\n # match hashes for the pkg or dmg\n if \"installer_item_hash\" in pkginfo:\n matchingindexes = pkgdb[\"hashes\"].get(pkginfo[\"installer_item_hash\"])\n if matchingindexes:\n # we have an item with the exact same checksum hash in the repo\n return pkgdb[\"items\"][matchingindexes[0]]\n\n # try to match against installed applications\n applist = [\n item\n for item in pkginfo.get(\"installs\", [])\n if item.get(\"type\") in (\"application\", \"bundle\") and \"path\" in item\n ]\n if applist:\n matching_indexes = []\n for app in applist:\n app_path = app[\"path\"]\n if \"version_comparison_key\" in app:\n app_version = app[app[\"version_comparison_key\"]]\n else:\n app_version = app[\"CFBundleShortVersionString\"]\n match = pkgdb[\"applications\"].get(app_path, {}).get(app_version)\n if not match:\n # no entry for 
app['path'] and app['version']\n # no point in continuing\n return None\n else:\n if not matching_indexes:\n # store the array of matching item indexes\n matching_indexes = set(match)\n else:\n # we're only interested in items that match\n # all applications\n matching_indexes = matching_indexes.intersection(set(match))\n\n # did we find any matches?\n if matching_indexes:\n return pkgdb[\"items\"][list(matching_indexes)[0]]\n\n # fall back to matching against receipts\n matching_indexes = []\n for item in pkginfo.get(\"receipts\", []):\n pkgid = item.get(\"packageid\")\n vers = item.get(\"version\")\n if pkgid and vers:\n match = pkgdb[\"receipts\"].get(pkgid, {}).get(vers)\n if not match:\n # no entry for pkgid and vers\n # no point in continuing\n return None\n else:\n if not matching_indexes:\n # store the array of matching item indexes\n matching_indexes = set(match)\n else:\n # we're only interested in items that match\n # all receipts\n matching_indexes = matching_indexes.intersection(set(match))\n\n # did we find any matches?\n if matching_indexes:\n return pkgdb[\"items\"][list(matching_indexes)[0]]\n\n # try to match against install md5checksums\n filelist = [\n item\n for item in pkginfo.get(\"installs\", [])\n if item[\"type\"] == \"file\" and \"path\" in item and \"md5checksum\" in item\n ]\n if filelist:\n for fileitem in filelist:\n cksum = fileitem[\"md5checksum\"]\n if cksum in pkgdb[\"checksums\"]:\n cksum_matches = pkgdb[\"checksums\"][cksum]\n for cksum_match in cksum_matches:\n if cksum_match[\"path\"] == fileitem[\"path\"]:\n matching_pkg = pkgdb[\"items\"][cksum_match[\"index\"]]\n\n # TODO: maybe match pkg name, too?\n # if matching_pkg['name'] == pkginfo['name']:\n\n return matching_pkg\n\n # Try to match against a simple list of files and paths\n # where our pkginfo version also matches\n path_only_filelist = [\n item\n for item in pkginfo.get(\"installs\", [])\n if item.get(\"type\") == \"file\"\n and \"path\" in item\n and \"md5checksum\" not in item\n ]\n if path_only_filelist:\n for pathitem in path_only_filelist:\n path = pathitem[\"path\"]\n if path in pkgdb[\"files\"]:\n path_matches = pkgdb[\"files\"][path]\n for path_match in path_matches:\n if path_match[\"path\"] == pathitem[\"path\"]:\n matching_pkg = pkgdb[\"items\"][path_match[\"index\"]]\n # make sure we do this only for items that also\n # match our pkginfo version\n if matching_pkg[\"version\"] == pkginfo[\"version\"]:\n return matching_pkg\n\n # if we get here, we found no matches\n return None", "title": "" }, { "docid": "67f5273df3fbaababbf1e908e3fd88da", "score": "0.59230524", "text": "def deployed_version():\n run('cat %(current)s/setup.py | grep -i version' % env)", "title": "" }, { "docid": "6deb5b2e8d49c6fa84b612aa4aeb0bcc", "score": "0.59152794", "text": "def get_application_info(self):\n return dict((app.name, app.url) for app in self.get_all(AppStatus))", "title": "" }, { "docid": "efb920284acd3a3e784555a85a1f75ff", "score": "0.5912217", "text": "def find_app(pattern: Optional[str]) -> None:\n do_find_app(pattern)", "title": "" }, { "docid": "6fbe6617fe86d76de2c69b4113239382", "score": "0.5908145", "text": "def expected_apps(self):\n _apps = [\n 'ceph-radosgw'\n ]\n try:\n zaza_model.get_application('slave-ceph-radosgw')\n _apps.append('slave-ceph-radosgw')\n except KeyError:\n pass\n return _apps", "title": "" }, { "docid": "dcfa5433939342db50c6272ada62afed", "score": "0.59054655", "text": "def getApplications(ms):\n try:\n thistory = pt.table(ms.getkeyword('HISTORY'), readonly=True, 
ack=False)\n appList = []\n \n for app in thistory.getcol('APPLICATION'): \n if app != 'imager':\n appList.append(app)\n return appList\n except:\n return []", "title": "" }, { "docid": "72b82e797c2312b9a056e8ff601d851a", "score": "0.5873698", "text": "def _find_packaged_app(self, command):\n\n\t\t# Used the last occurrence of a .app path as the app's name\n\t\t#app_search = self._packaged_app_name_search.search(command)\n\t\tapp_search = re.findall(self._packaged_app_name_search, command)\n\t\tif app_search:\n\t\t\treturn app_search[-1]\n\t\treturn \"\"", "title": "" }, { "docid": "4f700e24f9dfbe09e5f33dd148879b53", "score": "0.58674526", "text": "def get_versions():\n\tresponse = requests.get('https://code.google.com/p/googleappengine/downloads/list?can=1&q=&colspec=Filename&num=2000')\n\tresponse.raise_for_status()\n\n\tversions = []\n\tfor match in re.finditer(r'google_appengine_([0-9]+)\\.([0-9]+)\\.([0-9]+)\\.zip', response.text):\n\t\tversions.append((match.group(1),match.group(2),match.group(3)))\n\n\treturn sorted(set(versions), key=lambda tup: str(tup), reverse=True)", "title": "" }, { "docid": "5d608e1477b03d65f0f2d06199653a8b", "score": "0.58299094", "text": "def get_currently_served_version(app_name):\n listed_versions = subprocess.check_output([\n GCLOUD_PATH, 'app', 'versions', 'list', '--hide-no-traffic',\n '--service=default', '--project=%s' % app_name])\n default_version_line_start_str = 'default '\n listed_versions = listed_versions[\n listed_versions.index(default_version_line_start_str) + len(\n default_version_line_start_str):]\n return listed_versions[:listed_versions.index(' ')]", "title": "" }, { "docid": "8d761475443c88594053789a4aba74a9", "score": "0.58276325", "text": "def list_apps(pattern: Optional[str]) -> None:\n do_list_app(pattern)", "title": "" }, { "docid": "24b5e93d2da420aaa819737fcbbd805d", "score": "0.5768763", "text": "def match_version(self,versions):\n pass", "title": "" }, { "docid": "67337ffd45be6978e7d48eb6f1f6d045", "score": "0.57567775", "text": "def get_app_version(self, app_id, version_id):\n\n resp = self.http.get(url_maker(\"/apps\", app_id, \"versions\", version_id))\n\n return self.process_data(resp)", "title": "" }, { "docid": "ea4c83bec00533c6f69bd37cc0a08884", "score": "0.5744063", "text": "def get_installed_version(name):\n versions = []\n for pydir in get_python_lib_paths():\n for egg_path in glob.glob(\"%s-*.egg*\" % os.path.join(pydir, name)):\n egg = os.path.basename(egg_path)\n versions.append(map(int, egg.split('-')[1].split('.')))\n if versions:\n return \".\".join(map(str, max(versions)))", "title": "" }, { "docid": "a45122377bd04635e6ba37c5c8957ef6", "score": "0.5740184", "text": "def get_apps(self):\n return self.get_json_field('apps', path='/v2/apps')", "title": "" }, { "docid": "36b7b1b0accb4e7133abb8d4689e0fd4", "score": "0.57084435", "text": "def rq_app_list():\n print(\"[COLLECT] Downloading application directory\")\n\n rq = requests.get(\"https://api.steampowered.com/ISteamApps/GetAppList/v2\")\n\n assert rq.status_code == requests.codes.ok\n return rq.json()['applist']['apps']", "title": "" }, { "docid": "2be108ce8d5112186131f530c4da49d1", "score": "0.5693642", "text": "def _find_exe_app(self, command):\n\t\texe_search = self._exe_app_name_search.search(command)\n\t\tif exe_search:\n\t\t\treturn exe_search.group(1)\n\t\treturn \"\"", "title": "" }, { "docid": "2c9bef7a4ed2f159fc9a5e490cc347a4", "score": "0.56896996", "text": "def test_get_versions(self):\n # type: () -> None\n import hardest.python_searcher as 
pysearch\n from hardest.python_version import PythonVersion\n\n instance = pysearch.PythonSearcher(env=self.env,\n validator=self.validator)\n\n test_versions_paths = {\n self.binpath + 'python',\n self.binpath + 'python1.2',\n self.binpath + 'jython9.1',\n self.binpath + 'anaconda',\n self.binpath + 'raisecode',\n }\n test_versions = {\n PythonVersion('Python test.1.2', {\n self.binpath + 'python',\n self.binpath + 'python1.2',\n }),\n PythonVersion('Jython test.9.1', {\n self.binpath + 'jython9.1',\n }),\n PythonVersion('Anaconda test.3.1', {\n self.binpath + 'anaconda',\n }),\n }\n bad_version = PythonVersion('Unknown', {\n self.binpath + 'raisecode',\n })\n found_vers = set(instance.get_python_versions(test_versions_paths))\n self.assertEqual(test_versions & found_vers, test_versions)\n self.assertIn(bad_version, instance.bad_versions)", "title": "" }, { "docid": "fcf9ea240f21ee7cf178e19f4bfbd455", "score": "0.56792116", "text": "def main(connection, userid = None, different = False, version = True, missing = True):\n coremodule,coreconfig = loadapp(\"Core\")\n loadtables(coremodule,coreconfig,connection,missing=missing,version=version)\n if userid is None:\n apps = Core.getinstalledapps(connection)\n else:\n apps = Core.getuserapps(connection,userid)\n apps = [app['app'] for app in apps]\n appsloaded = dict(apps={})\n for app in apps:\n module,config = loadapp(app)\n if not module:\n appsloaded['apps'][app] = None\n continue\n tables = loadtables(module,config,connection,missing=missing,version=version, different = different)\n appsloaded['apps'][app] = dict(module=module, tables = tables)\n return appsloaded", "title": "" }, { "docid": "3e94b2d935550a5a106161b9c4685f0f", "score": "0.56642157", "text": "def getKnown(db):\n cursor = db.cursor()\n cursor.execute('SELECT DISTINCT application_name, application_version, application_release, build_distribution FROM build')\n tuples = imap(tuple, results(cursor))\n builds = starmap(Build, tuples)\n known = set(builds)\n return known", "title": "" }, { "docid": "699e9b2cba55f3ad438ab4ffded853ef", "score": "0.56591374", "text": "def installed_packages(versions=installed_versions()):\n bins = set()\n for directory in os.listdir(STORAGE):\n if directory in versions:\n for b in os.listdir(STORAGE + '/' + directory + '/bin/'):\n if b != 'npm' and b != 'node' and in_registry(b):\n bins.add(b)\n return bins", "title": "" }, { "docid": "38b985af9531550b083fd296cf982aee", "score": "0.56524533", "text": "def get_available_versions(self):\n with open('.klasses.json', 'r') as f:\n klass_versions = json.loads(f.read())\n\n return [\n version\n for version in klass_versions\n if self.module_name in klass_versions[version] and\n self.klass_name in klass_versions[version][self.module_name]\n ]", "title": "" }, { "docid": "36242795e2ec6725cd3a38f99fc58814", "score": "0.5642893", "text": "def main():\n compare_versions(Stringdb())", "title": "" }, { "docid": "13c3a2e2f080c805c83a74b9b7a18bad", "score": "0.5639076", "text": "def infos_for_results(result_path):\n infos = dict()\n\n for app_path in os.listdir(result_path):\n # Skip invalid entries\n if app_path.startswith(\".\"):\n continue\n\n app_versions = versions_for_app(os.path.join(result_path, app_path))\n infos[app_path] = app_versions\n\n return infos", "title": "" }, { "docid": "7ab8c73b3a1fcb68c0a264a29b1afa9a", "score": "0.5623244", "text": "def list_installables(args):\n oerpenv = OdooEnvironment(config_filename=args.config)\n oerpenv.set_python_environment(args.environment)\n\n for application in 
oerpenv.installables:\n print \"--\"\n print \"Application:\", application.name\n print \"Version:\", application.fullname\n print \"Description:\", application.description", "title": "" }, { "docid": "3b1269763f4dcc4b846d005475b80922", "score": "0.5621373", "text": "def get_app(client):\n total_apps = len(client.applications)\n if total_apps == 2:\n for app in client.applications:\n if app.name != 'Stormpath':\n return app\n\n apps = client.applications.search({'name': 'flask-stormpath-sample'})\n return apps[0] if len(apps) == 1 else None", "title": "" }, { "docid": "96ffba0718082542efce9b02dba16fe6", "score": "0.55815244", "text": "def get_latest_versions(self) -> None:\n for package_id in self.adjacency_list.keys():\n try:\n new_item = get_latest_version_available_in_registry(\n self.ctx,\n str(package_id.package_type),\n package_id.public_id.to_latest(),\n aea_version=self._current_aea_version,\n )\n except click.ClickException: # pragma: nocover\n continue\n if package_id.public_id.version == new_item.version:\n continue\n new_version = new_item.version\n self.item_to_new_version[package_id] = new_version", "title": "" }, { "docid": "b5bd6b08e41362cc054e7b73e5bf8e98", "score": "0.5577247", "text": "def get_version(self):\n # OVS version can be read offline\n return []", "title": "" }, { "docid": "11bebdceb80416d0f87c75fc46c71b59", "score": "0.55697393", "text": "def _get_apps_for_session(self, session_name):\n return {\n name: data\n for name, data in self.conf.app.raw().items() if data.get('kernel') == session_name\n }", "title": "" }, { "docid": "42de9183bc0b86e3fb6de7212d345686", "score": "0.55584127", "text": "def get_version():\n return get_versions()[\"version\"]", "title": "" }, { "docid": "3125681d9d5e8f26ac7b1d69d3170832", "score": "0.55537254", "text": "def update_application_info(self):\n try:\n status_on_all_nodes = self.helper.get_status_info()\n app_names_and_urls = {}\n\n if not status_on_all_nodes:\n return {}\n\n for status in status_on_all_nodes:\n for app, done_loading in status['apps'].iteritems():\n if app == self.NO_APPS_RUNNING:\n continue\n if done_loading:\n try:\n app_names_and_urls[app] = \"http://{0}:{1}\".format(\n self.helper.get_login_host(), self.helper.get_app_port(app))\n except AppHelperException:\n app_names_and_urls[app] = None\n else:\n app_names_and_urls[app] = None\n\n # To make sure that we only update apps that have been recently uploaded\n # or removed, we grab a list of all the apps that were running before we\n # asked the AppController and compare it against the list of apps that the\n # AppController reports are now running.\n all_apps = self.get_all(AppStatus)\n all_app_names_were_running = [app.key.id() for app in all_apps]\n all_app_names_are_running = [app for app in app_names_and_urls.keys()]\n\n # Delete any apps that are no longer running.\n app_names_to_delete = []\n for app_name in all_app_names_were_running:\n if app_name not in all_app_names_are_running:\n app_names_to_delete.append(app_name)\n elif not app_names_and_urls[app_name]:\n app_names_to_delete.append(app_name)\n\n if app_names_to_delete:\n apps_to_delete = []\n for app in all_apps:\n if app.name in app_names_to_delete:\n apps_to_delete.append(app.key)\n ndb.delete_multi(apps_to_delete)\n\n # Add in new apps that are now running.\n app_names_to_add = []\n for app_name in all_app_names_are_running:\n if app_name not in all_app_names_were_running:\n app_names_to_add.append(app_name)\n elif app_names_and_urls[app_name]:\n app_names_to_add.append(app_name)\n\n # Also add 
in apps that have been relocated, since we need to update the\n # URL that the user can access the app at.\n for app in all_apps:\n if app.key.id() in all_app_names_are_running and \\\n app.url != app_names_and_urls[app.key.id()]:\n app_names_to_add.append(app_name)\n\n if app_names_to_add:\n apps_to_add = [AppStatus(id=app, name=app, url=app_names_and_urls[app])\n for app in app_names_to_add]\n ndb.put_multi(apps_to_add)\n\n return app_names_and_urls\n except Exception as err:\n logging.exception(err)\n return {}", "title": "" }, { "docid": "fe65af56082fa220e60fc8f04bd23a8d", "score": "0.5552964", "text": "def recent(\n self, *, owner_name: str, app_name: str\n ) -> List[BasicReleaseDetailsResponse]:\n\n self.log.info(f\"Getting recent versions of app: {owner_name}/{app_name}\")\n\n request_url = self.generate_url(owner_name=owner_name, app_name=app_name)\n request_url += \"/recent_releases\"\n\n response = self.get(request_url)\n\n return deserialize.deserialize(\n List[BasicReleaseDetailsResponse], response.json()\n )", "title": "" }, { "docid": "b6feeb16c7c22c071c5b07f1318fe0dd", "score": "0.5538371", "text": "def get_version(output):\n\tfrom git import Repo\n\tfrom git.exc import InvalidGitRepositoryError\n\n\tfrom frappe.utils.change_log import get_app_branch\n\tfrom frappe.utils.commands import render_table\n\n\tfrappe.init(\"\")\n\tdata = []\n\n\tfor app in sorted(frappe.get_all_apps()):\n\t\tmodule = frappe.get_module(app)\n\t\tapp_hooks = frappe.get_module(app + \".hooks\")\n\n\t\tapp_info = frappe._dict()\n\n\t\ttry:\n\t\t\tapp_info.commit = Repo(frappe.get_app_source_path(app)).head.object.hexsha[:7]\n\t\texcept InvalidGitRepositoryError:\n\t\t\tapp_info.commit = \"\"\n\n\t\tapp_info.app = app\n\t\tapp_info.branch = get_app_branch(app)\n\t\tapp_info.version = getattr(app_hooks, f\"{app_info.branch}_version\", None) or module.__version__\n\n\t\tdata.append(app_info)\n\n\t{\n\t\t\"legacy\": lambda: [click.echo(f\"{app_info.app} {app_info.version}\") for app_info in data],\n\t\t\"plain\": lambda: [\n\t\t\tclick.echo(f\"{app_info.app} {app_info.version} {app_info.branch} ({app_info.commit})\")\n\t\t\tfor app_info in data\n\t\t],\n\t\t\"table\": lambda: render_table(\n\t\t\t[[\"App\", \"Version\", \"Branch\", \"Commit\"]]\n\t\t\t+ [[app_info.app, app_info.version, app_info.branch, app_info.commit] for app_info in data]\n\t\t),\n\t\t\"json\": lambda: click.echo(json.dumps(data, indent=4)),\n\t}[output]()", "title": "" }, { "docid": "a0c250b88527d451fd414434a9ff0741", "score": "0.55006915", "text": "def apps():\n pass", "title": "" }, { "docid": "4025d6cc5f186cacf8cdd38d17795109", "score": "0.54841936", "text": "def package_versions(context, request):\n normalized_name = normalize_name(context.name)\n versions = request.db.all(normalized_name)\n return {\n \"packages\": versions,\n \"write\": request.access.has_permission(normalized_name, \"write\"),\n }", "title": "" }, { "docid": "696a9c6d8c95260d3647f7e5183e0b37", "score": "0.5474778", "text": "def installed_versions():\n versions = [version for version in os.listdir(STORAGE)]\n versions = sorted(versions, key=functools.cmp_to_key(semver.compare))\n\n return versions", "title": "" }, { "docid": "a922612e4fdb0b09f0348d09648d9462", "score": "0.54717517", "text": "def get_all_versions(self, module):\n bru_file_names = os.listdir(self.get_module_dir(module))\n regex = re.compile('^(.+)\\\\.bru$') # version can be 1.2.3 or 1.2rc7 or ...\n for bru_file_name in bru_file_names:\n match = regex.match(bru_file_name)\n if match != None:\n 
version = match.group(1)\n yield version", "title": "" }, { "docid": "a1e1cbf57c867f954cd6f05c2ae01bb7", "score": "0.5466233", "text": "def _get_app_data(self, app):\n # Gather a copy of the app configuration.\n env = app.config.copy()\n\n # Gather current environment variables, prefix with \"os.\".\n for key, val in os.environ.iteritems():\n env['os.{0}'.format(key)] = val\n\n # Gather mapping of module names to versions.\n modules = {}\n for module in pkg_resources.working_set:\n modules[module.key] = module.version\n\n return {\n 'env': self.filter(env, 'configuration'),\n 'python_version': sys.version.replace('\\n', ''),\n 'application_root_directory': app.root_path,\n 'loaded_modules': modules,\n }", "title": "" }, { "docid": "0ca9f3dce84d52f849f6e7639b3cef29", "score": "0.54638886", "text": "def find_reg_vs_vers(self): # -> List[Unknown]:\n ...", "title": "" }, { "docid": "873300ce263db17e06562f9c64de36f9", "score": "0.54631776", "text": "def select_versions(self):\n return [('5.2', '5.2+')]", "title": "" }, { "docid": "4af72630e2d3621c22b26a6f5f5a4c29", "score": "0.5455188", "text": "def get_versions(self):\n return self.versions", "title": "" }, { "docid": "717439eadd660a23e91b155a2e65f029", "score": "0.5453852", "text": "def GetVersions(api_name):\n # pylint:disable=protected-access\n return apis_internal._GetVersions(api_name)", "title": "" }, { "docid": "75bcf38e84fd90674d10c42b7eb5f303", "score": "0.5453016", "text": "def get_version_info():\n import importlib\n report = {}\n # list the core packages here\n packages = [\"openff.qcsubmit\", \"openff.toolkit\", \"basis_set_exchange\", \"qcelemental\"]\n for package in packages:\n module = importlib.import_module(package)\n report[package] = pd.Series({\"version\": module.__version__})\n\n # now try openeye else use rdkit\n try:\n import openeye\n report[\"openeye\"] = pd.Series({\"version\": openeye.__version__})\n except ImportError:\n import rdkit\n report[\"rdkit\"] = pd.Series({\"version\": rdkit.__version__})\n\n return pd.DataFrame(report).transpose()", "title": "" }, { "docid": "0bc44c9647b89908aaa749f2943562d2", "score": "0.5452543", "text": "def show_versions(self):\n versions = [_('Versions of frameworks and their installed plugins:')]\n for name, runner in sorted(self.framework_registry.frameworks.items()):\n version = (runner.get_versions(self) if runner.is_installed()\n else None)\n versions.append('\\n'.join(version) if version else\n '{}: {}'.format(name, _('not available')))\n QMessageBox.information(self, _('Dependencies'),\n _('\\n\\n'.join(versions)))", "title": "" }, { "docid": "bfd35f401e1cf2ccf81224c5fb4b4f3c", "score": "0.5450613", "text": "def list_app(self, third_only=False):\n cmd = [\"pm\", \"list\", \"packages\"]\n if third_only:\n cmd.append(\"-3\")\n output = self.shell(cmd)\n packages = output.splitlines()\n # remove all empty string; \"package:xxx\" -> \"xxx\"\n packages = [p.split(\":\")[1] for p in packages if p]\n return packages", "title": "" }, { "docid": "d0391942928702a1adfeb0d312a694b1", "score": "0.54418886", "text": "def findApp(\n appName,\n checkoutApp,\n dataLoc,\n backend,\n _browse,\n *args,\n silent=SILENT_D,\n version=None,\n legacy=False,\n **kwargs,\n):\n\n (commit, release, local) = (None, None, None)\n extraMod = None\n\n appLoc = None\n if appName is not None and appName.startswith(\"app:\"):\n appLoc = normpath(appName[4:])\n else:\n appName = normpath(appName)\n\n if dataLoc is None and appName is None:\n console(\"No TF-app and no data location specified\", error=True)\n 
return None\n\n if dataLoc is not None and appName is not None:\n console(\"Both a TF-app and a data location are specified\", error=True)\n return None\n\n dataOrg = None\n dataRepo = None\n dataFolder = None\n inNb = runsInNotebook()\n\n if silent not in {TERSE, DEEP}:\n dm(\"**Locating corpus resources ...**\", inNb=inNb)\n\n if dataLoc is None:\n if appLoc:\n if \":\" in appLoc:\n console(\n \"When passing an app by `app:fullpath` you cannot use :-specifiers\"\n )\n return None\n appPath = ex(appLoc) if appLoc else \"\"\n absPath = abspath(appPath)\n\n if isDir(absPath):\n appDir = absPath\n appBase = \"\"\n else:\n console(f\"{absPath} is not an existing directory\", error=True)\n appBase = False\n appDir = None\n appPath = appDir\n elif \"/\" in appName:\n (dataOrg, rest) = appName.split(\"/\", maxsplit=1)\n (dataRepo, *rest) = rest.split(\"/\")\n if len(rest) > 0 and rest[-1] == APP_APP:\n appParts = rest\n dataParts = rest[0:-1] + [RELATIVE]\n elif len(rest) > 0 and rest[-1] == RELATIVE:\n appParts = rest[0:-1] + [APP_APP]\n dataParts = rest\n else:\n dataParts = rest + [RELATIVE]\n appParts = rest + [APP_APP]\n appFolder = prefixSlash(\"/\".join(appParts))\n dataFolder = prefixSlash(\"/\".join(dataParts))\n\n (commit, release, local, appBase, appDir) = checkoutRepo(\n backend,\n _browse=_browse,\n org=dataOrg,\n repo=dataRepo,\n folder=appFolder,\n checkout=checkoutApp,\n withPaths=True,\n keep=False,\n silent=silent,\n label=\"app\",\n )\n appBaseRep = f\"{appBase}/\" if appBase else \"\"\n appPath = f\"{appBaseRep}{appDir}\"\n else:\n (commit, release, local, appBase, appDir) = checkoutRepo(\n backend,\n _browse=_browse,\n org=ORG,\n repo=f\"app-{appName}\",\n folder=APP_CODE,\n checkout=checkoutApp,\n withPaths=True,\n keep=False,\n silent=silent,\n label=\"app\",\n )\n appBaseRep = f\"{appBase}/\" if appBase else \"\"\n appPath = f\"{appBaseRep}{appDir}\"\n cfg = findAppConfig(appName, appPath, commit, release, local, backend)\n provenanceSpec = kwargs.get(\"provenanceSpec\", {})\n if provenanceSpec:\n for k in (\"org\", \"repo\", \"relative\"):\n value = provenanceSpec.get(k, None)\n if value:\n if k == \"relative\":\n value = prefixSlash(value)\n cfg[k] = value\n\n dataOrg = cfg.get(\"provenanceSpec\", {}).get(\"org\", None)\n dataRepo = cfg.get(\"provenanceSpec\", {}).get(\"repo\", None)\n\n if not legacy:\n if dataOrg:\n console(\n (\n \"WARNING: in the future, pass \"\n f\"`{dataOrg}/{appName}` instead of `{appName}`\"\n ),\n error=True,\n )\n (commit, release, local, appBase, appDir) = checkoutRepo(\n backend,\n _browse=_browse,\n org=dataOrg,\n repo=dataRepo,\n folder=APP_APP,\n checkout=checkoutApp,\n withPaths=True,\n keep=False,\n silent=silent,\n label=\"app\",\n )\n appBaseRep = f\"{appBase}/\" if appBase else \"\"\n appPath = f\"{appBaseRep}{appDir}\"\n else:\n parts = dataLoc.split(\":\", maxsplit=1)\n if len(parts) == 1:\n parts.append(\"\")\n (dataLoc, checkoutData) = parts\n if checkoutData == \"\":\n appPath = ex(dataLoc) if dataLoc else \"\"\n absPath = abspath(appPath)\n\n if isDir(absPath):\n (appDir, appName) = splitPath(absPath)\n appBase = \"\"\n else:\n console(f\"{absPath} is not an existing directory\", error=True)\n appBase = False\n appDir = None\n appPath = appDir\n else:\n appBase = \"\"\n appDir = \"\"\n appPath = \"\"\n extraMod = f\"{dataLoc}:{checkoutData}\"\n\n cfg = findAppConfig(\n appName,\n appPath,\n commit,\n release,\n local,\n backend,\n org=dataOrg,\n repo=dataRepo,\n relative=dataFolder,\n version=version,\n )\n version = 
cfg[\"provenanceSpec\"].get(\"version\", None)\n isCompatible = cfg[\"isCompatible\"]\n if isCompatible is None:\n appClass = App\n elif not isCompatible:\n return None\n else:\n appBaseRep = f\"{appBase}/\" if appBase else \"\"\n appPath = f\"{appBaseRep}{appDir}\"\n\n appClass = findAppClass(appName, appPath) or App\n\n mod = kwargs.get(\"mod\", [])\n mod = [] if mod is None else mod.split(\",\") if type(mod) is str else list(mod)\n if extraMod:\n if len(mod) > 0:\n mod = [extraMod, *mod]\n else:\n mod = [extraMod]\n kwargs[\"mod\"] = mod\n try:\n app = appClass(\n cfg,\n appName,\n appPath,\n commit,\n release,\n local,\n backend,\n _browse,\n *args,\n version=version,\n silent=silent,\n **kwargs,\n )\n except Exception as e:\n if appClass is not App:\n console(\n f\"There was an error loading corpus {appName}\",\n error=True,\n )\n console(repr(e), error=True)\n traceback.print_exc()\n console(\"Text-Fabric is not loaded\", error=True)\n return None\n return app", "title": "" }, { "docid": "6acc4ed2308ee6ce4db8196fa5ac3c7b", "score": "0.5438261", "text": "def getApp(self,appname):\n return self.apps[appname]", "title": "" }, { "docid": "75ba8eaa250b2e910528823bf6a924fc", "score": "0.5437068", "text": "def version_info(ctx=None):\n from . import __version__\n\n prog = ctx.find_root().info_name if ctx else APP_NAME\n version = __version__\n try:\n import pkg_resources\n except ImportError:\n pass\n else:\n for dist in iter(pkg_resources.working_set):\n scripts = dist.get_entry_map().get('console_scripts') or {}\n for _, entry_point in scripts.items():\n if entry_point.module_name == (__package__ + '.__main__'):\n version = dist.version\n break\n\n return VERSION_INFO % dict(prog=prog, version=version)", "title": "" }, { "docid": "6c7ff6e4744a56492140ef87d1ff84e5", "score": "0.5434671", "text": "def do_application_info(self, arg):\n if self._upcheck():\n applications = arg.split()\n if not applications or \"all\" in applications:\n try:\n infos = self.supvisors().get_all_applications_info()\n except xmlrpclib.Fault, e:\n self.ctl.output('ERROR ({})'.format(e.faultString))\n else:\n for info in infos:\n self.output_application_info(info)\n else:\n for application_name in applications:\n try:\n info = self.supvisors().get_application_info(application_name)\n except xmlrpclib.Fault, e:\n self.ctl.output('{}: ERROR ({})'.format(application_name, e.faultString))\n else:\n self.output_application_info(info)", "title": "" }, { "docid": "3991ae88aa4fcea8efb839c452a3b8ab", "score": "0.5431224", "text": "def RefreshPackages(self):\n names = self.cmd.run(\"/sbin/apk info\").stdout.splitlines()\n nameversions = self.cmd.run(\"/sbin/apk info -v\").stdout.splitlines()\n for pkg in zip(names, nameversions):\n pkgname = pkg[0]\n version = pkg[1][len(pkgname) + 1:]\n self.logger.debug(\" pkgname: %s\" % pkgname)\n self.logger.debug(\" version: %s\" % version)\n self.installed[pkgname] = version", "title": "" }, { "docid": "472ac1356525e2abfa6cde8e73203ba0", "score": "0.54180527", "text": "def get_apps(self, **kwargs):\n\n resp = self.http.get(\"/apps\")\n\n return self.process_data(resp)", "title": "" }, { "docid": "8f18768a664dc1444eb1824a9a1f04a3", "score": "0.5415859", "text": "def applications(self):\n return self._applications", "title": "" }, { "docid": "8f18768a664dc1444eb1824a9a1f04a3", "score": "0.5415859", "text": "def applications(self):\n return self._applications", "title": "" }, { "docid": "97d0799ff2ab91bfbf8e2169d6bbf91b", "score": "0.54154146", "text": "def apps(server):\n server_info = 
CONFIGURED_HOSTS[server]\n ssh_host = server_info[\"host\"]\n ssh_user = server_info.get(\"user\")\n executor = Executor(ssh_host, user=ssh_user)\n # The caddy configuration files are used as source of truth\n # to get the list of installed apps\n apps = executor.run(\"ls ~/dallinger/caddy.d\")\n for app in apps.split():\n print(app)", "title": "" }, { "docid": "95a7a85deeee1fb1dc6c75aa38b0c856", "score": "0.54145324", "text": "def which_app(app):\n\n for path in os.environ[\"PATH\"].split(os.pathsep):\n fname = app + \".toml\"\n abspath = os.path.join(path.strip('\"'), fname)\n\n if os.path.isfile(abspath):\n return abspath\n\n return None", "title": "" }, { "docid": "62f5f80263c4255860d0721cfd4d073d", "score": "0.54106504", "text": "def identify_new_apps(result_infos, itunes_infos):\n new_apps = list()\n\n for bundleId in itunes_infos:\n app_info = itunes_infos[bundleId]\n if app_info[\"price\"] == 0.0:\n if bundleId not in result_infos:\n itunes_id = app_info[\"trackId\"]\n new_apps.append(itunes_id)\n\n return new_apps", "title": "" }, { "docid": "c9a33fe765009ff9bd1a6ba25fa024be", "score": "0.5410111", "text": "def get_app(self, name):\n params = {}\n self._add_key_param(params)\n return self._op('apps', name, params = params)", "title": "" }, { "docid": "e0a4c895121c3bf830aafd24228424da", "score": "0.5400375", "text": "def set_app_version():\n cmd = [\"kubelet\", \"--version\"]\n version = check_output(cmd)\n hookenv.application_version_set(version.split(b\" v\")[-1].rstrip())", "title": "" }, { "docid": "0fb3d0b3e3c0693a799157cedf8f987a", "score": "0.5392899", "text": "def get_apps(args):\n if ch and not args.debug:\n ch.setLevel(logging.INFO)\n else:\n ch.setLevel(logging.DEBUG)\n\n try:\n cloud = ocs.Ocs(\n Config.CLOUD_USER, Config.CLOUD_USER_PWD, Config.CLOUD_BASE_URL\n )\n except (OperationFailure, WrongParam) as e:\n logger.error(\"Could not connect to the cloud: {e}\".format(e=e.value))\n exit(1)\n\n apps = cloud._apps # cloud.get_apps()\n for itm in apps:\n print(itm)", "title": "" }, { "docid": "6f8506d4b00706993f0caea06ba7e226", "score": "0.5379068", "text": "def get_available_pyenv_python_versions(args):\n reply = run([\"pyenv\", \"versions\", \"--bare\"])\n available_ = []\n for line in reply.split(\"\\n\"):\n\n chars = line.strip()\n if \"/\" not in chars:\n if args.miniconda:\n if chars.startswith(\"miniconda\"):\n available_.append(chars)\n elif chars.replace(\".\", \"\").isdigit():\n available_.append(chars)\n\n return available_", "title": "" }, { "docid": "6a9668291bbe4e7b074072bddf01fab8", "score": "0.53574795", "text": "def findAppConfig(\n appName,\n appPath,\n commit,\n release,\n local,\n backend,\n org=None,\n repo=None,\n version=None,\n relative=None,\n straight=False,\n):\n\n appPath = normpath(appPath)\n configPath = f\"{appPath}/{APP_CONFIG}\"\n configPathOld = f\"{appPath}/{APP_CONFIG_OLD}\"\n cssPath = f\"{appPath}/{APP_DISPLAY}\"\n\n checkApiVersion = True\n\n isCompatible = None\n\n if fileExists(configPath):\n with open(configPath, encoding=\"utf8\") as fh:\n cfg = yaml.load(fh, Loader=yaml.FullLoader)\n else:\n cfg = None\n\n if cfg is None or cfg == {}:\n cfg = {}\n checkApiVersion = False\n if fileExists(configPathOld):\n isCompatible = False\n if straight:\n return cfg\n\n cfg.update(\n appName=appName, appPath=appPath, commit=commit, release=release, local=local\n )\n\n if version is None:\n version = cfg.setdefault(\"provenanceSpec\", {}).get(\"version\", None)\n else:\n cfg.setdefault(\"provenanceSpec\", {})[\"version\"] = version\n\n if org 
is None:\n org = cfg.get(\"provenanceSpec\", {}).get(\"org\", None)\n else:\n cfg[\"provenanceSpec\"][\"org\"] = org\n\n if repo is None:\n repo = cfg.get(\"provenanceSpec\", {}).get(\"repo\", None)\n else:\n cfg[\"provenanceSpec\"][\"repo\"] = repo\n\n if relative is None:\n relative = prefixSlash(cfg.get(\"provenanceSpec\", {}).get(\"relative\", None))\n else:\n cfg[\"provenanceSpec\"][\"relative\"] = prefixSlash(relative)\n\n cfg[\"local\"] = local\n cfg[\"localDir\"] = getLocalDir(backend, cfg, local, version)\n\n avA = cfg.get(\"apiVersion\", None)\n if isCompatible is None and checkApiVersion:\n isCompatible = avA is not None and avA == avTf\n if not isCompatible:\n if isCompatible is None:\n pass\n elif avA is None or avA < avTf:\n console(\n f\"\"\"\nApp `{appName}` requires API version {avA or 0} but Text-Fabric provides {avTf}.\nYour copy of the TF app `{appName}` is outdated for this version of TF.\nRecommendation: obtain a newer version of `{appName}`.\nHint: load the app in one of the following ways:\n\n {org}/{repo}\n {org}/{repo}:latest\n {org}/{repo}:hot\n\n For example:\n\n The Text-Fabric browser:\n\n text-fabric {org}/{repo}:latest\n\n In a program/notebook:\n\n A = use('{org}/{repo}:latest', hoist=globals())\n\n\"\"\",\n error=True,\n )\n else:\n console(\n f\"\"\"\nApp `{appName}` or rather `{org}/{repo}` requires API version {avA or 0}\nbut Text-Fabric provides {avTf}.\nYour Text-Fabric is outdated and cannot use this version of the TF app `{org}/{repo}`.\nRecommendation: upgrade Text-Fabric.\nHint:\n\n pip install --upgrade text-fabric\n\n\"\"\",\n error=True,\n )\n\n cfg[\"isCompatible\"] = isCompatible\n\n if fileExists(cssPath):\n with open(cssPath, encoding=\"utf8\") as fh:\n cfg[\"css\"] = fh.read()\n else:\n cfg[\"css\"] = \"\"\n\n return cfg", "title": "" }, { "docid": "667f067b5d8949c96d42d53ca09da98a", "score": "0.53541374", "text": "def version(self, irc, msg, args, optlist, package):\r\n url = 'http://packages.debian.org/search?keywords=%(keywords)s' + \\\r\n '&searchon=%(searchon)s&suite=%(suite)s&section=%(section)s'\r\n def reg(name):\r\n return self.registryValue('defaults.version.%s' % name, msg.args[0])\r\n args = {'keywords': None,\r\n 'searchon': reg('searchon'),\r\n 'suite': reg('branch'),\r\n 'section': reg('section')}\r\n for (key, value) in optlist:\r\n if key == 'exact':\r\n url += '&exact=1'\r\n elif key == 'branch':\r\n args['suite'] = value\r\n elif key == 'section':\r\n args['section'] = value\r\n elif key == 'searchon':\r\n args['searchon'] = value\r\n responses = []\r\n if '*' in package:\r\n irc.error('Wildcard characters can not be specified.', Raise=True)\r\n args['keywords'] = utils.web.urlquote(package)\r\n url %= args\r\n try:\r\n html = utils.web.getUrl(url).decode()\r\n except utils.web.Error as e:\r\n irc.error(format('I couldn\\'t reach the search page (%s).', e),\r\n Raise=True)\r\n if 'is down at the moment' in html:\r\n irc.error('Packages.debian.org is down at the moment. 
'\r\n 'Please try again later.', Raise=True)\r\n pkgs = self._deblistreVersion.findall(html)\r\n if not pkgs:\r\n irc.reply(format('No package found for %s (%s)',\r\n utils.web.urlunquote(package), args['suite']))\r\n else:\r\n for pkg in pkgs:\r\n pkgMatch = pkg[0]\r\n soup = BeautifulSoup.BeautifulSoup(pkg[1])\r\n liBranches = soup.find_all('li')\r\n branches = []\r\n versions = []\r\n def branchVers(br):\r\n vers = [b.next.string.strip() for b in br]\r\n return [utils.str.rsplit(v, ':', 1)[0] for v in vers]\r\n for li in liBranches:\r\n branches.append(li.a.string)\r\n versions.append(branchVers(li.find_all('br')))\r\n if branches and versions:\r\n for pairs in zip(branches, versions):\r\n branch = pairs[0]\r\n ver = ', '.join(pairs[1])\r\n s = format('%s (%s)', pkgMatch,\r\n ': '.join([branch, ver]))\r\n responses.append(s)\r\n resp = format('%i matches found: %s',\r\n len(responses), '; '.join(responses))\r\n irc.reply(resp)", "title": "" }, { "docid": "8d3eac90db82a38344b87669a2e6baa1", "score": "0.5347631", "text": "def get_app_data(app_name):\n \n api_response = list_apps()\n app_data = None\n for app in api_response:\n if app['name'] == app_name:\n app_data = app\n return app_data", "title": "" }, { "docid": "f8f54191e467f4681458c26173a2a2e6", "score": "0.53411037", "text": "async def application_info(self):\n ...", "title": "" }, { "docid": "a5bb358488ba02d8483265d3a7db7718", "score": "0.5337048", "text": "def find_app(pth, app):\n result = _find(pth, app)\n if result is None:\n return None\n return result.replace(os.path.join(app, 'Contents'),\n '@executable_path/..')", "title": "" }, { "docid": "96ee304b1095de0a4254d28385dca9e8", "score": "0.5336504", "text": "def all(\n self,\n *,\n owner_name: str,\n app_name: str,\n published_only: bool = False,\n scope: Optional[str] = None,\n ) -> Iterator[BasicReleaseDetailsResponse]:\n\n self.log.info(f\"Getting versions of app: {owner_name}/{app_name}\")\n\n request_url = self.generate_url(owner_name=owner_name, app_name=app_name)\n request_url += \"/releases?\"\n\n parameters = {\"published_only\": str(published_only).lower()}\n\n if scope:\n parameters[\"scope\"] = scope\n\n request_url += urllib.parse.urlencode(parameters)\n\n response = self.get(request_url)\n\n return deserialize.deserialize(\n List[BasicReleaseDetailsResponse], response.json()\n )", "title": "" }, { "docid": "e54d8f0f3774e1bee5311ebba86f4405", "score": "0.53199285", "text": "def get_installed(self):\n uri = self._kytos_api + self._NAPPS_INSTALLED\n\n try:\n response = urllib.request.urlopen(uri)\n if response.getcode() != 200:\n msg = \"Error calling Kytos to check installed NApps.\"\n raise KytosException(msg)\n\n content = json.loads(response.read())\n return sorted((c[0], c[1]) for c in content['napps'])\n except urllib.error.URLError as exception:\n LOG.error(\"Error checking installed NApps. 
Is Kytos running?\")\n raise KytosException(exception)", "title": "" }, { "docid": "f25438579b150b07ece3e309eeab8b0a", "score": "0.5304842", "text": "def find(self, blurVersion):\n paths = self.list(blurVersion=blurVersion, showSummary=False)\n if len(paths) == 1:\n return paths[0]['guid']\n elif len(paths) == 0:\n debug(\"No upgrade path found that matches source version %s\" % blurVersion)\n return None\n else:\n error(\"Multiple upgrade paths found that source version %s: %s\" % (blurVersion, paths))\n return None", "title": "" }, { "docid": "1fd4a884ee1c0da310961d461aa2798f", "score": "0.5297303", "text": "def finder():\n # Locals\n apps = list()\n stripped = list()\n pattern = re.compile(\"^Proto\")\n # Get netstat result\n netstat = cmd(\"netstat | grep -Ei 'listen|udp*'\")\n print(t.yellow(\"[{}] Running search ...\".format(datetime.now())))\n if netstat:\n for line in netstat.split(\"\\r\\n\"):\n if line and pattern.match(line) == None:\n socket = line.split()\n protocol = socket[0]\n port = socket[3].split(':')[-1]\n if protocol and port:\n app = cmd(\"grep {} /proc/net/{}\".format(to_hex(port), protocol))\n if app:\n uid = process_uid(protocol, app)\n if uid == -1:\n continue\n application_list = cmd(\"ps | grep '{}' \".format(uid)).split()\n app = application_list[8]\n apps.append(app)\n stripped.append(line)\n # Build apps and lines\n iterated_apps = iter(apps)\n iterated_lines = iter(stripped)\n # If we have results, print them out\n try:\n while True:\n print(t.yellow(\"-\" * 150))\n for i in range(0, len(apps)):\n print(t.yellow(\"[{}] {}\\t\\t{}\".format(i, iterated_apps.next(), iterated_lines.next())))\n except StopIteration:\n pass", "title": "" }, { "docid": "f264d23e010828047e3008f1109bf39e", "score": "0.5291001", "text": "def get_app(self, name, pid=None):\n if not pid:\n pid = self.app.process\n for child in self.desktop_info.children():\n if child.name == name and pid == child.process_id:\n return child\n raise Exception(\"Application not found\")", "title": "" }, { "docid": "eb8fe117492a37f9cc7706d3f1188f4c", "score": "0.52853197", "text": "def FindInstallations(): # pylint: disable=redundant-returns-doc\n\t\tpass", "title": "" }, { "docid": "fa1095a7d4cd4027413f4d552cb6e6cc", "score": "0.52843636", "text": "def grab_apps(rec, data):\n pass # todo", "title": "" }, { "docid": "5eadc592abba83ed7ea18bee1f37f357", "score": "0.5282951", "text": "def get_app(ApplicationId=None):\n pass", "title": "" }, { "docid": "a15e8f244ec52c1d13ad878025dec677", "score": "0.52826476", "text": "def get_versions(self, psk):\n pass", "title": "" }, { "docid": "0c223e8e049b8557e83f13e69dc84217", "score": "0.5271456", "text": "def test_versions_list(self):\n pass", "title": "" }, { "docid": "680624a66683ac79edf7e5d6750a60fc", "score": "0.5269746", "text": "def get_app_and_models(self, app_label):\n try:\n app = apps.get_app_config(app_label)\n except:\n raise CommandError(\"%s is ImproperlyConfigured - did you remember to add %s to settings.INSTALLED_APPS?\" %\n (app_label, app_label))\n\n models = app.get_models()\n return (app, models)", "title": "" }, { "docid": "157cc4c21a6874dc33ed26872cbc2eb9", "score": "0.52573484", "text": "def _prompt_app_list(self):\n print(\"Choose which applications to install:\")\n package_names = self._names(self._package_file)\n selection = cutie.select_multiple(package_names,\n ticked_indices=self._default_file,\n deselected_unticked_prefix=' ⬡ ',\n deselected_ticked_prefix=self._stat.colored('Green', ' ⬢ '),\n 
selected_unticked_prefix=self._stat.colored('Yellow', ' ⬡ '),\n selected_ticked_prefix=self._stat.colored('Yellow', ' ⬢ '),\n hide_confirm=True)\n chosen = []\n for i in selection:\n chosen.append(package_names[i])\n self._select(chosen)", "title": "" }, { "docid": "053efaa0ef4c683c72fb38f38adf0988", "score": "0.52535754", "text": "def find_version():\n energyplus = distutils.spawn.find_executable('energyplus')\n if not energyplus:\n raise AttributeError\n energyplus = os.path.realpath(energyplus) # follow links in /usr/bin\n folder = os.path.dirname(energyplus)\n version = os.path.basename(folder)[-5:]\n# version = version.replace('.', '-')\n# assert version[1] == '-' and version[3] == '-'\n\n return version", "title": "" }, { "docid": "5089a859eed2a46a595a169ef84ca83d", "score": "0.5252053", "text": "def tiden_get_applications_path():", "title": "" }, { "docid": "0389c83c9c7c53b1e61372c740f8cbdf", "score": "0.5250961", "text": "def versions():\n url = f\"{DataDragon.domain}api/versions.json\"\n return DataDragon._get(url)", "title": "" }, { "docid": "8b0171f92fc3a527ba3a4a70bccb8025", "score": "0.5246958", "text": "def release_id_for_version(\n self, *, owner_name: str, app_name: str, version: str\n ) -> Optional[int]:\n\n for app_version in self.all(owner_name=owner_name, app_name=app_name):\n if app_version.version == version:\n return app_version.identifier\n\n return None", "title": "" }, { "docid": "480d0f78eb1d43dd844b4dd6bbfb6d0a", "score": "0.52339107", "text": "def get_versions() -> tuple[Version | None, Version]:\n # import in the func (rather than top-level scope) so that at setup time,\n # libraries aren't required -- otherwise, setuptools will fail to run\n # because these packages aren't installed yet.\n import requests\n from packaging.version import Version\n\n try:\n response = requests.get(\"https://pypi.python.org/pypi/globus-cli/json\")\n # if the fetch from pypi fails\n except requests.RequestException:\n return None, Version(__version__)\n parsed_versions = [Version(v) for v in response.json()[\"releases\"]]\n latest = max(parsed_versions)\n return latest, Version(__version__)", "title": "" }, { "docid": "566540b1c3a5b5c67d0c9207a39f0f38", "score": "0.5222795", "text": "def get_lookup():\n lookup = dict()\n version_file = os.path.join(\"deid\", \"version.py\")\n with open(version_file) as filey:\n exec(filey.read(), lookup)\n return lookup", "title": "" }, { "docid": "d57cefc8cdf9357428a15ab803229db7", "score": "0.5222035", "text": "def test_get_versions(self):\n from flask_monitoringdashboard import config\n from flask_monitoringdashboard.database.versions import get_versions\n\n with session_scope() as db_session:\n result = get_versions(db_session)\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], config.version)", "title": "" }, { "docid": "1a3b8ae3c69d5bfe70907737922e0183", "score": "0.521808", "text": "async def initialize_applications(self) -> None:\n self.applications = Applications(self)\n if self.params and version.parse(\n self.params.embedded_development\n ) >= version.parse(APPLICATIONS_MINIMUM_VERSION):\n try:\n await self.applications.update()\n except Unauthorized: # Probably a viewer account\n return\n\n tasks = []\n\n for app_class, app_attr in (\n (FenceGuard, \"fence_guard\"),\n (LoiteringGuard, \"loitering_guard\"),\n (MotionGuard, \"motion_guard\"),\n (ObjectAnalytics, \"object_analytics\"),\n (Vmd4, \"vmd4\"),\n ):\n if (\n app_class.name in self.applications # type: ignore[attr-defined]\n and 
self.applications[app_class.name].status # type: ignore[attr-defined]\n == APPLICATION_STATE_RUNNING\n ):\n tasks.append(self._initialize_api_attribute(app_class, app_attr))\n\n if tasks:\n await asyncio.gather(*tasks)", "title": "" }, { "docid": "e500e30ef23106147f8cfd38f14ded8e", "score": "0.52157974", "text": "def get_old_display_applications( self, trans, hda ):\n display_apps = []\n if not self.app.config.enable_old_display_applications:\n return display_apps\n\n for display_app in hda.datatype.get_display_types():\n target_frame, display_links = hda.datatype.get_display_links( hda,\n display_app, self.app, trans.request.base )\n\n if len( display_links ) > 0:\n display_label = hda.datatype.get_display_label( display_app )\n\n app_links = []\n for display_name, display_link in display_links:\n app_links.append({\n 'target': target_frame,\n 'href' : display_link,\n 'text' : gettext.gettext( display_name )\n })\n if app_links:\n display_apps.append( dict( label=display_label, links=app_links ) )\n\n return display_apps", "title": "" }, { "docid": "36fba35dedf378352bcf5b55e087e94a", "score": "0.5209214", "text": "def get_version(self, user=None, napp=None):\n return self._get_napp_key('version', user, napp) or 'latest'", "title": "" }, { "docid": "ee0cdc0e14a014c3f9eefab91e3579db", "score": "0.5206882", "text": "def test_search(self):\n # type: () -> None\n from hardest.python_searcher import PythonSearcher\n from hardest.python_searcher import PythonVersion\n\n instance = PythonSearcher(env=self.env, validator=self.validator)\n found_versions = set(instance.search())\n\n test_versions = set((\n PythonVersion('Python test.1.2', set((\n self.binpath + 'python',\n self.binpath + 'python1.2',\n ))),\n PythonVersion('Jython test.9.1', set((\n self.binpath + 'jython9.1',\n ))),\n PythonVersion('Anaconda test.3.1', set((\n self.binpath + 'anaconda',\n ))),\n ))\n self.assertEqual(test_versions & found_versions, test_versions)", "title": "" }, { "docid": "8c031fae850580f5bd03ef5e416a5fb0", "score": "0.5205821", "text": "def ensure_application_version(disk_store, application_tag='dummy_tag'):\n application = disk_store.application(tag=application_tag)\n if not application:\n application = disk_store.add_application(tag=application_tag, category='wgs',\n description='dummy_description')\n disk_store.add_commit(application)\n\n prices = {'standard': 10, 'priority': 20, 'express': 30, 'research': 5}\n version = disk_store.application_version(application, 1)\n if not version:\n version = disk_store.add_version(application, 1, valid_from=datetime.now(),\n prices=prices)\n\n disk_store.add_commit(version)\n return version", "title": "" }, { "docid": "8c031fae850580f5bd03ef5e416a5fb0", "score": "0.5205821", "text": "def ensure_application_version(disk_store, application_tag='dummy_tag'):\n application = disk_store.application(tag=application_tag)\n if not application:\n application = disk_store.add_application(tag=application_tag, category='wgs',\n description='dummy_description')\n disk_store.add_commit(application)\n\n prices = {'standard': 10, 'priority': 20, 'express': 30, 'research': 5}\n version = disk_store.application_version(application, 1)\n if not version:\n version = disk_store.add_version(application, 1, valid_from=datetime.now(),\n prices=prices)\n\n disk_store.add_commit(version)\n return version", "title": "" }, { "docid": "ef7f1851202f7539ceafc03577feb3e7", "score": "0.5203554", "text": "def get_apps(PageSize=None, Token=None):\n pass", "title": "" }, { "docid": 
"4c5ec7891ee13c76c919b4282426b6f7", "score": "0.520087", "text": "def allSoftwareVersions():\n result = []\n f = urllib.urlopen(\"https://cmstags.cern.ch/tc/ReleasesXML/?anytype=1\")\n for line in f:\n for tok in line.split():\n if tok.startswith(\"label=\"):\n release = tok.split(\"=\")[1].strip('\"')\n result.append(release)\n return result", "title": "" }, { "docid": "1e4fe86e65dae4c2b35835f0436cb5f7", "score": "0.51996136", "text": "def fetch_app(self, app=None):\n query = \"\"\"\n SELECT app, source_path, target_path\n FROM apps\n \"\"\"\n if app is not None:\n query += \"\"\"WHERE app = ? \"\"\"\n return self.qry(query, (app, ))\n return self.qry(query)", "title": "" } ]
88b60b38933845833091e1930c7fc728
Testing if the ``DocumentManager`` retrieves the correct objects.
[ { "docid": "667e1b8a43b0683139b26a934093703d", "score": "0.6640915", "text": "def test_manager(self):\n request = Mock(LANGUAGE_CODE='de')\n self.assertEqual(\n models.Document.objects.published(request).count(), 1, msg=(\n 'In German, there should be one published document.'))\n\n request = Mock(LANGUAGE_CODE='en')\n self.assertEqual(\n models.Document.objects.published(request).count(), 1, msg=(\n 'In English, there should be one published document.'))\n\n request = Mock(LANGUAGE_CODE=None)\n self.assertEqual(\n models.Document.objects.published(request).count(), 0, msg=(\n 'If no language is set, there should be no published'\n ' documents.'))", "title": "" } ]
[ { "docid": "f9c9d1bd862ae9183edf36403f90dc66", "score": "0.71074057", "text": "def test_client_document_retrieve(self):\n pass", "title": "" }, { "docid": "ceeba713fa4c7f6491558110e856064b", "score": "0.69682175", "text": "def test_documents_for(self):\n # Test the default ES version\n self._test_documents_for(_documents_for)\n\n # Test the DB version\n self._test_documents_for(_db_documents_for)", "title": "" }, { "docid": "0823cacc10adec1a2f04bde2523882a9", "score": "0.6940775", "text": "def test_get_document(self):\n pass", "title": "" }, { "docid": "c3ae784c5253be1062f6208553500418", "score": "0.6804564", "text": "def test_list_documents(self):\n pass", "title": "" }, { "docid": "40a16af714a092bddb52621692827840", "score": "0.6788717", "text": "def test_custom_get_document_model(self):\n self.assertIs(get_document_model(), CustomDocument)", "title": "" }, { "docid": "d225f0cb8c50bc3d1d53190faa774a8b", "score": "0.66045684", "text": "def test_document_retrieval(self):\n MockHttpsConnection.add_response(200, \"OK\",\n json.dumps(self._document_at(self.two_document_response, 0)))\n MockHttpsConnection.add_response(200, \"OK\",\n json.dumps(self._document_at(self.two_document_response, 1)))\n\n connection_policy = documents.ConnectionPolicy()\n connection_policy.EnableEndpointDiscovery = False\n dc = document_client.DocumentClient(RateTest.host, {'masterKey' : RateTest.masterKey }, connection_policy)\n it = dc.QueryDocuments('coll_1', \"SELECT * FROM coll_1\")\n it = iter(it)\n self.assertEqual(1, next(it)['id'])\n self.assertEqual(2, next(it)['id'])\n\n # check that no further responses are pending\n self.assertEqual(0, len(MockHttpsConnection.responses))", "title": "" }, { "docid": "88e043491d37154a2bbe28c68ced3913", "score": "0.6564783", "text": "def test_standard_get_document_model(self):\n del settings.WAGTAILDOCS_DOCUMENT_MODEL\n from wagtail.documents.models import Document\n\n self.assertIs(get_document_model(), Document)", "title": "" }, { "docid": "f554d1746a25773a299f96022624ee8b", "score": "0.6516272", "text": "def test_client_document_list(self):\n pass", "title": "" }, { "docid": "9717c4a8c14b71a3fe75021ee34f496c", "score": "0.64175117", "text": "def test_001(self):\n document = Document()\n self.assertEqual(document.document, None)", "title": "" }, { "docid": "ae808ba1ceb4296f5ccfb2a236582bde", "score": "0.6401401", "text": "def test_client_verification_document_retrieve(self):\n pass", "title": "" }, { "docid": "e73a6eb90b68b62d50a0cfa76e1e97fd", "score": "0.6388543", "text": "def test_custom_manager(self):\r\n class BlogPost(Document):\r\n tags = ListField(StringField())\r\n deleted = BooleanField(default=False)\r\n date = DateTimeField(default=datetime.now)\r\n\r\n @queryset_manager\r\n def objects(cls, qryset):\r\n opts = {\"deleted\": False}\r\n return qryset(**opts)\r\n\r\n @queryset_manager\r\n def music_posts(doc_cls, queryset, deleted=False):\r\n return queryset(tags='music',\r\n deleted=deleted).order_by('date')\r\n\r\n BlogPost.drop_collection()\r\n\r\n post1 = BlogPost(tags=['music', 'film']).save()\r\n post2 = BlogPost(tags=['music']).save()\r\n post3 = BlogPost(tags=['film', 'actors']).save()\r\n post4 = BlogPost(tags=['film', 'actors', 'music'], deleted=True).save()\r\n\r\n self.assertEqual([p.id for p in BlogPost.objects()],\r\n [post1.id, post2.id, post3.id])\r\n self.assertEqual([p.id for p in BlogPost.music_posts()],\r\n [post1.id, post2.id])\r\n\r\n self.assertEqual([p.id for p in BlogPost.music_posts(True)],\r\n [post4.id])\r\n\r\n 
BlogPost.drop_collection()", "title": "" }, { "docid": "c95e986efce918222da253a257ac9565", "score": "0.6337192", "text": "def test_document_retrieval(self, test_urls):\n entity_document = MyEntityDocument.objects.create(\n original_filename='test.txt',\n my_field='cats are lactose intolerant',\n )\n\n url = reverse(\n 'test-document-item',\n kwargs={\n 'entity_document_pk': entity_document.pk,\n },\n )\n\n response = self.api_client.get(url)\n assert response.status_code == status.HTTP_200_OK\n assert response.data == {\n 'id': str(entity_document.pk),\n 'my_field': 'cats are lactose intolerant',\n 'original_filename': entity_document.original_filename,\n 'url': entity_document.url,\n 'status': 'not_virus_scanned',\n }", "title": "" }, { "docid": "a4623fff2a162b78189db862dd788734", "score": "0.62880766", "text": "def test_place_id_documents_get(self):\n response = self.client.open(\n '/place/{id}/documents'.format(id=56),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "53a05d4f9bce200dc745dd965fc2b3b1", "score": "0.6232883", "text": "def test_getitem(self):\r\n response, status = self.get(self.known_resource, item=self.item_id)\r\n self.assert200(status)\r\n self.assertDocumentVersions(response, 0)", "title": "" }, { "docid": "7209ae5197e19353a2c57e20adffb057", "score": "0.6219981", "text": "def test_docs_page(self):\n with self.client:\n response = self.client.get('/docs/')\n self.assert200(response)", "title": "" }, { "docid": "5e33572a293bf8004f524f76ee9d26fd", "score": "0.62082094", "text": "def test_get_objects(self):\n response = self.client.get(self.get_endpoint())\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # self.assertNotEqual(response.data, [])", "title": "" }, { "docid": "8a3166e555fc440c0b86a4c66495ac01", "score": "0.6156591", "text": "def test_document_load(self):\n pass", "title": "" }, { "docid": "69a8fbc7019c4bd547b9b46511aace9d", "score": "0.6101651", "text": "def test_doc_all(self):\n self.assertTrue(len(DBStorage.all.__doc__) > 1)", "title": "" }, { "docid": "dd7743eece026c6ed262c8f7c5f5ecba", "score": "0.60596293", "text": "def test_get_documents_populated(index_with_documents):\n response = index_with_documents().get_documents()\n assert isinstance(response, list)\n assert len(response) == 20", "title": "" }, { "docid": "fdd11db9dc6440fa721dececc3861217", "score": "0.6051573", "text": "def test_client_verification_document_list(self):\n pass", "title": "" }, { "docid": "dfe703c07707bd493fe34730ad4f2f29", "score": "0.60400146", "text": "def test_002(self):\n document = Document(None)\n self.assertEqual(document.document, None)", "title": "" }, { "docid": "ed463579efc5d9c30df053cf7edae470", "score": "0.6037316", "text": "def test_documents_length(self):\n\n cktTopic = Topics.objects.get(TopicName=\"SpekiLove\")\n\n topics_filtered = Topics.objects.get(TopicId=cktTopic.TopicId)\n\n docs = topics_filtered.documents_set.all()\n\n self.assertEqual(cktTopic.TopicName, \"SpekiLove\")\n self.assertEqual(len(docs), 2)", "title": "" }, { "docid": "b4fb920262a91214360a6e199481bbb1", "score": "0.6030731", "text": "def test_dynamic_document_queries(self):\r\n p = self.Person()\r\n p.name = \"Dean\"\r\n p.age = 22\r\n p.save()\r\n\r\n self.assertEqual(1, self.Person.objects(age=22).count())\r\n p = self.Person.objects(age=22)\r\n p = p.get()\r\n self.assertEqual(22, p.age)", "title": "" }, { "docid": "51aac65254cac19bdeff8856c91a925e", "score": "0.6010344", "text": "def 
test_custom_querysets_managers_directly(self):\r\n\r\n class CustomQuerySetManager(QuerySetManager):\r\n\r\n @staticmethod\r\n def get_queryset(doc_cls, queryset):\r\n return queryset(is_published=True)\r\n\r\n class Post(Document):\r\n is_published = BooleanField(default=False)\r\n published = CustomQuerySetManager()\r\n\r\n Post.drop_collection()\r\n\r\n Post().save()\r\n Post(is_published=True).save()\r\n self.assertEqual(Post.objects.count(), 2)\r\n self.assertEqual(Post.published.count(), 1)\r\n\r\n Post.drop_collection()", "title": "" }, { "docid": "985b36a887806e168ddb1caa18abcef1", "score": "0.6001792", "text": "def test_can_get_single_document(self):\n self.assertEqual(type(Recipe.get_single_recipe()), Recipe)", "title": "" }, { "docid": "96eb1e0996c05e85a5f3e68ea612246f", "score": "0.6001333", "text": "def test_documents(self):\n arabic = LanguageScript.objects.create(language=\"Arabic\", script=\"Arabic\")\n french = LanguageScript.objects.create(language=\"French\", script=\"Latin\")\n english = LanguageScript.objects.create(language=\"English\", script=\"Latin\")\n\n arabic_doc = Document.objects.create()\n arabic_doc.languages.add(arabic)\n french_arabic_doc = Document.objects.create()\n french_arabic_doc.languages.add(arabic, french)\n\n lang_admin = LanguageScriptAdmin(model=LanguageScript, admin_site=admin.site)\n # retrieve via admin queryset to ensure we have count annotated\n qs = lang_admin.get_queryset(request=None)\n\n arabic_usage_link = lang_admin.documents(qs.get(language=\"Arabic\"))\n assert f\"?languages__id__exact={arabic.pk}\" in arabic_usage_link\n assert \"2</a>\" in arabic_usage_link\n\n french_usage_link = lang_admin.documents(qs.get(language=\"French\"))\n assert f\"?languages__id__exact={french.pk}\" in french_usage_link\n assert \"1</a>\" in french_usage_link\n\n english_usage_link = lang_admin.documents(qs.get(language=\"English\"))\n assert f\"?languages__id__exact={english.pk}\" in english_usage_link\n assert \"0</a>\" in english_usage_link\n\n # test secondary documents\n arabic = qs.get(language=\"Arabic\")\n arabic_secondary_link = lang_admin.secondary_documents(\n qs.get(language=\"Arabic\")\n )\n assert f\"?secondary_languages__id__exact={arabic.pk}\" in arabic_secondary_link\n assert \"0</a>\" in arabic_secondary_link\n\n # add a secondary language to our arabic document\n arabic_doc.secondary_languages.add(french)\n french_secondary_link = lang_admin.secondary_documents(\n qs.get(language=\"French\")\n )\n assert f\"?secondary_languages__id__exact={french.pk}\" in french_secondary_link\n assert \"1</a>\" in french_secondary_link", "title": "" }, { "docid": "c6875778851d6129f3f7840c71dc1f11", "score": "0.59706855", "text": "def test_api_docs_get_valid(self):\n response = self.client.get('/bot/docs', app.config['API_SUBDOMAIN'], headers=app.config['TEST_HEADER'])\n self.assertEqual(response.status_code, 200)\n self.assertIn(self.valid_data, response.json)", "title": "" }, { "docid": "617bda073e7f0cc77d547cb36d623c0a", "score": "0.59597284", "text": "def test_get_all_songs(self):\n # hit the API endpoint\n response = self.client.get(\n reverse(\"documentos-all\", kwargs={\"version\": \"v1\"})\n )\n # fetch the data from db\n expected = Documentos.objects.all()\n serialized = SongsSerializer(expected, many=True)\n self.assertEqual(response.data, serialized.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "77f4091968faa4686d39c0620941b5f9", "score": "0.5941505", "text": "def test_documentation(self):\n 
for name, model_cls in sorted(model_resolver.lookup_dict.items()):\n with self.subTest(name=name):\n try:\n docdata = model_cls.__docdata__\n except AttributeError:\n self.fail('missing __docdata__')\n self.assertIn('citation', docdata)\n self.assertIn('author', docdata['citation'])\n self.assertIn('link', docdata['citation'])\n self.assertIn('year', docdata['citation'])", "title": "" }, { "docid": "89b2ce405003b7bde8ca19d3f978416c", "score": "0.59196764", "text": "def testAddManyDocuments(self):\n name = _randomString(20)\n\n docs = []\n for _ in range(5):\n doc = {'id': _randomString(20),\n 'name': name,\n 'title': [_randomString(20)]}\n docs.append(doc)\n\n yield self.client.add(docs)\n yield self.client.commit()\n\n r = yield self.client.search('name:%s' % name)\n\n self.assertEqual(r.results.numFound, len(docs),\n 'Document was not added')", "title": "" }, { "docid": "05f2a6728900fc02f12699da7382b0df", "score": "0.59034896", "text": "def test_initialisation(self):\r\n self.assertTrue(isinstance(self.Person.objects, QuerySet))\r\n self.assertEqual(self.Person.objects._collection.name,\r\n self.Person._get_collection_name())\r\n self.assertTrue(isinstance(self.Person.objects._collection,\r\n pymongo.collection.Collection))", "title": "" }, { "docid": "6da8e9a7bc3bd16f0483311b995e029d", "score": "0.5902863", "text": "def test_documents_for_fallback(self):\n general_bookmarks_documents, fallback = documents_for(\n locale='es', topics=[self.general, self.bookmarks])\n eq_(len(general_bookmarks_documents), 0)\n eq_(len(fallback), 1)", "title": "" }, { "docid": "f73d9a6c69e2f475937f84e654b90e8f", "score": "0.58682674", "text": "def test_get(self):\n response = self.client.get(reverse('idea-task-document-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertNotEqual(response.data, [])\n self.assertTrue(isinstance(response.data, list))", "title": "" }, { "docid": "2b6a44f3bf311aa9620fcb0afd4861c3", "score": "0.58589345", "text": "def test_docs(self):\n self.assertTrue(len(BaseModel.__doc__) > 0)", "title": "" }, { "docid": "b4474ff244cba8c48559b1885b573ad6", "score": "0.58326244", "text": "def test_self_check(self):\n \n self.createContent()\n result = self.portal.topic.queryCatalog() \n self.assertEqual(len(result), 1) # Front page\n \n self.createDocument() # create document without publishing date \n result = self.portal.topic.queryCatalog() \n self.assertEqual(len(result), 2) # One document, one result", "title": "" }, { "docid": "e88b211f3b69719add94daa231017e9b", "score": "0.5832469", "text": "def test_get_editor(self):\n\t\tresponse = self.app.get(reverse('documents:edit', args=[self.document.url_title]), expect_errors=True)\n\t\tself.assertEqual(response.status_code, 403) # test anonymous user cannot access page\n\n\t\tresponse = self.app.get(reverse('documents:edit', args=[self.document.url_title]), user=self.user)\n\t\tself.assertEqual(response.status_code, 200)\n\n\t\tform = response.forms[0]\n\t\tself.assertEqual(form.get('title').value, self.document.title)\n\t\tself.assertEqual(form.get('text').value, self.document.text)\n\t\tself.assertEqual(int(form.get('moderator').value), self.document.moderator.id)\n\t\tself.assertEqual([int(id) for id in form.get('participants').value], [participant.id for participant in self.document.participants.all()])", "title": "" }, { "docid": "5e016faf0ae6286b2f20741217518b3d", "score": "0.58308524", "text": "def test_034(self):\n document = Document()\n self.assertEqual(document.size, 0)", "title": "" }, { "docid": 
"57a2beb3201c1d39aea988b7223ae25b", "score": "0.58204055", "text": "def test_check_db_store(self):\r\n publications = Publication.objects.all()\r\n self.assertEqual(len(publications), 1)\r\n self.assertEqual(publications[0].brand.name, \"Nissan\")", "title": "" }, { "docid": "cb12d63ed586b7421eab3d6c8c61cbf4", "score": "0.58096135", "text": "def test_objects(self):\n self.assertTrue(hasattr(FileStorage, '_FileStorage__objects'))", "title": "" }, { "docid": "db602514d684e3a440afc9f15cccd133", "score": "0.5791692", "text": "def objects(cls, company):\n return DocumentManager(cls, company)", "title": "" }, { "docid": "acbae52beea9272e42f6badf4959dd65", "score": "0.5784941", "text": "def test_for_valid_document_type(self):\n with self.assertRaises(DocTypeException):\n li.get_documents('foo')\n\n with self.assertRaises(DocTypeException):\n li.get_document('foo', '000000')", "title": "" }, { "docid": "6398b8e7d5b2d70a3493f95e293e69df", "score": "0.57831556", "text": "def test_documents_post(self):\n pass", "title": "" }, { "docid": "c1c9700435652eb009af7777b05cd2cb", "score": "0.57742995", "text": "def test_document_with_deletion_pending_retrieval(self, test_urls):\n entity_document = MyEntityDocument.objects.create(\n original_filename='test.txt',\n my_field='large field',\n )\n entity_document.document.mark_deletion_pending()\n\n url = reverse(\n 'test-document-item',\n kwargs={\n 'entity_document_pk': entity_document.pk,\n },\n )\n\n response = self.api_client.get(url)\n assert response.status_code == status.HTTP_404_NOT_FOUND", "title": "" }, { "docid": "39cb2857ebc82551cffe9439896326f0", "score": "0.57570463", "text": "def test_docs_index_page(self):\n result = self.client().get(\"/\")\n\n self.assertIsNotNone(result)", "title": "" }, { "docid": "80fdbceb942b5eacf41ff228c804bafd", "score": "0.57506174", "text": "def test_get_document(index_with_documents):\n response = index_with_documents().get_document('500682')\n assert isinstance(response, dict)\n assert 'title' in response\n assert response['title'] == 'The Highwaymen'", "title": "" }, { "docid": "f403f05dab9cc716afd2694031de780e", "score": "0.57430947", "text": "def test_036(self):\n document = Document()\n self.assertEqual(document.type, None)", "title": "" }, { "docid": "08c11b7867efef9bbbdcd8d1d2a76fd8", "score": "0.57429695", "text": "def test_custom_manager2(self):\n from modeltranslation.manager import MultilingualManager, MultilingualQuerySet\n\n manager = models.CustomManager2TestModel.objects\n assert isinstance(manager, models.CustomManager2)\n assert isinstance(manager, MultilingualManager)\n qs = manager.all()\n assert isinstance(qs, models.CustomQuerySet)\n assert isinstance(qs, MultilingualQuerySet)", "title": "" }, { "docid": "97649611ab893573b45591b302934c51", "score": "0.5741506", "text": "def test_unknown_get_document_model(self):\n with self.assertRaises(ImproperlyConfigured):\n get_document_model()", "title": "" }, { "docid": "ee04eac5a636eae2864298656befd1d5", "score": "0.5724755", "text": "def test_employee_get_managers(self):\n logger.setLevel(logging.WARN)\n test_employee = Employee.objects.create(firstname=\"Margaret\", surname=\"Peters\")\n test_employee.role = EmployeeType.objects.create(code=\"MGR\")\n test_employee.save()\n\n # Check total count of 2 managers stored.\n self.assertEqual(Employee.objects.get_managers().count(), 2)", "title": "" }, { "docid": "f2dbf0dea4379d1a0d5340976fef3c58", "score": "0.5716516", "text": "def test_standard_get_document_model_string(self):\n del 
settings.WAGTAILDOCS_DOCUMENT_MODEL\n self.assertEqual(get_document_model_string(), \"wagtaildocs.Document\")", "title": "" }, { "docid": "34f60cb8bc71c1e9b98bacd1640f7f27", "score": "0.57145363", "text": "def testGetDocumentsNonexistentUser(self):\n self.assertEqual(models.FAILURE, self.documents.get_documents(\"NONEXISTENT_USER_ID\"))", "title": "" }, { "docid": "b81b8a515adb4477d691a5647af9e323", "score": "0.5711663", "text": "def test_docs(self):\n self.assertIn('docs.inventree.org', inventree_extras.inventree_docs_url())", "title": "" }, { "docid": "c1036afc812998be14badad79d7515d6", "score": "0.57045144", "text": "def test_get_document_as_prov(self):\n self.clear_database()\n example = examples.bundles2()\n document_id = self.provapi.save_document_from_prov(example)\n\n prov_document = self.provapi.get_document_as_prov(document_id)\n self.assertIsNotNone(prov_document)\n self.assertIsInstance(prov_document, ProvDocument)\n\n self.assertEqual(prov_document, example)", "title": "" }, { "docid": "d13a748661699215f044824c17d3b025", "score": "0.5692811", "text": "def test_overridden_get_object_view(self):\r\n request = factory.get('/1')\r\n with self.assertNumQueries(1):\r\n response = self.view(request, pk=1).render()\r\n self.assertEqual(response.status_code, status.HTTP_200_OK)\r\n self.assertEqual(response.data, self.data[0])", "title": "" }, { "docid": "383de025633ffcf7831cd1714c403c96", "score": "0.56764567", "text": "def test_api_docs_get_detail(self):\n response = self.client.get(\n f'/bot/docs?package={self.valid_data[\"package\"]}', app.config['API_SUBDOMAIN'], headers=app.config['TEST_HEADER']\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json, [self.valid_data])", "title": "" }, { "docid": "167aadde5872008309375465e4f22c7b", "score": "0.56719375", "text": "def test_user_can_get_documents_from_library(self):\n\n # Ensure a user exists\n user = User(absolute_uid=self.stub_user.absolute_uid)\n with self.app.session_scope() as session:\n session.add(user)\n session.commit()\n\n # Ensure a library exists\n library = Library(name='MyLibrary',\n description='My library',\n public=True,\n bibcode=self.stub_library.bibcode)\n\n # Give the user and library permissions\n permission = Permissions(permissions={'read': True, 'write': True, 'admin': False, 'owner': True})\n\n # Commit the stub data\n user.permissions.append(permission)\n library.permissions.append(permission)\n\n session.add_all([library, permission, user])\n session.commit()\n for obj in [library, permission, user]:\n session.refresh(obj)\n session.expunge(obj)\n\n # Retrieve the bibcodes using the web services\n with MockEmailService(self.stub_user, end_type='uid'):\n response_library, meta_data = \\\n self.library_view.get_documents_from_library(\n library_id=library.id,\n service_uid=user.id\n )\n self.assertEqual(library.bibcode, response_library.bibcode)", "title": "" }, { "docid": "df5b6cd3970680233bb596cee756b84b", "score": "0.5669666", "text": "def testObjector404(self):\n self.auth1 = Author.objects.create(name=\"toto\", title=\"ToTo\", birth_date=date(2000,1,2))\n result = get_object_or_404_json(Author, name=\"toto\")\n self.assertTrue(isinstance(result, Author))\n result = get_object_or_404_json(Author, name=\"nothing\")\n self.assertEqual(result.status_code, 404)\n self.assertTrue(\"not found\" in result.content)", "title": "" }, { "docid": "119be3ea671b41a4099f46ce144fdc72", "score": "0.56695133", "text": "def test_doc_all(self):\n expected = 'returns private attribute: 
__objects'\n actual = FileStorage.all.__doc__\n self.assertEqual(expected, actual)", "title": "" }, { "docid": "21275c2ac372132116809e9cab0e0b0a", "score": "0.566535", "text": "def test_custom_querysets_set_manager_directly(self):\r\n\r\n class CustomQuerySet(QuerySet):\r\n def not_empty(self):\r\n return self.count() > 0\r\n\r\n class CustomQuerySetManager(QuerySetManager):\r\n queryset_class = CustomQuerySet\r\n\r\n class Post(Document):\r\n objects = CustomQuerySetManager()\r\n\r\n Post.drop_collection()\r\n\r\n self.assertTrue(isinstance(Post.objects, CustomQuerySet))\r\n self.assertFalse(Post.objects.not_empty())\r\n\r\n Post().save()\r\n self.assertTrue(Post.objects.not_empty())\r\n\r\n Post.drop_collection()", "title": "" }, { "docid": "d20a4cf08c537be2157772d21a4854d7", "score": "0.5662349", "text": "def test_listObjects_all(self):\n i = PPD()\n id1 = i.addObject({'foo': 'bar'})\n id2 = i.addObject({'hey': 'ho'})\n objects = i.listObjects()\n obj1 = i.getObject(id1)\n obj2 = i.getObject(id2)\n self.assertEqual(objects, [obj1, obj2],\n \"Should return both objects\")", "title": "" }, { "docid": "12e021cff4572b108d6ddc800957ca02", "score": "0.56539273", "text": "def test_client_document_create(self):\n pass", "title": "" }, { "docid": "59e4efcf0abd0249d12f12673a1dcb54", "score": "0.56388056", "text": "def test_cursor(self):\n document = model.Document.from_data(DATA)\n collection = Collection([document])\n\n cursor = collection.cursor()\n\n doc = cursor.next()\n\n self.assertIsInstance(doc, model.Document)\n self.assertEqual(doc, document)\n\n # in this case (non-caching mode), collection should also preserve reference to the doc\n self.assertEqual(id(doc), id(document))", "title": "" }, { "docid": "d6f7c803fd357da41ec70f21db7ee253", "score": "0.56361276", "text": "def getDocuments(self): \n return self.__Docuemnts", "title": "" }, { "docid": "bedf51b8b482067663e30ddac1ab3d49", "score": "0.5632494", "text": "def test_collections(self):\n\n self.clear_database()\n prov_document = examples.collections()\n stored_document_id = self.provapi.save_document_from_prov(prov_document)\n stored_document = self.provapi.get_document_as_prov(stored_document_id)\n\n self.assertEqual(stored_document, prov_document)", "title": "" }, { "docid": "42c69509b975726f2e4442ab1f3a214c", "score": "0.56316555", "text": "def test_tickets_manager(self):\n\n # only public comments should be returned by the default manager\n comments = FollowUp.objects.all()\n self.assertEqual(comments.count(), 2)\n shouldbe = [self.comment1.id, self.comment2.id]\n self.assertQuerysetEqual(comments, shouldbe, lambda a: a.id, ordered=False)\n self.assertQuerysetEqual(\n comments, [False, False], lambda a: a.private, ordered=False\n )\n\n # all comments can be retrieved vy all_comments\n comments = FollowUp.all_comments.all()\n self.assertEqual(comments.count(), 3)\n shouldbe = [self.comment1.id, self.comment2.id, self.comment3.id]\n self.assertQuerysetEqual(comments, shouldbe, lambda a: a.id, ordered=False)\n self.assertQuerysetEqual(\n comments, [False, False, True], lambda a: a.private, ordered=False\n )", "title": "" }, { "docid": "942e24b34264628324b85c7e1463eb02", "score": "0.56208074", "text": "def test_invalid_get_document_model(self):\n with self.assertRaises(ImproperlyConfigured):\n get_document_model()", "title": "" }, { "docid": "95cbbb103ff7c9221a955be5b60aa272", "score": "0.56180185", "text": "def test_get_books(self):\n get_books_url = reverse(\"book_list\")\n response = self.client.get(get_books_url)\n\n 
self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # get data from db\n books = Book.objects.all()\n serializer = BookSerializer(books, many=True)\n self.assertEqual(response.data, serializer.data)\n\n self.assertEqual(len(response.data), 3)", "title": "" }, { "docid": "51b5d7da25135ad9335a814bc3d42533", "score": "0.5617232", "text": "def test_delete_action(self):\n resp = self.api.delete(self.document)\n self.assert200(resp)\n self.assertFalse(all_models.Document.query.filter(\n all_models.Document.id == self.document.id).all())", "title": "" }, { "docid": "35abe82e55360f08f63bb2bb91fbee1d", "score": "0.5611541", "text": "def testDocumentFeedItem(self):\n\n document_feed = news_feed_logic.retrieveFeed(self.document)\n self.failIfEqual(document_feed, [])\n self.failUnlessEqual(document_feed[0].sender, self.document)\n self.failUnlessEqual(document_feed[0].receivers[0], self.site)\n self.failUnlessEqual(document_feed[0].user, self.user)", "title": "" }, { "docid": "a3ff21737d076a21c1c53b98eea15397", "score": "0.5611503", "text": "def test_if_get_news(self):\n response = self.client.get(self.news_url)\n response_object = response.json().get('results')[0]\n self.assertEqual(News.objects.count(), 1)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response_object.get('description'), self.news_saved.description)\n self.assertEqual(response_object.get('newspaper'), self.news_saved.newspaper)", "title": "" }, { "docid": "dc99b8e19213f0cdfc442b6073b5b117", "score": "0.5604579", "text": "def get_documents(self): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "097034db80a24ab188de01aea9085aea", "score": "0.5595425", "text": "def test_tickets_manager(self):\n\n # only active tickets should be returned by the default manager\n tickets = Ticket.objects.all()\n self.assertEqual(tickets.count(), 2)\n shouldbe = [self.ticket1.id, self.ticket2.id]\n self.assertQuerysetEqual(tickets, shouldbe, lambda a: a.id, ordered=False)\n self.assertQuerysetEqual(\n tickets, [True, True], lambda a: a.active, ordered=False\n )\n\n # all tickets can be retrieved vy all_tickets\n tickets = Ticket.all_tickets.all()\n self.assertEqual(tickets.count(), 3)\n shouldbe = [self.ticket1.id, self.ticket2.id, self.ticket3.id]\n self.assertQuerysetEqual(tickets, shouldbe, lambda a: a.id, ordered=False)\n self.assertQuerysetEqual(\n tickets, [True, True, False], lambda a: a.active, ordered=False\n )", "title": "" }, { "docid": "bd974c37f8ea235a952b7c9e08755d71", "score": "0.55944204", "text": "def test_custom_get_document_model_string(self):\n self.assertEqual(get_document_model_string(), \"tests.CustomDocument\")", "title": "" }, { "docid": "6f5ad3ca4d909d9951f8dbc1a030f68f", "score": "0.5592704", "text": "def test_index_view_shows_only_available_objects(self):\n username, password = 'wisa', 'wis_pwa'\n user = add_User(username=username, password=password)\n company3 = add_Company(\n nombre_comercial='Tienda 3',\n ruc='1234567842',\n razon_social='Pace Pil',\n direccion_matriz=\"C del pepeno\")\n add_CompanyUser(user=user, company=company3)\n establecimiento3 = add_instance(\n models.Establecimiento,\n company=company3,\n descripcion=\"Matriz\",\n direccion='C del pepano',\n codigo=\"001\",\n )\n\n add_instance(models.PuntoEmision,\n establecimiento=establecimiento3,\n descripcion=\"Caja de la matriz\",\n codigo=\"001\")\n\n ob3 = self.cls(company=company3, **self.data)\n ob3.save()\n\n c = Client()\n r = c.post(\"/accounts/login/\",\n {'username': 
username, 'password': password})\n self.assertEquals(r['location'],\n \"http://testserver\" + reverse('billing_index'))\n\n reverse_index_args = (company3.id,)\n r = c.get(\n reverse(self.index_view, args=reverse_index_args))\n context_object_name = r.context_data['view'].context_object_name\n self.assertEquals(\n # queryset objects are never equal\n # so I have to convert them to lists to compare properly\n list(r.context_data[context_object_name]),\n list(self.cls.objects.filter(company=company3)),\n \"The list view shows objects from a different company\")\n\n r = self.c.get(\n reverse(self.index_view, args=self.reverse_index_args))\n context_object_name = r.context_data['view'].context_object_name\n self.assertEquals(\n # queryset objects are never equal\n # so I have to convert them to lists to compare properly\n list(r.context_data[context_object_name]),\n list(self.cls.objects.filter(company=self.company)),\n \"The list view shows objects from a different company\")", "title": "" }, { "docid": "27423cf484fcb76b3d4ae0e72c361b63", "score": "0.559174", "text": "def test_osbrepository_get(self):\n pass", "title": "" }, { "docid": "56ea1c8d4984d3173529cef4756a5430", "score": "0.55830497", "text": "def test_if_domain_manager_initiated(self):\n from resello.managers.domain import DomainManager\n client = ReselloClient(api_key='test', reseller_reference='test')\n\n self.assertIsInstance(client.domain, DomainManager)", "title": "" }, { "docid": "e9b9853306dfc74f3f70022b05d719ce", "score": "0.5582615", "text": "def test___init__(self):\n document = model.Document.from_data(DATA)\n collection = Collection([document])\n\n self.assertIsInstance(collection, Collection)\n self.assertEqual(collection.count(), 1)\n\n # collection should contain 1 document\n self.assertEqual(len(collection), 1)", "title": "" }, { "docid": "aedfef16930dea4b677449be1c7e0f5f", "score": "0.55800015", "text": "def test_doc_class(self):\n self.assertTrue(len(DBStorage.__doc__) > 1)", "title": "" }, { "docid": "aa370f37d486ce6aa553ca0c6af2560d", "score": "0.5575962", "text": "def test_object_list(self):\n word = WordFactory()\n\n response = self.get(url_name=\"v1:word-list\")\n self.assert_http_200_ok(response=response)\n assert response.data[\"count\"] == 1\n\n relevant_response_data = response.data[\"results\"][0]\n assert word.english in relevant_response_data[\"english\"]\n assert word.turkish in relevant_response_data[\"turkish\"]\n assert word.author.username in relevant_response_data[\"author_name\"]", "title": "" }, { "docid": "5705394ec24b8d387d20ab36e0ae9b39", "score": "0.55745137", "text": "def test_read_doc_for_public(self):\n self.login_as(user=self.user)\n # Non-draft DocIntegration, with features and an avatar\n response = self.get_success_response(self.doc_2.slug, status_code=status.HTTP_200_OK)\n assert serialize(self.doc_2) == response.data\n features = IntegrationFeature.objects.filter(\n target_id=self.doc_2.id, target_type=IntegrationTypes.DOC_INTEGRATION.value\n )\n for feature in features:\n assert serialize(feature) in serialize(self.doc_2)[\"features\"]\n assert serialize(self.doc_2.avatar.get()) == response.data[\"avatar\"]\n # Draft DocIntegration, without features or an avatar\n self.get_error_response(self.doc_1.slug, status_code=status.HTTP_403_FORBIDDEN)", "title": "" }, { "docid": "259c99f844d254bc79d2714430539ef4", "score": "0.5569446", "text": "def test_supporting_documents():\n documents = [RegsGovDoc(str(i), str(i)*3) for i in range(4)]\n notice = _dummy_notice()\n assert 
notice.supporting_documents == []\n notice.supporting_documents = documents\n assert notice.supporting_documents == documents", "title": "" }, { "docid": "e698c31f4c8cea16dd8a2eca80b9e9c5", "score": "0.55557555", "text": "def test_indexers_courses_related_objects_consistency(self):\n # Create a course with a page in both english and french\n organization = OrganizationFactory(should_publish=True)\n category = CategoryFactory(should_publish=True)\n course = CourseFactory(\n fill_organizations=[organization],\n fill_categories=[category],\n should_publish=True,\n )\n CourseRunFactory(page_parent=course.extended_object, should_publish=True)\n\n course_document = list(\n CoursesIndexer.get_data_for_es(index=\"some_index\", action=\"some_action\")\n )[0]\n self.assertEqual(\n course_document[\"organizations\"],\n [\n next(\n OrganizationsIndexer.get_data_for_es(\n index=\"some_index\", action=\"some_action\"\n )\n )[\"_id\"]\n ],\n )\n self.assertEqual(\n course_document[\"categories\"],\n [\n next(\n CategoriesIndexer.get_data_for_es(\n index=\"some_index\", action=\"some_action\"\n )\n )[\"_id\"]\n ],\n )", "title": "" }, { "docid": "bd635a6d76d0a9e857004e6fd9f1a1dd", "score": "0.5552145", "text": "def test_getAll(self):\n objs = self.model.getAll()\n self.assertTrue(isinstance(objs, list))\n self.assertEqual(len(objs), 3)", "title": "" }, { "docid": "b9bd5e3edd724be2dab0ddc4d283a925", "score": "0.55385095", "text": "def test_find_one(self):\r\n person1 = self.Person(name=\"User A\", age=20)\r\n person1.save()\r\n person2 = self.Person(name=\"User B\", age=30)\r\n person2.save()\r\n\r\n # Retrieve the first person from the database\r\n person = self.Person.objects.first()\r\n self.assertTrue(isinstance(person, self.Person))\r\n self.assertEqual(person.name, \"User A\")\r\n self.assertEqual(person.age, 20)\r\n\r\n # Use a query to filter the people found to just person2\r\n person = self.Person.objects(age=30).first()\r\n self.assertEqual(person.name, \"User B\")\r\n\r\n person = self.Person.objects(age__lt=30).first()\r\n self.assertEqual(person.name, \"User A\")\r\n\r\n # Use array syntax\r\n person = self.Person.objects[0]\r\n self.assertEqual(person.name, \"User A\")\r\n\r\n person = self.Person.objects[1]\r\n self.assertEqual(person.name, \"User B\")\r\n\r\n self.assertRaises(IndexError, self.Person.objects.__getitem__, 2)\r\n\r\n # Find a document using just the object id\r\n person = self.Person.objects.with_id(person1.id)\r\n self.assertEqual(person.name, \"User A\")\r\n\r\n self.assertRaises(InvalidQueryError, self.Person.objects(name=\"User A\").with_id, person1.id)", "title": "" }, { "docid": "99f430d5416867d4cbf0c6287161ec3c", "score": "0.5533983", "text": "def test_update_document(self):\n pass", "title": "" }, { "docid": "343997d18996cb40b175137689caf432", "score": "0.5533954", "text": "def test_001_get_company_object_successfully(self):\n company0 = Company.objects.get(index=0)\n company1 = Company.objects.get(index=1)\n self.assertEqual(company0.index, 0)\n self.assertEqual(company0.company, \"Test Company 0\")\n self.assertEqual(company1.index, 1)\n self.assertEqual(company1.company, \"Test Company 1\")\n self.assertEqual(Company.objects.count(), 2)", "title": "" }, { "docid": "e3e8208c0993a095f6f77042e2354fbe", "score": "0.5533199", "text": "def test_retrieve(self):\n for sd in test_srs:\n srs = self.SpatialRefSys.objects.get(srid=sd[\"srid\"])\n self.assertEqual(sd[\"srid\"], srs.srid)\n\n # Some of the authority names are borked on Oracle, e.g., SRID=32140.\n # also, Oracle 
Spatial seems to add extraneous info to fields, hence the\n # the testing with the 'startswith' flag.\n auth_name, oracle_flag = sd[\"auth_name\"]\n # Compare case-insensitively because srs.auth_name is lowercase\n # (\"epsg\") on Spatialite.\n if not connection.ops.oracle or oracle_flag:\n self.assertIs(srs.auth_name.upper().startswith(auth_name), True)\n\n self.assertEqual(sd[\"auth_srid\"], srs.auth_srid)\n\n # No PROJ and different srtext on Oracle.\n if not connection.ops.oracle:\n self.assertTrue(srs.wkt.startswith(sd[\"srtext\"]))\n self.assertRegex(srs.proj4text, sd[\"proj_re\"])", "title": "" }, { "docid": "03269094cc3a0e14a0c94271275aceed", "score": "0.55329883", "text": "def test_read_doc_for_superuser(self):\n self.login_as(user=self.superuser, superuser=True)\n # Non-draft DocIntegration, with features and an avatar\n response = self.get_success_response(self.doc_2.slug, status_code=status.HTTP_200_OK)\n assert serialize(self.doc_2) == response.data\n features = IntegrationFeature.objects.filter(\n target_id=self.doc_2.id, target_type=IntegrationTypes.DOC_INTEGRATION.value\n )\n for feature in features:\n assert serialize(feature) in response.data[\"features\"]\n assert serialize(self.doc_2.avatar.get()) == response.data[\"avatar\"]\n # Draft DocIntegration, without features or an avatar\n response = self.get_success_response(self.doc_1.slug, status_code=status.HTTP_200_OK)\n assert serialize(self.doc_1) == response.data\n assert not response.data[\"avatar\"]", "title": "" }, { "docid": "1ed8c8e9b63bdaabcc4e4da779455591", "score": "0.5532942", "text": "def test_if_reseller_manager_initiated(self):\n from resello.managers.reseller import ResellerManager\n client = ReselloClient(api_key='test', reseller_reference='test')\n\n self.assertIsInstance(client.reseller, ResellerManager)", "title": "" }, { "docid": "deb5a7699f81ffd4d1934e0f6abc58bf", "score": "0.55317855", "text": "def test_show_publisher(self):\n response = self.client.get(reverse('publisher-detail', kwargs={'pk': self.publisher_record.pk}))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n self.assertEqual(\n json.loads(response.content),\n self.prepare_publisher_object(self.publisher_record)\n )", "title": "" }, { "docid": "2eaebb5d68c622abb26d70d065288c5c", "score": "0.5530631", "text": "def test_list(self):\n self.assertIsInstance(self.repo.list(), pymongo.cursor.Cursor)", "title": "" }, { "docid": "54f0f2fc108f8ad395053475d941a280", "score": "0.552551", "text": "def test_document_get_preview(self):\n pass", "title": "" }, { "docid": "795324efc181db068b89191397cdf5fb", "score": "0.5516457", "text": "def test_get_queryset(self):\n # Testing with Musiker model here, instead of Genre, due to the variety\n # of relations that model has.\n _unused = make(Musiker)\n view = self.get_view(request=self.get_request())\n for limit in [0, 1, 2]:\n relations, queryset = view.get_queryset(Musiker, limit)\n with self.subTest(limit=limit):\n self.assertEqual(queryset.count(), limit + 1)", "title": "" }, { "docid": "e006caccbc8fbcd79cbded4d98e0d61b", "score": "0.5515975", "text": "def test_queryset(self):\n liaison = Liaison.objects.latest('pk')\n liaison.active = False\n liaison.save()\n\n count_all = Liaison.objects_all.count()\n count_visible = Liaison.objects.count()\n assert count_visible < count_all\n\n for liaison in Liaison.objects.all():\n assert liaison in LiaisonList().queryset\n\n for liaison in LiaisonList().queryset:\n assert liaison in Liaison.objects.all()", "title": "" }, { "docid": 
"e3421244e858f0f810ce57e09e95d824", "score": "0.5502273", "text": "def test_book_detail(self):\n book = Book.objects.get(title='Gnarf')\n self.assertEqual(book.author, 'django')", "title": "" }, { "docid": "e212d6e5e80b618505f4f58d8ee58e02", "score": "0.550184", "text": "def test_publisher_listing(self):\n response = self.client.get(reverse('publisher-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n self.assert_paginated_response(\n response=response,\n expected_results=[self.prepare_publisher_object(self.publisher_record)]\n )", "title": "" }, { "docid": "4b92c250683d58f538413c6d3a234a07", "score": "0.548912", "text": "def test_document_load_from_uri(self):\n pass", "title": "" }, { "docid": "83d51924d3fe55054f250c3db8af1f8f", "score": "0.5488377", "text": "def test_doc_class(self):\n self.assertIsNotNone(DBStorage.__doc__)", "title": "" }, { "docid": "bee81fe471f224512bc6808b003a56e0", "score": "0.5487065", "text": "def testDeleteManyDocumentsByQuery(self):\n name = _randomString(20)\n\n docs = []\n for _ in range(5):\n doc = {'id': _randomString(20),\n 'name': name}\n docs.append(doc)\n\n # Add the documents\n yield self.client.add(docs)\n yield self.client.commit()\n\n # Delete the documents\n yield self.client.deleteByQuery('name:%s' % name)\n yield self.client.commit()\n\n r = yield self.client.search('name:%s' % name)\n self.assertEqual(r.results.numFound, 0,\n 'Document was not deleted')\n\n for doc in docs:\n r = yield self.client.search('id:%s' % doc['id'])\n self.assertEqual(r.results.numFound, 0,\n 'Document was not deleted')", "title": "" }, { "docid": "dfd006b6b869c012af55ec7a8f4d8965", "score": "0.5484172", "text": "def test_find(service, models_pool):\n models = models_pool[service.model_class]\n ret = service.find()\n\n assert len(ret) == len(models)\n assert set(ret) == set(models)", "title": "" } ]
34ddb702a86b35078394c58921e0456b
Get general document preferences from sublime preferences.
[ { "docid": "7b669abd88a28de68f020e93affca0b6", "score": "0.0", "text": "def setup(self, **kwargs):\n\n eh_settings = sublime.load_settings(PACKAGE_SETTINGS)\n settings = self.view.settings()\n alternate_font_size = eh_settings.get(\"alternate_font_size\", False)\n alternate_font_face = eh_settings.get(\"alternate_font_face\", False)\n self.font_size = settings.get('font_size', 10) if alternate_font_size is False else alternate_font_size\n self.font_face = settings.get('font_face', 'Consolas') if alternate_font_face is False else alternate_font_face\n self.tab_size = settings.get('tab_size', 4)\n self.padd_top = settings.get('line_padding_top', 0)\n self.padd_bottom = settings.get('line_padding_bottom', 0)\n self.char_limit = int(eh_settings.get(\"valid_selection_size\", 4))\n font_options = settings.get('font_options', [])\n self.no_bold = 'no_bold' in font_options\n self.no_italic = 'no_italic' in font_options\n self.bground = ''\n self.fground = ''\n self.gbground = ''\n self.gfground = ''\n self.table_mode = kwargs[\"table_mode\"]\n self.numbers = kwargs[\"numbers\"]\n self.date_time_format = kwargs[\"date_time_format\"]\n self.time = time.localtime()\n self.disable_nbsp = kwargs[\"disable_nbsp\"]\n self.show_full_path = kwargs[\"show_full_path\"]\n self.sels = []\n self.ignore_selections = kwargs[\"ignore_selections\"]\n if self.ignore_selections:\n self.multi_select = False\n self.highlight_selections = False\n else:\n self.highlight_selections = kwargs[\"highlight_selections\"]\n if kwargs[\"multi_select\"] and not kwargs[\"highlight_selections\"]:\n self.multi_select = self.check_sel()\n else:\n self.multi_select = False\n self.browser_print = kwargs[\"browser_print\"]\n self.auto_wrap = kwargs[\"wrap\"] is not None and int(kwargs[\"wrap\"]) > 0\n self.wrap = 900 if not self.auto_wrap else int(kwargs[\"wrap\"])\n self.hl_continue = None\n self.curr_hl = None\n self.size = self.view.size()\n self.pt = 0\n self.end = 0\n self.curr_row = 0\n self.tables = 0\n self.curr_annot = None\n self.curr_comment = None\n self.annotations = self.get_annotations()\n self.annot_num = -1\n self.new_annot = False\n self.open_annot = False\n self.no_header = kwargs[\"no_header\"]\n self.annot_tbl = []\n self.toolbar = kwargs[\"toolbar\"]\n self.legacy = eh_settings.get('legacy_color_matcher', False)\n if eh_settings.get(\"toolbar_orientation\", \"horizontal\") == \"vertical\":\n self.toolbar_orientation = \"block\"\n else:\n self.toolbar_orientation = \"inline-block\"\n self.ebground = self.bground\n self.lumens_limit = float(eh_settings.get(\"bg_min_lumen_threshold\", 62))\n\n fname = self.view.file_name()\n if fname is None or not path.exists(fname):\n fname = \"Untitled\"\n self.file_name = fname\n\n temp = kwargs[\"color_scheme\"]\n # Get color scheme\n if temp is not None and (not AUTO or temp != \"auto\"):\n alt_scheme = temp\n else:\n alt_scheme = eh_settings.get(\"alternate_scheme\", False)\n if AUTO and alt_scheme == \"auto\":\n alt_scheme = False\n\n switch = False\n view_scheme = self.view.settings().get('color_scheme')\n default = sublime.load_settings('Preferences.sublime-settings')\n self.switch = False\n self.save_to_view = False\n self.view_scheme = view_scheme\n if isinstance(alt_scheme, str) and alt_scheme != view_scheme:\n switch = True\n if view_scheme != default.get('color_scheme'):\n self.save_to_view = True\n scheme_file = alt_scheme\n else:\n scheme_file = view_scheme\n\n if scheme_file == 'auto' and AUTO:\n info = sublime.ui_info()\n scheme_file = 
info['color_scheme']['resolved_value']\n\n self.highlights = []\n if self.highlight_selections:\n for sel in self.view.sel():\n if not sel.empty():\n self.highlights.append(sel)\n\n self.tweak_cache = {}\n self.tweaker = ColorTweaker(kwargs[\"filter\"])\n\n if self.legacy:\n print('ExportHtml: Using legacy color matcher')\n self.csm = ColorSchemeMatcher(\n scheme_file,\n color_filter=(lambda x: ColorSchemeTweaker().tweak(x, kwargs[\"filter\"]))\n )\n self.fground = self.csm.get_special_color('foreground', simulate_transparency=True)\n self.bground = self.csm.get_special_color('background', simulate_transparency=True)\n if kwargs[\"style_gutter\"]:\n self.gfground = self.csm.get_special_color('gutter_foreground', simulate_transparency=True)\n self.gbground = self.csm.get_special_color('gutter', simulate_transparency=True)\n else:\n self.gfground = self.fground\n self.gbground = self.bground\n else:\n self.switch = switch\n if self.switch:\n self.view.settings().set('color_scheme', scheme_file)\n self.fground = self.tweak(self.view.style().get('foreground'), None)[0]\n self.bground = self.tweak(None, self.view.style().get('background'))[1]\n self.gfground = self.tweak(self.view.style().get('gutter_foreground', self.fground), None)[0]\n self.gbground = self.tweak(None, self.view.style().get('gutter', self.bground))[1]", "title": "" } ]
[ { "docid": "a89d575bf649c9bcb65bc1478dafa92c", "score": "0.680794", "text": "def Settings(name, default):\n view = sublime.active_window().active_view()\n project_config = view.settings().get('sublimeautopep8', {}) if view else {}\n global_config = sublime.load_settings(common.USER_CONFIG_NAME)\n return project_config.get(name, global_config.get(name, default))", "title": "" }, { "docid": "9528ded56b3c6115228817be32522e65", "score": "0.67926383", "text": "def get_preferences():\n return get_info(\"Preferences\")", "title": "" }, { "docid": "429ba1080f521d9809e40df793fe4bec", "score": "0.67554325", "text": "def settings():\n return load_settings(\"GutterColor.sublime-settings\")", "title": "" }, { "docid": "d96d3eb86624081fd55c04fbabacefca", "score": "0.61913735", "text": "def get_settings():\n with open('settings.text', 'rb') as fhandle:\n data = cPickle.load(fhandle)\n global level, default_shortcut, custom_shortcut, access, available_check, company\n level = data['debug_mode']\n if data.get('default_shortcut'):\n default_shortcut = data['default_shortcut']\n if data.get('custom_shortcut'):\n custom_shortcut = data['custom_shortcut']\n if data.get('access'):\n access = data['access']\n if data.get('avaliable_check'):\n available_check = data['avaliable_check']\n if data.get('company'):\n company = data['company']", "title": "" }, { "docid": "d4c3b2e5890f983309afac490fa9f389", "score": "0.619115", "text": "def get_setting(key):\n\n return sublime.load_settings(SETTINGS_FILE).get(key)", "title": "" }, { "docid": "621544a262df15ef0b9a95fe45c15b59", "score": "0.5969022", "text": "def get_misc_settings(self):\n return self._settings[\"misc\"]", "title": "" }, { "docid": "3c1299347dee1f1c1b804afd17bbe01b", "score": "0.5696845", "text": "def load_settings():\n data = {}\n\n try:\n with open(Path(os.getcwd()) / 'pyproject.toml', 'r') as file:\n data = toml.load(file)\n logging.info('Found pyproject.toml; loading data...')\n except FileNotFoundError:\n logging.info(\n '''Could not find a pyproject.toml file with a valid [dpymenus] header. 
Using default settings.\n See https://dpymenus.com/global_configuration.html on how to set configuration options.'''\n )\n finally:\n config_data = data.get('dpymenus', {})\n\n return config_data if config_data else {}", "title": "" }, { "docid": "1e0b4e254e4ecf9854d1e5125187c660", "score": "0.56887597", "text": "def get_user_settings(gh, cfg=\"global.cfg\"):\n ws = get_workspace(gh)\n try:\n c = ws.get_contents(\"Settings/%s\" % (cfg))\n except (UnknownObjectException):\n return None\n return c.decoded_content", "title": "" }, { "docid": "dec617d1eac279aa4ec59778ddfaf64b", "score": "0.5682325", "text": "def get_preferences(self):\r\n return self.__preferences", "title": "" }, { "docid": "e7a5af3effabce5ea88034db9889eb52", "score": "0.5566162", "text": "def open_preferences(self):\r\n \r\n if os.path.exists(os.path.join(PREFSDIR, PREFSNAME)):\r\n \r\n f = open(os.path.join(PREFSDIR, PREFSNAME), 'r')\r\n prefs = f.read()\r\n f.close()\r\n \r\n return prefs", "title": "" }, { "docid": "1be04de3647b42d1cf761dabd54373a0", "score": "0.55356455", "text": "def get_default(self,preference):\n\n if self.mainWindow == None and re.search('path',preference):\n return '/some/path'\n elif preference == 'pdfviewer_path':\n return self.mainWindow.controller.get_pdfviewer_path() \n elif preference == 'r_path':\n return self.mainWindow.controller.get_r_path()\n elif preference == 'python_path':\n return self.mainWindow.controller.get_python_path()\n elif preference == 'latex_path':\n return self.mainWindow.controller.get_latex_path()\n elif preference == 'latex2html_path':\n return self.mainWindow.controller.get_latex2html_path()\n elif preference == 'sphinx_path':\n return self.mainWindow.controller.get_sphinx_path()\n elif defaultLog.has_key(preference):\n return defaultLog[preference]\n else:\n print \"ERROR: tried to get invalid preference default in Preferences.py\"\n print \"...\", preference", "title": "" }, { "docid": "4894b978053c776035dfb849c5a30428", "score": "0.54340416", "text": "def keyword_preference_window_get_selected_language(self):\n selected_language = self.Preferences_window_get_selected_Language()\n return {\"selected_language\":selected_language}", "title": "" }, { "docid": "4b892937ce2550a3ff2e8e2277a65781", "score": "0.5428152", "text": "def get_project_data(self) -> Dict[str, Any]:\n return sublime.active_window().project_data() or {}", "title": "" }, { "docid": "5353604667360957cd309743b82cc8c9", "score": "0.53976405", "text": "def get_config():\n filename = os.getenv('QISKIT_SETTINGS', DEFAULT_FILENAME)\n if not os.path.isfile(filename):\n return {}\n user_config = UserConfig(filename)\n user_config.read_config_file()\n return user_config.settings", "title": "" }, { "docid": "1fba3ac1b8af9b37226e5168c435c457", "score": "0.53944296", "text": "def GetDocumentStyle():\n prompt_styles = styles.default_style_extensions\n prompt_styles.update({\n Token.Menu.Completions.Completion.Current: Color(BLUE, GRAY),\n Token.Menu.Completions.Completion: Color(BLUE, DARK_GRAY),\n Token.Toolbar: Color(BLUE, DARK_GRAY),\n Token.Toolbar.Account: Color(),\n Token.Toolbar.Separator: Color(),\n Token.Toolbar.Project: Color(),\n Token.Prompt: Color()\n })\n return styles.PygmentsStyle.from_defaults(style_dict=prompt_styles)", "title": "" }, { "docid": "5efb234bf4d2d0da18602f55e5139707", "score": "0.5391207", "text": "def preferences(self):\r\n \r\n return self._preferences", "title": "" }, { "docid": "e490e9a231d8f9340e27bfef02bb0607", "score": "0.53811395", "text": "def get_search_settings():\r\n 
search_settings = \"\"\r\n with open(\"search_settings.txt\", 'r') as f:\r\n search_settings = f.read()\r\n return search_settings", "title": "" }, { "docid": "90c6d6879b891c8f6a6fcbbfd71ec968", "score": "0.5334762", "text": "def get_settings(self):\n output = self._run_command('nordvpn settings')\n if output is None:\n return {}\n return self._parse_settings(output)", "title": "" }, { "docid": "d25ccff2db5159d66e7d5e57953be9b2", "score": "0.5291177", "text": "def get_user_prefs():\n\n prefs = {}\n\n prefs['starting_location'] = '28105'\n prefs['max_travel_hours'] = 2\n prefs['n_destinations'] = 4\n\n return prefs", "title": "" }, { "docid": "018df9af0a018dea13d440cfe95ccde3", "score": "0.52848035", "text": "def get(self, key, default=None):\n return self.config.get('general', key) if self.config.get('general', key) else default", "title": "" }, { "docid": "07c16e45ca70c94933019d35cc667c54", "score": "0.52522904", "text": "def _read_global_options(path_globaloptions_file: str):\n with open(path_globaloptions_file, 'r') as globaloptions:\n globaloptions_lines = globaloptions.readlines()\n return globaloptions_lines", "title": "" }, { "docid": "e3007f86dbb3be9962765d843734ae15", "score": "0.52511156", "text": "def get_settings(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "807eed3fad97080a423c7f891fdaed96", "score": "0.52510756", "text": "def settings(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"settings\")", "title": "" }, { "docid": "652002432a70e8b91d4df027d66c8d2d", "score": "0.5241598", "text": "def get_preferences(self):\n schema = PreferencesSchema()\n resp = self.service.get(self.base+'preferences/')\n return self.service.decode(schema, resp)", "title": "" }, { "docid": "384d9a76173195d9f97137c970b59a7d", "score": "0.522977", "text": "def get_settings(self):\n ...", "title": "" }, { "docid": "5d3d6ea2288dd705112fbdbd769e8be0", "score": "0.51978713", "text": "def general_mef_settings(self, prefs):\n self._no_keyword = 'N/A'\n\n gen_settings = prefs.create_category('general')\n gen_settings.load(onError='silent')\n self._sci_extname = gen_settings.get('sciextname', 'SCI')\n self._err_extname = gen_settings.get('errextname', 'ERR')\n self._dq_extname = gen_settings.get('dqextname', 'DQ')\n self._ext_key = gen_settings.get('extnamekey', 'EXTNAME')\n self._extver_key = gen_settings.get('extverkey', 'EXTVER')\n self._tel_key = gen_settings.get('telescopekey', 'TELESCOP')\n self._ins_key = gen_settings.get('instrumentkey', 'INSTRUME')", "title": "" }, { "docid": "9342fa827fb0ebd9784212359e19e92e", "score": "0.5192739", "text": "def get_server_prefs():\n # Check for mandatory prefs and bail if any are missing.\n required_prefs = {\n 'key': pref('key'),\n 'server_url': pref('ServerURL').rstrip('/')}\n\n for key, val in required_prefs.items():\n if not val:\n exit(f'Required Sal preference \"{key}\" is not set.')\n\n # Get optional preferences.\n name_type = pref('NameType', default='ComputerName')\n\n return required_prefs[\"server_url\"], name_type, required_prefs[\"key\"]", "title": "" }, { "docid": "4fa602d037a0677d8a92cc17de307c30", "score": "0.518916", "text": "def get_server_prefs():\n # Check for mandatory prefs and bail if any are missing.\n required_prefs = {\n 'key': pref('key'),\n 'server_url': pref('ServerURL').rstrip('/')}\n\n for key, val in required_prefs.items():\n if not val:\n sys.exit('Required Sal preference \"{}\" is not set.'.format(key))\n\n # Get optional preferences.\n name_type = pref('NameType', default='ComputerName')\n\n return 
required_prefs[\"server_url\"], name_type, required_prefs[\"key\"]", "title": "" }, { "docid": "c1ff829d68d21ea722d289978ba32ae6", "score": "0.5174097", "text": "def __get_settings_filename(self):\n return '{}.sublime-settings'.format(PLUGIN_NAME)", "title": "" }, { "docid": "e1a0eeec0a90fc3f98dde1539d883686", "score": "0.5171339", "text": "def get_config(self):\n return self.ask(\":CONFigure?\").strip()[1:-1]", "title": "" }, { "docid": "f89772119a6ad7f5f22bd3d6fc31dd83", "score": "0.5170249", "text": "def get_global_option(option):\n try:\n return CONFIGURATION['global'][option]\n except KeyError:\n return None", "title": "" }, { "docid": "d9ae843e6468ee5b8b2e20109118a4d1", "score": "0.51414067", "text": "def prompt_for_settings(self):\n return dict()", "title": "" }, { "docid": "05996245548b326aeaad8752e0c60dd0", "score": "0.51340383", "text": "def get_global(self, key, user='__global'):\n\t\treturn self.get_default(key, user)", "title": "" }, { "docid": "fa450eb9febb1cb5c47d0f8decc9f6d0", "score": "0.51311636", "text": "def settings(self) -> Optional[Any]:\n return pulumi.get(self, \"settings\")", "title": "" }, { "docid": "fa450eb9febb1cb5c47d0f8decc9f6d0", "score": "0.51311636", "text": "def settings(self) -> Optional[Any]:\n return pulumi.get(self, \"settings\")", "title": "" }, { "docid": "fa450eb9febb1cb5c47d0f8decc9f6d0", "score": "0.51311636", "text": "def settings(self) -> Optional[Any]:\n return pulumi.get(self, \"settings\")", "title": "" }, { "docid": "0081477a6554b401d23f62b903b9cc2a", "score": "0.50997555", "text": "def get_setting(self, key):\n key, definition_type, section, default = self._define(key)\n my_val = definition_type(os.getenv(key, default))\n return my_val", "title": "" }, { "docid": "daa2bf815d8509653aae9b626185b6cc", "score": "0.50980043", "text": "def get_global(self, key, user=\"__global\"):\n\t\treturn self.get_default(key, user)", "title": "" }, { "docid": "fb32d207d859705c4d8d5c8241107801", "score": "0.5092649", "text": "def get_preferences(self):\n path = 'api/account/preferences'\n return self._r.get(path)['prefs']", "title": "" }, { "docid": "8fed4098a4ec8669f8cad56ad5b8b03a", "score": "0.5092312", "text": "def get_global_defaults(cls) -> SiteConfigurationSettings:\n return _GLOBAL_DEFAULTS", "title": "" }, { "docid": "07c24ec58797f18956e0894eae478b75", "score": "0.5087636", "text": "def personalsetting(self):\n return self._personalsetting", "title": "" }, { "docid": "9f996f76e5c31106ec6cf66101f1d410", "score": "0.50788945", "text": "def available_settings(self):\n comments = []\n order = []\n result_settings = {}\n if self.filename:\n fh = open(self.filename, 'r')\n for line in fh.readlines():\n line = line.strip()\n if line:\n if line.startswith('#'):\n temp = line.strip('#').strip()\n if temp:\n comments.append(temp)\n else:\n if '=' in line:\n setting = '%s.%s' % (\n self.conffile, line.split('=')[0])\n result_settings[setting] = {\n 'help': comments,\n 'value': '='.join(line.split('=')[1:]).strip()\n }\n comments = []\n order.append(setting)\n else:\n comments = []\n return result_settings, order", "title": "" }, { "docid": "9b9a1b71e9643ef18174d53dab51e932", "score": "0.5073364", "text": "def get_settings():\n with open(\"{0}/{1}\".format(__BASE_DIR, \"../settings.json\")) as data_file:\n data = json.load(data_file)\n return data", "title": "" }, { "docid": "f60a5177620de9999d703bf04cc4aba2", "score": "0.5069891", "text": "def get_settings(self):\n return self.settings", "title": "" }, { "docid": "f60a5177620de9999d703bf04cc4aba2", "score": 
"0.5069891", "text": "def get_settings(self):\n return self.settings", "title": "" }, { "docid": "f60a5177620de9999d703bf04cc4aba2", "score": "0.5069891", "text": "def get_settings(self):\n return self.settings", "title": "" }, { "docid": "c233fbb26d61e99cc88e1c322117a59b", "score": "0.5068087", "text": "def getConfig():", "title": "" }, { "docid": "df6fab8b8deb58301908168e1bdb5d6b", "score": "0.50641686", "text": "def get_chall_pref(domain):", "title": "" }, { "docid": "77a364f760a724da41c1cee3f75ca97e", "score": "0.50534016", "text": "def get(self, setting, default=None):\r\n return self.settings.get(setting, default)", "title": "" }, { "docid": "147f35325a73883c6b56191958bf8e80", "score": "0.5050404", "text": "def get_settings(self):\r\n\r\n settings = self.find_gutter_themes()\r\n settings.append(['None', 'Do not display gutter marks'])\r\n self.themes.append('none')\r\n\r\n return settings", "title": "" }, { "docid": "5906692c8abb6a58e23030155beee150", "score": "0.5044252", "text": "def get_env(app, curdoc):\n from sphinx.application import ENV_PICKLE_FILENAME\n filename = os.path.join(\n app.env.doctreedir, curdoc, ENV_PICKLE_FILENAME)\n try:\n f = open(filename, 'rb')\n except IOError:\n app.info(\"\")\n app.warn(\"Unable to fetch %s \"%filename)\n return None\n docenv = cPickle.load(f)\n f.close()\n return docenv", "title": "" }, { "docid": "af1a002c3735d7ef818d6c1bcd3c781a", "score": "0.5044217", "text": "def read_settings(self):\n if self.app.config.get('general', 'music') == 'yes':\n self.ids._music.active = True\n else:\n self.ids._music.active = False\n if self.app.config.get('general', 'voice') == 'yes':\n self.ids._voice.active = True\n else:\n self.ids._voice.active = False\n self.ids._maxdigits.text = self.app.config.get('basicmath', 'maxdigits')\n self.ids._operation.text = self.app.config.get('basicmath', 'operation')", "title": "" }, { "docid": "f2af39fab5fbfe228019e11fbd97f9af", "score": "0.50326645", "text": "def preferences_file() -> Tuple[Path, Any]:\n # Linux\n home = Path.home()\n tmpd = Path(tempfile.gettempdir())\n # Windows\n localappdata = os.environ.get(\"LOCALAPPDATA\")\n if localappdata:\n localappdata = Path(localappdata)\n appdata = os.environ.get(\"APPDATA\")\n if appdata:\n appdata = Path(appdata)\n # search in order of preferred path\n for configd in (\n localappdata,\n appdata,\n home.joinpath(\".config\"),\n home,\n tmpd,\n ):\n if not configd:\n continue\n if configd.is_dir() and os.access(configd, os.W_OK):\n break\n configd = None\n if not configd:\n raise RuntimeError(\"No writeable preferences directory found\")\n\n # https://bachiraoun.github.io/pypref/\n from pypref import Preferences\n\n pref = Preferences(filename=str(PREFERENCE_FILE_NAME), directory=str(configd))\n path_ = configd.joinpath(PREFERENCE_FILE_NAME)\n return path_, pref", "title": "" }, { "docid": "488c74be0966ea6665a2c7615fff8621", "score": "0.50283927", "text": "def get_settings(self):\r\n return highlight.mark_style_names()", "title": "" }, { "docid": "86b8e4ac060c828d3b399ecb4ce43b01", "score": "0.5026659", "text": "def settings(item):\n registry = pyramid.threadlocal.get_current_registry()\n return registry.settings.get(item, None)", "title": "" }, { "docid": "8b8be297f6a5aaf39f6385270fdbabf1", "score": "0.5026524", "text": "def read_prefs(self) -> dict:\n\n\t\tif self.verbose: print(f\"Trying to read {self.filename}\")\n\n\t\tcontent = {} # Content will be where the prefs will be stored when reading\n\n\t\twith open(self.filename, \"r\") as file: # Open the file with read 
permissions\n\t\t\tlines = file.read().split(\"\\n\") # Read lines\n\t\t\tlines = self.clean_lines(lines)\n\n\t\t\tif len(lines) == 0:\n\t\t\t\tif self.verbose: print(f\"Emtpy file {self.filename}\")\n\t\t\t\treturn {}\n\n\t\t\tcontent = self.get_lines_properties(lines) # Get lines properties (key, val, indentLevel)\n\t\t\tcontent = self.tree_to_dict(content) # Interpreting the result of get_lines_properties() returns the dictionary with the prefs. \n\t\t\tcontent = self.eval_dict(content) # Pass content to eval_dict function that eval each value.\n\n\t\tif self.verbose: print(f\"Read {self.filename}\")\n\n\t\treturn content # Return prefs file as dictionary", "title": "" }, { "docid": "6e6ca9d4031b357d6f9766737dd3a05b", "score": "0.5022887", "text": "def get(name):\n if name not in _OPTIONS:\n rt.throw(\"pyesdoc option {0} is unsupported\".format(name))\n\n return _OPTIONS[name]", "title": "" }, { "docid": "406c406233a9db7bd46f7d8798472a2a", "score": "0.5015478", "text": "def get_main_settings(db: Session = Depends(generate_session)):\n\n return SiteSettings.get_site_settings(db)", "title": "" }, { "docid": "196bfd9c892af60025467d791c0cfbb4", "score": "0.50147325", "text": "def get_config():\n config_name = \".tsdrc\"\n # Startwith default values\n config = {\n \"series_dir\": os.getenv(\"HOME\") + \"/tsd/\",\n \"testing\": 0,\n }\n config.update(_get_config(os.getenv(\"HOME\") + \"/\" + config_name))\n config.update(_get_config(config_name))\n # Cast what we can\n config[\"testing\"] = bool(config[\"testing\"])\n global G_CONFIG\n G_CONFIG = config", "title": "" }, { "docid": "debadf5e2c677d1d56991735dbdc531f", "score": "0.5012113", "text": "def get_settings(self):\n location = self.get_location()\n settings_file = location / \"BespinSettings\" / \"settings\"\n if not settings_file.exists():\n return {}\n settings = {}\n for line in settings_file.lines(retain=False):\n info = line.split(\" \", 1)\n if len(info) != 2:\n continue\n settings[info[0]] = info[1]\n return settings", "title": "" }, { "docid": "6a4a390edb2de15245025c0bc4a46d65", "score": "0.5002996", "text": "def get_pelican_settings(generators):\n assert len(generators) > 0\n return generators[0].settings", "title": "" }, { "docid": "660267aa85a56ab1dbf3d7cadafaa499", "score": "0.50019157", "text": "def getSettings(self):\n _descriptiveName = self.tkListbox.get(tk.ACTIVE)\n # Just the descriptive name for the server, get the password from\n # favourites file\n return _descriptiveName", "title": "" }, { "docid": "f1e83e98aef44f5fdaaa387bd5c85339", "score": "0.49873662", "text": "def get_merged_settings(self):\r\n\r\n # Start with the overall project settings. 
Note that when\r\n # files are loaded during quick panel preview, it can happen\r\n # that they are linted without having a window.\r\n window = self.view.window()\r\n\r\n if window:\r\n data = window.project_data() or {}\r\n project_settings = data.get(persist.PLUGIN_NAME, {})\r\n else:\r\n project_settings = {}\r\n\r\n # Merge global meta settings with project meta settings\r\n meta = self.meta_settings(persist.settings.settings)\r\n meta.update(self.meta_settings(project_settings))\r\n\r\n # Get the linter's project settings, update them with meta settings\r\n project_settings = project_settings.get('linters', {}).get(self.name, {})\r\n project_settings.update(meta)\r\n\r\n # Update the linter's settings with the project settings\r\n settings = self.merge_project_settings(self.settings().copy(), project_settings)\r\n\r\n # Update with rc settings\r\n self.merge_rc_settings(settings)\r\n\r\n self.replace_settings_tokens(settings)\r\n return settings", "title": "" }, { "docid": "90aec462a3fcfd819a2dc99e0f912461", "score": "0.49696273", "text": "def retrieve_default_settings(self):\n current_level = self.xml_file.read_default_active_setting('level')\n default_settings = {\n \"level\" : self.xml_file.read_default_active_setting('level'),\n \"algorithm\" : self.xml_file.read_default_active_setting('algorithm'),\n \"path\" : self.xml_file.retrieve_text_node_value('output', 'Game Output', 'path'),\n \"filename\" : self.xml_file.retrieve_text_node_value('output', 'Game Output', 'filename'),\n \"min\": self.xml_file.retrieve_text_node_value(\"level\", current_level, \"min\"),\n \"max\": self.xml_file.retrieve_text_node_value(\"level\", current_level, \"max\"),\n \"hints\": self.xml_file.retrieve_text_node_value(\"level\", current_level, \"hints\"),\n }\n return default_settings", "title": "" }, { "docid": "d0f647eca4662903bbed8d203a25c5bc", "score": "0.4966755", "text": "def _get_settings(self):\r\n if self._settings is None:\r\n self._settings = self.get_settings()\r\n\r\n return self._settings", "title": "" }, { "docid": "2aa513f123498b88242edd8e958078da", "score": "0.49444044", "text": "def settings(self):\n settings_file = os.path.splitext(self._name)[0]\n if os.path.exists(settings_file):\n with open(settings_file) as f:\n config = configparser.ConfigParser()\n config.read_file(f, source=settings_file)\n\n return config['Plot']\n else:\n return {}", "title": "" }, { "docid": "d1a75b3be1e11edd4a3fa422ef5acb95", "score": "0.49413037", "text": "def _get_setting(name: str) -> Optional[str]:\n direct_specifier = os.getenv(name)\n commit_message = safe_getenv(\"BUILDKITE_MESSAGE\")\n if direct_specifier:\n return direct_specifier\n else:\n m = re.search(r\"\\[\" + name + r\"=(\\S+)\\]\", commit_message)\n return m.group(1) if m else None", "title": "" }, { "docid": "a117d742f1021a31aa7faaa4ec24eedb", "score": "0.49403787", "text": "def read_default_config():\n result = subprocess.Popen(\n ['gcloud', 'config', 'list', \"--format='json'\"], stdout=subprocess.PIPE)\n out = result.stdout.read()\n out_json = json.loads(out)\n return out_json", "title": "" }, { "docid": "11ba34faa08c74f29bec021c0bf636fb", "score": "0.49309698", "text": "def get(self, soption, default=None):\n \n if soption in self.settings.keys():\n return self.settings[soption]\n else:\n if default is None:\n default = \"\"\n return default", "title": "" }, { "docid": "0e2056889922d7c2c8f849b4ae2c8711", "score": "0.49303097", "text": "def get(self):\n\t\treturn (self.conf)", "title": "" }, { "docid": 
"a11156a3301dfcebdbd373b8105a646b", "score": "0.49158227", "text": "def get_options():\n\treturn _options", "title": "" }, { "docid": "d52546a603aed3d89fb05445e66af7aa", "score": "0.49102554", "text": "def sublimeTextPathFinder():\n \n # get the sublime text path from default\n defaultSublimePath = [\n 'C:/Program Files/sublime_text/sublime_text.exe',\n 'C:/Program Files/Sublime Text 2/sublime_text.exe',\n 'C:/Program Files (x86)/Sublime Text 2/sublime_text.exe',\n ]\n for path in defaultSublimePath:\n if os.path.exists(path):\n SETTINGS.add('sublime_text_path', path)\n\n sublimeTextPath = SETTINGS.get('sublime_text_path')\n if sublimeTextPath and os.path.exists(sublimeTextPath):\n # launch sublime text\n subprocess.Popen(sublimeTextPath[0])\n else:\n # ask for the sublime text exe path\n filedialog = cmds.fileDialog2(cap='Please give me the path of Sublime Text.exe !',\n fm=1,\n dir='C:\\\\Program Files\\\\',\n ff='*.exe')\n if filedialog:\n sublimeTextPath = str(filedialog[0])\n if os.path.exists(sublimeTextPath):\n # setting Setting\n SETTINGS.add('sublime_text_path', sublimeTextPath)\n return sublimeTextPath\n else:\n raise UserWarning('No exe found !')", "title": "" }, { "docid": "13289eb9fbfc92f406e9d6a648ee1be0", "score": "0.49065408", "text": "def get_main_settings(session: Session = Depends(generate_session)):\n\n try:\n data = db.settings.get(session, \"main\")\n except:\n default_settings_init(session)\n data = db.settings.get(session, \"main\")\n return data", "title": "" }, { "docid": "4d3438bfb0b7070038bf68f2f97b84c7", "score": "0.49057907", "text": "def conf(self, doc):\n if doc != ';':\n return doc", "title": "" }, { "docid": "58e01347140a4d708ca3ccc45fbcca33", "score": "0.48938292", "text": "def project_settings(request):\n variables = [\"CONFERENCE_TITLE\", \"LANDING_GLOBAL_REDIRECT\", \"CONTACT_EMAIL\", \"SPONSORS_EMAIL\",\n \"CFP_EMAIL\", \"PRESS_EMAIL\"]\n return {variable: getattr(settings, variable) for variable in variables}", "title": "" }, { "docid": "d7c3552efc6e8f0f2c4bff0226f94270", "score": "0.48928797", "text": "def preferences(tmpdir):\n return Preferences(compound_location=str(tmpdir))", "title": "" }, { "docid": "1124153478e041231383185d43bd276f", "score": "0.48793915", "text": "def get_settings(self):\n\n if not self.get_hl_settings:\n self.get_hl_settings = True\n self.style_plain_text = self.config['style_plain_text']\n\n config = None\n self.highlighter = None\n for ext in self.md.registeredExtensions:\n try:\n config = getattr(ext, \"get_pymdownx_highlight_settings\")()\n self.highlighter = getattr(ext, \"get_pymdownx_highlighter\")()\n break\n except AttributeError:\n pass\n\n css_class = self.config['css_class']\n self.css_class = css_class if css_class else config['css_class']\n\n self.extend_pygments_lang = config.get('extend_pygments_lang', None)\n self.guess_lang = config['guess_lang']\n self.pygments_style = config['pygments_style']\n self.use_pygments = config['use_pygments']\n self.noclasses = config['noclasses']\n self.language_prefix = config['language_prefix']", "title": "" }, { "docid": "59be5843aad7b55b4b5c4d261aa766e5", "score": "0.48786563", "text": "def _get_settings(self):\n\n return self._settings", "title": "" }, { "docid": "3d81e2e6b92e2ea75b9aba379c88a15b", "score": "0.48765185", "text": "def get_global_conf() -> TypedConfigParser:\n global GLOBAL_CONF # pylint: disable=global-statement\n if GLOBAL_CONF is None:\n GLOBAL_CONF = TypedConfigParser(interpolation=ExtendedInterpolation())\n GLOBAL_CONF.read(CONF_FILES)\n return 
GLOBAL_CONF", "title": "" }, { "docid": "d575764cf7093d69fd366c8027095ad0", "score": "0.4875697", "text": "def get_setting(name, default):\n parent_name = \"CMSPLUGIN_NEWS_{0}\".format(name)\n return getattr(django_settings, parent_name, default)", "title": "" }, { "docid": "3d06d6bacd582d738df746c044cda660", "score": "0.48740506", "text": "def cfg(self):\r\n if not self._cfg:\r\n self._cfg = self.goptions_dict.get('CONFIG')\r\n return self._cfg", "title": "" }, { "docid": "8da6deb6458c72bca05298c1d177a1e2", "score": "0.4871038", "text": "def keyword_preference_window_get_password(self):\n ui_password = self.Preferences_window_get_Password()\n ui_confirm_password = self.Preferences_window_get_confirm_password()\n return {\"password\" : ui_password , \"confirm_password\" : ui_confirm_password}", "title": "" }, { "docid": "188954c2a5fcef31420a7b73e6c91f54", "score": "0.48665392", "text": "def pref_get(opt):\n # Load prefs data.\n if os.path.isfile(CONFIGFILE):\n try:\n with open(CONFIGFILE, 'r') as fread:\n allprefs = fread.readlines()\n except (IOError, OSError) as ex:\n print_error('Unable to open config file: {}'.format(CONFIGFILE),\n exc=ex,\n boldtext=CONFIGFILE)\n return False\n else:\n # No prefs file.\n allprefs = []\n existingopt = None\n for line in allprefs:\n if line.startswith(opt):\n # Found the line, retrieve its whole content.\n existingopt = line\n break\n\n # No option found.\n if not existingopt:\n return None\n\n # Parse option.\n if '=' in existingopt:\n val = existingopt.strip('\\n').split('=')[1].strip()\n return val\n else:\n # Bad config\n return None", "title": "" }, { "docid": "af4b8de4a562193aca265267c6eeb322", "score": "0.48660952", "text": "def _getDefaultDataStore(self):\r\n \r\n return unicode(self.defaultDataStoreComboBox.currentText())", "title": "" }, { "docid": "d95a8698af115bdb6b4f28ce27c91010", "score": "0.48660266", "text": "def selected_setting(self, index):\r\n\r\n return self.settings[index]", "title": "" }, { "docid": "be54f330a30e196f76b14142e39b8939", "score": "0.48649812", "text": "def get_config():\n conf = {}\n conf['token'] = os.environ.get(\"GREGBOT_TOKEN\")\n conf['owner_id'] = os.environ.get(\"GREGBOT_OWNER_ID\")\n conf['cmd_prefixes'] = os.environ.get(\"GREGBOT_CMD_PREFIXES\").split(',')\n conf['loaded_extensions'] = os.environ.get(\"GREGBOT_LOADED_EXTENSIONS\").split(',')\n\n return conf", "title": "" }, { "docid": "ed8291051d06159ddf719f39a4821aed", "score": "0.4864345", "text": "def get_settings_config():\n name_config = 'settings.ini'\n path_config = pkg_resources.resource_filename('facelib._utils', name_config)\n config = ConfigParser(interpolation=ExtendedInterpolation())\n config.read(path_config)\n assert 1 >= float(config['Predict']['tolerance']) > 0, \"Tolerance should be: (0, 1]\"\n return config", "title": "" }, { "docid": "1bc20106b5a4f5c27bea957c684460e2", "score": "0.48440015", "text": "def keyword_get_quick_setting_parameter_collection_from_UI(self):\n quick_settings_ui = self.quick_settings_window_get_parameter_details()\n return {\"quick_settings_ui\":quick_settings_ui}", "title": "" }, { "docid": "df41c1498a81ed1284a86f5dfc58447f", "score": "0.48407328", "text": "def get_toolbox_settings_file() -> str:\n return join(get_toolbox_dir(), TOOLBOX_SETTINGS)", "title": "" }, { "docid": "5c5e4e6555f1a885439305e557f599e7", "score": "0.4835503", "text": "def get_global(file_name, global_name):\n import os\n globals = {}\n exec(open(os.path.join(os.path.dirname(__file__), \"mellowchord\", file_name)).read(), globals)\n return 
globals[global_name]", "title": "" }, { "docid": "0ae7e6248a034ffcd8a9aa432bea19ed", "score": "0.48341903", "text": "def get_auto_options_define(self):\n return self.auto_options_define", "title": "" }, { "docid": "53d3ffd1dc93c28cecb2f70b51d0b3b6", "score": "0.48304954", "text": "def get_global_opts(self):\r\n gparser = self.create_global_parser(no_usage=True, add_help=False)\r\n try:\r\n sys.stdout = open(os.devnull, 'w')\r\n sys.stderr = open(os.devnull, 'w')\r\n gopts, _ = gparser.parse_args()\r\n return gopts\r\n except SystemExit:\r\n pass\r\n finally:\r\n sys.stdout = sys.__stdout__\r\n sys.stderr = sys.__stderr__", "title": "" }, { "docid": "87c145b1360cb20c5554780cf9843a92", "score": "0.48233846", "text": "def _GetMSBuildToolSettings(msbuild_settings, tool):\n return msbuild_settings.setdefault(tool.msbuild_name, {})", "title": "" }, { "docid": "a3e15038040e854c3ee56e7a7aef179a", "score": "0.48216477", "text": "def settings(self):\n with ConnectionContext(self.identifier, get=True) as ctxt:\n self._execute(ctxt, 'settings_get')\n\n try:\n return json.loads(ctxt.results[0][0])\n except IndexError:\n return dict()", "title": "" }, { "docid": "5a937fecdc2804b19b80b8c126f59d5b", "score": "0.48203805", "text": "def get_settings(self):\r\n return [[name.capitalize(), description] for name, description in persist.LINT_MODES]", "title": "" }, { "docid": "4fc00b92abbbdef5b1d86d5aacb404ac", "score": "0.4815188", "text": "def localGetCurrentSetting(self):\n return {}", "title": "" }, { "docid": "6db145830eb1ef18f730177377d092c2", "score": "0.48151118", "text": "def userPreferences():\n backgroundTheme = input(\"Inser favorite background theme: \")\n url = 'https://www.pexels.com/search/' + str(backgroundTheme)\n #todo: maybe write preferences in a file\n return url", "title": "" }, { "docid": "f341d0b32c0f45874f215937ed929128", "score": "0.4807595", "text": "def get_current_settings(request):\r\n return {'settings': adagios.settings}", "title": "" }, { "docid": "243c377d7775bd73899edab913d64661", "score": "0.48055393", "text": "def get_settings(cls, store):\n if store == \"TESCO\": \n return TescoSearchSettings()\n elif store == \"WAITROSE\": \n return WaitroseSearchSettings()\n elif store == \"SAINSBURY\": \n return SainsburySearchSettings()\n else:\n # Fail safe by returning empty settings\n return SearchSettings()", "title": "" }, { "docid": "1500be403f23a37613eb0e0395b9ca26", "score": "0.48000926", "text": "def get_defaults(self):\n return dict(((p,d) for p,t,d in self.settings\n if d is not None and '*' not in p))", "title": "" }, { "docid": "a819c6e975f5d0c52328232691f94d95", "score": "0.47976685", "text": "def _get_chall_pref(self, domain):\n # Make sure to make a copy...\n chall_prefs = []\n chall_prefs.extend(self.auth.get_chall_pref(domain))\n return chall_prefs", "title": "" } ]
17d845a7945a76ac53fb4f92dd7a46a9
Set ajax class attribute on self.form.
[ { "docid": "f61ae54a88b35a3fc5718e4c77ffd992", "score": "0.77012664", "text": "def prepare_ajax(self):\n if not self.ajax:\n return\n if self.form.attrs.get('class_add') \\\n and self.form.attrs['class_add'].find('ajax') == -1:\n self.form.attrs['class_add'] += ' ajax'\n else:\n self.form.attrs['class_add'] = 'ajax'", "title": "" } ]
[ { "docid": "eb4dc8607e9397d0bc9155df9e3371eb", "score": "0.80698156", "text": "def prepare_ajax(self):\n if not self.ajax:\n return\n if self.form.attrs.get('class') \\\n and self.form.attrs['class'].find('ajax') == -1:\n self.form.attrs['class'] += ' ajax'\n else:\n self.form.attrs['class'] = 'ajax'", "title": "" }, { "docid": "015ed2128a48beba88fb05d211125b86", "score": "0.6531199", "text": "def add_form_class(self):\n return self.form_class", "title": "" }, { "docid": "4a9a19565b820fe2dd6854f6891eb1d5", "score": "0.61416787", "text": "def get_form_class(self):\n form_class = getattr(self, \"form_class\", False)\n if not form_class:\n form_class = self.kwargs[\"form_class\"]\n return form_class", "title": "" }, { "docid": "d54aa4bd58cb984a9fb3c59b60a00d04", "score": "0.6101503", "text": "def formfield(self, **kwargs):\n return super().formfield(**{'form_class': self.form_class, **kwargs})", "title": "" }, { "docid": "cc7faf1315197828b871742757b892cc", "score": "0.59509814", "text": "def __init__(self, *args, **kwargs):\n super(BaseFormMixin, self).__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.form_class = 'form'\n self.helper.form_tag = False", "title": "" }, { "docid": "abfa4bb69d0ddf10da4eabc03b6dc2ba", "score": "0.59411514", "text": "def get_form_class(self):\n form = super().get_form_class()\n\n # Name and URL need input class 'input' for Bulma.io\n form.base_fields['name'].widget = forms.TextInput(\n attrs={\n 'class': 'input is-primary',\n 'placeholder': 'Transcription Name',\n },\n )\n form.base_fields['url'].widget = forms.TextInput(\n attrs={\n 'class': 'input is-primary',\n 'placeholder': 'URL',\n },\n )\n\n # Filter projects to only show those owned by user\n form.base_fields['project'] = forms.ModelChoiceField(\n queryset=Project.objects.filter(\n owner=self.request.user\n ),\n empty_label=\"Select a Project\",\n )\n form.base_fields['transcription_item_publish_date'].widget=forms.SelectDateWidget(\n years = list(\n range(2000, datetime.datetime.now().year + 1))[::-1],\n empty_label=(\n 'Select Year', 'Select Month', 'Select Day'),\n )\n return form", "title": "" }, { "docid": "f73a3f37be25d34cab62f8b6b18d10bc", "score": "0.58868945", "text": "def get_form_class(self):\n return self.form_class", "title": "" }, { "docid": "f73a3f37be25d34cab62f8b6b18d10bc", "score": "0.58868945", "text": "def get_form_class(self):\n return self.form_class", "title": "" }, { "docid": "f73a3f37be25d34cab62f8b6b18d10bc", "score": "0.58868945", "text": "def get_form_class(self):\n return self.form_class", "title": "" }, { "docid": "73ea73829aad4d21ec851f49da4841a9", "score": "0.58348054", "text": "def formfield(self, **kwargs):\n widget = JSONEditorWidget(\n self.schema_field.editor_schema,\n collapsed=False\n )\n defaults = {'form_class': JSONFormField, 'widget': widget}\n defaults.update(kwargs)\n defaults['form_class'] = to_schema_field(defaults['form_class'])\n return super().formfield(**defaults)", "title": "" }, { "docid": "33ab849e0160ac23b3e5300cebbaf540", "score": "0.57782656", "text": "def get_form_class(self):\r\n if self.form_class:\r\n return self.form_class\r\n else:\r\n return self.get_edit_handler_class().get_form_class(self.model)", "title": "" }, { "docid": "eb9d21f1abea6a0d5b3f6e7f64ad650c", "score": "0.5763158", "text": "def default_form_init_handler(self, data, form_dict):\n form_class = form_dict['form_class']\n prefix = form_dict.get('prefix', None)\n #form = form_class(data=data, prefix=prefix)\n form = form_class(data=data, prefix=prefix)\n 
form_dict['form'] = form", "title": "" }, { "docid": "2b4252a407dd7d04cce79ee50073ece6", "score": "0.566212", "text": "def __init__(self, form_class):\n self.form_class = form_class\n self.model_class = self.form_class.Meta.model\n self.meta = self.form_class.Meta", "title": "" }, { "docid": "f7e393d6252179e4c5877c664c95ed03", "score": "0.56561315", "text": "def _get_form_class(self):\n if self.form_class is None:\n raise ImproperlyConfigured('from_class attribute must be set')\n return self.form_class", "title": "" }, { "docid": "4658daa530dda896252f2e241c422910", "score": "0.5631689", "text": "def configure_field_classes(field):\n if isinstance(field.field.widget, forms.CheckboxInput) or \\\n isinstance(field.field.widget, forms.RadioSelect):\n return\n if \"class\" in field.field.widget.attrs:\n field.field.widget.attrs[\"class\"] += \" form-control\"\n else:\n field.field.widget.attrs[\"class\"] = \"form-control\"", "title": "" }, { "docid": "e7f7437ae7d6a962755bd88ab129b963", "score": "0.55541843", "text": "def get_formset_form_class(self):\n if self.form_class or self.change_fields:\n params = {'formfield_callback': self.formfield_for_dbfield}\n if self.form_class:\n fc = self.customize_form_widgets(self.form_class)\n params['form'] = fc\n if self.change_fields:\n params['fields'] = self.change_fields\n\n return model_forms.modelform_factory(self.model, **params)", "title": "" }, { "docid": "33c2bc81af046e93750a595821b2ab19", "score": "0.5543394", "text": "def ajax(self):\n return None", "title": "" }, { "docid": "1968cba864622014babd81c059abee00", "score": "0.55237365", "text": "def get_form(self, form_class):\n form = super(FieldCSSMixin, self).get_form(form_class)\n if form.errors:\n for field in form:\n if field.errors:\n field.field.widget.attrs.update({\"class\": \"uk-form-danger\"})\n else:\n field.field.widget.attrs.update({\"class\": \"uk-form-success\"})\n\n return form", "title": "" }, { "docid": "a00556f9595fe2b77485d191b02527ae", "score": "0.5498356", "text": "def __init__(self, *args, **kwargs):\n super(BaseHorizontalFormMixin, self).__init__(*args, **kwargs)\n self.helper = FormHelper(self)\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-sm-2'\n self.helper.field_class = 'col-sm-10'\n self.helper.form_tag = False", "title": "" }, { "docid": "2cde7934c2541fbde3858f2848af9a68", "score": "0.5459633", "text": "def __init__(self, *args, **kwargs):\n css_class = kwargs.get('css_class')\n super().__init__(*args, **kwargs)\n if css_class is not None:\n self.field_classes = 'btn {}'.format(css_class)", "title": "" }, { "docid": "0967e50332dd1e89c4261f914ddd5ce7", "score": "0.54554623", "text": "def get_django_form_class(self, role=None, field_factory=None):\n from formidable.forms import get_dynamic_form_class\n return get_dynamic_form_class(self, role, field_factory)", "title": "" }, { "docid": "48939aa2e0a0e2769e78a202e1f4c4e3", "score": "0.5436427", "text": "def get_form_class(self, form_name):\n return self.form_classes.get(form_name, self.form_class)", "title": "" }, { "docid": "8ab4e56bf4b10323688b8c5b24c91edf", "score": "0.5398185", "text": "def set_widget(self):\n if \"html_class\" in self.kwargs:\n self.html_class = self.kwargs.pop(\"html_class\")\n attrs = self.get_widget_attrs()\n if isclass(self.widget):\n self.widget = self.widget(attrs)\n else:\n self.widget = self.widget.__class__(attrs)", "title": "" }, { "docid": "f62f8862eda5c220578836a13fc776c2", "score": "0.5352909", "text": "def get_formset_class(self, **kwargs):\n form_class = 
self.get_formset_form_class()\n if form_class:\n kwargs['formfield_callback'] = self.formfield_for_dbfield\n return model_forms.modelformset_factory(self.model,\n form_class, fields=self.change_fields, extra=0,\n **kwargs)", "title": "" }, { "docid": "fa014d595adb7f2b5573edd218803f5e", "score": "0.5343256", "text": "def _class(self, _class):\n\n\n self.__class = _class", "title": "" }, { "docid": "0e82850c0c11d4c9c214bd669a9aa26b", "score": "0.5340369", "text": "def _class(self, _class: List[str]):\n\n self.__class = _class", "title": "" }, { "docid": "db11c2287d715482d729a25bb049fc21", "score": "0.52979267", "text": "def get_form_class(request, form):\n form = get_form(request, form)\n if form:\n return form.form_class if isinstance(form, Layout) else form", "title": "" }, { "docid": "ab960009bc4b841b1229e38172c03ed6", "score": "0.52948004", "text": "def _class(self, _class):\n\n self.__class = _class", "title": "" }, { "docid": "ab960009bc4b841b1229e38172c03ed6", "score": "0.52948004", "text": "def _class(self, _class):\n\n self.__class = _class", "title": "" }, { "docid": "4c1e9312d8135c7c7d268e21c252e616", "score": "0.52937025", "text": "def __call__(self):\n use_ajax = getattr(self, 'use_ajax', False)\n ajax_options = getattr(self, 'ajax_options', '{}')\n self.schema = self.schema.bind(**self.get_bind_data())\n form = self.form_class(self.schema, buttons=self.buttons,\n use_ajax=use_ajax, ajax_options=ajax_options,\n **dict(self.form_options))\n self.before(form)\n reqts = form.get_widget_resources()\n result = None\n\n for button in form.buttons:\n if button.name in self.request.POST:\n success_method = getattr(self, '%s_success' % button.name)\n try:\n controls = self.request.POST.items()\n validated = form.validate(controls)\n result = success_method(validated)\n except deform.exception.ValidationFailure as e:\n fail = getattr(self, '%s_failure' % button.name, None)\n if fail is None:\n fail = self.failure\n result = fail(e)\n break\n\n if result is None:\n result = self.show(form)\n\n if isinstance(result, dict):\n result['js_links'] = reqts['js']\n result['css_links'] = reqts['css']\n\n return result", "title": "" }, { "docid": "569d27a6513f73311e206b3378d7e2f4", "score": "0.5260821", "text": "def get_metadata_form_class(self, request, obj):\n return self.metadata_form_class", "title": "" }, { "docid": "c7794c5f4a496f52b908df90fb1c189e", "score": "0.5254055", "text": "def _add_class(widget_def):\n widget_def['class'] = ALL_WIDGETS[widget_def['class']]\n\n # FIXME Add something here for widgets that can contain widgets,\n # iterate over content and call _add_class", "title": "" }, { "docid": "08f5cf35e47d48e9b1867052e00969cb", "score": "0.52323294", "text": "def __call__(self, field, **kwargs):\n if kwargs.get('class'):\n kwargs['class'] += \" ckeditor\"\n else:\n kwargs.setdefault('class', 'ckeditor')\n return super(CKEditorWidget, self).__call__(field, **kwargs)", "title": "" }, { "docid": "fd7748fdd3bca175dcdb9a5e4d5a2c90", "score": "0.5229025", "text": "def get_form_class(self):\n return get_consent_form_class(self.study.consent_form)", "title": "" }, { "docid": "b6faa75f1b7803f99b94e44872bc8f72", "score": "0.52217305", "text": "def get_formset_class(self):\n return self.formset_class", "title": "" }, { "docid": "b4587cf4278e1757bda822eb5727a8f5", "score": "0.5215743", "text": "def add_class(field, class_attr):\n return field.as_widget(attrs={'class': class_attr})", "title": "" }, { "docid": "44ca4383f85613d15a6bf87466a37ef1", "score": "0.5208577", "text": "def 
form_class(self):\n class InputForm(Form):\n extra = wtforms.fields.HiddenField()\n\n # get the fields\n for name, obj in self.__class__.__dict__.iteritems():\n if isinstance(obj, UnboundField):\n setattr(InputForm, name, obj)\n\n return InputForm", "title": "" }, { "docid": "52ff09b7dd163f78c97cf2fcacd345fc", "score": "0.52025616", "text": "def get_form_class(self, request):\n from refs.accounts.forms import UserRegistrationForm\n\n return UserRegistrationForm", "title": "" }, { "docid": "a01c1242e00e4e01fbbfc91916836cab", "score": "0.51651424", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[\"details\"].widget.attrs.update(\n {\"class\": \"materialize-textarea\"})\n self.fields[\"limit_date\"].widget.attrs.update(\n {\"class\": \"datepicker\"})", "title": "" }, { "docid": "e93797d22f428bd8cc2c5ef426f72016", "score": "0.5145421", "text": "def get_form_class(self):\n attrs = dict(\n (field.name, field.get_form_field()) for field in self.fields.all()\n )\n return type(\"GeneratedBeGoodForm\", (forms.Form,), attrs)", "title": "" }, { "docid": "ec0bb266aa33e2bd2eb2494e08af844a", "score": "0.51286066", "text": "def __getitem__(self, name):\n bound_field = super(BootstrapFormWrapper, self).__getitem__(name)\n if isinstance(bound_field.field.widget, CheckboxInput):\n bound_field.is_checkbox = True\n if isinstance(bound_field.field.widget, DateInput):\n bound_field.is_date = True\n if isinstance(bound_field.field.widget, CheckboxSelectMultiple):\n bound_field.is_multi_checkbox = True\n\n classes = bound_field.field.widget.attrs.get(\"class\", \"\")\n if isinstance(bound_field.field.widget, (widgets.TextInput, widgets.Textarea)):\n bound_field.field.widget.attrs['class'] = classes + \" form-control\"\n return bound_field", "title": "" }, { "docid": "e2f309a6a30c61cd3735e13e3629725d", "score": "0.5063563", "text": "def get_form_class(self,**kwargs):\n if 'fields' in kwargs:\n fields = kwargs.pop('fields')\n else:\n fields = self.get_fields()\n\n exclude = []\n # TODO: Need to remove readonly fields from form, but include them when returning by get_ng_model\n # if self.request.method == 'POST':\n # exclude = self.get_readonly_fields();\n\n if self.form_class:\n if fields or len(exclude) > 0:\n # TODO make sure it is saving all form_class definitions!!!!!\n if fields == '__all__':\n fields = ModelFormOptions(self.form_class).fields\n form_class = _model_forms.modelform_factory(self.model, form=self.form_class, fields=fields, exclude=exclude)\n else:\n form_class = self.form_class\n else:\n model = None\n if self.model is not None:\n # If a model has been explicitly provided, use it\n model = self.model\n elif hasattr(self, 'object') and self.object is not None:\n # If this view is operating on a single object, use\n # the class of that object\n model = self.object.__class__\n else:\n # Try to get a queryset and extract the model class\n # from that\n model = self.get_queryset().model\n\n if fields is None and model is None:\n raise ImproperlyConfigured(\n \"Using ModelFormMixin (base class of %s) without \"\n \"the 'fields' attribute is prohibited.\" % self.__class__.__name__\n )\n\n form_class = _model_forms.modelform_factory(model, form=ModelForm, fields=(fields if fields else self.fields),\n exclude=exclude)\n for field in self.get_readonly_fields():\n if form_class.base_fields.get(field,False):\n form_class.base_fields[field].required = False\n form_class.base_fields[field].is_disabled = True\n\n return form_class", "title": "" }, { "docid": 
"a5df591063594d00623bf3d2056ea852", "score": "0.5058508", "text": "def _class(self, _class):\n allowed_values = [\"TimeOff\", \"Sick\", \"Flex\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and _class not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `_class` ({0}), must be one of {1}\" # noqa: E501\n .format(_class, allowed_values)\n )\n\n self.__class = _class", "title": "" }, { "docid": "f16c7ee94322ac04292f779f04829764", "score": "0.50167644", "text": "def get_form_options(self):\n return json.dumps(self.ajax_form_options) if self.ajax_form_options else None", "title": "" }, { "docid": "6ae2a1794ef65a0608ba0f74e5e43e4b", "score": "0.50137746", "text": "def configuration_form_class(self):\n\n from forms import WidgetForm # avoid recursion\n return modelform_factory(self.__class__, form=WidgetForm)", "title": "" }, { "docid": "16a76cbfea156f0ed5e19ed6bb9d6dc0", "score": "0.5009553", "text": "def update_classes(self, cls):\n pass", "title": "" }, { "docid": "4b391933ae7de68ac889e4fb5052eb69", "score": "0.4993083", "text": "def add_css_class(self, attr):\n if len(self.cls) != 0:\n attr[\"cls\"] = \" \".join(self.cls)", "title": "" }, { "docid": "2704df55061d768a19f7a89421671cc4", "score": "0.4988455", "text": "def get_form_class(self):\n income_forms = {\n 'exact': forms.ExactIncomeForm,\n 'hourly': forms.HourlyIncomeForm,\n 'estimate': forms.EstimateIncomeForm\n }\n return income_forms[self.request.session['income_method']]", "title": "" }, { "docid": "aa18b4d88950487e9a012e404e117608", "score": "0.49850345", "text": "def _form_handler(\n request, form_cls, require_login=False, block_get=False, ajax=False,\n next=None, template=None, login_url=None, pass_request=True,\n validate_only=False, **kwargs\n):\n RESULT_KEY = getattr(settings, \"RESULT_KEY\", \"response\")\n request.REQUEST = request.GET.copy()\n request.REQUEST.update(request.POST)\n if \"next\" in request.REQUEST:\n next = request.REQUEST[\"next\"]\n is_ajax = request.is_ajax() or ajax or request.REQUEST.get(\"json\") == \"true\"\n if isinstance(form_cls, basestring):\n # can take form_cls of the form: \"project.app.forms.FormName\"\n mod_name, form_name = get_mod_func(form_cls)\n form_cls = getattr(__import__(mod_name, {}, {}, ['']), form_name)\n validate_only = (\n validate_only or request.REQUEST.get(\"validate_only\") == \"true\"\n )\n if login_url is None:\n login_url = getattr(settings, \"LOGIN_URL\", \"/login/\")\n if callable(require_login):\n require_login = require_login(request)\n elif require_login:\n require_login = not request.user.is_authenticated()\n if require_login:\n redirect_url = \"%s?next=%s\" % (\n login_url, urlquote(request.get_full_path())\n ) # FIXME\n if is_ajax:\n return JSONResponse({'success': False, 'redirect': redirect_url})\n return HttpResponseRedirect(redirect_url)\n if block_get and request.method != \"POST\":\n raise Http404(\"only post allowed\")\n if next:\n assert template, \"template required when next provided\"\n\n def get_form(with_data=False):\n _form = form_cls(request) if pass_request else form_cls()\n _form.next = next\n if with_data:\n _form.data = request.REQUEST\n _form.files = request.FILES\n _form.is_bound = True\n if hasattr(_form, \"init\"):\n res = _form.init(**kwargs)\n if res:\n raise ResponseReady(res)\n return _form\n\n if is_ajax and request.method == \"GET\":\n return JSONResponse(get_form_representation(get_form()))\n if template and request.method == \"GET\":\n return render(request, template, {\"form\": 
get_form()})\n form = get_form(with_data=True)\n if form.is_valid():\n if validate_only:\n return JSONResponse({\"valid\": True, \"errors\": {}})\n r = form.save()\n if is_ajax:\n return JSONResponse(\n {\n 'success': True,\n RESULT_KEY: (\n form.get_json(r) if hasattr(form, \"get_json\") else r\n )\n }\n )\n if isinstance(r, HttpResponse):\n return r\n if next:\n return HttpResponseRedirect(next)\n if template:\n return HttpResponseRedirect(r)\n return JSONResponse(\n {\n 'success': True,\n RESULT_KEY: (\n form.get_json(r) if hasattr(form, \"get_json\") else r\n )\n }\n )\n if validate_only:\n if \"field\" in request.REQUEST:\n errors = form.errors.get(request.REQUEST[\"field\"], \"\")\n if errors:\n errors = \"\".join(errors)\n else:\n errors = form.errors\n return JSONResponse({\"errors\": errors, \"valid\": not errors})\n if is_ajax:\n return JSONResponse({'success': False, 'errors': form.errors})\n if template:\n return render(request, template, {\"form\": form})\n return JSONResponse({'success': False, 'errors': form.errors})", "title": "" }, { "docid": "088c0ec21cea3981516fa2ae5ff10565", "score": "0.49793217", "text": "def get_form_class(self, request, obj=None, **kwargs):\n\n return super(AdminWithReadOnly, self).get_form(request, obj,\n **kwargs)", "title": "" }, { "docid": "b0d8b524364e7c2bfd5ac9e28a394bc3", "score": "0.4978189", "text": "def get_form(self, form_class):\n return form_class(self.request, **self.get_form_kwargs())", "title": "" }, { "docid": "d05bd7bb724ea18b5d0ff8a20fcbefb3", "score": "0.4969821", "text": "def __init__(self, *args, **kwargs):\n trait_pk = kwargs.pop('trait_pk') # trait_pk added in the view.\n self.trait = get_object_or_404(SourceTrait, pk=trait_pk)\n super(TagSpecificTraitForm, self).__init__(*args, **kwargs)\n # Form formatting and add a submit button.\n self.helper = FormHelper(self)\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-sm-2'\n self.helper.field_class = 'col-sm-6'\n self.helper.form_method = 'post'\n button_save = generate_button_html('submit', 'Save', btn_type='submit', css_class='btn-primary')\n self.helper.layout.append(button_save)", "title": "" }, { "docid": "611db9a04a21c56c1f61ed85b33a7962", "score": "0.49474993", "text": "def __init__(self, request=None, *args, **kwargs):\n super(ProductClassSelectForm, self).__init__(*args, **kwargs)\n\n if request is not None:\n qs = self.fields['product_class'].queryset = ProductClass.objects.filter(\n shop_id=request.shop_id)\n\n if not kwargs.get('initial') and len(qs) == 1:\n self.fields['product_class'].initial = qs[0]\n else:\n qs = self.fields['product_class'].queryset = ProductClass.objects.all()", "title": "" }, { "docid": "c5ce17ef99d6cec84aaf882e9513702b", "score": "0.49463156", "text": "def get_form_classes(self,):\r\n settings_forms = []\r\n if permissions.can_change_permissions(self.article, self.request.user):\r\n settings_forms.append(self.permission_form_class)\r\n plugin_forms = [F for F in plugin_registry.get_settings_forms()]\r\n plugin_forms.sort(key=lambda form: form.settings_order)\r\n settings_forms += plugin_forms\r\n for i in range(len(settings_forms)):\r\n # TODO: Do not set an attribute on a form class - this\r\n # could be mixed up with a different instance\r\n # Use strategy from Edit view...\r\n setattr(settings_forms[i], 'action', 'form%d' % i)\r\n\r\n return settings_forms", "title": "" }, { "docid": "fae8866f7599c9c3f15df3f2c2f9ebdd", "score": "0.49424788", "text": "def get_factory_kwargs(self):\n kwargs = 
super(ModelFormSetMixin, self).get_factory_kwargs()\n kwargs.update({\n 'exclude': self.exclude,\n 'fields': self.fields,\n 'formfield_callback': self.formfield_callback,\n 'widgets': self.widgets,\n })\n if self.get_form_class():\n kwargs['form'] = self.get_form_class()\n if self.get_formset_class():\n kwargs['formset'] = self.get_formset_class()\n return kwargs", "title": "" }, { "docid": "968d68377391e230b96a55466d2a5c3a", "score": "0.49273622", "text": "def formfield(self, **kwargs):\n\n defaults = {\n 'form_class': LocalizedFieldForm\n }\n\n defaults.update(kwargs)\n return super().formfield(**defaults)", "title": "" }, { "docid": "968d68377391e230b96a55466d2a5c3a", "score": "0.49273622", "text": "def formfield(self, **kwargs):\n\n defaults = {\n 'form_class': LocalizedFieldForm\n }\n\n defaults.update(kwargs)\n return super().formfield(**defaults)", "title": "" }, { "docid": "1176fa3d7d35ec706c7e37106c96cb7f", "score": "0.49251089", "text": "def form_initializer(self):", "title": "" }, { "docid": "e4466908684e3fa411f36a10c83c7afe", "score": "0.49248618", "text": "def get_ajax_handler(self):\n return absolute_url(self.context, self.request, self.ajax_form_handler)", "title": "" }, { "docid": "e17840a2966a9cee46f71dc3cf2de11b", "score": "0.4914237", "text": "def _class(self, _class: str):\n allowed_values = [\"FARE\", \"ANCILLARY\"] # noqa: E501\n if _class not in allowed_values:\n raise ValueError(\n \"Invalid value for `_class` ({0}), must be one of {1}\"\n .format(_class, allowed_values)\n )\n\n self.__class = _class", "title": "" }, { "docid": "2d12763a8b2d5a634e066a32b51cf01c", "score": "0.49092233", "text": "def get_form_class(self):\n if self.fields is not None and self.form_class:\n raise ImproperlyConfigured(\"Specifying both 'fields' and 'form_class' is not permitted.\")\n\n if self.form_class:\n return self.form_class\n\n model = self.get_model()\n return modelform_factory(model, fields=self.fields, session=self.get_session())", "title": "" }, { "docid": "3b9b43f71f50061897acf7d999f5b4ed", "score": "0.48945865", "text": "def addClass(klass):", "title": "" }, { "docid": "d3926cdbcd3af09b09e901cbf4fe4ce8", "score": "0.48656932", "text": "def jsUpdate(self, data=None):\r\n if data is None:\r\n # In this case we assume that we are in a javascript method and the javascript will produce the relevant data\r\n return self.aresObj.jsOnLoadFnc.add('%s.addClass(data[0] + ' ' + data[1])' % self.jqId)\r\n\r\n content = data if isinstance(data, (tuple, list)) else (data, 1)\r\n # Python know before the completion of the report that the call will be required\r\n return self.aresObj.jsOnLoadFnc.add(\"%s.attr('class', ''); %s.addClass('%s fa-%sx')\" % (self.jqId, self.jqId, content[0], content[1]))", "title": "" }, { "docid": "b25cbae621bbfa823768dee2b4e8401c", "score": "0.48656493", "text": "def __init__(self, *args, **kwargs):\r\n super(ProductClassSelectForm, self).__init__(*args, **kwargs)\r\n qs = self.fields['product_class'].queryset\r\n if not kwargs.get('initial') and len(qs) == 1:\r\n self.fields['product_class'].initial = qs[0]", "title": "" }, { "docid": "8c4286dda00ed358386c09dda9463b9d", "score": "0.48647124", "text": "def pre_render(self, form, request):\n pass", "title": "" }, { "docid": "597047d081e7c04272e2e882cd4e5cdb", "score": "0.48572993", "text": "def clazz(self, clazz):\n self.__clazz = clazz", "title": "" }, { "docid": "87df4cf8b0373f04f3e905e14b78936e", "score": "0.48533514", "text": "def get_form_class(self, request):\n return EmailRegistrationForm", "title": "" }, 
{ "docid": "23783a0bbf764b3fb525f23cdccaf44f", "score": "0.48396057", "text": "def ajax_request(self):\n return self.request.params.get('ajax') and self.ajax", "title": "" }, { "docid": "23783a0bbf764b3fb525f23cdccaf44f", "score": "0.48396057", "text": "def ajax_request(self):\n return self.request.params.get('ajax') and self.ajax", "title": "" }, { "docid": "0cf8a76ee992fdf1d11825aee0625c4d", "score": "0.48319957", "text": "def get_html_class_name(self):\n return self.get_input_name().replace('.', '_')", "title": "" }, { "docid": "c40fbdc186c3b9a5ac48ad0f2e6c4de1", "score": "0.4829611", "text": "def _set_class_wrapper(self):\n self._class_wrapper = _class_wrapper_template % self", "title": "" }, { "docid": "d0be1f3a79b8c441c968f2826b611f36", "score": "0.48227006", "text": "def __init__(self, *args, **kwargs):\n super(TransactionForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.add_input(Submit('submit', 'Submit', css_class='btn-primary'))", "title": "" }, { "docid": "accfe3af306b8e45c8f9cbc0a46ac3c5", "score": "0.48046377", "text": "def is_ajax(self):\n return self.is_xhr", "title": "" }, { "docid": "abfd0d0011789b77849401b82d65ae32", "score": "0.4802242", "text": "def get_forms_classes(self):\n return self.forms_classes", "title": "" }, { "docid": "fd4c3f5483809abb3fa05eed6887f692", "score": "0.47934854", "text": "def __init__(self, formdata=None, obj=None, prefix='', handler = None, *args, **kwargs):\n\n self.handler = handler\n super(I18nForm, self).__init__(formdata=formdata, obj=obj, prefix=prefix, **kwargs)", "title": "" }, { "docid": "1114a428b055b9f05e54232a8709a26f", "score": "0.47897884", "text": "def generate_form_class(text_field_size):\n class DonationForm(forms.Form):\n\n def __init__(self, *args, **kwargs):\n super(DonationForm, self).__init__(*args, **kwargs)\n self.fields['state']._fill_choices(states_in_the_US_and_other())\n self.fields['action_center']._fill_choices(vibha_action_centers())\n\n # Contact information\n email = vibhaforms.EmailField(help_text='Official receipts will be emailed to this address', widget=forms.TextInput(attrs={'size': text_field_size}))\n first_name = vibhaforms.CharField(max_length=100, widget=forms.TextInput(attrs={'size': text_field_size}))\n last_name = vibhaforms.CharField(max_length=100, widget=forms.TextInput(attrs={'size': text_field_size}))\n address_1 = vibhaforms.CharField(label='Address', max_length=100, widget=forms.TextInput(attrs={'size': text_field_size}))\n address_2 = vibhaforms.CharField(label='Address (line 2)', required=False, max_length=100, widget=forms.TextInput(attrs={'size': text_field_size}))\n city = vibhaforms.CharField(max_length=100, widget=forms.TextInput(attrs={'size': text_field_size}))\n state = vibhaforms.ForeignKeyChoiceField(help_text='Please use \"Other\" listed at the end for a non-US address')\n zipcode = vibhaforms.CharField(max_length=100, widget=forms.TextInput(attrs={'size': text_field_size}))\n country = vibhaforms.CharField(max_length=100, initial='United States', widget=forms.TextInput(attrs={'size': text_field_size}))\n action_center = vibhaforms.ForeignKeyChoiceField(required=False, help_text='Vibha action center geographically closest to your area')\n phone = USPhoneNumberField(help_text='Eg: 111-111-1111')\n\n # Amount\n AMOUNT_CHOICES = [\n ('20.00', \"$20\"),\n ('50.00', \"$50\"),\n ('120.00', \"$120\"),\n ('420.00', \"$420\"),\n ('900.00', \"$900\"),\n ('0', \"Other\"),\n ]\n amount_choice = forms.ChoiceField(choices=AMOUNT_CHOICES, initial='0', 
widget=vibhaforms.RadioSelect(show_br=False))\n amount = forms.DecimalField(max_digits=9, decimal_places=2, min_value=decimal.Decimal(\"1.00\"), help_text='Amount in dollars and cents (E.g. 50.00)')\n\n # credit card information\n cc_name = vibhaforms.CharField(label='Name', max_length=100, help_text='Full name as it appears on the credit card', widget=forms.TextInput(attrs={'size': text_field_size}))\n credit_card = vibhaforms.CreditCardField(label='Card Number',\n widget=forms.TextInput(attrs={'autocomplete': 'off'}),\n help_text=u\"\"\"The address on the credit card must match the\n address above. We accept Visa, Mastercard, American Express,\n Discover.\"\"\")\n expr_date = vibhaforms.MonthYearField(label='Expiry date')\n cvv = vibhaforms.CharField(max_length=4, label='Security Code',\n widget=forms.TextInput(attrs={'autocomplete': 'off', 'size': 5}),\n help_text=u\"\"\"Three-digit credit card security code on the back\n for your Visa/Mastercard/Discover card or the four-digit code\n on the front of your American Express card. This code helps\n prevent frauds. <a\n href=\"javascript:popUp('http://www.merchantamerica.com/help.php?id=23#guide')\">Need\n Help?</a>\"\"\")\n\n # Matching donation\n company_name = vibhaforms.CharField(label='Company',\n required=False, max_length=256, widget=forms.TextInput(attrs={'size': text_field_size}))\n company_id = forms.IntegerField(required=False, widget=forms.HiddenInput())\n\n # Which dream registry event?\n dream_event_id = forms.IntegerField(required=False, widget=forms.HiddenInput())\n\n # Comment\n comments = vibhaforms.CharField(label='Other Comments', required=False, widget=forms.Textarea(attrs={'rows': 5}))\n\n # Email subscription information\n event_subscription = forms.BooleanField(required=False, initial=True)\n project_subscription = forms.BooleanField(required=False, initial=True)\n paper_receipt = forms.BooleanField(required=False, initial=False)\n\n # Which Austin champion caused this donation?\n champion_id = forms.IntegerField(required=False, widget=forms.HiddenInput())\n\n # Which project is this donation made out to?\n project_id = forms.IntegerField(required=False, widget=forms.HiddenInput())\n\n\n anonymous = forms.BooleanField(label='Remain anonymous',\n initial=False, required=False, help_text=u'Make my donation anonynous')\n\n # Comment\n comments = vibhaforms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 5}))\n\n # Referral\n referrer = vibhaforms.CharField(required=False, label=u'Honoree', help_text=u'Name of the person (if any) who referred you to make this donation')\n\n def clean_expr_date(self):\n clean_date = self.cleaned_data['expr_date']\n # Expr date needs to be cleaned only when credit card is used.\n if True:\n month, year = clean_date\n month, year = int(month), int(year)\n expr_date_in_future = ((year > TODAY.year) or ((year == TODAY.year) and (month >= TODAY.month)))\n if not expr_date_in_future:\n raise forms.ValidationError(u'Expiry date is in the past.')\n return clean_date\n\n def clean_amount_choice(self):\n ac = self.cleaned_data['amount_choice']\n self.fields['amount'].required = (ac == '0')\n return ac\n\n def clean_zipcode(self):\n state = self.cleaned_data.get('state', None)\n zipcode = self.cleaned_data['zipcode']\n if ((state is not None) and\n (zipcode is not None) and\n is_in_US(state) and\n (not re.match(r'^\\d{5}(?:-\\d{4})?$', zipcode))):\n raise forms.ValidationError(u'Enter a zip code in the format XXXXX or XXXXX-XXXX.')\n else:\n return zipcode\n\n def clean(self):\n # 
Handle amount choice\n self.fields['amount'].required = True\n ac = self.cleaned_data.get('amount_choice', '0')\n if ac != '0':\n self.cleaned_data['amount'] = decimal.Decimal(ac)\n return self.cleaned_data\n return DonationForm", "title": "" }, { "docid": "b8ac1f4e1442952acff12085fb04558b", "score": "0.47792712", "text": "def formfield(self, **kwargs):\n # if a form class is not specified, check to see if there is a custom\n # form_class specified for this datatype\n if not kwargs.get('form_class', None):\n datatype = self._get_internal_type()\n\n if datatype in INTERNAL_DATATYPE_FORMFIELDS:\n name = INTERNAL_DATATYPE_FORMFIELDS[datatype]\n kwargs['form_class'] = get_form_class(name)\n\n # define default arguments for the formfield class constructor\n kwargs.setdefault('label', self.name.title())\n\n if self.has_choices and 'widget' not in kwargs:\n kwargs['widget'] = forms.SelectMultiple(choices=self.choices)\n\n # get the default formfield for the model field\n return self.field.formfield(**kwargs)", "title": "" }, { "docid": "fc95ff5bcff0da28c382355d242e80f8", "score": "0.4775329", "text": "def get_sidebar_form_classes(self):\r\n form_classes = {}\r\n for cnt, plugin in enumerate(self.sidebar_plugins):\r\n form_classes['form%d' % cnt] = (plugin, plugin.sidebar.get('form_class', None))\r\n return form_classes", "title": "" }, { "docid": "7145a025e66b7ab0492cda268ae2de2e", "score": "0.47662282", "text": "def form(self, form):\n\n self._form = form", "title": "" }, { "docid": "65235279fbca2a64e04e0c3bc50babd4", "score": "0.47658804", "text": "def css_classes(self, extra_classes=None):\n if hasattr(extra_classes, 'split'):\n extra_classes = extra_classes.split()\n extra_classes = set(extra_classes or [])\n # field_css_classes is an optional member of a Form optimized for django-angular\n field_css_classes = getattr(self.form, 'field_css_classes', None)\n if hasattr(field_css_classes, 'split'):\n extra_classes.update(field_css_classes.split())\n elif isinstance(field_css_classes, (list, tuple)):\n extra_classes.update(field_css_classes)\n elif isinstance(field_css_classes, dict):\n extra_field_classes = []\n for key in ('*', self.name):\n css_classes = field_css_classes.get(key)\n if hasattr(css_classes, 'split'):\n extra_field_classes = css_classes.split()\n elif isinstance(css_classes, (list, tuple)):\n if '__default__' in css_classes:\n css_classes.remove('__default__')\n extra_field_classes.extend(css_classes)\n else:\n extra_field_classes = css_classes\n extra_classes.update(extra_field_classes)\n return super(NgBoundField, self).css_classes(extra_classes)", "title": "" }, { "docid": "e433e5518ad27765051cd995b3cf7313", "score": "0.47611362", "text": "def test_get_form_class(self):\n form_class = self.omniform.get_form_class()\n self.assertTrue(issubclass(form_class, OmniModelFormBaseForm))", "title": "" }, { "docid": "5e2dbe997b17cc1d4485fba8683db88c", "score": "0.47485822", "text": "def add_css_class(field, css):\r\n return field.as_widget(attrs={\"class\":css})", "title": "" }, { "docid": "a2d3b8163535c60f85d8500e8b9d3f46", "score": "0.4738905", "text": "def add_form_to_page(self, page_class, form_class):\n self.populate()\n self._forms.register(form_class)\n page_class.form_classes.append(form_class)\n self._form_to_page[form_class.form_id] = page_class.page_id", "title": "" }, { "docid": "6bf6f94a6d2989b5fe210131a328d38d", "score": "0.4728", "text": "def register_submit(class_name, fire) -> None:\n def submit_handler(event) -> None:\n \"\"\"\n Handle form submit and fire handler\n 
:param event: Default html form object\n :return: None\n \"\"\"\n event.preventDefault()\n fire()\n\n if window.jQuery('.' + class_name).length == 1:\n return window.jQuery('.' + class_name).on('submit', submit_handler)", "title": "" }, { "docid": "416924df00e7947e826197fd3e77f159", "score": "0.47278285", "text": "def test_base_form_class(self):\n self.assertEqual(\n OmniFormEmailConfirmationHandler.base_form_class,\n EmailConfirmationHandlerBaseFormClass\n )", "title": "" }, { "docid": "d67659474f03400fdb61d7346dc1fbf1", "score": "0.47260907", "text": "def formfield(self, form_class=None, **kwargs):\n form_class = form_class or ReCaptchaFormField\n return super(CharField, self).formfield(form_class=form_class, **kwargs)", "title": "" }, { "docid": "1fb7b12aa66e70e6dcd8048c2cc6108d", "score": "0.47214466", "text": "def class_type(self, class_type):\n\n self._class_type = class_type", "title": "" }, { "docid": "ebd56b064808c982045f8f19be57c5d7", "score": "0.47176027", "text": "def __init__(self, **kwargs):\n self._classes = kwargs.pop(\"classes\", \"\")\n self._attributes = kwargs", "title": "" }, { "docid": "5ec3b3291dfbb3f251469f30d3e0416b", "score": "0.4716892", "text": "def update_pre_render(self, form, request):\n self.pre_render(form, request)", "title": "" }, { "docid": "a411ce301be1635f65151d38c698807d", "score": "0.47117257", "text": "def __init__(self, *args, **kwargs):\n super(ListingForm, self).__init__(*args, **kwargs)\n for field in self.fields:\n self.fields[field].widget.attrs.update({'class': \"form-control\"})\n self.fields['title'].widget.attrs.update({'placeholder': \"Your title here\"})\n self.fields['startingPrice'].widget.attrs.update({'placeholder': \"$0.00\"})\n self.fields['imageURL'].widget.attrs.update({'placeholder': \"Add an image url for your item\"})", "title": "" }, { "docid": "9f09efb07350fc63836a1b6e22e30161", "score": "0.47064716", "text": "def updateAttrForm(self, actionData, attr, attrForm):\n attrForm.onModelDataChanged()", "title": "" }, { "docid": "e5e372926a53efbd386d85f8918e9ca9", "score": "0.46965086", "text": "def handle_set_class(self, class_):\n raise NotImplementedError(\"ShellSurface: Set Class\")", "title": "" }, { "docid": "ecde6b02dc35c20c95b56fefb8fa4444", "score": "0.46943158", "text": "def __set_class_by_attribute(self, attribute):\n self.__clazz = self.get_class_by_attribute(attribute)", "title": "" }, { "docid": "563c35f4ecb7f7b4148833d8bdcecde0", "score": "0.46911818", "text": "def create_cls(request):\n template = loader.get_template('form.html')\n form = ClassForm()\n return HttpResponse(template.render({'form':form, 'redirect': '/class', 'submit':'Create Class'}, request))", "title": "" }, { "docid": "31dab0f415cd09fc96d260a2db80f840", "score": "0.46910316", "text": "def wantClass(self, cls):\n pass", "title": "" }, { "docid": "0cb742565be2dd7d231a3f5930173ecd", "score": "0.46905288", "text": "def is_ajax() -> bool:\n ...", "title": "" }, { "docid": "c669123a4faeca297de9c08da75dbf86", "score": "0.46824488", "text": "def update_widget_attrs(self, bound_field, attrs):\n if bound_field.field.has_subwidgets() is False:\n widget_classes = getattr(self, 'widget_css_classes', None)\n if widget_classes:\n if 'class' in attrs:\n attrs['class'] += ' ' + widget_classes\n else:\n attrs.update({'class': widget_classes})\n return attrs", "title": "" }, { "docid": "e17b9f406f4b2474065b1e360bdec97f", "score": "0.4673647", "text": "def __init__(self, *args, **kwargs):\n\n if 'obj' in kwargs:\n self._obj = kwargs['obj']\n super(ModelForm, 
self).__init__(*args, **kwargs)", "title": "" }, { "docid": "a7ecfcdd453592985ed40262f10dfe7f", "score": "0.46692887", "text": "def flaskbb_tpl_form_new_post_before(form):", "title": "" }, { "docid": "2719e0ebf62ecbaac39f84dcf5d70d64", "score": "0.46655455", "text": "def __init__(self, *args, **kwargs):\n super(PlaylistForm, self).__init__(*args, **kwargs)\n self.fields['description'].widget.attrs['style'] = 'width: 100%; height: 1.75rem; overflow: hidden; border-radius: 0.3rem; border: none'\n self.fields['name'].widget.attrs['style'] = 'width: 100%; height: 1.75rem; border-radius: 0.3rem; border: none'\n self.fields['image'].widget.attrs['style'] = 'width: 50%'\n self.fields['is_shareable'].widget.attrs['class'] = 'form-check-input playlistPop'\n self.fields['is_private'].widget.attrs['class'] = 'form-check-input playlistPop'", "title": "" } ]
721a58bb79187b76f3a34dcc2612caf2
Return true if start date is earlier and end date is later than the test_date
[ { "docid": "c107b508ffe07da8f4f2d9b9f6b5ee32", "score": "0.69243836", "text": "def is_active_at_date(self, test_date):\n return ((self.start_date <= test_date) and \n (self.end_date >= test_date))", "title": "" } ]
[ { "docid": "444b5e2562051fb7f34e87db70361b05", "score": "0.7660612", "text": "def is_end_date_before_start_date(start_date: dt, end_date: dt) -> bool:\n return start_date > end_date", "title": "" }, { "docid": "edf3eea0c955dda09f6c509efcd79be7", "score": "0.7135487", "text": "def back_test(self, start_date, end_date):\n raise NotImplementedError", "title": "" }, { "docid": "90b8902100a26462296727e0fb14a8f1", "score": "0.7107349", "text": "def test_start_date_after_end_date(self):\n # create the bad start and end date\n end_date = date(2019, 1, 1).strftime('%Y-%m-%d')\n start_date = date(2019, 1, 2).strftime('%Y-%m-%d')\n # request\n request_body = {\n 'customer': self.customer.id,\n 'start_date': start_date,\n 'end_date': end_date\n }\n response = self.client.post(reverse(self.view_name), request_body)\n # test database\n self.assertFalse(models.Report.objects.exists())\n self.assertFalse(models.SubReport.objects.exists())\n self.assertFalse(models.ComputerReport.objects.exists())\n # test response\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "3dc62a61e98ca14e08c1b30e623ba282", "score": "0.7090523", "text": "def check_dates2(self, cr, uid, ids, context=None):\n exp = self.read(cr, uid, ids[0], ['start_date', 'end_date'])\n if exp['start_date'] and exp['end_date']:\n if exp['start_date'] > exp['end_date']:\n return False\n return True", "title": "" }, { "docid": "965093f3682e71ceed9266c2df488743", "score": "0.7037916", "text": "def validation_compare_dates_in_range(self, date1, date2):\n try:\n end_date = datetime.datetime.strptime(date2, settings.DATE_FORMAT)\n date = datetime.datetime.strptime(date1, settings.DATE_FORMAT)\n delta = end_date - date\n if delta > datetime.timedelta(0):\n return True\n except Exception, e:\n # Not datetime or improper datetime values given. 
Rising no exception.\n log.error('Search validation exception: %s' % e)\n pass\n return False", "title": "" }, { "docid": "1ec70a7622380f97d5d8b95955d262bf", "score": "0.6827703", "text": "def isBetween(start_date, end_date):\n return isAfter(start_date) and isBefore(end_date)", "title": "" }, { "docid": "358750160ec45e994cf0c014581b8ef2", "score": "0.6775339", "text": "def __contains__(self, date):\n return self._first_day <= date <= self._last_day", "title": "" }, { "docid": "380d45c2d4d91b014847a60c209e1e27", "score": "0.677357", "text": "def is_after(date_1, date_2):\n if (date_1[0], date_1[1]) > (date_2[0], date_2[1]):\n return True\n else:\n return False", "title": "" }, { "docid": "5c5771ca3deff7cfe72f1cb8064a96df", "score": "0.6746228", "text": "def is_after(date1: AnyStr, date2: AnyStr) -> bool:\r\n date1 = datetime.strptime(date1, \"%Y-%m-%d\").date()\r\n date2 = datetime.strptime(date2, \"%Y-%m-%d\").date()\r\n return date1 > date2", "title": "" }, { "docid": "6aceea388de55a1d003e8d7159f58528", "score": "0.672141", "text": "def is_date_before(start_time: dt, end_time: dt) -> bool:\n try:\n return start_time < end_time\n except TypeError:\n return False", "title": "" }, { "docid": "cc499b81b4fa2897ba3ea832eaaaaf41", "score": "0.661248", "text": "def fully_within_dates(\n br: BlockRange, start: Optional[datetime], end: Optional[datetime]\n) -> bool:\n return (start is None or start <= br.start) and (end is None or br.end <= end)", "title": "" }, { "docid": "e5d1101795dfc0389a3c0dbf656cdbdf", "score": "0.6605858", "text": "def end_checker(data, end_date):\n return _date_checker(data, end_date, operator.ge)", "title": "" }, { "docid": "8054afec24707b7129fee70bf4571c33", "score": "0.65806633", "text": "def validate_end_is_after_start(start_date, end_date):\n\n # check is not needed if no end date\n if end_date is None:\n return\n\n if start_date > end_date:\n raise ValidationError(\"End date cannot be before the start date.\")", "title": "" }, { "docid": "c92801ac60d6ad0434fb9494dd60689d", "score": "0.6573168", "text": "def IsUptoDate(self) -> bool:", "title": "" }, { "docid": "d15e74d5d0698ecb35a00b6584333aa4", "score": "0.65297604", "text": "def overlaps_dates(\n br: BlockRange, start: Optional[datetime], end: Optional[datetime]\n) -> bool:\n return (start is None or start <= br.end) and (end is None or br.start <= end)", "title": "" }, { "docid": "b1ce523291097f96f39f5df685e01168", "score": "0.64531404", "text": "def are_dates(self):\n if self.start_datetime and self.end_datetime:\n return True\n else:\n return False", "title": "" }, { "docid": "f1540dcaebb554ce5ff28b96dddcda10", "score": "0.64474434", "text": "def test_end_date_future(self):\n # create the bad start date\n end_date = datetime.now() + timedelta(days=7)\n end_date = end_date.date().strftime('%Y-%m-%d')\n # request\n request_body = {\n 'customer': self.customer.id,\n 'start_date': '2019-01-01',\n 'end_date': end_date\n }\n response = self.client.post(reverse(self.view_name), request_body)\n # test database\n self.assertFalse(models.Report.objects.exists())\n self.assertFalse(models.SubReport.objects.exists())\n self.assertFalse(models.ComputerReport.objects.exists())\n # test response\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "1b4e756b275966dfc68207372965e3e1", "score": "0.64334244", "text": "def test_validate_date_range_start_after_end(start, end):\n\n with pytest.raises(BadDateRangeError):\n Oasis._validate_date_range(start, end)", "title": "" }, { "docid": 
"bdb9d0562ac81f68c261971ce4a2e216", "score": "0.6381141", "text": "def dates_overlap_with(self, product_pricing):\n if product_pricing.valid_to and (\n product_pricing.valid_to < self.valid_from):\n return False\n if self.valid_to and (\n self.valid_to < product_pricing.valid_from):\n return False\n return True", "title": "" }, { "docid": "95004d2bc93565dd38754e69c1ea6efd", "score": "0.63805807", "text": "def check_if_date_is_in_range(str_date, from_date, to_date):\n return str_date > from_date and str_date < to_date", "title": "" }, { "docid": "8c9d0f781202f28f2553b04e79f6c78d", "score": "0.6337607", "text": "def dates_overlap_with(self, assigned_product):\n if assigned_product.end_date and (\n assigned_product.end_date < self.start_date):\n return False\n if self.end_date and (assigned_product.start_date > self.end_date):\n return False\n return True", "title": "" }, { "docid": "ea1bdf0bbfe4f3a74282656c5bd86c99", "score": "0.6314689", "text": "def test_start_end_date(self):\n # request\n request_body = {\n 'customer': self.customer.id,\n 'start_date': '2019-01-02',\n 'end_date': '2019-01-27'\n }\n self.client.post(reverse(self.view_name), request_body)\n # test database\n self.assertEqual(models.SubReport.objects.first().start_date, date(2019, 1, 2))\n self.assertEqual(models.SubReport.objects.first().end_date, date(2019, 1, 27))", "title": "" }, { "docid": "9d66866c5320fc7029f0e216b9134ff9", "score": "0.6306834", "text": "def test_end_date(self):\n # request\n request_body = {\n 'customer': self.customer.id,\n 'start_date': '2019-01-01',\n 'end_date': '2019-01-31'\n }\n self.client.post(reverse(self.view_name), request_body)\n # test database\n self.assertTrue(models.Report.objects.filter(end_date=date(2019, 1, 31)).exists())", "title": "" }, { "docid": "800dbca70f349784fcd52c620b3f61a1", "score": "0.6278115", "text": "def has_date(self, date):\n return (date >= self.begin() and date <= self.end())", "title": "" }, { "docid": "b1e10e9fd2fbeefa4d847516bc7d7acc", "score": "0.626858", "text": "def is_before(date_1, date_2):\n if (date_1[0], date_1[1]) < (date_2[0], date_2[1]):\n return True\n else:\n return False", "title": "" }, { "docid": "f3955cac58d2afa1202ed9f57f08ccd0", "score": "0.6253915", "text": "def _last_date_checker(last, end):\n if last != end:\n return False\n else:\n return True", "title": "" }, { "docid": "d2f9851ed3f025174c268aaaef494ab3", "score": "0.62294304", "text": "def test_all_dates(self):\r\n \"\"\"US01 Dates before Current Date\"\"\"\r\n for eadates in alldates:\r\n self.assertTrue(eadates < todaysdate)", "title": "" }, { "docid": "ff876b46bf3e6b9c3ce29db7f927a1e8", "score": "0.6229187", "text": "def can_start(self):\n return self.start_date <= date.today() < self.end_date", "title": "" }, { "docid": "0a42611639571114c90da1584dc9ec9e", "score": "0.62121564", "text": "def resolved_between(self, start_date, end_date):\n for transaction in self.transactions:\n if (transaction['transactionType'] == 'status' and\n transaction['newValue'] == 'resolved'):\n timestamp = int(transaction['dateCreated'])\n transaction_date = datetime.fromtimestamp(timestamp)\n return (\n transaction_date >= start_date and\n transaction_date < end_date\n )\n return False", "title": "" }, { "docid": "7a0c91f8d21e9e61583507451e1d2d52", "score": "0.6201534", "text": "def is_before(self,other_date):\n if self.year < other_date.year:\n return True\n elif self.month < other_date.month:\n return True\n else:\n return self.day < other_date.day", "title": "" }, { "docid": 
"69eb4693dd5a1ecd2f64ef89c5a0bd8a", "score": "0.6176616", "text": "def _check_date_range(date_str, start_datetime, end_datetime):\n result = True\n if start_datetime is not None or end_datetime is not None:\n date_obj = datetime.datetime.strptime(date_str, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n if start_datetime is not None:\n start_obj = datetime.datetime.strptime(start_datetime, \"%Y-%m-%d %H:%M\")\n result = date_obj >= start_obj\n if end_datetime is not None:\n end_obj = datetime.datetime.strptime(end_datetime, \"%Y-%m-%d %H:%M\")\n result = date_obj <= end_obj\n return result", "title": "" }, { "docid": "7d93160a06be52528ac87ca1267126ac", "score": "0.6161413", "text": "def test_is_claim_in_date_range_edge_cases():\n date_range = (\n datetime.date(2017, 6, 1),\n datetime.date(2017, 9, 1),\n )\n claim_overlapping_start_date = claim.Claim({\n 'clm_from_dt': datetime.date(2017, 5, 30),\n 'clm_thru_dt': datetime.date(2017, 6, 1),\n })\n claim_overlapping_end_date = claim.Claim({\n 'clm_from_dt': datetime.date(2017, 9, 1),\n 'clm_thru_dt': datetime.date(2017, 9, 10),\n })\n\n assert qpp_measure.QPPMeasure._is_claim_in_date_range(claim_overlapping_start_date, date_range)\n assert qpp_measure.QPPMeasure._is_claim_in_date_range(claim_overlapping_end_date, date_range)", "title": "" }, { "docid": "2e44b8bd0ac6bfcd4217408a4d8f2c36", "score": "0.6136399", "text": "def check_dates(startdate, enddate):\n try:\n dt.strptime(startdate, \"%Y-%m-%d\")\n startdate_check = True\n except ValueError:\n print(\"'startdate' is incorrect. It should be YYYY-MM-DD\")\n startdate_check = False\n \n try:\n dt.strptime(enddate, \"%Y-%m-%d\")\n enddate_check = True\n except ValueError:\n print(\"'enddate' is incorrect. It should be YYYY-MM-DD\")\n enddate_check = False\n \n if all(x for x in [startdate_check, enddate_check]):\n return True\n else:\n return False", "title": "" }, { "docid": "9b7b9b592fafd6ec745cb991196c1c14", "score": "0.6115989", "text": "def compare_suggested_trunks_grid_end_date(self, actual_date):\n is_matched = False\n if (str(actual_date) != \"\"):\n is_matched = True\n return is_matched", "title": "" }, { "docid": "8a7448b20388e91628e2b7d54e93a13b", "score": "0.61095744", "text": "def test_start_end_date_multiple(self):\n # request\n request_body = {\n 'customer': self.customer.id,\n 'start_date': '2019-01-03',\n 'end_date': '2019-03-18'\n }\n self.client.post(reverse(self.view_name), request_body)\n\n # test database\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2019, 1, 3), end_date=date(2019, 1, 31)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2019, 2, 1), end_date=date(2019, 2, 28)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2019, 3, 1), end_date=date(2019, 3, 18)).exists())", "title": "" }, { "docid": "8552b18af25b6e8faa782884cdcc0664", "score": "0.6095983", "text": "def dateIsBefore(y1, m1, d1, y2, m2, d2):\r\n if y1 < y2:\r\n return True\r\n if y1 == y2:\r\n if m1 < m2:\r\n return True\r\n if m1 == m2:\r\n return d1 < d2\r\n return False", "title": "" }, { "docid": "5abfba4a91b1d1adb9ea38ad4c80c39d", "score": "0.6095391", "text": "def in_range(self, date):\n return (self.period_start <= date) and (date <= self.period_end)", "title": "" }, { "docid": "cfe615332b956fb770c9069cbf873021", "score": "0.6064613", "text": "def is_valid(self):\n return (self.date_begin < self.date_end and 0 < self.hours < 12 and\n (self.date_end - self.date_begin).days < 1)", "title": "" }, { "docid": "0cb525405f6a9d7d54aa797128d24dc6", 
"score": "0.6051333", "text": "def test_api_meetings_create_authenticated_end_greater_than_start(self):\n user = UserFactory()\n other_user = UserFactory()\n jwt_token = AccessToken.for_user(user)\n\n response = self.client.post(\n \"/api/meetings/\",\n {\n \"name\": \"my meeting\",\n \"owner\": str(other_user.id),\n \"start\": \"2022-07-07T10:00:00Z\",\n \"end\": \"2022-07-07T09:00:00Z\",\n },\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n response.json(),\n {\"__all__\": [\"The start date must be earlier than the end date.\"]},\n )", "title": "" }, { "docid": "c1f75dde5e72329e0777429f8fb55560", "score": "0.60476583", "text": "def test_start_date_future(self):\n # create the bad start date\n start_date = datetime.now() + timedelta(days=7)\n start_date = start_date.date().strftime('%Y-%m-%d')\n # request\n request_body = {\n 'customer': self.customer.id,\n 'start_date': start_date,\n 'end_date': '2019-01-31'\n }\n response = self.client.post(reverse(self.view_name), request_body)\n # test database\n self.assertFalse(models.Report.objects.exists())\n self.assertFalse(models.SubReport.objects.exists())\n self.assertFalse(models.ComputerReport.objects.exists())\n # test response\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "65ba6b6ff6486df08559c11b812a7848", "score": "0.60400164", "text": "def is_date_bigger_than(date1, date2):\n\tif date2 is None:\n\t\tdate2 = datetime.datetime(1970, 1, 1, 0, 0, 0)\n\tdt1 = date1.replace(tzinfo=None)\n\tdt2 = date2.replace(tzinfo=None)\n\tif dt1 - dt2 > datetime.timedelta(seconds = 0):\n\t\treturn True\n\treturn False", "title": "" }, { "docid": "6a526fbfaa5552a5e091c7c9456be7bf", "score": "0.6028928", "text": "def check_date_from_to(self):\n nworking = self.search_count([('start_date', '<=', self.end_date), ('end_date', '>=', self.start_date), ('holidays_id', '=', self.holidays_id.id), ('id', '!=', self.id)])\n if nworking:\n raise ValidationError(_('You can not have holiday that overlaps on same days!'))", "title": "" }, { "docid": "73d74fc4adc4aa4c5a953c719bca7119", "score": "0.60153085", "text": "def _check_date(self, cr, uid, ids, context=None):\n for act in self.browse(cr, uid, ids, context):\n if act.start_date and act.expiration_date:\n if self.get_date(act.start_date) > self.get_date(act.expiration_date):\n raise osv.except_osv(_(''), _(\"Start Date Must Be Less Than Expiration Date!\"))\n\n if act.date and act.start_date:\n if self.get_datetime(act.date) > self.get_date(act.start_date):\n raise osv.except_osv(_(''), _(\"Request Date Must Be Less Than Start Date!\"))\n return True", "title": "" }, { "docid": "975d664619dc71959238d5fcf687cf8a", "score": "0.60018575", "text": "def check_dates3(self, cr, uid, ids, context=None):\n exp = self.read(cr, uid, ids[0], ['violation_date', 'start_date'])\n if exp['violation_date'] and exp['start_date']:\n if exp['violation_date'] > exp['start_date']:\n return False\n return True", "title": "" }, { "docid": "99d35f51b984a99bcbb881c86c6dfe85", "score": "0.59984785", "text": "def test_start_date_after_end_date(self):\n start_date = timezone.now() + datetime.timedelta(days=30)\n end_date = timezone.now() - datetime.timedelta(days=30)\n rent_price = RentPriceForm({'start_date': start_date, 'end_date': end_date})\n self.assertRaises(ValidationError, rent_price.clean)", "title": "" }, { "docid": "0abbe999327de380726fdd9c2fc58cfc", "score": "0.5980107", "text": "def 
test_end_time_less_than_start_time(self):\n self.user.is_staff = True\n self.user.is_active = True\n self.user.save()\n\n self.client.login(username=self.username, password=self.password)\n response = self.client.post(\n '/edit_event/?event_id={0}'.format(self.event.id),\n {'csrfmiddlewaretoken': ['6soMcEK3d6JkcDRRnOu6XcdeVETyLibPQCZAuk1yHPHjjpSgxH2pUdQcOusmiiHG'],\n 'start_time': ['2021-04-23T22:31'],\n 'end_time': ['2021-04-23T18:31'],\n 'allday': ['allday'],\n 'event_location': ['MacLean Hall'],\n 'invite_all': ['invite_all'],\n 'role_selected': ['Staff', 'Graduate Student', 'Undergraduate Student', 'Faculty'],\n 'school_year_selected': ['Freshman', 'Sophomore', 'Juniors', 'Faculty'],\n 'mentor_status': ['Mentors', 'Mentees'],\n 'special_category': ['First generation college-student', 'Rural',\n 'Low-income', 'Underrepresented racial/ethnic minority',\n 'Disabled', 'Transfer Student', 'LGBTQ'],\n 'research_area': ['Biochemistry', 'Bioinformatics', 'Biology',\n 'Biomedical Engineering', 'Chemical Engineering',\n 'Chemistry', 'Computer Science and Engineering', 'Environmental Science',\n 'Health and Human Physiology', 'Mathematics', 'Microbiology',\n 'Neuroscience', 'Nursing', 'Physics', 'Psychology']\n }\n , follow=True\n )\n\n self.assertEqual(response.status_code, HTTPStatus.OK)\n message = list(response.context['messages'])[0]\n self.assertEqual(message.message, \"End time cannot be less than start-time\")", "title": "" }, { "docid": "ffffa01da1bd7ccd267c959b3796cf38", "score": "0.59581625", "text": "def test_start_date_after_end_date(self):\n start_date = timezone.now() + datetime.timedelta(days=30)\n end_date = timezone.now() - datetime.timedelta(days=30)\n tenancy = TenancyForm({'start_date': start_date, 'end_date': end_date})\n self.assertRaises(ValidationError, tenancy.clean)", "title": "" }, { "docid": "f30fa3b796bb5dbae0cccc1ee46e11de", "score": "0.5922836", "text": "def _validate_date_range(self, start_date, end_date):\n self._validate_date(start_date)\n self._validate_date(end_date)\n if end_date > start_date:\n raise InvalidQueryError(\"The start date must come before the end date. 
{} comes after {}\".format(start_date, end_date))", "title": "" }, { "docid": "29e561b87ecafb3b583355545ce4c160", "score": "0.5892203", "text": "def test_1st_and_2nd_date_inversed_ordering(self):\n data = {'start_date': '2018-01-31', 'end_date': '2018-01-01'}\n f = DateForm(data=data)\n self.assertFalse(f.is_valid(), f.errors.as_data())", "title": "" }, { "docid": "3253420b8c39313127199f37800cca13", "score": "0.5888996", "text": "def valFromToDates(format, fromDate, toDate, minFrom=None, maxTo=None):\n if not strptime(fromDate, format) or not strptime(toDate, format):\n return False\n if fromDate > toDate:\n return False\n if minFrom and fromDate < minFrom:\n return False\n if maxTo and toDate > maxTo:\n return False\n\n return True", "title": "" }, { "docid": "5f290db037b48f266aaac49d1654f478", "score": "0.58748347", "text": "def check_dates(self, cr, uid, ids, context=None): \n exp = self.read(cr, uid, ids[0], ['violation_date', 'decision_date'])\n if exp['violation_date'] and exp['decision_date']:\n if exp['violation_date'] > exp['decision_date']:\n return False\n return True", "title": "" }, { "docid": "4f323c6bb7c9e8bd612e53dc46690264", "score": "0.58743715", "text": "def test_end_date(self):\n # request\n request_body = {\n 'customer': self.customer.id,\n 'start_date': '2019-01-01',\n 'end_date': '2019-01-31'\n }\n self.client.post(reverse(self.view_name), request_body)\n # test database\n self.assertEqual(models.Report.objects.first().end_date.strftime('%Y-%m-%d'), request_body['end_date'])", "title": "" }, { "docid": "959a61eb30892f7a586a58ba25de7eb8", "score": "0.58666515", "text": "def date_is_before(year1, month1, day1, year2, month2, day2):\n if year1 < year2:\n return True\n if year1 == year2:\n if month1 < month2:\n return True\n if month1 == month2:\n return day1 < day2\n return False", "title": "" }, { "docid": "1160ad9e21b2cfd11d47b3f2cc074516", "score": "0.5860055", "text": "def checkifDate(start,end):\n # CM: calendar date start (CalendarDayStart)\n begin = Calendar(binding['CalendarDayStart'])\n\n intStart,strStart = datetoInt(binding[start],True)\n # CM mod\n # CM overwrite date with time step\n binding[start] = intStart\n intEnd,strEnd = datetoInt(binding[end],True)\n # CM mod\n binding[end] = intEnd\n\n # test if start and end > begin\n if (intStart<0) or (intEnd<0) or ((intEnd-intStart)<0):\n strBegin = begin.strftime(\"%d/%m/%Y %H:%M\")\n msg=\"Simulation start date and/or simulation end date are wrong or do not match CalendarStartDate!\\n\"+ \\\n \"CalendarStartDay: \"+strBegin +\"\\n\" + \\\n \"Simulation start: \"+strStart + \" - \"+str(intStart)+\"\\n\" + \\\n \"Simulation end: \"+strEnd + \" - \"+str(intEnd)\n raise LisfloodError(msg)\n modelSteps.append(intStart)\n modelSteps.append(intEnd)\n return", "title": "" }, { "docid": "f7ce36b57291fe6abe48c62fb9f7ea11", "score": "0.5858228", "text": "def check_first_second_dates(first_date, second_date):\n # Convert dates to timestamps\n first = convert_to_timestamp(first_date)\n second = convert_to_timestamp(second_date)\n # Compare timestamps\n if first < second:\n return True\n else:\n return False", "title": "" }, { "docid": "885d87af58fa7db821643e04e23bb26c", "score": "0.58541274", "text": "def test_assignment_is_past_due_date(self):\n DATETIME_EARLIER = datetime(2016, 6, 15, 11, 59, 59)\n DATETIME_MIDDLE = datetime(2016, 6, 15, 12, 0, 0)\n DATETIME_LATER = datetime(2016, 6, 15, 12, 0, 1)\n assignment = self.get_test_assignment()\n assignment.due_date = DATETIME_MIDDLE\n assignment.save()\n 
self.assertFalse(assignment.is_past_due_date(now=DATETIME_EARLIER))\n self.assertTrue(assignment.is_past_due_date(now=DATETIME_LATER))", "title": "" }, { "docid": "10e6e17c927e1547288d9772045026c2", "score": "0.5850601", "text": "def time_range(self, start, diff, curr):\n td = timedelta(minutes=diff)\n end = start + td\n \n start = self.timestamp(start)\n end = self.timestamp(end)\n\n return bool(start <= curr <= end)", "title": "" }, { "docid": "c82388fcc70e382a7847b7ae80b37de4", "score": "0.58281493", "text": "def test_1st_and_2nd_date_right_ordering(self):\n data = {'start_date': '2018-01-01', 'end_date': '2018-01-31'}\n f = DateForm(data=data)\n self.assertTrue(f.is_valid(), f.errors.as_data())", "title": "" }, { "docid": "3bf3d328ba71114a0a404c640d24acf9", "score": "0.5827171", "text": "def _is_promo_preliminary(end_date):\n\n now = datetime.datetime.now(g.tz)\n return end_date + datetime.timedelta(days=1) > now", "title": "" }, { "docid": "6b21e9ec2fee85cda3a5613fb45819e5", "score": "0.58198726", "text": "def __gt__(self, other: Interval[T]) -> bool:\n if self.start > other.start:\n return True\n elif self.start == other.start:\n if self.stop > other.stop:\n return True\n return False", "title": "" }, { "docid": "9deaf440c1ed3c945f2f02778392e243", "score": "0.58142614", "text": "def is_valid(self):\n return self.end_date is None", "title": "" }, { "docid": "e576aeb486e579f1ac00c062980fe539", "score": "0.5804324", "text": "def delta_passed(delta, dates):\n max_allowed_datetime = pendulum.now() - delta\n\n for date in dates:\n if (date is not None) and (date > max_allowed_datetime):\n return False\n\n return True", "title": "" }, { "docid": "eacf58d11949fa6149d57f940afdd92a", "score": "0.5801466", "text": "def validating(self, parameter, parameter_name):\n\n try:\n first_date = datetime.strptime(self.first_date, '%d.%m.%Y').date()\n\n except ValueError:\n print(f'Error in {parameter_name}: '\n f'Cannot compare date on and date return on, '\n f'because date on does not match format DD.MM.YYYY')\n\n return False\n\n input_parameter = datetime.strptime(parameter, '%d.%m.%Y').date()\n\n if input_parameter < first_date:\n print(f'Error in {parameter_name}: '\n f'Return date must be later '\n 'than departure date')\n\n return False\n\n return super().validating(parameter, parameter_name)", "title": "" }, { "docid": "5e100eeec18b39d9d671d7501a4417a2", "score": "0.5772271", "text": "def out_of_bounds(self):\n return self.original_datetime > self.current_work_day_end or self.original_datetime < self.current_work_day_start", "title": "" }, { "docid": "065caf44c533a4138e837331552eab76", "score": "0.576585", "text": "def _check_start_is_before_end(self):\n for record in self:\n if record.end_datetime < record.start_datetime:\n msg = \"\"\"Invalid event start and end times! 
The event\n should start before it ends.\"\"\"\n raise ValidationError(msg)", "title": "" }, { "docid": "dd3a445a7a6cd939f182dec0977c4198", "score": "0.575652", "text": "def is_valid_date(self, message):\n\n return GUser.get_message_date(message) > self.end_date", "title": "" }, { "docid": "7b283e1418ec56bc6ae5a0668475b431", "score": "0.5747981", "text": "def test_is_claim_in_date_range_simple_cases():\n date_range = (\n datetime.date(2017, 6, 1),\n datetime.date(2017, 9, 1),\n )\n claim_too_early = claim.Claim({\n 'clm_from_dt': datetime.date(2016, 1, 1),\n 'clm_thru_dt': datetime.date(2016, 1, 1),\n })\n claim_too_late = claim.Claim({\n 'clm_from_dt': datetime.date(2017, 12, 30),\n 'clm_thru_dt': datetime.date(2017, 12, 30),\n })\n claim_just_right = claim.Claim({\n 'clm_from_dt': datetime.date(2017, 7, 1),\n 'clm_thru_dt': datetime.date(2017, 7, 1),\n })\n\n assert not qpp_measure.QPPMeasure._is_claim_in_date_range(claim_too_early, date_range)\n assert not qpp_measure.QPPMeasure._is_claim_in_date_range(claim_too_late, date_range)\n assert qpp_measure.QPPMeasure._is_claim_in_date_range(claim_just_right, date_range)", "title": "" }, { "docid": "92a5296edef27610c782070826ec5922", "score": "0.5745055", "text": "def test_overlap_with_others(self):\n start1 = datetime.strptime('2018-09-01 17:00:00', \"%Y-%m-%d %H:%M:%S\")\n end1 = datetime.strptime('2019-09-15 17:00:00', '%Y-%m-%d %H:%M:%S')\n start2 = datetime.strptime('2019-09-01 17:00:00', '%Y-%m-%d %H:%M:%S')\n end2 = datetime.strptime('2019-09-15 17:00:00', '%Y-%m-%d %H:%M:%S')\n v1 = Voucher(code='SNSD', value=5000, start=start1, end=end1)\n v2 = Voucher(code='SNSD', value=10000, start=start2, end=end2)\n db.session.add(v1)\n db.session.add(v2)\n db.session.commit()\n v2.start = datetime.strptime('2018-09-01 17:00:00', \"%Y-%m-%d %H:%M:%S\")\n v2.end = datetime.strptime('2019-09-15 17:00:00', '%Y-%m-%d %H:%M:%S')\n self.assertEqual(v2.is_overlap_with_other(), True)\n v2.start = datetime.strptime('2020-09-01 17:00:00', \"%Y-%m-%d %H:%M:%S\")\n v2.end = datetime.strptime('2021-09-01 17:00:00', \"%Y-%m-%d %H:%M:%S\")\n self.assertEqual(v2.is_overlap_with_other(), False)", "title": "" }, { "docid": "8d8b885bf46df1c762ba03d2f33bbc04", "score": "0.57382303", "text": "def date_cmp(item1, item2):\n if item1.end_date is None and item2.end_date is None:\n if item1.start_date < item2.start_date:\n return 1\n if item2.start_date < item1.start_date:\n return -1\n return 0\n if item1.end_date is None:\n return -1\n if item2.end_date is None:\n return 1\n if item1.end_date < item2.end_date:\n return 1\n if item2.end_date < item1.end_date:\n return -1\n if item1.start_date < item2.start_date:\n return 1\n if item2.start_date < item1.start_date:\n return -1\n return 0", "title": "" }, { "docid": "a5910e6a1f021f779789d44a001862ba", "score": "0.57225144", "text": "def test_start_end_dates_multiple_cross_year(self):\n # request\n request_body = {\n 'customer': self.customer.id,\n 'start_date': '2017-11-19',\n 'end_date': '2019-02-20'\n }\n self.client.post(reverse(self.view_name), request_body)\n # test database\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2017, 11, 19), end_date=date(2017, 11, 30)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2017, 12, 1), end_date=date(2017, 12, 31)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2018, 1, 1), end_date=date(2018, 1, 31)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2018, 2, 1), 
end_date=date(2018, 2, 28)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2018, 3, 1), end_date=date(2018, 3, 31)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2018, 4, 1), end_date=date(2018, 4, 30)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2018, 5, 1), end_date=date(2018, 5, 31)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2018, 6, 1), end_date=date(2018, 6, 30)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2018, 7, 1), end_date=date(2018, 7, 31)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2018, 8, 1), end_date=date(2018, 8, 31)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2018, 9, 1), end_date=date(2018, 9, 30)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2018, 10, 1), end_date=date(2018, 10, 31)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2018, 11, 1), end_date=date(2018, 11, 30)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2018, 12, 1), end_date=date(2018, 12, 31)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2019, 1, 1), end_date=date(2019, 1, 31)).exists())\n self.assertTrue(models.SubReport.objects.filter(start_date=date(2019, 2, 1), end_date=date(2019, 2, 20)).exists())", "title": "" }, { "docid": "654601d10be3b1c6754cc376d5505b53", "score": "0.572246", "text": "def dateIsBefore(year1, month1, day1, year2, month2, day2):\r\n if year1 < year2:\r\n return True\r\n if year1 == year2:\r\n if month1 < month2:\r\n return True\r\n if month1 == month2:\r\n return day1 < day2\r\n return False", "title": "" }, { "docid": "325992b1e2c3ff43da06ca2cd2fac901", "score": "0.5715165", "text": "def dateIsBefore(year1, month1, day1, year2, month2, day2):\n if year1 < year2:\n return True\n if year1 == year2:\n if month1 < month2:\n return True\n if month1 == month2:\n return day1 < day2\n return False", "title": "" }, { "docid": "325992b1e2c3ff43da06ca2cd2fac901", "score": "0.5715165", "text": "def dateIsBefore(year1, month1, day1, year2, month2, day2):\n if year1 < year2:\n return True\n if year1 == year2:\n if month1 < month2:\n return True\n if month1 == month2:\n return day1 < day2\n return False", "title": "" }, { "docid": "325992b1e2c3ff43da06ca2cd2fac901", "score": "0.5715165", "text": "def dateIsBefore(year1, month1, day1, year2, month2, day2):\n if year1 < year2:\n return True\n if year1 == year2:\n if month1 < month2:\n return True\n if month1 == month2:\n return day1 < day2\n return False", "title": "" }, { "docid": "5ca865d2ee0169f8aa49d2977c9f2117", "score": "0.57101715", "text": "def overlap(cls, tspan):\n if cls.start_dt <= tspan.end_dt and cls.end_dt >= tspan.start_dt:\n return True\n else:\n return False", "title": "" }, { "docid": "eb1d0b21550e391a468852a42e1126d7", "score": "0.5703416", "text": "def compare_times(now,date1):\r\n print(\"enter compare date\")\r\n try:\r\n if now.year==date1.year and now.month==date1.month and now.day==date1.day and now.hour==date1.hour and now.minute == date1.minute:\r\n print(\"true\")\r\n return True\r\n else:\r\n return False\r\n except Exception as err:\r\n print (err)", "title": "" }, { "docid": "4dd1b6658dcfaab471e72297ccd4fa25", "score": "0.5693276", "text": "def is_actual(self):\n now = timezone.now()\n if self.start_publication and now < self.start_publication:\n return False\n\n if 
self.end_publication and now >= self.end_publication:\n return False\n return True", "title": "" }, { "docid": "b66b702ce81cbf342b290fba2061ae4c", "score": "0.56712186", "text": "def compare_suggested_trunks_grid_begin_date(self, actual_date):\n is_matched = False\n if (str(actual_date) == str(self.get_current_date())):\n is_matched = True\n return is_matched", "title": "" }, { "docid": "f3be46fad7028e23f2161b08c3f82701", "score": "0.5671133", "text": "def check_project_start_end_date(project):\n\n budgets = project.budgets\n logger.debug('check_project_start_end_date budgets : %s' % len(budgets))\n start = 0\n end = 0\n for budget in budgets:\n if budget.status.code not in ['RJD', 'CNCLD']:\n start_asmilliseconds = budget.get_generic_text_attr('start_date')\n end_asmilliseconds = budget.get_generic_text_attr('end_date')\n\n if start == 0 or start_asmilliseconds < start:\n start = start_asmilliseconds\n if end == 0 or end_asmilliseconds > end:\n end = end_asmilliseconds\n if start != 0:\n from stalker_pyramid.views import from_milliseconds\n project.start = from_milliseconds(start)\n project.end = from_milliseconds(end)\n\n logger.debug('check_project_start_end_date ends')", "title": "" }, { "docid": "c15fe047bfdb5902b132f136dc8da9c2", "score": "0.56584525", "text": "def test_end_date_bad(self):\n # request\n request_body = {\n 'customer': self.customer.id,\n 'start_date': '2019-02-01',\n 'end_date': '2019-02-31'\n }\n response = self.client.post(reverse(self.view_name), request_body)\n # test database\n self.assertFalse(models.Report.objects.exists())\n self.assertFalse(models.SubReport.objects.exists())\n self.assertFalse(models.ComputerReport.objects.exists())\n # test response\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "bafeb2176f373cc98e92e7cc0bbccb12", "score": "0.56522334", "text": "def valid(self, on_date=None):\n valid_range = self.validRange()\n gmt = dt.utcnow()\n if on_date:\n gmt = on_date\n gmt = gmt.replace(tzinfo=GMT())\n return valid_range.has_date(gmt)", "title": "" }, { "docid": "5bc58654fe0cd7f9caaacc4835841c6f", "score": "0.56470525", "text": "def is_between(x,y1,y2):\n if x > y1 and x < y2:\n return True\n else:\n return False", "title": "" }, { "docid": "efd8d69ec7dbae00ca07d32986fb4ad0", "score": "0.5646979", "text": "def in_this_day(start_time, end_time, day_string):\n max_start_time = datetime.strptime(day_string, \"%Y%m%d\")\n min_end_time = max_start_time + timedelta(days=1)\n if start_time > max_start_time:\n max_start_time = start_time\n\n if end_time < min_end_time:\n min_end_time = end_time\n\n return max_start_time < min_end_time", "title": "" }, { "docid": "96d0ebfbce5410b1a561b58ceac9ede3", "score": "0.5636817", "text": "def test_nested_friendly_date_criteria_reverse( self ):\n # Add topic - past items\n topic = self._ATCT\n date_crit = topic.addCriterion('end', 'ATFriendlyDateCriteria')\n date_crit.setValue(0)\n date_crit.setDateRange('+') # This is irrelevant when the date is now\n date_crit.setOperation('less')\n # Add subtopic - future items\n self.setRoles(['Manager', 'Member'])\n topic.addSubtopic( 'qux' )\n self.setRoles(['Member'])\n subtopic = topic.qux\n date_crit = subtopic.addCriterion('start','ATFriendlyDateCriteria')\n date_crit.setValue(0)\n date_crit.setDateRange('-') # This is irrelevant when the date is now\n date_crit.setOperation('more')\n subtopic.setAcquireCriteria(True)\n # fetch the query\n query = subtopic.buildQuery()\n # this one can have both start and end\n 
self.failUnless(query['start'])\n self.failUnless(query['end'])", "title": "" }, { "docid": "24bbff8b90b935222b7ff636d873608d", "score": "0.562384", "text": "def right_dates_order(dep, ret):\r\n\r\n if ret is None: # checking for none to prevent\r\n return True # the user from entering an empty string\r\n\r\n dep = str_date_to_date(dep)\r\n ret = str_date_to_date(ret)\r\n\r\n return dep <= ret", "title": "" }, { "docid": "6261bf25c55bd64804ecd64b6475e981", "score": "0.56173736", "text": "def earlier_date(date1, date2):\n return (time.strptime(date1, \"%b %d %Y\") < time.strptime(date2, \"%b %d %Y\"))", "title": "" }, { "docid": "7d84ee704cbfd4637dd35218ee0de064", "score": "0.5616227", "text": "def test_death_dates_before_current(self):\r\n \"\"\"US01 Dates before Current Date\"\"\"\r\n individal = Individual(\"I01\")\r\n individal.add_death(\"2013-12-31\")\r\n self.assertTrue(individal.death == \"2013-12-31\")\r\n self.assertTrue(before_current_date(individal.death, todaysdate))", "title": "" }, { "docid": "471d8a54cfd93d7aaf8073727d0f3b43", "score": "0.5609914", "text": "def checkDateSpan(lastdate, tilldate):\r\n if lastdate > tilldate:\r\n print \"Error !! first arg date must less than second\"\r\n return -1\r\n lastdate, tilldate = str(lastdate), str(tilldate)\r\n tdate = datetime.date(int(tilldate[0:4]), int(tilldate[4:6]), int(tilldate[6:8]))\r\n ldate = datetime.date(int(lastdate[0:4]), int(lastdate[4:6]), int(lastdate[6:8]))\r\n return (tdate - ldate).days", "title": "" }, { "docid": "bf38ff266bb919ca219fc5b11d2f1b30", "score": "0.5599499", "text": "def haveTimeConflictWith(self, other):\n compare_min = self.start - datetime.timedelta(minutes=self.time_boundery)\n compare_max = self.end + datetime.timedelta(minutes=self.time_boundery)\n if other.start > compare_max or other.end < compare_min:\n return False\n else:\n return True", "title": "" }, { "docid": "e47e6dec92cddaea006bf795853a8343", "score": "0.5599195", "text": "def start_checker(data, start_date):\n return _date_checker(data, start_date, operator.le)", "title": "" }, { "docid": "64975926d5ae70436de28b897fd21d7e", "score": "0.5594907", "text": "def spans_calendar_year(start: date, end: date) -> bool:\n # Catch error\n if start > end:\n raise ValueError('Start date after end date')\n\n return end.year > start.year", "title": "" }, { "docid": "144eee484deb4e0aa488f91c7b05d924", "score": "0.5592608", "text": "def assertEqualDates(self, dt1, dt2, seconds=None):\r\n if seconds is None:\r\n seconds = self.date_tolerance\r\n \r\n if dt1 > dt2:\r\n diff = dt1 - dt2\r\n else:\r\n diff = dt2 - dt1\r\n if not diff < datetime.timedelta(seconds=seconds):\r\n raise AssertionError('%r and %r are not within %r seconds.' 
%\r\n (dt1, dt2, seconds))", "title": "" }, { "docid": "faccd6d8df10c8994a9aa987dcf08d90", "score": "0.55838776", "text": "def test_start_date(self):\n # request\n request_body = {\n 'customer': self.customer.id,\n 'start_date': '2019-01-01',\n 'end_date': '2019-01-31'\n }\n self.client.post(reverse(self.view_name), request_body)\n # test database\n self.assertTrue(models.Report.objects.filter(start_date=date(2019, 1, 1)).exists())", "title": "" }, { "docid": "d779a165e087feaa52fedc4c97aba0bf", "score": "0.55826014", "text": "def test_filter_newer_cases_by_order_date_some_newer_cases(\n store_with_multiple_cases_and_samples: Store,\n):\n # GIVEN a store containing cases with different order dates\n cases_query: Query = store_with_multiple_cases_and_samples._get_query(table=Family)\n min_order_date = min(case.ordered_at for case in cases_query)\n max_order_date = max(case.ordered_at for case in cases_query)\n\n # Calculate an intermediate date between the minimum and maximum order dates\n some_order_date = min_order_date + (max_order_date - min_order_date) / 2\n\n # WHEN filtering cases by a date that is earlier than some order dates\n filtered_cases: Query = filter_newer_cases_by_order_date(\n cases=cases_query, order_date=some_order_date\n )\n\n # THEN the query should return the cases with order dates newer than the given date\n assert filtered_cases.count() > 0\n for case in filtered_cases:\n assert case.ordered_at > some_order_date", "title": "" }, { "docid": "7599e0c7492bc7f0525b85b8740d1673", "score": "0.5582041", "text": "def post_validate(self, REQUEST=None, errors=None):\n rstartDate = REQUEST.get('startDate', None)\n rendDate = REQUEST.get('endDate', None)\n from DateTime import DateTime\n\n if rendDate:\n end = DateTime(rendDate)\n else:\n end = self.getEndDate()\n if rstartDate:\n start = DateTime(rstartDate)\n else:\n start = self.getStartDate()\n\n if start > end:\n errors['endDate'] = \"End date must be after start date\"", "title": "" }, { "docid": "3d51a282ebc37061dc5bcf910446a0cb", "score": "0.5580495", "text": "def overdue(self) -> bool:\n if not self.scheduled_start_datetime:\n return False\n\n now = dt.now()\n now_ts = dt.timestamp(now)\n\n if self[\"end\"] is None:\n start_ts = dt.timestamp(self.scheduled_start_datetime)\n if now_ts > start_ts:\n return True\n\n return False\n\n end_ts = dt.timestamp(self[\"end\"])\n if now_ts > end_ts:\n return True\n\n return False", "title": "" }, { "docid": "eab8bf778ebec400a5cab706a13c8b26", "score": "0.55766296", "text": "def test_news_list_by_date_range_with_improper_dates(self):\n\n response = self.client.get(\n reverse(\"news-view-date-range\"),\n {\n \"start_date\": \"09-2019-05\",\n \"end_date\": \"987-56\",\n \"stock\": self.stock.id,\n },\n )\n self.assertRaises(Exception, response)", "title": "" }, { "docid": "b3edc7566c7777422d557b3803ab0be1", "score": "0.5574604", "text": "def validate_date_range(from_date, to_date):\n if from_date > to_date:\n # raise ValidationError(detail=\"from_date must be less than to_date\") \n raise ParseError(detail=\"from_date must be less than to_date\")", "title": "" }, { "docid": "e9b69406b47a6e2d0b3816cb3b8e05c6", "score": "0.55697256", "text": "def is_after(t1, t2):\n l1 = [t1.hour, t1.minute, t1.second]\n l2 = [t2.hour, t2.minute, t2.second]\n return l1 > l2", "title": "" }, { "docid": "a37c33b1d242e75caafbe3be3c6dcbae", "score": "0.55678535", "text": "def outage_exists(outages, expected):\n for outage in outages:\n if (\n outage['start'] == expected['start']\n and outage['end'] == 
expected['end']\n ):\n return True\n return False", "title": "" } ]
e07c802ed5a452456a5cd07f2507b689
Get the OLS registry.
[ { "docid": "16a4e70d850b4acf0171f0266bd14e8b", "score": "0.0", "text": "def get_ols(force_download: bool = False):\n if PROCESSED_PATH.exists() and not force_download:\n with PROCESSED_PATH.open() as file:\n return json.load(file)\n\n data = requests.get(URL).json()\n data[\"_embedded\"][\"ontologies\"] = sorted(\n data[\"_embedded\"][\"ontologies\"],\n key=itemgetter(\"ontologyId\"),\n )\n if \"next\" in data[\"_links\"]:\n raise NotImplementedError(\n \"Need to implement paging since there are more entries than fit into one page\"\n )\n RAW_PATH.write_text(json.dumps(data, indent=2, sort_keys=True))\n\n processed = {}\n for ontology in data[\"_embedded\"][\"ontologies\"]:\n ols_id = ontology[\"ontologyId\"]\n if ols_id in OLS_SKIP:\n continue\n # TODO better docs on how to maintain this file\n config = get_ols_processing().get(ols_id)\n if config is None:\n if ols_id not in OLS_SKIP:\n logger.warning(\"need to curate processing file for OLS prefix %s\", ols_id)\n continue\n processed[ols_id] = _process(ontology, config)\n\n with PROCESSED_PATH.open(\"w\") as file:\n json.dump(processed, file, indent=2, sort_keys=True)\n return processed", "title": "" } ]
[ { "docid": "8ab3e6a6e433509d323ce714abcf5c90", "score": "0.7038571", "text": "def registry(self):\n\n return self._registry", "title": "" }, { "docid": "4bf38d3a7b168f4437572774e204ad74", "score": "0.6848575", "text": "def get_registry(self):\n return copy.deepcopy(self._registry)", "title": "" }, { "docid": "661f91f78a8d6807dd1821f8c553fa60", "score": "0.6617641", "text": "def base_registry(self):\n return self._get_base_registry()", "title": "" }, { "docid": "4052146d0e07bf02f4a32f44cb675236", "score": "0.632854", "text": "def _get_registry(stix_version):\n if stix_version not in _STIX_REGISTRIES:\n raise RegistryNotFoundError(stix_version)\n\n spec_registry_path = os.path.join(\n os.path.dirname(__file__),\n _STIX_REGISTRIES[stix_version]\n )\n\n with open(spec_registry_path, encoding=\"utf-8\") as f:\n spec_registry = json.load(f)\n\n return spec_registry", "title": "" }, { "docid": "b99c542c0f61953145a615ef24290ae6", "score": "0.63155633", "text": "def _get_registry(module: nn.Module) -> Dict[str, RegistryItem]:\n default_registry: Dict[str, RegistryItem] = OrderedDict()\n return module.__dict__.setdefault(REGISTRY_KEY, default_registry) # type: ignore[call-overload]", "title": "" }, { "docid": "b8f108b698b2d35aeb13413536c12552", "score": "0.62130505", "text": "def _registry():\n return _registry_config()[\"host\"]", "title": "" }, { "docid": "097d0d4d741d65876c215a7845d78914", "score": "0.6187419", "text": "def get_registry(entry, runtime):\n try:\n records_location_param_id = Id('parameter:recordsRegistry@mongo')\n registry = runtime.get_configuration().get_value_by_parameter(\n records_location_param_id).get_string_value()\n return import_module(registry).__dict__.get(entry, {})\n except (ImportError, AttributeError, KeyError, NotFound):\n return {}", "title": "" }, { "docid": "9d044a626098e5f7717202af203b19c6", "score": "0.616971", "text": "def get_app_registry(self) -> AppRegistry:\n piccolo_conf_module = self.get_piccolo_conf_module()\n return getattr(piccolo_conf_module, \"APP_REGISTRY\")", "title": "" }, { "docid": "2ccdf0c224b42931dd73d26f79cc9e80", "score": "0.6160673", "text": "def mf_registry():\n return registry", "title": "" }, { "docid": "f82b312426fb6455da1452261f5d4e63", "score": "0.6108724", "text": "def auth_registry(self):\n return self._get_auth_registry()", "title": "" }, { "docid": "57ec52d6d8b4d2e69025151e0e07f820", "score": "0.60967135", "text": "def _query_registry(self):\r\n #TODO: Set this up for JModelica\r\n import _winreg\r\n\r\n base_key = r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall'\r\n\r\n uninstall_key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, base_key, 0,\r\n _winreg.KEY_ALL_ACCESS | _winreg.KEY_WOW64_32KEY)\r\n\r\n number_of_keys = _winreg.QueryInfoKey(uninstall_key)[0] # 0 means number of sub_keys\r\n\r\n product_key = None\r\n for sub_key_id in range(0, number_of_keys):\r\n sub_key_name = _winreg.EnumKey(uninstall_key, sub_key_id)\r\n sub_key = _winreg.OpenKey(uninstall_key, sub_key_name)\r\n number_of_values = _winreg.QueryInfoKey(sub_key)[1]\r\n for value_id in range(0, number_of_values):\r\n value_tuple = _winreg.EnumValue(sub_key, value_id)\r\n value_name = value_tuple[0]\r\n value = value_tuple[1]\r\n if value_name == 'DisplayName' and value == display_name:\r\n product_key = sub_key\r\n\r\n if not product_key:\r\n return \"\", \"\"\r\n\r\n install_location = \"\"\r\n display_version = \"\"\r\n number_of_values = _winreg.QueryInfoKey(product_key)[1]\r\n for value_id in range(0, number_of_values):\r\n value_tuple = 
_winreg.EnumValue(product_key, value_id)\r\n value_name = value_tuple[0]\r\n value = value_tuple[1]\r\n if value_name == \"InstallLocation\":\r\n install_location = value\r\n elif value_name == \"DisplayVersion\":\r\n display_version = value\r\n\r\n return install_location, display_version", "title": "" }, { "docid": "0d83e1626b9999c7583460adc9a75149", "score": "0.5940556", "text": "def registry():\n return RequestRegistry()", "title": "" }, { "docid": "fb4f35a3f7508ae41380a0787926ca2c", "score": "0.5871335", "text": "def get_all_openers():\n return loader_registry.values()", "title": "" }, { "docid": "2f30bc79e9821a0f6b72f020842ddede", "score": "0.5856736", "text": "def base_registry_raw(self):\n return self._base_registry", "title": "" }, { "docid": "487db8eb3644150767605fb8e1174b4d", "score": "0.58093125", "text": "def auth_registry_raw(self):\n return self._auth_registry", "title": "" }, { "docid": "2059dd79e1b3f0067725dbbfdb7ce844", "score": "0.5795752", "text": "def get(self, request):\n try:\n registry = oai_registry_api.get_all()\n serializer = serializers.RegistrySerializer(registry, many=True)\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Exception as e:\n content = OaiPmhMessage.get_message_labelled(str(e))\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "title": "" }, { "docid": "d5921199a6d8c3256ed331cf1ef5c975", "score": "0.57763815", "text": "def connect(cls, url=None):\n if not url: url = cls.STScIRegistryURL\n return RegistrySearch(url)\n # Note: it's not clear that the C interface would allow us to\n # override the URL. ", "title": "" }, { "docid": "1f8b014e23c57762ea69707d81ee2be2", "score": "0.56029147", "text": "def searchRegistries( self, REQUEST=None, **kw ):\n interrupt_thread( self )\n\n indexes = {}\n indexes['meta_type'] = 'Registry'\n\n if self._IsDebug():\n portal_debug( '%s.searchRegistries' % self.getId(), 'indexes: %s' % indexes )\n\n results = self.searchResults( REQUEST=REQUEST, sort_on='Title', **indexes )\n if not results: return None\n\n membership = getToolByName( self, 'portal_membership', None )\n current_registry = kw and kw.get('current_registry') or 1\n registries = []\n\n for x in results:\n if x is None:\n continue\n obj = x.getObject()\n if obj is None or (current_registry and not obj.isCurrentRegistry()) or \\\n not membership.checkPermission('Add portal content', obj):\n continue\n registries.append( x )\n\n return registries", "title": "" }, { "docid": "7e22cbc37a8a63b44cf2c539056aa147", "score": "0.5557563", "text": "def _rr(self):\n if self.container.has_capability('RESOURCE_REGISTRY'):\n return self.container.resource_registry\n\n if self._rr_client is None:\n self._rr_client = ResourceRegistryServiceProcessClient(process=self.container)\n\n return self._rr_client", "title": "" }, { "docid": "ad2dd1005eaf6b6f89c0df54f192facb", "score": "0.55548364", "text": "def _get_registry_template():\n try:\n return system_api.get_active_global_version_manager_by_title(\n REGISTRY_XSD_FILENAME\n )\n except Exception as exception:\n raise Exception(\n f\"Impossible to get the template {REGISTRY_XSD_FILENAME} : {str(exception)}\"\n )", "title": "" }, { "docid": "2f9209330b333976e71f0301c4c4cc61", "score": "0.551878", "text": "def connect_registry_client():\n client = adapters.RegistryClient()\n client.connect(runtime_config.registry.connection_string)\n return client", "title": "" }, { "docid": "bad2d1999c8f70284a60565bd44ffc80", "score": "0.5510161", "text": "def get_repository(self):\n # 
Implemented from template for osid.resource.ResourceLookupSession.get_bin\n return self._provider_session.get_repository()", "title": "" }, { "docid": "bad2d1999c8f70284a60565bd44ffc80", "score": "0.5510161", "text": "def get_repository(self):\n # Implemented from template for osid.resource.ResourceLookupSession.get_bin\n return self._provider_session.get_repository()", "title": "" }, { "docid": "bad2d1999c8f70284a60565bd44ffc80", "score": "0.5510161", "text": "def get_repository(self):\n # Implemented from template for osid.resource.ResourceLookupSession.get_bin\n return self._provider_session.get_repository()", "title": "" }, { "docid": "bad2d1999c8f70284a60565bd44ffc80", "score": "0.5510161", "text": "def get_repository(self):\n # Implemented from template for osid.resource.ResourceLookupSession.get_bin\n return self._provider_session.get_repository()", "title": "" }, { "docid": "dddd1ed494a128cc694499c3e2c71297", "score": "0.5484555", "text": "def registered_root():\n return _registered_root[\"_\"]", "title": "" }, { "docid": "2f35b6d112baae2f7f25d551021b6239", "score": "0.5475703", "text": "def get_registry_path():\n import winreg\n\n res = []\n\n # System-wide part of PATH\n with winreg.CreateKey(\n winreg.HKEY_LOCAL_MACHINE,\n r\"System\\CurrentControlSet\\Control\\Session Manager\\Environment\"\n ) as key:\n try:\n path = winreg.QueryValueEx(key, \"PATH\")[0]\n except WindowsError:\n # No value called PATH\n pass\n else:\n res.extend(path.split(';'))\n\n # User part of PATH\n with winreg.CreateKey(winreg.HKEY_CURRENT_USER, r\"Environment\") as key:\n try:\n path = winreg.QueryValueEx(key, \"PATH\")[0]\n except WindowsError:\n # No value called PATH\n pass\n else:\n res.extend(path.split(';'))\n\n return res", "title": "" }, { "docid": "33ec309b13ba3bd5f5e0ca9959355f9b", "score": "0.5429458", "text": "def internal_get_sreg_base(*args):\n return _idaapi.internal_get_sreg_base(*args)", "title": "" }, { "docid": "3ffeb0e0c7f18db14c1d2f45d5c3a27a", "score": "0.5414764", "text": "def _get_registry_client(self, registry: str) -> RegistryClient:\n client = self.registry_clients.get(registry)\n if client is None:\n session = RegistrySession.create_from_config(self.workflow.conf, registry=registry)\n client = RegistryClient(session)\n self.registry_clients[registry] = client\n return client", "title": "" }, { "docid": "02cf93e8dd9fd97e24df00bb0f3bc320", "score": "0.54137695", "text": "def __iter__(self):\n return iter(self.__registry)", "title": "" }, { "docid": "cd75ccced4e1b55d22bfe7f566595f7e", "score": "0.53941315", "text": "def registrations(self):\n return (reg for reg in self._regs)", "title": "" }, { "docid": "029005f429334e99f31ff356f099d7ab", "score": "0.53505844", "text": "def get_instrument_from_registry(name):\n return instrument_registry[name].copy()", "title": "" }, { "docid": "443fe65f1be992728f3ebbb9f900d712", "score": "0.53022355", "text": "def get_repository(self):\n # Implemented from awsosid template for -\n # osid.resource.ResourceLookupSession.get_bin_template\n return self._provider_session.get_repository()", "title": "" }, { "docid": "443fe65f1be992728f3ebbb9f900d712", "score": "0.53022355", "text": "def get_repository(self):\n # Implemented from awsosid template for -\n # osid.resource.ResourceLookupSession.get_bin_template\n return self._provider_session.get_repository()", "title": "" }, { "docid": "443fe65f1be992728f3ebbb9f900d712", "score": "0.53022355", "text": "def get_repository(self):\n # Implemented from awsosid template for -\n # 
osid.resource.ResourceLookupSession.get_bin_template\n return self._provider_session.get_repository()", "title": "" }, { "docid": "f0f7f31931f0da3f39a4bbe97bf4684a", "score": "0.53005725", "text": "def _get_auth_registry(self, lookup_allow=None):\n from .auth import AuthenticatedRegistry\n\n if lookup_allow is None:\n lookup_allow = defaultdict(lambda: True)\n\n # help pylint understand our return value\n if False: # pylint:disable=using-constant-test\n return AuthenticatedRegistry()\n\n if self.auth_registry_raw is not None:\n return self.auth_registry_raw\n\n lookup_allow['auth_registry'] = False\n\n if (\n self.use_db and\n lookup_allow['base_registry'] and\n self.has_base_registry and\n self._auth_registry_dynamic is None\n ):\n self._auth_registry_dynamic = AuthenticatedRegistry.load(\n self._get_base_registry(lookup_allow),\n )\n\n if (\n lookup_allow['project'] and\n self._auth_registry_dynamic is None\n ):\n project = self._get_project()\n if project is not None:\n self._auth_registry_dynamic = project.target_registry\n\n if self._auth_registry_dynamic is None and self.use_db:\n self._auth_registry_dynamic = AuthenticatedRegistry.load(\n 'docker.io',\n )\n\n return self._auth_registry_dynamic", "title": "" }, { "docid": "1eb0a70db71cea2cbee2f16a3e60eeaf", "score": "0.52929145", "text": "def redhat(self):\r\n try:\r\n return self.__redhat\r\n except:\r\n return self.extensions()", "title": "" }, { "docid": "34f0e166096bcc145ec3653fd16b4ea1", "score": "0.52832425", "text": "def get_registry_location(self):\n return self.all_joined_units.received_raw.get(\"registry-location\")", "title": "" }, { "docid": "7e3261bd78fa2dfef8b8ab5fe00b98b6", "score": "0.52781516", "text": "def __init__(self):\n self.registry = list()", "title": "" }, { "docid": "b86cc84c60c864f805b14fe8d6c6e70e", "score": "0.5265273", "text": "def get(self, request, registry_id):\n try:\n registry = oai_registry_api.get_by_id(registry_id)\n serializer = serializers.RegistrySerializer(registry)\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n except exceptions.DoesNotExist:\n content = OaiPmhMessage.get_message_labelled('No registry found with the given id.')\n return Response(content, status=status.HTTP_404_NOT_FOUND)\n except exceptions_oai.OAIAPIException as e:\n return e.response()\n except Exception as e:\n content = OaiPmhMessage.get_message_labelled(str(e))\n return Response(content, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "title": "" }, { "docid": "ea63d45e0384ae22be29b726ae597c25", "score": "0.5246942", "text": "def get_store(self):\n return self.send_command(\"/store\", method=\"get\")", "title": "" }, { "docid": "3262923ca0afc35af30c623153cc5ed7", "score": "0.52443963", "text": "def registry_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"registry_id\")", "title": "" }, { "docid": "3f2fe8acd33a4c7cb100781ccf911a4c", "score": "0.5227218", "text": "def _get_base_registry(self, lookup_allow=None):\n if lookup_allow is None:\n lookup_allow = defaultdict(lambda: True)\n\n if self.has_base_registry:\n return self.base_registry_raw\n\n elif lookup_allow['auth_registry']:\n lookup_allow['base_registry'] = False\n auth_registry = self._get_auth_registry(lookup_allow)\n if auth_registry is not None:\n return auth_registry.base_name\n\n return 'docker.io'", "title": "" }, { "docid": "65fba7e2e2d05818b8eced15f5b4595e", "score": "0.521912", "text": "def get_register():\n return __REGISTER", "title": "" }, { "docid": "d7f18aa624e7c605b9c0ccdc589aa5dc", "score": "0.5158106", "text": "def 
_OpenFile(self, path):\n if not self._registry_file_reader:\n return None\n\n return self._registry_file_reader.Open(\n path, ascii_codepage=self._ascii_codepage)", "title": "" }, { "docid": "62cb1538fb2464d7567ba884f06b022c", "score": "0.51466465", "text": "def all(self):\n return six.itervalues(self.registry)", "title": "" }, { "docid": "b6e54286817cb3f4774de019c4c6655d", "score": "0.5144789", "text": "def _state_registry(self):\n if self.run_options.dry_run:\n return self._dry_run_state_registry\n else:\n return self._real_state_registry", "title": "" }, { "docid": "6b9daefe4c5c89b622f681f2b715a4d2", "score": "0.51437175", "text": "def look_up_registry(self, integration_id):\n\n registry = RemoteRegistry.for_integration_id(\n self._db, integration_id, self.goal\n )\n if not registry:\n return MISSING_SERVICE\n return registry", "title": "" }, { "docid": "2e3092a07d4fb25873ac24e9dc8096ff", "score": "0.5139722", "text": "def get_ospl_home(self):\n return self.ospl_home_", "title": "" }, { "docid": "3fb68d5e66217cba546299914c2886f2", "score": "0.51330405", "text": "def mock_registry():\n return VaspMockRegistry()", "title": "" }, { "docid": "db8b2cdd51bb87f79acf518be8e2476e", "score": "0.51204926", "text": "def entity_reg(opp):\n return mock_registry(opp)", "title": "" }, { "docid": "db8b2cdd51bb87f79acf518be8e2476e", "score": "0.51204926", "text": "def entity_reg(opp):\n return mock_registry(opp)", "title": "" }, { "docid": "db8b2cdd51bb87f79acf518be8e2476e", "score": "0.51204926", "text": "def entity_reg(opp):\n return mock_registry(opp)", "title": "" }, { "docid": "f2829872fe8447561415e24d797cea3b", "score": "0.51167333", "text": "def _get_extension_manager_of_resource(resource_name):\n global _global_extn_mgrs_by_resource\n\n if resource_name not in _global_extn_mgrs_by_resource:\n resource_namespace = 'sushy.resources.' 
+ resource_name + '.oems'\n _global_extn_mgrs_by_resource[resource_name] = (\n _create_extension_manager(resource_namespace)\n )\n return _global_extn_mgrs_by_resource[resource_name]", "title": "" }, { "docid": "af80098b804954cc8fd226dd95bf525b", "score": "0.51127076", "text": "def list_registries():\n try:\n registries = lm.get_workflow_registries()\n if len(registries) == 0:\n print(\"\\n No Workflow Registry found !!!\\n\")\n else:\n print(f\"\\n Workflow Registries:\\n{'*'*80}\")\n for r in registries:\n print(f\"{r.uuid} (name='{r.name}', type={r.type})\")\n print(\"\\n\")\n except Exception as e:\n try:\n detail = re.search('DETAIL:\\\\s*(.+)', str(e)).group(1)\n except AttributeError:\n detail = str(e)\n logger.exception(e)\n print(f\"ERROR: {detail}\", file=sys.stderr)", "title": "" }, { "docid": "5cb03f3743c043cb7fd2543ed3c31203", "score": "0.51071554", "text": "def get_versions(self):\n return self.registry.keys()", "title": "" }, { "docid": "6a20aeb9675b8441343d8c66c99e3348", "score": "0.50963384", "text": "def registry(context):\n logger.info(\"registry | starting subcommands\")\n load_dotenv()\n if (\n not os.getenv(\"REGISTRY_URL\")\n or not os.getenv(\"REGISTRY_USER\")\n or not os.getenv(\"REGISTRY_TOKEN\")\n ):\n logger.info(\n \"registry | No registry URL or credentials; skipping registry commands.\"\n )\n return\n\n registry_add(context)\n registry_add(context)\n registry_list(context)\n registry_get(context)\n registry_update(context)\n registry_del(context)", "title": "" }, { "docid": "603ae24fa987d5af038715fd926c19a9", "score": "0.5093566", "text": "def get(name):\n\n return ITCRegistry._connectors.get(name, {})", "title": "" }, { "docid": "6147aed46aba12d9ecb9083cc7ff4b20", "score": "0.5079778", "text": "def get_repositories(self):\n # Implemented from awsosid template for -\n # osid.resource.BinLookupSession.get_bins_template\n if not self._can('lookup'):\n raise PermissionDenied()\n else:\n return self._provider_session.get_repositories()", "title": "" }, { "docid": "a0f9c218ea9c1caa4a5481a50d8ed2bb", "score": "0.50709593", "text": "def load_drivers(self):\n return libicebox.symbols_load_drivers()", "title": "" }, { "docid": "bc7f2f63ed42fc829ac1fdb99553ee88", "score": "0.5050938", "text": "def get_register(number):\n # Add here to first check cached data? 
- do this externally to function?\n try:\n register_search = registered_client.register(\"publication\", Epodoc(number))\n return register_search.json()\n except:\n return None", "title": "" }, { "docid": "fd6d6d9097f05a660882c08100bab2a9", "score": "0.5042386", "text": "def load_registry():\n # Load the data\n filename = os.path.join(data_dir, \"registry\", f\"data_{registry_date}_processed.csv\")\n registry = pd.read_csv(filename)\n\n # Convert to a GeoDataFrame\n registry[\"geometry\"] = [Point(i, j) for i, j in zip(registry.lng, registry.lat)]\n registry = gpd.GeoDataFrame(registry, geometry=\"geometry\")\n\n # Set missing geometries to NaN\n invalid = registry.lat.isnull() | registry.lng.isnull()\n registry.loc[invalid, \"geometry\"] = np.nan\n\n # Remove entries where the name is \"None\" or \"Same\"\n bad_names = registry[\"dba_name\"].str.lower().isin([\"same\", \"none\"])\n registry.loc[bad_names, \"dba_name\"] = np.nan\n\n # return\n return registry.rename_axis(\"registry_id\")", "title": "" }, { "docid": "2a81e3d2f831e06301f8f9c53dece16c", "score": "0.5031683", "text": "def get_ospl_env(self):\n return self.ospl_env_", "title": "" }, { "docid": "0901a51185668c4779edae2d3eb029f2", "score": "0.5025725", "text": "def get_secrets(registry) -> dict:\n return registry.getUtility(ISecrets)", "title": "" }, { "docid": "dee16e8a7ae75e7a5330fc9dbe9f24ae", "score": "0.5016897", "text": "def list(self, *args):\n parser = argparse.ArgumentParser(prog=\"pacco registry list\")\n parser.add_argument(\"remote\", help=\"remote name\")\n parsed_args = parser.parse_args(args)\n pm = self.__rm.get_remote(parsed_args.remote)\n self.__out.writeln(pm.list_package_registries())", "title": "" }, { "docid": "fc6cc88a2364c54e3785b9a1b61087fc", "score": "0.49998626", "text": "def get_opener(name):\n global loader_registry\n return loader_registry[name]", "title": "" }, { "docid": "e1efa056f743e6f6ee6165e70f723f86", "score": "0.49717933", "text": "def test_get_registry_index(self):\n pass", "title": "" }, { "docid": "6595a742d85785707ebed8b3a1e551b7", "score": "0.49614847", "text": "def reset_registry():\n return registry.reset()", "title": "" }, { "docid": "bb7692b1d0f959cc276015e74874bd3b", "score": "0.49495292", "text": "def get_registred_applications( self ):\n\t\treturn self.get_registered_applications()", "title": "" }, { "docid": "80cb82c7282f2faafc190f2e55a708f0", "score": "0.4939151", "text": "def populate_registry(self, pooch):\n\n raise NotImplementedError # pragma: no cover", "title": "" }, { "docid": "4abab1ad35283d6369e78c0172987166", "score": "0.49375403", "text": "def registry_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"registry_id\")", "title": "" }, { "docid": "8a2bede780b0b3e7835c4c9db845c270", "score": "0.49306822", "text": "def get_repository(self):\n return self._provider_session.get_repository()", "title": "" }, { "docid": "8a2bede780b0b3e7835c4c9db845c270", "score": "0.49306822", "text": "def get_repository(self):\n return self._provider_session.get_repository()", "title": "" }, { "docid": "0cba2365985535acc686b5c5bbf4f979", "score": "0.4928606", "text": "def ph_get_regCodeSreg(*args):\n return _idaapi.ph_get_regCodeSreg(*args)", "title": "" }, { "docid": "a3b93f365cb58694e60d80af481f1199", "score": "0.49269322", "text": "def get_engine(slug):\n return engine_register[slug]", "title": "" }, { "docid": "51e7833a97527ec211fbefdae27765df", "score": "0.49222627", "text": "def get_referencer(registry):\n # Argument might be a config or request\n regis = 
getattr(registry, 'registry', None)\n if regis is None:\n regis = registry\n return regis.queryUtility(IReferencer)", "title": "" }, { "docid": "421589a29b4ea6de5e027794562114fb", "score": "0.49163008", "text": "def getSR(*args):\n return _idaapi.getSR(*args)", "title": "" }, { "docid": "b69c8dc0fc7123083f04b5c3f9efd4c5", "score": "0.48925057", "text": "def createPrivateRegistry(self):", "title": "" }, { "docid": "e30e00c66f66b2da820ebbfb3df3c03f", "score": "0.48807397", "text": "def device_reg(opp):\n return mock_device_registry(opp)", "title": "" }, { "docid": "e30e00c66f66b2da820ebbfb3df3c03f", "score": "0.48807397", "text": "def device_reg(opp):\n return mock_device_registry(opp)", "title": "" }, { "docid": "e30e00c66f66b2da820ebbfb3df3c03f", "score": "0.48807397", "text": "def device_reg(opp):\n return mock_device_registry(opp)", "title": "" }, { "docid": "4296d7af7240ecdfcda66368109bf569", "score": "0.48786384", "text": "def reg(self):\n return self._reg", "title": "" }, { "docid": "371c9aa1d2d0e23fc999729bdff078d3", "score": "0.48638448", "text": "def get_resources(self, version):\n return self.registry[version].values()", "title": "" }, { "docid": "a546c6220b60147a74ecc56538700759", "score": "0.48637742", "text": "async def get_rooms(self):\n self.rooms = await self._rooms_entry_point.get_instances()", "title": "" }, { "docid": "b948e92e586c8dc7680101a00e245c26", "score": "0.4862106", "text": "def ph_get_regnames(*args):\n return _idaapi.ph_get_regnames(*args)", "title": "" }, { "docid": "e56480c1375989b7c825da49b1ce369c", "score": "0.4852007", "text": "def ph_get_regDataSreg(*args):\n return _idaapi.ph_get_regDataSreg(*args)", "title": "" }, { "docid": "04e130822fa9c46a14a863185e029823", "score": "0.48503542", "text": "def get_registered_types(self, **kwargs):\n return self.__catalog.get_registered_types()", "title": "" }, { "docid": "a053bd1e78ed554c993f6f2ddb02c16c", "score": "0.48370728", "text": "def ph_get_regFirstSreg(*args):\n return _idaapi.ph_get_regFirstSreg(*args)", "title": "" }, { "docid": "2f4911e73edd3a649c61bf8c0df34a89", "score": "0.4824553", "text": "def load_one_registry(self, args):\n if args.file == None:\n logging.error(\"Reading from stdin\")\n args.file = sys.stdin\n\n registries = self.load_registry_from_file(args.file)\n if args.regname != None:\n for r in registries:\n if r.name == args.regname:\n return r\n logging.error(\"Failed to find requested registry %s.\" % args.regname)\n sys.exit(1)\n else:\n logging.warning(\"Multiple registries found, but no name given. 
Using first.\")\n return registries[0]", "title": "" }, { "docid": "aa6ed78cc05b3a316f4edd378a931aa5", "score": "0.48219496", "text": "def extract_registry(filename: Path):\n logger.info(\"Extracting Identifiers.org registry...\")\n mapping = download_namespace_mapping()\n logger.info(\"Loading...\")\n with Path(filename).open(mode=\"w\") as handle:\n # We have to convert `datetime` objects to their string representations to be\n # JSON compatible thus the argument `default=str`.\n json.dump(mapping, handle, indent=None, separators=(\",\", \":\"), default=str)", "title": "" }, { "docid": "be3b136858f7651d8fe8f331d9e9078e", "score": "0.4821147", "text": "def get_installation_search(self):\n return # osid.installation.InstallationSearch", "title": "" }, { "docid": "4826a7c6d7beb896abd41cf82dd587a3", "score": "0.4814195", "text": "def ireg(self):\n return self.dumps[0].IREG", "title": "" }, { "docid": "caca09399f7e7bd758fd1a71e1ad0052", "score": "0.48033443", "text": "def get_supported():\n return _registry.get_supported()", "title": "" }, { "docid": "18d3c23c24ec80004288ba338fb945e2", "score": "0.480138", "text": "def registry_get(context, test_type=\"positive\"):\n logger.info(\"registry_get | starting\")\n reg = random.choice(config.registries)\n command = assemble_command(context, \" registry get {0}\".format(reg))\n try:\n logger.debug(\"registry_get | running command {0}\".format(command))\n completed_proc = subprocess.run(\n command.split(), check=True, stdout=subprocess.PIPE\n )\n response = json.loads(completed_proc.stdout)\n reg_name = response[0][\"registry_name\"]\n reg_type = response[0][\"registry_type\"]\n reg_user = response[0][\"registry_user\"]\n logger.info(\n \"registry_get | got reg {0} {1} {2}\".format(reg_name, reg_type, reg_user)\n )\n log_results_simple(\n \"ok\", \"ok\", \"positive\", \"registry_get\", \"got registry {0}\".format(reg_name)\n )\n except Exception as e:\n log_explicit_failure(\n test_type, \"registry_get\", \"failed to get registry {0}\".format(reg)\n )\n logger.error(\"registry_get | error calling anchore-cli: {0}\".format(e))\n logger.info(\"registry_get | finished\")", "title": "" }, { "docid": "5f565d81e2ea5c1e02cbe506ac914989", "score": "0.4794435", "text": "def root(cls, reg_cls):\n reg_cls.REGISTRY = {}\n return reg_cls", "title": "" }, { "docid": "0fa6f8e964d6e99c511aec9f2cd63cd1", "score": "0.47927248", "text": "def routing_registry_name(self) -> Optional[str]:\n return pulumi.get(self, \"routing_registry_name\")", "title": "" }, { "docid": "0fa6f8e964d6e99c511aec9f2cd63cd1", "score": "0.47927248", "text": "def routing_registry_name(self) -> Optional[str]:\n return pulumi.get(self, \"routing_registry_name\")", "title": "" }, { "docid": "4ae3bf8f2ee1bbefa0ab8fd514cf6bc7", "score": "0.4788582", "text": "def createRegistry(self):\n\n # Get all the lowercase letters and numbers as thats all the names support\n letters = string.ascii_lowercase + string.digits\n\n # Create Payload for api request\n data = {\n \"name\": f\"ctf-{''.join(random.choice(letters) for i in range(10))}\",\n \"subscription_tier_slug\": \"basic\"\n }\n\n # Call APi and store details\n res = requests.post(\"https://api.digitalocean.com/v2/registry\", json=data, headers=self.headers)\n self.registry = json.loads(res.text)", "title": "" }, { "docid": "83c8a80879c3d0ffdcdaabfbedafdf62", "score": "0.47622687", "text": "def register_store():\n return store_routes.store_register(mongo)", "title": "" }, { "docid": "5a1eb05ae9a71e433a13c2bb13258d12", "score": "0.4754479", "text": "def 
get_interfaces():\n return InterfaceInfos.from_system()", "title": "" }, { "docid": "24bf7951be246d11495b746cadf152d1", "score": "0.47500074", "text": "def clear_default_registry():\n old_registry = kopf.get_default_registry()\n new_registry = type(old_registry)() # i.e. OperatorRegistry/GlobalRegistry\n kopf.set_default_registry(new_registry)\n try:\n yield new_registry\n finally:\n kopf.set_default_registry(old_registry)", "title": "" } ]
677a083435b83c78c455b1699bbae99c
Calculate the square values.
[ { "docid": "1a0de8957cc2d81a01a9a3c917a67237", "score": "0.6400062", "text": "def square(value: float) -> float:\n value = value ** 2\n return value", "title": "" } ]
[ { "docid": "a91879b1138e927693b42d7acb85182a", "score": "0.70643616", "text": "def sum_sqr_vals(self):\n\treturn numpy.sum(numpy.square(self.data))", "title": "" }, { "docid": "5c289eea3dd34a8eb2241d184e8ef879", "score": "0.7055393", "text": "def get_square(self):\n return number * number", "title": "" }, { "docid": "8ae8efb73fa4a2ecaae56cb533408803", "score": "0.6895146", "text": "def square(self):\r\n\r\n self.val = self.val ** 2\r\n return self", "title": "" }, { "docid": "fc76323c5946735856ea57fe1dfd6755", "score": "0.66861075", "text": "def my_square(y):\n\treturn(y ** 2)", "title": "" }, { "docid": "67808c898e0fce184d68d9883d430137", "score": "0.66487074", "text": "def sum_square(input):\n return _sum_square.apply(input)", "title": "" }, { "docid": "fe9d37d31af12df1d7fcc3f9662e0c22", "score": "0.6635047", "text": "def sum_of_squares(self) ->np.ndarray:\n return self._rn.sum_of_squares.numpy()", "title": "" }, { "docid": "fff6ec48a26d4b74e0b31c092ef82aa8", "score": "0.66135806", "text": "def square(n) -> float:\n\n return n*n", "title": "" }, { "docid": "7b2753b4ceff73ae6087ba8b997f0dc7", "score": "0.6594041", "text": "def sqr(self):\n\n return self.pls(0)", "title": "" }, { "docid": "3b536a479301be2b139c8c6b9e4fc88e", "score": "0.65131867", "text": "def sum_of_squares(self) ->torch.Tensor:\n return self._sum_of_squares", "title": "" }, { "docid": "282f07e990e347abf87c7c4d77e2bd03", "score": "0.65114367", "text": "def squared(self):\n return self._squared", "title": "" }, { "docid": "2a7603f561a85ea85758183dba9e16d9", "score": "0.6505812", "text": "def squared(x):\r\n return x ** 2", "title": "" }, { "docid": "2c79e41016b7cd1418f8818d0c1d974a", "score": "0.6501792", "text": "def square_of_sum(lo, hi):\n # just square the arithmetic sums formula\n return (hi * (hi + 1) / 2 - lo * (lo - 1) / 2) ** 2", "title": "" }, { "docid": "dcb870dc93aab2bfe112523a2745165f", "score": "0.6476941", "text": "def square(x):\r\n return x*x", "title": "" }, { "docid": "3a295d60479fe35d72e090e201b80478", "score": "0.64313954", "text": "def sqrt(self) -> Series:", "title": "" }, { "docid": "315415936127fa161d18c292e98c1c19", "score": "0.6392065", "text": "def count_sums(self):\n self.sum = 0\n self.sum_sq = 0\n for i in range(0, self.size):\n self.sum += self.data[i]\n for j in range(0, self.size):\n self.sum_sq += self.data[j]**2", "title": "" }, { "docid": "13a8397dc333b1678b52afdde8666866", "score": "0.6384531", "text": "def square(x):\n \n \n return ndarray()", "title": "" }, { "docid": "d5d138777aab56fdc8f4ec8c9694356b", "score": "0.63673866", "text": "def square_of_sum(lower, upper):\n\n # sum of sequence formula\n total = (lower + upper) * (upper - lower + 1) // 2\n return pow(total, 2)", "title": "" }, { "docid": "6e87e684c02d572b77974b6106797b22", "score": "0.63603544", "text": "def norm_sqr(self):\n\n value = self.norm()\n return value * value", "title": "" }, { "docid": "d0673d4645b1fe6f6ab250f4f04da8a6", "score": "0.633653", "text": "def squared_square(maximum):\n # Directions\n right = 0\n up = 1\n left = 2\n down = 3\n # Start\n x = 0\n y = 0\n direction = 0 # Start going right\n direction_change = 0\n steps = 0\n max_steps = 1\n\n # State\n location_bits = {\n (0, 0): 1\n }\n\n previous_sum = 1\n while previous_sum < maximum:\n previous_location = (x, y)\n if direction == right:\n x += 1\n elif direction == up:\n y += 1\n elif direction == left:\n x -= 1\n elif direction == down:\n y -= 1\n\n steps += 1\n new_sum = 0\n new_sum += location_bits.get((x - 1, y), 0)\n new_sum += 
location_bits.get((x + 1, y), 0 )\n new_sum += location_bits.get((x , y - 1), 0)\n new_sum += location_bits.get((x , y + 1), 0)\n new_sum += location_bits.get((x + 1 , y + 1), 0)\n new_sum += location_bits.get((x + 1 , y - 1), 0)\n new_sum += location_bits.get((x - 1 , y + 1), 0)\n new_sum += location_bits.get((x - 1 , y - 1), 0)\n location_bits[(x, y)] = new_sum\n previous_sum = location_bits[previous_location]\n if steps == max_steps:\n steps = 0\n direction_change += 1\n direction = (direction + 1) % 4\n # Max steps increases by one on evens\n if direction_change % 2 == 0:\n max_steps += 1\n return previous_sum", "title": "" }, { "docid": "4353a5ce55c6ff599edd55506f0f7669", "score": "0.6325286", "text": "def sum_of_squares(lo, hi):\n return ((2 * hi + 1) * (hi + 1) * hi - (2 * (lo - 1) + 1) * lo * (lo - 1)) / 6", "title": "" }, { "docid": "29e8beb92eab7645183d07586822b48e", "score": "0.6324364", "text": "def square(self, **kwargs) -> xr.DataArray:\n return matrix.square(self._obj, **kwargs)", "title": "" }, { "docid": "7eab551d4c40b92b638a881cd363caa0", "score": "0.6288694", "text": "def solve():\n\n return sum( [sum(sqrt_list(i,100)[0]) for i in xrange(2, 101) if not is_square(i)])", "title": "" }, { "docid": "2a51cb3d60950ebdb7ce5a4622193b92", "score": "0.6276741", "text": "def squares(narray):\n return pow_n(array, 2)", "title": "" }, { "docid": "923ff04ac6ab1063e2d7b42e0463131a", "score": "0.62583226", "text": "def total_sum_of_squares(y):\n return sum(v ** 2 for v in de_mean(y))", "title": "" }, { "docid": "af120dbd301c210163f995f803747d8c", "score": "0.62520075", "text": "def square(x):\n return x * x", "title": "" }, { "docid": "01c9a257b2ad9e576f75e1a3a94247a5", "score": "0.62465745", "text": "def square(x):\n return x*x", "title": "" }, { "docid": "01c9a257b2ad9e576f75e1a3a94247a5", "score": "0.62465745", "text": "def square(x):\n return x*x", "title": "" }, { "docid": "3ba305e764e62d1f6cda7ed79be0d16e", "score": "0.6219566", "text": "def get_squares(numbers):\n local_list = numbers\n local_result = []\n for i in local_list:\n r = lambda a: a * a\n local_result.append(r(i))\n # ez: local_result = [x*x for x in local_list]\n return local_result", "title": "" }, { "docid": "a20c44131ea7b9cfcb87a66bbcea0630", "score": "0.6209458", "text": "def sum_of_squares(v):\n\treturn sum(v_i ** 2 for v_i in v)", "title": "" }, { "docid": "025e7c63da82b44e38741aadb08a07fb", "score": "0.6209266", "text": "def sqr(self, *args):\n return _MontePython_cxx.Func_sqr(self, *args)", "title": "" }, { "docid": "78f75f92e39238f2a0d5ea500feeba88", "score": "0.61811167", "text": "def test(self, data): \n sum_x = 0.0\n sum_y = 0.0\n sum_x_squared = 0.0\n sum_y_squared = 0.0\n sum_xy = 0.0\n for i in range(len(data)):\n sum_x += data[i][0]\n sum_y += data[i][1]\n sum_xy += data[i][0] * data[i][1]\n sum_x_squared += data[i][0] * data[i][0]\n sum_y_squared += data[i][1] * data[i][1]\n\n r = float(len(data)) * sum_xy - sum_x * sum_y\n r /= math.sqrt(len(data) * sum_x_squared - sum_x * sum_x) * math.sqrt(len(data) * sum_y_squared - sum_y * sum_y)\n print \"r value: \" + str(r)\n\n return r", "title": "" }, { "docid": "edf4cbbb569f8e22fb3bbbcefc3fbe75", "score": "0.6165674", "text": "def test_square(self):\n tests = [\n # zero\n (\"0\", \"0\"),\n # secp256k1 prime (direct val in with 0 out) -> 0\n (\"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f\", \"0\"),\n # 0 -> secp256k1 prime (direct val in with 0 out)\n (\"0\", \"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f\"),\n # 
secp256k1 prime-1\n (\"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2e\", \"1\"),\n # secp256k1 prime-2\n (\"fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2d\", \"4\"),\n # Random sampling\n (\n \"b0ba920360ea8436a216128047aab9766d8faf468895eb5090fc8241ec758896\",\n \"133896b0b69fda8ce9f648b9a3af38f345290c9eea3cbd35bafcadf7c34653d3\",\n ),\n (\n \"c55d0d730b1d0285a1599995938b042a756e6e8857d390165ffab480af61cbd5\",\n \"cd81758b3f5877cbe7e5b0a10cebfa73bcbf0957ca6453e63ee8954ab7780bee\",\n ),\n (\n \"e89c1f9a70d93651a1ba4bca5b78658f00de65a66014a25544d3365b0ab82324\",\n \"39ffc7a43e5dbef78fd5d0354fb82c6d34f5a08735e34df29da14665b43aa1f\",\n ),\n (\n \"7dc26186079d22bcbe1614aa20ae627e62d72f9be7ad1e99cac0feb438956f05\",\n \"bf86bcfc4edb3d81f916853adfda80c07c57745b008b60f560b1912f95bce8ae\",\n ),\n ]\n\n for i, (a, res) in enumerate(tests):\n f = field.FieldVal.fromHex(a).normalize().square().normalize()\n expected = field.FieldVal.fromHex(res).normalize()\n assert f.equals(expected), f\"test {i}\"", "title": "" }, { "docid": "085f1d7b8575ae9920c8827534329649", "score": "0.6161802", "text": "def sum_of_squares(v):\n return sum(v_i * v_i for v_i in v)", "title": "" }, { "docid": "edee08ade5b8dc8b2069b433a3cf75cb", "score": "0.6146032", "text": "def square(num):\r\n return num ** 2", "title": "" }, { "docid": "b0115250cb83eb2ec61c9d5aff35a76c", "score": "0.61458814", "text": "def getSquares(self):\n return self.squares", "title": "" }, { "docid": "a376c2bfc0538630a0e885c1f511ce7b", "score": "0.61341417", "text": "def calculate(self):\n # print(self.raw_data)\n mygen = self.my_gen()\n\n for _ in range(self.n):\n\n rows, cols = next(mygen)\n\n m_sum, square_weight, j_count = self.get_neigbours(rows, cols)\n\n numerator = m_sum - (self.mean * j_count)\n\n S = math.sqrt( (self.square_sum / self.n) - (self.mean**2) )\n denominator = S * math.sqrt( ( (self.n * j_count) - square_weight**2) / self.n )\n self.gi_matrix[rows][cols] = numerator / denominator", "title": "" }, { "docid": "823524b0abd38820d5db704daf2e13db", "score": "0.6132455", "text": "def sum_squares(n):\n sum = 0\n for counter in range(n+1):\n sum += counter*counter\n return sum", "title": "" }, { "docid": "9e147cf25f966142cf668994c17a1fc7", "score": "0.61248934", "text": "def eval(self):\n if not self._is_update:\n raise RuntimeError(\"Please call the 'update' method before calling 'eval' method.\")\n\n if self.zero_diagonal:\n np.fill_diagonal(self.sqr_mtx_res, 0)\n\n if self.reduction == 'mean':\n self.sqr_mtx_res = np.mean(self.sqr_mtx_res, axis=-1)\n\n if self.reduction == 'sum':\n self.sqr_mtx_res = np.sum(self.sqr_mtx_res, axis=-1)\n\n return self.sqr_mtx_res", "title": "" }, { "docid": "0bdf7114138e6bb3f27fe8145a2ce375", "score": "0.61059374", "text": "def compute_square(n):\n sqrt = np.sqrt(n)\n return int(sqrt)", "title": "" }, { "docid": "b90354e64963f7440ebd75fc12b52ca4", "score": "0.61057734", "text": "def solution(N):\n total = 0\n sum_of_squares = 0\n for x in range(N + 1):\n sum_of_squares += x ** 2\n total += x\n return sum_of_squares - (total ** 2)", "title": "" }, { "docid": "7e012c74134ddd255999b46c20e6f255", "score": "0.6105563", "text": "def calc_sqrt_sum_sqr_sqr_sums(data):\n result = np.sqrt((data**2).sum()/(data.sum()**2))\n return result", "title": "" }, { "docid": "bd2cf1037eccad3dd874eea620176b0c", "score": "0.60989076", "text": "def _square_sum(data=None, axis=_Null, keepdims=_Null, exclude=_Null, name=None, attr=None, out=None, **kwargs):\n return (0,)", "title": "" }, { "docid": 
"0e503159496c3f3f8a86ea34bb255de0", "score": "0.60859025", "text": "def sum_of_squares(v):\n return dot(v, v)", "title": "" }, { "docid": "0e503159496c3f3f8a86ea34bb255de0", "score": "0.60859025", "text": "def sum_of_squares(v):\n return dot(v, v)", "title": "" }, { "docid": "c2e97d598df2457b6c9ae92044613d7e", "score": "0.6085728", "text": "def squareOfSum(n):\n sum = 0\n for i in xrange(1, n+1):\n sum += i\n return square(sum)", "title": "" }, { "docid": "2062549549d76f869ae36ac85d2b5a68", "score": "0.6085055", "text": "def square_pixels(self):\n fx = self.intrinsic_matrix[0, 0]\n fy = self.intrinsic_matrix[1, 1]\n fmean = 0.5 * (fx + fy)\n multiplier = np.array([[fmean / fx, 0, 0], [0, fmean / fy, 0], [0, 0, 1]])\n self.intrinsic_matrix = multiplier @ self.intrinsic_matrix", "title": "" }, { "docid": "74bd6b6ac167c24d4ff073fe418889fb", "score": "0.6084045", "text": "def square_area(a):\n return a**2", "title": "" }, { "docid": "4c630ca6357bb97bf75eaafb6568b1d8", "score": "0.60795903", "text": "def square(num1):\n return num1 * num1", "title": "" }, { "docid": "f8b0bf059d257c87757fe2d48638105e", "score": "0.60764325", "text": "def square(s):\n A = s**2\n return A", "title": "" }, { "docid": "420c9fd6ad22f6ee5f906d07a24df1db", "score": "0.6074369", "text": "def my_squares(iters):\n\n out = [i ** 2 for i in range(iters)]\n return out", "title": "" }, { "docid": "0ff980bacea3fa840835c48463f5167c", "score": "0.60713816", "text": "def square(n):\n return n**2", "title": "" }, { "docid": "0ff980bacea3fa840835c48463f5167c", "score": "0.60713816", "text": "def square(n):\n return n**2", "title": "" }, { "docid": "8efe64a45ebdc2a13fbed5a1b5f500cf", "score": "0.606684", "text": "def square(n):\n return n ** 2", "title": "" }, { "docid": "0ee94a701de40da407126cdf490715ac", "score": "0.6062623", "text": "def sum_of_squares(v: Vector) -> float:\n return dot(v,v)", "title": "" }, { "docid": "79ba4a110ff0d29a797db36781c4ff48", "score": "0.60619545", "text": "def computeAndScaleArraySums(self):\n self.sumRT()\n self.scaleRT()\n self.sumA()\n self.scaleA()\n self.Fluence()", "title": "" }, { "docid": "47008aa4e6909f8857552b37456a294e", "score": "0.60617554", "text": "def mySquare(x):\n return x*x", "title": "" }, { "docid": "9a75c0679694c51e73aabb42dbf90718", "score": "0.6060995", "text": "def squared(n):\n\tnew_value = n ** 2\n\tprint(new_value)", "title": "" }, { "docid": "c5fb5c65c274669667f1f47da7247357", "score": "0.60598", "text": "def square_area(side):\n return side*side", "title": "" }, { "docid": "da2933af82cd2cdcda7d9f33612c8535", "score": "0.6047621", "text": "def sum_n_square(self):\n sum_of_square = 0\n for param in self.layer.parameters():\n sum_of_square += torch.sum(torch.pow(param, 2))\n return sum_of_square", "title": "" }, { "docid": "37519b3f8af58353b02261de960672f3", "score": "0.6045802", "text": "def SS(self):\n return sum(self.pwr(self.center(self.y,self.mean())))", "title": "" }, { "docid": "dd15840a29d2dd15f18a385af54c6567", "score": "0.6043517", "text": "def _sumsquares(data,axis=0):\n return numpy.ma.sum(data**2,axis=axis)", "title": "" }, { "docid": "169eadf533d4444beb5ec27ff723bbdf", "score": "0.6042615", "text": "def sum_of_squares(v):\n return dot(v, v)", "title": "" }, { "docid": "169eadf533d4444beb5ec27ff723bbdf", "score": "0.6042615", "text": "def sum_of_squares(v):\n return dot(v, v)", "title": "" }, { "docid": "ac67e89b3c96101d2b061996fb8c282d", "score": "0.6036892", "text": "def sq(x):\n \n return x*x", "title": "" }, { "docid": "63968ec1f2aeca45757f2c75951a5125", 
"score": "0.6033498", "text": "def calc_s(self):\n return np.dot(np.transpose(self.args['r']), self.args['d'])", "title": "" }, { "docid": "a6c8832825eb86911472008423b1c47b", "score": "0.6032128", "text": "def coordinates(self, values):\n self._coordinates = values\n tempSum = 0\n for x in self.coordinates:\n tempSum = tempSum + x ** 2\n self._lenght = math.sqrt(tempSum)", "title": "" }, { "docid": "d46e70eb022d5258ce63222521a6bd93", "score": "0.60108334", "text": "def my_squares(iters):\n out = [i ** 2 for i in range(iters)]\n return out", "title": "" }, { "docid": "6e6cd5b10721f619b876e4f27ef60c61", "score": "0.598756", "text": "def square_area(side):\n return side * side", "title": "" }, { "docid": "0c9a92b353a40b1c87a607e9fe43eed2", "score": "0.5986941", "text": "def sum_of_squares(v: Vector) -> float:\n return dot(v, v)", "title": "" }, { "docid": "88f9760b33e869122fdfee2d59233f01", "score": "0.5977461", "text": "def sumOfSquares(n):\n sum = 0\n for i in xrange(1, n+1):\n sum += square(i)\n return sum", "title": "" }, { "docid": "e5f07c8957d59c512847d0d77a7296dc", "score": "0.5976977", "text": "def square(num1):\n sq = num1**num1\n\n return sq", "title": "" }, { "docid": "0e8286ecfb6169b50a57a6a49d94985c", "score": "0.59735185", "text": "def sum_of_squares(lower, upper):\n\n return sum(pow(i, 2) for i in range(lower, upper + 1))", "title": "" }, { "docid": "d81211cb912fb7bd0745ed2cd2b64368", "score": "0.59724563", "text": "def square(number):\n return number ** 2", "title": "" }, { "docid": "6c58b96e83c086abdadc05eec7dc5fdb", "score": "0.5971117", "text": "def square(num1):\n return num1*num1", "title": "" }, { "docid": "1e5fd070645cb769850a2523f2591af9", "score": "0.5965233", "text": "def square(self, base):\n return base ** 2", "title": "" }, { "docid": "92eca359dd8e4c02ae32d615ed293e30", "score": "0.594626", "text": "def square(number):\n sqr = number ** 2\n return sqr", "title": "" }, { "docid": "51b0054697b9bfd6c327d850fbe7c090", "score": "0.5936499", "text": "def sq(num):\n return num * num", "title": "" }, { "docid": "120ea4c8394d96edd9c4bc4ef1f7ca4f", "score": "0.5923819", "text": "def sum_square_difference(u_bound: int):\n s1 = 0\n for i in range(2, u_bound + 1):\n s1 += (i**3) - (i**2)\n return s1", "title": "" }, { "docid": "8fecea9371c7c33b1e0611e0679feef5", "score": "0.59181374", "text": "def length_squared(self):\n return sum([x*x for x in self.v])", "title": "" }, { "docid": "6340a3e54363ccfd768c635d3d7183ef", "score": "0.59120363", "text": "def sqrt(x,out):\n \n \n return ndarray()", "title": "" }, { "docid": "0aa9ef08bfd9805cd7c59c3cdba81a53", "score": "0.5894299", "text": "def square(number):\n return number**2", "title": "" }, { "docid": "30cdadebdf85bf9f8e16f3b9381e2774", "score": "0.5890708", "text": "def square(num1):\n squared = num1 **2\n return squared", "title": "" }, { "docid": "c47e1a2b7fcd83dfdc19187f661d7cf7", "score": "0.58633584", "text": "def squares(nums):\n return [k * k for k in nums]", "title": "" }, { "docid": "d017e0300b7656186e7949cae1c45f87", "score": "0.58597726", "text": "def _calculateSvalues(xarr, yarr, sigma2=1.):\n if len(xarr) != len(yarr):\n raise ValueError(\"Input xarr and yarr differ in length!\")\n if len(xarr) <= 1:\n raise ValueError(\"Input arrays must have 2 or more values elements.\")\n\n S = len(xarr) / sigma2\n Sx = numpy.sum(xarr / sigma2)\n Sy = numpy.sum(yarr / sigma2)\n Sxx = numpy.sum(xarr * xarr / sigma2)\n Sxy = numpy.sum(xarr * yarr / sigma2)\n return (S, Sx, Sy, Sxx, Sxy)", "title": "" }, { "docid": 
"d017e0300b7656186e7949cae1c45f87", "score": "0.58597726", "text": "def _calculateSvalues(xarr, yarr, sigma2=1.):\n if len(xarr) != len(yarr):\n raise ValueError(\"Input xarr and yarr differ in length!\")\n if len(xarr) <= 1:\n raise ValueError(\"Input arrays must have 2 or more values elements.\")\n\n S = len(xarr) / sigma2\n Sx = numpy.sum(xarr / sigma2)\n Sy = numpy.sum(yarr / sigma2)\n Sxx = numpy.sum(xarr * xarr / sigma2)\n Sxy = numpy.sum(xarr * yarr / sigma2)\n return (S, Sx, Sy, Sxx, Sxy)", "title": "" }, { "docid": "a488ab2b245edcee66ab78decd1382d3", "score": "0.5855203", "text": "def value_for(self, x):\n\t\tfsum = math.fsum( \n#\t\t\t\t\t\t[(xi-mi)**2. for xi, mi in izip(x, self.central_value)])\n\t\t\timap(lambda (xi, mi):(xi-mi)**2., izip(x, self.central_value)))\n#\t\t\t\t\t\t[(xi-mi)**2. for xi, mi in zip(x, self.central_value)])\n\n\t\treturn math.exp(fsum / self.mdub_sqr_sig)", "title": "" }, { "docid": "81f02e79a663a2d0d2236e1a53cd8071", "score": "0.5854327", "text": "def square(lst):\n\n # Needs only one argument\n\n return num1 * num1", "title": "" }, { "docid": "3575b5d8f161d65f23142ed2eb7cb9a7", "score": "0.58454883", "text": "def squared(num_list):\n\tnew_list = []\n\tfor num in num_list:\n\t\tsq_num = pow(num, 2)\n\t\tnew_list.append(sq_num)\n\treturn new_list", "title": "" }, { "docid": "41a3646ca49500677356e36328a4fd2f", "score": "0.5837431", "text": "def sum_of_squares():\n # list comprehension\n arguments = [int(argument) * int(argument) for argument in sys.argv[1:]]\n return sum(arguments)", "title": "" }, { "docid": "dd5e815afb6e9efcac2498bc41eac235", "score": "0.5824152", "text": "def get_y_square(self, x):\n return (x**3 + self.a*x + self.b) % self.p", "title": "" }, { "docid": "3a3bdccc190afc91f0c4cfaa9b6518d3", "score": "0.5821246", "text": "def square_number(number):\n acc = 0\n for i in range(number):\n acc += number\n\n return acc", "title": "" }, { "docid": "7505020c61a200633c1697f8f89ee0a9", "score": "0.581983", "text": "def square(value):\n #Define function body\n print(value ** 2)", "title": "" }, { "docid": "ec641346c92c4108ec1e747da8264b6e", "score": "0.5811082", "text": "def __abs__(self):\n return math.sqrt(self*self)", "title": "" }, { "docid": "d5db6d61b193af243969efaaee0eb6ae", "score": "0.5809329", "text": "def squares(s):\n \"*** YOUR CODE HERE ***\"\n return [int(sqrt(x)) for x in s if gmpy.is_square(x) == 1]", "title": "" }, { "docid": "c884461804a97c87fa1a61e3d9fdbb25", "score": "0.5806062", "text": "def _construct_sqrt(self) -> Matrix:", "title": "" }, { "docid": "9ab6d46db4c236774f7b1d0cc60e8c98", "score": "0.5798393", "text": "def sum_square_difference(n):\n square = 0\n sum = 0\n for i in xrange(1, n + 1):\n square += i**2\n sum += i\n return sum**2 - square", "title": "" }, { "docid": "6c0232f97baa6911d4360eaf2346527e", "score": "0.57894266", "text": "def squaresum(n):\n return (n*(n+1)*(2*n+1))//6", "title": "" }, { "docid": "c36ab81632fdb09c265190dc9999129e", "score": "0.5786454", "text": "def squared(self, value):\n self._squared = value", "title": "" }, { "docid": "849dcd8997ecb74d477e7c3fa74f8189", "score": "0.57784444", "text": "def square(num):\n result = num * num\n return result", "title": "" }, { "docid": "8faf6b58b433d49d112a2f1df334e88c", "score": "0.5773403", "text": "def sqrt(self):\n return Variable(self.val, self.der) ** 0.5", "title": "" } ]
d886c01c4e667f8b25ead077c50aa4ac
Transform a bytes sequence to a token compliant to the pattern.
[ { "docid": "07b377e2b88a307927be13499f4c07bf", "score": "0.0", "text": "def token(self, key):\n if len(key) < 1:\n raise PatternException(_('Password length must be at least 2'))\n\n n = int(key.hex(), base=16)\n d = len(self.gliphs)\n p = []\n\n p.append(self.gliphs[n % d])\n while len(p) < self.length:\n n = int(n/d) if int(n/d) > 0 else n\n if n < d:\n p.append(self.gliphs[n])\n break\n else:\n p.append(self.gliphs[n % d])\n\n if len(p) != self.length:\n raise PatternException()\n\n # while True:\n # p.append(self.gliphs[n % d])\n # n = int(n/d)\n # if n < d or len(p) == self.length:\n # break\n return ''.join(p)", "title": "" } ]
[ { "docid": "7f762237daede125fb9a62581f3b76ab", "score": "0.5276037", "text": "def bytes_transform(byte_str, start_idx, stop_idx, fction):\n return bytes_replace(byte_str, start_idx, stop_idx, fction(byte_str[start_idx:stop_idx]))", "title": "" }, { "docid": "e40b07b6af3046d5e25e441af64f9285", "score": "0.5256977", "text": "def from_bytes(self, ???):", "title": "" }, { "docid": "6c2fd03812c55cb44390239b8a48fb3b", "score": "0.524398", "text": "def token(uncapped_token):\n return uncapped_token", "title": "" }, { "docid": "5d2279cde7d03582d269a082877ba479", "score": "0.5141336", "text": "def create(cls: Type[Sequence], sequence: bytes, alphabet: Alphabet) -> Sequence:\n return cls(lib.imm_seq_create(sequence, alphabet.imm_abc), alphabet)", "title": "" }, { "docid": "8c7b9c3ee3721584c60240fa1526f664", "score": "0.51288867", "text": "def token(uncapped_token: Contract):\n return uncapped_token", "title": "" }, { "docid": "4e7e9277988e36f3de369aa1c91e3f77", "score": "0.5115126", "text": "def get_token_from_utf8(self, lineno, col_offset):\n # type: (int, int) -> Token\n return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset))", "title": "" }, { "docid": "b0fdee362c423d22cabbeea394a7f7e6", "score": "0.5100525", "text": "def test_decode_token():\n pass", "title": "" }, { "docid": "413efd6dc76a1dee3fdb3f213339353b", "score": "0.50903225", "text": "def Token(l, token):\n\n return Red(l, lambda _: token)", "title": "" }, { "docid": "d15b5109a7e6072f027c3a666acce3ce", "score": "0.5064242", "text": "def convert(source_token):\n array_regex = '\\w+\\[\\d*\\]'\n pointer_regex = '\\w+\\*'\n # match = re.search(array_regex, source_token)\n # if match:\n # return 'A'\n match = re.search(pointer_regex, source_token)\n if match:\n return 'P'\n return None", "title": "" }, { "docid": "2f60ed92fa634b750d4804450043677c", "score": "0.50241554", "text": "def tokenize(self, sequence: str) -> List[str]:\n raise NotImplementedError", "title": "" }, { "docid": "556c376e37132dbc1a4044aa9bad9466", "score": "0.49853447", "text": "def process(self, example: str) -> List[torch.Tensor]:\n return torch.tensor(self._tokenizer.encode(example, max_length=self.max_seq_len))", "title": "" }, { "docid": "a4415d8169d8424b05728b5ebcf1da00", "score": "0.49738657", "text": "def decode(self, s):", "title": "" }, { "docid": "a4415d8169d8424b05728b5ebcf1da00", "score": "0.49738657", "text": "def decode(self, s):", "title": "" }, { "docid": "1b13a5ab1570a2dd7c9d4a860ab0cbdd", "score": "0.49568224", "text": "def id_to_token(self, index):\r\n return self.decoder.get(index)", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, 
**kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "b350ad471dad0f2f32bfd69797ee2623", "score": "0.49052447", "text": "def from_bytes(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "8dd368c4eb622533e2f1097b41fac978", "score": "0.4901677", "text": "def bytes_to_nodes(buf):\n lst = []\n for i in range(0, len(buf), 4):\n l_type = buf[i]\n l_data = buf[i+1]\n r_type = buf[i+2]\n r_data = buf[i+3]\n lst.append(ReadNode(l_type, l_data, r_type, r_data))\n return lst", "title": "" }, { "docid": "e227a788d44fbacfc94322e2e8807dfe", "score": "0.4888496", "text": "def process_sequence(seq, whitelist):\n sym = ''.join(seq)\n out = validate_symbol(sym, whitelist)\n return out", "title": "" }, { "docid": "7c72a7942e1925080b6fd72090a8544d", "score": "0.48811868", "text": "def transform(token):\n if token == '#t':\n return True\n if token == '#f':\n return False\n if token[0] == '\"':\n return bytes(token[1:-1], \"utf-8\").decode('unicode-escape')\n if token.startswith(';'):\n return ';'\n if token.startswith('#b'):\n return int(token[2:], 2)\n if token.startswith('#o'):\n return int(token[2:], 8)\n if token.startswith('#d'):\n return int(token[2:])\n if token.startswith('#x'):\n return int(token[2:], 16)\n try:\n return int(token)\n except ValueError:\n try:\n return float(token)\n except ValueError:\n try:\n result = complex(token.replace('i', 'j'))\n # user can't write a+bj and form like i, 2i, 3i where no '+' appers\n if token.find('j') >= 0 or token.find('+') < 0:\n return Symbol(token.lower())\n return result\n except ValueError:\n try:\n return fractions.Fraction(token)\n except ValueError:\n return Symbol(token.lower())", "title": "" }, { "docid": "30ca118dfaa05096b2e68cf4b31bed0e", "score": "0.48722556", "text": "def decode(self, tokens: List[str]) -> str:\n return self.bpe.decode([int(token) for token in tokens])", "title": "" }, { "docid": "e0ae1fc618748ee86c844dfd04c99d69", "score": "0.48484638", "text": "def decode(self, *args, **kwargs):\n return 
self.tokenizer.decode(*args, **kwargs)", "title": "" }, { "docid": "e0ae1fc618748ee86c844dfd04c99d69", "score": "0.48484638", "text": "def decode(self, *args, **kwargs):\n return self.tokenizer.decode(*args, **kwargs)", "title": "" }, { "docid": "131b0a82770ff54e829fdd910333eb80", "score": "0.48438162", "text": "def encodetokens(self, tokens, addstart=False, fixlength=None):\n ln = len(tokens) + (1 if addstart else 0)\n x = np.zeros(ln, dtype=int)\n if addstart:\n x[0] = self.char2index[START]\n offset = 1\n else:\n offset = 0\n for t, token in enumerate(tokens):\n if token in self.char2index:\n x[offset + t] = self.char2index[token]\n else:\n print(\"WARNING: token\", token, \"not recognized\")\n \n # Null padding, if requested \n if fixlength is not None:\n xfix = np.zeros(fixlength, dtype=int)\n for i in range(min(ln,fixlength)):\n xfix[-i] = x[-i]\n for i in range(ln, fixlength):\n xfix[-i] = self.char2index[NULL]\n x = xfix\n \n return x", "title": "" }, { "docid": "3b2e14bd2b390275a2f56c59b6e1cd70", "score": "0.48350835", "text": "def buffer_before_token(self):\n r = \"\".join(i for i in map(lambda x: x.decode(\"utf-8\"), self.buffer))\n self.buffer = []\n return r", "title": "" }, { "docid": "1067cae97a5ebe21ad3f732c99dfbcb0", "score": "0.48310587", "text": "def unlex(tokens):", "title": "" }, { "docid": "e80ad2bd6f7e34516b68d3e310cdace2", "score": "0.48288685", "text": "def transform(self, x): # takes no other parameters (use fields initialized in constructor instead).\n if self.do_clean:\n x = self.clean(x)\n if self.tokenizer is None:\n raise ValueError('Tokenizer has not been initialized.')\n # other transforming to produce tensor for input layer of model\n x = self.tokenizer.texts_to_sequences(x)\n return pad_sequences(x, maxlen=self.max_sequence_length, padding=self.pad_type, truncating=self.trunc_type,\n value=0)", "title": "" }, { "docid": "85b16c4b48c1046f92198c19501ef4bc", "score": "0.4812624", "text": "def tokenize(src):\n\n pass", "title": "" }, { "docid": "f56229ea6732a9e4ea7778c8c95719e5", "score": "0.47853866", "text": "def MakePattern(self,content):\n return self.register(Pattern(content,reg=self))", "title": "" }, { "docid": "d70bedc5eb410387bb343b1553803fdc", "score": "0.47806108", "text": "def convert_single_example(tokenizer, example, max_seq_length=512):\n\n if isinstance(example, PaddingInputExample):\n input_ids = [0] * max_seq_length\n input_mask = [0] * max_seq_length\n segment_ids = [0] * max_seq_length\n label = 0\n return input_ids, input_mask, segment_ids, label\n\n tokens_a = tokenizer.tokenize(example.text_a)\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0 : (max_seq_length - 2)]\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n \n #print(tokens)\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n return input_ids, input_mask, segment_ids, example.label", "title": "" }, { "docid": "866f8bf19054ca295eaaded5b2ba9ac6", "score": "0.4780301", "text": "def _translate_tokens(self, original_tokens):\n # type: (Iterable[TokenInfo]) -> Iterator[Token]\n for index, tok in enumerate(patched_generate_tokens(original_tokens)):\n tok_type, tok_str, start, end, line = tok\n yield Token(tok_type, tok_str, start, end, line, index,\n self._line_numbers.line_to_offset(start[0], start[1]),\n self._line_numbers.line_to_offset(end[0], end[1]))", "title": "" }, { "docid": "98dc3c4f4250364fe1c6a949c4258d92", "score": "0.4771478", "text": "def id_to_token(self, idx):\n return self._id2token[idx]", "title": "" }, { "docid": "98dc3c4f4250364fe1c6a949c4258d92", "score": "0.4771478", "text": "def id_to_token(self, idx):\n return self._id2token[idx]", "title": "" }, { "docid": "6a8f0ed4aa9f33008c4bc84febc9391c", "score": "0.47680148", "text": "def forward(self, src_tokens, **kwargs):\n return self.decoder(src_tokens, **kwargs)", "title": "" }, { "docid": "4a570387ed4ddb443aa4eb63744ef1b7", "score": "0.47591636", "text": "def celbytes(token: lark.Token) -> celpy.celtypes.BytesType:\n def expand(match_iter: Iterable[Match[str]]) -> Iterator[int]:\n for match in (m.group() for m in match_iter):\n if len(match) == 1:\n yield from match.encode('utf-8')\n elif match[:2] == r'\\x':\n yield int(match[2:], 16)\n elif match[:2] == r'\\u':\n yield int(match[2:], 16)\n elif match[:1] == '\\\\' and len(match) == 4:\n yield int(match[1:], 8)\n else:\n yield ord(CEL_ESCAPES.get(match, match))\n\n text = token.value\n if text[:2].lower() == \"br\":\n # Raw; ignore ``\\`` escapes\n if text[2:5] == '\"\"\"' or text[2:5] == \"'''\":\n # Long\n expanded = celpy.celtypes.BytesType(ord(c) for c in text[5:-3])\n else:\n # Short\n expanded = celpy.celtypes.BytesType(ord(c) for c in text[3:-1])\n elif text[:1].lower() == \"b\":\n # Cooked; expand ``\\`` escapes\n if text[1:4] == '\"\"\"' or text[1:4] == \"'''\":\n # Long\n match_iter = CEL_ESCAPES_PAT.finditer(text[4:-3])\n else:\n # Short\n match_iter = CEL_ESCAPES_PAT.finditer(text[2:-1])\n expanded = celpy.celtypes.BytesType(expand(match_iter))\n else:\n raise ValueError(f\"Invalid bytes literal {token.value!r}\")\n return expanded", "title": "" }, { "docid": "e80e449cbc826426eb86d0b4d16e2b45", "score": "0.47420645", "text": "def test_init_from_bytes(self):\n s = self.SEQ(b\"ACGT\")\n self.assertEqual(s, \"ACGT\")", "title": "" }, { "docid": "7b0aeea4640def1da9b1a36d832cad39", "score": "0.47265285", "text": "def decode(self, bytes_, errors='strict'):\n decoder = self.IncrementalDecoder(errors=errors)\n return (\n decoder.decode(bytes_, final=True),\n len(bytes_),\n )", "title": "" }, { "docid": "09c720842cd37dae0cadb2c09a3a3e8c", "score": "0.47256678", "text": "def _digest_atom(raw_token: RawToken) -> Token:\n\n # Is this a preposition\n try:\n return Token(\n kind=TokenKind.PREPOSITION,\n value=Preposition(raw_token.value),\n location=raw_token.location,\n )\n except ValueError:\n pass\n\n # Is this an integer value?\n try:\n return Token(\n kind=TokenKind.NUMBER,\n value=int(raw_token.value),\n location=raw_token.location,\n )\n 
except ValueError:\n pass\n\n # A float?\n try:\n return Token(\n kind=TokenKind.NUMBER,\n value=float(raw_token.value),\n location=raw_token.location,\n )\n except ValueError:\n pass\n\n # A duration?\n duration_match = RE_DURATION.match(raw_token.value)\n if duration_match is not None:\n total_length = (\n int(duration_match.group(1) or '0') * 24 * 60 * 60 +\n int(duration_match.group(2) or '0') * 60 * 60 +\n int(duration_match.group(3) or '0') * 60 +\n int(duration_match.group(4) or '0')\n )\n return Token(\n kind=TokenKind.DURATION,\n value=total_length,\n location=raw_token.location,\n )\n\n # A literal?\n try:\n kind, value = LITERALS[raw_token.value]\n return Token(\n kind=kind,\n value=value,\n location=raw_token.location,\n )\n except KeyError:\n pass\n\n # Pass through, split by dots\n return Token(\n kind=TokenKind.ATOM,\n value=raw_token.value.split('.'),\n location=raw_token.location,\n )", "title": "" }, { "docid": "6dd68adc70bcd402f60402c2f113686f", "score": "0.4716139", "text": "def pattern(arg: str) -> Pattern[bytes]:\n try:\n return re.compile(str.encode(arg))\n except (TypeError, re.error) as err:\n raise argparse.ArgumentTypeError(err) from err", "title": "" }, { "docid": "8ab5d7e8c4b871f3ee44cd2e976a98f7", "score": "0.47142303", "text": "def _token_to_id(self, sequence_tokens, token_map, char_map, ngram=0,\n token_ngram_map=None, max_char_sequence_length=-1,\n max_char_length_per_token=-1):\n token_id_list = []\n char_id_list = []\n char_in_token_id_list = []\n ngram_id_list = []\n for token in sequence_tokens:\n char_id = [char_map.get(x, self.VOCAB_UNKNOWN) for x in token]\n char_id_list.extend(char_id[0:max_char_sequence_length])\n char_in_token = [char_map.get(x, self.VOCAB_UNKNOWN)\n for x in token[0:max_char_length_per_token]]\n char_in_token_id_list.append(char_in_token)\n\n token_id_list.append(\n token_map.get(token, token_map[self.VOCAB_UNKNOWN]))\n if ngram > 1:\n for j in range(2, ngram + 1):\n ngram_id_list.extend(\n token_ngram_map[x] for x in\n [\"\".join(sequence_tokens[k:k + j]) for k in\n range(len(sequence_tokens) - j + 1)] if x in\n token_ngram_map)\n if not sequence_tokens:\n token_id_list.append(self.VOCAB_PADDING)\n char_id_list.append(self.VOCAB_PADDING)\n char_in_token_id_list.append([self.VOCAB_PADDING])\n if not ngram_id_list:\n ngram_id_list.append(token_ngram_map[self.VOCAB_PADDING])\n return token_id_list, char_id_list, char_in_token_id_list, ngram_id_list", "title": "" }, { "docid": "f8d7b385f0066dbb55269cad519d0caa", "score": "0.47048274", "text": "def offset_to_tokens(self, offset, length):\n\n first_sent_id = None\n first_tok_id = None\n #print offset\n #print length\n\n for sent_id, (sent_json, sent_conll) in enumerate(\n zip(self.tokenized['sentences'], self.conll)):\n #print first_sent_id\n\n for tok_id, (token_json, token_conll) in enumerate(\n zip(sent_json['tokens'], sent_conll)):\n\n begin = token_json['characterOffsetBegin']\n end = token_json['characterOffsetEnd']\n\n if end < offset:\n continue\n\n if (offset + length) <= begin:\n\n\n if first_sent_id is not None:\n #print \"hello\"\n return ((first_sent_id, first_tok_id),\n (sent_id, tok_id))\n else:\n return None, None\n\n if first_sent_id is None:\n first_sent_id = sent_id\n first_tok_id = tok_id\n\n return None, None", "title": "" }, { "docid": "1c1235d83f0d6d11cafa56deb04e99b0", "score": "0.4704771", "text": "def tokenize(text):\n yield text, 0, len(text.encode('utf-16'))", "title": "" }, { "docid": "2fb0ed22b525a30b65966032dca4b3b4", "score": "0.4693938", "text": "def 
sleeve(self, payload: str) -> bytes:\n\n payload_bytes = payload.encode()\n timestamp_ms = get_timestamp_ms()\n ts_bytes = timestamp_ms.to_bytes(8, 'big')\n\n crc = zlib.crc32(payload_bytes + ts_bytes).to_bytes(8, 'big')\n\n return payload_bytes + self.sep_token + ts_bytes + self.sep_token + crc", "title": "" }, { "docid": "f44c654e1834c3ba8711e148880dba78", "score": "0.4692407", "text": "def decode_vector_of_t(as_bytes: typing.List[int]) -> list:\n raise NotImplementedError()", "title": "" }, { "docid": "bda6dfd7453b78f1573ff041a12d7945", "score": "0.46906424", "text": "def decode(data): #@NoSelf", "title": "" }, { "docid": "89841b64079a5df97a76a6b5d4e93723", "score": "0.46794814", "text": "def decompress_seq(x: int, length=16):\n bits = 64\n x = np.uint64(x)\n assert length <= (bits / 2 - 1)\n if x & (1 << (bits - 1)):\n return \"N\" * length\n result = bytearray(length)\n for i in range(length):\n result[(length - 1) - i] = bytearray(NUCS[x & np.uint64(0b11)].encode())[0]\n x = x >> np.uint64(2)\n return result.decode()", "title": "" }, { "docid": "daaf228f8549cafe37c8a2c9d7c4cdcc", "score": "0.4675138", "text": "def MakeSeq(self,content):\n return self.register(Seq(content,reg=self))", "title": "" }, { "docid": "4ec06388a5c2f6e199e3807f28d8afd1", "score": "0.46607724", "text": "def preprocess_seq(self, sequence, word2idx):\n story = []\n for value in sequence:\n #v = [word2idx[word] if word in word2idx else UNK_token for word in value.split()] + [EOS_token]\n story.append(word2idx[value] if value in word2idx else UNK_token)\n story = torch.Tensor(story)\n return story", "title": "" }, { "docid": "0cd54ed504a209aca452fd7629005189", "score": "0.46581414", "text": "def decode(self, token):\n\n # <unk>, <pad> and other special tokens will be decoded into ''.\n text = self.tokenizer.decode(token, skip_special_tokens=True)\n\n # Handle replacement characters caused by multi-byte-pair-encoding or\n # Unicode surrogates or multi-code-point graphemes like emojis.\n if self.replacement in text:\n n = -self.surrogates if self.surrogates > 0 else len(self.buffer)\n tokens = self.buffer[n:] + [token]\n text = self.tokenizer.decode(tokens, skip_special_tokens=True)\n\n # Check whether the last grapheme was successfully decoded.\n if text and text[-1] != self.replacement:\n text = text.replace(self.replacement, \"\")\n self.surrogates = 0\n else:\n text = \"\"\n self.surrogates += 1\n else:\n self.surrogates = 0\n\n # Handle whitespace between tokens.\n tokens = self.buffer + [token]\n prefix = self.tokenizer.decode(self.buffer, skip_special_tokens=True)\n whole = self.tokenizer.decode(tokens, skip_special_tokens=True)\n if prefix + \" \" + text == whole:\n text = \" \" + text\n\n # Update buffer and offsets.\n self.buffer = self.buffer[-4:] + [token]\n self.start = self.end\n self.end += len(text)\n\n return text", "title": "" }, { "docid": "064a96d0b6bd5203b12af62b9deb2857", "score": "0.46496508", "text": "def convert_single_example(tokenizer, example, max_seq_length=256):\n\n if isinstance(example, PaddingInputExample):\n input_ids = [0] * max_seq_length\n input_mask = [0] * max_seq_length\n segment_ids = [0] * max_seq_length\n label = 0\n return input_ids, input_mask, segment_ids, label\n\n tokens_a = tokenizer.tokenize(example.text_a)\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0: (max_seq_length - 2)]\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n 
tokens.append(\"[SEP]\")\n segment_ids.append(0)\n # print('Tokens', tokens[:3])\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n return input_ids, input_mask, segment_ids, example.label", "title": "" }, { "docid": "4aca65f135983d0b1455a88c39873286", "score": "0.46368912", "text": "def test_analyze_syntax_utf8():\n test_string = \"a \\u00e3 \\u0201 \\U0001f636 b\"\n byte_array = test_string.encode(\"utf8\")\n result = analyze.analyze_syntax(test_string, encoding=\"UTF8\")\n tokens = result[\"tokens\"]\n\n assert tokens[0][\"text\"][\"content\"] == \"a\"\n offset = tokens[0][\"text\"].get(\"beginOffset\", 0)\n assert (\n byte_array[offset : offset + 1].decode(\"utf8\") == tokens[0][\"text\"][\"content\"]\n )\n\n assert tokens[1][\"text\"][\"content\"] == \"\\u00e3\"\n offset = tokens[1][\"text\"].get(\"beginOffset\", 0)\n assert (\n byte_array[offset : offset + 2].decode(\"utf8\") == tokens[1][\"text\"][\"content\"]\n )\n\n assert tokens[2][\"text\"][\"content\"] == \"\\u0201\"\n offset = tokens[2][\"text\"].get(\"beginOffset\", 0)\n assert (\n byte_array[offset : offset + 2].decode(\"utf8\") == tokens[2][\"text\"][\"content\"]\n )\n\n assert tokens[3][\"text\"][\"content\"] == \"\\U0001f636\"\n offset = tokens[3][\"text\"].get(\"beginOffset\", 0)\n assert (\n byte_array[offset : offset + 4].decode(\"utf8\") == tokens[3][\"text\"][\"content\"]\n )\n\n # This demonstrates that the offset takes into account the variable-length\n # characters before the target token.\n assert tokens[4][\"text\"][\"content\"] == \"b\"\n offset = tokens[4][\"text\"].get(\"beginOffset\", 0)\n # 'b' is only one byte long\n assert (\n byte_array[offset : offset + 1].decode(\"utf8\") == tokens[4][\"text\"][\"content\"]\n )", "title": "" }, { "docid": "77e6b352dad7de4a7e3a7e515bc862ed", "score": "0.46360937", "text": "def from_buffer(self, buf):\n with self.lock:\n # if we're on python3, convert buf to bytes\n # otherwise this string is passed as wchar*\n # which is not what libmagic expects\n if type(buf) == str and str != bytes:\n buf = buf.encode('utf-8', errors='replace')\n return magic_buffer(self.cookie, buf)", "title": "" }, { "docid": "f38daa6545540da4342f0b4351441448", "score": "0.46323636", "text": "def from_bytes(self, patterns_bytes: bytes, **kwargs: Any) -> SpaczzRuler:\n cfg = srsly.msgpack_loads(patterns_bytes)\n if isinstance(cfg, dict):\n self.add_patterns(cfg.get(\"spaczz_patterns\", cfg))\n self.defaults = cfg.get(\"spaczz_defaults\", {})\n self.overwrite = cfg.get(\"spaczz_overwrite\", False)\n self.ent_id_sep = cfg.get(\"spaczz_ent_id_sep\", DEFAULT_ENT_ID_SEP)\n else:\n self.add_patterns(cfg)\n return self", "title": "" }, { "docid": "1195a07ae0b47020fcce655da6e7cb0f", "score": "0.46268785", "text": "def token_to_id(self, token):\r\n return self.encoder.get(token, self.encoder.get(self.unk_token))", "title": "" }, { "docid": "e335bb5e5858cdeefc8d8a413423dc42", "score": "0.4626585", "text": "def encode(self, seq):", "title": "" }, { "docid": "da8ae8d9b9c723caa40716bdb7057b0f", "score": "0.46238288", "text": "def seq2bytes (xs):\n count = 0\n byte = ByteContext()\n for x in 
xs:\n for i in range(0, x):\n if not byte.write(1):\n count += 1\n yield byte.pack()\n if not byte.write(0):\n count += 1\n yield byte.pack()\n if byte.count != 0:\n yield byte.pad()\n while count < 9:\n count += 1\n yield byte.pad()", "title": "" }, { "docid": "299fa8b85ed952067c780152c2aba17a", "score": "0.46157295", "text": "def byte_sequence(self) -> global___Statement.Declaration:", "title": "" }, { "docid": "471469c4b1ab4a9cc6f71193716fb8b2", "score": "0.4611826", "text": "def raw_tokenize(src: str) -> Iterable[RawToken]:\n # Raw token handling; there is a later semantic mapping stage which\n # annotates atoms for the special handling of keywords and numbers.\n # We treat tokenization as an explicit state machine.\n # State transitions emit the previous block along with the previous state.\n state, start = None, 0\n\n for index, character in enumerate(src):\n next_state = None\n major_category = unicodedata.category(character) + character\n\n for (from_state, category_match), to_state in STATE_MACHINE.items():\n if (\n from_state == state and\n major_category.startswith(category_match)\n ):\n next_state = to_state\n break\n\n if next_state is None:\n raise ParseError(\n \"Unexpected '{0!r}'\".format(character),\n (index, index + 1),\n )\n\n if next_state != state:\n if start != index:\n assert state is not None\n\n yield RawToken(\n kind=state,\n value=src[start:index],\n location=(start, index),\n )\n start = index\n state = next_state\n\n if start != len(src):\n assert state is not None\n\n yield RawToken(\n kind=state,\n value=src[start:],\n location=(start, index + 1),\n )", "title": "" }, { "docid": "47a47cf0743cb384f2688b85c91f4dba", "score": "0.46097898", "text": "def _make_tokens(self, count, token_type=None):\n if token_type:\n for _ in range(count):\n yield (self._make_token(token_type), token_type)\n else:\n for _ in range(count):\n yield self._make_token(token_type)", "title": "" }, { "docid": "31e28d4e60517958d830691744315de1", "score": "0.4604491", "text": "def pack(word, pattern):\n ret = []\n for i, char in enumerate(word):\n if pattern[i]:\n ret.append(char)\n return \"\".join(ret)", "title": "" }, { "docid": "9ab8c7bc8a9aa9c1aed0750a65d0b932", "score": "0.46011093", "text": "def from_buffer(data, encoding='pem'):\n return X509Csr.from_open_file(io.BytesIO(data), encoding)", "title": "" }, { "docid": "a8b0f69b101ec66d891d4ebacace7a59", "score": "0.4585652", "text": "def parse(token):\n\n pass", "title": "" }, { "docid": "534ab9d24da425a103b4e5adbc963f92", "score": "0.4585176", "text": "def _convert_id_to_token(self, index):\n if index in self.fairseq_ids_to_tokens:\n return self.fairseq_ids_to_tokens[index]\n return self.sp_model.IdToPiece(index - self.fairseq_offset)", "title": "" }, { "docid": "dde3694bbd929a742aae16423f2f8f40", "score": "0.45709175", "text": "def match(cls, characters: CharacterStream) -> Optional[\"Token\"]:\n return", "title": "" }, { "docid": "867cca93466a10ba096d724605a626a9", "score": "0.45705393", "text": "def transduce(self, src_token_ids):\n batch_size, src_seq_len = tf.unstack(tf.shape(src_token_ids))\n max_decode_length = src_seq_len + self._extra_decode_length\n decoding_fn = self._build_decoding_fn(max_decode_length)\n decoding_cache = self._build_decoding_cache(src_token_ids, batch_size)\n sos_ids = tf.ones([batch_size], dtype='int32') * SOS_ID\n\n bs = beam_search.BeamSearch(decoding_fn, \n self._embedding_logits_layer._vocab_size, \n batch_size,\n self._beam_width, \n self._alpha, \n max_decode_length, \n EOS_ID)\n\n decoded_ids, 
scores, decoding_cache = bs.search(sos_ids, decoding_cache)\n\n tgt_tgt_attention = [\n decoding_cache['layer_%d' % i]['tgt_tgt_attention'].numpy()[:, 0]\n for i in range(self._decoder_stack_size)]\n tgt_src_attention = [\n decoding_cache['layer_%d' % i]['tgt_src_attention'].numpy()[:, 0]\n for i in range(self._decoder_stack_size)]\n\n decoded_ids = decoded_ids[:, 0, 1:]\n scores = scores[:, 0] \n\n src_src_attention = [\n self._encoder._stack[i]._mha._attention_weights.numpy()\n for i in range(self._encoder._stack_size)]\n\n return (decoded_ids, scores, \n tgt_tgt_attention, tgt_src_attention, src_src_attention)", "title": "" }, { "docid": "eb7990c3acbe7b35906832c787f8a816", "score": "0.45548388", "text": "def decode_any(as_bytes: typing.List[int]) -> object:\n raise NotImplementedError()", "title": "" }, { "docid": "0644ce7c0847df5360df5c7eed707c5d", "score": "0.45509365", "text": "def __init__ ( self , seq , pattern ):\n\t\tif pattern . search ( seq ):\n\t\t\tprint \" Warning : sequence contains illegal characters \"\n\t\tself . data = seq . upper ()", "title": "" }, { "docid": "0ee26325d24337a7b6171e2acb0a5676", "score": "0.45478678", "text": "def decode(self, x):\n return x", "title": "" }, { "docid": "3f0380f5b6f215a70fa940ecf6822993", "score": "0.45444602", "text": "def _from_bytes(value, dummy, _int=int, _hexlify=_hexlify):\n return _int(_hexlify(value), 16)", "title": "" }, { "docid": "72f40c4e558cabd082e4e470b9bca9c4", "score": "0.45416388", "text": "def _create_token(self, payload, key):\n return jwt.encode(payload, key, algorithm='RS256')", "title": "" }, { "docid": "dd1579e8601c27bd490cabb6add38296", "score": "0.4539931", "text": "def read_token(line):\n token = line.strip().split('\\t')\n if len(token) == 6:\n token += ['_', '_', '_', '_']\n id, form, lemma, cpostag, postag, feats, head, deprel, phead, pdeprel = token\n try:\n head = int(head)\n except ValueError:\n head = '_'\n try:\n phead = int(phead)\n except ValueError:\n phead = '_'\n return Token(int(id), form, lemma, cpostag, postag, feats, head, deprel, phead, pdeprel)", "title": "" }, { "docid": "8317676cc8b473df2f0589006f78c0d7", "score": "0.45359534", "text": "def split_token_source(cls, prev_token, task_pk):\n for n in count(1):\n yield Token(\"{}/{}_{}\".format(prev_token, task_pk, n))", "title": "" }, { "docid": "4cf3466d7002c686eec7256fb7eb5c0e", "score": "0.4535876", "text": "def make_token(self, data: object) -> str:\n return self.serializer.dumps(data)", "title": "" }, { "docid": "ff9cf849d9f03bef8736dca640071ae9", "score": "0.45322838", "text": "def _preprocess(self, txt_seq):\n input = []\n for token in txt_seq.split():\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n input.append(self.word2id[\"<END>\"])\n input = torch.LongTensor(input)\n return input", "title": "" }, { "docid": "b9b63ad9f4add2d309883f24e44e8548", "score": "0.45321223", "text": "def create_token_type_ids_from_sequences(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method\n if token_ids_1 is None:\n # [CLS] X [SEP]\n return (len(token_ids_0) + 2) * [0]\n\n # [CLS] A [SEP] [SEP] B [SEP]\n return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)", "title": "" }, { "docid": "e94956781e20ce1d3a8cdb85ccad2d8b", "score": "0.4531757", "text": "def token(self, pos: int, goal: \"LexerCore.Goal\" = InputElementRegExp) -> Optional[Token]:\n newlines = []\n while 1:\n pos, nls = 
self.process_skippable(pos)\n newlines.extend(nls)\n\n # Check for common tokens (IdentifierName, Punctuator, NumericLiteral, StringLiteral, Template)\n\n # Identifier\n ident = self.identifiername_match.match(self.src, pos=pos)\n if ident:\n span = ident.span()\n identifier_src = self.src[span[0] : span[1]]\n sv = identifier_name_string_value(identifier_src, syntax_error_ctor=self.syntax_error_ctor)\n id_token = Token(type=\"IDENTIFIER\", src=self.src, value=sv, span=Span(*span), newlines=newlines)\n # Check Early Errors (section 11.6.1.1)\n identifier_name_early_errors(id_token, syntax_error_ctor=self.syntax_error_ctor)\n return id_token\n\n # NumericLiteral\n intconvert = lambda base: lambda span: int(self.src[span[0] + 2 : span[1]], base)\n for matcher, converter in (\n (self.binaryintegerliteral_match, intconvert(2)),\n (self.octalintegerliteral_match, intconvert(8)),\n (self.hexintegerliteral_match, intconvert(16)),\n (self.decimalliteral_match, lambda span: float(self.src[span[0] : span[1]])),\n ):\n nl = matcher.match(self.src, pos=pos)\n if nl:\n span = nl.span()\n return Token(\n type=\"NUMERIC\", src=self.src, value=converter(span), span=Span(*span), newlines=newlines\n )\n\n # Punctuator\n punct = self.punctuator_match.match(self.src, pos=pos)\n if punct:\n span = punct.span()\n return Token(\n type=punct.group(0), value=punct.group(0), src=self.src, span=Span(*span), newlines=newlines\n )\n\n # StringLiteral\n for matcher in (self.doublestringliteral_match, self.singlestringliteral_match):\n sl = matcher.match(self.src, pos=pos)\n if sl:\n span = sl.span()\n return Token(\n type=\"STRING\",\n src=self.src,\n value=self._string_value(self.src[span[0] : span[1]]),\n span=Span(*span),\n newlines=newlines,\n )\n\n # DivPunctuator is available for the InputElementDiv and InputElementTemplateTail goals.\n if goal in (self.InputElementDiv, self.InputElementTemplateTail):\n dp = self.divpunctuator_match.match(self.src, pos=pos)\n if dp:\n span = dp.span()\n return Token(\n type=dp.group(0), value=dp.group(0), src=self.src, span=Span(*span), newlines=newlines\n )\n\n # RightBracePunctuator is available for InputElementDiv or InputElementRegExp\n if goal in (self.InputElementDiv, self.InputElementRegExp):\n dbp = self.rightbracepunctuator_match.match(self.src, pos=pos)\n if dbp:\n span = dbp.span()\n return Token(\n type=dbp.group(0), value=dbp.group(0), src=self.src, span=Span(*span), newlines=newlines\n )\n\n # Regular Expressions available only with InputElementRegExp and InputElementRegExpOrTemplateTail\n if goal in (self.InputElementRegExp, self.InputElementRegExpOrTemplateTail):\n regex_literal = self.regularexpressionliteral_match.match(self.src, pos=pos)\n if regex_literal:\n span = regex_literal.span()\n return Token(\n type=\"REGEXP\",\n value=RegExp(\n utf_16_encode(regex_literal.group(\"body\")), utf_16_encode(regex_literal.group(\"flags\"))\n ),\n src=self.src,\n span=Span(*span),\n newlines=newlines,\n )\n\n # All productions get NoSubstitutionTemplate and TemplateHead\n # But only the \"TemplateTail\" goals get TemplateMiddle or TemplateTail\n for valid_goals, matcher, tokentype in (\n (\n (\n self.InputElementDiv,\n self.InputElementRegExp,\n self.InputElementRegExpOrTemplateTail,\n self.InputElementTemplateTail,\n ),\n self.nosubstitutiontemplate_match,\n \"NOSUBSTITUTIONTEMPLATE\",\n ),\n (\n (\n self.InputElementDiv,\n self.InputElementRegExp,\n self.InputElementRegExpOrTemplateTail,\n self.InputElementTemplateTail,\n ),\n self.templatehead_match,\n 
\"TEMPLATEHEAD\",\n ),\n (\n (self.InputElementRegExpOrTemplateTail, self.InputElementTemplateTail),\n self.templatemiddle_match,\n \"TEMPLATEMIDDLE\",\n ),\n (\n (self.InputElementRegExpOrTemplateTail, self.InputElementTemplateTail),\n self.templatetail_match,\n \"TEMPLATETAIL\",\n ),\n ):\n if goal in valid_goals:\n tmpl = matcher.match(self.src, pos=pos)\n if tmpl:\n span = tmpl.span()\n return Token(\n type=tokentype,\n value=Template(\n tv=self._TemplateValue(tmpl.group(\"tchars\")),\n trv=self._TemplateRawValue(tmpl.group(\"tchars\")),\n ),\n src=self.src,\n span=Span(*span),\n newlines=newlines,\n )\n\n # The end. If we still have input and we haven't returned, then this is an unrecognized token.\n # You might think this means we should raise a syntax error, but because there are alternate\n # lexical goals that turns out to be a really bad idea.\n return None", "title": "" }, { "docid": "5458ec0e7673559f1a34214aaf393685", "score": "0.45315385", "text": "def create_token(self, direccion, rol):\n contenido = dumps({'direccion': direccion, 'rol': rol}).encode()\n mensaje = b64encode(contenido).decode()\n \n firma = self.crear_firma(mensaje)\n \n token = '{0}.{1}'.format(mensaje, firma)\n return token", "title": "" }, { "docid": "ff79c7dada29506cc44d86654d243e4d", "score": "0.45314696", "text": "def __create_regex(self):\n self.lexer_regex = \"|\".join(self.tokens)\n logger.debug(f\"Generated tokenizer regex {self.lexer_regex}\")", "title": "" }, { "docid": "994c0775f93e7a1713c59f88a1bef7f5", "score": "0.45267844", "text": "def labeler(self, labels, tokens):\n encoded = []\n for idx, document in enumerate(tqdm(tokens)):\n tmp = [0 for char in range(len(document))]\n for name in labels[idx]:\n if re.match(r\"[^a-zA-Z]\", name):\n pattern = list(name)\n else:\n pattern = name\n # for indexes in re.finditer(name, document):\n # tmp[indexes.span()[0]:indexes.span()[1]] = [1 for _ in range(indexes.span()[1] - indexes.span()[0])]\n for i in range(len(document)):\n if document[i] == pattern[0] and document[i:i+len(pattern)] == pattern:\n tmp[i:i+len(pattern)] = [1 for _ in range(len(pattern))]\n encoded.append(tmp)\n\n # # Sanity check\n # for doc, enc in zip(tokens, encoded):\n # print(f\"{len(doc)}, {len(enc)}\")\n\n return encoded", "title": "" }, { "docid": "bebdee19047c5aa274687ddbeef13c21", "score": "0.45202413", "text": "def deserialize(token):\n\n if token.type == TYPE_BOOLEAN:\n return _to_boolean(token)\n elif token.type == TYPE_INTEGER:\n return _to_int(token)\n elif token.type == TYPE_FLOAT:\n return _to_float(token)\n elif token.type == TYPE_DATE:\n return _to_date(token)\n elif token.type in (TYPE_STRING, TYPE_MULTILINE_STRING, TYPE_BARE_STRING,\n TYPE_LITERAL_STRING, TYPE_MULTILINE_LITERAL_STRING):\n return _to_string(token)\n else:\n raise Exception('This should never happen!')", "title": "" }, { "docid": "2d95b0681926f2296d0432e2d2cd1fb4", "score": "0.4514391", "text": "def tokenize(src: str) -> Iterable[Token]:\n for raw_token in raw_tokenize(src):\n if (\n raw_token.kind in (RawTokenKind.COMMENT, RawTokenKind.WHITESPACE)\n ):\n continue\n\n if raw_token.kind == RawTokenKind.ATOM:\n yield _digest_atom(raw_token)\n else:\n yield Token(\n kind=RAW_TOKEN_KIND_TO_TOKEN_KIND[raw_token.kind],\n value=raw_token.value,\n location=raw_token.location,\n )", "title": "" }, { "docid": "879ec1329afe6b25083c1cce12c620e0", "score": "0.45094162", "text": "def sequence_encoding(sequence, str_to_idx):\n sequence_of_indexes = [str_to_idx[element] for element in sequence]\n return 
torch.tensor(sequence_of_indexes, dtype=torch.long)", "title": "" }, { "docid": "9536eb8cffbd1c5fe148c1c6cecdb3b7", "score": "0.45092708", "text": "def from_bytes(cls, bytes_in, output_path):\n\n magic_code = LEUnsigned.unpack(bytes_in[:2])\n list_size = LEUnsigned.unpack(bytes_in[2:4])\n length = LEUnsigned.unpack(bytes_in[4:8])\n\n assert magic_code == cls.MAGIC_CODE\n assert length == len(bytes_in) - 8\n\n actual_size = 0\n\n data = bytes_in[8:]\n\n force_break = 1000\n\n while not data and force_break > 0:\n data = PKIItem.from_bytes(data, output_path)\n force_break -= 1\n actual_size += 1\n\n assert actual_size == list_size", "title": "" }, { "docid": "5f697d8180654e692e00d1ad2d56e6c9", "score": "0.4493644", "text": "def _create_security_token(user):\n timestamp = int(time.time())\n plaintext = \"%x %s\" % (timestamp, user.email)\n nearest_mult_of_16 = 16 * ((len(plaintext) + 15) // 16)\n # Pad plaintest with whitespace to make the length a multiple of 16,\n # as this is a requirement of AES encryption.\n plaintext = plaintext.rjust(nearest_mult_of_16, ' ')\n if _DISABLE_CRYPTO:\n body = plaintext\n sig = \"sig\"\n else:\n key_storage = KeyStorage.get()\n body = AES.new(key_storage.aes_key, AES.MODE_CBC).encrypt(plaintext)\n hmac_key = key_storage.hmac_key\n if type(hmac_key) == unicode:\n # Crypto requires byte strings\n hmac_key = hmac_key.encode('utf8')\n sig = HMAC.HMAC(key=hmac_key, msg=body).hexdigest()\n return '%s:%s' % (sig, body)", "title": "" }, { "docid": "4a63c8fecbae52e5eba52e190ba32edc", "score": "0.44897673", "text": "def decode(code: bytes):# -> Transaction:\n vals: list[str] = str(bytes).split(' ')\n result: Transaction = Transaction()\n result.id = int(vals[0])\n result.time = int(vals[1])\n result.action = string_to_action(vals[2])\n result.acting_username = vals[3]\n result.source_account_id = int(vals[4])\n result.destination_account_id = int(vals[5])\n result.funds_amount = int(vals[6])\n return result", "title": "" }, { "docid": "0c71c990e0b158d0b30bfee2b681f5ca", "score": "0.448806", "text": "def new_token(*args, **kwargs):\n return uuid.uuid4().hex", "title": "" }, { "docid": "8c22ba3eda75035fec6b894f9eb7ea38", "score": "0.44877234", "text": "def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:\n token_list = self.tokenizer.ids_to_tokens(tokens)\n return token_list", "title": "" }, { "docid": "f69ace0590b15d847b34d9e2ea0625b2", "score": "0.44835877", "text": "def decode(self, encoded):", "title": "" } ]
28aaba9269dc7fcd05970ee089833be2
A fake refresh method for oauth2client service account credentials.
[ { "docid": "2b427e6162671612f5b4ab4d5b58f1d0", "score": "0.7772881", "text": "def _FakeRefreshOauth2clientServiceAccountCredentials(self, http):\n self.access_token = 'REFRESHED-ACCESS-TOKEN'\n self.token_expiry = _MakeFakeCredentialsRefreshExpiry()", "title": "" } ]
[ { "docid": "7e22844d1cbc6d74f370b67bf76f203a", "score": "0.768099", "text": "def testRefreshServiceAccountId(self):\n response = httplib2.Response({'status': httplib.OK})\n content = '{\"id_token\": \"old-id-token\"}'.encode()\n self.request_mock.return_value = response, content\n properties.VALUES.auth.credential_file_override.Set(self.json_file)\n loaded = store.Load()\n # token_response is initialized in the refresh method of oauth2client\n # credentials, which is mocked in the test. Manually initialize it here so\n # that the test can verify it will be updated later.\n loaded.token_response = {'id_token': 'old-id-token'}\n self.assertIsInstance(loaded, service_account.ServiceAccountCredentials)\n self.assertEqual(loaded.id_tokenb64, 'old-id-token')\n\n content = '{\"id_token\": \"fresh-id-token\"}'.encode()\n self.request_mock.return_value = response, content\n store.Refresh(loaded)\n self.assertEqual(loaded.id_tokenb64, 'fresh-id-token')\n self.assertEqual(loaded.token_response['id_token'], 'fresh-id-token')", "title": "" }, { "docid": "31b6e989dc9d4dfcaee611952431acd6", "score": "0.7350168", "text": "def _FakeRefreshOauth2clientUserCredentials(self, http):\n self.access_token = 'REFRESHED-ACCESS-TOKEN'\n self.token_expiry = _MakeFakeCredentialsRefreshExpiry()\n self.id_tokenb64 = 'REFRESHED-ID-TOKEN'", "title": "" }, { "docid": "206eb7b4919a66cb85d32a5ac18debc5", "score": "0.73198456", "text": "def _FakeRefreshGoogleAuthCredentials(self, http):\n del http\n self.token = 'REFRESHED-ACCESS-TOKEN'\n self.expiry = _MakeFakeCredentialsRefreshExpiry()\n self._id_token = 'REFRESHED-ID-TOKEN'", "title": "" }, { "docid": "a050acdd159a7aebd5e859cb96a79552", "score": "0.7151989", "text": "def refresh_credentials():\n global auth_token\n auth_token = update_auth_token_string()", "title": "" }, { "docid": "ab3d2f6321471dc645e3bda732d40a69", "score": "0.7127549", "text": "def _FakeRefreshGoogleAuthIdTokenCredentials(self, http):\n del http\n self.token = 'REFRESHED-ID-TOKEN'\n self.expiry = _MakeFakeCredentialsRefreshExpiry()", "title": "" }, { "docid": "2a5296cf4b817aa0e5f4c6771b6558c4", "score": "0.68493485", "text": "def refresh(self, access_token):\n payload = {'key': self._lr_object._get_api_key(), 'secret': self._lr_object._get_api_secret(),'access_token':access_token}\n url = self._lr_object.SECURE_API_URL + tokenEndpoint + \"/refresh\"\n return self._lr_object._get_json(url, payload)", "title": "" }, { "docid": "2c82635a565fd7a85ccb6655c1f66329", "score": "0.65625507", "text": "def testRefreshServiceAccountId_GoogleAuth_EmptyIdToken(self):\n self.StartObjectPatch(\n google_auth_httplib2.Request,\n '__call__',\n return_value=_MakeFakeEmptyIdTokenRefreshResponseGoogleAuth())\n\n creds = self.MakeServiceAccountCredentialsGoogleAuth()\n expected_creds_dict = {\n 'token':\n 'access_token',\n 'id_tokenb64':\n 'id-token',\n 'service_account_email':\n '[email protected]',\n 'client_id':\n 'bar.apps.googleusercontent.com',\n 'private_key':\n '-----BEGIN PRIVATE KEY-----\\nasdf\\n-----END PRIVATE KEY-----\\n',\n 'private_key_id':\n 'key-id',\n 'project_id':\n 'bar-test',\n }\n self.assertIsInstance(creds, google_auth_service_account.Credentials)\n self.AssertCredentialsEqual(creds, expected_creds_dict)\n\n store.Refresh(creds)\n expected_creds_dict['token'] = 'REFRESHED-ACCESS-TOKEN'\n self.AssertCredentialsEqual(creds, expected_creds_dict)", "title": "" }, { "docid": "e5e4e6c1e2f0178378e3729dd70e5696", "score": "0.6549732", "text": "def refresh(self):\r\n if self.refresh_token is not None:\r\n 
self.token = newid()\r\n self.secret = newsecret()", "title": "" }, { "docid": "0ffddc58433f7698bb09b9ec21e5fa94", "score": "0.65211755", "text": "def call_auth_refresh_endpoint(api_client, token):\n url = reverse('auth-refresh')\n return api_client.post(\n path=url, data=json.dumps({\n \"token\": token\n }), content_type='application/json'\n )", "title": "" }, { "docid": "2bdadc7a051e3d61830b40e4531fad2b", "score": "0.648176", "text": "def testRefreshServiceAccountId_GoogleAuth_IdTokenRefreshFailure(self):\n self.StartObjectPatch(\n google_auth_httplib2.Request,\n '__call__',\n return_value=_MakeFakeIdTokenRefreshFailureGoogleAuth())\n\n creds = self.MakeServiceAccountCredentialsGoogleAuth()\n expected_creds_dict = {\n 'token':\n 'access_token',\n 'id_tokenb64':\n 'id-token',\n 'service_account_email':\n '[email protected]',\n 'client_id':\n 'bar.apps.googleusercontent.com',\n 'private_key':\n '-----BEGIN PRIVATE KEY-----\\nasdf\\n-----END PRIVATE KEY-----\\n',\n 'private_key_id':\n 'key-id',\n 'project_id':\n 'bar-test',\n }\n self.assertIsInstance(creds, google_auth_service_account.Credentials)\n self.AssertCredentialsEqual(creds, expected_creds_dict)\n\n store.Refresh(creds)\n expected_creds_dict['token'] = 'REFRESHED-ACCESS-TOKEN'\n self.AssertCredentialsEqual(creds, expected_creds_dict)", "title": "" }, { "docid": "0a3a15cf9286f104dfbf3542cce41828", "score": "0.6438297", "text": "def _MakeFakeOauth2clientServiceAccountIdTokenRefreshResponse():\n response = httplib2.Response({'status': httplib.OK})\n content = b'{\"id_token\": \"REFRESHED-ID-TOKEN\"}'\n return response, content", "title": "" }, { "docid": "6e1a7fef6fd209ae746ec75b7dda3a15", "score": "0.64249814", "text": "def refresh(\n oidc_client_id,\n oidc_client_secret,\n oidc_refresh_token,\n oidc_access_token,\n oidc_url,\n oidc_agent_account,\n auth_file,\n):\n # Get the right endpoint from GOCDB\n auth_file_contents = []\n with open(auth_file, \"r\") as f:\n for raw_line in f.readlines():\n line = raw_line.strip()\n if \"OpenStack\" in line:\n auth_tokens = []\n for token in line.split(\";\"):\n if token.strip().startswith(\"password\"):\n access_token = token.split(\"=\")[1].strip()\n if access_token[0] in [\"'\", '\"']:\n access_token = access_token[1:-1]\n # FIXME(enolfc): add verification\n payload = jwt.decode(\n access_token, options={\"verify_signature\": False}\n )\n now = int(time.time())\n expires = int(payload[\"exp\"])\n if expires - now < 300:\n access_token = get_access_token(\n oidc_access_token,\n oidc_refresh_token,\n oidc_client_id,\n oidc_client_secret,\n oidc_url,\n oidc_agent_account,\n )\n auth_tokens.append(\"password = %s\" % access_token)\n else:\n auth_tokens.append(token.strip())\n auth_file_contents.append(\"; \".join(auth_tokens))\n elif line:\n auth_file_contents.append(line)\n with open(auth_file, \"w+\") as f:\n f.write(\"\\n\".join(auth_file_contents))", "title": "" }, { "docid": "7689145fb5a1861318abdc61311af616", "score": "0.64147854", "text": "def refresh_token(self, client, scope):\n raise TypeError(\"Subclass me!\")", "title": "" }, { "docid": "6066525c88ca1cc660825b15b913f165", "score": "0.6406316", "text": "def _reload_credentials(self):\n if self.vb > 2:\n self.print(f\"Reloading {self} credentials!\")\n\n if self.rlc is None:\n raise Exception(\"Got no function at instantiation...\")\n \n # Reset this objects cred attrs...\n self.rlc()\n\n # Propagate the new creds to the QBA object...\n for attr in [\"refresh_token\", \"access_token\"]:\n setattr(self.qba, attr, getattr(self, attr))", 
"title": "" }, { "docid": "2c4ab46b2cd1073e7b57f6b8cf171249", "score": "0.63862425", "text": "def refresh(self, refresh_token):\n\n # TODO\n\n pass", "title": "" }, { "docid": "5d4bf765bda0cf9d1bd9cf990bc5c754", "score": "0.6378115", "text": "def refresh_token(oauth_token):\n\n payload = {\n 'grant_type': 'refresh_token',\n 'client_id': settings.CONSUMER_KEY,\n 'client_secret': settings.CONSUMER_SECRET,\n 'refresh_token': oauth_token.refresh_token\n }\n\n # Post payload to Salesforce Oauth server\n r = requests.post(\n oauth_token.instance_url + '/services/oauth2/token',\n headers={\n 'Content-Type': 'application/x-www-form-urlencoded'\n },\n data=payload\n )\n # Decode the JSON response from Salesforce Oauth server\n decoded = json.loads(r.content)\n if 'error' in decoded:\n print decoded\n oauth_token.active = False\n oauth_token.save()\n raise SalesforceExpiredRefreshToken\n else:\n oauth_token.active = True\n oauth_token.access_token = decoded['access_token']\n oauth_token.instance_url = decoded['instance_url']\n oauth_token.save()", "title": "" }, { "docid": "888c2c5dac9f0cfc82b9214ba50143c0", "score": "0.62934315", "text": "def refresh(self, access_token=None, **kwargs):\n if not self.token_lock.locked():\n with self.token_lock:\n if access_token == self.access_token or access_token is None:\n if self.developer_token is not None and not any(\n [\n os.getenv(\"PAN_ACCESS_TOKEN\"),\n self._credentials_found_in_instance,\n ]\n ):\n parsed_provider = urlparse(self.developer_token_provider)\n url = \"{}://{}\".format(\n parsed_provider.scheme, parsed_provider.netloc\n )\n endpoint = parsed_provider.path\n r = self._httpclient.request(\n method=\"POST\",\n url=url,\n endpoint=endpoint,\n headers={\n \"Authorization\": \"Bearer {}\".format(\n self.developer_token\n )\n },\n timeout=30,\n raise_for_status=True,\n )\n\n elif all([self.client_id, self.client_secret, self.refresh_token]):\n data = {\n \"client_id\": self.client_id,\n \"client_secret\": self.client_secret,\n \"refresh_token\": self.refresh_token,\n \"grant_type\": \"refresh_token\",\n }\n r = self._httpclient.request(\n method=\"POST\",\n url=self.token_url,\n json=data,\n endpoint=\"/api/oauth2/RequestToken\",\n **kwargs\n )\n else:\n raise PartialCredentialsError(\n \"Missing one or more required credentials\"\n )\n\n if r:\n if not r.ok:\n raise CortexError(\n \"%s %s: %s\" % (r.status_code, r.reason, r.text)\n )\n try:\n r_json = r.json()\n except ValueError as e:\n raise CortexError(\"Invalid JSON: %s\" % e)\n else:\n if r.json().get(\"error_description\") or r.json().get(\n \"error\"\n ):\n raise CortexError(r.text)\n self.access_token = r_json.get(\"access_token\", None)\n self.jwt_exp = self._decode_exp(self.access_token_)\n if r_json.get(\"refresh_token\", None):\n self.refresh_token = r_json.get(\"refresh_token\")\n self.write_credentials()\n return self.access_token_", "title": "" }, { "docid": "2c6c1d569b8162a5b09e436384d4f6df", "score": "0.6266431", "text": "async def refresh_credentials(app: web.Application, nocache: bool=False):\n logger.info(\"Refreshing credentials\")\n try:\n Mappings._credentials, Mappings._account_id_to_credentials_id_map = await _load(\n app[\"access_control_api\"].credentials_list, app[\"redis\"], transformations.CREDENTIALS,\n bytes(f\"{__name__}:credentials\", encoding=\"utf8\"), \"account_id\", nocache\n )\n\n Mappings._account_id_to_site_id_map = {\n detail[\"account_id\"]: detail[\"site_id\"] for detail in Mappings._credentials.values()\n }\n\n Mappings._account_id_to_credentials_map = 
{\n detail[\"account_id\"]: detail for detail in Mappings._credentials.values()\n }\n except Exception as e:\n sentry.captureException()\n logger.error(e)", "title": "" }, { "docid": "68c9a7174ea4661a60e4b5ab89d6f1cb", "score": "0.6261401", "text": "def _send_refresh_request(user_social):\n strategy = load_strategy()\n try:\n user_social.refresh_token(strategy)\n except HTTPError as exc:\n if exc.response.status_code in (400, 401,):\n raise InvalidCredentialStored(\n message='Received a {} status code from the OAUTH server'.format(\n exc.response.status_code),\n http_status_code=exc.response.status_code\n )\n raise", "title": "" }, { "docid": "524731178b4b50a7a126bef1b72ce6f4", "score": "0.6249467", "text": "def refresh():\n current_user = get_jwt_identity()\n access_token = create_access_token(identity=current_user)\n ret = {\"access_token\": access_token}\n add_token_to_database(access_token, app.config[\"JWT_IDENTITY_CLAIM\"])\n return jsonify(ret), 200", "title": "" }, { "docid": "18103a8d889c5c2ed9d018f4bdae5e6b", "score": "0.62490296", "text": "def refresh():\n current_user = get_jwt_identity()\n access_token = create_access_token(identity=current_user)\n return jsonify(access_token=access_token)", "title": "" }, { "docid": "99958636753d452421d92f8ba1d8954b", "score": "0.6222091", "text": "def renewToken(user):\n renew_url = 'https://www.googleapis.com/oauth2/v4/token'\n social_creadetials = UserSocialCredentials.object.get(user=user)\n data = {\n \"client_id\": '705813183307-hminde5i1ejhm790gl6t2ct0j6n7vft0.apps.googleusercontent.com',\n \"client_secret\": 'NCxFWXMsV4fgU36zmuh0fk1N',\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": social_creadetials.refresh_token\n }\n res = requests.post(renew_url, data=data)\n access_token = res.json()['access_token']\n social_creadetials.access_token = access_token\n social_creadetials.save()\n return access_token", "title": "" }, { "docid": "3948b059b1e454bc0cbfc0d8f9081ec0", "score": "0.6208849", "text": "def refresh_auth(self):\n self._access_token = self._get_access_token()\n if not self._access_token:\n raise PackitException(\n \"Unable to obtain access token. 
You may need to regenerate the refresh token.\"\n )\n self.session.headers.update(\n {\n \"Authorization\": f\"Bearer {self._access_token}\",\n \"Accept\": \"application/json\",\n }\n )", "title": "" }, { "docid": "41c906fca17e7d6becaee50f06055e38", "score": "0.6186037", "text": "def _refresh_credentials(self):\n # type: () -> Dict[str, Any]\n data = self.service.refresh_token(\n self.batch, token_callback=self.token_callback\n )\n return {\n \"access_key\": data[\"awsSecretKeyId\"],\n \"secret_key\": data[\"awsSecretAccessKey\"],\n \"token\": data[\"awsSessionToken\"],\n \"expiry_time\": datetime.fromtimestamp(\n data[\"expiration\"] / 1000, tz=tzlocal()\n ).isoformat(),\n }", "title": "" }, { "docid": "fc624a895d74ae9471c4a41ac7e9d68c", "score": "0.61734676", "text": "def test_authentication_refresh(self):\n pass", "title": "" }, { "docid": "bebea85ee2c9a2dd16caaebdb1200a41", "score": "0.60915434", "text": "def test_refresh(self):\n\n response = self.create_stub_participant()\n\n data = {\"refresh\": response.data[\"tokens\"][\"refresh\"]}\n\n response = self.client.post(URL_AUTH_REFRESH, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "1a87a21b7ec90823d7a11b33fdf4aad4", "score": "0.608212", "text": "def exchange_refresh_token(self):\n self.access_token = self._get_tokens(form_data={\n \"client_id\": self.application_id,\n \"client_secret\": self.application_secret,\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refresh_token,\n })['access_token']", "title": "" }, { "docid": "d158ec6cc2e49680db77f75178b9993a", "score": "0.6066983", "text": "async def refresh_token(Authorize:AuthJWT=Depends()):\n try:\n Authorize.jwt_refresh_token_required()\n\n except Exception as e:\n raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED\n , detail=\"Please provide valid refresh token\"\n \n )\n \n current_user=Authorize.get_jwt_subject()\n\n access_token=Authorize.create_access_token(subject=current_user)\n\n return jsonable_encoder({\"access_token\":access_token})", "title": "" }, { "docid": "8adbc43056b1b63efd103db54f3a0cf7", "score": "0.6004857", "text": "def oauth_step2(key, scope, code):\n\n # Use scope-specific token exchange and storage steps.\n if scope == GOOGLE_CALENDAR_SCOPE:\n credentials = _google_calendar_step2(key, code)\n storage = GoogleCalendarStorage(key)\n credentials.set_store(storage)\n try:\n credentials.refresh(build_http())\n except HttpAccessTokenRefreshError as e:\n storage.delete()\n error('Token refresh error: %s' % e)\n else:\n error('Unknown OAuth scope: %s' % scope)", "title": "" }, { "docid": "9be2f6d0f3a217708b1e1b517e8f9e18", "score": "0.6004658", "text": "def refresh_oauth_key(external_account, force=False):\n if external_account.expires_at is None and not force:\n return\n\n if force or (external_account.expires_at - datetime.utcnow()).total_seconds() < settings.REFRESH_TIME:\n key = refresh_v2_token(settings.BOX_KEY, settings.BOX_SECRET, external_account.refresh_token)\n\n external_account.oauth_key = key['access_token']\n external_account.refresh_token = key['refresh_token']\n external_account.expires_at = datetime.utcfromtimestamp(time.time() + key['expires_in'])\n external_account.save()", "title": "" }, { "docid": "4f866dc21280117b59f2daa9506f5f6d", "score": "0.5945299", "text": "def refresh_token(self, refresh_token, return_json=False):\n session = OAuth2Session(client_id=self._client_id)\n auth = {\n 'client_id': self._client_id,\n 'client_secret': self._client_secret,\n 
}\n new_token = session.refresh_token(\n self.EXCHANGE_ACCESS_TOKEN_URL, refresh_token=refresh_token,\n **auth\n )\n self._access_token = session.access_token\n if return_json:\n return new_token\n else:\n return AccessToken.new_from_json_dict(new_token)", "title": "" }, { "docid": "67e4638b9e6fb151ea3fae4449189f90", "score": "0.58976007", "text": "def refresh():\n print(\"refresh request\")\n old_token = request.get_data()\n new_token = guard.refresh_jwt_token(old_token)\n ret = {'access_token': new_token}\n return ret, 200", "title": "" }, { "docid": "31b7b0758e211a45cf636e8149658eea", "score": "0.5890625", "text": "def test_update(self):\n oauth = mixer.blend('api.Oauth')\n oauth = OauthService().update(oauth.id, token = \"12345\")\n assert oauth.token == \"12345\", 'Should have the same token'", "title": "" }, { "docid": "ca1b2242d9b9825f1b56719f04eab314", "score": "0.5880324", "text": "def _refresh_access_token(user):\n body = {\n \"grant_type\": \"refresh_token\",\n \"client_id\": accounts_settings.CLIENT_ID, # from Product\n \"client_secret\": accounts_settings.CLIENT_SECRET, # from Product\n \"refresh_token\": user.refreshtoken.token, # refresh token from user\n }\n try:\n data = requests.post(\n url=accounts_settings.PLATFORM_URL + \"/accounts/token/\", data=body\n )\n if data.status_code == 200: # Access token refreshed successfully\n data = data.json()\n # Update Access token\n user.accesstoken.token = data[\"access_token\"]\n user.accesstoken.expires_at = timezone.now() + timedelta(\n seconds=data[\"expires_in\"]\n )\n user.accesstoken.save()\n\n # Update Refresh Token\n user.refreshtoken.token = data[\"refresh_token\"]\n user.refreshtoken.save()\n\n return True\n except requests.exceptions.RequestException: # Can't connect to platform\n return False\n return False", "title": "" }, { "docid": "4699ba669f37cadb5dfbcd903541a142", "score": "0.58693975", "text": "def refresh(self, refresh_token):\n if \"Authorization\" in self.header:\n del self.header[\"Authorization\"]\n\n response = self._get_gfurl(\n self.BASE_URL + \"/access_tokens/refresh\",\n mode=self._POST,\n json_payload={\"refresh_token\" : refresh_token}\n )\n\n self.header[\"Authorization\"] = response[\"access_token\"]\n\n return response[\"refresh_token\"]", "title": "" }, { "docid": "5d0ce8ec56e4457f5596bc25a87c7ef4", "score": "0.5843002", "text": "def renew_oauth_token(self):\n return self.request_oauth_token(self.request_saml_assertion(\n self.machine_user, self.machine_pw))", "title": "" }, { "docid": "596c4666ffff6b8abeab9c41407395a4", "score": "0.5841381", "text": "def refresh_token(Authorize: AuthJWT = Depends()):\n Authorize.jwt_refresh_token_required()\n current_user = Authorize.get_jwt_identity()\n ret = {\n 'access_token': AuthJWT.create_access_token(identity=current_user)\n }\n return current_user", "title": "" }, { "docid": "5c089879507ed67d7ceb7bac671a798d", "score": "0.5822811", "text": "def test_update_service_account(self):\n pass", "title": "" }, { "docid": "e90cac4ad4572d6e142940439ce93d02", "score": "0.5774321", "text": "def refresh_access_token(user):\n refresh_token = user.refresh_key\n\n payload = {\n 'grant_type': 'refresh_token',\n 'client_id': settings.oauth2_id,\n 'redirect_uri': settings.oauth2_uri,\n 'client_secret': settings.oauth2_key,\n 'refresh_token': refresh_token\n }\n response = requests.post(\n settings.BASE_URL + 'login/oauth2/token',\n data=payload\n )\n\n if 'access_token' not in response.json():\n app.logger.warning((\n 'Access token not in json. 
Bad api key or refresh token.\\n'\n 'URL: {}\\n'\n 'Status Code: {}\\n'\n 'Payload: {}\\n'\n 'Session: {}'\n ).format(response.url, response.status_code, payload, session))\n return {\n 'access_token': None,\n 'expiration_date': None\n }\n\n api_key = response.json()['access_token']\n app.logger.info(\n 'New access token created\\n User: {0}'.format(user.user_id)\n )\n\n if 'expires_in' not in response.json():\n app.logger.warning((\n 'expires_in not in json. Bad api key or refresh token.\\n'\n 'URL: {}\\n'\n 'Status Code: {}\\n'\n 'Payload: {}\\n'\n 'Session: {}'\n ).format(response.url, response.status_code, payload, session))\n return {\n 'access_token': None,\n 'expiration_date': None\n }\n\n current_time = int(time.time())\n new_expiration_date = current_time + response.json()['expires_in']\n\n # Update expiration date in db\n user.expires_in = new_expiration_date\n db.session.commit()\n\n # Confirm that expiration date has been updated\n updated_user = Users.query.filter_by(user_id=int(user.user_id)).first()\n if updated_user.expires_in != new_expiration_date:\n readable_expires_in = time.strftime(\n '%a, %d %b %Y %H:%M:%S',\n time.localtime(updated_user.expires_in)\n )\n readable_new_expiration = time.strftime(\n '%a, %d %b %Y %H:%M:%S',\n time.localtime(new_expiration_date)\n )\n app.logger.error((\n 'Error in updating user\\'s expiration time in the db:\\n'\n 'session: {}\\n'\n 'DB expires_in: {}\\n'\n 'new_expiration_date: {}'\n ).format(session, readable_expires_in, readable_new_expiration))\n return {\n 'access_token': None,\n 'expiration_date': None\n }\n\n return {\n 'access_token': api_key,\n 'expiration_date': new_expiration_date\n }", "title": "" }, { "docid": "de5f72bdf6ec218cab6fde6e1746c2e3", "score": "0.5770917", "text": "def refresh_token(self, refresh_token: str):\n logger.debug('Token will be refresh')\n return self.retrieve_token({'Authorization': refresh_token}, True)", "title": "" }, { "docid": "d42e7b5e1c962103ed0aeb916d4ce9f6", "score": "0.57678074", "text": "def oauth2_refresh_token(\n self,\n refresh_token: str,\n *,\n body_params: dict[str, t.Any] | None = None,\n ) -> OAuthTokenResponse:\n log.info(\"Executing token refresh without client credentials\")\n form_data = {\n \"refresh_token\": refresh_token,\n \"grant_type\": \"refresh_token\",\n \"client_id\": self.client_id,\n }\n return self.oauth2_token(form_data, body_params=body_params)", "title": "" }, { "docid": "140cf4f3052b281820f8f32e7f4c7b4b", "score": "0.5733136", "text": "def testRefreshGceIdToken(self):\n mock_GetIdToken = self.StartObjectPatch( # pylint: disable=invalid-name\n c_gce._GCEMetadata, 'GetIdToken', return_value='test-id-token')\n test_cred = oauth2client_gce.AppAssertionCredentials()\n test_cred.token_response = {'id_token': 'old-id-token'}\n # Mock Refresh request\n http_mock = mock.Mock()\n store.Refresh(\n test_cred,\n http_client=http_mock,\n gce_token_format='full',\n gce_include_license=True)\n self.assertEqual(test_cred.id_tokenb64, 'test-id-token')\n self.assertEqual(test_cred.token_response['id_token'], 'test-id-token')\n mock_GetIdToken.assert_called_once_with(\n mock.ANY,\n include_license=True,\n token_format='full')", "title": "" }, { "docid": "1e887a26fd2264b04807f2cacd183f51", "score": "0.57153994", "text": "def _MakeFakeCredentialsRefreshExpiry():\n return datetime.datetime.utcnow() + datetime.timedelta(seconds=3599)", "title": "" }, { "docid": "334ba975dd504d7f05f81552e5ab1ae6", "score": "0.5702459", "text": "async def refresh_helper():\n await refresh()", "title": 
"" }, { "docid": "6ed118bf05c21f74be74a0f312e2b9be", "score": "0.57016885", "text": "def testRefreshGoogleAuthGceIdToken(self):\n mock_GetIdToken = self.StartObjectPatch( # pylint: disable=invalid-name\n c_gce._GCEMetadata,\n 'GetIdToken',\n return_value='test-id-token')\n mock_gce_cred_refresh = self.StartObjectPatch( # pylint: disable=invalid-name\n google_auth_gce.Credentials, 'refresh')\n test_cred = google_auth_gce.Credentials()\n test_cred._id_token = 'old-id-token'\n # Mock Refresh request\n http_mock = mock.Mock()\n http_mock.Request.return_value = mock.Mock()\n store.Refresh(\n test_cred,\n http_client=http_mock,\n gce_token_format='full',\n gce_include_license=True)\n self.assertEqual(test_cred.id_tokenb64, 'test-id-token')\n self.assertEqual(test_cred._id_token, 'test-id-token')\n mock_GetIdToken.assert_called_once_with(\n mock.ANY, include_license=True, token_format='full')\n mock_gce_cred_refresh.assert_called_once()", "title": "" }, { "docid": "8454964b63566c77ea1b38341f2f1a47", "score": "0.5694171", "text": "def testRefreshTokenRenewal(self):\n oldRefreshToken = 'oldRefreshToken'\n additionalData = 'someAdditionalData'\n newAuthToken = 'newAuthToken'\n newRefreshToken = 'newRefreshToken'\n self._REFRESH_TOKEN_STORAGE.store(\n oldRefreshToken, self._VALID_CLIENT, self._VALID_SCOPE, additionalData)\n tokenResource = TokenResource(\n self._TOKEN_FACTORY, self._PERSISTENT_STORAGE, self._REFRESH_TOKEN_STORAGE,\n self._AUTH_TOKEN_STORAGE, self._CLIENT_STORAGE, minRefreshTokenLifeTime=0,\n passwordManager=self._PASSWORD_MANAGER)\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': oldRefreshToken\n }, authentication=self._VALID_CLIENT)\n self._TOKEN_FACTORY.expectTokenRequest(\n newAuthToken, tokenResource.authTokenLifeTime,\n self._VALID_CLIENT, self._VALID_SCOPE, additionalData)\n self._TOKEN_FACTORY.expectTokenRequest(\n newRefreshToken, None, self._VALID_CLIENT, self._VALID_SCOPE, additionalData)\n result = tokenResource.render_POST(request)\n self._TOKEN_FACTORY.assertAllTokensRequested()\n self.assertFalse(\n self._REFRESH_TOKEN_STORAGE.contains(oldRefreshToken),\n msg='Expected the token resource to remove an old refresh token from the token storage.'\n )\n self.assertValidTokenResponse(\n request, result, newAuthToken, tokenResource.authTokenLifeTime,\n expectedRefreshToken=newRefreshToken, expectedScope=self._VALID_SCOPE,\n expectedAdditionalData=additionalData)", "title": "" }, { "docid": "57cf31b3906b48478b6cc2e653d06c8a", "score": "0.5646301", "text": "def refresh_tokens(self) -> Dict[str, Union[str, int]]:\n LOGGER.info(\"Refreshing tokens ...\")\n token = self._oauth.refresh_token(OAUTH2_TOKEN)\n\n if self.token_updater is not None:\n self.token_updater(token)\n\n return token", "title": "" }, { "docid": "4d2a099be8dffeb8d46e8f7dce27efe7", "score": "0.5622207", "text": "def refresh_access_token(self):\n post_data = {\n 'grant_type': 'refresh_token',\n 'refresh_token': self.refresh_token,\n 'redirect_uri': self._config['redirect_uri'],\n }\n\n response = self._token_request(post_data)\n\n self.access_token = response['access_token']\n\n # optional response parameters\n if response.get('refresh_token'):\n self.refresh_token = response['refresh_token']", "title": "" }, { "docid": "41a68cf6f0421affd32bb55611d2340e", "score": "0.5610217", "text": "async def refresh_later(self) -> None:\n exp = jwt.decode(self.token, options={\"verify_signature\": False})[\"exp\"]\n seconds_to_exp = int(exp - 
arrow.utcnow().timestamp())\n sleep_time = seconds_to_exp - 60*60*24*2\n log.info(f\"Will refresh token in {sleep_time} seconds.\")\n\n await asyncio.sleep(sleep_time)\n await self.refresh_token()", "title": "" }, { "docid": "02bd73fff24d3143ab3cc6c864e8f6a9", "score": "0.5607684", "text": "def test_renew_service_token(self):\n pass", "title": "" }, { "docid": "1e86f89870197f3f6a37882a11d93d23", "score": "0.5607486", "text": "def on_token_refresh(self, oauth_token, oauth_refresh_token):\n raise NotImplemented", "title": "" }, { "docid": "ef377f4aa50cf5fc0a80154030e19f05", "score": "0.5580044", "text": "def refresh_access_token(client_id, client_secret, refresh_token):\n post_data = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'refresh_token': refresh_token,\n 'grant_type': 'refresh_token'\n }\n not_set = next((k for k in post_data.keys() if post_data[k] is None), None)\n if not_set:\n raise ValueError(f'\"{not_set}\" not set, it is needed in order to refresh access token')\n parsed_url = urllib.parse.urlparse(API_BASE_URL)\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json;charset=utf-8',\n }\n conn = HTTPSConnection(parsed_url.hostname, parsed_url.port)\n url = os.path.join('/', API_V1, 'access_token')\n conn.request('POST', url, body=json.dumps(post_data), headers=headers)\n resp = conn.getresponse()\n data = json.loads(resp.read().decode())\n return data", "title": "" }, { "docid": "5cc654b1b3d5f54acc439bcbc434b749", "score": "0.55560327", "text": "def refresh_token(self, token_refresh=\"\"):\n if not token_refresh:\n if self.token_refresh:\n token_refresh = self.token_refresh\n else:\n raise ValueError(\"Refresh token not set\")\n headers = {\"Content-type\": \"application/json\"}\n payload = {\"refresh\": token_refresh}\n try:\n full_url = utils.urljoin(self.host, \"/api/v1/auth/refresh\")\n response = requests.post(full_url, data=json.dumps(payload), headers=headers, verify=self.tls_verify)\n except RequestException:\n raise exceptions.TaigaRestException(full_url, 400, \"NETWORK ERROR\", \"POST\")\n if response.status_code != 200:\n raise exceptions.TaigaRestException(full_url, response.status_code, response.text, \"POST\")\n self.token = response.json()[\"auth_token\"]\n self.token_refresh = response.json()[\"refresh\"]\n self.raw_request = RequestMaker(\"/api/v1\", self.host, self.token, \"Bearer\", self.tls_verify)\n self._init_resources()", "title": "" }, { "docid": "1bb056a85e9560add4715b748ad7b277", "score": "0.55478734", "text": "async def __refresh_access_token(self, refresh_token):\r\n res = await self.post(URIs.token_refresh, json={\"refreshToken\": refresh_token})\r\n res.raise_for_status()\r\n body = res.json()\r\n self.__access_token = body[\"accessToken\"]", "title": "" }, { "docid": "b6ce0aef379c27d74f2dcce1ade3d660", "score": "0.55455947", "text": "def refresh_token(self):\n\t\tu = input('Username: ')\n\t\tp = getpass.getpass()\n\t\th = self.get_headers()\n\t\th.update({'Content-Type': 'application/x-www-form-urlencoded'})\n\t\tr = requests.post(f'{self.API_BASE}/api/token', data={'username': u, 'password': p}, headers=h)\n\t\tprint(r)\n\t\tprint(r.text)\n\t\tprint(r.json())\n\t\tif r.status_code == 200:\n\t\t\tself.API_TOKEN = r.json()['access_token']\n\n\t\t\twith open(self.conf_file) as cf:\n\t\t\t\tconfig = json.load(cf)\n\t\t\t\tconfig.update({\"API_TOKEN\": self.API_TOKEN})\n\n\t\t\twith open(self.conf_file, 'w') as cf:\n\t\t\t\tjson.dump(config, cf, indent=4)", "title": "" }, { "docid": 
"84cb5d982e4a3147277631733641698e", "score": "0.55351573", "text": "def GOA2GetRefreshToken(*args):\n return _gdal.GOA2GetRefreshToken(*args)", "title": "" }, { "docid": "e509add61b5347617b30c28ddb2e8268", "score": "0.55330133", "text": "def test_refresh_token(self):\n payload = {'email': '[email protected]', 'password': 'password123'}\n User.objects.create_user(**payload)\n res = self.client.post(TOKEN_OBTAIN_URL, payload)\n\n refreh_token = res.data['refresh']\n res2 = self.client.post(TOKEN_REFRESH_URL, {'refresh': refreh_token})\n self.assertIn('access', res2.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "556583b04f1b56e10bfcd2eafe22109b", "score": "0.5528366", "text": "def refresh_token(self) -> WyzeResponse:\n if self._refresh_token is None:\n raise WyzeClientConfigurationError(\"client is not logged in\")\n response = self._api_client().refresh_token(refresh_token=self._refresh_token)\n self._update_session(access_token=response[\"access_token\"], refresh_token=response[\"refresh_token\"])\n return response", "title": "" }, { "docid": "102cbfa47265a9e4fe50325e01d8cb3e", "score": "0.5527025", "text": "def _refresh_id_token(auth_config):\n refresh_token = auth_config.get('refresh-token')\n\n if not refresh_token:\n raise RuntimeError('id-token missing or expired and refresh-token is missing')\n\n client_id = auth_config.get('client-id')\n if not client_id:\n raise RuntimeError('client-id not found in auth config')\n\n client_secret = auth_config.get('client-secret')\n if not client_secret:\n raise RuntimeError('client-secret not found in auth config')\n\n\n token_endpoint = _token_endpoint(auth_config)\n data = {\n 'grant_type': 'refresh_token',\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'refresh_token': refresh_token,\n }\n r = requests.post(token_endpoint, data=data)\n r.raise_for_status()\n return r.json()['id_token']", "title": "" }, { "docid": "2dc99f30751c876ea4b4a5c5fa7ba903", "score": "0.55213654", "text": "def test_auto_refresh(admin_frozen: KeycloakAdmin, realm: str):\n admin = admin_frozen\n # Test get refresh\n admin.connection.custom_headers = {\n \"Authorization\": \"Bearer bad\",\n \"Content-Type\": \"application/json\",\n }\n\n with pytest.raises(KeycloakAuthenticationError) as err:\n admin.get_realm(realm_name=realm)\n assert err.match('401: b\\'{\"error\":\"HTTP 401 Unauthorized\"}\\'')\n\n # Freeze time to simulate the access token expiring\n with freezegun.freeze_time(\"2023-02-25 10:05:00\"):\n assert admin.connection.expires_at < datetime_parser.parse(\"2023-02-25 10:05:00\")\n assert admin.get_realm(realm_name=realm)\n assert admin.connection.expires_at > datetime_parser.parse(\"2023-02-25 10:05:00\")\n\n # Test bad refresh token, but first make sure access token has expired again\n with freezegun.freeze_time(\"2023-02-25 10:10:00\"):\n admin.connection.custom_headers = {\"Content-Type\": \"application/json\"}\n admin.connection.token[\"refresh_token\"] = \"bad\"\n with pytest.raises(KeycloakPostError) as err:\n admin.get_realm(realm_name=\"test-refresh\")\n assert err.match(\n '400: b\\'{\"error\":\"invalid_grant\",\"error_description\":\"Invalid refresh token\"}\\''\n )\n admin.connection.get_token()\n\n # Test post refresh\n with freezegun.freeze_time(\"2023-02-25 10:15:00\"):\n assert admin.connection.expires_at < datetime_parser.parse(\"2023-02-25 10:15:00\")\n admin.connection.token = None\n assert admin.create_realm(payload={\"realm\": \"test-refresh\"}) == b\"\"\n assert 
admin.connection.expires_at > datetime_parser.parse(\"2023-02-25 10:15:00\")\n\n # Test update refresh\n with freezegun.freeze_time(\"2023-02-25 10:25:00\"):\n assert admin.connection.expires_at < datetime_parser.parse(\"2023-02-25 10:25:00\")\n admin.connection.token = None\n assert (\n admin.update_realm(realm_name=\"test-refresh\", payload={\"accountTheme\": \"test\"})\n == dict()\n )\n assert admin.connection.expires_at > datetime_parser.parse(\"2023-02-25 10:25:00\")\n\n # Test delete refresh\n with freezegun.freeze_time(\"2023-02-25 10:35:00\"):\n assert admin.connection.expires_at < datetime_parser.parse(\"2023-02-25 10:35:00\")\n admin.connection.token = None\n assert admin.delete_realm(realm_name=\"test-refresh\") == dict()\n assert admin.connection.expires_at > datetime_parser.parse(\"2023-02-25 10:35:00\")", "title": "" }, { "docid": "bdaa3a322d60f1f2381132ae71198f42", "score": "0.5521282", "text": "async def _async_try_refresh_token(self) -> None:\n assert isinstance(self.envoy.auth, EnvoyTokenAuth)\n _LOGGER.debug(\"%s: Trying to refresh token\", self.name)\n try:\n await self.envoy.auth.refresh()\n except EnvoyError as err:\n # If we can't refresh the token, we try again later\n # If the token actually ends up expiring, we'll\n # re-authenticate with username/password and get a new token\n # or log an error if that fails\n _LOGGER.debug(\"%s: Error refreshing token: %s\", err, self.name)\n return\n self._async_update_saved_token()", "title": "" }, { "docid": "ef50470cb65de7a05989f1ceef4a19ab", "score": "0.55142397", "text": "def test_refresh_correct(self, testclient, joeseed):\n\n token = self.generate_jwt(joeseed, 1, 1)\n\n post_body = {\n \"token\": token\n }\n\n expected = {\n \"status_code\": 200,\n \"body\": {\n \"token\": None,\n }\n }\n\n testclient.post_request_test_helper(post_body, expected)", "title": "" }, { "docid": "9bf9e1d8d2497598fdb845ae23ecd758", "score": "0.5513572", "text": "def _handle_expiration(self):\n exp_query = {'client_id': self._client_id,\n 'client_secret': self._client_secret,\n 'grant_type': 'refresh_token',\n 'refresh_token': self._refresh_token}\n self._post_and_store_tokens('https://www.strava.com/api/v3/oauth/token', exp_query)", "title": "" }, { "docid": "b9a18df88982f237764bd4be2272483f", "score": "0.5494254", "text": "def test_invalidate_service_token(self):\n pass", "title": "" }, { "docid": "6af3c958c7314b4b46f01c86af579df4", "score": "0.5494007", "text": "def refresh_token(refresh_token):\r\n \r\n return None", "title": "" }, { "docid": "6420f1139cc0f791595f197fd4d8fdf0", "score": "0.54927844", "text": "def refresh_token(self):\r\n payload = {'grant_type': 'refresh_token',\r\n 'refresh_token': self.token_info['refresh_token']}\r\n header = Spotify._make_headers(CLIENT_ID, CLIENT_SECRET)\r\n response = requests.post('https://accounts.spotify.com/api/token',\r\n headers=header,\r\n data=payload,\r\n verify=True)\r\n try:\r\n response.raise_for_status()\r\n response = response.json()\r\n self.token_info['access_token'] = response['access_token']\r\n self.token_info['expires_at'] = int(time.time()) + response['expires_in']\r\n if response.get('refresh_token'):\r\n self.token_info['refresh_token'] = response['refresh_token']\r\n\r\n Spotify.persist_json(self.token_info)\r\n print('Token Refreshed succesfully')\r\n except HTTPError as http_err:\r\n print(f'HTTP error occured when refreshing from Spotify: \\n{http_err}')", "title": "" }, { "docid": "61b5c7d44ae8284476afea697b9d1426", "score": "0.5491717", "text": "def 
test_refresh_airtable_form_session_auth(self):\n self.client.login(username='testuser', password='12345')\n original_function = PracticesAirtableAdapter.update\n PracticesAirtableAdapter.update = MagicMock()\n try:\n response = self.client.post(reverse('refresh_data'), {}, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n PracticesAirtableAdapter.update.assert_not_called()\n finally:\n PracticesAirtableAdapter.update = original_function", "title": "" }, { "docid": "f487a36898a345099f0b00cf7395e7da", "score": "0.54905945", "text": "def refresh_token(self, refresh_token=None):\n return self.request('oauth/token').set_params(\n grant_type='refresh_token',\n client_id=self._client_id,\n client_secret=self._client_secret,\n refresh_token=str(refresh_token or self._refresh_token)\n ).post()", "title": "" }, { "docid": "d2a4106c060c3595f54f05b905dbf4d7", "score": "0.5480489", "text": "def refresh_token(_, source):\n opr = operator.Operator()\n if source == \"youtube\" and opr.youtube is not None:\n opr.youtube.oauth_flow.refresh()\n elif source == \"twitch\" and opr.twitch is not None:\n opr.twitch.oauth_flow.refresh()\n return redirect(\"notifpy:manage_endpoints\")", "title": "" }, { "docid": "9f8fee811773c346079a6053fdaa0d1b", "score": "0.54626775", "text": "async def refresh_token(request):\n if not request.user:\n return web.json_response({'message': 'Authentication required'}, status=400)\n\n try:\n if request.jwt_payload['access']:\n return web.json_response({'message': 'Refresh token not provided'},\n status=400)\n except Exception:\n return web.json_response({'message': 'Invalid token'},\n status=400)\n\n # TODO: Verify user exists\n payload = {'user_id': request.jwt_payload['user_id'],\n 'exp': (datetime.utcnow()\n + timedelta(minutes=JWT_REFRESH_MINUTES)),\n 'access': 1\n }\n jwt_token = jwt.encode(payload, JWT_SECRET, JWT_ALGORITHM)\n return web.json_response({'access_token': jwt_token.decode('utf-8')})", "title": "" }, { "docid": "2dbdd0d0d313a20215a654b26c382101", "score": "0.5459787", "text": "def drive_auth(reset):\n g_auth = GoogleAuth()\n\n # setting Google Auth for custom parameters\n g_auth.DEFAULT_SETTINGS['client_config_file'] = file_add.client_secrets\n g_auth.DEFAULT_SETTINGS['client_config_backend'] = 'file'\n g_auth.DEFAULT_SETTINGS['oauth_scope'] = ['https://www.googleapis.com/auth/drive']\n g_auth.DEFAULT_SETTINGS['get_refresh_token'] = True\n g_auth.DEFAULT_SETTINGS['save_credentials'] = True\n g_auth.DEFAULT_SETTINGS['save_credentials_backend'] = 'file'\n g_auth.DEFAULT_SETTINGS['save_credentials_file'] = file_add.cred_file()\n g_auth.DEFAULT_SETTINGS['client_id'] = '442675981331-9sq75sq0731sc0pef1rsg2jksmqfov2f.apps.googleusercontent.com'\n g_auth.DEFAULT_SETTINGS['client_secret'] = 'bmPz_4djxl_NZsA4Cvvaz2XT'\n\n # if already authenticated, load file\n g_auth.LoadCredentialsFile(file_add.cred_file())\n\n if g_auth.credentials is None or reset:\n if reset:\n if g_auth.credentials is not None:\n print(\"Error: Couldn't reset account. 
Please report at [email protected]\")\n sys.exit(1)\n g_auth.LocalWebserverAuth()\n\n elif g_auth.access_token_expired:\n # refresh authorisation if expired\n g_auth.Refresh()\n\n else:\n # initialise the saved data\n g_auth.Authorize()\n\n return g_auth", "title": "" }, { "docid": "f13d160d26cd73ee7b9c40278ed62a74", "score": "0.5456557", "text": "def testWrongClient(self):\n client = getTestPasswordClient(\n clientId='differentClient', authorizedGrantTypes=[GrantTypes.REFRESH_TOKEN])\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN,\n }, authentication=client)\n self._CLIENT_STORAGE.addClient(client)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, InvalidTokenError('refresh token'),\n msg='Expected the token resource to reject a refresh_token request '\n 'with a refresh token that is not valid for the client.')", "title": "" }, { "docid": "5940106a9763a9f56a603ad3ebc5276a", "score": "0.54419667", "text": "def RevokeRefresh(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "7bfe026064d8b89e5a74a29f1bc0eca4", "score": "0.5439368", "text": "def test_v1_token_refresh_post(self):\n pass", "title": "" }, { "docid": "d1526d52198f68b151a6e2b9969688af", "score": "0.5436727", "text": "def refresh_access_token():\n access_token = User.encode_token(\n {\"user_id\": g.current_user.id, \"token_type\": TokenType.ACCESS_TOKEN.name},\n current_app.config[\"SECRET_KEY\"],\n current_app.config[\"ACCESS_TOKEN_LIFESPAN\"]\n )\n database_repository.add_token(access_token)\n return {\"access_token\": access_token.raw_jwt}, HTTPStatus.CREATED", "title": "" }, { "docid": "334f0c49463a337b473c1e5520490f2a", "score": "0.5430749", "text": "def refresh_token(self, refresh_token, grant_type='refresh_token',\n scope=None):\n if scope:\n return self._token_request(grant_type=grant_type,\n refresh_token=refresh_token,\n scope=scope)\n else:\n return self._token_request(grant_type=grant_type,\n refresh_token=refresh_token)", "title": "" }, { "docid": "25b59d2302cc177ddbea76c6edebc8dc", "score": "0.54233044", "text": "def ListRefresh(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "2a7e722a595bb6ae675a736a77d4afac", "score": "0.54141086", "text": "async def test_cached_token_within_refresh_window():\n\n credential = MockCredential(cached_token=AccessToken(CACHED_TOKEN, time.time() + DEFAULT_REFRESH_OFFSET - 1))\n token = await credential.get_token(SCOPE)\n\n credential.acquire_token_silently.assert_called_once_with(SCOPE, claims=None, tenant_id=None)\n credential.request_token.assert_called_once_with(SCOPE, claims=None, tenant_id=None)\n assert token.token == MockCredential.NEW_TOKEN.token", "title": "" }, { "docid": "b1736036a99a03a467d40582987802d7", "score": "0.5412689", "text": "def renew_access_token(self, refresh_token, api_secret):\n h = hashlib.sha256(self.api_key.encode(\"utf-8\") + refresh_token.encode(\"utf-8\") + api_secret.encode(\"utf-8\"))\n checksum = h.hexdigest()\n\n resp = self._post(\"api.token.renew\", params={\n \"api_key\": self.api_key,\n \"refresh_token\": refresh_token,\n \"checksum\": checksum\n })\n\n if \"access_token\" in resp:\n 
self.set_access_token(resp[\"access_token\"])\n\n return resp", "title": "" }, { "docid": "0f1636faaa6f1d5c67a654af69d51679", "score": "0.5401569", "text": "def test_conditional_refresh_token_update(authorizer, response):\n authorizer.ensure_valid_token() # trigger refresh\n token_data = response.by_resource_server[\"rs1\"]\n if \"refresh_token\" in token_data: # if present, confirm refresh token was updated\n assert authorizer.access_token == \"access_token_2\"\n assert authorizer.refresh_token == \"refresh_token_2\"\n else: # otherwise, confirm no change\n assert authorizer.access_token == \"access_token_2\"\n assert authorizer.refresh_token == \"refresh_token_1\"", "title": "" }, { "docid": "35d3b8018d2c70df73a5ed96b5dc6c0c", "score": "0.53982884", "text": "async def test_refresh_token(self, mock_resp):\n mock_resp.return_value.json = mock.AsyncMock(\n return_value={\n \"account\": {\"account_id\": 5678, \"client_id\": 1234, \"tier\": \"test\"},\n \"auth\": {\"token\": \"foobar\"},\n }\n )\n mock_resp.return_value.status = 200\n\n self.auth.no_prompt = True\n self.assertTrue(await self.auth.refresh_token())\n self.assertEqual(self.auth.region_id, \"test\")\n self.assertEqual(self.auth.token, \"foobar\")\n self.assertEqual(self.auth.client_id, 1234)\n self.assertEqual(self.auth.account_id, 5678)\n\n mock_resp.return_value.status = 400\n with self.assertRaises(TokenRefreshFailed):\n await self.auth.refresh_token()\n\n mock_resp.return_value.status = 200\n mock_resp.return_value.json = mock.AsyncMock(side_effect=AttributeError)\n with self.assertRaises(TokenRefreshFailed):\n await self.auth.refresh_token()", "title": "" }, { "docid": "39a392a6eadf3cd31180a51aa52d2c14", "score": "0.53875506", "text": "def test_refresh(self):\n data = {\"token\": utils.jwt_encode_handler(self.payload)}\n\n response = self.client.post(\n self.refresh_auth_token_url, data, content_type=\"application/json\"\n )\n decoded_payload = utils.jwt_decode_handler(response.json()[\"token\"])\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(decoded_payload[\"username\"], self.username)", "title": "" }, { "docid": "549033b6dd5a3eb4441d7ddd5a1f0970", "score": "0.53796834", "text": "def post(self, request, *args, **kwargs):\n response = super(RefreshToken, self).post(request, *args, **kwargs)\n if response.status_code == status.HTTP_200_OK:\n return Response(CommonHelper.render(True, response.data, \"success\", response.status_code))\n return Response(CommonHelper.render(False, None, response.data[\"non_field_errors\"], response.status_code))", "title": "" }, { "docid": "38638fa779b1691d910f1427528f150a", "score": "0.53673273", "text": "def refresh_token(token_info):\n c = pm_config\n url = c.API_PROTOCOL + c.PM_HOST + '/' + c.PM_WORKSPACE + \"/oauth2/token\"\n payload = {\n \"grant_type\": \"refresh_token\",\n \"client_id\": c.CLIENT_ID,\n \"client_secret\": c.CLIENT_SECRET,\n \"refresh_token\": token_info[\"refresh_token\"]\n }\n r = requests.post(url, data = payload)\n r.raise_for_status()\n jsn = r.json()\n\n if \"error\" in jsn:\n errormsg = \"Error while refreshing token. PM server {host}. 
<br> {err} <br> {err_desc}\" \\\n .format(host = url, err = jsn[\"error\"], err_desc = jsn[\"err_desc\"])\n raise Exception(errormsg)\n\n token_info[\"access_token\"] = jsn[\"access_token\"]\n token_info[\"expires_at\"] = time.time() + jsn[\"expires_in\"]\n return token_info", "title": "" }, { "docid": "8281ba8907a4de2f340ddbacc0d5108d", "score": "0.5365312", "text": "def getcreds(self):\n creds = os.path.exists(\"components/google/credentials.json\")\n #creddict = json.loads(open(\"components/google/credentials.json\").read())[\"installed\"]\n\n if creds:\n self.logger(\"creds found!\", colour=\"yellow\")\n creddict = json.loads(open(\"components/google/credentials.json\").read())[\"installed\"]\n now = datetime.datetime.now()\n expiredate = datetime.datetime.strptime(creddict[\"expires_at\"],\"%Y-%m-%d %H:%M:%S.%f\")\n if now < expiredate:\n self.logger(\"Getting from credentials.json\", \"debug\", \"yellow\")\n return creddict[\"access_token\"]\n else:\n self.logger(\"creds need to be refreshed!\", \"debug\", \"yellow\")\n creds = self.refreshtoken(creddict)\n return creds\n # If there are no (valid) credentials available, let the user log in.\n if not creds:\n self.logger(\"No creds found!\", \"debug\", \"red\")\n response = self.requestusercode(client_id)\n self.logger(\"Please go to {} and enter this code: {}\".format(response[\"verification_url\"], response[\"user_code\"]), colour=\"blue\")\n device_code, interval = response[\"device_code\"], response[\"interval\"]\n creds = self.poll(device_code, interval)\n self.logger(\"Got access token, ready to rumble.\", colour=\"green\")\n return creds\n #creds = flow.run_local_server(port=0)\n # Save the credentials for the next run", "title": "" }, { "docid": "0b42601750ab9f8fb0b21c90ae2ea16d", "score": "0.536458", "text": "async def refresh_token(self) -> None:\n query = (\n \"mutation auth($username: String!, $password: String!){\"\n \" login(data: { username: $username, password: $password }) {\"\n \" token\"\n \" }\"\n \"}\"\n )\n variables = {\n \"username\": settings.FRIENDO_API_USER,\n \"password\": settings.FRIENDO_API_PASS\n }\n resp = await self._post(json={\"query\": query, \"variables\": variables})\n\n self.token = resp[\"data\"][\"login\"][\"token\"]\n self.headers = {\n \"Authorization\": f\"Bearer {self.token}\"\n }\n asyncio.create_task(self.refresh_later())", "title": "" }, { "docid": "2f83948d7dd09f9cf30322666165b251", "score": "0.5355095", "text": "def refresh(self):\n self.session_token = Session.generate_token()", "title": "" }, { "docid": "db046c7ac7efe1b1a9961dcb89e88138", "score": "0.5348524", "text": "async def async_step_reauth(\n self, user_input: dict[str, Any] | None = None\n ) -> FlowResult:\n assert self.config_mode != ConfigMode.LEGACY, \"Step only supported for SDM API\"\n if user_input is None:\n _LOGGER.error(\"Reauth invoked with empty config entry data\")\n return self.async_abort(reason=\"missing_configuration\")\n self._reauth = True\n self._data.update(user_input)\n return await self.async_step_reauth_confirm()", "title": "" }, { "docid": "5ea51e43c322d8812e4dab846ff6f093", "score": "0.5336833", "text": "def test_is_authenticated_refresh_required(self):\n\n # create past\n past = timezone.now() - timezone.timedelta(days=1)\n\n # create a user who token has expired\n user_with_tokens = UserFactory.create(\n username='1', email='[email protected]', access_token='1',\n refresh_token='2', token_expiry=past)\n\n # adding this because of random No route to host errors\n with mock.patch(\n 
'accounts.models.OAuth2Credentials') as mock_oauth:\n mock_oauth.return_value = MockCredentials(access_token='foo')\n self.assertTrue(user_with_tokens.is_authenticated)", "title": "" }, { "docid": "763a19755fa2f6a79d23fd8e7ca8ed5b", "score": "0.53125894", "text": "def test_revoked_token(self):\n info = self.get_authenticated_info_context()\n context = info.context\n\n next_middleware = mock.Mock()\n self.refresh_token_instance.revoke()\n self.middleware.resolve(next_middleware, None, info)\n\n self.assertEqual(context.user, AnonymousUser())\n self.assertEqual(context.refresh_token, None)\n next_middleware.assert_called_once_with(None, info)", "title": "" }, { "docid": "013cd882cd432b894666eb1c2c3171ae", "score": "0.5305945", "text": "def oauth_authorized():", "title": "" }, { "docid": "528b508c43549b930ef689e103e28a11", "score": "0.52919036", "text": "def refresh_token(self, refresh_token):\n\n self._refresh_token = refresh_token", "title": "" }, { "docid": "7f67a2956f3f472940af6cbf46ffbb3a", "score": "0.5282998", "text": "def test_account_get(client):\n with patch.object(\n account_handler, \"azure_refresh_token\"\n ) as mock_azure_refresh_token, patch(\n \"biit_server.account_handler.Database\"\n ) as mock_database:\n\n instance = mock_database.return_value\n\n query_data = {\"email\": \"[email protected]\", \"token\": \"henlo\"}\n mock_azure_refresh_token.return_value = (\"RefreshToken\", \"AccessToken\")\n\n instance.get.return_value = MockAccount(query_data[\"email\"])\n\n rv = client.get(\n \"/account\",\n query_string=query_data,\n follow_redirects=True,\n )\n\n assert (\n b'{\"access_token\":\"RefreshToken\",\"data\":{\"email\":\"[email protected]\"},\"message\":\"Account returned\",\"refresh_token\":\"AccessToken\",\"status_code\":200}\\n'\n == rv.data\n )", "title": "" }, { "docid": "12160e33003aa211adfdb2e9422ac794", "score": "0.5279259", "text": "def refresh(self, client, kwargs_dict):\n providers = self._get_objects(client=client,\n collection_name=\"providers\",\n query_dict={'expand': 'resources'})\n\n resources = []\n # If a provider ID was specified, only refresh that one\n if kwargs_dict['provider_id']:\n for prov in providers:\n if str(prov['id']) == kwargs_dict['provider_id']:\n resources.append(prov)\n break\n else:\n resources = providers\n\n result = client.collections.providers.action.refresh(*resources)\n\n # return the data objects from the action results\n return self._data_from_entity_list(result)", "title": "" }, { "docid": "b60c90badefae5aefb7bbc7958701200", "score": "0.5277712", "text": "def refresh():\n miracl.clear_user_info(session)\n return redirect(\"/\")", "title": "" }, { "docid": "876b14785770f1fca48a1dbd50c8e8ae", "score": "0.5273949", "text": "async def refresh_token(self, token: Optional[str] = None):\r\n\r\n if token:\r\n self.__refresh_token = token\r\n\r\n assert self.__refresh_token is not None\r\n await self.__refresh_access_token(self.__refresh_token)\r\n self.headers[\"Authorization\"] = f\"Bearer {self.__access_token}\"", "title": "" }, { "docid": "aadee75589efbae08d7acc39701cb352", "score": "0.52687144", "text": "async def refresh(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "4ae458d28d19f36d2f650c718fc40c1a", "score": "0.5260884", "text": "def refresh_user(personId):\n dao.refresh_user(personId)", "title": "" }, { "docid": "ebc42e9806249c3879b444a5e6258433", "score": "0.52586925", "text": "def testing_refresh(user_id, activity_id):\n access_token = refresh_tokens.return_access_token(user_id)\n return 
get_activity.get_activity(activity_id, access_token)", "title": "" }, { "docid": "c1e6498795ac95ae36afeb186ab719e7", "score": "0.52571154", "text": "def update_tokens(self, res):\n if res.has_error:\n res.raise_error()\n self.set_access_token(res.data['access_token'])\n if 'refresh_token' in res.data:\n self.set_refresh_token(res.data['refresh_token'])", "title": "" } ]
5800a54734ba51004c4c8adbe35d22cf
Returns the current version and patchnotes
[ { "docid": "34adb4005a421509bf0a2a8267d35ec0", "score": "0.6681273", "text": "async def info(self):\n \n message = \"Current cog version: **\" + self.version + \"**\\n\"\n message += \"Patchnotes:\"\n message += self.patchnote\n\n await self.bot.say(message)", "title": "" } ]
[ { "docid": "98ea3a806b99445d84baf0bb8c298e0a", "score": "0.67388994", "text": "def get_version_info() -> Tuple[Text, Text]: # type: ignore[empty-body]", "title": "" }, { "docid": "ce3ec83e553abfdc4d44f0757e49b8be", "score": "0.6537433", "text": "def get_version():", "title": "" }, { "docid": "f1e427eb06388bdcdab576b93b7e2bac", "score": "0.63533974", "text": "def version():\n\n return _version_info", "title": "" }, { "docid": "2bd4c5fac69f7afa70fe995374ad8f85", "score": "0.6350876", "text": "def get_version_info(self):\n ids = []\n status = []\n try:\n hg_id = subprocess.check_output(['hg', 'id']).strip()\n ids.append(\"Mercural Id (hg id): {}\".format(hg_id))\n if self._dvcs_verbose:\n hg_status = subprocess.check_output(['hg', 'status']).strip()\n status.append(\"$ hg status\")\n status.append(hg_status)\n except subprocess.CalledProcessError:\n pass\n\n try:\n git_id = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()\n ids.append(\"Git Id (git rev-parse HEAD): {}\".format(git_id))\n if self._dvcs_verbose:\n git_status = subprocess.check_output(\n ['git', 'status', '--porcelain']).strip()\n status.append(\"$ git status --procelain\")\n status.append(git_status)\n except subprocess.CalledProcessError:\n git_id = None\n\n return \"\\n\".join(ids), \"\\n\".join(status)", "title": "" }, { "docid": "4ef93f3d0ff39496a92fe37c713e41d9", "score": "0.62865746", "text": "def version(self):\r\n return self._version_info", "title": "" }, { "docid": "24bff2abe4de93b7f7001ca411453c43", "score": "0.6242651", "text": "def GetVersionInformation(self):\n return f'plaso - {self.NAME:s} version {plaso.__version__:s}'", "title": "" }, { "docid": "16846b6bbefa3e8c96051aa835c868c0", "score": "0.6224606", "text": "def get(self):\n soup = get_soup(URL)\n\n x = soup.find(text=\"Current Version\")\n paragraph = x.parent.find_next_sibling(\"p\")\n version = paragraph.text.strip().split()[2][len(\"v\") : -len(\".Hs\")]\n return version", "title": "" }, { "docid": "a793071aaa9a9c1553b98bae60fee04a", "score": "0.61958396", "text": "def VersionInfo(self):\n self._info_type = 'version_info'\n response = self.Get()\n return response.json()", "title": "" }, { "docid": "92897d5121d2a3c865c30d6293a4245a", "score": "0.6190319", "text": "def getVersion():\n return VERSION", "title": "" }, { "docid": "e2d019ced5ff0376023b7d17a15481ce", "score": "0.61857957", "text": "def get_version_info():\n import importlib\n report = {}\n # list the core packages here\n packages = [\"qcsubmit\", \"openforcefield\", \"basis_set_exchange\", \"qcelemental\"]\n for package in packages:\n module = importlib.import_module(package)\n report[package] = pd.Series({\"version\": module.__version__})\n\n # now try openeye else use rdkit\n try:\n import openeye\n report[\"openeye\"] = pd.Series({\"version\": openeye.__version__})\n except ImportError:\n import rdkit\n report[\"rdkit\"] = pd.Series({\"version\": rdkit.__version__})\n\n return pd.DataFrame(report).transpose()", "title": "" }, { "docid": "8d2358724ec8d6eee7ca98e6c83c6f11", "score": "0.61831015", "text": "def get_verinfo(self):\r\n ret = []\r\n \r\n if hasattr(self.pe, 'VS_VERSIONINFO'):\r\n if hasattr(self.pe, 'FileInfo'):\r\n for entry in self.pe.FileInfo:\r\n if hasattr(entry, 'StringTable'):\r\n for st_entry in entry.StringTable:\r\n for str_entry in st_entry.entries.items():\r\n # yes... it annoyed me that much .. 
ocd whatttt\r\n if 'OriginalFilename' in str_entry:\r\n p = self.convert_to_printable(str_entry[0]), self.convert_to_printable(str_entry[1])\r\n ret.append(p)\r\n else:\r\n p = self.convert_to_printable(str_entry[0]), self.convert_to_printable(str_entry[1])\r\n ret.append(p)\r\n elif hasattr(entry, 'Var'):\r\n for var_entry in entry.Var:\r\n if hasattr(var_entry, 'entry'):\r\n p = self.convert_to_printable(var_entry.entry.keys()[0]), var_entry.entry.values()[0]\r\n ret.append(p)\r\n return ret", "title": "" }, { "docid": "602d76fd099f004f0b42c54a1f96439e", "score": "0.616398", "text": "def get_version_info(self):\r\n version_info = {'version': _version.__version__}\r\n return version_info", "title": "" }, { "docid": "1e743b52cc08f8efe759e29a81683205", "score": "0.6157366", "text": "def get_version(self):\r\n pass", "title": "" }, { "docid": "93d692f25aca445d41ccd4cf55f4dfe9", "score": "0.6138029", "text": "def get_info(self):\n version = int(self.get('VER'))\n serial = int(self.get('SERH'))*65536 + int(self.get('SERL'))\n serial = '%s' % serial\n version = '%d.%02d' % ((version / 256) , version % 256)\n return serial, version", "title": "" }, { "docid": "8546e506a555a6545185cbf2637d5f84", "score": "0.6091508", "text": "def getVersionInfo(cls):\n \n return \"MergeSensor: \" + __version__ + \\\n \"\\nWatchedDatasets: \" + WatchedDatasets.getVersionInfo() + \\\n \"\\nDataset: \" + Dataset.getVersionInfo() + \"\\n\"#+ \\\n #\"\\nMergeSensorDB: \" + MergeSensorDB.getVersionInfo() + \"\\n\"", "title": "" }, { "docid": "c794c2a1887c60a516bf706705a90cd9", "score": "0.6066401", "text": "def version(self) -> str:", "title": "" }, { "docid": "30a0a5e7dff98898dcc9098085743534", "score": "0.6055203", "text": "def print_version_status(self):\n p = nice_print_value\n\n p(' Version Info', kc=Fore.GREEN)\n if not os.path.isdir(join(self.location, '.git')) or ioutils.call_git('branch') == '':\n print(Fore.RED + 'No git repo initialized, versioning info not available' + Fore.RESET)\n p('')\n return\n\n tags = ioutils.call_git('tag --sort=-creatordate') + '\\n' + 'None'\n\n p('Current Version:', self.get_version())\n p('Current branch:', ioutils.call_git('branch --no-color').split()[1])\n p('Current commit:', ioutils.call_git('rev-parse --short HEAD').strip())\n p('Commit count:', ioutils.call_git('rev-list --all --count').strip())\n p('Last Tag:', tags.split()[0])\n p('')", "title": "" }, { "docid": "dbdc226e4a84397b129d37ca8c68e93b", "score": "0.60383445", "text": "def get_version_info(self):\n return self.__send_get_request(self.__url_version,\n with_user_token=False,\n with_session_token=False)", "title": "" }, { "docid": "4a47983811c045c6fb12978f54979fe8", "score": "0.6013598", "text": "def get_git_versioning():\n return check_output(\n ['git', 'rev-parse', '--short', 'HEAD']\n ).strip()", "title": "" }, { "docid": "b9d6b60acfabaa1cdb037b570702fbb8", "score": "0.60027266", "text": "def get_current_version():\n version_tag = get_current_version_tag()\n return version_tag", "title": "" }, { "docid": "5dfc581d1eda7ee88634fe11ab59d08e", "score": "0.6002456", "text": "def version_info():\n from collections import namedtuple\n VersionInfo = namedtuple('VersionInfo', ['version', 'releasetype', 'patch'])\n return VersionInfo(version='1.0.3', releasetype='pre-release', patch='1a')", "title": "" }, { "docid": "b1ba5e5efcb3b1c87f217ccbc2e3975f", "score": "0.5989382", "text": "def parse_version(self):\r\n return parse_major_minor_patch_build(self.version)", "title": "" }, { "docid": 
"dbb4c7064d18ed5d8b8d6e925a6678a7", "score": "0.5986421", "text": "def get_versions():\n import sys\n import platform\n\n import qtpy.QtCore\n\n revision = None\n # from ezcad.utils import vcs\n # revision, branch = vcs.get_git_revision(os.path.dirname(__dir__))\n\n if not sys.platform == 'darwin': # To avoid a crash with our Mac app\n system = platform.system()\n else:\n system = 'Darwin'\n\n return {\n 'ezcad': __version__,\n 'python': platform.python_version(),\n 'bitness': 64 if sys.maxsize > 2**32 else 32,\n 'qt': qtpy.QtCore.__version__,\n 'qt_api': qtpy.API_NAME, # PyQt5 or PyQt4\n 'qt_api_ver': qtpy.PYQT_VERSION,\n 'system': system, # Linux, Windows, ...\n 'revision': revision, # '9fdf926eccce'\n }", "title": "" }, { "docid": "45543065a40057814a19924ef2611070", "score": "0.5986253", "text": "def __version_info(self) -> list:\n version_info = re.split('[.-]', self.version) # major.minor.patch-release\n\n for index, field in enumerate(version_info):\n try:\n version_info[index] = int(field)\n except ValueError:\n # in python versions, the release field is a string\n # the nef header needs int values in the version\n version_info[index] = DEFAULT_UINT32\n\n while len(version_info) < NefFile.__VERSION_NUMBER_OF_FIELDS:\n version_info.append(DEFAULT_UINT32)\n\n return version_info[0:NefFile.__VERSION_NUMBER_OF_FIELDS]", "title": "" }, { "docid": "29d68ed0e5b02e294ac2bc6d0fe07bea", "score": "0.594927", "text": "def get_relver_info(version):\n return RELEASES_BY_VERSION[version]", "title": "" }, { "docid": "369b7002b15004a03164c3996bb3fb15", "score": "0.59475297", "text": "def notes(self):\n self.sync_topology_if_outdated()\n return self._notes", "title": "" }, { "docid": "3cbf4cf3220f22a9ea4ff733f863a5a5", "score": "0.5947207", "text": "def get_version():\n pass", "title": "" }, { "docid": "80aae4a26484a30b5b92ca00ad992ed3", "score": "0.59457713", "text": "def Version():\n return ''", "title": "" }, { "docid": "7a2fbc54e99e6a63fd050fa114573e95", "score": "0.59440917", "text": "def current_revision(self):\n return self.run(['rev-parse', 'HEAD']).strip()", "title": "" }, { "docid": "0dcea4fe7ad64939abc65eb6f30d087b", "score": "0.59393007", "text": "def getVersion(): # @NoSelf\n print('CDFread version:', str(CDF.version) + '.' + str(CDF.release) +\n '.' 
+ str(CDF.increment))\n print('Date: 2018/01/11')", "title": "" }, { "docid": "7b9bce8f2f1ee8c453738d801bdc065e", "score": "0.5929339", "text": "def version_desc(self):\n return self._version_desc", "title": "" }, { "docid": "a79bb647810fddcf8b5c0c4eca40f90d", "score": "0.59214693", "text": "def getPackageInfo():\n\n def get_git_version():\n \"\"\"\n Returns the project version as derived by git.\n \"\"\"\n\n path = os.path.dirname(__file__)\n branch = (\n Popen(f'git -C \"{path}\" rev-parse --abbrev-ref HEAD', stdout=PIPE, shell=True)\n .stdout.read()\n .rstrip()\n .decode(\"ascii\")\n )\n rev = (\n Popen(f'git -C \"{path}\" describe --always --tags', stdout=PIPE, shell=True)\n .stdout.read()\n .rstrip()\n .decode(\"ascii\")\n )\n\n if branch.startswith(\"fatal\") or rev.startswith(\"fatal\"):\n raise ValueError(\"Could not determine git version\")\n\n return f\"({branch}) {rev}\"\n\n try:\n package_version = get_distribution(\"x-to-nwb\").version\n except DistributionNotFound: # not installed as a package\n package_version = None\n\n try:\n git_version = get_git_version()\n except ValueError: # not in a git repostitory\n git_version = None\n\n version_info = {\n \"repo\": \"https://github.com/byte-physics/x-to-nwb\",\n \"package_version\": \"Unknown\",\n \"git_revision\": \"Unknown\",\n }\n\n if package_version:\n version_info[\"package_version\"] = package_version\n\n if git_version:\n version_info[\"git_revision\"] = git_version\n\n return version_info", "title": "" }, { "docid": "3d651c26f3cd42375b9cdfab6d0fc840", "score": "0.59046173", "text": "def get_version_info():\n\tconfig=ConfigParser()\n\tconfig.read(config_file)\n\tMAJOR=config['version']['major']\n\tMINOR=config['version']['minor']\n\tMICRO=config['version']['micro']\n\n\tISRELEASED = True\n\n\tVERSION = '%s.%s.%s' % (MAJOR, MINOR, MICRO)\n\n\treturn VERSION, ISRELEASED", "title": "" }, { "docid": "6fbaf96609974d0b8124823697d4f685", "score": "0.589772", "text": "def version():\n return meta.version", "title": "" }, { "docid": "0e23bb6f6c047263a51382584908771a", "score": "0.58939415", "text": "def _PrintVersions(self, diff, latest_msg=None):\n current_version = config.INSTALLATION_CONFIG.version\n latest_version = diff.latest.version\n\n self.__Write(log.status,\n '\\nYour current Cloud SDK version is: ' + current_version)\n if latest_version and latest_msg:\n self.__Write(log.status, latest_msg + latest_version)\n self.__Write(log.status)\n return (current_version, latest_version)", "title": "" }, { "docid": "fe12b929bd34a77f4cd17af37d878a0b", "score": "0.5864802", "text": "def ProjectVersion(self):\n return self.project_version", "title": "" }, { "docid": "3540c59e4a27e047b91b0814a6a66057", "score": "0.5864773", "text": "def getCurrentRevision(obj):", "title": "" }, { "docid": "f5f5cf6a6bba72db0dc157ae198658a9", "score": "0.586201", "text": "def current_version(self) -> str:\n return pulumi.get(self, \"current_version\")", "title": "" }, { "docid": "bb8d43e27cc1cd8eb530e647b747a325", "score": "0.58592916", "text": "def GetCurrentVersionsInformation(self):\n current_state = self._GetInstallState()\n versions = {}\n installed_components = current_state.InstalledComponents()\n for component_id, component in installed_components.iteritems():\n if component.ComponentDefinition().is_configuration:\n continue\n versions[component_id] = component.VersionString()\n return versions", "title": "" }, { "docid": "8b9e247b6d5e262013b653d19cf5a6fe", "score": "0.58556205", "text": "def version(self):\n if self._version is None:\n before = 
[]\n after = []\n with open(self.metadata_file) as metadata:\n for line in metadata:\n if after:\n after.append(line)\n else:\n match = self.rev.match(line.rstrip())\n if match:\n self._version = Version(match.group('ver'))\n before.append(line[:match.start('ver')])\n after.append(line[match.end('ver'):])\n else:\n before.append(line)\n assert self._version is not None, \\\n \"%r did not match in %s\" % (self.version_re, self.metadata_file)\n self._before = \"\".join(before)\n self._after = \"\".join(after)\n self.log.info(\"version of %s is %s\" % (\n self.metadata_file, self._version))\n return self._version", "title": "" }, { "docid": "a68d29c0cab424dfa8f73aabce2aa155", "score": "0.5839972", "text": "def render(self):\n release_notes = []\n for parser in self.parsers:\n parser_content = parser.render()\n if parser_content:\n release_notes.append(parser_content)\n return \"\\r\\n\\r\\n\".join(release_notes)", "title": "" }, { "docid": "9e23d5685ff7c656a43e7a75b7aeebd4", "score": "0.5835091", "text": "def packageInfo():\n # first the easy ones\n info = {\n \"version\": version(),\n \"prefix\": prefix,\n \"path\": prefix / \"bin\",\n \"ldpath\": prefix / \"lib\",\n \"pythonpath\": home.parent,\n \"includes\": f\"-I{prefix}/include\",\n }\n\n # the libraries\n libs = [\"pyre\", \"journal\"]\n # get the host\n host = executive.host\n # if the host is a linux box\n if isinstance(host, platforms.linux()):\n # we have to link against the real time clock library\n libs.append(\"rt\")\n # assemble the libraries\n libs = \" \".join(f\"-l{lib}\" for lib in libs)\n # attach\n info[\"libs\"] = f\"-L{prefix}/lib {libs}\"\n\n # all done\n return info", "title": "" }, { "docid": "3f898ff516659b118b4ab99a09a6e8d9", "score": "0.5830709", "text": "def get_version(self):\n return self.version", "title": "" }, { "docid": "3f898ff516659b118b4ab99a09a6e8d9", "score": "0.5830709", "text": "def get_version(self):\n return self.version", "title": "" }, { "docid": "3f898ff516659b118b4ab99a09a6e8d9", "score": "0.5830709", "text": "def get_version(self):\n return self.version", "title": "" }, { "docid": "bc22681de8abce01ad7b121243b093bd", "score": "0.58257663", "text": "def version():\n return VERSION", "title": "" }, { "docid": "bc22681de8abce01ad7b121243b093bd", "score": "0.58257663", "text": "def version():\n return VERSION", "title": "" }, { "docid": "94a84c2f32abd146b3030c7717a9cd54", "score": "0.58123225", "text": "def get_version(self):\n return self.write('gv')", "title": "" }, { "docid": "ffb5c6542c1bd8de731eab6c44b46657", "score": "0.5803236", "text": "def notes(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"notes\")", "title": "" }, { "docid": "a16a8d01699a3edb677d3fe08d8d595b", "score": "0.580271", "text": "def get_version(self):\n return self.cur_config['version']['name']", "title": "" }, { "docid": "2593d8140d6789b15dc1da0dc5d824b0", "score": "0.5797665", "text": "def version(self):\n return self._version", "title": "" }, { "docid": "c134fff9b2d69589e0e8ae7d8ff6a5fe", "score": "0.57928157", "text": "def version(self):\n return self.get('version', '')", "title": "" }, { "docid": "06846e2940f6a53eacc3940c8fae8509", "score": "0.5791443", "text": "def get_version(self) -> str:\n s = self.get_help()\n version = s[s.find(\"ensembl-vep\") : s.find(\"Help\")].split(\":\")[1].strip()\n return version", "title": "" }, { "docid": "07e171e993c509b330be328e70900ada", "score": "0.57884383", "text": "def get_version(self):\n return self._call_rpc(\"getversion\")", "title": "" }, { "docid": 
"fe7843f473d08813ff2e84426c84d55a", "score": "0.578735", "text": "def getNotes(self):\n return self.notes", "title": "" }, { "docid": "ef543817e353303a0ac36ebee13cbe9f", "score": "0.5786724", "text": "def get_current_version(self):\n version = None\n full_path = bpy.data.filepath\n if full_path != \"\":\n version = self.get_version_from_full_path(full_path)\n return version", "title": "" }, { "docid": "3562eed8842b38a2c3a698d44d8b5477", "score": "0.57836235", "text": "def get_version():\n \n if getstatusoutput(\"which git\")[0] == 0:\n git_commands = (\"git rev-parse --abbrev-ref HEAD\", \"git log --pretty=format:'%h' -n 1\")\n return \"0.1dev:\" + \":\".join([getstatusoutput(command)[1] for command in git_commands])\n else:\n return \"Unknown\"", "title": "" }, { "docid": "9a91a0ecbc5135e049a16ae70de03215", "score": "0.57596004", "text": "def fetch_notes_for_version(self, m_version_obj, b_populate_playlists=False):", "title": "" }, { "docid": "d0929c9d09b7d90db206a913cecbe955", "score": "0.575471", "text": "def nameAndVersion(self):\n\t\treturn ''", "title": "" }, { "docid": "beee23df67b3a35a6a8459d71954e486", "score": "0.57525516", "text": "def latest_content(release_notes_path):\n content = \"\"\n start_extract = False\n with open(release_notes_path, encoding=\"utf-8\") as f:\n for line in f:\n if line.startswith(\"## \"):\n if start_extract:\n break\n\n start_extract = True\n continue\n\n # hit a separated line\n if line.startswith(\"---\"):\n break\n\n content += line\n\n content += os.linesep\n return content", "title": "" }, { "docid": "177647e7c8a928b62d24f80e8f8d3f0a", "score": "0.5737139", "text": "def getVersion(self):\n return self.get(\"version\")", "title": "" }, { "docid": "c71df8ee5539ac525179c861b88bd2e2", "score": "0.57280034", "text": "def SolutionVersion(self):\n return self.solution_version", "title": "" }, { "docid": "bac196f1c3061665babab243d77d58a6", "score": "0.5726368", "text": "def version():\n print('version = 1.1')", "title": "" }, { "docid": "0479d7b8dbc440e66e6075c897c681f4", "score": "0.5715581", "text": "def version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"version\")", "title": "" }, { "docid": "0479d7b8dbc440e66e6075c897c681f4", "score": "0.5715581", "text": "def version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"version\")", "title": "" }, { "docid": "0479d7b8dbc440e66e6075c897c681f4", "score": "0.5715581", "text": "def version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"version\")", "title": "" }, { "docid": "d4984df35e354036e77e1732b5d36d2b", "score": "0.5714446", "text": "def get_version_number():\n return [0, 2, 0]", "title": "" }, { "docid": "07fb4d6d8c2443a05d2f064b0f131371", "score": "0.57072335", "text": "def base(self):\n if self.prerelease is None:\n pre = \"\"\n else:\n pre = \"pre%s\" % (self.prerelease,)\n return '%d.%d.%d%s' % (self.major,\n self.minor,\n self.micro,\n pre)", "title": "" }, { "docid": "661031d38392a82e03287d5823a5d555", "score": "0.56971616", "text": "def version(self):\n\n return constants.version", "title": "" }, { "docid": "d66fc816509e1aedc9de6add7ed17aeb", "score": "0.5695548", "text": "def version(self):\n return self._issue_command('*VER\\n')", "title": "" }, { "docid": "91f04ebdb67c107ffd776053962c5f91", "score": "0.5695193", "text": "def version(self):\r\n return re.search(r'\\d+(\\.\\d+)+', self.git.version()).group(0)", "title": "" }, { "docid": "fc79a00e505fab0eca108adbf04ac22f", "score": "0.56944484", "text": "async def _version_info(cls) -> Dict[str, str]:\n return 
{\"redbot\": __version__, \"discordpy\": discord.__version__}", "title": "" }, { "docid": "16a93c3f853fc5706328bd0a432c8394", "score": "0.5691316", "text": "def _get_version(self) -> typing.Tuple[str, str]:\n request_url = urllib.parse.urljoin(self._endpoint_url, \"version\")\n response: typing.Dict[str, str] = requests.get(request_url).json()\n\n return response['api'], response['mlre']", "title": "" }, { "docid": "9272566490b127fbe30c2fbed7871b39", "score": "0.56911653", "text": "async def ver(self):\n\n message = \"Current cog version: **\" + self.version + \"** (\" + self.update_type + \")\\n\"\n message += \"For patchnotes use `\" + self.bot.command_prefix[0] + \"drawing info`\"\n await self.bot.say(message)", "title": "" }, { "docid": "d20068147909789afce04a7154441587", "score": "0.56903124", "text": "def current_version(self):\n return self.version_keys[0]", "title": "" }, { "docid": "498ca452aa1e838056d47e36ad725283", "score": "0.5682629", "text": "def log_versions():\n global versions_log\n required_packages = [\"Keras\", \"numpy\", \"pandas\", \"matplotlib\", \"scikit-learn\", \"seaborn\",\n \"sklearn\", \"tensorflow\", \"pip\"]\n\n try:\n from pip._internal.operations import freeze\n except ImportError: # pip < 10.0\n from pip.operations import freeze\n\n try:\n distro = platform.linux_distribution()\n except:\n distro = [\"N/A\", \"\"]\n\n versions_log.write(\"\\n\\nOperation System: \")\n versions_log.write(\"\\n Kernel: \" + platform.system() + \" \" + platform.release())\n versions_log.write(\"\\n Distribution: \" + distro[0] + \" \" + distro[1])\n\n versions_log.write(\"\\n\\nPython version: \" + str(sys.version_info[0]) + \".\"\n + str(sys.version_info[1]) + \".\" + str(sys.version_info[2]))\n versions_log.write(\"\\n\\nPackages versions:\\n\")\n\n list = freeze.freeze()\n\n versions_log.write(\" Required Packages:\")\n for package in list:\n for required in required_packages:\n if package.find(required) != -1:\n index = package.find(\"==\")\n package = package.replace(\"==\", \" \" * (25 - index) + \"= \")\n versions_log.write(\"\\n \" + package)\n\n list = freeze.freeze()\n versions_log.write(\"\\n\\n Complete List:\")\n for package in list:\n index = package.find(\"==\")\n package = package.replace(\"==\", \" \" * (25 - index) + \"= \")\n versions_log.write(\"\\n \" + package)", "title": "" }, { "docid": "f652b8b12049af400b06806e0005c67c", "score": "0.5679833", "text": "def _VersionInfo(self):\n if not self._version_info:\n try:\n self._version_info = files.Read(\n f\"{self.ConfigServer().rstrip('/')}/version-info.yaml\")\n except files.Error:\n # Fallback to using FLAG to avoid edge case where the config server\n # written to the task list is unavailable.\n info_file = f\"{flags.CONFIG_SERVER.value.rstrip('/')}/version-info.yaml\"\n try:\n self._version_info = files.Read(info_file)\n except files.Error as e:\n raise YamlFileError(info_file) from e\n return self._version_info", "title": "" }, { "docid": "80fc852c71975cdfc0880e14b067061c", "score": "0.567624", "text": "def _get_version(self) -> VersionNumberTuple:\n try:\n return tuple(\n [\n t.py_int\n for t in self.root_tag.compound.get_list(\"lastOpenedWithVersion\")\n ]\n )\n except Exception:\n return 1, 2, 0", "title": "" }, { "docid": "fef096eeefbb92077b6282f0493a71d8", "score": "0.5675952", "text": "def version(self):\n return self.get('version')", "title": "" }, { "docid": "9b85f3d0acd4d404879c6c1706807ef8", "score": "0.5675267", "text": "def notes(self) -> str:\n return pulumi.get(self, \"notes\")", "title": "" 
}, { "docid": "00d7ee81b9405dc23463d88e760a16af", "score": "0.56711423", "text": "def version(self):\n\n return version", "title": "" }, { "docid": "0c47c85c0b560c92a4cf44c72825086c", "score": "0.5664569", "text": "def currentVersion():\n return Version(2, 0, 1)", "title": "" }, { "docid": "d5e0f27a93cadc21e4fe4fd5bf407af9", "score": "0.5662229", "text": "def get_version(self):\n raise NotImplementedError", "title": "" }, { "docid": "9ef6b2af03f7d20f95e9d7c232e1592a", "score": "0.56600547", "text": "def version(self) -> str:\n return pulumi.get(self, \"version\")", "title": "" }, { "docid": "9ef6b2af03f7d20f95e9d7c232e1592a", "score": "0.56600547", "text": "def version(self) -> str:\n return pulumi.get(self, \"version\")", "title": "" }, { "docid": "9ef6b2af03f7d20f95e9d7c232e1592a", "score": "0.56600547", "text": "def version(self) -> str:\n return pulumi.get(self, \"version\")", "title": "" }, { "docid": "9ef6b2af03f7d20f95e9d7c232e1592a", "score": "0.56600547", "text": "def version(self) -> str:\n return pulumi.get(self, \"version\")", "title": "" }, { "docid": "9ef6b2af03f7d20f95e9d7c232e1592a", "score": "0.56600547", "text": "def version(self) -> str:\n return pulumi.get(self, \"version\")", "title": "" }, { "docid": "6042a68eb40b93b7c968f3f802701a0d", "score": "0.56541973", "text": "def version(self) -> List[int]:\n return self._version", "title": "" }, { "docid": "a7861a62b3315aaed714c0b1740f84b8", "score": "0.5646204", "text": "def versions(self):\n print '%s<<< Workbench CLI Version %s >>>%s' % (color.LightBlue, self.version, color.Normal)\n print self.workbench.help('version')", "title": "" }, { "docid": "3f7f4efd8630937410dd4611b1a16d3a", "score": "0.5644852", "text": "def notes(self) -> Optional[Sequence['outputs.ServerDiskEditParameterNote']]:\n return pulumi.get(self, \"notes\")", "title": "" }, { "docid": "9746cb77561ab2400e1061719ef08a79", "score": "0.563902", "text": "def version(self):\n if self._version is None:\n self._version = Version(self.tag[1:])\n self.log.info(\"version of %s was %s\" % (self.master, self._version))\n return self._version", "title": "" }, { "docid": "944dddeeca99e614263cc96dff20141f", "score": "0.5637317", "text": "def GetVersionInfo(self):\n # Placeholder version for non-Chrome OS builds.\n return manifest_version.VersionInfo('1.0.0')", "title": "" }, { "docid": "1a45e47a08310f46de68875d648e5955", "score": "0.5632959", "text": "def get_endpoint_version_info(self):\n\n url = f\"{self.base_url}/versioninfo\"\n response = HttpMethods(self, url).request(\"GET\", self.user, self.password)\n return response", "title": "" }, { "docid": "b28e9a20dc4707d8844100c50959c87d", "score": "0.5625358", "text": "def version_string(self):\r\n return self.server_version + ' ' + self.sys_version", "title": "" }, { "docid": "3757d2a2af01daec6b8317587a24d991", "score": "0.5618679", "text": "def get_revision():\n try:\n tmpout = subprocess.Popen('cd ' + os.path.dirname(__file__) +\n ' ; git log -n 1 --pretty=format:%H',\n shell=True,\n bufsize=80,\n stdout=subprocess.PIPE).stdout\n revision = tmpout.read().decode()[:6]\n if len(revision) > 0:\n ret = '+dev' + revision\n else:\n ret = ''\n except:\n ret= ''\n return ret", "title": "" }, { "docid": "586976ecc7243855edab583aef1d0ede", "score": "0.5612666", "text": "def get_version():\n return '.'.join([str(i) for i in _VERSION])", "title": "" }, { "docid": "1580b8e91b5ff93c7b17b0e6c353d39e", "score": "0.56117505", "text": "def version( request ):\n rq_now = datetime.datetime.now()\n commit = version_helper.get_commit()\n 
branch = version_helper.get_branch()\n info_txt = commit.replace( 'commit', branch )\n context = version_helper.make_context( request, rq_now, info_txt )\n output = json.dumps( context, sort_keys=True, indent=2 )\n return HttpResponse( output, content_type='application/json; charset=utf-8' )", "title": "" }, { "docid": "ed1e48e5925b5844de3c1dc698006946", "score": "0.5607222", "text": "def get_version(self, major: POINTER(c_int), minor: POINTER(c_int), rev: POINTER(c_int)):\n pass", "title": "" }, { "docid": "2749f9d8577eec5b3170e0647ccd7493", "score": "0.56054324", "text": "def version(self):\n return self._props[\"version\"]", "title": "" } ]
807735cb0993beeba81fc77d92f40936
Status of the parent submission is updated based on modifications made to related objects
[ { "docid": "b189d60b3cc419d09c33fe0a1e146477", "score": "0.55623835", "text": "def test_changing_moderation_changes_status(self):\n target = \"https://google.com/1234\"\n submission: Submission = Submission.objects.create(\n target_url=target,\n description=\"this is a submission\",\n title=\"url title\",\n owner=rw_for([Submission]),\n )\n submission.save()\n self.assertEqual(Moderation.objects.filter(target_url=target).count(), 0)\n self.assertEqual(submission.status, SubmissionStatuses.PENDING)\n mod = Moderation(submission=submission, status=ModerationStatuses.ACCEPTED)\n mod.save()\n self.assertEqual(submission.status, SubmissionStatuses.ACCEPTED)\n mod.status = ModerationStatuses.REJECTED\n mod.save()\n self.assertEqual(submission.status, SubmissionStatuses.REJECTED_MOD)", "title": "" } ]
[ { "docid": "a1d86117ba2222d5eefdb15a0ed6e6bb", "score": "0.60847354", "text": "def parent_changed(self, old, new):\n pass", "title": "" }, { "docid": "8212d204dbd53fef809491f07b850dd1", "score": "0.6048187", "text": "def update_submission(self, submission: DbSubmission) -> bool:", "title": "" }, { "docid": "9f054ee42205b81b949e2157a81e6779", "score": "0.60058475", "text": "def perform_update(self, serializer):\n if 'status' in self.request.data:\n instance = self.get_object()\n if instance.status != 'cancelled':\n descendants = instance.get_descendant_instances()\n if instance.status == 'started':\n cancel_plugin_instance.delay(instance.id) # call async task\n for plg_inst in descendants:\n plg_inst.status = 'cancelled'\n plg_inst.save()\n\n super(PluginInstanceDetail, self).perform_update(serializer)", "title": "" }, { "docid": "2d7ea333db94d60650e985a8c375d9bd", "score": "0.58595216", "text": "def update(self):\n j = self.multyvac.job.get(self.jid)\n self.__dict__ = j.__dict__\n return self.status", "title": "" }, { "docid": "ca3cc1fc1afb3a299e93b8e37f00397f", "score": "0.5797062", "text": "def update_commitment(self, commitment_form):\n pass", "title": "" }, { "docid": "e67f81bd4b74ce024e9a548a1e4e73ca", "score": "0.57337546", "text": "def update(\n self,\n resource_name,\n parent_id,\n object_id,\n obj,\n id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n ):\n raise NotImplementedError", "title": "" }, { "docid": "c275764dd607211e5a2b8beb17995fe5", "score": "0.57112396", "text": "def update_parent_document_on_communication(doc):\n\n\tparent = get_parent_doc(doc)\n\tif not parent:\n\t\treturn\n\n\t# update parent mins_to_first_communication only if we create the Email communication\n\t# ignore in case of only Comment is added\n\tif doc.communication_type == \"Comment\":\n\t\treturn\n\n\tstatus_field = parent.meta.get_field(\"status\")\n\tif status_field:\n\t\toptions = (status_field.options or \"\").splitlines()\n\n\t\t# if status has a \"Replied\" option, then update the status for received communication\n\t\tif (\"Replied\" in options) and doc.sent_or_received == \"Received\":\n\t\t\tparent.db_set(\"status\", \"Open\")\n\t\t\tparent.run_method(\"handle_hold_time\", \"Replied\")\n\t\t\tapply_assignment_rule(parent)\n\n\tupdate_first_response_time(parent, doc)\n\tset_avg_response_time(parent, doc)\n\tparent.run_method(\"notify_communication\", doc)\n\tparent.notify_update()", "title": "" }, { "docid": "b4d90e4bf7f467461e70798af8e01226", "score": "0.5662404", "text": "def change_submission_status(syn, submissionid, status=\"RECEIVED\"):\n sub_status = syn.getSubmissionStatus(submissionid)\n sub_status.status = status\n sub_status = syn.store(sub_status)\n return sub_status", "title": "" }, { "docid": "05f6cd0cda4d38c60e7b4d5eb1a7f57d", "score": "0.5587407", "text": "def altered(self):\n\t\tprint(\"PARENT altered()\")", "title": "" }, { "docid": "585558e1bbabcd800ed82c2210bae65b", "score": "0.55656415", "text": "def save(self, commit=True):\n # update success date if status has changed\n status = self.fields['status']\n initial = self.initial.get('status', None)\n data = self.cleaned_data['status'].pk\n instance = self.instance\n\n if status.has_changed(initial, data):\n # set successful date\n if instance.status.probability >= 1:\n today = timezone.localdate()\n instance.successful_date = today\n else:\n instance.successful_date = None\n\n # update job candidate status accordingly\n job_candidate = instance.job_candidate\n job_candidate.status_id = 
self.instance.status.job_status_id\n job_candidate.save()\n\n return super().save(commit)", "title": "" }, { "docid": "cd3e457abf50604bc6c6a6fe1851cd8d", "score": "0.55531865", "text": "def action_update(self):\n pr = self._get_pr()\n if self.related_type == 'github':\n if pr[0]:\n self.pull_request = pr[0].title\n self.pull_request_link = pr[0]._rawData['_links']['html']['href']\n commits = pr[0].get_commits()\n for commit in commits:\n commit_list = self.env['vcs.commit'].search([\n ('sha_string', '=', commit.sha),\n ('type', '=', 'github')\n ])\n if not commit_list:\n vcs_commit = self.env['vcs.commit'].create({\n 'sha_string': commit.sha,\n 'type': 'github',\n 'branch_ids': [(4, self.id)],\n 'author': commit.raw_data['commit']['author']['name'],\n 'name': commit.raw_data['commit']['message'],\n 'date': fields.Date.from_string(\n commit.raw_data['commit']['author']['date']),\n 'url': commit._html_url.value,\n })\n self.commit_ids = [(4, vcs_commit.id)]\n else:\n self.commit_ids = [(4, commit_list[0].id)]\n else:\n self.pull_request = \"No pull requests\"\n commit = self._get_branch()[0].commit\n commits = self.env['vcs.commit'].search([\n ('sha_string', '=', commit.sha),\n ('type', '=', 'github'),\n ])\n if commits and self.id not in commits[0].branch_ids.ids:\n commits[0].branch_ids = [(4, self.id)]\n if not commits:\n vcs_commit = self.env['vcs.commit'].create({\n 'sha_string': commit.sha,\n 'type': 'github',\n 'branch_ids': [(4, self.id)],\n 'author': commit.raw_data['commit']['author']['name'],\n 'name': commit.raw_data['commit']['message'],\n 'date': fields.Date.from_string(\n commit.raw_data['commit']['author']['date']),\n 'url': commit._html_url.value,\n })\n self.commit_id = vcs_commit.id\n else:\n self.commit_id = commits[0].id\n elif self.related_type == 'bitbucket':\n # TODO: implement for bitbucket\n if pr[0]:\n self.pull_request = pr[0].title\n self.pull_request_link = pr[0].links['html']['href']\n else:\n self.pull_request = \"No pull requests\"\n # Bitbucket does not require a PR to get branch commits\n # TODO: The list of commits is wrapped inside another list\n commits = self._get_commits()[0]\n for commit in commits:\n commit_list = self.env['vcs.commit'].search([\n ('sha_string', '=', commit.hash),\n ('type', '=', 'bitbucket')\n ])\n if not commit_list:\n vcs_commit = self.env['vcs.commit'].create({\n 'sha_string': commit.hash,\n 'branch_ids': [(4, self.id)],\n 'type': 'bitbucket',\n 'author': commit.author.display_name,\n 'name': commit.message,\n 'date': fields.Date.from_string(commit.date),\n 'url': commit.links['html']['href'],\n })\n self.commit_ids = [(4, vcs_commit.id)]\n else:\n self.commit_ids = [(4, commit_list[0].id)]\n self.commit_id = sorted(\n self.commit_ids, key=lambda x: x.date, reverse=True)[0]", "title": "" }, { "docid": "12e07fdeceb9206f2d8247b960988585", "score": "0.5547788", "text": "def send_request_to_parent(self):\r\n\r\n if self not in self.parent.local_queue:\r\n self.parent.local_queue.append(self)\r\n print('Node with id', self.parent.id_representative, 'has received a request from his child node with id',\r\n self.id_representative)\r\n print(tree)", "title": "" }, { "docid": "56a04805ba62894473b9a09004f79d4f", "score": "0.54339606", "text": "def parent_entity(self):", "title": "" }, { "docid": "c43e4bb14150ec9d429c5602365d061a", "score": "0.5410099", "text": "def updateFromParent(self):\n # set the gain options\n self.automaticGain.setChecked(self.settings['display'].automatic_gain)\n 
self.automaticGainTarget.setValue(self.settings['display'].automatic_gain_target)\n # set the gain\n gain = self.settings['display'].gain\n self.gainGroup.setValue(gain)\n self.gainGroup.setEnabled(not self.settings['display'].automatic_gain)\n # set the offset\n offset = self.settings['display'].offset\n self.offsetGroup.setValue(offset)\n # set the gamma\n gamma = self.settings['display'].gamma\n self.gammaGroup.setValue(gamma)\n \n if self.settings['display'].draw_projector_boundary:\n self.gridProjector.setChecked(True)\n else: \n # update grid type from parent\n if not self.settings['display'].draw_grid:\n self.gridNone.setChecked(True)\n elif self.settings['display'].grid_type == 'center':\n self.gridCenters.setChecked(True)\n elif self.settings['display'].grid_type == 'lenslet':\n self.gridBoundaries.setChecked(True)", "title": "" }, { "docid": "90bad553b051bfe6d24da5358dc500c0", "score": "0.5373489", "text": "def update(self):\n self.getstatus()", "title": "" }, { "docid": "2c2f7c85b42de73d3ded12a36b780318", "score": "0.5363673", "text": "def post_update(self):\n pass", "title": "" }, { "docid": "2f1055c16b1b106b7c2ae95c99905ff4", "score": "0.53618675", "text": "def update_submission_and_invoice_status(doc_uuid):\n submission = Submission.objects.get(subm_uuid=doc_uuid)\n invoice = InvoiceHeader.objects.get(id=submission.invoice_id)\n # when invoiced is cancelled from portal the invoice is cancelled locally\n submission.status = 'cancel'\n submission.save()\n invoice.invoice_status = 'cancel'\n invoice.save()", "title": "" }, { "docid": "cb175179aae2d79cd8ffd234f783f380", "score": "0.534374", "text": "def action_update(self):\n if self.related_type == 'github':\n local_branches = [br.name for br in self.branch_ids]\n remote_branches = []\n for b in self._get_repo()[0].get_branches():\n remote_branches.append(b.name)\n if b.name not in local_branches:\n print b.name\n br_res = self.env['vcs.branch'].create({\n 'name': b.name,\n 'repository_id': self.id\n })\n self.branch_ids = [(4, br_res.id)]\n for br in self.branch_ids:\n if br.name not in remote_branches:\n br.unlink()\n else:\n br.action_update()\n elif self.related_type == 'bitbucket':\n local_branches = [br.name for br in self.branch_ids]\n remote_branches = []\n for b in bb_branch.find_branches_in_repository(\n self.name.lower(),\n owner=self.owner,\n client=self.user_id._get_user()\n ):\n remote_branches.append(b.name)\n if b.name not in local_branches:\n br_res = self.env['vcs.branch'].create({\n 'name': b.name,\n 'repository_id': self.id\n })\n self.branch_ids = [(4, br_res.id)]\n for br in self.branch_ids:\n if br.name not in remote_branches:\n br.unlink()\n else:\n br.action_update()", "title": "" }, { "docid": "0034da9c3d2f5a98cfe232fbf928ac76", "score": "0.5342021", "text": "def update(self, sub):\n pass", "title": "" }, { "docid": "82d86fa87035c017964b8c3f78943b97", "score": "0.5339713", "text": "def update_job(self, job):", "title": "" }, { "docid": "0eebdfb4d3108906ae41339c4bd55b5f", "score": "0.53354955", "text": "def update(self):\n self._job = pyslurm.job().find_id(str(self.id))[0]", "title": "" }, { "docid": "3127417725f41c56c110d7a257df9928", "score": "0.53324777", "text": "def perform_update(self,serializer):\n serializer.save(owner = self.request.user)", "title": "" }, { "docid": "84974683341184e20d92cca57f004371", "score": "0.53303343", "text": "def submitted(self):\n return self.status >= SUBMITTED", "title": "" }, { "docid": "b9d8a8a230b64c33d0b00c72b886781d", "score": "0.5323683", "text": "def 
_assignToChildQueue(self, queue, *elements):\n workByRequest = {}\n for ele in elements:\n ele['Status'] = 'Negotiating'\n ele['ChildQueueUrl'] = queue\n ele['ParentQueueUrl'] = self.params['ParentQueueCouchUrl']\n ele['WMBSUrl'] = self.params[\"WMBSUrl\"]\n workByRequest.setdefault(ele['RequestName'], 0)\n workByRequest[ele['RequestName']] += 1\n work = self.parent_queue.saveElements(*elements)\n self.logger.info(\"Assigned work to the child queue for:\")\n for reqName, numElem in workByRequest.items():\n self.logger.info(\" %d elements for: %s\", numElem, reqName)\n return work", "title": "" }, { "docid": "cb14c3decee498d9bba05d60912703fb", "score": "0.5308571", "text": "def mark_as_submitted(self, submitted_on=None):\n self.submitted_on = submitted_on or now()\n self.status = InvestigationRequestStatus.submitted.name\n self.save()", "title": "" }, { "docid": "6e82258864b6beadc58625f2965508c5", "score": "0.53043824", "text": "def _update_fields(self):\n if self.id:\n self._update_doc_info()\n self._set_current_status()", "title": "" }, { "docid": "1749b88fdacd2b7d906a8aa7546d05ba", "score": "0.5282949", "text": "def reconcile(self):", "title": "" }, { "docid": "789892d74f5c3b7aeed6eb43edee3e2d", "score": "0.5282859", "text": "def mark_as_submitted(self, submitted_on=None):\n self.submitted_on = submitted_on or timezone.now()\n self.status = ChangeRequestStatus.submitted.name\n self.save()", "title": "" }, { "docid": "cd56f114212e251503b9c0467ecb7f00", "score": "0.5251297", "text": "def submit(self):\n\n if not self.submission_id:\n return dict(status=False, message='Submission identifier not found!')\n\n # retrieve submssion record from db\n\n # specify filtering\n filter_by = dict(_id=ObjectId(str(self.submission_id)))\n\n # specify projection\n query_projection = {\n \"_id\": 1,\n \"repository_docs.apikey\": 1,\n \"repository_docs.url\": 1,\n \"profile_id\": 1,\n \"meta.type\": 1,\n \"meta.params\": 1,\n \"complete\": 1\n\n }\n\n doc = Submission().get_collection_handle().aggregate(\n [\n {\"$addFields\": {\n \"destination_repo_converted\": {\n \"$convert\": {\n \"input\": \"$destination_repo\",\n \"to\": \"objectId\",\n \"onError\": 0\n }\n }\n }\n },\n {\n \"$lookup\":\n {\n \"from\": \"RepositoryCollection\",\n \"localField\": \"destination_repo_converted\",\n \"foreignField\": \"_id\",\n \"as\": \"repository_docs\"\n }\n },\n {\n \"$project\": query_projection\n },\n {\n \"$match\": filter_by\n }\n ])\n\n records = cursor_to_list(doc)\n\n # get submission record\n try:\n submission_record = records[0]\n except Exception as e:\n ghlper.logging_error(traceback.format_exc(), self.submission_id)\n message = \"Submission record not found. 
Please try resubmitting.\"\n ghlper.update_submission_status(status='error', message=message, submission_id=self.submission_id)\n Logger().exception(e)\n return dict(status='error', message=message)\n\n try:\n repository_info = submission_record['repository_docs'][0]\n except Exception as ex:\n ghlper.logging_error(traceback.format_exc(), self.submission_id)\n error_type = type(ex).__name__\n message = f\"Couldn't retrieve repository information due to the following error: '{error_type}'\"\n ghlper.update_submission_status(status='error', message=message, submission_id=self.submission_id)\n return dict(status='error', message=message)\n\n if str(submission_record.get(\"complete\", False)).lower() == 'true':\n message = 'Submission is marked as complete!'\n ghlper.logging_info(message, self.submission_id)\n ghlper.update_submission_status(status='success', message=message, submission_id=self.submission_id)\n\n return dict(status=True, message=message)\n\n # set submission parameters\n self.profile_id = submission_record.get(\"profile_id\", str())\n self.host = repository_info.get(\"url\", str())\n self.api_token = repository_info.get(\"apikey\", str())\n self.headers = {'X-CKAN-API-Key': self.api_token}\n self.api_url = self.host\n if self.host.endswith(\".org\"):\n self.api_url = urljoin(self.host, '/api/3/action/')\n\n # check submission context and select submission pathway\n type = submission_record.get(\"meta\", dict()).get(\"type\", str())\n params = submission_record.get(\"meta\", dict()).get(\"params\", dict())\n\n if type == \"new\": # create a dataset to submit\n return self._do_dataset_create_submit()\n\n if type == \"existing\": # a dataset specified proceed to submit\n return self._do_dataset_submit(**params)\n\n return dict(status=True, message=\"No status message provided!\")", "title": "" }, { "docid": "684c649edeb13e1ac9072e7223ab3322", "score": "0.5240272", "text": "def _update(self):\n # to be implemented in subclasses\n pass", "title": "" }, { "docid": "34b1c7b1b62b1920982bbd451396d03c", "score": "0.52208287", "text": "def submit(self):\n self.set_status(SUBMITTED)", "title": "" }, { "docid": "53779d3d7844a0ad87777bbeb5a025d0", "score": "0.5214662", "text": "def perform_update(self, serializer):\n\t\treturn serializer.save()", "title": "" }, { "docid": "f10b85c0de79d5b57ccb9b9ad48d9a84", "score": "0.52107495", "text": "def perform_update(self, serializer):\n class_instance = serializer.instance\n concurrences_data = {'unmodified': True}\n instances = [class_instance]\n student_instances = []\n\n # student flags\n flag_student_cancel = self.request.data.get('cancel_flag_student', False)\n flag_student_revert = self.request.data.get('revert_flag_student', False)\n flag_student_restore_in_class = self.request.data.get('restore_in_class_flag', False)\n flag_student_break = self.request.data.get('break_flag', False)\n flag_student_discontinued = self.request.data.get('discontinuation_flag', False)\n\n if flag_student_cancel:\n self.student_cancellation(class_instance)\n \n elif flag_student_revert:\n self.student_revert(class_instance)\n \n elif flag_student_restore_in_class:\n instances, student_instances = self.restore_break_process(class_instance)\n \n elif flag_student_break:\n instances, student_instances = self.break_process(class_instance)\n \n elif flag_student_discontinued:\n instances, student_instances = self.discontinuation_process(class_instance)\n\n else:\n instances, concurrences_data = self.regular_update(class_instance)\n\n if not instances:\n return 
Response(concurrences_data)\n\n # update google calendar\n update_events = [self.update_gc_event(instance) for instance in instances]\n\n if student_instances:\n update_student_events = [self.update_parent_gc_event(instance) for instance in student_instances]\n update_events += update_student_events\n\n self.async_change_gc(update_events)\n\n # send teacher notification\n class_instance.send_change_event_notification_email(\n class_instance, len(instances), user=self.request.user.staff.full_name\n )\n\n return Response(serializer.data)", "title": "" }, { "docid": "cc0b88cd02cb9c6a2e75d30663e770e2", "score": "0.52101016", "text": "def update_single_submission_status(\n status, add_annotations, is_private=True, force=False\n):\n existing_annots = status.get(\"annotations\", dict())\n private_annotations = _submission_annotations_to_dict(\n existing_annots, is_private=True\n )\n\n public_annotations = _submission_annotations_to_dict(\n existing_annots, is_private=False\n )\n\n if not is_submission_status_annotations(add_annotations):\n private_added_annotations = add_annotations if is_private else dict()\n public_added_annotations = dict() if is_private else add_annotations\n else:\n private_added_annotations = _submission_annotations_to_dict(\n add_annotations, is_private=True\n )\n\n public_added_annotations = _submission_annotations_to_dict(\n add_annotations, is_private=False\n )\n\n # If you add a private annotation that appears in the public annotation,\n # it switches\n private_annotations = _switch_annotation_permission(\n public_added_annotations, private_annotations, force\n )\n\n public_annotations = _switch_annotation_permission(\n private_added_annotations, public_annotations, force\n )\n\n private_annotations.update(private_added_annotations)\n public_annotations.update(public_added_annotations)\n\n priv = to_submission_status_annotations(private_annotations, is_private=True)\n pub = to_submission_status_annotations(public_annotations, is_private=False)\n # Combined private and public annotations into\n # one Submission.Status.annotation\n combined_annotations = {\"stringAnnos\": [], \"longAnnos\": [], \"doubleAnnos\": []}\n for annotation_type in [\"stringAnnos\", \"longAnnos\", \"doubleAnnos\"]:\n private_annotation = priv.get(annotation_type)\n public_annotation = pub.get(annotation_type)\n private_annotation_exists = private_annotation is not None\n public_annotation_exists = public_annotation is not None\n if private_annotation_exists:\n combined_annotations[annotation_type].extend(private_annotation)\n if public_annotation_exists:\n combined_annotations[annotation_type].extend(public_annotation)\n # Remove annotation key if doesn't exist\n if not private_annotation_exists and not public_annotation_exists:\n combined_annotations.pop(annotation_type)\n status[\"annotations\"] = combined_annotations\n return status", "title": "" }, { "docid": "4b71b66163e3bf3d2eab86260e15bcb4", "score": "0.5207386", "text": "def update(self, instance, validated_data):\n subtask_data = validated_data.pop('subtasks')\n Project.objects.filter(pk=instance.pk).update(**validated_data, user=self.context['request'].user)\n project = Project.objects.get(pk=instance.pk)\n for subtask in subtask_data:\n subtask.pop('project')\n try:\n subtask_obj = Subtask.objects.get(project=project, name=subtask['name'])\n Subtask.objects.filter(pk=subtask_obj.pk).update(**subtask)\n except Subtask.DoesNotExist:\n Subtask.objects.create(project=project, **subtask)\n return project", "title": "" }, { "docid": 
"ab28376557dd1453a3e5692ec3dca120", "score": "0.52062476", "text": "def _apply_decision(self, status, comment_to_applicant=None):\n with transaction.atomic():\n self.comment_to_applicant = comment_to_applicant\n self.status = status\n self.decision_datetime = timezone.now()\n self.save()", "title": "" }, { "docid": "111326385631158b54feb14a9b525f59", "score": "0.5205791", "text": "def update(self):\n \n pass", "title": "" }, { "docid": "beaa8bc69f430713b8d5fe7014053b15", "score": "0.52023894", "text": "def done(self):\n\n\t\tif self.start_process == False and self.two_parent == False:\n\t\t\t\"\"\" Run When One Parent operation Found \"\"\" \n\t\t\tif self.lot_tree_id:\n\t\t\t\ttotal_qty =0\n\t\t\t\tfor x in self.lot_tree_id:\n\t\t\t\t\tif x.lot_issue_check == True:\n\t\t\t\t\t\tself.approve()\n\t\t\t\t\t\ttotal_qty = total_qty + x.lot_qty\n\t\t\t\t\t\tcreate_lot = self.env['manufacturing.lots'].create({\n\t\t\t\t\t\t\t'mo_id': x.mo_id.id,\n\t\t\t\t\t\t\t'sale_order_link': self.mo_id.sale_order.id,\n\t\t\t\t\t\t\t'work_order_id': x.work_order_id.id,\n\t\t\t\t\t\t\t'wo_link': self.work_id.id,\n\t\t\t\t\t\t\t'operation_name': self.parent_op.id,\n\t\t\t\t\t\t\t'lot_qty': x.lot_qty,\n\t\t\t\t\t\t\t'to_receive': x.lot_qty,\n\t\t\t\t\t\t\t'contractor': x.contractor.id,\n\t\t\t\t\t\t\t'i_date': date.today(),\n\t\t\t\t\t\t\t'sizes': self.sizes.id,\n\t\t\t\t\t\t\t'stages': \"lot_create\"\n\t\t\t\t\t\t})\n\t\t\t\t\t\tprint create_lot\n\t\t\t\t\t\tparent_add_in_child = self.env['manufacturing.lots.tree'].create({\n\t\t\t\t\t\t\t'lot_id': x.id,\n\t\t\t\t\t\t\t'lot_tree': create_lot.id\n\t\t\t\t\t\t})\n\t\t\t\t\t\tchild_add_in_parent = self.env['lots.parent'].create({\n\t\t\t\t\t\t\t'parent_lot_tree':create_lot.id,\n\t\t\t\t\t\t\t'lot_tree':x.id\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\tx.stages = \"done\"\n\t\t\t\t\trecord =self.env['plan.history.tree'].search([('id','=',self.parent_history_id)])\n\t\t\t\t\tif record:\n\t\t\t\t\t\trecord.isse_qty = record.isse_qty + total_qty\n\t\t\t\t\t\trecord.remaning_qty = record.qty - record.isse_qty\n\n\t\tif self.start_process == True and self.two_parent == False:\n\t\t\t\"\"\" Run When parent operation not found \"\"\"\n\t\t\tif self.parent_history_id:\n\t\t\t\trecord =self.env['plan.history.tree'].search([('id','=',self.parent_history_id)])\n\t\t\t\tif record:\n\t\t\t\t\tself.approve()\n\t\t\t\t\tsizes = self.env['varients.tree'].search([('link','=',self.mo_id.id)]).sorted(key=lambda r: r.qty)\n\t\t\t\t\tavailable_qty = self.total_lots * self.lot_qty\n\t\t\t\t\tif available_qty > record.remaning_qty:\n\t\t\t\t\t\traise ValidationError(\"Total Remaining QTY Of This Plan IS \" + \tstr(record.remaning_qty) + \" You Issue \" + str(available_qty) +\" \")\n\t\t\t\t\telse:\n\t\t\t\t\t\trecord.isse_qty = available_qty\n\t\t\t\t\t\trecord.remaning_qty = record.qty - record.isse_qty\n\t\t\t\t\tfor n in reversed(sizes):\n\t\t\t\t\t\tlot_size = self.lot_qty\n\t\t\t\t\t\tto_lot_qty = 0\n\t\t\t\t\t\tif available_qty > 0:\n\t\t\t\t\t\t\talready_planned_qty = 0\n\t\t\t\t\t\t\tplanned_lots = self.env['manufacturing.lots'].search([('mo_id','=',self.mo_id.id),('operation_name','=',self.parent_op.id),('sizes','=',n.size_id.id)])\n\t\t\t\t\t\t\tfor plan in planned_lots:\n\t\t\t\t\t\t\t\talready_planned_qty = already_planned_qty + plan.lot_qty\n\t\t\t\t\t\t\tto_plan_qty = n.qty - already_planned_qty\n\t\t\t\t\t\t\tif available_qty == to_plan_qty:\n\t\t\t\t\t\t\t\tto_lot_qty = available_qty\n\n\t\t\t\t\t\t\tif available_qty > to_plan_qty:\n\t\t\t\t\t\t\t\tto_lot_qty = 
to_plan_qty\n\t\t\t\t\t\t\tif available_qty < to_plan_qty:\n\t\t\t\t\t\t\t\tto_lot_qty = available_qty\n\n\t\t\t\t\t\t\twhile to_lot_qty > 0:\n\t\t\t\t\t\t\t\tif lot_size > to_lot_qty:\n\t\t\t\t\t\t\t\t\tlot_size = to_lot_qty\n\n\t\t\t\t\t\t\t\tlot = self.env['manufacturing.lots'].create({\n\t\t\t\t\t\t\t\t\t'mo_id': self.mo_id.id,\n\t\t\t\t\t\t\t\t\t'sale_order_link': self.mo_id.sale_order.id,\n\t\t\t\t\t\t\t\t\t'work_order_id': record.wo.id,\n\t\t\t\t\t\t\t\t\t'contractor': self.cell.guru.id,\n\t\t\t\t\t\t\t\t\t'lot_qty': lot_size,\n\t\t\t\t\t\t\t\t\t'to_receive': lot_size,\n\t\t\t\t\t\t\t\t\t'operation_name': self.parent_op.id,\n\t\t\t\t\t\t\t\t\t'sizes': n.size_id.id,\n\t\t\t\t\t\t\t\t\t'cell': self.cell.id,\n\t\t\t\t\t\t\t\t\t'i_date': date.today(),\n\t\t\t\t\t\t\t\t\t'stages': \"lot_create\",\n\t\t\t\t\t\t\t\t\t'prototype': record.prototype.id,\n\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t\tto_lot_qty = to_lot_qty - lot_size\n\t\t\t\t\t\t\t\tavailable_qty = available_qty - lot_size\n\n\n\n\n\n\t\tif self.start_process == False and self.two_parent == True:\n\t\t\t\"\"\" Run When multiple parent Entery Found \"\"\"\n\t\t\tif self.lot_tree_id:\n\t\t\t\ttotal_qty =0\n\t\t\t\toperation_id =[]\n\t\t\t\tfor x in self.lot_tree_id:\n\t\t\t\t\tif x.operation_name.id not in operation_id:\n\t\t\t\t\t\toperation_id.append(x.operation_name.id)\n\t\t\t\toperation_len_match = []\n\t\t\t\tfor x in operation_id:\n\t\t\t\t\tequal_record_issue = self.env['manufacturing.lots'].search([('lot_issue_id','=',self.id),('lot_issue_check','=',True),('operation_name','=',x)])\n\t\t\t\t\tif len(equal_record_issue) not in operation_len_match:\n\t\t\t\t\t\toperation_len_match.append(len(equal_record_issue))\n\t\t\t\tif len(operation_len_match) > 1:\n\t\t\t\t\traise ValidationError(\"Child Operation Mis Match Isuue\")\n\t\t\t\telse:\n\t\t\t\t\tno_of_lot = 0\n\t\t\t\t\tfor o in operation_id:\n\t\t\t\t\t\tlot_qty = self.env['manufacturing.lots'].search([('lot_issue_id','=',self.id),('lot_issue_check','=',True),('operation_name','=',o)])\n\t\t\t\t\t\tif lot_qty:\n\t\t\t\t\t\t\tno_of_lot = len(lot_qty)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tfor x in xrange(0,no_of_lot):\n\t\t\t\t\t\trecord =self.env['plan.history.tree'].search([('id','=',self.parent_history_id)])\n\t\t\t\t\t\tif record:\n\t\t\t\t\t\t\tcreate_lot = self.env['manufacturing.lots'].create({\n\t\t\t\t\t\t\t\t'mo_id': self.mo_id.id,\n\t\t\t\t\t\t\t\t'sale_order_link': self.mo_id.sale_order.id,\n\t\t\t\t\t\t\t\t'work_order_id': record.wo.id,\n\t\t\t\t\t\t\t\t'contractor': self.cell.guru.id,\n\t\t\t\t\t\t\t\t'lot_qty':self.lot_qty,\n\t\t\t\t\t\t\t\t'to_receive': self.lot_qty,\n\t\t\t\t\t\t\t\t'operation_name': self.parent_op.id,\n\t\t\t\t\t\t\t\t'i_date': date.today(),\n\t\t\t\t\t\t\t\t'cell': self.cell.id,\n\t\t\t\t\t\t\t\t'sizes': self.sizes.id,\n\t\t\t\t\t\t\t\t'prototype': record.prototype.id,\n\t\t\t\t\t\t\t\t'stages': \"lot_create\"\n\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\tparent_op_history = []\n\t\t\t\t\t\t\tif self.lot_tree_id:\n\t\t\t\t\t\t\t\t\"\"\" Create Parent And Child History Of All Lots \"\"\"\n\t\t\t\t\t\t\t\tfor x in self.lot_tree_id:\n\t\t\t\t\t\t\t\t\tif x.lot_issue_check == True:\n\t\t\t\t\t\t\t\t\t\tif x.operation_name.id not in parent_op_history:\n\t\t\t\t\t\t\t\t\t\t\tparent_op_history.append(x.operation_name.id)\n\t\t\t\t\t\t\t\tfor track in parent_op_history:\n\t\t\t\t\t\t\t\t\tlot = 
self.env['manufacturing.lots'].search([('lot_issue_id','=',self.id),('lot_issue_check','=',True),('operation_name.id','=',track),('stages','=','receive')])\n\t\t\t\t\t\t\t\t\tfor m in lot:\n\t\t\t\t\t\t\t\t\t\tchild_add_in_parent = self.env['lots.parent'].create({\n\t\t\t\t\t\t\t\t\t\t\t'parent_lot_tree':create_lot.id,\n\t\t\t\t\t\t\t\t\t\t\t'lot_tree':m.id\n\t\t\t\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t\t\t\tparent_add_in_child = self.env['manufacturing.lots.tree'].create({\n\t\t\t\t\t\t\t\t\t\t\t'lot_id': m.id,\n\t\t\t\t\t\t\t\t\t\t\t'lot_tree': create_lot.id\n\t\t\t\t\t\t\t\t\t\t\t})\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\tm.stages = \"done\"\n\t\t\t\t\t\t\t\t\t\tbreak\n\n\n\t\t\t\t\t\t\t\n\n\n\n\n\t\t\t\t\t\t\t# print operation_len_match\n\t\t\t\t\t\t\t# print \"111111111111111111s\"\n\t\t\t\t\t\t\t# for parent in operation_len_match:\n\t\t\t\t\t\t\t# \tprint parent\n\t\t\t\t\t\t\t# \tprint parent\n\t\t\t\t\t\t\t# \tprint parent\n\t\t\t\t\t\t\t# \tprint \"...........................\"\n\t\t\t\t\t\t\t\t# parent_record = self.env['manufacturing.lots'].search([('lot_issue_id','=',self.id),('lot_issue_check','=',True),('operation_name.id','=',parent),('stages','='.'receive')])\n\t\t\t\t\t\t\t\t# print parent_record\n\t\t\t\t\t\t\t\t# for i in parent_record:\n\t\t\t\t\t\t\t\t# \tpass\n\t\t\t\t\t\t\t\t\t# child_add_in_parent = self.env['lots.parent'].create({\n\t\t\t\t\t\t\t\t\t# 'parent_lot_tree':create_lot.id,\n\t\t\t\t\t\t\t\t\t# 'lot_tree':i\n\t\t\t\t\t\t\t\t\t# })\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\t\t# if parent_id:\n\t\t# \tfor p_id in parent_id:\n\t\t# \t\tif self.lot_tree_id_child:\n\t\t# \t\t\tfor child in self.lot_tree_id_child:\n\t\t# \t\t\t\t# print child\n\t\t# \t\t\t\t# print child.id\n\t\t# \t\t\t\t# print \"xxxxxxxxxxxxxxxxxxxxxxxxxx\"\n\t\t# \t\t\t\t# child.parent_lot_class = p_id\n\t\t# \t\t\t\tchild.issued_qty = child.lot_qty\n\t\t# \t\t\t\tchild.to_receive = child.lot_qty\n\t\t# \t\t\t\tchild.stages = \"lot_create\"\n\t\t# \t\t\t\tchild.i_date = date.today()\n\t\t# \t\t\t\tprint child.name\n\t\t# \t\t\t\tprint \"xxxxxxxxxxxxxxxx\"\n\t\t# \t\t\t\tp_lot = self.env['lots.parent'].create({\n\t\t# \t\t\t\t\t# 'lot_id': p_id\n\t\t# \t\t\t\t\t'parent_lot_tree':p_id,\n\t\t# \t\t\t\t\t'lot_tree':child.id\n\t\t# \t\t\t\t\t})\n\t\t# \t\t\t\tbreak\n\n\n\t\t# if self.start_process == True:\n\t\t# \t# if self.remian_qty < 0:\n\t\t# \t# \traise ValidationError(\"Remaining Qty Can't be Less than Zero.\")\n\t\t# \tcreate_lot = self.env['manufacturing.lots'].create({\n\t\t# \t\t'mo_id': self.work_id.mo_id.id,\n\t\t# \t\t'work_order_id': self.work_id.id,\n\t\t# \t\t'wo_link': self.work_id.id,\n\t\t# \t\t'operation_name': self.operation.id,\n\t\t# \t\t'lot_qty': self.new_lot_qty,\n\t\t# \t\t'to_receive': self.new_lot_qty,\n\t\t# \t\t'contractor': self.contractor.id,\n\t\t# \t\t'sizes': self.sizes.id,\n\t\t# \t\t'i_date': datetime.datetime.now(),\n\t\t# \t\t'rate': self.rate\n\t\t# \t})\n\n\t\t# \tcreate_lot.remaining_qty = create_lot.lot_qty - create_lot.issued_qty\n\t\t\t\t\t\t\n\t\t# if self.lot_tree_id:\n\t\t# \tissue_list = []\n\t\t# \tcount = 0\n\t\t# \tfor x in self.issue_id:\n\t\t# \t\tcount = count + 1\n\t\t# \t\tissue_list.append(x.issue_qty)\n\t\t# \tif count == 2:\n\t\t# \t\tif issue_list[0] != issue_list[1]:\n\t\t# \t\t\traise ValidationError('Select')\n\t\t# \t\telse:\n\t\t# \t\t\tfor y in self.issue_id:\n\t\t# \t\t\t\tself.assign_list(y.lot.id,y.lot_qty,y.issue_qty)\n\n\t\t# \telse:\n\t\t# \t\tfor y in self.issue_id:\n\t\t# \t\t\tself.assign_list(y.lot.id,y.lot_qty,y.issue_qty)\n\n\t\t# 
\tself.create_lot()\n\t\t# self.work_id.get_lots()", "title": "" }, { "docid": "2733649275b7933ec3f6c15593138c6a", "score": "0.51980484", "text": "def _save_non_history_instance(self):\n # Duplicate and reassign parent.\n for model, field in self._meta.parents.items():\n if getattr(self, '%s_id' % field.name) is not None:\n rel_obj = getattr(self, field.name)\n rel_obj.id = None\n rel_obj.save()\n setattr(self, '%s_id' % field.name, rel_obj.id)\n\n # Set the new update time on the non-archived version\n self.date_updated = datetime.now()", "title": "" }, { "docid": "1d1beb77daafd104a8b828584a3582d8", "score": "0.5194078", "text": "def _update(self):\n pass", "title": "" }, { "docid": "804881d412f6cc420f66c8eca17eda9b", "score": "0.51908225", "text": "def _closure_change_oldparent(self):\n return self._closure_old_parent_pk", "title": "" }, { "docid": "96bf3a5d9d522f1463829728f14c9889", "score": "0.5189298", "text": "def update_status(self):\n self.status = 1", "title": "" }, { "docid": "16b445ca1ce520d728d0665fe743c105", "score": "0.51837385", "text": "def change_all_submission_status(\n syn, evaluationid, submission_status=\"SCORED\", change_to_status=\"VALIDATED\"\n):\n submission_bundle = syn.getSubmissionBundles(evaluationid, status=submission_status)\n for _, status in submission_bundle:\n status.status = change_to_status\n syn.store(status)", "title": "" }, { "docid": "40d751b81edd76b7bf9e4b70792e0f22", "score": "0.51556474", "text": "def test_changing_fetched_accepted_doesnt_change_submission_status(self):\n target = \"https://google.com/1234\"\n submission: Submission = Submission.objects.create(\n target_url=target,\n description=\"this is a submission\",\n title=\"url title\",\n owner=rw_for([Submission]),\n )\n submission.save()\n self.assertEqual(submission.status, SubmissionStatuses.PENDING)\n\n fetching = Retrieval(submission=submission, status=RetrievalStatuses.FETCHED)\n fetching.save()\n self.assertEqual(submission.status, SubmissionStatuses.PENDING)\n\n fetching.status = RetrievalStatuses.REJECTED_BANNED\n fetching.save()\n self.assertEqual(submission.status, SubmissionStatuses.REJECTED_BANNED)\n\n fetching.status = RetrievalStatuses.REJECTED_ERROR\n fetching.save()\n self.assertEqual(submission.status, SubmissionStatuses.REJECTED_FETCH)", "title": "" }, { "docid": "5d80f13f899b1097f55eba9e33a1cb9b", "score": "0.515063", "text": "def parent(self):", "title": "" }, { "docid": "32219d259cf8cb830b4f17688774306d", "score": "0.51498216", "text": "def update_parents(cls, classification_entry, parent=None):\n classification_entry.parent_list = []\n\n if not parent:\n return\n\n curr_parent = parent\n while curr_parent is not None:\n classification_entry.parent_list.append(curr_parent.id)\n\n if curr_parent.parent:\n curr_parent = curr_parent.parent\n else:\n break", "title": "" }, { "docid": "9611aa57d137aef3c174ace3b4a06daf", "score": "0.51464474", "text": "def test_status_update_from_complete_to_inprocess():\n PRM().Project(prm_module_project) \\\n .run_integration(skip=True) \\\n .goto_lk().Card(prm_module_project).create_child_card('C1') \\\n .goto_card('C1').set_lane('completed').set_size(20).update() \\\n .run_integration() \\\n .goto_lk().Card('C1').set_lane('in_process').update() \\\n .run_integration() \\\n .goto_prm().Project(prm_module_project).verify_lk_total_cards(20)\\\n .verify_lk_in_process_child_cards(20)\\\n .verify_lk_completed_child_cards(0)\\\n .verify_lk_percent_of_cards_completed(0)", "title": "" }, { "docid": "7324df02f32e7f7e3b3f516248bdd66b", "score": 
"0.51351357", "text": "def record_comment(self, parent, reply, subreddit, submission, quote_name):\n self.history['parents'].append(parent.id)\n self.history['subreddits'][subreddit.display_name]['parents'].append(parent.id)\n # Record info of reply made by bot.\n if reply is not None:\n self.history['subreddits'][subreddit.display_name]['comments'].append(reply.id)\n self.history['subreddits'][subreddit.display_name]['holds'][quote_name] = reply.created_utc\n self.history['comments'][reply.id] = {'name': quote_name, 'time': reply.created_utc,\n 'subreddit': subreddit.display_name,\n 'submission': submission.id, 'parent': parent.id}\n self.save_history(self.history)", "title": "" }, { "docid": "e42681501ceb30dcec7747413b9d7c07", "score": "0.5133234", "text": "def update(self):\r\n diff = self._diff()\r\n if not diff:\r\n # Nothing to do!\r\n return\r\n self.parent.update_node(self, diff)", "title": "" }, { "docid": "6a7e63c08034f8cd26d03706418c433a", "score": "0.5129536", "text": "def __init__(self, parent):\n self._parent = parent\n self._dirty = False", "title": "" }, { "docid": "48ea44cf8654ccd7094b96555fe717e4", "score": "0.512294", "text": "def submit_for_review(self, by_user, to_user=None):\n for moderation_request in self.moderation_requests.all():\n action = moderation_request.actions.create(\n by_user=by_user, to_user=to_user, action=constants.ACTION_STARTED\n )\n # Lock the collection as it has been now submitted for moderation\n self.status = constants.IN_REVIEW\n self.save(update_fields=[\"status\"])\n # It is fine to pass any `action` from any moderation_request.actions\n # above as it will have the same moderators\n notify_collection_moderators(\n collection=self,\n moderation_requests=self.moderation_requests.all(),\n action_obj=action,\n )\n signals.submitted_for_review.send(\n sender=self.__class__,\n collection=self,\n moderation_requests=list(self.moderation_requests.all()),\n user=by_user,\n rework=False,\n )", "title": "" }, { "docid": "bfd1df4124cfbb49dc6edc86aeaa05bf", "score": "0.5120259", "text": "def mutate(self):\n\n\t\tpass", "title": "" }, { "docid": "4abb6977185970d4ebb21f3ae478e349", "score": "0.5115401", "text": "def fill_status(old_issue, issue, sheet_name, sheet_config, prs, is_new):\n if is_new:\n old_issue[\"Work status\"] = \"Pending\"", "title": "" }, { "docid": "fdbf5cbaf5a3dc5ba40c70074acafa1e", "score": "0.51141137", "text": "def save(self, *args, **kwargs):\n # We are returning self here so that the django-dag functions\n # add_child and add_parent\n # return the edge used to link the parent to the child\n super().save(*args, **kwargs)\n return self", "title": "" }, { "docid": "461253b6f786f1bcfce189c5989fca68", "score": "0.510518", "text": "def perform_update(self, serializer):\n if not (self.get_object().headers == serializer.validated_data.get('headers') and\n self.get_object().table_data == serializer.validated_data.get('table_data')):\n with reversion.create_revision():\n serializer.save()\n reversion.set_user(self.request.user)\n self.save_recent_activity(RecentActivity.UPDATE, obj=self.get_object().unit, unit=self.get_object().unit.pk,\n experiment=[unicode(obj.pk) for obj in self.get_object().unit.experiments.all()])", "title": "" }, { "docid": "d6a301f647fc47f11f358be9f88531a4", "score": "0.50956815", "text": "def compute_message_to_parent(self, parent, index, u, u_p):\n return super().compute_message_to_parent(parent, index, u, u_p)", "title": "" }, { "docid": "f4e8b341e5484df5dd0d2b2306664e6d", "score": "0.5085793", "text": "def 
test_status_update_to_completed():\n PRM().Project(prm_module_project) \\\n .run_integration(skip=True) \\\n .goto_lk().Card(prm_module_project).create_child_card('C1') \\\n .goto_card('C1').set_lane('not_started').set_size(15).update() \\\n .run_integration() \\\n .goto_lk().Card('C1').set_lane('completed').update() \\\n .run_integration() \\\n .goto_prm().Project(prm_module_project).verify_lk_total_cards(15)\\\n .verify_lk_not_started_child_cards(0)\\\n .verify_lk_completed_child_cards(15)\\\n .verify_lk_percent_of_cards_completed(100.0)", "title": "" }, { "docid": "61dfabd384a554d051fe281d1d8f2937", "score": "0.5082652", "text": "async def update_parent_gc_event(self, student_in_class):\n if student_in_class.gc_parent_event_id:\n prepare_data_and_update_event(\n student_in_class.gc_parent_event_id,\n student_in_class.class_id.location.parent_calendarId,\n event_name=student_in_class.gc_parent_title,\n description=student_in_class.gc_parent_event_description\n )", "title": "" }, { "docid": "e55d0b1f3b4cc4d240f1e25c702cfff2", "score": "0.50787", "text": "def commit(self):\n if self._parent is not None:\n self._parent._pos = self._pos", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", 
"title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.5060581", "text": "def update(self):\n pass", "title": "" }, { "docid": "c82b53c951a0767d50b7dd6575c9e8be", "score": "0.50573385", "text": "def self_update():", "title": "" }, { "docid": "a2b9d94a9e91e668cc7814070b9d2a9e", "score": "0.50513077", "text": "def needs_update(self):\n self.is_current = False\n self.put()", "title": "" }, { "docid": "6c4d46c43d7de691667058d179a1dbeb", "score": "0.50499105", "text": "def save(self, *args, **kwargs):\n\n if self.status != self.__initial_status:\n pass\n\n super(Story, self).save(*args, **kwargs)\n self.__initial_status = self.status", "title": "" }, { "docid": "dd23755c3f0af79fd2a81a3c0885279c", "score": "0.5048887", "text": "def parent_event(self, event):\n if self.is_active:\n old = event.old\n new = event.new\n with new.children_event_context():\n with old.children_event_context():\n if new is None:\n for obj in self.objects:\n obj.set_parent(None)\n else:\n new.insert_children(self, self.objects)", "title": "" }, { "docid": "d875e978a04f0f224dda9068d6c5b24c", "score": "0.50462985", "text": "def do_PATCH(self, request, requesting_user, nid_int):\n nest_date_jdata = NestDate.now().to_jdata()\n request.json['_updated'] = nest_date_jdata\n res = super(JobsEntryEndpoint, self).do_PATCH(request, \\\n requesting_user, nid_int)\n return res", "title": "" }, { "docid": "62a677f7a5a9a28cc000dbea01bdb222", "score": "0.50453097", "text": "def update_leaverequest_after_hr_approval(form, leaverequest):\n status = form.cleaned_data['status']\n document_submitted = form.cleaned_data['document_submitted']\n payment_settled = form.cleaned_data['payment_settled']\n\n leaverequest.status = \"hr_{}\".format(status)\n leaverequest.document_submitted = document_submitted\n leaverequest.payment_settled = payment_settled\n leaverequest.save()", "title": "" }, { "docid": "de2b7f10698e9cfdc4b0076d75617afc", "score": "0.50385666", "text": "def _update(self, kwargs):\n assert self.task_id\n\n count = Task.query.filter(\n Task.task_name == self.task_name,\n Task.task_id == self.task_id,\n Task.parent_id == self.parent_id,\n ).update(kwargs, synchronize_session=False)\n return bool(count)", "title": "" }, { "docid": "9e5f27afcef18643fb4ade265f7c44ad", "score": "0.50357497", "text": "def altered(self):\n\t\tprint(\"CHILD, before PARENT altered()\")\n\t\tsuper(Child, self).altered()\n\t\tprint(\"CHILD, after PARENT altered()\")", "title": "" }, { "docid": "4e8c2c3050aa9df13bdac9034cef5b59", "score": "0.50278753", "text": "def update(self) -> None:\n return", "title": "" }, { "docid": "e61e03340378125ac621734ab893f974", "score": "0.50275695", "text": "def no_status_change(self, check_instance, old_status, new_status):", "title": "" }, { "docid": "4503010552dcf23a2c7347000a2ade8c", "score": "0.5024862", "text": "def _updateStatus (self):\r\n if self._growth > 36 :\r\n self._status = 'Old'\r\n elif self._growth > 24 :\r\n self._status = 'Mature'\r\n elif self._growth > 12 :\r\n self._status = 'Young'\r\n elif self._growth > 0 :\r\n self._status = 'baby'\r\n else :\r\n self._status = \"Newly hatched\"", "title": "" }, { "docid": "349957db7c818522f7450def0521597a", "score": "0.5022644", "text": "def update(self, instance, validated_data):", "title": "" }, { "docid": 
"067778a8ec37823726ca56c666ee6d2b", "score": "0.5018873", "text": "def update(self):\n\t\tpass", "title": "" }, { "docid": "d739637830bcf5e93c0c4cb7681f9b5d", "score": "0.5009797", "text": "def pending(self):", "title": "" }, { "docid": "c407e9caa6edf2ae0f481cc30c6cfd99", "score": "0.5006793", "text": "def save(self, *args, **kwargs):\n if not self.child_type:\n self.child_type = self.__class__.__name__.lower()\n return super(MultiTableParentModel, self).save(*args, **kwargs)", "title": "" }, { "docid": "85c4e6d577089248f965fbdca73af2c1", "score": "0.50062126", "text": "def test_status_update_to_inprocess():\n PRM().Project(prm_module_project) \\\n .run_integration(skip=True) \\\n .goto_lk().Card(prm_module_project).create_child_card('C1') \\\n .goto_card('C1').set_size(5).set_lane('not_started').update() \\\n .run_integration() \\\n .goto_lk().Card('C1').set_lane('in_process').update() \\\n .run_integration() \\\n .goto_prm().Project(prm_module_project).verify_lk_total_cards(5)\\\n .verify_lk_not_started_child_cards(0)\\\n .verify_lk_in_process_child_cards(5)", "title": "" }, { "docid": "0c268167bde78e41e9983f8ea595e744", "score": "0.5002989", "text": "def do_update(self):\n pass", "title": "" }, { "docid": "d59adb99060ab9b52f48029cb4368bce", "score": "0.5000832", "text": "def status(self):\n \n raise NotImplementedError('Method not implemented in inherited class.')", "title": "" } ]
ad8880ee8f17bd629d02f1eaf372a9e2
Returns only the __texts of the __results.
[ { "docid": "279b0b24a1a59b79b6b13b762926587f", "score": "0.6544478", "text": "def gettext(self):\n return self.__texts", "title": "" } ]
[ { "docid": "6adc5f023979fa1cf5effeeff7709520", "score": "0.7373752", "text": "def get_texts(self):\n return self._texts", "title": "" }, { "docid": "8bd59f4ce3036abe9c663188ba6b656d", "score": "0.70294625", "text": "def all_text(self):\n return tweet_text.get_all_text(self)", "title": "" }, { "docid": "c04cc390462ec25904ff325da6488ec9", "score": "0.6857684", "text": "def get_text(self):", "title": "" }, { "docid": "1a893a12b6180a982101b74ba2ac4f11", "score": "0.662332", "text": "def get_text(self):\r\n if self.__fulltext:\r\n return self.__fulltext\r\n else:\r\n self.__fulltext = \"\\n\\n\".join(text.get_text()\r\n for text in self.__subtrees)\r\n return self.__fulltext", "title": "" }, { "docid": "4a282263bcee54a778c314f568ce1a26", "score": "0.65992236", "text": "def get_results():", "title": "" }, { "docid": "d9417d0f15c07bcd49477b9613c2c427", "score": "0.6564271", "text": "def result(self) -> list:\n\n return list(\n filter(\n lambda result_item: not result_item.is_empty,\n map(\n lambda plain_item: ResultItem(plain_text=plain_item),\n self.plain_items,\n ),\n )\n )", "title": "" }, { "docid": "725a663ece0723895ff06fd73a1f0c99", "score": "0.6516051", "text": "def get_text(self):\n\t\ttemp_res = self.get_text_blocks()\n\n\t\tres = defaultdict(lambda:\"\")\n\t\tres.update({k:\" \".join(v) for k, v in temp_res.items()})\n\n\t\treturn res", "title": "" }, { "docid": "9586a2675e6a788ace61b975f0fd4239", "score": "0.6510756", "text": "def texts(self):\n return self.TYPE_TEXTS[self.type]", "title": "" }, { "docid": "4eb913f14c27c5e3fd961086f23a246f", "score": "0.6444551", "text": "def searchable_text():", "title": "" }, { "docid": "09876fd7500353f3ad1c49711e0c1e5a", "score": "0.64258295", "text": "def get_text_contents(self):\r\n return self.get_contents()", "title": "" }, { "docid": "87889010ae94a0d9ef2a62682b7340c0", "score": "0.64183867", "text": "def results(self):\n return self.record['assay']['descr']['results']", "title": "" }, { "docid": "77e603da7bde1af8b612ca2a298c3d90", "score": "0.6398562", "text": "def SearchableText(self):\n base_result = super(PSJMagazineContainer, self).SearchableText()\n text_getter = queryUtility(ISearchableTextGetter)\n psj_attribs_text = ''\n if text_getter is not None:\n psj_attribs_text = text_getter(context=self)\n return '%s %s' % (base_result, psj_attribs_text)", "title": "" }, { "docid": "683ce3259bb3732d7df4a80c9bb7cb79", "score": "0.63823575", "text": "def text(self):\r\n return \"\\n\".join(line.text for line in self.lines)", "title": "" }, { "docid": "634ab27d1921d1a568763ecf58621fe9", "score": "0.63358736", "text": "def PrintTextResults(results):\n if results[\"user_tests\"]:\n print \"\\n** User Tests **\"\n PrintTextSuiteResults(results[\"user_tests\"])\n if results[\"fusion_tests\"]:\n print \"\\n** Fusion Tests **\"\n PrintTextSuiteResults(results[\"fusion_tests\"])\n if results[\"server_tests\"]:\n print \"\\n** Server Tests **\"\n PrintTextSuiteResults(results[\"server_tests\"])", "title": "" }, { "docid": "f166c229ba175060ca1adaa090224d80", "score": "0.633584", "text": "def raw_results(self):\n return self._results", "title": "" }, { "docid": "42bdad6e1d793ee9531153da119e025b", "score": "0.63349825", "text": "def getRxsText(self):\n if self.text == None:\n print(\"Must get author text before Rx list (use method: findText)\")\n return\n unc = uncommonWords(self.text)\n self.rxs = redditRx(unc)\n return self.rxs", "title": "" }, { "docid": "304f4b14d50a14862830cd2af6bf327b", "score": "0.62886125", "text": "def all_results(self):\n return 
list(self.results.find())", "title": "" }, { "docid": "cf3a57a2e1426ba83654ef6cd3f13741", "score": "0.6275552", "text": "def get_all_text(soup):\r\n return soup.get_text()", "title": "" }, { "docid": "9062a2cace084a3380dbd1bc7ab7e3a8", "score": "0.62249887", "text": "def _command_results_text(self, results, flaky):\n assert results\n lines = []\n\n # Add common description for multiple runs.\n flaky_suffix = ' (flaky in a repeated run)' if flaky else ''\n lines.append('Test: %s%s' % (results[0]['name'], flaky_suffix))\n lines.append('Flags: %s' % \" \".join(results[0]['flags']))\n lines.append('Command: %s' % results[0]['command'])\n lines.append('')\n\n # Add results for each run of a command.\n for result in sorted(results, key=lambda r: int(r['run'])):\n lines.append('Run #%d' % int(result['run']))\n lines.append('Exit code: %s' % result['exit_code'])\n lines.append('Result: %s' % result['result'])\n lines.append('')\n if result['stdout']:\n lines.append('Stdout:')\n lines.extend(result['stdout'].splitlines())\n lines.append('')\n if result['stderr']:\n lines.append('Stderr:')\n lines.extend(result['stderr'].splitlines())\n lines.append('')\n return lines", "title": "" }, { "docid": "b735897f91773bff3fbb97f885a901dc", "score": "0.6202102", "text": "def get_all_results():", "title": "" }, { "docid": "b735897f91773bff3fbb97f885a901dc", "score": "0.6202102", "text": "def get_all_results():", "title": "" }, { "docid": "494c0ddb2cc3a1c1a8228de0e839daf8", "score": "0.6200519", "text": "def extract_tweet_texts(tweets):\n for t in tweets:\n yield cleantext(t['text'])", "title": "" }, { "docid": "877e51d87ec7aa619aa9c00cde81f2bd", "score": "0.6197354", "text": "def get_sections_texts(self) -> str:\n\n return self.text", "title": "" }, { "docid": "10ab30ca60bdef3ceb6a7b9d11103149", "score": "0.61947906", "text": "def get_texts_user(self, username):\n return list(self.db.text.find({\"name\": username, \"type\": \"user\"}, {\"text\": 1, \"_id\": 0}))", "title": "" }, { "docid": "6c9c2d69a72cac634042dd15a2437079", "score": "0.61736536", "text": "def get_text(self):\n\t\treturn self.txt", "title": "" }, { "docid": "2c6b80cb2fe2efb30f53ed031c45e3f8", "score": "0.6173424", "text": "def get_texts(self):\n with self.getstream() as text_stream:\n for i, line in enumerate(text_stream):\n line = to_unicode(line)\n line = (TweetCorpus.case_normalizer or passthrough)(line)\n # line = self.case_normalizer(line)\n if self.mask is not None and not self.mask[i]:\n continue\n ngrams = []\n for ng in tokens2ngrams((TweetCorpus.tokenizer or str.split)(line), n=self.num_grams):\n if self.ignore_matcher(ng):\n continue\n ngrams += [ng]\n if not (i % 1000):\n print(line)\n print(ngrams)\n yield ngrams", "title": "" }, { "docid": "51f5edbca868f7e84da5af4faeb8ebd7", "score": "0.6150494", "text": "def getText():", "title": "" }, { "docid": "bbccfcbc641429e0d9a057ee8842a71c", "score": "0.613841", "text": "def list_of_answer_text(self):\n return [answer.text for answer in self.answers.all()]", "title": "" }, { "docid": "72aa15aa5c9ee742a1945ef9d2e65880", "score": "0.6135666", "text": "def return_results(self):\n return self.results", "title": "" }, { "docid": "72aa15aa5c9ee742a1945ef9d2e65880", "score": "0.6135666", "text": "def return_results(self):\n return self.results", "title": "" }, { "docid": "ba483f528ded1f9b239ea84d2300df94", "score": "0.61199564", "text": "def get_results(self):\n return self.results", "title": "" }, { "docid": "e392573ec2df02fb88dfff3e4ddd7cde", "score": "0.60968834", "text": "def 
results(self):", "title": "" }, { "docid": "345c90d092ed21cfbbdea7f66d3e2dc4", "score": "0.60856307", "text": "def text(self):\n return tweet_text.get_text(self)", "title": "" }, { "docid": "1c6f8b226b85c20b4433f781a529c2df", "score": "0.6076634", "text": "def read_results(self):\n pass", "title": "" }, { "docid": "97bf61d5f28fe5acfddd621b5e1aae54", "score": "0.60744905", "text": "def results(self, as_list: bool = False) -> str | list[str]:\n text = [\n f\"{self.common_name} results:\",\n f\"File: {self.image.truncated_path}\",\n f\"The detected inplane field size was {self.field_width_y:2.1f}mm\",\n f\"The detected crossplane field size was {self.field_width_x:2.1f}mm\",\n f\"The inplane field was {self.field_epid_offset_mm.y:2.1f}mm from the EPID CAX\",\n f\"The crossplane field was {self.field_epid_offset_mm.x:2.1f}mm from the EPID CAX\",\n f\"The inplane field was {self.field_bb_offset_mm.y:2.1f}mm from the BB inplane center\",\n f\"The crossplane field was {self.field_bb_offset_mm.x:2.1f}mm from the BB crossplane center\",\n ]\n if as_list:\n return text\n else:\n text = \"\\n\".join(text)\n return text", "title": "" }, { "docid": "4181388127432deb98e066a12ac925de", "score": "0.60623956", "text": "def results():\n return retract_results()", "title": "" }, { "docid": "3e7f16f3f6a5b4be1b3a74d266b95f9b", "score": "0.6059611", "text": "def results(self, as_list: bool = False) -> str | list[str]:\n text = [f\"{self.common_name} results:\", f\"File: {self.image.truncated_path}\"]\n text += [\n f\"Median Contrast: {np.median([roi.contrast for roi in self.low_contrast_rois]):2.2f}\",\n f\"Median CNR: {np.median([roi.contrast_to_noise for roi in self.low_contrast_rois]):2.1f}\",\n f'# Low contrast ROIs \"seen\": {sum(roi.passed_visibility for roi in self.low_contrast_rois):2.0f} of {len(self.low_contrast_rois)}',\n ]\n if not as_list:\n text = \"\\n\".join(text)\n return text", "title": "" }, { "docid": "d9ef34645e8ddaa626e3702f3f2dfa9d", "score": "0.6050992", "text": "def get_results(self):\r\n return self._results", "title": "" }, { "docid": "f46019ab47abc63ba3173b7e7e5222d5", "score": "0.6050894", "text": "def text(self):\n return self[\"text\"]", "title": "" }, { "docid": "cc8780d854bcf65547c06689713bfe1e", "score": "0.60405785", "text": "def text(self):\n return ''.join(self.__text).strip()", "title": "" }, { "docid": "2d1ad37c3b93ad8cdf95c6bcef32b385", "score": "0.6034187", "text": "def _result_text(self):\n results_text = getattr(settings, 'RESULTS160_RESULT_DISPLAY', {})\n results = {'detected': results_text.get('P', 'Detected'),\n 'not_detected': results_text.get('N', 'NotDetected')}\n return results", "title": "" }, { "docid": "d0f096d761a20d04f2fa920499cc1eb4", "score": "0.6034063", "text": "def get_text_by_row(self):\n\t\ttemp_res = self.get_text_blocks_by_row()\n\n\t\tres = defaultdict(list)\n\t\tres.update({k:[\" \".join(i) for i in v] for k, v in temp_res.items()})\n\n\t\treturn res", "title": "" }, { "docid": "c69873c11ee01bccb5ddb09bb929b037", "score": "0.6031383", "text": "def text(self) -> Sequence[str]:\n return pulumi.get(self, \"text\")", "title": "" }, { "docid": "8a0d767fdb0dc5d898c42fd86a71ff32", "score": "0.60196936", "text": "def get_text_data_list(self):\n return [self.path, self.desc]", "title": "" }, { "docid": "b6f73ed2628614c72944b80f77329e03", "score": "0.6015261", "text": "def sentences_return(self):\n\n\t\t# if content found\n\t\tif self.page_id_finder():\n\t\t\tif \"query\" in self.data.keys():\n\t\t\t\tself.wiki_search = 
self.data['query']['pages'][str(self.page_ids)]['extract']\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\t# if content not found\n\t\telse:\n\t\t\treturn False", "title": "" }, { "docid": "e5d26a6cb508eaf77375ae55dc4aaa9c", "score": "0.60144323", "text": "def _extract_text(self, tweets):\r\n all_text = []\r\n\r\n for tweet in tweets:\r\n tweet_text = tweet.full_text.encode(\"utf-8\").decode(\"utf-8\")\r\n # Replaces &amp; with & and removes unnecessary spacing\r\n tweet_text = re.sub(r\"&amp;\", \"&\", tweet_text)\r\n tweet_text = re.sub(r\"\\s+\", \" \", tweet_text, flags=re.I)\r\n tweet_text.strip()\r\n\r\n all_text.append(tweet_text)\r\n\r\n return all_text", "title": "" }, { "docid": "1ad1bffe3e5d8dff1088e2933252baf7", "score": "0.5999542", "text": "def raw_text(self):\n\n results = []\n for view in self.views:\n ctxobj = zope.component.queryAdapter(self, name=view)\n if ctxobj is not None:\n results.append(ctxobj.raw_text())\n return '\\n'.join(results)", "title": "" }, { "docid": "331117a8030863743c423a00f80999b0", "score": "0.5993678", "text": "def GetPageText(self):", "title": "" }, { "docid": "b1ed1083c9571ffbf9ea7a2341f6d098", "score": "0.5982485", "text": "def results(self):\n raise NotImplementedError('Depreciated. Use \"result\" instead')", "title": "" }, { "docid": "61d345b9a5354da75dea5e05173b0c52", "score": "0.5981342", "text": "def get_text(self):\r\n return \"\".join(self.__text_nodes)", "title": "" }, { "docid": "12b36e32d44ff363d7e5c3000e000f61", "score": "0.5973525", "text": "def getEntries(self):\n return [x.getText() for x in self.entries]", "title": "" }, { "docid": "2322b3dd311e50d497f63feadbb6267a", "score": "0.5972269", "text": "def text(self):\r\n return self.__text", "title": "" }, { "docid": "f10b766699f759859c689a68653f5960", "score": "0.5969476", "text": "def makeTextList(url):\n identifiers = getIDs(url)\n texts = []\n\n for ID in identifiers:\n print('\\n')\n print(\"Getting \\\"\"+ID+\"\\\"...\")\n thisText = printItemText(ID)\n\n if thisText == \"\":\n print(\"\\\"\"+ID+\"\\\"\", \"not indexed.\")\n else:\n print(\"\\\"\"+ID+\"\\\"done.\")\n texts.append(thisText)\n\n \n \n \n return texts", "title": "" }, { "docid": "c01bcfe638f76106a84e417695677266", "score": "0.5968273", "text": "def getText(self, thrds):\n self.text = redditText(thrds, self.author)\n return self.text", "title": "" }, { "docid": "073ec4b8f644cf1ee7390e3465084167", "score": "0.59619355", "text": "def _get_text_nodes(self):\n return [i for i in self if isinstance(i.Data, str)]", "title": "" }, { "docid": "4c86b7f125caf26ab66ff723c26853fe", "score": "0.5939771", "text": "def texts(\n self, subreddit=None, date_range=None, score_range=None, min_len=0, limit=-1\n ):\n texts = self._iterate(\n True,\n subreddit=subreddit,\n date_range=date_range,\n score_range=score_range,\n min_len=min_len,\n limit=limit,\n )\n for text in texts:\n yield text", "title": "" }, { "docid": "6c3a5d1753daf93ad38dfe8d491ab423", "score": "0.59362507", "text": "def get_texts (self, selector):\n \n texts = []\n \n elems = self.driver.find_elements_by_css_selector (selector)\n \n for elem in elems: \n try: \n texts.append(elem.text)\n except Exception as err:\n input (err)\n continue\n \n return texts", "title": "" }, { "docid": "1e211d87a4fa89171d0c076aba0108be", "score": "0.5936086", "text": "def SearchableText(self):\n return self.Title() + self.getRawAlsoCalled()", "title": "" }, { "docid": "7dc4afa7659ee8a6b9033f2ef4507ed4", "score": "0.5932954", "text": "def get_text(self):\n return self.text", 
"title": "" }, { "docid": "a3fdea0f3fadd4d0b90d0164c9a47b7f", "score": "0.59288895", "text": "def read_results(self):\n return self._source.read_results()", "title": "" }, { "docid": "a3fdea0f3fadd4d0b90d0164c9a47b7f", "score": "0.59288895", "text": "def read_results(self):\n return self._source.read_results()", "title": "" }, { "docid": "1b3cc36e12a3843a1a0d0e4191f184fe", "score": "0.5919434", "text": "def results(self):\n return self._results", "title": "" }, { "docid": "d9d5b40c648d2238a4c4f16bed1d612c", "score": "0.5915504", "text": "def filter_results(self, results):\n return results", "title": "" }, { "docid": "8b709208f48fcbee444f9dc2d11a3f8a", "score": "0.5915181", "text": "def get_texts(self):\n with self.getstream() as text_stream:\n for i, line in enumerate(text_stream):\n line = SMSCorpus.case_normalizer(line)\n if self.mask is not None and not self.mask[i]:\n continue\n ngrams = []\n for ng in tokens2ngrams(self.tokenizer(line)):\n if SMSCorpus.ignore_matcher(ng):\n continue\n ngrams += [ng]\n yield ngrams", "title": "" }, { "docid": "481a296211134107b5574c83b006d81e", "score": "0.59075326", "text": "def all_text_without_links(self):\n return tweet_text.remove_links(self.all_text)", "title": "" }, { "docid": "c97064c36b79f413b587b8bffa328df3", "score": "0.58975327", "text": "def get_text(self):\n\n return self.text", "title": "" }, { "docid": "c308a6b5bf7eece80e21d1f7f2ceea08", "score": "0.58927935", "text": "def results(self):\n return self._report.get('results', [])", "title": "" }, { "docid": "0567d00432bd1fd6c711db7e8d7631b3", "score": "0.5881424", "text": "def get(self):\n return self[\"text\"]", "title": "" }, { "docid": "3c0d7cea3c45cb079c2706e40f3bc0e9", "score": "0.5878813", "text": "def get_results(self):\n return self._results", "title": "" }, { "docid": "3c0d7cea3c45cb079c2706e40f3bc0e9", "score": "0.5878813", "text": "def get_results(self):\n return self._results", "title": "" }, { "docid": "2a773679b251fa959ff4f4311f5ffc71", "score": "0.5873327", "text": "def results(self, as_list: bool = False) -> str | list[str]:\n text = [f\"{self.common_name} results:\", f\"File: {self.image.truncated_path}\"]\n if self.low_contrast_rois:\n text += [\n f\"Median Contrast: {np.median([roi.contrast for roi in self.low_contrast_rois]):2.2f}\",\n f\"Median CNR: {np.median([roi.contrast_to_noise for roi in self.low_contrast_rois]):2.1f}\",\n f'# Low contrast ROIs \"seen\": {sum(roi.passed_visibility for roi in self.low_contrast_rois):2.0f} of {len(self.low_contrast_rois)}',\n ]\n if self.high_contrast_rois:\n text += [\n f\"MTF 80% (lp/mm): {self.mtf.relative_resolution(80):2.2f}\",\n f\"MTF 50% (lp/mm): {self.mtf.relative_resolution(50):2.2f}\",\n f\"MTF 30% (lp/mm): {self.mtf.relative_resolution(30):2.2f}\",\n ]\n if not as_list:\n text = \"\\n\".join(text)\n return text", "title": "" }, { "docid": "54ed80cc53e3d1daab84973eb92f9a46", "score": "0.58624756", "text": "def get_content(results: Union[Element, List[Element]]) -> str:\n if isinstance(results, List):\n contents = [result._elem.text\n for result in results\n if result is not None and result._elem is not None and result._elem.text is not None]\n content = '\\n'.join(contents) if contents else None\n else:\n content = results._elem.text \\\n if results and results._elem and results._elem.text is not None else None\n return unescape(content) if content is not None else None", "title": "" }, { "docid": "9615c6570bdf38d3489e60a16e75ad3e", "score": "0.58555555", "text": "def get_results(self):\n\n return self.results_", 
"title": "" }, { "docid": "c61574d75882a10726baf0bbcfa87f59", "score": "0.58532786", "text": "def list(self, request):\n\t\ttry:\n\t\t\ttexts = Savetext.objects.filter(user= request.user.id)\n\t\t\tserializer = TextSerializer(texts, many=True)\n\n\t\t\treturn Response({\"count\": texts.count(), 'results': serializer.data}, status=status.HTTP_200_OK)\n\t\texcept: \n\t\t\traise Http404", "title": "" }, { "docid": "0167e0ff6c1aa91c874be4b58f36f617", "score": "0.5851111", "text": "def texts(self):\n richtexts = {}\n d = {k: getattr(self, k) for k in dir(self) if k != \"texts\"}\n\n text_re = re.compile(r\"^text([0-9]+)$\")\n for attr in d.keys():\n m = text_re.match(attr)\n if m:\n richtexts[int(m.group(1))] = RichText(getattr(self, m.group(0)))\n text_attr_re = re.compile(r\"^text([0-9]+)\\_([a-z_]+)$\")\n for attr in d.keys():\n m = text_attr_re.match(attr)\n if m:\n richtexts[int(m.group(1))][m.group(2)] = getattr(self, m.group(0))\n\n listtexts = [None] * (max(richtexts.keys()) + 1)\n for index, value in richtexts.items():\n listtexts[index] = value\n return listtexts", "title": "" }, { "docid": "e43d9615e72fa6e197959c23dea9b912", "score": "0.58475816", "text": "def rendered_text_lines(self):\n return self._rendered_text_lines", "title": "" }, { "docid": "4239337d800e35748cd941deea39bc6e", "score": "0.5840092", "text": "def text(self):\n raise NotImplementedError", "title": "" }, { "docid": "954067ccd94b5fb1520b2ac2fa94c335", "score": "0.58360755", "text": "def getItemTextList(self):\n logger.debug(util.funcName())\n return [str(item) for item in self]", "title": "" }, { "docid": "dfa12eb4e5377ac96045498edfbada82", "score": "0.583592", "text": "def each_result(self):\n return (result for result in self.results)", "title": "" }, { "docid": "cd12259012bdebeb347ced94f8c7c825", "score": "0.5828981", "text": "def lines(self):\r\n return self._text", "title": "" }, { "docid": "73eb9b7afecd1bf6ab55ec38563544d2", "score": "0.5827846", "text": "def text_lines(self):\n\n return self._text_lines", "title": "" }, { "docid": "993cc8d1136c93668642ffef89ee841e", "score": "0.58245283", "text": "def text(self):\r\n return self._r.t.text", "title": "" }, { "docid": "84725dc826ca6cc8c87e3c3070439c01", "score": "0.58202714", "text": "def getResults(self):\n return str(self.stats)", "title": "" }, { "docid": "ec35fb5b0b2057c7d5396a73dfabfd6a", "score": "0.5819849", "text": "def _results(self) -> List[Tuple]:\n if self._raw_results is None:\n self.run()\n return self._raw_results", "title": "" }, { "docid": "25fde4da79ec2796dc2cf86212032142", "score": "0.58155376", "text": "def text_new(self):\n souptext = self.soup\n hTagText = [tag.text.replace(u'\\xa0', '').replace('\\n', ' ').replace('\\r', '') for tag in\n souptext.find_all(re.compile('^h[1-6]$'))]\n pTagText = hTagText + [item.text.replace(u'\\xa0', '').replace('\\n', ' ').replace('\\r', '') for item in\n souptext.find_all('p') if item.text is not '']\n tdTagText = hTagText + [item.text.replace(u'\\xa0', '').replace('\\n', ' ').replace('\\r', '') for item in\n souptext.find_all('td') if item.text is not '']\n if len(pTagText) is not 0:\n return [text for text in pTagText if text]\n else:\n return [text for text in tdTagText if text]", "title": "" }, { "docid": "a59dc3972d24ac12b01055fcd7bfb850", "score": "0.58089286", "text": "def _getText(self, queryFullName):\n raise NotImplementedError(\"Subclasses must override _getText()!\")", "title": "" }, { "docid": "cad78d0a2b399b8bff42e14678fbd3fa", "score": "0.58058375", "text": "def _prettify(self, text):\n 
##removing the stopwords and convert to lowercase\n stopwords = nltk.corpus.stopwords.words('english');\n text_in_lowercase = re.sub(\"[^a-zA-Z]\", \" \", str(text)).split(\" \");\n filtered_text = []\n for word in text_in_lowercase:\n if word not in stopwords:\n filtered_text.append(word);\n\n return filtered_text;", "title": "" }, { "docid": "e986e82535369b9aff823fc868234ddf", "score": "0.580219", "text": "def text(self):\n return self._text", "title": "" }, { "docid": "e986e82535369b9aff823fc868234ddf", "score": "0.580219", "text": "def text(self):\n return self._text", "title": "" }, { "docid": "e986e82535369b9aff823fc868234ddf", "score": "0.580219", "text": "def text(self):\n return self._text", "title": "" }, { "docid": "63d93c0b7032ae2f8e5b830c53bca0d8", "score": "0.57897973", "text": "def getText(self):\r\n return self.text", "title": "" }, { "docid": "64c4437ebd32d4233477ccc7ca0f1d84", "score": "0.5788093", "text": "def _text(self, res):\n return res.text.strip().replace('\\xc2\\xa0', ' ').replace(\n '\\xa0', ' ').replace(' ', ' ').replace('\\n', ' ').replace(\n '\\r', ' ').replace(' ', ' ')", "title": "" }, { "docid": "ac217da02e370d0fb92cfc9b55b5e1b2", "score": "0.5787849", "text": "def set_text(self, results):\n\t\tfor result in results:\n\t\t\tself.control.BeginBold()\n\t\t\tself.control.BeginUnderline()\n\t\t\tself.control.WriteText(result.name)\n\t\t\tself.control.EndBold()\n\t\t\tself.control.EndUnderline()\n\t\t\tself.control.Newline()\n\t\t\tfor subquest in result.results:\n\t\t\t\tself.control.BeginBold()\n\t\t\t\tself.control.WriteText(\" \" + subquest.title)\n\t\t\t\tself.control.EndBold()\n\t\t\t\tself.control.Newline()\n\t\t\t\tself.control.BeginItalic()\n\t\t\t\tself.control.WriteText(\" \" + subquest.result)\n\t\t\t\tself.control.EndItalic()\n\t\t\t\tself.control.Newline()\n\t\t\tself.control.Newline()", "title": "" }, { "docid": "0420d223e3e7b9ae80f4c7fa720f225b", "score": "0.5781524", "text": "def text_plain(self):\n return self._text_plain", "title": "" }, { "docid": "80facc31e01f66a996e8b4c451ba8b5b", "score": "0.5779701", "text": "def getText(self):\n\n driver = self.driver\n\n try:\n text = WebDriverWait(driver,load_delay).until(\n EC.visibility_of_element_located(\n (By.CLASS_NAME,\"nonHideableWords\")\n )\n ).text\n arrText = text.split(' ')\n except TimeoutException:\n curr_err_msg = err_msgs[\"ERR_SIX\"]\n\n return arrText", "title": "" }, { "docid": "c48f06e094f82c06d49226b9eb173f42", "score": "0.57771695", "text": "def SearchableText(self):\n return ''", "title": "" }, { "docid": "b46b1cf970dc3ee7aa201490be210b79", "score": "0.5774659", "text": "def get_words(self) -> List[str]:\n ... 
# pragma: no cover", "title": "" }, { "docid": "7d3db957603fe252acbe2f27a5775214", "score": "0.57697856", "text": "def get_text(self, alias):\n tags = self.tags_by_alias(alias)\n for tag in tags:\n # Ignore if it's not a plain text SDT\n if not xpath(tag, './w:sdtPr/w:text'):\n continue\n\n tokens = []\n text_and_brs = xpath(tag, './w:sdtContent//w:r/*[self::w:t or self::w:br]')\n for el in text_and_brs:\n if QName(el).localname == 't':\n tokens.append(el.text)\n elif QName(el).localname == 'br':\n tokens.append('\\n')\n\n return ''.join(tokens)", "title": "" }, { "docid": "520f313495a928f8df72da06465f380f", "score": "0.57658136", "text": "def get_text_content_by_name(self, tag_name):\n results = self.find_tags_by_name(tag_name)\n return [result.text.strip() for result in results]", "title": "" }, { "docid": "3a91ee605ab9f307f623b5b60ead9bb0", "score": "0.57656157", "text": "def get_texts(self):\n articles, articles_all = 0, 0\n positions, positions_all = 0, 0\n texts = ((text, self.lemmatize, title, pageid) for title, text, pageid in extract_pages(bz2.BZ2File(self.fname), self.filter_namespaces))\n pool = multiprocessing.Pool(self.processes)\n # process the corpus in smaller chunks of docs, because multiprocessing.Pool\n # is dumb and would load the entire input into RAM at once...\n for group in utils.chunkize(texts, chunksize=10 * self.processes, maxsize=1):\n for tokens, title, pageid in pool.imap(process_article, group): # chunksize=10):\n articles_all += 1\n positions_all += len(tokens)\n # article redirects and short stubs are pruned here\n if len(tokens) < ARTICLE_MIN_WORDS or any(title.startswith(ignore + ':') for ignore in IGNORED_NAMESPACES):\n continue\n articles += 1\n positions += len(tokens)\n if self.metadata:\n yield (tokens, (pageid, title))\n else:\n yield tokens\n pool.terminate()\n\n logger.info(\n \"finished iterating over Wikipedia corpus of %i documents with %i positions\"\n \" (total %i articles, %i positions before pruning articles shorter than %i words)\",\n articles, positions, articles_all, positions_all, ARTICLE_MIN_WORDS)\n self.length = articles # cache corpus length", "title": "" } ]
41aaff270011b323101da95b59ee3a60
Calculate the great circle distance between two points on the earth (specified in decimal degrees)
[ { "docid": "6aae576098c963fcb21eed1748290320", "score": "0.697009", "text": "def calcDistance(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n EARTH_RADIUS = 6371 # Radius of earth in kilometers. Use 3956 for miles\n return c * EARTH_RADIUS", "title": "" } ]
[ { "docid": "2d03a8f9ad790b3cb18e7f3be6b64e59", "score": "0.78439695", "text": "def great_circle_distance(latlong_a, latlong_b):\n lat1, lon1 = latlong_a\n lat2, lon2 = latlong_b\n\n dLat = math.radians(lat2 - lat1)\n dLon = math.radians(lon2 - lon1)\n a = (math.sin(dLat / 2) * math.sin(dLat / 2) +\n math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * \n math.sin(dLon / 2) * math.sin(dLon / 2))\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = EARTH_CIRCUMFERENCE * c\n \n return d", "title": "" }, { "docid": "38d41fc7bbd817cc8499d12db8f3a2e8", "score": "0.7788668", "text": "def great_circle_distance(pt1, pt2):\n r = 6371.\n\n delta_latitude = math.radians(pt1[0] - pt2[0])\n delta_longitude = math.radians(pt1[1] - pt2[1])\n latitude1 = math.radians(pt1[0])\n latitude2 = math.radians(pt2[0])\n\n a = math.sin(delta_latitude / 2) ** 2 + math.cos(latitude1) * math.cos(latitude2) * math.sin(\n delta_longitude / 2) ** 2\n return r * 2. * math.asin(math.sqrt(a))", "title": "" }, { "docid": "8cd46cbc5031cb23f2a4427da03d3d5b", "score": "0.7731707", "text": "def great_circle_distance(a: Union[Coordinates, Iterable], b: [Coordinates, Iterable]):\n a, b = Coordinates(*a), Coordinates(*b)\n\n lat1, lng1 = math.radians(a.lat), math.radians(a.lng)\n lat2, lng2 = math.radians(b.lat), math.radians(b.lng)\n\n sin_lat1, cos_lat1 = math.sin(lat1), math.cos(lat1)\n sin_lat2, cos_lat2 = math.sin(lat2), math.cos(lat2)\n\n delta_lng = lng2 - lng1\n\n aux = (sin_lat1 * sin_lat2) + cos_lat1 * cos_lat2 * math.cos(delta_lng)\n\n # To avoid arccos failing for precision errors\n if aux > 1 and aux - 1 <= 0.000000001:\n aux = 1\n\n central_angle = math.acos(aux)\n\n return EARTH_RADIUS * central_angle", "title": "" }, { "docid": "321edadb82c48b9062b3fa6c693a72e2", "score": "0.757195", "text": "def dist_btw_two_coords(latDeg1,latDeg2,lonDeg1,lonDeg2):\n \n dist = earthradius_fct_of_lat(latDeg1)*np.arccos( np.sin(deg_to_rad(latDeg1))*np.sin(deg_to_rad(latDeg2)) + np.cos(deg_to_rad(latDeg1))*np.cos(deg_to_rad(latDeg2))*np.cos(deg_to_rad(lonDeg2 - lonDeg1)) )\n \n return dist", "title": "" }, { "docid": "9c9739291f50d86108df4f303e46acc5", "score": "0.7555244", "text": "def greatcircle_distance(long1, lat1, long2, lat2, R=6371):\n lambda1, phi1, lambda2, phi2 = map(np.radians, [long1, lat1, long2, lat2])\n Dphi = phi1 - phi2 # abs value not needed because of sin squared\n Dlambda = lambda1 - lambda2\n\n radical = (\n np.sin(Dphi / 2) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(Dlambda / 2) ** 2\n )\n\n return R * 2 * np.arcsin(np.sqrt(radical))", "title": "" }, { "docid": "928c588a3c7f151e085898a31782bf41", "score": "0.75322944", "text": "def distance(a, b):\n return great_circle((a[1], a[0]), (b[1], b[0])).meters", "title": "" }, { "docid": "7386df111e8d23164cf151b64b164f9e", "score": "0.75258446", "text": "def distance(point1,point2):\r\n\t# import the function...\r\n\tfrom geopy.distance import great_circle\r\n\t# format the inputs\r\n\tp1 = ( point1.latitude, point1.longitude )\r\n\tp2 = ( point2.latitude, point2.longitude )\r\n\treturn great_circle( p1, p2 ).meters", "title": "" }, { "docid": "b831b87254daf4bc23b89e58ba7d8b88", "score": "0.752385", "text": "def great_circle_vec(lat1, lng1, lat2, lng2, earth_radius=6371009):\r\n\r\n phi1 = np.deg2rad(lat1)\r\n phi2 = np.deg2rad(lat2)\r\n d_phi = phi2 - phi1\r\n\r\n theta1 = np.deg2rad(lng1)\r\n theta2 = np.deg2rad(lng2)\r\n d_theta = theta2 - theta1\r\n\r\n h = np.sin(d_phi / 2) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(d_theta / 2) ** 2\r\n h = 
np.minimum(1.0, h) # protect against floating point errors\r\n\r\n arc = 2 * np.arcsin(np.sqrt(h))\r\n\r\n # return distance in units of earth_radius\r\n distance = arc * earth_radius\r\n return distance", "title": "" }, { "docid": "8dd83c515759716ddc3bc6f1fa19aa3b", "score": "0.74690354", "text": "def dist(lat1, lon1, lat2, lon2):\n lat1, lon1, lat2, lon2 = map(math.radians, (lat1, lon1, lat2, lon2))\n return math.acos(math.sin(lat1) * math.sin(lat2) +\n math.cos(lat1) * math.cos(lat2) *\n math.cos(abs(lon2 - lon1))) * 6371000", "title": "" }, { "docid": "bf192793f4ec2eeeea979d3dd23d7301", "score": "0.7453097", "text": "def calc_distance(x1, y1, x2, y2):\n # Code coppied form:\n # http://stackoverflow.com/questions/4913349/haversine-formula-in-python-bearing-and-distance-between-two-gps-points\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [x1, y1, x2, y2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n\n # 6367 km is the radius of the Earth\n km = 6367 * c\n return km * 1000.0", "title": "" }, { "docid": "2233f07c12c29d9b47d4ffc59b020250", "score": "0.7441209", "text": "def distance(lat1, lon1, lat2, lon2):\n \n r = 6373\n\n # Convert to radians.\n degreesToRadians = math.pi/180.0\n \n lat1 = (90.0 - lat1)*degreesToRadians\n lon1 = lon1*degreesToRadians\n\n lat2 = (90.0 - lat2)*degreesToRadians\n lon2 = lon2*degreesToRadians\n\n dist = math.acos(math.sin(lat1)*math.sin(lat2)*math.cos(lon1 - lon2) \\\n + math.cos(lat1)*math.cos(lat2))*r\n\n # Return in meters.\n return dist*1000", "title": "" }, { "docid": "bbfef7f3fa2211422345e4f9725aca3f", "score": "0.7434925", "text": "def get_coord_distance(c1, c2):\n #print(c1, c2) \n R = 6371000 # Radius of the earth in meters\n lat1 = math.radians(c1[1])\n lat2 = math.radians(c2[1])\n\n lon1 = math.radians(c1[0])\n lon2 = math.radians(c2[0]) \n \n dlat = lat2 - lat1 \n dlon = lon2 - lon1 \n a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * \\\n math.sin(dlon/2)**2\n \n c = 2 * math.asin(math.sqrt(a)) \n\n return c * R", "title": "" }, { "docid": "5c6c67ea9295e74c410c9142a32c0367", "score": "0.74070156", "text": "def distance(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a)) \n\n # 6367 km is the radius of the Earth\n km = 6367 * c\n return km", "title": "" }, { "docid": "46d59ce893b34f742bee5d0d2708ea7b", "score": "0.7386912", "text": "def distance(lat1, lon1, lat2, lon2, H=0):\n\n # phi = 90 - latitude\n phi1 = np.radians(90.0 - lat1)\n phi2 = np.radians(90.0 - lat2)\n\n # theta = longitude\n theta1 = np.radians(lon1)\n theta2 = np.radians(lon2)\n\n cos = np.sin(phi1) * np.sin(phi2) * np.cos(theta1 - theta2) \\\n + np.cos(phi1) * np.cos(phi2)\n\n arc = np.arccos(cos)\n dist = arc * (r_earth + H) # meters, radius of earth\n return dist", "title": "" }, { "docid": "fc3fed4246d2ae4b09811af3d376466d", "score": "0.7384747", "text": "def get_distance(point1, point2):\n if point1 == point2:\n return 0\n earth_radius = 6372.8\n lat1 = to_radian(point1.get('lat'))\n lat2 = to_radian(point2.get('lat'))\n lon1 = to_radian(point1.get('lon'))\n lon2 = to_radian(point2.get('lon'))\n delta_lat = lat2 - lat1\n delta_lon = lon2 - lon1\n sqrd_hcl = pow(sin(delta_lat/2), 2) + 
pow(sin(delta_lon/2), 2) * cos(lat1) * cos(lat2)\n angular_distance = 2 * asin(sqrt(sqrd_hcl)) * 100\n return (earth_radius * angular_distance) / 100", "title": "" }, { "docid": "fc3fed4246d2ae4b09811af3d376466d", "score": "0.7384747", "text": "def get_distance(point1, point2):\n if point1 == point2:\n return 0\n earth_radius = 6372.8\n lat1 = to_radian(point1.get('lat'))\n lat2 = to_radian(point2.get('lat'))\n lon1 = to_radian(point1.get('lon'))\n lon2 = to_radian(point2.get('lon'))\n delta_lat = lat2 - lat1\n delta_lon = lon2 - lon1\n sqrd_hcl = pow(sin(delta_lat/2), 2) + pow(sin(delta_lon/2), 2) * cos(lat1) * cos(lat2)\n angular_distance = 2 * asin(sqrt(sqrd_hcl)) * 100\n return (earth_radius * angular_distance) / 100", "title": "" }, { "docid": "9b9918c3043afe9f43cfa96106315ce4", "score": "0.73822784", "text": "def eq_dist(lat1, lon1, lat2, lon2):\n lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2])\n xval = (lon2 - lon2) * cos(0.5 * (lat2+lat1))\n yval = lat2 - lat1\n eqd = 6371 * sqrt(xval*xval + yval*yval)\n\n return eqd", "title": "" }, { "docid": "5f38365a05c63cde9262a05b303a4b5d", "score": "0.7377837", "text": "def _calc_dist(lat1, long1, lat2, long2):\n x = _radians(long2 - long1) * math.cos(_radians((lat1 + lat2) / 2))\n y = _radians(lat2 - lat1)\n d = math.sqrt(x**2 + y**2) * 20902000 # multiply by radius of Earth in feet\n return round(d, 2)", "title": "" }, { "docid": "90e7c28da826572807f5d4a024f5f300", "score": "0.7375319", "text": "def spherical_distance(lon1, lat1, lon2, lat2):\r\n\tlon1, lat1 = np.radians(lon1), np.radians(lat1)\r\n\tlon2, lat2 = np.radians(lon2), np.radians(lat2)\r\n\tdlat = lat2 - lat1\r\n\tdlon = lon2 - lon1\r\n\ta = np.sin(dlat/2.)**2 + (np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.)**2)\r\n\tc = 2 * np.arctan2(np.sqrt(a), np.sqrt(1-a))\r\n\td = EARTH_RADIUS * c\r\n\treturn d * 1000.", "title": "" }, { "docid": "b2d02d355812aeca464a0c8f028e56ef", "score": "0.7338155", "text": "def GreatCircleDistance(self, point1, point2):\n # convert decimal degrees to radians\n lat1, lon1 = point1\n lat2, lon2 = point2\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n km = 6371 * c\n return km", "title": "" }, { "docid": "a1654a1ddf359754f645000a7c15f216", "score": "0.73216295", "text": "def distancer(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km", "title": "" }, { "docid": "4cfa514cacddec978b57b36a8e9ce541", "score": "0.72583437", "text": "def gcdistance(lat1, lon1, lat2, lon2):\n lon1, lat1, lon2, lat2 = (math.radians(x)\n for x in [lon1, lat1, lon2, lat2])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat/2)**2 + \\\n math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n c = 2 * math.asin(math.sqrt(a))\n km = 6367 * c\n return km", "title": "" }, { "docid": "fdd7235224db1c33e920f4a365501198", "score": "0.7220666", "text": "def gpsdistance(self, lat1, lon1, lat2, lon2):\n R = 6371000 # Earth's radius in meters\n lat1 = math.radians(lat1)\n lat2 = math.radians(lat2)\n\n dlat = abs(lat1 - lat2)\n dlon = abs(lon1 - lon2)\n dlon = math.radians(dlon)\n\n a = (math.sin(0.5 
* dlat)) ** 2 + math.cos(lat1) * math.cos(lat2) * (math.sin(0.5 * dlon)) ** 2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n\n return R * c", "title": "" }, { "docid": "bb912d3b67ed3bd2b54e9c5703861fb2", "score": "0.72152084", "text": "def get_distance(lon1, lat1, lon2, lat2):\n lon1, lat1, lon2, lat2 = map(radians, map(float, [lon1, lat1, lon2, lat2]))\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371\n return c * r", "title": "" }, { "docid": "c659fc25a7c68e77f9d425aac81cab96", "score": "0.72140515", "text": "def distance(a, b):\n\t\tR = 3963 # radius of Earth (miles)\n\t\tlat1, lon1 = math.radians(a[0]), math.radians(a[1])\n\t\tlat2, lon2 = math.radians(b[0]), math.radians(b[1])\n\t\treturn math.acos( math.sin(lat1)*math.sin(lat2) +\n\t\t\tmath.cos(lat1)*math.cos(lat2)*math.cos(lon1-lon2) ) * R", "title": "" }, { "docid": "cdbdc0e8edebc3b8716ee8853f6b0516", "score": "0.72056615", "text": "def __great_circle_distance(a_lat, a_lng, b_lat, b_lng, r):\n\n a = np.square(np.sin((b_lat - a_lat) / 2)) + np.cos(a_lat) * \\\n np.cos(b_lat) * np.square(np.sin((b_lng - a_lng) / 2))\n a = np.clip(a, 0, 1)\n return 2 * r * np.arctan2(np.sqrt(a), np.sqrt(1 - a))", "title": "" }, { "docid": "14f76963ce3719a6133278196a718110", "score": "0.71732944", "text": "def distance (x1,y1,x2,y2):\n\tgeod = Geodesic.WGS84\n\tdist = geod.Inverse(float(x1),float(y1),float(x2),float(y2))\n\treturn dist['s12'] /1852.0", "title": "" }, { "docid": "d987cdc71eadc78975e40786357cbc0a", "score": "0.7164403", "text": "def coord_distance(lat1, lon1, lat2, lon2):\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2\n c = 2 * math.asin(math.sqrt(a))\n km = 6367 * c\n return km", "title": "" }, { "docid": "c1951c2a7a0609885019d2c19b9ef74c", "score": "0.71642303", "text": "def earth_distance(self, lat1, long1, lat2, long2):\n # Note: The formula used in this function is not exact, as it assumes\n # the Earth is a perfect sphere.\n\n # Mean radius of Earth in meters\n radius_earth = 6371000\n\n # Convert latitude and longitude to\n # spherical coordinates in radians.\n degrees_to_radians = math.pi / 180.0\n phi1 = lat1 * degrees_to_radians\n phi2 = lat2 * degrees_to_radians\n lambda1 = long1 * degrees_to_radians\n lambda2 = long2 * degrees_to_radians\n dphi = phi2 - phi1\n dlambda = lambda2 - lambda1\n\n a = self.haversine(dphi) + math.cos(phi1) * math.cos(phi2) * self.haversine(dlambda)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = radius_earth * c\n return d", "title": "" }, { "docid": "9ad00b50f6cfe97d4affa303b8b9440d", "score": "0.7135395", "text": "def distance(city_1, city_2):\n return sqrt((city_1.lat - city_2.lat) ** 2 + (city_1.lng - city_2.lng) ** 2)", "title": "" }, { "docid": "12b695403439c90d718a0de47386e766", "score": "0.7129734", "text": "def calculate_distance(point1, point2):\n import math\n\n def convert_to_radians(degrees):\n return degrees * math.pi / 180\n\n radius_earth = 6.371E3 # km\n phi1 = 
convert_to_radians(point1[0])\n phi2 = convert_to_radians(point2[0])\n delta_phi = convert_to_radians(point1[0] - point2[0])\n delta_lam = convert_to_radians(point1[1] - point2[1])\n\n a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return radius_earth * c / 1.60934 # convert km to miles", "title": "" }, { "docid": "d8d78fcf2216c3ff4c491b0eecd6e5b0", "score": "0.7126508", "text": "def greatcircdist(lat1, lon1, lat2, lon2, radius):\n # Convert degrees to radians\n lat1, lon1, lat2, lon2 = np.radians([lat1, lon1, lat2, lon2])\n # Haversine\n dlat, dlon = abs(lat2 - lat1), abs(lon2 - lon1)\n a = (\n np.sin(dlat / 2) ** 2\n + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2\n )\n theta = 2 * np.arcsin(np.sqrt(a))\n return radius * theta", "title": "" }, { "docid": "7b2da2dd72919b0918fc0eea4d30808f", "score": "0.71044713", "text": "def gDistance(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n r = 6371 # Radius of earth in kilometers. Use 3956 for miles\n d = c * r\n return d", "title": "" }, { "docid": "d9d1d73bc7e1665945081329e9ea5fc9", "score": "0.7099262", "text": "def dist_approx( p1, p2 ):\n lat1, lon1 = map( radians, p1 )\n lat2, lon2 = map( radians, p2 )\n x = (lon2-lon1) * math.cos((lat1+lat2)/2)\n y = (lat2-lat1)\n c = math.hypot(x,y)\n return 3961.3*c # 3440.07 for nm, 3961.3 for statute miles, 6378.1 for km, 20915664.0 for feet", "title": "" }, { "docid": "ef5c7e6687496639f18c2047851a86f9", "score": "0.709116", "text": "def great_circle_distance(self, x, y):\n x = x * np.pi / 180\n y = y * np.pi / 180\n dlong = x[1] - y[1]\n den = np.sin(x[0]) * np.sin(y[0]) + np.cos(x[0]) * np.cos(y[0]) * np.cos(dlong)\n num = (np.cos(y[0]) * np.sin(dlong)) ** 2 + (\n np.cos(x[0]) * np.sin(y[0]) - np.sin(x[0]) * np.cos(y[0]) * np.cos(dlong)) ** 2\n\n sig = np.arctan2(np.sqrt(num), den) * 180 / np.pi\n return sig", "title": "" }, { "docid": "e5ab03008170e88263a2690ae3625d00", "score": "0.7089357", "text": "def distance_between_gps_coordinates(lat_a, lon_a, lat_b, lon_b):\n d_lon = radians(lon_b - lon_a)\n d_lat = radians(lat_b - lat_a)\n a = ((sin(d_lat/2)) ** 2) + cos(radians(lat_a)) * cos(radians(lat_b)) * ((sin(d_lon/2)) ** 2)\n c = 2 * atan2(sqrt(a), sqrt(1-a))\n\n return earth_radius * c", "title": "" }, { "docid": "a978c484af72d0e5070bff4df45b9102", "score": "0.7088543", "text": "def getDist(lon1, lat1, lon2, lat2):\n from math import radians, cos, sin, asin, sqrt\n lon1 = float(lon1)\n lat1 = float(lat1)\n lon2 = float(lon2)\n lat2 = float(lat2)\n\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km * 1000", "title": "" }, { "docid": "8f61e4cc26aa4d8a5633cd41b42f50da", "score": "0.7086601", "text": "def distancia(self, lat1, lon1, lat2, lon2):\n\n # Conversion de GMS a DEC y posteriormente a radianes\n lat1rad = math.radians(lat1)\n lon1rad = math.radians(lon1)\n\n lat2rad = math.radians(lat2)\n lon2rad = math.radians(lon2)\n\n\n # Calculo de la distancia P1 a P2\n a = math.sin(lat1rad)*math.sin(lat2rad)\n b = 
math.cos(lat1rad)*math.cos(lat2rad)*math.cos(lon2rad - lon1rad)\n D = math.acos(a + b) # Formula (2)\n\n d = 111.18*math.degrees(D) # Formula (1)\n\n # Regresa distancia y angulo\n return round(d, 2)", "title": "" }, { "docid": "a8bc3b28efb11589550b45583a19b991", "score": "0.70831037", "text": "def get_distance_between_coords (lat1, lon1, lat2, lon2):\n\t# convert decimal degrees to radians \n\tlon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\t# haversine formula \n\tdlon = fabs(lon2 - lon1) \n\tdlat = fabs(lat2 - lat1) \n\ta = sin(dlat/2.0)**2.0 + cos(lat1) * cos(lat2) * sin(dlon/2.0)**2.0\n\tc = 2.0 * asin(sqrt(a)) \n\tmi = 3956.0 * c\n\treturn mi", "title": "" }, { "docid": "35a2d64e9b02b5a4241acd49d7fb934c", "score": "0.70725244", "text": "def haversine(lat1_deg, lon1_deg, lat2_deg, lon2_deg, earth_radius=EARTH_EQUATORIAL_RADIUS): \n lat1 = math.radians(lat1_deg)\n lon1 = math.radians(lon1_deg)\n lat2 = math.radians(lat2_deg)\n lon2 = math.radians(lon2_deg)\n \n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = (math.sin(dlat/2))**2 + math.cos(lat1) * math.cos(lat2) * (math.sin(dlon/2))**2 \n c = 2 * math.atan2( math.sqrt(a), math.sqrt(1-a) ) \n d = earth_radius * c\n\n return d", "title": "" }, { "docid": "2c7f5905491b548f019f645a27a61fa3", "score": "0.70620406", "text": "def distance(lat1, lng1, lat2, lng2):\n \n earthRadius = 3958.75\n \n dLat = math.radians(lat2 - lat1)\n dLng = math.radians(lng2 - lng1)\n \n sinDLat = math.sin(dLat / 2)\n sinDLng = math.sin(dLng / 2)\n \n a = (sinDLat ** 2) + (sinDLng ** 2) * math.cos(math.radians(lat1)) * math.cos(math.radians(lat2))\n c = 2 * math.atan2(a ** .5, (1-a) ** .5)\n \n dist = earthRadius * c\n \n return dist", "title": "" }, { "docid": "638bcca0b82f7277254417710cc090d8", "score": "0.70578116", "text": "def distance(cord1, cord2):\n # approximate radius of earth in km\n R = 6373.0\n \n lat1 = radians(cord1[0])\n lon1 = radians(cord1[1])\n lat2 = radians(cord2[0])\n lon2 = radians(cord2[1])\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n return R * c", "title": "" }, { "docid": "c9ee3576a2dce9a60424e59a9b60f69d", "score": "0.7055504", "text": "def small_angle_sphere_dist(lon1, lat1, lon2, lat2):\n\n from math import cos\n\n dlat = lat2 - lat1\n dlon = (lon2 - lon1) * cos((lat1 + lat2) / 2.)\n\n return (dlat ** 2 + dlon ** 2) ** 0.5", "title": "" }, { "docid": "a91db798691774cbfb09304f939077cb", "score": "0.7047023", "text": "def geo_distance(a, b):\n\n R = 6371 # Earth radius\n a_rad = (radians(a[0]), radians(a[1]))\n b_rad = (radians(b[0]), radians(a[1]))\n fi = fabs(a_rad[1] - b_rad[1])\n p = acos(sin(b_rad[0]) * sin(a_rad[0])\n + cos(b_rad[0]) * cos(a_rad[0]) * cos(fi))\n return p*R", "title": "" }, { "docid": "109ec2caa66d3389f20d760b5092edf7", "score": "0.7038522", "text": "def calc_dist(c1_lat, c1_lon, c2_lat, c2_lon):\n c1 = (c1_lat, c1_lon)\n c2 = (c2_lat, c2_lon)\n return geopy.distance.distance(c1, c2).km", "title": "" }, { "docid": "44e0330a6c56f7e1d84c279c67bfbedf", "score": "0.7014126", "text": "def get_spherical_distance(lat1,lat2,long1,long2):\n q=radians(lat2-lat1)\n r=radians(long2-long1)\n lat2r=radians(lat2)\n lat1r=radians(lat1)\n a=sin(q/2)*sin(q/2)+cos(lat1r)*cos(lat2r)*sin(r/2)*sin(r/2)\n c=2*atan2(sqrt(a),sqrt(1-a))\n R=6371*1000\n d=R*c\n return d", "title": "" }, { "docid": "1d435be2a36a91dbe5cf9afe3b528008", "score": "0.7012409", "text": "def distance(stop1, stop2):\n\n def haversine(lon1, 
lat1, lon2, lat2):\n \"\"\"\n Calculate the great circle distance between two points\n on the earth (specified in decimal degrees)\n\n Courtesy of http://stackoverflow.com/a/15737218\n \"\"\"\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km * 1000.0\n\n d = haversine(stop1['lat'], stop1['lon'], stop2['lat'], stop2['lon'])\n # return np.sqrt(d**2 + (stop1['ele'] - stop2['ele'])**2)\n return d", "title": "" }, { "docid": "3867065533d8615cd70bfdd8f0084790", "score": "0.70036465", "text": "def get_spherical_distance(lat1,lat2,long1,long2):\r\n q=radians(lat2-lat1)\r\n r=radians(long2-long1)\r\n lat2r=radians(lat2)\r\n lat1r=radians(lat1)\r\n a=sin(q/2)*sin(q/2)+cos(lat1r)*cos(lat2r)*sin(r/2)*sin(r/2)\r\n c=2*atan2(sqrt(a),sqrt(1-a))\r\n R=6371*1000\r\n d=R*c\r\n return d", "title": "" }, { "docid": "d4858f589a95e72a3d8fc179ce2b339a", "score": "0.7000148", "text": "def distance(p1: Coordinate, p2: Coordinate) -> float:\n return abs(p2 - p1)", "title": "" }, { "docid": "1660e74231a6eada85aad7738e0eb389", "score": "0.6998366", "text": "def distance(p1, p2):\n coef = math.cos(p1[1] / 180. * math.pi)\n x = p1[1] - p2[1]\n y = (p1[0] - p2[0]) * coef\n\n distance_2d = math.sqrt(x * x + y * y) * ONE_DEGREE\n return distance_2d", "title": "" }, { "docid": "83f2baf41aee7e10aa4457002ae7fd19", "score": "0.6986516", "text": "def geodesicDist(lat1, long1, lat2, long2):\n rEarth = 3963.\n ## convert latitudes to polar angles (radians)\n theta1 = 0.5 * np.pi - (np.pi / 180.) * lat1\n theta2 = 0.5 * np.pi - (np.pi / 180.) * lat2\n ## convert longitudes radians\n phi1 = (np.pi / 180.) * long1\n phi2 = (np.pi / 180.) * long2\n ## create unit vectors to each point\n unitvec1 = np.array([ np.sin(theta1) * np.cos(phi1), np.sin(theta1) * np.sin(phi1), np.cos(theta1) ])\n unitvec2 = np.array([ np.sin(theta2) * np.cos(phi2), np.sin(theta2) * np.sin(phi2), np.cos(theta2) ])\n ## calculate distance\n return rEarth * np.arccos( np.dot(unitvec1, unitvec2) )", "title": "" }, { "docid": "ab4cf1dbc94f2fea611487a91460d98b", "score": "0.6985282", "text": "def distance(a_lat, a_lon, b_lat, b_lon):\n a_lat = float(a_lat)\n a_lon = float(a_lon)\n b_lat = float(b_lat)\n b_lon = float(b_lon)\n meters = EarthDistance((a_lat, a_lon), (b_lat, b_lon))\n miles = meters * METERS_TO_MILES\n return Decimal(str(miles)).quantize(Decimal(\"0.01\"))", "title": "" }, { "docid": "d231ecdf90ba9562707abe10b344663d", "score": "0.6958144", "text": "def haversine_distance(lat1, lon1, lat2, lon2):\n # radius of earth in km, approx.\n R = 6373.0\n\n lat1 = radians(lat1)\n lon1 = radians(lon1)\n lat2 = radians(lat2)\n lon2 = radians(lon2)\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n\n return distance", "title": "" }, { "docid": "dffa24c06b040b15d83a61499516fa37", "score": "0.6957382", "text": "def harversine(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])\n # harversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = math.sin(dlat/2.)**2. + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2.)**2.\n c = 2. * math.asin(math.sqrt(a))\n km = 6371. 
* c # radius of earth\n return km", "title": "" }, { "docid": "2777eb937fbb74985c8f48b8530c2320", "score": "0.6955457", "text": "def dist_km( p1, p2 ):\n try:\n dist = geopy.distance.vincenty( p1, p2 )\n except ValueError as e: \n log.warning(\"Vincenty failed to converge on {}-{}; \"\n + \" resorting to great circle\"\n .format(p1, p2))\n dist = geopy.great_circle( p1, p2 )\n return dist.kilometers", "title": "" }, { "docid": "ca196554f1d13522f4ae03651b3267ae", "score": "0.6947221", "text": "def radius_of_gyration(self) -> float:\n\n def great_circle_distance(pt1, pt2):\n \"\"\"\n Return the great-circle distance in kilometers between two points,defined by a tuple (lat, lon).\n \"\"\"\n r = 6371.\n\n delta_latitude = math.radians(pt1[0] - pt2[0])\n delta_longitude = math.radians(pt1[1] - pt2[1])\n latitude1 = math.radians(pt1[0])\n latitude2 = math.radians(pt2[0])\n\n a = math.sin(delta_latitude / 2) ** 2 + math.cos(latitude1) * math.cos(latitude2) * math.sin(\n delta_longitude / 2) ** 2\n return r * 2. * math.asin(math.sqrt(a))\n\n d = Counter((ci.location.lat, ci.location.lng) for ci in self.checkins)\n sum_weights = sum(d.values())\n positions = list(d.keys()) # Unique positions\n\n if len(positions) == 0:\n return None\n\n barycenter = [0, 0]\n for pos, t in d.items():\n barycenter[0] += pos[0] * t\n barycenter[1] += pos[1] * t\n\n barycenter[0] /= sum_weights\n barycenter[1] /= sum_weights\n\n r = 0.\n for pos, t in d.items():\n r += float(t) / sum_weights * \\\n great_circle_distance(barycenter, pos) ** 2\n return math.sqrt(r)", "title": "" }, { "docid": "65c09ccbbcacc7b1e0f0d767271052b5", "score": "0.6940241", "text": "def euclidean_distance(x1, y1, x2, y2):\n return math.sqrt((x2-x1)**2+(y2-y1)**2) #Using Pythagorian Thm to find the total travelled distance", "title": "" }, { "docid": "165b98cff12c975bc59af864a496980f", "score": "0.69230264", "text": "def distance_from_center(x,y):\n return (x**2 + y**2) ** 0.5", "title": "" }, { "docid": "b0401e320ed32260f674b0fa51d225fc", "score": "0.69142365", "text": "def _haversine_distance(latitude1: float, longitude1: float, latitude2: float, longitude2: float):\n longitude1, latitude1, longitude2, latitude2 = map(radians, [longitude1, latitude1, longitude2, latitude2])\n diff_longitude = longitude2 - longitude1\n diff_latitude = latitude2 - latitude1\n temp = sin(diff_latitude / 2) ** 2 + cos(latitude1) * cos(latitude2) * sin(diff_longitude / 2) ** 2\n return 2 * EARTH_RADIUS_KM * asin(sqrt(temp))", "title": "" }, { "docid": "baae92fdb7115c2760a3346b620db0d5", "score": "0.6904963", "text": "def _distance(x1: float, y1: float, x2: float, y2: float) -> float:\n x = x1 - x2\n y = y1 - y2\n return math.sqrt(x * x + y * y)", "title": "" }, { "docid": "554dda080e738f7059524f5abf863b13", "score": "0.68954855", "text": "def distance_between_points(point1, point2):\n return haversine(point1, point2)", "title": "" }, { "docid": "17361fd09d29a41bcf332e19f3041913", "score": "0.6893103", "text": "def haversine(lon1, lat1, lon2, lat2): \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) \n #print 34\n dlon = lon2 - lon1 \n dlat = lat2 - lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2 \n c = 2 * atan(sqrt(a)/sqrt(1-a)) \n r = 6371 \n d=c * r\n #print type(d)\n return d", "title": "" }, { "docid": "17361fd09d29a41bcf332e19f3041913", "score": "0.6893103", "text": "def haversine(lon1, lat1, lon2, lat2): \n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2]) \n #print 34\n dlon = lon2 - lon1 \n dlat = lat2 - 
lat1 \n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2 \n c = 2 * atan(sqrt(a)/sqrt(1-a)) \n r = 6371 \n d=c * r\n #print type(d)\n return d", "title": "" }, { "docid": "c7d6dfbc80129862fdf207d94f5edf40", "score": "0.688689", "text": "def euclidean_dist(x1, y1, x2, y2):\n\n a = abs(float(x1) - float(x2))\n b = abs(float(y1) - float(y2))\n c = math.sqrt(math.pow(a, 2) + math.pow(b, 2))\n km = c * 111.32\n return km", "title": "" }, { "docid": "7015c78b9206bee9b60603f49608dceb", "score": "0.685043", "text": "def distance(coordinate1, coordinate2):\n c1_lat_lon = coordinate1[1], coordinate1[0]\n c2_lat_lon = coordinate2[1], coordinate2[0]\n return gpdistance.geodesic(c1_lat_lon, c2_lat_lon).feet", "title": "" }, { "docid": "72ecf46851a51104f7ed4b11319de798", "score": "0.6849147", "text": "def euclid_distance(point1: tuple[int, int], point2: tuple[int, int]) -> float:\n return ((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2) ** 0.5", "title": "" }, { "docid": "a3feaaff9f68250f9ecd527ad3b9ec34", "score": "0.68416816", "text": "def eucledien(point1:list,point2:list)-> float:#DONE\n return math.sqrt((point1[0]-point2[0])**2+(point1[1]-point2[1])**2)", "title": "" }, { "docid": "4e9f031f04bcf091e5d77a212bd7ef94", "score": "0.68399423", "text": "def haversine(coord1, coord2):\r\n lat1, lon1 = coord1\r\n lat2, lon2 = coord2\r\n radius = 6371000 # mean earth radius in meters (GRS 80-Ellipsoid)\r\n dlat = math.radians(lat2-lat1)\r\n dlon = math.radians(lon2-lon1)\r\n a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \\\r\n * math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)\r\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\r\n d = radius * c\r\n return d", "title": "" }, { "docid": "cb9e4fb6cebca0d045c1e5533e2129d7", "score": "0.6837117", "text": "def calc_dist(lat_A, long_A, lat_B, long_B):\n distance = (sin(radians(lat_A)) *\n sin(radians(lat_B)) +\n cos(radians(lat_A)) *\n cos(radians(lat_B)) *\n cos(radians(long_A - long_B)))\n\n distance = (degrees(acos(distance))) * 69.09\n\n return distance", "title": "" }, { "docid": "db7cc541ce6062a520ec2c3f4b1ee152", "score": "0.6807188", "text": "def dist_on_hypersphere(point_a, point_b):\n return math.acos(np.dot(point_a, point_b))", "title": "" }, { "docid": "c4be101c4a3cf2e72fe43023bb4d348e", "score": "0.6806189", "text": "def haversine_distance(x, y):\n x_rad = np.radians(x)\n y_rad = np.radians(y)\n\n d = y_rad - x_rad\n\n dlat, dlon = d.T\n x_lat = x_rad[:, 0]\n y_lat = y_rad[:, 0]\n\n a = (\n np.sin(dlat / 2.0) ** 2\n + np.cos(x_lat) * np.cos(y_lat) * np.sin(dlon / 2.0) ** 2\n )\n\n c = 2 * np.arcsin(np.sqrt(a))\n return EARTH_RADIUS * c", "title": "" }, { "docid": "aa7f93085e477f60a38e1de38c119ff4", "score": "0.6799302", "text": "def haversine(lat1:float, lat2:float, lon1:float, lon2:float) -> float:\n earth_radius = 6371e3\n phi1 = math.radians(lat1)\n phi2 = math.radians(lat2)\n delta_phi = math.radians(lat2 - lat1)\n delta_lam = math.radians(lon2 - lon1)\n a = math.sin(delta_phi/2) * math.sin(delta_phi/2) + \\\n math.cos(phi1) * math.cos(phi2) * \\\n math.sin(delta_lam/2) * math.sin(delta_lam/2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n return earth_radius * c", "title": "" }, { "docid": "99e6380079abd138fda29b7d0914b4ce", "score": "0.67932403", "text": "def get_distance(x1,y1,x2,y2):\n\n inner = (x2 - x1)**2 + (y2-y1)**2\n return math.sqrt(inner)", "title": "" }, { "docid": "a1031e1626fc6ba82068c29a4fdbdcdc", "score": "0.6790894", "text": "def calculate_distance(self, 
position1, position2):\n \n lat1, long1 = position1\n lat2, long2 = position2\n \n R = 6373000 # Radius of the earth in m\n\n # phi = 90 - latitude\n phi1 = np.deg2rad(90.0 - lat1)\n phi2 = np.deg2rad(90.0 - lat2)\n \n # theta = longitude\n theta1 = np.deg2rad(long1)\n theta2 = np.deg2rad(long2)\n \n # Compute spherical distance from spherical coordinates.\n \n # For two locations in spherical coordinates \n # (1, theta, phi) and (1, theta, phi)\n # cosine( arc length ) = \n # sin phi sin phi' cos(theta-theta') + cos phi cos phi'\n # distance = rho * arc length\n \n cos = (np.sin(phi1) * np.sin(phi2) * np.cos(theta1 - theta2) + \n np.cos(phi1) * np.cos(phi2))\n \n arc = np.arccos(cos)\n\n # Multiply arc by the radius of the earth \n distance = arc * R\n \n return distance", "title": "" }, { "docid": "f16cbadf59ded9485a9b3ecb6f989f3c", "score": "0.67880887", "text": "def get_phys_distance(c1, c2):\n lat1 = float(c1[0])\n lat2 = float(c2[0])\n lon1 = float(c1[1])\n lon2 = float(c2[1])\n degrees_to_radians = math.pi/180.0\n phi1 = (90.0 - lat1)*degrees_to_radians\n phi2 = (90.0 - lat2)*degrees_to_radians\n theta1 = lon1*degrees_to_radians\n theta2 = lon2*degrees_to_radians\n cos= (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) + math.cos(phi1)*math.cos(phi2))\n arc = math.acos(cos)\n return arc*6373", "title": "" }, { "docid": "2b585e6461743d17b6a12a5de5d11f2b", "score": "0.6785628", "text": "def calc_distance_to_point(a, b):\n return np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)", "title": "" }, { "docid": "feb63cec2c34705712075913731c2b7b", "score": "0.67823696", "text": "def haversine_dist(lon1, lat1, lon2, lat2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers. 
Use 3956 for miles\n r = 6371\n return c * r", "title": "" }, { "docid": "6e61b667c3ef4b242ad666bb462fccc8", "score": "0.676739", "text": "def distance(lat1, long1, lat2, long2): \n # Convert latitude and longitude to \n # spherical coordinates in radians.\n degrees_to_radians = np.pi/180.0\n\n # phi = 90 - latitude\n phi1 = (90.0 - lat1)*degrees_to_radians\n phi2 = (90.0 - lat2)*degrees_to_radians\n \n # theta = longitude\n theta1 = long1*degrees_to_radians\n theta2 = long2*degrees_to_radians\n \n # Compute spherical distance from spherical coordinates.\n cos = (np.sin(phi1)*np.sin(phi2)*np.cos(theta1 - theta2) + \n np.cos(phi1)*np.cos(phi2))\n arc = np.arccos( cos ) * 3960\n \n return arc", "title": "" }, { "docid": "227f79d9b98326294a0c65eb9c4e797a", "score": "0.67672884", "text": "def haversine_distance(lon1, lat1, lon2, lat2):\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n return 2 * 6371 * asin(sqrt(a))", "title": "" }, { "docid": "4a6ecef60fdd9834112c03981a35887f", "score": "0.67628515", "text": "def haversine_degrees_to_meters(lat_1, lon_1, lat_2, lon_2):\n r = 6371000\n delta_lat = math.radians(lat_2 - lat_1)\n delta_lon = math.radians(lon_2 - lon_1)\n\n a = ((math.sin(delta_lat / 2) ** 2) +\n math.cos(math.radians(lat_1)) * math.cos(math.radians(lat_2)) *\n (math.sin(delta_lon / 2) ** 2))\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return r * c", "title": "" }, { "docid": "2cfee7fc112069505ac419d7bfa3677a", "score": "0.67624384", "text": "def lat_lon_distance(lat1, long1, lat2, long2, units='mi'):\n # Convert latitude and longitude to\n # spherical coordinates in radians.\n degrees_to_radians = math.pi/180.0\n\n # phi = 90 - latitude\n phi1 = (90.0 - lat1)*degrees_to_radians\n phi2 = (90.0 - lat2)*degrees_to_radians\n\n # theta = longitude\n theta1 = long1*degrees_to_radians\n theta2 = long2*degrees_to_radians\n\n # Compute spherical distance from spherical coordinates.\n\n # For two locations in spherical coordinates\n # (1, theta, phi) and (1, theta, phi)\n # cosine( arc length ) =\n # sin phi sin phi' cos(theta-theta') + cos phi cos phi'\n # distance = rho * arc length\n\n cosV = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) +\n math.cos(phi1)*math.cos(phi2))\n arc = math.acos( cosV )\n\n # Remember to multiply arc by the radius of the earth\n # in your favorite set of units to get length.\n if units.lower().find('mi')>=0:\n R = 3956.0 # miles(at 48.46791 degrees north) see http://en.wikipedia.org/wiki/Earth_radius\n else:\n R = 6366.0 # km\n return arc * R", "title": "" }, { "docid": "7f0957c11807ffd21cee236428434d5f", "score": "0.676201", "text": "def get_distance(c1, c2):\n return round(numpy.linalg.norm(c1 - c2), 4)", "title": "" }, { "docid": "4ea525b75a5cfdc9eb3f40b705690bc3", "score": "0.6744094", "text": "def dist(a,b):\n\treturn sqrt(((a.x - b.x) ** 2 + (a.y - b.y) ** 2))", "title": "" }, { "docid": "e45ddf2c7e78dc5ef3c13af0fcdadf0d", "score": "0.67430544", "text": "def getDistanceFromLatLonInKm(lat1: float, lon1: float, lat2: float, lon2: float) -> float:\n R = 6371\n dLat = math.radians(lat2-lat1)\n dLon = math.radians(lon2-lon1)\n a = math.sin(dLat/2) * math.sin(dLat/2) + math.cos(math.radians(lat1)) * \\\n math.cos(math.radians(lat2)) * math.sin(dLon/2) * math.sin(dLon/2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n d = R * c\n if (d <= 1.5):\n return d", "title": "" }, { "docid": 
"81c2d4ce8f1f035f5b70cfed2f351c62", "score": "0.6737317", "text": "def haversine((lat1, lon1), (lat2, lon2)):\n R = 6371; # Earth's radius in km\n dLat = math.radians(lat2-lat1)\n dLon = math.radians(lon2-lon1)\n a = (math.sin(dLat/2) * math.sin(dLat/2) + math.cos(math.radians(lat1)) *\n math.cos(math.radians(lat2)) * math.sin(dLon/2) * math.sin(dLon/2) )\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a)); \n return R * c", "title": "" }, { "docid": "c9f29cc73d4e991591878caa1f50d71e", "score": "0.6732404", "text": "def dist_on_sphere(l1, b1, l2, b2):\n#-----------------------------------------------------------\n#-----------------------------------------------------------\n fac = numpy.pi / 180.0\n l1 *= fac; b1 *= fac; l2 *= fac; b2 *= fac\n dlon = l2 - l1\n aa1 = numpy.cos(b2)*numpy.sin(dlon)\n aa2 = numpy.cos(b1)*numpy.sin(b2) - numpy.sin(b1)*numpy.cos(b2)*numpy.cos(dlon)\n a = numpy.sqrt(aa1*aa1+aa2*aa2)\n b = numpy.sin(b1)*numpy.sin(b2) + numpy.cos(b1)*numpy.cos(b2)*numpy.cos(dlon)\n d = numpy.arctan2(a,b)\n return d*180.0/numpy.pi", "title": "" }, { "docid": "e87c8c7153e3696bcbd19bd2e75d482d", "score": "0.6729575", "text": "def distance(city1, city2):\n x1, y1 = get_lat(city1), get_lon(city1)\n x2, y2 = get_lat(city2), get_lon(city2)\n\n return sqrt((x1 - x2)**2 + (y1 - y2)**2)", "title": "" }, { "docid": "7e58aa2621c653440e32920b20434054", "score": "0.6728683", "text": "def get_eucl_distance(self,x1,y1,x2,y2):\n return math.sqrt((x2-x1)**2 + (y2-y1)**2)", "title": "" }, { "docid": "5d6a23a7b81ee0fb48857e18689c8762", "score": "0.6727589", "text": "def distHaversine(lon1, lat1, lon2,lat2):\n # Convert decimal degrees to radians\n lon1, lat1, lon2,lat2 = map(radians, [lon1, lat1, lon2,lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat2\n a = sin(dlat/2)**2 + cos(lat1) * \\\n cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km", "title": "" }, { "docid": "4fa85e8d8a342632b9cc2c14a0dd4328", "score": "0.67270005", "text": "def geocalcbycoord(lat0, lon0, lat1, lon1):\n lat0 = np.radians(lat0)\n lon0 = np.radians(lon0)\n lat1 = np.radians(lat1)\n lon1 = np.radians(lon1)\n dlon = lon0 - lon1\n y = np.sqrt(\n (np.cos(lat1) * np.sin(dlon)) ** 2\n + (np.cos(lat0) * np.sin(lat1) \n - np.sin(lat0) * np.cos(lat1) * np.cos(dlon)) ** 2)\n x = np.sin(lat0) * np.sin(lat1) + \\\n np.cos(lat0) * np.cos(lat1) * np.cos(dlon)\n c = np.arctan2(y, x)\n return k_earth_radius * c", "title": "" }, { "docid": "77b23fda86d3b5dd6163db3e050972c4", "score": "0.6726182", "text": "def distance_on_sphere(\n longitude_1,\n latitude_1,\n longitude_2,\n latitude_2,\n units='deg',\n radius=_earth_radius,\n ):\n ###################################\n # INPUT CHECK #\n ###################################\n # ============ units ================================ #\n known_units = ('deg', 'degrees', 'rad', 'radians')\n message = \"units must be 'deg' oder 'rad'.\"\n\n if not isinstance(units, str):\n raise TypeError(message)\n\n if units.lower() not in known_units:\n raise ValueError(message)\n\n # ========== shapes ================================= #\n shape_1 = np.shape(longitude_1)\n shape_2 = np.shape(longitude_2)\n\n if np.shape(latitude_1) != shape_1:\n raise IndexError('Shapes of longitude_1 and latitude_1 must agree.')\n\n if np.shape(latitude_2) != shape_2:\n raise IndexError('Shapes of longitude_2 and latitude_2 must agree.')\n\n ###################################\n # CONVERSIONS #\n ###################################\n if units.lower()[:1] == 'd':\n # deg --> rad\n lon1 = 
np.radians(longitude_1)\n lat1 = np.radians(latitude_1)\n lon2 = np.radians(longitude_2)\n lat2 = np.radians(latitude_2)\n else:\n lon1 = longitude_1\n lat1 = latitude_1\n lon2 = longitude_2\n lat2 = latitude_2\n\n ###################################################\n # ALGEBRAICALLY EXACT FUNCTION #\n ###################################################\n dlon = np.add.outer(-lon1, lon2)\n summand1 = np.multiply.outer(np.sin(lat1), np.sin(lat2))\n summand2 = np.multiply.outer(np.cos(lat1), np.cos(lat2)) * np.cos(dlon)\n S = summand1 + summand2\n\n # ========== correct for numerical errors =========== #\n # Numerical errors can lead to S > 1.\n # This is mathematically not possible.\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RuntimeWarning)\n too_large = S > 1.\n\n if np.sum(too_large) > 0:\n if not isinstance(too_large, np.ndarray):\n S = 1.\n else:\n S[too_large] = 1.\n # ==================================================== #\n\n # central angle (separation angle between the points)\n ds = np.arccos(S)\n\n ###################################################\n # Handle small angular separation #\n ###################################################\n # For small central angles, the above algeraically exact function causes\n # numerical errors.\n # --> Use better suited haversine function for small angles.\n \n # check for small angles\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', RuntimeWarning)\n ds_is_small = ds < 1e-3\n\n # ========== correct small angles =================== #\n if np.sum(ds_is_small) > 0:\n # compute the haversine small angle\n dlat = np.add.outer(-lat1, lat2)\n summand1 = (np.sin(dlat / 2.))**2\n prod = np.multiply.outer(np.cos(lat1), np.cos(lat2))\n summand2 = prod * (np.sin(dlon / 2.))**2\n ds_hav = 2 * np.arcsin(np.sqrt(summand1 + summand2))\n\n if not isinstance(too_large, np.ndarray):\n ds = ds_hav\n else:\n ds[ds_is_small] = ds_hav[ds_is_small]\n # ==================================================== #\n \n distance = radius * ds\n return distance", "title": "" }, { "docid": "715d373b89c370f18343960ee063efb2", "score": "0.67237484", "text": "def euclid(dot_a, dot_b):\n dist = geodesic(dot_a[::-1], dot_b[::-1]).meters\n\n return dist", "title": "" }, { "docid": "384cf31e747bff7c3aab15cd7bfb33b8", "score": "0.6689053", "text": "def dist(x1,y1,x2,y2):\n return sqrt((x2-x1)**2 + (y2-y1)**2)", "title": "" }, { "docid": "5bb0bf1aa6e561b47cb59b610f32480b", "score": "0.6686125", "text": "def distance(lat1, long1, lat2, long2):\n lat1 = lat1 * pi / 180. # convert to radians\n lat2 = lat2 * pi / 180. # convert radians\n dlat = (lat1 - lat2)\n dlong = (long1 - long2) * pi / 180. 
# difference in radians\n\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlong/2)**2\n return 2 * R * atan2(sqrt(a), sqrt(1-a))", "title": "" }, { "docid": "bf4922b99a280b7fbdf6e01e9064d555", "score": "0.6676789", "text": "def getDistances(lons1, lats1, lons2, lats2):\n \n EARTH_RADIUS = 6371.0\n \n lons1, lats1, lons2, lats2 = prepare_coords(lons1, lats1, lons2, lats2)\n distance = np.arcsin(np.sqrt(\n np.sin((lats1 - lats2) / 2.0) ** 2.0\n + np.cos(lats1) * np.cos(lats2)\n * np.sin((lons1 - lons2) / 2.0) ** 2.0\n ).clip(-1., 1.))\n \n return (2.0 * EARTH_RADIUS) * distance", "title": "" }, { "docid": "2a71b04440364bf298ac313a18a43b3a", "score": "0.66730505", "text": "def calc_euclidean_dist(point1,point2):\n\n return math.sqrt((point1[0]-point2[0])**2+(point1[1]-point2[1])**2)", "title": "" }, { "docid": "55dfdd6435cae54e8b1f5141ab6a1cc2", "score": "0.6670504", "text": "def euclid_dist(xy1, xy2):\n (x1,x2) = xy1\n (y1,y2) = xy2\n return m.sqrt((x1-x2)**2 + (y1-y2)**2)", "title": "" }, { "docid": "a01e517e61c877728fe11ad946762dfd", "score": "0.6658044", "text": "def distancia(p1, p2):\n x1 = p1[0]\n y1 = p1[1]\n x2 = p2[0]\n y2 = p2[1]\n return sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2))", "title": "" }, { "docid": "7bf4746994482f4dcca4b56debec0396", "score": "0.66484255", "text": "def haversine_distance(lat1, lon1, lat2, lon2, units='mi'):\n # Evaluate the units, raise if illegal unit\n try:\n r = _units[units] # radius of the Earth in given units\n except KeyError:\n raise ValueError(\"'units' must be one of %r. Got %s\"\n % (list(_units.keys()), units))\n\n lat1, lon1, lat2, lon2 = map(np.radians, [lat1, lon1, lat2, lon2])\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = np.sin(dlat / 2) ** 2 + \\\n np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2\n c = 2 * np.arcsin(np.sqrt(a))\n return c * r", "title": "" } ]
2b5045417a633adf0914209eda41fb5e
Get the syntax form of the input word
[ { "docid": "31665e9ae2a14b12285329bd0c45f988", "score": "0.6982608", "text": "def getSyntax(self,):\n\t\treturn self.syntax;", "title": "" } ]
[ { "docid": "016773b9384a7fcb9df1f714ccdf9034", "score": "0.67101586", "text": "def get_syntax(self,nameoroid):\n at_oid = self.getoid(AttributeType,nameoroid)\n try:\n at_obj = self.get_inheritedobj(AttributeType,at_oid)\n except KeyError:\n return None\n else:\n return at_obj.syntax", "title": "" }, { "docid": "599b86f8ddabf8e7fc135015216a3ac7", "score": "0.67062837", "text": "def get_syntax(self):\n\t\t\n\t\tsyntax = self.syntax_map.get(self._notepad.getLangType(), 'html')\n\t\t\n\t\t# Same language used for XML/XSL/XSD\n\t\tif syntax == 'xml':\n\t\t\tif self._notepad.getCurrentFilename()[-4:].lower() == '.xsl':\n\t\t\t\tsyntax = 'xsl'\n\t\t\telif self._notepad.getCurrentFilename()[-4:].lower() == '.xsd':\n\t\t\t\tsyntax = 'xsd'\n\t\t\t\t\n\t\treturn syntax", "title": "" }, { "docid": "ac5dca45059206f83bade75fe94a90f5", "score": "0.62333965", "text": "def get_definition(word: str) -> str:\n result = parser.parse(word)\n analyses = result.forms[0].analyses\n analyses_key = next(iter(analyses.items()))\n definitions = analyses_key[1].lexeme.senses\n return \", \".join(definitions)", "title": "" }, { "docid": "61601eea087f324117f59a52747a8b35", "score": "0.6218051", "text": "def get_syntax():\n command = pp.Word(pp.alphanums + \"-\").setName('command')\n posarg = pp.Word(pp.alphanums + \"-<>\").setName('pos-arg')\n\n lbracket = pp.Suppress(\"[\")\n rbracket = pp.Suppress(\"]\")\n zeroorone_flag = \"?\"\n zeroormore_flag = \"*\"\n oneormore_flag = \"+\"\n only1opt_flag = \"!\"\n freeform_flag = \"@\"\n zooarg = pp.Word(pp.alphanums + \"-\").setName('zero-or-one-arg')\n zomarg = pp.Word(pp.alphanums + \"-\").setName('zero-or-more-arg')\n oomarg = pp.Word(pp.alphanums + \"-\").setName('one-or-more-arg')\n oooarg = pp.Word(pp.alphanums + \"-<>\").setName('only-one-arg')\n frearg = pp.Word(pp.alphanums + \"-\").setName('free-form-arg')\n\n zeroorone = pp.Forward()\n zeroorone.setName('zero-or-one')\n zeroorone_block = pp.ZeroOrMore((\"|\" + pp.OneOrMore(zooarg | zeroorone)) | pp.OneOrMore(zeroorone))\n zeroorone << pp.Group(lbracket + pp.ZeroOrMore(zooarg) + zeroorone_block + rbracket + zeroorone_flag)\n\n zeroormore = pp.Forward()\n zeroormore_block = pp.ZeroOrMore((\"|\" + pp.OneOrMore(zomarg | zeroormore)) | pp.OneOrMore(zeroormore))\n zeroormore << pp.Group(lbracket + pp.ZeroOrMore(zomarg) + zeroormore_block + rbracket + zeroormore_flag)\n zeroormore.setResultsName('zero-or-more')\n\n oneormore = pp.Forward()\n oneormore_block = pp.ZeroOrMore((\"|\" + pp.OneOrMore(oomarg | oneormore)) | pp.OneOrMore(oneormore))\n oneormore << pp.Group(lbracket + pp.ZeroOrMore(oomarg) + oneormore_block + rbracket + oneormore_flag)\n oneormore.setName('one-or-more')\n\n only1opt = pp.Forward()\n only1opt_block = pp.ZeroOrMore((\"|\" + pp.OneOrMore(oooarg | only1opt)) | pp.OneOrMore(only1opt))\n only1opt << pp.Group(lbracket + pp.ZeroOrMore(oooarg) + only1opt_block + rbracket + only1opt_flag)\n only1opt.setName('one-or-more')\n\n freeform = pp.Forward()\n freeform_block = pp.ZeroOrMore((\"|\" + pp.OneOrMore(frearg | freeform)) | pp.OneOrMore(freeform))\n freeform << pp.Group(lbracket + pp.ZeroOrMore(frearg) + freeform_block + rbracket + freeform_flag)\n freeform.setName('zero-or-one')\n\n options = pp.ZeroOrMore(pp.Group(zeroorone | zeroormore | oneormore | only1opt | freeform))\n syntax = command + pp.ZeroOrMore(posarg) + options\n return (syntax + pp.stringEnd)", "title": "" }, { "docid": "3a0892bf19aed1263b47b3d1c2302819", "score": "0.61829174", "text": "def get_input_word():\n return 'HOW ARE YOU'", 
"title": "" }, { "docid": "07b30010e292f274472b8a0adb3cadf5", "score": "0.6083782", "text": "def syntax(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "07b30010e292f274472b8a0adb3cadf5", "score": "0.6083782", "text": "def syntax(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "3067b92022330b21f876cfc84e01e600", "score": "0.59758365", "text": "def format_term(self, exst_term):\n # add space so user can type after\n return self.code + \" \" + self.regex", "title": "" }, { "docid": "c028d3d377b961bfaa394b3f98e2f9ef", "score": "0.5872812", "text": "def inflect_ing(word):\n\n return", "title": "" }, { "docid": "49c459e5124165e84bbb5c04a2d73f3c", "score": "0.58243144", "text": "def syntax(self) -> Optional[pulumi.Input['ApiSyntax']]:\n return pulumi.get(self, \"syntax\")", "title": "" }, { "docid": "78781d582ad78f4a68e880ede50c94d7", "score": "0.5778411", "text": "def word(x):", "title": "" }, { "docid": "2bdaeef68cee767519fa440b839a5a40", "score": "0.5747522", "text": "def parse_word ( self ) :\n token_str = ''\n while self.caracter_atual != None and self.caracter_atual in alfanumu :\n token_str += self.caracter_atual\n self.avancar()\n if token_str in keywords :\n return Token(TokenTipo.TOKEN_KEYWORD, valor=token_str)\n else :\n return Token(TokenTipo.TOKEN_IDENT, valor=token_str)", "title": "" }, { "docid": "cbe45ddcba52db56cd8981bb53a6ee56", "score": "0.5721993", "text": "def getWord(word):\n list = ['英音', '美音', 'vt.', 'vi.', 'adj.', 'adv.', 'n.', 'other. ']\n print(''+word)\n content = str()\n for _ in list:\n text = input(_).strip()\n if text:\n content += ':' + _ + text\n line = word + content\n return line", "title": "" }, { "docid": "ce47d0721933b727021d1135c18309fe", "score": "0.57213825", "text": "def sourceString(self,hyp):\n tokens = self.sense.tree.node(hyp.node)\n words = []\n for tokenId in tokens:\n if tokenId > 0:\n words.append(self.sense.tokens[tokenId-1][1])\n else:\n words.append(self.sense.tokens[self.sense.mapNodeToMainToken[-tokenId]-1][0])\n return \" \".join(words)", "title": "" }, { "docid": "0ac10925ab434a4c1a2618740e928a7e", "score": "0.5699399", "text": "def syntax(self):\n return \"\"", "title": "" }, { "docid": "9897c606e15ab780b408afc5d1937f0e", "score": "0.5697342", "text": "def language_for(rep):\n return isolang.find_raw(rep)", "title": "" }, { "docid": "2b1eb9bb45f2da12c08608a16d2fc5c9", "score": "0.566882", "text": "def get_qword(self, ea):", "title": "" }, { "docid": "fc3a280cf8604e0f5be3d5819baeedba", "score": "0.5658777", "text": "def get_word(wid):", "title": "" }, { "docid": "fc3a280cf8604e0f5be3d5819baeedba", "score": "0.5658777", "text": "def get_word(wid):", "title": "" }, { "docid": "74e65a8ad1139add5adb336c31b92a33", "score": "0.56225276", "text": "def main_keyword(self) -> str:", "title": "" }, { "docid": "3e2e53c73b8ffb07baa2b61d6ef5ceeb", "score": "0.55758524", "text": "def lexeme(self):\n return self._lexeme", "title": "" }, { "docid": "127a2e11eb410d8f7d7d304145b3eaaa", "score": "0.55366313", "text": "def __getType(self, word):\n if word.startswith(\"-\"):\n return self.__infixKey if word.endswith(\"-\") else self.__suffixKey\n elif word.endswith(\"-\"):\n return self.__prefixKey\n else:\n return self.__fullWordKey", "title": "" }, { "docid": "3b3f64953c6406d6dd4bebf57557f585", "score": "0.5521672", "text": "def get_concept():\n concept = \"Ant Man and The Wasp\"\n return concept", "title": "" }, { "docid": "6b41d10bab78da8a46ac1d00197e9143", "score": "0.5509901", "text": "def specific_kind(self):\n\n i = 0\n\n # Quit 
parent\n gen1 = self.get_cursor().get_tokens()\n tok = CustomVector()\n for t in gen1:\n\n if(str(t.spelling) != '(' and str(t.spelling) != ')'):\n tok.push_back(t)\n\n op = []\n for e in tok.begin():\n i = i + 1\n if(i < tok.size()):\n\n if (str(e.spelling) == '&&'):\n op.append('&&')\n elif (str(e.spelling) == '||'):\n op.append('||')\n elif (str(e.spelling) == ','):\n op.append(',')\n elif (str(e.spelling) == '=='):\n op.append('==')\n elif (str(e.spelling) == '='):\n op.append('=')\n elif (str(e.spelling) == '&'):\n op.append('&')\n elif (str(e.spelling) == '|'):\n op.append('|')\n elif (str(e.spelling) == '^'):\n op.append('^')\n elif (str(e.spelling) == '>>'):\n op.append('>>')\n elif (str(e.spelling) == '<<'):\n op.append('<<')\n elif (str(e.spelling) == '>'):\n op.append('>')\n elif (str(e.spelling) == '<'):\n op.append('<')\n elif (str(e.spelling) == '!='):\n op.append('!=')\n elif (str(e.spelling) == '<='):\n op.append('<=')\n elif (str(e.spelling) == '>='):\n op.append('>=')\n elif (str(e.spelling) == '*'):\n op.append('*')\n elif (str(e.spelling) == '/'):\n op.append('/')\n elif (str(e.spelling) == '+'):\n op.append('+')\n elif (str(e.spelling) == '-'):\n op.append('-')\n elif (str(e.spelling) == '%'):\n op.append('%')\n elif (str(e.spelling) == '/='):\n op.append('/=')\n elif (str(e.spelling) == '%='):\n op.append('%=')\n elif (str(e.spelling) == '+='):\n op.append('+=')\n elif (str(e.spelling) == '-='):\n op.append('-=')\n elif (str(e.spelling) == '*='):\n op.append('*=')\n elif (str(e.spelling) == '<<='):\n op.append('<<=')\n elif (str(e.spelling) == '>>='):\n op.append('>>=')\n elif (str(e.spelling) == '^='):\n op.append('^=')\n elif (str(e.spelling) == '&='):\n op.append('&=')\n elif (str(e.spelling) == '|='):\n op.append('|=')\n\n if op[0] == '=':\n return op[0]\n\n elif('=' not in op):\n return op[len(op) - 1]\n elif len(op) > 1:\n return op[1]\n\n else:\n return op[0]", "title": "" }, { "docid": "d922ac5d397e2e8d32adb9825af24e54", "score": "0.5504825", "text": "def lexKey(self,w):\n if isinstance(w, str):\n t=\"\"\n for i in w:\n t+=self.alphabet[self.lexicon[i]]\n return t\n else:\n return NotImplemented", "title": "" }, { "docid": "ad03785ad9864d4c57c7987f7c186716", "score": "0.549642", "text": "def find_syntax_by_extension(self, extension: str) -> Syntax:\n ...", "title": "" }, { "docid": "146681d41676472a319cd86c7f0d2aec", "score": "0.5483175", "text": "def make_3sg_form(words):\n #use the .endswith('x) function which returns true if our input string ends\n #with x and false otherwise\n #use the .endswith function in conjunction with a series of if/else \n #statements to test our grammar rules\n #remember to make the/if else statements from most specific to most general\n\n\n if words.endswith('y'):\n return words[0:-1] + 'ies' #case one, if word ends in y return word\n #without the y and with ies added on\n\n\n \n elif words.endswith('o'):\n return words + 'es' #case two, if the word ends in o, \n #return the original word with es added to the end \n \n elif words.endswith('ch'):\n return words + 'es' #This is the same as the case above. \n elif words.endswith('s'):\n return words + 'es' #This is the same as the case above. \n elif words.endswith('sh'):\n return words + 'es' #This is the same as the case above. \n elif words.endswith('x'):\n return words + 'es' #This is the same as the case above. \n elif words.endswith('z'):\n return words + 'es' #This is the same as the case above. 
\n else: \n return words + 's' #final case, if the word ends in any other letter \n #than the ones specified return the original word \n #with s added on to the end. ", "title": "" }, { "docid": "f7117a00de38f8134306bb06e88311e0", "score": "0.54685974", "text": "def specific_kind(self):\n generator = super(UnaryOperator, self).get_tokens()\n op = ''\n for e in generator:\n if (str(e.spelling) == '&'):\n op += '&'\n elif (str(e.spelling) == '|'):\n op += '|'\n elif (str(e.spelling) == '^'):\n op += '^'\n elif(str(e.spelling) == '!'):\n op += '!'\n elif(str(e.spelling) == '~'):\n op += '~'\n elif(str(e.spelling) == '++'):\n op += '++'\n elif (str(e.spelling) == '--'):\n op += '--'\n elif (str(e.spelling) == '-'):\n op += '-'\n\n return op", "title": "" }, { "docid": "68580274eb78d5c9097d67003d34a3c1", "score": "0.5459123", "text": "def analyzeSyntax(text):\n\n document = types.Document(\n content=text,\n type=enums.Document.Type.PLAIN_TEXT,\n language='en')\n\n try:\n response = client.analyze_syntax(\n document=document, encoding_type='UTF8')\n\n values = []\n for token in response.tokens:\n tokenText = token.text.content\n tokenBeginOffset = token.text.begin_offset\n tokenTag = u\"{}\".format(enums.PartOfSpeech.Tag(\n token.part_of_speech.tag).name)\n if tokenTag == \"CONJ\":\n tokenTag = \"CCONJ\"\n if tokenTag == \"PRT\":\n tokenTag = \"PART\"\n values.append({\n \"token_text\": tokenText,\n \"token_begin_offset\": tokenBeginOffset,\n \"pos_tag\": tokenTag\n })\n\n return Syntax(values, \"\")\n except Exception as e:\n return Syntax([], str(e.args))", "title": "" }, { "docid": "19a0c8022bfeb19eae043680be749302", "score": "0.544956", "text": "def _process_syntax(tokens):\n command = tokens[0]\n rules = process_tokens(tokens[1:])\n return command, rules", "title": "" }, { "docid": "2f4fb65cf09753cf64093e588cd59f12", "score": "0.5448803", "text": "def word_definition_parser(answer):\n # check if returned answer has multiple definitions\n if len(answer) > 1:\n full_answer = '\\n'.join(str(\"-\" + item) for item in answer)\n return full_answer\n # otherwise, list returns with only 1 value, return that first value\n else:\n return answer[0]", "title": "" }, { "docid": "d68bb5f6950d793b1dfca81df48f1589", "score": "0.5433954", "text": "def get_word_and_hint(self):\n return 'word', 'hint for word'", "title": "" }, { "docid": "56211a70ede024d3894cfd7ea4082632", "score": "0.54141206", "text": "def make_ing_form(word):\n #Use series of if/else statments to implement different grammar rules\n #remember to go from most specific case to most general case\n if word in ['be', 'see', 'flee', 'knee']: #our case of exceptions given in \n #the original problem\n #If a word is one of these four\n #return orignal word with ing\n #added to the end\n return word + 'ing'\n \n elif word.endswith('ie'): #if word ends in ie return word with ie taken off\n #and ying added to the end of the word\n return word[0:-2] + 'ying' \n \n elif word.endswith('e'): #if word ends in e return word with e removed and\n #ing added to the end, \n #note this has to come after\n #the case of exceptions since those end in e\n return word[0:-1] + 'ing'\n \n \n elif word[-3] in Con and word[-2] in Vowel and word[-1] in Con: \n \n return word + word[-1] + 'ing' #if word ends in consonent vowel con\n #return word with last letter \n #doubled and ing added\n \n else:\n return word + 'ing' #most general case. 
If word doesn't match any of \n #the above cases return word with ing added", "title": "" }, { "docid": "5fecfb7a7dbf49d4e2cc460b1f9d9a1c", "score": "0.5396109", "text": "def format_word(self, word):\n raise NotImplementedError()", "title": "" }, { "docid": "b2c2d31f78deddffa130139e1bb807ca", "score": "0.53814435", "text": "def ModeWord(word):\n match = REGEX.MODE.match(word)\n if match:\n return word\n raise UnrecognizedWord(word)", "title": "" }, { "docid": "48be36b354faf8e435b18cdf1b127b32", "score": "0.5373565", "text": "def get_keyword_string_of(keyword):\n return keyword.value[0]", "title": "" }, { "docid": "9f85efeb788d13c81f2abf103203b4f6", "score": "0.5371966", "text": "def getWord(form, pos=NOUN):\n return dictionaryFor(pos).getWord(form)", "title": "" }, { "docid": "291b3ff868677895a2b5ac4001a4c5c6", "score": "0.53678733", "text": "def syntax_text(text):\n client = language.LanguageServiceClient()\n\n if isinstance(text, six.binary_type):\n text = text.decode('utf-8')\n\n # Instantiates a plain text document.\n document = types.Document(\n content=text,\n type=enums.Document.Type.PLAIN_TEXT)\n\n # Detects syntax in the document. You can also analyze HTML with:\n # document.type == enums.Document.Type.HTML\n tokens = client.analyze_syntax(document).tokens\n\n # part-of-speech tags from enums.PartOfSpeech.Tag\n pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM',\n 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX')\n\n for token in tokens:\n print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag],\n token.text.content))", "title": "" }, { "docid": "169610fe5c4266d1ace0b1027cec061e", "score": "0.5355503", "text": "def SystemWord(word):\n # vroom.controls can't import vroom.shell, because that creates a circular\n # dependency both with itself (controls is imported for DEFAULT_MODE) and\n # vroom.test. 
Sorry, pylint.\n # Pylint, brilliant as usual, thinks that this line redefines 'vroom'.\n # pylint: disable-msg=redefined-outer-name\n import vroom.shell # pylint: disable-msg=g-import-not-at-top\n regex = re.compile(r'^(%s)$' % '|'.join(vroom.shell.STRICTNESS.Values()))\n match = regex.match(word)\n if match:\n return word\n raise UnrecognizedWord(word)", "title": "" }, { "docid": "b6ad46cc4b6b0b4f16406c493b931bd7", "score": "0.5349829", "text": "def identifier(self) -> str:\n return self.current_word", "title": "" }, { "docid": "6a9c86711960e245bfcf3ede53cbba9c", "score": "0.5343614", "text": "def get_word(diagram):\n diag = Diagram(diagram)\n diag.route()\n return diag.word", "title": "" }, { "docid": "0f29359ccb7eb934cf30bc39f0e05a56", "score": "0.533555", "text": "def keyword(self) -> str:\n return self.current_word", "title": "" }, { "docid": "5e3b6a690648bade837d39e976ddc815", "score": "0.5334248", "text": "def get_next_word(self) -> str:", "title": "" }, { "docid": "644e36115f3d6d5101126876e63f41c4", "score": "0.53321695", "text": "def syntax(self) -> Optional[pulumi.Input['MethodSyntax']]:\n return pulumi.get(self, \"syntax\")", "title": "" }, { "docid": "3aba7156f014242830af9f235229f0e3", "score": "0.5328047", "text": "def wolfram(self) -> str:", "title": "" }, { "docid": "0036e3cb5c6fbc1a1109255e13208709", "score": "0.53268254", "text": "def word_format(self):\n\t\t\n\t\treturn self._word_format", "title": "" }, { "docid": "56abbad20c0ee5c10248b1df21bf08d4", "score": "0.53232676", "text": "def _tokenizealias(program, lookup=None):\n return tokenize(program, lookup=lookup,\n syminitletters=_aliassyminitletters)", "title": "" }, { "docid": "74bfc181cab2fe6596ffb97f0d6299c2", "score": "0.5316102", "text": "def word(word_time):\n return word_time[0]", "title": "" }, { "docid": "74bfc181cab2fe6596ffb97f0d6299c2", "score": "0.5316102", "text": "def word(word_time):\n return word_time[0]", "title": "" }, { "docid": "fb8d8f5cfcc7367a896a6f25cb1149df", "score": "0.53029394", "text": "def get_definition(word: str) -> dict:\n definitions = services.get_word_definitions(word)\n logger.info(\"\\n Definition: {}\\n\".format(definitions[0]))\n return definitions[0]", "title": "" }, { "docid": "7235d5832af67959611dd46ca60792c3", "score": "0.52890384", "text": "def word_prog(self):\r\n guessed_word = ''\r\n\r\n for let in self.word:\r\n if let in self.hits:\r\n guessed_word += let\r\n else:\r\n guessed_word += '_'\r\n\r\n return guessed_word", "title": "" }, { "docid": "835cdbe3cbf22581695faa1b28b55628", "score": "0.52884734", "text": "def make_gloss_query_part(self, text, lang):\n text = text.lower()\n if text == '*':\n return '(.+[\\\\-=<>])?'\n elif text == '+':\n return '([^\\\\-=<>].*[\\\\-=<>])'\n elif text == '?':\n return '([^\\\\-=<>{}]+\\\\{[^{}]+\\\\}[\\\\-=<>])'\n mQuant = self.rxGlossQueryQuant.search(text)\n if mQuant is not None:\n glossBody, quantifier = self.make_gloss_query_part(mQuant.group(1), lang), mQuant.group(2)\n return '(' + glossBody + ')' + quantifier\n mSrc = self.rxGlossQuerySrc.search(text)\n if mSrc is not None:\n glossTag, glossSrc = mSrc.group(1), mSrc.group(2)\n if len(glossTag) <= 0:\n return '[^{}]*\\\\{(' + self.make_gloss_query_src_part(glossSrc, lang) + ')\\\\}[\\\\-=<>]'\n return '(' + glossTag + ')\\\\{(' + self.make_gloss_query_src_part(glossSrc, lang) + ')\\\\}[\\\\-=<>]'\n if ('lang_props' in self.settings and lang in self.settings['lang_props']\n and 'gloss_shortcuts' in self.settings['lang_props'][lang]\n and text in 
self.settings['lang_props'][lang]['gloss_shortcuts']):\n text = self.settings['lang_props'][lang]['gloss_shortcuts'][text]\n return '(' + text + ')\\\\{[^{}]+\\\\}[\\\\-=<>]'\n return '(' + text.replace('.', '\\\\.') + ')\\\\{[^{}]+\\\\}[\\\\-=<>]'", "title": "" }, { "docid": "e2fdb8ba4313197096e691ec1e6ac1d0", "score": "0.5287642", "text": "def get_input_pattern():\n return '-token'", "title": "" }, { "docid": "91fc0fdbcc930bc21142faf4c9e509d6", "score": "0.52874494", "text": "def inside_word(self):\n pass # implemented in Ada", "title": "" }, { "docid": "c85ec9acb411f4bb41649558644c2781", "score": "0.5279828", "text": "def simpleslp1(word):\n word1 = simple_lower(word)\n word2 = remove_double(word1)\n ans1 = transcoder.transcoder_processString(word2,'slp1','simpleslp1lo')\n ans = [ans1]\n if 'f' in word2:\n # Handle other forms of 'f': ri,ru,ar\n for altf in ['ri','ru','ar']:\n word3 = re.sub('f',altf,word2)\n ansf = transcoder.transcoder_processString(word3,'slp1','simpleslp1lo')\n ans.append(ansf)\n # allow either 'm' or 'n' before consonant\n a1 = mn_consonants(ans,'m','n') # change mC to nC (C = consonant)\n a2 = mn_consonants(ans,'n','m')\n ans = ans + a1 + a2\n if 'kxp' in word2:\n # Handle other forms of 'x': l and also lr, lri,\n for altf in ['klrp','klrip','klrup','kalp']:\n word3 = re.sub('kxp',altf,word2)\n ansx = transcoder.transcoder_processString(word3,'slp1','simpleslp1lo')\n ans.append(ansx)\n if re.search(r'ar$',ans1):\n # cases like pw: kar <-> kf.\n # This is aimed at verbs only, but the code will catch words\n # ending in punar\n for altf in ['ri','ru','r']:\n x = re.sub(r'ar$',altf,ans1)\n if x not in ans:\n ans.append(x)\n # special case of 'kalp' verb (in pw, etc) == kxp\n if ans1 == 'kalp':\n for alt in ['klp','klrp','klrip']:\n x = re.sub('kalp$',alt,ans1)\n if x not in ans:\n ans.append(x)\n # Choose to add grammar variants\n # in the query\n return ans\n \"\"\"\n # Add Grammar variants\n ans1 = []\n for a in ans:\n for a1 in grammar_variants(a):\n if a1 not in ans1:\n ans1.append(a1)\n \n return ans1\n \"\"\"", "title": "" }, { "docid": "90d9af32842a0338356655354780f20b", "score": "0.52780175", "text": "def __find_syntagms(expression):\n\n replace_list = []\n start_const = 0\n start_index, end_index = 0, 0\n while True:\n try:\n start_index = expression.index('\"', start_const)\n if start_index - 1 != -1 and expression[start_index - 1] != ' ':\n raise InvalidInput(\"Not correctly separated words and syntagms\")\n end_index = expression.index('\"', start_index + 1)\n if end_index + 1 < len(expression) and expression[end_index + 1] != ' ':\n raise InvalidInput(\"Not correctly separated words and syntagms\")\n start_const = end_index + 1\n replace_list.append([start_index, end_index])\n except ValueError:\n if end_index < start_index:\n raise InvalidInput(\"Unequal number of '\\\"'\")\n break\n return replace_list", "title": "" }, { "docid": "3d6cef60169aafdb47e203bd64a81b3a", "score": "0.52748626", "text": "def token_lookup():\n dict = {\n \".\": \"||Period||\",\n \",\": \"||Comma||\",\n \"\\\"\": \"||Quotation_Mark||\",\n \";\": \"||Semicolon||\",\n \"!\": \"||Exclamation_Mark||\",\n \"?\": \"||Question_Mark||\",\n \"(\": \"||Left_Parentheses||\",\n \")\": \"||Right_Parentheses||\",\n \"-\": \"||Dash||\",\n \"\\n\": \"||Return||\"\n }\n\n return dict", "title": "" }, { "docid": "93501323abbe92bcda12611bd4f91599", "score": "0.5270571", "text": "def parse_value(self):\n string = ''\n value = self.get_cursor().get_tokens()\n for t in value:\n string += 
str(t.spelling)\n\n stri = \"\"\n for c in string:\n if (c in \"\\\"; \"):\n pass\n else:\n stri += c\n\n return stri", "title": "" }, { "docid": "9c42b4e7831157edfd151f97b1988a3d", "score": "0.5267561", "text": "def parse_template(word:str)->str:\n actual_part=re.findall(r'{(.*?)}',word)\n\n actual_stripped=re.sub('{.*?}','{}',word)\n\n return actual_stripped,tuple(actual_part)", "title": "" }, { "docid": "e9023edb994b728035fd3fe3c8da2ddb", "score": "0.525183", "text": "def inner(word):\n return word + '!!!'", "title": "" }, { "docid": "e9023edb994b728035fd3fe3c8da2ddb", "score": "0.525183", "text": "def inner(word):\n return word + '!!!'", "title": "" }, { "docid": "bdb8ff737c621b4482d86cc521cc373c", "score": "0.52500635", "text": "def syntax_gen(i, dictionary):\n output = \"\"\n while True:\n if lista[i-1][1] is (\"adj\" or \"adv\" or \"konj\"):\n pass\n # elif lista[i-1][0] is (\"den\" or \"det\" or \"de\" or \"der\" or \"min\" or \"din\" or \"sin\" or \"hans\" or \"hennes\" or \"vår\" or \"deres\"):\n # output = \"\"\n # http://runeberg.org/dukkhjem/1.html\n return", "title": "" }, { "docid": "2c18c5d909609a03ca50a140c226008a", "score": "0.52361184", "text": "def get_lexicon(Name=None):\n pass", "title": "" }, { "docid": "fe933d6cf37671cc94e21b85d6ee4be3", "score": "0.5233664", "text": "def inner(word):\r\n return word + '!!!'", "title": "" }, { "docid": "473644a083451d1759efb096de31949c", "score": "0.52134454", "text": "def word(self):\n return self.words(quantity=1)[0]", "title": "" }, { "docid": "cf5fd51986518e56da94151382ac6ff5", "score": "0.5209573", "text": "def get_word(ix, s):\n\tm = re_word.match(s[ix:])\n\treturn m and m.group(0) or ''", "title": "" }, { "docid": "8531d4aec6226a59929d1386a3f86ac9", "score": "0.52082485", "text": "def analyze_identifier_reference(word):\n logging.debug('analyze_identifier_reference: \"{0}\"'.format(word))\n # if word == \" and \":\n # ipdb.set_trace() # #BREAKPOINT#\n if word == VALUE:\n return VALUE\n\n elif word == NULL:\n return NULL\n\n else:\n if ' ' not in word:\n word = re.escape(word)\n\n if word.lower() in ALL_OPERATORS:\n return OPERATOR\n else:\n return VALUE", "title": "" }, { "docid": "ba6e7cca59263b6d8fcfc634787f45ff", "score": "0.520423", "text": "def api_macro_w_syntax(_, macro_id):\n macro = get_object_or_404(MacroDefinition, pk=macro_id)\n return {\"text\": macro.text_with_markup, \"line\": macro.line, \"name\": macro.name,\n \"source\": macro.source.file_path, \"package\": macro.source.pkg.name, }", "title": "" }, { "docid": "2c7a5f3bb419ef509b668f40454ec896", "score": "0.5201523", "text": "def get_word(self):\n return self.word", "title": "" }, { "docid": "fa81e3dfa09c0fbe481e0e64cd830017", "score": "0.5199621", "text": "def syntax_sva(lista, dictionary):\n if lista[5] in dictionary:\n return dictionary[lista[5]]\n else:\n output = \"\"\n if ((lista[4] is \"verb\") and (lista[6] is \"konj\") and (type(lista[7]) is not int) and (lista[7] is not \"verb\")):\n output = \"sub\"\n elif ((lista[4] is \"verb\") and (lista[6] is \"konj\") and (lista[7] is \"adv\") and (lista[8] is not \"verb\")):\n output = \"sub\"\n elif ((lista[4] is \"verb\") and (lista[6] is \"konj\") and (lista[7] is \"verb\")):\n output = \"verb\"\n elif ((lista[4] is \"verb\") and (lista[6] is \"konj\") and (lista[7] is \"adv\") and (lista[8] is \"verb\")):\n output = \"verb\"\n elif ((lista[0] is \"pron\") and ((lista[1] and lista[2] and lista[3] and lista[4]) is (\"adj\" or \"adv\" or \"tall\" or \"konj\"))):\n output = \"sub\"\n elif ((lista[1] is 
\"pron\") and ((lista[2] and lista[3] and lista[4]) is (\"adj\" or \"adv\" or \"tall\" or \"konj\"))):\n output = \"sub\"\n elif ((lista[2] is \"pron\") and ((lista[3] and lista[4]) is (\"adj\" or \"adv\" or \"tall\"))):\n output = \"sub\"\n elif ((lista[3] is \"pron\") and (lista[4] is (\"adj\" or \"tall\"))):\n output = \"sub\"\n elif ((lista[3] is \"pron\") and (lista[4] is (\"adj\" or \"adv\")) and (lista[6] is \"konj\")):\n output = \"sub\"\n elif ((lista[0] is \"verb\") and ((lista[1] and lista[2] and lista[3] and lista[4]) is (\"adv\" or \"adj\" or \"konj\"))):\n output = \"verb\"\n elif ((lista[1] is \"verb\") and ((lista[2] and lista[3] and lista[4]) is (\"adv\" or \"adj\" or \"konj\"))):\n output = \"verb\"\n elif ((lista[2] is \"verb\") and ((lista[3] and lista[4]) is (\"adj\" or \"adv\"))):\n output = \"verb\"\n elif ((lista[3] is \"verb\") and (lista[4] is (\"adj\" or \"adv\"))):\n # har virkelig åpnet\n output = \"verb\"\n elif ((lista[3] is not (\"sub\" or \"oklart\")) and (lista[4] is \"verb\")): # and (type(lista[3]) is not int) excluded since\n output = \"sub\" # the element must have been checked before\n elif ((lista[2] is not (\"sub\" or \"oklart\")) and (lista[3] is \"verb\") and (lista[4] is (\"adj\" or \"adv\"))):\n output = \"sub\"\n elif ((lista[2] is not (\"sub\" or \"oklart\")) and (lista[4] is \"verb\") and (lista[3] is (\"adj\" or \"adv\"))):\n # bisats\n output = \"sub\"\n elif ((lista[3] and lista[4]) is (\"prepo\" or \"part\")):\n output = \"sub\"\n elif ((lista[3] is (\"prepo\" or \"part\")) and (lista[4] is \"pron\")):\n output = \"sub\"\n elif lista[4] is \"\":\n if ((lista[6] is \"pron\") and (lista[7] is not \"konj\")):\n output = \"verb\"\n elif lista[6] is (\"tall\" or \"oklart\"):\n output = \"verb\"\n elif ((lista[6] is \"pron\") and (lista[7] is \"konj\") and (type(lista[8]) is int)):\n if lista[9] is (\"pron\" or \"prepo\" or \"part\" or \"adv\"):\n output = \"verb\"\n else:\n output = \"sub\"\n else:\n output = \"sub\"\n elif lista[4] is \"prepo\":\n output = \"sub\"\n elif (((lista[4] and lista[6]) is \"pron\") and (type(lista[7]) is int)):\n output = \"verb\"\n elif (((lista[4] and lista[2]) is \"pron\") and (lista[3] is \"verb\")):\n output = \"sub\"\n elif (((lista[4] and lista[6]) is \"pron\") and (lista[3] is (\"prepo\" or \"part\" or \"adv\"))):\n output = \"sub\"\n elif (((lista[4] and lista[1]) is \"pron\") and (lista[2] is \"verb\") and (lista[3] is (\"prepo\" or \"part\"))):\n output = \"sub\"\n elif ((lista[3] is \"verb\") and (lista[4] is (\"adv\" or \"adj\")) and (lista[6] is (\"pron\" or \"prepo\"))):\n output = \"verb\"\n elif ((lista[2] is \"verb\") and ((lista[3] and lista[4]) is (\"adv\" or \"adj\")) and (lista[6] is (\"pron\" or \"prepo\" or \"part\"))):\n output = \"verb\"\n elif ((lista[1] is \"verb\") and ((lista[2] and lista[3] and lista[4]) is (\"adv\" or \"adj\")) and (lista[6] is (\"pron\" or \"prepo\" or \"part\"))):\n output = \"verb\"\n elif ((lista[0] is \"verb\") and ((lista[1] and lista[2] and lista[3] and lista[4]) is (\"adv\" or \"adj\")) and (lista[6] is (\"pron\" or \"prepo\" or \"part\"))):\n output = \"verb\"\n elif ((lista[3] is \"verb\") and (lista[4] is (\"adv\" or \"adj\")) and ((lista[6] and lista[7]) is (\"prepo\" or \"pron\" or \"part\"))):\n output = \"verb\"\n elif ((lista[2] is \"verb\") and ((lista[3] and lista[4]) is (\"adv\" or \"adj\")) and ((lista[6] and lista[7]) is (\"prepo\" or \"pron\" or \"part\"))):\n output = \"verb\"\n elif ((lista[1] is \"verb\") and ((lista[2] and lista[3] and 
lista[4]) is (\"adv\" or \"adj\")) and ((lista[6] and lista[7]) is (\"prepo\" or \"pron\" or \"part\"))):\n output = \"verb\"\n elif ((lista[0] is \"verb\") and ((lista[1] and lista[2] and lista[3] and lista[4]) is (\"adv\" or \"adj\")) and ((lista[6] and lista[7]) is (\"prepo\" or \"pron\" or \"part\"))):\n output = \"verb\"\n elif ((lista[4] is (\"prepo\" or \"part\")) and (lista[6] is (\"adj\" or \"adv\"))):\n output = \"adj\"\n elif lista[4] is (\"prepo\" or \"part\"):\n output = \"sub\"\n elif ((lista[4] is \"pron\") and (lista[6] is \"konj\") and (lista[7] is not (\"sub\" or \"tall\" or \"adj\" or \"adv\"))):\n output = \"verb\"\n elif ((lista[4] is \"pron\") and (lista[6] is \"konj\") and (lista[7] is (\"sub\" or \"tall\" or \"adj\" or \"adv\"))):\n output = \"sub\"\n elif ((lista[4] is \"pron\") and ((lista[6] is \"pron\") or ((lista[6] is \"adv\") and (lista[7] is \"pron\")))):\n output = \"verb\"\n elif ((lista[3] is \"verb\") and (lista[4] is (\"prepo\" or \"part\")) and ((type(lista[6]) is int) or (lista[6] is (\"sub\" or \"verb\")))):\n output = \"adj\"\n elif ((lista[2] is \"verb\") and (lista[3] is (\"adv\" or \"adj\")) and (lista[4] is (\"prepo\" or \"part\")) and (type(lista[6]) is int)):\n output = \"adj\"\n elif ((lista[2] is \"verb\") and (lista[3] is (\"prepo\" or \"part\")) and (lista[4] is (\"adj\" or \"tall\"))):\n output = \"sub\"\n elif ((lista[1] is \"verb\") and (lista[2] is (\"adv\" or \"adj\")) and (lista[3] is (\"prepo\" or \"part\")) and (lista[4] is (\"adj\" or \"tall\"))):\n output = \"sub\"\n elif ((lista[3] is (\"pron\" or \"sub\")) and (lista[4] is \"verb\") and (lista[6] is (\"part\" or \"prepo\")) and (lista[7] is \"pron\")):\n output = \"sub\"\n elif ((lista[2] is (\"pron\" or \"sub\")) and ((lista[3] or lista[4]) is \"verb\") and ((lista[3] or lista[4]) is (\"adj\" or \"adv\")) and (lista[6] is (\"part\" or \"prepo\")) and (lista[7] is \"pron\")):\n output = \"sub\"\n elif ((lista[4] is (\"pron\" or \"sub\")) and (lista[6] is (\"prepo\" or \"part\")) and (lista[7] is \"pron\") and (lista[8] is \"pron\")):\n output = \"verb\"\n elif ((lista[4] is (\"pron\" or \"sub\")) and ((lista[6] and lista[7]) is (\"adv\" or \"prepo\" or \"part\")) and (lista[8] is \"pron\") and (lista[9] is \"pron\")):\n output = \"verb\"\n elif ((lista[3] is (\"pron\" or \"sub\")) and (lista[4] is \"adv\") and (lista[6] is (\"prepo\" or \"part\")) and (lista[7] is \"pron\") and (lista[8] is \"pron\")):\n output = \"verb\"\n elif ((lista[2] is (\"pron\" or \"sub\")) and ((lista[3] and lista[4]) is \"adv\") and (lista[6] is (\"prepo\" or \"part\")) and ((lista[7] and lista[8]) is \"pron\")):\n output = \"verb\"\n elif ((lista[1] is (\"pron\" or \"sub\")) and ((lista[2] and lista[3] and lista[4]) is \"adv\") and (lista[6] is (\"prepo\" or \"part\")) and ((lista[7] and lista[8]) is \"pron\")):\n output = \"verb\"\n elif ((lista[0] is \"verb\") and ((lista[1] and lista[2] and lista[3]) is (\"adv\" or \"part\" or \"prepo\" or \"adj\" or \"konj\" or \"tall\")) and (lista[4] is \"sub\")):\n output = \"sub\"\n elif ((lista[1] is \"verb\") and ((lista[2] and lista[3]) is (\"adv\" or \"adj\" or \"part\" or \"prepo\" or \"tall\")) and (lista[4] is \"sub\")):\n output = \"sub\"\n elif ((lista[2] is \"verb\") and (lista[3] is (\"adv\" or \"adj\" or \"tall\")) and (lista[4] is \"sub\")):\n output = \"sub\"\n elif ((lista[6] is \"pron\") and (lista[7] is not \"verb\")):\n output = \"verb\"\n elif ((lista[6] is \"pron\") and ((lista[7] is \"verb\") or (type(lista[7]) is int))):\n output = 
\"sub\"\n elif (lista[6] is \"tall\"):\n output = \"verb\"\n elif ((lista[4] is \"tall\") and ((lista[6] is \"verb\") or (type(lista[6]) is int))):\n output: \"sub\"\n elif ((lista[4] is \"tall\") and (lista[6] is (\"adj\" or \"adv\")) and ((lista[7] is \"verb\") or (type(lista[7]) is int))):\n output: \"sub\"\n elif ((lista[4] is \"oklart\") and (lista[6] is \"pron\") and (lista[7] is (\"adv\" or \"adj\" or \"part\" or \"prepo\"))):\n output = \"verb\"\n elif ((lista[3] is \"konj\") and (lista[4] is \"oklart\") and (lista[6] is \"pron\")):\n output = \"verb\"\n elif ((lista[3] is \"oklart\") and (lista[4] is (\"adv\" or \"adj\")) and (lista[6] is (\"pron\" or \"prepo\" or \"part\"))):\n output = \"verb\"\n elif ((lista[4] is (\"adv\" or \"adj\")) and ((lista[6] is (\"pron\" or \"tall\" or \"sub\")) or (type(lista[6]) is int))):\n output = \"verb\"\n elif ((lista[4] is \"pron\") and ((lista[6] is \"adj\") or (type(lista[6]) is int)) and ((lista[7] is \"verb\") or (type(lista[7]) is int))):\n output = \"sub\"\n elif ((lista[3] is \"pron\") and (lista[4] is (\"sub\" or \"oklart\")) and ((lista[6] is \"verb\") or (type(lista[6]) is int))):\n # consequently from the case above\n output = \"adj\"\n elif ((lista[4] is \"konj\") and (lista[6] is (\"adv\" or \"adj\" or \"pron\" or \"prepo\" or \"part\"))):\n output = \"verb\"\n elif ((lista[4] is \"adv\") and (lista[6] is \"adv\")):\n output = \"verb\"\n elif ((lista[4] is \"adv\") and (lista[6] is (\"prepo\" or \"part\")) and (lista[7] is \"adv\")):\n output = \"verb\"\n elif ((lista[4] is \"pron\") and (lista[6] is (\"adv\" or \"adj\"))):\n output = \"verb\"\n elif ((lista[4] is \"pron\") and (lista[6] is (\"prepo\" or \"part\")) and (lista[7] is (\"adv\" or \"adj\"))):\n output = \"verb\"\n elif (((lista[3] and lista[4]) is \"pron\") and (lista[6] is not \"verb\")):\n output = \"verb\"\n elif (((lista[3] and lista[4]) is \"pron\") and (lista[6] is \"verb\")):\n output = \"sub\"\n else:\n output = \"oklart\"\n return output", "title": "" }, { "docid": "c2ad7660b4bd49d479b2177b235aadcc", "score": "0.51971817", "text": "def lex_word(self, index):\n try:\n word = self._lexicon['list'][index]\n return word\n except KeyError:\n return None", "title": "" }, { "docid": "5908bc3084ef63af91a23b581be0798d", "score": "0.51869345", "text": "def parse(line):\n return OPERATION[line[:3]](line)", "title": "" }, { "docid": "f322f4a4674e4c1d5daf79de50913a5f", "score": "0.5185236", "text": "def lex(self, source):\n return self.lark().lex(source)", "title": "" }, { "docid": "69eac15e2303c7897f82e726d7b42ab5", "score": "0.5183749", "text": "async def word(self, ctx, *, word: str):\n \n async with ctx.typing():\n word = word.lower()\n resp = await definition_urban(word)\n\n if resp['status'] == 'error':\n await ctx.message.add_reaction('❌')\n return await ctx.send('Unable to connect to UrbanDictionary')\n\n elif resp['status'] == 'no_result':\n await ctx.message.add_reaction('❌')\n return await ctx.send('No definition for `{}`'.format(word))\n\n elif resp['status'] == 'ok':\n defined = (resp['definition'] + ('\\n\\n_{}_'.format(resp['example'].strip('\\n').replace('*', r'\\*')) if resp['example'] else ''))\n if len(defined) >= 2048:\n defined = defined[:2044] + '..._'\n emb = discord.Embed(\n title='Definition for `{word}`'.format(word=resp['word']),\n description=defined,\n url=resp['link'])\n await ctx.message.add_reaction('✅')\n return await ctx.send(embed=emb)", "title": "" }, { "docid": "a7fb1c538c847fa9720d8c0d02fa19c6", "score": "0.51700485", "text": "def 
addSyntax(self, tag):\n\t\tself.syntax = ':'.join([self.syntax,tag]);", "title": "" }, { "docid": "165c77cf61ee6cc0eaca389e8f21a532", "score": "0.5168872", "text": "def query_lang(self, word):\n vars = {'$a': word}\n helper.query_with_vars(self.gm.client, '''query data($a: string){\n data(func: allofterms(lemmas, $a)) {\n sents@en:.\n sents@fr\n sents@de\n sents@zh\n sents@ja\n sents@es\n nsubj @facets\n verbs \n }\n }''', vars)", "title": "" }, { "docid": "04f50753a3ea957202302bc331858d19", "score": "0.516887", "text": "def _read_term(self) -> str:\n term: str = ''\n self._skip_whitespace_and_comments()\n if self.pointer == '\\r':\n self._inc()\n return '\\r\\n'\n elif self.pointer == '\\n':\n return '\\n'\n elif self.pointer == '.':\n return '.'\n elif self.pointer == ',':\n return ','\n while self.pointer and not re.match(r'[,\\t\\r\\n\\{\\}\\[\\]\\(\\)\\. ]', self.pointer):\n term += self.pointer\n self._inc()\n return term", "title": "" }, { "docid": "bd77467191eecd76d2dd9731bbc35734", "score": "0.5163789", "text": "def __getitem__(self, word):\n return self.syn0[self.vocab[word].index]", "title": "" }, { "docid": "597e0a26a2673e73826292397d21241a", "score": "0.516377", "text": "def get_name(self):\n return \"tex\"", "title": "" }, { "docid": "dd1b35a5166c5500e97425a1e6d7c5da", "score": "0.51611644", "text": "def make_3sg_form(word):\n if word.endswith('y'): #checks the last letter of the word for the letter 'y'\n word = word[:-1] + \"ies\" #will take off the last letter of the word and add 'ies'\n elif word.endswith( ('o' or 'ch'or 's'or'sh' or 'x'or 'z')): \n #checks the last letter of the word for letters o,ch,s,sh,x or z\n word += 'es' #ads 'es' to the word\n else: #all other words will just have the letter 's' added on to the end of it.\n word += 's'\n ## Should return word instead of printing it - Prof G\n return word\n ##print (word)", "title": "" }, { "docid": "bbfcdeaca68bad5a69584422a68daa56", "score": "0.51595175", "text": "def mathml(self) -> str:", "title": "" }, { "docid": "ff169aae86b62fb8661330d61dfdb5ad", "score": "0.5159441", "text": "def get_keyword_function_of(keyword):\n return keyword.value[1]", "title": "" }, { "docid": "254c8392610cadb7e063be8c7f4217db", "score": "0.5153508", "text": "def meaning(self):\n return self.response['results'][0]['lexicalEntries'][0]['entries'][0]['senses'][0]['definitions'][0]", "title": "" }, { "docid": "33e9c36eeed13378e1c48a3c1a0bac0c", "score": "0.5151944", "text": "def tokenize(word):\n\n\ttokens = list(word[:-1]) # Strip the \\n at the end\n\ttokens.insert(0, 'START')\n\ttokens.append('END')\n\n\treturn tokens", "title": "" }, { "docid": "281ac43448d954827022f73ead2c3f85", "score": "0.5151521", "text": "def Keywords(self) -> str:", "title": "" }, { "docid": "281ac43448d954827022f73ead2c3f85", "score": "0.5151521", "text": "def Keywords(self) -> str:", "title": "" }, { "docid": "fd3a348e587a41e52290e1416f099425", "score": "0.515", "text": "def get_pos(self,word,parser,context = None):\n \n if context is not None:\n pos = parser(\" \".join([context,word]))[-1].pos_\n else:\n pos = parser(word)[0].pos_\n \n return pos", "title": "" }, { "docid": "166fe99e116a79464c33f3c46ef0c91a", "score": "0.5147483", "text": "def parse_word(parsedCmd):\n cmd = parsedCmd[CMD_IDX]\n\n global USER_RHYME_TYPE\n global USER_WORD\n\n # Check if rhyme\n if (cmd == COMMANDS[RHYME_IDX] or \n (len(parsedCmd) > 1 and parsedCmd[1] == COMMANDS[RHYME_IDX])):\n # Check for incorrect args\n if len(parsedCmd) < 2 or len(parsedCmd) > 4:\n return 
WRONG_ARGS.format(cmd)\n\n # Make sure valid rhyme type given, if any, or set default (perfect)\n if len(parsedCmd) == 3:\n if(not parsedCmd[TYPE_IDX] in R_TYPES):\n return INVALID_TYPE.format(parsedCmd[TYPE_IDX])\n USER_RHYME_TYPE = parsedCmd[TYPE_IDX]\n USER_WORD = parsedCmd[WORD_IDX]\n elif parsedCmd[1] == COMMANDS[RHYME_IDX]:\n return WRONG_ARGS.format(parsedCmd[1])\n else:\n USER_RHYME_TYPE = R_TYPES[PERFECT_IDX]\n USER_WORD = parsedCmd[WORD_IDX - 1]\n\n # return corresponding string\n return SEARCHING.format(USER_RHYME_TYPE, USER_WORD)\n elif cmd == COMMANDS[STATS_IDX]:\n # Same thing\n if(len(parsedCmd) != 2):\n return WRONG_ARGS.format(cmd)\n \n USER_WORD = parsedCmd[WORD_IDX]\n return STATS.format(parsedCmd[WORD_IDX])", "title": "" }, { "docid": "e90e495f5bff5950ae6e08ee227e70a3", "score": "0.51405394", "text": "def find_variable_macro(line: str, dict_macro: dict) -> str:\n\n list_word = line.replace('(', \" \").split() # for\n\n for word in list_word:\n\n if word in dict_macro:\n return word\n\n return None", "title": "" }, { "docid": "58cf52f25e7e191aded3360549f04838", "score": "0.51337504", "text": "def verb_t1(self, token):\n if len(token) > 5 and token.startswith(\"\\u062A\"): # Taa\n for s2 in self.pl_si2:\n if token.endswith(s2):\n return token[1:-2]\n if len(token) > 5 and token.startswith(\"\\u064A\"): # Yaa\n for s2 in self.verb_su2:\n if token.endswith(s2):\n return token[1:-2]\n if len(token) > 4 and token.startswith(\"\\u0627\"): # Alif\n # Waaw Alif\n if len(token) > 5 and token.endswith(\"\\u0648\\u0627\"):\n return token[1:-2]\n # Yaa\n if token.endswith(\"\\u064A\"):\n return token[1:-1]\n # Alif\n if token.endswith(\"\\u0627\"):\n return token[1:-1]\n # Noon\n if token.endswith(\"\\u0646\"):\n return token[1:-1]\n # ^Yaa, Noon$\n if len(token) > 4 and token.startswith(\"\\u064A\") and token.endswith(\"\\u0646\"):\n return token[1:-1]\n # ^Taa, Noon$\n if len(token) > 4 and token.startswith(\"\\u062A\") and token.endswith(\"\\u0646\"):\n return token[1:-1]", "title": "" }, { "docid": "7920029ef748fba2182e25c245fb1701", "score": "0.5114981", "text": "def GetDisplayWord(self):\n pass", "title": "" }, { "docid": "4f75818e3f5745b7a64e91b2d3e86ad8", "score": "0.511219", "text": "def extract_type(name: str) -> str:\r\n name = re.sub('^\"[a-zA-Z0-9_]+\" of ', '', name)\r\n return name", "title": "" }, { "docid": "8b5af3aa4cdb1859b3e28e1c735f94ac", "score": "0.5108118", "text": "def identifyType(regex):\n\tif len(regex) == 3: return 'lit', regex[1]\n\tregex = regex[1:-1]\n\tif regex[-1] == '*': return 'star', regex[0:-1]\n\tif '|' in regex:\n\t\ttemp = regex.split('|')\n\t\treturn 'alt', temp[0], temp[1]\n\treturn 'seq', breakSequence(regex)", "title": "" }, { "docid": "02d019a8b9652781e4baaf8860663208", "score": "0.50996405", "text": "def get_wid(word):", "title": "" }, { "docid": "02d019a8b9652781e4baaf8860663208", "score": "0.50996405", "text": "def get_wid(word):", "title": "" }, { "docid": "e050465b027820dd010faf6a8bd6d262", "score": "0.50985736", "text": "def get_syntax_parses(self, words, k, unique=True):\n raise Exception(\"This function must be overriden!\")", "title": "" }, { "docid": "e01265a79e1e59e4cb50390888126e11", "score": "0.5097682", "text": "def help_for(something):\n\n\tprint \"correct syntax:\"\n\thelp([\"help\",something])", "title": "" }, { "docid": "b786304d8b4890575243c05902cd21f2", "score": "0.5097265", "text": "def check_syntax(self):\n pass # implemented in Ada", "title": "" }, { "docid": "82a9687fd02bdea0d123d4f8a33c9f06", "score": 
"0.5083833", "text": "def main():\n\n translation = {}\n translation['a'] = '@'\n translation['b'] = '8'\n translation['c'] = '('\n translation['d'] = '|)'\n translation['e'] = '3'\n translation['f'] = '#'\n translation['g'] = '6'\n translation['h'] = '[-]'\n translation['i'] = '|'\n translation['j'] = '_|'\n translation['k'] = '|<'\n translation['l'] = '1'\n translation['m'] = '[]\\\\/[]'\n translation['n'] = '[]\\\\[]'\n translation['o'] = '0'\n translation['p'] = '|D'\n translation['q'] = '(,)'\n translation['r'] = '|Z'\n translation['s'] = '$'\n translation['t'] = \"']['\"\n translation['u'] = '|_|'\n translation['v'] = '\\\\/'\n translation['w'] = '\\\\/\\\\/'\n translation['x'] = '}{'\n translation['y'] = '`/'\n translation['z'] = '2'\n\n result = ''\n for glyph in input().lower():\n if glyph in translation:\n result += translation[glyph]\n else:\n result += glyph\n print(result)", "title": "" }, { "docid": "a1eb6c0d5a6c23c82e4436162c2b993d", "score": "0.50825405", "text": "def astToTermString(ast):\n if ast[0] == \"Lam\":\n return \"Lam (\\\"\" + ast[1] + \"\\\", \" + astToTermString(ast[2]) + \")\"\n elif ast[0] == \"App\":\n return \"Juxt (\" + astToTermString(ast[1]) + \", \" + astToTermString(ast[2]) + \")\"\n else:\n return \"Name \\\"\" + ast[1] + '\"'", "title": "" } ]
6288e1caf3d3aa219d6224c3baa4a33b
With no arguments, return a dictionary of all configuration variables relevant for the current platform. Generally this includes everything needed to build extensions and install both pure modules and extensions. On Unix, this means every variable defined in Python's installed Makefile; on Windows and Mac OS it's a much smaller set. With arguments, return a list of values that result from looking up each argument in the configuration variable dictionary.
[ { "docid": "fe9e9fe913211e621b097a69685c6b68", "score": "0.7583598", "text": "def get_config_vars(*args):\n global _config_vars\n if _config_vars is None:\n _config_vars = {}\n\n # Normalized versions of prefix and exec_prefix are handy to have;\n # in fact, these are the standard versions used most places in the\n # Distutils.\n _config_vars['prefix'] = PREFIX\n _config_vars['exec_prefix'] = EXEC_PREFIX\n\n # OS X platforms require special customization to handle\n # multi-architecture, multi-os-version installers\n if sys.platform == 'darwin':\n import _osx_support\n _osx_support.customize_config_vars(_config_vars)\n\n if args:\n vals = []\n for name in args:\n vals.append(_config_vars.get(name))\n return vals\n else:\n return _config_vars", "title": "" } ]
[ { "docid": "7db8c915ff717bc2263e113c79603dee", "score": "0.63694656", "text": "def get_host_package_vars():\n # linux, mac or windows.\n platform_variant = {\n 'darwin': 'mac',\n 'linux2': 'linux',\n 'win32': 'windows',\n }.get(sys.platform)\n if not platform_variant:\n raise ValueError('Unknown OS: %s' % sys.platform)\n\n sys_arch = None\n if sys.platform == 'linux2':\n sys_arch = get_linux_host_arch()\n\n # If we didn't override our system architecture, identify it using \"platform\".\n sys_arch = sys_arch or platform.machine()\n\n # amd64, 386, etc.\n platform_arch = {\n 'amd64': 'amd64',\n 'i386': '386',\n 'i686': '386',\n 'x86': '386',\n 'x86_64': 'amd64',\n 'armv6l': 'armv6l',\n 'armv7l': 'armv6l', # we prefer to use older instruction set for builds\n }.get(sys_arch.lower())\n if not platform_arch:\n raise ValueError('Unknown machine arch: %s' % sys_arch)\n\n # Most 32-bit Linux Chrome Infra bots are in fact running 64-bit kernel with\n # 32-bit userland. Detect this case (based on bitness of the python\n # interpreter) and report the bot as '386'.\n if (platform_variant == 'linux' and\n platform_arch == 'amd64' and\n sys.maxsize == (2 ** 31) - 1):\n platform_arch = '386'\n\n return {\n # e.g. '.exe' or ''.\n 'exe_suffix': EXE_SUFFIX,\n # e.g. 'linux-amd64'\n 'platform': '%s-%s' % (platform_variant, platform_arch),\n }", "title": "" }, { "docid": "eab12128c4291bb6f3179f8b6bf0be67", "score": "0.6295677", "text": "def get_sys_config_paths(executable: str, vars=None) -> Dict[str, str]:\n if not vars:\n args = [\n executable,\n \"-c\",\n \"import sysconfig,json;print(json.dumps(sysconfig.get_paths()))\",\n ]\n return json.loads(subprocess.check_output(args))\n else:\n env = os.environ.copy()\n env.update(SYSCONFIG_VARS=json.dumps(vars))\n args = [\n executable,\n \"-c\",\n \"import os,sysconfig,json;print(json.dumps(sysconfig.\"\n \"get_paths(vars=json.loads(os.getenv('SYSCONFIG_VARS')))))\",\n ]\n return json.loads(subprocess.check_output(args, env=env))", "title": "" }, { "docid": "8ba2afdc74825965de1bd6321f63c3d3", "score": "0.6251037", "text": "def sys_config_list(self):\n return [self.__config['project'], self.__config['user'], self.__config['sys']]", "title": "" }, { "docid": "d74e89e7e39539330d5232c30e5027ae", "score": "0.62022597", "text": "def get_build_args():\n try:\n build_args = package_config()\n except Exception as e:\n if isinstance(e, OSError):\n if e.errno != errno.ENOENT:\n _LOGGER.warning('Failed to run pkg-config: %s', e)\n else:\n _LOGGER.warning(\n 'pkg-config failed to find tesseract/leptonica libraries: %s', e\n )\n build_args = get_tesseract_version()\n\n _LOGGER.debug('build parameters: %s', build_args)\n return build_args", "title": "" }, { "docid": "8be91ca0650cfc13b42925e69e2e8a27", "score": "0.61931163", "text": "def get_package_vars():\n if is_cross_compiling():\n return get_target_package_vars()\n return get_host_package_vars()", "title": "" }, { "docid": "552530acd209db7b172ce64d1384870a", "score": "0.6153562", "text": "def get_all_config():\n return (get_config('host') + get_config('hostextinfo') + get_config('contact')\n + get_config('contactgroup') + get_config('service') + get_config('command'))", "title": "" }, { "docid": "b6fd44780bc578398088ca79a3ca6a34", "score": "0.61126333", "text": "def get_config():\n return parser.parse_known_args()", "title": "" }, { "docid": "e18de680c7e7688f2e19ce6824498d3d", "score": "0.5932773", "text": "def GetEnvironmentVariables(self):\n return {}", "title": "" }, { "docid": "4084dd33208b93fcc5b970a628c3a566", 
"score": "0.58965826", "text": "def _config_from_environment(cls):\n return { name: value\n for (name, value) in os.environ.items()\n if name.startswith(\"DAZEL_\") }", "title": "" }, { "docid": "04f65968b67740ea64f922d33b0da9c3", "score": "0.5885672", "text": "def get_target_package_vars():\n assert is_cross_compiling()\n goos = os.environ['GOOS']\n goarch = os.environ['GOARCH']\n\n if goarch not in KNOWN_GOARCHS:\n raise BuildException('Unsupported GOARCH %s' % goarch)\n\n # There are many ARMs, pick the concrete instruction set. 'v6' is the default,\n # don't try to support other variants for now. Note that 'GOARM' doesn't apply\n # to 'arm64' arch.\n #\n # See:\n # https://golang.org/doc/install/source#environment\n # https://github.com/golang/go/wiki/GoArm\n if goarch == 'arm':\n goarm = os.environ.get('GOARM', '6')\n if goarm != '6':\n raise BuildException('Unsupported GOARM value %s' % goarm)\n arch = 'armv6l'\n else:\n arch = goarch\n\n # We use 'mac' instead of 'darwin'.\n if goos == 'darwin':\n goos = 'mac'\n\n return {\n 'exe_suffix': '.exe' if goos == 'windows' else '',\n 'platform': '%s-%s' % (goos, arch),\n }", "title": "" }, { "docid": "f7331298f6a97e69205170080be3dc31", "score": "0.58811504", "text": "def GetEnvVars(self):\n res = []\n res.append('export ROOTSYS='+self.ROOTSYS)\n res.append('export LD_LIBRARY_PATH='+self.LD_LIBRARY_PATH)\n res.append('export PATH='+self.PATH)\n if self.PYTHONPATH: # not necessary, so may be undefined\n res.append('export PYTHONPATH='+self.PYTHONPATH)\n return res", "title": "" }, { "docid": "e3f8facca0d81879f2156081a59d113c", "score": "0.57985854", "text": "def getVariablesFromConfig(config):\n try:\n variables = list(config[\"variables\"])\n except KeyError:\n print(\"Config error: no variables found.\")\n exit(0) \n return variables", "title": "" }, { "docid": "69c1dd6db781afc7d0ab04b997140f61", "score": "0.57622075", "text": "def get_env_vars():\n\n #import global_variables\n #------------------------------------------------------------------\n \n \n #Import variable\n do_reload = True\n\n #global_variables\n from helga.general.setup.global_variables import global_variables\n if(do_reload):reload(global_variables)\n\n\n\n #Environment variables\n #------------------------------------------------------------------\n\n #environment_pathes_dict\n environment_pathes_dict = {'OCIO': global_variables.OCIO_PATH}\n\n return environment_pathes_dict", "title": "" }, { "docid": "52f126f5e6f0dfc96a39fa1a24e9227b", "score": "0.57062966", "text": "def format_env_vars(self):\n formatted = []\n presets = [\"PATH\", \"LD_LIBRARY_PATH\", \"PYTHONPATH\"]\n for preset in presets:\n formatted.extend([\"-x\", preset])\n\n if self.env_vars:\n for name, value in self.env_vars.items():\n if value:\n formatted += [\"-x\", \"=\".join((name, str(value)))]\n else:\n formatted += [\"-x\", name]\n return formatted", "title": "" }, { "docid": "1094d71853ded5c222ab6b4e743944a0", "score": "0.56893986", "text": "def _get_env(self):\n try:\n return [(k.decode(), v.decode()) for k,v in list(os.environ.items())]\n except AttributeError:\n return list(os.environ.items())", "title": "" }, { "docid": "ba7ade1315d957a8d5fb51065ea98510", "score": "0.56754875", "text": "def get_config(args):\n\n if args.add_platform:\n add_platform()\n print (\"Platform added successfully! 
Please use '--uid {UID} --mode install' to install components or '--uid {UID} --mode run' to run if already installed\")\n exit()\n elif args.one_off:\n return add_platform(oneoff=True)\n else:\n return load_platform_config(args.uid)", "title": "" }, { "docid": "7683c56a6260e23c91ed619391fd807e", "score": "0.5667552", "text": "def get_build_environment(self) -> Dict[str, str]:\n os_special_paths = self._get_os_special_priority_paths()\n if os_special_paths:\n return {\"PATH\": os_special_paths + \":${PATH}\"}\n\n return {}", "title": "" }, { "docid": "863f83790e817581693b28d80bd7216c", "score": "0.5646284", "text": "def get_config_names(cls):\n names = []\n for target in Settings.targets:\n for platform in Settings.targetPlatforms:\n for cpu in Settings.targetCPUs:\n for configuration in Settings.targetConfigurations:\n name = target + '_' + platform + '_' + cpu + '_' + configuration\n names.append(name)\n return names", "title": "" }, { "docid": "6a999dd815f7fd11b0eb77b7a54ad710", "score": "0.5640497", "text": "def get_env_info():\n\n deps = [\n # (MODULE_NAME, f(mod) -> mod version)\n (\"oggm\", lambda mod: mod.__version__),\n (\"numpy\", lambda mod: mod.__version__),\n (\"scipy\", lambda mod: mod.__version__),\n (\"pandas\", lambda mod: mod.__version__),\n (\"geopandas\", lambda mod: mod.__version__),\n (\"netCDF4\", lambda mod: mod.__version__),\n (\"matplotlib\", lambda mod: mod.__version__),\n (\"rasterio\", lambda mod: mod.__version__),\n (\"fiona\", lambda mod: mod.__version__),\n (\"pyproj\", lambda mod: mod.__version__),\n (\"shapely\", lambda mod: mod.__version__),\n (\"xarray\", lambda mod: mod.__version__),\n (\"dask\", lambda mod: mod.__version__),\n (\"salem\", lambda mod: mod.__version__),\n ]\n\n deps_blob = list()\n for (modname, ver_f) in deps:\n try:\n if modname in sys.modules:\n mod = sys.modules[modname]\n else:\n mod = importlib.import_module(modname)\n ver = ver_f(mod)\n deps_blob.append((modname, ver))\n except BaseException:\n deps_blob.append((modname, None))\n\n return deps_blob", "title": "" }, { "docid": "690d0335c1f37df96f7c08c54a50b3b9", "score": "0.56158423", "text": "def GetConfigFlags():\n return GetUserConfig().get(FLAGS_KEY, {})", "title": "" }, { "docid": "aeb4d8f1590e11a375f1453399c6870e", "score": "0.55933005", "text": "def get_configuration() -> Tuple[InputDirectory, OutputDirectory]:\n\n configuration_file = os.environ.get('CONFIGURATION_FILE', None)\n if configuration_file is not None:\n print('Running with plz!', flush=True)\n with open(configuration_file) as c:\n config = json.load(c)\n input_directory = config['input_directory']\n output_directory = config['output_directory']\n return input_directory, output_directory\n else:\n return 'input', 'output'", "title": "" }, { "docid": "2fe2308d3eb48023befbcca410cf131a", "score": "0.5583994", "text": "def _list_variables(session, project, config_name):\n uri = _VARIABLES_PATH.format(\n root=_RUNTIMECONFIG_API_ROOT, project=project, config_name=config_name)\n r = session.get(uri, params={'returnValues': True})\n r.raise_for_status()\n\n variables = {}\n\n for variable in r.json().get('variables', []):\n # The variable name has the whole path in it, so just get the last\n # part.\n variable_name = variable['name'].split('/')[-1]\n variable_value = None\n\n if variable.get('text') is not None:\n variable_value = variable['text']\n else:\n variable_value = base64.b64decode(variable['value']).decode('utf-8')\n\n variables[variable_name] = variable_value\n\n return variables", "title": "" }, { "docid": 
"d4f9dc42769cff69f9ae24c31433cbe1", "score": "0.55731684", "text": "def _get_pass_envs():\n out = []\n for proxyenv in [\"HTTP_PROXY\", \"http_proxy\", \"HTTPS_PROXY\", \"https_proxy\",\n \"ALL_PROXY\", \"all_proxy\", \"FTP_PROXY\", \"ftp_proxy\",\n \"RSYNC_PROXY\", \"rsync_proxy\"]:\n if proxyenv in os.environ:\n out += [\"-e\", \"%s=%s\" % (proxyenv, os.environ[proxyenv])]\n return out", "title": "" }, { "docid": "fef7336ebb3ffafa02ed525a1983b919", "score": "0.5529267", "text": "def get_undercloud_env_vars():\n # Handle backward compatibile OSCI enviornment variables\n _vars = {}\n _vars['net_id'] = os.environ.get('NET_ID')\n _vars['external_dns'] = os.environ.get('NAMESERVER')\n _vars['default_gateway'] = os.environ.get('GATEWAY')\n _vars['external_net_cidr'] = os.environ.get('CIDR_EXT')\n\n # Take FIP_RANGE and create start and end floating ips\n _fip_range = os.environ.get('FIP_RANGE')\n if _fip_range and ':' in _fip_range:\n _vars['start_floating_ip'] = os.environ.get('FIP_RANGE').split(':')[0]\n _vars['end_floating_ip'] = os.environ.get('FIP_RANGE').split(':')[1]\n\n # Env var naming consistent with zaza.openstack.configure.network\n # functions takes priority. Override backward compatible settings.\n _keys = ['default_gateway',\n 'start_floating_ip',\n 'end_floating_ip',\n 'external_dns',\n 'external_net_cidr']\n for _key in _keys:\n _val = os.environ.get(_key)\n if _val:\n _vars[_key] = _val\n\n # Remove keys and items with a None value\n for k, v in list(_vars.items()):\n if not v:\n del _vars[k]\n\n return _vars", "title": "" }, { "docid": "7af2a5440a300169a9af42ad15a8ee39", "score": "0.5528882", "text": "def default_env_variable_definitions() -> List[Tuple[str, EnvironmentVariableType, EnvironmentVariableValue]]:\n def env_variable_name(simple_variable_name):\n return \"{:s}{:s}\".format(MongodbClient.DEFAULT_ENV_VARIABLE_PREFIX, simple_variable_name.upper())\n\n return [\n (env_variable_name(\"host\"), str, \"localhost\"),\n (env_variable_name(\"port\"), int, 27017),\n (env_variable_name(\"username\"), str, \"\"),\n (env_variable_name(\"password\"), str, \"\"),\n (env_variable_name(\"database\"), str, \"db\"),\n (env_variable_name(\"appname\"), str, \"log_writer\"),\n (env_variable_name(\"tz_aware\"), bool, True),\n (env_variable_name(\"metadata_collection\"), str, \"simulations\"),\n (env_variable_name(\"messages_collection_prefix\"), str, \"simulation_\"),\n (env_variable_name(\"invalid_messages_collection_prefix\"), str, \"invalid_simulation_\"),\n (env_variable_name(\"collection_identifier\"), str, \"SimulationId\"),\n (env_variable_name(\"admin\"), bool, True),\n (env_variable_name(\"tls\"), bool, False),\n (env_variable_name(\"tls_allow_invalid_certificates\"), bool, False)\n ]", "title": "" }, { "docid": "6ac070bcbc0ee7f4d12c4ad83f72d0a2", "score": "0.5511336", "text": "def read_variables(self, *args):\n # Once we've run source_environment, if we want results to be visible\n # to the Python pprint(os.environ), we must explicitly export them.\n # This means we can't use this function to distinguish between\n # exported and unexported variables -- but we'd much rather be able to\n # detect whether they're set.\n vars = literal_eval(self.source_env_and(args, \"\"\"\\\nfor var in $(set | grep '^[^ ]' | cut -s -d= -f 1)\ndo export $var\ndone\n'%s' -c 'import os, pprint\npprint.pprint(os.environ)'\"\"\" % self.shell_path(sys.executable)))\n # filter out anything inherited from our own environment\n for var, value in os.environ.iteritems():\n if value == vars.get(var):\n del 
vars[var]\n return vars", "title": "" }, { "docid": "c193445d96f0b3b0ddb487b152395f7e", "score": "0.5502283", "text": "def libcflib_envvar_names():\n names = set(ENVVARS.keys())\n return names", "title": "" }, { "docid": "638f29acf0832334ef4159572e37af73", "score": "0.54967827", "text": "def get_all(self):\n python_conf = []\n all = self._jconf.getAll()\n for conf in all:\n python_conf.append((conf._1(),conf._2()))\n return python_conf", "title": "" }, { "docid": "cfc970340a9341065cb32f06606ee49d", "score": "0.54813355", "text": "def getconf():\n res = config.get_all_conf()\n return jsonify(res)", "title": "" }, { "docid": "af4d42496f083f58e2af50e4119ad95c", "score": "0.54804367", "text": "def get_build_packages(self) -> Set[str]:\n if platform.is_deb_based():\n return {\n \"python3-dev\",\n \"python3-pip\",\n \"python3-setuptools\",\n \"python3-venv\",\n \"python3-wheel\",\n }\n elif platform.is_yum_based():\n try:\n os_release = os_utils.OsRelease()\n if (os_release.id(), os_release.version_id()) in ((\"centos\", \"7\"), (\"rhel\", \"7\")):\n # CentOS 7 Python 3.8 from SCL repo\n return {\n \"autoconf\",\n \"automake\",\n \"gcc\",\n \"gcc-c++\",\n \"git\",\n \"make\",\n \"patch\",\n \"rh-python38-python-devel\",\n \"rh-python38-python-pip\",\n \"rh-python38-python-setuptools\",\n \"rh-python38-python-wheel\",\n }\n except (OsReleaseIdError, OsReleaseVersionIdError):\n pass\n\n return {\n \"autoconf\",\n \"automake\",\n \"gcc\",\n \"gcc-c++\",\n \"git\",\n \"make\",\n \"patch\",\n \"python3-devel\",\n \"python3-pip\",\n \"python3-setuptools\",\n \"python3-wheel\",\n }\n elif platform.is_dnf_based():\n return {\n \"python3-devel\",\n }\n else:\n return {}", "title": "" }, { "docid": "fbcabc8d7d7dd11758f0906b2f2ee699", "score": "0.5464128", "text": "def get_env_vars():\n env_vars = []\n leapp_vars = {k: v for (k, v) in os.environ.items() if k.startswith('LEAPP_') and k not in ENV_IGNORE}\n for k, v in leapp_vars.items():\n if k in ENV_MAPPING:\n env_vars.append(EnvVar(name=ENV_MAPPING.get(k), value=v))\n continue\n env_vars.append(EnvVar(name=k, value=v))\n\n return env_vars", "title": "" }, { "docid": "faad25fa17349d36c30801ae2edb7d94", "score": "0.54548776", "text": "def read_config():\n for directory, filename in product(\n [\n dirname(argv[0]),\n expanduser(\"~\"),\n env.get(\"XDG_CONFIG_HOME\", join(expanduser(\"~\"), \".config\")),\n ],\n [\"voc.conf\", \".voc.conf\"],\n ):\n try:\n config = join(directory, filename)\n _LOGGER.debug(\"checking for config file %s\", config)\n with open(config) as config:\n return dict(\n x.split(\": \")\n for x in config.read().strip().splitlines()\n if not x.startswith(\"#\")\n )\n except OSError:\n continue\n return {}", "title": "" }, { "docid": "a98f4bcbc49892670a0cac8d2a378200", "score": "0.5454623", "text": "def global_configs(self):\n # TODO: Make dict python2 and python3 safe\n return {key: value\n for key, value in iteritems(self.configs)\n if isinstance(value, string_types)}", "title": "" }, { "docid": "e663b6b4979210a04406bc4e9ddf3f61", "score": "0.54464614", "text": "def get_params():\n\n # Map of config parameter names -> antsRegistrationSyN command line flags\n param_flags = {}\n param_flags['image_dimension'] = '-d'\n param_flags['out_prefix'] = '-o'\n param_flags['num_threads'] = '-n'\n param_flags['transform_type'] = '-t'\n param_flags['radius'] = '-r'\n param_flags['spline_distance'] = '-s'\n param_flags['precision_type'] = '-p'\n param_flags['use_histogram_matching'] = '-j'\n param_flags['collapse_output_transforms'] = '-z'\n\n # 
Build a map of param flag -> value from the config\n return { param_flags[k]:v for (k, v) in config['config'].items() if k in param_flags }", "title": "" }, { "docid": "03bc6f2016bcfc3c40e6669eaee20306", "score": "0.5442719", "text": "def environment_variables() -> dict:\n env_vars = list()\n for env_var, _ in os.environ.items():\n if env_var.startswith(prefect.configuration.ENV_VAR_PREFIX + \"__\"):\n env_vars.append(env_var)\n\n return dict(env_vars=env_vars)", "title": "" }, { "docid": "adcfa4af1298b603c15e33483b299cda", "score": "0.543025", "text": "def GetExtraPlistItems(self, configname=None):\r\n if configname not in XcodeSettings._plist_cache:\r\n cache = {}\r\n cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()\r\n\r\n xcode, xcode_build = self._XcodeVersion()\r\n cache['DTXcode'] = xcode\r\n cache['DTXcodeBuild'] = xcode_build\r\n\r\n sdk_root = self._SdkRoot(configname)\r\n if not sdk_root:\r\n sdk_root = self._DefaultSdkRoot()\r\n cache['DTSDKName'] = sdk_root\r\n if xcode >= '0430':\r\n cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(\r\n sdk_root, 'ProductBuildVersion')\r\n else:\r\n cache['DTSDKBuild'] = cache['BuildMachineOSBuild']\r\n\r\n if self.isIOS:\r\n cache['DTPlatformName'] = cache['DTSDKName']\r\n if configname.endswith(\"iphoneos\"):\r\n cache['DTPlatformVersion'] = self._GetSdkVersionInfoItem(\r\n sdk_root, 'ProductVersion')\r\n cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']\r\n else:\r\n cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']\r\n XcodeSettings._plist_cache[configname] = cache\r\n\r\n # Include extra plist items that are per-target, not per global\r\n # XcodeSettings.\r\n items = dict(XcodeSettings._plist_cache[configname])\r\n if self.isIOS:\r\n items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)\r\n return items", "title": "" }, { "docid": "7795b224f6707fb75e80804665a8cdbe", "score": "0.5417421", "text": "def get_required_env_vars(selected_features):\n\n required_keywords = get_required_keywords(selected_features)\n required_env_vars = [var for var in required_keywords if config_feature_keywords[var]['environment']]\n\n return required_env_vars", "title": "" }, { "docid": "145699a712f109bacff509a008377b0a", "score": "0.5401114", "text": "def environment() -> Dict[str, Any]:\n import matplotlib as mpl\n\n from .. import __version__ as package_version\n from .. import config\n from . 
import mpi\n from .numba import numba_environment\n from .plotting import get_plotting_context\n\n RESOURCE_PATH = Path(__file__).resolve().parents[1] / \"tools\" / \"resources\"\n\n result: Dict[str, Any] = {}\n result[\"package version\"] = package_version\n result[\"python version\"] = sys.version\n result[\"platform\"] = sys.platform\n\n # add the package configuration\n result[\"config\"] = config.to_dict()\n\n # add details for mandatory packages\n packages_min = packages_from_requirements(RESOURCE_PATH / \"requirements_basic.txt\")\n result[\"mandatory packages\"] = get_package_versions(packages_min)\n result[\"matplotlib environment\"] = {\n \"backend\": mpl.get_backend(),\n \"plotting context\": get_plotting_context().__class__.__name__,\n }\n\n # add details about optional packages\n packages = set(packages_from_requirements(RESOURCE_PATH / \"requirements_full.txt\"))\n packages |= set(packages_from_requirements(RESOURCE_PATH / \"requirements_mpi.txt\"))\n packages -= set(packages_min)\n result[\"optional packages\"] = get_package_versions(sorted(packages))\n if module_available(\"numba\"):\n result[\"numba environment\"] = numba_environment()\n\n # add information about MPI environment\n if mpi.initialized:\n result[\"multiprocessing\"] = {\"initialized\": True, \"size\": mpi.size}\n else:\n result[\"multiprocessing\"] = {\"initialized\": False}\n\n return result", "title": "" }, { "docid": "c21b0083efa74a6a5928412d34357151", "score": "0.5388724", "text": "def config_get_all(show_secrets: bool, with_system: bool):\n configurable_values = filter(lambda i: i[1].configurable_manually, config.ALL_VARIABLES.items())\n non_configurable_values = filter(lambda i: not i[1].configurable_manually, config.ALL_VARIABLES.items())\n\n click.echo('Configurable manually variables:\\n===========================')\n for name, _ in configurable_values:\n _print_variable_information(name, show_secrets)\n\n if with_system:\n click.echo('\\n\\n')\n\n click.echo('System variables:\\n===========================')\n for name, _ in non_configurable_values:\n _print_variable_information(name, show_secrets)", "title": "" }, { "docid": "e265ef6b74da12a48f611a77632277df", "score": "0.5386528", "text": "def default_config_info():\n return {\n \"config_file\": find_config_file(),\n \"is_debug\": is_debug(),\n \"merlin_home\": MERLIN_HOME,\n \"merlin_home_exists\": os.path.exists(MERLIN_HOME),\n }", "title": "" }, { "docid": "a84692f449c8c632f3338067411cbdf3", "score": "0.53851634", "text": "def get_environ_vars():\n # type: () -> Iterable[Tuple[str, str]]\n for key, val in os.environ.items():\n should_be_yielded = (\n key.startswith(\"PIP_\") and\n key[4:].lower() not in [\"version\", \"help\"]\n )\n if should_be_yielded:\n yield key[4:].lower(), val", "title": "" }, { "docid": "f72a35d02d6651530a6abd3ceee83054", "score": "0.53742486", "text": "def get_config():\n local_config = {}\n env_specific_config = _get_env_specific_config()\n local_config.update(env_specific_config)\n return local_config", "title": "" }, { "docid": "4b755d40ac5c2e4fb23e94e4d7173a35", "score": "0.53646606", "text": "def get_options(full_path, c):\n config = configparser.ConfigParser()\n config.read(full_path)\n return config[c].keys()", "title": "" }, { "docid": "734de1b0c286192a91211a2333343deb", "score": "0.5364448", "text": "def _GetEnv(self, arch):\r\n # The environment is saved as an \"environment block\" (see CreateProcess\r\n # and msvs_emulation for details). 
We convert to a dict here.\r\n # Drop last 2 NULs, one for list terminator, one for trailing vs. separator.\r\n pairs = open(arch).read()[:-2].split('\\0')\r\n kvs = [item.split('=', 1) for item in pairs]\r\n return dict(kvs)", "title": "" }, { "docid": "52589482006aea9039fce85d7fbfa57f", "score": "0.5355675", "text": "def getConfigLocations(filename=config_filename):\n out = []\n\n # Loading out in order of preference.\n # $VAOLOGIN_CONF\n #\n if os.environ.has_key(vaologin_conf_envvar):\n out.append(os.environ[vaologin_conf_envvar]);\n\n # location relative to $VAOLOGIN_HOME, script path\n #\n if os.environ.has_key(vaologin_home_envvar):\n home = os.environ[vaologin_home_envvar];\n else:\n home = def_product_home\n if home.startswith('%') or not os.path.exists(home): \n # % means that \"make\" or \"make install\" has not been done\n # look relative to the script path assuming the script is in \n # a \"bin\" or \"cgi-bin\" sub-directory relative to home \n home = os.path.dirname(os.path.dirname(sys.argv[0]))\n if not home:\n home = \".\"\n out.append(os.path.join(home, \"conf\", filename))\n\n # directly in the current directory\n #\n out.append(filename)\n\n # the default system path\n #\n out.append(os.path.join(def_sys_config_dir, filename))\n\n return out", "title": "" }, { "docid": "b1195a66420d889c724b502960bac37c", "score": "0.5354869", "text": "def get_platform_params():\n return {}", "title": "" }, { "docid": "16671a47c8b775f5201709a29041e71b", "score": "0.533919", "text": "def get_build_environment(self) -> Dict[str, str]:\n return {}", "title": "" }, { "docid": "7383f9d477872ff03587021df5664e62", "score": "0.53305423", "text": "def expand_environment_variables(config):\n if isinstance(config, collections.abc.Mapping):\n return {k: expand_environment_variables(v) for k, v in config.items()}\n elif isinstance(config, str):\n return os.path.expandvars(config)\n elif isinstance(config, (list, tuple, builtins.set)):\n return type(config)([expand_environment_variables(v) for v in config])\n else:\n return config", "title": "" }, { "docid": "4066d5a6f15cf350ece6b3c921d09f3f", "score": "0.53227663", "text": "def get_configured():\n ks = (k for k in os.environ.keys() if k.startswith(\"LATUBOT_KEYS_\"))\n for key in ks:\n m = re.match(r\"LATUBOT_KEYS_(?P<sport>\\w*)_(?P<area>\\w*)\", key)\n assert m\n yield m.group(\"sport\"), m.group(\"area\")", "title": "" }, { "docid": "f8372cb8034e13444a69b7050f6120f4", "score": "0.531568", "text": "def get_cfg(arg, names, defaults=None):\n cfg = {\n 'spp': {\n 'root': '.',\n 'include_path': '.'\n }\n }\n paths = os.environ['CPLUS_INCLUDE_PATH'].split(':')\n for path in paths:\n if 'spp' in os.listdir(path):\n cfg['spp']['include_path'] = path\n break\n cfg_file = read_config(arg)\n if 'spp' in cfg_file:\n for var, val in cfg_file['spp'].iteritems():\n cfg['spp'][var] = os.path.expandvars(val)\n cfg['spp']['root'] = arg.cfg\n if isinstance(names, list):\n for name in names:\n cfg[name] = dict()\n _update_single(cfg, name)\n _update_from_file(cfg, name, cfg_file)\n else:\n cfg[names] = dict()\n _update_single(cfg, names, defaults)\n _update_from_file(cfg, names, cfg_file)\n _update_from_arg(cfg, arg)\n return cfg", "title": "" }, { "docid": "33b38a593a34bce9687419eb92d253f9", "score": "0.531413", "text": "def get_parameters_list(self):\n return self.description[\"config\"][\"values\"].keys()", "title": "" }, { "docid": "adb0d7707777be8b6b6bcf6fed3033fe", "score": "0.531322", "text": "def load_config_from_env_variables() -> Dict[str, 
Optional[EnvironmentVariableValue]]:\n def simple_name(env_variable_name):\n return env_variable_name[len(MongodbClient.DEFAULT_ENV_VARIABLE_PREFIX):].lower()\n\n env_variables = load_environmental_variables(*default_env_variable_definitions())\n\n return {\n simple_name(variable_name): env_variables[variable_name]\n for variable_name in env_variables\n }", "title": "" }, { "docid": "1fe402753dfb9a8dd1420e289e0f57bf", "score": "0.5310738", "text": "def listDetectorConfigParams(self):\n return self.detectorConfig('keys')", "title": "" }, { "docid": "005fb5b9ea547b7df3a761301689b262", "score": "0.5298805", "text": "def get_arguments(self):\n\t\tconfig = self.load_config_file()\n\t\tparams = config['params']\n\t\t# return [(key, params[key]['type']) for key in params]\n\t\treturn params", "title": "" }, { "docid": "0eec09f56f2167a31963d95ffbdf1190", "score": "0.5285223", "text": "def get_environ_dict():\n return {\n 'os.environ': _get_os_environ_dict((\n 'AUTH_DOMAIN',\n 'CURRENT_CONFIGURATION_VERSION',\n 'CURRENT_MODULE_ID',\n 'CURRENT_VERSION_ID',\n 'DEFAULT_VERSION_HOSTNAME',\n 'FEDERATED_IDENTITY',\n 'FEDERATED_PROVIDER',\n 'GAE_LOCAL_VM_RUNTIME',\n 'HTTP_HOST',\n 'HTTP_PROXY',\n 'HTTP_X_APPENGINE_HTTPS',\n 'HTTP_X_APPENGINE_QUEUENAME',\n 'HTTP_X_ORIGINAL_HOST',\n 'HTTP_X_ORIGINAL_SCHEME',\n 'SERVER_NAME',\n 'SERVER_PORT',\n 'SERVER_SOFTWARE',\n 'USER_IS_ADMIN',\n )),\n 'app_identity': _get_app_identity_dict((\n 'get_service_account_name',\n 'get_application_id',\n 'get_default_version_hostname',\n )),\n 'modules': _get_modules_dict((\n 'get_current_module_name',\n 'get_current_version_name',\n 'get_current_instance_id',\n 'get_modules',\n 'get_versions',\n 'get_default_version',\n 'get_hostname',\n )),\n 'namespace_manager': _get_namespace_manager_dict((\n 'get_namespace',\n 'google_apps_namespace',\n )),\n }", "title": "" }, { "docid": "fc275795d5e4bfa7353e9a2009615d2b", "score": "0.52792215", "text": "def available_configuration_files(self):\n known_files = [GLOBAL_CONFIG, LOCAL_CONFIG, self.environment.get('PIP_ACCEL_CONFIG')]\n absolute_paths = [parse_path(pathname) for pathname in known_files if pathname]\n return [pathname for pathname in absolute_paths if os.path.isfile(pathname)]", "title": "" }, { "docid": "85e21ab05cdf807c5f327148f391610e", "score": "0.527593", "text": "def get_config_defaults() -> dict[str, str]:\n result: dict[str, str] = {}\n lowercase_keys: set[str] = set()\n for key, value in os.environ.items():\n if key.lower() in lowercase_keys:\n LOG.warning(\"The environment variable '%s' is duplicated with different case, ignoring\", key)\n continue\n lowercase_keys.add(key.lower())\n result[key] = value.replace(\"%\", \"%%\")\n return result", "title": "" }, { "docid": "7de8357239821f0ad181cdf7edb15d7d", "score": "0.5267706", "text": "def get_configs():\n configurations.DEFINE_string(\"configs_fname\",None,\"CSV containing all the configs to run\")\n configurations.DEFINE_boolean(\"predict\",True,\"Run predictions after training\")\n configurations.DEFINE_integer(\"num_threads\",4,\"NUmber of parallel threads (Number of parallel executions)\")\n configurations.DEFINE_integer(\"num_gpu\",1,\"Number of GPU on the machine, Use 0 if there are None\")\n configurations.DEFINE_integer(\"sleep_time\",1,\"Sleep time\")\n configurations.DEFINE_integer(\"start_date\",None,\"First date for prediction on as YYYYMM\")\n configurations.DEFINE_integer(\"end_date\",None,\"Last date for prediction on as YYYYMM\")\n\n c = configurations.ConfigValues()\n\n return c", "title": "" }, { 
"docid": "b76ab30cda150cc231876fec261300cb", "score": "0.52643394", "text": "def get_environ_settings(self):\n if self.environ is not None:\n return {variable for variable in re.split('[\\s,]+', self.environ)}\n else:\n return set()", "title": "" }, { "docid": "24afa1d084b9c40775d4f8e6a4a76c79", "score": "0.52528816", "text": "def CalculateVariables(default_variables, params):\r\n global generator_additional_non_configuration_keys\r\n global generator_additional_path_sections\r\n flavor = gyp.common.GetFlavor(params)\r\n if flavor == 'mac':\r\n default_variables.setdefault('OS', 'mac')\r\n default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')\r\n default_variables.setdefault('SHARED_LIB_DIR',\r\n generator_default_variables['PRODUCT_DIR'])\r\n default_variables.setdefault('LIB_DIR',\r\n generator_default_variables['PRODUCT_DIR'])\r\n\r\n # Copy additional generator configuration data from Xcode, which is shared\r\n # by the Mac Ninja generator.\r\n import gyp.generator.xcode as xcode_generator\r\n generator_additional_non_configuration_keys = getattr(xcode_generator,\r\n 'generator_additional_non_configuration_keys', [])\r\n generator_additional_path_sections = getattr(xcode_generator,\r\n 'generator_additional_path_sections', [])\r\n global generator_extra_sources_for_rules\r\n generator_extra_sources_for_rules = getattr(xcode_generator,\r\n 'generator_extra_sources_for_rules', [])\r\n elif flavor == 'win':\r\n default_variables.setdefault('OS', 'win')\r\n default_variables['EXECUTABLE_SUFFIX'] = '.exe'\r\n default_variables['STATIC_LIB_PREFIX'] = ''\r\n default_variables['STATIC_LIB_SUFFIX'] = '.lib'\r\n default_variables['SHARED_LIB_PREFIX'] = ''\r\n default_variables['SHARED_LIB_SUFFIX'] = '.dll'\r\n\r\n # Copy additional generator configuration data from VS, which is shared\r\n # by the Windows Ninja generator.\r\n import gyp.generator.msvs as msvs_generator\r\n generator_additional_non_configuration_keys = getattr(msvs_generator,\r\n 'generator_additional_non_configuration_keys', [])\r\n generator_additional_path_sections = getattr(msvs_generator,\r\n 'generator_additional_path_sections', [])\r\n\r\n gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)\r\n else:\r\n operating_system = flavor\r\n if flavor == 'android':\r\n operating_system = 'linux' # Keep this legacy behavior for now.\r\n default_variables.setdefault('OS', operating_system)\r\n default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')\r\n default_variables.setdefault('SHARED_LIB_DIR',\r\n os.path.join('$!PRODUCT_DIR', 'lib'))\r\n default_variables.setdefault('LIB_DIR',\r\n os.path.join('$!PRODUCT_DIR', 'obj'))", "title": "" }, { "docid": "635b475ede571a885bf40ac9aaa6761a", "score": "0.5248883", "text": "def CalculateVariables(default_variables, params):\r\n flavor = gyp.common.GetFlavor(params)\r\n if flavor == 'mac':\r\n default_variables.setdefault('OS', 'mac')\r\n default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')\r\n default_variables.setdefault('SHARED_LIB_DIR',\r\n generator_default_variables['PRODUCT_DIR'])\r\n default_variables.setdefault('LIB_DIR',\r\n generator_default_variables['PRODUCT_DIR'])\r\n\r\n # Copy additional generator configuration data from Xcode, which is shared\r\n # by the Mac Make generator.\r\n import gyp.generator.xcode as xcode_generator\r\n global generator_additional_non_configuration_keys\r\n generator_additional_non_configuration_keys = getattr(xcode_generator,\r\n 'generator_additional_non_configuration_keys', [])\r\n global 
generator_additional_path_sections\r\n generator_additional_path_sections = getattr(xcode_generator,\r\n 'generator_additional_path_sections', [])\r\n global generator_extra_sources_for_rules\r\n generator_extra_sources_for_rules = getattr(xcode_generator,\r\n 'generator_extra_sources_for_rules', [])\r\n COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})\r\n else:\r\n operating_system = flavor\r\n if flavor == 'android':\r\n operating_system = 'linux' # Keep this legacy behavior for now.\r\n default_variables.setdefault('OS', operating_system)\r\n default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')\r\n default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')\r\n default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')", "title": "" }, { "docid": "814fc90194220aac81c339a532c5dbc3", "score": "0.5247089", "text": "def _dump_many_vars(var_name):\n global _DUMP_MANY_VARS\n global _DUMP_MANY_VARS_LIST\n\n # Look up var from cache.\n if _DUMP_MANY_VARS:\n return _DUMP_MANY_VARS[var_name]\n\n all_vars=\" \".join(_DUMP_MANY_VARS_LIST)\n\n # The command is taken from build/envsetup.sh to fetch build variables.\n command = (\"CALLED_FROM_SETUP=true \" # Enable the 'dump-many-vars' make target.\n \"BUILD_SYSTEM=build/core \" # Set up lookup path for make includes.\n \"make --no-print-directory -C \\\"%s\\\" -f build/core/config.mk \"\n \"dump-many-vars DUMP_MANY_VARS=\\\"%s\\\"\") % (ANDROID_BUILD_TOP, all_vars)\n\n config = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n universal_newlines=True,\n shell=True).communicate()[0] # read until EOF, select stdin\n # Prints out something like:\n # TARGET_ARCH='arm64'\n # HOST_ARCH='x86_64'\n _DUMP_MANY_VARS = {}\n for line in config.split(\"\\n\"):\n # Split out \"$key='$value'\" via regex.\n match = re.search(\"([^=]+)='([^']*)\", line)\n if not match:\n continue\n key = match.group(1)\n value = match.group(2)\n _DUMP_MANY_VARS[key] = value\n\n return _DUMP_MANY_VARS[var_name]", "title": "" }, { "docid": "fac3c58ada218974cf7ac9380377eccc", "score": "0.52312094", "text": "def get_ldconfig_libs():\n ldconfig = subprocess.Popen(['ldconfig', '-p'],\n stdout=subprocess.PIPE).communicate()[0]\n return [line.strip().split()\n for line in ldconfig.decode().split('\\n')\n if line.startswith('\\t')]", "title": "" }, { "docid": "0ffe40fd005def0ed8092eb4aeb4cc98", "score": "0.5223472", "text": "def get_configuration() -> List[Tuple[str, str]]:\n\n def _to_tuple(values: List[str]) -> Tuple[str, str]:\n return (values[0], values[1])\n\n return [\n _to_tuple(values=configuration.split(sep=\"/\", maxsplit=1))\n for configuration in REPOSITORY_CONFIGURATION\n ]", "title": "" }, { "docid": "4842515377c8be8bd9c10cd0eebde32b", "score": "0.52233565", "text": "def _fetch(config_name):\n session, project = _create_session()\n\n logger.info('Fetching runtime configuration {} from {}.'.format(\n config_name, project))\n\n variables = _list_variables(session, project, config_name)\n\n return variables", "title": "" }, { "docid": "db225099aeec2410cfd30be272b91c5f", "score": "0.52126014", "text": "def GetAllDefines(target_list, target_dicts, data, config_name, params):\r\n\r\n # Get defines declared in the gyp files.\r\n all_defines = {}\r\n flavor = gyp.common.GetFlavor(params)\r\n if flavor == 'win':\r\n generator_flags = params.get('generator_flags', {})\r\n for target_name in target_list:\r\n target = target_dicts[target_name]\r\n\r\n if flavor == 'win':\r\n msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)\r\n 
extra_defines = msvs_settings.GetComputedDefines(config_name)\r\n else:\r\n extra_defines = []\r\n if config_name in target['configurations']:\r\n config = target['configurations'][config_name]\r\n target_defines = config['defines']\r\n else:\r\n target_defines = []\r\n for define in target_defines + extra_defines:\r\n split_define = define.split('=', 1)\r\n if len(split_define) == 1:\r\n split_define.append('1')\r\n if split_define[0].strip() in all_defines:\r\n # Already defined\r\n continue\r\n all_defines[split_define[0].strip()] = split_define[1].strip()\r\n # Get default compiler defines (if possible).\r\n if flavor == 'win':\r\n return all_defines # Default defines already processed in the loop above.\r\n cc_target = GetCompilerPath(target_list, target_dicts, data)\r\n if cc_target:\r\n command = shlex.split(cc_target)\r\n command.extend(['-E', '-dM', '-'])\r\n cpp_proc = subprocess.Popen(args=command, cwd='.',\r\n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\r\n cpp_output = cpp_proc.communicate()[0]\r\n cpp_lines = cpp_output.split('\\n')\r\n for cpp_line in cpp_lines:\r\n if not cpp_line.strip():\r\n continue\r\n cpp_line_parts = cpp_line.split(' ', 2)\r\n key = cpp_line_parts[1]\r\n if len(cpp_line_parts) >= 3:\r\n val = cpp_line_parts[2]\r\n else:\r\n val = '1'\r\n all_defines[key] = val\r\n\r\n return all_defines", "title": "" }, { "docid": "4380e38fa4578a5bc6d09206c6cca67f", "score": "0.5209468", "text": "def load_config():\n logger = logging.getLogger(__name__)\n dot_env_path = find_dotenv(raise_error_if_not_found=True)\n logger.info(f\"Found config in {dot_env_path}\")\n return dotenv_values(dot_env_path)", "title": "" }, { "docid": "9247d38a36c2cd974b449a0db39f211b", "score": "0.5206489", "text": "def _parse_spawn_options():\n config_dir = os.environ.get('JUPYTER_CONFIG_DIR')\n if config_dir is None:\n config_dir = '.jupyter'\n config_path = os.path.join(os.path.expanduser('~'), config_dir, 'imongo_config.yml')\n logger.info(f'Trying to load {config_path}')\n try:\n config = yaml.load(open(config_path))\n except FileNotFoundError:\n logger.info('Using default configuration')\n return list()\n\n options = []\n for key, value in config.items():\n if key == 'shell':\n continue\n elif not value:\n options.append(f'--{key}')\n else:\n options.append(f'--{key} {value}')\n return options", "title": "" }, { "docid": "4b0312828d019732e25d3becb65ac450", "score": "0.5206128", "text": "def get_params():\n\n # Map of config parameter names -> buildtemplateparallel command line flags\n param_flags = {}\n param_flags['cpu_cores'] = '-j'\n param_flags['num_modalities'] = '-k'\n param_flags['modality_weights'] = '-w'\n param_flags['gradient_step_size'] = '-g'\n param_flags['image_dimension'] = '-d'\n param_flags['iteration_limit'] = '-i'\n param_flags['max_iterations'] = '-m'\n param_flags['n4_bias_field_correction'] = '-n'\n param_flags['out_prefix'] = '-o'\n param_flags['parallel_computation'] = '-c'\n param_flags['registration_similarity_metric'] = '-s'\n param_flags['rigid_body_registration'] = '-r'\n param_flags['transformation_model_type'] = '-t'\n param_flags['update_template_with_full_affine'] = '-y'\n\n # Build a map of btp param flag -> value from the config\n return { param_flags[k]:v for (k, v) in config['config'].items() if k in param_flags }", "title": "" }, { "docid": "e7b5cc20743a4a28951d4ddc5e18a1e0", "score": "0.5206113", "text": "def _get_ld_config_paths():\n if not _is_linux():\n return []\n ldconfig_path = which(\"ldconfig\") or \"/sbin/ldconfig\"\n output = 
subprocess.check_output([ldconfig_path, \"-p\"])\n pattern = re.compile(\".* => (.*)\")\n result = set()\n for line in output.splitlines():\n try:\n match = pattern.match(line.decode(\"ascii\"))\n except UnicodeDecodeError:\n match = False\n if match:\n result.add(os.path.dirname(match.group(1)))\n return sorted(list(result))", "title": "" }, { "docid": "69fe9c5f30935e6db5185ae16cdc4234", "score": "0.52029485", "text": "def __get_env_data(self):\n os_env_data = {}\n os_env = os.environ\n for os_arg_name, report_arg_name in self.__osenv_fields_to_push.items():\n os_env_data[report_arg_name] = os_env.get(os_arg_name)\n return os_env_data", "title": "" }, { "docid": "6ea7f59a7c835db559528bbe21c531e3", "score": "0.51955384", "text": "def get_paths(self) -> Dict[str, str]:\n paths = sysconfig.get_paths()\n scripts = \"Scripts\" if os.name == \"nt\" else \"bin\"\n packages_path = self.packages_path\n paths[\"platlib\"] = paths[\"purelib\"] = (packages_path / \"lib\").as_posix()\n paths[\"scripts\"] = (packages_path / scripts).as_posix()\n paths[\"data\"] = paths[\"prefix\"] = packages_path.as_posix()\n paths[\"include\"] = paths[\"platinclude\"] = paths[\"headers\"] = (\n packages_path / \"include\"\n ).as_posix()\n return paths", "title": "" }, { "docid": "197d53ace191acec12c51a2815efc620", "score": "0.51936173", "text": "def get_config():\n config = dict()\n\n #config[\"plugin\"] = \"Linear\"#\"SGE\"\n #config[\"plugin_args\"] = {}#{\"qsub_args\":\"-q ms.q -l arch=lx24-amd64 -l h_stack=32M \\\n #-l h_vmem=4G -l hostname=graf -v MKL_NUM_THREADS=1\"}\n\n config[\"working_directory\"] = \"/working/henry_temp/keshavan/\"\n config[\"output_directory\"] = \"/data/henry7/PBR/subjects\"\n config[\"crash_directory\"] = \"/working/henry_temp/keshavan/crashes\"\n config[\"mni_template\"] = \"/data/henry6/PBR/templates/OASIS-30_Atropos_template_in_MNI152.nii.gz\"\n\n return config", "title": "" }, { "docid": "555b71b740401cf839113cbc436c351f", "score": "0.51853627", "text": "def _getConfigPathOptions(self):\n return [self.configDir/'exe.conf']", "title": "" }, { "docid": "0db36fe45c1686e68708efa4ab874d38", "score": "0.5183641", "text": "def GetVariables(self, config_name):\n del config_name\n return {}", "title": "" }, { "docid": "68c672be9bed2f63fc3aaf57aa89e663", "score": "0.51746804", "text": "def _all_configs(self):\r\n return (self.override_config, self.repo_plugin_config,\r\n self.plugin_config, self.default_config)", "title": "" }, { "docid": "eaeaf6432987c929131b66ea26d6a758", "score": "0.51745886", "text": "def _GetDefines(config):\r\n defines = []\r\n for d in config.get('defines', []):\r\n if type(d) == list:\r\n fd = '='.join([str(dpart) for dpart in d])\r\n else:\r\n fd = str(d)\r\n defines.append(fd)\r\n return defines", "title": "" }, { "docid": "cf52b8e4d87764198abb012e3e99ade0", "score": "0.5172787", "text": "def dict_bash_kwargs():\n\n args = []\n for i in range(len(sys.argv) - 1):\n arg = str(sys.argv[i + 1]).split(\"=\")\n args.append(arg)\n return dict(args)", "title": "" }, { "docid": "19c508c4c7095f85ffe0df7c3f648412", "score": "0.5166474", "text": "def list_variable_files(self):\n # Load the common variables.\n var_files = list(self.var_dir.glob(\"all/*.yml\"))\n\n # Load the common stack variables.\n s = self.var_dir / \"all\" / self.stack\n var_files += list(s.glob(\"*.yml\"))\n\n # Load the stack environment specific variables if any.\n if self.environment:\n e = self.var_dir / self.environment / self.stack\n if not e.exists():\n raise ValueError(\n f'cannot find variables for 
the stack \"{self.stack}\" '\n f'in the \"{self.environment}\" environment'\n )\n var_files += list(e.glob(\"**/*.yml\"))\n\n return var_files", "title": "" }, { "docid": "e5e1247e7bb07113a95494c713b959db", "score": "0.5165326", "text": "def getAll(self):\n self.checkForFile()\n dictList = []\n with open(self.settingsfile, 'r') as FILE:\n for line in FILE.readlines():\n try:\n dictList.append(eval(line))\n except:\n pass\n\n return dictList", "title": "" }, { "docid": "51d23cb3e7e8c39f09e572565f7dd739", "score": "0.5160187", "text": "def extract_global_wf_variables(input_file: str) -> List[Dict[str, Any]]:\n global_variable_list = list()\n base_tree = ET.parse(input_file)\n root = base_tree.getroot()\n for child in root.findall(\n \"./knime:config[@key='workflow_variables']/knime:config\", NS\n ):\n variable: Dict[str, Any] = dict()\n variable_name = extract_entry_value(child, \"name\")\n variable_class = extract_entry_value(child, \"class\")\n variable_value = extract_entry_value(child, \"value\")\n\n if variable_value is not None and variable_class is not None:\n if variable_class == \"STRING\":\n variable[variable_name] = variable_value\n elif variable_class == \"DOUBLE\":\n variable[variable_name] = float(variable_value)\n elif variable_class == \"INTEGER\":\n variable[variable_name] = int(variable_value)\n global_variable_list.append(variable)\n return global_variable_list", "title": "" }, { "docid": "f8e808d72e0ee66ec01da3795656ecbc", "score": "0.5159817", "text": "def CalculateVariables(default_variables, params):\n global generator_additional_non_configuration_keys\n global generator_additional_path_sections\n flavor = gyp.common.GetFlavor(params)\n if flavor == 'mac':\n default_variables.setdefault('OS', 'mac')\n default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')\n default_variables.setdefault('SHARED_LIB_DIR',\n generator_default_variables['PRODUCT_DIR'])\n default_variables.setdefault('LIB_DIR',\n generator_default_variables['PRODUCT_DIR'])\n\n \n \n import gyp.generator.xcode as xcode_generator\n generator_additional_non_configuration_keys = getattr(xcode_generator,\n 'generator_additional_non_configuration_keys', [])\n generator_additional_path_sections = getattr(xcode_generator,\n 'generator_additional_path_sections', [])\n global generator_extra_sources_for_rules\n generator_extra_sources_for_rules = getattr(xcode_generator,\n 'generator_extra_sources_for_rules', [])\n elif flavor == 'win':\n exts = gyp.MSVSUtil.TARGET_TYPE_EXT\n default_variables.setdefault('OS', 'win')\n default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable']\n default_variables['STATIC_LIB_PREFIX'] = ''\n default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library']\n default_variables['SHARED_LIB_PREFIX'] = ''\n default_variables['SHARED_LIB_SUFFIX'] = '.' 
+ exts['shared_library']\n\n \n \n import gyp.generator.msvs as msvs_generator\n generator_additional_non_configuration_keys = getattr(msvs_generator,\n 'generator_additional_non_configuration_keys', [])\n generator_additional_path_sections = getattr(msvs_generator,\n 'generator_additional_path_sections', [])\n\n gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)\n else:\n operating_system = flavor\n if flavor == 'android':\n operating_system = 'linux' \n default_variables.setdefault('OS', operating_system)\n default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')\n default_variables.setdefault('SHARED_LIB_DIR',\n os.path.join('$!PRODUCT_DIR', 'lib'))\n default_variables.setdefault('LIB_DIR',\n os.path.join('$!PRODUCT_DIR', 'obj'))", "title": "" }, { "docid": "01d94961a7309f575d8aae09fd6e0c15", "score": "0.5159453", "text": "def environment_variables(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"environment_variables\")", "title": "" }, { "docid": "b2ab5c12f20f354b1c868b30d58fb1ac", "score": "0.5158013", "text": "def parse_sys_args(argumentParser=None, args=None):\n if argumentParser is None:\n argumentParser = ArgumentParser()\n argumentParser.add_argument(\"--general.config.file\", nargs='?', dest='config_file', default='config.yml',\n const='config.yml')\n params, unknown_arg = argumentParser.parse_known_args(args=args)\n\n sys_d = argumentParser.as_dict(args=unknown_arg)\n return params, sys_d", "title": "" }, { "docid": "d09f6ab5ed92f0c7f0314556180fdb54", "score": "0.51566106", "text": "def config_to_args(config):\n result = []\n\n for key, value in iteritems(config):\n if value is False:\n continue\n\n key = key.replace('_', '-')\n\n if value is not True:\n result.append('--{0}={1}'.format(key, str(value)))\n else:\n result.append('--{0}'.format(key))\n\n return result", "title": "" }, { "docid": "3dd1110b45690d09165d2848651a0fb5", "score": "0.515523", "text": "def get_wisdom_metadata():\n return {\n # \"venv\"\n \"executable\": sys_executable,\n # encapsulates sys.platform, platform.machine(), platform.architecture(), platform.libc_ver(), ...\n \"hostname\": gethostname(),\n \"available_threads\": len(os.sched_getaffinity(0)),\n }", "title": "" }, { "docid": "50e3355d88a6e21134a951a90e4ffeff", "score": "0.5153026", "text": "def get_configuration_file_form():\n\n return \"DISK_USAGE=TRUE\" \\\n \"\\nCPU_PERCENT=TRUE\" \\\n \"\\nMEMORY_INFO=TRUE\" \\\n \"\\nCPU_STATS=TRUE\" \\\n \"\\nSEND_TIME=2\" \\\n \"\\nADDRESS=localhost\" \\\n \"\\nPORT=5672\"", "title": "" }, { "docid": "63f3ee8bed15fd57e24d9e49f544726f", "score": "0.51507986", "text": "def collect_env(with_torch_comiling_info=False):\n env_info = collect_base_env()\n env_info['MMCV'] = mmcv.__version__\n if not with_torch_comiling_info:\n env_info.pop('PyTorch compiling details')\n env_info['MMPreTrain'] = mmpretrain.__version__ + '+' + get_git_hash()[:7]\n return env_info", "title": "" }, { "docid": "2e6456d73c4dc19dad82bd17c549a490", "score": "0.514678", "text": "def get_arch_configs():\n\n # TODO: Could this be made more robust across kernel versions by checking\n # for the existence of particular arches?\n\n def add_arch(ARCH, res):\n os.environ[\"SRCARCH\"] = archdir\n os.environ[\"ARCH\"] = ARCH\n print \" Loading {0}...\".format(ARCH)\n c = kconfiglib.Config(base_dir = \".\")\n res.append(c)\n\n res = []\n\n for archdir in os.listdir(\"arch\"):\n # No longer broken as of 3.7.0-rc8\n #if archdir == \"h8300\":\n # Broken Kconfig as of Linux 2.6.38-rc3\n #continue\n\n if 
os.path.exists(os.path.join(\"arch\", archdir, \"Kconfig\")):\n add_arch(archdir, res)\n # Some arches define additional ARCH settings with ARCH != SRCARCH.\n # (Search for \"Additional ARCH settings for\" in the Makefile.) We\n # test those as well.\n if archdir == \"x86\":\n add_arch(\"i386\", res)\n add_arch(\"x86_64\", res)\n elif archdir == \"sparc\":\n add_arch(\"sparc32\", res)\n add_arch(\"sparc64\", res)\n elif archdir == \"sh\":\n add_arch(\"sh64\", res)\n elif archdir == \"tile\":\n add_arch(\"tilepro\", res)\n add_arch(\"tilegx\", res)\n\n # Don't want subsequent 'make *config' commands in tests to see this\n del os.environ[\"ARCH\"]\n del os.environ[\"SRCARCH\"]\n\n return res", "title": "" }, { "docid": "9b3b8fa3cfcd9c64f430562f40a51bab", "score": "0.5143132", "text": "def get_settings():\n parser = SettingsParser()\n parser.add_argument('--host', default='::0', env_var='HOST')\n parser.add_argument('--port', default=8000, type=int, env_var='PORT')\n parser.add_argument(\n '--mongo_url',\n default='mongodb://localhost/khartoum.fs',\n env_var='MONGODB_URL')\n parser.add_argument(\n '--compression_level', default=6, type=int,\n env_var='COMPRESSION_LEVEL')\n\n parser.add_argument(\n '--cache_days', default=365, type=int,\n env_var='CACHE_DAYS')\n\n parser.add_setting('extra_headers', default={})\n\n settings = parser.parse_args()\n\n # The set of compressable mimetypes might be configured from a yaml file,\n # but can't be set on the command line or in an env var, so no argument is\n # defined. Instead, just set the default here.\n defaults = [\n 'text/plain',\n 'text/html',\n 'application/javascript',\n 'text/css',\n ]\n vars(settings).setdefault('compressable_mimetypes', defaults)\n\n return settings", "title": "" }, { "docid": "ad6437e027c997c8ea9184ab2ab8b756", "score": "0.51369673", "text": "def get_device_parameters(args):\n params = {}\n\n # Read the data from the INI file for this host\n cp = configparser.ConfigParser()\n cp.read(args.config)\n\n if args.ucs_host not in cp:\n print('Error: UCS host ({}) does not exist in the config file '\n '({})'.format(args.ucs_host, args.config))\n exit(1)\n\n # Get the login credentials for the host, or else use defaults if available\n config_host = cp[args.ucs_host]\n config_default = cp['DEFAULT']\n\n params['username'] = config_host.get(\n 'username', config_default.get('username'))\n params['password'] = config_host.get(\n 'password', config_default.get('password'))\n params['server_type'] = args.type or config_host.get(\n 'type', config_default.get('type'))\n\n config_addresses = parse_config_addresses(config_host.get('addresses', ''))\n params['alternate_addresses'] = args.alt_address or config_addresses\n\n if params['server_type'] is None:\n print('Error: Host type is not defined for {} in {}. '\n 'You must update that configuration file or pass in a --type '\n 'argument. 
Expected values: \"ucsm\" or \"imc\"'.format(\n args.ucs_host, args.config))\n exit(1)\n\n if params['username'] is None:\n print(\"No username found for host {} and no default\".format(\n args.ucs_host))\n exit(1)\n\n if params['password'] is None:\n print(\"No password found for host {} and no default\".format(\n args.ucs_host))\n exit(1)\n\n return params", "title": "" }, { "docid": "2115b154c5d6d734d65ecbdc27f5e514", "score": "0.51346576", "text": "def get_configuration(file_name):\n print(\"Reading config from {}\".format(file_name))\n with open(file_name, 'r') as f:\n include = []\n exclude = []\n include_backup_files = False\n include_hidden_files = False\n for line in f.readlines():\n line = line.rstrip()\n if (re.match(\"^\\W*INCLUDE: .*$\", line) != None):\n content = \"\".join(line.split(\":\")[1:])\n include = re.split(\" *\", content)[1:]\n\n elif (re.match(\"^\\W*EXCLUDE: .*$\", line) != None):\n content = \"\".join(line.split(\":\")[1:])\n exclude = re.split(\" *\", content)[1:]\n\n elif (re.match(\"^\\W*INCLUDE_BACKUP_FILES: .*$\", line) != None):\n content = \"\".join(line.split(\":\")[1:]).strip()\n include_backup_files = (content == \"YES\")\n\n elif (re.match(\"^\\W*INCLUDE_HIDDEN_FILES: .*$\", line) != None):\n content = \"\".join(line.split(\":\")[1:]).strip()\n include_hidden_files = (content == \"YES\")\n return include, exclude, include_backup_files, include_hidden_files", "title": "" }, { "docid": "d1ba65bfeca77d3cfe6feff38105577d", "score": "0.5131966", "text": "def get_config():\n # config = {\n # 'compress': 'none' or 'gzip' or 'bzip2'\n # 'overwrite': True or False\n # 'server': String\n # 'port': Integer\n # 'user': String\n # 'pass': String\n # 'usessl': True or False\n # 'keyfilename': String or None\n # 'certfilename': String or None\n # }\n\n config, warnings, errors = process_cline()\n config, warnings, errors = check_config(config, warnings, errors)\n\n # show warnings\n for warning in warnings:\n print \"WARNING:\", warning\n\n # show errors, exit\n for error in errors:\n print \"ERROR\", error\n if len(errors):\n sys.exit(2)\n\n # prompt for password, if necessary\n if 'pass' not in config:\n config['pass'] = getpass.getpass()\n\n # defaults\n if 'port' not in config:\n if config['usessl']:\n config['port'] = 993\n else:\n config['port'] = 143\n if 'timeout' not in config:\n config['timeout'] = 60\n\n # done!\n return config", "title": "" }, { "docid": "b01da23a6423c4b89ec26ee553092b8f", "score": "0.5129983", "text": "def GetDependencies():\r\n\r\n return OrderedDict([ ( \"python36\", Configuration( \"Python 3.6.5\",\r\n [],\r\n VersionSpecs( [ VersionInfo(\"Python\", \"v3.6.5\"), ],\r\n {},\r\n ),\r\n ) ),\r\n ( \"python27\", Configuration( \"Python 2.7.14\",\r\n [],\r\n VersionSpecs( [ VersionInfo(\"Python\", \"v2.7.14\"), ],\r\n {},\r\n ),\r\n ) ),\r\n ])", "title": "" }, { "docid": "6c33d8972a5bf0d2e4682715f68c455e", "score": "0.5128889", "text": "def _ExtractImportantEnvironment(output_of_set):\r\n envvars_to_save = (\r\n 'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.\r\n 'include',\r\n 'lib',\r\n 'libpath',\r\n 'path',\r\n 'pathext',\r\n 'systemroot',\r\n 'temp',\r\n 'tmp',\r\n )\r\n env = {}\r\n for line in output_of_set.splitlines():\r\n for envvar in envvars_to_save:\r\n if re.match(envvar + '=', line.lower()):\r\n var, setting = line.split('=', 1)\r\n if envvar == 'path':\r\n # Our own rules (for running gyp-win-tool) and other actions in\r\n # Chromium rely on python being in the path. 
Add the path to this\r\n # python here so that if it's not in the path when ninja is run\r\n # later, python will still be found.\r\n setting = os.path.dirname(sys.executable) + os.pathsep + setting\r\n env[var.upper()] = setting\r\n break\r\n for required in ('SYSTEMROOT', 'TEMP', 'TMP'):\r\n if required not in env:\r\n raise Exception('Environment variable \"%s\" '\r\n 'required to be set to valid path' % required)\r\n return env", "title": "" }, { "docid": "0180e67d70fe60ecf4d42561aeac6275", "score": "0.5127738", "text": "def getenvlist(self, encoding='utf-8'):\n if not self.handle:\n return PAM_SYSTEM_ERR\n\n env_list = self.pam_getenvlist(self.handle)\n\n env_count = 0\n pam_env_items = {}\n while True:\n try:\n item = env_list[env_count]\n except IndexError:\n break\n if not item:\n # end of the list\n break\n if sys.version_info >= (3,):\n env_item = item.decode(encoding)\n else:\n env_item = item\n try:\n pam_key, pam_value = env_item.split(\"=\", 1)\n except ValueError:\n # Incorrectly formatted envlist item\n pass\n else:\n pam_env_items[pam_key] = pam_value\n env_count += 1\n\n return pam_env_items", "title": "" }, { "docid": "31604d4cb5f66f27cd8ad1cda96a09e4", "score": "0.51242906", "text": "def get_app_variables():\n\tos.chdir(os.path.dirname(__file__))\n\tCURRENT_DIR=os.getcwd()\n#\tprint(\"Script is running from: \", CURRENT_DIR)\n\tvSPARK_ETL_HOME = re.findall('(\\S+?spark-etl)', CURRENT_DIR, flags=re.IGNORECASE)[0]\n#\tprint(\"vSPARK_ETL_HOME = \",vSPARK_ETL_HOME)\n\tif os.environ.get('SPARK_ETL_HOME') is None:\n#\t\tos.environ['SPARK_ETL_HOME'] = os.path.expanduser('~/Spark_SQL/spark-etl')\n\t\tos.environ['SPARK_ETL_HOME'] = vSPARK_ETL_HOME\n\telse:\n\t\tos.environ.get('SPARK_ETL_HOME')\n\n\td_env_variables=dict()\n\td_env_variables['SPARK_ETL_HOME'] = os.environ.get('SPARK_ETL_HOME')\n\td_env_variables['SPARK_ETL_CONF_DIR'] = os.environ.get('SPARK_ETL_HOME')+\"/etl-config/\"\n\td_env_variables['SPARK_ETL_CONN_DIR'] = os.environ.get('SPARK_ETL_HOME')+\"/connections/\"\n\td_env_variables['SPARK_ETL_LOG_DIR'] = os.environ.get('SPARK_ETL_HOME')+\"/logs/\"\n\n\treturn d_env_variables", "title": "" }, { "docid": "5b5512cae9b76eed89f4b307cbdfb02a", "score": "0.5109094", "text": "def get_build_compile_args(\n compiler=None,\n arch=None,\n current_machine=None,\n ext_suffix=None,\n use_option=None,\n use_api=None,\n):\n if ext_suffix is None:\n from distutils.sysconfig import get_config_var\n\n ext_suffix = get_config_var(\"EXT_SUFFIX\")\n if current_machine is None:\n current_machine = _find_if_current_machine()\n if arch is None:\n from distutils.util import get_platform\n\n arch = get_platform()\n\n build_args = {}\n compiler = get_compiler(compiler, current_compiler=True)\n\n if current_machine:\n try:\n build_args[ext_suffix] = [compiler.compile_args_current_machine()]\n\n except Exception:\n # Compilertools should not break compilation in this case because it may be\n # called from a Pip install. 
It should only back to compatible default in\n # this case.\n _log_exception()\n build_args[ext_suffix] = []\n\n else:\n include = ConfigBuild.suffixes_includes\n if not include:\n exclude = ConfigBuild.suffixes_excludes\n\n def filter_suffix(suffix_to_test):\n \"\"\"Filter by exclusion.\"\"\"\n return suffix_to_test in exclude\n\n else:\n\n def filter_suffix(suffix_to_test):\n \"\"\"Filter by inclusion.\"\"\"\n return suffix_to_test not in include\n\n args = get_compile_args(compiler, arch, current_compiler=True)\n\n for suffixes in set(args):\n for suffix in suffixes.split(\"-\"):\n if filter_suffix(suffix):\n del args[suffixes]\n break\n\n for arg, suffix in zip(args.values(), suffix_from_args(args, ext_suffix, True)):\n build_args[suffix] = arg\n\n arg_ext = []\n\n _add_args(compiler, arg_ext, \"api\", \"compile\", use_api)\n\n _add_args(compiler, arg_ext, \"option\", \"compile\", use_option)\n\n if arg_ext:\n for suffix in build_args:\n build_args[suffix].extend(arg_ext)\n\n return build_args", "title": "" }, { "docid": "430440313e52bf79cee39c12112d8850", "score": "0.5106686", "text": "def get_config(self) -> List[dict]:\n return hammer_config.load_config_from_defaults(self.path)", "title": "" }, { "docid": "6acbc3d79c329168a9fa23635f968af6", "score": "0.5102751", "text": "def get_config():\n import bob.extension\n return bob.extension.get_config(__name__)", "title": "" } ]
69100a1fba92af3c16f4219ba47b26c6
Converts a HTTP response to an object (from headers)
[ { "docid": "b9f0299e6dd8fba27481c3cf95ae8ed5", "score": "0.64695716", "text": "def _response_to_object(self, object_name, container, response):\n\n headers = response.headers\n size = int(headers[\"content-length\"])\n etag = headers[\"etag\"]\n scheme = \"https\" if self.secure else \"http\"\n\n extra = {\n \"url\": \"{}://{}{}\".format(scheme, response.connection.host, response.connection.action),\n \"etag\": etag,\n \"md5_hash\": headers.get(\"content-md5\", None),\n \"content_type\": headers.get(\"content-type\", None),\n \"content_language\": headers.get(\"content-language\", None),\n \"content_encoding\": headers.get(\"content-encoding\", None),\n \"last_modified\": headers[\"last-modified\"],\n \"lease\": {\n \"status\": headers.get(\"x-ms-lease-status\", None),\n \"state\": headers.get(\"x-ms-lease-state\", None),\n \"duration\": headers.get(\"x-ms-lease-duration\", None),\n },\n \"blob_type\": headers[\"x-ms-blob-type\"],\n }\n\n if extra[\"md5_hash\"]:\n value = binascii.hexlify(base64.b64decode(b(extra[\"md5_hash\"])))\n value = value.decode(\"ascii\")\n extra[\"md5_hash\"] = value\n\n meta_data = {}\n for key, value in response.headers.items():\n if key.startswith(\"x-ms-meta-\"):\n key = key.split(\"x-ms-meta-\")[1]\n meta_data[key] = value\n\n return Object(\n name=object_name,\n size=size,\n hash=etag,\n extra=extra,\n meta_data=meta_data,\n container=container,\n driver=self,\n )", "title": "" } ]
[ { "docid": "4b5a69218c07cd43d00ca956e4e1a294", "score": "0.68540525", "text": "def parse_response(self, response):\n return ParseHTTPResponse(response)", "title": "" }, { "docid": "5a6a3c44eb55189438ac0995ee619fb9", "score": "0.68098414", "text": "def parse_response(response):\n if isinstance(response, six.binary_type):\n response = response.decode('utf-8')\n try:\n obj = json.loads(response)\n except ValueError:\n return {}\n return obj", "title": "" }, { "docid": "682a121c91559dd3a66e780aeaea076a", "score": "0.66306233", "text": "def from_http_response(cls, resp):\n return cls(\n status=resp.status,\n body=resp.read(),\n reason=getattr(resp, \"reason\", None),\n msg=getattr(resp, \"msg\", None),\n )", "title": "" }, { "docid": "fa1223ee11c1f0c554182f83755c06a2", "score": "0.6626572", "text": "def unpack(response):\n \n import simplejson as json\n return json.loads(response)", "title": "" }, { "docid": "04851b38f6ca2c522e30adaddef67f7b", "score": "0.6350767", "text": "def from_headers(self, headers):\n cls = Response\n if b'Content-Type' in headers:\n cls = self.from_content_type(\n content_type=headers[b'Content-Type'],\n content_encoding=headers.get(b'Content-Encoding')\n ) \n\n if cls is Response and b'Content-Disposition' in headers:\n print('from_headers')\n\n return cls", "title": "" }, { "docid": "fa204cf7e341bcc8ac67b00b2def22ff", "score": "0.63475144", "text": "def get_headers(self,response):\r\n headers, content = response\r\n return headers", "title": "" }, { "docid": "45147b1b1a065af6900705c296ac3a09", "score": "0.62991464", "text": "def get_headers(self, response):\r\n # response is a tuple containing (headers, content)\r\n # headers is an httplib2 Response object, content is a string\r\n # see http://httplib2.googlecode.com/hg/doc/html/libhttplib2.html\r\n headers, content = response\r\n return headers", "title": "" }, { "docid": "a0d2c6fab2a33fb5dffb1c32d4ca44d2", "score": "0.6239022", "text": "def load_response(\n self,\n response\n ):\n validate = True\n headers = None\n\n endpoint = 'https://jsonplaceholder.typicode.com/users'\n endpoint = self._input_string(endpoint)\n request = deepcopy(self.request)\n request[\"method\"] = \"GET\"\n request[\"query\"] = OrderedDict()\n \n validate = self._input_boolean(validate)\n if headers:\n request[\"headers\"].update(self._input_object(headers))\n return self._request(endpoint, request, validate, response)[\"response\"]", "title": "" }, { "docid": "1fc46715dd0c382a45411ea5147dea01", "score": "0.6134363", "text": "def parse_response(self, response: Any) -> Any:\n return response", "title": "" }, { "docid": "8735bdfa065f32a06af82d202eb65196", "score": "0.6133576", "text": "def process_response(response):\n return {\n 'html': response.text,\n 'url': response.url,\n 'status': response.status\n }", "title": "" }, { "docid": "68292576cf6b29b47f3f294db9fc1a23", "score": "0.61326134", "text": "def _response(self, response):\n LOGGER.debug('Response status code: %s', response.status_code)\n LOGGER.debug('Headers: %r', response.headers)\n if 200 <= response.status_code <= 300:\n if 'application/json' in response.headers['Content-Type']:\n return response.json()\n return response.content\n if response.status_code == 404:\n raise APIError({'404': 'URL Not Found: %s' % response.url})\n if 'application/json' in response.headers['Content-Type']:\n raise APIError(response.json().get('errors'))\n LOGGER.debug(response.content)\n raise APIError('Not JSON')", "title": "" }, { "docid": "e5573e9cf73f659e84a0d66f54a5461b", "score": "0.6131569", 
"text": "def coerce_response():\n if not isinstance(response, Response):\n return Response(0, response)\n return response", "title": "" }, { "docid": "8013a5fa288c4a450ef58485c245a2c0", "score": "0.61047363", "text": "def deserialize(self, resp):\n format = resp[\"Content-Type\"].split(\";\")[0]\n\n if format != \"text/html\":\n return self.serializer.deserialize(resp.content, format=format)\n else:\n return resp.content", "title": "" }, { "docid": "1abe8643e1eea27832369126364c13f9", "score": "0.61023164", "text": "def parse_raw_response(response: Union[bytes, requests.Response]) -> dict:\n return json.loads(xml2json(response))", "title": "" }, { "docid": "e34ec81714d212ac3e1a910c64618202", "score": "0.60385185", "text": "def headers(response):\n try:\n return response.headers\n except(TypeError, AttributeError):\n return None", "title": "" }, { "docid": "0204d994b19f6ab5b0eeb7856995dffa", "score": "0.60377127", "text": "def aiohttp_serialize_response(response: web.Response) -> dict[str, Any]:\n body = response.body\n\n if body is None:\n pass\n elif isinstance(body, payload.StringPayload):\n # pylint: disable=protected-access\n body = body._value.decode(body.encoding)\n elif isinstance(body, bytes):\n body = body.decode(response.charset or \"utf-8\")\n else:\n raise ValueError(\"Unknown payload encoding\")\n\n return {\"status\": response.status, \"body\": body, \"headers\": dict(response.headers)}", "title": "" }, { "docid": "4e8eca2a3718b559fa2610f21bea3439", "score": "0.6006811", "text": "def __init__(self, response=\"\"):\n\n # construct default response object\n self.http_version = 'HTTP/1.0'\n self.status_code = 200\n self.reason = \"OK\"\n self.connection = 'close'\n self.date = datetime.utcnow().strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n self.location = \"\"\n self.content_type = \"\"\n self.content_length = 0\n self.content = b\"\"\n self.last_mod = ''\n self.expires = ''\n self.cache_control = ''\n self.server = ''\n self.www_auth = \"\"\n\n if response:\n # response bytearray was provided. 
Need to parse it\n message = parse_message(response)\n if message['content']:\n if 'content-length' not in message:\n # raise BadRequestError()\n # TODO: BadResponse\n pass\n self.content = message['content']\n\n # check if the relevant header fields are present in the message\n header = message['header']\n if 'http_version' in header:\n self.http_version = header['http_version']\n if 'status_code' in header:\n self.status_code = int(header['status_code'])\n if 'reason' in header:\n self.reason = header['reason']\n if 'content-length' in header:\n self.content_length = int(header['content-length'])\n if 'connection' in header:\n self.connection = header['connection']\n if 'date' in header:\n self.date = header['date']\n if 'location' in header:\n self.location = header['location']\n if 'last-modified' in header:\n self.last_mod = header['last-modified']\n if 'expires' in header:\n self.expires = header['expires']\n if 'content-type' in header:\n self.content_type = header['content-type']\n if 'cache-control' in header:\n self.cache_control = header['cache-control']\n if 'server' in header:\n self.server = header['server']\n if 'www-authenticate' in header:\n self.www_auth = header['www-authenticate']", "title": "" }, { "docid": "b7dd674eacef4182a8c8ec8ddd42858d", "score": "0.59942067", "text": "def from_httpx_response(cls: Type[Self], response: httpx.Response) -> Self:\n new_response = copy.copy(response)\n new_response.__class__ = cls\n return new_response", "title": "" }, { "docid": "add97619308a7578e30002a14f6bcfe2", "score": "0.5961631", "text": "def make_response(self, rv):\r\n status = headers = None\r\n if isinstance(rv, tuple):\r\n rv, status, headers = rv + (None,) * (3 - len(rv))\r\n\r\n if rv is None:\r\n raise ValueError('View function did not return a response')\r\n\r\n if not isinstance(rv, self.response_class):\r\n # When we create a response object directly, we let the constructor\r\n # set the headers and status. We do this because there can be\r\n # some extra logic involved when creating these objects with\r\n # specific values (like defualt content type selection).\r\n if isinstance(rv, basestring):\r\n rv = self.response_class(rv, headers=headers, status=status)\r\n headers = status = None\r\n else:\r\n rv = self.response_class.force_type(rv, request.environ)\r\n\r\n if status is not None:\r\n if isinstance(status, basestring):\r\n rv.status = status\r\n else:\r\n rv.status_code = status\r\n if headers:\r\n rv.headers.extend(headers)\r\n\r\n return rv", "title": "" }, { "docid": "3c2644a40712861ed00e3033e1c3d90c", "score": "0.5955166", "text": "def decode(response):\n return json.loads(response.content.decode('utf'))", "title": "" }, { "docid": "37e501003bbdb9b8350b970f6d7fbd27", "score": "0.5936327", "text": "def parse(self, response):\n return {\n 'page_url': response.url,\n 'content': response.body.decode('gb2312')\n }", "title": "" }, { "docid": "d25f2dbfe859c41739a682169856f88e", "score": "0.5926719", "text": "def _json_to_python_object(self, link):\r\n\r\n # request data from website\r\n __response = requests.get(link)\r\n\r\n # extract content from response object. 
Is in bytes form containing json information\r\n __content = __response.content\r\n\r\n #convert from byte-json-object to python object\r\n __python_object_representing_json = json.loads(__content)\r\n \r\n return __python_object_representing_json", "title": "" }, { "docid": "f71329c5d4e88bcb8a9f5208c6538bdb", "score": "0.5918902", "text": "def get_content(self,response):\r\n # response is a tuple containing (headers, content)\r\n # headers is an httplib2 Response object, content is a string\r\n # see http://httplib2.googlecode.com/hg/doc/html/libhttplib2.html\r\n headers, content = response\r\n\r\n if content:\r\n content = json.loads(content.decode('utf-8'))\r\n return content", "title": "" }, { "docid": "ce1d5327adfb2e3936809fce1f49e145", "score": "0.58993644", "text": "def _decode_response(self, response):\n try:\n data = json.loads(response)\n return data\n except ValueError:\n print \"The response could not be decoded as a JSON object\"\n\n return response.encode('utf-8')", "title": "" }, { "docid": "1b65ee8855f984fc60a0473f7f987d5f", "score": "0.58490455", "text": "def makeProxyResponse( self ):\n\n if isinstance(self.body, dict):\n if self.isJSON():\n body = json.dumps(self.body)\n\n elif self.isXML():\n body = Response.__dictToXML('response', self.body)\n\n else:\n body = str(self.body)\n\n return {\n 'statusCode': self.statusCode,\n 'body': body,\n 'headers': self.headers.copy(),\n }", "title": "" }, { "docid": "f080c359337af27bb25668a8ed63593f", "score": "0.58121794", "text": "def create_response(self, request, content):\n response = http.HttpResponse(content)\n header_settings = self.config.get('headers')\n for key, value in header_settings.items():\n response[key] = value\n return response", "title": "" }, { "docid": "db4d878904c5e99009455d9c104e8bd5", "score": "0.58110046", "text": "def ParseHTTPResponse(response):\n # Check for new http response object, else it is a file object\n if hasattr(response, 'getheader'):\n if response.getheader('Content-Encoding', '') == 'gzip':\n stream = _base.GzipDecodedResponse(response)\n else:\n stream = response\n else:\n stream = response\n\n data = ''\n while 1:\n chunk = stream.read(1024)\n if not chunk:\n break\n data += chunk\n\n response = json.loads(data)\n ValidateBasicJSONRPCData(response)\n\n if 'response' in response:\n ValidateResponse(response)\n return response['response']\n elif 'error' in response:\n ValidateError(response)\n code = response['error']['code']\n message = response['error']['message']\n raise Fault(code, message)\n else:\n raise ProtocolError('No valid JSON returned')", "title": "" }, { "docid": "3637443626adbf6c3e353a6611af1e76", "score": "0.5808242", "text": "def readResponse(self, data):\n str = StringIO(data)\n object = DAAPObject()\n object.processData(str)\n return object", "title": "" }, { "docid": "2389eabbdddedc3a50fc707b1462f6f2", "score": "0.5779828", "text": "def get_response(self):\n\n response_obj = {\n 'status_code': self.status_code,\n 'message': self.message\n }\n\n if self.data is not None:\n response_obj['data'] = self.data\n\n if self.meta is not None:\n response_obj['meta'] = self.meta\n\n if self.error is not None:\n response_obj['error'] = self.error\n\n return response_obj", "title": "" }, { "docid": "efaf5f3c4acd9c8a17b3f2c68c845e6c", "score": "0.57720244", "text": "def http_response(self, request, response):\n # First I need to check if the response came from the cache\n # stuff that's stored in the cache is there uncompressed,\n # so I can simply return the same response!\n if 
isinstance(response, SQLCachedResponse):\n return response\n\n #\n # post-process response\n #\n if self._should_decompress(response):\n response = self._decompress(response)\n\n return response", "title": "" }, { "docid": "89e0b134ab88b07a3b21787413a0fd5b", "score": "0.57618195", "text": "def from_response(response):\n cls = _code_map.get(response.status, exc.HttpError)\n return cls()", "title": "" }, { "docid": "00bf684f908a71bbbcd2f462f192162f", "score": "0.57412016", "text": "def _decodeResponse(self):\n self.responseContent = json.load(self.request._responseContent)", "title": "" }, { "docid": "3a031f4c52196114f91b9a5a6021d543", "score": "0.5728021", "text": "def __init__(self, response):\n self.data = response['data']\n self.errmsg = response['errmsg']\n self.err = response['err']", "title": "" }, { "docid": "f607df654cd7b1cc091094b8cad287c2", "score": "0.57193595", "text": "def build_response(response):\n return {\n \"version\": \"1.0\",\n \"sessionAttributes\": None,\n \"response\": response\n }", "title": "" }, { "docid": "87bdde5a005ac63e54a9a4999f36f5ae", "score": "0.5703179", "text": "def _parse_iedb_response(response):\n lines = response.split(\"\\n\")\n\n # manually parsing since Pandas is insane\n header_names = lines[0].split(\"\\t\")\n\n d = {}\n for col_name in header_names:\n d[col_name] = []\n\n for line in lines[1:]:\n line = line.strip()\n if len(line) > 0:\n fields = line.split('\\t')\n for i, header_name in enumerate(header_names):\n value = convert_str(fields[i] if len(fields) > i else None)\n d[header_name].append(value)\n return pd.DataFrame(d)", "title": "" }, { "docid": "ed13291406239d1887cff23a0b02b21e", "score": "0.5683784", "text": "def json_of_response(response):\n return json.loads(response.data.decode())", "title": "" }, { "docid": "4ce0420c1dc66bcbe06e63f3e9b899cb", "score": "0.56462985", "text": "def decode(response):\n\t\ttry:\n\t\t\treturn response.json()\n\t\texcept ValueError as e:\n\t\t\treturn e.message", "title": "" }, { "docid": "f87b3e64d370e2020a93b3442737d50d", "score": "0.5610407", "text": "def convert_to_test_response(response: RequestsResponse) -> Response:\n return Response(\n response.iter_content(),\n str(response.status_code),\n Headers({**dict(response.headers), \"Content-Type\": \"application/json\"}),\n MagicMock(),\n )", "title": "" }, { "docid": "c559a0ca5cceb695c097be16bcde64a4", "score": "0.56057215", "text": "def data(self):\n res = self.response()\n LOGGER.debug('Response headers:\\n%s', res.info())\n if res.info().get('Content-Encoding') == 'gzip':\n buf = bstream(res.read())\n f = gzip.GzipFile(fileobj=buf)\n data = f.read()\n else: # pragma: no cover\n data = res.read()\n return s(data)", "title": "" }, { "docid": "ec7fee1d3c7cac5cfd7bba7aa67da561", "score": "0.55993843", "text": "def import_proxy_req_resp(self, request, response):\n self.is_in_scope = request.in_scope\n self.url = request.url\n self.data = request.body if request.body is not None else \"\"\n self.method = request.method\n try:\n self.status = \"{!s} {!s}\".format(str(response.code), response_messages[int(response.code)])\n except KeyError:\n self.status = \"{!s} Unknown Error\".format(response.code)\n self.raw_request = request.raw_request\n self.response_headers = response.header_string\n self.response_contents = response.body\n self.response_size = len(self.response_contents)\n self.time = str(response.request_time)\n self.time_human = self.timer.get_time_human(self.time)\n self.local_timestamp = request.local_timestamp\n self.found = (self.status == 
\"200 OK\")\n self.cookies_list = response.cookies\n self.new = True\n self.id = \"\"\n self.html_link_id = \"\"", "title": "" }, { "docid": "84add98fdcd7013274c389437d7898f0", "score": "0.55932164", "text": "def deserialize(self, response, response_type):\n # handle file downloading\n # save response body into a tmp file and return the instance\n if response_type == \"file\":\n return self.__deserialize_file(response)\n\n # fetch data from response object\n try:\n data = json.loads(response.data)\n except ValueError:\n data = response.data\n\n return self.__deserialize(data, response_type)", "title": "" }, { "docid": "367045510b6e29cbe9b1936bc7a85a3d", "score": "0.5588004", "text": "def get_dict_from_response(response: OAuthResponse) -> dict:\n if getattr(response, '_resp') and response._resp.code > 400:\n raise OAuthException(\n 'OpenID Remote Application failed to parse response',\n None, response\n )\n\n return response.data", "title": "" }, { "docid": "6e2cf9febe9b497392b7c96a367f70c0", "score": "0.55358803", "text": "def clean_response(self, response):\n decodedResponse = response.decode('utf-8')\n spaceLessResponse = decodedResponse.replace(' <', '<')\n untangleObject = untangle.parse(spaceLessResponse)\n\n try:\n title = untangleObject.book.title.cdata\n except:\n title = None\n\n try:\n author = untangleObject.book.author.cdata\n except:\n author = None\n\n try:\n edition = untangleObject.book.edition.cdata\n except:\n edition = None\n\n try:\n rows = []\n for item in untangleObject.book.items.item:\n try:\n vendor = item.vendor.cdata\n except:\n vendor = None\n try:\n price = item.price.cdata\n except:\n price = None\n try:\n condition = item.condition.cdata\n except:\n condition = None\n\n rows.append(Row(vendor, price, condition))\n except:\n rows = None\n\n result = {'Title': title, 'Author': author, 'Edition': edition, 'Rows': rows}\n return result", "title": "" }, { "docid": "897df7cb95f6ac1d71f2b867f3d6fbb9", "score": "0.55339146", "text": "def _parse_json_resp(self):\n try:\n return self.response_obj.json() or {}\n except:\n return {}", "title": "" }, { "docid": "f79df20e0c4839a8d248e52457d7944a", "score": "0.55320877", "text": "def response_split(self):\n self.response = json.loads(self.response.decode())", "title": "" }, { "docid": "c8fa5b3a6529084831ddffd2628a2865", "score": "0.55223286", "text": "def parse_response(self, response):\n if self.QUIET_MODE:\n return response\n try:\n pprint(json.loads(response.content))\n except:\n pass\n if response.ok:\n print(\"Success!\")\n elif response.status_code == 403:\n print(\"Access denied\")\n elif response.status_code == 400:\n print(\"Missing Param/Invalid Request\")\n elif response.status_code == 422:\n raise NotImplementedError\n else:\n print(\"Unknown Response\")\n print(response.status_code)\n return response", "title": "" }, { "docid": "43b74d950f9118a06b617dc8b2294565", "score": "0.5517444", "text": "def raw_data(self):\n return self._response_parsed or {}", "title": "" }, { "docid": "37b50dc1c60b44e269bbcc3237dbb27e", "score": "0.55170333", "text": "def dict_from_single_response(text, graceful=True):\n\n # check if we got an empty response (only two newlines) and return a dict\n # with two empty strings only\n # TODO: this should probably rather raise a ValueError but we need to test\n # all effects on existing code first!\n if text == \"\\n\\n\":\n return {\"\": \"\"}\n try:\n lines = list(csv.reader(StringIO(text), delimiter=\",\"))\n if len(lines) != 2:\n log.warning(\"Response expected to have exactly two lines: 
{}\", text)\n if not graceful:\n raise ValueError(\"Invalid response format!\")\n header = lines[0]\n data = lines[1]\n process_response_values(data)\n if len(header) != len(data):\n msg = \"Parsing CSV failed, mismatch of header vs. data fields count\"\n log.warning(\"{} ({} vs. {})\", msg, len(header), len(data))\n if not graceful:\n raise ValueError(msg)\n minimum = min(len(header), len(data))\n if minimum < len(header):\n log.warning(\"Discarding header-fields: {}\", header[minimum:])\n header = header[:minimum]\n else:\n log.warning(\"Discarding data-fields: {}\", data[minimum:])\n data = data[:minimum]\n\n except Exception as err:\n msg = f\"Unable to parse data returned by PUMAPI: {text} - ERROR: {err}\"\n log.error(msg)\n raise ValueError(msg) from err\n\n parsed = dict(zip(header, data))\n return parsed", "title": "" }, { "docid": "c3def13f9bbd7a0dc96729eb14786448", "score": "0.5512451", "text": "def _http_resp_2_httplib(self, original_response, mangled_response):\n ka_resp = MangledKeepAliveHTTPResponse()\n\n ka_resp.set_body(mangled_response.get_body())\n ka_resp.headers = mangled_response.get_headers()\n ka_resp.code = mangled_response.get_code()\n ka_resp._url = mangled_response.get_uri().url_string\n ka_resp.msg = original_response.msg\n ka_resp.id = original_response.id\n ka_resp.set_wait_time(original_response.get_wait_time())\n ka_resp.encoding = mangled_response.charset\n\n return ka_resp", "title": "" }, { "docid": "db06933706430df86c436294525c5138", "score": "0.55081594", "text": "def _process_response(self, response):\n try:\n result = response.json()\n except ValueError:\n result = response.content\n\n try:\n response.raise_for_status()\n except Exception as e:\n message = ''\n\n try:\n errors = result.get('errors', [])\n for error in errors:\n code = error.get('code')\n msg = error.get('message')\n value = error.get('value')\n\n if code:\n message += 'Code: {}. 
'.format(code)\n if msg:\n message += 'Message: {} '.format(msg)\n if value:\n message += 'Value: {} '.format(value)\n except AttributeError:\n print(result)\n\n exception = message if message else e\n raise DarwinexAPIClientException(exception)\n\n return result", "title": "" }, { "docid": "a0850f278130126bb11a5de8b90b74cd", "score": "0.5502008", "text": "def __init__(self, resp):\n try:\n body = resp.json()\n self.resp_json = body\n except ValueError:\n self.resp_json = None\n self.resp_text = resp.text", "title": "" }, { "docid": "72d272a74bffaf25892c457f8d73a669", "score": "0.5494827", "text": "def parse(self, response):\n pass", "title": "" }, { "docid": "1ed017e4cef690f26e13bb4d82a44658", "score": "0.5494564", "text": "def _to_json(res):\n return json.loads(res.content)", "title": "" }, { "docid": "bd3c5dbceee602e16aaa89cc94319847", "score": "0.54830027", "text": "def get_headers(self, response):\r\n raise NotImplementedError", "title": "" }, { "docid": "c91875fa8a4fd6c5428b54290bc7b606", "score": "0.5481357", "text": "def _to_django_response(headers: Mapping, status_code: int,\n content: str) -> HttpResponse:\n\n response = HttpResponse(content, status=status_code)\n\n for key, value in headers.items():\n response[key] = value\n return response", "title": "" }, { "docid": "13087da2dd1794e98664a5becae8e283", "score": "0.54805285", "text": "def response_as_json(resp):\n resp_json = json.loads(resp.data.decode('utf-8'))\n return resp_json", "title": "" }, { "docid": "dd6c2ae8d7a72c941ba22970e0acf452", "score": "0.5447571", "text": "def handle_200(self, response):\n\n def extract_response_string(response):\n \"\"\"Because the answer is mixedpart we need to extract.\"\"\"\n return response._content.decode('ISO-8859-1')\n\n return {\n 'body': extract_response_string(response),\n }", "title": "" }, { "docid": "45d1fad381e946d1b4084ae66da95fd5", "score": "0.5444209", "text": "def process_response(self, response):\n return response", "title": "" }, { "docid": "94301a3b05e2899ee8c34b2d8d4f2364", "score": "0.5438584", "text": "def parseResponse(response):\n data = unpack(\">6H\", response[:12])\n flags = data[1]\n auth = hex(flags)[3]\n soa = hex(flags)[5]\n ns_count = data[4]\n return auth, soa, ns_count", "title": "" }, { "docid": "e5356260086c06096a47e29bb655b444", "score": "0.5417918", "text": "def CreateHTTPHeaders(data):\n if six.PY2:\n return six.moves.http_client.HTTPMessage(six.StringIO(data))\n else:\n return email.parser.HeaderParser(HTTPMessageWrapper).parsestr(data)", "title": "" }, { "docid": "98061d7a654846f0a2c935eb6ea8a7f5", "score": "0.5413484", "text": "def response_object(self):\n return copy.deepcopy(self._response_object)", "title": "" }, { "docid": "d4ecefa84261d62b1943068796001147", "score": "0.54121435", "text": "def convert_response(self, response: Union[dict, List[dict]], params: dict) -> Union[List[object], object]:\n if 's' in params:\n return self.convert_response_list(response)\n return self.convert_to_object(response)", "title": "" }, { "docid": "49e332abbe186164bc1b654283568f83", "score": "0.54069245", "text": "def compose_response(request):\n response = message.Response()\n\n if request.method == 'GET':\n response = compose_get_response(request, response)\n else:\n response.code = 501\n\n if response.code == 200:\n if request.method == 'HTTP/1.1':\n if request['Connection'] == 'close':\n response['Connection'] = 'close'\n else:\n response['Connection'] = 'keep-alive'\n\n return response", "title": "" }, { "docid": "3cf12b5dbe02a9bc44b519fe3059e59a", "score": 
"0.539788", "text": "def response_to_json(response):\n return response.json()", "title": "" }, { "docid": "b07536adf4b6baa122efd3a6e00c879e", "score": "0.5388684", "text": "def response_wrapper(self, resp):\n return resp", "title": "" }, { "docid": "fe3929744fc5ad9a6318ffba23a6516b", "score": "0.53830093", "text": "def __init__(self, resp_obj):\n self.resp_obj = resp_obj", "title": "" }, { "docid": "0c25b222227e211b9452ecc6d93ca1f7", "score": "0.53799224", "text": "def read_response(stream):\n # https://tools.ietf.org/html/rfc7230#section-3.1.2\n\n # As in read_request, parsing is simple because a fixed value is expected\n # for version, status_code is a 3-digit number, and reason can be ignored.\n\n # Given the implementation of read_line(), status_line ends with CRLF.\n status_line = yield from read_line(stream)\n\n # This may raise \"ValueError: not enough values to unpack\"\n version, status_code, reason = status_line[:-2].split(b' ', 2)\n\n if version != b'HTTP/1.1':\n raise ValueError(\"Unsupported HTTP version: %r\" % version)\n # This may raise \"ValueError: invalid literal for int() with base 10\"\n status_code = int(status_code)\n if not 100 <= status_code < 1000:\n raise ValueError(\"Unsupported HTTP status_code code: %d\" % status_code)\n if not _value_re.match(reason):\n raise ValueError(\"Invalid HTTP reason phrase: %r\" % reason)\n\n headers = yield from read_headers(stream)\n\n return status_code, headers", "title": "" }, { "docid": "96e6a7877d202167e4f9032843b96a68", "score": "0.53748876", "text": "def __init__(self, response):\n self._response = response\n self['status_code'] = response.status_code\n self._custom_response(response)", "title": "" }, { "docid": "9b0a6f36d196988cd2ca57b4385710e9", "score": "0.53737944", "text": "def from_response(cls, response: requests.Response) -> TNSQueryJSONResult:\n return cls.from_dict(response.json())", "title": "" }, { "docid": "d5aa288fedbfb77c94a2f99bad730db2", "score": "0.53670484", "text": "def response_headers():\n\theaders = MultiDict(request.args.items(multi=True))\n\tresponse = jsonify(list(headers.lists()))\n\n\twhile True:\n\t\toriginal_data = response.data\n\t\td = {}\n\t\tfor key in response.headers.keys():\n\t\t\tvalue = response.headers.get_all(key)\n\t\t\tif len(value) == 1:\n\t\t\t\tvalue = value[0]\n\t\t\td[key] = value\n\t\tresponse = jsonify(d)\n\t\tfor key,value in headers.items(multi=True):\n\t\t\tresponse.headers.add(key,value)\n\t\tresponse_has_changed = response.data != original_data\n\t\tif not response_has_changed:\n\t\t\tbreak\n\treturn response", "title": "" }, { "docid": "bbb53cdc0fff8e72c6018b6f5561ec7d", "score": "0.5364013", "text": "def get_response(req):", "title": "" }, { "docid": "0605a98d7d5ccc443d4ab7e27c68ff75", "score": "0.5363551", "text": "def decode_response(self, r):\n if self.signalr:\n return r\n elif self.socketio:\n return r # json.loads(r['data'])\n elif self.unpack_json == 'force':\n return json.loads(r)\n elif self.unpack_json:\n try:\n return json.loads(r)\n except (json.JSONDecodeError, UnicodeDecodeError):\n return r\n else:\n return r", "title": "" }, { "docid": "4b37fa7da40bea76c879ef80cc8d282e", "score": "0.53621316", "text": "def _result_from_response(raw_response):\n\tr = json.loads(raw_response.text)\n\t_validate_response(r)\n\treturn r['response']['result']", "title": "" }, { "docid": "e9eb85e061859b4afb82721be1998254", "score": "0.5358835", "text": "def format_response(response):\n\tdata = {}\n\t\n\tif 'Item' in response:\n\t\titem = response.get('Item')\n\n\t\t# data[''] 
= \n\t\tdata['title'] = item.get('title').get('S')\n\t\tdata['text'] = item.get('text').get('S').strip()\n\t\t# data['html'] = item.get('html').get('S')\n\n\t\tdata['all_links'] = unpack_aggregate(item.get('all_links'))\n\n\t\tdata['all_emails'] = unpack_aggregate(item.get('all_emails'))\n\t\tdata['meta_keywords'] = item.get('meta_keywords').get('S')\n\t\tdata['meta_desc'] = item.get('meta_desc').get('S')\n\t\tdata['images'] = unpack_aggregate(item.get('images'))\n\t\tdata['url_id'] = item.get('url_id').get('N')\n\t\tdata['s3_link'] = item.get('s3_link').get('S')\n\telse:\n\t\tdata[\"error\"] = \"In Process\"\n\n\treturn data", "title": "" }, { "docid": "72a69e8ca377da0dfbf22e8ca0b282c6", "score": "0.5347088", "text": "def build_response(self, req, resp):\n response = Response()\n\n # Fallback to None if there's no status_code, for whatever reason.\n response.status_code = getattr(resp, 'status', None)\n\n # Make headers case-insensitive.\n response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))\n\n # Set encoding.\n response.encoding = get_encoding_from_headers(response.headers)\n response.raw = resp\n response.reason = response.raw.reason\n\n if isinstance(req.url, bytes):\n response.url = req.url.decode('utf-8')\n else:\n response.url = req.url\n\n # Add new cookies from the server.\n extract_cookies_to_jar(response.cookies, req, resp)\n\n # Give the Response some context.\n response.request = req\n response.connection = self\n return response", "title": "" }, { "docid": "fe7d16d453ace77bc62be61fd0a98388", "score": "0.53450143", "text": "def process_response(responseObject):\n\n payload = json.loads(\n str(\n responseObject\n )\n )\n\n return payload", "title": "" }, { "docid": "d6a2be46476dedbb20e5b16429278378", "score": "0.534178", "text": "def render_response(body=None, status=None, headers=None, method=None):\n if headers is None:\n headers = []\n else:\n headers = list(headers)\n headers.append(('Vary', 'X-Auth-Token'))\n\n if body is None:\n body = b''\n status = status or (http_client.NO_CONTENT,\n http_client.responses[http_client.NO_CONTENT])\n else:\n content_types = [v for h, v in headers if h == 'Content-Type']\n if content_types:\n content_type = content_types[0]\n else:\n content_type = None\n\n if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES:\n body = jsonutils.dump_as_bytes(body, cls=SmarterEncoder)\n if content_type is None:\n headers.append(('Content-Type', 'application/json'))\n status = status or (http_client.OK,\n http_client.responses[http_client.OK])\n\n # NOTE(davechen): `mod_wsgi` follows the standards from pep-3333 and\n # requires the value in response header to be binary type(str) on python2,\n # unicode based string(str) on python3, or else storeanalysis will not work\n # under apache with `mod_wsgi`.\n # storeanalysis needs to check the data type of each header and convert the\n # type if needed.\n # see bug:\n # https://bugs.launchpad.net/storeanalysis/+bug/1528981\n # see pep-3333:\n # https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types\n # see source from mod_wsgi:\n # https://github.com/GrahamDumpleton/mod_wsgi(methods:\n # wsgi_convert_headers_to_bytes(...), wsgi_convert_string_to_bytes(...)\n # and wsgi_validate_header_value(...)).\n def _convert_to_str(headers):\n str_headers = []\n for header in headers:\n str_header = []\n for value in header:\n if not isinstance(value, str):\n str_header.append(str(value))\n else:\n str_header.append(value)\n # convert the list to the immutable tuple to build the 
headers.\n # header's key/value will be guaranteed to be str type.\n str_headers.append(tuple(str_header))\n return str_headers\n\n headers = _convert_to_str(headers)\n\n resp = webob.Response(body=body,\n status='%d %s' % status,\n headerlist=headers,\n charset='utf-8')\n\n if method and method.upper() == 'HEAD':\n # NOTE(morganfainberg): HEAD requests should return the same status\n # as a GET request and same headers (including content-type and\n # content-length). The webob.Response object automatically changes\n # content-length (and other headers) if the body is set to b''. Capture\n # all headers and reset them on the response object after clearing the\n # body. The body can only be set to a binary-type (not TextType or\n # NoneType), so b'' is used here and should be compatible with\n # both py2x and py3x.\n stored_headers = resp.headers.copy()\n resp.body = b''\n for header, value in stored_headers.items():\n resp.headers[header] = value\n\n return resp", "title": "" }, { "docid": "cecdca5c0ceda6b730962bf1fcf8c833", "score": "0.5332597", "text": "def _process_response(self, response, request):\n\n def coerce_response():\n \"\"\" Coerce the response object into devil structure. \"\"\"\n if not isinstance(response, Response):\n return Response(0, response)\n return response\n\n if isinstance(response, HttpResponse):\n # we don't do anything if resource returns django's http response\n return response\n\n devil_res = coerce_response()\n if devil_res.content and devil_res.get_code_num() in (0, 200, 201):\n # serialize, format and validate\n serialized_res = devil_res.content = self._serialize_object(devil_res.content, request)\n formatted_res = self._format_response(request, devil_res)\n self._validate_output_data(response, serialized_res, formatted_res, request)\n else:\n # no data -> format only\n formatted_res = self._format_response(request, devil_res)\n return formatted_res", "title": "" }, { "docid": "6369cfc830af248608c0818296dddf3d", "score": "0.5330354", "text": "def __decodeBody(self):\n\n data = {}\n error = None\n\n content_type = self.headers.get('content-type', '')\n\n try:\n if content_type == 'application/json':\n data = json.loads(self.body)\n\n elif content_type == 'application/x-www-form-urlencoded':\n data = dict(urllib.parse.parse_qsl(self.body, strict_parsing=True))\n\n except ValueError as e:\n print(e)\n error = str(e)\n\n return (data, error)", "title": "" }, { "docid": "21a2dd8ea67a618b0fc461134315c7ba", "score": "0.5326379", "text": "def _prepare_response_content(data):\n return data", "title": "" }, { "docid": "4b0ff046658661e8f7c345227e0d446d", "score": "0.5325751", "text": "def prepare_json_response_data(response):\n\n return response.json()", "title": "" }, { "docid": "614c360a52ce3a0e63335f96843057ad", "score": "0.5306395", "text": "def response_headers(self,request,format='json'):\n return Resource.headers[format]", "title": "" }, { "docid": "94ae832f5e7ec147ef8d261dc93518e3", "score": "0.5305476", "text": "def parse_header(header):\n m = Message()\n m[\"content-type\"] = header\n return m", "title": "" }, { "docid": "12dbb5f0dae04088d1e1bdd57a9c59ad", "score": "0.5285789", "text": "def convert_resp_to_page(self, resp):\n return None", "title": "" }, { "docid": "94ee26e2d6179bce495df19a5427db42", "score": "0.5284589", "text": "def _from_single_response(cls, connection: Connection, response):\n obj = cls.__new__(cls) # Does not call __init__\n super(EntityBase, obj).__init__() # call any polymorphic base class initializers\n super(EntityBase, 
obj).__setattr__(\"connection\", connection)\n response = helper.camel_to_snake(response)\n if type(response) == dict:\n for key, value in response.items():\n cls._AVAILABLE_ATTRIBUTES.update({key: type(value)})\n value = cls._ENUM_MAP[key](value).name if cls._ENUM_MAP.get(key) else value\n super(EntityBase, obj).__setattr__(key, value)\n return obj", "title": "" }, { "docid": "e019b97e493cd02b6329a6343dde08e8", "score": "0.5280442", "text": "def _parse_response(self, callback, response):\n try:\n res = escape.json_decode(response.body)\n except Exception, e:\n e.args += ('API response was: %s' % response,)\n raise e\n\n if callback:\n callback(res)\n else:\n return res", "title": "" }, { "docid": "2288991375ca8ce99bf212fce4b6bb4c", "score": "0.52666074", "text": "def _parse_response(proto, response):\n try:\n content = proto()\n content.ParseFromString(response.content)\n return content\n except (DecodeError, AttributeError):\n LOGGER.error('Validator response was not parsable: %s', response)\n raise errors.ValidatorResponseInvalid()", "title": "" }, { "docid": "08b109ce5cf25e851beadae6ebee947e", "score": "0.52663994", "text": "def parse_headers(request):\n data = dict(request.headers.items())\n ip = request.remote_ip\n if \"Cookie\" in data.keys():\n data[\"Cookie\"] = dict([i.split(\"=\") for i in data[\"Cookie\"].split(\"; \")])\n data[\"cookies_initialized\"] = True\n else:\n data[\"Cookie\"] = dict()\n data[\"cookies_initialized\"] = False\n if \"user_public_data\" in data[\"Cookie\"].keys():\n data[\"Cookie\"][\"user_public_data\"] = dict(\n [i.split(\"|:\") for i in data[\"Cookie\"][\"user_public_data\"].split(\"|%\")]\n )\n data[\"Remote_ip\"] = ip\n data.update(parse_user_agent(data[\"User-Agent\"]))\n return data", "title": "" }, { "docid": "132f6f7d72f20c63f54f190723d8f774", "score": "0.5266072", "text": "def make_response(self, rv):\n if rv is None:\n raise ValueError('View function did not return a response')\n if isinstance(rv, self.response_class):\n return rv\n if isinstance(rv, basestring):\n return self.response_class(rv)\n if isinstance(rv, tuple):\n return self.response_class(*rv)\n return self.response_class.force_type(rv, request.environ)", "title": "" }, { "docid": "86c7b6ae807df7858eefd4c6b8eb1d4b", "score": "0.5261285", "text": "def parse(cls, headers):\r\n h = cls()\r\n for line in headers.splitlines():\r\n if line:\r\n h.parse_line(line)\r\n return h", "title": "" }, { "docid": "acf01418b3bcb5f473b3c88f68a5e674", "score": "0.5260691", "text": "def copy(self):\n\n obj = Response()\n\n copy_keys = ('status', 'code', 'head', 'body', 'total_time',\n 'connect_time', 'name_lookup_time',\n 'url', 'charset', '_unicode_body')\n for key in copy_keys:\n setattr(obj, key, getattr(self, key))\n\n obj.headers = copy(self.headers)\n # TODO: Maybe, deepcopy?\n obj.cookies = copy(self.cookies)\n\n return obj", "title": "" }, { "docid": "2d56fd1e50d5bc6a152c1a8d087f85aa", "score": "0.5260621", "text": "def from_response(cls, response):\n\n entry = Entry()\n entry = cls.get_meta(entry, Model.from_response(response))\n entry.name = response.get('name')\n entry.value = response.get('value')\n entry.username = response.get('username')\n entry.url = response.get('url')\n entry.icon = response.get('icon')\n group = response.get('group')\n if type(group) == dict:\n entry.group = Group.from_response(group)\n pass\n else:\n entry.group = group\n return entry", "title": "" }, { "docid": "589ddf59a55e31b8240881cdde19b1a5", "score": "0.52571595", "text": "def parseJson(self, response):\n 
data = json.loads(response)\n return data", "title": "" }, { "docid": "c1471b6f090f65ef7b3cde3e2dc1d6f7", "score": "0.52490264", "text": "def _handle_response(self, r):\n if r.headers['content-type'].split(';')[0] == 'application/json':\n return r.json['data'] # json.loads(r.text)['data']\n else:\n raise AsanaError('Did not receive json from api: %s' % str(r))", "title": "" }, { "docid": "456a3cb3a21ea83bfb8ab9fb49a088c0", "score": "0.52489805", "text": "def _postprocess_response(self, result):\n data = self._parse_returned_data(result)\n\n # Cutpasted from the base implementation.\n if 'stdout' in data and 'stdout_lines' not in data:\n data['stdout_lines'] = (data['stdout'] or u'').splitlines()\n if 'stderr' in data and 'stderr_lines' not in data:\n data['stderr_lines'] = (data['stderr'] or u'').splitlines()\n\n return data", "title": "" }, { "docid": "7101fb0a764ac82de1bc873b2333f3eb", "score": "0.52463067", "text": "def _parse_response(self, resp):\n # The response is a tuple where:\n # resp[0] is a python dictionary with information about the\n # http side of things (aka the transport) such as the http status.\n # resp[1] is a json string response from the actual rpc call itself.\n transport = resp[0]\n rpc_json = resp[1]\n _LOGGER_.info(\"response - transport %s\", pprint.pformat(transport))\n _LOGGER_.info(\"response - rpc_json %s\", rpc_json)\n\n rpc_obj = json.loads(rpc_json)\n # If the response shows success...\n if 200 <= transport.status < 300:\n return rpc_obj\n\n # Otherwise, something went wrong.\n _LOGGER_.error(\"Problem with the rpc call:\")\n _LOGGER_.error(\" rpc: %s\", pprint.pformat(rpc_obj))\n _LOGGER_.error(\" transport: %s\", pprint.pformat(transport))\n\n raise ValueError((\"problem with rpc call\", rpc_obj, transport))", "title": "" }, { "docid": "3a3949e8ed1936faa706a108949eefa9", "score": "0.52458775", "text": "def _add_resposne_headers(self, django_response, devil_response):\n\n try:\n headers = devil_response.headers\n except AttributeError:\n # ok, there was no devil_response\n pass\n else:\n for k, v in headers.items():\n django_response[k] = v\n return django_response", "title": "" }, { "docid": "4e67a721faa0d6040228c87c6c2a70c2", "score": "0.52426875", "text": "def build_response(self, request, method, data):\n content_type = data.content_type\n response = webob.Response(status=data.status,\n headerlist=data.headers)\n response.content_type = content_type\n\n if method == 'HEAD':\n return response\n\n body = data.body\n if isinstance(body, str):\n response.text = body\n elif isinstance(body, bytes):\n response.body = body\n elif _json_content_type(content_type):\n import json\n response.body = json.dumps(body).encode(\"utf-8\")\n else:\n raise TypeError('bad response', body, content_type)\n\n return response", "title": "" }, { "docid": "a16fa34ced5b550c41ee03a74bbfefae", "score": "0.5237254", "text": "def shb(response):\r\n if py3k:\r\n h = response.getheaders()\r\n else:\r\n h = []\r\n key, value = None, None\r\n for line in response.msg.headers:\r\n if line:\r\n if line[0] in \" \\t\":\r\n value += line.strip()\r\n else:\r\n if key and value:\r\n h.append((key, value))\r\n key, value = line.split(\":\", 1)\r\n key = key.strip()\r\n value = value.strip()\r\n if key and value:\r\n h.append((key, value))\r\n\r\n return \"%s %s\" % (response.status, response.reason), h, response.read()", "title": "" }, { "docid": "bdd49471b1b5022e957f9877c64e895a", "score": "0.5228671", "text": "def ObjectMetadataFromHeaders(headers):\n obj_metadata = 
apitools_messages.Object()\n for header, value in headers.items():\n if CACHE_CONTROL_REGEX.match(header):\n obj_metadata.cacheControl = value.strip()\n elif CONTENT_DISPOSITION_REGEX.match(header):\n obj_metadata.contentDisposition = value.strip()\n elif CONTENT_ENCODING_REGEX.match(header):\n obj_metadata.contentEncoding = value.strip()\n elif CONTENT_MD5_REGEX.match(header):\n obj_metadata.md5Hash = value.strip()\n elif CONTENT_LANGUAGE_REGEX.match(header):\n obj_metadata.contentLanguage = value.strip()\n elif CONTENT_TYPE_REGEX.match(header):\n if not value:\n obj_metadata.contentType = DEFAULT_CONTENT_TYPE\n else:\n obj_metadata.contentType = value.strip()\n elif GOOG_API_VERSION_REGEX.match(header):\n # API version is only relevant for XML, ignore and rely on the XML API\n # to add the appropriate version.\n continue\n elif GOOG_GENERATION_MATCH_REGEX.match(header):\n # Preconditions are handled elsewhere, but allow these headers through.\n continue\n elif GOOG_METAGENERATION_MATCH_REGEX.match(header):\n # Preconditions are handled elsewhere, but allow these headers through.\n continue\n else:\n custom_goog_metadata_match = CUSTOM_GOOG_METADATA_REGEX.match(header)\n custom_amz_metadata_match = CUSTOM_AMZ_METADATA_REGEX.match(header)\n custom_amz_header_match = CUSTOM_AMZ_HEADER_REGEX.match(header)\n header_key = None\n if custom_goog_metadata_match:\n header_key = custom_goog_metadata_match.group('header_key')\n elif custom_amz_metadata_match:\n header_key = custom_amz_metadata_match.group('header_key')\n elif custom_amz_header_match:\n # If we got here we are guaranteed by the prior statement that this is\n # not an x-amz-meta- header.\n header_key = (S3_HEADER_PREFIX +\n custom_amz_header_match.group('header_key'))\n if header_key:\n if header_key.lower() == 'x-goog-content-language':\n # Work around content-language being inserted into custom metadata.\n continue\n if not obj_metadata.metadata:\n obj_metadata.metadata = apitools_messages.Object.MetadataValue()\n if not obj_metadata.metadata.additionalProperties:\n obj_metadata.metadata.additionalProperties = []\n obj_metadata.metadata.additionalProperties.append(\n apitools_messages.Object.MetadataValue.AdditionalProperty(\n key=header_key, value=value))\n else:\n raise ArgumentException('Invalid header specified: %s:%s' %\n (header, value))\n return obj_metadata", "title": "" } ]
6663362abf37edf3b58f7dace091d973
Pick from 20 subgraphs randomly some subgraphs. If n_subgraphs_requested = 1 use labeling_rate only else use n_subgraphs_requested directly return train_idx_random_selected
[ { "docid": "2f5250ef9f606bd59319b56e1b2f4a83", "score": "0.83010703", "text": "def sample_subgraphs_from_ppi(n_subgraphs_requested, seed=None):\n all_train_idx = np.arange(1, 21)\n set_seed(seed)\n random_train_idx = np.random.permutation(all_train_idx)\n train_idx_random_selected = random_train_idx[:n_subgraphs_requested]\n return train_idx_random_selected", "title": "" } ]
[ { "docid": "cf1e7da4ab1b4dacc0000d6abfa8ddd5", "score": "0.75647837", "text": "def get_subsampled_train_idx(graph_id, n_subgraphs_used, seed=None):\n train_indices_all = np.arange(1, 21)\n set_seed(seed)\n random_train_subgraph_idx = np.random.permutation(train_indices_all)\n selected_subgraphs = random_train_subgraph_idx[:n_subgraphs_used]\n # pdb.set_trace()\n assert len(selected_subgraphs) == n_subgraphs_used\n train_indices = []\n for train_graph_id in selected_subgraphs:\n train_graph_indx = np.where(graph_id == train_graph_id)[0]\n train_indices.extend(train_graph_indx)\n # pdb.set_trace()\n assert len(train_indices) >= 0\n return train_indices", "title": "" }, { "docid": "40e2699e6729805dabb9cf0b8b9d1f91", "score": "0.73422015", "text": "def _get_subgraphs_randomly(self, n_subgraphs, n_nodes_in_subgraph, **kwargs):\n\n subgraphs = []\n for s in range(n_subgraphs):\n sampled_nodes = random.sample(self.graph.nodes, n_nodes_in_subgraph)\n subgraphs.append(sampled_nodes)\n return subgraphs", "title": "" }, { "docid": "14e129e11ffa9f44833ffe9bd811b965", "score": "0.70967525", "text": "def subgraph_sampling(n_ver, edg_source, edg_target, max_ver):\n return libply_c.random_subgraph(n_ver)", "title": "" }, { "docid": "75139d8a50b27c8d37e24e5b2d6afbe4", "score": "0.68340045", "text": "def _get_subgraphs_by_stapling(self, n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs):\n\n k_core_to_sample = kwargs.pop('k_core_to_sample', -1)\n k_hops = kwargs.pop('k_hops', -1)\n\n subgraphs = []\n original_node_ids = self.graph.nodes\n\n for s in range(n_subgraphs):\n curr_subgraph = []\n all_cc_start_nodes = []\n\n for c in range(n_connected_components):\n con_component = self.generate_subgraph(n_nodes_in_subgraph, **kwargs)\n graph_root_node = random.sample(original_node_ids, 1)[0]\n\n if c > 0 and k_hops != -1:\n # make sure to sample the next node k hops away from the previously sampled root node\n # and check to see that the selected start node is k hops away from all previous start nodes\n n_hops_paths = nx.single_source_shortest_path_length(self.graph, cc_root_node, cutoff=k_hops)\n candidate_nodes = [node for node,length in n_hops_paths.items()]\n random.shuffle(candidate_nodes)\n candidate_nodes = [cand for cand in candidate_nodes if self.is_k_hops_from_all_cc(cand, all_cc_start_nodes, k_hops)]\n if len(candidate_nodes) == 0:\n raise Exception('There are no nodes that are k hops away from all other CC start nodes.')\n cc_root_node = random.sample(candidate_nodes, 1)[0]\n all_cc_start_nodes.append(cc_root_node) # keep track of start nodes across CCs\n elif k_core_to_sample != -1:\n k_core_dict = nx.core_number(self.graph)\n nodes_with_core_number = [node for node, core_num in k_core_dict.items()if core_num == k_core_to_sample]\n cc_root_node = random.sample(nodes_with_core_number, 1)[0]\n all_cc_start_nodes.append(cc_root_node) # keep track of start nodes across CCs\n else: # if we're not trying to sample each CC k hops away OR if it's the first time we sample a CC, \n # just randomly sample a start node from the graph\n #randomly sample root node where the CC will be attached\n cc_node_ids = list(range(len(self.graph.nodes), len(self.graph.nodes) + n_nodes_in_subgraph ))\n cc_root_node = random.sample(cc_node_ids, 1)[0]\n all_cc_start_nodes.append(cc_root_node) # keep track of start nodes across CCs\n\n #combine the generated subgraph & the graph\n joined_graph = nx.disjoint_union(self.graph, con_component)\n\n # add an edge between one node in the graph & subgraph\n 
joined_graph.add_edge(graph_root_node, cc_root_node)\n self.graph = joined_graph.copy()\n\n #add connected component to IDs for current subgraph\n curr_subgraph.extend(cc_node_ids)\n\n subgraphs.append(curr_subgraph)\n\n return subgraphs", "title": "" }, { "docid": "a1e0c6b5fa2afe78138893e077a69ca4", "score": "0.67223537", "text": "def _get_subgraphs_by_coreness(self, n_subgraphs, n_nodes_in_subgraph, n_connected_components, remove_edges=False, **kwargs):\n\n subgraphs = []\n\n k_core_dict = nx.core_number(self.graph) \n nodes_per_k_core = Counter(list(k_core_dict.values()))\n print(nodes_per_k_core)\n \n nodes_with_core_number = defaultdict()\n for n, k in k_core_dict.items():\n if k in nodes_with_core_number: nodes_with_core_number[k].append(n)\n else: nodes_with_core_number[k] = [n]\n\n for k in nodes_with_core_number:\n\n # Get nodes with core number k that have not been sampled already\n nodes_with_k_cores = nodes_with_core_number[k]\n \n # Sample n_subgraphs subgraphs per core number\n for s in range(n_subgraphs):\n\n curr_subgraph = []\n for c in range(n_connected_components):\n if len(nodes_with_k_cores) < n_nodes_in_subgraph: break\n\n con_component = self.generate_subgraph(n_nodes_in_subgraph, **kwargs)\n cc_node_ids = random.sample(nodes_with_k_cores, n_nodes_in_subgraph)\n\n # Relabel subgraph to have the same ids as the randomly sampled nodes\n cc_id_mapping = {curr_id:new_id for curr_id, new_id in zip(con_component.nodes, cc_node_ids)}\n nx.relabel_nodes(con_component, cc_id_mapping, copy=False)\n \n if remove_edges:\n # Remove the existing edges between nodes in the planted subgraph (except the ones to be added)\n self.graph.remove_edges_from(self.graph.subgraph(cc_node_ids).edges)\n\n # Combine the base graph & subgraph. Nodes with the same ID are merged\n joined_graph = nx.compose(self.graph, con_component) #NOTE: attributes from subgraph take precedent over attributes from self.graph\n self.graph = joined_graph.copy()\n \n curr_subgraph.extend(cc_node_ids) # add nodes to subgraph\n nodes_with_k_cores = list(set(nodes_with_k_cores).difference(set(cc_node_ids)))\n nodes_with_core_number[k] = nodes_with_k_cores\n \n if len(curr_subgraph) > 0: subgraphs.append(curr_subgraph)\n\n return subgraphs", "title": "" }, { "docid": "ca145bf3427e6cc3bf9422f8fa11c9d6", "score": "0.6565781", "text": "def _get_subgraphs_by_k_hops(self, n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs):\n\n diameter = nx.diameter(self.graph)\n k_hops_range = [int(diameter * k) for k in config.K_HOPS_RANGE]\n p_range = [float(p) for p in config.BA_P_RANGE]\n cc_range = [int(cc) for cc in config.CC_RANGE]\n shuffle_cc = False\n if n_connected_components == None: shuffle_cc = True\n print(\"DIAMETER: \", diameter)\n print(\"K-HOPS RANGE: \", k_hops_range)\n print(\"N CONNECTED COMPONENTS: \", n_connected_components)\n\n subgraphs = []\n original_node_ids = self.graph.nodes\n\n for s in range(n_subgraphs):\n curr_subgraph = []\n seen_nodes = [] \n all_cc_start_nodes = []\n k_hops = random.sample(k_hops_range, 1)[0]\n p = p_range[k_hops_range.index(k_hops)]\n kwargs['p'] = p\n\n # Randomly select a node from base graph\n graph_root_node = random.sample(original_node_ids, 1)[0]\n seen_nodes.append(graph_root_node) \n cc_node_ids, cc_root_node = self.staple_component_to_graph(n_nodes_in_subgraph, graph_root_node, **kwargs)\n curr_subgraph.extend(cc_node_ids)\n seen_nodes.extend(cc_node_ids)\n all_cc_start_nodes.append(cc_root_node) # keep track of start nodes across CCs\n \n # Get nodes that are 
k hops away\n n_hops_paths = nx.single_source_shortest_path_length(self.graph, graph_root_node, cutoff=k_hops)\n candidate_nodes = [node for node in n_hops_paths if self.is_k_hops_from_all_cc(node, all_cc_start_nodes, k_hops) and node not in seen_nodes]\n\n if len(candidate_nodes) == 0: candidate_nodes = [node for node, length in n_hops_paths.items() if length == max(n_hops_paths.values())]\n if shuffle_cc: n_connected_components = random.sample(cc_range, 1)[0]\n\n for c in range(n_connected_components - 1):\n new_graph_root_node = random.sample(candidate_nodes, 1)[0] # choose a random node that is k hops away\n seen_nodes.append(new_graph_root_node) \n cc_node_ids, cc_root_node = self.staple_component_to_graph(n_nodes_in_subgraph, new_graph_root_node, **kwargs)\n curr_subgraph.extend(cc_node_ids)\n seen_nodes.extend(cc_node_ids)\n all_cc_start_nodes.append(cc_root_node) # keep track of start nodes across CCs\n if len(curr_subgraph) >= n_nodes_in_subgraph * n_connected_components: \n actual_num_cc = nx.number_connected_components(self.graph.subgraph(curr_subgraph))\n if shuffle_cc and actual_num_cc in config.CC_RANGE: subgraphs.append(curr_subgraph)\n elif not shuffle_cc and actual_num_cc > 1: subgraphs.append(curr_subgraph) # must have >1 CC\n\n # Validate that subgraphs have the desired number of CCs\n validated_subgraphs = []\n for s in subgraphs:\n actual_num_cc = nx.number_connected_components(self.graph.subgraph(s))\n if shuffle_cc and actual_num_cc in config.CC_RANGE: validated_subgraphs.append(s)\n elif not shuffle_cc and actual_num_cc > 1: validated_subgraphs.append(s) # must have >1 CC\n print(len(validated_subgraphs))\n return validated_subgraphs", "title": "" }, { "docid": "eb2bb43579d693039f9391af45b62e8a", "score": "0.654548", "text": "def generate_mask(n_subgraphs):\n \n idx = set(range(n_subgraphs))\n train_mask = list(random.sample(idx, int(len(idx) * 0.8))) \n idx = idx.difference(set(train_mask)) \n val_mask = list(random.sample(idx, len(idx) // 2)) \n idx = idx.difference(set(val_mask)) \n test_mask = list(random.sample(idx, len(idx))) \n mask = [] \n for i in range(n_subgraphs):\n if i in train_mask: mask.append(0) \n elif i in val_mask: mask.append(1) \n elif i in test_mask: mask.append(2) \n return mask", "title": "" }, { "docid": "efa785bf41fa9f7736f1c126c49c7aef", "score": "0.6527091", "text": "def _get_subgraphs_by_planting(self, n_subgraphs, n_nodes_in_subgraph, n_connected_components, remove_edges=False, **kwargs):\n \n k_core_to_sample = kwargs.pop('k_core_to_sample', -1)\n\n subgraphs = []\n for s in range(n_subgraphs):\n curr_subgraph = []\n for c in range(n_connected_components):\n\n con_component = self.generate_subgraph(n_nodes_in_subgraph, **kwargs)\n\n #randomly sample which nodes from the base graph will be the subgraph\n if k_core_to_sample != -1:\n k_core_dict = nx.core_number(self.graph)\n nodes_with_core_number = [node for node, core_num in k_core_dict.items()if core_num == k_core_to_sample]\n cc_node_ids = random.sample(nodes_with_core_number, n_nodes_in_subgraph)\n else:\n cc_node_ids = random.sample(self.graph.nodes, n_nodes_in_subgraph)\n\n #relabel subgraph to have the same ids as the randomly sampled nodes\n cc_id_mapping = {curr_id:new_id for curr_id, new_id in zip(con_component.nodes, cc_node_ids)}\n nx.relabel_nodes(con_component, cc_id_mapping, copy=False)\n \n if remove_edges:\n #remove the existing edges between nodes in the planted subgraph (except the ones to be added)\n 
self.graph.remove_edges_from(self.graph.subgraph(cc_node_ids).edges)\n\n # combine the base graph & subgraph. Nodes with the same ID are merged\n joined_graph = nx.compose(self.graph, con_component) #NOTE: attributes from subgraph take precedent over attributes from self.graph\n\n self.graph = joined_graph.copy()\n curr_subgraph.extend(cc_node_ids)\n subgraphs.append(curr_subgraph)\n\n return subgraphs", "title": "" }, { "docid": "8beb087ef50d7d4535a787b3670d1ad3", "score": "0.62954503", "text": "def _random_subset(repeated_nodes, k):\n targets = set()\n while len(targets) < k:\n x = random.choice(repeated_nodes)\n targets.add(x)\n\n return targets", "title": "" }, { "docid": "a7295c94a26dc1c8c39add398b0581e8", "score": "0.6197325", "text": "def nx_random_subgraph(g, n):\n tgt_nodes = np.random.permutation(len(g.nodes))[:n]\n gs = nx.subgraph(g, tgt_nodes)\n for n in gs.nodes:\n gs.nodes[n]['old_id'] = n\n mapping = {k: i for i,k in enumerate(tgt_nodes)}\n return nx.relabel_nodes(gs, mapping)", "title": "" }, { "docid": "43001418c6fa2039fd78e86ad1945406", "score": "0.61930496", "text": "def _choose_top_k_sub_graphs(cost_augmented_subgraph_list, k):\n cost_augmented_subgraph_list.sort(key=_ret_cost)\n if k <= len(cost_augmented_subgraph_list):\n return cost_augmented_subgraph_list[:k]\n else:\n return cost_augmented_subgraph_list", "title": "" }, { "docid": "7e1c8195002ec3564d12226e2ef64574", "score": "0.6102954", "text": "def random_subset(dataset,label,size,ratio = (1,1,2),RT = True):\n np.random.seed(12345)\n if (RT ==False):\n indx = set(np.random.choice(dataset.index,replace=False,size = size))\n rest_indx = set(dataset.index) - indx\n ret = dataset.loc[np.sort(list(indx)),:]\n rest_data = dataset.loc[np.sort(list(rest_indx)),:]\n return ret,rest_data\n n_u,n_d,n_s = int(ratio[0]/sum(ratio)*size),int(ratio[1]/sum(ratio)*size),int(ratio[2]/sum(ratio)*size )\n xin = dataset[\"Y_spread\"].value_counts()\n num_u,num_d,num_s = xin.loc[\"upward\"],xin.loc[\"downward\"],xin.loc[\"stationary\"]\n r_u,r_d,r_s = False,False,False\n if (num_u < n_u):\n r_u = True\n if (num_d < n_d):\n r_d = True\n if (num_s < n_s):\n r_s = True\n \n ind_u = list(dataset[dataset[label]==\"upward\"].index)\n ind_d = list(dataset[dataset[label]==\"downward\"].index)\n ind_s = list(dataset[dataset[label]==\"stationary\"].index)\n indice = np.array([])\n indice = np.append(indice,np.random.choice(ind_u,replace=r_u,size = n_u))\n indice = np.append(indice,np.random.choice(ind_d,replace=r_d,size = n_d))\n indice = np.append(indice,np.random.choice(ind_s,replace=r_s,size = n_s))\n temp = set(np.sort(indice).astype(int))\n rest_ind = set(dataset.index) - temp\n ret = dataset.loc[np.sort(list(temp)),:]\n rest_data = dataset.loc[np.sort(list(rest_ind)),:]\n return ret,rest_data", "title": "" }, { "docid": "ba9311b318be4790a37393c39e80da1c", "score": "0.6089497", "text": "def generate_and_add_subgraphs(self, **kwargs):\n\n n_subgraphs = kwargs.pop('n_subgraphs', 3)\n n_nodes_in_subgraph = kwargs.pop('n_subgraph_nodes', 5)\n n_connected_components = kwargs.pop('n_connected_components', 1)\n modify_graph_for_properties = kwargs.pop('modify_graph_for_properties', False)\n desired_property = kwargs.get('desired_property', None)\n\n if self.subgraph_type == 'random':\n subgraphs = self._get_subgraphs_randomly(n_subgraphs, n_nodes_in_subgraph, **kwargs)\n elif self.subgraph_type == 'bfs':\n subgraphs = self._get_subgraphs_by_bfs(n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs)\n elif self.subgraph_type == 
'staple':\n subgraphs = self._get_subgraphs_by_k_hops(n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs)\n elif self.subgraph_type == 'plant':\n if desired_property == 'coreness':\n subgraphs = self._get_subgraphs_by_coreness(n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs)\n else:\n subgraphs = self._get_subgraphs_by_planting(n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs)\n else:\n raise Exception('The subgraph generation you specified is not implemented')\n\n if modify_graph_for_properties:\n self._modify_graph_for_desired_subgraph_properties(subgraphs, **kwargs) \n self._relabel_nodes(subgraphs, **kwargs) \n\n return subgraphs", "title": "" }, { "docid": "8b0b37093e7add06dc9a6271e84c1cb9", "score": "0.6069354", "text": "def select(self):\n edges = self.edges\n nodes = list(range(self.N))\n select = []\n selectedNodes = {i: 0 for i in range(self.N)}\n\n if self.K == self.N - 1: #Well-mixed graph\n permutation = np.random.permutation(self.N)\n selectedNodes = {i: 1 for i in range(self.N)}\n if self.N % 2 == 1:\n extraNode = np.random.randint(0, self.N)\n while extraNode == permutation[self.N - 1]:\n extraNode = np.random.randint(0, self.N)\n permutation = np.append(permutation, extraNode)\n selectedNodes[extraNode] += 1\n\n select = permutation.reshape((int(len(permutation)/2), 2))\n else:\n while edges: # Loop when edges is not empty\n i, j = edges[np.random.randint(0, len(edges))]\n # print(\"selected nodes:\", i, j)\n select.append((i, j))\n nodes.remove(i)\n nodes.remove(j)\n selectedNodes[i] += 1\n selectedNodes[j] += 1\n # print(\"Remaining nodes:\", nodes)\n edges = [(a, b) for (a, b) in edges if (a != i) and (a != j)\n and (b != i) and (b != j)]\n # print(\"after removal\", edges)\n\n while nodes:\n v = nodes.pop(np.random.randint(0, len(nodes)))\n v_edges = [(i, j) for (i, j) in self.edges if i == v or j == v]\n i, j = v_edges[np.random.randint(len(v_edges))]\n select.append((i, j))\n selectedNodes[i] += 1\n selectedNodes[j] += 1\n\n # print(\"Number of each nodes selected:\", selectedNodes)\n self.selectedNodes = selectedNodes\n return select", "title": "" }, { "docid": "9b6427b39229938e6006aea17719b6b0", "score": "0.5948628", "text": "def pick_random(number, graph, random_nodes, possible_nodes):\n if number == 0: #base case\n return random_nodes\n else:\n found = choice(list(graph.nodes()))\n #neighbors = [found] \n #possible_nodes.difference_update(neighbors) #so that we don't select the same node twice\n random_nodes.append(found)\n pick_random(number-1, graph, random_nodes, possible_nodes )\n return random_nodes", "title": "" }, { "docid": "b37e30de3dbd61e846a12ef3ef1951cb", "score": "0.592655", "text": "def random_sample_sub_objects(\n self,\n parent_count: int,\n subsamplers: SubSamplers,\n subrecs: SubRecs,\n n: int,\n **parameters: int\n ):", "title": "" }, { "docid": "4a71ebc2cd5a050eba71108e1d7db025", "score": "0.58636826", "text": "def subset_generation_by_size():\n subset_size = input(\"enter the size of the subset to generate\\t\")\n while str(subset_size) != \"\":\n data, target = dl.generate_new_subset(\n [random.randint(0,59999) for i in range(int(subset_size))], \n dl.datamain, dl.targets)\n\t\n disp = (True if \"y\" in input(\"display values chosen?\\t\") else False)\n if opt[\"model.conv\"]:\n output = model(data.view(-1,1,28,28)).item()\n else:\n output = model(data.view(-1, 784)).item()\n print(f\"deepset mnist sum output {output}\")\n print(f\"target value {target}\")\n print(f\"percent error is 
{percent_err(target, output)} %\")\n if disp:\n win.ImshowWindow(data, output, \"percent err: {:0.2f} target was {}, output: {:0.2f}\".format(percent_err(target,output),target, output )).show()\n subset_size = input(\"enter the size of the subset to generate\\t\")", "title": "" }, { "docid": "3c1aa7d7417cbb888a58e94c2ce44a0d", "score": "0.5839441", "text": "def select_subgraph(self, graph):\n raise NotImplementedError", "title": "" }, { "docid": "7e8db1526e7f05f52a2d9db5dec9d041", "score": "0.578102", "text": "def sample_edges(G, num_edges):\n total_edge = G.number_of_edges()\n edge_list = list(G.edges())\n if (num_edges == -1 or num_edges>=G.number_of_edges()):\n return G\n else:\n sampling = list(np.random.choice(total_edge,num_edges))\n sampled_edges = []\n for i in sampling:\n sampled_edges.append(edge_list[i])\n new_G = G.edge_subgraph(sampled_edges)\n return new_G", "title": "" }, { "docid": "babdd78ec96b9a0a03a20b21304d7cb3", "score": "0.5772012", "text": "def generate_rwr_subgraph(dgl_graph, subgraph_size):\n all_idx = list(range(dgl_graph.number_of_nodes()))\n reduced_size = subgraph_size - 1\n traces = dgl.contrib.sampling.random_walk_with_restart(dgl_graph, all_idx, restart_prob=1, max_nodes_per_seed=subgraph_size*3)\n subv = []\n\n for i,trace in enumerate(traces):\n subv.append(torch.unique(torch.cat(trace),sorted=False).tolist())\n retry_time = 0\n while len(subv[i]) < reduced_size:\n cur_trace = dgl.contrib.sampling.random_walk_with_restart(dgl_graph, [i], restart_prob=0.9, max_nodes_per_seed=subgraph_size*5)\n subv[i] = torch.unique(torch.cat(cur_trace[0]),sorted=False).tolist()\n retry_time += 1\n if (len(subv[i]) <= 2) and (retry_time >10):\n subv[i] = (subv[i] * reduced_size)\n subv[i] = subv[i][:reduced_size]\n subv[i].append(i)\n\n return subv", "title": "" }, { "docid": "d4834c97dbe6680349ed5a3cbafa75d7", "score": "0.5725581", "text": "def select_next_batch_with_superpixels(\r\n model,\r\n training_set,\r\n num_classes,\r\n selection_count,\r\n dataset_name,\r\n superpixel_dir,\r\n img_size,\r\n):\r\n entropy_scores = []\r\n superpixel_masks = []\r\n image_paths = []\r\n\r\n \"\"\"\r\n Detailed steps:\r\n 1) Perform Monte Carlo dropout inference generation - done\r\n 2) Calculate entropy of superpixels - done\r\n 3) Pre-fetch superpixel maps - done\r\n 4) Sort by entropy scores - done\r\n 5) Actively select selection_count_spx num of superpixels.-Frame this logic. Take a look at other repos too.- done\r\n \"\"\"\r\n\r\n images = training_set.all_train_paths\r\n dataset_info = InfoLoader(\r\n dataset_name, superpixel_dir, img_size, images, mode=\"val\"\r\n )\r\n # image_subset_from_active_loader = training_set.get_image_subset()\r\n\r\n for img_idx, img_name in tqdm(enumerate(images)):\r\n\r\n sample = dataset_info[img_idx]\r\n image = sample[\"image\"]\r\n\r\n # Step 1: Monte Carlo dropout inference generation\r\n probabilities = monte_carlo_inference(model, num_classes, image)\r\n\r\n # Step 2: Entropy calculation\r\n entropy_map = torch.zeros(\r\n (constants.IMAGE_WIDTH * constants.IMAGE_HEIGHT)\r\n ).type(torch.FloatTensor)\r\n for c in range(num_classes):\r\n # TODO: subtracted from zeros? Is that a problem? We'll see!\r\n entropy_map = entropy_map - (\r\n probabilities[:, c] * torch.log2(probabilities[:, c] + 1e-12)\r\n )\r\n entropy_map = (\r\n (entropy_map.view(constants.IMAGE_HEIGHT, constants.IMAGE_WIDTH))\r\n .cpu()\r\n .numpy()\r\n )\r\n\r\n # TODO: Not sure if this should be done. 
Ask team.\r\n \"\"\"\r\n idx_img_subset = image_subset_from_active_loader.index(img_name)\r\n mask = training_set.img_to_pixel_map[idx_img_subset]\r\n entropy_map[mask == 0] = 0\r\n \"\"\"\r\n # Accumulate entropy scores for superpixels\r\n superpixels = sample[\"superpixel\"]\r\n # TODO: Do we need to resize the superpixels image here? coz it's already 256, 256? Add a check, maybe?\r\n superpixels = np.asarray(\r\n Image.fromarray(superpixels).resize(\r\n (constants.IMAGE_WIDTH, constants.IMAGE_HEIGHT), Image.NEAREST\r\n )\r\n )\r\n unique_superpixels_as_list = np.unique(superpixels).tolist()\r\n score_per_superpixel = defaultdict(int)\r\n for spx_id in unique_superpixels_as_list:\r\n spx_mean = entropy_map[superpixels == spx_id].mean()\r\n score_per_superpixel[spx_id] = spx_mean\r\n entropy_scores.append(score_per_superpixel)\r\n superpixel_masks.append(sample[\"superpixel\"])\r\n\r\n image_paths.append(images[img_idx])\r\n\r\n # Step 3: Pre-fetch superpixel maps and other related info\r\n superpixel_info = []\r\n superpixel_scores_expanded = []\r\n original_image_indices = [\r\n training_set.all_train_paths.index(im_path) for im_path in image_paths\r\n ]\r\n for score_idx in range(len(entropy_scores)):\r\n superpixel_indices = list(entropy_scores[score_idx].keys())\r\n for (\r\n superpixel_idx\r\n ) in (\r\n superpixel_indices\r\n ): # TODO: Check if you need information at superpixel level or just\r\n # collecting all superpixels for an image is fine. Might be helpful if we need to change the\r\n # uncertainty measure.\r\n superpixel_info.append(\r\n (original_image_indices[score_idx], score_idx, superpixel_idx)\r\n )\r\n superpixel_scores_expanded.append(entropy_scores[score_idx][superpixel_idx])\r\n\r\n # Step 4: Sort by entropy scores\r\n _sorted_scores = np.array(\r\n list(\r\n list(\r\n zip(\r\n *sorted(\r\n zip(superpixel_info, superpixel_scores_expanded),\r\n key=lambda x: x[1],\r\n reverse=True,\r\n )\r\n )\r\n )[0]\r\n )\r\n )\r\n sorted_scores = np.zeros(\r\n (_sorted_scores.shape[0], _sorted_scores.shape[1]), dtype=np.int32\r\n )\r\n sorted_scores[:, 0 : _sorted_scores.shape[1]] = _sorted_scores\r\n\r\n # Step 5: Active selection of superpixels based on sorted (in descending order) entropy scores\r\n selected_regions = OrderedDict()\r\n image_superpixels = defaultdict(list)\r\n total_pixels_selected = 0\r\n ctr = 0\r\n\r\n while (\r\n total_pixels_selected\r\n < selection_count * constants.IMAGE_HEIGHT * constants.IMAGE_WIDTH\r\n and ctr < sorted_scores.shape[0]\r\n ):\r\n # to prevent selection of the same superpixels stored over subsequent iterations\r\n if (\r\n sorted_scores[ctr, 2]\r\n not in training_set.image_superpixels[image_paths[sorted_scores[ctr, 1]]]\r\n ):\r\n winner_img_score_idx, winner_spx_idx = (\r\n sorted_scores[ctr, 1],\r\n sorted_scores[ctr, 2],\r\n )\r\n mask = (superpixel_masks[winner_img_score_idx] == winner_spx_idx).astype(\r\n np.uint8\r\n )\r\n if image_paths[winner_img_score_idx] in selected_regions:\r\n selected_regions[image_paths[winner_img_score_idx]] = (\r\n selected_regions[image_paths[winner_img_score_idx]] | mask\r\n )\r\n else:\r\n selected_regions[image_paths[winner_img_score_idx]] = mask\r\n\r\n image_superpixels[image_paths[winner_img_score_idx]].append(winner_spx_idx)\r\n valid_pixels = mask.sum()\r\n total_pixels_selected += valid_pixels\r\n\r\n ctr += 1\r\n\r\n print(\r\n \"Selected\",\r\n total_pixels_selected / (constants.IMAGE_WIDTH * constants.IMAGE_HEIGHT),\r\n \"images\",\r\n )\r\n\r\n # image_subset gets updated with the new 
image_paths here\r\n training_set.expand_training_set(selected_regions, image_superpixels)", "title": "" }, { "docid": "fee830f32c5fdbae97bd85a503b73025", "score": "0.5717353", "text": "def select_for_labeling(self, predictions, autoannotations):\n initial_ids = {\n prediction['id'] for prediction in predictions\n }\n autoannotation_ids = {\n autoannotation['id'] for autoannotation in autoannotations\n }\n remaining_ids = initial_ids - autoannotation_ids\n selections = random.sample(\n remaining_ids, min(self.max_selections, len(remaining_ids))\n )\n return selections", "title": "" }, { "docid": "fe440b07e33829b0adb7f4c078e2f662", "score": "0.5639364", "text": "def active_sample(network, size):\n sample = random.sample(network.nodes(), size)\n subnet = network.subgraph(sample)\n result = total_ratio(subnet)\n return result", "title": "" }, { "docid": "4165beceae23a3e61d49a4f29fa690a3", "score": "0.56351167", "text": "def get_random_connected_subgraph(graph):\n # TODO add sparseness parameter\n\n while nx.is_connected(graph):\n edges = list(graph.edges)\n chosen = random.choice(edges)\n graph.remove_edge(*chosen)\n\n # Need to undo last removal because it disconnects the graph\n graph.add_edge(*chosen)\n\n return graph", "title": "" }, { "docid": "427014156d5b6d12ba5d309294127ff9", "score": "0.5609423", "text": "def random_hypergraph(size=50):\n # children = defaultdict(lambda: set())\n\n # complete_reference_set = range(0, size)\n reference_sets = defaultdict(lambda: set())\n enc = np.arange(2*size + 1)\n\n c = pydecode.ChartBuilder(enc, np.arange(10))\n used = set()\n\n c.init(enc[:size])\n\n for i in range(size):\n reference_sets[i] = set([i])\n\n nodes = range(size)\n for node in range(size):\n head_node = size + node\n node_a, node_b = random.sample(nodes, 2)\n if reference_sets[node_a] & reference_sets[node_b]:\n continue\n\n c.set_t(enc[head_node], enc[[node_a]], enc[[node_b]],\n labels=np.array([random.randint(0, 100)]))\n used.update([node_a, node_b])\n reference_sets[head_node] |= \\\n reference_sets[node_a] | reference_sets[node_b]\n nodes.append(head_node)\n unused = set(nodes) - used\n c.set_t(enc[2*size], enc[list(unused)])\n\n dp = c.finish()\n assert len(dp.nodes) > 0\n assert len(dp.edges) > 0\n return dp", "title": "" }, { "docid": "439d92514be36144e3e2b8e776a5ffc3", "score": "0.56020683", "text": "def select_indices(num_vertices, train_fraction, test_fraction = None):\n pivot = round(num_vertices * train_fraction)\n p = np.random.permutation(num_vertices)\n train_idxs = p[:pivot]\n if not test_fraction is None:\n pivot2 = round(num_vertices * (1 - test_fraction))\n test_idxs = p[pivot2:]\n else:\n test_idxs = p[pivot:]\n\n print(f\"\\n\\ntrain number{len(train_idxs)}, test number {len(test_idxs)}\\n\\n\")\n\n return train_idxs, test_idxs", "title": "" }, { "docid": "fef175489f2eff5f716038552cabc0a4", "score": "0.54895806", "text": "def random_subsampling(k, table, class_index, predictive_indicies):\n\n naive_accuracies = []\n\n # for k times, randomize table and compute holdouts and classify with naive bayes\n for _ in range(k):\n # make a copy of the table to perform subsampling on so to not change the orginal table too much\n table_copy = copy.deepcopy(table)\n\n # randomize the table_copy and split 2:1\n train_set, test_set = compute_holdout_partitions(table_copy)\n\n # classify the test set by using naive bayes\n naive_predictions, naive_actuals = guassian_naive_bayes(train_set, test_set, class_index, predictive_indicies)\n #print(\"predictions\", naive_predictions)\n 
#print(\"actuals\", naive_actuals)\n\n\n # compute the accuracy of predictions for naive bayes\n naive_accuracy = compute_accuracy(train_set, class_index, naive_predictions, naive_actuals)\n \n # add accuacies to the lists keeping track of accuracies for regression and knn\n naive_accuracies.append(naive_accuracy)\n\n # now get the average accuracies for both classifiers\n avg_naive_acc = sum(naive_accuracies)/len(naive_accuracies)\n\n # get the standard error\n naive_std_err = 1 - avg_naive_acc\n return avg_naive_acc, naive_std_err", "title": "" }, { "docid": "0286aea94f6e980a7dbfdb79c2c995be", "score": "0.54853565", "text": "def sel_rand_posture(df, n, label):\n df_pos = pd.DataFrame()\n\n if label == 'random':\n length = len(df)\n idx = np.random.randint(0, length, size=n)\n df_pos = df.loc[idx, joints_names]\n\n elif label in all_categories:\n df_cat = df.loc[df['category'] == label, :]\n # df_cat.reset_index(drop=True, inplace=True)\n idx_cat = df_cat.index.values.tolist()\n length = len(idx_cat)\n idx_rand = np.random.randint(0, length, size=n)\n idx_list = []\n for i in idx_rand:\n idx_list.append(idx_cat[i])\n df_pos = df.loc[idx_list, joints_names]\n\n elif label == 'select':\n anim_id = sel_anim_id(df)\n df_cat = df.loc[df['id'] == anim_id, :]\n length = len(df_cat)\n idx = np.random.randint(df_cat['id'].index[0], df_cat['id'].index[0] + length - 1, size=n)\n df_pos = df.loc[idx, joints_names]\n\n else:\n print(\"The label was not found.\")\n\n if ~df_pos.empty:\n return df_pos.sort_index()", "title": "" }, { "docid": "6b30067e10562121d6a177c2469d5c86", "score": "0.54833144", "text": "def oversample(self, trees, ids_to_dataset, ratio=1):\n assert ratio >= 1\n\n print(\"Oversampling...\")\n\n initial_nb_train_examples = sum([1 if val == 'train' else 0\n for val in ids_to_dataset.values()])\n current_nb_train_examples = initial_nb_train_examples\n random.seed(a=64)\n\n print(f\"Before oversampling: {len(trees)} trees, {initial_nb_train_examples} train trees\")\n\n while current_nb_train_examples / initial_nb_train_examples < ratio:\n\n # Pick a tree in train set\n tree_number = random.randint(0, len(trees) - 1)\n news_id, label, node_features, edges = trees[tree_number]\n if ids_to_dataset[news_id] != 'train' or len(edges) < 50:\n continue\n\n # Modify it -> cut a part of it\n r = random.random()\n while r < 0.8:\n r = random.random()\n new_edges = edges[:int(r * len(edges))]\n\n last_node = max([e[0] for e in new_edges] + [e[1] for e in new_edges])\n new_node_features = node_features[:(last_node + 1)]\n\n # Slightly change the features\n for node_ft_array in new_node_features:\n for i in range(len(node_ft_array)):\n if node_ft_array[i] > 10: # basically, if it is not a categorical variable\n random_value = random.random()\n node_ft_array[i] += (random_value - 0.5) * 2 * (node_ft_array[i] / 50)\n\n # Add the modified version to the existing trees\n # The new id will be current_nb_train_examples+1000\n trees.append((current_nb_train_examples + 1000, label, new_node_features, new_edges))\n ids_to_dataset[current_nb_train_examples + 1000] = 'train'\n current_nb_train_examples += 1\n\n print(f\"After oversampling: {len(trees)} trees, {current_nb_train_examples} train trees\")", "title": "" }, { "docid": "005e7828672bf492c2e13055b9db8abc", "score": "0.5471089", "text": "def subgraph(user_id, graph_data, num_neg):\n \n edge_id = torch.nonzero(torch.tensor([1 if i in user_id else 0 for i in graph_data.edge_user]), as_tuple=True)[0]\n \n # picks out subset of edges\n edge_user = 
graph_data.edge_user[edge_id]\n edge_item = graph_data.edge_item[edge_id]\n\n # collect pre computed node normalisations \n edge_norm = graph_data.edge_norm[torch.cat((edge_item, edge_user), 0)]\n\n users = torch.unique(edge_user, sorted=True)\n items = torch.unique(edge_item, sorted=True)\n\n num_user = len(users)\n num_item = int(graph_data.num_items)\n num_edge = len(edge_id)\n # num_node = num_user+num_item\n\n # re map numbers\n user_dict = dict(zip(users.tolist(),[i for i in range(num_user)]))\n item_dict = dict(zip(items.tolist(),torch.add(items,-int(graph_data.num_users)).tolist()))\n\n # create edge index of shape 2 x (2*n_edges), counted twice since undirected \n edge_index = torch.stack((torch.cat((edge_user, edge_item), 0),\n torch.cat((edge_item, edge_user), 0)), 0)\n edge_index = edge_index.to(torch.long)\n\n x = graph_data.x\n\n # Prepare data\n sub_data = Data(x=x, edge_index=edge_index)\n sub_data.edge_norm = edge_norm \n sub_data.num_users = torch.tensor([num_user])\n sub_data.num_items = torch.tensor([num_item])\n sub_data.users = users\n sub_data.items = items\n\n train_idx = [user_dict[int(edge_index[0,i])]*num_item+item_dict[int(edge_index[1,i])] for i in range(num_edge)]\n\n if num_neg>0:\n n_neg = int(num_neg*num_edge)\n \n full_neg_idx = list(set([i for i in range(num_user*num_item)])-set(train_idx))\n neg_idx = random.sample(full_neg_idx, n_neg)\n\n perm = torch.randperm((num_neg+1)*num_edge).long()\n sub_data.train_idx = torch.tensor(train_idx + neg_idx).long()[perm]\n sub_data.train_rt = torch.cat((torch.ones(num_edge), torch.zeros(n_neg)),0)[perm]\n\n del n_neg, full_neg_idx, neg_idx, perm\n else:\n sub_data.train_idx = torch.tensor(train_idx)\n sub_data.train_rt = torch.ones(num_edge)\n\n return sub_data", "title": "" }, { "docid": "a07761ec31f7eb1ab9c59fd7079c4716", "score": "0.542683", "text": "def _get_subgraphs_by_bfs(self, n_subgraphs, n_nodes_in_subgraph, n_connected_components, **kwargs):\n\n max_depth = kwargs.pop('max_depth', 3)\n\n subgraphs = []\n for s in range(n_subgraphs):\n\n #randomly select start nodes. # of start nodes == n connected components\n curr_subgraph = []\n start_nodes = random.sample(self.graph.nodes, n_connected_components) \n for start_node in start_nodes:\n edges = nx.bfs_edges(self.graph, start_node, depth_limit=max_depth)\n nodes = [start_node] + [v for u, v in edges]\n nodes = nodes[:n_nodes_in_subgraph] #limit nodes to n_nodes_in_subgraph\n\n if max(nodes) > max(self.graph.nodes): print(max(nodes), max(self.graph.nodes))\n assert max(nodes) <= max(self.graph.nodes)\n\n assert nx.is_connected(self.graph.subgraph(nodes)) #check to see if selected nodes represent a conencted component\n curr_subgraph.extend(nodes)\n subgraphs.append(curr_subgraph)\n \n seen = []\n for g in subgraphs:\n seen += g\n assert max(seen) <= max(self.graph.nodes)\n \n return subgraphs", "title": "" }, { "docid": "ca1096f7138d9d8b46eff3955d74d21d", "score": "0.54069644", "text": "def multilabel_sample(y, size=1000, min_count=5, seed=None):\n try:\n if (np.unique(y).astype(int) != np.array([0, 1])).all():\n raise ValueError()\n except (TypeError, ValueError):\n raise ValueError('multilabel_sample only works with binary indicator matrices')\n\n if (y.sum(axis=0) < min_count).any():\n raise ValueError('Some classes do not have enough examples. 
Change min_count if necessary.')\n\n if size <= 1:\n size = np.floor(y.shape[0] * size)\n\n if y.shape[1] * min_count > size:\n msg = \"Size less than number of columns * min_count, returning {} items instead of {}.\"\n warnings.warn(msg.format(y.shape[1] * min_count, size))\n size = y.shape[1] * min_count\n\n rng = np.random.RandomState(seed if seed is not None else np.random.randint(1))\n\n if isinstance(y, pd.DataFrame):\n choices = y.index\n y = y.values\n else:\n choices = np.arange(y.shape[0])\n\n sample_idxs = np.array([], dtype=choices.dtype)\n\n # first, guarantee > min_count of each label\n for j in range(y.shape[1]):\n label_choices = choices[y[:, j] == 1]\n label_idxs_sampled = rng.choice(label_choices, size=min_count, replace=False)\n sample_idxs = np.concatenate([label_idxs_sampled, sample_idxs])\n\n sample_idxs = np.unique(sample_idxs)\n\n # now that we have at least min_count of each, we can just random sample\n sample_count = int(size - sample_idxs.shape[0])\n\n # get sample_count indices from remaining choices\n remaining_choices = np.setdiff1d(choices, sample_idxs)\n remaining_sampled = rng.choice(remaining_choices,\n size=sample_count,\n replace=False)\n\n return np.concatenate([sample_idxs, remaining_sampled])", "title": "" }, { "docid": "0ba28df3b12cd6715440b31279a29c19", "score": "0.5400847", "text": "def _initialize_graph_big_random(self, data, numtrees):\n N, k = self.graph.shape\n temp_graph = torch.tensor([])\n\n # make 'trees', combine into giant graph with each element (row) having k * num_trees neighbours\n # this is a small for loop - numtrees and k << datapoints\n for j in range(numtrees):\n tree_graph = torch.tensor([])\n for i in range(k):\n tree_graph = torch.cat(\n (tree_graph, torch.randperm(N)), 0\n ) # generate randomly shuffled list of N indices\n tree_graph = tree_graph.reshape(\n -1, k\n ) # creates a N x k tensor with N indices, each appearing k times. This represents 1 'tree'\n temp_graph = torch.cat(\n (temp_graph, tree_graph), 1\n ) # combine into giant N x (k*num_trees) tensor. 
This represents the forest\n\n # find KNN for each row in giant graph\n # TODO - implement the below without a for loop\n for i, row in enumerate(temp_graph):\n temp_row = torch.unique(row).type(torch.LongTensor) # remove duplicates\n temp_row = temp_row[temp_row != i] # remove self\n\n temp_points = data[temp_row, :] # pick out elements from dataset\n distances = self.distance(temp_points, data[i]) # Euclidean distances\n indices = distances.topk(\n k=self.queue, largest=False\n ).indices # find indices of KNN\n self.graph[i] = temp_row[indices] # assign KNN to graph", "title": "" }, { "docid": "f52c8e81280fcd07fe78c1aafb7e98f6", "score": "0.53827345", "text": "def _random_subset(seq,m):\n targets = set()\n while len(targets)<m:\n x = np.random.choice(seq)\n targets.add(x)\n return targets", "title": "" }, { "docid": "e774d393dbaddcfc215c6842f5ed8616", "score": "0.53739464", "text": "def _build_bases_sampling(self, bases_to_sample, num_bases_to_sample):\n bases_to_sample = tf.convert_to_tensor(\n bases_to_sample, dtype=utils.tf_float)\n idx = tf.random_uniform(\n shape=[num_bases_to_sample],\n maxval=tf.cast(tf.shape(bases_to_sample)[0], utils.tf_int),\n dtype=utils.tf_int)\n return tf.gather(bases_to_sample, idx)", "title": "" }, { "docid": "0cd6828f851a8b8ea8638033e20e8f04", "score": "0.5373221", "text": "def gen_nodes_randomly(map_size=(30, 30), obstacle_count=200):\n assert obstacle_count < map_size[0]*map_size[1]-2, \"Too many obstacles!\"\n assert map_size[0] > 8 and map_size[1] > 8, \"Too small map!\"\n\n np_arr = np.array([0 for i in range(map_size[0]*map_size[1])])\n\n indexes = random.sample(range(len(np_arr)), obstacle_count+2)\n for i in indexes:\n np_arr[i] = OBSTACLE\n np_arr[indexes[0]] = START\n np_arr[indexes[1]] = GOAL\n\n mp = list(np_arr.reshape(map_size))\n return gen_nodes_from_map(mp)", "title": "" }, { "docid": "c0325001eabc11c1b339c33105e82646", "score": "0.53705585", "text": "def random_cut(output_file, g, number_of_nodes, seconds_to_run):\n\n # mark start_time\n start_time = datetime.datetime.now()\n elapsed_time_seconds = 0\n\n final_cross_count = 0\n final_first_group = []\n final_second_group = []\n\n #initialize first group to contain all nodes\n for i in range(number_of_nodes):\n final_first_group.append(i+1)\n\n node_list = g.nodes()\n\n #keep searching for best solution until end of given runtime\n while elapsed_time_seconds < seconds_to_run:\n\n #set group attribute of random number of nodes to group 2\n num_choose = random.randint(0, number_of_nodes)\n for i in range(num_choose):\n change_node = random.randint(1, number_of_nodes)\n g.node[change_node][\"group\"] = 2\n\n #initialize group lists\n first_group = []\n second_group = []\n crossing_edges = 0\n\n #append nodes to appropriate lists\n for x in node_list:\n if g.node[x][\"group\"] == 1:\n first_group.append(x)\n else:\n second_group.append(x)\n\n #if a neighbor of a node in the first_group is not in group 1 as well it is a cross edge and we increment\n for x in first_group:\n for neighbor in g[x].keys():\n if g.node[neighbor][\"group\"] == 2:\n crossing_edges += 1\n\n # we keep the groups if the cross edge total is improved\n if crossing_edges > final_cross_count:\n final_cross_count = crossing_edges\n final_first_group = first_group[:]\n final_second_group = second_group[:]\n\n #reset group settings\n for x in node_list:\n g.node[x][\"group\"] = 1\n\n elapsed_time_seconds = (datetime.datetime.now() - start_time).seconds\n\n output_file.write(str(final_cross_count) + '\\n')\n\n #write all entries 
in the first group seperated by spaces excluding the last entry\n [output_file.write(str(n) + \" \") for n in final_first_group]\n #write the last entry with a new line\n output_file.write('\\n')\n\n #write all entries in the second group seperated by spaces excluding the last entry\n [output_file.write(str(n) + \" \") for n in final_second_group]\n #write the last entry with a new line\n output_file.write('\\n')\n\n return final_cross_count", "title": "" }, { "docid": "d2b2feb0d4e82becde6fe863dfd3f78e", "score": "0.53692377", "text": "def data_subsetting(samples, label_groups, k = 1000, num_subsets = 10, rand_seed = 0, verbose = False):\n \n if (verbose):\n print(\"Data Subsetting...\")\n \n # check the dimension\n checked_value, error_message = check_dimension(samples)\n assert (checked_value == 3), error_message\n \n # initialization\n #num_samples_tot, num_cells_tot, num_features = samples.shape\n num_samples_tot = samples.shape[0]\n rand.seed(rand_seed)\n result_samples = []\n result_labels = []\n \n # iterate through all samples\n for idx_sample in range(num_samples_tot):\n \n # initialization in each loop\n sample = samples[idx_sample]\n num_cells_tot = sample.shape[0]\n num_features = sample.shape[1]\n \n # record the corresponding label\n group = label_groups[idx_sample]\n result_labels += ([group] * num_subsets)\n \n # generate subsets in each sample\n for _ in range(num_subsets):\n \n # choose k cells randomly\n idx = rand.permutation(num_cells_tot)[:k]\n result_samples.append(sample[idx])\n \n # convert results from list to numpy array\n result_samples = np.array(result_samples) # (num_samples_tot * num_subsets, k, num_genes)\n result_labels = np.array(result_labels) # (num_samples_tot * num_subsets,)\n \n if (verbose):\n print(\"...Finish\")\n \n return result_labels, result_samples", "title": "" }, { "docid": "286452646075186d64828d608f0189ff", "score": "0.5350564", "text": "def create_graphs(node_num, deg_num, depth = []):\n dataset = []\n for i in depth:\n graph_rand_model_components = []\n\n graph_rand = nx.random_regular_graph(n=node_num, d=deg_num)\n graph_rand_model_components = (maxcut_qaoa_TFQ_model(graph_rand, i)) # nested list\n dataset.append(graph_rand_model_components)\n\n return dataset, graph_rand", "title": "" }, { "docid": "87af2e8f449828e9f04f05da3afe329a", "score": "0.5348271", "text": "def select_influential_reviews(random_seed, num_reviews):\n random.seed(random_seed)\n\n with open(os.path.join(input_data_folder2,\n \"business_elite_subset_reviews.json\"), \"r\") as f:\n data = ujson.load(f)\n\n random_indexes = random.sample(range(0, len(data)), num_reviews)\n\n review_dict = {}\n\n counter = 0\n\n for random_index in random_indexes:\n review_dict[counter] = data[random_index]\n counter += 1\n\n out_df = pd.DataFrame.from_dict(review_dict, orient=\"index\")\n return out_df", "title": "" }, { "docid": "1b814867d5950e29a00ea89768b97274", "score": "0.53462696", "text": "def generate_subgraph_labels(self, **kwargs):\n\n # Make sure base graph is connected\n if nx.is_connected(self.graph) == False:\n max_cc = max(nx.connected_components(self.graph), key=len)\n self.graph = self.graph.subgraph(max_cc)\n\n # Setup\n densities = []\n cut_ratios = []\n coreness = []\n cc = []\n desired_property = kwargs.get('desired_property', 'density')\n\n for subgraph_nodes in self.subgraphs:\n \n subgraph = self.graph.subgraph(subgraph_nodes).copy()\n \n if desired_property == 'density': \n value = self._get_property(subgraph, desired_property)\n densities.append(value) \n 
elif desired_property == 'cut_ratio': \n value = self._get_property(subgraph, desired_property)\n cut_ratios.append(value)\n elif desired_property == 'coreness': \n value = self._get_property(subgraph, desired_property)\n coreness.append(value)\n elif desired_property == 'cc':\n value = self._get_property(subgraph, desired_property)\n cc.append(value)\n\n if desired_property == 'density': \n bins = self.generate_bins(sorted(densities), len(config.DENSITY_RANGE))\n labels = np.digitize(densities, bins = bins)\n labels = self.convert_number_to_chr(labels)\n print(Counter(labels))\n return labels\n\n elif desired_property == 'cut_ratio': \n bins = self.generate_bins(sorted(cut_ratios), len(config.CUT_RATIO_RANGE))\n labels = np.digitize(cut_ratios, bins = bins)\n labels = self.convert_number_to_chr(labels)\n print(Counter(labels))\n return labels\n\n elif desired_property == 'coreness': \n n_bins = kwargs.pop('n_bins', 5)\n bins = self.generate_bins(sorted(coreness), n_bins)\n labels = np.digitize(coreness, bins = bins)\n labels = self.convert_number_to_chr(labels)\n print(Counter(labels))\n return labels\n\n elif desired_property == 'cc':\n print(Counter(cc))\n bins = [1, 5] # 1 CC vs. >1 CC\n labels = np.digitize(cc, bins = bins)\n labels = self.convert_number_to_chr(labels)\n print(Counter(labels))\n assert len(list(Counter(labels).keys())) == len(bins)\n return labels\n\n else: \n raise Exception('Other properties have not yet been implemented')", "title": "" }, { "docid": "51fc93e29dacd4265e4fcffe82fce9e5", "score": "0.5343091", "text": "def sample(self, n):\n batch = []\n idxs = []\n segment = self.tree.total() / n\n priorities = []\n \n self.beta = np.min([1., self.beta + self.beta_inc])\n \n for i in range(n):\n a = segment * i\n b = segment * (i + 1)\n \n s = random.uniform(a, b)\n (idx, p, data) = self.tree.get(s)\n priorities.append(p)\n batch.append(data)\n idxs.append(idx)\n \n sampling_probs = priorities / self.tree.total()\n #is_weight = np.power(self.tree.n_entries * sampling_probs, -self.beta)\n #is_weight /= is_weight.max()\n is_weight = None\n \n return batch, idxs, is_weight", "title": "" }, { "docid": "76dc314d63720e8826b6f4b49333fd26", "score": "0.5342726", "text": "def _subsample_trials(self, num_trials, subsample):\n num_trials = int(num_trials * subsample)\n if num_trials < 1:\n num_trials = 1\n sampled_trials = np.random.randint(\n num_trials,\n size=num_trials)\n\n return sampled_trials", "title": "" }, { "docid": "83a700d78791e87e4de0712b8c598af9", "score": "0.5307061", "text": "def select_good_nodes_by_sid(g, sid_list, counts, replace=False):\n good_nodes = [n for n, data in g.nodes(data=True) if data['orig'] is not None]\n type_node_map = ng.get_stype_node_map(g.subgraph(good_nodes))\n synnodes = []\n for branch_id, count in zip(sid_list, counts):\n size = len(type_node_map[branch_id])\n if (count > size) and (replace == False):\n print('Changing number of nodes to maximum {} available in branch, since replace={}'.format(size, replace))\n count = size\n synnodes += list(np.random.choice(type_node_map[branch_id],\n size=count, replace=replace))\n return synnodes", "title": "" }, { "docid": "9d978a5b6db5d06e657a73686527edd7", "score": "0.53031176", "text": "def test_subsample_rows():\n A = np.arange(24).reshape((12, 2))\n N = MRAIDenseNeuralNetwork()\n a = N.subsample_rows(A, num_draw=5)\n assert a.shape[0] == 5\n assert a.shape[1] == 2\n assert len(np.unique(a) == 5)", "title": "" }, { "docid": "0b3a17612f5ebc0ec4c23016193dbda7", "score": "0.5303009", "text": "def 
generate_subgraph(self, n_nodes_in_subgraph, **kwargs):\n \n subgraph_generator = kwargs.pop('subgraph_generator', 'path')\n\n if subgraph_generator == 'cycle':\n G = nx.cycle_graph(n_nodes_in_subgraph)\n elif subgraph_generator == 'path':\n G = nx.path_graph(n_nodes_in_subgraph)\n elif subgraph_generator == 'house':\n G = nx.house_graph()\n elif subgraph_generator == 'complete':\n G = nx.complete_graph(n_nodes_in_subgraph)\n elif subgraph_generator == 'star':\n G = nx.star_graph(n_nodes_in_subgraph)\n elif subgraph_generator == 'barabasi_albert':\n m = kwargs.get('m', 5)\n G = barabasi_albert_graph(n_nodes_in_subgraph, m, seed=config.RANDOM_SEED)\n elif subgraph_generator == 'extended_barabasi_albert':\n m = kwargs.get('m', 5)\n p = kwargs.get('p', 0.5)\n q = kwargs.get('q', 0)\n G = extended_barabasi_albert_graph(n_nodes_in_subgraph, m, p, q, seed=config.RANDOM_SEED)\n elif subgraph_generator == 'duplication_divergence_graph':\n p = kwargs.get('p', 0.5)\n G = duplication_divergence_graph(n_nodes_in_subgraph, p)\n else:\n raise Exception('The subgraph generator you specified is not implemented.')\n return G", "title": "" }, { "docid": "91393a1ab9666bfe0b858e674f529600", "score": "0.53023964", "text": "def _relabel_nodes(self, subgraphs, **kwargs):\n largest_cc = max(nx.connected_components(self.graph), key=len) \n removed_nodes = set(list(self.graph.nodes)).difference(set(largest_cc)) \n print(\"Original graph: %d, Largest cc: %d, Removed nodes: %d\" % (len(self.graph.nodes), len(largest_cc), len(removed_nodes))) \n self.graph = self.graph.subgraph(largest_cc)\n mapping = {k: v for k, v in zip(list(self.graph.nodes), range(len(self.graph.nodes)))} \n self.graph = nx.relabel_nodes(self.graph, mapping) \n new_subgraphs = [] \n for s in subgraphs:\n new_s = [mapping[n] for n in s if n not in removed_nodes] \n new_subgraphs.append(new_s) \n return new_subgraphs", "title": "" }, { "docid": "4b37ed232e35c39b18c349ce414c55b2", "score": "0.5293502", "text": "def fit(self, X, y, features_selection = 'random', subset_size = 1,\n weights_init = None, labels_init = None,\n unsup_num_iters = 100, unsup_batch_size = 32,\n sup_num_iters = 100, sup_batch_size = 32,\n neighborhood = \"bubble\",\n learning_rate = 0.5, learning_decay_rate = 1, learning_rate_decay_function = None,\n sigma = 1, sigma_decay_rate = 1, sigma_decay_function = None,\n conscience = False, verbose = 0):\n self._features_set = []\n n_samples = X.shape[0]\n n_features = X.shape[1]\n \n if features_selection == 'random':\n for _ in range(self._n_estimators):\n np.random.seed(self._get_random_state())\n size = np.random.randint(1, n_features + 1)\n np.random.seed(self._get_random_state())\n subset_features = np.random.randint(0, n_features, size)\n subset_features = np.unique(subset_features)\n self._features_set.append(subset_features)\n elif features_selection == 'weights':\n corr_coef = np.corrcoef(np.append(X, y.reshape((-1, 1)), axis = 1).T)[-1, :-1]\n weights = np.abs(corr_coef.copy())\n for _ in range(self._n_estimators): \n subset_features = np.unique(weighted_sampling(weights, n_features, \n self._get_random_state()))\n self._features_set.append(subset_features)\n elif type(features_selection) == list and n_features == len(features_selection):\n weights = np.abs(np.array(features_selection))\n for _ in range(self._n_estimators):\n subset_features = np.unique(weighted_sampling(weights, n_features, \n self._get_random_state()))\n self._features_set.append(subset_features)\n else:\n raise ValueError('features_selection should be 
random or weights or a list which length is equal to number of features')\n \n for i, model in enumerate(self._models):\n if verbose:\n print('Model {}/{}:'.format(i + 1, self._n_estimators))\n if subset_size != 1:\n np.random.seed(self._get_random_state())\n subset_idx = np.random.randint(0, n_samples, int(subset_size * n_samples)).tolist()\n else:\n subset_idx = np.arange(n_samples)\n X_subset = X[subset_idx, :][:, self._features_set[i]]\n y_subset = y[subset_idx]\n model.fit(X_subset, y_subset, weights_init = weights_init, labels_init = labels_init,\n unsup_num_iters = unsup_num_iters, unsup_batch_size = unsup_batch_size,\n sup_num_iters = sup_batch_size, sup_batch_size = sup_batch_size,\n neighborhood = neighborhood,\n learning_rate = learning_rate, learning_decay_rate = learning_decay_rate,\n learning_rate_decay_function = learning_rate_decay_function,\n sigma = sigma, sigma_decay_rate = sigma_decay_rate, sigma_decay_function = sigma_decay_function,\n conscience = conscience, verbose = verbose)", "title": "" }, { "docid": "4c796589ca3c89eee08251a6220e8b8b", "score": "0.529108", "text": "def random_proposer_selection(self):\n # probs = [1/self.m for _ in range(self.m)]\n return choices(self.nodes,k=1)[0] # equiprobable is the default", "title": "" }, { "docid": "b95f44627211e649c9f732d087b44c70", "score": "0.5290702", "text": "def generate_sub_training_set(training_set_values, ratio=1.0):\n training_set_df = pd.DataFrame(training_set_values)\n sub_training_set = pd.DataFrame()\n n_sample = round(len(training_set_df) * ratio)\n while len(sub_training_set) < n_sample:\n index = randrange(len(training_set_df))\n sub_training_set = sub_training_set.append(training_set_df.iloc[index])\n sub_training_set_values = sub_training_set.values\n return sub_training_set_values", "title": "" }, { "docid": "eb062e06988dcac8bf754c37d2cbe2a1", "score": "0.5274769", "text": "def select_subset(list_of_paths, n, D):\n \n num_classes = len(list_of_paths)\n \n selected_images = np.zeros([num_classes*n, D])\n \n for cli in range(num_classes):\n \n cl_img = np.loadtxt(list_of_paths[cli])\n \n inds = np.random.choice(range(cl_img.shape[0]), n, replace=False)\n \n assert len(inds) == n\n \n selected_images[cli*n:(cli+1)*n,:] = cl_img[inds,:]\n \n return selected_images", "title": "" }, { "docid": "858cf0c8c246000eef927934a9bfdf91", "score": "0.52693623", "text": "def random_walk_sampling(\n graph: Graph,\n num_steps: mg.Optional[int] = None,\n num_nodes: mg.Optional[int] = None,\n num_edges: mg.Optional[int] = None,\n jump_probability: float = 0.15,\n start_node: mg.Optional[NodeID] = None,\n) -> Graph:\n # TODO: check that `num_*` variables aren't all `None`\n pass # pragma: no cover", "title": "" }, { "docid": "1f4afeb0aa6bdf2550b943ffdcdadb50", "score": "0.5256263", "text": "def select(self, label_index, unlabel_index, batch_size=1, **kwargs):", "title": "" }, { "docid": "1f4afeb0aa6bdf2550b943ffdcdadb50", "score": "0.5256263", "text": "def select(self, label_index, unlabel_index, batch_size=1, **kwargs):", "title": "" }, { "docid": "1f4afeb0aa6bdf2550b943ffdcdadb50", "score": "0.5256263", "text": "def select(self, label_index, unlabel_index, batch_size=1, **kwargs):", "title": "" }, { "docid": "aee471a544c08f1fe9a11e23298543a0", "score": "0.525413", "text": "def seed_package_selector(self, bay, ids, indexes, hub, count):\n if count == 16: # Skip trucks at full capacity.\n return bay, ids, indexes, hub, count\n print(\"\\nSelecting the most optimal packages to load onto truck \" + str(self.truck.identifier) + 
\".\")\n\n # Declare reset variables and best result variables. O(K).\n reset_bay, reset_ids, reset_indexes, reset_hub, reset_count = bay[:], ids[:], indexes[:], hub[:], count\n best, best_bay, best_ids, best_indexes, best_hub, best_count = [INT_MAX], None, None, None, None, None\n\n # Runs seed selection loop. O(M * N!).\n for seed in range(1, SEED_COUNT + 1):\n bay, ids, indexes, hub, count = reset_bay[:], reset_ids[:], reset_indexes[:], reset_hub[:], reset_count\n bay, ids, indexes, hub, count = self.seed_random_sample(bay, ids, indexes, hub, count)\n bay, ids, indexes, hub, count = self.load_address_pairs(bay, ids, indexes, hub, count)\n bay, ids, indexes, hub, count = self.unique_max_load(bay, ids, indexes, hub, count, False)\n bay, ids, indexes, hub, count = self.duplicate_max_load(bay, ids, indexes, hub, count)\n bay, ids, indexes, hub, count, best, record = self.seed_minimum(bay, ids, indexes, hub, count, best, seed)\n # Saves best results if minimum distance is lowest.\n if record:\n best_bay, best_ids, best_indexes, best_hub, best_count = bay, ids, indexes, hub, count\n if len(hub) == 0:\n break\n\n return best_bay, best_ids, best_indexes, best_hub, best_count", "title": "" }, { "docid": "1e4101d32bf830f05dccbed60e0d3375", "score": "0.5250584", "text": "def select_samples(popsize, candidate, number_samples=5):\n idxs = list(range(popsize))\n idxs.remove(candidate)\n return(np.random.choice(idxs, number_samples, replace = False))", "title": "" }, { "docid": "de92125fcd08345ed1b8cd6808b81587", "score": "0.5241733", "text": "def random_sampling(args, dataset):\n dict_idxs = {i: [] for i in range(args.num_users)}\n dict_classes = {i: [] for i in range(args.num_users)}\n\n total_data_num = len(dataset)\n per_client_data_num = total_data_num // args.num_users\n all_idxs = [i for i in range(total_data_num)]\n\n np.random.seed(args.seed)\n for i in range(args.num_users):\n dict_idxs[i] = set(np.random.choice(all_idxs, per_client_data_num, replace=False))\n all_idxs = list(set(all_idxs) - dict_idxs[i])\n dict_classes[i] = dataset[\"label\"].loc[dict_idxs[i]].values\n\n return dict_idxs, dict_classes", "title": "" }, { "docid": "db083ed5df71263fc913af84cca3c149", "score": "0.52228475", "text": "def select_triplets_batch_random(distances, labels, alpha):\n #nrof_image_per_class = Counter(labels)\n time_start = time.time()\n label_counts = Counter(labels)\n nrof_images_per_class = [label_counts[l_ind] for l_ind in sorted(label_counts.keys())]\n #embeddings = embeddings.squeeze()\n trip_idx = 0\n emb_start_idx = 0\n num_trips = 0\n triplets = []\n for i in range(len(nrof_images_per_class)):\n nrof_images = int(nrof_images_per_class[i])\n for j in range(1,nrof_images):\n a_idx = emb_start_idx + j - 1\n #neg_dists_sqr = np.sum(np.square(embeddings[a_idx] - embeddings), 1)\n #pdb.set_trace()\n neg_dists_sqr = distances[a_idx,:].copy() # a bug occur if we don't use copy. 
because the code bellow will assign them to np.NaN \n neg_dist_tmp = 100 # max_dist\n triplet_tmp = []\n for pair in range(j, nrof_images): # For every possible positive pair.\n p_idx = emb_start_idx + pair\n #pos_dist_sqr = np.sum(np.square(embeddings[a_idx]-embeddings[p_idx]))\n pos_dist_sqr = distances[a_idx, p_idx]\n neg_dists_sqr[emb_start_idx:emb_start_idx+nrof_images] = np.NaN\n #all_neg = np.where(np.logical_and(neg_dists_sqr-pos_dist_sqr<alpha, pos_dist_sqr<neg_dists_sqr))[0] # FaceNet selection\n all_neg = np.where(neg_dists_sqr-pos_dist_sqr<alpha)[0] # VGG Face selecction\n nrof_random_negs = all_neg.shape[0]\n if nrof_random_negs>0:\n rnd_idx = np.random.randint(nrof_random_negs)\n n_idx = all_neg[rnd_idx]\n #sort_inds = neg_dists_sqr[all_neg].argsort()\n #n_idx = all_neg[sort_inds[0]]\n triplet_tmp = [a_idx, p_idx, n_idx]\n triplets.append(triplet_tmp)\n num_trips += 1\n\n emb_start_idx += nrof_images\n time_select = time.time() - time_start\n print('time select triplet is {}'.format(time_select))\n print('nrof_random_pairs {}'.format(num_trips))\n triplet_inds = list(range(len(triplets)))\n np.random.shuffle(triplet_inds)\n triplets = np.array(triplets,dtype=np.int64)\n triplets = np.hstack(triplets[triplet_inds])\n return triplets", "title": "" }, { "docid": "a872ee09f0399ea019dbac54ab3bd638", "score": "0.5222397", "text": "def get_k_samples(self, idx, k, mode, cls=None, shuffle=True):\r\n if k != 2:\r\n raise NotImplementedError(\r\n f\"No strategy implemented to sample {k} graphs from SPair dataset. So far only k=2 is possible.\"\r\n )\r\n\r\n if cls is None:\r\n cls = self.classes[random.randrange(0, len(self.classes))]\r\n ann_files = self.ann_files_filtered_cls_dict[cls]\r\n elif type(cls) == int:\r\n cls = self.classes[cls]\r\n ann_files = self.ann_files_filtered_cls_dict[cls]\r\n else:\r\n assert type(cls) == str\r\n ann_files = self.ann_files_filtered_cls_dict[cls]\r\n\r\n # get pre-processed images\r\n\r\n assert len(ann_files) > 0\r\n if idx is None:\r\n ann_file = random.choice(ann_files) + \".json\"\r\n else:\r\n ann_file = ann_files[idx] + \".json\"\r\n with open(os.path.join(self.pair_ann_path, self.sets, ann_file)) as f:\r\n annotation = json.load(f)\r\n\r\n category = annotation[\"category\"]\r\n if cls is not None and not self.combine_classes:\r\n assert cls == category\r\n assert all(annotation[key] == value for key, value in self.difficulty_params.items())\r\n\r\n if mode == \"intersection\":\r\n assert len(annotation[\"src_kps\"]) == len(annotation[\"trg_kps\"])\r\n num_kps = len(annotation[\"src_kps\"])\r\n perm_mat_init = np.eye(num_kps)\r\n anno_list = []\r\n perm_list = []\r\n\r\n for st in (\"src\", \"trg\"):\r\n if shuffle:\r\n perm = np.random.permutation(np.arange(num_kps))\r\n else:\r\n perm = np.arange(num_kps)\r\n kps = annotation[f\"{st}_kps\"]\r\n img_path = os.path.join(self.image_path, category, annotation[f\"{st}_imname\"])\r\n img, kps = self.rescale_im_and_kps(img_path, kps)\r\n kps_permuted = [kps[i] for i in perm]\r\n anno_dict = dict(image=img, keypoints=kps_permuted)\r\n anno_list.append(anno_dict)\r\n perm_list.append(perm)\r\n\r\n perm_mat = perm_mat_init[perm_list[0]][:, perm_list[1]]\r\n else:\r\n raise NotImplementedError(f\"Unknown sampling strategy {mode}\")\r\n\r\n return anno_list, [perm_mat]", "title": "" }, { "docid": "7a125bb733f5637ad474ff5f6c411914", "score": "0.51945513", "text": "def evaluate_active_sampling(\n num_rounds: int,\n output_dir: str,\n dataloader: data.Dataloader,\n batch_size: int,\n num_subgroups: 
int,\n ) -> pd.DataFrame:\n round_idx = []\n subgroup_ids = []\n num_samples = []\n prob_representation = []\n for idx in range(num_rounds):\n ds = dataloader.train_ds\n bias_table = pd.read_csv(\n os.path.join(\n os.path.join(output_dir, f'round_{idx}'), 'bias_table.csv'))\n predictions_merge = merge_subgroup_labels(ds, bias_table, batch_size)\n for subgroup_id in range(num_subgroups):\n prob_i = (predictions_merge['subgroup_label']\n == subgroup_id).sum() / len(predictions_merge)\n round_idx.append(idx)\n subgroup_ids.append(subgroup_id)\n num_samples.append(len(predictions_merge))\n prob_representation.append(prob_i)\n return pd.DataFrame({\n 'num_samples': num_samples,\n 'prob_representation': prob_representation,\n 'round_idx': round_idx,\n 'subgroup_ids': subgroup_ids,\n })", "title": "" }, { "docid": "23346feeec6dc885032f0e8b95a260c1", "score": "0.5178676", "text": "def selection(population, selection_size):\n# if len(set([str(item[0]) for item in population])) >= selection_size:\n# parent1 = sorted(random.sample(population, k = selection_size), key = lambda individual:individual[1])[0]\n# while True:\n# parent2 = sorted(random.sample(population, k = selection_size), key = lambda individual:individual[1])[0]\n# if parent1 != parent2:\n# break\n# else:\n parent1 = sorted(random.sample(population, k = selection_size), key = lambda individual:individual[1])[0]\n parent2 = sorted(random.sample(population, k = selection_size), key = lambda individual:individual[1])[0]\n return parent1,parent2", "title": "" }, { "docid": "c7b26cf138f1f381cb68ae5af193b8fc", "score": "0.517866", "text": "def sample_subnet(self, subnet_idx, path_dropout_prob=0):\r\n input = tf.keras.Input(shape=(28, 28, 1))\r\n\r\n # Select one of convolution layers w.r.t. subnet_idx\r\n if subnet_idx == 0:\r\n hidden_1 = self.conv_3x3_1(input) * np.random.binomial(1, 1 - path_dropout_prob)\r\n hidden_2 = self.conv_5x5_1(input) * np.random.binomial(1, 1 - path_dropout_prob)\r\n elif subnet_idx in [1, 2]:\r\n hidden_1 = self.conv_3x3_1(input)\r\n hidden_2 = tf.zeros_like(hidden_1)\r\n elif subnet_idx in [3, 4]:\r\n hidden_2 = self.conv_5x5_1(input)\r\n hidden_1 = tf.zeros_like(hidden_2)\r\n else:\r\n raise ValueError(f'Valid `subnet_idx` is in {0, 1, 2, 3, 4}, provided: {subnet_idx}')\r\n\r\n hidden = L.Concatenate()([hidden_1, hidden_2])\r\n hidden = self.max_pool(hidden)\r\n\r\n # Select one of convolution layers w.r.t. 
subnet_idx\r\n if subnet_idx == 0:\r\n hidden_1 = self.conv_3x3_2(hidden) * np.random.binomial(1, 1 - path_dropout_prob)\r\n hidden_2 = self.conv_5x5_2(hidden) * np.random.binomial(1, 1 - path_dropout_prob)\r\n if subnet_idx in [1, 3]:\r\n hidden_1 = self.conv_3x3_2(hidden)\r\n hidden_2 = tf.zeros_like(hidden_1)\r\n elif subnet_idx in [2, 4]:\r\n hidden_2 = self.conv_5x5_2(hidden)\r\n hidden_1 = tf.zeros_like(hidden_2)\r\n\r\n hidden = L.Concatenate()([hidden_1, hidden_2])\r\n hidden = self.max_pool(hidden)\r\n hidden = L.Flatten()(hidden)\r\n hidden = self.dense_1(hidden)\r\n output = self.dense_output(hidden)\r\n model = tf.keras.Model(input, output, name=f'subnet_{subnet_idx}')\r\n return model", "title": "" }, { "docid": "11f49e62f8144aadebf3fa37bee8b0e9", "score": "0.5172999", "text": "def getRandomSubSet(x,batch_size=128):\n\n\tsampleStartIDX = np.random.randint(0,len(x)-batch_size)\n\treturn x[sampleStartIDX:(sampleStartIDX+batch_size),:]", "title": "" }, { "docid": "ccf8d162664405d2fc2392f043c67c00", "score": "0.51661235", "text": "def select_samples(popsize, candidate, number_samples):\n idxs = list(range(popsize))\n idxs.remove(candidate)\n return(np.random.choice(idxs, number_samples, replace = False))", "title": "" }, { "docid": "5d7d672b6f12b25230e4f0a95e999c74", "score": "0.5164169", "text": "def train(genres, training_data, n):\r\n submodels = []\r\n for genre_name in genres:\r\n model = (genre_name, 0, {}) #initialize submodels with empty models i.e. 0 bias and empty lex\r\n submodels.append(model)\r\n for i in range(n):\r\n # print(\"Epoch No.\", i)\r\n for genre, doc in training_data:\r\n guessed_genre = guess(submodels, doc)\r\n # print(\"in training: \", guessed_genre, genre)\r\n if guessed_genre != genre: #if guessed wrong\r\n for j, submodel in enumerate(submodels):\r\n genre_name = submodels[j][0]\r\n bias = submodels[j][1]\r\n lex = submodels[j][2]\r\n if genre_name == genre: #for the correct genre\r\n bias += 1 #bias\r\n for word in doc:\r\n if word not in lex.keys():\r\n lex[word] = 0 #initialize the word with polarity 0.\r\n lex[word] +=1\r\n # print(lex)\r\n elif genre_name != genre and guessed_genre == genre_name:\r\n bias -= 1\r\n for word in doc:\r\n if word not in lex.keys():\r\n lex[word] = 0 #initialize the word with polarity 0.\r\n lex[word] -=1\r\n submodels[j] = (genre_name, bias, lex) #update the submodel\r\n return submodels", "title": "" }, { "docid": "6b2ecb1ad14976bc21d7a4c1c92b38d2", "score": "0.5156912", "text": "def subsample_dataset(X, y, data_file, use_test_set, use_full_data_for_gp, n_best, n_rand):\n\n if use_test_set:\n X_train, y_train, X_test, y_test = split_dataset(X, y)\n else:\n X_train, y_train, X_test, y_test = X, y, None, None\n\n if len(y_train) < n_best + n_rand:\n n_best = int(n_best / (n_best + n_rand) * len(y_train))\n n_rand = int(n_rand / (n_best + n_rand) * len(y_train))\n\n if not use_full_data_for_gp:\n # pick n_best best points and n_rand random points\n best_idx = np.argsort(np.ravel(y_train))[:n_best]\n rand_idx = np.argsort(np.ravel(y_train))[np.random.choice(\n list(range(n_best, len(y_train))), n_rand, replace=False)]\n all_idx = np.concatenate([best_idx, rand_idx])\n X_train = X_train[all_idx, :]\n y_train = y_train[all_idx]\n\n X_mean, X_std = X_train.mean(), X_train.std()\n y_mean, y_std = y_train.mean(), y_train.std()\n save_data(X_train, y_train, X_test, y_test, X_mean, X_std, y_mean, y_std, data_file)\n\n return X_train, y_train, X_test, y_test, X_mean, y_mean, X_std, y_std", "title": "" }, { "docid": 
"0b1c3ba380cef69a85bda4447396104b", "score": "0.51537997", "text": "def over_sampling(training_data, training_label):\n training_data_resampled, training_label_resampled = SMOTE(ratio='minority', random_state=42, kind='svm', n_jobs=12)\\\n .fit_sample(training_data, training_label)\n return training_data_resampled, training_label_resampled", "title": "" }, { "docid": "e30bdc29a1cab0d5aa9c0b1fd895a015", "score": "0.5151108", "text": "def __init__(self, num_trees, depth_limit, example_subsample_rate, attr_subsample_rate):\n self.trees = []\n self.num_trees = num_trees\n self.depth_limit = 5#depth_limit\n self.example_subsample_rate = 1#example_subsample_rate\n self.attr_subsample_rate = 1#attr_subsample_rate", "title": "" }, { "docid": "1bc55b8a21233cebc7b45e407c2205e8", "score": "0.51423174", "text": "def subsample(\n lbl: npt.NDArray[np.int_], i: int, b: Tuple[slice, ...], ss_grid: Tuple[slice, ...]\n) -> npt.NDArray[np.int_]:\n subsampled: npt.NDArray[np.int_] = lbl[(i,) + b[1:]][ss_grid[1:3]]\n return subsampled", "title": "" }, { "docid": "98100620d0457990157b24a4b525a469", "score": "0.5140515", "text": "def select_random(x):\n\n def to_float(x):\n return tf.cast(x, tf.float32)\n\n def to_int(x):\n return tf.cast(x, tf.int64)\n\n batch_size = tf.shape(x)[0]\n rn = tf.range(batch_size)\n nnz = to_float(tf.count_nonzero(x >= 0, axis=1))\n rnd = tf.random_uniform([batch_size])\n ids = tf.stack([to_int(rn), to_int(nnz * rnd)], axis=1)\n return to_int(tf.gather_nd(x, ids))", "title": "" }, { "docid": "f7a54197c0929f6e9d931d21525fb8a9", "score": "0.51322675", "text": "def active_learn(self, num_examples_to_label, batch_size=5):\n labeled_so_far = 0\n while labeled_so_far < num_examples_to_label:\n example_ids_to_label = self.query_function(batch_size)\n # now remove the selected examples from the unlabeled sets and put them in the labeled sets.\n # if not ids are returned -- ie., if a void query_function is used --\n # it is assumed the query function took care of labeling the examples selected. 
\n if example_ids_to_label is not None:\n self.label_instances(example_ids_to_label)\n\n if self.rebuild_model_at_each_iter:\n self.rebuild_model() \n\n labeled_so_far += batch_size\n\n self.rebuild_model()", "title": "" }, { "docid": "fd5d2debe7ce0883de03ad1ccdfe70e0", "score": "0.5130995", "text": "def _sample_one(self, sess, seed=None):\n with tf.device(\"/cpu:0\"):\n gam = self._gam\n omega = self._omega\n theta = self._theta\n beta = self._beta\n\n user_weights = theta*gam\n item_weights = beta*omega\n\n i_mass_tots = tf.reduce_sum(item_weights,0) # total mass of each type in items\n u_mass_tots = tf.reduce_sum(user_weights,0)\n\n # K probability distributions over items / users\n i_logits = tf.expand_dims(tf.log(tf.transpose(item_weights)),0)\n u_logits = tf.expand_dims(tf.log(tf.transpose(user_weights)),0)\n\n # total number of edges of each type\n tot_edges_mean = u_mass_tots * i_mass_tots\n tot_edges = tf.cast(tf.random_poisson(tot_edges_mean,[1])[0], tf.int32)\n\n # assign edges to pairs\n item_assignments = [tf.multinomial(i_logits[:,k,:],tot_edges[k]) for k in range(self.K)]\n user_assignments = [tf.multinomial(u_logits[:,k,:],tot_edges[k]) for k in range(self.K)]\n\n edge_list = tf.concat([tf.squeeze(tf.stack([user_assignments[k], item_assignments[k]])) for k in range(self.K)], axis=1)\n\n # we now actually run this so we can use some functionality in np.sort that doesn't exist for tf.sort\n redundant_edge_list = sess.run(edge_list)\n # print(\"redundant edge list done\")\n\n uniques = np.unique(redundant_edge_list, return_counts=True, axis=1)\n # print(\"edge list done\")\n\n return np.vstack([uniques[0], np.expand_dims(uniques[1], 0)]).T", "title": "" }, { "docid": "ba8198efa5279366f4ec79f9443ae5ad", "score": "0.51206857", "text": "def test_choose_best_feature_to_split(self):\n my_data, labels = self.create_dataset()\n returned = trees.choose_best_feature_to_split(my_data)\n self.assertEqual(0, returned)", "title": "" }, { "docid": "0da762b69673ce20bd730ebc4c747d3c", "score": "0.5118132", "text": "def sample(self, num_anchors=300, num_neg_per_pos=10, run_batch_size=32):\n triplets = []\n print(\"Begin sampling operation\")\n anchor_indices = random.sample(range(0, len(self.anchor_path_list)), num_anchors)\n select_anchors = [self.anchor_path_list[j] for j in anchor_indices]\n torch.save(select_anchors, \"cache_select_anchors.pkl\")\n crop_d = {}\n catalog_d = {}\n if self.backbone_name[0] == 'r':\n loader_type = 'resnet'\n elif self.backbone_name[0] == 'i':\n loader_type = 'inception'\n\n with torch.no_grad():\n\n data_loader = torch.utils.data.DataLoader(util.FVBatchLoader(select_anchors,\n network_type=loader_type),\n batch_size=run_batch_size,\n shuffle=False,\n num_workers=0)\n print(\"Processing selected anchor images\")\n count = 0\n for id, batch in data_loader:\n batch = batch.cuda()\n embeds = self.model_query(batch)\n count += 1\n del batch\n self.model_query.zero_grad()\n if count % 10 == 0 or count == len(data_loader):\n print(str(count) + \"/\" + str(len(data_loader)))\n for i in range(0, len(id)):\n crop_d[id[i]] = embeds[i].cpu()\n\n data_loader = torch.utils.data.DataLoader(util.FVBatchLoader(glob.glob(self.path_to_catalog + \"/*.jpg\"),\n network_type=loader_type),\n batch_size=run_batch_size,\n shuffle=False,\n num_workers=0)\n print(\"Processing catalog images\")\n count = 0\n for id, batch in data_loader:\n batch = batch.cuda()\n embeds = self.model_cat(batch)\n count += 1\n del batch\n self.model_cat.zero_grad()\n if count % 10 == 0 or count == 
len(data_loader):\n print(str(count) + \"/\" + str(len(data_loader)))\n for i in range(0, len(id)):\n catalog_d[id[i]] = embeds[i].cpu()\n\n del embeds\n\n count = 0\n print(\"Calculating distances\")\n anc_to_cat_dist = {}\n for anchor in crop_d:\n anc_to_cat_dist[anchor] = []\n anc_embed = crop_d[anchor]\n count += 1\n if count % 50 == 0:\n print(count)\n for cat in catalog_d:\n cat_embed = catalog_d[cat]\n dist = torch.dist(anc_embed, cat_embed)\n anc_to_cat_dist[anchor].append((cat, dist.item()))\n\n for i in anc_to_cat_dist:\n anc_to_cat_dist[i] = sorted(anc_to_cat_dist[i], key=lambda cat_img: cat_img[1])[:50]\n\n print(\"Sampling triplets and preparing triplet file\")\n for anc in anc_to_cat_dist:\n q = anc\n\n p_s = []\n for pos in self.query_to_positive[anc]:\n p_s.append(pos)\n\n for p in p_s:\n previous_n = [] # Below comment / move above\n for j in range(num_neg_per_pos):\n n = anc_to_cat_dist[anc][j][0]\n if n not in p_s and n != q and n not in previous_n: # Consider altering the n's\n triplets.append((q, p, n))\n previous_n.append(n)\n\n with open(\"hardtrain.csv\", \"w\", newline='') as csvFile: # base_dir\n writer = csv.writer(csvFile)\n triplets = [[self.path_to_crops + \"/\" + x[0] + \".jpg\", self.path_to_catalog + \"/\" + x[1] + \".jpg\",\n self.path_to_catalog + \"/\" + x[2] + \".jpg\"] for x in triplets] # play\n print(\"Process Completed\\nNumber of triplets: \", len(triplets))\n writer.writerows(triplets)\n del triplets\n\n return", "title": "" }, { "docid": "fdc8bf5955128d32fb4bea7c0fe74bdd", "score": "0.51145333", "text": "def select_random_subset(self, input_list):\n import random\n\n random_inp_list = []\n if self.params.advanced.random_sample.number == 0:\n if len(input_list) <= 5:\n random_sample_number = len(input_list)\n elif len(input_list) <= 50:\n random_sample_number = 5\n else:\n random_sample_number = int(len(input_list) * 0.1)\n else:\n random_sample_number = self.params.advanced.random_sample.number\n\n cmd.Command.start(\"Selecting {} random images out of {} found\".format(random_sample_number, len(input_list)))\n for i in range(random_sample_number):\n random_number = random.randrange(0, len(input_list))\n if input_list[random_number] in random_inp_list:\n while input_list[random_number] in random_inp_list:\n random_number = random.randrange(0, len(input_list))\n random_inp_list.append(input_list[random_number])\n else:\n random_inp_list.append(input_list[random_number])\n cmd.Command.end(\"Selecting {} random images out of {} found -- DONE \".format(random_sample_number, len(input_list)))\n\n return random_inp_list", "title": "" }, { "docid": "222bb2818ba0b9f70b3d64b66c625746", "score": "0.51137435", "text": "def select(self):\n class1, class2 = np.random.randint(0, high=15, size=2)\n print(class1,class2)\n class2_num = int(np.random.randint(1, high=7, size=1))\n class1_num = int(self.new_num_nodes - class2_num)\n print(class1_num,class2_num)\n \n class1_selected = random.sample(list(range(class1*24, int(class1*24 + self.num_per_classes[class1]))), class1_num)\n class2_selected = random.sample(list(range(class2*24, int(class2*24 + self.num_per_classes[class2]))), class2_num)\n print(class1_selected)\n print(class2_selected)\n return class1_selected, class2_selected", "title": "" }, { "docid": "a462235d16a788a39b8bacfed01b264a", "score": "0.51087505", "text": "def select_data(training_set_size, testing_set_size, save_test_set=False):\n training_set = []\n training_labels = []\n testing_set = []\n testing_labels = []\n\n for digit in xrange(10):\n # 
load digit:\n images, labels = load_mnist(digits=[digit], path='.')\n\n # choose random digits to add to training and testing sets:\n if (training_set_size+testing_set_size)/10 <= len(images):\n combined_sample_size = (training_set_size+testing_set_size)/10\n testing_sample_size = testing_set_size/10\n else:\n combined_sample_size = len(images)\n testing_sample_size = testing_set_size/10 * combined_sample_size / ((training_set_size+testing_set_size)/10)\n training_indices = random.sample(range(len(images)), combined_sample_size)\n testing_indices = random.sample(training_indices, testing_sample_size)\n training_indices = [x for x in training_indices if x not in testing_indices]\n\n # add to training set:\n training_set.extend(images[i] for i in training_indices)\n training_labels.extend(labels[i] for i in training_indices)\n\n # add to testing set:\n testing_set.extend([images[i] for i in testing_indices])\n testing_labels.extend([labels[i] for i in testing_indices])\n\n if save_test_set:\n pickle.dump(testing_set, open('testing_set_2.p', 'w'))\n pickle.dump(testing_labels, open('testing_labels_2.p', 'w'))\n return training_set, training_labels, testing_set, testing_labels", "title": "" }, { "docid": "ca9f19c2d511ffb92e9a8f13f9804e9a", "score": "0.5104677", "text": "def _initialize_graph_randomly(self):\n N, k = self.graph.shape\n # Initialize graph randomly, removing self-loops\n self.graph = torch.randint(high=N - 1, size=[N, k], dtype=torch.long)\n row_indices = torch.arange(N).unsqueeze(1).repeat(1, k)\n self.graph[self.graph >= row_indices] += 1", "title": "" }, { "docid": "fbb7a5480eb46c653ef2c8fbc66894d3", "score": "0.509942", "text": "def totally_induced_edge_sampling(graph: Graph, p: float = 0.20) -> Graph:\n pass # pragma: no cover", "title": "" }, { "docid": "b5b11d3ff1562e5402a1f97e351d0207", "score": "0.5096833", "text": "def get_subsampling_indexes(\n min_set_length: int, max_set_length: int, permutation: Tensor, shuffle=True\n) -> Tuple[Tensor, Tensor]:\n\n length = torch.randint(min_set_length, max_set_length + 1, (1,))\n if min_set_length != max_set_length:\n indexes_reference = torch.randperm(max_set_length)[:length]\n if not shuffle:\n indexes_reference = indexes_reference.sort().values\n else:\n indexes_reference = torch.arange(max_set_length)\n\n # This identifies indexes in set_matching that correspond to indexes_reference.\n # By doing so, we ensure that in the 'permute' case, the correct elements are retained during random cropping.\n\n indexes_matching = torch.tensor(\n [index for index, value in enumerate(permutation) if value in indexes_reference]\n )\n\n return indexes_reference, indexes_matching, length", "title": "" }, { "docid": "5b347e8ed9188112496430c87ce104dc", "score": "0.5093512", "text": "def subsample_fn(\n self, client_data: tf.data.Dataset, subsampling_param: float\n ):", "title": "" }, { "docid": "fa474c7d5052f347c0e430963f369020", "score": "0.5083208", "text": "def _select_from_dset(self, dataset_name, weight_by_col):\n df = self._dataframes[dataset_name]\n dfs = [df[df[lbl] == 1].sample(n=LABEL_WEIGHTS[lbl], axis=0, replace=True, weights=weight_by_col) for lbl in LABELS]\n \n return pd.concat(dfs, axis=0).sample(frac=1)", "title": "" }, { "docid": "e88f3b72d82b61be7bf465f225b21954", "score": "0.5081533", "text": "def generate_random_labeling(k, size):\n if size < k:\n raise ValueError('To have a labeling size cannot be lower than high')\n while True:\n a = np.random.randint(0, k, size)\n if len(np.unique(a)) == k:\n break\n return a", "title": "" 
}, { "docid": "07124896351c44e478e705c54a41acc9", "score": "0.5076374", "text": "def _get_graph_set(data: SequenceSample, n_trees_per_sequence: int = 1) -> List[List[nx.Graph]]:\n if n_trees_per_sequence < 1:\n raise ValueError(\"Number of trees per sequence must >= 1.\")\n\n if n_trees_per_sequence > 1:\n raise NotImplementedError(\"Currently only one tree per sequence allowed.\")\n\n return [[get_random_spanning_tree(y_seq, random_state=tree) for y_seq in data]\n for tree in range(n_trees_per_sequence)]", "title": "" }, { "docid": "b1d3b9b4275c028ee579d9b52f350fbc", "score": "0.5063796", "text": "def make_group():\n random.shuffle(trainingSet)\n tree = trainingSet[0:treeSize]\n return tree", "title": "" }, { "docid": "d610fb182792a8feac9a421f2d38ee39", "score": "0.50616485", "text": "def train_and_test(self, split=.5, subsample=1, randomize=True, show=False):\n random.shuffle(self.data)\n s = int(len(self.data) * split)\n train_set = self.data[:s]\n self.data = self.data[s:] # Remove train set from further testing\n\n self.detector.set_max_features(40)\n self.train(train_set, show=show)\n self.color_info()\n\n self.detector.set_max_features(900)\n self.evaluate(show=show)\n self.feature_selection()", "title": "" }, { "docid": "9721b6eec6b35afbe9617e897c7a18b0", "score": "0.50601983", "text": "def selection_ranking(pop_fitness):\n n = len(pop_fitness)\n total = n * (n + 1) / 2\n p1 = sorted(pop_fitness, key=lambda x: x[1])\n p = [i[0] for i in p1]\n w = [(i + 1) / total for i in range(len(p1))]\n op = (random.choices(p, weights=w))[0]\n return op", "title": "" }, { "docid": "c1a21f802625653479e7c72da5004098", "score": "0.50578535", "text": "def random_start(self):\n sample = list(rn.choice(self.features, rn.randint(0, len(self.features))))\n # sample = list(rn.choice(self.features, 2))\n return sample", "title": "" }, { "docid": "59b5dd9fbd2e3c41709b173172c0eccb", "score": "0.505375", "text": "def present_k_regular_graphs() -> None:\n vertices = int(input('\\nNumber of vertices: '))\n k = int(input('Put k-parameter: '))\n randomizations = int(input(\"Put number of randomizations: \"))\n\n G = Graph()\n G.create_k_regular_with_n_vertices(k, vertices)\n randomize(G, randomizations)\n \n GraphPlotter.plot_graph(G)", "title": "" }, { "docid": "aa5134c2c7a40357a6f9aea91513286c", "score": "0.5053582", "text": "def generate_k_folds(dataset, k):\n # dataset[0] is numpy array and [1] is a list\n examples = dataset[0]\n classes = np.array(dataset[1]).reshape(len(dataset[1]),1)\n dataset = np.concatenate((examples, classes), axis = 1)\n np.random.shuffle(dataset) # this shuffles in place\n \n subset_size = int(examples.shape[0]/k)\n subsets = []\n \n for i in range(k):\n sub_start = i*subset_size\n sub_end = sub_start + subset_size\n \n cur_dataset = np.concatenate((dataset[:sub_start, :], dataset[sub_end:, :]), axis = 0)\n train_examples = cur_dataset[:,:-1]\n train_classes = cur_dataset[:,-1]\n test_examples = dataset[sub_start:sub_end,:-1]\n test_classes = dataset[sub_start:sub_end,-1]\n \n subsets.append([(train_examples, train_classes),(test_examples, test_classes)])\n return subsets\n raise NotImplemented()", "title": "" }, { "docid": "0fcaa88673c3b5c9c4a15e51eabdd612", "score": "0.5047706", "text": "def tournament_selection(population, mating_pool_size, tournament_size):\n selected = []\n current_parent = 0\n while (current_parent < mating_pool_size):\n # Tournament is a list of indices\n tournament = []\n while (len(tournament) < tournament_size):\n competitor = randint(0, len(population) - 
1)\n tournament.append(competitor)\n winner = tournament[0]\n for i in tournament:\n if (population[i].get_fitness() < population[winner].get_fitness()):\n winner = i\n # Mating pool is a list of route objects\n selected.append(population[winner])\n current_parent = current_parent + 1\n return selected", "title": "" }, { "docid": "71ba7c4620927f60bab6774156f6a5f0", "score": "0.5037383", "text": "def random_feature_subsets(array, batch_size, random_state):\n random_state = check_random_state(random_state)\n features = list(range(array.shape[1]))\n random_state.shuffle(features)\n for batch in gen_batches(len(features), batch_size):\n yield features[batch]", "title": "" }, { "docid": "c8434722b3eb025a7583552202072cc4", "score": "0.50371903", "text": "def get_dpa_graph(number_total_nodes, number_init_nodes):\r\n graph_pool = dpat(number_init_nodes)\r\n output_graph = pj1.make_complete_graph(number_init_nodes)\r\n for new_node in range(number_init_nodes, number_total_nodes):\r\n neighbor_nodes = graph_pool.run_trial(number_init_nodes)\r\n output_graph[new_node] = neighbor_nodes\r\n \r\n return output_graph", "title": "" }, { "docid": "11a82c10e189d045eac14446d93a26ec", "score": "0.5035132", "text": "def selectSamples(self, data):\n train_x, train_y, valid_x, valid_y = [], [], [], []\n for category in data:\n filenames, index = data[category][\"filenames\"], data[category][\"index\"]\n np.random.shuffle(filenames)\n nb_valid = int(len(filenames) * self.params[\"valid_split\"])\n train, valid = filenames[nb_valid:], filenames[:nb_valid]\n\n train_x.extend(train[:self.params[\"nb_per_class\"]])\n train_y.extend([index] * self.params[\"nb_per_class\"])\n valid_x.extend(valid)\n valid_y.extend([index] * nb_valid)\n\n train_x, train_y = shuffle(train_x, train_y)\n valid_x, valid_y = shuffle(valid_x, valid_y)\n\n train_y = np_utils.to_categorical(train_y, self.nb_class)\n valid_y = np_utils.to_categorical(valid_y, self.nb_class)\n\n return train_x, train_y, valid_x, valid_y", "title": "" }, { "docid": "666974ed0f701a2b5173c9015d958f57", "score": "0.5033903", "text": "def tournament_select(fitness, ts_parameter, ts_size):\n pop_size = len(fitness)\n participants = []\n for _ in range(ts_size):\n i = np.random.choice(pop_size)\n participants.append(i)\n\n indices = np.argsort(participants)\n\n i_selected = -1\n i_count = 0\n while i_selected==-1:\n \n r = np.random.random()\n if r < ts_parameter or i_count == len(participants)-1:\n i_selected = indices[i_count];\n return i_selected\n i_count +=1\n return i_selected", "title": "" }, { "docid": "75a571745ca9521c3cbdf0cab137d706", "score": "0.50324434", "text": "def chooseRandActivity():\n \n randn = random.random();\n n = np.size(ACT_LIST);\n idx = np.int(randn * n); #????????\n return ACT_LIST[idx];", "title": "" }, { "docid": "3dded3f6bca116efbe89bbbb3558cdd7", "score": "0.5029614", "text": "def sub_sample(data_set_x, data_set_y, subsampling):\n\n len_data = len(data_set_x)\n reshuf_index_data = np.random.permutation(len_data)\n new_len_data = int(len_data / subsampling)\n\n data_set_x = data_set_x[reshuf_index_data[:new_len_data]]\n data_set_y = data_set_y[reshuf_index_data[:new_len_data]]\n\n return data_set_x, data_set_y", "title": "" }, { "docid": "857ff641b7fbd49deba686b8d7207c1e", "score": "0.5028643", "text": "def _resample(\n dataset_size: int,\n num_train: int,\n num_samples: int,\n) -> List[Tuple[List[int], List[int]]]:\n samples = []\n for _ in range(num_samples):\n train = np.random.choice(\n range(dataset_size), size=num_train, 
replace=True)\n test = utils.shuffle([k for k in range(dataset_size) if k not in set(train)\n ])\n samples.append((train, test))\n return samples", "title": "" }, { "docid": "b152b6cba43e1c30d57c7b9ef4f7b5a4", "score": "0.5026812", "text": "def subset_train(self,n_users=10,n_items=10):\n sess_df=self.data.reset_index()\n items_sample=np.random.choice(a=sess_df['ItemID'],size=n_items,replace=False)\n new_train=sess_df[sess_df['ItemID'].isin(items_sample)].copy()\n\n users_sample = np.random.choice(a=new_train['UserID'], size=n_users, replace=False)\n new_train=new_train[new_train['UserID'].isin(users_sample)].copy()\n\n u = new_train['UserID'].unique()\n new_user_idx=dict(zip(u,np.arange(len(u))))\n new_train['UserID']=new_train['UserID'].map(new_user_idx)\n self.users=np.arange(len(u))\n\n i=new_train['ItemID'].unique()\n new_item_idx = dict(zip(i, np.arange(len(i))))\n new_train['ItemID']=new_train['ItemID'].map(new_item_idx)\n self.items=np.arange(len(i))\n\n self.data=new_train.set_index('UserID')\n return new_train", "title": "" } ]
855c0dc61c20b3eea86003e65a55f2b9
This function returns a list of document ids that are in the indicated project(s) (and of all children projects if cascade specified)
[ { "docid": "177f5f28c9e2b91a10429b2f13e6d247", "score": "0.7152817", "text": "def get_projs_docs(self, proj_ids, cascade = False):\n # Some initial checks/tweaks\n if not isinstance(proj_ids, list):\n proj_ids = [proj_ids]\n\n # Add any children projects if specified\n if cascade:\n all_proj_ids = []\n for proj_id in proj_ids:\n all_proj_ids = all_proj_ids + [proj_id] + self.get_proj_children(proj_id, include_x_children=99)\n proj_ids = list(set(all_proj_ids))\n\n # Return list of all documents associated with any of these projects\n all_docs = self.get_table(\"Doc_Proj\")\n proj_docs = all_docs[all_docs.proj_id.isin(proj_ids)].doc_id.values.tolist()\n proj_docs = list(set(proj_docs)) # Removing duplicates\n return proj_docs", "title": "" } ]
[ { "docid": "a1ffa0e9bd95603c45f2bfd83bfa0f87", "score": "0.68752444", "text": "def get_projects_ids():\n\n projects = load_data('projects.json')\n\n project_ids = []\n for project in projects:\n project_ids.append(project['id'])\n\n return project_ids", "title": "" }, { "docid": "a836fd15e2279ecdefeb639c22988e4a", "score": "0.67752653", "text": "def get_bound_project_ids(cls, context, obj_id):", "title": "" }, { "docid": "83db5e3dc78f2119a996d1f3eb2a5149", "score": "0.67400974", "text": "def fetch_projects(client):\n return set([p.id for p in client.all_projects(fields='id')])", "title": "" }, { "docid": "eb97e6bce75cee250aeec5a1bef8c252", "score": "0.66743606", "text": "def projects():\n\n # Domain is mapped from the certificate DN\n domain = pecan.request.domain_id\n identity = Clients.identity()\n\n # Get a domain filtered list of projects and return the IDs\n # Todo: needs extending to support nested domains\n projects = identity.projects.list(domain=domain)\n return [x.id for x in projects]", "title": "" }, { "docid": "3d0af275d2b2788821fcbe58d4221f36", "score": "0.6285495", "text": "def get_projects_from_postgres():\n\n pg_db = auth.postgresDB()\n sql_query = \"\"\"\n SELECT project_id from projects;\n \"\"\"\n raw_ids = pg_db.retr_query(sql_query, None)\n project_ids = [i[0] for i in raw_ids]\n\n del pg_db\n return project_ids", "title": "" }, { "docid": "d73b83055d6069c9c5ebdcd192d7cb51", "score": "0.62634045", "text": "def get_project_list(self):\n project_list = _DB_MAP[self.section]['project_sql'] \n return self.get_result(project_list)", "title": "" }, { "docid": "a8e67251281520f529735ebae7b78f85", "score": "0.61939263", "text": "def keystone_listprojects(self, msg, args):\n output = {}\n for proj in self.tenantlist:\n output[proj.name] = proj.id\n\n return output", "title": "" }, { "docid": "8a9a8bd20db6f3c801458eb98a23385a", "score": "0.6134982", "text": "def projects(cls):\n sql = \"SELECT project_id FROM barcodes.project ORDER BY project\"\n with pm.sql.TRN:\n pm.sql.TRN.add(sql)\n return [cls(p) for p in pm.sql.TRN.execute_fetchflatten()]", "title": "" }, { "docid": "12e3ce3cccaa0679714c0dcf69f5ee97", "score": "0.6112107", "text": "def get_projects_and_ids(session, url):\n\n read_url = json.loads(session.get(url+\"projects/\").content.decode('utf-8'))\n raw_projects = read_url['_embedded']['elements']\n parsed_projects = {elem['name']:elem['id'] for elem in raw_projects}\n\n return parsed_projects", "title": "" }, { "docid": "3c18a80b3cacae224b78cfe882020dda", "score": "0.60692126", "text": "def get_projects(self): # pylint: disable=no-self-use\n return [elem[0] for elem in rstweb_sql.get_all_projects()]", "title": "" }, { "docid": "bc6f38a8290d94533dfad8c39fcbce82", "score": "0.60142434", "text": "def seek_sparql_projects():\n projects = {}\n g = open_sparql_store()\n p_sparql_query = (\n \"prefix jerm: <http://jermontology.org/ontology/JERMOntology#>\" +\n \"select distinct ?projectid ?projects where {\" +\n \"?projectid jerm:title ?projects;\" +\n \"rdf:type jerm:Project\" +\n \"}\"\n )\n for row in g.query(p_sparql_query):\n projects[row[0].split(\"/\")[-1]] = row[1]\n return projects", "title": "" }, { "docid": "59282f946ca01af1ed4a5392d46fa348", "score": "0.59835714", "text": "def getProjectList(user=None):\n return list(set(AbstractCollection(user=user).getProjectList()))", "title": "" }, { "docid": "5abef5141f864cb67f3556b9712b5aff", "score": "0.5979772", "text": "def repos_ids(self) -> List[Tuple[str, str]]:\n projs = self.projects()\n return [(el[\"name\"], 
el[\"id\"]) for el in projs]", "title": "" }, { "docid": "69c82833a8da2f1892ad24395c856c95", "score": "0.5966507", "text": "def GetAllIssueIDsInProject(self, project_name, demetrius_persist):\n return self.project_indexes.get(project_name, {}).keys()\n #if(project_issues is None):\n # return []\n #else:\n # return project_issues.keys()", "title": "" }, { "docid": "67880df5de2867640aefaac7f1a0c64e", "score": "0.59542394", "text": "def subprojects(self, project):\n return self.config[project].get('subprojects', [])", "title": "" }, { "docid": "f8905bc2f72617579b45af9f49aa7605", "score": "0.59174716", "text": "def list_projects(self) -> List[PublicId]:\n return list(self._projects.keys())", "title": "" }, { "docid": "17fc957fdf8d9888de1860251e42b824", "score": "0.5882667", "text": "def get_project_ids_from_scratch(user):\n url = 'http://scratch.mit.edu/users/%s/projects/' % user\n r = requests.get(url)\n soup = BeautifulSoup(r.text)\n ids = []\n\n for link in soup.find_all('a'):\n link = link.get('href')\n if link:\n if 'projects' in link and 'editor' not in link:\n link = link.split(\"/\")\n ids.append(link[2])\n \n ids = set(ids)\n return ids", "title": "" }, { "docid": "f94dfd9e3f5cccb93d0f3b60112fcf8a", "score": "0.5841881", "text": "def get_project(self, project_name): # pylint: disable=no-self-use\n return {'documents': get_all_docs('local', project_name)}", "title": "" }, { "docid": "bba79f123a05edd6595d1f9401284574", "score": "0.57602257", "text": "def retrieve_naver_projects(projects_list: list):\n retrieved_projects = Project.objects.filter(id__in=projects_list)\n return retrieved_projects", "title": "" }, { "docid": "c043ef8b0fec396f934b939da23da0c6", "score": "0.57406825", "text": "def search_project(self):\n unique_project_list = []\n self.__auto_login(USER_NAME, PASSWORD)\n info_print('[@_@] Searching projects hard...')\n\n # Get unique project list of first page searched results\n total_progress = SCAN_DEEP[SEARCH_LEVEL - 1]\n query_string = self.keyword + \" -language:\" + \" -language:\".join(LANG_BLACKLIST)\n for i in xrange(total_progress):\n # Print process of searching project\n progress_point = int((i + 1) * (100 / total_progress))\n sys.stdout.write(str(progress_point) + '%|' + '#' * progress_point + '|\\r')\n sys.stdout.flush()\n # Search project in each page\n code_url = self.search_url.format(page=1, keyword=quote(query_string))\n page_html_parse = self.__get_page_html(code_url)\n project_list = self.__page_project_list(page_html_parse) # Project list of per result page\n page_project_num, project_list = len(project_list), list(set(project_list))\n unique_project_list.extend(project_list) # Extend unique project list of per page\n if page_project_num < MAX_RLT_PER_PAGE:\n break\n project = \" -repo:\" + \" -repo:\".join(project_list)\n query_string += project\n # Deal with last progress bar stdout\n sys.stdout.write('100%|' + '#' * 100 + '|\\r')\n sys.stdout.flush()\n return unique_project_list", "title": "" }, { "docid": "024b136bbcbc6a290ff570e8ddd960f0", "score": "0.5727458", "text": "def get_all_docs(user, project):\n return [elem[0] for elem in sql(\n \"SELECT doc FROM docs WHERE user=? 
AND project=?\", (user, project))]", "title": "" }, { "docid": "735200754294b85ca6757cec5fc2d764", "score": "0.57094187", "text": "def get_documents(self, project_name=None): # pylint: disable=no-self-use\n if project_name is None:\n all_documents = sql(\n \"SELECT doc, project FROM docs WHERE user=?\", ('local',))\n docs_dict = defaultdict(list)\n for file_name, project in all_documents:\n docs_dict[project].append(file_name)\n return {'documents': docs_dict}\n return get_all_docs('local', project_name)", "title": "" }, { "docid": "6d86d766264ed0f05815d82600687546", "score": "0.56724894", "text": "def documentids(self):\n return list(self._documents)", "title": "" }, { "docid": "e4252734b769dca45205f35764de91fc", "score": "0.5672013", "text": "def sections_of_project(self, project_id):\r\n return [x for x in self.active_sections() if x[\"project_id\"] == project_id]", "title": "" }, { "docid": "b4eaeb5ad41df08763cfabdd6ed88723", "score": "0.5664445", "text": "def doc_id_list(query_id, qrels):\n true_doc_IDs = []\n for query_dict in qrels:\n if int(query_dict['query_num']) == query_id:\n true_doc_IDs.append(int(query_dict['id']))\n return true_doc_IDs", "title": "" }, { "docid": "57ff9cdd71c28292a93a09476003223f", "score": "0.56367975", "text": "def _get_jira_project_keys():\n res = _make_jira_api_call('/project') or []\n return [proj['key'] for proj in res]", "title": "" }, { "docid": "830f31b9a405718f2d098a005a659a61", "score": "0.5624171", "text": "def get_pro_list(data):\n project_list = []\n for pro in data[1:]:\n for key in pro.keys():\n if 'proj_' in key:\n project_list.append(key)\n\n return project_list", "title": "" }, { "docid": "226fda0fae82620c8cf60edd487cd84d", "score": "0.55619574", "text": "def projects(self):\n path = '/project/list'\n return self._request(path)", "title": "" }, { "docid": "ebb17befb1a7b92ca5dfca3af3c55ceb", "score": "0.5549297", "text": "def list_project_keys(self):\n return [x[\"projectKey\"] for x in self._perform_json(\"GET\", \"/projects/\")]", "title": "" }, { "docid": "5e7dcad0345707615bfd6ee4282eac07", "score": "0.55268097", "text": "def fetch_students(project=None):\n projection = {\"_id\": 0}\n if project:\n if isinstance(project, list):\n for attr in project:\n projection[attr] = 1\n else:\n projection[project] = 1\n\n cur = db[\"students\"].find({}, projection=projection)\n\n # Check if collection is empty\n if cur.count() == 0:\n return None\n\n return list(cur)", "title": "" }, { "docid": "7d603c29aa3bc9d961162fb9bf2757e8", "score": "0.55242", "text": "def getProjects(self):", "title": "" }, { "docid": "b708909982e663819bba658da2e7b4f6", "score": "0.551904", "text": "def projects():", "title": "" }, { "docid": "379f1f1260c8e0a3c15a4e4390b869c6", "score": "0.5498993", "text": "def get_document_list(self, project_id: int):\n response = self.get(f\"{self._url}/api/projects/{project_id}/docs\")\n if response.status_code != 200:\n raise Exception(\"get_document_list: Fail to fetch document list.\")\n return response", "title": "" }, { "docid": "875c611bcfb997f94089469d047e8a3e", "score": "0.5476747", "text": "def projects(self) -> List[Tuple[str, str]]:\n wspaces = self.workspaces()\n loop = asyncio.get_event_loop()\n tasks = [self.aget(f\"{ws}/projects\", headers=self.headers)\n for ws in wspaces]\n projs = loop.run_until_complete(asyncio.gather(*tasks))\n\n return [(el.get(\"name\"), el.get(\"url\"))\n for proj in projs for el in proj.get(\"projects\")]", "title": "" }, { "docid": "e9f4d806ca043a1941545c4d904781d2", "score": "0.54708374", 
"text": "def executeNestedQuery(self, idMap):\n result_map = {}\n for key in idMap:\n query = {\n \"ids\": idMap[key]\n }\n resp = self.es.mget(index=self.project_name, body=query, doc_type=self.project_root_name)\n if resp is not None and len(resp['docs']) > 0:\n for json_doc in resp['docs']:\n result_map[json_doc['_source']['doc_id']] = json_doc\n return result_map", "title": "" }, { "docid": "68e6e4f7946a4ea06cf9b4352475e557", "score": "0.5454167", "text": "def projects(request):\n return {\n \"projects\": Project.objects.filter(\n Q(members=request.user) | Q(creator=request.user)\n ).distinct()\n if request.user.is_authenticated\n else []\n }", "title": "" }, { "docid": "2a9c62b29d75c24ebb896882222616f4", "score": "0.5428075", "text": "def get_projects(self):\n return self.projects", "title": "" }, { "docid": "517f44bf7c1ac26b2b80239fe440fc09", "score": "0.5427207", "text": "def projects(self) -> List[Tuple[str, str, int]]:\n response = requests.get(f\"{self._url}/user/repos\",\n headers=self.headers)\n repos = response.json()\n actives = filter(lambda d: d[\"active\"] is True, repos)\n # projs = [(el[\"name\"], el[\"namespace\"], el[\"counter\"])\n projs = [(el[\"name\"], el[\"slug\"], el[\"counter\"])\n for el in actives]\n return projs", "title": "" }, { "docid": "88b8e41deb43d47202e8f91945408e6c", "score": "0.5423634", "text": "def get_shared_projects(self, **kwargs):\n endpoint = '/projects/shared'\n return self._api_call('get', endpoint, kwargs)", "title": "" }, { "docid": "98d93fccb1f472c587bcf5e40a8228b3", "score": "0.54174846", "text": "def projects(self) -> List[Dict[str, Any]]:\n q = requests.get(f\"{self._url}/users/{self.username}/projects\",\n headers=self.headers)\n return q.json()", "title": "" }, { "docid": "bdd0b355d57a448b956c50a07403d59c", "score": "0.5416086", "text": "def GetAllProjectsOfIssues(issues):\n project_ids = set()\n for issue in issues:\n project_ids.add(issue.project_id)\n return project_ids", "title": "" }, { "docid": "13c316d33bf7d0490c743af750926a4d", "score": "0.5402193", "text": "def get_projects(config):\n projects = {}\n if \"project\" not in config:\n return projects\n for project in config[\"project\"]:\n if not all(key in project for key in FIELDS):\n raise KeyError(\n f\"A key from {sorted(FIELDS)!r} is missing from {sorted(project.keys())!r}\"\n )\n projects[project[\"name\"]] = project\n return projects", "title": "" }, { "docid": "8a01be36d54c6d95d673bb4b02fa6df6", "score": "0.54010904", "text": "def projects(self) -> tuple:\n if self._projects is None:\n tags = self.data['documentation'].get('projects', [])\n self._projects = tuple(Project.from_tag(tag) for tag in tags)\n return self._projects", "title": "" }, { "docid": "d6fc596196659bf26c2b0fbc1de37223", "score": "0.53795177", "text": "def listprojects():\n print '\\n'.join(filter_tags('PROJECT', todo.DefaultTodoList().get_all_tags() ))", "title": "" }, { "docid": "519e0a3735a32541e3dd5d2be8741448", "score": "0.5377016", "text": "def get_task_list(data, proj):\n task_list = []\n for project in data[1:]:\n if proj in project.keys():\n task_list = project[proj]\n return task_list", "title": "" }, { "docid": "f29842ea70ad36674afeeb2302e5cb10", "score": "0.5372654", "text": "def getStorageProjects(self) -> List[str]:\n\n # get the set of all storage Synapse project accessible for this pipeline\n storageProjects = self.storageFileviewTable[\"projectId\"].unique()\n\n # get the set of storage Synapse project accessible for this user\n\n # get current user name and user ID\n currentUser = 
self.syn.getUserProfile()\n currentUserName = currentUser.userName\n currentUserId = currentUser.ownerId\n\n # get a list of projects from Synapse\n currentUserProjects = self.getPaginatedRestResults(currentUserId)\n\n # prune results json filtering project id\n currentUserProjects = [\n currentUserProject.get(\"id\")\n for currentUserProject in currentUserProjects[\"results\"]\n ]\n\n # find set of user projects that are also in this pipeline's storage projects set\n storageProjects = list(set(storageProjects) & set(currentUserProjects))\n\n # prepare a return list of project IDs and names\n projects = []\n for projectId in storageProjects:\n projectName = self.syn.get(projectId, downloadFile=False).name\n projects.append((projectId, projectName))\n\n sorted_projects_list = sorted(projects, key=lambda tup: tup[0])\n\n return sorted_projects_list", "title": "" }, { "docid": "8f0414ce67fb825656e2041b139e78c6", "score": "0.53557044", "text": "def get_ids(self):\n for object_query in self.query:\n object_query[\"ids\"] = self._get_object_ids(object_query)\n return self.query", "title": "" }, { "docid": "507193271f28b5db26f05c446851aaa8", "score": "0.5350602", "text": "def get_list_of_projects(self):\n config = self.get_config()\n projects = []\n projects_dir = self.get_full_path(config[\"projects-base-dir\"])\n if os.path.isdir(projects_dir):\n terminal = self.get_terminal()\n terminal.output(\"Scan the projects directory: \" + projects_dir, terminal.VERBOSITY_DEBUG)\n for project in os.listdir(projects_dir):\n if os.path.isdir(projects_dir + \"/\" + project):\n terminal.output(\" - \" + project, terminal.VERBOSITY_DEBUG)\n projects.append(project)\n\n return projects", "title": "" }, { "docid": "39f47e6c002ba8a6bbd1414b01e15397", "score": "0.53499323", "text": "def deleted_projects(self):\r\n return [x for x in self.state[\"projects\"] if x[\"is_deleted\"]]", "title": "" }, { "docid": "a0a836332688d5dc924f053538108ef6", "score": "0.5344564", "text": "def get_projects_with_firewall_groups(self, context, **kwargs):\n ctx = neutron_context.get_admin_context()\n fwg_list = self.firewall_db.get_firewall_groups(ctx)\n fwg_project_list = list(set(fwg['tenant_id'] for fwg in fwg_list))\n return fwg_project_list", "title": "" }, { "docid": "91f9ae4dd8f6102d147986a1e422a8d6", "score": "0.534051", "text": "def getIds (repo, collection=None, xmlFormat=None):\n\tresults = IDSearcher(collection, xmlFormat, repo.baseUrl)\n\tids = map (lambda x:x.recId, results)\n\tids.sort()\n\treturn ids", "title": "" }, { "docid": "fbb94f9a9644b9bbb5c1505fe6dfc5ff", "score": "0.53299975", "text": "def get_confirmed_project_ids_for_cohorts(cohort_id_array):\n cohort_vals = ()\n cohort_params = \"\"\n for cohort in cohort_id_array:\n cohort_params += \"%s,\"\n cohort_vals += (cohort,)\n cohort_params = cohort_params[:-1]\n db = get_sql_connection()\n cursor = db.cursor()\n\n tcga_studies = fetch_isbcgc_project_set()\n\n cursor.execute(\"SELECT DISTINCT project_id FROM cohorts_samples WHERE cohort_id IN (\" + cohort_params + \");\",\n cohort_vals)\n\n # Only samples whose source studies are TCGA studies, or extended from them, should be used\n confirmed_study_ids = []\n unconfirmed_study_ids = []\n\n for row in cursor.fetchall():\n if row[0] in tcga_studies:\n if row[0] not in confirmed_study_ids:\n confirmed_study_ids.append(row[0])\n elif row[0] not in unconfirmed_study_ids:\n unconfirmed_study_ids.append(row[0])\n\n if len(unconfirmed_study_ids) > 0:\n projects = Project.objects.filter(id__in=unconfirmed_study_ids)\n\n 
for project in projects:\n if project.get_my_root_and_depth()['root'] in tcga_studies:\n confirmed_study_ids.append(project.id)\n return confirmed_study_ids", "title": "" }, { "docid": "29ce8595c8835c21ea06ac34604029a1", "score": "0.5316267", "text": "def get_projects(request):\n filters = {\n 'owner': request.user\n }\n exclusions = {}\n parent_project = None\n\n libraries_for_project = int(request.GET['libraries']) if 'libraries' in request.GET else None\n if libraries_for_project:\n filters['project_type'] = 'package'\n parent_project = get_object_or_404(Project, pk=libraries_for_project, owner=request.user)\n parent_project_dependencies = parent_project.project_dependencies.all()\n exclusions['pk'] = libraries_for_project\n\n projects = Project.objects.filter(**filters).exclude(**exclusions)\n\n def process_project(project):\n data = {\n 'name': project.name,\n 'package_name': project.npm_name,\n 'id': project.id,\n 'app_version_label': project.app_version_label,\n 'latest_successful_build': None\n }\n try:\n data['latest_successful_build'] = str(BuildResult.objects.filter(project=project, state=BuildResult.STATE_SUCCEEDED).latest('id').finished)\n except BuildResult.DoesNotExist:\n pass\n if parent_project:\n data['depended_on'] = project in parent_project_dependencies\n return data\n\n return {\n 'projects': [process_project(project) for project in projects]\n }", "title": "" }, { "docid": "ec2f1a1f7963e0d1c9fbff64f52033a3", "score": "0.5314048", "text": "def get_all_projects_of_user():\n json_request = request.get_json()\n print json_request\n data = json_request[\"data\"]\n user_id = data[\"user_id\"]\n message = {\"projects\": pc.get_all_projects_of_user(user_id)}\n resp = binary_response_builder(success, message)\n return resp", "title": "" }, { "docid": "f2d2709bafc00accc61c615f39b5808e", "score": "0.5310159", "text": "def id_documents(self):\n return self.__id_documents", "title": "" }, { "docid": "c9f9a499a58e6b81f01fada9eac53045", "score": "0.5306853", "text": "def get_queryset(self):\n office = self.request.user.profile.office\n groups = []\n for group in self.request.user.groups.all():\n groups.append(str(group))\n return Project.objects.filter(office=office).filter(completed__exact=False)", "title": "" }, { "docid": "89ee9df88acc46d61457655ce0f1a6f6", "score": "0.5297995", "text": "def get_projects(event, context):\n\n cla.log.debug('event: {}'.format(event))\n\n try:\n auth_user = cla.auth.authenticate_user(event.get('headers'))\n except cla.auth.AuthError as e:\n cla.log.error('Authorization error: {}'.format(e))\n return format_json_cors_response(401, 'Error parsing Bearer token')\n except Exception as e:\n cla.log.error('Unknown authorization error: {}'.format(e))\n return format_json_cors_response(401, 'Error parsing Bearer token')\n\n # Get project access list for user\n user_permissions = UserPermissions()\n try:\n user_permissions.load(auth_user.username)\n except Exception as e:\n cla.log.error('Error invalid username: {}. 
error: {}'.format(auth_user.username, e))\n return format_json_cors_response(400, 'Error invalid username')\n\n user_permissions = user_permissions.to_dict()\n\n authorized_projects = user_permissions.get('projects')\n if authorized_projects is None:\n cla.log.error('Error user not authorized to access projects: {}'.format(user_permissions))\n return format_json_cors_response(403, 'Error user not authorized to access projects')\n\n project_list = ', '.join('\\'' + project_id + '\\'' for project_id in authorized_projects)\n\n oauth_response = get_sf_oauth_access()\n if oauth_response is None:\n cla.log.error('Unable to acquire oauth token.')\n return format_json_cors_response(400, 'authentication error')\n\n token = oauth_response['access_token']\n instance_url = oauth_response['instance_url']\n\n headers = {\n 'Authorization': 'Bearer {}'.format(token),\n 'Content-Type': 'application/json',\n }\n\n query_url = '{}/services/data/v20.0/query/'.format(instance_url)\n query = {'q': 'SELECT id, Name, Description__c from Project__c WHERE id IN ({})'.format(project_list)}\n r = requests.get(query_url, headers=headers, params=query)\n\n response = r.json()\n status_code = r.status_code\n if status_code != HTTPStatus.OK:\n cla.log.error('Error retrieving projects: %s', response[0].get('message'))\n return format_json_cors_response(status_code, 'Error retrieving projects')\n records = response.get('records')\n\n projects = []\n for project in records:\n logo_url = None\n project_id = project.get('Id')\n if project_id:\n logo_url = '{}/{}.png'.format(cla_logo_url, project_id)\n\n projects.append({\n 'name': project.get('Name'),\n 'id': project_id,\n 'description': project.get('Description__c'),\n 'logoUrl': logo_url\n })\n\n return format_json_cors_response(status_code, projects)", "title": "" }, { "docid": "55bda47175e6520294ada14c8dbc4e96", "score": "0.52893174", "text": "def projects(self):\n return self.config.keys()", "title": "" }, { "docid": "b3cfbded36e68ae7b5df28b85d46adb8", "score": "0.5287872", "text": "def test_getPageProjectsFromId(self):\n return\n pageProjectsFromId = CdbDictPageProjectsFromId(\"cdb/pageProjectsFromId.cdb\")\n for i in self.info.pageFromId:\n expected = self.info.pageFromId[i]\n result = pageProjectsFromId[i]\n #result = self.reader.getPageProjectsFromId(i)\n print \"projects:\", result\n self.assertEqual(result, expected)\n #self.assertTrue(False)", "title": "" }, { "docid": "6589a98308dd57194274dab8f1c169b0", "score": "0.528758", "text": "def __call__(self, concept_ids):\n df = self.concept_ancestor[self.concept_ancestor['ancestor_concept_id'].isin(concept_ids)]\n return df.descendant_concept_id.unique().compute().tolist()", "title": "" }, { "docid": "135717fdffcaff54e00832393976f561", "score": "0.5262662", "text": "def items_by_project(self, project_id: int) -> [Item]:\r\n return [x for x in self.active_items() if x.project_id == project_id]", "title": "" }, { "docid": "d4ba0cd404a047e4bace0835b74eef56", "score": "0.5261947", "text": "def find_all(self, params={}, **options):\n return self.client.get_collection(\"/projects\", params, **options)", "title": "" }, { "docid": "f112dea1caa6b17d556bbaa6d5f35fc9", "score": "0.52602965", "text": "def retrieve_all_projects(query_params_filters: dict, user_id: int):\n retrieved_projects_list = Project.objects.filter(created_by_user=user_id, **query_params_filters)\n\n return retrieved_projects_list", "title": "" }, { "docid": "a39dc894eec68d00946b7db84ed34304", "score": "0.5259005", "text": "def 
getProjects(cursor):\n\twp_list = []\n\tcursor.execute('''select cat_title, proj_page from th_wikiproject_categories WHERE cat_subcats > 0 AND cat_pageid IS NOT NULL AND cat_parent IS NULL AND proj_featured = 1''')\n\trows = cursor.fetchall()\n\tfor row in rows:\n\t\tcat = row[0]\n\t\tproj = row[1]\n\t\twp_list.append((cat, proj))\n\n\treturn wp_list", "title": "" }, { "docid": "00424b1610c94271924729221eef8439", "score": "0.5258671", "text": "def get_nested_ids(self):\n # $graphLookup hits 100Mb memory limit. Do not use it\n seen = {self.id}\n wave = {self.id}\n max_level = 4\n coll = Object._get_collection()\n for _ in range(max_level):\n # Get next wave\n wave = (\n set(d[\"_id\"] for d in coll.find({\"container\": {\"$in\": list(wave)}}, {\"_id\": 1}))\n - seen\n )\n if not wave:\n break\n seen |= wave\n return list(seen)", "title": "" }, { "docid": "62c3224b6ed8414903f173f721dd6d56", "score": "0.5250648", "text": "def projects(self) -> List[Dict[str, Any]]:\n response = requests.get(f\"{self._url}/projects?\", headers=self.headers)\n return response.json()", "title": "" }, { "docid": "cba80d7f15d3b27b32e31eab063fd133", "score": "0.5249459", "text": "def doc_for_project_and_subprojects():\n run_gnatdoc(\"gnatdoc project recursive\")", "title": "" }, { "docid": "e3f08de2e0ec367785a7334005c2f767", "score": "0.524686", "text": "def GetAllConfigsOfProjects(cnxn, project_ids, services):\n config_dict = services.config.GetProjectConfigs(cnxn, project_ids)\n config_list = [config_dict[project_id] for project_id in project_ids]\n return config_list", "title": "" }, { "docid": "6b7597a751d2754604bbaee43bacd657", "score": "0.5237805", "text": "def get_jira_projets(self, jira, repo_list):\n repo_list = self._format_repo_list(repo_list)\n # Get all projects viewable\n projects = jira.projects()\n match_projects = []\n\n for project in projects:\n project_name = self._format_project_name(project.name)\n for repo in repo_list:\n if project_name.startswith(repo):\n match_projects.append(project)\n return match_projects", "title": "" }, { "docid": "fdd936ea663d8f3eb02942f4f668f8da", "score": "0.52371144", "text": "def list_all_projects(self):\r\n for p in self.state[\"projects\"]:\r\n print(p[\"id\"], p[\"name\"])", "title": "" }, { "docid": "d60a1b4946027f307a4708de42394948", "score": "0.52317524", "text": "def all_projects(user_id):\n project_dicts = list()\n incomp_project_dicts = list()\n user = User.query.filter(User.user_id == user_id).first()\n if user:\n projects = Project.query.filter(Project.fabric_id != None, Project.pattern_id != None, Project.due_at != None, Project.user_id == user_id).order_by(Project.project_id).all()\n for project in projects:\n project_dict = dict(\n project_id=project.project_id,\n name=project.name,\n fabric_image=project.fabric.image.url,\n pattern_image=project.pattern.image.url,\n status_images=[stat.image.url for stat in project.proj_stat])\n project_dicts.append(project_dict)\n incomp_projects = Project.query.filter(Project.due_at == None, Project.user_id == user_id).order_by(Project.project_id).all()\n for incomp_project in incomp_projects:\n incomp_project_dict = dict(\n project_id=incomp_project.project_id,\n name=incomp_project.name\n )\n if incomp_project.fabric:\n incomp_project_dict['fabric_image'] =incomp_project.fabric.image.url,\n if incomp_project.pattern:\n incomp_project_dict['pattern_image'] = incomp_project.pattern.image.url,\n incomp_project_dicts.append(incomp_project_dict)\n\n return render_template(\"WIPprojects.html\", 
projects=project_dicts, user_id=user_id, incomp_projects=incomp_project_dicts)\n else:\n abort(404)", "title": "" }, { "docid": "4a9d7e08aa8bc82e91f15c60f9c56bc6", "score": "0.5226435", "text": "def getProjectList(self):\n raise NotImplementedError()", "title": "" }, { "docid": "9dc3c839f79ceb67049b0b4b4792adfe", "score": "0.5222126", "text": "def active_projects(self):\r\n return [x for x in self.state[\"projects\"] if not (x[\"is_archived\"] or x[\"is_deleted\"])]", "title": "" }, { "docid": "8f77eb4a952f79b17d0b1fe3b3795159", "score": "0.5213424", "text": "def projects(self):\n r_json = self._get_json('project')\n projects = [Project(self._options, self._session, raw_project_json) for raw_project_json in r_json]\n return projects", "title": "" }, { "docid": "dd00943e968020760fd14658a268ddb4", "score": "0.5212151", "text": "def get_all_projects(self):\r\n users = [x for x in self.getall(self.get_paginated_resources,\r\n rpath='/projects/all',\r\n page=1,\r\n per_page=20)\r\n ]\r\n return sorted(users, key=lambda k: k['name'])", "title": "" }, { "docid": "92ed0516a4dadb020e30b00877f78634", "score": "0.5203418", "text": "def doc_ids(self):\n if self._doc_ids_present:\n return self._doc_ids_value\n else:\n raise AttributeError(\"missing required field 'doc_ids'\")", "title": "" }, { "docid": "876a2f80bc28455d16d2685d049e9cd8", "score": "0.5200441", "text": "def _list_projects(path):\n return [os.path.basename(fn)\n for fn in glob.glob(os.path.join(path, '*'))\n if os.path.isdir(fn)]", "title": "" }, { "docid": "63f50aaceea7ecd554372a0745098a54", "score": "0.518675", "text": "def build_order(self, projects):\n print projects\n res = []\n for i in projects:\n if i.in_count == 0:\n res.append(i)\n print \"res\", res\n i = 0\n while i < len(res):\n curr = res[i]\n print curr\n if not curr:\n return None\n for j in curr.out:\n j.in_count -= 1\n # because we know that before is already 0 just cont loop\n if j.in_count == 0:\n res.append(j)\n i += 1\n print res\n # runs O(d + p) p for proj, d for pair of dep\n return res", "title": "" }, { "docid": "d770b895c29cb901c8a20a7507925fc7", "score": "0.51835567", "text": "def GetIssuesByIDs(self, project_name, issue_id_list):\n\n if len(issue_id_list) == 0: return []\n result = []\n x = 0\n project_index = self.project_indexes.get(project_name)\n for issue_id in project_index.keys():\n if(issue_id in issue_id_list):\n result[x] = project_index.get(issue_id)\n x = x + 1\n return result", "title": "" }, { "docid": "fd4cd1007092fbfcc2dc9bbbfb444436", "score": "0.5180281", "text": "def get_viewable_projects(request):\n content_user = get_object_or_404(User, username__iexact=request.user.username)\n form = QuickConverter()\n data = {'form': form}\n content_user = request.user\n all_forms = content_user.xforms.count()\n xforms = XForm.objects.filter(user=content_user)\\\n .select_related('user', 'instances')\n user_xforms = xforms\n xfct = ContentType.objects.get(app_label='logger', model='xform')\n xfs = content_user.userobjectpermission_set.filter(content_type=xfct)\n shared_forms_pks = list(set([xf.object_pk for xf in xfs]))\n forms_shared_with = XForm.objects.filter(\n pk__in=shared_forms_pks).exclude(user=content_user)\\\n .select_related('user')\n published_or_shared = XForm.objects.filter(\n pk__in=shared_forms_pks).select_related('user')\n xforms_list = [\n {\n 'id': 'published',\n 'xforms': user_xforms,\n 'title': _(u\"Published Forms\"),\n 'small': _(\"Export, map, and view submissions.\")\n },\n {\n 'id': 'shared',\n 'xforms': forms_shared_with,\n 
'title': _(u\"Shared Forms\"),\n 'small': _(\"List of forms shared with you.\")\n },\n {\n 'id': 'published_or_shared',\n 'xforms': published_or_shared,\n 'title': _(u\"Published Forms\"),\n 'small': _(\"Export, map, and view submissions.\")\n }\n ]\n \n new_list = []\n for xform_list in xforms_list:\n if xform_list['xforms'] not in new_list:\n new_list.extend(xform_list['xforms'])\n xforms_list = list(set(new_list))\n return xforms_list", "title": "" }, { "docid": "4e7799f4659e884532fc3edc78e11072", "score": "0.51742274", "text": "def get_projects(self):\n url = self.c.base_url + '_ah/api/config/v1/projects'\n fetch_result = self._fetch(\n url, step_name='Get luci-config projects',\n headers=self._get_headers()\n )\n\n mapping = {}\n for project in json.loads(fetch_result)['projects']:\n # Unicode and str-s don't mix well\n mapping[str(project['id'])] = {str(k): str(v) for k, v in project.items()}\n return mapping", "title": "" }, { "docid": "15213a13d38ddc81f929b67cf3ef4135", "score": "0.51730365", "text": "def getDocuments(visitorID):\n documents = list()\n for i in range(0, len(userData)):\n if 'env_doc_id' in userData[i] and 'visitor_uuid' in userData[i]:\n if userData[i]['visitor_uuid'] == visitorID and userData[i]['env_doc_id'] not in documents:\n documents.append(userData[i]['env_doc_id'])\n return documents", "title": "" }, { "docid": "b99a38cb50cf8a584715a287b589e377", "score": "0.5161501", "text": "def list_ids_action(self):\n ids = []\n titles = []\n locales = []\n entries = self.doc_manager.get_all_entries()\n cwd = os.path.join(os.getcwd(), '')\n for entry in entries:\n if entry['file_name'].startswith(cwd.replace(self.path, '')):\n ids.append(entry['id'])\n relative_path = entry['file_name'].replace(cwd.replace(self.path, ''), '')\n titles.append(relative_path)\n try:\n locales.append(entry['locales'])\n except KeyError:\n locales.append(['none'])\n if not ids:\n print ('No local documents')\n return\n print ('Local documents: id, file name, locales')\n for i in range(len(ids)):\n info = '{id} \\t {title} \\t\\t {locales}'.format(id=ids[i], title=titles[i],\n locales=', '.join(locale for locale in locales[i]))\n print (info)", "title": "" }, { "docid": "0127710528f8fa71893f6c4890cd2115", "score": "0.515991", "text": "def get_all_documents():", "title": "" }, { "docid": "3d41113757021d9d38c8648bde1c8fac", "score": "0.5154496", "text": "def test_get_primary_ids(self):\n res = get_primary_ids('homo[orgn] AND myh7[ti]', retmax=5, max_recs=7)\n self.assertEqual(len(res), 7)\n res = get_primary_ids('homo[orgn] AND myh7[ti]', retmax=5, max_recs=2)\n self.assertEqual(len(res), 2)\n res = get_primary_ids('homo[orgn] AND myh7[ti]', retmax=100)\n assert '115496168' in res", "title": "" }, { "docid": "67f245e26eda79b00cc4eff792cc81a3", "score": "0.5151791", "text": "def corpus_filter_common_docs(df, id_prp='doc_id'):\n common_ids = list(get_overlap_ids(df, id_prp))\n fdf = df[df[id_prp].isin(common_ids)]\n return fdf", "title": "" }, { "docid": "4cb2909fea3ddb1b3ce8851741d62a11", "score": "0.51366585", "text": "def user_projects(request):\n projects = None\n if request.user.group is not None:\n projects = Project.objects.filter(group=request.user.group)\n return all_projects(request, projects)", "title": "" }, { "docid": "20e6de272715de396caf348b2a3a5bf1", "score": "0.51157016", "text": "def get_scirisdemo_projects():\n \n # Check (for security purposes) that the function is being called by the \n # correct endpoint, and if not, fail.\n if request.endpoint != 'normalProjectRPC':\n 
return {'error': 'Unauthorized RPC'}\n \n # Get the user UID for the _ScirisDemo user.\n user_id = user.get_scirisdemo_user()\n \n # Get the ProjectSO entries matching the _ScirisDemo user UID.\n projectEntries = theProjCollection.getProjectEntriesByUser(user_id)\n\n # Collect the project summaries for that user into a list.\n projectSummaryList = map(load_project_summary_from_project_record, \n projectEntries)\n \n # Sort the projects by the project name.\n sortedSummaryList = sorted(projectSummaryList, \n key=lambda proj: proj['project']['name']) # Sorts by project name\n \n # Return a dictionary holding the project summaries.\n output = {'projects': sortedSummaryList}\n return output", "title": "" }, { "docid": "e4e62cf6c9f853274d8661f10503e785", "score": "0.5104932", "text": "def all_projects_for_artifacts(artifacts):\n projects = set()\n for artifact in artifacts:\n for sample in artifact.samples:\n projects.add(sample.project)\n return list(projects)", "title": "" }, { "docid": "28ab0c99b19ac0c73881a37198ac7844", "score": "0.5100098", "text": "def get_all_projects_of_group():\n json_request = request.get_json()\n print json_request\n data = json_request[\"data\"]\n group_id = data[\"group_id\"]\n student_ids = gc.get_students(group_id)\n message = {\"projects\": pc.get_all_projects_of_group(student_ids)}\n resp = binary_response_builder(success, message)\n return resp", "title": "" }, { "docid": "7edb40c25f8023ac8a5a0b6eda83efff", "score": "0.5098614", "text": "def projects(self) -> List[Tuple[str, str]]:\n url = f\"{self._url}/owner/{self.login}/repos?repository.active=True\"\n response = requests.get(url, headers=self.headers)\n data = response.json()\n return [(el[\"name\"], el[\"id\"]) for el in data.get(\"repositories\")]", "title": "" }, { "docid": "822c16c6a8cc426f0fa787a28ad6ef96", "score": "0.50969636", "text": "def get_list_contigs_id(self):\n list_of_contigs_ids = []\n for key, value in self.dict_fasta_data.items():\n list_of_contigs_ids.append(value.description)\n return list_of_contigs_ids", "title": "" }, { "docid": "01d92005fa50b344ff50cbb11845d814", "score": "0.5090789", "text": "def get_queryset(self):\n return super(ProjectDocument, self).get_queryset().prefetch_related(\n 'read_groups'\n )", "title": "" }, { "docid": "b25a1d2a7357a117d14c8ca35a1e33af", "score": "0.5083605", "text": "def filter(cls, resources, projects=None, key=None):\n\n # Nothing to do\n if not resources:\n return []\n\n # Gather projects if not explicitly stated\n if not projects:\n projects = cls.projects()\n\n # Hack around the fact that OpenStack is a totally inconsistent mess\n if not key:\n key = 'project_id'\n if not hasattr(resources[0], key):\n key = 'tenant_id'\n\n return [x for x in resources if getattr(x, key) in projects]", "title": "" }, { "docid": "c24544ab8412d7b0638075caea90bd33", "score": "0.5080898", "text": "def get_projects(self):\n return self.http.get(\"/projects\")", "title": "" }, { "docid": "f7fb09d4eb2190483aaac5add1503ef8", "score": "0.50805914", "text": "def projects(self) -> List[Dict[str, Any]]:\n response = requests.get(f\"{self._url}/projects\", headers=self.headers)\n\n return response.json()", "title": "" }, { "docid": "7fb9fb3e0f93342bc76530bf42bf3f37", "score": "0.5077893", "text": "def get_project_id(project_name, server):\n\n # set the filter options\n options = TSC.RequestOptions()\n options.filter.add(TSC.Filter(TSC.RequestOptions.Field.Name,\n TSC.RequestOptions.Operator.Equals,\n project_name))\n # make request\n filtered_result, _ = 
server.projects.get(req_options=options)\n if not filtered_result:\n raise NameError(\"Invalid project_name '{}'\".format(project_name))\n # return the last object in the list (if there are multiple)\n project_object = filtered_result.pop()\n return (project_object.id, project_object)", "title": "" }, { "docid": "2108f6337b897bb3785d65b56273d22a", "score": "0.50706935", "text": "def get_documents(self, docnums):\n Q = self.conn.Q\n wrap = self.wrap\n uniq_key = self.uniq_key\n dnum_cache = []\n ccount = 0\n params = {'fl':self.fl, 'start':0, 'rows':self.cache}\n for d in docnums:\n dnum_cache.append(d)\n ccount += 1\n if ccount == self.cache:\n solrq = Q()\n for dnum in dnum_cache:\n solrq |= Q(**{uniq_key:dnum})\n params['q'] = solrq # lucene query\n result = self.conn.search(**params).result\n for doc in result.docs:\n yield wrap(doc)\n ccount = 0\n dnum_cache = []", "title": "" }, { "docid": "bbeda89c1d031426a28d83187701adf6", "score": "0.5061458", "text": "def get_project(self, project):\n return Dict(self.projects.get_entry(pk=project, _fields=[\"_all\"]).result())", "title": "" }, { "docid": "3f49e2e8fd60d5774415643362fc3fe5", "score": "0.50589854", "text": "def collect_ids_from_guidestar(self, amount_of_pages):\n ids_collection = set()\n last_company_name = \"\"\n\n # Run for each iteration\n for page_number in range(1, amount_of_pages):\n logger.info(\"complete {}/{}\".format(page_number, amount_of_pages))\n # Set the requested page\n ajax_get_ids[\"data\"][0][\"pageNumber\"] = page_number\n ajax_get_ids[\"ctx\"][\"csrf\"] = self.get_csrf()\n if page_number > 1:\n ajax_get_ids[\"data\"][1][\"value\"] = last_company_name\n\n # Request for 50 more companies\n post_response = requests.post(\n AJAX_URL, json=ajax_get_ids, headers=ajax_headers\n )\n post_response.raise_for_status()\n post_response = json.loads(post_response.text)\n\n # Run for each company and saves the company's id\n for company_json in post_response[0][\"result\"][\"result\"]:\n ids_collection.add(company_json[\"regNum\"])\n last_company_name = company_json[\"Name\"]\n return list(ids_collection)", "title": "" } ]
540d53c03187eb8362cbe679ac1e875b
Fill Ns in maf file mafdict[ind][chrom].append([block_key, coord, "N"])
[ { "docid": "d33a0c3a1c9dd0b1727b67b67654e050", "score": "0.6722032", "text": "def fillMaf(mafdict, mafFile):\n f = open(\"{}.fill\".format(mafFile), 'w')\n with open(mafFile, 'r') as maf:\n for line in maf:\n if line.startswith(\"a\"):\n f.write(\"a\\n\")\n line = next(maf)\n while line.startswith(\"s\"):\n x = line.split()\n ind, chrom = x[1].split(\".\")\n seq = list(x[-1])\n # find block key in mafdict list\n block_key = x[1]+\"_\"+x[2]+\"_\"+x[3]+\"_\"+x[4]+\"_\"+x[5]\n blocklist = zip(*mafdict[ind][chrom][0])[0]\n i = blocklist.index(block_key)\n coord = mafdict[ind][chrom][0][i]\n for pos, nuc in zip(coord[2], coord[-1]):\n seq[pos] = nuc\n x[-1] = \"\".join(seq)\n f.write(\"{}\\n\".format(\"\\t\".join(x)))\n line = next(maf)\n f.write(\"\\n\")\n f.close()\n return(None)", "title": "" } ]
[ { "docid": "a5d44e0fcb3494ad959182eeab084c42", "score": "0.5201526", "text": "def add_block(n_block,maze,inplace=False):\n if inplace:\n for position in available_path(maze)[:n_block]:\n maze[position[0]][position[1]] = 4\n return maze\n else:\n maze_copy = maze.copy()\n for position in available_path(maze_copy)[:n_block]:\n maze_copy[position[0]][position[1]] = 4\n return maze_copy", "title": "" }, { "docid": "2c7b7a547f28ceac38a2f37078063582", "score": "0.51613593", "text": "def replaceMaf(vcfdict, mafdict):\n for ind in mafdict.keys():\n for chrom in mafdict[ind].keys():\n for i, coord in enumerate(mafdict[ind][chrom]):\n pos = coord[1]\n a = ''\n for p in pos:\n try:\n a += vcfdict[ind][chrom+\"_\"+str(p)]\n except KeyError:\n continue\n try:\n mafdict[ind][chrom][0][i][-1] = a\n except TypeError:\n ipdb.set_trace()\n return(mafdict)", "title": "" }, { "docid": "c39839d5c0bd243c0fbecc9ff24733df", "score": "0.5145095", "text": "def set_atommap(mol, num=0):\n for atom in mol.GetAtoms():\n atom.SetAtomMapNum(num)", "title": "" }, { "docid": "8292b89cbc04f4c031688f7e6b044604", "score": "0.51430285", "text": "def count_ref_alt_bases(snp_list, base_count_file, af_file): \n snps = dict()\n for snp in snp_list:\n sl = snp.rstrip().split(':')\n chr = sl[0] \n pos = sl[1]\n ref = sl[2]\n alt = sl[3]\n key = \":\".join([chr, pos, ref])\n if key not in snps:\n snps[key] = [alt]\n else:\n snps[key].append(alt)\n\n # parse mpileup output to make af dict\n afs = dict() \n with open (base_count_file) as fh:\n for line in fh:\n sl = line.strip().split('\\t')\n pos_ref = sl[0].split(\":\")\n chr = pos_ref[0]\n pos = pos_ref[1]\n ref = pos_ref[2]\n snp = sl[0]\n cov = sl[1] \n a, c, g, t, n = sl[2], sl[3], sl[4], sl[5], sl[6]\n d = {\"A\":a, \"C\":c, \"G\":g, \"T\":t, \"N\":n}\n ref_count = d[ref]\n d.pop(ref, None)\n alts = snps[snp]\n for alt in alts:\n alt_count = d[alt]\n adj_cov = int(alt_count)+int(ref_count)\n if (adj_cov != 0):\n af = str(\"{:.2f}\".format(float(alt_count)/adj_cov))\n else:\n af = str(\"{:.2f}\".format(float(alt_count)/int(cov)))\n value = \":\".join([alt, cov, ref_count, alt_count, af])\n\n try:\n afs[snp].append(value)\n except KeyError:\n afs[snp] = [value]\n\n # print afs\n with open(af_file, 'wb') as opf:\n writer = csv.writer(opf, delimiter='\\t')\n for snp in afs:\n sp_snp = snp.split(':')\n for af in afs[snp]:\n sp_af = af.split(':')\n content = sp_snp + sp_af\n writer.writerow(content)\n return afs", "title": "" }, { "docid": "7c528dcfc6ca405600939c6c1f38b389", "score": "0.5104528", "text": "def mut_genomes_no_indels(self):\n self.assign_sites()\n matout = open(\"{}/var_site_matrix\".format(self.outd), 'w')\n self.vcf_dict = {}\n for loc in self.mutlocs:\n self.vcf_dict[loc] = {}\n for seq in self.seqnames:\n self.mut_genos[seq] = []\n sys.stdout.write(\"writing genome for {}\\n\".format(seq))\n if not os.path.isdir(\"{}/fasta_files\".format(self.outd)):\n os.mkdir(\"{}/fasta_files\".format(self.outd))\n genout = open(\"{}/fasta_files/{}{}.fasta\".format(self.outd, self.prefix, seq), 'w')\n ii = 0\n lw = 0 #tracking line wrapping for fasta\n with open(self.get_arg('genome'), 'r') as in_file:\n for line in in_file:\n if line.startswith('>'):\n if ii > 0:\n genout.write('\\n')\n # genout.write(line.strip()+\"_\"+self.prefix+seq+\"\\n\")\n lw = 0\n #else:\n genout.write(line.strip()+\"_\"+self.prefix+seq)\n else:\n line = line.strip()\n for nuc in line:\n if lw%70 == 0:\n genout.write('\\n')\n if ii in self.mutlocs:\n contig_name, adjusted_loc = self.mut_trans[ii]\n if 
nuc == 'N':\n genout.write('N')\n self.vcf_dict[ii][seq] = 'N'\n else:\n patt = self.sitepatts[nuc][self.snpdic[ii]]\n genout.write(patt[seq])\n self.mut_genos[seq].append(patt[seq])\n matout.write(\"{} {} {} {} {}\\n\".format(seq, patt[seq], ii, contig_name, adjusted_loc))\n self.vcf_dict[ii][seq] = patt[seq]\n else:\n genout.write(nuc)\n ii += 1\n lw += 1\n genout.write('\\n')\n genout.write('\\n')\n genout.close()\n matout.close()\n self._genmut = 1\n write_vcf(self)\n sys.stdout.write(\"Mutated genomes\\n\")", "title": "" }, { "docid": "306ee5e70c004413151acaa5a82c7cc8", "score": "0.5087522", "text": "def _makeimap(self):\n self.map_['source'] = 'NAOJ'\n self.map_['provider'] = 'NRO'\n self.map_['instrument'] = 'NORH'\n self.map_['physobs'] = ''", "title": "" }, { "docid": "090f8d71f28a0c4f69a3bffb3f0ecb2e", "score": "0.5067188", "text": "def getMAFambig(mafFile):\n mafdict = defaultdict(lambda: defaultdict(lambda: []))\n with open(mafFile, 'r') as maf:\n for line in maf:\n try:\n if line.startswith(\"a\"):\n line = next(maf)\n while line.startswith(\"s\"):\n x = line.split()\n ind, chrom = x[1].split(\".\")[:2]\n block_key = \"{}\".format(\"_\".join(x[1:6]))\n coord, poslist = transCoord(x[2:]) # coord is list of N in block\n if coord:\n mafdict[ind][chrom].append([block_key, coord, poslist, \"N\"])\n line = next(maf)\n except StopIteration:\n break\n return(mafdict)", "title": "" }, { "docid": "ab8795755da1a8729e5e5b961d2494dc", "score": "0.5036559", "text": "def _setting_information(self, nfeats, map_idx, funct):\n self._n = nfeats\n if map_idx is not None:\n self._map2idx = map_idx\n if funct is not None:\n self._funct = funct", "title": "" }, { "docid": "f735802c3bfa9631a64d4bac692ed9eb", "score": "0.50118726", "text": "def set_blocks(self, width=5):\r\n self.g = np.zeros_like(self.g)\r\n nunits = np.prod(self.dict_shape)\r\n for i in range(nunits):\r\n for j in range(nunits): \r\n self.g[i, j] = self.block_membership(i, j, width)", "title": "" }, { "docid": "1808af759b616cb0e297f3a2e60ebf36", "score": "0.50036347", "text": "def begin_ngram_dictionary(self, ngramsize):\n\t\tfor item in self.tuples:\n\t\t\tself.ngram_pos(item, ngramsize)", "title": "" }, { "docid": "0b45d22682b9fe279b957f82a3c9cdde", "score": "0.49937633", "text": "def populate_field(self):\r\n for coord in range(len(self.bomb_x_coord)):\r\n for i in range(self.bomb_x_coord[coord] - 1,\r\n self.bomb_x_coord[coord] + 2):\r\n # check if x coord exceeds board index\r\n if i < 0 or i > self.mineboard.board_width - 1:\r\n continue\r\n for j in range(self.bomb_y_coord[coord] - 1,\r\n self.bomb_y_coord[coord] + 2):\r\n # check if y coord exceeds board index\r\n if j < 0 or j > self.mineboard.board_height - 1:\r\n continue\r\n elif self.field[i][j] == '*':\r\n continue\r\n else:\r\n self.field[i][j] += 1", "title": "" }, { "docid": "c5ae738ac0a7990134e21cd7569e3a66", "score": "0.49788046", "text": "def update_M(tria, nid_pos, ncoords, M):\n pos1 = nid_pos[tria.n1]\n pos2 = nid_pos[tria.n2]\n pos3 = nid_pos[tria.n3]\n x1, y1 = ncoords[pos1]\n x2, y2 = ncoords[pos2]\n x3, y3 = ncoords[pos3]\n\n A = (-x1 + x2)*(-y2 + y3)/2 + (x2 - x3)*(-y1 + y2)/2\n assert A > 0\n tria.A = A\n\n A11 = tria.ABDE[0, 0]\n A12 = tria.ABDE[0, 1]\n A16 = tria.ABDE[0, 2]\n A22 = tria.ABDE[1, 1]\n A26 = tria.ABDE[1, 2]\n A66 = tria.ABDE[2, 2]\n B11 = tria.ABDE[3, 0]\n B12 = tria.ABDE[3, 1]\n B16 = tria.ABDE[3, 2]\n B22 = tria.ABDE[4, 1]\n B26 = tria.ABDE[4, 2]\n B66 = tria.ABDE[5, 2]\n D11 = tria.ABDE[3, 3]\n D12 = tria.ABDE[3, 4]\n D16 = 
tria.ABDE[3, 5]\n D22 = tria.ABDE[4, 4]\n D26 = tria.ABDE[4, 5]\n D66 = tria.ABDE[5, 5]\n E44 = tria.ABDE[6, 6]\n E45 = tria.ABDE[6, 7]\n E55 = tria.ABDE[7, 7]\n\n rho = tria.rho\n h = tria.h\n\n # positions c1, c2 in the stiffness and mass matrices\n c1 = DOF*pos1\n c2 = DOF*pos2\n c3 = DOF*pos3\n\n M[0+c1, 0+c1] += A*h*rho/6\n M[0+c1, 0+c2] += A*h*rho/12\n M[0+c1, 0+c3] += A*h*rho/12\n M[1+c1, 1+c1] += A*h*rho/6\n M[1+c1, 1+c2] += A*h*rho/12\n M[1+c1, 1+c3] += A*h*rho/12\n M[2+c1, 2+c1] += A*h*rho/6\n M[2+c1, 2+c2] += A*h*rho/12\n M[2+c1, 2+c3] += A*h*rho/12\n M[3+c1, 3+c1] += A*h**3*rho/72\n M[3+c1, 3+c2] += A*h**3*rho/144\n M[3+c1, 3+c3] += A*h**3*rho/144\n M[4+c1, 4+c1] += A*h**3*rho/72\n M[4+c1, 4+c2] += A*h**3*rho/144\n M[4+c1, 4+c3] += A*h**3*rho/144\n M[0+c2, 0+c1] += A*h*rho/12\n M[0+c2, 0+c2] += A*h*rho/6\n M[0+c2, 0+c3] += A*h*rho/12\n M[1+c2, 1+c1] += A*h*rho/12\n M[1+c2, 1+c2] += A*h*rho/6\n M[1+c2, 1+c3] += A*h*rho/12\n M[2+c2, 2+c1] += A*h*rho/12\n M[2+c2, 2+c2] += A*h*rho/6\n M[2+c2, 2+c3] += A*h*rho/12\n M[3+c2, 3+c1] += A*h**3*rho/144\n M[3+c2, 3+c2] += A*h**3*rho/72\n M[3+c2, 3+c3] += A*h**3*rho/144\n M[4+c2, 4+c1] += A*h**3*rho/144\n M[4+c2, 4+c2] += A*h**3*rho/72\n M[4+c2, 4+c3] += A*h**3*rho/144\n M[0+c3, 0+c1] += A*h*rho/12\n M[0+c3, 0+c2] += A*h*rho/12\n M[0+c3, 0+c3] += A*h*rho/6\n M[1+c3, 1+c1] += A*h*rho/12\n M[1+c3, 1+c2] += A*h*rho/12\n M[1+c3, 1+c3] += A*h*rho/6\n M[2+c3, 2+c1] += A*h*rho/12\n M[2+c3, 2+c2] += A*h*rho/12\n M[2+c3, 2+c3] += A*h*rho/6\n M[3+c3, 3+c1] += A*h**3*rho/144\n M[3+c3, 3+c2] += A*h**3*rho/144\n M[3+c3, 3+c3] += A*h**3*rho/72\n M[4+c3, 4+c1] += A*h**3*rho/144\n M[4+c3, 4+c2] += A*h**3*rho/144\n M[4+c3, 4+c3] += A*h**3*rho/72", "title": "" }, { "docid": "8d67ce5a5cf7573a85fd6b40acb55599", "score": "0.4955834", "text": "def place_numbers(self, field):\r\n for x in range(FIELDWIDTH):\r\n for y in range(FIELDHEIGHT):\r\n if not self.is_there_mine(field, x, y):\r\n field[x][y] = [\r\n field[neighbour_x][neighbour_y]\r\n for neighbour_x, neighbour_y in self.get_neighbour_squares([x, y])\r\n ].count(MINE)", "title": "" }, { "docid": "3b432f946f9af4c61083781d0d2bda92", "score": "0.49360833", "text": "def makeAffyMap(annoroot='./', gdir='./', outfpref=None):\n f = file('affyToRSMap.txt','r').readlines()\n fl = [x.strip().split() for x in f]\n fl = [x for x in fl if len(x) >= 2]\n #a = ['SNP_A-%s' % x[0].split('_')[-1] for x in fl] # get rid of bogus AFFX-SNP to replace with bogus bogus SNP_A-\n a = [x[0] for x in fl] # get rid of bogus AFFX-SNP to replace with bogus bogus SNP_A-\n #print 'a=',a[:20]\n rs = [x[1] for x in fl]\n ators = dict(zip(a,rs))\n\n f = file('GenomeWideSNP_6_flanking_sequences.map','r').readlines()\n fl = [x.split() for x in f]\n fl = [x for x in fl if len(x) > 0 and int(x[0])>0] # ignore missing chroms?\n print 'fl=',fl[:20]\n affyToMap = {}\n rslist = [] # for decorated chrom offset sorting\n rsseen = {} # for stoopid dupe rs\n notfound = 0\n for x in fl:\n chrom,aid,gpos,ppos = x\n rs = ators.get(aid, None)\n if rs:\n if rsseen.get(rs,None):\n # damn duplicates in John's inputs - ? 
multiple probes \n print '# ignoring second %s, rs=%s' % (aid,rs)\n else:\n rsseen[rs] = rs\n affyToMap[aid] = [chrom,rs,gpos,ppos]\n rslist.append((chrom,int(ppos),rs))\n else:\n notfound += 1\n #print '###cannot find %s in ators' % aid\n print '# found',len(affyToMap),'affy ids and',notfound,'not found'\n rslist.sort()\n maplist = [(x[0],x[2],'0','%d' % x[1]) for x in rslist] # rslist was chrom, gpos,rs\n tabmaplist = ['\\t'.join(x) for x in maplist]\n for race in ['white','black']:\n mapfname = os.path.join(gdir, '%s_%s.map' % (outfpref,race))\n mapf = file(mapfname,'w')\n mapf.write('\\n'.join(tabmaplist))\n mapf.write('\\n')\n print 'wrote %s' % mapfname\n mapf.close()\n return rslist,maplist", "title": "" }, { "docid": "29fa9e5b73cc939b0da1cf12ab0f1a0a", "score": "0.49319622", "text": "def set_nucleus_areas(self, val, nID):\n self.data_frames['data_z_params'].change_col_for_nID(['z', 'area'], nID, val)", "title": "" }, { "docid": "2a76f7c9a01bc5f82a8ada0c1ac8902d", "score": "0.49222493", "text": "def generate_map_data(self):\n\n with open(self.ec_file) as ec_file:\n for line in ec_file:\n line = line.rstrip('\\r\\n')\n\n map_number = self.metabolic_map_number(line)\n map_data = self.metabolic_map_data(line)\n\n if self.is_metabolic_map_data_an_ec_number(map_data):\n ec_number = self.metabolic_map_ec(line)\n\n if map_number not in self.maps_and_ecs:\n self.maps_and_ecs[map_number] = []\n\n if ec_number not in self.ecs_and_maps:\n self.ecs_and_maps[ec_number] = []\n\n self.maps_and_ecs[map_number].append(ec_number)\n self.ecs_and_maps[ec_number].append(map_number)", "title": "" }, { "docid": "5c4f1d3344003ced31cf6e3eb23993de", "score": "0.49034533", "text": "def make_map(self):\n # Create root file and tree\n map_file = ROOT.TFile.Open(\"mapping_IHEP_L2_2planari_penta.root\")\n for event in map_file.tree:\n if event.pos_x>0:\n self.mapping_matrix[event.gemroc_id][event.SW_FEB_id][event.channel_id] = \"X-{}\".format(event.pos_x)\n elif event.pos_v>0:\n self.mapping_matrix[event.gemroc_id][event.SW_FEB_id][event.channel_id] = \"V-{}\".format(event.pos_v)\n else:\n self.mapping_matrix[event.gemroc_id][event.SW_FEB_id][event.channel_id] = \"NaS\"\n\n # print (self.baseline_matrix[6])", "title": "" }, { "docid": "b674abc44c8a5bba8766506ae51d8c17", "score": "0.49006465", "text": "def make_maps(self, n): \r\n\r\n self.load_files()\r\n \r\n thismask = ~np.isnan(self.clusters[:,n,0])\r\n thisdd = np.round(np.clip(self.clusters[thismask,n,:2], 0, self.imsize[0]-1)).astype(int) ##ee in original code\r\n thisvel = self.velmaginst[thismask, n]\r\n thisstd = self.stdwafer[thismask,n]\r\n thisdist = self.distancewafer[thismask,n]\r\n thisneigh = self.neighwafer[thismask,n]\r\n thisangle = self.angle_inst[thismask, n]\r\n \r\n I = Image(path=self.path, name=self.image_names[n]).In/255.\r\n sk = I*0\r\n sk[thisdd[:,0], thisdd[:,1]] = 1\r\n sk = ~medial_axis(sk==0)\r\n lb = ndimage.label(sk)[0]\r\n \r\n speedmap = I*np.nan\r\n stdmap = I*np.nan\r\n distmap = I*np.nan\r\n anglemap = I*np.nan\r\n neighmap = I*np.nan\r\n \r\n for i in range(thisdd.shape[0]):\r\n y, x = thisdd[i]\r\n thislb = lb[y, x]\r\n patchmask = lb==thislb\r\n speedmap[patchmask] = thisvel[i]\r\n stdmap[patchmask] = thisstd[i]/thisdist[i]\r\n neighmap[patchmask] = thisneigh[i]\r\n anglemap[patchmask] = thisangle[i]\r\n distmap[patchmask] = thisdist[i]\r\n \r\n \r\n \r\n maps = [(distmap, u'primary spacing (um)', cm.jet, 'dist_v{:d}_n{:05d}_t{:0.2f}_L{:0.2f}.png'.format(self.V, n, self.times[n], self.times[n]*self.V), (120, 320)),\r\n 
(neighmap, u'number of neighbours', cm.jet, 'neighs_v{:d}_n{:05d}_t{:0.2f}_L{:0.2f}.png'.format(self.V, n, self.times[n], self.times[n]*self.V), (4,8)),\r\n (speedmap, u'apparent velocity (nm/s)', cm.jet, 'speed_v{:d}_n{:05d}_t{:0.2f}_L{:0.2f}.png'.format(self.V, n, self.times[n], self.times[n]*self.V), (10, 120)),\r\n (stdmap*100, u'standard deviation (%)', cm.jet, 'std_v{:d}_n{:05d}_t{:0.2f}_L{:0.2f}.png'.format(self.V, n, self.times[n], self.times[n]*self.V), (5, 30)),\r\n (anglemap*180/np.pi, u'apparent direction (ccw deg)', cm.hsv, 'dir_v{:d}_n{:05d}_t{:0.2f}_L{:0.2f}.png'.format(self.V, n, self.times[n], self.times[n]*self.V), (-180, 180))\r\n ]\r\n \r\n \r\n for x, label, cmap, savename, clip in maps:\r\n xx = x*1\r\n lb = ndimage.label(~np.isnan(xx))[0]\r\n edges = np.concatenate((lb[:2,:].ravel(), lb[-2:,:].ravel(), lb[:,:2].ravel(),lb[:,-2:].ravel()))\r\n edges = np.unique(edges)\r\n xx[np.isin( lb, edges)] = np.nan\r\n fig, ax = plt.subplots(figsize=(10,10), dpi=150)\r\n ax.set_ylabel('y (mm)')\r\n ax.set_xlabel('x (mm)')\r\n ax.set_xticklabels([])\r\n cax = ax.imshow(xx, cmap=cmap, extent=(0, self.imsize[1]*self.pixel_size/1000., self.imsize[0]*self.pixel_size/1000.,0), vmin=clip[0], vmax=clip[1])\r\n cb = fig.colorbar(cax, orientation='horizontal', pad=0.001, fraction=0.04765)\r\n ax.grid()\r\n cb.set_label(label)\r\n fig.savefig('{:s}trash_{:s}'.format(self.result_path, savename))\r\n plt.close(fig) \r\n \r\n return True", "title": "" }, { "docid": "8f568b8a7a80d63c1dd024c74378745a", "score": "0.48894927", "text": "def add_ion_to_data(self, inp_ion):\n from linetools.analysis import absline as ltaa\n ion = inp_ion.replace(' ','')\n # Check for previous\n if 'flag_N_{:s}'.format(ion) in self._data.keys():\n print(\"Ion data is already in the _data table\")\n return\n # Loop on the systems\n flagNs, Ns, sigNs = [], [], []\n for key in self._dict.keys():\n igm_comp = self._dict[key]['igm_sys']['components']\n comps = [] # Allow for more than one\n for comp in igm_comp.keys():\n sion = comp.split('_')[0]\n if sion == ion:\n if 'attrib' in igm_comp[comp].keys():\n attrib = igm_comp[comp]['attrib']\n attrib['sig_logN'] = np.array(attrib['sig_logN'])\n comps.append(attrib.copy())\n else: # Deprecated\n comps.append(dict(logN=igm_comp[comp]['logN'],\n flag_N=igm_comp[comp]['flag_N'],\n sig_logN=np.array([igm_comp[comp]['sig_logN']]*2)))\n # Now sum em up\n if len(comps) == 0:\n flagNs.append(0)\n Ns.append(0.)\n sigNs.append(np.array([0.]*2))\n continue\n obj = dict(flag_N=comps[0]['flag_N'], logN=comps[0]['logN'], sig_logN=comps[0]['sig_logN'])\n for comp in comps[1:]:\n if comp['flag_N'] != 0:\n obj['flag_N'], obj['logN'], obj['sig_logN'] = ltaa.sum_logN(obj, comp)\n # Save\n flagNs.append(obj['flag_N'])\n Ns.append(obj['logN'])\n sigNs.append(obj['sig_logN'])\n # Add to Table\n self._data.add_column(Column(flagNs, name='flag_N_{:s}'.format(ion)))\n self._data.add_column(Column(Ns, name='logN_{:s}'.format(ion)))\n self._data.add_column(Column(sigNs, name='sig_logN_{:s}'.format(ion)))", "title": "" }, { "docid": "21db43e916fed07a0ea9c4bdbb9b7df7", "score": "0.4883502", "text": "def _populate_blocks(val: str, blocks: DefaultDict[str, Set[str]], block_size: int) -> None:\n tokens = _ngram_tokens(val, block_size)\n for token in tokens:\n blocks[token].add(val)", "title": "" }, { "docid": "2c8b95a7175f5f07c92bf56b73d63bd3", "score": "0.48789263", "text": "def na_fill(line, column_index):\n for _ in range(len(events_matrix[line]), column_index):\n events_matrix[line].append('')", 
"title": "" }, { "docid": "527be86686994d4d92b724861b1ca037", "score": "0.4876057", "text": "def add_to_data(self):\n self.data['N'] = self.N\n self.data['B'] = self.B", "title": "" }, { "docid": "1b57f501a0a82741c67dad77bfa8253d", "score": "0.4867738", "text": "def fillData():\n line = inputFile.readline()\n idx = 0\n while not line.startswith('+9') :\n dataMemory[idx] = int(line)\n line = inputFile.readline()\n idx += 1", "title": "" }, { "docid": "b1ab7bda7e2629eacaea666d25ad7eba", "score": "0.4854523", "text": "def prepare_map(self):\n for row, line in enumerate(self.contents):\n for col, char in enumerate(line):\n bm = self.get_tile(char)\n y = row*TILE_SIZE\n x = col*TILE_SIZE\n self.image[y:y+TILE_SIZE, x:x+TILE_SIZE] = bm", "title": "" }, { "docid": "6b843b76a85f4d129daf45f589bd5772", "score": "0.48522866", "text": "def _fill_matrices(self):\n a = np.zeros(shape=(self.n, self.m))\n for row in self.ratings:\n uid = int(row[0])\n mid = int(row[1])\n a[uid - 1][mid - 1] = row[2]\n self.a0 = a", "title": "" }, { "docid": "7e12830e9553c8ab55bb4f73907484a9", "score": "0.4847817", "text": "def setInfoField(self):\n for field in self.info:\n if field == \"RT\":\n rt = [0, 0, 0]\n for type in self.info[field]:\n if type == \"2d\":\n rt[0] += 1\n if type == \"template\":\n rt[1] += 1\n if type == \"complement\":\n rt[2] += 1\n\n self.info[field] = rt\n if 'READ_IDS' in field:\n if len(self.info[field]) == 0:\n self.info[field]=['NA']\n elif isinstance(self.info[field], list):\n if isinstance(self.info[field][0], list):\n self.info[field][0] = median(map(float, self.info[field][0]))\n self.info[field][1] = median(map(float, self.info[field][1]))\n if field == \"MAPQ\":\n self.info[field][0] = int(self.info[field][0])\n self.info[field][1] = int(self.info[field][1])\n elif field == \"PID\" or field == \"PLENGTH\":\n self.info[field][0] = round(self.info[field][0], 3)\n self.info[field][1] = round(self.info[field][1], 3)\n else:\n if 'CI' not in field:\n self.info[field] = self.median_type(self.info[field])", "title": "" }, { "docid": "f0591fabcc243f6c1eb7266f47a29c57", "score": "0.4838035", "text": "def add_blocks(self, coordinates):\n for coordinate in coordinates:\n self[coordinate] = 'Block'", "title": "" }, { "docid": "1304024b86659ee49ed25c80015312f9", "score": "0.48253205", "text": "def set_filled_spaces(self, coord, name):\n self._filled_spaces[coord] = name", "title": "" }, { "docid": "0f95dcb4408045ce17fdd917a4a12dff", "score": "0.48251402", "text": "def copy_atts_add_fill(fin, fout, missing_value) :\n \n # get a list of global attribute names from the incoming file\n atts = fin.ncattrs()\n \n # place those attributes in the outgoing file\n for ii in range(len(atts)) :\n fout.setncattr(atts[ii], fin.getncattr(atts[ii]))\n \n fout.setncattr('missing_value', missing_value)", "title": "" }, { "docid": "b6b226c996c7ea9bbf2a8570bd35e1d0", "score": "0.48179236", "text": "def _create_assn_ind(self, coord):\n # input data to assign samples\n self.coord = coord\n\n # intialize the count attribute\n self.counts = np.zeros(self.d, \"int\")\n\n # number of indivduals\n n = self.coord.shape[0]\n self.obs_ids = np.zeros(n, \"int\")\n\n for i in range(n):\n # find the node that has the minimum distance to the observed node\n idx = np.argmin(np.sum((self.coord[i, :] - self.grid)**2, axis=1))\n self.obs_ids[i] = idx\n self.counts[idx] += 1\n\n # number of observed demes\n self.o = np.sum(self.counts != 0) # number of observed demes", "title": "" }, { "docid": "effabaea8d300995350f5023e516a618", 
"score": "0.4785221", "text": "def set_valid_indices(num_of_folds):\n patient_dict = {}\n for patient in range(1, 13):\n data = Data(f'../previous_work/P{patient}_data.mat', num_of_folds, trajectory_index=0, low_pass=False)\n indices = set_valid_indices_for_patient(data, num_of_folds)\n patient_dict[f'P_{patient}'] = indices\n with open(f'train_dict_{num_of_folds}', 'wb') as handle:\n pickle.dump(patient_dict, handle)\n handle.close()", "title": "" }, { "docid": "1524e28e9fb6730e082c69f7cb58c9d9", "score": "0.47767812", "text": "def create_rna_output(self):\n temp = {}\n for i in range(self.pool_num+1):\n if i:\n temp.update({i:0})\n self.RNA_init = temp", "title": "" }, { "docid": "13a5b4fdd07dfb9748949b4044b73af7", "score": "0.47473738", "text": "def redefineIncludedCells(self):\n '''Now fix the fixable 0s to the average of boundary values. Recalcuate the included cells'''\n count = 0\n for fn in self.dfs.keys():\n for i in range(len(self.dfs[fn])):\n if self.traceHasZeroSeq(self.dfs[fn].iloc[i,4:]):\n zero_boundaries = self.traceFixableZeros(self.dfs[fn].iloc[i,4:])\n if len(zero_boundaries) > 0:\n count += 1\n for (li,ri,val) in zero_boundaries:\n ovals = np.array(self.dfs[fn].iloc[i,4+li:4+ri])\n ovals[ovals<1] = val\n self.dfs[fn].iloc[i,4+li:4+ri] = ovals\n\n print count\n self.cells_included = dict()\n for fn in self.dfs.keys():\n self.cells_included[fn] = [\n (not self.traceHasZeroSeq(self.dfs[fn].iloc[i,4:4+self.alcohol_indeces[fn]])) and \n self.traceHasMortalityEvent(self.dfs[fn].iloc[i,4:4+self.alcohol_indeces[fn]]) \n for i in range(len(self.dfs[fn])) ]\n \n for (fn, ar) in self.cells_included.items():\n print fn, np.sum(ar), len(ar)\n if np.sum(ar) == 0:\n print \"delete file \", fn\n del self.cells_included[fn]\n del self.dfs[fn]", "title": "" }, { "docid": "ffb4c584f030d22804472dffe3f968d4", "score": "0.4729698", "text": "def generate_dict(self):\n for x in range(9):\n for y in range(10):\n self.set_filled_spaces((x, y), None)", "title": "" }, { "docid": "458e93c09e1f19ab414f5adfd876554c", "score": "0.47258064", "text": "def initialize_node_writer_dict_ids():\n for node_label in full_spec_dict:\n writer_dict[node_label][\"last_id_written\"] = None", "title": "" }, { "docid": "1a0f448c11f8ec374482e00ca635395e", "score": "0.47212765", "text": "def _parse_data_map(self, f):\n regex_str = '%s([0-9]{7}).nx.hdf' % animals[self.animal]\n regex = re.compile(regex_str)\n\n for line in f:\n match = regex.search(line)\n if match:\n try:\n temp, stamp, size, path = line.split(' ')\n path = path.split('\\n')[0]\n filename = os.path.basename(path)\n number = int(match.group(1))\n self.mapped_files[number] = {'filename': filename,\n 'path': path,\n 'timestamp': stamp,\n 'size': size}\n except (ValueError):\n #some files have space in path, but these wont be\n #nx.hdf\n continue", "title": "" }, { "docid": "551f7249001d7bc936cf225596ee8b26", "score": "0.47160372", "text": "def fill_dic(self, dicdata, ls_plfams):\n dic_df = {}\n for strainID in dicdata:\n print(strainID)\n dic_df[strainID] = {}\n for plfam in ls_plfams:\n dic_df[strainID][plfam] = 0\n for plfam in dicdata[strainID]:\n dic_df[strainID][plfam] += 1\n return dic_df", "title": "" }, { "docid": "edbdc23e4559869b268c564b9568479c", "score": "0.47026113", "text": "def data_retina_adj_count((conn_areacount_infile, positions_infile), \n (retina_outfile,)):\n\n data = pickle.load(open(conn_areacount_infile, 'r'))\n area_mat = data['area_mat']['count']\n positions_data = pickle.load(open(positions_infile, 'r'))\n pos_vec = 
positions_data['pos_vec']\n NEURON_N = 950 # only the ones for which we also have position data\n\n np.random.seed(0)\n cell_id_permutation = np.random.permutation(NEURON_N)\n\n\n area_mat_sub = area_mat[:NEURON_N, :NEURON_N]\n\n area_mat_sub = area_mat_sub[cell_id_permutation, :]\n area_mat_sub = area_mat_sub[:, cell_id_permutation]\n pos_vec = pos_vec[cell_id_permutation]\n\n dist_matrix = np.zeros((NEURON_N, NEURON_N), \n dtype=[('link', np.int32), \n ('distance', np.float32)])\n\n cell_types = data['types'][cell_id_permutation]\n\n dist_matrix['link'] = area_mat_sub \n for n1 in range(NEURON_N):\n for n2 in range(NEURON_N):\n p1 = pos_vec[n1]\n p2 = pos_vec[n2]\n \n dist_matrix[n1, n2]['distance'] = dist(p1, p2)\n\n pickle.dump({'dist_matrix' : dist_matrix, \n 'types' : cell_types, \n 'cell_id_permutation' : cell_id_permutation,\n 'infile' : conn_areacount_infile}, open(retina_outfile, 'w'))", "title": "" }, { "docid": "1d938bf28dad4744fd7a79ba70674a0a", "score": "0.46943974", "text": "def global_block_zerofill_update(self, zf_mult):\n for tab in self.tabs:\n # Reset dataset results arrays in blocks and chains.\n tab.dataset.update_for_zerofill_change(zf_mult)", "title": "" }, { "docid": "8facc186968426c09034615963b08f91", "score": "0.46892676", "text": "def renumAtoms(self,start=1):\n for i in range(len(self.atoms)):\n self.atoms[i].num = start + i", "title": "" }, { "docid": "f1c629974ef99283bb5022c832b1be69", "score": "0.46888784", "text": "def update_K_M(tria, nid_pos, ncoords, K, M):\n pos1 = nid_pos[tria.n1]\n pos2 = nid_pos[tria.n2]\n pos3 = nid_pos[tria.n3]\n x1, y1 = ncoords[pos1]\n x2, y2 = ncoords[pos2]\n x3, y3 = ncoords[pos3]\n A = abs((x1*(y2 - y3) + x2*(y3 - y1) + x3*(y1 - y2))/2)\n tria.A = A\n E = tria.E\n nu = tria.nu\n h = tria.h\n rho = tria.rho\n\n # positions the global matrices\n c1 = DOF*pos1\n c2 = DOF*pos2\n c3 = DOF*pos3\n\n K[0+c1, 0+c1] += 0.25*E*h*((nu - 1)*(y2 - y3)**2 + (2*nu - 1)*(x2 - x3)**2)/(A*(nu + 1)*(2*nu - 1))\n K[0+c1, 1+c1] += 0.25*E*h*(1 - nu)*(x2 - x3)*(y2 - y3)/(A*(nu + 1)*(2*nu - 1))\n K[0+c1, 0+c2] += -0.25*E*h*((nu - 1)*(y1 - y3)*(y2 - y3) + (2*nu - 1)*(x1 - x3)*(x2 - x3))/(A*(nu + 1)*(2*nu - 1))\n K[0+c1, 1+c2] += -0.25*E*h*(nu*(x1 - x3)*(y2 - y3) - (2*nu - 1)*(x2 - x3)*(y1 - y3))/(A*(nu + 1)*(2*nu - 1))\n K[0+c1, 0+c3] += 0.25*E*h*((nu - 1)*(y1 - y2)*(y2 - y3) + (2*nu - 1)*(x1 - x2)*(x2 - x3))/(A*(nu + 1)*(2*nu - 1))\n K[0+c1, 1+c3] += 0.25*E*h*(nu*(x1 - x2)*(y2 - y3) - (2*nu - 1)*(x2 - x3)*(y1 - y2))/(A*(nu + 1)*(2*nu - 1))\n K[1+c1, 0+c1] += 0.25*E*h*(1 - nu)*(x2 - x3)*(y2 - y3)/(A*(nu + 1)*(2*nu - 1))\n K[1+c1, 1+c1] += 0.25*E*h*((nu - 1)*(x2 - x3)**2 + (2*nu - 1)*(y2 - y3)**2)/(A*(nu + 1)*(2*nu - 1))\n K[1+c1, 0+c2] += -0.25*E*h*(nu*(x2 - x3)*(y1 - y3) - (2*nu - 1)*(x1 - x3)*(y2 - y3))/(A*(nu + 1)*(2*nu - 1))\n K[1+c1, 1+c2] += -0.25*E*h*((nu - 1)*(x1 - x3)*(x2 - x3) + (2*nu - 1)*(y1 - y3)*(y2 - y3))/(A*(nu + 1)*(2*nu - 1))\n K[1+c1, 0+c3] += 0.25*E*h*(nu*(x2 - x3)*(y1 - y2) - (2*nu - 1)*(x1 - x2)*(y2 - y3))/(A*(nu + 1)*(2*nu - 1))\n K[1+c1, 1+c3] += 0.25*E*h*((nu - 1)*(x1 - x2)*(x2 - x3) + (2*nu - 1)*(y1 - y2)*(y2 - y3))/(A*(nu + 1)*(2*nu - 1))\n K[0+c2, 0+c1] += -0.25*E*h*((nu - 1)*(y1 - y3)*(y2 - y3) + (2*nu - 1)*(x1 - x3)*(x2 - x3))/(A*(nu + 1)*(2*nu - 1))\n K[0+c2, 1+c1] += -0.25*E*h*(nu*(x2 - x3)*(y1 - y3) - (2*nu - 1)*(x1 - x3)*(y2 - y3))/(A*(nu + 1)*(2*nu - 1))\n K[0+c2, 0+c2] += 0.25*E*h*((nu - 1)*(y1 - y3)**2 + (2*nu - 1)*(x1 - x3)**2)/(A*(nu + 1)*(2*nu - 1))\n K[0+c2, 1+c2] += 0.25*E*h*(1 - nu)*(x1 - x3)*(y1 - y3)/(A*(nu 
+ 1)*(2*nu - 1))\n K[0+c2, 0+c3] += -0.25*E*h*((nu - 1)*(y1 - y2)*(y1 - y3) + (2*nu - 1)*(x1 - x2)*(x1 - x3))/(A*(nu + 1)*(2*nu - 1))\n K[0+c2, 1+c3] += -0.25*E*h*(nu*(x1 - x2)*(y1 - y3) - (2*nu - 1)*(x1 - x3)*(y1 - y2))/(A*(nu + 1)*(2*nu - 1))\n K[1+c2, 0+c1] += -0.25*E*h*(nu*(x1 - x3)*(y2 - y3) - (2*nu - 1)*(x2 - x3)*(y1 - y3))/(A*(nu + 1)*(2*nu - 1))\n K[1+c2, 1+c1] += -0.25*E*h*((nu - 1)*(x1 - x3)*(x2 - x3) + (2*nu - 1)*(y1 - y3)*(y2 - y3))/(A*(nu + 1)*(2*nu - 1))\n K[1+c2, 0+c2] += 0.25*E*h*(1 - nu)*(x1 - x3)*(y1 - y3)/(A*(nu + 1)*(2*nu - 1))\n K[1+c2, 1+c2] += 0.25*E*h*((nu - 1)*(x1 - x3)**2 + (2*nu - 1)*(y1 - y3)**2)/(A*(nu + 1)*(2*nu - 1))\n K[1+c2, 0+c3] += -0.25*E*h*(nu*(x1 - x3)*(y1 - y2) - (2*nu - 1)*(x1 - x2)*(y1 - y3))/(A*(nu + 1)*(2*nu - 1))\n K[1+c2, 1+c3] += -0.25*E*h*((nu - 1)*(x1 - x2)*(x1 - x3) + (2*nu - 1)*(y1 - y2)*(y1 - y3))/(A*(nu + 1)*(2*nu - 1))\n K[0+c3, 0+c1] += 0.25*E*h*((nu - 1)*(y1 - y2)*(y2 - y3) + (2*nu - 1)*(x1 - x2)*(x2 - x3))/(A*(nu + 1)*(2*nu - 1))\n K[0+c3, 1+c1] += 0.25*E*h*(nu*(x2 - x3)*(y1 - y2) - (2*nu - 1)*(x1 - x2)*(y2 - y3))/(A*(nu + 1)*(2*nu - 1))\n K[0+c3, 0+c2] += -0.25*E*h*((nu - 1)*(y1 - y2)*(y1 - y3) + (2*nu - 1)*(x1 - x2)*(x1 - x3))/(A*(nu + 1)*(2*nu - 1))\n K[0+c3, 1+c2] += -0.25*E*h*(nu*(x1 - x3)*(y1 - y2) - (2*nu - 1)*(x1 - x2)*(y1 - y3))/(A*(nu + 1)*(2*nu - 1))\n K[0+c3, 0+c3] += 0.25*E*h*((nu - 1)*(y1 - y2)**2 + (2*nu - 1)*(x1 - x2)**2)/(A*(nu + 1)*(2*nu - 1))\n K[0+c3, 1+c3] += 0.25*E*h*(1 - nu)*(x1 - x2)*(y1 - y2)/(A*(nu + 1)*(2*nu - 1))\n K[1+c3, 0+c1] += 0.25*E*h*(nu*(x1 - x2)*(y2 - y3) - (2*nu - 1)*(x2 - x3)*(y1 - y2))/(A*(nu + 1)*(2*nu - 1))\n K[1+c3, 1+c1] += 0.25*E*h*((nu - 1)*(x1 - x2)*(x2 - x3) + (2*nu - 1)*(y1 - y2)*(y2 - y3))/(A*(nu + 1)*(2*nu - 1))\n K[1+c3, 0+c2] += -0.25*E*h*(nu*(x1 - x2)*(y1 - y3) - (2*nu - 1)*(x1 - x3)*(y1 - y2))/(A*(nu + 1)*(2*nu - 1))\n K[1+c3, 1+c2] += -0.25*E*h*((nu - 1)*(x1 - x2)*(x1 - x3) + (2*nu - 1)*(y1 - y2)*(y1 - y3))/(A*(nu + 1)*(2*nu - 1))\n K[1+c3, 0+c3] += 0.25*E*h*(1 - nu)*(x1 - x2)*(y1 - y2)/(A*(nu + 1)*(2*nu - 1))\n K[1+c3, 1+c3] += 0.25*E*h*((nu - 1)*(x1 - x2)**2 + (2*nu - 1)*(y1 - y2)**2)/(A*(nu + 1)*(2*nu - 1))\n\n M[0+c1, 0+c1] += 0.166666666666667*A*h*rho\n M[0+c1, 1+c1] += 0\n M[0+c1, 0+c2] += 0.0833333333333333*A*h*rho\n M[0+c1, 1+c2] += 0\n M[0+c1, 0+c3] += 0.0833333333333333*A*h*rho\n M[0+c1, 1+c3] += 0\n M[1+c1, 0+c1] += 0\n M[1+c1, 1+c1] += 0.166666666666667*A*h*rho\n M[1+c1, 0+c2] += 0\n M[1+c1, 1+c2] += 0.0833333333333333*A*h*rho\n M[1+c1, 0+c3] += 0\n M[1+c1, 1+c3] += 0.0833333333333333*A*h*rho\n M[0+c2, 0+c1] += 0.0833333333333333*A*h*rho\n M[0+c2, 1+c1] += 0\n M[0+c2, 0+c2] += 0.166666666666667*A*h*rho\n M[0+c2, 1+c2] += 0\n M[0+c2, 0+c3] += 0.0833333333333333*A*h*rho\n M[0+c2, 1+c3] += 0\n M[1+c2, 0+c1] += 0\n M[1+c2, 1+c1] += 0.0833333333333333*A*h*rho\n M[1+c2, 0+c2] += 0\n M[1+c2, 1+c2] += 0.166666666666667*A*h*rho\n M[1+c2, 0+c3] += 0\n M[1+c2, 1+c3] += 0.0833333333333333*A*h*rho\n M[0+c3, 0+c1] += 0.0833333333333333*A*h*rho\n M[0+c3, 1+c1] += 0\n M[0+c3, 0+c2] += 0.0833333333333333*A*h*rho\n M[0+c3, 1+c2] += 0\n M[0+c3, 0+c3] += 0.166666666666667*A*h*rho\n M[0+c3, 1+c3] += 0\n M[1+c3, 0+c1] += 0\n M[1+c3, 1+c1] += 0.0833333333333333*A*h*rho\n M[1+c3, 0+c2] += 0\n M[1+c3, 1+c2] += 0.0833333333333333*A*h*rho\n M[1+c3, 0+c3] += 0\n M[1+c3, 1+c3] += 0.166666666666667*A*h*rho", "title": "" }, { "docid": "b2badf45562c7496ec4ba29baae235b5", "score": "0.46843565", "text": "def attributeSetting(self, NMEAList: list, map: dict) -> None:\n #print(NMEAList)\n # creates new 
class attributes and assigns them with values using dictionary items\n for key in map:\n # skip data processing if no value in line\n if NMEAList[map[key][0]] == \"\" and key in [\"latitude\", \"longitude\"]:\n map[key] = None\n self.incomplete = True\n #print('no value in line')\n return\n\n if NMEAList[map[key][0]] == \"\":\n map[key] = None\n #print('continue')\n continue\n\n # see if there are any stored methods in the dictionary\n if map[key][1] == None:\n map[key] = NMEAList[map[key][0]]\n #print(NMEAList[map[key][0]])\n # use the method stored in dictionary if it exists\n else:\n \n map[key] = map[key][1](NMEAList[map[key][0]])\n #print(map[key][1](NMEAList[map[key][0]]))\n \n #print(map)\n #return(map)", "title": "" }, { "docid": "4f77ea6e783e0bb63ffad6513db799cb", "score": "0.46841368", "text": "def init_spawn(self):\n\n spawn = spawner()\n print(\"spawn : \", spawn)\n count_S = 1 #count the number of S met yet for matching with spawn\n\n i_line = 0\n for line in self.struct:\n i_case = 0\n\n for case in line:\n if case == 'S': #symbol for a possible spawner on the map\n if spawn[0] == count_S:\n self.struct[i_line][i_case] = 'N' #Needle\n elif spawn[1] == count_S:\n self.struct[i_line][i_case] = 'T' #Tube\n elif spawn[2] == count_S:\n self.struct[i_line][i_case] = 'E' #Ether\n else:\n self.struct[i_line][i_case] = ' ' #blank\n count_S += 1\n i_case += 1\n i_line += 1\n \n print(self.struct)", "title": "" }, { "docid": "95b5581741a92d36fe250b5e15380b0f", "score": "0.46801803", "text": "def getMappableIR(inputf,chr_name,kmer,mapname,N,outputf):\n with open(inputf,\"r\"):\n chr_arr = np.genfromtxt(inputf,dtype=None)\n counter = 1\n while counter<=round(N*kmer[chr_name]):\n rand_num = np.random.randint(1,dimension_dict[chr_name])\n matched_index = np.searchsorted(chr_arr[\"f0\"],rand_num)-1\n if chr_arr[\"f1\"][matched_index] == True:\n #append to the file that has been previously created with headers\n with open(outputf, 'a') as f:\n print (chr_name,\"\\t\",rand_num,\"\\t\",rand_num+1,\"\\t\", \"unmatched\",\"\\t\",mapname,\"\\t\", \"hg38\", file=f) \n counter+=1\n else:\n continue", "title": "" }, { "docid": "478dd8d4b2b68237417e722f10b06173", "score": "0.46771362", "text": "def _append_empty(self, num_obs, memo):\n for field in self.data._fields.values():\n field.append_empty(num_obs, memo)", "title": "" }, { "docid": "011f167db37804f06a307a8478bcc4de", "score": "0.46768445", "text": "def _fill_dict(self):\r\n return {X: self._make_fill(X, 0) for X in ['A', 'R', 'S']}", "title": "" }, { "docid": "d215d2a39063b1117449faff7937ca69", "score": "0.46725273", "text": "def read_file_and_fill_nmap_variable():\n\tglobal nmap_commands_file\n\tglobal nmap_command\n\tglobal trace_file\n\tglobal file_position\n\tglobal mlog\n\tglobal verbose_level\n\tglobal sql_conn\n\tglobal sql_file\t\n\t\n\twith open(nmap_commands_file,'r') as f:\n\t\tjobs = f.readlines()\n\n\t#make sure all jobs in file are in queue\n\tfor job in jobs:\n\t\tif not job in nmap_command:\n\t\t\tnmap_command.insert(0,job)\n\t\t\tmlog.debug('New Job: {0}'.format(job))\n\n\t#clear queue of things not in jobs file\n\tfor job in nmap_command:\n\t\tif not job in jobs:\n\t\t\tnmap_command.remove(job)\n\treturn", "title": "" }, { "docid": "be934192c7a14cfdaa46419742116f17", "score": "0.46724537", "text": "def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[y * TILE_SIZE:(y+1)*TILE_SIZE,\n x * TILE_SIZE:(x+1)*TILE_SIZE] = bm", "title": "" }, { 
"docid": "e0f0a83fc50a7644b255655cb788a7df", "score": "0.46626526", "text": "def calc_nino_new(index, ifile, var_id, base_period):\n \n # Calculate the traditional NINO3 and NINO4 indices #\n \n regions = ['NINO3','NINO4']\n anomaly_timeseries = {}\n for reg in regions: \n anomaly_timeseries[reg], temp, indata_complete = calc_nino(reg, ifile, var_id, base_period) \n\n # Calculate the new Ren & Jin index #\n\n ntime = len(anomaly_timeseries['NINO3'])\n \n nino_new_timeseries = numpy.ma.zeros(ntime)\n for i in range(0, ntime):\n nino3_val = anomaly_timeseries['NINO3'][i]\n\tnino4_val = anomaly_timeseries['NINO4'][i]\n product = nino3_val * nino4_val\n\t\n\talpha = 0.4 if product > 0 else 0.0\n\t\n\tif index == 'NINOCT':\n\t nino_new_timeseries[i] = numpy.ma.subtract(nino3_val, (numpy.ma.multiply(nino4_val, alpha)))\n\telif index == 'NINOWP':\n\t nino_new_timeseries[i] = numpy.ma.subtract(nino4_val, (numpy.ma.multiply(nino3_val, alpha)))\n \n # Determine the attributes # \n\n hx = 'Ref: Ren & Jin 2011, GRL, 38, L04704. Base period: %s to %s' %(base_period[0], \n base_period[1])\n long_name = {}\n long_name['NINOCT'] = 'nino_cold_tongue_index'\n long_name['NINOWP'] = 'nino_warm_pool_index' \n\n attributes = {'id': 'nino'+index[4:],\n 'long_name': long_name[index],\n 'standard_name': long_name[index],\n 'units': 'Celsius',\n 'notes': hx}\n\n return nino_new_timeseries, var_atts, indata_complete.global_atts, indata_complete.data.getTime()", "title": "" }, { "docid": "849774094fde1db39b2176847a3dc928", "score": "0.46593848", "text": "def get_nonN_region(infile, thd_n_size, fa):\n reg, pos = {}, {}\n pre_id, pre_end, curr_start = '', 0, 0\n with open(infile) as I:\n # dealing with N region file\n\n for r in I:\n\n col = r.strip().split()\n n_start, n_end = map(int, (col[1], col[2]))\n n_region_size = n_end-n_start+1\n\n if col[0] not in reg:\n\n reg[col[0]] = []\n pos[col[0]] = 0\n\n if len(pre_id):\n reg[pre_id].append([pos[pre_id]+1, fa[pre_id]])\n\n pre_id, pre_end = col[0], n_end\n\n elif n_region_size < thd_n_size:\n # ignore this n region\n pass\n\n else:\n reg[pre_id].append([pos[pre_id]+1, n_start-1])\n pre_id, pre_end = col[0], n_end\n\n if n_region_size >= thd_n_size:\n pos[col[0]] = n_end\n\n for chrom, chromsize in fa.items():\n # loading all other non N regions\n if chrom not in reg:\n reg[chrom] = [1, chromsize]\n\n return reg", "title": "" }, { "docid": "6fc4b2539281176cea9a292dcce7413c", "score": "0.46583053", "text": "def create_block(self, location_list, POI_locations):\n\n\n for i in range(len(location_list)):\n this_cell = self.grid.get_cell_list_contents(location_list[i])\n\n for agent in this_cell:\n if type(agent) is nodeAgent:\n agent.block = True\n\n for i in POI_locations:\n agent.locations[i] = 10000", "title": "" }, { "docid": "d7a9e9e51d8aa9ed0800f29fdb9b82e6", "score": "0.46525958", "text": "def add_counts_to_mapping(biom_lines, mapping_lines, otu_counts, output_fp):\n # Parse biom file\n biom = parse_biom_table(biom_lines)\n # Parse mapping file\n map_data, headers, comments = parse_mapping_file(mapping_lines)\n # Compute the counts per sample\n min_count, max_count, median_count, mean_count, counts_per_sample =\\\n compute_counts_per_sample_stats(biom, binary_counts=otu_counts)\n # Add the counts to the mapping data\n index = len(headers) - 1\n headers.insert(index, \"NumIndividuals\")\n for row in map_data:\n row.insert(index, str(counts_per_sample[row[0]]))\n # # Add the '#' character to the first header\n # headers[0] = '#' + headers[0]\n # # Add headers to the 
data\n # map_data.insert(0, headers)\n # Write the corrected mapping file\n write_corrected_mapping(output_fp, headers, comments, map_data)\n # write_corrected_file(map_data, comments, output_fp)", "title": "" }, { "docid": "fcded669305fd7ea479c33ecde3bfcc0", "score": "0.4651779", "text": "def _fill_in_things(self):\n got = self.lsn[(self.lsn.Done == 1) & self.lsn.File.isna()]\n for label, row in got.iterrows():\n mnem = self._row_mnem(row)\n name = self._get_name(mnem)\n if name:\n print(f'{mnem} ({row.VideoID}) -> \"{name}\"')\n self.lsn.at[label, \"File\"] = name\n\n self._fix_main_lesson(or_warn=False)\n self._write()", "title": "" }, { "docid": "96127c51cb38907c34b2e9878caf55a7", "score": "0.4640659", "text": "def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm", "title": "" }, { "docid": "96127c51cb38907c34b2e9878caf55a7", "score": "0.4640659", "text": "def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm", "title": "" }, { "docid": "eb2971e74eae88c99e9775d277757d1c", "score": "0.46263695", "text": "def place_items(self, count, index):\n\t\tfor i in range(count):\n\t\t\tfree_pos = self.free_position()\n\t\t\tself.tiles[free_pos[0]][free_pos[1]] = index\n\t\t\tnotFound = False", "title": "" }, { "docid": "e4c6cdb55a81a66b1c2a9c362f66a25c", "score": "0.46188384", "text": "def _fill_2011on_aircraft_info(self, line_split, index):\n aircraft_i = 0\n for name in self.Aircraft_varlist:\n self.Aircraft_Nav[name][index] = \\\n float(line_split[aircraft_i + 9 + 10 * self.swath_size])\n aircraft_i += 1", "title": "" }, { "docid": "ec0990e5dd4d3d913b97c346f9596ac1", "score": "0.4616109", "text": "def color_marker_fill_index(cnt,clist,mlist,flist):\n\t\t\tcind = cnt%len(clist)\n\t\t\tmind = (cnt // len(clist)) % len(mlist)\n\t\t\tfind = (cnt // (len(clist)*len(mlist))) % len(flist)\n\t\t\treturn (cind,mind,find)", "title": "" }, { "docid": "f5f1f1c1888107cac605630cff297cfc", "score": "0.46140274", "text": "def fill_dmigs(self):\n return\n #for name, card_comments in self._dmig_temp.items():\n #card0, comment0 = card_comments[0]\n #card_name = card0[0]\n #card_name = card_name.rstrip(' *').upper()\n\n #if card_name == 'DMIG':\n ## if field2 == 'UACCEL': # special DMIG card\n #card = self.dmig[name]\n #elif card_name == 'DMI':\n #card = self.dmi[name]\n #elif card_name == 'DMIJ':\n #card = self.dmij[name]\n #elif card_name == 'DMIJI':\n #card = self.dmiji[name]\n #elif card_name == 'DMIK':\n #card = self.dmik[name]\n #else:\n #raise NotImplementedError(card_name)\n\n #for (card_obj, comment) in card_comments:\n #card._add_column(card_obj, comment=comment)\n #card.finalize()\n\n #self._dmig_temp = defaultdict(list)", "title": "" }, { "docid": "980aca3940dd75bdbb0304e95e3da8de", "score": "0.4594576", "text": "def locNumFix(final, headerName):\n toWriteCount = populationCounter(final, \"State\")\n final[headerName][0] = final[headerName][0][:toWriteCount]", "title": "" }, { "docid": "fe8524813b1be408dadb2b4387533049", "score": "0.45908982", "text": "def _grow(self):\n #this will temporarily store the current indexes keys and values\n #as the hash index file is grown\n tmp = {}\n self.buckets *= 2\n self.__bits_to_use += 1\n self.fh.flush()\n for i in 
xrange(_SIZEOF_BOOKKEEPER, os.path.getsize(self.filename),\n _SIZEOF_ITEM):\n self.fh.seek(i)\n current_item = self.fh.read(_SIZEOF_ITEM)\n if len(current_item) < 24:\n break\n item = struct.unpack(_item_fmt, current_item)\n if item[0][0] == '0':\n continue\n tmp[re.sub(r'\\x00', '', item[0])] = item[1]\n self.fh.seek(i)\n self.fh.seek(_SIZEOF_BOOKKEEPER)\n self.fh.write('0'*self.buckets*_SIZEOF_ITEM)\n #the num_items attribute will be recounted by the _write\n #method\n self.__num_items = 0\n for t in tmp:\n self.__setitem__(t, tmp[t])", "title": "" }, { "docid": "4b93272292acd6c958f748c4592f3cb7", "score": "0.45868424", "text": "def add_to_heatmap(self, cap_num, box_points):\n self.all_heatmaps[self.n][0][cap_num][box_points[1]:box_points[3], box_points[0]:box_points[2]] += 1", "title": "" }, { "docid": "c6beb4a163bcbd1d30f630c8c918cc8e", "score": "0.4581118", "text": "def fill_known_fields(self):\n for row in range(self.N):\n for column in range(self.N):\n domain = self.assign_domain_to_cell(row, column)\n if domain is not None and len(domain) == 1:\n self.board[row][column] = domain.pop()\n self.filled_cells += 1", "title": "" }, { "docid": "bb3313f37a0712b06b3dba36976c9b2c", "score": "0.4580418", "text": "def setN(self,near):\n pass", "title": "" }, { "docid": "5a1a044c03228140ae896ad4816128a3", "score": "0.45766705", "text": "def do_nodes(data,fname):\n\n net = data['network']\n\n \"\"\"\n Initialize some variables\n \"\"\"\n nodes = {}\n atoms_nr = []\n atoms_map = {}\n nodes_names = []\n readnode = False\n net['atoms'] = {}\n nnodes = 0\n bDAT = False\n\n top = data['topology']\n globatoms = mdn.do_globatoms(top)\n\n f = open(fname,'r')\n\n if(data['software']['name'] == 'namd'):\n bDAT = True\n netindex_dat = data['base_dir'] + data['files']['netindex_dat']\n f2 = open(netindex_dat, 'w')\n\n \"\"\" \n st will have the gromacs mdp line specifying\n node energy groups\n \"\"\" \n st = \"energygrps =\"\n\n for line in f:\n lc = re.split(r';',line.strip()) # removing comments\n line = lc[0]\n \n \n if (re.match(mdn.reanytype['gromacs'], line)):\n readnode = False\n \n if (re.match(mdn.renode_strict, line)):\n \"\"\"\n Check if this line has a node name\n renode_strict is necessary here, not sure why\n \"\"\"\n readnode = True\n nnodes += 1\n\n \"\"\"\n Get node name\n \"\"\"\n node = mdn.get_node_name(line)\n\n\n \"\"\" \n Check if node name is invalid \n \"\"\" \n if(node == 'names' or node == 'nr'):\n print(\"Fatal error: You cannot have a group named {}\\n\".format(node))\n exit(1)\n\n \"\"\" \n Update mdp string and add this node to list\n \"\"\" \n st = st + ' ' + node\n last_node = node\n nodes_names.append(node)\n \n elif(readnode and re.match(r'^\\s*\\d+',line)):\n \"\"\"\n Reading node atoms\n \"\"\"\n d = re.split(r'\\s*',line)\n for entry in d:\n entry = int(entry)\n \n atoms_nr.append(entry)\n \n \n if bDAT:\n node_nr = re.split(r'_',last_node)[1]\n f2.write(\"{} {}\\n\".format(entry-1,node_nr)) #NAMD index = GROMACS index - 1\n\n try: \n nodes[last_node]['atoms'].append(entry)\n except:\n nodes[last_node] = {}\n nodes[last_node]['atoms'] = []\n nodes[last_node]['atoms'].append(entry)\n\n \"\"\"\n We want the global residue information,\n so we store the globatom info of the first atom\n globatoms format : \n [atom_number,residue_number,molecule_name,molecule,number]\n \"\"\"\n l = globatoms[str(entry)][:]\n nodes[last_node]['globres'] = l\n \n mol_name = l[2]\n mol = top[mol_name]\n \n if ('netnodes' in mol):\n mol['netnodes'] += 1\n else:\n mol['netnodes'] = 1\n\n 
f.close()\n if(bDAT):\n f2.close()\n\n net['nodes'] = nodes\n net['nodes']['names'] = nodes_names\n net['atoms']['nr'] = mdn.list2dic(atoms_nr)\n\n for name in nodes_names:\n net['nodes'][name]['atoms'] = mdn.list2dic(net['nodes'][name]['atoms']) \n return st", "title": "" }, { "docid": "5028838d08169eee69e1f524a3d742a6", "score": "0.45751694", "text": "def _set_n(self,i,n):\n index = self._label_to_index(i)\n self._n[index] = n", "title": "" }, { "docid": "0d8052525135a354b9b3db2b0220d076", "score": "0.4574222", "text": "def set_average_mass( self, atoms ):\n n_tot = 0\n for key,value in atoms.iteritems():\n n_tot += value\n self.natoms = n_tot\n tot_mass = 0.0\n for key,value in atoms.iteritems():\n tot_mass += value*atomic_masses[atomic_numbers[key]]\n self.tot_mass = tot_mass\n #print (self.avg_mass)", "title": "" }, { "docid": "9876481e974897723ef9215439555915", "score": "0.45719263", "text": "def _initNoteDictionnary(self):\n strings = self._maxStrings\n frets = self._maxFrets\n dict = [[0 for i in range(frets)] for i in range(strings)]\n \n dict[0][0] = (5, 4) # E 4\n dict[1][0] = (12, 3) # B 3\n dict[2][0] = (8, 3) # G 3\n dict[3][0] = (3, 3) # D 3\n dict[4][0] = (10, 2) # A 2\n dict[5][0] = (5, 2) # E 2\n \n for i in range(strings):\n for j in range(1, frets):\n baseNote = dict[i][j - 1]\n octave = baseNote[1]\n note = baseNote[0] + 1\n \n if baseNote[0] + 1 > 12:\n octave += 1\n note = 0\n \n dict[i][j] = (note, octave)\n \n return dict", "title": "" }, { "docid": "f1b70104cc17ebca84d7a14a97c5ebb0", "score": "0.4570324", "text": "def addItems(self):\n for item, itemType in self.itemsDict.items():\n itemRow=0\n itemCol=0\n\n while self.map[itemRow][itemCol] != \" \" :\n itemRow = random.randint(0, 10)\n itemCol = random.randint(0, 10)\n\n\n self.map[itemRow][itemCol] = item", "title": "" }, { "docid": "4db4b942438cfabfabb843d739546a21", "score": "0.45683765", "text": "def write_pdb(coords,nacoords,idx):\n\tdirectory = \"pdb\"\n\n\tif not os.path.exists(directory):\n\t\tos.makedirs(directory)\n\n\t#sort the keys\n\tkeys = []\n\n\tfor key in coords.keys():\n\t\tkeys.append(key)\n\n\tkeys.sort()\n\n\tpdbfilename = \"%s/structure_%d.pdb\" %(directory,idx)\n\tpdbfile = open(pdbfilename,\"w\")\n\n\tatcount = 0\n\n\tfor key in keys:\n\t\tfor val in coords[key]:\n\t\t\tatcount+=1\n\t\t\tx = val[0]\n\t\t\ty = val[1]\n\t\t\tz = val[2]\n\n\t\t\tfinal=\"{:6s}{:5d} {:^4s}{:1s}{:3s} {:1s}{:4d}{:1s} {:8.3f}{:8.3f}{:8.3f}{:6.2f}{:6.2f} {:>2s}{:2s}\\n\".format(\"ATOM\",atcount,\"H1\",\" \",\"LIG\",\"A\",1,\" \",x,y,z,1.00,1.00,\"H\",\"0\")\n\t\t\tpdbfile.write(final)\n\n\tfinal=\"{:6s}{:5d} {:^4s}{:1s}{:3s} {:1s}{:4d}{:1s} {:8.3f}{:8.3f}{:8.3f}{:6.2f}{:6.2f} {:>2s}{:2s}\\n\".format(\"ATOM\",atcount+1,\"Na\",\" \",\"NAA\",\"A\",1,\" \",nacoords[0],nacoords[1],nacoords[2],1.00,1.00,\"Na\",\"0\")\n\tpdbfile.write(final)\n\tpdbfile.write(\"END\")\n\tpdbfile.close()\n\t#sys.exit(-1)", "title": "" }, { "docid": "1046918a901bebbb87f12ede3293215d", "score": "0.4565911", "text": "def _set_numbers(self):\n for ii,pp in enumerate(self):\n if pp.n != ii:\n pp.n = ii", "title": "" }, { "docid": "c17b1d3c6c8b97a4cf8d785b86c326f3", "score": "0.456551", "text": "def setOtaBlockNumber(self, blockNum):\n self.__setIdentifierInFile(\n {'#define otaconfigMAX_NUM_BLOCKS_REQUEST': str(blockNum) + 'U'},\n os.path.join(self._projectRootDir, OtaAfrProject.OTA_CONFIG_PATH)\n )", "title": "" }, { "docid": "49d5bb90a93cfe4ec9abee7296be520a", "score": "0.45651713", "text": "def make_map(self,file1):\r\n file = open(file1)\r\n 
k = file.readline()\r\n self.row, self.col = k.split()\r\n self.row = int(self.row)\r\n self.col = int(self.col)\r\n while k != \"\":\r\n k=file.readline()\r\n self.map.append(k.strip('\\n').split()) # appending the splitting of k\r", "title": "" }, { "docid": "03e100fc9ddb6fa8fa366a41210c9043", "score": "0.456026", "text": "def fill_ions(self, use_Nfile=False, jfile=None, use_components=False,\n verbose=True):\n if jfile is not None:\n # Load\n with open(jfile) as data_file: \n ions_dict = json.load(data_file)\n # Loop on systems\n for abs_sys in self._abs_sys:\n abs_sys.get_ions(idict=ions_dict[abs_sys.name])\n elif use_Nfile:\n for abs_sys in self._abs_sys:\n abs_sys.get_ions(use_Nfile=True, verbose=verbose)\n elif use_components:\n for abs_sys in self._abs_sys:\n abs_sys._ionN = ltiu.iontable_from_components(abs_sys._components,\n ztbl=abs_sys.zabs)\n else:\n raise ValueError(\"Not sure how to load the ions\")", "title": "" }, { "docid": "acb90b7bfbd136bc377f49cc40906108", "score": "0.45537674", "text": "def add_data_to_tile_dict(tileDict, args, fileList, flag, minTimeStamp):\n for filename in fileList:\n tile = cpfiletools.get_tile_number_from_filename(filename)\n ID = flag+\"_tile\"+tile\n path = args.absPath + filename\n if minTimeStamp == 0:\n timestamp = 0\n else:\n timestamp = cpfiletools.parse_timestamp_from_filename(filename)\n # Add one second to all timestamps to prevent having two zero timepoints on tile 1\n timestamp = cpfiletools.get_time_delta(timestamp, minTimeStamp) + 1\n tileDict[tile].append(FileData(ID, path, timestamp))", "title": "" }, { "docid": "b277df18f63b1357b6f0a6c465a50a43", "score": "0.4549439", "text": "def impt(self,flist):\n ee = {}\n elist = []\n for f in flist:\n fn = f.split( '/' )[-1]\n fnParts = fn[:-3].split( self.fnsep )\n \n try:\n if self.pcfg.freqIndex is not None:\n if self.pcfg.freqIndex < len( fnParts ):\n freq = fnParts[self.pcfg.freqIndex]\n else:\n print( 'ERROR: freqIndex=%s, fnParts=%s' % (self.pcfg.freqIndex, fnParts) )\n raise\n else:\n freq = None\n\n group = fnParts[ self.pcfg.groupIndex ]\n\n if self.parent.fileIsFixed:\n trange = None\n else:\n trange = fnParts[-1].split( '-' )\n var = fnParts[self.pcfg.varIndex]\n thisKey = '.'.join( fnParts[:-1] )\n#### print (var, thisKey )\n if group not in list(ee.keys()):\n ee[group] = {}\n if thisKey not in list(ee[group].keys()):\n ee[group][thisKey] = []\n ee[group][thisKey].append( (f,fn,group,trange) )\n except:\n print('Cannot parse file name: %s' % (f)) \n elist.append(f)\n## this ee entry is not used, except in bookkeeping check below. 
\n## parsing of file name is repeated later, and a error log entry is created at that stage -- this could be improved.\n## in order to improve, need to clarify flow of program: the list here is used to provide preliminary info before log files etc are set up.\n group = '__error__'\n thisKey = fn\n if group not in list(ee.keys()):\n ee[group] = {}\n if thisKey not in list(ee[group].keys()):\n ee[group][thisKey] = []\n ee[group][thisKey].append( (f,fn,group) )\n raise\n\n nn = len(flist)\n n2 = 0\n for k in list(ee.keys()):\n for k2 in list(ee[k].keys()):\n n2 += len( ee[k][k2] )\n\n assert nn==n2, 'some file lost!!!!!!'\n if len(elist) == 0:\n self.info = '%s %s, %s' % (nn, maybe_plural(\"file\", nn), str(list(ee.keys())))\n else:\n self.info = '%s %s, %s frequencies, severe errors in file names: %s' % (nn, maybe_plural(\"file\", nn), len(list(ee.keys())), len(elist))\n for e in elist:\n self.info += '\\n%s' % e\n self.ee = ee", "title": "" }, { "docid": "bd48eea3de28fbd9fa99f152a5421660", "score": "0.45494193", "text": "def insert_ngram_values(in_dir, out_dir):\r\n for letter in alphabet:\r\n # Load in the list of all forms from the lexicon\r\n forms = {}\r\n freq_iterator = FrequencyIterator(inDir=in_dir,\r\n outDir=None,\r\n message='Loading ngram data',\r\n letters=letter)\r\n for entry in freq_iterator.iterate():\r\n forms[entry.form] = list()\r\n\r\n # Hunt for these lemmas in the GBN data\r\n for gram_count in (1, 2, 3, 4):\r\n print('\\tchecking %s/%d...' % (letter, gram_count))\r\n gbn_iterator = TableIterator(gramCount=gram_count,\r\n letter=letter,\r\n verbose=False)\r\n for ngram in gbn_iterator.iterate():\r\n if ngram.lemma in forms:\r\n line = '%d\\t%s' % (gram_count, ngram.line)\r\n forms[ngram.lemma].append(line)\r\n\r\n # Add GBN stats to the list of forms\r\n freq_iterator = FrequencyIterator(inDir=in_dir,\r\n outDir=out_dir,\r\n letters=letter)\r\n for entry in freq_iterator.iterate():\r\n gbn_node = etree.SubElement(entry.node, 'gbn')\r\n for line in forms[entry.form]:\r\n parts = line.split('\\t')\r\n gram_count = parts.pop(0)\r\n parts.pop(0) # remove the sortcode\r\n parts.pop(0) # remove the form\r\n ngram_node = etree.SubElement(gbn_node, 'ngram')\r\n ngram_node.set('n', gram_count)\r\n ngram_node.set('wordclass', parts.pop(0))\r\n if parts:\r\n ngram_node.text = ' '.join(parts)", "title": "" }, { "docid": "813cc76f5c7ca8ffaf8d9be4416bbdd7", "score": "0.4545949", "text": "def uf_init(self):\n n = len(self.bn.keys())\n self._id = list(range(n))\n self._sz = [1] * n", "title": "" }, { "docid": "658763adb50dddf4952dc6b9e1ac29dd", "score": "0.45450523", "text": "def _populate_occurrences(self, file_name):\n if file_name:\n f = open(file_name, 'r')\n for line in f:\n split_line=line.split()\n term = split_line[0]\n #TODO need to make robust for errors in input file\n count = int(split_line[1])\n self.occurrence_dict[term] = count", "title": "" }, { "docid": "51644abba44daade0ac098f9eee1067c", "score": "0.45397416", "text": "def mgf_dic(mgf_file=open(os.getcwd() + \"/\" + args.mgf_file, 'r'), mz_freeb=mz_freebases()):\n\n d = {}\n # Create a dictionary with all precursor masses _ RT as keys, to be filled with info for the final output\n # (M intensity and absolute sumI value)\n global mgf_peaks, info_MS2_scans\n mgf_peaks, info_MS2_scans = 0, {}\n\n # Read the lines of mgf input file\n for line in mgf_file:\n\n # Set values for prec_mass, rt and charge in order to avoid errors when those values are missing from the mgf\n # This means precursors with missing mass values, 
charge or rt will not be considered for matching\n\n # NOTE: charge is assigned as default to 1 in case no charge information is present in the mgf\n if \"BEGIN IONS\" in line:\n prec_mass, rt, charge = 0, 0, str(1)\n\n # M/z of the precursor ions are used as keys for the dictionary\n if \"PEPMASS\" in line:\n prec_mass = '{:.6f}'.format(np.float64(line.split()[0].split('=')[1]))\n mgf_peaks += 1\n\n # Charge of the precursor ion\n if \"CHARGE\" in line:\n charge = re.findall(r'\\d+', line.split('=')[1])[0]\n\n # RT for the precursor ion\n if \"RTINSECONDS\" in line:\n rt, list_mz_MS2 = str(np.float64(line.split('=')[1][:-1])), []\n\n if line[0].isdigit():\n # Add m/z value to the list of MS2 m/z\n list_mz_MS2.append(line.split()[0] + \"_\" + line.split()[1])\n\n # Determine the intensity for the precursor ion peak (closest value within MS2 scans to the PEPMASS)\n if \"END IONS\" in line:\n # Controls on charges and m/z boundaries for MS1 precursor ions\n if args.MS1_mz_minimum <= np.float64(prec_mass) <= args.MS1_mz_maximum:\n d[prec_mass + \"_\" + rt + \"_\" + charge], info_MS2_scans[prec_mass + \"_\" + rt] = {}, []\n\n if list_mz_MS2:\n\n info_MS2_scans[prec_mass + \"_\" + rt].append(closest_value(list_mz_MS2, prec_mass))\n\n for line in list_mz_MS2:\n # Add all the ion m/z : intensity info (excludes values around precursor ion and M+Na/2Na,\n # also if below MS2_peak_int_min)\n l = line.split('_')\n\n if (prec_mass + \"_\" + rt + \"_\" + charge in d.keys() and np.float64(\n l[1]) > threshold_MS2_int() and (\n np.float64(l[0]) < np.float64(prec_mass) - args.precursor_window_removal or\n np.float64(l[0]) > np.float64(prec_mass) + args.precursor_window_removal) and (\n np.float64(l[0]) < np.float64(prec_mass) + NA_mass / int(charge) - H_mass / int(\n charge) - args.precursor_window_removal\n or np.float64(l[0]) > np.float64(prec_mass) + NA_mass / int(charge) - H_mass / int(\n charge) + args.precursor_window_removal) and (\n np.float64(l[0]) < np.float64(prec_mass) + 2 * NA_mass / int(charge) - 2 * H_mass / int(\n charge) -\n args.precursor_window_removal or np.float64(l[0]) > np.float64(\n prec_mass) + 2 * NA_mass / int(charge) - 2 * H_mass / int(\n charge) + args.precursor_window_removal)):\n d[prec_mass + \"_\" + rt + \"_\" + charge].update({l[0]: l[1]})\n\n # If no MS2 ions are listed in the mgf, a single fake 1 m/z : 1 int peak is created to avoid\n # errors in the matching process (will result in score 0)\n else:\n info_MS2_scans[prec_mass + \"_\" + rt].append('1'), d[prec_mass + \"_\" + rt + \"_\" + charge].update(\n {'1': '1'})\n\n # Normalize and sorts the list of MS2 m/z:intensity\n for key in d:\n lista = []\n\n # Create a list with all MS2 ions entries, if m/z of MS2 ions are within the specified MS2 m/z window\n for k in d[key]:\n if args.MS2_mz_minimum <= np.float64(k) <= args.MS2_mz_maximum:\n lista.append([k, d[key][k]])\n\n # Add MS2 ions of free bases even if they are outside the specified MS2 window\n else:\n for ion in mz_freeb:\n if (np.float64(k) >= np.float64(ion) + ppm_range(np.float64(ion), MS2_ppm_offset) - ppm_range(\n np.float64(ion) + ppm_range(np.float64(ion), MS2_ppm_offset), args.MS2_ppm) and\n np.float64(k) <= np.float64(ion) + ppm_range(np.float64(ion),\n MS2_ppm_offset) + ppm_range(\n np.float64(ion) + ppm_range(np.float64(ion), MS2_ppm_offset), args.MS2_ppm)):\n lista.append([k, d[key][k]])\n break\n\n # Check if in the mgf file there is at least a MS2 ion \n if lista:\n lista.sort(key=lambda x: np.float64(x[1]), reverse=True)\n\n # Add the absolute 
value of the highest intensity to the info dictionary (in preparation for the output)\n info_MS2_scans[key.split('_')[0] + '_' + key.split('_')[1]].append(lista[0][1])\n\n # Eliminate eventual eccess of MS2 ions found based on a given upper value\n if len(lista) > MS2_max_peaks():\n lista = lista[:MS2_max_peaks()]\n\n # Write the normalized and sorted MS2 scans into the output dictionary\n d[key] = {x[0]: str(x[1]) for x in lista}\n\n return d", "title": "" }, { "docid": "9e740d6e17946a366e2e356fedc5b3a3", "score": "0.45386872", "text": "def __init__ (self, _dat=None, _offset=int(0), _append=False, **kwargs):\n keys = [[\"copy\",np.float64], [\"send\",np.float64], [\"receive\",np.float64], [\"calc_force\",np.float64], [\"n_walk\",np.int64], [\"n_epi\",np.int64], [\"n_epj\",np.int64], [\"n_spj\",np.int64], [\"n_call\",np.int64]] \n DictNpArrayMix.__init__(self, keys, _dat, _offset, _append, **kwargs)", "title": "" }, { "docid": "27113e26c21506b00a0dc73544b84dec", "score": "0.45383263", "text": "def fill_edge_matrix(nsrcs, match_dict):\n e_matrix = np.zeros((nsrcs, nsrcs))\n for k, v in match_dict.items():\n e_matrix[k[0], k[1]] = v\n return e_matrix", "title": "" }, { "docid": "da0b3096a4bf0b75b4614e71c2bcba6b", "score": "0.45338058", "text": "def _populate_arena_map_from_memory(self, mmap):\n\n p = self.protocol\n map_name = '%sx%s' % mmap.dimensions\n # This yanks all units off of the map.\n yield think_fn_wrappers.btloadmap(p, self.map_dbref, map_name)\n # Feed terrain in via btsetmaphex() a whole line at a time.\n for y in range(0, mmap.get_map_height()):\n yield think_fn_wrappers.btsetmaphex_line(\n p, self.map_dbref, y,\n mmap.terrain_list[y], mmap.elevation_list[y])", "title": "" }, { "docid": "52f2fccc25c271e601de1115f926ed66", "score": "0.45298743", "text": "def autoLocNum(final):\n rawStreets = final[\"Full Street Address\"][0]\n cleanRawStreets = [x for x in rawStreets if x != \"\"]\n cleanRawStreets.remove(\"Full Street Address\")\n dupesBool = checkIfSame(cleanRawStreets)\n if dupesBool == True:\n bldgNum = final[\"Loc #\"][0]\n for item in range(1, len(cleanRawStreets) + 1):\n bldgNum[item] = 1\n else:\n pass", "title": "" }, { "docid": "ce94651577ff2a2adebea12cf86669d4", "score": "0.45292252", "text": "def initial_scan(self):\n for i in range(0, self.nbr):\n for j in range(0, self.nbc):\n tile = self.grid[i][j]\n if tile == '$':\n self.pacmen.append((i, j))\n elif tile == '@':\n self.foods.append((i, j))\n self.foods_left += 1", "title": "" }, { "docid": "3e95d5b3889932f6c580526aec0b4feb", "score": "0.45255584", "text": "def XPLMSetFMSEntryInfo(inIndex,\n inRef,\n inAltitude):\n pass", "title": "" }, { "docid": "7512160e6650114ab48cfa71f2f65d9c", "score": "0.45216498", "text": "def initialize_node_writer_dict():\n for node_label in full_spec_dict:\n writer_dict[node_label] = {}", "title": "" }, { "docid": "fdfa2b18a998bdc198081104d4273792", "score": "0.45197982", "text": "def getBed4Anno(bedFile, expName):\r\n annoIntvlDict = defaultdict(lambda : defaultdict(tuple))\r\n for row in bedFile:\r\n if len(row) == 4:\r\n chrom = row[0]\r\n anno = row[3]\r\n start, end = int(row[1])+1, int(row[2])\r\n #start, end = int(row[1]),int(row[2])\r\n value = float(row[3])\r\n annoIntvlDict[chrom][(start,end)] = (anno, value)\r\n #annoIntvlDict[chrom][Interval(start,end)] = (anno, value)\r\n else:\r\n print \"unmatched file format\"\r\n return annoIntvlDict", "title": "" }, { "docid": "c655679d0067758ae20a0cbf001687f7", "score": "0.45183748", "text": "def _assign_mat(self):\n for idx, x in 
np.ndenumerate(self.x):\n z = self.z[idx]\n posn = np.array([x, z])\n mater = self.geom.get_mater(posn)\n self.mat[idx] = self.geom.mat_dict[mater]", "title": "" }, { "docid": "7afbbd082dcc1f0747e3da7a8fc3930e", "score": "0.45183623", "text": "def aggregation_initialize(n):\n for i in range(n):\n rospy.set_param('current_value/%d' % i, 0)", "title": "" }, { "docid": "fd6faca1400a317d09bd7308c3dd9ce4", "score": "0.45129022", "text": "def update_mask(self, F, M, i):\n \n from numpy import zeros\n \n k = F.shape[0] # # len fragment\n M[i:i + k] = zeros(k) # # set occupied positons to zero\n return M", "title": "" }, { "docid": "5b792708ebe8d3bac387610b8b4bc055", "score": "0.45100966", "text": "def design_matrix_wemai_multi_gmat(pheno_file, bed_file):\n id_bed_lst = []\n with open(bed_file + '.fam') as fin:\n for line in fin:\n arr = line.split()\n id_bed_lst.append(\" \".join([arr[0], arr[1]]))\n id_pheno_lst = {}\n with open(pheno_file) as fin:\n for line in fin:\n arr = line.split()\n if arr[-1] not in ['NA', 'NaN', 'nan', 'na']:\n try:\n id_pheno_lst[\" \".join([arr[0], arr[1]])].append(\" \".join(arr))\n except Exception as e:\n del e\n id_pheno_lst[\" \".join([arr[0], arr[1]])] = [\" \".join(arr)]\n id_not_pheno = set(id_bed_lst) - set(list(id_pheno_lst.keys()))\n if len(id_not_pheno) > 0:\n logging.error('The below genotyped id is not in the phenotype file:\\n {}'.format('\\n'.join(list(id_not_pheno))))\n sys.exit()\n y = []\n xmat = []\n id_lst = []\n for id in id_bed_lst:\n for val in id_pheno_lst[id]:\n arr = val.split()\n y.append(float(arr[-1]))\n xmat.append(arr[2:-1])\n id_lst.append(arr[1])\n y = np.array(y).reshape(-1, 1)\n xmat = np.array(xmat, dtype=float).reshape(y.shape[0], -1)\n id_dct = {}\n row = []\n col = []\n j = 0\n for i in range(len(id_lst)):\n row.append(i)\n if id_lst[i] not in id_dct:\n id_dct[id_lst[i]] = j\n j += 1\n col.append(id_dct[id_lst[i]])\n zmat = csr_matrix(([1.0]*len(row), (row, col)))\n return y, xmat, zmat", "title": "" }, { "docid": "fa9fc9cda64415ed12556f71dd3868a8", "score": "0.45053425", "text": "def draw_new_map_data(name):\n g.mapdata = {}\n for c in range (g.col):\n # mapcol = {}\n for r in range(g.row):\n g.mapdata[(c,r)] = name\n # g.mapdata.append(mapcol)", "title": "" }, { "docid": "ba6a72521d2be1bc7783fddc19d33e73", "score": "0.4500419", "text": "def _assign_mat(self):\n for idx, x in np.ndenumerate(self.x):\n mater = self.geom.get_mater(x)\n self.mat[idx] = self.geom.mater_dict[mater]", "title": "" }, { "docid": "292ad02fa7cf781cb3d9483e66c7144b", "score": "0.4495577", "text": "def fill_nas(self):\n self.county_with_totals = self.county_with_totals.fillna(0)\n self.manure_pts_gpd_final = self.manure_pts_gpd_final.fillna(0)\n self.crp2016_pts_gpd_final = self.crp2016_pts_gpd_final.fillna(0)\n self.crp2020_pts_gpd_final = self.crp2020_pts_gpd_final.fillna(0)\n self.crp2050_pts_gpd_final = self.crp2050_pts_gpd_final.fillna(0)\n self.msw_CBGcntrd_gpd_final = self.msw_CBGcntrd_gpd_final.fillna(0)\n self.proc_pts_gpd_final = self.proc_pts_gpd_final.fillna(0)\n\n self.DES_CBGcntrd_gpd_final = self.DES_CBGcntrd_gpd_final.fillna(0)\n self.MUD_nonpt_gpd_final = self.MUD_nonpt_gpd_final.fillna(0)\n self.PROC_ZCcntrd_gpd_final = self.PROC_ZCcntrd_gpd_final.fillna(0)\n self.COMB_pts_gpd_final = self.COMB_pts_gpd_final.fillna(0)\n self.AD_pts_gpd_final = self.AD_pts_gpd_final.fillna(0)\n self.W2E_pts_gpd_final = self.W2E_pts_gpd_final.fillna(0)\n self.manure_nonpts = self.manure_nonpts.fillna(0)\n self.proc_nonpts = self.proc_nonpts.fillna(0)", 
"title": "" }, { "docid": "1f02daf7b910c5243d885b6f583142e0", "score": "0.44934213", "text": "def mapAligns(gapped_fam_dict, alphamap):\n for RF in gapped_fam_dict:\n # print(RF)\n for ID in gapped_fam_dict[RF]:\n if gapped_fam_dict[RF][ID].get('bear'):\n gapped_fam_dict[RF][ID]['alpha'] = \"\".join(\n [alphamap[ch] for ch in gapped_fam_dict.get(RF).get(ID).get('bear')]\n )", "title": "" }, { "docid": "e48c2883a0247c184c08fc9fb09bcdc0", "score": "0.44907802", "text": "def add(self, n):\n for i in range(n):\n self.d.get_location()", "title": "" }, { "docid": "7035df83167f08aa711402371430d126", "score": "0.44892564", "text": "def flush_nonstars():\n global NONSTARS, NONSTAR_NEXT_ID, NONSTAR_DATAFILENAME, NONSTAR_DATAFILE\n NONSTARS = {}\n NONSTAR_NEXT_ID = 0\n gc.collect()\n NONSTAR_DATAFILE.close()\n NONSTAR_DATAFILENAME = \"data\" + str(time()) + \".txt\"\n NONSTAR_DATAFILE = open(NONSTAR_DATAFILENAME, \"w\")", "title": "" }, { "docid": "b769b64d7511d5e911965ff2b6bf1d6b", "score": "0.44836882", "text": "def normalize_freq(self):\n self.freq_norm = {}\n for pos in self.ig_pos.keys():\n n = len(self.ig_pos[pos])\n self.freq_norm[pos] = np.full([n, n,],np.nan)\n for i, j in combinations(list(range(n)), 2):\n igs = self.ig_pos[pos][i,j]\n # The forms have never been seen together.\n if igs == []:\n continue\n probs = self.freq_cooc[pos][i,j]\n ig_norm = [igs[k] for k in range(len(probs))]\n avrg_ig = sum(ig_norm)\n # print(\"summed:\", avrg_ig)\n self.freq_norm[pos][i,j] = self.freq_norm[pos][j,i] = avrg_ig \n del self.ig_pos, self.freq_cooc", "title": "" }, { "docid": "c2e98b362dbc6e1abd1d8788454ff9bb", "score": "0.44812447", "text": "def initialise_contact_matrices(self):\n for key in ['contact', 'transmission', 'transmission_end_tb']:\n self.contact_matrices[key] = {}\n self.n_contacts[key] = {}\n for location in ['school', 'workplace', 'household', 'community']:\n self.contact_matrices[key][location] = np.zeros((101, 101)) # null matrix 100x100\n self.n_contacts[key][location] = 0", "title": "" } ]
c515892961cd913beb16a6649562f427
Resnet 101 Based Network
[ { "docid": "360979935b5340307c01fc3d0a6ed7bd", "score": "0.0", "text": "def DeepR101V3PlusD_HANet(args, num_classes, criterion, criterion_aux):\n print(\"Model : DeepLabv3+, Backbone : ResNet-101\")\n return DeepV3PlusHANet(num_classes, trunk='resnet-101', criterion=criterion, criterion_aux=criterion_aux,\n variant='D16', skip='m1', args=args)", "title": "" } ]
[ { "docid": "b6179eed08cd0abdb9df6a4ee2d08bba", "score": "0.72976834", "text": "def resnet101(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "title": "" }, { "docid": "b6179eed08cd0abdb9df6a4ee2d08bba", "score": "0.72976834", "text": "def resnet101(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "title": "" }, { "docid": "b6179eed08cd0abdb9df6a4ee2d08bba", "score": "0.72976834", "text": "def resnet101(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "title": "" }, { "docid": "5d6c26aaf7720d307aedc32ea5206f58", "score": "0.7111348", "text": "def resnet101(num_classes, grayscale):\n net = ResNet(block = Bottleneck,\n layers = [3, 4, 23, 3],\n num_classes = num_classes,\n grayscale = grayscale)\n return net", "title": "" }, { "docid": "0ef45e5e108c80ebec47c4270bf82a96", "score": "0.70309484", "text": "def resnet_v1(n, data):\n depth = n * 6 + 2\n input_shape = data[5]\n num_classes = data[6]\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "title": "" }, { "docid": "c95a438046fe86ada8de872f4fee8e09", "score": "0.69727975", "text": "def train_rpn():\n\n resnet = resnet101(pretrained=True).cuda()\n\n rpn = RPN().cuda()\n\n\n #load_path = 'data/rpn/rpn_19.2499'\n #check_point = torch.load(load_path)\n\n #rpn.load_state_dict(check_point['rpn'])\n #resnet.load_state_dict(check_point['resnet'])\n\n rpn_targegt_creator = RPNTargetCreator()\n dataset = custom_dataset()\n dataloader = DataLoader(dataset, batch_size=1, shuffle=True)\n dataiter = iter(dataloader)\n total_epoch = 20\n\n lr = 0.01\n params = []\n for net in [resnet, rpn]:\n for key, value in dict(net.named_parameters()).items():\n if value.requires_grad:\n if 'bias' in key:\n params += [{'params': [value], 'lr': lr * 2, 'weight_decay': 0}]\n else:\n params += [{'params': [value], 'lr': lr, 'weight_decay': cfg.weight_decay}]\n optimizer = optim.SGD(params, momentum=0.9)\n\n loss_name = ['TRAIN RPN rpn_cls_loss', 'TRAIN RPN rpn_loc_loss']\n vis_rpn_loc_loss, vis_rpn_cls_loss= 0, 0\n\n for epoch in range(total_epoch):\n for i in range(0, len(dataset)):\n try:\n img_batch, bndboxes_batch, labels_batch = next(dataiter)\n except StopIteration:\n dataiter = iter(dataloader)\n img_batch, 
bndboxes_batch, labels_batch = next(dataiter)\n # only support batch_size=1 for now\n img, bndboxes, labels = img_batch, bndboxes_batch[0], labels_batch[0]\n img, bndboxes, labels = img.cuda(), bndboxes.cuda(), labels.cuda()\n batch_size, channels, height, width = img.shape\n feature = resnet(img)\n rois, anchors, rpn_loc, rpn_score = rpn(feature, feature_stride=16)\n gt_rpn_loc, gt_rpn_label = rpn_targegt_creator(anchors, bndboxes.cpu().detach().numpy(), (height, width))\n\n gt_rpn_label = at.toTensor(gt_rpn_label).long().cuda()\n gt_rpn_loc = at.toTensor(gt_rpn_loc).float().cuda()\n\n rpn_loc_loss = _calc_loc_loss(rpn_loc[0], gt_rpn_loc, gt_rpn_label, sigma=3.)\n\n rpn_cls_loss = F.cross_entropy(rpn_score[0], gt_rpn_label, ignore_index=-1)\n\n rpn_loss = rpn_loc_loss + rpn_cls_loss\n print('rpn_loc_loss:', rpn_loc_loss.item(), 'rpn_cls_loss:', rpn_cls_loss.item())\n print('total_loss:', rpn_loss.item())\n optimizer.zero_grad()\n\n rpn_loss.backward()\n\n optimizer.step()\n\n vis_rpn_cls_loss += rpn_cls_loss.item()\n vis_rpn_loc_loss += rpn_loc_loss.item()\n\n if (i + 1) % 40 == 0:\n vis_loss_value = [vis_rpn_cls_loss, vis_rpn_loc_loss]\n draw_loss_cruve(loss_name, 2501 * (epoch - 1) + i, vis_loss_value)\n vis_rpn_loc_loss, vis_rpn_cls_loss= 0, 0\n\n if (epoch + 1) % 10 == 0:\n for parameter_group in optimizer.param_groups:\n parameter_group['lr'] *= 0.1\n\n svae_path = 'data/rpn/rpn_epoch_{}.params'.format(epoch)\n state_dict = {'rpn': rpn.state_dict(),\n 'resnet': resnet.state_dict()}\n torch.save(state_dict, svae_path)", "title": "" }, { "docid": "117e96566a6aa8587d0cd939becea12b", "score": "0.6957314", "text": "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n return model", "title": "" }, { "docid": "439f6676197d2626335185ac801d3eb6", "score": "0.6947986", "text": "def networks():", "title": "" }, { "docid": "725999d09c4a641b3fddbee986a26450", "score": "0.69428533", "text": "def resnet101(pretrained=False, **kwargs):\n return _resnet('resnet101', BottleneckBlock, 101, pretrained, **kwargs)", "title": "" }, { "docid": "1cb314afcb6d3e752d73ab743e739637", "score": "0.6933019", "text": "def resnet(**kwargs):\n return ResNet(**kwargs)", "title": "" }, { "docid": "1cb314afcb6d3e752d73ab743e739637", "score": "0.6933019", "text": "def resnet(**kwargs):\n return ResNet(**kwargs)", "title": "" }, { "docid": "f302e94e1dadef3fe0a2885ff2f1c5cc", "score": "0.6895417", "text": "def train_rcnn():\n resnet = resnet101(pretrained=False)\n rpn = RPN()\n rcnn = RCNN()\n\n resnet.cuda()\n rpn.cuda()\n rcnn.cuda()\n resume = True\n if resume:\n check_point = torch.load('/home/licheng/home/licheng/projects/cnet/data/rpn/rpn_epoch_19.params')\n rpn.load_state_dict(check_point['rpn'])\n resnet.load_state_dict(check_point['resnet'])\n #rcnn_check_point = torch.load(\"/home/licheng/home/licheng/projects/cnet/data/rcnn/rcnn_epoch_16.params\")\n #rcnn.load_state_dict(rcnn_check_point['rcnn'])\n\n #rcnn_check_point = torch.load('/home/licheng/home/licheng/projects/cnet/data/rcnn/rcnn_epoch_5.params')\n #rcnn.load_state_dict(rcnn_check_point['rcnn'])\n\n # fix prarams\n\n\n params = []\n lr = 0.01\n for net in [rcnn]:\n for key, value in dict(net.named_parameters()).items():\n if value.requires_grad:\n if 'bias' in key:\n params += [{'params': [value], 'lr': lr * 2, 'weight_decay': 0}]\n else:\n params += [{'params': [value], 'lr': lr, 'weight_decay': cfg.weight_decay}]\n\n rcnn_optimizer = optim.SGD(params, momentum=0.9)\n\n rcnn_target_creator = 
RCNNTargetCreator()\n\n dataset = custom_dataset()\n dataloader = DataLoader(dataset, batch_size=1, shuffle=False)\n dataiter = iter(dataloader)\n total_epoch = 20\n\n loss_name = ['TRAIN RCNN rcnn_cls_loss', 'TRAIN RCNN rcnn_loc_loss']\n vis_o_l_loss, vis_o_c_loss= 0, 0\n\n\n for epoch in range(total_epoch):\n for i in range(len(dataset)):\n try:\n img_batch, bndboxes_batch, labels_batch = next(dataiter)\n except StopIteration:\n dataiter = iter(dataloader)\n img_batch, bndboxes_batch, labels_batch = next(dataiter)\n img, bndboxes, labels = img_batch, bndboxes_batch[0], labels_batch[0]\n img, bndboxes, labels = img.cuda(), bndboxes.cuda(), labels.cuda()\n feature = resnet(img)\n rois, _, _, _ = rpn(feature, feature_stride=16)\n\n #print('gt boxes:', bndboxes)\n sample_roi, gt_roi_label, gt_roi_loc = rcnn_target_creator(rois, bndboxes.cpu().numpy(), labels)\n #print('pred box', len(sample_roi), sample_roi)\n roi_cls_loc, roi_score = rcnn(sample_roi, feature)\n\n # apple roi_cls_loc to sample_rois has a good result\n mean = torch.Tensor((0., 0., 0., 0.)).repeat(cfg.n_class)[None].cuda()\n std = torch.Tensor((0.1, 0.1, 0.2, 0.2)).repeat(cfg.n_class)[None].cuda()\n loc = (roi_cls_loc * std + mean)\n\n look_score1 = at.toNumpy(roi_score.detach().cpu())\n look_score2 = at.toNumpy(F.softmax(roi_score, dim=1).detach().cpu())\n\n num_rois = roi_cls_loc.shape[0]\n roi_cls_loc = roi_cls_loc.view(num_rois, -1, 4)\n roi_loc = roi_cls_loc[torch.arange(0, num_rois).long(), at.toTensor(gt_roi_label).long()]\n\n keep = gt_roi_label > 0\n keep = at.toNumpy(keep.detach().cpu())\n\n loc = loc.view(num_rois, -1, 4)\n loc = loc[torch.arange(0, num_rois).long(), at.toTensor(gt_roi_label).long()]\n\n pred_boxes = loc2bbox(sample_roi, at.toNumpy(loc.detach().cpu()))[keep>0, :]\n\n gt_roi_loc = at.toTensor(gt_roi_loc).float().cuda()\n gt_roi_label = at.toTensor(gt_roi_label).long().cuda()\n\n roi_loc_loss = _calc_loc_loss(roi_loc, gt_roi_loc, gt_roi_label.data, sigma=1.)\n roi_cls_loss = nn.CrossEntropyLoss()(roi_score, gt_roi_label)\n rcnn_loss = roi_cls_loss + roi_loc_loss\n\n print('rcnn_loc_loss:', roi_loc_loss.item(), 'rcnn_cls_losss:', roi_cls_loss.item())\n print('total_loss', rcnn_loss.item())\n rcnn_optimizer.zero_grad()\n\n rcnn_loss.backward()\n\n rcnn_optimizer.step()\n\n vis_o_c_loss += roi_cls_loss.item()\n vis_o_l_loss += roi_loc_loss.item()\n\n if (i + 1) % 40 == 0:\n vis_loss_value = [vis_o_c_loss, vis_o_l_loss]\n draw_loss_cruve(loss_name, 2501 * (epoch - 1) + i, vis_loss_value)\n vis_o_l_loss, vis_o_c_loss = 0, 0\n\n if (epoch + 1) % 10 == 0:\n for parameter_group in rcnn_optimizer.param_groups:\n parameter_group['lr'] *= 0.1\n\n\n svae_path = 'data/rcnn/rcnn_epoch_{}.params'.format(epoch)\n state_dict = {'rcnn': rcnn.state_dict()}\n torch.save(state_dict, svae_path)", "title": "" }, { "docid": "865c5d5705d65f6cbfbdfca426471c8c", "score": "0.6860366", "text": "def resnet_v1(input_shape, depth, num_classes=10):\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = oct_resnet_layer(inputs=inputs) # x is list of tensor, and len(x) == 2\n # Instantiate the stack of residual units\n alpha = 0.5\n for stack in range(3):\n if stack == 2:\n alpha = 0\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = oct_resnet_layer(inputs=x,\n 
num_filters=num_filters,\n strides=strides,\n alpha=alpha)\n y = oct_resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None,\n alpha=alpha)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims, because the size of feature map has been changed\n x = oct_resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False,\n alpha=alpha)\n if alpha == 0:\n x = Add()([x, y])\n x = Activation('relu')(x)\n else:\n xh, xl = x\n yh, yl = y\n xh = Add()([xh, yh])\n xl = Add()([xl, yl])\n xh = Activation('relu')(xh)\n xl = Activation('relu')(xl)\n x = [xh, xl]\n num_filters *= 2\n\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "title": "" }, { "docid": "4d8a055c8df18a040231e36fa1fe2999", "score": "0.6805741", "text": "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], arch='resnet101', **kwargs)\n\n return model", "title": "" }, { "docid": "20def14aafac58efaa88326ad0a78e6d", "score": "0.6797279", "text": "def create_resnet34():\n n_classes = 10\n n_input_channel = 1\n\n model = resnet34(pretrained=True)\n model.fc = nn.Linear(512, n_classes)\n model.conv1 = nn.Conv2d(n_input_channel, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n learning_rate = 1e-3\n optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n\n if os.path.exists(\"./weights/acoustic.pth\"):\n print(\"Using pretrained model for inference\")\n model.load_state_dict(torch.load(\"./weights/acoustic.pth\"))\n else:\n print(\"Using scratch model for inference\")\n \n return model, optimizer", "title": "" }, { "docid": "edc4e3dbf6c163c4251088403158fae4", "score": "0.67379045", "text": "def _construct_network(self, cfg):\n assert cfg.MODEL.ARCH in _POOL1.keys()\n pool_size = _POOL1[cfg.MODEL.ARCH]\n assert len({len(pool_size), self.num_pathways}) == 1\n assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()\n\n (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]\n\n num_groups = cfg.RESNET.NUM_GROUPS\n width_per_group = cfg.RESNET.WIDTH_PER_GROUP\n dim_inner = num_groups * width_per_group\n\n temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]\n\n self.s1 = stem_helper.VideoModelStem(\n dim_in=cfg.DATA.INPUT_CHANNEL_NUM,\n dim_out=[width_per_group],\n kernel=[temp_kernel[0][0] + [7, 7]],\n stride=[[1, 2, 2]],\n padding=[[temp_kernel[0][0][0] // 2, 3, 3]],\n norm_module=self.norm_module,\n )\n\n self.s2 = resnet_helper.ResStage(\n dim_in=[width_per_group],\n dim_out=[width_per_group * 4],\n dim_inner=[dim_inner],\n temp_kernel_sizes=temp_kernel[1],\n stride=cfg.RESNET.SPATIAL_STRIDES[0],\n num_blocks=[d2],\n num_groups=[num_groups],\n num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0],\n nonlocal_inds=cfg.NONLOCAL.LOCATION[0],\n nonlocal_group=cfg.NONLOCAL.GROUP[0],\n nonlocal_pool=cfg.NONLOCAL.POOL[0],\n instantiation=cfg.NONLOCAL.INSTANTIATION,\n trans_func_name=cfg.RESNET.TRANS_FUNC,\n stride_1x1=cfg.RESNET.STRIDE_1X1,\n inplace_relu=cfg.RESNET.INPLACE_RELU,\n dilation=cfg.RESNET.SPATIAL_DILATIONS[0],\n norm_module=self.norm_module,\n )\n\n for pathway in range(self.num_pathways):\n pool = nn.MaxPool3d(\n 
kernel_size=pool_size[pathway],\n stride=pool_size[pathway],\n padding=[0, 0, 0],\n )\n self.add_module(\"pathway{}_pool\".format(pathway), pool)\n\n self.s3 = resnet_helper.ResStage(\n dim_in=[width_per_group * 4],\n dim_out=[width_per_group * 8],\n dim_inner=[dim_inner * 2],\n temp_kernel_sizes=temp_kernel[2],\n stride=cfg.RESNET.SPATIAL_STRIDES[1],\n num_blocks=[d3],\n num_groups=[num_groups],\n num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1],\n nonlocal_inds=cfg.NONLOCAL.LOCATION[1],\n nonlocal_group=cfg.NONLOCAL.GROUP[1],\n nonlocal_pool=cfg.NONLOCAL.POOL[1],\n instantiation=cfg.NONLOCAL.INSTANTIATION,\n trans_func_name=cfg.RESNET.TRANS_FUNC,\n stride_1x1=cfg.RESNET.STRIDE_1X1,\n inplace_relu=cfg.RESNET.INPLACE_RELU,\n dilation=cfg.RESNET.SPATIAL_DILATIONS[1],\n norm_module=self.norm_module,\n )\n\n self.s4 = resnet_helper.ResStage(\n dim_in=[width_per_group * 8],\n dim_out=[width_per_group * 16],\n dim_inner=[dim_inner * 4],\n temp_kernel_sizes=temp_kernel[3],\n stride=cfg.RESNET.SPATIAL_STRIDES[2],\n num_blocks=[d4],\n num_groups=[num_groups],\n num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2],\n nonlocal_inds=cfg.NONLOCAL.LOCATION[2],\n nonlocal_group=cfg.NONLOCAL.GROUP[2],\n nonlocal_pool=cfg.NONLOCAL.POOL[2],\n instantiation=cfg.NONLOCAL.INSTANTIATION,\n trans_func_name=cfg.RESNET.TRANS_FUNC,\n stride_1x1=cfg.RESNET.STRIDE_1X1,\n inplace_relu=cfg.RESNET.INPLACE_RELU,\n dilation=cfg.RESNET.SPATIAL_DILATIONS[2],\n norm_module=self.norm_module,\n )\n\n self.s5 = resnet_helper.ResStage(\n dim_in=[width_per_group * 16],\n dim_out=[width_per_group * 32],\n dim_inner=[dim_inner * 8],\n temp_kernel_sizes=temp_kernel[4],\n stride=cfg.RESNET.SPATIAL_STRIDES[3],\n num_blocks=[d5],\n num_groups=[num_groups],\n num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3],\n nonlocal_inds=cfg.NONLOCAL.LOCATION[3],\n nonlocal_group=cfg.NONLOCAL.GROUP[3],\n nonlocal_pool=cfg.NONLOCAL.POOL[3],\n instantiation=cfg.NONLOCAL.INSTANTIATION,\n trans_func_name=cfg.RESNET.TRANS_FUNC,\n stride_1x1=cfg.RESNET.STRIDE_1X1,\n inplace_relu=cfg.RESNET.INPLACE_RELU,\n dilation=cfg.RESNET.SPATIAL_DILATIONS[3],\n norm_module=self.norm_module,\n )\n\n if self.enable_detection:\n self.head = head_helper.ResNetRoIHead(\n dim_in=[width_per_group * 32],\n num_classes=cfg.MODEL.NUM_CLASSES,\n pool_size=[[cfg.DATA.NUM_FRAMES // pool_size[0][0], 1, 1]],\n resolution=[[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2],\n scale_factor=[cfg.DETECTION.SPATIAL_SCALE_FACTOR],\n dropout_rate=cfg.MODEL.DROPOUT_RATE,\n act_func=cfg.MODEL.HEAD_ACT,\n aligned=cfg.DETECTION.ALIGNED,\n )\n else:\n self.head = head_helper.ResNetBasicHead(\n dim_in=[width_per_group * 32],\n num_classes=cfg.MODEL.NUM_CLASSES,\n pool_size=[None, None]\n if cfg.MULTIGRID.SHORT_CYCLE\n else [\n [\n cfg.DATA.NUM_FRAMES // pool_size[0][0],\n cfg.DATA.CROP_SIZE // 32 // pool_size[0][1],\n cfg.DATA.CROP_SIZE // 32 // pool_size[0][2],\n ]\n ], # None for AdaptiveAvgPool3d((1, 1, 1))\n dropout_rate=cfg.MODEL.DROPOUT_RATE,\n act_func=cfg.MODEL.HEAD_ACT,\n )", "title": "" }, { "docid": "34ae3ae7e6cb58fdeb7f75db1112f811", "score": "0.67123514", "text": "def _test():\n input_shape = [128, 128, 128, 4]\n target_size = 4\n #resnet = ResNet50(input_shape=input_shape, pooling='avg')\n #resnet = CosmoResNet(input_shape=input_shape, pooling='avg')\n resnet = MiniResNet(input_shape=input_shape, pooling='avg')\n resnet.summary()", "title": "" }, { "docid": "f5d7e1ee5252dacb2df3d29b8c1992e6", "score": "0.670966", "text": "def resnet200(**kwargs):\n model = 
ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model", "title": "" }, { "docid": "f5d7e1ee5252dacb2df3d29b8c1992e6", "score": "0.670966", "text": "def resnet200(**kwargs):\n model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model", "title": "" }, { "docid": "f5d7e1ee5252dacb2df3d29b8c1992e6", "score": "0.670966", "text": "def resnet200(**kwargs):\n model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model", "title": "" }, { "docid": "f5d7e1ee5252dacb2df3d29b8c1992e6", "score": "0.670966", "text": "def resnet200(**kwargs):\n model = ResNet(Bottleneck, [3, 24, 36, 3], **kwargs)\n return model", "title": "" }, { "docid": "a5263acddd943998f5d7385fe4741f5b", "score": "0.66923046", "text": "def toy_resnet() -> resnet.ResNet:\n block = resnet.BasicBlock\n # Number of blocks per stage. Each stage has two parametrized layers\n num_blocks = [1, 1, 1]\n network = resnet.ResNet(block, num_blocks, 'cifar10')\n for m in network.modules():\n if isinstance(m, nn.Conv2d):\n #nn.init.kaiming_normal_(m.weight, mode=\"fan_out\")\n nn.init.normal_(m.weight, 0, 0.4)\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.Linear):\n #HACK: need to centre the weights around a non-zero value for generating test trace,\n #Otherwise too many frac bits\n nn.init.normal_(m.weight, 0, 0.4)\n #nn.init.kaiming_normal_(m.weight, mode=\"fan_out\")\n nn.init.zeros_(m.bias)\n return network", "title": "" }, { "docid": "37a5e7225d06c8f7d95fcd1d980e7aaa", "score": "0.6680781", "text": "def resnet_v1(input_shape = (32,32,3), depth=20, num_classes=10):\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "title": "" }, { "docid": "7a965df49509beda408e31819b6a500a", "score": "0.6660395", "text": "def resnet_18(input_shape=(224,224,3), nclass=1000):\n input_ = Input(shape=input_shape)\n \n conv1 = conv2d_bn(input_, 64, kernel_size=(7, 7), strides=(2, 2))\n pool1 = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same')(conv1)\n \n conv2 = residual_block(64, 2, is_first_layer=True)(pool1)\n conv3 = residual_block(128, 2, is_first_layer=True)(conv2)\n conv4 = residual_block(256, 2, is_first_layer=True)(conv3)\n conv5 = residual_block(512, 2, 
is_first_layer=True)(conv4)\n \n pool2 = GlobalAvgPool2D()(conv5)\n output_ = Dense(nclass, activation='softmax')(pool2)\n \n model = Model(inputs=input_, outputs=output_)\n model.summary()\n \n return model", "title": "" }, { "docid": "561b0df089e13f685d9190356aec1d18", "score": "0.6648338", "text": "def resnet101(pretrained=False, progress=True, **kwargs):\n return _resnet_with_mix_style('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,\n **kwargs)", "title": "" }, { "docid": "9949c34e2eb373dc82bd058775c313ec", "score": "0.6637916", "text": "def resnet_v2(n, data):\n depth = n * 9 + 2\n input_shape = data[5]\n num_classes = data[6]\n if (depth - 2) % 9 != 0:\n raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')\n\n # Start model definition.\n num_filters_in = 16\n num_res_blocks = int((depth - 2) / 9)\n\n inputs = Input(shape=input_shape)\n # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths\n x = resnet_layer(inputs=inputs,\n num_filters=num_filters_in,\n conv_first=True)\n\n # Instantiate the stack of residual units\n for stage in range(3):\n for res_block in range(num_res_blocks):\n activation = 'relu'\n batch_normalization = True\n strides = 1\n if stage == 0:\n num_filters_out = num_filters_in * 4\n if res_block == 0: # first layer and first stage\n activation = None\n batch_normalization = False\n else:\n num_filters_out = num_filters_in * 2\n if res_block == 0: # first layer but not first stage\n strides = 2 # downsample\n\n # bottleneck residual unit\n y = resnet_layer(inputs=x,\n num_filters=num_filters_in,\n kernel_size=1,\n strides=strides,\n activation=activation,\n batch_normalization=batch_normalization,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_in,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_out,\n kernel_size=1,\n conv_first=False)\n if res_block == 0:\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters_out,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n\n num_filters_in = num_filters_out\n\n # Add classifier on top.\n # v2 has BN-ReLU before Pooling\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "title": "" }, { "docid": "f8c0484e043c73e3765b5ee724032d40", "score": "0.6637522", "text": "def resnet101(\n inputs: tensorflow.Tensor = None,\n training: Union[bool, tensorflow.Tensor] = True,\n num_classes: int = 1000,\n class_type: str = None,\n kernel_initializer=keras.initializers.GlorotUniform(),\n bias_initializer=keras.initializers.GlorotUniform(),\n beta_initializer=keras.initializers.GlorotUniform(),\n gamma_initializer=keras.initializers.GlorotUniform(),\n) -> keras.models.Model:\n sec_settings = [\n ResNetSection(\n num_blocks=3,\n out_channels=256,\n downsample=False,\n proj_channels=64,\n ),\n ResNetSection(\n num_blocks=4,\n out_channels=512,\n downsample=True,\n proj_channels=128,\n ),\n ResNetSection(\n num_blocks=23,\n out_channels=1024,\n downsample=True,\n proj_channels=256,\n ),\n ResNetSection(\n num_blocks=3,\n out_channels=2048,\n downsample=True,\n proj_channels=512,\n ),\n ]\n\n return resnet_const(\n inputs,\n training,\n sec_settings,\n num_classes,\n 
class_type,\n kernel_initializer,\n bias_initializer,\n beta_initializer,\n gamma_initializer,\n )", "title": "" }, { "docid": "5e7b70622a32aa23073e8416561cb2ff", "score": "0.6636658", "text": "def __init__(self, num_classes):\n super(Resnet50, self).__init__()\n self.Resnet = models.resnet50(pretrained=True)\n self.Resnet.fc = nn.Linear(2048, num_classes)", "title": "" }, { "docid": "735877c045910a9a3c41fe56cc8cac2b", "score": "0.66281754", "text": "def build_network_winner(config, input_var=None):\n\n from lasagne.layers import InputLayer, DropoutLayer, FlattenLayer, DenseLayer, ReshapeLayer\n from lasagne.nonlinearities import rectify, softmax\n try:\n from lasagne.layers.dnn import Conv2DDNNLayer as ConvLayer\n except ImportError:\n from lasagne.layers import Conv2DLayer as ConvLayer\n \n try:\n from lasagne.layers.dnn import MaxPool2DDNNLayer as PoolLayer\n except ImportError:\n from lasagne.layers import Pool2DLayer as PoolLayer\n\n import lasagne\n\n\n\n\n network = InputLayer(shape=(None, config.img_colors, config.img_size, config.img_size), input_var=input_var)\n print(\"I1\", network.output_shape)\n\n network = ConvLayer(network, num_filters=250, filter_size=5, nonlinearity=rectify)\n print(\"C1\", network.output_shape)\n network = ConvLayer(network, num_filters=100, filter_size=3, nonlinearity=rectify)\n print(\"C2\", network.output_shape)\n network = PoolLayer(network, pool_size=2, ignore_border=True)\n print(\"P1\", network.output_shape)\n\n network = ConvLayer(network, num_filters=250, filter_size=2, nonlinearity=rectify)\n print(\"C3\", network.output_shape)\n network = ConvLayer(network, num_filters=250, filter_size=2, nonlinearity=rectify)\n print(\"C4\", network.output_shape)\n network = PoolLayer(network, pool_size=2, ignore_border=True)\n print(\"P2\", network.output_shape)\n\n network = ConvLayer(network, num_filters=250, filter_size=2, nonlinearity=rectify)\n print(\"C5\", network.output_shape)\n network = ConvLayer(network, num_filters=100, filter_size=2, nonlinearity=rectify)\n print(\"C6\", network.output_shape)\n network = PoolLayer(network, pool_size=2, ignore_border=True)\n print(\"P3\", network.output_shape)\n\n # network = ConvLayer(network, num_filters=500, filter_size=(2, 2), nonlinearity=rectify)\n # print(\"C5\", network.output_shape)\n\n network = DenseLayer(lasagne.layers.dropout(network, p=.5), num_units=10, nonlinearity=softmax)\n print(\"D1\", network.output_shape)\n\n return network, 'winner_big_mean'", "title": "" }, { "docid": "10ab56b3e5d71892b28f1574865591ec", "score": "0.6617708", "text": "def se_resnet101(num_classes):\n model = ResNet(SEBottleneck, [3, 4, 23, 3], num_classes=num_classes)\n model.avgpool = nn.AdaptiveAvgPool2d(1)\n return model", "title": "" }, { "docid": "1e1f3564fef24054125ebd796f180feb", "score": "0.6615055", "text": "def se_resnet101(num_classes=1_000):\n model = ResNet(SEBottleneck, [3, 4, 23, 3], num_classes=num_classes)\n model.avgpool = nn.AdaptiveAvgPool2d(1)\n return model", "title": "" }, { "docid": "c6bc800cc3a32aaabdebf099c16e1662", "score": "0.6614142", "text": "def resnet110(**kwargs):\n return ResNet(BasicBlock, 110, mask=True, **kwargs)", "title": "" }, { "docid": "b8ca1ccae3318490cb79c0f516638d35", "score": "0.66021305", "text": "def rap1net(shape, inseqlist, outbindvec): \n # Make three layers for the network defined in shape.\n # shape = np.array([68,7,1]) # number of nodes in each layer of the network\n # Define the layers: Layer(nodes, inputLayer, nextLayerNodes = None)\n inputs = nn.Layer(shape[0], None, 
shape[1])\n hidden = nn.Layer(shape[1], inputs, shape[2])\n outputs = nn.Layer(shape[2], hidden) \n net = np.array([inputs, hidden, outputs]) # this is the network\n \n # Convert a list of sequences into a matrix input where each column is one sequence encoded as a 68x1 vector.\n inmatrix = seqListtoMatInput(inseqlist)\n \n # repeat steps of gradient descent\n for i in range(10000):\n# if i%10 == 0:\n# print(i)\n cost, finalactivation = nn.gradientdescent(net, inmatrix, outbindvec, alpha = 0.5, weightdecay = 0.1)\n \n return finalactivation, net", "title": "" }, { "docid": "fddfab6e8ff34dabec025e8e01eee28c", "score": "0.6592891", "text": "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "title": "" }, { "docid": "fddfab6e8ff34dabec025e8e01eee28c", "score": "0.6592891", "text": "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "title": "" }, { "docid": "fddfab6e8ff34dabec025e8e01eee28c", "score": "0.6592891", "text": "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "title": "" }, { "docid": "fddfab6e8ff34dabec025e8e01eee28c", "score": "0.6592891", "text": "def resnet10(**kwargs):\n model = ResNet(BasicBlock, [1, 1, 1, 1], **kwargs)\n return model", "title": "" }, { "docid": "361d1966bde5db9def15d4b0ee11fe34", "score": "0.6587918", "text": "def ResNet34(inputs, weight_decay=None):\n\n if weight_decay:\n regularizer = tf.keras.regularizers.l2(weight_decay)\n else:\n regularizer = None\n\n x = tf.keras.layers.ZeroPadding2D(padding=(3,3), name='pad')(inputs)\n x = tf.keras.layers.Conv2D(filters=64, kernel_size=7, strides=2, padding='valid', activation='linear', use_bias=False, kernel_initializer='he_normal', kernel_regularizer=regularizer, name='conv1')(x)\n x = tf.keras.layers.BatchNormalization(momentum=0.1, epsilon=1e-5, name='bn1')(x)\n x = tf.keras.layers.Activation('relu', name='relu')(x)\n x = tf.keras.layers.ZeroPadding2D(padding=(1,1), name='pad1')(x)\n x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='valid', name='maxpool')(x)\n\n x = BasicBlock(x, num_channels=64, kernel_size=3, num_blocks=3, skip_blocks=[], regularizer=regularizer, name='layer1')\n\n x = BasicBlockDown(x, num_channels=128, kernel_size=3, regularizer=regularizer, name='layer2') \n x = BasicBlock(x, num_channels=128, kernel_size=3, num_blocks=4, skip_blocks=[0], regularizer=regularizer, name='layer2') \n\n x = BasicBlockDown(x, num_channels=256, kernel_size=3, regularizer=regularizer, name='layer3') \n x = BasicBlock(x, num_channels=256, kernel_size=3, num_blocks=6, skip_blocks=[0], regularizer=regularizer, name='layer3') \n\n x = BasicBlockDown(x, num_channels=512, kernel_size=3, regularizer=regularizer, name='layer4') \n x = BasicBlock(x, num_channels=512, kernel_size=3, num_blocks=3, skip_blocks=[0], regularizer=regularizer, name='layer4') \n\n x = tf.keras.layers.GlobalAveragePooling2D(name='avgpool')(x)\n x = tf.keras.layers.Dense(units=1000, use_bias=True, activation='linear', kernel_regularizer=regularizer, name='fc')(x)\n return x", "title": "" }, { "docid": "6672d945790dc248b234f5e2ccfaca94", "score": "0.657873", "text": "def resnet110g8r(**kwargs):\n return ResNet(BasicBlock, 110, groups=8, indices=\"random\", **kwargs)", "title": "" }, { "docid": "6f2c469197a950bc75f09f0fad1ff43d", "score": "0.655298", "text": "def test_create_resnet_with_callable(self):\n for (norm, activation) in itertools.product(\n (nn.BatchNorm3d, None), (nn.ReLU, nn.Sigmoid, None)\n ):\n 
input_channel = 3\n input_clip_length = 4\n input_crop_size = 56\n model_depth = 50\n stage_spatial_stride = (2, 1, 1, 1)\n stage_temporal_stride = (2, 1, 1, 1)\n model_gt, num_class = self._build_resnet(\n input_channel,\n input_clip_length,\n input_crop_size,\n model_depth,\n norm,\n activation,\n )\n\n total_spatial_stride = 4 * np.prod(stage_spatial_stride)\n total_temporal_stride = np.prod(stage_temporal_stride)\n head_pool_kernel_size = (\n input_clip_length // total_temporal_stride,\n input_crop_size // total_spatial_stride,\n input_crop_size // total_spatial_stride,\n )\n\n model = create_resnet(\n input_channel=input_channel,\n model_depth=50,\n model_num_class=num_class,\n dropout_rate=0,\n norm=norm,\n activation=activation,\n stem_dim_out=8,\n stem_conv_kernel_size=(3, 7, 7),\n stem_conv_stride=(1, 2, 2),\n stem_pool=nn.MaxPool3d,\n stem_pool_kernel_size=(1, 3, 3),\n stem_pool_stride=(1, 2, 2),\n stage_conv_a_kernel_size=((3, 1, 1),) * 4,\n stage_conv_b_kernel_size=((1, 3, 3),) * 4,\n stage_spatial_h_stride=stage_spatial_stride,\n stage_spatial_w_stride=stage_spatial_stride,\n stage_temporal_stride=stage_temporal_stride,\n bottleneck=create_bottleneck_block,\n head_pool=nn.AvgPool3d,\n head_pool_kernel_size=head_pool_kernel_size,\n head_output_size=(1, 1, 1),\n head_activation=nn.Softmax,\n )\n\n model.load_state_dict(\n model_gt.state_dict(), strict=True\n ) # explicitly use strict mode.\n\n # Test forwarding.\n for tensor in TestResNet._get_inputs(\n input_channel, input_clip_length, input_crop_size\n ):\n with torch.no_grad():\n if tensor.shape[1] != input_channel:\n with self.assertRaises(RuntimeError):\n out = model(tensor)\n continue\n\n out = model(tensor)\n out_gt = model_gt(tensor)\n\n self.assertEqual(\n out.shape,\n out_gt.shape,\n \"Output shape {} is different from expected shape {}\".format(\n out.shape, out_gt.shape\n ),\n )\n self.assertTrue(\n np.allclose(out.numpy(), out_gt.numpy(), rtol=1e-1, atol=1e-1)\n )", "title": "" }, { "docid": "85cee80de70e675db64e0c685522516a", "score": "0.6551432", "text": "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "title": "" }, { "docid": "85cee80de70e675db64e0c685522516a", "score": "0.6551432", "text": "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "title": "" }, { "docid": "85cee80de70e675db64e0c685522516a", "score": "0.6551432", "text": "def resnet101(pretrained=False, **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))\n return model", "title": "" }, { "docid": "254f7549e0208857d5688a099b73d1a6", "score": "0.6532989", "text": "def ResNet_34(input_size=[112,112]):\n model = ResNet(input_size, BasicBlock, [3, 4, 6, 3])\n\n return model", "title": "" }, { "docid": "e99f6dcdfd8f770bec1726fa7eda09b0", "score": "0.652367", "text": "def ResNet18(inputs, weight_decay=None):\n\n if weight_decay:\n regularizer = tf.keras.regularizers.l2(weight_decay) #not valid for Adam, must use AdamW\n else:\n regularizer = None\n\n x = tf.keras.layers.ZeroPadding2D(padding=(3,3), name='pad')(inputs)\n x = tf.keras.layers.Conv2D(filters=64, kernel_size=7, strides=2, padding='valid', activation='linear', use_bias=False, 
kernel_initializer='he_normal', kernel_regularizer=regularizer, name='conv1')(x)\n x = tf.keras.layers.BatchNormalization(momentum=0.1, epsilon=1e-5, name='bn1')(x)\n x = tf.keras.layers.Activation('relu', name='relu')(x)\n x = tf.keras.layers.ZeroPadding2D(padding=(1,1), name='pad1')(x)\n x = tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding='valid', name='maxpool')(x)\n\n x = BasicBlock(x, num_channels=64, kernel_size=3, num_blocks=2, skip_blocks=[], regularizer=regularizer, name='layer1')\n\n x = BasicBlockDown(x, num_channels=128, kernel_size=3, regularizer=regularizer, name='layer2')\n x = BasicBlock(x, num_channels=128, kernel_size=3, num_blocks=2, skip_blocks=[0], regularizer=regularizer, name='layer2')\n\n x = BasicBlockDown(x, num_channels=256, kernel_size=3, regularizer=regularizer, name='layer3')\n x = BasicBlock(x, num_channels=256, kernel_size=3, num_blocks=2, skip_blocks=[0], regularizer=regularizer, name='layer3')\n\n x = BasicBlockDown(x, num_channels=512, kernel_size=3, regularizer=regularizer, name='layer4')\n x = BasicBlock(x, num_channels=512, kernel_size=3, num_blocks=2, skip_blocks=[0], regularizer=regularizer, name='layer4')\n\n x = tf.keras.layers.GlobalAveragePooling2D(name='avgpool')(x)\n x = tf.keras.layers.Dense(units=1000, use_bias=True, activation='linear', kernel_regularizer=regularizer, name='fc')(x)\n\n return x", "title": "" }, { "docid": "b26653d8a8b6142bea44171ec9bd1664", "score": "0.6512249", "text": "def network(self):", "title": "" }, { "docid": "b8981ed7cd3e6cf36c71ac6fb211bb52", "score": "0.6497224", "text": "def resnet_v1(input_shape, depth, num_classes=n):\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x, num_filters=num_filters, strides=strides)\n y = resnet_layer(inputs=y, num_filters=num_filters, activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match changed dims\n x = resnet_layer(inputs=x, num_filters=num_filters, kernel_size=1, strides=strides, activation=None, batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n # x = AveragePooling2D(pool_size=8)(x)\n x = resnet_layer(inputs=x, num_filters=2, strides=1, activation='relu', batch_normalization=True)\n y = Flatten()(x)\n outputs = Dense(num_classes, activation='softmax', kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "title": "" }, { "docid": "1117525802737ef2a6814ed970018d63", "score": "0.6495921", "text": "def resnet50():\n return layers.join(\n layers.Input((3, 224, 224)),\n\n # Convolutional layer reduces image's height and width by a factor\n # of 2 (because of the stride)\n # from (3, 224, 224) to (64, 112, 112)\n layers.Convolution((64, 7, 7), stride=2, padding=3, name='conv1'),\n layers.BatchNorm(name='bn_conv1'),\n layers.Relu(),\n\n # Stride equal two 2 reduces image size by a factor of two\n # from (64, 112, 112) to (64, 56, 
56)\n layers.MaxPooling((3, 3), stride=2, ignore_border=False),\n\n # The branch option applies extra convolution + batch\n # normalization transforamtions to the residual\n ResidualUnit(64, 256, stride=1, name='2a', has_branch=True),\n ResidualUnit(64, 256, stride=1, name='2b'),\n ResidualUnit(64, 256, stride=1, name='2c'),\n\n # Another stride=2 reduces width and hight by factor of 2\n ResidualUnit(128, 512, stride=2, name='3a', has_branch=True),\n ResidualUnit(128, 512, stride=1, name='3b'),\n ResidualUnit(128, 512, stride=1, name='3c'),\n ResidualUnit(128, 512, stride=1, name='3d'),\n\n # Another stride=2 reduces width and hight by factor of 2\n ResidualUnit(256, 1024, stride=2, name='4a', has_branch=True),\n ResidualUnit(256, 1024, stride=1, name='4b'),\n ResidualUnit(256, 1024, stride=1, name='4c'),\n ResidualUnit(256, 1024, stride=1, name='4d'),\n ResidualUnit(256, 1024, stride=1, name='4e'),\n ResidualUnit(256, 1024, stride=1, name='4f'),\n\n # Another stride=2 reduces width and hight by factor of 2\n ResidualUnit(512, 2048, stride=2, name='5a', has_branch=True),\n ResidualUnit(512, 2048, stride=1, name='5b'),\n ResidualUnit(512, 2048, stride=1, name='5c'),\n\n # Since the final residual unit has 2048 output filters, global\n # pooling will replace every output image with single average value.\n # Despite input iamge size output from this layer always will be\n # vector with 2048 values\n layers.GlobalPooling(),\n layers.Softmax(1000, name='fc1000'),\n )", "title": "" }, { "docid": "07527d8b911648eca8b8bb26711b17fe", "score": "0.6494789", "text": "def __init__(self,\n n_repeats: int,\n blocks_per_group_list: Sequence[int],\n num_classes: int,\n bn_config: Optional[Mapping[Text, float]] = None,\n resnet_v2: bool = False,\n channels_per_group_list: Sequence[int] = (256, 512, 1024, 2048),\n use_additional_features: bool = False,\n additional_features_mode: Optional[Text] = \"per_block\",\n name: Optional[Text] = None):\n super(ResNet, self).__init__(name=name)\n self._n_repeats = n_repeats\n if bn_config is None:\n bn_config = {\"decay_rate\": 0.9, \"eps\": 1e-5}\n self._bn_config = bn_config\n self._resnet_v2 = resnet_v2\n\n # Number of blocks in each group for ResNet.\n if len(blocks_per_group_list) != 4:\n raise ValueError(\n \"`blocks_per_group_list` must be of length 4 not {}\".format(\n len(blocks_per_group_list)))\n self._blocks_per_group_list = blocks_per_group_list\n\n # Number of channels in each group for ResNet.\n if len(channels_per_group_list) != 4:\n raise ValueError(\n \"`channels_per_group_list` must be of length 4 not {}\".format(\n len(channels_per_group_list)))\n self._channels_per_group_list = channels_per_group_list\n self._use_additional_features = use_additional_features\n self._additional_features_mode = additional_features_mode\n\n self._initial_conv = snt.Conv2D(\n output_channels=64,\n kernel_shape=7,\n stride=2,\n with_bias=False,\n padding=\"SAME\",\n name=\"initial_conv\")\n if not self._resnet_v2:\n self._initial_batchnorm = snt.BatchNorm(\n create_scale=True,\n create_offset=True,\n name=\"initial_batchnorm\",\n **bn_config)\n\n self._block_groups = []\n strides = [1, 2, 2, 2]\n for i in range(4):\n self._block_groups.append(\n snt.nets.resnet.BlockGroup(\n channels=self._channels_per_group_list[i],\n num_blocks=self._blocks_per_group_list[i],\n stride=strides[i],\n bn_config=bn_config,\n resnet_v2=resnet_v2,\n name=\"block_group_%d\" % (i)))\n\n if self._resnet_v2:\n self._final_batchnorm = snt.BatchNorm(\n create_scale=True,\n create_offset=True,\n 
name=\"final_batchnorm\",\n **bn_config)\n\n self._logits = snt.Linear(\n output_size=num_classes,\n w_init=snt.initializers.VarianceScaling(scale=2.0), name=\"logits\")\n\n if self._use_additional_features:\n self._embedding = LinearBNReLU(output_size=16, name=\"embedding\",\n **bn_config)\n\n if self._additional_features_mode == \"mlp\":\n self._feature_repr = LinearBNReLU(\n output_size=self._channels_per_group_list[-1], name=\"features_repr\",\n **bn_config)\n elif self._additional_features_mode == \"per_block\":\n self._feature_repr = []\n for i, ch in enumerate(self._channels_per_group_list):\n self._feature_repr.append(\n LinearBNReLU(output_size=ch, name=f\"features_{i}\", **bn_config))\n else:\n raise ValueError(f\"Unsupported addiitonal features mode: \"\n f\"{additional_features_mode}\")", "title": "" }, { "docid": "96c2c204aa9b17ab6b119c55ffc78c0f", "score": "0.64897", "text": "def resnet110m(**kwargs):\n return ResNet(BasicBlock, 110, mask=True, **kwargs)", "title": "" }, { "docid": "5aabdf7917ae7e0525a69017ccaec2d1", "score": "0.6476997", "text": "def se_resnet18(num_classes=1_000):\n model = ResNet(SEBasicBlock, [2, 2, 2, 2], num_classes=num_classes)\n model.avgpool = nn.AdaptiveAvgPool2d(1)\n return model", "title": "" }, { "docid": "3bb9cb5b6202c6c99c26eae0170eee3a", "score": "0.647161", "text": "def se_resnet18(num_classes):\n model = ResNet(SEBasicBlock, [2, 2, 2, 2], num_classes=num_classes)\n model.avgpool = nn.AdaptiveAvgPool2d(1)\n return model", "title": "" }, { "docid": "e6d2d4acf3f126b37631eb97e2d1a252", "score": "0.64650625", "text": "def resnet_v1(input_shape, depth, num_classes=10):\n if (depth - 2) % 6 != 0:\n raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')\n # Start model definition.\n num_filters = 16\n num_res_blocks = int((depth - 2) / 6)\n\n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs)\n # Instantiate the stack of residual units\n for stack in range(3):\n for res_block in range(num_res_blocks):\n strides = 1\n if stack > 0 and res_block == 0: # first layer but not first stack\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters,\n strides=strides)\n y = resnet_layer(inputs=y,\n num_filters=num_filters,\n activation=None)\n if stack > 0 and res_block == 0: # first layer but not first stack\n # linear projection residual shortcut connection to match\n # changed dims\n x = resnet_layer(inputs=x,\n num_filters=num_filters,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False)\n x = keras.layers.add([x, y])\n x = Activation('relu')(x)\n num_filters *= 2\n\n # Add classifier on top.\n # v1 does not use BN after last shortcut connection-ReLU\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "title": "" }, { "docid": "2895eb6e47a307f69af109c153f81aee", "score": "0.6462151", "text": "def __init__(self, num_filters=256, pretrained=True, backbone='resnet50'):\n\n super(FPN, self).__init__()\n if backbone == 'resnet18':\n self.resnet = resnet18(pretrained=pretrained, input_channel=3)\n if backbone == 'resnet34':\n self.resnet = resnet34(pretrained=pretrained, input_channel=3)\n if backbone == 'resnet50':\n self.resnet = resnet50(pretrained=pretrained, input_channel=3)\n if backbone == 'resnet101':\n self.resnet = resnet101(pretrained=pretrained, input_channel=3)\n if backbone == 
'resnet152':\n self.resnet = resnet152(pretrained=pretrained, input_channel=3)\n if backbone == 'resnext50':\n self.resnet = resnext50_32x4d(pretrained=pretrained, input_channel=3)\n if backbone == 'resnext101':\n self.resnet = resnext101_32x8d(pretrained=pretrained, input_channel=3)\n # Access resnet directly in forward pass; do not store refs here due to\n # https://github.com/pytorch/pytorch/issues/8392\n\n self.lateral4 = Conv1x1(2048, num_filters)\n self.lateral3 = Conv1x1(1024, num_filters)\n self.lateral2 = Conv1x1(512, num_filters)\n self.lateral1 = Conv1x1(256, num_filters)\n\n self.smooth4 = Conv3x3(num_filters, num_filters)\n self.smooth3 = Conv3x3(num_filters, num_filters)\n self.smooth2 = Conv3x3(num_filters, num_filters)\n self.smooth1 = Conv3x3(num_filters, num_filters)", "title": "" }, { "docid": "0b67c6676d0f548bb33ef0e5a84d6cf3", "score": "0.64612824", "text": "def residual_network(x):\n def add_common_layers(y):\n y = layers.BatchNormalization()(y)\n print(y.shape)\n y = layers.LeakyReLU()(y)\n\n return y\n\n def grouped_convolution(y, nb_channels, _strides):\n # when `cardinality` == 1 this is just a standard convolution\n if cardinality == 1:\n return layers.Conv3D(nb_channels, kernel_size=(3, 3,3), strides=_strides, padding='same')(y)\n \n assert not nb_channels % cardinality\n _d = nb_channels // cardinality\n\n # in a grouped convolution layer, input and output channels are divided into `cardinality` groups,\n # and convolutions are separately performed within each group\n groups = []\n for j in range(cardinality):\n group = layers.Lambda(lambda z: z[:,:, :, :, j * _d:j * _d + _d])(y)\n groups.append(layers.Conv3D(_d, kernel_size=(3, 3,3), strides=_strides, padding='same')(group))\n \n # the grouped convolutional layer concatenates them as the outputs of the layer\n y = layers.concatenate(groups)\n\n\n return y\n\n def residual_block(y, nb_channels_in, nb_channels_out, _strides=(1, 1,1), _project_shortcut=False):\n \"\"\"\n Our network consists of a stack of residual blocks. 
These blocks have the same topology,\n and are subject to two simple rules:\n - If producing spatial maps of the same size, the blocks share the same hyper-parameters (width and filter sizes).\n - Each time the spatial map is down-sampled by a factor of 2, the width of the blocks is multiplied by a factor of 2.\n \"\"\"\n shortcut = y\n\n # we modify the residual building block as a bottleneck design to make the network more economical\n y = layers.Conv3D(nb_channels_in, kernel_size=(1, 1,1), strides=(1, 1,1), padding='same')(y)\n\n y = add_common_layers(y)\n\n # ResNeXt (identical to ResNet when `cardinality` == 1)\n y = grouped_convolution(y, nb_channels_in, _strides=_strides)\n y = add_common_layers(y)\n\n y = layers.Conv3D(nb_channels_out, kernel_size=(1, 1,1), strides=(1, 1,1), padding='same')(y)\n # batch normalization is employed after aggregating the transformations and before adding to the shortcut\n y = layers.BatchNormalization()(y)\n\n # identity shortcuts used directly when the input and output are of the same dimensions\n if _project_shortcut or _strides != (1, 1,1):\n # when the dimensions increase projection shortcut is used to match dimensions (done by 1×1 convolutions)\n # when the shortcuts go across feature maps of two sizes, they are performed with a stride of 2\n shortcut = layers.Conv3D(nb_channels_out, kernel_size=(1, 1,1), strides=_strides, padding='same')(shortcut)\n shortcut = layers.BatchNormalization()(shortcut)\n\n y = layers.add([shortcut, y])\n\n # relu is performed right after each batch normalization,\n # expect for the output of the block where relu is performed after the adding to the shortcut\n y = layers.LeakyReLU()(y)\n\n return y\n\n # conv1\n x = layers.Conv3D(64, kernel_size=(7, 7,7), strides=(2, 2,2), padding='same')(x)\n print(x)\n x = add_common_layers(x)\n\n # conv2\n x = layers.MaxPool3D(pool_size=(3, 3,3), strides=(2, 2,2), padding='same')(x)\n for i in range(3):\n project_shortcut = True if i == 0 else False\n x = residual_block(x, 128, 256, _project_shortcut=project_shortcut)\n\n # conv3\n for i in range(4):\n # down-sampling is performed by conv3_1, conv4_1, and conv5_1 with a stride of 2\n strides = (2, 2,2) if i == 0 else (1, 1,1)\n x = residual_block(x, 256, 512, _strides=strides)\n\n # conv4\n for i in range(6):\n strides = (2, 2,2) if i == 0 else (1, 1,1)\n x = residual_block(x, 512, 1024, _strides=strides)\n print(x)\n # conv5\n for i in range(3):\n strides = (2, 2,2) if i == 0 else (1, 1,1)\n x = residual_block(x, 1024, 2048, _strides=strides)\n print(x)\n x = layers.GlobalAveragePooling3D()(x)\n x = layers.Dense(14)(x)\n\n return x", "title": "" }, { "docid": "23ef1426635360381eed04fe9f3d102e", "score": "0.64578235", "text": "def __init__(self):\n self.model = self.create_ResNet()", "title": "" }, { "docid": "7524d5154bad9c88e3c4dd708c611435", "score": "0.64525646", "text": "def resnet110g4r(**kwargs):\n return ResNet(BasicBlock, 110, groups=4, indices=\"random\", **kwargs)", "title": "" }, { "docid": "8b752678b3b1ab920c4aa009ec034a33", "score": "0.64504766", "text": "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "title": "" }, { "docid": "8b752678b3b1ab920c4aa009ec034a33", "score": "0.64504766", "text": "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", "title": "" }, { "docid": "8b752678b3b1ab920c4aa009ec034a33", "score": "0.64504766", "text": "def resnet18(**kwargs):\n model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)\n return model", 
"title": "" }, { "docid": "bf06e90988ecc44fe77760fb4f8c82f4", "score": "0.64478385", "text": "def _construct_network(self, cfg):\n assert cfg.MODEL.ARCH in _POOL1.keys()\n pool_size = _POOL1[cfg.MODEL.ARCH]\n assert len({len(pool_size), self.num_pathways}) == 1\n assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()\n \n self.DROPPATHWAY_RATE = cfg.SLOWFAST.DROPPATHWAY_RATE\n self.FS_FUSION = cfg.SLOWFAST.FS_FUSION\n self.AFS_FUSION = cfg.SLOWFAST.AFS_FUSION\n self.GET_MISALIGNED_AUDIO = cfg.DATA.GET_MISALIGNED_AUDIO\n self.AVS_FLAG = cfg.SLOWFAST.AVS_FLAG\n self.AVS_PROJ_DIM = cfg.SLOWFAST.AVS_PROJ_DIM\n self.AVS_VAR_THRESH = cfg.SLOWFAST.AVS_VAR_THRESH\n self.AVS_DUPLICATE_THRESH = cfg.SLOWFAST.AVS_DUPLICATE_THRESH\n \n (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]\n tf_trans_func = [cfg.RESNET.TRANS_FUNC] * 2 + \\\n [cfg.RESNET.AUDIO_TRANS_FUNC]\n trans_func = [tf_trans_func] * cfg.RESNET.AUDIO_TRANS_NUM + \\\n [cfg.RESNET.TRANS_FUNC] * (4 - cfg.RESNET.AUDIO_TRANS_NUM)\n\n num_groups = cfg.RESNET.NUM_GROUPS\n width_per_group = cfg.RESNET.WIDTH_PER_GROUP\n dim_inner = num_groups * width_per_group\n out_dim_ratio = (\n cfg.SLOWFAST.BETA_INV // cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO\n )\n\n temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]\n \n if cfg.SLOWFAST.AU_REDUCE_TF_DIM:\n tf_stride = 2\n else:\n tf_stride = 1\n tf_dim_reduction = 1\n\n self.s1 = stem_helper.VideoModelStem(\n dim_in=cfg.DATA.INPUT_CHANNEL_NUM,\n dim_out=[\n width_per_group, \n width_per_group // cfg.SLOWFAST.BETA_INV, \n width_per_group // cfg.SLOWFAST.AU_BETA_INV\n ],\n kernel=[\n temp_kernel[0][0] + [7, 7], \n temp_kernel[0][1] + [7, 7], \n [temp_kernel[0][2] + [9, 1], temp_kernel[0][2] + [1, 9]],\n ],\n stride=[[1, 2, 2], [1, 2, 2], [[1, 1, 1], [1, 1, 1]]],\n padding=[\n [temp_kernel[0][0][0] // 2, 3, 3],\n [temp_kernel[0][1][0] // 2, 3, 3],\n [[temp_kernel[0][2][0] // 2, 4, 0], [temp_kernel[0][2][0] // 2, 0, 4]],\n ],\n stride_pool=[True, True, False],\n )\n \n if self.FS_FUSION[0] or self.AFS_FUSION[0]:\n self.s1_fuse = FuseAV(\n # Slow\n width_per_group,\n # Fast\n width_per_group // cfg.SLOWFAST.BETA_INV,\n cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,\n cfg.SLOWFAST.FUSION_KERNEL_SZ,\n cfg.SLOWFAST.ALPHA,\n # Audio\n width_per_group // cfg.SLOWFAST.AU_BETA_INV,\n cfg.SLOWFAST.AU_FUSION_CONV_CHANNEL_MODE,\n cfg.SLOWFAST.AU_FUSION_CONV_CHANNEL_DIM,\n cfg.SLOWFAST.AU_FUSION_CONV_CHANNEL_RATIO,\n cfg.SLOWFAST.AU_FUSION_KERNEL_SZ,\n cfg.SLOWFAST.AU_ALPHA // tf_dim_reduction,\n cfg.SLOWFAST.AU_FUSION_CONV_NUM,\n # Fusion connections\n self.FS_FUSION[0],\n self.AFS_FUSION[0],\n # AVS\n self.AVS_FLAG[0],\n self.AVS_PROJ_DIM,\n # nGPUs and shards\n num_gpus=cfg.NUM_GPUS,\n num_shards=cfg.NUM_SHARDS,\n )\n \n slow_dim = width_per_group + \\\n (width_per_group // out_dim_ratio if self.FS_FUSION[0] else 0)\n self.s2 = resnet_helper.ResStage(\n dim_in=[\n slow_dim,\n width_per_group // cfg.SLOWFAST.BETA_INV,\n width_per_group // cfg.SLOWFAST.AU_BETA_INV,\n ],\n dim_out=[\n width_per_group * 4,\n width_per_group * 4 // cfg.SLOWFAST.BETA_INV,\n width_per_group * 4 // cfg.SLOWFAST.AU_BETA_INV,\n ],\n dim_inner=[\n dim_inner, \n dim_inner // cfg.SLOWFAST.BETA_INV, \n dim_inner // cfg.SLOWFAST.AU_BETA_INV\n ],\n temp_kernel_sizes=temp_kernel[1],\n stride=[1] * 3,\n num_blocks=[d2] * 3,\n num_groups=[num_groups] * 3,\n num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0],\n nonlocal_inds=cfg.NONLOCAL.LOCATION[0],\n nonlocal_group=cfg.NONLOCAL.GROUP[0],\n nonlocal_pool=cfg.NONLOCAL.POOL[0],\n 
instantiation=cfg.NONLOCAL.INSTANTIATION,\n trans_func_name=trans_func[0],\n dilation=cfg.RESNET.SPATIAL_DILATIONS[0],\n norm_module=self.norm_module,\n )\n if self.FS_FUSION[1] or self.AFS_FUSION[1]:\n self.s2_fuse = FuseAV(\n # Slow\n width_per_group * 4,\n # Fast\n width_per_group * 4 // cfg.SLOWFAST.BETA_INV,\n cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,\n cfg.SLOWFAST.FUSION_KERNEL_SZ,\n cfg.SLOWFAST.ALPHA,\n # Audio\n width_per_group * 4 // cfg.SLOWFAST.AU_BETA_INV,\n cfg.SLOWFAST.AU_FUSION_CONV_CHANNEL_MODE,\n cfg.SLOWFAST.AU_FUSION_CONV_CHANNEL_DIM,\n cfg.SLOWFAST.AU_FUSION_CONV_CHANNEL_RATIO,\n cfg.SLOWFAST.AU_FUSION_KERNEL_SZ,\n cfg.SLOWFAST.AU_ALPHA // tf_dim_reduction,\n cfg.SLOWFAST.AU_FUSION_CONV_NUM,\n # Fusion connections\n self.FS_FUSION[1],\n self.AFS_FUSION[1],\n # AVS\n self.AVS_FLAG[1],\n self.AVS_PROJ_DIM,\n # nGPUs and shards\n num_gpus=cfg.NUM_GPUS,\n num_shards=cfg.NUM_SHARDS,\n )\n\n for pathway in range(self.num_pathways):\n pool = nn.MaxPool3d(\n kernel_size=pool_size[pathway],\n stride=pool_size[pathway],\n padding=[0, 0, 0],\n )\n self.add_module(\"pathway{}_pool\".format(pathway), pool)\n \n slow_dim = width_per_group * 4 + \\\n (width_per_group * 4 // out_dim_ratio if self.FS_FUSION[1] else 0)\n self.s3 = resnet_helper.ResStage(\n dim_in=[\n slow_dim,\n width_per_group * 4 // cfg.SLOWFAST.BETA_INV,\n width_per_group * 4 // cfg.SLOWFAST.AU_BETA_INV,\n ],\n dim_out=[\n width_per_group * 8,\n width_per_group * 8 // cfg.SLOWFAST.BETA_INV,\n width_per_group * 8 // cfg.SLOWFAST.AU_BETA_INV,\n ],\n dim_inner=[\n dim_inner * 2, \n dim_inner * 2 // cfg.SLOWFAST.BETA_INV,\n dim_inner * 2 // cfg.SLOWFAST.AU_BETA_INV\n ],\n temp_kernel_sizes=temp_kernel[2],\n stride=[2, 2, tf_stride],\n num_blocks=[d3] * 3,\n num_groups=[num_groups] * 3,\n num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1],\n nonlocal_inds=cfg.NONLOCAL.LOCATION[1],\n nonlocal_group=cfg.NONLOCAL.GROUP[1],\n nonlocal_pool=cfg.NONLOCAL.POOL[1],\n instantiation=cfg.NONLOCAL.INSTANTIATION,\n trans_func_name=trans_func[1],\n dilation=cfg.RESNET.SPATIAL_DILATIONS[1],\n norm_module=self.norm_module,\n )\n tf_dim_reduction *= tf_stride\n \n if self.FS_FUSION[2] or self.AFS_FUSION[2]:\n self.s3_fuse = FuseAV(\n # Slow\n width_per_group * 8,\n # Fast\n width_per_group * 8 // cfg.SLOWFAST.BETA_INV,\n cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,\n cfg.SLOWFAST.FUSION_KERNEL_SZ,\n cfg.SLOWFAST.ALPHA,\n # Audio\n width_per_group * 8 // cfg.SLOWFAST.AU_BETA_INV,\n cfg.SLOWFAST.AU_FUSION_CONV_CHANNEL_MODE,\n cfg.SLOWFAST.AU_FUSION_CONV_CHANNEL_DIM,\n cfg.SLOWFAST.AU_FUSION_CONV_CHANNEL_RATIO,\n cfg.SLOWFAST.AU_FUSION_KERNEL_SZ,\n cfg.SLOWFAST.AU_ALPHA // tf_dim_reduction,\n cfg.SLOWFAST.AU_FUSION_CONV_NUM,\n # Fusion connections\n self.FS_FUSION[2],\n self.AFS_FUSION[2],\n # AVS\n self.AVS_FLAG[2],\n self.AVS_PROJ_DIM,\n # nGPUs and shards\n num_gpus=cfg.NUM_GPUS,\n num_shards=cfg.NUM_SHARDS,\n )\n\n slow_dim = width_per_group * 8 + \\\n (width_per_group * 8 // out_dim_ratio if self.FS_FUSION[2] else 0)\n self.s4 = resnet_helper.ResStage(\n dim_in=[\n slow_dim,\n width_per_group * 8 // cfg.SLOWFAST.BETA_INV,\n width_per_group * 8 // cfg.SLOWFAST.AU_BETA_INV,\n ],\n dim_out=[\n width_per_group * 16,\n width_per_group * 16 // cfg.SLOWFAST.BETA_INV,\n width_per_group * 16 // cfg.SLOWFAST.AU_BETA_INV,\n ],\n dim_inner=[\n dim_inner * 4, \n dim_inner * 4 // cfg.SLOWFAST.BETA_INV,\n dim_inner * 4 // cfg.SLOWFAST.AU_BETA_INV\n ],\n temp_kernel_sizes=temp_kernel[3],\n stride=[2, 2, tf_stride],\n num_blocks=[d4] * 3,\n 
num_groups=[num_groups] * 3,\n num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2],\n nonlocal_inds=cfg.NONLOCAL.LOCATION[2],\n nonlocal_group=cfg.NONLOCAL.GROUP[2],\n nonlocal_pool=cfg.NONLOCAL.POOL[2],\n instantiation=cfg.NONLOCAL.INSTANTIATION,\n trans_func_name=trans_func[2],\n dilation=cfg.RESNET.SPATIAL_DILATIONS[2],\n norm_module=self.norm_module,\n )\n tf_dim_reduction *= tf_stride\n \n if self.FS_FUSION[3] or self.AFS_FUSION[3]:\n self.s4_fuse = FuseAV(\n # Slow\n width_per_group * 16,\n # Fast\n width_per_group * 16 // cfg.SLOWFAST.BETA_INV,\n cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,\n cfg.SLOWFAST.FUSION_KERNEL_SZ,\n cfg.SLOWFAST.ALPHA,\n # Audio\n width_per_group * 16 // cfg.SLOWFAST.AU_BETA_INV,\n cfg.SLOWFAST.AU_FUSION_CONV_CHANNEL_MODE,\n cfg.SLOWFAST.AU_FUSION_CONV_CHANNEL_DIM,\n cfg.SLOWFAST.AU_FUSION_CONV_CHANNEL_RATIO,\n cfg.SLOWFAST.AU_FUSION_KERNEL_SZ,\n cfg.SLOWFAST.AU_ALPHA // tf_dim_reduction,\n cfg.SLOWFAST.AU_FUSION_CONV_NUM,\n # Fusion connections\n self.FS_FUSION[3],\n self.AFS_FUSION[3],\n # AVS\n self.AVS_FLAG[3],\n self.AVS_PROJ_DIM,\n # nGPUs and shards\n num_gpus=cfg.NUM_GPUS,\n num_shards=cfg.NUM_SHARDS,\n )\n \n slow_dim = width_per_group * 16 + \\\n (width_per_group * 16 // out_dim_ratio if self.FS_FUSION[3] else 0)\n self.s5 = resnet_helper.ResStage(\n dim_in=[\n slow_dim,\n width_per_group * 16 // cfg.SLOWFAST.BETA_INV,\n width_per_group * 16 // cfg.SLOWFAST.AU_BETA_INV,\n ],\n dim_out=[\n width_per_group * 32,\n width_per_group * 32 // cfg.SLOWFAST.BETA_INV,\n width_per_group * 32 // cfg.SLOWFAST.AU_BETA_INV,\n ],\n dim_inner=[\n dim_inner * 8, \n dim_inner * 8 // cfg.SLOWFAST.BETA_INV,\n dim_inner * 8 // cfg.SLOWFAST.AU_BETA_INV,\n ],\n temp_kernel_sizes=temp_kernel[4],\n stride=[2, 2, tf_stride],\n num_blocks=[d5] * 3,\n num_groups=[num_groups] * 3,\n num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3],\n nonlocal_inds=cfg.NONLOCAL.LOCATION[3],\n nonlocal_group=cfg.NONLOCAL.GROUP[3],\n nonlocal_pool=cfg.NONLOCAL.POOL[3],\n instantiation=cfg.NONLOCAL.INSTANTIATION,\n trans_func_name=trans_func[3],\n dilation=cfg.RESNET.SPATIAL_DILATIONS[3],\n norm_module=self.norm_module,\n )\n tf_dim_reduction *= tf_stride\n \n # setup AVS for pool5 output\n if self.AVS_FLAG[4]:\n # this FuseAV object is used for compute AVS loss only\n self.s5_fuse = FuseAV(\n # Slow\n width_per_group * 32,\n # Fast\n width_per_group * 32 // cfg.SLOWFAST.BETA_INV,\n cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,\n cfg.SLOWFAST.FUSION_KERNEL_SZ,\n cfg.SLOWFAST.ALPHA,\n # Audio\n width_per_group * 32 // cfg.SLOWFAST.AU_BETA_INV,\n cfg.SLOWFAST.AU_FUSION_CONV_CHANNEL_MODE,\n cfg.SLOWFAST.AU_FUSION_CONV_CHANNEL_DIM,\n cfg.SLOWFAST.AU_FUSION_CONV_CHANNEL_RATIO,\n cfg.SLOWFAST.AU_FUSION_KERNEL_SZ,\n cfg.SLOWFAST.AU_ALPHA // tf_dim_reduction,\n cfg.SLOWFAST.AU_FUSION_CONV_NUM,\n # Fusion connections\n True,\n True,\n # AVS\n self.AVS_FLAG[4],\n self.AVS_PROJ_DIM,\n # nGPUs and shards\n num_gpus=cfg.NUM_GPUS,\n num_shards=cfg.NUM_SHARDS,\n )\n\n self.head = head_helper.ResNetBasicHead(\n dim_in=[\n width_per_group * 32,\n width_per_group * 32 // cfg.SLOWFAST.BETA_INV,\n width_per_group * 32 // cfg.SLOWFAST.AU_BETA_INV,\n ],\n num_classes=cfg.MODEL.NUM_CLASSES,\n pool_size=[\n [\n cfg.DATA.NUM_FRAMES\n // cfg.SLOWFAST.ALPHA\n // pool_size[0][0],\n cfg.DATA.CROP_SIZE // 32 // pool_size[0][1],\n cfg.DATA.CROP_SIZE // 32 // pool_size[0][2],\n ],\n [\n cfg.DATA.NUM_FRAMES // pool_size[1][0],\n cfg.DATA.CROP_SIZE // 32 // pool_size[1][1],\n cfg.DATA.CROP_SIZE // 32 // pool_size[1][2],\n ],\n 
[\n 1,\n cfg.DATA.AUDIO_FRAME_NUM // tf_dim_reduction,\n cfg.DATA.AUDIO_MEL_NUM // tf_dim_reduction,\n ],\n ],\n dropout_rate=cfg.MODEL.DROPOUT_RATE,\n )", "title": "" }, { "docid": "6df156c887b62cad9976bca37c939fd2", "score": "0.6429489", "text": "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "6df156c887b62cad9976bca37c939fd2", "score": "0.6429489", "text": "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "6df156c887b62cad9976bca37c939fd2", "score": "0.6429489", "text": "def resnet34(**kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "ed7200172c885e136ac235e870e7fcef", "score": "0.63882774", "text": "def resnet_layer(inputs,\n num_filters=16,\n kernel_size=3,\n strides=1,\n activation='relu',\n batch_normalization=True,\n conv_first=True):\n\n # Model parameter\n # ----------------------------------------------------------------------------\n # | | 200-epoch | Orig Paper| 200-epoch | Orig Paper| sec/epoch\n # Model | n | ResNet v1 | ResNet v1 | ResNet v2 | ResNet v2 | GTX1080Ti\n # |v1(v2)| %Accuracy | %Accuracy | %Accuracy | %Accuracy | v1 (v2)\n # ----------------------------------------------------------------------------\n # ResNet20 | 3 (2)| 92.16 | 91.25 | ----- | ----- | 35 (---)\n # ResNet32 | 5(NA)| 92.46 | 92.49 | NA | NA | 50 ( NA)\n # ResNet44 | 7(NA)| 92.50 | 92.83 | NA | NA | 70 ( NA)\n # ResNet56 | 9 (6)| 92.71 | 93.03 | 93.01 | NA | 90 (100)\n # ResNet110 |18(12)| 92.65 | 93.39+-.16| 93.15 | 93.63 | 165(180)\n # ResNet164 |27(18)| ----- | 94.07 | ----- | 94.54 | ---(---)\n # ResNet1001| (111)| ----- | 92.39 | ----- | 95.08+-.14| ---(---)\n # ---------------------------------------------------------------------------\n conv = Conv2D(num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding='same',\n kernel_initializer='he_normal',\n kernel_regularizer=l2(1e-4))\n\n x = inputs\n if conv_first:\n x = conv(x)\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n else:\n if batch_normalization:\n x = BatchNormalization()(x)\n if activation is not None:\n x = Activation(activation)(x)\n x = conv(x)\n return x", "title": "" }, { "docid": "f36aa6913d5c495db65321c801f723e4", "score": "0.6380159", "text": "def resnet101(pretrained=False, num_classes=1000, in_chans=3, **kwargs):\n default_cfg = default_cfgs['resnet101']\n model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, in_chans=in_chans, **kwargs)\n model.default_cfg = default_cfg\n if pretrained:\n load_pretrained(model, default_cfg, num_classes, in_chans)\n return model", "title": "" }, { "docid": "0d9761cb33bea096a795d205bb55258b", "score": "0.6369666", "text": "def make_two_dim_resnet(\n input_node,\n num_residues=50,\n num_features=40,\n num_predictions=1,\n num_channels=32,\n num_layers=2,\n filter_size=3,\n filter_size_2=None,\n final_non_linearity=False,\n name_prefix='',\n fancy=True,\n batch_norm=False,\n is_training=False,\n atrou_rates=None,\n channel_multiplier=0,\n divide_channels_by=2,\n resize_features_with_1x1=False,\n data_format='NHWC',\n stddev=0.01,\n dropout_keep_prob=1.0):\n del num_residues # Unused.\n\n if atrou_rates is None:\n atrou_rates = [1]\n if not fancy:\n raise ValueError('non fancy deprecated')\n\n logging.info('atrou rates %s', atrou_rates)\n\n logging.info('name prefix %s', name_prefix)\n x_image = 
input_node\n previous_layer = x_image\n non_linearity = True\n for i_layer in range(num_layers):\n in_channels = num_channels\n out_channels = num_channels\n\n curr_atrou_rate = atrou_rates[i_layer % len(atrou_rates)]\n\n if i_layer == 0:\n in_channels = num_features\n if i_layer == num_layers - 1:\n out_channels = num_predictions\n non_linearity = final_non_linearity\n if i_layer == 0 or i_layer == num_layers - 1:\n layer_name = name_prefix + 'conv%d' % (i_layer + 1)\n initial_filter_size = filter_size\n if resize_features_with_1x1:\n initial_filter_size = 1\n previous_layer = two_dim_convnet.make_conv_layer(\n input_node=previous_layer,\n in_channels=in_channels,\n out_channels=out_channels,\n layer_name=layer_name,\n filter_size=initial_filter_size,\n filter_size_2=filter_size_2,\n non_linearity=non_linearity,\n atrou_rate=curr_atrou_rate,\n data_format=data_format,\n stddev=stddev)\n else:\n layer_name = name_prefix + 'res%d' % (i_layer + 1)\n previous_layer = make_sep_res_layer(\n input_node=previous_layer,\n in_channels=in_channels,\n out_channels=out_channels,\n layer_name=layer_name,\n filter_size=filter_size,\n filter_size_2=filter_size_2,\n batch_norm=batch_norm,\n is_training=is_training,\n atrou_rate=curr_atrou_rate,\n channel_multiplier=channel_multiplier,\n divide_channels_by=divide_channels_by,\n data_format=data_format,\n stddev=stddev,\n dropout_keep_prob=dropout_keep_prob)\n\n y = previous_layer\n\n return y", "title": "" }, { "docid": "6272862d3719ebf4ef9097674ee4e3ce", "score": "0.63580954", "text": "def _construct_network(self, cfg):\n assert cfg.MODEL.ARCH in _POOL1.keys()\n pool_size = _POOL1[cfg.MODEL.ARCH]\n assert len({len(pool_size), self.num_pathways}) == 1\n assert cfg.RESNET.DEPTH in _MODEL_STAGE_DEPTH.keys()\n\n (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[cfg.RESNET.DEPTH]\n\n num_groups = cfg.RESNET.NUM_GROUPS\n width_per_group = cfg.RESNET.WIDTH_PER_GROUP\n dim_inner = num_groups * width_per_group\n out_dim_ratio = (\n cfg.SLOWFAST.BETA_INV // cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO\n )\n\n temp_kernel = _TEMPORAL_KERNEL_BASIS[cfg.MODEL.ARCH]\n\n self.s1 = stem_helper.VideoModelStem(\n dim_in=cfg.DATA.INPUT_CHANNEL_NUM,\n dim_out=[width_per_group, width_per_group // cfg.SLOWFAST.BETA_INV],\n kernel=[temp_kernel[0][0] + [7, 7], temp_kernel[0][1] + [7, 7]],\n stride=[[1, 2, 2]] * 2,\n padding=[\n [temp_kernel[0][0][0] // 2, 3, 3],\n [temp_kernel[0][1][0] // 2, 3, 3],\n ],\n norm_module=self.norm_module,\n )\n self.s1_fuse = FuseFastToSlow(\n width_per_group // cfg.SLOWFAST.BETA_INV,\n cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,\n cfg.SLOWFAST.FUSION_KERNEL_SZ,\n cfg.SLOWFAST.ALPHA,\n norm_module=self.norm_module,\n )\n\n self.s2 = resnet_helper.ResStage(\n dim_in=[\n width_per_group + width_per_group // out_dim_ratio,\n width_per_group // cfg.SLOWFAST.BETA_INV,\n ],\n dim_out=[\n width_per_group * 4,\n width_per_group * 4 // cfg.SLOWFAST.BETA_INV,\n ],\n dim_inner=[dim_inner, dim_inner // cfg.SLOWFAST.BETA_INV],\n temp_kernel_sizes=temp_kernel[1],\n stride=cfg.RESNET.SPATIAL_STRIDES[0],\n num_blocks=[d2] * 2,\n num_groups=[num_groups] * 2,\n num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[0],\n nonlocal_inds=cfg.NONLOCAL.LOCATION[0],\n nonlocal_group=cfg.NONLOCAL.GROUP[0],\n nonlocal_pool=cfg.NONLOCAL.POOL[0],\n instantiation=cfg.NONLOCAL.INSTANTIATION,\n trans_func_name=cfg.RESNET.TRANS_FUNC,\n dilation=cfg.RESNET.SPATIAL_DILATIONS[0],\n norm_module=self.norm_module,\n )\n self.s2_fuse = FuseFastToSlow(\n width_per_group * 4 // cfg.SLOWFAST.BETA_INV,\n 
cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,\n cfg.SLOWFAST.FUSION_KERNEL_SZ,\n cfg.SLOWFAST.ALPHA,\n norm_module=self.norm_module,\n )\n\n for pathway in range(self.num_pathways):\n pool = nn.MaxPool3d(\n kernel_size=pool_size[pathway],\n stride=pool_size[pathway],\n padding=[0, 0, 0],\n )\n self.add_module(\"pathway{}_pool\".format(pathway), pool)\n\n self.s3 = resnet_helper.ResStage(\n dim_in=[\n width_per_group * 4 + width_per_group * 4 // out_dim_ratio,\n width_per_group * 4 // cfg.SLOWFAST.BETA_INV,\n ],\n dim_out=[\n width_per_group * 8,\n width_per_group * 8 // cfg.SLOWFAST.BETA_INV,\n ],\n dim_inner=[dim_inner * 2, dim_inner * 2 // cfg.SLOWFAST.BETA_INV],\n temp_kernel_sizes=temp_kernel[2],\n stride=cfg.RESNET.SPATIAL_STRIDES[1],\n num_blocks=[d3] * 2,\n num_groups=[num_groups] * 2,\n num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[1],\n nonlocal_inds=cfg.NONLOCAL.LOCATION[1],\n nonlocal_group=cfg.NONLOCAL.GROUP[1],\n nonlocal_pool=cfg.NONLOCAL.POOL[1],\n instantiation=cfg.NONLOCAL.INSTANTIATION,\n trans_func_name=cfg.RESNET.TRANS_FUNC,\n dilation=cfg.RESNET.SPATIAL_DILATIONS[1],\n norm_module=self.norm_module,\n )\n self.s3_fuse = FuseFastToSlow(\n width_per_group * 8 // cfg.SLOWFAST.BETA_INV,\n cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,\n cfg.SLOWFAST.FUSION_KERNEL_SZ,\n cfg.SLOWFAST.ALPHA,\n norm_module=self.norm_module,\n )\n\n self.s4 = resnet_helper.ResStage(\n dim_in=[\n width_per_group * 8 + width_per_group * 8 // out_dim_ratio,\n width_per_group * 8 // cfg.SLOWFAST.BETA_INV,\n ],\n dim_out=[\n width_per_group * 16,\n width_per_group * 16 // cfg.SLOWFAST.BETA_INV,\n ],\n dim_inner=[dim_inner * 4, dim_inner * 4 // cfg.SLOWFAST.BETA_INV],\n temp_kernel_sizes=temp_kernel[3],\n stride=cfg.RESNET.SPATIAL_STRIDES[2],\n num_blocks=[d4] * 2,\n num_groups=[num_groups] * 2,\n num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[2],\n nonlocal_inds=cfg.NONLOCAL.LOCATION[2],\n nonlocal_group=cfg.NONLOCAL.GROUP[2],\n nonlocal_pool=cfg.NONLOCAL.POOL[2],\n instantiation=cfg.NONLOCAL.INSTANTIATION,\n trans_func_name=cfg.RESNET.TRANS_FUNC,\n dilation=cfg.RESNET.SPATIAL_DILATIONS[2],\n norm_module=self.norm_module,\n )\n self.s4_fuse = FuseFastToSlow(\n width_per_group * 16 // cfg.SLOWFAST.BETA_INV,\n cfg.SLOWFAST.FUSION_CONV_CHANNEL_RATIO,\n cfg.SLOWFAST.FUSION_KERNEL_SZ,\n cfg.SLOWFAST.ALPHA,\n norm_module=self.norm_module,\n )\n\n self.s5 = resnet_helper.ResStage(\n dim_in=[\n width_per_group * 16 + width_per_group * 16 // out_dim_ratio,\n width_per_group * 16 // cfg.SLOWFAST.BETA_INV,\n ],\n dim_out=[\n width_per_group * 32,\n width_per_group * 32 // cfg.SLOWFAST.BETA_INV,\n ],\n dim_inner=[dim_inner * 8, dim_inner * 8 // cfg.SLOWFAST.BETA_INV],\n temp_kernel_sizes=temp_kernel[4],\n stride=cfg.RESNET.SPATIAL_STRIDES[3],\n num_blocks=[d5] * 2,\n num_groups=[num_groups] * 2,\n num_block_temp_kernel=cfg.RESNET.NUM_BLOCK_TEMP_KERNEL[3],\n nonlocal_inds=cfg.NONLOCAL.LOCATION[3],\n nonlocal_group=cfg.NONLOCAL.GROUP[3],\n nonlocal_pool=cfg.NONLOCAL.POOL[3],\n instantiation=cfg.NONLOCAL.INSTANTIATION,\n trans_func_name=cfg.RESNET.TRANS_FUNC,\n dilation=cfg.RESNET.SPATIAL_DILATIONS[3],\n norm_module=self.norm_module,\n )\n\n if cfg.DETECTION.ENABLE:\n self.head = head_helper.ResNetRoIHead(\n dim_in=[\n width_per_group * 32,\n width_per_group * 32 // cfg.SLOWFAST.BETA_INV,\n ],\n num_classes=cfg.MODEL.NUM_CLASSES,\n pool_size=[\n [\n cfg.DATA.NUM_FRAMES\n // cfg.SLOWFAST.ALPHA\n // pool_size[0][0],\n 1,\n 1,\n ],\n [cfg.DATA.NUM_FRAMES // pool_size[1][0], 1, 1],\n ],\n 
resolution=[[cfg.DETECTION.ROI_XFORM_RESOLUTION] * 2] * 2,\n scale_factor=[cfg.DETECTION.SPATIAL_SCALE_FACTOR] * 2,\n dropout_rate=cfg.MODEL.DROPOUT_RATE,\n act_func=cfg.MODEL.HEAD_ACT,\n aligned=cfg.DETECTION.ALIGNED,\n )\n else:\n self.head = head_helper.ResNetBasicHead(\n dim_in=[\n width_per_group * 32,\n width_per_group * 32 // cfg.SLOWFAST.BETA_INV,\n ],\n num_classes=cfg.MODEL.NUM_CLASSES,\n pool_size=[None, None]\n if cfg.MULTIGRID.SHORT_CYCLE\n else [\n [\n cfg.DATA.NUM_FRAMES\n // cfg.SLOWFAST.ALPHA\n // pool_size[0][0],\n cfg.DATA.CROP_SIZE // 32 // pool_size[0][1],\n cfg.DATA.CROP_SIZE // 32 // pool_size[0][2],\n ],\n [\n cfg.DATA.NUM_FRAMES // pool_size[1][0],\n cfg.DATA.CROP_SIZE // 32 // pool_size[1][1],\n cfg.DATA.CROP_SIZE // 32 // pool_size[1][2],\n ],\n ], # None for AdaptiveAvgPool3d((1, 1, 1))\n dropout_rate=cfg.MODEL.DROPOUT_RATE,\n act_func=cfg.MODEL.HEAD_ACT,\n )", "title": "" }, { "docid": "0a0acf5e83912613289a818ff4c1846c", "score": "0.63498014", "text": "def resnet(units, num_stage, filter_list, data_type, bottle_neck=True, bn_mom=0.9, workspace=1024, memonger=False):\n num_unit = len(units)\n assert (num_unit == num_stage)\n data = mx.sym.Variable(name='data') # remove bn on data, same as original paper\n data = mx.sym.BatchNorm(data=data, fix_gamma=True, eps=1e-5, momentum=bn_mom, name='bn_data')\n if data_type == 'cifar10':\n body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(3, 3), stride=(1, 1), pad=(1, 1),\n no_bias=1, name=\"conv1\", workspace=workspace)\n elif data_type == 'imagenet':\n body = mx.sym.Convolution(data=data, num_filter=filter_list[0], kernel=(7, 7), stride=(2, 2), pad=(3, 3),\n no_bias=1, name=\"conv1\", workspace=workspace)\n body = mx.sym.BatchNorm(data=body, fix_gamma=False, eps=1e-5, momentum=bn_mom, name='bn1')\n body = mx.sym.Activation(data=body, act_type='relu', name='relu')\n body = mx.symbol.Pooling(data=body, kernel=(3, 3), stride=(2, 2), pad=(1, 1), pool_type='max', name='maxpool')\n else:\n raise ValueError(\"do not support {} yet\".format(data_type))\n for i in range(num_stage):\n body = residual_unit(body, filter_list[i + 1], (1 if i == 0 else 2, 1 if i == 0 else 2), False,\n name='layer%d_%d' % (i + 1, 0), bottle_neck=bottle_neck, workspace=workspace,\n memonger=memonger)\n for j in range(units[i] - 1):\n body = residual_unit(body, filter_list[i + 1], (1, 1), True, name='layer%d_%d' % (i + 1, j + 1),\n bottle_neck=bottle_neck, workspace=workspace, memonger=memonger)\n output = mx.symbol.Pooling(data=body, global_pool=True, pool_type='avg', name='global_avg')\n output = mx.symbol.FullyConnected(data=output, num_hidden=365, name='fc')\n\n return output", "title": "" }, { "docid": "7c92ebd61473eb768239c4032e82f5b5", "score": "0.63431823", "text": "def resnet110g8(**kwargs):\n return ResNet(BasicBlock, 110, groups=8, **kwargs)", "title": "" }, { "docid": "32e157cbf542ead0722f43937bc92085", "score": "0.6341063", "text": "def __init__(self, x, out_dim, is_training, not_pretrain):\r\n # Parse input arguments into class variables\r\n self.x = x\r\n self.out_dim = out_dim\r\n self.is_training = is_training\r\n self.not_pretrain = not_pretrain\r\n # self.dropout=dropout\r\n\r\n rgb_scaled = self.x * 255.0\r\n\r\n # Convert RGB to BGR\r\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)\r\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\r\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\r\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\r\n self.x = 
tf.concat(axis=3, values=[\r\n blue - Imagenet_MEAN[0],\r\n green - Imagenet_MEAN[1],\r\n red - Imagenet_MEAN[2],\r\n ])\r\n assert self.x.get_shape().as_list()[1:] == [224, 224, 3]\r\n\r\n \"\"\"Create the network graph.\"\"\"\r\n # 1st Layer: Conv (w ReLu) -> Lrn -> Pool\r\n conv1 = conv(self.x, 11, 11, 96, 4, 4, padding='VALID', name='conv1', not_pretrain=self.not_pretrain)\r\n norm1 = lrn(conv1, 2, 2e-05, 0.75, name='norm1')\r\n pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID', name='pool1')\r\n\r\n # 2nd Layer: Conv (w ReLu) -> Lrn -> Pool with 2 groups\r\n conv2 = conv(pool1, 5, 5, 256, 1, 1, groups=2, name='conv2', not_pretrain=self.not_pretrain)\r\n norm2 = lrn(conv2, 2, 2e-05, 0.75, name='norm2')\r\n pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID', name='pool2')\r\n\r\n # 3rd Layer: Conv (w ReLu)\r\n conv3 = conv(pool2, 3, 3, 384, 1, 1, name='conv3', not_pretrain=self.not_pretrain)\r\n\r\n # 4th Layer: Conv (w ReLu) splitted into two groups\r\n conv4 = conv(conv3, 3, 3, 384, 1, 1, groups=2, name='conv4', not_pretrain=self.not_pretrain)\r\n\r\n # 5th Layer: Conv (w ReLu) -> Pool splitted into two groups\r\n conv5 = conv(conv4, 3, 3, 256, 1, 1, groups=2, name='conv5', not_pretrain=self.not_pretrain)\r\n pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID', name='pool5')\r\n\r\n dim = pool5.get_shape().as_list()\r\n self.flat_dim = dim[1] * dim[2] * dim[3] # 6 * 6 * 256\r\n flattened = tf.reshape(pool5, [-1, self.flat_dim])\r\n\r\n # 6th Layer: Flatten -> FC (w ReLu) -> Dropout\r\n # flattened = tf.reshape(pool5, [-1, 6 * 6 * 256])\r\n fc6 = fc(flattened, self.flat_dim, 4096, name='fc6')\r\n # dropout6 = dropout(fc6, self.KEEP_PROB)\r\n fc6_bn = batch_norm(fc6, self.is_training, name='fc6_bn')\r\n\r\n # 7th Layer: FC (w ReLu) -> Dropout\r\n fc7 = fc(fc6_bn, 4096, 4096, name='fc7')\r\n # dropout7 = dropout(fc7, self.KEEP_PROB)\r\n fc7_bn = batch_norm(fc7, self.is_training, name='fc7_bn')\r\n\r\n # 8th Layer: FC and return unscaled activations\r\n self.y = fc(fc7_bn, 4096, self.out_dim, relu=False, name='fc8')\r\n\r\n\r\n trainable_params = tf.trainable_variables()\r\n if not self.not_pretrain:#使用预训练模型\r\n conv_params = tf.get_collection('params')\r\n self.params = conv_params + trainable_params\r\n else:#不使用预训练模型\r\n self.params=trainable_params\r\n print(self.params)\r\n print(len(self.params))", "title": "" }, { "docid": "7d72b4134f005a53f7761044fcf33c1f", "score": "0.63399583", "text": "def se_resnet34(num_classes=1_000):\n model = ResNet(SEBasicBlock, [3, 4, 6, 3], num_classes=num_classes)\n model.avgpool = nn.AdaptiveAvgPool2d(1)\n return model", "title": "" }, { "docid": "4ff817ff9d57fea3d0ffaeb4374c8e9e", "score": "0.6338724", "text": "def se_resnet34(num_classes):\n model = ResNet(SEBasicBlock, [3, 4, 6, 3], num_classes=num_classes)\n model.avgpool = nn.AdaptiveAvgPool2d(1)\n return model", "title": "" }, { "docid": "4216df71a6227fcc665f5dc1629735c4", "score": "0.63310957", "text": "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "4216df71a6227fcc665f5dc1629735c4", "score": "0.63310957", "text": "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "4216df71a6227fcc665f5dc1629735c4", "score": "0.63310957", "text": "def resnet50(**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "4216df71a6227fcc665f5dc1629735c4", "score": "0.63310957", "text": "def resnet50(**kwargs):\n model = 
ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "6c7b377909d400a50d0f6f0d58f0c7d6", "score": "0.63220423", "text": "def resnet101(pretrained=False, root='./pretrained/resnet101-5d3b4d8f.pth', **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n #Remove the following lines of comments\n #if u want to train from a pretrained model\n if pretrained:\n load_model(root,model)\n # model.load_state_dict(torch.load(root))\n print('load {}'.format(root))\n\n return model", "title": "" }, { "docid": "d905c2b352b11542d9b0ea37ce5cb85c", "score": "0.6317323", "text": "def test_create_resnet(self):\n for input_channel, input_clip_length, input_crop_size in itertools.product(\n (3, 2), (2, 4), (56, 64)\n ):\n model_depth = 50\n model, num_class = self._build_resnet(\n input_channel,\n input_clip_length,\n input_crop_size,\n model_depth,\n nn.BatchNorm3d,\n nn.ReLU,\n )\n\n # Test forwarding.\n for tensor in TestResNet._get_inputs(\n input_channel, input_clip_length, input_crop_size\n ):\n if tensor.shape[1] != input_channel:\n with self.assertRaises(RuntimeError):\n out = model(tensor)\n continue\n\n out = model(tensor)\n\n output_shape = out.shape\n output_shape_gt = (tensor.shape[0], num_class)\n\n self.assertEqual(\n output_shape,\n output_shape_gt,\n \"Output shape {} is different from expected shape {}\".format(\n output_shape, output_shape_gt\n ),\n )", "title": "" }, { "docid": "0bcd2907788a58eb46ad780c36663f85", "score": "0.6291769", "text": "def pspnet_v2_resnet101(x,\n num_classes,\n is_training,\n use_global_status,\n reuse=False):\n\n scores = []\n scores1 = []\n\n with tf.name_scope('scale_0') as scope:\n score,score1 = pspnet_v2(\n x,\n 'resnet_v1_101',\n num_classes,\n is_training,\n use_global_status,\n reuse=reuse)\n\n scores.append(score)\n scores1.append(score1)\n\n return scores,scores1", "title": "" }, { "docid": "5245d9697fbdeb9b2bc674853d11e44c", "score": "0.62885404", "text": "def resnet152(**kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "title": "" }, { "docid": "5245d9697fbdeb9b2bc674853d11e44c", "score": "0.62885404", "text": "def resnet152(**kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "title": "" }, { "docid": "5245d9697fbdeb9b2bc674853d11e44c", "score": "0.62885404", "text": "def resnet152(**kwargs):\n model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)\n return model", "title": "" }, { "docid": "6beb63a17f79d7f39c8f35fd574923fb", "score": "0.62772876", "text": "def refiner_network(input_image_tensor):\n def resnet_block(input_features, nb_features=128, nb_kernel_rows=3, nb_kernel_cols=3):\n \"\"\"\n A ResNet block with two `nb_kernel_rows` x `nb_kernel_cols` convolutional layers,\n each with `nb_features` feature maps.\n See Figure 6 in https://arxiv.org/pdf/1612.07828v1.pdf.\n :param input_features: Input tensor to ResNet block.\n :return: Output tensor from ResNet block.\n \"\"\"\n y = Conv2D(128, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform',\n kernel_regularizer=regularizers.l2(1.e-2))(input_features)\n\n y = Conv2D(128, (5, 5), padding='same', activation='relu', kernel_initializer='glorot_uniform',\n kernel_regularizer=regularizers.l2(1.e-2))(y)\n\n y = layers.merge.add([y, input_features])\n return y\n\n # an input image of size w x h is convolved with 3 x 3 filters that output 64 feature maps\n # x_1 = layers.Convolution2D(128, 1, 1, border_mode='same', activation='relu')(input_image_tensor)\n # x_3 = 
layers.Convolution2D(128, 3, 3, border_mode='same', activation='relu')(input_image_tensor)\n # x_5 = layers.Convolution2D(128, 5, 5, border_mode='same', activation='relu')(input_image_tensor)\n\n # the output is passed through 4 ResNet blocks\n # x = resnet_block(input_image_tensor)\n # for _ in range(3):\n # x = resnet_block(x)\n x = layers.Conv2D(128, 7, 1, border_mode='same', activation='relu',\n kernel_regularizer=regularizers.l2(1.e-2))(input_image_tensor)\n x = BatchNormalization()(x)\n x = layers.Conv2D(128, 1, 7, border_mode='same', activation='relu',\n kernel_regularizer=regularizers.l2(1.e-2))(x)\n x = BatchNormalization()(x)\n x = layers.Conv2D(128, 9, 1, border_mode='same', activation='relu',\n kernel_regularizer=regularizers.l2(1.e-2))(x)\n x = BatchNormalization()(x)\n x = layers.Conv2D(128, 1, 9, border_mode='same', activation='relu',\n kernel_regularizer=regularizers.l2(1.e-2))(x)\n x = BatchNormalization()(x)\n x = layers.Conv2D(128, 11, 1, border_mode='same', activation='relu',\n kernel_regularizer=regularizers.l2(1.e-2))(x)\n x = BatchNormalization()(x)\n x = layers.Conv2D(128, 1, 11, border_mode='same', activation='relu',\n kernel_regularizer=regularizers.l2(1.e-2))(x)\n x = BatchNormalization()(x)\n x = layers.Conv2D(128, 13, 1, border_mode='same', activation='relu',\n kernel_regularizer=regularizers.l2(1.e-2))(x)\n x = BatchNormalization()(x)\n x = layers.Conv2D(128, 1, 13, border_mode='same', activation='relu',\n kernel_regularizer=regularizers.l2(1.e-2))(x)\n x = BatchNormalization()(x)\n # x = layers.Convolution2D(128, 3, 3, border_mode='same', activation='relu',\n # kernel_regularizer=regularizers.l2(1.e-2))(input_image_tensor)\n # x = BatchNormalization()(x)\n # x = layers.Convolution2D(128, 3, 3, border_mode='same', activation='relu',\n # kernel_regularizer=regularizers.l2(1.e-2))(x)\n # x = BatchNormalization()(x)\n # x = layers.Convolution2D(128, 3, 3, border_mode='same', activation='relu',\n # kernel_regularizer=regularizers.l2(1.e-2))(x)\n # x = BatchNormalization()(x)\n # x = resnet_block(x, nb_features=128, nb_kernel_rows=3, nb_kernel_cols=3)\n # x = resnet_block(x, nb_features=128, nb_kernel_rows=5, nb_kernel_cols=5)\n # x = layers.Convolution2D(1, 1, 1, border_mode='same', kernel_regularizer=regularizers.l2(1.e-2))(x)\n\n # x = layers.merge.add([input_image_tensor, x_1, x_3, x_5])\n # x = layers.Convolution2D(256, 3, 3, border_mode='same', activation='relu')(x)\n\n # x = layers.Convolution2D(1, 1, 1, border_mode='same')(x)\n\n # the output of the last ResNet block is passed to a 1 x 1 convolutional layer producing 1 feature map\n # corresponding to the refined synthetic image\n\n # x= Conv2D(256, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform',\n # kernel_regularizer=regularizers.l2(1.e-2))(input_image_tensor)\n # x = BatchNormalization()(x)\n # x = Conv2D(128, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform',\n # kernel_regularizer=regularizers.l2(1.e-2))(x)\n # x = BatchNormalization()(x)\n #\n #\n # y = Conv2D(128, (3, 3), padding='same', activation='relu', kernel_initializer='glorot_uniform',\n # kernel_regularizer=regularizers.l2(1.e-2))(input_image_tensor)\n # y = BatchNormalization()(y)\n #\n # x = layers.merge.average([x, y])\n # x = Activation('relu')(x)\n x = Conv2D(1, (1, 1), padding='same', kernel_initializer='glorot_uniform', kernel_regularizer=regularizers.l2(1.e-2))(x)\n # x = layers.merge.add([x, input_image_tensor])\n\n return x", "title": "" }, { "docid": 
"7bcc96f62a8d3bbe34437c1e965bd96c", "score": "0.62752575", "text": "def SRNet(input_shape=None):\n\n from tensorflow.keras.models import Model\n from tensorflow.keras.layers import Dense, Dropout, Activation, Input, BatchNormalization\n from tensorflow.keras.layers import Conv2D, AveragePooling2D, GlobalAveragePooling2D\n from tensorflow.keras import optimizers\n from tensorflow.keras import initializers\n from tensorflow.keras import regularizers\n\n\n\n if input_shape == None:\n input_shape = (512, 512, 3)\n\n inputs = Input(shape=input_shape)\n x = inputs\n\n conv2d_params = {\n 'padding': 'same',\n 'data_format': 'channels_last',\n 'bias_initializer': initializers.Constant(0.2),\n 'bias_regularizer': None,\n 'kernel_initializer': initializers.VarianceScaling(),\n 'kernel_regularizer': regularizers.l2(2e-4),\n }\n\n avgpool_params = {\n 'padding': 'same',\n 'data_format': 'channels_last',\n 'pool_size': (3,3),\n 'strides': (2,2)\n }\n\n bn_params = {\n 'momentum': 0.9,\n 'center': True,\n 'scale': True\n }\n\n\n x = Conv2D(64, (3,3), strides=1, **conv2d_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = Activation(\"relu\")(x)\n\n x = Conv2D(16, (3,3), strides=1, **conv2d_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = Activation(\"relu\")(x)\n\n for i in range(5):\n y = x\n x = Conv2D(16, (3,3), **conv2d_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = Activation(\"relu\")(x)\n x = Conv2D(16, (3,3), **conv2d_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = add([x, y])\n y = x\n\n\n for f in [16, 64, 128, 256]:\n y = Conv2D(f, (1,1), strides=2, **conv2d_params)(x)\n y = BatchNormalization(**bn_params)(y)\n x = Conv2D(f, (3,3), **conv2d_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = Activation(\"relu\")(x)\n x = Conv2D(f, (3,3), **conv2d_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = AveragePooling2D(**avgpool_params)(x)\n x = add([x, y])\n\n x = Conv2D(512, (3,3), **conv2d_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = Activation(\"relu\")(x)\n x = Conv2D(512, (3,3), **conv2d_params)(x)\n x = BatchNormalization(**bn_params)(x)\n x = GlobalAveragePooling2D(data_format=\"channels_first\")(x)\n\n x = Dense(2, kernel_initializer=initializers.RandomNormal(mean=0., stddev=0.01),\n bias_initializer=initializers.Constant(0.) 
)(x)\n x = Activation('softmax')(x)\n\n predictions = x\n\n model = Model(inputs=inputs, outputs=predictions)\n\n return model", "title": "" }, { "docid": "3e37d4a94c5cdb56d805eb8e552d5467", "score": "0.6275162", "text": "def resnet110g8t(**kwargs):\n return ResNet(BasicBlock, 110, groups=8, indices=\"trans\", **kwargs)", "title": "" }, { "docid": "ed0ce04b66b60d03ac717d17c51041b2", "score": "0.62715536", "text": "def resnet110g4(**kwargs):\n return ResNet(BasicBlock, 110, groups=4, **kwargs)", "title": "" }, { "docid": "7673eff4b427e8b9eb3140517598ae7c", "score": "0.62611073", "text": "def ResNet18(operator, num_classes):\n return ResNet(BasicBlock, [2, 2, 2, 2], num_classes = num_classes, operator = operator)", "title": "" }, { "docid": "ad3e9f294bcfe2e773fe23b44db8838d", "score": "0.6257425", "text": "def resnet101(pretrained=False, root='/mnt/projects/counting/models', **kwargs):\n model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)\n if pretrained:\n model.load_state_dict(torch.load(\n get_model_file('resnet101', root=root)), strict=False)\n return model", "title": "" }, { "docid": "991678d030ad0c182de342699a958bbf", "score": "0.6255826", "text": "def resnet(backbone='resnet50', inputs=None, modifier=None, **kwargs):\r\n # choose default input\r\n if inputs is None:\r\n if keras.backend.image_data_format() == 'channels_first':\r\n inputs = keras.layers.Input(shape=(3, 256, 256))\r\n else:\r\n inputs = keras.layers.Input(shape=(256, 256, 3))\r\n\r\n # create the resnet backbone\r\n if backbone == 'resnet50':\r\n resnet = ResNet50(include_top=False, weights='imagenet', input_tensor=inputs, classes=1)\r\n print(resnet.output)\r\n # resnet = keras_resnet.models.ResNet50(inputs, include_top=False, freeze_bn=True)\r\n # elif backbone == 'resnet101':\r\n # resnet = ResNet101(include_top=False, weights='imagenet', input_tensor=inputs, classes=1)\r\n # resnet = keras_resnet.models.ResNet101(inputs, include_top=False, freeze_bn=True)\r\n # elif backbone == 'resnet152':\r\n # resnet = ResNet152(include_top=False, weights='imagenet', input_tensor=inputs, classes=1)\r\n # resnet = keras_resnet.models.ResNet152(inputs, include_top=False, freeze_bn=True)\r\n else:\r\n raise ValueError('Backbone (\\'{}\\') is invalid.'.format(backbone))\r\n\r\n # invoke modifier if given\r\n if modifier:\r\n resnet = modifier(resnet)\r\n x = resnet.output\r\n print(x.shape)\r\n resnet = AveragePooling2D(pool_size=8)(x)\r\n resnet = Flatten()(resnet)\r\n\r\n outputs = Dense(1, activation='sigmoid', kernel_initializer='he_normal')(resnet)\r\n model = Model(inputs=inputs, outputs=outputs)\r\n\r\n # create the full model\r\n return model", "title": "" }, { "docid": "b4172adc623d638c5c7ace77e0872086", "score": "0.6247432", "text": "def resnet50(pretrained=False, **kwargs):\r\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\r\n return model", "title": "" }, { "docid": "070a702a3232b353b620d726e05a4e06", "score": "0.62423337", "text": "def build_net():\n dim = 1\n tap_matrix = np.zeros((NRN_N, dim))\n net = graph.Network(\"net\")\n net.create_pool(\"p\", tap_matrix)\n HAL.map(net)", "title": "" }, { "docid": "12f82a52927b499bfa7f6b406037a2b6", "score": "0.6241301", "text": "def _construct_network(self,\n resnet,\n slowfast,\n non_local,\n data,\n mdl\n\n ):\n pool_size = [[1, 1, 1], [1, 1, 1]]\n\n assert len({len(pool_size), self.num_pathways}) == 1\n assert resnet[\"depth\"] in _MODEL_STAGE_DEPTH.keys()\n\n (d2, d3, d4, d5) = _MODEL_STAGE_DEPTH[resnet[\"depth\"]]\n\n dim_inner = resnet[\"num_groups\"] * 
resnet[\"width_per_group\"]\n out_dim_ratio = (\n slowfast[\"beta_inv\"] // slowfast[\"fusion_conv_ch_ratio\"]\n )\n\n # Basis of temporal kernel sizes for each of the stage.\n temp_kernel = [\n [[1], [5]], # conv1 temporal kernel for slow and fast pathway.\n [[1], [3]], # res2 temporal kernel for slow and fast pathway.\n [[1], [3]], # res3 temporal kernel for slow and fast pathway.\n [[3], [3]], # res4 temporal kernel for slow and fast pathway.\n [[3], [3]], # res5 temporal kernel for slow and fast pathway.\n ]\n\n self.s1 = VideoModelStem(\n dim_in=data[\"in_channels\"],\n dim_out=[resnet[\"width_per_group\"], resnet[\"width_per_group\"] // slowfast[\"beta_inv\"]],\n kernel=[temp_kernel[0][0] + [7, 7], temp_kernel[0][1] + [7, 7]],\n stride=[[1, 2, 2]] * 2,\n padding=[\n [temp_kernel[0][0][0] // 2, 3, 3],\n [temp_kernel[0][1][0] // 2, 3, 3],\n ],\n norm_module=self.norm_module,\n range=data[\"range\"],\n train=self.training,\n )\n self.s1_fuse = FuseFastToSlow(\n resnet[\"width_per_group\"] // slowfast[\"beta_inv\"],\n slowfast[\"fusion_conv_ch_ratio\"],\n slowfast[\"fusion_kernel_sz\"],\n slowfast[\"alpha\"],\n norm_module=self.norm_module,\n train=self.training,\n )\n\n self.s2 = ResStage(\n dim_in=[\n resnet[\"width_per_group\"] + resnet[\"width_per_group\"] // out_dim_ratio,\n resnet[\"width_per_group\"] // slowfast[\"beta_inv\"],\n ],\n dim_out=[\n resnet[\"width_per_group\"] * 4,\n resnet[\"width_per_group\"] * 4 // slowfast[\"beta_inv\"],\n ],\n dim_inner=[dim_inner, dim_inner // slowfast[\"beta_inv\"]],\n temp_kernel_sizes=temp_kernel[1],\n stride=resnet[\"spatial_strides\"][0],\n num_blocks=[d2] * 2,\n num_groups=[resnet[\"num_groups\"]] * 2,\n num_block_temp_kernel=resnet[\"n_block_temp_kernel\"][0],\n nonlocal_inds=non_local[\"loc\"][0],\n nonlocal_group=non_local[\"group\"][0],\n nonlocal_pool=non_local[\"pool\"][0],\n instantiation=non_local[\"instantiation\"],\n trans_func_name=resnet[\"trans_func\"],\n dilation=resnet[\"spatial_dilations\"][0],\n norm_module=self.norm_module,\n train=self.training,\n )\n self.s2_fuse = FuseFastToSlow(\n resnet[\"width_per_group\"] * 4 // slowfast[\"beta_inv\"],\n slowfast[\"fusion_conv_ch_ratio\"],\n slowfast[\"fusion_kernel_sz\"],\n slowfast[\"alpha\"],\n norm_module=self.norm_module,\n train=self.training,\n )\n\n for pathway in range(self.num_pathways):\n pool = self.lib.MaxPool3d(\n kernel_size=pool_size[pathway],\n stride=pool_size[pathway],\n padding=[0, 0, 0],\n )\n self.add_module(\"pathway{}_pool\".format(pathway), pool)\n\n self.s3 = ResStage(\n dim_in=[\n resnet[\"width_per_group\"] * 4 + resnet[\"width_per_group\"] * 4 // out_dim_ratio,\n resnet[\"width_per_group\"] * 4 // slowfast[\"beta_inv\"],\n ],\n dim_out=[\n resnet[\"width_per_group\"] * 8,\n resnet[\"width_per_group\"] * 8 // slowfast[\"beta_inv\"],\n ],\n dim_inner=[dim_inner * 2, dim_inner * 2 // slowfast[\"beta_inv\"]],\n temp_kernel_sizes=temp_kernel[2],\n stride=resnet[\"spatial_strides\"][1],\n num_blocks=[d3] * 2,\n num_groups=[resnet[\"num_groups\"]] * 2,\n num_block_temp_kernel=resnet[\"n_block_temp_kernel\"][1],\n nonlocal_inds=non_local[\"loc\"][1],\n nonlocal_group=non_local[\"group\"][1],\n nonlocal_pool=non_local[\"pool\"][1],\n instantiation=non_local[\"instantiation\"],\n trans_func_name=resnet[\"trans_func\"],\n dilation=resnet[\"spatial_dilations\"][1],\n norm_module=self.norm_module,\n train=self.training,\n )\n self.s3_fuse = FuseFastToSlow(\n resnet[\"width_per_group\"] * 8 // slowfast[\"beta_inv\"],\n slowfast[\"fusion_conv_ch_ratio\"],\n 
slowfast[\"fusion_kernel_sz\"],\n slowfast[\"alpha\"],\n norm_module=self.norm_module,\n train=self.training,\n )\n\n self.s4 = ResStage(\n dim_in=[\n resnet[\"width_per_group\"] * 8 + resnet[\"width_per_group\"] * 8 // out_dim_ratio,\n resnet[\"width_per_group\"] * 8 // slowfast[\"beta_inv\"],\n ],\n dim_out=[\n resnet[\"width_per_group\"] * 16,\n resnet[\"width_per_group\"] * 16 // slowfast[\"beta_inv\"],\n ],\n dim_inner=[dim_inner * 4, dim_inner * 4 // slowfast[\"beta_inv\"]],\n temp_kernel_sizes=temp_kernel[3],\n stride=resnet[\"spatial_strides\"][2],\n num_blocks=[d4] * 2,\n num_groups=[resnet[\"num_groups\"]] * 2,\n num_block_temp_kernel=resnet[\"n_block_temp_kernel\"][2],\n nonlocal_inds=non_local[\"loc\"][2],\n nonlocal_group=non_local[\"group\"][2],\n nonlocal_pool=non_local[\"pool\"][2],\n instantiation=non_local[\"instantiation\"],\n trans_func_name=resnet[\"trans_func\"],\n dilation=resnet[\"spatial_dilations\"][2],\n norm_module=self.norm_module,\n train=self.training,\n )\n self.s4_fuse = FuseFastToSlow(\n resnet[\"width_per_group\"] * 16 // slowfast[\"beta_inv\"],\n slowfast[\"fusion_conv_ch_ratio\"],\n slowfast[\"fusion_kernel_sz\"],\n slowfast[\"alpha\"],\n norm_module=self.norm_module,\n train=self.training,\n )\n\n self.s5 = ResStage(\n dim_in=[\n resnet[\"width_per_group\"] * 16 + resnet[\"width_per_group\"] * 16 // out_dim_ratio,\n resnet[\"width_per_group\"] * 16 // slowfast[\"beta_inv\"],\n ],\n dim_out=[\n resnet[\"width_per_group\"] * 32,\n resnet[\"width_per_group\"] * 32 // slowfast[\"beta_inv\"],\n ],\n dim_inner=[dim_inner * 8, dim_inner * 8 // slowfast[\"beta_inv\"]],\n temp_kernel_sizes=temp_kernel[4],\n stride=resnet[\"spatial_strides\"][3],\n num_blocks=[d5] * 2,\n num_groups=[resnet[\"num_groups\"]] * 2,\n num_block_temp_kernel=resnet[\"n_block_temp_kernel\"][3],\n nonlocal_inds=non_local[\"loc\"][3],\n nonlocal_group=non_local[\"group\"][3],\n nonlocal_pool=non_local[\"pool\"][3],\n instantiation=non_local[\"instantiation\"],\n trans_func_name=resnet[\"trans_func\"],\n dilation=resnet[\"spatial_dilations\"][3],\n norm_module=self.norm_module,\n train=self.training,\n )\n\n self.head = ResNetBasicHead(\n dim_in=[\n resnet[\"width_per_group\"] * 32,\n resnet[\"width_per_group\"] * 32 // slowfast[\"beta_inv\"],\n ],\n num_classes=mdl[\"num_classes\"],\n pool_size=[\n [\n data[\"num_frames\"]\n // slowfast[\"alpha\"]\n // pool_size[0][0],\n data[\"crop_sz\"] // 32 // pool_size[0][1],\n data[\"crop_sz\"] // 32 // pool_size[0][2],\n ],\n [\n data[\"num_frames\"] // pool_size[1][0],\n data[\"crop_sz\"] // 32 // pool_size[1][1],\n data[\"crop_sz\"] // 32 // pool_size[1][2],\n ],\n ],\n dropout_rate=mdl[\"dropout\"],\n act_func=mdl[\"head_act\"],\n train=self.training,\n )", "title": "" }, { "docid": "d9776a35ef1727fc3402b7e55289c860", "score": "0.622898", "text": "def resnet_v2(input_shape, depth, num_classes=10):\n if (depth - 2) % 9 != 0:\n raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')\n # Start model definition.\n num_filters_in = 16\n num_res_blocks = int((depth - 2) / 9)\n\n inputs = Input(shape=input_shape)\n # v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths\n x = oct_resnet_layer(inputs=inputs,\n num_filters=num_filters_in,\n conv_first=True)\n\n # Instantiate the stack of residual units\n alpha = 0.5\n for stage in range(3):\n for res_block in range(num_res_blocks):\n activation = 'relu'\n batch_normalization = True\n strides = 1\n if stage == 0:\n num_filters_out = num_filters_in * 4\n if res_block == 0: # first 
layer and first stage\n activation = None\n batch_normalization = False\n else:\n num_filters_out = num_filters_in * 2\n if res_block == 0: # first layer but not first stage\n strides = 2 # downsample\n if stage == 2: # and res_block == (num_res_blocks - 1):\n alpha = 0\n\n # bottleneck residual unit\n y = oct_resnet_layer(inputs=x,\n num_filters=num_filters_in,\n kernel_size=1,\n strides=strides,\n activation=activation,\n batch_normalization=batch_normalization,\n conv_first=False,\n alpha=alpha)\n y = oct_resnet_layer(inputs=y,\n num_filters=num_filters_in,\n conv_first=False,\n alpha=alpha)\n y = oct_resnet_layer(inputs=y,\n num_filters=num_filters_out,\n kernel_size=1,\n conv_first=False,\n alpha=alpha)\n if res_block == 0:\n # linear projection residual shortcut connection to match\n # changed dims\n x = oct_resnet_layer(inputs=x,\n num_filters=num_filters_out,\n kernel_size=1,\n strides=strides,\n activation=None,\n batch_normalization=False,\n alpha=alpha)\n if alpha == 0:\n x = keras.layers.add([x, y])\n else:\n xh = keras.layers.add([x[0], y[0]])\n xl = keras.layers.add([x[1], y[1]])\n x = [xh, xl]\n\n num_filters_in = num_filters_out\n\n # Add classifier on top.\n # v2 has BN-ReLU before Pooling\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n\n # Instantiate model.\n model = Model(inputs=inputs, outputs=outputs)\n return model", "title": "" }, { "docid": "47158640e5969705f4051a03f1842997", "score": "0.6220799", "text": "def __init__(self):\n torch.nn.Module.__init__(self)\n resnet_model = torchvision.models.resnet34(pretrained=False)\n self.conv1 = resnet_model.conv1\n self.bn1 = resnet_model.bn1\n self.relu = resnet_model.relu\n self.maxpool = resnet_model.maxpool\n self.layer1 = resnet_model.layer1\n self.layer2 = resnet_model.layer2\n self.layer3 = resnet_model.layer3\n self.layer4 = resnet_model.layer4\n # Linear classifier.\n self.fc = torch.nn.Linear(512**2, 200)\n # Initialize the fc layers.", "title": "" }, { "docid": "62a66e8fcd92defd6fb99c591af30566", "score": "0.62054944", "text": "def __init__(self, nf=64, res_block3x3: int = 2, norm_layer: Callable[..., nn.Module] = nn.BatchNorm2d,\n use_dropout: bool = False):\n super(CnnEncoder, self).__init__()\n\n use_bias = False\n # First Conv layer\n model = [nn.Conv2d(3, nf, kernel_size=5, padding=2, bias=use_bias),\n norm_layer(nf),\n nn.ReLU(inplace=True)]\n\n if use_dropout:\n model += [nn.Dropout(0.1)]\n\n model += [nn.MaxPool2d(2)]\n\n # 2 resnet blocks\n model += [ResnetBlock(nf, nf, stride=1, norm_layer=norm_layer, use_dropout=use_dropout, drop=0.15),\n ResnetBlock(nf, nf, stride=1, norm_layer=norm_layer, use_dropout=use_dropout, drop=0.15)]\n\n # 1 downsampling layer\n model += [ResnetBlock(nf, nf * 2, stride=2, norm_layer=norm_layer, use_dropout=use_dropout, drop=0.15,\n downsample=downsample_(nf, nf * 2, stride=2, norm_layer=norm_layer))]\n nf = nf * 2\n\n for _ in range(res_block3x3):\n # 3 resnet blocks\n model += [ResnetBlock(nf, nf, stride=1, norm_layer=norm_layer, use_dropout=use_dropout, drop=0.2),\n ResnetBlock(nf, nf, stride=1, norm_layer=norm_layer, use_dropout=use_dropout, drop=0.2),\n ResnetBlock(nf, nf, stride=1, norm_layer=norm_layer, use_dropout=use_dropout, drop=0.2)]\n\n # 1 downsampling layer\n model += [ResnetBlock(nf, nf * 2, stride=2, norm_layer=norm_layer, use_dropout=use_dropout, drop=0.2,\n downsample=downsample_(nf, nf * 2, 
stride=2, norm_layer=norm_layer))]\n nf = nf * 2\n\n # 2 resnet blocks\n model += [ResnetBlock(nf, nf, stride=1, norm_layer=norm_layer, use_dropout=use_dropout, drop=0.25),\n ResnetBlock(nf, nf, stride=1, norm_layer=norm_layer, use_dropout=use_dropout, drop=0.25)]\n\n model += [nn.AdaptiveAvgPool2d((1, 1))]\n\n self.model = nn.Sequential(*model)", "title": "" }, { "docid": "f68d84f4434fa393e5eabe238f44863c", "score": "0.6203823", "text": "def resnet110g4t(**kwargs):\n return ResNet(BasicBlock, 110, groups=4, indices=\"trans\", **kwargs)", "title": "" }, { "docid": "f224a3bd7d95fe5f9e7fc78373799618", "score": "0.62018764", "text": "def build_network(self): \r\n self.network = input_data(shape = [None, 48, 48, 1])\r\n print(\"Input data \",self.network.shape[1:])\r\n self.network = conv_2d(self.network, 64, 5, activation = 'relu')\r\n print(\"Conv1 \",self.network.shape[1:])\r\n self.network = max_pool_2d(self.network, 3, strides = 2)\r\n print(\"Maxpool1 \",self.network.shape[1:])\r\n self.network = conv_2d(self.network, 64, 5, activation = 'relu')\r\n print(\"Conv2 \",self.network.shape[1:])\r\n self.network = max_pool_2d(self.network, 3, strides = 2)\r\n print(\"Maxpool2 \",self.network.shape[1:])\r\n self.network = conv_2d(self.network, 128, 4, activation = 'relu')\r\n print(\"Conv3 \",self.network.shape[1:])\r\n self.network = dropout(self.network, 0.3)\r\n print(\"Dropout \",self.network.shape[1:])\r\n self.network = fully_connected(self.network, 3072, activation = 'relu')\r\n print(\"Fully connected\",self.network.shape[1:])\r\n self.network = fully_connected(self.network, len(self.target_classes), activation = 'softmax')\r\n print(\"Output \",self.network.shape[1:])\r\n print(\"\\n\")\r\n # Generates a TrainOp which contains the information about optimization process - optimizer, loss function, etc\r\n self.network = regression(self.network,optimizer = 'momentum',metric = 'accuracy',loss = 'categorical_crossentropy')\r\n # Creates a model instance.\r\n self.model = tflearn.DNN(self.network,checkpoint_path = 'model_1_atul',max_checkpoints = 1,tensorboard_verbose = 2)\r\n # Loads the model weights from the checkpoint\r\n self.load_model()", "title": "" }, { "docid": "0ec5b6ad742daf664ef75560a76bed81", "score": "0.61910504", "text": "def resnet50(pretrained=False,**kwargs):\n model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)\n return model", "title": "" }, { "docid": "daf52f45fee9257c98b6dfdf0955e59c", "score": "0.6191048", "text": "def get_resnet(hparams, lr):\n\n def resnet(features, labels, mode, params):\n if hparams.use_tpu and 'batch_size' in params.keys():\n hparams.batch_size = params['batch_size']\n\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n\n def _residual(x, out_filter, stride, projection=False):\n \"\"\"Residual unit with 2 sub layers.\"\"\"\n is_variational = hparams.dropout_type is not None and \"variational\" in hparams.dropout_type\n\n orig_x = x\n if not is_variational:\n x = model_utils.batch_norm(x, hparams, is_training)\n x = tf.nn.relu(x)\n\n if projection:\n orig_x = model_utils.conv(\n x,\n 1,\n out_filter,\n hparams,\n is_training=is_training,\n strides=stride,\n name=\"shortcut\")\n\n with tf.variable_scope('sub1'):\n x = model_utils.conv(\n x,\n 3,\n out_filter,\n hparams,\n is_training=is_training,\n strides=stride,\n name='conv1')\n\n x = model_utils.batch_norm(x, hparams, is_training)\n x = tf.nn.relu(x)\n\n with tf.variable_scope('sub2'):\n x = model_utils.conv(\n x,\n 3,\n out_filter,\n hparams,\n is_training=is_training,\n strides=[1, 1, 1, 
1],\n name='conv2')\n\n x += orig_x\n\n return x\n\n def _bottleneck_residual(x, out_filter, stride, projection=False):\n \"\"\"Residual unit with 3 sub layers.\"\"\"\n\n is_variational = hparams.dropout_type is not None and \"variational\" in hparams.dropout_type\n\n orig_x = x\n if not is_variational:\n x = model_utils.batch_norm(x, hparams, is_training)\n x = tf.nn.relu(x)\n\n if projection:\n orig_x = model_utils.conv(\n x,\n 1,\n 4 * out_filter,\n hparams,\n is_training=is_training,\n strides=stride,\n name=\"shortcut\")\n\n with tf.variable_scope('sub1'):\n x = model_utils.conv(\n x,\n 1,\n out_filter,\n hparams,\n is_training=is_training,\n strides=[1, 1, 1, 1],\n name='conv1')\n x = model_utils.batch_norm(x, hparams, is_training)\n x = tf.nn.relu(x)\n with tf.variable_scope('sub2'):\n x = model_utils.conv(\n x,\n 3,\n out_filter,\n hparams,\n is_training=is_training,\n strides=stride,\n name='conv2')\n x = model_utils.batch_norm(x, hparams, is_training)\n x = tf.nn.relu(x)\n with tf.variable_scope('sub3'):\n x = model_utils.conv(\n x,\n 1,\n 4 * out_filter,\n hparams,\n is_training=is_training,\n strides=[1, 1, 1, 1],\n name='conv3')\n\n return orig_x + x\n\n def _l1():\n \"\"\"L1 weight decay loss.\"\"\"\n if hparams.l1_norm == 0:\n return 0\n\n costs = []\n for var in tf.trainable_variables():\n if \"DW\" in var.name and \"logit\" not in var.name:\n costs.append(tf.reduce_mean(tf.abs(var)))\n\n return tf.multiply(hparams.l1_norm, tf.add_n(costs))\n\n def _fully_connected(x, out_dim):\n \"\"\"FullyConnected layer for final output.\"\"\"\n prev_dim = np.product(x.get_shape().as_list()[1:])\n x = tf.reshape(x, [hparams.batch_size, prev_dim])\n w = tf.get_variable('DW', [prev_dim, out_dim])\n b = tf.get_variable(\n 'biases', [out_dim], initializer=tf.zeros_initializer())\n return tf.nn.xw_plus_b(x, w, b)\n\n def _global_avg_pool(x):\n assert x.get_shape().ndims == 4\n if hparams.data_format == \"channels_last\":\n return tf.reduce_mean(x, [1, 2])\n\n return tf.reduce_mean(x, [2, 3])\n\n def _stride_arr(stride):\n \"\"\"Map a stride scalar to the stride array for tf.nn.conv2d.\"\"\"\n if hparams.data_format == \"channels_last\":\n return [1, stride, stride, 1]\n\n return [1, 1, stride, stride]\n\n if mode == ModeKeys.PREDICT or mode == ModeKeys.ATTACK:\n if \"labels\" in features:\n labels = features[\"labels\"]\n\n with tf.variable_scope(\"resnet\", initializer=get_init(hparams)):\n hparams.mode = mode\n strides = [1, 2, 2, 2]\n res_func = (_residual\n if not hparams.use_bottleneck else _bottleneck_residual)\n filters = hparams.residual_filters\n large_input = hparams.input_shape[0] > 32\n\n # 3 and 16 picked from example implementation\n with tf.variable_scope('init'):\n x = features[\"inputs\"]\n stride = _stride_arr(2) if large_input else _stride_arr(1)\n x = model_utils.conv(\n x,\n 7,\n filters[0],\n hparams,\n strides=stride,\n dropout=False,\n name='init_conv')\n\n if large_input:\n x = tf.layers.max_pooling2d(\n inputs=x,\n pool_size=3,\n strides=2,\n padding=\"SAME\",\n data_format=hparams.data_format)\n\n with tf.variable_scope('unit_1_0'):\n x = res_func(x, filters[1], _stride_arr(strides[0]), True)\n\n for i in range(1, hparams.residual_units[0]):\n with tf.variable_scope('unit_1_%d' % i):\n x = res_func(x, filters[1], _stride_arr(1), False)\n\n with tf.variable_scope('unit_2_0'):\n x = res_func(x, filters[2], _stride_arr(strides[1]), True)\n\n for i in range(1, hparams.residual_units[1]):\n with tf.variable_scope('unit_2_%d' % i):\n x = res_func(x, filters[2], 
_stride_arr(1), False)\n\n with tf.variable_scope('unit_3_0'):\n x = res_func(x, filters[3], _stride_arr(strides[2]), True)\n\n for i in range(1, hparams.residual_units[2]):\n with tf.variable_scope('unit_3_%d' % i):\n x = res_func(x, filters[3], _stride_arr(1), False)\n\n if len(filters) == 5:\n with tf.variable_scope('unit_4_0'):\n x = res_func(x, filters[4], _stride_arr(strides[3]), True)\n\n for i in range(1, hparams.residual_units[3]):\n with tf.variable_scope('unit_4_%d' % i):\n x = res_func(x, filters[4], _stride_arr(1), False)\n\n x = model_utils.batch_norm(x, hparams, is_training)\n x = tf.nn.relu(x)\n\n with tf.variable_scope('unit_last'):\n x = _global_avg_pool(x)\n\n with tf.variable_scope('logit'):\n logits = _fully_connected(x, hparams.num_classes)\n predictions = tf.nn.softmax(logits)\n\n if mode in [ModeKeys.PREDICT, ModeKeys.ATTACK]:\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions={\n 'classes': tf.argmax(predictions, axis=1),\n 'logits': logits,\n 'probabilities': predictions,\n })\n\n with tf.variable_scope('costs'):\n xent = tf.losses.sparse_softmax_cross_entropy(\n labels=labels, logits=logits)\n cost = tf.reduce_mean(xent, name='xent')\n if is_training:\n cost += model_utils.weight_decay(hparams)\n cost += _l1()\n\n if hparams.dropout_type is not None:\n if \"louizos\" in hparams.dropout_type:\n cost += hparams.louizos_cost * model_utils.louizos_complexity_cost(\n hparams) / 50000\n\n if \"variational\" in hparams.dropout_type:\n # prior DKL part of the ELBO\n graph = tf.get_default_graph()\n node_defs = [\n n for n in graph.as_graph_def().node if 'log_alpha' in n.name\n ]\n log_alphas = [\n graph.get_tensor_by_name(n.name + \":0\") for n in node_defs\n ]\n print([\n n.name\n for n in graph.as_graph_def().node\n if 'log_alpha' in n.name\n ])\n print(\"found %i logalphas\" % len(log_alphas))\n divergences = [dropouts.dkl_qp(la) for la in log_alphas]\n # combine to form the ELBO\n N = float(50000)\n dkl = tf.reduce_sum(tf.stack(divergences))\n\n warmup_steps = 50000\n dkl = (1. 
/ N) * dkl * tf.minimum(\n 1.0,\n tf.to_float(tf.train.get_global_step()) /\n warmup_steps) * hparams.var_scale\n cost += dkl\n tf.summary.scalar(\"dkl\", dkl)\n\n if hparams.ard_cost > 0.0:\n cost += model_utils.ard_cost() * hparams.ard_cost\n\n if hparams.smallify > 0.0:\n cost += model_utils.switch_loss() * hparams.smallify\n\n # Summaries\n # ========================\n tf.summary.scalar(\"total_nonzero\", model_utils.nonzero_count())\n all_weights = tf.concat(\n [\n tf.reshape(v, [-1])\n for v in tf.trainable_variables()\n if \"DW\" in v.name\n ],\n axis=0)\n tf.summary.histogram(\"weights\", all_weights)\n # ========================\n\n return model_utils.model_top(labels, predictions, cost, lr, mode, hparams)\n\n return resnet", "title": "" }, { "docid": "312d03e6e8351b4935a864c8629dc063", "score": "0.6184535", "text": "def neural_network(x_train, x_test, y_train, y_test,\n x_pca, x_ica, x_kpca, x_rp,\n x_kmeans, x_gmm, **kwargs):\n\n print('\\n--------------------------')\n print('NN')\n print('--------------------------')\n\n # Declare Neural Network and perform experiments on the original dataset\n nn = NeuralNetwork(layer1_nodes=kwargs['layer1_nodes'],\n layer2_nodes=kwargs['layer2_nodes'],\n learning_rate=kwargs['learning_rate'])\n nn.experiment(x_train, x_test, y_train, y_test)\n\n print('\\n--------------------------')\n print('PCA + NN')\n print('--------------------------')\n\n # Declare Neural Network and perform experiments on the reduced dataset by PCA\n nn = NeuralNetwork(layer1_nodes=kwargs['layer1_nodes'],\n layer2_nodes=kwargs['layer2_nodes'],\n learning_rate=kwargs['learning_rate'])\n nn.experiment(x_pca[0], x_pca[1], y_train, y_test)\n\n print('\\n--------------------------')\n print('ICA + NN')\n print('--------------------------')\n\n # Declare Neural Network and perform experiments on the reduced dataset by ICA\n nn = NeuralNetwork(layer1_nodes=kwargs['layer1_nodes'],\n layer2_nodes=kwargs['layer2_nodes'],\n learning_rate=kwargs['learning_rate'])\n nn.experiment(x_ica[0], x_ica[1], y_train, y_test)\n\n print('\\n--------------------------')\n print('KPCA + NN')\n print('--------------------------')\n\n # Declare Neural Network and perform experiments on the reduced dataset by KPCA\n nn = NeuralNetwork(layer1_nodes=kwargs['layer1_nodes'],\n layer2_nodes=kwargs['layer2_nodes'],\n learning_rate=kwargs['learning_rate'])\n nn.experiment(x_kpca[0], x_kpca[1], y_train, y_test)\n\n print('\\n--------------------------')\n print('RP+ NN')\n print('--------------------------')\n\n # Declare Neural Network and perform experiments on the reduced dataset by RP\n nn = NeuralNetwork(layer1_nodes=kwargs['layer1_nodes'],\n layer2_nodes=kwargs['layer2_nodes'],\n learning_rate=kwargs['learning_rate'])\n nn.experiment(x_rp[0], x_rp[1], y_train, y_test)\n\n print('\\n--------------------------')\n print('KMEANS+ NN')\n print('--------------------------')\n\n # Declare Neural Network\n nn = NeuralNetwork(layer1_nodes=kwargs['layer1_nodes'],\n layer2_nodes=kwargs['layer2_nodes'],\n learning_rate=kwargs['learning_rate'])\n\n # Augment the original dataset by adding clusters produced by k-Means as features\n x_kmeans_normalized = (x_kmeans[0] - np.mean(x_kmeans[0])) / np.std(x_kmeans[0])\n x_kmeans_normalized = np.expand_dims(x_kmeans_normalized, axis=1)\n x_train_new = np.append(x_train, x_kmeans_normalized, axis=1)\n x_kmeans_normalized = (x_kmeans[1] - np.mean(x_kmeans[1])) / np.std(x_kmeans[1])\n x_kmeans_normalized = np.expand_dims(x_kmeans_normalized, axis=1)\n x_test_new = 
np.append(x_test, x_kmeans_normalized, axis=1)\n\n # Perform experiments on it\n nn.experiment(x_train_new, x_test_new, y_train, y_test)\n\n print('\\n--------------------------')\n print('GMM+ NN')\n print('--------------------------')\n\n # Declare Neural Network\n nn = NeuralNetwork(layer1_nodes=kwargs['layer1_nodes'],\n layer2_nodes=kwargs['layer2_nodes'],\n learning_rate=kwargs['learning_rate'])\n\n # Augment the original dataset by adding clusters produced by Gaussian Mixture Models as features\n x_gmm_normalized = (x_gmm[0] - np.mean(x_gmm[0])) / np.std(x_gmm[0])\n x_gmm_normalized = np.expand_dims(x_gmm_normalized, axis=1)\n x_train_new = np.append(x_train, x_gmm_normalized, axis=1)\n x_gmm_normalized = (x_gmm[1] - np.mean(x_gmm[1])) / np.std(x_gmm[1])\n x_gmm_normalized = np.expand_dims(x_gmm_normalized, axis=1)\n x_test_new = np.append(x_test, x_gmm_normalized, axis=1)\n\n # Perform experiments on it\n nn.experiment(x_train_new, x_test_new, y_train, y_test)", "title": "" } ]
68bc2820b2451fe9fecd4b7b9ba6ffd3
Tests if the input configuration can be used to successfully connect to the integration
[ { "docid": "be09aa3e96c01fdd9c2eb90dd94e1ad2", "score": "0.57417065", "text": "def check(self, logger: logging.Logger, config: ConfigContainer) -> AirbyteConnectionStatus:\n return self.check_config(logger, config.config_path, config)", "title": "" } ]
[ { "docid": "9f6d45e4a3e6db7ba445ddc1392439a1", "score": "0.7090428", "text": "def check_configuration(self):", "title": "" }, { "docid": "1683f2254f9c347148cf6a0d8ddbbe6e", "score": "0.70189494", "text": "def verify_config(self):\n pass", "title": "" }, { "docid": "813fa122bb4bf358d280cf9b6b3ba5df", "score": "0.70141184", "text": "def can_connect(self, **kwargs):\n try:\n self.client.get_config(**kwargs)\n except requests.exceptions.ConnectionError as ex:\n if type(ex) == requests.exceptions.ConnectionError:\n return False\n raise\n\n return True", "title": "" }, { "docid": "28bbf4a6cc0dbc8fbecae91eecde8e1e", "score": "0.693739", "text": "def check_configuration(self, configuration):\n super(Pstest, self).check_configuration(configuration)", "title": "" }, { "docid": "d2d98df08d909e624d7532a1dfdd7f47", "score": "0.6808109", "text": "def check_configuration(self):\n self.logger.debug(\"Checking the configuration.\")\n if self.conn_type != \"local\":\n self._pyez_conn.check_configuration()\n return\n\n if self.dev is None or self.config is None:\n self.fail_json(msg='The device or configuration is not open.')\n\n try:\n self.config.commit_check()\n self.logger.debug(\"Configuration checked.\")\n except (self.pyez_exception.RpcError,\n self.pyez_exception.ConnectError) as ex:\n self.fail_json(msg='Failure checking the configuraton: %s' %\n (str(ex)))", "title": "" }, { "docid": "15a5c6c2173081104a3d20d2e378a164", "score": "0.67287", "text": "def test_good_config():\n\n valid_discovery_config(\"almond\", {\"host\": \"test\", \"port\": 3812})", "title": "" }, { "docid": "33319f62b8605fd92d812b3031022c80", "score": "0.6458549", "text": "def validate_config(self):\n pass", "title": "" }, { "docid": "c7c830fcd543e2c2c2223333b1a1be22", "score": "0.6434383", "text": "def test_good_config():\n\n valid_discovery_config(\n \"matter\",\n {\"host\": \"test\", \"port\": 3812},\n )", "title": "" }, { "docid": "6befd81380ffbd8ea1e05d0bff429e04", "score": "0.63963103", "text": "def check_configuration(config, is_check_run):\n check_options(config['options'], is_check_run=is_check_run)\n check_outgoing(config['outgoing'])\n check_incoming(config['incoming'])\n check_openbis(config['openbis'])", "title": "" }, { "docid": "59b50e3d51aaa80979525a624c5e1c74", "score": "0.6355579", "text": "def test_is_connected(self):\n\t\tpass", "title": "" }, { "docid": "54a9303b508b33f5e0a05b32343b3d84", "score": "0.6353033", "text": "def test_check_config(get_integration, has_form, config):\n integration = get_integration(has_form=has_form)\n integration = integrations.get(integration)\n assert integration.check_config(\n Context(installation=factories.ApplicationInstallationFactory.build(config=config))\n )", "title": "" }, { "docid": "f372c46b4792120ca3f39861902f89f5", "score": "0.6346113", "text": "def test_failed_connection(self):\n #self.assertRaises() How to fuddle the config.* to ensure setUp()\n # fails in connecting?\n pass", "title": "" }, { "docid": "d03ca868b053f8b54d85ef68f56f67a4", "score": "0.63367605", "text": "def _check_configuration(self, raise_on_failure: bool = True) -> bool:\n valid = True\n if self.num_qubits is None:\n valid = False\n if raise_on_failure:\n raise ValueError(\"No number of qubits specified.\")\n\n if self.reps is None:\n valid = False\n if raise_on_failure:\n raise ValueError(\"No number of repetitions specified.\")\n\n return valid", "title": "" }, { "docid": "61600cac9ebfdeae0bc47421fed4e007", "score": "0.63110715", "text": "def validate_configuration(self, configuration: 
Optional[ExpectationConfiguration]):\n\n # # Setting up a configuration\n try:\n assert \"user_input\" in configuration.kwargs, \"user_input is required\"\n assert isinstance(\n configuration.kwargs[\"user_input\"], str\n ), \"user_input must be a string\"\n except AssertionError as e:\n raise InvalidExpectationConfigurationError(str(e))\n super().validate_configuration(configuration)\n return True", "title": "" }, { "docid": "b61f39f2b9b2cc92ee083592d4620009", "score": "0.626217", "text": "def test_should_enforce_compliance_on_login(self):\n # Parameters don't matter for this method as it only tests the config\n assert should_enforce_compliance_on_login()", "title": "" }, { "docid": "ced17bd5773805c0865a0b6800f9bc8d", "score": "0.6256411", "text": "def validate_configuration(self, configuration):\n\n raise NotImplementedError", "title": "" }, { "docid": "746fb4a801667b33d1f528e0d4886016", "score": "0.62414527", "text": "def _test_connection(self):\n try:\n self.client.query_api().query(query=self.query_TestConnectionQuery)\n return True\n except Exception as exc:\n return False", "title": "" }, { "docid": "8fd81a9931a4294f28b4a9fe242d29c4", "score": "0.62379795", "text": "def can_connect():\n try:\n _KafkaProducer(test=TEST)\n except kafka.errors.KafkaError:\n logger.debug(\"kafka_connection_failure\", exc_info=True)\n return False\n return True", "title": "" }, { "docid": "8099abd6b2e1acefbd8e65535606be43", "score": "0.6201674", "text": "async def test_zeroconf_cannot_connect(hass: HomeAssistant) -> None:\n\n result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": config_entries.SOURCE_ZEROCONF},\n data=FIXTURE_ZEROCONF,\n )\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert not result[\"errors\"]\n\n with patch(\n \"systembridgeconnector.websocket_client.WebSocketClient.connect\",\n side_effect=ConnectionErrorException,\n ):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], FIXTURE_AUTH_INPUT\n )\n await hass.async_block_till_done()\n\n assert result2[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result2[\"step_id\"] == \"authenticate\"\n assert result2[\"errors\"] == {\"base\": \"cannot_connect\"}", "title": "" }, { "docid": "9c21d95fb0503d658de66ba1dacb98aa", "score": "0.61999744", "text": "def doTestConfiguration():\n\n import configuration\n\n errors = []\n warnings = []\n values = []\n\n def error(message):\n errors.append(ConfigurationIssue(\"error\", message, values))\n def warn(message):\n warnings.append(ConfigurationIssue(\"warning\", message, values))\n\n class MissingValue(Exception):\n pass\n\n @contextlib.contextmanager\n def value(module, name):\n values.append(ConfigurationValue(module, name))\n if not hasattr(module, name):\n error(\"Configuration value missing: %s.%s\" % (module.__name__, name))\n raise MissingValue\n try:\n yield getattr(module, name)\n finally:\n del values[-1]\n\n try:\n with value(configuration.base, \"WEB_SERVER_INTEGRATION\") \\\n as web_server_integration:\n if web_server_integration not in (\"apache\", \"nginx+uwsgi\",\n \"uwsgi\", \"none\"):\n error(\"Invalid web server integration: must be one of \"\n \"'apache', 'nginx+uwsgi', 'uwsgi' and 'none'.\")\n except MissingValue:\n pass\n\n def checkProvider(providers, name):\n provider = providers[name]\n if provider.get(\"enabled\"):\n if not provider.get(\"client_id\"):\n error(\"Enabled external authentication provider %r must have \"\n \"'client_id' set.\" % name)\n if not 
provider.get(\"client_secret\"):\n error(\"Enabled external authentication provider %r must have \"\n \"'client_secret' set.\" % name)\n if name == \"google\" and not provider.get(\"redirect_uri\"):\n error(\"Enabled external authentication provider %r must have \"\n \"'redirect_uri' set.\" % name)\n if provider.get(\"bypass_createuser\") \\\n and provider.get(\"verify_email_addresses\"):\n error(\"Enabled external authentication provider %r can't have \"\n \"both 'bypass_createuser' and 'verify_email_addresses' \"\n \"enabled.\" % name)\n\n try:\n with value(configuration.base, \"AUTHENTICATION_MODE\") \\\n as authentication_mode:\n if authentication_mode == \"critic\":\n with value(configuration.base, \"SESSION_TYPE\") as session_type:\n if session_type not in (\"httpauth\", \"cookie\"):\n error(\"Invalid session type: must be one of 'httpauth' \"\n \"and 'cookie'.\")\n elif authentication_mode != \"host\":\n # Unconditional external authentication mode\n with value(configuration.base, \"SESSION_TYPE\") as session_type:\n if session_type != \"cookie\":\n error(\"Invalid session type: must be 'cookie' (with \"\n \"external authentication.)\")\n with value(configuration.auth, \"PROVIDERS\") as providers:\n if authentication_mode not in providers:\n error(\"Authentication mode must be 'host', 'critic' or \"\n \"name an external authentication provider.\")\n else:\n provider = providers[authentication_mode]\n if not provider.get(\"enabled\"):\n error(\"External authentication provider %r must be \"\n \"enabled.\" % authentication_mode)\n with value(configuration.base, \"REPOSITORY_URL_TYPES\") \\\n as repository_url_types:\n if \"http\" in repository_url_types:\n warn(\"HTTP/HTTPS repository URL type is incompatible \"\n \"with using an external authentication provider.\")\n except MissingValue:\n pass\n\n try:\n with value(configuration.auth, \"PROVIDERS\") as providers:\n for name in providers.keys():\n checkProvider(providers, name)\n except MissingValue:\n pass\n\n return (errors, warnings)", "title": "" }, { "docid": "54ab4246c9f8358076227d4d74def670", "score": "0.6192713", "text": "def check_config(self, logger: logging.Logger, config_path: str, config: ConfigContainer) -> AirbyteConnectionStatus:\n raise NotImplementedError", "title": "" }, { "docid": "ba4e47c80dff6a1ddb4ef5f5f6c418f5", "score": "0.6176863", "text": "def validate_config():\n if not get_config_path():\n print(\"> Configuration file 'config.json' doesn't exist, unable to continue.\")\n return False\n return True", "title": "" }, { "docid": "bf75994beacbf80eb321d55e71126bf3", "score": "0.61710536", "text": "def match_connection(connection: ConfigParser) -> bool:\n log.debug('YYYYYYYYYYYYYYYYYYY')\n log.debug(connection)\n if \"wifi\" not in connection:\n log.info(\"Connection is not Wifi, assuming no Captive Portal\")\n return False\n elif \"wifi-security\" in connection:\n log.info(\"Secured Wifi, assuming no Captive Portal\")\n return False\n elif True:\n help(connection)\n log.info(\"dunno lol\")\n else:\n log.info(\"Unsecured wifi, might be cdwifi!\")\n return True", "title": "" }, { "docid": "ecc1d8ee74f2dac8e578dd0a11e8b8a1", "score": "0.61434114", "text": "def check_connection(self, logger, config) -> Tuple[bool, any]:\n if self.start_date_key in config:\n start_date = self._parse_date(config[self.start_date_key])\n if type(start_date) is not datetime:\n return False, start_date\n\n if self.count_key in config:\n return False, self.invalid_conbination_message_template.format(self.start_date_key, 
self.count_key)\n\n if self.end_date_key in config:\n end_date = self._parse_date(config[self.end_date_key])\n if type(end_date) is not datetime:\n return False, end_date\n\n if self.count_key in config:\n return False, self.invalid_conbination_message_template.format(self.end_date_key, self.count_key)\n\n if self.start_date_key not in config:\n return False, f\"Cannot use {self.end_date_key} without specifying {self.start_date_key}.\"\n\n if start_date > end_date:\n return False, f\"Invalid values. start_date ({start_date}) needs to be lower than or equal to end_date ({end_date}).\"\n\n if self.count_key in config:\n count_value = config[self.count_key]\n if count_value < self.min_count_value or count_value >= self.max_count_value:\n return False, self.invalid_parameter_value_template.format(\n self.count_key,\n count_value,\n self.invalid_parameter_value_range_template.format(self.min_count_value, self.max_count_value),\n )\n\n try:\n stream = NasaApod(authenticator=None, config=config)\n records = stream.read_records(sync_mode=SyncMode.full_refresh)\n next(records)\n return True, None\n except requests.exceptions.RequestException as e:\n return False, e", "title": "" }, { "docid": "f16a180139be84bdd59675e716a72508", "score": "0.6100015", "text": "def checkConnection(url, name, expectedFormat):\n\n # can be called by e.g.\n # checkConnection(GEONETWORK.csw_url, 'geonetwork', 'xml')\n\n log.debug('Testing connection to ' + url)\n\n try:\n resp = requests.get(url)\n except requests.exceptions.ConnectionError:\n log.error('Connection Error to ' + name)\n return None\n\n try:\n if expectedFormat == 'xml':\n parsedresp = xmltodict.parse(resp.content)\n records = json.dumps(parsedresp)\n elif expectedFormat == 'json':\n parsedresp = json.loads(resp.text)\n\n log.debug('Connection successfull to ' + name)\n return True\n\n except Exception:\n log.error('Connection Error to ' + name)\n return None", "title": "" }, { "docid": "8912ae9fa97a402b2f1cc4013293ae16", "score": "0.6095582", "text": "def check_connectivity(self) -> bool:\n\n try:\n res = requests.get(self.baseUrl)\n if (res.json()[\"type\"]):\n print(\"You are using a DEV instance of Hopper! 
This is not intended for production!\")\n except ConnectionError as e:\n print(json.dumps(e))\n return False\n return True", "title": "" }, { "docid": "df5ba391f6c7e6f2eb9112d3b51265e5", "score": "0.60934573", "text": "def test_ConfigurationNeeded(self):\n testDriver = self._getDummyIPMIDriver()\n\n # configuration needed\n ipmi_all = IPMIController(testDriver, self.testUsername, self.testPass, self.testIP,\n self.testNetmask, self.testGW, self.testChannel, self.testRestart)\n self.assertEqual(ipmi_all._checkIfIPMIConfigurationNeeded(), True)\n\n # configuration not needed\n ipmi_miss_all = IPMIController()\n self.assertEqual(ipmi_miss_all._checkIfIPMIConfigurationNeeded(), False)", "title": "" }, { "docid": "dd49243cc7a2dc9b0f3b2ec751e8bfdd", "score": "0.6084502", "text": "def isConnected(self):\n # if either of this fails, we keep trying to connect\n return not (self._timekprUserAdminDbusInterface is None or self._timekprAdminDbusInterface is None), not self._initFailed", "title": "" }, { "docid": "138108de213e79ac5026c503d81529d1", "score": "0.6080489", "text": "def test_connect():\n\n lp = Launchpad()\n\n assert lp == lp.connect()\n assert lp.input.is_port_open() is True\n assert lp.output.is_port_open() is True", "title": "" }, { "docid": "5e5c17115d9d69e22a416be4b566fdac", "score": "0.60731435", "text": "def applicable(cls, config: str) -> bool:", "title": "" }, { "docid": "67dddcdfea1e52c92a539a332b91ab74", "score": "0.6048404", "text": "def validate(self) -> t.NoReturn:\n validate_helper(self.config, \"ip\", str)\n validate_helper(self.config, \"user\", str)\n validate_helper(self.config, \"port\", int)\n validate_helper(self.config, \"key\", str, Path)\n validate_helper(self.config, \"gateway\", type(None), str)", "title": "" }, { "docid": "130445ce431538fa78e3584428d7a5cd", "score": "0.60478246", "text": "def test_01_Config(self):\n # print(PrettyFormatAny.form(self.m_test_config, 'A1-01-A - Config'))\n # print(PrettyFormatAny.form(self.m_pyhouse_obj.House, 'PyHouse House'))\n self.assertIsNotNone(self.m_test_config['Outlets'])", "title": "" }, { "docid": "05887eab37def6f55d15773ff076e2f4", "score": "0.60441494", "text": "def check_configuration(self, configuration):\n super(CoffeeBreak, self).check_configuration(configuration)", "title": "" }, { "docid": "0c16162d974fa31eb43c6e95c3fb5e97", "score": "0.6035701", "text": "def test_has_urlconf(self):\n errors = URLConfHandler.check(URLConfRoute)\n self.assertEqual(errors, [])", "title": "" }, { "docid": "fd497481052a47e0e01aad2a32c27ad1", "score": "0.60279965", "text": "def __validate(self, config):\n if config[\"fabric\"][\"fabric_network_file\"] is None:\n logging.error(\"Fabric network configuration is not set!!\")\n return False\n self.__tcf_home = environ.get(\"TCF_HOME\", \"../../../\")\n self.__network_conf_file = self.__tcf_home + \"/\" + \\\n config[\"fabric\"][\"fabric_network_file\"]\n if not exists(self.__network_conf_file):\n raise FileNotFoundError(\n \"File not found at path: {0}\".format(\n realpath(self.__network_conf_file)))\n if config[\"fabric\"][\"channel_name\"] is None:\n logging.error(\"Channel name is not specified\")\n return False\n return True", "title": "" }, { "docid": "cfe89c65accaa23a305d6e918a850326", "score": "0.6018425", "text": "def test_read_config(self):\n response = self.openbts_connection.read_config('sample-key')\n # check that we touched the socket to send the message\n self.assertTrue(self.openbts_connection.socket.send.called)\n expected_message = json.dumps({\n 'command': 'config',\n 
'action': 'read',\n 'key': 'sample-key',\n 'value': ''\n })\n # check that we've sent the expected message\n self.assertEqual(self.openbts_connection.socket.send.call_args[0],\n (expected_message,))\n # we should have touched the socket again to receive the reply\n self.assertTrue(self.openbts_connection.socket.recv.called)\n # verify we received a valid response\n self.assertEqual(response.code, 204)", "title": "" }, { "docid": "bf39d20d5b5c44cde8efdf9f578ee206", "score": "0.60104084", "text": "def test_connection(self):\n # Ideally this function would check that we can connect\n # via the internet as well as check for HTTP errors, like\n # 404 Not Found type messages that are clearly not good.\n warnings.warn('Not Implemented.')", "title": "" }, { "docid": "87c091d57e91c8dc7edb3796d979c863", "score": "0.5994891", "text": "def test_connection(self) -> None:", "title": "" }, { "docid": "e7d3562c54ee51a5934f2d416f30c9ce", "score": "0.59917384", "text": "def __check_valid_conf(self):\n\n # check whether default value is used for mlflow uri\n return self.mlflow_uri != \"https://url.of.the.mlflow.server/\"", "title": "" }, { "docid": "b85f1d79464caed8c77dff2272bedf6e", "score": "0.5988311", "text": "def is_valid_config(self, config):\n return config[0] in self.get_configs()", "title": "" }, { "docid": "65bd27524e3d434853ee3f171fffda9c", "score": "0.5985696", "text": "def _is_valid_configuration(self, config: ConfigParser, database_credential_file_path: str = None) -> bool:\n if not config.has_section('PY_EXPERIMENTER'):\n self.logger.error('Error in config file: PY_EXPERIMENTER section is missing')\n return False\n\n if not {'provider', 'database', 'table'}.issubset(set(config.options('PY_EXPERIMENTER'))):\n self.logger.error('Error in config file: DATABASE section must contain provider, database, and table')\n return False\n\n if config['PY_EXPERIMENTER']['provider'] not in ['sqlite', 'mysql']:\n self.logger.error('Error in config file: DATABASE provider must be either sqlite or mysql')\n return False\n\n if config['PY_EXPERIMENTER']['provider'] == 'mysql':\n credentials = utils.load_config(database_credential_file_path)\n if not {'host', 'user', 'password'}.issubset(set(credentials.options('CREDENTIALS'))):\n self.logger.error(\n f'Error in config file: CREDENTIALS file and section must contain host, user, and password since provider is {config[\"DATABASE\"][\"provider\"]}')\n return False\n\n if not 'keyfields' in config.options('PY_EXPERIMENTER'):\n self.logger.error('Error in config file: PY_EXPERIMENTER section must contain keyfields')\n return False\n return True", "title": "" }, { "docid": "24b968c62da99efc4cbde3a818c8ee06", "score": "0.59803253", "text": "def valid(cfg):\n pass", "title": "" }, { "docid": "268be799bd3b5c562c0879a314519678", "score": "0.5974383", "text": "def verify_core_connection():\n if not base_url or not api_credentials:\n retrieve_connection_info()\n return", "title": "" }, { "docid": "06645cfc5c41668a97376b2bdce5f984", "score": "0.59725136", "text": "def test_config_creds_wrong(self):\n\n pass", "title": "" }, { "docid": "76f23ce0480c9cb7c87f9a83a0287532", "score": "0.5965636", "text": "def isConnected():", "title": "" }, { "docid": "9026a0281d56838c1b793d16374c69ea", "score": "0.5965309", "text": "async def test_config_entry_not_ready(opp: OpenPeerPower) -> None:\n entry = await setup_integration(opp)\n\n assert entry.state is ConfigEntryState.SETUP_RETRY", "title": "" }, { "docid": "835d9b13b31724481d56c36ff0966bd5", "score": "0.59640545", "text": "def 
check_config_validity(cfg):\n assert 'model' in cfg\n assert 'training' in cfg\n assert 'validation' in cfg\n assert 'evaluation' in cfg\n assert 'data' in cfg\n\n data = cfg['data']\n assert 'dataset' in data\n assert 'path' in data\n assert 'n_classes' in data\n assert 'split' in data\n assert 'resize_factor' in data\n assert 'label' in data\n\n d = cfg['training']\n s = 'training'\n assert 'train_iters' in d, s\n assert 'val_interval' in d, s\n assert 'print_interval' in d, s\n assert 'optimizer' in d, s\n assert 'loss' in d, s\n assert 'batch_size' in d, s\n assert 'n_workers' in d, s\n\n d = cfg['validation']\n s = 'validation'\n assert 'batch_size' in d, s\n assert 'n_workers' in d, s\n\n d = cfg['evaluation']\n s = 'evaluation'\n assert 'batch_size' in d, s\n assert 'n_workers' in d, s\n assert 'num_crop_width' in d, s\n assert 'num_crop_height' in d, s\n\n\n print('config file validation passed')", "title": "" }, { "docid": "7da40b9a0c8894edf3be6a358bbd4103", "score": "0.5953311", "text": "def check_config(self, logger: AirbyteLogger, config_path: str, config: json) -> AirbyteConnectionStatus:\n try:\n # If an app on the appstore does not support subscriptions or sales, it cannot pull the relevant reports.\n # However, the way the Appstore API expresses this is not via clear error messages. Instead it expresses it by throwing an unrelated\n # error, in this case \"invalid vendor ID\". There is no way to distinguish if this error is due to invalid credentials or due to\n # the account not supporting this kind of report. So to \"check connection\" we see if any of the reports can be pulled and if so\n # return success. If no reports can be pulled we display the exception messages generated for all reports and return failure.\n api_fields_to_test = {\n \"subscription_event_report\": {\n \"reportType\": \"SUBSCRIPTION_EVENT\",\n \"frequency\": \"DAILY\",\n \"reportSubType\": \"SUMMARY\",\n \"version\": \"1_2\",\n },\n \"subscriber_report\": {\"reportType\": \"SUBSCRIBER\", \"frequency\": \"DAILY\", \"reportSubType\": \"DETAILED\", \"version\": \"1_2\"},\n \"subscription_report\": {\"reportType\": \"SUBSCRIPTION\", \"frequency\": \"DAILY\", \"reportSubType\": \"SUMMARY\", \"version\": \"1_2\"},\n \"sales_report\": {\"reportType\": \"SALES\", \"frequency\": \"DAILY\", \"reportSubType\": \"SUMMARY\", \"version\": \"1_0\"},\n }\n\n api = Api(config[\"key_id\"], config[\"key_file\"], config[\"issuer_id\"])\n stream_to_error = {}\n for stream, params in api_fields_to_test.items():\n test_date = date.today() - timedelta(days=2)\n report_filters = {\"reportDate\": test_date.strftime(\"%Y-%m-%d\"), \"vendorNumber\": f\"{config['vendor']}\"}\n report_filters.update(api_fields_to_test[stream])\n try:\n rep_tsv = api.download_sales_and_trends_reports(filters=report_filters)\n if isinstance(rep_tsv, dict):\n raise Exception(f\"An exception occurred: Received a JSON response instead of\" f\" the report: {str(rep_tsv)}\")\n except Exception as e:\n logger.warn(f\"Unable to download {stream}: {e}\")\n stream_to_error[stream] = e\n\n # All streams have failed\n if len(stream_to_error.keys()) == api_fields_to_test.keys():\n message = \"\\n\".join([f\"Unable to access {stream} due to error: {e}\" for stream, e in stream_to_error])\n return AirbyteConnectionStatus(status=Status.FAILED, message=message)\n\n return AirbyteConnectionStatus(status=Status.SUCCEEDED)\n except Exception as e:\n logger.warn(e)\n return AirbyteConnectionStatus(status=Status.FAILED, message=f\"An exception occurred: {str(e)}\")", 
"title": "" }, { "docid": "68baa2bccdf3073871d4e41ac0bdbfae", "score": "0.59510756", "text": "def testInitialization(self):\n configuration = configurations.CredentialConfiguration()\n self.assertIsNotNone(configuration)", "title": "" }, { "docid": "2531b6519c33a365d866d9597bc49b68", "score": "0.59431225", "text": "def _check_configuration(self, raise_on_failure: bool = True) -> bool:\n if not super()._check_configuration(raise_on_failure):\n return False\n\n if self.operators is None:\n if raise_on_failure:\n raise ValueError(\"The operators are not set.\")\n return False\n\n return True", "title": "" }, { "docid": "5863d27f202c44887b03c5d248ee8972", "score": "0.5935505", "text": "def validate_connection(self) -> None:\n raise NotImplementedError()", "title": "" }, { "docid": "ba1e5b6a40dd535c8c5bf504ed7ac87b", "score": "0.59256226", "text": "def test_connect_ib(self):\n self.assertTrue(self.ib_connection.is_connected())\n self.assertEqual(self.ib_connection.account, PAPER_ACCOUNT)", "title": "" }, { "docid": "58d4eff9aaee4af0fcf0daeda58cfe7c", "score": "0.5899625", "text": "def verify_connectivity(self, **config) -> None:\n if config:\n experimental_warn(\n \"All configuration key-word arguments to \"\n \"verify_connectivity() are experimental. They might be \"\n \"changed or removed in any future version without prior \"\n \"notice.\"\n )\n with self.session(**config) as session:\n session._get_server_info()", "title": "" }, { "docid": "ebae4ab0a58414635c8b0500ff76adea", "score": "0.5896209", "text": "async def validate_ha_config(\n ip: str,\n auth_token: str,\n device_type: str,\n session: Optional[ClientSession] = None,\n timeout: int = DEFAULT_TIMEOUT,\n ) -> bool:\n return await VizioAsync(\n \"\", ip, \"\", auth_token, device_type, session=session, timeout=timeout\n ).can_connect_with_auth_check()", "title": "" }, { "docid": "6013d7467480519524b7284d7df3a67c", "score": "0.588952", "text": "def _is_parameters_ok(self):\n\n if self.configuration['credentials_file'] is None:\n raise InvalidParameterException(\"Google news needs a credentials_file\")\n\n if self.configuration['client_secret_file'] is None:\n raise InvalidParameterException(\"Google news needs a client_secret_file\")\n\n if self.configuration['max_results'] is None:\n raise InvalidParameterException(\"Google news needs a max_results\")\n\n if self.configuration['application_name'] is None:\n raise InvalidParameterException(\"Google news needs a application_name\")\n\n return True", "title": "" }, { "docid": "dcf86d6098df84edbf8ee9e68c133672", "score": "0.58885705", "text": "def __connection_ok__(self):\n url = BASE_URL + 'connection_test'\n try:\n r=requests.get(url)\n if r.text == 'OK':\n logging.info('Connection OK')\n return True\n except Exception as e:\n \n return False", "title": "" }, { "docid": "6f654bbc0329076ec8de6863142ad0ad", "score": "0.5883602", "text": "async def test_config_flow_user_initiated_connect_failure(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n assert result[\"type\"] == \"form\"\n assert result[\"step_id\"] == \"user\"\n assert result[\"errors\"] == {}\n\n with patch(\n \"homeassistant.components.fibaro.FibaroClient.connect\",\n return_value=False,\n ):\n result = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {\n CONF_URL: TEST_URL,\n CONF_USERNAME: TEST_USERNAME,\n CONF_PASSWORD: TEST_PASSWORD,\n },\n )\n\n assert result[\"type\"] == \"form\"\n assert 
result[\"step_id\"] == \"user\"\n assert result[\"errors\"] == {\"base\": \"cannot_connect\"}", "title": "" }, { "docid": "66af8b9dee24bb2d0d478b1c75485a4a", "score": "0.5881245", "text": "def is_config_valid() -> int:\n otrs_cli_out: str = call_otrs_cli(\"Admin::Config::ListInvalid\")\n if \"All settings are valid.\" in otrs_cli_out:\n return 1\n else:\n return 0", "title": "" }, { "docid": "3125e3edcc15eee15094aef95bd0f324", "score": "0.5879759", "text": "def verify_interface_config_rejected(device, interface):\n try:\n device.configure(\"int {interface}\".format(interface=interface))\n except SubCommandFailure as e:\n return True\n\n return False", "title": "" }, { "docid": "dd8c7c8b165c46c06351023b2e33a05c", "score": "0.5879533", "text": "def _validate_config(self, config: dict) -> bool:\n if config.get(CONFIG_SERVICETYPE) not in (SERVICETYPE_CIP, SERVICETYPE_LB, SERVICETYPE_NP):\n logger.error(f\"config - service type {config.get(CONFIG_SERVICETYPE)} is not recognized\")\n return False\n if config.get(CONFIG_SERVICEHTTPPORT) is None or config.get(CONFIG_SERVICEEDGEPORT) is None:\n logger.error(f\"config - service http or edge port cannot be None\")\n return False\n if config.get(CONFIG_SERVICEHTTPPORT) == config.get(CONFIG_SERVICEEDGEPORT):\n logger.error(f\"config - service http and edge port cannot be the same\")\n return False\n if (config.get(CONFIG_SERVICEHTTPNODEPORT) == config.get(CONFIG_SERVICEEDGENODEPORT)\n and config.get(CONFIG_SERVICEHTTPNODEPORT) is not None):\n logger.error(f\"config - service http and edge node port cannot be the same\")\n return False\n return True", "title": "" }, { "docid": "53a96e5d155e99ec9969a327124b29f4", "score": "0.5875734", "text": "def check_for_setup_error(self):\n # check the required flags in conf\n required_flags = ['san_ip', 'san_login', 'san_password',\n 'as13000_ipsan_pools']\n for flag in required_flags:\n value = self.configuration.safe_get(flag)\n if not value:\n msg = (_('Required flag %s is not set.') % flag)\n LOG.error(msg)\n raise exception.InvalidConfigurationValue(option=flag,\n value=value)\n\n # make sure at least one node can\n if not self.nodes:\n msg = _('No healthy nodes are available!')\n LOG.error(msg)\n raise exception.VolumeDriverException(message=msg)", "title": "" }, { "docid": "16ee6223e7f540d89770fb1453a8641d", "score": "0.58732307", "text": "def _is_parameters_ok(self):\n if self.url is None:\n raise MissingParameterException(\"Domoka neuron needs an url\")\n\n return True", "title": "" }, { "docid": "3a8d091db34528c7e91e399fdbbe1f97", "score": "0.5862823", "text": "def check_settings(self):\n if self.app.config['AWS_ACCESS_KEY_ID'] and not self.app.config['AWS_SECRET_ACCESS_KEY']:\n raise ConfigurationError('You must specify AWS_SECRET_ACCESS_KEY if you are specifying AWS_ACCESS_KEY_ID.')\n\n if self.app.config['AWS_SECRET_ACCESS_KEY'] and not self.app.config['AWS_ACCESS_KEY_ID']:\n raise ConfigurationError('You must specify AWS_ACCESS_KEY_ID if you are specifying AWS_SECRET_ACCESS_KEY.')\n\n if self.app.config['DYNAMO_ENABLE_LOCAL'] and not (self.app.config['DYNAMO_LOCAL_HOST'] and self.app.config['DYNAMO_LOCAL_PORT']):\n raise ConfigurationError('If you have enabled Dynamo local, you must specify the host and port.')", "title": "" }, { "docid": "e8836e898a251b0125f73efbea45c71a", "score": "0.58626425", "text": "def test_connection(self):\n self.assertIsInstance(self.hpc.connection,\n pc.connections.NoTierConnection)", "title": "" }, { "docid": "08d4edc33bdb1a23ab80ad3085c39dd8", "score": "0.58515334", 
"text": "def test_connection(self):\n try:\n self.System.getDeviceInfo()\n except HikvisionException as e:\n raise HikvisionException(\"Error while testing connection: %s\" % e)\n except ConnectionError as e:\n raise HikvisionException(\"Error while testing connection: %s\" % e)", "title": "" }, { "docid": "64a9d20d537e6034d997ab68e80020c9", "score": "0.5844448", "text": "def check_config_loaded_fixture():\n return True", "title": "" }, { "docid": "e46868e79d0bc72776ef51a3124a5f91", "score": "0.5844037", "text": "def validate_configuration(\n self, configuration: Optional[ExpectationConfiguration]\n ) -> None:\n\n super().validate_configuration(configuration)\n configuration = configuration or self.configuration\n\n # # Check other things in configuration.kwargs and raise Exceptions if needed\n # try:\n # assert (\n # ...\n # ), \"message\"\n # assert (\n # ...\n # ), \"message\"\n # except AssertionError as e:\n # raise InvalidExpectationConfigurationError(str(e))", "title": "" }, { "docid": "9bf7a77dc8448af4cdaad864c536a7ea", "score": "0.58434564", "text": "def check_config(config):\n not_defined_confs = [\n elem for elem\n in (\n 'BZ_USER', 'BZ_PASS', 'BZ_SERVER',\n 'GERRIT_SRV', 'PRODUCTS', 'TRACKER_ID',\n 'CLASSIFICATIONS',\n )\n if elem not in config\n ]\n\n if not_defined_confs:\n logger.error(\"Missing configuration values %s\" % ', '.join(\n not_defined_confs\n ))\n sys.exit(1)", "title": "" }, { "docid": "e88a1dd9ef54a2a41ed26d509e4079a8", "score": "0.58433926", "text": "def _check_configuration(self, raise_on_failure=True):\n valid = True\n if self._num_state_qubits is None:\n valid = False\n if raise_on_failure:\n raise AttributeError(\"The number of state qubits has not been set.\")\n\n if self._num_state_qubits != len(self.weights):\n valid = False\n if raise_on_failure:\n raise ValueError(\"Mismatching number of state qubits and weights.\")\n\n return valid", "title": "" }, { "docid": "680a992c6840f11b8f9fee0bf682939e", "score": "0.5831322", "text": "def _assert_orion_settings_are_compatible(self):\n api_url = self.env.get(\"PREFECT_API_URL\", PREFECT_API_URL.value())\n\n if not api_url:\n raise RuntimeError(\n \"The docker flow runner cannot be used with an ephemeral server. 
\"\n \"Provide `PREFECT_API_URL` to connect to an Orion server.\"\n )", "title": "" }, { "docid": "7c303afbf0ecffe64f6c660649475ad4", "score": "0.582118", "text": "def check_connection(self, logger, config) -> Tuple[bool, any]:\n authenticator = SearchMetricsAuthenticator(config)\n\n try:\n url = \"https://api.searchmetrics.com/v4/AdminStatusGetListProjects.json\"\n\n auth_headers = {\"Accept\": \"application/json\", **authenticator.get_auth_header()}\n session = requests.get(url, headers=auth_headers)\n session.raise_for_status()\n\n return True, None\n except requests.exceptions.RequestException as e:\n return False, e", "title": "" }, { "docid": "d8c3ef90feaab51ad751168d256414e2", "score": "0.5818218", "text": "def test_mlflow_config_constructor():\n conf = DatabricksConfig(TEST_HOST, TEST_USER, TEST_PASSWORD, TEST_TOKEN, insecure=False)\n assert conf.host == TEST_HOST\n assert conf.username == TEST_USER\n assert conf.password == TEST_PASSWORD\n assert conf.token == TEST_TOKEN\n assert conf.insecure is False", "title": "" }, { "docid": "a3dee707c89d337f09d42d02376f327c", "score": "0.5815822", "text": "def test_connect(check, instance, aggregator):\n check.check(instance)\n aggregator.assert_metric(\"nginx.net.connections\", tags=TAGS, count=1)\n extra_tags = ['host:{}'.format(HOST), 'port:{}'.format(PORT)]\n aggregator.assert_service_check('nginx.can_connect', tags=TAGS + extra_tags)", "title": "" }, { "docid": "6f485ece77dce7abf1142a1b52aa05ef", "score": "0.5814194", "text": "def test_connect_client(self):\n\t\tconfirmation = self.client._connect_client(host,port)\n\n\t\tself.assertTrue(confirmation)\n\n\t\t#Calls the setup method (this is the init, so no other init is needed):", "title": "" }, { "docid": "27c59984f14561c4e3f4007146e7086e", "score": "0.5811204", "text": "def test_configuration(self):\n\n # Test the driver is in state unconfigured.\n state = self.driver_client.cmd_dvr('get_resource_state')\n self.assertEqual(state, DriverConnectionState.UNCONFIGURED)\n\n # Configure driver for comms and transition to disconnected.\n reply = self.driver_client.cmd_dvr('configure', self.port_agent_comm_config())\n\n # Test the driver is configured for comms.\n state = self.driver_client.cmd_dvr('get_resource_state')\n self.assertEqual(state, DriverConnectionState.DISCONNECTED)\n\n # Initialize the driver and transition to unconfigured.\n reply = self.driver_client.cmd_dvr('initialize')\n\n # Test the driver returned state unconfigured.\n state = self.driver_client.cmd_dvr('get_resource_state')\n self.assertEqual(state, DriverConnectionState.UNCONFIGURED)", "title": "" }, { "docid": "cb1bfe29cb7e10a5ff7ded473aa880cb", "score": "0.5809713", "text": "def test_connect_config_not_dict(self):\n config = 1\n with pytest.raises(Exception) as exception:\n self.client = aerospike.client(config).connect()\n assert exception.value[0] == -1\n assert exception.value[1] == \"Parameters are incorrect\"", "title": "" }, { "docid": "0631be8bd3fa66dae7a7e504e88ff419", "score": "0.5804145", "text": "def _is_parameters_ok(self):\n # with the neuron the user has the choice of a direct link that call another synapse,\n # or a link with an answer caught from the STT engine\n\n # we cannot use at the same time a direct redirection and a link with question\n if self.direct_link is not None and self.from_answer_link is not None:\n raise InvalidParameterException(\"neurotransmitter cannot be used with both direct_link and from_answer_link\")\n\n if self.direct_link is None and self.from_answer_link is None:\n raise 
MissingParameterException(\"neurotransmitter must be used with direct_link or from_answer_link\")\n\n if self.from_answer_link is not None:\n if self.default is None:\n raise InvalidParameterException(\"default parameter is required and must contain a valid synapse name\")\n for el in self.from_answer_link:\n if \"synapse\" not in el:\n raise MissingParameterException(\"Links must contain a synapse name: %s\" % el)\n if \"answers\" not in el:\n raise MissingParameterException(\"Links must contain answers: %s\" % el)\n\n return True", "title": "" }, { "docid": "4edfc2144138af7b237eeac2007af019", "score": "0.58021224", "text": "def check_interface_config(interface):\n for config_item in BASE_CONFIG:\n # If any of the things from BASE_CONFIG are missing, fail compliance\n if any(config_item in item for item in interface) == False:\n return False\n\n # If 'speed' or 'duplex' is in the interface config, fail compliance\n if any('speed' in item for item in interface) or any('duplex' in item for item in interface):\n return False\n\n return True", "title": "" }, { "docid": "bdd44d4572a2f8aa44cbaf68bb69facc", "score": "0.5801598", "text": "def test_connect():\n assert None == hztn.tn\n hztn.connect()\n assert None != hztn.tn", "title": "" }, { "docid": "1d209595a49fd8d5d57004d742d5fb97", "score": "0.57868385", "text": "def test_load_config(self):\n self.assertIsNotNone(conf.load_config(None))", "title": "" }, { "docid": "05a922e189afafb04363008da9c00354", "score": "0.5784594", "text": "def validate_input(data: dict) -> bool:\n SIAAccount(data[CONF_ACCOUNT], data.get(CONF_ENCRYPTION_KEY))\n\n try:\n ping = int(data[CONF_PING_INTERVAL])\n assert 1 <= ping <= 1440\n except AssertionError:\n raise InvalidPing\n try:\n zones = int(data[CONF_ZONES])\n assert zones > 0\n except AssertionError:\n raise InvalidZones\n\n return True", "title": "" }, { "docid": "33272002dff199a4040674c3f609d642", "score": "0.57833093", "text": "def test_incomplete_config(incomplete_config):\r\n try:\r\n Config()\r\n assert False\r\n except MissingConfigError as e:\r\n missing_fields = ['SLACK_NOTIFICATION_CHANNEL', 'SLACK_SIGNING_SECRET',\r\n 'SLACK_API_TOKEN', 'SLACK_ANNOUNCEMENT_CHANNEL']\r\n for field in missing_fields:\r\n assert field in e.error", "title": "" }, { "docid": "002228937a78add93394e8d8b751508b", "score": "0.57818913", "text": "def accelize_credentials_available():\n from apyfal.configuration import Configuration\n config = Configuration()\n if not has_accelize_credential(config):\n pytest.skip('Accelize Credentials required')\n return config", "title": "" }, { "docid": "5123e33273ba7b2eec9f550fe13ce138", "score": "0.57781035", "text": "def validate_configuration(\n self, configuration: Optional[ExpectationConfiguration]\n ) -> None:\n\n super().validate_configuration(configuration)", "title": "" }, { "docid": "21b07e3aec4c2c23726143b20d15bfbc", "score": "0.5770645", "text": "def test_get_channel_configuration(self):\n pass", "title": "" }, { "docid": "7245c5382a18375766f69fae98f0595d", "score": "0.57654", "text": "def isConnected(self):\r\n try:\r\n self.wlbt.ConnectAny()\r\n except self.wlbt.WalabotError as err:\r\n if err.code == 19: # 'WALABOT_INSTRUMENT_NOT_FOUND'\r\n return False\r\n return True", "title": "" }, { "docid": "597c25ec32a517cf2cc5b8b9c0360141", "score": "0.57625604", "text": "def test_monitor_sets_connection_options(self):\n parameters = generate_input_parameter_object_minus_inputs()\n monitor = app.monitor.Monitor(parameters)\n\n parameters_to_validate = [\n \"timeout\",\n 
\"connection_list\"\n ]\n\n for parameter in parameters_to_validate:\n self.assertEqual(\n True,\n getattr(monitor, parameter) == parameters[parameter],\n \"Monitor object did not set parameter '{0}'.\".format(parameter)\n )", "title": "" }, { "docid": "880944acaceba27b1c90a21d5a4cc53d", "score": "0.57569814", "text": "def test_connection(self):\n if self._connector.__name__ == \"_tcp_connector\":\n try:\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ip, port = self._connection_string.split(\":\")\n client.connect((ip, int(port)))\n client.close()\n return True\n except Exception as err:\n print(f\"Error: Cannot connect to Rspamd: {err} : {RSPAMD_HTTP_SOCKET}\")\n return False\n else:\n try:\n client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n client.connect(self._connection_string)\n client.close()\n return True\n except Exception as err:\n print(f\"Error: Cannot connect to Rspamd: {err} : {RSPAMD_HTTP_SOCKET}\")\n return False", "title": "" }, { "docid": "75597d56ff4ea22bbf45fb290fe98cdc", "score": "0.57511646", "text": "def testInitialization(self):\n configuration = configurations.ProcessingConfiguration()\n self.assertIsNotNone(configuration)", "title": "" }, { "docid": "487e6981072d3e57d333919f89120195", "score": "0.5746486", "text": "async def test_form_cannot_connect(hass: HomeAssistant) -> None:\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": config_entries.SOURCE_USER}\n )\n\n with patch(\n \"homeassistant.components.prusalink.config_flow.PrusaLink.get_version\",\n side_effect=asyncio.TimeoutError,\n ):\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"],\n {\n \"host\": \"1.1.1.1\",\n \"api_key\": \"abcdefg\",\n },\n )\n\n assert result2[\"type\"] == FlowResultType.FORM\n assert result2[\"errors\"] == {\"base\": \"cannot_connect\"}", "title": "" }, { "docid": "d17fb94b0657ca3f70b352c11c8ea33a", "score": "0.57441115", "text": "def check(config_file):\n echo('Read configurations files from: %s' % config_file.files_loaded)\n echo('Host: {}:{}'.format(config_file.db_hostname or 'localhost', config_file.db_port or '5432'))\n echo('Database: {}'.format(config_file.db_database))\n echo('User: {}'.format(config_file.db_username))\n\n echo('\\n')\n echo('Attempting connect')\n try:\n index_connect(local_config=config_file)\n echo('Success.')\n except OperationalError as e:\n handle_exception('Error Connecting to Database: %s', e)\n except IndexSetupError as e:\n handle_exception('Database not initialised: %s', e)", "title": "" }, { "docid": "b2ebe24b9f14d937d6cb2d7bf9c87598", "score": "0.57349163", "text": "def test_complete_config(complete_config):\r\n assert complete_config.testing", "title": "" }, { "docid": "627b2b1674178f7dd9599c66f587713f", "score": "0.5734311", "text": "def malconfigured_connection(self):\n user = USER\n host = HOST\n key = KEY\n\n not_the_user = \"fake_mxorc\"\n not_the_host = \"fake_swahost\"\n not_the_key = DEPLOY_CONFIG.get(\"Deploy Config\", \"private_key_path\")\n\n LOGGER.info(\"Testing malconfigured connections.\")\n\n # test malconfigured host\n LOGGER.info(\"Testing with %s, %s\", user, not_the_host)\n self.assertRaises(TypeError, user, not_the_host, key)\n\n # test with malconfigured user\n LOGGER.info(\"Testing with %s, %s\", not_the_user, host)\n try:\n try:\n ssh = SSHConnector(not_the_user, host, key)\n except paramiko.ssh_exception.AuthenticationException:\n self.assertTrue(True)\n except gaierror:\n self.assertTrue(True)\n\n # test with malconfigured key\n 
LOGGER.info(\"Testing with a malconfigured key.\")\n try:\n try:\n ssh = SSHConnector(user, host, not_the_key)\n except AttributeError:\n self.assertTrue(True)\n except gaierror:\n self.assertTrue(True)\n\n # test with malconfigured host and key\n LOGGER.info(\"Testing with %s, %s and a malconfigured key.\", user,\n not_the_host)\n try:\n try:\n ssh = SSHConnector(user, not_the_host, not_the_key)\n except AttributeError:\n self.assertTrue(True)\n except gaierror:\n self.assertTrue(True)\n\n # test with malconfigured user and host\n LOGGER.info(\"Testing with %s, %s\", not_the_user, not_the_host)\n try:\n try:\n ssh = SSHConnector(not_the_user, not_the_host, key)\n except AttributeError:\n self.assertTrue(True)\n except gaierror:\n self.assertTrue(True)\n\n # test with everything malconfigured\n LOGGER.info(\"Testing with %s, %s and a malconfigured key.\", not_the_user,\n not_the_host)\n try:\n try:\n ssh = SSHConnector(not_the_user, not_the_host, not_the_key)\n except AttributeError:\n self.assertTrue(True)\n except gaierror:\n self.assertTrue(True)\n\n LOGGER.info(\"Malconfigured connections reacting as expected.\")", "title": "" }, { "docid": "521d7161a1252e791bae11433967b287", "score": "0.57310194", "text": "def check_connection(self):\n try:\n res = requests.get(self._url + 'issues.' +\n self._config['api_format'], headers=self._headers, auth=self._auth if self._config['http_auth']['enabled'] else '')\n\n if res.status_code == 200:\n print(colored('Connection is: 200. OK.', 'green'))\n return True\n\n print(colored('Connection is: ' + str(res.status_code) + '. KO', 'red'))\n except HTTPError as http_err:\n print(f'HTTP error occurred: {http_err}')\n raise http_err\n except:\n print('Something went wrong!')\n return False\n \n return False", "title": "" }, { "docid": "c48e2366b18ce0614e9bdba7ed395c4c", "score": "0.5730428", "text": "def verify_connectivity(self) -> bool:\n vok = False\n\n hello_cmd = \"echo Hello\"\n status, stdout, _ = self.run_cmd(hello_cmd)\n if status == 0 and stdout.strip() == \"Hello\":\n vok = True\n\n return vok", "title": "" }, { "docid": "e5ac862f4203fa20d4eb6e8ee8874b8a", "score": "0.57291865", "text": "def test_connection_type(self):\n # user sets it to telnet\n value = 'telnet'\n self.config_parser.get.return_value = value\n self.assertEqual(value, self.configuration.connection_type)\n self.config_parser.get.assert_called_with(self.section,\n 'connection_type')\n # user doesn't set it\n self.configuration.reset()\n self.config_parser.get.side_effect = ConfigParser.NoOptionError('connection_type',\n self.section)\n self.assertEqual(HostEnum.default_type, self.configuration.connection_type)\n return", "title": "" }, { "docid": "fbbfb2f16d868469de4976d98f3beb52", "score": "0.5726135", "text": "def test_create_channel_configuration(self):\n pass", "title": "" }, { "docid": "79fecc786c0bf33c7c48915e04053a50", "score": "0.57241166", "text": "def connected():\n print \"Checking Internet Connection\"\n try:\n requests.get(AlexaService.AMAZON_TOKEN_URL)\n print \"Connection OK\"\n return True\n except requests.exceptions.Timeout as exception:\n print \"Error: Timeout / \" + exception.message\n except requests.exceptions.TooManyRedirects as exception:\n print \"Error: Invalid URL provided / \" + exception.message\n except requests.RequestException as exception:\n print \"Error: Connection Failed / \" + exception.message\n return False", "title": "" }, { "docid": "3f46a211389a3072ea0628ef4b001a84", "score": "0.572218", "text": "def testInitialization(self):\n 
configuration = configurations.ExtractionConfiguration()\n self.assertIsNotNone(configuration)", "title": "" }, { "docid": "307017cde339fbe4da6603c24739c406", "score": "0.57203025", "text": "def test_init():\n # Arrange\n data = get_fixture('config_init_request')\n # Act\n req = ConfigRequest(data)\n # Assert\n assert req.config_data_raw == data['configurationData']\n assert req.lifecycle == LIFECYCLE_CONFIG\n assert req.execution_id == data['executionId']\n assert req.locale == data['locale']\n assert req.version == data['version']\n assert req.installed_app_id == \\\n data['configurationData']['installedAppId']\n assert req.phase == LIFECYCLE_CONFIG_INIT\n assert req.page_id == ''\n assert req.previous_page_id == ''", "title": "" } ]
8bd8e980cf04d35e6b38ac315a1baa71
Return ``True`` if the Pixel Data should be converted from YCbCr to RGB. This affects JPEG transfer syntaxes.
[ { "docid": "b014c23b4d338cb8ecd2e7884bb03548", "score": "0.6915101", "text": "def needs_to_convert_to_RGB(ds: \"Dataset\") -> bool:\n return False", "title": "" } ]
[ { "docid": "8ec1d586b528ba2539eb81241dc48b12", "score": "0.6485655", "text": "def should_change_PhotometricInterpretation_to_RGB(ds: \"Dataset\") -> bool:\n return False", "title": "" }, { "docid": "e669456d6b3dd517d5162c581e892192", "score": "0.64236915", "text": "def rgb(self) -> bool:\n return self.image_shape[2] == 3", "title": "" }, { "docid": "4ad15819f7ac6f1207d74d63323a11cf", "score": "0.6371506", "text": "def check_rgb(image):\n im_yiq = []\n rgb = False\n y = image\n if len(image.shape) > 2 and image.shape[-1] == 3: # The image is RGB\n rgb = True\n im_yiq = rgb2yiq(image) # convert to YIQ format\n y = im_yiq[:, :, 0]\n return rgb, y, im_yiq", "title": "" }, { "docid": "90ad79684cb0dedb126c8948696bf841", "score": "0.62484235", "text": "def is_RGB(self,img_path):\n image=Image.open(img_path)\n image=np.asarray(image)\n if(len(image.shape)<3):\n return False\n return True", "title": "" }, { "docid": "abce83176f4914c2133851ac1dadfb03", "score": "0.61199087", "text": "def is_rgb(img: np.ndarray) -> bool:\n\n return len(img.shape) >= 1 and img.shape[-1] == 3", "title": "" }, { "docid": "91d04468f70cb4aa50fefd4aa2758b1a", "score": "0.6013372", "text": "def ycbcr_to_rgb(image: torch.Tensor) -> torch.Tensor:\n y: torch.Tensor = image[..., 0, :, :]\n cb: torch.Tensor = image[..., 1, :, :]\n cr: torch.Tensor = image[..., 2, :, :]\n\n delta: float = 0.5\n cb_shifted: torch.Tensor = cb - delta\n cr_shifted: torch.Tensor = cr - delta\n\n r: torch.Tensor = y + 1.403 * cr_shifted\n g: torch.Tensor = y - 0.714 * cr_shifted - 0.344 * cb_shifted\n b: torch.Tensor = y + 1.773 * cb_shifted\n return torch.stack([r, g, b], -3)", "title": "" }, { "docid": "d28736b533a1a49d59ec46658a410397", "score": "0.59540033", "text": "def is_grayscale(self):\n return self.r == self.g == self.b", "title": "" }, { "docid": "d28736b533a1a49d59ec46658a410397", "score": "0.59540033", "text": "def is_grayscale(self):\n return self.r == self.g == self.b", "title": "" }, { "docid": "38d4a4a21dcf8e2b37ac88ad2ec3d1d0", "score": "0.58759546", "text": "def rgb_to_ycbcr(image: torch.Tensor) -> torch.Tensor:\n r: torch.Tensor = image[..., 0, :, :]\n g: torch.Tensor = image[..., 1, :, :]\n b: torch.Tensor = image[..., 2, :, :]\n\n delta: float = 0.5\n y: torch.Tensor = 0.299 * r + 0.587 * g + 0.114 * b\n cb: torch.Tensor = (b - y) * 0.564 + delta\n cr: torch.Tensor = (r - y) * 0.713 + delta\n return torch.stack([y, cb, cr], -3)", "title": "" }, { "docid": "b5e7a774eae49e42afe36e7050cc762b", "score": "0.5786272", "text": "def is_rgb(im):\n if(im.ndim == 3):\n return True\n else:\n return False", "title": "" }, { "docid": "8595c838ce73dae005244abbe8566b99", "score": "0.577233", "text": "def rgb_to_ycbcr(rgb_uint8):\n if rgb_uint8.dtype != numpy.uint8:\n raise TypeError('`rgb_uint8.dtype` is not equal to `numpy.uint8`.')\n \n # If the check below did not exist, `rgb_to_ycbcr` would\n # not crash if `rgb_uint8` is nD, n >= 4.\n if rgb_uint8.ndim != 3:\n raise ValueError('`rgb_uint8.ndim` is not equal to 3.')\n \n # If the check below did not exist, `rgb_to_ycbcr` would\n # not crash if `rgb_uint8.shape[2]` is larger than 4.\n if rgb_uint8.shape[2] != 3:\n raise ValueError('`rgb_uint8.shape[2]` is not equal to 3.')\n rgb_float64 = rgb_uint8.astype(numpy.float64)\n y_float64 = 0.299*rgb_float64[:, :, 0] \\\n + 0.587*rgb_float64[:, :, 1] \\\n + 0.114*rgb_float64[:, :, 2]\n cb_float64 = 128. \\\n - (0.299/1.772)*rgb_float64[:, :, 0] \\\n - (0.587/1.772)*rgb_float64[:, :, 1] \\\n + (0.886/1.772)*rgb_float64[:, :, 2]\n cr_float64 = 128. 
\\\n + (0.701/1.402)*rgb_float64[:, :, 0] \\\n - (0.587/1.402)*rgb_float64[:, :, 1] \\\n - (0.114/1.402)*rgb_float64[:, :, 2]\n ycbcr_float64 = numpy.stack((y_float64, cb_float64, cr_float64),\n axis=2)\n return cast_float_to_uint8(ycbcr_float64)", "title": "" }, { "docid": "a8298bba45c899f7c6d8513ba1f2b732", "score": "0.5736785", "text": "def skin_detect_ycbcr(frame):\n Cr_min, Cr_max, Cb_min, Cb_max = 133, 150, 77, 127\n # Constants for finding range of skin color in YCrCb\n min_YCrCb = np.array([0,Cr_min,Cb_min], np.uint8)\n max_YCrCb = np.array([255,Cr_max,Cb_max], np.uint8)\n\n # Convert image to YCrCb\n imageYCrCb = cv2.cvtColor(frame, cv2.COLOR_BGR2YCR_CB)\n # Find region with skin tone in YCrCb image\n skinRegion = cv2.inRange(imageYCrCb, min_YCrCb, max_YCrCb) \n # Do contour detection on skin region\n _, contours, hierarchy = cv2.findContours(skinRegion, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n return imageYCrCb, contours, hierarchy", "title": "" }, { "docid": "a629d9b7d12a43695fdcc085e07b8891", "score": "0.5708002", "text": "def _rgb2y(self, im):\n if len(im.shape) < 3:\n return im\n return np.sum(im * [0.299, 0.587, 0.114], axis=2)", "title": "" }, { "docid": "e00bff875a122a0bb3aadd0e6af9fa40", "score": "0.5661065", "text": "def yuv2rgb(im):\n ## conflicting definitions exist depending on whether you use the full range\n ## of YCbCr or clamp out to the valid range. see here\n ## http://www.equasys.de/colorconversion.html\n ## http://www.fourcc.org/fccyvrgb.php\n from numpy import dot, ndarray, array\n # if not im.dtype == 'uint8':\n # raise ImageUtilsError('yuv2rgb only implemented for uint8 arrays')\n\n ## better clip input to the valid range just to be on the safe side\n yuv = ndarray(im.shape) ## float64\n yuv[:, :, 0] = im[:, :, 0].clip(16, 235).astype(yuv.dtype) - 16\n yuv[:, :, 1:] = im[:, :, 1:].clip(16, 240).astype(yuv.dtype) - 128\n\n ## ITU-R BT.601 version (SDTV)\n A = array([[1., 0., 0.701],\n [1., -0.886 * 0.114 / 0.587, -0.701 * 0.299 / 0.587],\n [1., 0.886, 0.]])\n A[:, 0] *= 255. / 219.\n A[:, 1:] *= 255. 
/ 112.\n\n ## ITU-R BT.709 version (HDTV)\n # A = array([[1.164, 0., 1.793],\n # [1.164, -0.213, -0.533],\n # [1.164, 2.112, 0.]])\n\n rgb = dot(yuv, A.T)\n return rgb.clip(0, 255).astype('uint8')", "title": "" }, { "docid": "089dec87e0508b096d6cee060b64c9ca", "score": "0.5609152", "text": "def rgb_to_ycbcr(image: np.ndarray) -> np.ndarray:\n\n \"\"\" from RGB (0-1).\n \"\"\"\n\n if not is_rgb(image):\n raise ValueError(\"Input needs to be an array of RGB values\")\n\n m = np.array(\n [\n [+065.481, +128.553, +024.966],\n [-037.797, -074.203, +112.000],\n [+112.000, -093.786, -018.214],\n ]\n )\n a = np.array([16, 128, 128])\n\n return np.dot(image, m.T) + a", "title": "" }, { "docid": "19454455c714da4bf3af35257f0d2e44", "score": "0.5582685", "text": "def is_inner_gamut_xyY(xyY, color_space_name=cs.BT2020, white=cs.D65):\n rgb = XYZ_to_RGB(\n xyY_to_XYZ(xyY), white, white,\n RGB_COLOURSPACES[color_space_name].XYZ_to_RGB_matrix)\n r_judge = (rgb[..., 0] >= 0) & (rgb[..., 0] <= 1)\n g_judge = (rgb[..., 1] >= 0) & (rgb[..., 1] <= 1)\n b_judge = (rgb[..., 2] >= 0) & (rgb[..., 2] <= 1)\n judge = (r_judge & g_judge) & b_judge\n\n return judge", "title": "" }, { "docid": "dea45444e6cae340df2383178ae29378", "score": "0.5565979", "text": "def ycbcr_to_rgb(ycbcr_uint8):\n if ycbcr_uint8.dtype != numpy.uint8:\n raise TypeError('`ycbcr_uint8.dtype` is not equal to `numpy.uint8`.')\n \n # If the check below did not exist, `ycbcr_to_rgb` would\n # not crash if `ycbcr_uint8` is nD, n >= 4.\n if ycbcr_uint8.ndim != 3:\n raise ValueError('`ycbcr_uint8.ndim` is not equal to 3.')\n \n # If the check below did not exist, `ycbcr_to_rgb` would\n # not crash if `ycbcr_uint8.shape[2]` is larger than 4.\n if ycbcr_uint8.shape[2] != 3:\n raise ValueError('`ycbcr_uint8.shape[2]` is not equal to 3.')\n ycbcr_float64 = ycbcr_uint8.astype(numpy.float64)\n red_float64 = ycbcr_float64[:, :, 0] \\\n + 1.402*(ycbcr_float64[:, :, 2] - 128.)\n green_float64 = ycbcr_float64[:, :, 0] \\\n - (0.114*1.772*(ycbcr_float64[:, :, 1] - 128.)/0.587) \\\n - (0.299*1.402*(ycbcr_float64[:, :, 2] - 128.)/0.587)\n blue_float64 = ycbcr_float64[:, :, 0] \\\n + 1.772*(ycbcr_float64[:, :, 1] - 128.)\n rgb_float64 = numpy.stack((red_float64, green_float64, blue_float64),\n axis=2)\n return cast_float_to_uint8(rgb_float64)", "title": "" }, { "docid": "c91a4a980e269e084990e93077dcf0bd", "score": "0.54774755", "text": "def IsOk(*args, **kwargs):\n return _gdi_.Colour_IsOk(*args, **kwargs)", "title": "" }, { "docid": "7d9f9576d0185e161ecb87f8a75b234d", "score": "0.5469251", "text": "def is_calibrated(channel):\n\n return hasattr(channel, \"calibrated\") and channel.calibrated == True", "title": "" }, { "docid": "6c496a90d1f94a11d7dc57093c09d9c6", "score": "0.5442214", "text": "def check_conv(extract):\n call = extract\n clip_found = False\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"nn.relu\":\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"clip\":\n clip_found = True\n if call.attrs[\"a_min\"] != 0.0 or call.attrs[\"a_max\"] != 6.0:\n return False\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n\n while call.op.name != \"nn.conv2d\":\n call = call.args[0]\n\n attrs, args = call.attrs, call.args\n if attrs.data_layout != \"NCHW\":\n return False\n\n if (\n (not clip_found)\n and (attrs.kernel_size[0] == 3)\n and (attrs.dilation[0] != 1)\n and (attrs.groups != 1)\n 
and (attrs.channels == attrs.groups)\n ):\n return False\n\n data_typ = args[0].checked_type\n kernel_typ = args[1].checked_type\n is_depthwise = is_depthwise_conv2d(\n data_typ.shape,\n attrs[\"data_layout\"],\n kernel_typ.shape,\n attrs[\"kernel_layout\"],\n attrs[\"groups\"],\n )\n if attrs.groups != 1 and not is_depthwise:\n return False\n return True", "title": "" }, { "docid": "3282b683bcd3cc903e242e0d69a74dc7", "score": "0.5399911", "text": "def __isValidRgbaColor(self, color):\n rgba = []\n \n parts = color.split(\",\")\n if len(parts) not in [3, 4]:\n return False, []\n \n for part in parts:\n try:\n c = int(part)\n except ValueError:\n return False, []\n \n if c < 0 or c > 255:\n return False, []\n \n rgba.append(c)\n \n return True, rgba", "title": "" }, { "docid": "551cc59286b0510253f5bb634308fc3d", "score": "0.5388822", "text": "def convertColorSpace(\n self,\n img, # Image in some color space\n srcColorSpace = 'BGR', # Source color space\n tgtColorSpace = 'RGB', # Traget color space\n ):\n\n if srcColorSpace == tgtColorSpace:\n return img\n\n if srcColorSpace == 'BGR':\n img_bgr = img\n elif srcColorSpace == 'RGB':\n img_bgr = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n elif srcColorSpace == 'HSV':\n img_bgr = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)\n elif srcColorSpace == 'HLS':\n img_bgr = cv2.cvtColor(img, cv2.COLOR_HLS2BGR)\n elif srcColorSpace == 'LUV':\n img_bgr = cv2.cvtColor(img, cv2.COLOR_LUV2BGR)\n elif srcColorSpace == 'YUV':\n img_bgr = cv2.cvtColor(img, cv2.COLOR_YUV2BGR)\n elif srcColorSpace == 'YCrCb':\n img_bgr = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)\n else:\n raise Exception(\"Incorrect color space: {}\".format(srcColorSpace))\n\n if tgtColorSpace == 'BGR':\n img_tgt = img_bgr\n elif tgtColorSpace == 'RGB':\n img_tgt = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)\n elif tgtColorSpace == 'HSV':\n img_tgt = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)\n elif tgtColorSpace == 'HLS':\n img_tgt = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HLS)\n elif tgtColorSpace == 'LUV':\n img_tgt = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LUV)\n elif tgtColorSpace == 'YUV':\n img_tgt = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2YUV)\n elif tgtColorSpace == 'YCrCb':\n img_tgt = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2YCrCb)\n else:\n raise Exception(\"Incorrect color space: {}\".format(tgtColorSpace))\n\n return img_tgt", "title": "" }, { "docid": "dd56510bf6af2a7e472436745e075518", "score": "0.537745", "text": "def is_rgb_color(v):\n if hasattr(v, \"r\") and hasattr(v, \"g\") and hasattr(v, \"b\"):\n v = [v.r, v.g, v.b]\n if not isiterable(v) or len(v) < 3:\n return False\n try:\n return all([0 <= int(x) <= 255 for x in v[:3]])\n except (TypeError, ValueError):\n return False", "title": "" }, { "docid": "7b5b07627751eb4d278227f6ebe2c0be", "score": "0.53625816", "text": "def rgb_to_ycbcr(img):\n\n T = np.array([\n [0.256788235294118, -0.148223529411765, 0.439215686274510],\n [0.504129411764706, -0.290992156862745, -0.367788235294118],\n [0.097905882352941, 0.439215686274510, -0.071427450980392],\n ], dtype=np.float64)\n\n O = np.array([16, 128, 128], dtype=np.float64)\n\n img = img.astype(np.float64)\n res = np.matmul(img, T) + O\n res = res.clip(0, 255).round().astype(np.uint8)\n\n return res", "title": "" }, { "docid": "9a765973e87ba28931e760c7a06363d6", "score": "0.5293077", "text": "def detect(frame: numpy.ndarray) -> bool:\n color = frame[:20, 1100:1150].mean(axis=(0, 1))\n return numpy.linalg.norm(color - BG_COLOR) < 5", "title": "" }, { "docid": "b057ccf743af9dc6c96568755d9434c1", "score": "0.52678376", 
"text": "def rgb_to_ycbcr(rgb):\n transform = np.matrix('.299, .587, .114; -.16874, -.33126, .5; .5, -.41869, -.08131')\n\n def apply_transform(x):\n return np.array(np.dot(transform, x))[0]\n\n return np.apply_along_axis(apply_transform, 2, rgb)", "title": "" }, { "docid": "ef65b4d4cec34665bd802c72007f7a9e", "score": "0.5259736", "text": "def is_cr(self, y, t):\n return t == 0 and y != 0", "title": "" }, { "docid": "ec9610997ee0bc6e443b907c39fbc016", "score": "0.52389586", "text": "def valid_image(self, image):\n valid = False\n if (isinstance(image, list) and len(image) == 11):\n valid = True\n for row in image:\n if (isinstance(row, list) and len(row) == 11):\n for pixel in row:\n if not self.valid_color(pixel):\n valid = False\n break\n else:\n valid = False\n break\n if not valid:\n _LOGGER.error(\"Invalid image data received\")\n return valid", "title": "" }, { "docid": "26d160064aa3ed4f7d595d5a83d912d3", "score": "0.52342755", "text": "def ycbcr_to_rgb(ycbcr):\n transform = np.matrix('.299, .587, .114; -.16874, -.33126, .5; .5, -.41869, -.08131')\n inverse = transform.getI()\n\n def apply_transform(ycbcr):\n return np.array(np.dot(inverse, ycbcr))[0]\n\n return np.apply_along_axis(apply_transform, 2, ycbcr)", "title": "" }, { "docid": "02b39b03189fa5093b1075eea5b05cb2", "score": "0.5185638", "text": "def is_dark(self):\n\n return self.red() < 125 and self.green() < 125 and self.blue() < 125", "title": "" }, { "docid": "7083b3f7dfe1019c4b323f07ae0f6e07", "score": "0.5141926", "text": "def isLineData(self, line):\n\n if line is None or line.strip().startswith('#'):\n return False, None, 0\n\n dataType = self.getDataType()\n\n if dataType == 'Y':\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n newYValues = []\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n newYValues.append(yValue)\n except ValueError:\n pass\n\n return True, 'Y', len(newYValues)\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n elif dataType == 'XY':\n # XY with comma\n try:\n (xValue, yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n else:\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n numberValues = 0\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n numberValues += 1\n except ValueError:\n pass\n\n return True, 'Y', numberValues\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n\n # XY with comma\n try:\n (xValue, 
yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n return False, None, 0", "title": "" }, { "docid": "8889c97e3076e60a010a5ff076c3134f", "score": "0.5133905", "text": "def pixel_at(self, x, y):\n return self.arr[x, y, 1] == 255", "title": "" }, { "docid": "9ad8a74c4bfb835289c1ec857016853b", "score": "0.51089483", "text": "def test_conversion_through_rgb(self):\r\n\r\n xyz = convert_color(self.color, XYZColor)\r\n hsl = convert_color(xyz, HSLColor, through_rgb_type=AdobeRGBColor)\r\n # Notice how we don't have to pass through_rgb_type explicitly.\r\n xyz2 = convert_color(hsl, XYZColor)\r\n self.assertColorMatch(xyz, xyz2)", "title": "" }, { "docid": "c9cf0fb5706704aaf165265e33768d1f", "score": "0.5080082", "text": "def constrain_rgb(rgb: ndarray) -> bool:\n w = - min(0, *rgb) # Amount of white needed\n if w > 0:\n rgb += w # Add just enough white to make r, g, b all positive\n return True # Colour modified to fit RGB gamut\n return False # Colour within RGB gamut", "title": "" }, { "docid": "37d6ac7c661d07deedd3860906456588", "score": "0.5070118", "text": "def has_y(self):\n return any(map(lambda s: s.is_y, self))", "title": "" }, { "docid": "6b016755cb8a54865cd9cb7d96d4806c", "score": "0.50690347", "text": "def is_data_format_channel_last(data_format):\n if data_format is None:\n return True\n return data_format.endswith(\"C\")", "title": "" }, { "docid": "3c8f18156066bc1e7048a3f2d8669a13", "score": "0.50622684", "text": "def is_calibrated(self):\n return self.gripper_io.get_signal_value(\"is_calibrated\")", "title": "" }, { "docid": "a572499aaf8987a61c3b1b8e2e5d47b3", "score": "0.50479305", "text": "def test_is_valid_rgb_color(self):\n self.assertTrue(is_valid_rgb_color('rgb(12,23,5)'))\n self.assertTrue(is_valid_rgb_color('rgb(12, 223, 225)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1.0)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 0)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .3)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .34521)'))\n\n # invalid cases\n self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 0.5)'))\n self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 5)'))\n self.assertFalse(is_valid_rgb_color('rgb(1234, 223, 225)'))\n self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,.5)'))\n self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,1.1)'))", "title": "" }, { "docid": "72875e8e36bdaa13189ac4a0e119da42", "score": "0.5016729", "text": "def contains_black(image):\n extrema = ImageStat.Stat(image).extrema\n r = extrema[0][0]\n g = extrema[1][0]\n b = extrema[2][0]\n\n if r == 0 and g == 0 and b == 0:\n return True\n\n return False", "title": "" }, { "docid": "d1ca654be568cc6cf6f5d3971d0a4190", "score": "0.50053465", "text": "def is_black(self):\n return \"black\" == self.color", "title": "" }, { "docid": "276de2f62629b02f0526c1b89dcd43ef", "score": "0.49995402", "text": "def isRGB(color):\n try:\n if color[0:4] != 'rgb(':\n return False\n if color[-1:] != ')':\n return False\n if len(color[4:-1].split(',')) != 3:\n return False\n for i in 
color[4:-1].split(','):\n if i.replace(' ', '').isdigit() == False:\n return False\n if int(i.replace(' ', '')) < 0 or int(i.replace(' ', '')) > 255:\n return False\n return True\n except TypeError:\n return False", "title": "" }, { "docid": "63fab7a47a48341a162d8a4e208220a4", "score": "0.4999493", "text": "def _iscolorstring(self, color):\n try:\n rgb = self.cv.winfo_rgb(color)\n ok = True\n except TK.TclError:\n ok = False\n return ok", "title": "" }, { "docid": "5e24d3b3477d19d9460efa90858dce1e", "score": "0.49943197", "text": "def yuv_to_rgb(img_yuv):\n\n y = img_yuv[..., 0]\n u = img_yuv[..., 1]\n v = img_yuv[..., 2]\n\n r = y + 1.14 * v\n g = y - 0.396 * u - 0.581 * v\n b = y + 2.029 * u\n\n img_rgb = np.stack((r, g, b), axis=2)\n img_rgb = np.clip(img_rgb, 0, 1)\n return img_rgb", "title": "" }, { "docid": "20850fabf7a71d0af5e7050c11203982", "score": "0.49902686", "text": "def image_comparison(self):\n for result in self.cards:\n if result.image_status:\n return True\n return False", "title": "" }, { "docid": "f9737f27db866fce949d19eaa55d7375", "score": "0.4989966", "text": "def get_rc(self):\n # If the 16th bit is set, it's reverse complement\n return self._flag is 16", "title": "" }, { "docid": "c8409b035f4c000f1342e6ca8b98d21e", "score": "0.49858275", "text": "def has_valid_channel_values(rgb_coll):\n return all([is_0to255(c) and is_int(c) for c in rgb_coll])", "title": "" }, { "docid": "dff878980606ee7e5a1e0509079f1260", "score": "0.49612516", "text": "def areTwoColors(colors):\n if not(isinstance(colors, list) or isinstance(colors, tuple)):\n raise pgUIException(str(colors) + ' is not a two color list or tuple',\n code = 24)\n if len(colors) != 2:\n raise pgUIException(str(colors) + ' is not a two color sequence',\n code = 25)\n for c in colors:\n isRGB(c)\n\n return True", "title": "" }, { "docid": "9664b8890741901941efbb1ab664d1e7", "score": "0.49608642", "text": "def is_video(self):\n val = False\n if self.__dict__['codec_type']:\n if self.__dict__['codec_type'] == 'video':\n val = True\n return val", "title": "" }, { "docid": "23d3457670f4728ef24bcbd5c603c145", "score": "0.4956988", "text": "def test_toRGB(self):\r\n self.assertEqual(self.black.toRGB(), (0, 0, 0))\r\n self.assertEqual(self.red.toRGB(), (255, 0, 0))\r\n self.assertEqual(self.pink.toRGB(), (100, 0, 0))", "title": "" }, { "docid": "ba226aeb3873eda9eea09deec557aedf", "score": "0.49549034", "text": "def getGYR(self, axis='X'):\r\n\t\tscale = self.getScale(mode='GYR')[0]\r\n\t\tif axis.upper() == 'X':\r\n\t\t\treg = 0x43\r\n\t\telif axis.upper() == 'Y':\r\n\t\t\treg = 0x45\r\n\t\telif axis.upper() == 'Z':\r\n\t\t\treg = 0x47\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\tgyr_H = self.read(reg)\r\n\t\tgyr_L = self.read(reg+1)\r\n\t\tgyrRaw = self.twos_comp(val=(gyr_H*256 + gyr_L),bits=16)\r\n\t\treturn scale*(gyrRaw/float(2**15)), gyrRaw", "title": "" }, { "docid": "6e14d9173b5c9522effe7480745741cf", "score": "0.49502647", "text": "def check(self, grain=50):\n opengles.glReadPixels(0, 0, self.ix, self.iy,\n GL_RGB, GL_UNSIGNED_BYTE,\n ctypes.byref(self.img))\n r0 = self.img[0:3]\n step = 3 * int(self.ix * self.iy / 50)\n for i in xrange(0, len(self.img)-3, step):\n if self.img[i:(i+3)] != r0:\n return True\n\n return False", "title": "" }, { "docid": "1b1acb3d4f45ff01603562e7b7236e67", "score": "0.49494478", "text": "def convert_rgb_cmyk(rcol, gcol, bcol):\n if (rcol == 0) and (gcol == 0) and (bcol == 0):\n # black\n return 0, 0, 0, 1\n\n kcol = 1-max(rcol, gcol, bcol)\n ccol = (1-rcol-kcol)/(1-kcol)\n mcol = 
(1-gcol-kcol)/(1-kcol)\n ycol = (1-bcol-kcol)/(1-kcol)\n\n return ccol, mcol, ycol, kcol", "title": "" }, { "docid": "c4f8195d5120023a86440c4e63cd6dc1", "score": "0.49423894", "text": "def is_cn(self, y, t):\n return t == 0 and y == 0", "title": "" }, { "docid": "a346cf07d199299e697258e7491c424c", "score": "0.49112296", "text": "def yuv_bytes(self):\n r, g, b = self.rgb_bytes\n return (\n (( 66 * r + 129 * g + 25 * b + 128) >> 8) + 16,\n ((-38 * r - 73 * g + 112 * b + 128) >> 8) + 128,\n ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128,\n )", "title": "" }, { "docid": "8661461c963e8acc43c23de2f6584d37", "score": "0.49105978", "text": "def check(self, grain=50):\r\n opengles.glDisable(GL_SCISSOR_TEST)\r\n self.s_flg = False\r\n opengles.glReadPixels(0, self.y0, self.ix, 1,\r\n GL_RGB, GL_UNSIGNED_BYTE,\r\n ctypes.byref(self.img))\r\n r0 = self.img[0:3]\r\n for i in xrange(0, self.img_sz, self.step):\r\n if self.img[i:(i+3)] != r0:\r\n return True\r\n\r\n return False", "title": "" }, { "docid": "d443c5150afafbeb5b3d62cb3b7d5fe5", "score": "0.48992535", "text": "def transformRGB2YIQ(imgRGB: np.ndarray) -> np.ndarray:\r\n YIQ_from_RGB = np.array([[0.299, 0.587, 0.114],\r\n [0.59590059, -0.27455667, -0.32134392],\r\n [0.21153661, -0.52273617, 0.31119955]])\r\n YIQImg = np.ndarray(imgRGB.shape)\r\n\r\n YIQImg[:, :, 0] = YIQ_from_RGB[0,0] * imgRGB[:, :, 0] + YIQ_from_RGB[0,1] * imgRGB[:, :, 1] + YIQ_from_RGB[0,2] * imgRGB[:, :, 2]\r\n YIQImg[:, :, 1] = YIQ_from_RGB[1,0] * imgRGB[:, :, 0] + YIQ_from_RGB[1,1] * imgRGB[:, :, 1] + YIQ_from_RGB[1,2] * imgRGB[:, :, 2]\r\n YIQImg[:, :, 2] = YIQ_from_RGB[2,0] * imgRGB[:, :, 0] + YIQ_from_RGB[2,1] * imgRGB[:, :, 1] + YIQ_from_RGB[2,2] * imgRGB[:, :, 2]\r\n\r\n return YIQImg", "title": "" }, { "docid": "01b6556bdc3638e7cc2eb720b9157608", "score": "0.48901507", "text": "def validate_timecode_input(self):\n frame = self.file_buffer.get_image(self.frame_offset)\n try:\n test = frame.shape\n except Exception as e:\n print(e)\n return False\n else:\n return True\n finally:\n test = None\n frame = None", "title": "" }, { "docid": "dbfbf98c97366fba590cd8180f2b0f07", "score": "0.48797312", "text": "def check_conv_transpose(extract):\n call = extract\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"nn.relu\":\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"clip\":\n if call.attrs[\"a_min\"] != 0.0 or call.attrs[\"a_max\"] != 6.0:\n return False\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n\n while call.op.name != \"nn.conv2d_transpose\":\n call = call.args[0]\n\n attrs = call.attrs\n if attrs.data_layout != \"NCHW\":\n return False\n\n return True", "title": "" }, { "docid": "9879060117fc5c785eeffb9562412176", "score": "0.48582724", "text": "def to_ycc(color):\n return rgb_to_ycc(*[x / 255.0 for x in color])", "title": "" }, { "docid": "0988d7cf075aeb34731a4f1869e93daf", "score": "0.48528588", "text": "def lrc_check(data):\n return TeliumData.lrc(data[1:-1]) == (data[-1] if six.PY3 else ord(data[-1]))", "title": "" }, { "docid": "4a0d46f01ad9b4f700c49b59175bc641", "score": "0.48484087", "text": "def _isGrayscale(self, img: ndarray) -> bool:\n if len(np.squeeze(img).shape) == 2:\n return True\n else:\n return False", "title": "" }, { "docid": "a3a69ed985717a47acddc4dd90c10ebc", "score": "0.48367253", "text": "def is_cue_line(point1, point2, image):\n if point1[0] <= point2[0]:\n 
pointL, pointR = point1, point2\n else:\n pointL, pointR = point2, point1\n deltaY = pointR[1] - pointL[1]\n deltaX = pointR[0] - pointL[0]\n if deltaX != 0:\n for x in range(pointL[0], pointR[0] + 1):\n dx = x - pointL[0]\n dy = dx * deltaY/deltaX\n y = pointL[1] + dy\n if not is_cue_color(image.getpixel((x,y))):\n return False\n else:\n up = min(point1[1], point2[1])\n down = max(point1[1], point2[1])\n x = point1[0]\n for y in range(up, down + 1):\n if not is_cue_color(image.getpixel((x, y))):\n return False\n\n return True", "title": "" }, { "docid": "77ae92b2917ab5a013a61b6ba67a2690", "score": "0.48351607", "text": "def is_black( self, font_max_width, font_max_height ):\n if (self.width != font_max_width) or (self.height != font_max_height):\n return False\n for bit in self.pixels:\n if not(bit):\n return False\n return True", "title": "" }, { "docid": "bfd274e8aa8138184aa1ed8c4ee38c5d", "score": "0.48304316", "text": "def test_cmyk_to_rgb():\n #Test cmyk_to_rgb() when K = 1:\n cmyk = colormodel.CMYK(10, 11, 12, 100)\n rgb = a3.cmyk_to_rgb(cmyk)\n cunittest.assert_equals(0, rgb.red)\n cunittest.assert_equals(0, rgb.green)\n cunittest.assert_equals(0, rgb.blue)\n \n #Test cmyk_to_rgb() when C, M, or Y = 0:\n cmyk = colormodel.CMYK(0, 11, 12, 0)\n rgb = a3.cmyk_to_rgb(cmyk)\n cunittest.assert_equals(255, rgb.red)\n cunittest.assert_equals(227, rgb.green)\n cunittest.assert_equals(224, rgb.blue)\n \n cmyk = colormodel.CMYK(10, 0, 12, 0)\n rgb = a3.cmyk_to_rgb(cmyk)\n cunittest.assert_equals(230, rgb.red)\n cunittest.assert_equals(255, rgb.green)\n cunittest.assert_equals(224, rgb.blue)\n \n cmyk = colormodel.CMYK(10, 11, 0, 0)\n rgb = a3.cmyk_to_rgb(cmyk)\n cunittest.assert_equals(230, rgb.red)\n cunittest.assert_equals(227, rgb.green)\n cunittest.assert_equals(255, rgb.blue)\n \n #General Test Case\n cmyk = colormodel.CMYK(4, 20, 7, 18.63)\n rgb = a3.cmyk_to_rgb(cmyk)\n cunittest.assert_equals(199, rgb.red)\n cunittest.assert_equals(166, rgb.green)\n cunittest.assert_equals(193, rgb.blue)", "title": "" }, { "docid": "9fc304c40c04d3c11ff675c9bdbed9d0", "score": "0.48281643", "text": "def IsColor(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_IsColor(self, *args)", "title": "" }, { "docid": "2655a57b515f1e6bfad65ba7d964f92a", "score": "0.48131484", "text": "def transformYIQ2RGB(imgYIQ: np.ndarray) -> np.ndarray:\r\n yiq_from_rgb = np.array([[0.299, 0.587, 0.114],\r\n [0.59590059, -0.27455667, -0.32134392],\r\n [0.21153661, -0.52273617, 0.31119955]])\r\n rgb_from_yiq = np.linalg.inv(yiq_from_rgb)\r\n\r\n RGBImg = np.ndarray(imgYIQ.shape)\r\n\r\n RGBImg[:, :, 0] = rgb_from_yiq[0,0] * imgYIQ[:, :, 0] + rgb_from_yiq[0,1] * imgYIQ[:, :, 1] + rgb_from_yiq[0,2] * imgYIQ[:, :, 2]\r\n RGBImg[:, :, 1] = rgb_from_yiq[1,0] * imgYIQ[:, :, 0] + rgb_from_yiq[1,1] * imgYIQ[:, :, 1] + rgb_from_yiq[1,2] * imgYIQ[:, :, 2]\r\n RGBImg[:, :, 2] = rgb_from_yiq[2,0] * imgYIQ[:, :, 0] + rgb_from_yiq[2,1] * imgYIQ[:, :, 1] + rgb_from_yiq[2,2] * imgYIQ[:, :, 2]\r\n\r\n return RGBImg", "title": "" }, { "docid": "9f012cb8316447c0ff9a9ab8f2a121ad", "score": "0.48058212", "text": "def is_colour(self, im):\n hsl = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)\n h, s, v = np.mean(hsl, (0, 1))\n if s < 100:\n self.log.info(\n \"Grayscale scan detected (hsv %s, %s, %s), converting...\", h, s, v\n )\n return False\n return True", "title": "" }, { "docid": "5d38960d0935944f4250d2d91ae87ec4", "score": "0.48040393", "text": "def CanConvert(*args, **kwargs):\n return _gdi_.EncodingConverter_CanConvert(*args, **kwargs)", "title": "" }, 
{ "docid": "780650665d6f3d40e2b1ab386defdc59", "score": "0.48029757", "text": "def assert_data_correct(self) -> bool:\n corr_char = self.assert_data_characters_correct()\n print(\"Character data correct?\", corr_char)\n corr_font = FontImages().assert_data_correct()\n print(\"Font data correct?\", corr_font)\n corr_frag = self.assert_data_fragments_correct()\n print(\"Fragment data correct?\", corr_frag)\n corr_train_aug = self.assert_train_augmented()\n print(\"Train data augmented?\", corr_train_aug)\n truth_agree = corr_char and corr_font and corr_frag\n return True if truth_agree else False", "title": "" }, { "docid": "425cde9e041cc25a90b2913e5b96463b", "score": "0.47929516", "text": "def poll(cls, context):\n\n tex = context.texture\n if not tex:\n return False\n\n if context.texture.luxrender_texture.type == 'BLENDER':\n return tex and \\\n (context.scene.render.engine in cls.COMPAT_ENGINES) and \\\n context.texture.type in cls.BL_COMPAT\n else:\n return tex and \\\n (context.scene.render.engine in cls.COMPAT_ENGINES) and \\\n context.texture.luxrender_texture.type in cls.LUX_COMPAT", "title": "" }, { "docid": "30c8a2fd893cf4c10f98be486f9720a2", "score": "0.47921753", "text": "def _is_color_valid(self, color):\n # make sure it is a tuple\n if type(color).__name__ != 'tuple':\n return False\n # check the length of the tuple\n if len(color) != 3:\n return False\n # verify that component colors are between _MIN and _MAX\n for c in color:\n if c < MIN or c > MAX:\n return False\n return True", "title": "" }, { "docid": "fe37a3164b5da2550012b525949bb8c3", "score": "0.47916463", "text": "def is_calibrated(self):\n\n return self.bin_edges_kev is not None", "title": "" }, { "docid": "31fa396cb6349fca2ab3eb2d6877db4b", "score": "0.47910154", "text": "def use_derived_XYZ_to_RGB_matrix(self):\n\n return self._use_derived_XYZ_to_RGB_matrix", "title": "" }, { "docid": "a73ac845e22775d0086b6adc168c4999", "score": "0.47815752", "text": "def convert_yuv_to_rgb(img_arr): \n rgb = cv2.cvtColor(img_arr, cv2.COLOR_YUV2BGR_I420)\n rgb = cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB)\n return Image.fromarray(rgb)", "title": "" }, { "docid": "7d8abd627299bc89dedf935aa7ed2847", "score": "0.47696853", "text": "def _check(self, x, y):\n n = self.n\n # x direction\n xline = self.arr[y]\n if not self.x_regexes[y].match(xline):\n return False\n\n # y direction\n ypos = x + max(0, y + 1 - n)\n yline = []\n x1, y1 = ypos, 0\n while x1 >= 0 and y1 < 2 * n - 1:\n if x1 < len(self.arr[y1]):\n yline.append(self.arr[y1][x1])\n if y1 >= n - 1:\n x1 -= 1\n y1 += 1\n\n if not self.y_regexes[ypos].match(yline):\n return False\n\n # z direction\n zpos = x + max(0, n - 1 - y)\n zline = []\n x1, y1 = zpos, 2 * n - 2\n while x1 >= 0 and y1 >= 0:\n if x1 < len(self.arr[y1]):\n zline.append(self.arr[y1][x1])\n if y1 <= n - 1:\n x1 -= 1\n y1 -= 1\n\n if not self.z_regexes[zpos].match(zline):\n return False\n\n return True", "title": "" }, { "docid": "d1a6d63ad047d1d7e2a2d9306ac7c317", "score": "0.47677577", "text": "def save_images(inputY, inputCbCr, size, image_path):\n def merge(images, size):\n h, w = images.shape[1], images.shape[2]\n img = np.zeros((h * size[0], w * size[1], 3))\n for idx, image in enumerate(images):\n i = idx % size[1]\n j = idx // size[1]\n img[j*h:j*h+h, i*w:i*w+w, :] = image\n return img\n\n inputY = inputY.astype('uint8')\n inputCbCr = inputCbCr.astype('uint8')\n output_concat = np.concatenate((inputY, inputCbCr), axis=3)\n\n assert len(output_concat) <= size[0] * size[1], \"number of images should be equal or 
less than size[0] * size[1] {}\".format(len(output_concat))\n\n new_output = merge(output_concat, size)\n\n new_output = new_output.astype('uint8')\n\n img = Image.fromarray(new_output, mode='YCbCr')\n img = img.convert('RGB')\n img.save(image_path)", "title": "" }, { "docid": "3c7b32e692b487acf3d2616978f95d34", "score": "0.47649014", "text": "def _process_img_rgb(self, sensor_data):\n img = np.array(sensor_data.raw_data).reshape((self.img_y, self.img_x, 4))\n img = img[:, :, :3] # sensor is actualy rgba, we dont need alpha values\n self.rgb = img # need to scale rgb values to be {0,1}", "title": "" }, { "docid": "4ffcc326190bb411a664d41ea06c71de", "score": "0.4761439", "text": "def rgb2yuv(r, g, b, mode='444'):\n r = 255 * r\n g = 255 * g\n b = 255 * b\n y = 00.257 * r + 0.504 * g + 0.098 * b + 16\n u = -0.148 * r - 0.291 * g + 0.439 * b + 128\n v = 00.439 * r - 0.368 * g - 0.071 * b + 128\n if mode == '420':\n y, u, v = YUV_change_mode(y, u, v, '444to420')\n return (y / 255), (u / 255), (v / 255)", "title": "" }, { "docid": "bc582cccaaa6a585cc3e1915bd250580", "score": "0.47579694", "text": "def is_green(self, pixel: tuple) -> bool:\r\n\r\n # If the pixel is more green than any other color,\r\n # Then we consider is a plant pixel\r\n if pixel[0] < pixel[1] and pixel[2] < pixel[1]:\r\n return True\r\n\r\n # The following rules are for fine adjusments\r\n # These tolerances may vary by photo\r\n if pixel[0] > self.max_brightness:\r\n return False\r\n\r\n if pixel[1] < self.min_brightness:\r\n return False\r\n \r\n if pixel[0] / pixel[1] > self.color_tolerance[0] / self.color_tolerance[1]:\r\n return False\r\n\r\n if pixel[2] / pixel[1] > self.color_tolerance[2] / self.color_tolerance[1]:\r\n return False\r\n \r\n return True", "title": "" }, { "docid": "0422594d9d3f7a0a4a27b93c197c6c66", "score": "0.4748769", "text": "def _is_color(cls, obj: Any) -> bool:\n\n return isinstance(obj, Color)", "title": "" }, { "docid": "995780fe3ca88b43e56f98dfeabf0ce6", "score": "0.4748008", "text": "def yuv2bgr(tens: Tensor) -> Tensor:\n if not _is_yuv_image(tens):\n raise ValueError(\n f\"Tensor of shape 3 expected. Found shape {len(tens.shape)}. \"\n \"This function converts an YUV Tensor to its BGR counterpart\"\n )\n\n img = cv.cvtColor(tens, YUV2BGR)\n return to_tensor(img, cspace=\"bgr\")", "title": "" }, { "docid": "8c2856b6beca64ce19f26984c5716b97", "score": "0.47475556", "text": "def is_red(self):\n return \"red\" == self.color", "title": "" }, { "docid": "7c48489d8afdde1d6ecf35955dda487b", "score": "0.4739161", "text": "def yuv2rgb(tens: Tensor) -> Tensor:\n if not _is_yuv_image(tens):\n raise ValueError(\n f\"Tensor of shape 3 expected. 
Found shape {len(tens.shape)}.\" \n \"This function converts a YUV Tensor to its RGB counterpart\"\n )\n\n img = cv.cvtColor(tens, YUV2RGB)\n return to_tensor(img, cspace=\"rgb\")", "title": "" }, { "docid": "df6684952d056faf8c1a9d5cc3df99f5", "score": "0.4732393", "text": "def has_data(self):\n if len(self.channels) > 0:\n return True\n return False", "title": "" }, { "docid": "f4fe47d4bd77e405574614d8a58f07f4", "score": "0.47215682", "text": "def test_check_y_casts_to_numerical(wage_X_y, wage_gam):\n X, y = wage_X_y\n y = y.astype('object')\n\n y = check_y(y, wage_gam.link, wage_gam.distribution)\n assert y.dtype == 'float'", "title": "" }, { "docid": "3a47ae99e7c1e9b9d75eaf4f835723a8", "score": "0.47191417", "text": "def IsPng( self, data ):\n return data.startswith(b'\\x89PNG\\r\\n\\x1a\\n')", "title": "" }, { "docid": "dcb269cf3b1d161d8e6b92eeb1e3bc42", "score": "0.47126216", "text": "def use_derived_RGB_to_XYZ_matrix(self):\n\n return self._use_derived_RGB_to_XYZ_matrix", "title": "" }, { "docid": "d44d00e0f54e30a125daadff93346ab5", "score": "0.47112802", "text": "def read_cliff(self, cliff):\n data = self._read_packet(cliff, Cliff.DATA_BYTES)\n\n if len(data) == Cliff.DATA_BYTES:\n byte = struct.unpack(\"B\", data)[0]\n\n return bool(byte)\n else:\n return False", "title": "" }, { "docid": "53dd98d5f810334eff85a458e6c2088c", "score": "0.4703688", "text": "def resize_y(self) -> bool:\n raise NotImplementedError", "title": "" }, { "docid": "00d7b0011cb844f5b3287fd9b9254c18", "score": "0.47007892", "text": "def rgb2yuv(image):\n return cv2.cvtColor(image, cv2.COLOR_RGB2YUV)", "title": "" }, { "docid": "5876f2e15bdd2599ca3f7308380a673f", "score": "0.46983474", "text": "def CPY(self, value):\n self._compare(value, 'Y')", "title": "" }, { "docid": "be2b42b93f617ae3c8bc60615cc05cdc", "score": "0.46910223", "text": "def is_grayscale(img):\n return len(img.shape) == GS", "title": "" }, { "docid": "6baf59c0553315f80684fc83e2e1b7c7", "score": "0.46895757", "text": "def RGB_to_RGB(RGB,\n input_colourspace,\n output_colourspace,\n chromatic_adaptation_transform='CAT02',\n apply_decoding_cctf=False,\n apply_encoding_cctf=False):\n\n if apply_decoding_cctf:\n RGB = input_colourspace.decoding_cctf(RGB)\n\n M = RGB_to_RGB_matrix(input_colourspace, output_colourspace,\n chromatic_adaptation_transform)\n\n RGB = dot_vector(M, RGB)\n\n if apply_encoding_cctf:\n RGB = output_colourspace.encoding_cctf(RGB)\n\n return RGB", "title": "" }, { "docid": "be37956c3f390194f9d0748758502c4e", "score": "0.4683863", "text": "def YUV_change_mode(y, u, v, direction='420to444'):\n if direction == '420to444':\n u = np.array([cv2.resize(ch, (u.shape[2] * 2, u.shape[1] * 2), interpolation=cv2.INTER_CUBIC) for ch in u])\n v = np.array([cv2.resize(ch, (v.shape[2] * 2, v.shape[1] * 2), interpolation=cv2.INTER_CUBIC) for ch in v])\n if direction == '444to420':\n u = np.array([cv2.resize(ch, (u.shape[2] // 2, u.shape[1] // 2), interpolation=cv2.INTER_CUBIC) for ch in u])\n v = np.array([cv2.resize(ch, (v.shape[2] // 2, v.shape[1] // 2), interpolation=cv2.INTER_CUBIC) for ch in v])\n return y, u, v", "title": "" }, { "docid": "ce636940d5f2ce806d6c26bd5f867724", "score": "0.4682372", "text": "def check_improvement_direction(self): # pragma: no cover\n good = self.good_rev.mean_value\n bad = self.bad_rev.mean_value\n\n if self.is_return_code_mode():\n if good == 1 or bad == 0:\n self._set_failed_return_code_direction_results()\n return False\n return True\n\n direction = self.improvement_direction\n if direction is None:\n 
return True\n if (bad > good and direction > 0) or (bad < good and direction < 0):\n self._set_failed_direction_results()\n return False\n return True", "title": "" }, { "docid": "989d0e4035f72bc4557526a562819e4c", "score": "0.46795657", "text": "def is_fit(self):\n if not hasattr(self, '_icc_imgs'):\n return False\n else:\n return self._icc_imgs is not None", "title": "" }, { "docid": "2cacd14b297802aefefbf05e14cae98c", "score": "0.46795437", "text": "def validate_shape_and_dtype(self):\n if self.rgb.dtype != tf.float32:\n raise ValueError(\"Expected float32 rgb!\")\n if len(self.rgb.shape) != 4:\n raise ValueError(f\"Expected (B, H, W, C), got {self.rgb.shape}\")\n _, _, _, channels = self.rgb.shape.as_list()\n if channels != 3:\n raise ValueError(f\"Expected 3 rgb channels, got shape {self.rgb.shape}\")", "title": "" }, { "docid": "cfccab143562e5949d148a22b4bcf1c5", "score": "0.4677608", "text": "def yuv_channels_to_bgr_image(y_channel, u_channel, v_channel):\n yuv_image = cv2.merge((y_channel.astype(np.float32), u_channel.astype(np.float32), v_channel.astype(np.float32)))\n bgr_image = cv2.cvtColor(yuv_image, cv2.COLOR_YUV2BGR)\n return bgr_image", "title": "" }, { "docid": "583dcd19fc098d190c4fb22626d40569", "score": "0.46767673", "text": "def converted(self):\n if self.converters:\n return self.converters.converted(self)\n return False", "title": "" }, { "docid": "2f6eabca792b271a3d94c13bfc261951", "score": "0.46758464", "text": "def check_for_different_colour(self, arg1_position, arg2):\n return (not self.check_position_free(arg1_position)) and (not self.check_for_same_colour(arg1_position, arg2))", "title": "" } ]
7a227b3036fd3185a0211dd6cc939054
Description of the minimization problem
[ { "docid": "6f296f455eb5d3f4b2aa91d2d01d97cb", "score": "0.0", "text": "def initialize(self, func, nvar, bounds, vdx=None):\n # Test the arguments\n error = validate(func, nvar, bounds, vdx, self.verbose)\n if error:\n if self.verbose:\n print( '--> newop Error!' )\n print( '\\tIncorrect arguments.' )\n return -1\n # Save all the information\n self.function = func\n self.nvar = nvar\n self.bounds = bounds\n self.vdx = vdx\n # Job done\n self.__ready = True\n return 0", "title": "" } ]
[ { "docid": "243623a1f048649b2fdb67811aa09a8b", "score": "0.6914253", "text": "def minimax():", "title": "" }, { "docid": "8184c2d3bb1b25cb043721112ca6771e", "score": "0.6489799", "text": "def _minimize(self):\n return 0", "title": "" }, { "docid": "09d46dc6dda570c22b9ac68d312bb253", "score": "0.63962525", "text": "def __objective2str(self, obj):\n strng = 'minimize f(x)'\n return strng", "title": "" }, { "docid": "e2195105aee7b2936c4b2b323933ac90", "score": "0.61767095", "text": "def minimizer_tool(self):\n x0 = np.ones(2)\n self.history.append(np.transpose(np.r_[x0, self.fun(x0)]))\n res = minimize(self.fun, x0, method=self.method, callback=self.callback)\n print('With method ' + self.method + \":\")\n print('solution found in ', res.x)\n print('with ' + str(res.nit) + \" iterations. \\n\")\n return res", "title": "" }, { "docid": "c12c9cebc3b46ae4b25e93d44fc98793", "score": "0.61495996", "text": "def CreateObjectiveFunction(self):\n\t\tfileobj = open(self.filename_model, \"a\")\n\t\tfileobj.write(\"Minimize\\n\")\n\t\teqn = []\n\t\tfor i in range(0,16):\n\t\t\tfor j in range(0,4):\n\t\t\t\teqn.append(\"x\" + \"_\" + str(self.Round) + \"_\" +str(3-j) + \"_\" + str(i))\n\t\ttemp = \" + \".join(eqn)\n\t\tfileobj.write(temp)\n\t\tfileobj.write(\"\\n\")\n\t\tfileobj.close()", "title": "" }, { "docid": "c893602a67d065773875d6df8d4b854d", "score": "0.6103025", "text": "def add_minimize(self, co, var):", "title": "" }, { "docid": "a259f326e5e5838f0ccaacdcef2d78a7", "score": "0.5974646", "text": "def _define_objective(self):\n\n # expected value of log joint distribution\n with tf.variable_scope('log_joint'):\n self.log_joint = self.gen_net.log_density(\n self.y_true, self.inf_net.post_z_samples)\n\n # entropy of approximate posterior\n with tf.variable_scope('entropy'):\n self.entropy = self.inf_net.entropy()\n\n # objective to minimize\n self.objective = -self.log_joint - self.entropy\n\n # save summaries\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('log_joint', self.log_joint)\n tf.summary.scalar('entropy', self.entropy)\n tf.summary.scalar('elbo', -self.objective)", "title": "" }, { "docid": "08b696e9c8d46d6e1a196e0f4466c1a6", "score": "0.5963381", "text": "def objective(self):\n return self._objective", "title": "" }, { "docid": "31585cc6bb17fea661349a9ddc4e9889", "score": "0.5893457", "text": "def constraints_help():\n meanings = {\"theta_1\":(\"contact tracing\", \\\n \"fraction of infected cases diagnosed and hospitalized\"), \\\n \"beta_H\":(\"PPE\", \"contact rate for hospitalized cases\"), \\\n \"delta_2\":(\"drug\",\"fatality rate of hospitalized patients\")}\n s = \"{0:<10}{1:<17}{2:<40}\\n\".format(\"Variable\", \"Intervention\", \"Effect\")\n s += \"-\"*60 + \"\\n\"\n for var in meanings.keys():\n s += \"{0:<10}{1:<17}{2:<40}\\n\".format(var, meanings[var][0], meanings[var][1])\n print s", "title": "" }, { "docid": "ea8f53feefc516e3f2fe09f79b55edfc", "score": "0.5882807", "text": "def minimizer(self):\n optimizer = tf.train.GradientDescentOptimizer(0.1)\n return optimizer.minimize(self.loss, global_step=self.global_step)", "title": "" }, { "docid": "08392eb7d2bf1fb1344a6fc64b36de01", "score": "0.5880562", "text": "def minimax(board):\n raise NotImplementedError", "title": "" }, { "docid": "08392eb7d2bf1fb1344a6fc64b36de01", "score": "0.5880562", "text": "def minimax(board):\n raise NotImplementedError", "title": "" }, { "docid": "23bfa2eac82d8be53051102efd5fd67d", "score": "0.5850164", "text": "def 
help_check_michaelis_menten_model_minimax_optimal_design(criteria,heteroscedastic=False):\n iprint=0\n num_design_pts = 30\n design_samples = np.linspace(1e-3,1,num_design_pts)\n #pred_samples = design_samples\n pred_samples = np.linspace(0,1,num_design_pts+10)\n if heteroscedastic:\n n=1\n link_function = lambda z: 1/z**n\n noise_multiplier = lambda p,x: link_function(\n michaelis_menten_model(p,x))\n else:\n noise_multiplier = None\n maxiter=int(1e3)\n\n # come of these quantities are not used by every criteria but\n # always computing these simplifies the test\n beta=0.75\n local_design_factors = \\\n lambda p,x: michaelis_menten_model_grad_parameters(p,x).T\n local_pred_factors = local_design_factors\n opts = {'beta':beta,'pred_factors':local_pred_factors,\n 'pred_samples':pred_samples[np.newaxis,:]}\n\n xx1 = np.linspace(0.9,1.1,3)[-1:]# theta_1 does not effect optimum\n xx2 = np.linspace(0.2,1,5)\n from pyapprox import cartesian_product\n parameter_samples = cartesian_product([xx1,xx2])\n x0 = None\n minimax_opt_problem = AlphabetOptimalDesign(\n criteria,local_design_factors,noise_multiplier,opts=opts)\n\n mu_minimax = minimax_opt_problem.solve_nonlinear_minimax(\n parameter_samples,design_samples[np.newaxis,:],\n {'iprint':iprint,'ftol':1e-12,'maxiter':maxiter})\n\n import copy\n opts = copy.deepcopy(opts)\n mu_local_list = []\n for ii in range(parameter_samples.shape[1]):\n pred_factors = local_design_factors(\n parameter_samples[:,ii],pred_samples[np.newaxis,:])\n opts['pred_factors']=pred_factors\n design_factors = local_design_factors(\n parameter_samples[:,ii],design_samples[np.newaxis,:])\n opt_problem = AlphabetOptimalDesign(\n criteria,design_factors,opts=opts)\n mu_local = opt_problem.solve(\n {'iprint':iprint,'ftol':1e-12,'maxiter':maxiter})\n mu_local_list.append(mu_local)\n\n constraints = minimax_opt_problem.minimax_nonlinear_constraints(\n parameter_samples,design_samples[np.newaxis,:])\n\n max_stat = []\n for mu in [mu_minimax] + mu_local_list:\n stats = []\n for ii in range(parameter_samples.shape[1]):\n # evaluate local design criterion f(mu)\n # constraint = t-f(mu) so f(mu)=t-constraint. Chooose any t,\n # i.e. 1\n stats.append(\n 1-constraints[ii].fun(np.concatenate([np.array([1]),mu])))\n stats = np.array(stats)\n max_stat.append(stats.max(axis=0))\n # check min max stat is obtained by minimax design\n # for d optimal design one local design will be optimal but because\n # of numerical precision it agrees only to 1e-6 with minimax design\n # so round answer and compare. 
argmin returns first instance of minimum\n print(max_stat)\n max_stat=np.round(max_stat,6)\n assert np.argmin(max_stat)==0", "title": "" }, { "docid": "99fee8f4de7514cefa156642b99dc85a", "score": "0.5833259", "text": "def suggested_parameter(self):\n # mew = -8.82407598543352156e-02 # by my fitting\n mew = -0.031244092599793216 # by paper\n return [3400.0, 3400.0, -0.437459534627, -0.46045114238, mew, 1.0]", "title": "" }, { "docid": "fb842bae985a34d51ca5f5f1ba97e510", "score": "0.5775511", "text": "def __ToMinimize(self, params):\n pass", "title": "" }, { "docid": "da90a8ff021d36fb38c39977d96d9a67", "score": "0.575803", "text": "def description(self):\n return \"Pass state variable(s) through hinge then apply dc/gain to pred\"", "title": "" }, { "docid": "9f243305e08c293f34608f4ce6202afc", "score": "0.57361484", "text": "def compute_solution(self):\n try:\n return self.do_gaussian_elimination_and_parametrization()\n\n except Exception as e:\n if (str(e) == self.NO_SOLUTIONS_MSG):\n return str(e)\n else:\n raise e", "title": "" }, { "docid": "7f299640affc7f06ab4c0e18de13a644", "score": "0.5706637", "text": "def info(self):\n\n print(clr.BOLD + clr.OKGREEN + 'Hessian-Free Optimizer initial settings:' +\\\n clr.ENDC)\n print(' CG delta decay: {}'.format(self.cg_decay))\n print(' Learning Rate: {}'.format(self.learning_rate))\n print(' Initial Tikhonov damping: {}'.format(self.damping))\n if self.adjust_damping:\n print(' Optimizer adjusts damping dynamically using ' +\\\n 'Levenberg-Marquardt heuristic.')\n else:\n print(' Tikhonov damping is static.')\n if self.use_gnm:\n print(' Optimizer uses Gauss-Newton matrix for cg computation.')\n else:\n print(' Optimizer uses Hessian matrix for cg computation.')\n if self.use_prec:\n print(' Optimizer uses preconditioner.')\n print(' Gap of delta loss tracking: {}'.format(self.gap))\n print(' Max cg iterations: {}'.format(self.cg_max_iters))\n print(clr.OKGREEN + 'Optimizer is ready for using.' +clr.ENDC)", "title": "" }, { "docid": "9d43fdcec2c3b1a110fd56c3273ee5c1", "score": "0.5705988", "text": "def optimization(self):\n optimizer = tf.train.GradientDescentOptimizer(self.config.learning_rate)\n return optimizer.minimize(self.cost)", "title": "" }, { "docid": "db77bb91e3ff66dab53688cc2f0b2789", "score": "0.5680917", "text": "def algorithmInfo():\n\t\treturn r\"\"\"Kennedy, J. and Eberhart, R. \"Particle Swarm Optimization\". Proceedings of IEEE International Conference on Neural Networks. IV. pp. 
1942--1948, 1995.\"\"\"", "title": "" }, { "docid": "ad66c2d41d47dba1a732d62cbc1ba812", "score": "0.5642035", "text": "def optimize(self):\r\n if self.verbosity > 0:\r\n if self.task.isSOO():\r\n logging.info('%s %d parameters using %s' % ('Minimizing', self.task.get_n_parameters(), self.name)) # TODO: minimizing/maximizing\r\n else:\r\n logging.info('Optimizing %d objective / %d parameters using %s' % (self.task.get_n_objectives(), self.task.get_n_parameters(), self.name))\r\n logging.info('Optimization started')\r\n\r\n self._startTime = timer()\r\n\r\n out = self._optimize()\r\n # TODO: assert size parameters\r\n # if self.task.get_n_parameters() == 1:\r\n # out = np.array(out).flatten()\r\n # assert out.shape == (self.task.get_n_parameters(),), 'Internal error: wrong dimension'\r\n # else:\r\n # assert out.shape[0] == self.task.get_n_parameters(), 'Internal error: wrong dimension'\r\n\r\n end = timer()\r\n logging.info('Optimization completed in %f[s]' % (end - self._startTime))\r\n logging.info('Optimization ended with flag: ') # TODO: self.stopCriteria\r\n\r\n return out", "title": "" }, { "docid": "3744f6ad262e08f66f973f28fb852974", "score": "0.5640801", "text": "def get_basic_problem_info(self):\n\n self.num_variables = int(input(\"Enter the amount of variables in the equation: \"))\n self.num_equations = int(input(\"Enter the amount of innequalities: \"))\n\n self.rows = self.num_equations+1\n self.cols = self.num_variables+self.rows+1", "title": "" }, { "docid": "f4e909eca1e7b98063043e33b4855a5b", "score": "0.56348544", "text": "def description(self):\n return \"Pass state variable(s) through sigmoid then apply dc/gain to pred\"", "title": "" }, { "docid": "f4e909eca1e7b98063043e33b4855a5b", "score": "0.56348544", "text": "def description(self):\n return \"Pass state variable(s) through sigmoid then apply dc/gain to pred\"", "title": "" }, { "docid": "f4e909eca1e7b98063043e33b4855a5b", "score": "0.56348544", "text": "def description(self):\n return \"Pass state variable(s) through sigmoid then apply dc/gain to pred\"", "title": "" }, { "docid": "4fd77b75e45c880be9c469e43bb0b948", "score": "0.5621771", "text": "def test_michaelis_menten_model_minimax_d_optimal_least_squares_design(self):\n num_design_pts = 7\n design_samples = np.linspace(0,1,num_design_pts)\n noise_multiplier = None\n\n local_design_factors = \\\n lambda p,x: michaelis_menten_model_grad_parameters(p,x).T\n xx1 = np.linspace(0.9,1.1,3)[-1:]# theta_1 does not effect optimum\n xx2 = np.linspace(0.2,1,5)\n from pyapprox import cartesian_product\n parameter_samples = cartesian_product([xx1,xx2])\n opt_problem = AlphabetOptimalDesign('D',local_design_factors)\n mu = opt_problem.solve_nonlinear_minimax(\n parameter_samples,design_samples[np.newaxis,:],\n {'iprint': 1, 'ftol':1e-8})\n I= np.where(mu>1e-5)[0]\n # given largest theta_2=1 then optimal design will be at 1/3,1\n #with masses=0.5\n assert np.allclose(I,[2,6])\n assert np.allclose(mu[I],np.ones(2)*0.5)", "title": "" }, { "docid": "a512b2ccbe8294c7983aee09aafda57c", "score": "0.559976", "text": "def visualize_solution(self) -> str:\n solution_assignments = []\n for x_i in self.x_is.values():\n solution_assignments.append(f\"{x_i.name}: \" f\"{self.model.getVal(x_i)}\")\n for layer_idx, r_i in enumerate(self.linear_inclusion.r_is, start=1):\n for neuron_idx, r_i_k in enumerate(r_i):\n solution_assignments.append(\n f\"inf r_{neuron_idx}^({layer_idx}): {r_i_k[0].inf}\"\n )\n solution_assignments.append(\n f\"sup r_{neuron_idx}^({layer_idx}): {r_i_k[0].sup}\"\n )\n 
solution_assignments.append(\"\\n\")\n return str(solution_assignments)", "title": "" }, { "docid": "3ea2598b431145181d01b226d6b8970f", "score": "0.5581309", "text": "def DisplayOptimization (self) :\n\t\twx.Yield()\n\t\t# abort, if requested \n\t\tif self.need_abort : return\n\t\n\t\tdef GetValueColourIter (d) :\n\t\t\treturn izip_longest( d.itervalues(), ['r', 'g', 'b', 'k'], fillvalue='y' )\n\t\n\t\tvisvis.cla(); visvis.clf()\n\t\tvisvis.subplot(211)\n\t\t\n\t\t# Plot optimization statistics\n\t\tfor values, colour in GetValueColourIter(self.optimization_log) :\n\t\t\ttry : visvis.plot ( values, lc=colour ) \n\t\t\texcept Exception : pass\n\t\t\n\t\tvisvis.xlabel ('iteration')\n\t\tvisvis.ylabel ('Objective function')\n\t\tvisvis.legend( self.optimization_log.keys() )\n\t\t\n\t\t# Display reference signal\n\t\tvisvis.subplot(212)\n\t\t\t\n\t\t# Plot reference signal\n\t\ttry : visvis.plot ( self.log_reference_signal ) \n\t\texcept Exception : pass\n\t\t\n\t\tvisvis.xlabel ('iteration')\n\t\tvisvis.ylabel (\"Signal from reference pulse\")", "title": "" }, { "docid": "a0f724c2c7a25b62a014d8d7ec87c87b", "score": "0.5580934", "text": "def __str__(self):\n s = \"solution at {}, rank {}\".format(self.d_vars, self.rank)\n return s", "title": "" }, { "docid": "ff9add3804a4222e2b6d2074151c3f6c", "score": "0.5549836", "text": "def visit_Minimize(self, x):\n return self._rewrite_body(x)", "title": "" }, { "docid": "b95eb22f560f75401d1d979da40c68c6", "score": "0.5543782", "text": "def get_objective_fn(self, data_X):\n\t\tdef _objective(param_value):\n\t\t\t## restore params shapes\n\t\t\tsparsity = self.sparsity\n\t\t\tl2_coeff = self.l2_coeff\n\t\t\tsparsity_coeff = self.sparsity_coeff\n\n\t\t\tself.params = param_value\n\t\t\tW1, W2, b1, b2 = self.restore_params()\n\t\t\t## hidden value Y\n\t\t\tY = sigmoid(np.dot(data_X, W1) + b1)\n\t\t\t## sparsity of hidden neuros\n\t\t\trhos = np.mean(Y, axis = 0)\n\t\t\t## reconstructed output\n\t\t\tZ = sigmoid(np.dot(Y, W2) + b2)\n\t\t\t## cost = likelihood + coeff * l2norm + coeff * sparsity_term\n\t\t\tlikelihood = np.mean(np.sum((data_X - Z) ** 2, axis = 1))\n\t\t\tl2norm = np.sum(W1**2) + np.sum(W2 ** 2)\n\t\t\tsparsity_term = np.sum(sparsity * np.log(sparsity / rhos)\n\t\t\t\t\t\t\t+ (1-sparsity) * np.log((1-sparsity) / (1-rhos)))\n\t\t\tcost = (likelihood \n\t\t\t\t\t+ l2_coeff * l2norm \n\t\t\t\t\t+ sparsity_coeff * sparsity_term)\n\t\t\treturn cost \n\n\t\treturn _objective", "title": "" }, { "docid": "38bd2afd04f6da3227d04b83a9bd6515", "score": "0.5526939", "text": "def objective(self):\n return self._objective", "title": "" }, { "docid": "087e5335e2c8314bf69d17369f3cb859", "score": "0.5516533", "text": "def __str__(self):\n return ('help: %s \\n'\n 'input: %s \\n'\n 'output: %s \\n'\n 'finish: %s \\n'\n 'minimize: %s \\n'\n 'insensitive: %s \\n'\n '__input_set: %s \\n'\n '__output_set: %s'\n )%(str(self.help), str(self.input), str(self.output), str(self.finish),\n str(self.minimize), str(self.insensitive), str(self.__input_set),\n str(self.__output_set))", "title": "" }, { "docid": "db04d800c6b83e1b530a5a149b3ea0c9", "score": "0.5503328", "text": "def Problem7():", "title": "" }, { "docid": "d40b9c2c5f7dbcf218f85b0146f508d8", "score": "0.5499087", "text": "def __str__(self):\n strng = '\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n'\n strng += 'Problem: %s\\n\\n' % self.name\n\n strng += 'Problem Definition (%i design variables):\\n' % \\\n (len(self.definition))\n for (var, defn) in self.definition.iteritems():\n strng += '\\t%s %s\\n' % 
(var, self.__definition2str(defn))\n\n strng += 'Objective(s) (%i total):\\n' % len(self.objective)\n for obj in self.objective:\n strng += '\\t%s\\n' % self.__objective2str(obj)\n\n strng += 'Constraint (%i inequality and %i equality)\\n' % \\\n (len(self.ineq), len(self.eq))\n for const in self.ineq:\n strng += '\\t%s\\n' % self.__const2str(const, '<=')\n\n strng += 'Maximum Generation Size = %i\\n' % self.max_gen_size\n strng += 'Starting Generation:\\n'\n for chromosome in self.starting_gen:\n strng += '\\t%s\\n' % self.__chromosome2str(chromosome)\n\n strng += '\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n'\n return strng", "title": "" }, { "docid": "4bb4a67cc168353d3d4e3662fb8efe9a", "score": "0.5494766", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "title": "" }, { "docid": "4bb4a67cc168353d3d4e3662fb8efe9a", "score": "0.5494766", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "title": "" }, { "docid": "4bb4a67cc168353d3d4e3662fb8efe9a", "score": "0.5494766", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "title": "" }, { "docid": "2cf69cbda19609dc5eead6e864538e67", "score": "0.5494268", "text": "def minimiser(self):\n if self.check_min:\n self._min = all(self.f < v.f for v in self.nn)\n self.check_min = False\n\n return self._min", "title": "" }, { "docid": "eedcf90e19ead610bb9647bfed15609d", "score": "0.54916495", "text": "def description(self):\n return \"Save pred signal at the current stage of model fitting to be used later by another module e.g. fit_ccnorm uses pred0\"", "title": "" }, { "docid": "a9c79442ad691369b3a79b2496681ec2", "score": "0.5486862", "text": "def brute_force_solver(Q, verbosity = False):\n # The graph associated to the QUBO matrix\n G = nx.from_numpy_matrix(Q)\n # Initialize best cost function value\n best_cost_brute = 0 \n # Get matrix shape\n n = Q.shape[0]\n # computing all possible combinations\n # initialize output\n xbest_brute = []\n # store all the eigenvalues to get mean and std-dev\n eigenvalues = np.array([])\n for b in range(2**n):\n # x stores all the 2^n possible combinations of 0 and 1\n # for a vector of length n \n x = [int(t) for t in reversed(list(bin(b)[2:].zfill(n)))]\n # initialize cost function value\n cost = 0\n # scan all possible costs and keep the highest one\n # (now we want to maximize our score!)\n for i in range(n):\n for j in range(n):\n cost = cost + Q[i,j]*x[i]*(1 - x[j])\n # store the cost function value as a eigenvalue\n eigenvalues = np.append(eigenvalues, cost)\n if cost > best_cost_brute:\n xbest_brute = [x] \n best_cost_brute = cost\n elif cost == best_cost_brute:\n xbest_brute.append(x) \n \n # Showing results \n if verbosity == True: \n colors = ['r' if xbest_brute[0][i] == 0 else 'b' for i in range(n)]\n nx.draw_networkx(G, node_color = colors)\n print('\\nBest solution = ' + str(xbest_brute) + ' cost = ' + str(best_cost_brute)) \n \n # Transform the solution in a list of strings\n for res in range(len(xbest_brute)):\n xbest_brute[res] = ''.join(map(str, xbest_brute[res]))\n\n return xbest_brute, best_cost_brute, eigenvalues", "title": "" }, { "docid": "a956e077bb15f9a16fef07daaa086ce0", "score": "0.54766816", "text": "def minimize(self):\n nreps = self.nrep\n nbins = self.nebins\n visitsT = (self.visits1d)\n #print \"min vis\", np.min(visitsT)\n #print \"minlogp\", np.min(self.logP)\n self.reduced_energy = self.binenergy[np.newaxis,:] / 
(self.Tlist[:,np.newaxis] * self.k_B)\n \n self.whampot = WhamPotential(visitsT, self.reduced_energy)\n \n if False:\n X = np.random.rand( nreps + nbins )\n else:\n # estimate an initial guess for the offsets and density of states\n # so the minimizer converges more rapidly\n offsets_estimate, log_dos_estimate = wham_utils.estimate_dos(self.visits1d,\n self.reduced_energy)\n X = np.concatenate((offsets_estimate, log_dos_estimate))\n\n E0, grad = self.whampot.getEnergyGradient(X)\n rms0 = np.linalg.norm(grad) / np.sqrt(grad.size)\n \n try:\n from pele.optimize import lbfgs_cpp as quench\n if self.verbose:\n print \"minimizing with pele lbfgs\"\n ret = quench(X, self.whampot, tol=1e-3, maxstep=1e4, nsteps=10000)\n except ImportError:\n from wham_utils import lbfgs_scipy\n if self.verbose:\n print \"minimizing with scipy lbfgs\"\n ret = lbfgs_scipy(X, self.whampot, tol=1e-3, nsteps=10000)\n #print \"quench energy\", ret.energy\n \n if self.verbose:\n print \"chi^2 went from %g (rms %g) to %g (rms %g) in %d iterations\" % (\n E0, rms0, ret.energy, ret.rms, ret.nfev)\n \n X = ret.coords\n self.logn_E = X[nreps:]\n self.w_i_final = X[:nreps]", "title": "" }, { "docid": "b67b105ff973abd1a6b3c8514bedb294", "score": "0.54689246", "text": "def minimize_me(args):\n args_dict = {a: args[i] for i, a in enumerate(names)}\n # return -ln_lklh_func(**args_dict) - ln_prior(**args_dict)\n return -ln_posterior(**args_dict)", "title": "" }, { "docid": "98d0720f493c899d0a59a90ab02aa9a4", "score": "0.5460892", "text": "def objective(args):\n # TODO Parse out the args if needed\n val = args.get(\"name\", None)\n\n # TODO Solve the thing here\n solution = []\n\n for I in inter_list:\n num_lights = len(I.best_times)\n street_names = [s[2] for s in I.streets]\n line_to_append = [I.id, num_lights]\n idxs = [i for i in range(len(street_names))]\n sorted_idx = [x for x, _ in sorted(zip(idxs, I.best_times), key=lambda pair: pair[1], reverse=True)]\n for i in sorted_idx:\n name = street_names[i]\n time_val = I.best_times[i]\n line_to_append.append((name, time_val))\n solution.append(line_to_append)\n\n score = 0\n\n # Return something flexible that can be used with hyperopt\n # Main point is that it has score and solution.\n return {\n \"loss\": -score,\n \"score\": score,\n \"solution\": solution,\n \"eval_time\": time.time(),\n \"status\": STATUS_OK,\n }", "title": "" }, { "docid": "e8b16b8d3c9751f70ce08ce92102ce02", "score": "0.54578537", "text": "def optimize(self):\n raise NotImplementedError", "title": "" }, { "docid": "e8b16b8d3c9751f70ce08ce92102ce02", "score": "0.54578537", "text": "def optimize(self):\n raise NotImplementedError", "title": "" }, { "docid": "876c67e063a1e0723264233ed4c1c2a2", "score": "0.54428333", "text": "def maximize(self):\r\n self.G = np.reciprocal(self.G.astype(np.float))\r\n # self.G0 = np.copy(self.G)\r\n self.useMax = True", "title": "" }, { "docid": "6cd876fe9e66353e86133810fc8f3ffd", "score": "0.54405093", "text": "def algorithm(self):\n return \"Non-Parametric\"", "title": "" }, { "docid": "4852fd2d395f7e2b0aae4afd56c41371", "score": "0.5440274", "text": "def __init__(self):\n names = [\"Culture\", \"Criticality\", \"Criticality Modifier\", \"Initial Known\",\n \"Inter-Dependency\", \"Dynamism\", \"Size\", \"Plan\", \"Team Size\", \"Signals\", \"Price\"]\n lows = [0.1, 0.82, 2, 0.40, 1, 1, 0, 0, 1, 0, 0]\n highs = [0.9, 1.20, 10, 0.70, 100, 50, 4, 5, 44, 63, 100000]\n #highs = [0.9, 1.20, 10, 0.70, 100, 50, 4, 5, 44, 4]\n decisions = [Decision(n, l, h) for n, l, h in zip(names, lows, 
highs)]\n objectives = [Objective(\"Cost\", True, 0, 10000), Objective(\"Value\", False, 0, 10000), Objective(\"Score\", False, 0, 1),\n Objective(\"Completion\", False, 0, 1), Objective(\"Idle\", True, 0, 1), Objective(\"Profit\", False, 0, 1000000)]\n Problem.__init__(self, decisions, objectives)", "title": "" }, { "docid": "46cc673a2d64fe19c3f9564b06529eae", "score": "0.5433857", "text": "def info(optimal_sequence, acc_prob, backtracking):\n print(\"Optimal state sequence: S = \", map_state_path(optimal_sequence))\n # print(\"Accumulated Prob. Matrix =\", acc_prob, sep=\"\\n\")\n # print(\"Backtracking Matrix =\", backtracking, sep=\"\\n\")", "title": "" }, { "docid": "1f6a4a90673c8009a4ecbdbafe84b793", "score": "0.54247636", "text": "def get_optimal_parameters(self): \n raise NotImplementedError", "title": "" }, { "docid": "1994a35dd1723f1f9a694a0a5f5a1099", "score": "0.54241216", "text": "def note(self):\n print('chapter29.1 note as follow')\n print('第29章 线性规划')\n print('在给定有限的资源和竞争约束情况下,很多问题都可以表述为最大化或最小化某个目标')\n print('如果可以把目标指定为某些变量的一个线性函数,而且如果可以将资源的约束指定为这些变量的等式或不等式',\n '则得到一个线性规划问题.线性规划出现在许多世纪应用中')\n print('比如如下线性规划问题')\n print('argmin(x1+x2+x3+x4)')\n print('满足约束条件')\n print('-2 * x1 + 8 * x2 + 0 * x3 + 10 * x4 >= 50')\n print(' 5 * x1 + 2 * x2 + 0 * x3 + 0 * x4 >= 100')\n print(' 3 * x1 - 5 * x2 + 10 * x3 - 2 * x4 >= 25')\n print(' x1 >= 0; x2 >= 0; x3 >= 0; x4 >= 0')\n print('一般线性规划')\n print(' 在一般线性规划的问题中,希望最优化一个满足一组线性不等式约束的线性函数。',\n '已知一组实数a1,a2,...,an和一组变量x1,x2,...,xn,在这些变量上的一个线性函数f定义为:',\n 'f(x1,x2,...,xn)=a1x1+a2x2+...+anxn')\n print(' 如果b是一个实数而f是一个线性函数,则等式f(x1,x2,...,xn)=b是一个线性等式')\n print(' 不等式f(x1,x2,...,xn)<=b和f(x1,x2,...,xn)>=b都是线性不等式')\n print('用线性约束来表示线性等式或线性不等式')\n print('在线性规划中,不允许严格的不等式')\n print('正式地说,线性规划问题是这样的一种问题,要最小化或最大化一个受限一组有限的线性约束的线性函数')\n print('如果是要最小化,则称此线性规划为最小化线性规划;如果是要最大化,则称此线性规划为最大化线性规划')\n print('虽然有一些线性规划的多项式时间算法。但是单纯形法是最古老的线性规划算法.',\n '单纯形算法在最坏的情况下不是在多项式时间内运行,但是相当有效,而且在实际中被广泛使用')\n print('比如双变量的线性规划直接在笛卡尔直角坐标系中表示出可行域和目标函数曲线即可')\n print('线性规划概述')\n print(' 非正式地,在标准型中的线性规划是约束为线性不等式的线性函数的最大化',\n '而松弛型的线性规划是约束为线性等式的线性函数的最大化')\n print(' 通常使用标准型来表示线性规划,但当描述单纯形算法的细节时,使用松弛形式会比较方便')\n print('受m个线性不等式约束的n个变量上的线性函数的最大化')\n print('如果有n个变量,每个约束定义了n维空间中的一个半空间.这些半空间的交集形成的可行区域称作单纯形')\n print('目标函数现在成为一个超平面,而且因为它的凸性,故仍然有一个最优解在单纯形的一个顶点上取得的')\n print('单纯形算法以一个线性规划作为输入,输出它的一个最优解.从单纯形的某个顶点开始,执行一系列的迭代',\n '在每次迭代中,它沿着单纯形的一条边从当前定点移动到一个目标值不小于(通常是大于)当前顶点的相邻顶点',\n '当达到一个局部的最大值,即一个顶点的目标值大于其所有相邻顶点的目标值时,单纯形算法终止.')\n print('因为可行区域是凸的而且目标函数是线性的,所以局部最优事实上是全局最优的')\n print('将使用一个称作\\\"对偶性\\\"的概念来说明单纯形法算法输出的解的确是最优的')\n print('虽然几何观察给出了单纯形算法操作过程的一个很好的直观观察',\n '但是在讨论单纯形算法的细节时,并不显式地引用它.相反地,采用一种代数方法,首先将已知的线性规划写成松弛型,即线性等式的集合',\n '这些线性等式将表示某些变量,称作\\\"基本变量\\\",而其他变量称作\\\"非基本变量\\\".从一个顶点移动到另一个顶点伴随着将一个基本变量',\n '变为非基本变量,以及将一个非基本变量变为基本变量.',\n '这个操作称作一个\\\"主元\\\",而且从代数的观点来看,只不过是将线性规划重写成等价的松弛型而已')\n print('识别无解的线性规划,没有有限最优解的线性规划,以及原点不是可行解的线性规划 ')\n print('线性规划的应用')\n print(' 线性规划有大量的应用。任何一本运筹学的教科书上都充满了线性规划的例子')\n print(' 线性规划在建模和求解图和组合问题时也很有用,可以将一些图和网络流问题形式化为线性规划')\n print(' 还可以利用线性规划作为工具,来找出另一个图问题的近似解')\n print('线性规划算法')\n print(' 当单纯形法被精心实现时,在实际中通常能够快速地解决一般的线性规划',\n '然而对于某些刻意仔细设计的输入,单纯形法可能需要指数时间')\n print(' 线性规划的第一个多项式时间算法是椭圆算法,在实际中运行缓慢')\n print(' 第二类指数时间的算法称为内点法,与单纯形算法(即沿着可行区域的外部移动,并在每次迭代中维护一个为单纯形的顶点的可行解)相比',\n '这些算法在可行区域的内部移动.中间解尽管是可行的,但未必是单纯形的顶点,但最终的解是一个顶点')\n print(' 对于大型输入,内点法的性能可与单纯形算法相媲美,有时甚至更快')\n print(' 仅找出整数线性规划这个问题的一个可行解就是NP-难度的;因为还没有已知的多项式时间的算法能解NP-难度问题')\n print(' 所以没有已知的整数线性规划的多项式算法.相反地,一般的线性规划问题可以在多项式时间内求解')\n print(' 
定义线性规划其变量为x=(x1,x2,...,xn),希望引用这些变量的一个特定设定,将使用记号x`=(x1`,x2`,...,xn`)')\n print('29.1 标准型和松弛型')\n print(' 在标准型中的所有约束都是不等式,而在松弛型中的约束都是等式')\n print('标准型')\n print(' 已知n个实数c1,c2,...,cn;m个实数b1,b2,...,bm;以及mn个实数aij,其中i=1,2,...,m,而j=1,2,...,n',\n '希望找出n个实数x1,x2,...,xn来最大化目标函数∑cjxj,满足约束∑aijxj<=bi,i=1,2,...,m;xj>=0',\n 'n+m个不等式约束,n个非负性约束')\n print(' 一个任意的线性规划需要有非负性约束,但是标准型需要,有时将一个线性规划表示成一个更紧凑的形式会比较方便')\n print(' 如果构造一个m*n矩阵A=(aij),一个m维的向量b=(bi),一个n维的向量c=(cj),以及一个n维的向量x=(xj)',\n '最大化c^Tx,满足约束Ax<=b,x>=0')\n print(' c^Tx是两个向量的内积,Ax是一个矩阵向量乘积,x>=0表示向量x的每个元素都必须是非负的')\n print(' 称满足所有约束的变量x`的设定为可行解,而不满足至少一个约束的变量x`的设定为不可行解')\n print(' 称一个解x`拥有目标值c^T.在所有可行解中其目标值最大的一个可行解x`是一个最优解,且称其目标值c^Tx`为最优目标值')\n print(' 如果一个线性规划没有可行解,则称此线性规划不可行;否则它是可行的')\n print(' 如果一个线性规划有一些可行解但没有有限的最优目标值,则称此线性规划是无界的')\n print('将线性规划转换为标准型')\n print(' 已知一个最小化或最大化的线性函数受若干线性约束,总可以将这个线性规划转换为标准型')\n print(' 一个线性规划可能由于如下4个原因而不是标准型')\n print(' (1) 目标函数可能是一个最小化,而不是最大化')\n print(' (2) 可能有的变量不具有非负性约束')\n print(' (3) 可能有等式约束,即有一个等号而不是小于等于号')\n print(' (4) 可能有不等式约束,但不是小于等于号,而是一个大于等于号')\n print('当把一个线性规划L转化为另一个线性规划L\\'时,希望有性质:从L\\'的最优解能得到L的最优解.为解释这个思想,',\n '说两个最大化线性规划L和L\\'是等价的')\n print('将一个最小化线性规划L转换成一个等价的最大化线性规划L\\',简单地对目标函数中的系数取负值即可')\n print('因为当且仅当x>=y和x<=y时x=y,所以可以将线性规划中的等式约束用一对不等式约束来替代')\n print('在每个等式约束上重复这个替换,就得到全是不等式约束的线性规划')\n print('将线性规划转换为松弛型')\n print(' 为了利用单纯形算法高效地求解线性规划,通常将它表示成其中某些约束是等式的形式')\n print(' ∑aijxj <= bi是一个不等式约束,引入一个新的松弛变量s,重写不等式约束')\n print(' s = bi - ∑aijxj; s >= 0')\n print(' s度量了等式左边和右边之间的松弛或差别.因为当且仅当等式和不等式都为真时不等式为真')\n print(' 所以可以对线性规划的每个不等式约束应用这个转换,得到一个等价的线性规划,其中只有不等式是非负约束')\n print(' 当从标准型转换到松弛型时,将使用xn+i(而不是s)来表示与第i个不等式关联的松弛变量')\n print(' 因此第i个约束是xn+i = bi - ∑aijxj 以及非负约束xn+i >= 0')\n print('练习29.1-1 线性规划表示简洁记号形式,n,m,A,b分别是什么')\n print('练习29.1-2 给出题目线性规划的3个可行解,每个解的目标值是多少')\n print('练习29.1-3 线性规划转换为松弛型后,N、B、A、b、c和v分别是什么')\n print('练习29.1-4 线性规划转换为标准型')\n print('练习29.1-5 线性规划转换为松弛型')\n print('练习29.1-6 说明下列线性规划不可行')\n print('练习29.1-7 说明下列线性规划是无界的')\n print('练习29.1-8 假设有一个n个变量和m个约束的线性规划,且假设将其转换为成标准型',\n '请给出所得线性规划中变量和约束个数的一个上界')\n print('练习29.1-9 请给出一个线性规划的例子,其中可行区域是无界的,但最优解的值是有界的')\n # python src/chapter29/chapter29note.py\n # python3 src/chapter29/chapter29note.py", "title": "" }, { "docid": "900574a7aa3a57c3f78089533ad9dd06", "score": "0.5412338", "text": "def solve(self) -> None:\n self.model.optimize()", "title": "" }, { "docid": "c3127e832266ff9857734e5742ac64ac", "score": "0.5412075", "text": "def solve(self, **kwargs):\n self._model.solve(solver=self._get_solver(**SOLVER_OPTIONS, **kwargs))\n print(\"Status:\", LpStatus[self._model.status])\n print(\"Optimal Value of Objective Function: \", value(self._model.objective))", "title": "" }, { "docid": "e24e406d179b4385a43995078425419e", "score": "0.540793", "text": "def target_objective(self, final_w, final_b, labels):", "title": "" }, { "docid": "6a61d0dbb9ae3f83e660fe41cf6b8f0d", "score": "0.54039305", "text": "def op_minimization(self):\n self.execute_operation('minimization')", "title": "" }, { "docid": "03b31e25618bc278b146c2d334305b18", "score": "0.53970236", "text": "def ex1():\n # the problem matrix\n matrix = [[81, 19, 0], [80, 0, 20]]\n # let the function solve the problem\n solve_problem(matrix)", "title": "" }, { "docid": "fed44e6c259fc784618f21c6d82f449c", "score": "0.5384471", "text": "def get_global_optimum(self):", "title": "" }, { "docid": "b15e3b95c5892b842299bb74cad6ecef", "score": "0.5373609", "text": "def __str__(self):\n header = [\n ' ProblemParameters:']\n header += [('Lower Bounds: {}').format(self.lb)]\n header += [('Upper Bounds: 
{}').format(self.ub)]\n header += [('Variable Types: {}').format(self.varType)]\n header += [('Continuous ID Vector: {}').format(self.cID)]\n header += [('Integer ID Vector: {}').format(self.iID)]\n header += [('Discrete ID Vector: {}').format(self.dID)]\n header += [('Combinatorial ID Vector: {}').format(self.xID)]\n if len(self.discreteVals) > 1:\n if self.discreteVals[0] == self.discreteVals[1]:\n header += [\n ('Discrete Values (only printing elem 1 of {}): {}').format(len(self.discreteVals[0]), self.discreteVals[0])]\n else:\n header += [('Discrete Values: {} ').format(self.discreteVals)]\n header += [('Global Optimum: {}').format(self.optimum)]\n header += [('Plot Title: {}').format(self.pltTitle)]\n header += [('Histogram Title: {}').format(self.histTitle)]\n header += [('Variable Names: {}').format(self.varNames)]\n header += [('{}').format(self.objective)]\n for con in self.constraints:\n header += [('{}').format(con)]\n\n return ('\\n').join(header) + '\\n'", "title": "" }, { "docid": "b51e60d8a99435f6404433cd049e2db2", "score": "0.53730005", "text": "def description(self):\n return \"Pass state variable(s) through sigmoid. Return new state_mod signal\"", "title": "" }, { "docid": "b1707bfad7bcbbf3a7046d73a7f37274", "score": "0.5371532", "text": "def title(self):\n\n\n if self.name is not None:\n return \"Training '{name}': {auto} ({it} it, {tmin:.1f} min)\".format(name=self.name, auto=str(self), it=self.optit, tmin=np.sum(self.optittimes)/60.0)\n else:\n return \"{auto} ({it} it, {tmin:.1f} min)\".format(auto=str(self), it=self.optit, tmin=np.sum(self.optittimes)/60.0)", "title": "" }, { "docid": "6b51700c45815ad0fc9260a78eb29b86", "score": "0.53682464", "text": "def objective(self):\n fixed_charge_rate = self.fixed_charge_rate\n n_turbines = self.nturbs\n system_capacity = self.capacity\n aep = self.aep\n capital_cost = self.capital_cost\n fixed_operating_cost = self.fixed_operating_cost\n variable_operating_cost = self.variable_operating_cost\n return eval(self.objective_function, globals(), locals())", "title": "" }, { "docid": "6787cb25df2753f77dfb734b2983f75d", "score": "0.53660065", "text": "def probMoo(self):\n\t\t# Alternate second objective\n\t\t# Compile objectives\n\t\tmeanFit = np.asarray([ind.fitness for ind in self.pop])\n\t\tnConns = np.asarray([ind.nConn for ind in self.pop])\n\t\tnConns[nConns==0] = 1 # No conns is always pareto optimal (but boring)\n\t\tobjVals = np.c_[meanFit, 1/nConns] # Maximize\n\t\trank = nsga_sort(objVals)\n\n\t\t# Assign ranks\n\t\tfor i in range(len(self.pop)):\n\t\t\tself.pop[i].rank = rank[i]", "title": "" }, { "docid": "9c82c8a22266367aa915a84567f08ac6", "score": "0.53564364", "text": "def solution_str(self):\n strng = '\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n'\n strng += 'Best fitness found: '\n strng += colored('%.4f\\n' % self.solution['fitness'], 'green')\n strng += 'Best solution: '\n strng += colored('%s\\n' %\n self.__chromosome2str(self.solution['chromosome']),\n 'green')\n strng += 'Objective Values:\\n'\n for j in range(len(self.objective)):\n i = j + 1\n strng += '\\tf%s(x) = %.4f\\n' % (i, self.solution['f%i' % i])\n\n strng += '\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n'\n return strng", "title": "" }, { "docid": "af947736891d6cafaf390a990e2f7c6e", "score": "0.5300955", "text": "def prob7():\n raise NotImplementedError(\"Problem 7 Incomplete\")", "title": "" }, { "docid": "af947736891d6cafaf390a990e2f7c6e", "score": "0.5300955", "text": "def prob7():\n raise NotImplementedError(\"Problem 7 Incomplete\")", 
"title": "" }, { "docid": "283aba740e378f349fc275d0a6ed0e4f", "score": "0.5297312", "text": "def __str__(self):\n \n return(\"Linear fit of a dataset of {0} observations, using a linear model with {1} regressors\".format(self._nObservations, self._nParameters))", "title": "" }, { "docid": "228c2aa6fba9d7acf98542b52dbeec7e", "score": "0.5296365", "text": "def probMoo(self):\n\t\t# Alternate second objective\n\t\t# Compile objectives\n\t\tmaxFit = np.asarray([ind.fitMax for ind in self.pop])\n\t\tnConns = np.asarray([ind.nConn for ind in self.pop])\n\t\tnConns[nConns==0] = 1 # No conns is always pareto optimal (but boring)\n\t\tobjVals = np.c_[maxFit, 1/nConns] # Maximize\n\t\trank = nsga_sort(objVals)\n\n\t\t# Assign ranks\n\t\tfor i in range(len(self.pop)):\n\t\t\tself.pop[i].rank = rank[i]", "title": "" }, { "docid": "96ca8f24b74f520cdb40c3a769a1a6b6", "score": "0.5295307", "text": "def minimize(self,compute_hesse=True):\n\n if self.verbose: print('will run migrad')\n self.minimizer.migrad()\n \n if compute_hesse:\n if self.verbose: print('will compute Hessian matrix')\n self.minimizer.hesse()", "title": "" }, { "docid": "2528271422a7c2f5404948f1f96887e1", "score": "0.52916265", "text": "def minimise(cost_function, initial_guess, algorithm, dtype=None, **options):\n return _minimise(cost_function, initial_guess, algorithm, dtype, maximising=False, **options)", "title": "" }, { "docid": "e3ece5b7c08034e64c62c90788b7ddb0", "score": "0.52904737", "text": "def objective(self):\n self.nfc[self.nG] = self.nfc[self.nG] + 1\n return self._objective", "title": "" }, { "docid": "692dfc66a624ce4945096fc5458b69d6", "score": "0.5281737", "text": "def problem():\n pass", "title": "" }, { "docid": "2de6d75028564535cc84393878abc459", "score": "0.5275921", "text": "def maximize(self):\n return self._maximize", "title": "" }, { "docid": "809e0abeee5a7eb43df0a26a07fd5c1e", "score": "0.5274907", "text": "def _compute_OPT(self) -> float:\n solution_set: Set[E] = self.rs_optimizer.optimize()\n\n return self.objective_function.evaluate(solution_set)", "title": "" }, { "docid": "12023fd0a1ef9a824c1b014c504dd741", "score": "0.5272885", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n return aStarSearch(problem)", "title": "" }, { "docid": "406881a72f54daf09ec1ee45cb081100", "score": "0.5268944", "text": "def __init__(self, N, M, K):\n self.N = N\n self.M = M\n self.K = K\n self.isPenalty = False", "title": "" }, { "docid": "fa3f6489f512954cd972722c38a1668d", "score": "0.5268142", "text": "def __init__(self, objective=None, constraints=[], lowerBounds=[], upperBounds=[], varType=[], discreteVals=[], optimum=0.0, pltTitle='', histTitle='', varNames=['']):\n if isinstance(objective, list):\n self.numObjectiveFunctions = len(objective)\n self.isFunctionList = 1\n else:\n self.numObjectiveFunctions = 1\n self.isFunctionList = 0\n if self.numObjectiveFunctions == 1:\n if self.isFunctionList == 1:\n self.objective = objective[1]\n else:\n self.objective = objective\n else:\n self.objective = objective\n if type(constraints) != list:\n self.constraints = [\n constraints]\n else:\n self.constraints = constraints\n self.lb = lowerBounds\n self.ub = upperBounds\n self.varType = varType\n self.discreteVals = discreteVals\n self.optimum = optimum\n self.pltTitle = pltTitle\n self.histTitle = histTitle\n self.varNames = varNames\n if len(self.lb) and len(self.ub) and len(self.varType) != 0 or len(self.discreteVals) and len(varType) != 0:\n self.sanitize_inputs()\n self.cID = []\n self.iID = []\n self.dID 
= []\n self.xID = []\n for var in range(len(self.varType)):\n if 'c' in self.varType[var]:\n self.cID.append(1)\n else:\n self.cID.append(0)\n if 'i' in self.varType[var]:\n self.iID.append(1)\n else:\n self.iID.append(0)\n if 'd' in self.varType[var]:\n self.dID.append(1)\n else:\n self.dID.append(0)\n if 'x' in self.varType[var]:\n self.xID.append(1)\n else:\n self.xID.append(0)\n\n self.cID = np.array(self.cID)\n self.iID = np.array(self.iID)\n self.dID = np.array(self.dID)\n self.xID = np.array(self.xID)", "title": "" }, { "docid": "bc9328fdcfdcd19a18c107a740563484", "score": "0.526258", "text": "def set_optim(self):\n raise Exception('optimizer not defined')", "title": "" }, { "docid": "1f60d96344a3980e11921e5c45b778ce", "score": "0.52609783", "text": "def optimize(self):\n # Initialize optimization variables.\n n_iter = iter_since_last_minimum = 0\n min_solution = current_solution = self.initial_state()\n min_score = self.objective(current_solution)\n\n self.local_minima_found = 0\n\n while n_iter < self.MAX_ITER:\n n_iter += 1\n iter_since_last_minimum += 1\n\n best_score, best_move = self.get_best_score_move(\n current_solution, min_score\n )\n if best_score >= min_score:\n # The best score is potentially better than the current\n # solution, but not better than the known-best solution.\n self.handle_non_optimal_best(best_move, current_solution, best_score)\n if best_score == float(\"inf\"):\n self.handle_constraint_failure(best_score, best_move)\n\n else:\n iter_since_last_minimum = 0\n self.local_minima_found += 1\n # Record the current best solution.\n min_score = best_score\n min_solution = best_move\n\n if iter_since_last_minimum >= self.MAX_SINCE_MINIMUM:\n try:\n current_solution = self.restart()\n iter_since_last_minimum = 0\n except StopIteration:\n break\n else:\n current_solution = best_move\n\n self._manage_tabu_data()\n\n log.debug(\n \"{} minima found on state of size {}\".format(\n self.local_minima_found, len(current_solution)\n )\n )\n return min_score, min_solution", "title": "" }, { "docid": "be60e0a33dffc984b693ce14e207fef0", "score": "0.5259243", "text": "def desc(self):\n print('Validation method description')", "title": "" }, { "docid": "47f1149cece5dea945cc0f872935e5dc", "score": "0.5257909", "text": "def info(self):\n return \"\"\"{} tuning, low to high = {}\"\"\".format(self._tuning, self._notes)", "title": "" }, { "docid": "ea9f4c0833f9d59e77da941a630725e2", "score": "0.5253722", "text": "def optimization_model(self) -> str:\n return self._optimization_model", "title": "" }, { "docid": "e79dc7feebbdd1fd5beb2d7e3cc14df1", "score": "0.52459526", "text": "def _add_objective(self) -> None:\n label = compute_values_label(\n self.linear_inclusion.uncertain_inputs,\n self.linear_inclusion.activation,\n self.linear_inclusion.nn_params,\n )\n for neuron_idx in range(len(self.linear_inclusion.theta_is[-1])):\n if neuron_idx != label:\n self.model.setObjective(\n self.x_is[len(self.linear_inclusion.theta_is) - 1, label]\n - self.x_is[len(self.linear_inclusion.theta_is) - 1, neuron_idx],\n \"minimize\",\n )", "title": "" }, { "docid": "9000f6ec6199445cbef324cf40ab8f38", "score": "0.5243745", "text": "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe=util.PriorityQueue()\n\n Starting_StateName = problem.getStartState()\n Starting_State=(Starting_StateName , [] , 0) # State Structure is :(state_name,state_path,state_cost)\n # that we made it\n fringe.push(Starting_State,0) # starting_State with priority=0\n\n\n SeenNodeCost={} # cost of SeenNode\n 
closed=set() # an array --> name of already expanded nodes\n\n SeenNodeCost[Starting_StateName]=0 # cost of SeenNode--> Starting_StateName at first is 0\n\n\n while not fringe.isEmpty():\n (Expanded_StateName,Expanded_StatePath,Expanded_StateCost)= fringe.pop()\n if Expanded_StateName in closed:\n pass\n else:\n closed.add(Expanded_StateName)\n if(problem.isGoalState(Expanded_StateName)): # gets a name\n return Expanded_StatePath\n for StateName , StateAction , StateCost in problem.getSuccessors(Expanded_StateName): # an array of 3 part tuples with data structure like: (near_state_name,packman_action,state_path_cost)\n add_StatePath = Expanded_StatePath + [StateAction]\n add_StateCost = Expanded_StateCost + StateCost\n add_State=(StateName,add_StatePath,add_StateCost) # translates getSuccessor() output to our state data type\n fringe.push(add_State,add_StateCost)\n\n\n return [\"Error! Can't find the Goal State!\"]\n\n util.raiseNotDefined()", "title": "" }, { "docid": "dc011d51fb0a03b04242410545adca48", "score": "0.52405465", "text": "def objective(hyparams):\n start = time.time()\n\n agent = cls.routine(env, hyparams, seed, VFA)\n\n avg, _ = agent.optimality()\n return {'loss': - avg, 'status': STATUS_OK, 'eval_time': time.time() - start}", "title": "" }, { "docid": "25fcf6f28f9030daa8f250a74fdf8ddb", "score": "0.52289695", "text": "def cost(user_requirements, proposed_solution):", "title": "" }, { "docid": "adf668fa31294f0991de42c793c9d9c6", "score": "0.5226426", "text": "def minimize(self, minimization_name, score_to_minimize='Purity', increment_threshold=0.01):\n if not self._is_deleted:\n return self.__factory.minimize(self, minimization_name, score_to_minimize, increment_threshold)", "title": "" }, { "docid": "b715a5024560d458dc5f1a13b3166876", "score": "0.52255005", "text": "def explain(x):\n\n background_data = get_background_data()\n names = [\"mileage\", \"year\", \"power\", \"model_number\", \"consumption\"]\n x = np.array(x)\n model = tf.keras.models.load_model(\n str(model_folder / 'neural_net_classifier'),\n )\n\n explainer = shap.KernelExplainer(lambda x_: f(x_, names=names, model=model), background_data)\n shap_values = explainer.shap_values(x, nsamples=500)[0]\n\n return shap_values", "title": "" }, { "docid": "6a0b8856d0e9d0aae73ff97f46c7930b", "score": "0.52215207", "text": "def visualize_best(self):\n self.population[0].visualize(self.nodes)", "title": "" }, { "docid": "39abeb1bf8f45daa654094747e1d225f", "score": "0.52181196", "text": "def algorithm(self):\n return \"Semi-Parametric\"", "title": "" }, { "docid": "f5900b61d1fe91cb92df699144c1a643", "score": "0.52169836", "text": "def _cost_function(self) -> None:\n workloads = {wl.app: wl.values[0] for wl in self.workloads}\n\n self.pulp_problem += (\n lpSum(\n [\n self.cooked.map_res[_a, _ic]\n * self.cooked.instance_perfs[_ic, _a]\n / workloads[_a]\n for _a in self.system.apps\n for _ic in self.cooked.instances_res\n ]\n + [\n self.cooked.map_dem[_a, _ic, _l]\n * self.cooked.instance_perfs[_ic, _a]\n / workloads[_a]\n for _a in self.system.apps\n for _ic in self.cooked.instances_dem\n for _l in self.load_hist.keys()\n ]\n ),\n \"Objective: maximize fulfilled workload fraction\",\n )", "title": "" }, { "docid": "c37199c242706cee6182280ee0bd5ae9", "score": "0.52148235", "text": "def optimize_parameters(self):\r\n pass", "title": "" }, { "docid": "bd92e000594179669dae8088441953ff", "score": "0.52133924", "text": "def train_step(self,loss, optimizer):\n\n ########################\n # PUT YOUR CODE HERE #\n 
#######################\n train_step = optimizer.minimize(loss)\n ########################\n # END OF YOUR CODE #\n #######################\n\n return train_step", "title": "" }, { "docid": "c12968e9fbde23baaf1eec6dcb6516f7", "score": "0.52108663", "text": "def _cost_function(self) -> None:\n\n period_length = sum(self.load_hist.values())\n\n self.pulp_problem += (\n lpSum(\n [\n self.cooked.map_res[_a, _ic]\n * self.cooked.instance_prices[_ic]\n * period_length\n for _a in self.system.apps\n for _ic in self.cooked.instances_res\n ]\n + [\n self.cooked.map_dem[_a, _ic, _l]\n * self.cooked.instance_prices[_ic]\n * self.load_hist[_l]\n for _a in self.system.apps\n for _ic in self.cooked.instances_dem\n for _l in self.load_hist.keys()\n ]\n ),\n \"Objective: minimize cost\",\n )", "title": "" }, { "docid": "bf6016567f256aa8edb1b78bf11b0bca", "score": "0.5196865", "text": "def get_global_optimum(self):\n return [[-1.0, -1.0]], 0.1", "title": "" }, { "docid": "e155ea5c8c7d782960026709096c4eaf", "score": "0.5190684", "text": "def optimize_function(self):\r\n\r\n initial_guess = self.init_optimization()\r\n algorithm = getattr(\r\n nlopt, self.opt_parameters['Optimization_Algorithm'])\r\n\r\n opt = nlopt.opt(algorithm, self.opt_dim)\r\n opt.set_max_objective(self.aquisition_function)\r\n opt.set_lower_bounds(self.opt_parameters['Lower_Bounds'])\r\n opt.set_upper_bounds(self.opt_parameters['Upper_Bounds'])\r\n opt.set_xtol_abs(self.opt_parameters['Xtol_Abs'])\r\n opt.set_maxeval(self.opt_parameters['Max_Iterations'])\r\n\r\n x = opt.optimize(initial_guess)\r\n x = np.reshape(x, (-1, self.opt_dim))\r\n y = opt.last_optimum_value()\r\n\r\n return x, y", "title": "" }, { "docid": "83d89571d6c981cf026682a343dc8a1b", "score": "0.51900965", "text": "def evaluate_optimizer(surrogate_minimize, model, dataset, n_calls, random_state):\n # below seed is necessary for processes which fork at the same time\n # so that random numbers generated in processes are different\n np.random.seed(random_state)\n problem = MLBench(model, dataset, random_state)\n space = problem.space\n dimensions_names = sorted(space)\n dimensions = [space[d][0] for d in dimensions_names]\n\n def objective(x):\n # convert list of dimension values to dictionary\n x = dict(zip(dimensions_names, x))\n # the result of \"evaluate\" is accuracy / r^2, which is the more the better\n y = -problem.evaluate(x)\n return y\n\n # optimization loop\n result = surrogate_minimize(objective, dimensions, n_calls=n_calls, random_state=random_state)\n trace = []\n min_y = np.inf\n for x, y in zip(result.x_iters, result.func_vals):\n min_y = min(y, min_y)\n x_dct = dict(zip(dimensions_names, x))\n trace.append((x_dct, y, min_y))\n\n print(random_state)\n return trace", "title": "" }, { "docid": "28396421a768478d9d572d62ebe5cef7", "score": "0.5189637", "text": "def _parse_objective(builder, atom, factor):\n assert factor in (1, -1)\n for co, var in _parse_constraint_elems(builder, atom.elements, None, True):\n builder.add_minimize(factor * co, var)", "title": "" }, { "docid": "a26851eadd5c7809dffd1ffb56b3b23f", "score": "0.5188932", "text": "def gd_optimization(features,learning_rate=0.13, n_epochs=100): \n \n x = T.matrix('x')\n feature_dimensionality = features.shape[1]\n ranker = SVRanker(input=x, n_in=feature_dimensionality, n_out=1)\n cost = ranker.hinge_loss()\n\n g_W = T.grad(cost=cost, wrt=ranker.W)\n g_b = T.grad(cost=cost, wrt=ranker.b)\n\n updates = [(ranker.W, ranker.W - learning_rate * g_W),\n (ranker.b, ranker.b - learning_rate * g_b)]\n\n 
train_ranker = theano.function(inputs = [x], outputs = cost, updates = updates)\n\n epoch = 0\n while epoch < n_epochs:\n epoch = epoch + 1\n cost = train_ranker(features)\n #print cost\n\n return ranker.W.get_value(), ranker.b.get_value()", "title": "" }, { "docid": "59284a40bbe8e4cdd2cace0093a9b7f6", "score": "0.5188888", "text": "def fit(self, verbose=True, factr=1e5, pgtol=1e-7):\n self._maximize(verbose=verbose, factr=factr, pgtol=pgtol)", "title": "" }, { "docid": "8f283caa239368fa94713963ab2234c7", "score": "0.51866865", "text": "def __str__(self):\n \n if self._regressorNames[0] == \"1\":\n str = \"Model: y = a_0\"\n else:\n str = \"Model: y = a_0 * %s\" % self._regressorNames[0]\n \n for n in range(1, self._nParameters):\n if self._regressorNames[n] == \"1\":\n str += \" + a_%d\" % n\n else:\n str += \" + a_%d * %s\" % (n, self._regressorNames[n])\n \n str += \"\\nExpected number of observations: %d\" % self._nObservations\n \n return str", "title": "" } ]
478f5f982f16fc0e76a4eadbb5c01864
The rotation matrix of the image. Relates to the exterior orientation
[ { "docid": "86dfb7458a143ce1a28d5e19a7d03974", "score": "0.77758867", "text": "def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "title": "" } ]
[ { "docid": "c5785e1d4b134c7c43ef3846c184d908", "score": "0.80493206", "text": "def getRotationMatrix( self):", "title": "" }, { "docid": "9ea3c54f501c0ff5106c01d4919e0798", "score": "0.7800133", "text": "def rotationMatrix(self):\n\n # R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n # self.exteriorOrientationParameters[5])\n\n return self.__rotationMatrix", "title": "" }, { "docid": "cb745a196b6344cbfc3b8e729f85ea0f", "score": "0.74818605", "text": "def rotation_matrix(self) -> Tensor:\n return self.extrinsics[..., :3, :3]", "title": "" }, { "docid": "e1ef679a81bb9a37be4c40b9c4725b3b", "score": "0.73385036", "text": "def rotation(self):\n return 0", "title": "" }, { "docid": "e1ef679a81bb9a37be4c40b9c4725b3b", "score": "0.73385036", "text": "def rotation(self):\n return 0", "title": "" }, { "docid": "3e59a644b2bae0337df4e004fa5e513e", "score": "0.73054653", "text": "def orientation_matrix(self):\n return self._matrix", "title": "" }, { "docid": "2563cb5b0d492b82639ed4c4d5eacef5", "score": "0.73041844", "text": "def get_imageRotate(self):\r\n mapRotate = self.get_mapRotate()\r\n mapRotateOriginal = 0 if self.mapRotateOriginal is None else self.mapRotateOriginal\r\n imageRotate = mapRotate - mapRotateOriginal\r\n return imageRotate", "title": "" }, { "docid": "dd4272bdb6bbf861c3c36099e4a769f1", "score": "0.7267427", "text": "def rotation_matrix(self):\n n = self.w\n ex = self.x\n ey = self.y\n ez = self.z\n\n R = np.eye(3)\n\n R[0, 0] = 2 * (n * n + ex * ex) - 1\n R[0, 1] = 2 * (ex * ey - n * ez)\n R[0, 2] = 2 * (ex * ez + n * ey)\n\n R[1, 0] = 2 * (ex * ey + n * ez)\n R[1, 1] = 2 * (n * n + ey * ey) - 1\n R[1, 2] = 2 * (ey * ez - n * ex)\n\n R[2, 0] = 2 * (ex * ez - n * ey)\n R[2, 1] = 2 * (ey * ez + n * ex)\n R[2, 2] = 2 * (n * n + ez * ez) - 1\n\n return R;", "title": "" }, { "docid": "b2e38174b693992914034d8c9e229200", "score": "0.7248948", "text": "def orientation_matrix(self):\n return self.orientation.orientation_matrix()", "title": "" }, { "docid": "0c3f86fc324a2e27fdaf0acc1130c919", "score": "0.717149", "text": "def rotation(self):\n rotation = self._rotation2 * self._rotation1\n return rotation.normalize()", "title": "" }, { "docid": "526e7f91c8ac36cd57720ff02f65421d", "score": "0.71198183", "text": "def rotation_matrix(self) -> np.ndarray:\n return build_rotation_matrix(self.rotation_offset)", "title": "" }, { "docid": "16cdd4572ea5e4dea890f813b25fe0e5", "score": "0.7117069", "text": "def rotate_img(input_img, angle=0.0):\n \n rotated = rotate(input_img, angle,reshape=False,axes=(0, 1))\n print('Currrent rotation: ', angle)\n return rotated", "title": "" }, { "docid": "6deb468d57ed2279eaad85f3a5253572", "score": "0.70456594", "text": "def rotate(self):\n R = np.eye(3)\n # TODO:\n return R", "title": "" }, { "docid": "0bcf6aa5e4d22dabd255ee4eea99dbfb", "score": "0.70365447", "text": "def rotate_image(self):\r\n height, width = self.image.shape[:2] # image shape has 3 dimensions\r\n image_center = (width / 2,\r\n height / 2) # getRotationMatrix2D needs coordinates in reverse order (width, height)\r\n # compared to shape\r\n\r\n rotation_mat = cv2.getRotationMatrix2D(image_center, self.angle, 1.)\r\n\r\n # rotation calculates the cos and sin, taking absolutes of those.\r\n abs_cos = abs(rotation_mat[0, 0])\r\n abs_sin = abs(rotation_mat[0, 1])\r\n\r\n # find the new width and height bounds\r\n bound_w = int(height * abs_sin + width * abs_cos)\r\n bound_h = int(height * abs_cos + width * abs_sin)\r\n\r\n # subtract old 
image center (bringing image back to origin) and adding the new image center coordinates\r\n rotation_mat[0, 2] += bound_w / 2 - image_center[0]\r\n rotation_mat[1, 2] += bound_h / 2 - image_center[1]\r\n\r\n # rotate image with the new bounds and translated rotation matrix\r\n rotated_mat = cv2.warpAffine(self.image, rotation_mat, (bound_w, bound_h))\r\n return rotated_mat", "title": "" }, { "docid": "e4467a1f5e9a6ca7a487357e9c643bf4", "score": "0.69781697", "text": "def _inverse_rotation_matrix(self):\n return simplify(self._parent_rotation_matrix**-1)", "title": "" }, { "docid": "574471afbb947c988ff4d1593555e01c", "score": "0.697663", "text": "def get_rotation(self):\n return self._rotation", "title": "" }, { "docid": "9c4f0b26ec6c99023b0cd28ca4db3cac", "score": "0.69271004", "text": "def exif_orientation(im):\n orientation = get_exif_orientation(im)\n if orientation == 2:\n im = im.transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation == 3:\n im = im.rotate(180)\n elif orientation == 4:\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\n elif orientation == 5:\n im = im.rotate(-90).transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation == 6:\n im = im.rotate(-90)\n elif orientation == 7:\n im = im.rotate(90).transpose(Image.FLIP_LEFT_RIGHT)\n elif orientation == 8:\n im = im.rotate(90)\n return im", "title": "" }, { "docid": "3222d4b25af3c8aad7de7e75260f8b32", "score": "0.692344", "text": "def get_rot(self):\n return self._state.pose.orientation", "title": "" }, { "docid": "682f148ac08795ac2039e7c5889327e6", "score": "0.6889848", "text": "def image_rotation(x):\r\n rands = tf.truncated_normal([tf.shape(x)[0]], stddev=FLAGS.rotate_stddev)\r\n return images_rotate(x, rands, interpolation='BILINEAR')", "title": "" }, { "docid": "6b187d70bbcd5577d62436d1c142c296", "score": "0.67816794", "text": "def get_rot(self, img, location=None):\n if location == None:\n x1 = img.image_arr.shape[2] / 2.0\n y1 = img.image_arr.shape[3] / 2.0\n else:\n x1, y1 = location\n ra, dec = img.pix2sky([x1, y1])\n delta_dec = self.pixdist2angdist(img, 1.0, 0.0, location=[x1, y1]) # approx. 
size in degrees of 1 pixel\n if dec + delta_dec > 90.0:\n # shift towards south instead\n delta_dec *= -1.0\n x2, y2 = img.sky2pix([ra, dec + delta_dec])\n try:\n rot_ang_rad = N.arctan2(y2-y1, x2-x1) - N.pi / 2.0\n if delta_dec < 0.0:\n rot_ang_rad -= N.pi\n except:\n rot_ang_rad = 0.0\n return rot_ang_rad * 180.0 / N.pi", "title": "" }, { "docid": "ffffbf3fc13e2d850e7a057b8df7850e", "score": "0.67668146", "text": "def rotation(self):\n if self.info is None:\n return None\n return self.info.instrument.rotation", "title": "" }, { "docid": "77a51585aaf52912cdda631bd0425e32", "score": "0.6721765", "text": "def get_rotation_angle(self):\n if self.info is None:\n return None\n return self.info.get_rotation_angle()", "title": "" }, { "docid": "172832c31ff9213d701f63daca4ae2a5", "score": "0.67210555", "text": "def rotate(matrix) -> None:", "title": "" }, { "docid": "4cf6e4352c34c369d8544fc715f0ee92", "score": "0.66826606", "text": "def rotation(self) -> float:\n return self._encoder.getValue()", "title": "" }, { "docid": "6125e0067dbb8407b6568d7ea03408bf", "score": "0.66738975", "text": "def orientation(self):\n\n rho = INIT_RHO_BASE + RHO_PER_FRAME * (self.__no + self.__step)\n theta = THETA_PER_FRAME * self.__step\n\n m_x = math.cos(rho) * math.cos(theta)\n m_y = math.sin(rho) * math.cos(theta)\n m_z = math.sin(theta)\n\n return m_x, m_y, m_z", "title": "" }, { "docid": "539a290d19b4eb017f0860ffac12d534", "score": "0.6656225", "text": "def rotate180(img):\n return np.rot90(img, k=2, axes=(0, 1))", "title": "" }, { "docid": "a7e3b8e889857fd87cf06ad26cefb2e2", "score": "0.6654257", "text": "def rotation(self) -> quaternion:\n return self._transform.rotation", "title": "" }, { "docid": "f4509fb2fa20b9ffa29716fd67493b85", "score": "0.6652479", "text": "def rotationMatrix (self, angle):\n\t\tM = np.matrix ([[np.cos (angle*np.pi/180), np.sin (angle*np.pi/180)],\n\t\t\t\t\t [-np.sin (angle*np.pi/180), np.cos (angle*np.pi/180)]])\n\t\t\n\t\treturn M", "title": "" }, { "docid": "9d042b7327b0c3249ecb1b1e307acecf", "score": "0.66471183", "text": "def _get_matrix(self):\n qw = np.cos(self.angle / 2)\n qx, qy, qz = np.sin(self.angle / 2) * self.axis\n return ScipyRotation.from_quat([qx, qy, qz, qw])", "title": "" }, { "docid": "4f43d08695a3ca7b936483d084e2010b", "score": "0.6638165", "text": "def rotate_image(self, image, rotations):\n return tf.image.rot90(image, k=rotations)", "title": "" }, { "docid": "390c3a4ec6b32fb9877552485ad41e88", "score": "0.66319513", "text": "def _extract_rotation_matrix(self, mod):\n r = np.matrix(\n [\n [mod.rot_xu.item(), mod.rot_xv.item(), mod.rot_xw.item()],\n [mod.rot_yu.item(), mod.rot_yv.item(), mod.rot_yw.item()],\n [mod.rot_zu.item(), mod.rot_zv.item(), mod.rot_zw.item()],\n ]\n )\n return r", "title": "" }, { "docid": "1e3f6272ba28eda1d49bfc4e1a3f64f6", "score": "0.66055846", "text": "def rotateImage(self):\n rotationDegrees = 90 * c.directionList.index(self.direction)\n self.image = pg.transform.rotate(self.image, rotationDegrees)", "title": "" }, { "docid": "fc3083c2c855552d1c490de238d28476", "score": "0.65642935", "text": "def toRotMatrix(self):\n\n rotation_versor = self.versor\n phi = radians(self.a)\n\n l = rotation_versor.x\n m = rotation_versor.y\n n = rotation_versor.z\n\n cos_phi = cos(phi)\n sin_phi = sin(phi)\n\n a11 = cos_phi + ((l * l) * (1 - cos_phi))\n a12 = ((l * m) * (1 - cos_phi)) - (n * sin_phi)\n a13 = ((l * n) * (1 - cos_phi)) + (m * sin_phi)\n\n a21 = ((l * m) * (1 - cos_phi)) + (n * sin_phi)\n a22 = cos_phi + ((m * m) * (1 - cos_phi))\n a23 = 
((m * n) * (1 - cos_phi)) - (l * sin_phi)\n\n a31 = ((l * n) * (1 - cos_phi)) - (m * sin_phi)\n a32 = ((m * n) * (1 - cos_phi)) + (l * sin_phi)\n a33 = cos_phi + ((n * n) * (1 - cos_phi))\n\n return np.array([(a11, a12, a13),\n (a21, a22, a23),\n (a31, a32, a33)])", "title": "" }, { "docid": "21b0215ff9f9adad1aa479ef3e141aa4", "score": "0.6557318", "text": "def rotate_image(self, image, angle):\n \n # Get the image size\n # No that's not an error - NumPy stores image matricies backwards\n image_size = (image.shape[1], image.shape[0])\n image_center = tuple(np.array(image_size) / 2)\n \n # Convert the OpenCV 3x2 rotation matrix to 3x3\n rot_mat = np.vstack(\n [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]\n )\n \n rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])\n \n # Shorthand for below calcs\n image_w2 = image_size[0] * 0.5\n image_h2 = image_size[1] * 0.5\n \n # Obtain the rotated coordinates of the image corners\n rotated_coords = [\n (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([ image_w2, image_h2]) * rot_mat_notranslate).A[0],\n (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],\n (np.array([ image_w2, -image_h2]) * rot_mat_notranslate).A[0]\n ]\n \n # Find the size of the new image\n x_coords = [pt[0] for pt in rotated_coords]\n x_pos = [x for x in x_coords if x > 0]\n x_neg = [x for x in x_coords if x < 0]\n \n y_coords = [pt[1] for pt in rotated_coords]\n y_pos = [y for y in y_coords if y > 0]\n y_neg = [y for y in y_coords if y < 0]\n \n right_bound = max(x_pos)\n left_bound = min(x_neg)\n top_bound = max(y_pos)\n bot_bound = min(y_neg)\n \n new_w = int(abs(right_bound - left_bound))\n new_h = int(abs(top_bound - bot_bound))\n \n # We require a translation matrix to keep the image centred\n trans_mat = np.matrix([\n [1, 0, int(new_w * 0.5 - image_w2)],\n [0, 1, int(new_h * 0.5 - image_h2)],\n [0, 0, 1]\n ])\n \n # Compute the tranform for the combined rotation and translation\n affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]\n \n # Apply the transform\n result = cv2.warpAffine(\n image,\n affine_mat,\n (new_w, new_h),\n flags=cv2.INTER_LINEAR\n )\n \n return result", "title": "" }, { "docid": "3f12d9f3df7ba978a4bb94a7ebb74a87", "score": "0.6544318", "text": "def rotation_matrix(self) -> RotationMatrix:\n r_m = self._quaternion.rotation_matrix\n return (\n (r_m[0][0], r_m[0][1], r_m[0][2]),\n (r_m[1][0], r_m[1][1], r_m[1][2]),\n (r_m[2][0], r_m[2][1], r_m[2][2]),\n )", "title": "" }, { "docid": "a513e353e3c0b72141aee362cea31df3", "score": "0.6489098", "text": "def rotate(self):\n return self._rotate", "title": "" }, { "docid": "889e4444874536a628541b302c80e078", "score": "0.6465559", "text": "def rotate(self, images, angle=0.0):\n \n self.dor = angle\n rotated = rotate(images, angle, reshape=False, axes=(1,2))\n print('Currrent rotation: {} degrees'.format(self.dor))\n return rotated", "title": "" }, { "docid": "2dfa85324acf28ac5335f057ba0c8a43", "score": "0.6460011", "text": "def rotation(img, rotation_range, y_size, x_size, num_channels):\n \n angle = np.random.uniform(-rotation_range, rotation_range)\n\n M = cv2.getRotationMatrix2D((x_size/2, y_size/2), angle, 1)\n img_rot = cv2.warpAffine(img, M, (x_size, y_size))\n\n img_rot = np.reshape(img_rot, (y_size, x_size, num_channels))\n\n return img_rot", "title": "" }, { "docid": "9367a63c294b92c44c7aabbf29af12c6", "score": "0.6452795", "text": "def rotate(self, img, angle):\n if angle == 0:\n return img\n\n # I took the code from this tutorial:\n # 
https://www.pyimagesearch.com/2017/01/02/rotate-images-correctly-with-opencv-and-python/\n h, w = img.shape[:2]\n cX, cY = w // 2, h // 2\n\n R = cv.getRotationMatrix2D((cX, cY), -angle, 1)\n cos = np.abs(R[0, 0])\n sin = np.abs(R[0, 1])\n\n nW = int((h * sin) + (w * cos))\n nH = int((h * cos) + (w * sin))\n\n R[0, 2] += (nW / 2) - cX\n R[1, 2] += (nH / 2) - cY\n\n return cv.warpAffine(img, R, (nW, nH))", "title": "" }, { "docid": "9e95b9ab3f48c42888a8463cf0c8b5de", "score": "0.64500344", "text": "def _rotation_matrix_from_crota(self):\n return super()._rotation_matrix_from_crota(crota_key='CROTA')", "title": "" }, { "docid": "e53c3feeca9f0febdbbfa920a1a9db52", "score": "0.644891", "text": "def orientation(self):", "title": "" }, { "docid": "d61bfc24571f7c7c881677f8a0b2dd3b", "score": "0.64479077", "text": "def getRotation(self):\n return self.coords_system", "title": "" }, { "docid": "6952dddbe5f5184e2de2085b7c4b383d", "score": "0.6434645", "text": "def rotate(img, angle):\n r = cv.getRotationMatrix2D((img.shape[0] / 2, img.shape[1] / 2), angle, 1.0)\n return cv.warpAffine(img, r, img.shape)", "title": "" }, { "docid": "b6baaefd39f00b59f4454a2990ad2b44", "score": "0.64307815", "text": "def Rotation(self, *args):\n return _Graphic3d.Graphic3d_TextureParams_Rotation(self, *args)", "title": "" }, { "docid": "3943244383791a0d47a9125055e44e9f", "score": "0.64215666", "text": "def get_orientation(self, rotation):\r\n return self.orientations[rotation % self.max_rotations]", "title": "" }, { "docid": "cc7b763f9a2b29a5337ce57a5bc7a859", "score": "0.64204943", "text": "def rotation_matrix_decompose(r):\n return numpy.array( (math.atan2(r[2][1],r[2][2]),\\\n math.atan2(-r[2][0],math.sqrt(r[2][1]*r[2][1]+r[2][2]*r[2][2])),\\\n math.atan2(r[1][0],r[0][0])))", "title": "" }, { "docid": "16d448d83f8795f74b5477d0dae46c2c", "score": "0.6418149", "text": "def rotation_matrices(self):\n matrices = []\n for r in self.rotations:\n matrices.append(cv2.Rodrigues(r))\n return matrices", "title": "" }, { "docid": "d5b432e7b4bc671ee926cadec23e47f6", "score": "0.64123327", "text": "def rotate(image, angle):\n im = np.array(Image.fromarray(random[1].reshape((28,28))).rotate(angle))\n return im.ravel()", "title": "" }, { "docid": "9ff1b286f19bc4ac02ee9cdcebe7ef99", "score": "0.6393182", "text": "def Rotation(self, *args):\n return _Graphic3d.Graphic3d_Texture2Dplane_Rotation(self, *args)", "title": "" }, { "docid": "e4d2589c93fbafaad553ac836326c531", "score": "0.6377217", "text": "def rotation_matrix(delta):\n return np.array([[np.cos(delta), -np.sin(delta)],[np.sin(delta), np.cos(delta)]])", "title": "" }, { "docid": "79da68f22355d3264ad631ebdcbb6c5e", "score": "0.6377038", "text": "def createRotationMatrix(self, orientation):\n # Extract information from orientation\n qx, qy, qz, qw = [orientation.w, orientation.x, orientation.y, orientation.z]\n\n rotation_matrix = np.array([\n [2 * (qx ** 2) - 1 + 2 * (qy ** 2), 2 * qy * qz + 2 * qx * qw, 2 * qy * qw - 2 * qx * qz],\n [2 * qy * qz - 2 * qx * qw, 2 * (qx ** 2) - 1 + 2 * (qz ** 2), 2 * qz * qw + 2 * qx * qy],\n [2 * qy * qw + 2 * qx * qz, 2 * qz * qw - 2 * qx * qy, 2 * (qx ** 2) - 1 + 2 * (qw ** 2)]\n ]).reshape(3, 3)\n\n return rotation_matrix", "title": "" }, { "docid": "071b7de0f52e7ffce8d5b49e8cdcd649", "score": "0.6363721", "text": "def get_rotation_matrix(self, q):\n r, i, j, k = q\n r1 = np.array([1-2*(j**2+k**2), 2*(i*j-k*r), 2*(i*k+j*r)])\n r2 = np.array([2*(i*j+k*r), 1-2*(i**2+k**2), 2*(j*k-i*r)])\n r3 = np.array([2*(i*k-j*r), 2*(j*k+i*r), 
1-2*(i**2+j**2)])\n return np.vstack([r1, r2, r3])", "title": "" }, { "docid": "a0171de7228e12daa647809bf7e973d2", "score": "0.6359825", "text": "def rotate(img, angle, resample=False, expand=False, center=None):\n \n return img.rotate(angle, resample, expand, center)", "title": "" }, { "docid": "ac68da11201336e011297d0ef6d7753e", "score": "0.6358479", "text": "def get_mapRotate(self):\r\n rotate = 0 if self.mapRotate is None else self.mapRotate % 360\r\n \r\n return rotate", "title": "" }, { "docid": "541f1e7adb0e81d7135ebc8c5ad2a60a", "score": "0.6358136", "text": "def rotation(self):\n return self.angle + self.squid.body.angle", "title": "" }, { "docid": "a4205061506576916d5fb8db0f98ec2c", "score": "0.6343535", "text": "def alignment_matrix(self) -> np.array:\n # Get the center of the eyes\n left_eye = Point.mean(self.left_eye)\n right_eye = Point.mean(self.right_eye)\n face_square = self.square()\n\n # Compute tilt\n delta_y = right_eye.y - left_eye.y\n delta_x = right_eye.x - left_eye.x\n angle = np.degrees(np.arctan2(delta_y, delta_x))\n\n # Normalized eye positions\n out_left_eye_x, out_left_eye_y = 0.3, 0.2\n out_right_eye_x, out_right_eye_y = 1.0 - out_left_eye_x, 1.0 - out_left_eye_y\n\n # Compute scale of output image\n dist = np.sqrt((delta_x ** 2) + (delta_y ** 2))\n out_dist = (out_right_eye_x - out_left_eye_x) * face_square.width\n scale = out_dist / dist\n\n # Compute rotation center point\n eyes_center = Point.mean([left_eye, right_eye])\n\n # Compute rotation matrix\n matrix = cv2.getRotationMatrix2D(eyes_center, angle, scale)\n\n # Update translation components\n matrix[0, 2] += (face_square.width * 0.5 - eyes_center.x)\n matrix[1, 2] += (face_square.height * out_left_eye_y - eyes_center.y)\n\n return matrix", "title": "" }, { "docid": "47fa6419166d895a507c1f06dd66c51a", "score": "0.63300097", "text": "def rotate_180(image):\n img = image.pixels\n rows = image.size()[0]\n img = [[img[i][j][::-1] for j in range(rows)] for i in range(len(img))]\n img = [img[i][::-1] for i in range(len(img))]\n return RGBImage(img)", "title": "" }, { "docid": "8aa8235a3d4e539b00243366b83d3fa4", "score": "0.6326926", "text": "def rotate(self):\n pass", "title": "" }, { "docid": "e1a5afa34fee54bb66df43ffc2ca8ed9", "score": "0.6321541", "text": "def compass_rotation(self):\n return self.parse(\"f\", 1164)", "title": "" }, { "docid": "7b9beac6c1dfc499c4d768114e5b812a", "score": "0.6318171", "text": "def getRotation(self, *args):\n return _coin.SbCylinderSheetProjector_getRotation(self, *args)", "title": "" }, { "docid": "7168c550bdd40286cce9c01026cf871e", "score": "0.63109934", "text": "def getRotation(self, *args):\n return _coin.SbSphereSheetProjector_getRotation(self, *args)", "title": "" }, { "docid": "14ba9d1d694ba7dd39f8daad838875fa", "score": "0.62952614", "text": "def get_image_rotation(filename):\n\n file = open(filename, 'rb')\n tags = exifread.process_file(file)\n\n for tag in tags.keys():\n if tag == 'Image Orientation':\n # print(f\"{tag}, value {tags[tag]}\")\n return tags[tag]", "title": "" }, { "docid": "c7967830ac8fba2b5918c4118814b927", "score": "0.62938774", "text": "def rotate_image(img_read, rotation):\n if rotation is not None:\n value_cw = -float((str.split(str(rotation), \" \")[1]))\n img_rotated = rotate(img_read, value_cw)\n return img_rotated\n else:\n return img_read", "title": "" }, { "docid": "bb53a7617fe749443bd4d75bc2b74176", "score": "0.62912256", "text": "def __call__(self, img, angle=None):\n if not angle:\n angle = self.get_param()\n\n return 
ndimage.rotate(img, angle, reshape=False, order=1)", "title": "" }, { "docid": "26f8c3a75b7bc4f2657aebe2ac2ebe2c", "score": "0.6270967", "text": "def get_rotation(self) -> rotation.Rotation:\n return self._rotation", "title": "" }, { "docid": "351b07ba87c0423deb63624303a5bc0d", "score": "0.6269158", "text": "def _rotate_mirror_do(im):\n mirrs = []\n mirrs.append(np.array(im))\n mirrs.append(np.rot90(np.array(im), axes=(0, 1), k=2))\n\tim = np.array(im)[:, ::-1]\n\tmirrs.append(np.array(im))\n\tmirrs.append(np.rot90(np.array(im), axes=(0, 1), k=2))\n return mirrs", "title": "" }, { "docid": "46656369c2506b12bde72307d4caf293", "score": "0.62649804", "text": "def to_rot_matrix(self) -> np.matrix:\n if np.linalg.norm(self._array[1:4]) == 0:\n return np.matrix(np.identity(3))\n else:\n q1q0 = self._array[1] * self._array[0]\n q2q0 = self._array[2] * self._array[0]\n q3q0 = self._array[3] * self._array[0]\n q1q1 = self._array[1] * self._array[1]\n q2q1 = self._array[2] * self._array[1]\n q3q1 = self._array[3] * self._array[1]\n q2q2 = self._array[2] * self._array[2]\n q3q2 = self._array[3] * self._array[2]\n q3q3 = self._array[3] * self._array[3]\n\n return np.matrix([[1 - 2 * (q2q2 + q3q3), 2 * (q2q1 - q3q0), 2 * (q3q1 + q2q0)],\n [2 * (q2q1 + q3q0), 1 - 2 * (q1q1 + q3q3), 2 * (q3q2 - q1q0)],\n [2 * (q3q1 - q2q0), 2 * (q3q2 + q1q0), 1 - 2 * (q1q1 + q2q2)]])", "title": "" }, { "docid": "0c01d7c719610232276b625ca158d4d8", "score": "0.6251576", "text": "def rotate90(self):\n retVal = cv.CreateImage((self.height, self.width), cv.IPL_DEPTH_8U, 3)\n cv.Transpose(self.getBitmap(), retVal)\n return(Image(retVal, colorSpace=self._colorSpace))", "title": "" }, { "docid": "e99576d57f903b3db3871f2b8181895f", "score": "0.6247798", "text": "def orientation(self):\r\n tag=self.readinfo('Image Orientation Patient')\r\n \r\n if tag==None:\r\n name=None\r\n elif tag==[-0,1,0,-0,-0,-1]:\r\n name=1 #Sagittal\r\n elif tag==[-1,-0,0,-0,-1,0]:\r\n name=2 #Axial\r\n elif tag==[1,0,0,0,0,-1]:\r\n name=3 #Coronal\r\n else:\r\n name=4 #Oblique\r\n self.orient=name\r\n return", "title": "" }, { "docid": "3b231c9456d49e10a3a13079977ae7fc", "score": "0.62325877", "text": "def rotate(center, img, degrees):\n img_out = cv2.getRotationMatrix2D(center,degrees,1)\n return cv2.warpAffine(img,img_out,(32,32))", "title": "" }, { "docid": "ec67af3c7aee0ec9d89a6f4cb432ccf9", "score": "0.623251", "text": "def orientation_matrix(euler_angle):\n\n # Convert from degrees to radians\n phi1 = np.deg2rad(euler_angle[0])\n Phi = np.deg2rad(euler_angle[1])\n phi2 = np.deg2rad(euler_angle[2])\n\n # Assemble orientation matrix\n M = np.zeros([3, 3])\n M[0,0] = cos(phi1)*cos(phi2) - sin(phi1)*sin(phi2)*cos(Phi)\n M[0,1] = sin(phi1)*cos(phi2) + cos(phi1)*sin(phi2)*cos(Phi)\n M[0,2] = sin(phi2)*sin(Phi)\n M[1,0] = -cos(phi1)*sin(phi2) - sin(phi1)*cos(phi2)*cos(Phi)\n M[1,1] = -sin(phi1)*sin(phi2) + cos(phi1)*cos(phi2)*cos(Phi)\n M[1,2] = cos(phi2)*sin(Phi)\n M[2,0] = sin(phi1)*sin(Phi)\n M[2,1] = -cos(phi1)*sin(Phi)\n M[2,2] = cos(Phi)\n return M", "title": "" }, { "docid": "b5bde7fea465202de6123094efebda9d", "score": "0.6229717", "text": "def getRotation(self, *args):\n return _coin.SbCylinderPlaneProjector_getRotation(self, *args)", "title": "" }, { "docid": "55acec0047ee935ede14a86515a73bd9", "score": "0.62164885", "text": "def _rotate_mirror_do(im):\n mirrs = []\n mirrs.append(np.array(im))\n mirrs.append(np.rot90(np.array(im), axes=(0, 1), k=1))\n mirrs.append(np.rot90(np.array(im), axes=(0, 1), k=2))\n mirrs.append(np.rot90(np.array(im), 
axes=(0, 1), k=3))\n im = np.array(im)[:, ::-1]\n mirrs.append(np.array(im))\n mirrs.append(np.rot90(np.array(im), axes=(0, 1), k=1))\n mirrs.append(np.rot90(np.array(im), axes=(0, 1), k=2))\n mirrs.append(np.rot90(np.array(im), axes=(0, 1), k=3))\n return mirrs", "title": "" }, { "docid": "85e7962e3f5a626e42e01f52caf4f675", "score": "0.6207008", "text": "def getOrientation(matrix=None,errorValue=(0,0,0)):\n if matrix==None:\n matrix=getRotationMatrix()\n if matrix==None:\n return errorValue \n yaw=atan2(matrix[0][1], matrix[1][1])\n pitch=asin(-matrix[2][1])\n roll=atan2(-matrix[2][0], matrix[2][2])\n return yaw,pitch,roll", "title": "" }, { "docid": "1ef312814b5fc8d436d3a88cfa4ef038", "score": "0.6205779", "text": "def getRotation(self, *args):\n return _coin.SbSpherePlaneProjector_getRotation(self, *args)", "title": "" }, { "docid": "6207ad2e4a53b82c013ca021ecfeb829", "score": "0.62022203", "text": "def handle_exif_rotation(image: Image.Image) -> Image.Image:\n\n def get_key_by_value(dictionary: Mapping[int, str], value: str) -> int:\n for k, v in dictionary.items():\n if v == value:\n return k\n raise ValueError(f\"No such value {value}.\")\n\n try:\n orientation = get_key_by_value(ExifTags.TAGS, \"Orientation\")\n exif = dict(image.getexif().items())\n if exif[orientation] == 3:\n image = image.transpose(Image.ROTATE_180)\n elif exif[orientation] == 6:\n image = image.transpose(Image.ROTATE_270)\n elif exif[orientation] == 8:\n image = image.transpose(Image.ROTATE_90)\n return image\n except (AttributeError, KeyError, IndexError, ValueError):\n return image", "title": "" }, { "docid": "33c35535107960314178d5ec0ed72822", "score": "0.62001145", "text": "def rotation(self,n,angles=False):\n return np.eye(3)", "title": "" }, { "docid": "ce39b6c0cba340fb117c8f4ee1287a12", "score": "0.6194234", "text": "def image(self):\n return pygame.transform.rotate(\n self._image,\n math.degrees(math.atan(self.slope)),\n )", "title": "" }, { "docid": "eb36e88ef46f6a3349c7a011c3b6152f", "score": "0.61908334", "text": "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "title": "" }, { "docid": "90e36b433fa0350e0cdd24b1b46798e4", "score": "0.61864245", "text": "def coor_rot90(curr_image_shape, prev_coor):\n coor_rotated = np.zeros_like(prev_coor)\n coor_rotated[:, 0] = prev_coor[:, 1] # new_x = old_y\n coor_rotated[:, 1] = curr_image_shape[0] - prev_coor[:, 0] # new_y = new_height - old_x\n return coor_rotated", "title": "" }, { "docid": "6ca3146307959c65d448051e8323b68f", "score": "0.61852664", "text": "def rotate(self, angle):\n img = self.image.copy()\n h, w = img.shape[:2]\n\n r = np.radians(angle)\n s = np.abs(np.sin(r))\n c = np.abs(np.cos(r))\n\n # Compute image size after rotation.\n nw = int(c*w + s*h)\n nh = int(s*w + c*h)\n\n # Compute affine matrix and apply to image.\n center = (w/2, h/2)\n rot_m = cv2.getRotationMatrix2D(center, angle, 1.0)\n rot_m[0][2] = rot_m[0][2] + (nw - w) // 2\n rot_m[1][2] = rot_m[1][2] + (nh - h) // 2\n img = cv2.warpAffine(img, rot_m, (nw, nh), flags=cv2.INTER_CUBIC)\n\n new_boxes = []\n for box in self.boxes:\n coord_arr = np.array([\n [box.x, box.y, 1], # Left-Top\n [box.x, box.y+box.h, 1], # Left-Bottom\n [box.x+box.w, box.y, 1], # Right-Top\n [box.x+box.w, box.y+box.h, 1], # Right-Botto\n ])\n new_coord = rot_m.dot(coord_arr.T)\n x_ls = new_coord[0]\n y_ls = new_coord[1]\n x = int(min(x_ls))\n y = int(min(y_ls))\n 
w = int(max(x_ls) - x)\n h = int(max(y_ls) - y)\n new_box = Box(box.tag, nw, nh, x, y, w, h)\n new_boxes.append(new_box)\n\n new_ant = Annotation(self.filename, img, new_boxes)\n new_ant.color_map = self.color_map\n return new_ant", "title": "" }, { "docid": "5f5e1a2d735353b4b17118e79653abd6", "score": "0.61849636", "text": "def getRotZ(angle):\n\tc, s = math.cos(angle), math.sin(angle)\n\treturn Matrix3((c, s, 0), (-s, c, 0), (0, 0, 1))", "title": "" }, { "docid": "ce1da0440831daf7c7b41611e31da379", "score": "0.6179453", "text": "def rotationMatrix(self, R):\n\n self.__rotationMatrix = R", "title": "" }, { "docid": "6e9449404ad199d128bfa78dfffc4229", "score": "0.6178267", "text": "def getRotation(self, *args):\n return _coin.SbSphereProjector_getRotation(self, *args)", "title": "" }, { "docid": "cd957d020b181dd84c387446e36505da", "score": "0.61752295", "text": "def gRot(self):\n return self.TM[0:3, 0:3].copy()", "title": "" }, { "docid": "26601a75b6065d2f54c728fcad7d5657", "score": "0.6167346", "text": "def getRotation(self, *args):\n return _coin.SbCylinderProjector_getRotation(self, *args)", "title": "" }, { "docid": "48f0d3dced7478cc709b96ee9dda3686", "score": "0.61669904", "text": "def irotate(rotation, initial=np.identity(3)):\n a = np.dot(initial, rotation)\n cx, sx, rx = givens(a[2, 2], a[1, 2])\n cy, sy, ry = givens(rx, a[0, 2])\n cz, sz, rz = givens(cx * a[1, 1] - sx * a[2, 1],\n cy * a[0, 1] - sy * (sx * a[1, 1] + cx * a[2, 1]))\n x = degrees(atan2(sx, cx))\n y = degrees(atan2(-sy, cy))\n z = degrees(atan2(sz, cz))\n return x, y, z", "title": "" }, { "docid": "be10c3cec3028651ad82e59e4f6bc668", "score": "0.61587584", "text": "def rotation_matrix(a):\n R = np.eye(4)\n R[:3, :3] = linalg.expm([[0, -a[2], a[1]], [a[2], 0, -a[0]],[-a[1], a[0], 0]])\n return R", "title": "" }, { "docid": "b1a057870affb9c34f0829d59977dccb", "score": "0.6157489", "text": "def getRotationMatrix2D(center, angle, scale) -> retval:\n ...", "title": "" }, { "docid": "e341a0fe0d3540b934a7a08011d38401", "score": "0.61530095", "text": "def create_rotation_matrix(self, angle):\n angle = math.radians(angle)\n array = np.array([[1, 0, 0],\n [0, math.cos(angle), -math.sin(angle)],\n [0, math.sin(angle), math.cos(angle)]])\n array = np.array([[math.cos(angle), -math.sin(angle), 0],\n [math.sin(angle), math.cos(angle), 0],\n [0, 0, 1]])\n return array", "title": "" }, { "docid": "e94e9ae896ba726d918159cd37a3eda7", "score": "0.6146239", "text": "def _Rotate(self, image, transform):\n degrees = transform.rotate()\n if degrees < 0 or degrees % 90 != 0:\n raise apiproxy_errors.ApplicationError(\n images_service_pb.ImagesServiceError.BAD_TRANSFORM_DATA)\n degrees %= 360\n\n degrees = 360 - degrees\n return image.rotate(degrees)", "title": "" }, { "docid": "6b81df7c671b49c23b144e8d9f368651", "score": "0.61447", "text": "def rotate(self, angle):\n rotmat = rotation_matrix_2d(angle)\n rotated = np.dot(rotmat.T, [self.pix_x.value, self.pix_y.value])\n self.pix_x = rotated[0] * self.pix_x.unit\n self.pix_y = rotated[1] * self.pix_x.unit\n self.pix_rotation -= Angle(angle)\n self.cam_rotation -= Angle(angle)", "title": "" }, { "docid": "2b0b4ff61e9021280844fd1b0fdd0c97", "score": "0.61444294", "text": "def rotation_matrix(a):\n R = np.eye(4)\n R[:3, :3] = linalg.expm([[0, -a[2], a[1]], [a[2], 0, -a[0]], [-a[1], a[0], 0]])\n return R", "title": "" }, { "docid": "fb7e407e075aa6ef0f6f6e947134e19a", "score": "0.6131844", "text": "def rotation_matrix(w, is_numpy=False):\n w = np.array([np.deg2rad(i) for i in w])\n w = 
torch.from_numpy(w).to(dtype = torch.float)\n\n theta1, theta2, theta3 = w[0], w[1], w[2]\n\n zero = theta1.detach()*0\n one = zero.clone()+1\n\n cosx, sinx, cosy, siny, cosz, sinz = theta1.cos(), theta1.sin(), theta2.cos(), theta2.sin(), theta3.cos(), theta3.sin()\n\n r_x = torch.stack([one, zero, zero,\n zero, cosx, sinx,\n zero, -sinx, cosx]).view( 3, 3)\n\n r_y = torch.stack([cosy, zero, siny,\n zero, one, zero,\n -siny, zero, cosy]).view( 3, 3)\n\n r_z = torch.stack([cosz, -sinz, zero,\n sinz, cosz, zero,\n zero, zero, one]).view( 3, 3)\n\n R = r_x @ r_y @ r_z\n\n if is_numpy:\n R = R.numpy()\n return R", "title": "" }, { "docid": "f1d0eee6b7af1be8635332237a79c966", "score": "0.6101358", "text": "def rot_matrix(angle):\n\n mat = np.array([[np.cos(angle), -np.sin(angle)],\n [np.sin(angle), np.cos(angle)]])\n return mat", "title": "" }, { "docid": "65bdc6ee7cd9073177ec6ed58414a301", "score": "0.60992867", "text": "def rotation_matrix(self, other):\n from sympy.vector.functions import _path\n if not isinstance(other, CoordSys3D):\n raise TypeError(str(other) +\n \" is not a CoordSys3D\")\n # Handle special cases\n if other == self:\n return eye(3)\n elif other == self._parent:\n return self._parent_rotation_matrix\n elif other._parent == self:\n return other._parent_rotation_matrix.T\n # Else, use tree to calculate position\n rootindex, path = _path(self, other)\n result = eye(3)\n i = -1\n for i in range(rootindex):\n result *= path[i]._parent_rotation_matrix\n i += 2\n while i < len(path):\n result *= path[i]._parent_rotation_matrix.T\n i += 1\n return result", "title": "" }, { "docid": "58b368b97b9a57516f3156bdf2a7c4de", "score": "0.60900974", "text": "def rotationMatrix_RzRyRz(self):\n\n R = Compute3DRotationMatrix_RzRyRz(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "title": "" }, { "docid": "9821c22557111009f1c9e03016a48a2e", "score": "0.6079591", "text": "def _rotation_matrix_uniaxial(theta,phi, R):\n costheta = cos(theta)\n sintheta = sin(theta)\n cosphi = cos(phi)\n sinphi = sin(phi)\n \n R[0,0] = costheta * cosphi\n R[0,1] = - sinphi \n R[0,2] = cosphi * sintheta\n R[1,0] = costheta * sinphi \n R[1,1] = cosphi\n R[1,2] = sintheta * sinphi\n R[2,0] = -sintheta\n R[2,1] = 0.\n R[2,2] = costheta", "title": "" }, { "docid": "8ec588509ab17fe490fe67eda83e5a6c", "score": "0.6077583", "text": "def detector_rotation_matrix(tilt_x, tilt_y, tilt_z):\n r1 = np.array([[np.cos(tilt_z), -np.sin(tilt_z), 0], # note this is r.h.\n [np.sin(tilt_z), np.cos(tilt_z), 0],\n [0, 0, 1]], np.float)\n r2 = np.array([[np.cos(tilt_y), 0, np.sin(tilt_y)],\n [0, 1, 0],\n [-np.sin(tilt_y), 0, np.cos(tilt_y)]], np.float)\n r3 = np.array([[1, 0, 0],\n [0, np.cos(tilt_x), -np.sin(tilt_x)],\n [0, np.sin(tilt_x), np.cos(tilt_x)]], np.float)\n r2r1 = np.dot(np.dot(r3, r2), r1)\n return r2r1", "title": "" }, { "docid": "64651827c3e736d89e2a8e6fac0a152d", "score": "0.60745907", "text": "def rotation_matrix(gamma,beta,alpha):\n\n alpha = np.deg2rad(alpha)\n beta = np.deg2rad(beta)\n gamma = np.deg2rad(gamma)\n\n R = np.array([[np.cos(alpha)*np.cos(beta), np.cos(alpha)*np.sin(beta)*np.sin(gamma) - np.sin(alpha)*np.cos(gamma), np.cos(alpha)*np.sin(beta)*np.cos(gamma) + np.sin(alpha)*np.sin(gamma)], \n [np.sin(alpha)*np.cos(beta), np.sin(alpha)*np.sin(beta)*np.sin(gamma) + np.cos(alpha)*np.cos(gamma), np.sin(alpha)*np.sin(beta)*np.cos(gamma) - np.cos(alpha)*np.sin(gamma)], \n [-np.sin(beta), np.cos(beta)*np.sin(gamma), 
np.cos(beta)*np.cos(gamma)]])\n\n \"\"\"Old rotation matrix:\n R = np.array([[np.cos(alpha)*np.cos(beta), np.cos(alpha)*np.sin(alpha)*np.sin(gamma) - np.sin(alpha)*np.cos(gamma), np.cos(alpha)*np.sin(beta)*np.cos(gamma) + np.sin(alpha)*np.sin(gamma)], \n [np.sin(alpha)*np.cos(beta), np.sin(alpha)*np.sin(beta)*np.sin(gamma) + np.cos(alpha)*np.cos(gamma), np.sin(alpha)*np.sin(beta)*np.cos(gamma) - np.cos(alpha)*np.sin(gamma)], \n [-np.sin(beta), np.cos(beta)*np.sin(gamma), np.cos(beta)*np.cos(gamma)]])\n \"\"\"\n return R", "title": "" } ]
c475eb5c714dda23a8b0070cdd4abed4
Test getting contract ABI by an invalid address
[ { "docid": "c505aa8a0c7dd1373097fb10f11df59a", "score": "0.75141275", "text": "def test_get_wrong_contract_abi(self):\n response = _get_contracts_abi_sync({\"wrong\": \"0x0\"})\n self.assertSequenceEqual(response, {\"wrong\": []})", "title": "" } ]
[ { "docid": "1258b3b3c659b69492be70df72559956", "score": "0.7237029", "text": "def test_get_contract_abi(self):\n response = _get_contracts_abi_sync({1: TEST_CONTRACT_ADDRESS})\n self.assertSequenceEqual(response, {1: TEST_CONTRACT_ABI})", "title": "" }, { "docid": "1f99a05372ffdf0e888c813ea6e5e9a5", "score": "0.6720482", "text": "def test_validate_address():\n account = FetchAICrypto()\n assert FetchAIApi.is_valid_address(account.address)\n assert not FetchAIApi.is_valid_address(account.address + \"wrong\")", "title": "" }, { "docid": "fc3a5259730cf2b822ae88f6e34bd43e", "score": "0.6447307", "text": "def test_missing_address(self):\n # type: ()->None\n adapter = NetworkAdapter({\n 'addrFam': 'inet',\n 'name': 'eth0',\n 'source': 'tunnel'\n })\n self.assertRaises(ValueError, adapter.validateAll)", "title": "" }, { "docid": "87396b2b64fa6184fafd43545776acdf", "score": "0.628779", "text": "def test_invalid(self):\n\t\t\ti = _Iface({\n\t\t\t\t'address': '2.3.4.5',\n\t\t\t\t'netmask': '42',\n\t\t\t\t'ipv6/default/address': '1:2:3:4:5:6:7:8',\n\t\t\t\t'ipv6/default/prefix': '4711',\n\t\t\t\t})\n\t\t\tself.assertEqual(False, i.ipv4_address())\n\t\t\tself.assertEqual(False, i.ipv6_address())", "title": "" }, { "docid": "6ade7e714503b73b719eddb30792c857", "score": "0.6287487", "text": "def test_importing_abi_bytecode(self):\n abi, bytecode = get_abi_bytecode(CONTRACT_NAME)\n self.assertTrue(isinstance(abi, list), \n \"Expected type str, received %s\" % type(abi))\n self.assertTrue(isinstance(bytecode, str), \n \"Expected type str, received %s\" % type(bytecode))\n print(\"\\ntest_importing_abi_bytecode: 2 assertion passed\")", "title": "" }, { "docid": "cc54d17db4bcaf3ea1868b5a3a15048a", "score": "0.6230157", "text": "def test_get_contracts_abi(self):\n addresses = [\"address\" + str(i) for i in range(100)]\n chunks = [[(0, \"address1\")], [(1, \"address2\")]]\n abis = [{1: \"abi2\"}, {0: \"abi1\"}]\n\n self.contracts._split_on_chunks = MagicMock(return_value=chunks)\n self.contracts.pool.map = MagicMock(return_value=abis)\n\n response = self.contracts._get_contracts_abi(addresses)\n\n self.contracts._split_on_chunks.assert_called_with(\n [(index, address) for index, address in enumerate(addresses)], INPUT_PARSING_PROCESSES)\n self.contracts.pool.map.assert_called_with(contracts._get_contracts_abi_sync, [dict(chunk) for chunk in chunks])\n self.assertSequenceEqual([\"abi1\", \"abi2\"], response)", "title": "" }, { "docid": "99e9ac9f547862ae12cab6f3c1f9b407", "score": "0.61915874", "text": "def get_contract_abi(chain_id, contract_address) -> dict:\n api_key = get_etherscan_api_key()\n ChainEtherscanContract = ChainEtherscanContractFactory.create(chain_id)\n api = ChainEtherscanContract(address=contract_address, api_key=api_key)\n json_abi = api.get_abi()\n abi = json.loads(json_abi)\n return abi", "title": "" }, { "docid": "477057950fe3833f78d3e9677594bef7", "score": "0.61894023", "text": "def verify_address(self, address: str) -> data.AddressValidation:", "title": "" }, { "docid": "8b5c9a62b57adea7924f4d9c69b19d84", "score": "0.61666274", "text": "def test_get_address_info_no_token_scenario():\n df = ethplorer_model.get_address_info(\n address=\"0xb274827BCbB6c06527DDe24c1BC7147715b49415\",\n )\n assert isinstance(df, pd.DataFrame)\n assert not df.empty", "title": "" }, { "docid": "7981cf5e541c7c26d049cfcac700e7e9", "score": "0.6166214", "text": "def test_no_codes(self):\n addressbase = AddressBaseGeocoderAdapter('AA11AA')\n with self.assertRaises(CodesNotFoundException):\n result = 
addressbase.geocode()\n\n # point only geocode should return a result anyway\n result = addressbase.geocode_point_only()\n self.assertEqual('addressbase', result['source'])", "title": "" }, { "docid": "4a3be8aa7df8d8d6c7c4bb67b6f2afeb", "score": "0.61391056", "text": "def test_portfolio_non_checksum_address(self):\n path_params = {\"address\": self.address.lower()}\n url = self.app.url_path_for(\"portfolio\", **path_params)\n m_contract = mock.Mock()\n m_contract().functions.balanceOf().call.return_value = 0\n m_execute = mock.Mock(\n side_effect=(\n GQL_ETH_PRICE_RESPONSE,\n GQL_LIQUIDITY_POSITIONS_RESPONSE,\n GQL_MINTS_BURNS_TX_RESPONSE,\n GQL_PAIR_INFO_RESPONSE,\n )\n )\n with patch_web3_contract(m_contract), patch_client_execute(\n m_execute\n ), patch_session_fetch_schema():\n response = self.client.get(url)\n assert response.status_code == status.HTTP_200_OK", "title": "" }, { "docid": "24a50996d5c66854b60b844cb7382d34", "score": "0.6108441", "text": "def does_address_exist(self, parameters):", "title": "" }, { "docid": "dbed4b27f91f639f822fd86dfd5b340e", "score": "0.61066467", "text": "def test_get_contract_address(mock_api_call):\n\n mock_res = [\n {\n \"code_id\": 999,\n \"creator\": \"cosmos_creator_address\",\n \"label\": \"SOME_LABEL\",\n \"address\": \"cosmos_contract_address\",\n }\n ]\n\n mock_api_call.return_value = json.dumps(mock_res).encode(\"ascii\")\n\n cosmos_api = FetchAIApi(**FETCHAI_TESTNET_CONFIG)\n\n res = cosmos_api.get_last_contract_address(code_id=999)\n assert res == mock_res[-1][\"address\"]", "title": "" }, { "docid": "d69fdc299b0884232f15d74196401267", "score": "0.6072128", "text": "def test_valid(self):\n addressbase = AddressBaseGeocoderAdapter('bb 1 1B B') # intentionally spurious whitespace and case\n result = addressbase.geocode()\n self.assertEqual('addressbase', result['source'])\n self.assertEqual('B01000001', result['council_gss'])", "title": "" }, { "docid": "4b1f3df6a3b5b8cd3122950a5a8907f8", "score": "0.60308325", "text": "def test_get_multiple_contracts_abi(self):\n response = _get_contracts_abi_sync({1: TEST_CONTRACT_ADDRESS, 2: TEST_CONTRACT_ADDRESS})\n self.assertSequenceEqual(response, {1: TEST_CONTRACT_ABI, 2: TEST_CONTRACT_ABI})", "title": "" }, { "docid": "56d15fa7da6d761f4e9a52f90435cc40", "score": "0.6013012", "text": "def test_incomplete_addr(self):\n\t\t\ti = _Iface({\n\t\t\t\t'address': '2.3.4.5',\n\t\t\t\t'ipv6/default/address': '1:2:3:4:5:6:7:8',\n\t\t\t\t})\n\t\t\tself.assertEqual(None, i.ipv4_address())\n\t\t\tself.assertEqual(None, i.ipv6_address())", "title": "" }, { "docid": "ce678c8d35351af57f22397b7b02ce88", "score": "0.6004781", "text": "def testInvalidAddress(self):\n result = client.get('/maps/geocode/search')\n self.assertEqual(result['error'], 'parameterMissing')", "title": "" }, { "docid": "c3e7d210c43557ef6e90988408727b1e", "score": "0.5878379", "text": "def test_load_contract_interface():\n path = Path(ROOT_DIR, \"tests\", \"data\", \"dummy_contract\", \"build\", \"some.wasm\")\n result = FetchAIApi.load_contract_interface(path)\n assert \"wasm_byte_code\" in result", "title": "" }, { "docid": "59901b16c5ed7f59dc0233c36f147b50", "score": "0.5853255", "text": "def test_empty_address_input(self):\n with app.app_context():\n with self.assertRaises(EmptyAddressException):\n fetch_address_info('')\n with self.assertRaises(EmptyAddressException):\n fetch_address_info(None)", "title": "" }, { "docid": "60e0dfaf9c3c1f4b616db99faab21edb", "score": "0.5816671", "text": "def test_save_contracts_abi(self):\n test_contracts = 
[{\"blockNumber\": i, 'address': TEST_CONTRACT_ADDRESS, 'id': i + 1} for i in range(10)]\n self.client.bulk_index(TEST_CONTRACTS_INDEX, test_contracts)\n self.contracts.save_contracts_abi()\n contracts = self.client.search(index=TEST_CONTRACTS_ABI_INDEX, query=\"WHERE abi IS NOT NULL\", fields=[\"abi\"])\n abis = [json.loads(contract[\"_source\"][\"abi\"]) for contract in contracts]\n self.assertCountEqual(abis, [TEST_CONTRACT_ABI] * 10)", "title": "" }, { "docid": "df5d6b019c017806a6839ae6bc6654dd", "score": "0.5814186", "text": "def test_address_vectors(self):\n\n for c in self.cases:\n # Checksummed should match directly in base 58\n base58 = self.encode(c.checksummed, 0)\n self.assertEqual(base58, c.base58)\n # Decode it and make sure it matches checksummed again\n decoded = self.decode(c.base58, 0)\n self.assertEqual(decoded, utf8(c.checksummed))\n\n # Compute the checksum in the call\n base58 = self.encode(c.ripemd_network, self.FLAG_CHECKSUM)\n self.assertEqual(base58, c.base58)\n\n # Decode without checksum validation/stripping, should match\n # checksummed value\n decoded = self.decode(c.base58, 0)\n self.assertEqual(decoded, utf8(c.checksummed))\n\n # Decode with checksum validation/stripping and compare\n # to original ripemd + network\n decoded = self.decode(c.base58, self.FLAG_CHECKSUM)\n self.assertEqual(decoded, utf8(c.ripemd_network))", "title": "" }, { "docid": "9f0359f183bbcc6125642fe7798c0738", "score": "0.5813725", "text": "def test_deploy_contract(self):\n self.contract = EthContract(NODE_ADDRESS, already_deployed=False, provider='ipc')\n abi, bytecode = get_abi_bytecode(CONTRACT_NAME)\n address = self.contract.deploy(abi=abi, bytecode=bytecode)\n self.assertTrue(address, \n \"Expected address to be returned, but nothig was returned\")\n self.assertNotEqual(self.contract._contract, None,\n \"Expected the contract to have a _contract, however _contract was type None\")\n self.assertNotEqual(self.contract._functions, None,\n \"Expected the contract to have a _functions, however _functions was type None\")\n print(\"\\ntest_deploy_contract: 3 assertion passed\")", "title": "" }, { "docid": "8bc3c271fecad4b6f983b1f7fb9b09a8", "score": "0.5812862", "text": "def test_iterate_contracts_without_abi(self):\n self.contracts = self.contracts_class(\n self.indices,\n parity_hosts=[(0, 8, \"http://localhost:8545\")]\n )\n self.contracts._get_max_block = MagicMock(return_value=2)\n self.add_contracts_with_and_without_abi()\n contracts = [c for c in self.contracts._iterate_contracts_without_abi()]\n contracts = [c[\"_id\"] for contracts_list in contracts for c in contracts_list]\n self.assertCountEqual(contracts, [str(i) for i in range(1, 9)])", "title": "" }, { "docid": "9cca3dfc596394ca25867678a2faf235", "score": "0.58028615", "text": "def test_get_address_from_public_key():\n fet_crypto = FetchAICrypto()\n address = FetchAIApi.get_address_from_public_key(fet_crypto.public_key)\n assert address == fet_crypto.address, \"The address must be the same.\"", "title": "" }, { "docid": "cfc3571e1bc28045f8c3d7ec6526d1d2", "score": "0.57993376", "text": "def get_asset_address(asset: str) -> bytearray:\r\n if asset.upper() == 'ONYX':\r\n contract_address = ONT_CONTRACT_ADDRESS\r\n elif asset.upper() == 'OXG':\r\n contract_address = ONG_CONTRACT_ADDRESS\r\n else:\r\n raise ValueError(\"asset is not equal to ONT or ONG\")\r\n return contract_address # [20]byte\r", "title": "" }, { "docid": "d4f8889275c20bd432b4a33f4db5796a", "score": "0.57930195", "text": "def get_address(self, address: str) -> 
data.Address:", "title": "" }, { "docid": "376512337a0649ad2b1b8a3466abb22d", "score": "0.57872933", "text": "def get(self, request, address, format=None):\n if not Web3.isChecksumAddress(address):\n return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY, data='Invalid ethereum address')\n\n response = super().get(request, address)\n if response.data['count'] == 0:\n response.status_code = status.HTTP_404_NOT_FOUND\n\n return response", "title": "" }, { "docid": "376512337a0649ad2b1b8a3466abb22d", "score": "0.57872933", "text": "def get(self, request, address, format=None):\n if not Web3.isChecksumAddress(address):\n return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY, data='Invalid ethereum address')\n\n response = super().get(request, address)\n if response.data['count'] == 0:\n response.status_code = status.HTTP_404_NOT_FOUND\n\n return response", "title": "" }, { "docid": "7fac42c2d067bfb468f4c01aaf719216", "score": "0.57688916", "text": "def test_child_address_check(self):\n child_address = utils.mk_contract_address(self.c.address, 1)\n self.c.mint(1)\n self.assertLess(0, len(self.s.head_state.get_code(child_address)))\n with self.assertRaises(TransactionFailed):\n self.s.tx(\n sender=self.t.k0,\n to=child_address,\n value=0,\n data=b'',\n startgas=10 ** 20)", "title": "" }, { "docid": "dd7c976bdbda0e853c5e1a37f7011e76", "score": "0.576561", "text": "def test_build_address_has_zip():\n from ..views.default import build_address\n assert '99920' in build_address('a', 'b', 'c', '99920')", "title": "" }, { "docid": "c38141328b37b04d613b8bd55e64d418", "score": "0.57597023", "text": "def test_incomplete_net(self):\n\t\t\ti = _Iface({\n\t\t\t\t'netmask': '255.255.255.0',\n\t\t\t\t'ipv6/default/prefix': '64',\n\t\t\t\t})\n\t\t\tself.assertEqual(None, i.ipv4_address())\n\t\t\tself.assertEqual(None, i.ipv6_address())", "title": "" }, { "docid": "ae1167bb01e22573418045913fff674e", "score": "0.57353884", "text": "def test_incomplete_addr(self):\n\t\t\tt = Interfaces(ucr={\n\t\t\t\t'interfaces/eth0/address': '2.3.4.5',\n\t\t\t\t'interfaces/eth0/ipv6/default/address': '1:2:3:4:5:6:7:8',\n\t\t\t\t})\n\t\t\tself.assertEqual([],\n\t\t\t\t\t[s.name for _n, s in t.ipv4_interfaces])\n\t\t\tself.assertEqual([],\n\t\t\t\t\t[s.name for s, _n in t.ipv6_interfaces])\n\t\t\tself.assertEqual(None,\n\t\t\t\t\tt.get_default_ip_address())\n\t\t\tself.assertEqual(None,\n\t\t\t\t\tt.get_default_ipv4_address())\n\t\t\tself.assertEqual(None,\n\t\t\t\t\tt.get_default_ipv6_address())", "title": "" }, { "docid": "93c4a53ce3cd0fed4f3fa2f289550f64", "score": "0.5726956", "text": "def test_portfolio_invalid_address(self):\n path_params = {\"address\": \"0xInvalidAdress\"}\n url = self.app.url_path_for(\"portfolio\", **path_params)\n response = self.client.get(url)\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n assert response.json() == {\"detail\": \"Invalid address 0xInvalidAdress\"}", "title": "" }, { "docid": "9bba37bc0ef895521b6262ee698aac9c", "score": "0.57206047", "text": "def assert_o_address(value, error_msg):\n assertions.assertEqual(dut.o_address.value.binstr, value, error_msg)", "title": "" }, { "docid": "b198c6faea9576cf24bf1e859a2ad5dc", "score": "0.572054", "text": "def test_020_address_validation_error(self):\n address_validation_type = AddressValidation.address_request_type(\n FirmName='John Doe',\n Zip5=\"06371\"\n )\n\n self.assertRaises(\n USPSInvalidAddress, self.address_validation.request,\n address_validation_type\n )", "title": "" }, { "docid": 
"e006654051c9bdd21032a24b30afa96d", "score": "0.569328", "text": "def test_invalid_business_address(session, test_name, filing):\n filing['filing']['changeOfRegistration']['offices']['businessOffice']['deliveryAddress']['addressRegion'] = \\\n 'invalid'\n filing['filing']['changeOfRegistration']['offices']['businessOffice']['deliveryAddress']['addressCountry'] = \\\n 'invalid'\n\n nr_res = copy.deepcopy(nr_response)\n nr_res['legalType'] = filing['filing']['changeOfRegistration']['nameRequest']['legalType']\n with patch.object(NameXService, 'query_nr_number', return_value=MockResponse(nr_res)):\n with patch.object(NaicsService, 'find_by_code', return_value=naics_response):\n err = validate(filing)\n\n assert err\n assert err.msg[0]['error'] == \"Address Region must be 'BC'.\"\n assert err.msg[1]['error'] == \"Address Country must be 'CA'.\"", "title": "" }, { "docid": "71940bd813f268820c08f6c8a4a9fd90", "score": "0.56759715", "text": "def test_check_address_bounds(self):\n self.uut_64 = BindingState(True)\n\n # TODO test_check_address_bounds: Why does this raise an exception?\n # address and seg_end address are not 0 when entering the function\n # self.uut_64.address = 0\n # self.uut_64.seg_end_address = 0\n # self.uut_64.check_address_bounds()\n\n self.uut_64 = BindingState(True)\n self.uut_64.address = 0\n self.uut_64.seg_end_address = 1\n self.uut_64.check_address_bounds()\n\n self.uut_64 = BindingState(True)\n self.uut_64.address = 10000\n self.uut_64.seg_end_address = 10\n with self.assertRaises(CLEInvalidBinaryError):\n self.uut_64.check_address_bounds()\n\n self.uut_64 = BindingState(True)\n self.uut_64.address = -10000\n self.uut_64.seg_end_address = -100000\n with self.assertRaises(CLEInvalidBinaryError):\n self.uut_64.check_address_bounds()", "title": "" }, { "docid": "9576e08302c53d7a376f026735df4663", "score": "0.566798", "text": "def createcontract_simple_test(self):\n ret = self.node.createcontract(\"6060604052341561000c57fe5b5b33600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b5b6101c88061005f6000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806313af4035146100515780635e01eb5a1461007f578063893d20e8146100d1575bfe5b61007d600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610123565b005b341561008757fe5b61008f610168565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156100d957fe5b6100e1610171565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b33600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b50565b60003390505b90565b6000600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690505b905600a165627a7a7230582050b454be91cd91e08099814fc2192c5fded81b632733f6c808e4d75e85a766a50029\", 1000000)\n assert('txid' in ret)\n assert('sender' in ret)\n assert('hash160' in ret)\n contract_address = ret['address']\n sender = str(base58_to_byte(ret['sender'], 25)[1])[2:-1]\n self.node.generate(1)\n \n ret = self.node.getaccountinfo(contract_address)\n expected_account_info = {\n \"address\": contract_address,\n \"balance\": 0,\n \"storage\": {\n \"290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563\": {\n \"0000000000000000000000000000000000000000000000000000000000000000\": 
\"000000000000000000000000\" + sender\n }\n },\n \"code\": \"60606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806313af4035146100515780635e01eb5a1461007f578063893d20e8146100d1575bfe5b61007d600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610123565b005b341561008757fe5b61008f610168565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156100d957fe5b6100e1610171565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b33600060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b50565b60003390505b90565b6000600060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1690505b905600a165627a7a7230582050b454be91cd91e08099814fc2192c5fded81b632733f6c808e4d75e85a766a50029\"\n }\n assert_equal(ret, expected_account_info)", "title": "" }, { "docid": "724d92da89fb8d8edbfa7aa1f93f50e3", "score": "0.56499785", "text": "def test_invalid_address_memory_read(self, tdev):\n INVALID_ADDRESS = 0xFFFFFFFF\n NUM_BYTES = 4\n\n with pytest.raises(tiflash.TIFlashError):\n tiflash.memory_read(INVALID_ADDRESS, NUM_BYTES,\n serno=tdev['serno'],\n connection=tdev['connection'],\n devicetype=tdev['devicetype'])", "title": "" }, { "docid": "2e7246348740866ebf410402437c81ef", "score": "0.56450105", "text": "def test_implementations(self):\n address = None\n for impl in ExternalAddress.implementations:\n if impl == Hostname:\n continue\n obj = impl(False, 5)\n if address is None:\n address = obj.lookup_external_address()\n else:\n lookup = obj.lookup_external_address()\n self.assertEqual(\n address, lookup,\n \"invalid address returned by %s ('%s' != '%s')\" %\n (impl.__name__, address, lookup))", "title": "" }, { "docid": "10989d88b4a6590a1ca70db88454675a", "score": "0.56410205", "text": "def test_address_without_cities_and_postal_code(self):\n response = self.client.get(\"/api/address/?q=1 allée des faneurs\")\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "94a4574ae7cef956a1873a214c4bc38b", "score": "0.56039953", "text": "def get(self, request, address, format=None):\n if not Web3.isChecksumAddress(address):\n return Response(status=status.HTTP_422_UNPROCESSABLE_ENTITY)\n else:\n try:\n SafeContract.objects.get(address=address)\n except SafeContract.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n safe_balances = BalanceServiceProvider().get_balances(address)\n serializer = self.serializer_class(data=safe_balances, many=True)\n serializer.is_valid()\n return Response(status=status.HTTP_200_OK, data=serializer.data)", "title": "" }, { "docid": "b491e1f5fd279f00730b9723ef6182b0", "score": "0.56027097", "text": "def test_clean_address_invalid_ip_address_as_str(self):\n for invalid_str in (\n \"\",\n \"address\",\n \"20018:db8::\",\n \"192.0.2.256\",\n \"192.0.2.0/24\",\n \"2001:db8::/32\",\n ):\n with self.subTest(invalid_str=invalid_str):\n self.assertEqual(\n clean_address(invalid_str),\n None,\n )", "title": "" }, { "docid": "0251cd37aa3f657dfa1d8aec72234436", "score": "0.5576259", "text": "def verify_address(self, address):\n return self.http_get('/wallet/verify/address/' + address).get('Valid')", "title": "" }, { "docid": "1fdff60d2b4da6f6e8c9e6a2c62a6dd8", "score": "0.55520827", "text": "def test_z1w_07(self, err):\n with self.assertRaises(SystemExit):\n 
faults_inject.main([\"-i\", self.file_in.name, \"-o\", self.file_out.name, \"-w\", \"2\", \"Z1W\", \"abc\"])\n self.assertEqual('Wrong address format : abc\\n', err.getvalue())", "title": "" }, { "docid": "597bd4e90a94490898a0342ebb54fac1", "score": "0.5549618", "text": "def test_net_invalid_interface(patch_net):\n with pytest.raises(AttributeError):\n _ = patch_net(interface=12)", "title": "" }, { "docid": "f832df9aa054784b2f2514a85deb5327", "score": "0.5540262", "text": "def test_get_contracts_vendor_v2(self):\n pass", "title": "" }, { "docid": "bdc5623eedcc1156018b6b40e9946cac", "score": "0.55359125", "text": "def test_get_address_entity(self):\n pass", "title": "" }, { "docid": "02b52317ce5547741093356cd6bbf77c", "score": "0.55305123", "text": "def test_convert_address(self):\n # simple address test, with C/O line\n lines = \"\\n\".join((\"John Smith\", \"C/O Smith Johnson\", \"\",\n \"1234 Everton Road\", \"Baltimore, MD 12345\"))\n address = convert_address(lines)\n self.assertEqual(address.addressee, \"John Smith C/O Smith Johnson\")\n self.assertEqual(address.street, \"1234 Everton Road\")\n self.assertEqual(address.city, \"Baltimore\")\n self.assertEqual(address.state, \"MD\")\n self.assertEqual(address.postal_code, \"12345\")\n\n # street and city/state out of order, with ATTN line and 9-digit\n # postal code\n lines = \"\\n\".join((\"ATTN: Michael Chapin\",\n \"15 35th Street\", \"Seattle, WA 54321-9494\"))\n address = convert_address(lines)\n self.assertEqual(address.addressee, \"Michael Chapin\")\n self.assertEqual(address.street, \"15 35th Street\")\n self.assertEqual(address.city, \"Seattle\")\n self.assertEqual(address.state, \"WA\")\n self.assertEqual(address.postal_code, \"54321-9494\")\n\n # address with no adressee\n lines = \"\\n\".join((\"2010 KALORAMA RD NW\", \"WASHINGTON DC 20009\"))\n address = convert_address(lines)\n self.assertIsNone(address.addressee)\n self.assertEqual(address.street, \"2010 KALORAMA RD NW\")\n self.assertEqual(address.city, \"WASHINGTON\")\n self.assertEqual(address.state, \"DC\")\n self.assertEqual(address.postal_code, \"20009\")", "title": "" }, { "docid": "36fbd05d20ab3623f34131c3575b90ea", "score": "0.5507185", "text": "def test_no_archive() -> None:\n with pytest.raises(ArchiveNotInAvailabilityAPIResponse):\n availability_api = WaybackMachineAvailabilityAPI(\n url=f\"https://{rndstr(30)}.cn\", user_agent=user_agent\n )\n _ = availability_api.archive_url", "title": "" }, { "docid": "e75baf0d86d94ca90f6649a571ff0daf", "score": "0.55046874", "text": "def test_connection(self):\n address = dict(\n Line1 = '1706 Biscayne Blvd',\n Line2 = '',\n City = 'Miami',\n PostalCode = '33137',\n Region = 'Florida',\n Country = 'USA',\n )\n response = self.get('address/validate', address)\n if 'Address' in response:\n return True\n return False", "title": "" }, { "docid": "743473ea431963bc4f18570f50a8e3f5", "score": "0.5488605", "text": "def getCompatibleAddress(program: ghidra.program.model.listing.Program, addr: ghidra.program.model.address.Address, otherProgram: ghidra.program.model.listing.Program) -> ghidra.program.model.address.Address:\n ...", "title": "" }, { "docid": "cc963c64047d350ec00bc491bd0814e0", "score": "0.5467917", "text": "def test_list_address_txs_eth(self):\n pass", "title": "" }, { "docid": "d69824cf9524598646be1f153f310780", "score": "0.5466527", "text": "def test_address_is_not_coordinate(self):\n wrong_samples = [\n '134.854,-25.828', 'E134.854, S25.828', '134.854E, 25.828S',\n '134°51\\'15.88\", -25°49\\'41.1\"',\n 
'25°49\\'41.1\"S,134°51\\'15.88\"E',\n '2549.67,S, 13451.26,E'\n ]\n with app.app_context():\n for sample in wrong_samples:\n with self.assertRaises(InvalidAddressFormatException, msg=f'Exception not raised for \"{sample}\"'):\n validate_address_is_not_coordinates(sample)", "title": "" }, { "docid": "ae8d2802253a7abe077b3983c3d583ff", "score": "0.54634815", "text": "def test_create_empty_contract(self):\n self.contract = EthContract(NODE_ADDRESS, already_deployed=False, provider='ipc')\n self.assertTrue(isinstance(self.contract, EthContract), \n \"Expected type EthContract, received %s\" % type(self.contract))\n print(\"\\ntest_create_empty_contract: 1 assertion passed\")", "title": "" }, { "docid": "a05d42db496bf1a6a9dbac35a4c26b01", "score": "0.5455791", "text": "def validateAddress(address: t.Any) -> t.Union[str, None]:\n s = re_hex_chars.sub(\"\", str(address))\n if len(s) == 16:\n return s.upper()\n elif len(s) == 14:\n return str(s + OneWireBus.crc8(bytes.fromhex(s)).hex()).upper()\n else:\n return None", "title": "" }, { "docid": "d8dd1f593b4206b55877ac0b85416a40", "score": "0.5454116", "text": "def test_iterate_contracts_without_abi_call_iterate_contracts(self):\n test_iterator = \"iterator\"\n self.contracts._iterate_contracts = MagicMock(return_value=test_iterator)\n\n contracts = self.contracts._iterate_contracts_without_abi()\n\n self.contracts._iterate_contracts.assert_any_call(partial_query=ANY, fields=[\"address\"])\n assert contracts == test_iterator", "title": "" }, { "docid": "fc1cffc92aec8990996d397d8dc6ffd8", "score": "0.54528534", "text": "def test_reverse_lookup(self):\n ipaddr = '8.8.8.8'\n expect = 'google-public-dns-a.google.com'\n address = ExternalAddress(False, 5).lookup_reverse_dns(ipaddr)\n self.assertEqual(\n expect, address,\n \"invalid reverse lookup for ip %s ('%s' != '%s')\" %\n (ipaddr, expect, address))", "title": "" }, { "docid": "836d4d9a74d27bc384d1e5e5331ebbe6", "score": "0.5441245", "text": "def test_deny_address_just_off_edge_of_permitted_network(self):\n self.uut.permit(\"172.16.0.0/12\")\n self.assertEqual(self.uut.evaluate(\"172.32.0.0\"), False)", "title": "" }, { "docid": "4560701217184a8d8405a93e3fc84732", "score": "0.5440904", "text": "def valid_address(address: str):\n if type(address) is not str:\n raise TealInputError(\"An address needs to be a string\")\n\n if len(address) != 58:\n raise TealInputError(\n \"Address length is not correct. 
Should \"\n + \"be a base 32 string encoded 32 bytes public key + 4 bytes checksum\"\n )\n\n valid_base32(address)", "title": "" }, { "docid": "aaa48c6e15fa5d37d8ba7f67e8ee7676", "score": "0.5438507", "text": "def test_save_contracts_abi_status(self):\n test_contracts = [{\"blockNumber\": i, 'address': TEST_CONTRACT_ADDRESS, 'id': i + 1} for i in range(10)]\n self.client.bulk_index(TEST_CONTRACTS_INDEX, test_contracts)\n self.contracts.save_contracts_abi()\n contracts_count = self.client.count(index=TEST_CONTRACTS_ABI_INDEX, query=\"WHERE abi_extracted = 1\")\n assert contracts_count == 10", "title": "" }, { "docid": "539cc49a98d4a81f54380b1d347a47f4", "score": "0.54227054", "text": "async def test_nibegw_address_inuse(hass: HomeAssistant, mock_connection: Mock) -> None:\n result = await _get_connection_form(hass, \"nibegw\")\n\n mock_connection.start.side_effect = AddressInUseException()\n\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], MOCK_FLOW_NIBEGW_USERDATA\n )\n\n assert result2[\"type\"] == FlowResultType.FORM\n assert result2[\"errors\"] == {\"listening_port\": \"address_in_use\"}\n\n mock_connection.start.side_effect = Exception()\n\n result2 = await hass.config_entries.flow.async_configure(\n result[\"flow_id\"], MOCK_FLOW_NIBEGW_USERDATA\n )\n\n assert result2[\"type\"] == FlowResultType.FORM\n assert result2[\"errors\"] == {\"base\": \"unknown\"}", "title": "" }, { "docid": "1ba5a39d233b172f567a971169344c99", "score": "0.5420565", "text": "def test_get_address_with_tags(self):\n pass", "title": "" }, { "docid": "f85a5d6665263ea62fbb7d448aadcbab", "score": "0.5415763", "text": "def test_invalid_json() -> None:\n with pytest.raises(InvalidJSONInAvailabilityAPIResponse):\n availability_api = WaybackMachineAvailabilityAPI(url=\"\", user_agent=user_agent)\n _ = availability_api.archive_url", "title": "" }, { "docid": "57e0ec506bddeed9221f3db205b075e7", "score": "0.5407852", "text": "def test_notokens_basic(\n OCEAN_address, network, web3, config, alice_wallet, alice_address\n):\n pool = _deployBPool(web3, config.address_file, network, alice_wallet)\n\n assert not pool.isPublicSwap()\n assert not pool.isFinalized()\n assert not pool.isBound(OCEAN_address)\n assert pool.getNumTokens() == 0\n assert pool.getCurrentTokens() == []\n with pytest.raises(Exception):\n pool.getFinalTokens() # pool's not finalized\n assert pool.getSwapFee() == to_wei(\"1e-6\")\n assert pool.getController() == alice_address\n assert str(pool)\n\n with pytest.raises(Exception):\n pool.finalize(from_wallet=alice_wallet) # can't finalize if no tokens", "title": "" }, { "docid": "321d4564144b94c81bb2035cdb589567", "score": "0.53552204", "text": "def test_bad_broadcast(self):\n with pytest.raises(ValueError, match=\"broadcast\"):\n z_at_value(\n self.cosmo.angular_diameter_distance,\n 1500 * u.Mpc,\n zmin=[0, 2.5, 0.1],\n zmax=[2, 4],\n )", "title": "" }, { "docid": "6e8568c3a21304a6d45f58dc047d6000", "score": "0.53408283", "text": "def test_get_contract_instance():\n cosmos_api = FetchAIApi(**FETCHAI_TESTNET_CONFIG)\n assert cosmos_api.get_contract_instance(\"interface\") is None", "title": "" }, { "docid": "1cfcf8b46f69934d98ed96dca8981500", "score": "0.53363043", "text": "def test_read_invalid(self):\n mac_signature_key_information = objects.MACSignatureKeyInformation()\n args = (self.empty_encoding,)\n self.assertRaisesRegex(\n ValueError,\n \"Invalid struct missing the unique identifier attribute.\",\n mac_signature_key_information.read,\n *args\n )", "title": "" }, { 
"docid": "dfa768c771907d7182c54ffebf6aa58e", "score": "0.5335276", "text": "def test_create_icap_invalid_network(self, fake_vCenter, fake_consume_task, fake_deploy_from_ova, fake_get_info, fake_Ova):\n fake_logger = MagicMock()\n fake_get_info.return_value = {'worked': True}\n fake_Ova.return_value.networks = ['someLAN']\n fake_vCenter.return_value.__enter__.return_value.networks = {'someLAN' : vmware.vim.Network(moId='1')}\n\n with self.assertRaises(ValueError):\n vmware.create_icap(username='alice',\n machine_name='IcapBox',\n image='1.0.0',\n network='someOtherLAN',\n logger=fake_logger)", "title": "" }, { "docid": "863391f6cb6918a3084ae7d4e23ee7d9", "score": "0.53352684", "text": "def test_table_test_address():\n ret = {}\n ret[\"stderr\"] = \"1/2 addressess match.\"\n ret[\"retcode\"] = 0\n mock_cmd = MagicMock(return_value=ret)\n with patch.dict(pf.__salt__, {\"cmd.run_all\": mock_cmd}):\n assert pf.table(\"test\", table=\"bad_hosts\", addresses=[\"1.2.3.4\"])[\"matches\"]", "title": "" }, { "docid": "fe6b6af7a8f6de80c4d177fd5c2cdf24", "score": "0.53299314", "text": "def test_badaddr(self):\n self.assertRaises(InvalidAddress, IPAddr, 'foobar')\n self.assertRaises(InvalidAddress, IPAddr, 'foo::bar')\n self.assertRaises(InvalidAddress, IPAddr, '123')\n self.assertRaises(InvalidAddress, IPAddr, '123.456.789.0')\n self.assertRaises(InvalidAddress, IPAddr, '127/8')\n self.assertRaises(InvalidAddress, IPAddr, '0/0')\n self.assertRaises(InvalidAddress, IPAddr, '1.2.3.4/32')\n self.assertRaises(InvalidAddress, IPAddr, '0')\n self.assertRaises(InvalidAddress, IPAddr, '')", "title": "" }, { "docid": "ede5dc3c1758e84befaef385d875f77c", "score": "0.53217494", "text": "def test_bad_familly(self):\n # type: ()->None\n opts = {\n 'addrFam': 'inedsflkdsfst',\n 'name': 'eth0',\n 'source': 'tunnel'\n }\n self.assertRaises(ValueError, NetworkAdapter, opts)", "title": "" }, { "docid": "cf65c2215b003761cec7ff241b6650e8", "score": "0.5317213", "text": "def GeneralCallReadAddress():\n\t\tpass", "title": "" }, { "docid": "64f66e556b0c325acc857e4724ed4741", "score": "0.5316493", "text": "def test_create_existing_contract(self):\n abi, bytecode = get_abi_bytecode(CONTRACT_NAME)\n self.contract = EthContract(\n node_address=NODE_ADDRESS, contract_address=CONTRACT_ADDRESS, \n abi=abi, bytecode=bytecode, already_deployed=True, provider='ipc')\n self.assertTrue(isinstance(self.contract, EthContract), \n \"Expected type EthContract, received %s\" % type(self.contract))\n self.assertNotEqual(self.contract._contract, None,\n \"Expected the contract to have a _contract, however _contract was type None\")\n self.assertNotEqual(self.contract._functions, None,\n \"Expected the contract to have a _functions, however _functions was type None\")\n print(\"\\ntest_create_existing_contract: 3 assertion passed\")", "title": "" }, { "docid": "3deda5dae8e95ebeec3d487eca408f4a", "score": "0.5307018", "text": "def test_list_address_links_eth(self):\n pass", "title": "" }, { "docid": "8fd186c9faf43b1a220bf219c8bcbee8", "score": "0.5306149", "text": "async def test_validate_access(provider):\n provider.async_validate_access(ip_address('192.168.0.1'))\n provider.async_validate_access(ip_address('192.168.128.10'))\n provider.async_validate_access(ip_address('::1'))\n provider.async_validate_access(ip_address('fd01:db8::ff00:42:8329'))\n\n with pytest.raises(tn_auth.InvalidAuthError):\n provider.async_validate_access(ip_address('192.168.0.2'))\n with pytest.raises(tn_auth.InvalidAuthError):\n 
provider.async_validate_access(ip_address('127.0.0.1'))\n with pytest.raises(tn_auth.InvalidAuthError):\n provider.async_validate_access(ip_address('2001:db8::ff00:42:8329'))", "title": "" }, { "docid": "0031e9f29e3fd0040cc43e9dda86779c", "score": "0.5297301", "text": "def test_missing_name(self):\n # type: ()->None\n adapter = NetworkAdapter({\n 'addrFam': 'inet6',\n 'auto': True,\n 'gateway': '192.168.0.254',\n 'source': 'static',\n 'netmask': '255.255.255.0',\n 'address': '192.168.0.250'\n })\n self.assertRaises(ValueError, adapter.validateAll)", "title": "" }, { "docid": "b3cad40b55975883a671fcca404cdbed", "score": "0.5297102", "text": "def test_no_records(self):\n addressbase = AddressBaseGeocoderAdapter('DD1 1DD')\n with self.assertRaises(ObjectDoesNotExist):\n result = addressbase.geocode()\n\n # point only geocode should also fail\n with self.assertRaises(ObjectDoesNotExist):\n result = addressbase.geocode_point_only()", "title": "" }, { "docid": "54788328afff4066296ddc01213fde3b", "score": "0.52912134", "text": "def test_invalid_error():\n\n from aiohttp_json_rpc import RpcInvalidRequestError\n from aiohttp_json_rpc.protocol import decode_msg\n\n raw_msg = '''\n {\n \"jsonrpc\": \"2.0\",\n \"id\": 1,\n \"error\": {\n \"code\": -32101,\n \"message\": \"Invalid request\",\n \"data\": null\n }\n }\n '''\n\n with pytest.raises(RpcInvalidRequestError):\n decode_msg(raw_msg)", "title": "" }, { "docid": "baefa077924fd7a544c9f252c8bb53cf", "score": "0.5266852", "text": "def decode_address(cls, raw_address: RawAddress) -> AddressType:", "title": "" }, { "docid": "f30d47dcba54c380e5da70a1321e5118", "score": "0.52356297", "text": "def verifyMiner(minerAddress): \n \n return CryptoNodes.checkInValuesList(minerAddress, \"address\")", "title": "" }, { "docid": "01b3d9912e1c4dd2145125a63c71c453", "score": "0.52309626", "text": "def checkIfAddressIsValid(addr, config):\r\n # addr will be a dictionary\r\n print(\"Inside checkIfAddressIsValid\")\r\n\r\n # Credentials for using the smartystreets API\r\n smartystreets = config['smartystreets']\r\n credentials = StaticCredentials(smartystreets['auth_id'], smartystreets['auth_token'])\r\n\r\n #print(type(addr.groups()))\r\n #print(addr.groups(1)[1])\r\n\r\n regularExp = '(.*),(\\s*.*)(\\s\\d\\d\\d\\d\\d)'\r\n match = re.search(regularExp, addr.groups(1)[1])\r\n\r\n if match is None:\r\n print (\"cannot find city, state and zip code in checkIfAddressIsValid\")\r\n return False\r\n\r\n #print(\"city: \" + str(match.groups(0)[0]))\r\n #print(\"state: \" + str(match.groups(0)[1]))\r\n #print(\"zip: \" + str(match.groups(0)[2]))\r\n\r\n client = ClientBuilder(credentials).build_us_zipcode_api_client()\r\n lookup = Lookup()\r\n\r\n lookup.city = str(match.groups(0)[0])\r\n lookup.state = str(match.groups(0)[1])\r\n lookup.zipcode = str(match.groups(0)[2])\r\n\r\n print(\"-------- All output below is smartystreets' ----------\")\r\n\r\n try:\r\n client.send_lookup(lookup)\r\n except exceptions.SmartyException as err:\r\n print(err)\r\n return False\r\n\r\n result = lookup.result\r\n zipcodes = result.zipcodes\r\n cities = result.cities\r\n\r\n for city in cities:\r\n print(\"\\nCity: \" + city.city)\r\n print(\"State: \" + city.state)\r\n print(\"Mailable City: {}\".format(city.mailable_city))\r\n\tdata = {\r\n\t \"City\": city.city,\r\n\t \"State\": city.state,\r\n\t \"MailableCity\": city.mailable_city\r\n\t}\r\n\t\r\n\t\r\n\t#\"{ City:\" + city.city + \",\" + \"State: \" + city.state + \",\" + \"Mailable_city: {}\".format(city.mailable_city) + 
\"}\"\r\n\twith open('output.json', 'w') as outfile:\r\n\t json.dump(data, outfile)\r\n\r\n for zipcode in zipcodes:\r\n print(\"\\nZIP Code: \" + zipcode.zipcode)\r\n print(\"Latitude: {}\".format(zipcode.latitude))\r\n print(\"Longitude: {}\".format(zipcode.longitude))\r\n\r\n return True", "title": "" }, { "docid": "12714f1ebb5f6a127fdebccab3595e3c", "score": "0.52280766", "text": "def test_exception_requests_inalid():\n ls = LS(api_key=\"dummy\")\n with pytest.raises(ApiError) as execinfo:\n resp = ls.geocode(query=\"abc\")\n raise ApiError(resp)\n resp = execinfo.value.args[0]\n assert resp.status_code == 401\n assert resp.reason == \"Unauthorized\"\n assert str(execinfo.value).startswith('401, Unauthorized, {\"error\":\"Unauthorized\"')", "title": "" }, { "docid": "9122d59a5ef6479becab9c10cce5c41a", "score": "0.5228027", "text": "def test_census_address_components(self):\n candidate = self.g_census.get_candidates(self.pq['azavea'])[0]\n self._test_address_components(candidate)", "title": "" }, { "docid": "b22ba1e08e680b636ddc7abfc22f5d39", "score": "0.52276856", "text": "def validate_payable(transaction, abi):\n if 'value' in transaction:\n if transaction['value'] != 0:\n if \"payable\" in abi and not abi[\"payable\"]:\n raise ValidationError(\n \"Sending non-zero ether to a contract function \"\n \"with payable=False. Please ensure that \"\n \"transaction's value is 0.\"\n )", "title": "" }, { "docid": "edc8cbe8e2347cac0723ee7d56632c5d", "score": "0.5225436", "text": "def test_list_address_txs(self):\n pass", "title": "" }, { "docid": "613ef85b248d7fa2e4ce8e0e6b49d387", "score": "0.5215346", "text": "def address(self, testnet=False):\n hash160 = self.hash160()\n rv = \"\"\n if hash160:\n prefix = bytes([self.P2SH_TESTNET_VERSION if testnet else self.P2SH_MAINNET_VERSION])\n rv = base58.b58encode_check(prefix + hash160)\n\n return rv", "title": "" }, { "docid": "109482c495a260b08c9c2c1f5cfe862a", "score": "0.5200947", "text": "def get_contract_type(self, address: AddressType) -> Optional[ContractType]:", "title": "" }, { "docid": "8c9dc2361face92264b7f0ded16a21c8", "score": "0.5186881", "text": "def test_api_v1_interface_ip_address_ip_address_get(self):\n pass", "title": "" }, { "docid": "4267be964872e3a3d879f552bda68503", "score": "0.5186396", "text": "def test_ext_info():\n assert_equals(info('9524712946'), 'Finland')\n assert_raises(Exception, info, '')", "title": "" }, { "docid": "55cc5ea85b0871102a88d71f89772b91", "score": "0.51789665", "text": "def test_get_tax_id_fail():\n\n file_path = \"test_test_test.fasta\"\n assert predict_cazymes.get_tax_id(file_path) == None", "title": "" }, { "docid": "e915e070d1c070fdef2e35edbd0ca48b", "score": "0.51773226", "text": "def test_read_invalid(self):\n encryption_key_information = objects.EncryptionKeyInformation()\n args = (self.empty_encoding,)\n self.assertRaisesRegex(\n ValueError,\n \"Invalid struct missing the unique identifier attribute.\",\n encryption_key_information.read,\n *args\n )", "title": "" }, { "docid": "68a5ba56bbd0f05f0342901fac671faa", "score": "0.5162304", "text": "def test_during_registration_required_negative(dummy_regform):\n schema = make_registration_schema(dummy_regform)()\n with pytest.raises(UnprocessableEntity) as exc_info:\n parser.parse(schema)\n assert set(exc_info.value.data['messages']) == {\n 'cern_access_nationality', 'cern_access_birth_place', 'cern_access_birth_date'\n }", "title": "" }, { "docid": "aba17732606ff76d637078335a07a182", "score": "0.51585275", "text": "def test_is_valid_with_Z():\n with 
pytest.raises(RuntimeError) as excinfo:\n sf.is_valid('Z')\n excinfo.match('Z is not a valid amino acid.')", "title": "" }, { "docid": "13eacb22df6aab8d0d26f7016398c141", "score": "0.5157513", "text": "def test_retrival_of_board_address(self):\n iptag = IPTag(\"\", 0, 0, 0, \"\", 1)\n self.assertIsNotNone(iptag)\n board_address = iptag.board_address\n self.assertEqual(\"\", board_address)", "title": "" }, { "docid": "86ab4d00d364d214cc79dc205abc1fd0", "score": "0.5155129", "text": "def test_invalid(self):\n self.assertFalse(validators.valid_hex(\"123xyz\"))", "title": "" }, { "docid": "ed4a9ecb6c11a400bc718a54a2d9ea31", "score": "0.5155046", "text": "def contract_abi(self):\n return self._contract_abi", "title": "" }, { "docid": "f8275889ea00c6142733e788274d7c98", "score": "0.5154714", "text": "def test_ask_notastring(self):\n qaobject = Interface()\n self.assertRaises(Exception, qaobject.ask, 45566)", "title": "" }, { "docid": "c89010bbfa463f0f6dd9fa5ec7c75a6a", "score": "0.5148863", "text": "def test_no_addrs(self):\n output = dockage.dns(addrs='')\n\n self.assertTrue(output is None)", "title": "" }, { "docid": "03b5fceeed896dae946d9ed4b0a9ec8d", "score": "0.5141942", "text": "def test_AT_input(self):\n at = AT({'alias': 'a'})\n bdpin = len(at._bdp_in)\n self.assertEqual(bdpin, 0) ## should have no input bdp", "title": "" } ]
4e54b02e96fdf43d7e8535acedfb9e84
r"""Propogate input through the layer.
[ { "docid": "9e56e01818320d237d6344cdff3025f9", "score": "0.0", "text": "def forward(self, input, hidden, ctx):\n def recurrence(input, hidden, ctx):\n \"\"\"Recurrence helper.\"\"\"\n input_gate = self.input_weights(input)\n hidden_gate = self.hidden_weights(hidden)\n peep_gate = self.peep_weights(ctx)\n i_r, i_i, i_n = input_gate.chunk(3, 1)\n h_r, h_i, h_n = hidden_gate.chunk(3, 1)\n p_r, p_i, p_n = peep_gate.chunk(3, 1)\n resetgate = self.reset(i_r + h_r + p_r)\n inputgate = self.input(i_i + h_i + p_i)\n newgate = self.new(i_n + resetgate * h_n + p_n)\n hy = newgate + inputgate * (hidden - newgate)\n\n return hy\n\n input = input.transpose(0, 1)\n ctx = ctx.transpose(0, 1)\n\n output = []\n steps = range(input.size(0))\n for i in steps:\n hidden = recurrence(input[i], hidden, ctx[i])\n if isinstance(hidden, tuple):\n output.append(hidden[0])\n else:\n output.append(hidden)\n\n output = torch.cat(output, 0).view(input.size(0), *output[0].size())\n output = output.transpose(0, 1)\n return output, hidden", "title": "" } ]
[ { "docid": "489e2b175bc8d9e68705e661f28b9b03", "score": "0.6702976", "text": "def applyInputParams(self):\n pass", "title": "" }, { "docid": "0ade0f639b80566a9895265ca9c30796", "score": "0.65563494", "text": "def propagate_input(self, features):\n for layer in self.layers:\n layer.propagate_input(features)", "title": "" }, { "docid": "75addce942b53d7ff598d8bc2c48a36d", "score": "0.65558606", "text": "def forward_propagate(self, data):\n data = np.asarray(data)\n data = np.append(data, [1])\n self.input_layer.activate(data)", "title": "" }, { "docid": "f6c2e25f084e5fc8e302af3649d4183e", "score": "0.6548649", "text": "def feed_forward(self, input_data):\n\n\t\tself.input_layer.set_output_values(input_data)\n\t\tself.set_hidden_layer_values()\n\t\tself.set_output_layer_values()", "title": "" }, { "docid": "e21b3157474a3691f51d3fe751437d80", "score": "0.63867205", "text": "def _propagateInput(self,input):\n Y = [0] * self.K #init output to list of K zeroes\n \n #init hidden layer to list of 1 followed by H zeroes\n Z = [0] * (self.H -1) \n \n #propagate inputs to hidden layer (start at 1, \n #first node in hidden layer should not be touched)\n self.o = []\n for h in range(0,self.H-1):\n wtx = _mulVectors(self.W[h],input)\n scaledWtx = _tanh((2./3.) * wtx)\n Z[h] = 1.7159 * scaledWtx\n self.o.append(scaledWtx);\n \n Z.append(1) \n \n #propagate hidden layer to outputs\n for i in range(0,self.K):\n Y[i] = _mulVectors(self.V[i],Z)\n \n if len(Y) > 1:\n Y = self._softMax(Y)\n \n return Y,Z", "title": "" }, { "docid": "a2b3d66f8128144fee56cb9fcf306dfa", "score": "0.63512844", "text": "def forward( self, inputs ) :\n pass", "title": "" }, { "docid": "60429a0554ba474df24952f811b22521", "score": "0.6270747", "text": "def forward(self, input):\n self.input = input\n\n for layer in self.layers:\n input = layer.forward(input)\n # print(input.shape)\n\n return input", "title": "" }, { "docid": "9173f1974671a8c6f30e59b9a42a3d39", "score": "0.625522", "text": "def set_inputs(self):", "title": "" }, { "docid": "acc099faf6a9023963843751a315c737", "score": "0.6165887", "text": "def forward(self, input=None):\n if input is None:\n input = self.prev_layer.layer_value\n self.layer_value = np.dot(input.layer_value, self.layer_weight) \\\n + self.layer_bias\n activated_layer = activate_layer(self.layer_value,\n activation_type=self.activation_type)\n self.layer_value = activated_layer\n if self.debug is True:\n self.debug_layer()\n return True", "title": "" }, { "docid": "fc0219f03317cb6ff8c057376fd73954", "score": "0.6138716", "text": "def forward(self, *input):\n raise NotImplementedError", "title": "" }, { "docid": "2fa11016ac3f0a6586714d08753d7796", "score": "0.6122468", "text": "def updateInputs(self):\n _ = 1", "title": "" }, { "docid": "a9995ea64a8981285a3d826562aee53c", "score": "0.6108953", "text": "def forward(self, inputx):\n raise NotImplementedError", "title": "" }, { "docid": "3f618ab32d78ea6b2df04fc4127893e2", "score": "0.6107272", "text": "def build(self, input_shape):\n\n self._input_layer = tf.keras.layers.InputLayer(\n input_shape=(input_shape[-1],)\n )\n super().build(input_shape)", "title": "" }, { "docid": "3f618ab32d78ea6b2df04fc4127893e2", "score": "0.6107272", "text": "def build(self, input_shape):\n\n self._input_layer = tf.keras.layers.InputLayer(\n input_shape=(input_shape[-1],)\n )\n super().build(input_shape)", "title": "" }, { "docid": "70243d789ad9aa274fba5296d13267bb", "score": "0.6089816", "text": "def forward(self, input):\n\n raise NotImplementedError(\"Not implemented in 
interface\")", "title": "" }, { "docid": "70242af37f96404ee21c20de4c608a86", "score": "0.60594517", "text": "def predict_props(self, inp):\n # feed image to model used for knowledge transfer\n knowledge = self.tf_model.transfer(inp, need_preprocessing=True)\n # the knowledge from previous model is feeded to main CNN\n return self.model.predict(knowledge)", "title": "" }, { "docid": "74ccb254773cbfe2b86bbd1e1234cc5a", "score": "0.601317", "text": "def forward(self, input: tensor) -> tensor:\n input = self.layers(input)\n\n return input", "title": "" }, { "docid": "186b5113652a827e059dd64303908f70", "score": "0.5965438", "text": "def with_input(self, input_state: LogicalState) -> None:\n self._with_logical_input(input_state)", "title": "" }, { "docid": "186b5113652a827e059dd64303908f70", "score": "0.5965438", "text": "def with_input(self, input_state: LogicalState) -> None:\n self._with_logical_input(input_state)", "title": "" }, { "docid": "fbc6292bac7b3a7908d20d71aeec2915", "score": "0.5941666", "text": "def forward(self, input):\n return input", "title": "" }, { "docid": "f094f2ea98ea45043bb7a89252639daf", "score": "0.5921059", "text": "def forward(ctx, inputs, **kwargs):\n return inputs", "title": "" }, { "docid": "088531090f81b8ba8be737194560c4a7", "score": "0.5910531", "text": "def forward(self, x):\n model_descriptor_layers_copy = copy.deepcopy(self.model_descriptor['layers'])\n\n # Start with the input layers\n layers2process = [inputlayer for inputlayer in model_descriptor_layers_copy if inputlayer['type'] == 'input']\n\n # This is our main dictionary which allows to receive the output from any layer\n self.layerdic = {}\n\n while len(layers2process) > 0:\n\n # Get next layer to process\n layer = layers2process.pop(0)\n layer['id'] = str(layer['id'])\n layer['input'][0] = str(layer['input'][0])\n\n layer_added = True\n\n if layer['type'] == 'input':\n self.layerdic[layer['id']] = x\n\n elif layer['type'] == 'dense':\n # We have one avg pool operation, it's not a layer\n agp_result = self._modules[\"agp\"](self.layerdic[layer['input'][0]])\n # Flat layer \n flat_result = agp_result.view(-1, self.last_channels)\n # We need to flatten it because it's multi dimensional\n dense_result = self._modules[str(layer['id'])](flat_result)\n self.layerdic[layer['id']] = dense_result\n\n elif layer['type'] == 'merge':\n # For all merge layers there are two input layers required\n # They must be already at layerdic before we can receive the output from the merge layer\n if (str(layer['input'][0]) in self.layerdic.keys()) and (str(layer['input'][1]) in self.layerdic.keys()) \\\n and (str(layer['id']) not in self.layerdic.keys()):\n # These layers has two inputs\n self.layerdic[layer['id']] = self._modules[str(layer['id'])](\n self.layerdic[str(layer['input'][0])], self.layerdic[str(layer['input'][1])])\n\n else:\n # Wait until both input layers are added (we can check topo_ordering)\n layer_added = False\n\n else: \n # For the other type of layers just call forward function (=call())\n # self._modules[]() calls forward (witout a dot, calls __call__)\n # self._modules[].forward(parent_output)\n self.layerdic[layer['id']] = self._modules[str(layer['id'])](self.layerdic[layer['input'][0]])\n\n if layer_added:\n # Append all layers whose input node = current layer\n layers2process.extend(\n [subsequent_layers for subsequent_layers in copy.deepcopy(self.model_descriptor['layers']) if\n int(layer['id']) in subsequent_layers['input']])\n\n # Last layer so far\n self.output_id = layer['id']\n\n output = 
self.layerdic[self.output_id]\n return output", "title": "" }, { "docid": "39722cc26769efe92557dc0d22493cab", "score": "0.5901017", "text": "def set_input(self, input_):\n self.input = np.concatenate((input_, np.ones((1, input_.shape[1]))))", "title": "" }, { "docid": "59fb81484d0669a6290a7769794bc09f", "score": "0.5894345", "text": "def build(self, input_shape):\n super(NormalizedLayer, self).build(input_shape)", "title": "" }, { "docid": "4b3ef9a3fffd0ec79bc25185c14964bc", "score": "0.5887315", "text": "def forward(self, input):\n # if static or trainable:\n if self.learnable == 1:\n return self.conv(input)\n else:\n return self.conv(input, weight=self.weight, groups=self.groups,padding=self.pad)", "title": "" }, { "docid": "5991ba51abba41cefd87d8dda21e3114", "score": "0.5870796", "text": "def forward(self, inp):\n return self.conv(inp)", "title": "" }, { "docid": "232a0db9b99f620862a5cf1433a7f697", "score": "0.58625376", "text": "def setup_input(self):\n raise NotImplementedError", "title": "" }, { "docid": "3fe4b67a2ef38f355e850bf48ac2289c", "score": "0.585021", "text": "def set_input(self, input):\n self.input.data.resize_(input[0].size()).copy_(input[0])\n self.gt.data.resize_(input[1].size()).copy_(input[1])\n\n # Copy the first batch as the fixed input.\n if self.total_steps == self.opt.batchsize:\n self.fixed_input.data.resize_(input[0].size()).copy_(input[0])", "title": "" }, { "docid": "f50d40121abe5457b1e142167a9d90c6", "score": "0.5846639", "text": "def forward(self, image_input):\n x = self.layer1(image_input)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = x.view(x.size()[0], -1)\n return x", "title": "" }, { "docid": "31335da7ab3252d05fb07c9c7d4dce60", "score": "0.5839444", "text": "def forward(self, input):\n return self.updateOutput(input)", "title": "" }, { "docid": "31335da7ab3252d05fb07c9c7d4dce60", "score": "0.5839444", "text": "def forward(self, input):\n return self.updateOutput(input)", "title": "" }, { "docid": "2b0a98e6fbd29c889234f684738e9c0a", "score": "0.5833574", "text": "def forward(self, inp):\n x = inp[0]\n seq_length, batch_size, _ = x.shape\n x_pe = self.pe[:seq_length, :]\n x_pe = torch.repeat_interleave(x_pe.unsqueeze(1), batch_size, dim=1)\n x = torch.cat([x, x_pe], dim=-1)\n x = self.dropout(torch.relu(self.normalize(self.linear(x))))\n inp[0] = x\n return inp", "title": "" }, { "docid": "0505a9032a0c746d5efb6d661ffb96f0", "score": "0.5817626", "text": "def update_state(self, input):\n self.previous_input = input", "title": "" }, { "docid": "923ef154e7aa4f61e6241f080443b037", "score": "0.58093363", "text": "def forward_pass_on_convolutions(self, input): \n data_dict = dict()\n data_dict[self.model.base_model._op_list[0][-1]] = input\n\n for i, op in enumerate(self.model.base_model._op_list):\n if op[1] != 'Concat' and op[1] != 'InnerProduct':\n data_dict[op[2]] = getattr(self.model.base_model, op[0])(data_dict[op[-1]])\n elif op[1] == 'InnerProduct':\n x = data_dict[op[-1]]\n data_dict[op[2]] = getattr(self.model.base_model, op[0])(x.view(x.size(0), -1))\n else:\n try:\n data_dict[op[2]] = torch.cat(tuple(data_dict[x] for x in op[-1]), 1)\n except:\n for x in op[-1]:\n print(x,data_dict[x].size())\n raise\n if i == self.target_layer:\n data_dict[op[2]].register_hook(self.save_gradient)\n conv_output = data_dict[op[2]]\n return conv_output, data_dict[self.model.base_model._op_list[-1][2]]", "title": "" }, { "docid": "f6bbe5fba5f5babc7c1717b8ef7c61e7", "score": "0.58013344", "text": "def preprocess(self, inputs):\n pass", "title": 
"" }, { "docid": "1a4f8173314a96cc61d887a49d59ce52", "score": "0.57838285", "text": "def forward(self, *input_data):\n raise NotImplementedError", "title": "" }, { "docid": "95bd550cb15708b75a085eb02ae6cdcd", "score": "0.5781065", "text": "def inputs(self):", "title": "" }, { "docid": "42b2155d81169f7d25187dbdd9fc4271", "score": "0.5745698", "text": "def fillNetworkInput(self, state, tensor, batchIndex):", "title": "" }, { "docid": "2f53bff38bd02ba0ac6fa3c2d96b49e2", "score": "0.57403076", "text": "def set_input(self):\n pass", "title": "" }, { "docid": "f777ae310ef27d987d6a083ce73e531d", "score": "0.5738726", "text": "def forward_one(self, input, hidden):\n raise NotImplementedError()", "title": "" }, { "docid": "5f69f9c231b1cce1120e053c41bff357", "score": "0.57114553", "text": "def set_input(self, input):\n AtoB = self.opt.direction == 'AtoB'\n self.real_A = input['A' if AtoB else 'B'].to(self.device)\n self.real_B = input['B' if AtoB else 'A'].to(self.device)\n self.image_paths = input['A_paths' if AtoB else 'B_paths'] \n # For FPN\n self.real_A_extend = []\n if 'A_extend' in input:\n for i in range(len(input['A_extend'])):\n self.real_A_extend.append(input['A_extend'][i].to(self.device))", "title": "" }, { "docid": "eae802d50c43fd0a09359086693e3ad1", "score": "0.57083994", "text": "def set_input(self, input):\n AtoB = self.opt.direction == 'AtoB'\n self.real_A = input['A' if AtoB else 'B'].to(self.device)\n self.real_B = input['B' if AtoB else 'A'].to(self.device)\n self.prior_z_A = torch.randn((self.real_B.shape[0], self.opt.nlatent, 1, 1)).to(self.device)\n self.prior_z_B = torch.randn((self.real_A.shape[0], self.opt.nlatent, 1, 1)).to(self.device)\n self.image_paths = input['A_paths' if AtoB else 'B_paths']", "title": "" }, { "docid": "5dea4da1924a6a2e6fa79dcb49edd3f5", "score": "0.57076305", "text": "def set_input(self, input):\n with torch.no_grad():\n if self.isTrain:\n if input['isNatural'][0] == 1:\n self.isNatural = True\n else:\n self.isNatural = False\n self.real_T2 = input['T2'].to(self.device)\n self.real_T4 = input['T4'].to(self.device)\n if not self.isNatural: # Skip these procedures, if the data is from real-world.\n T = input['T'].to(self.device)\n R = input['R'].to(self.device)\n if torch.mean(T) * 1 / 2 > torch.mean(R):\n self.trainFlag = False\n return\n _, R, I, alpha = self.syn(T, R, self.k_sz) # Synthesize data\n self.alpha = round(alpha, 1)\n if T.max() < 0.15 or R.max() < 0.15 or I.max() < 0.1:\n self.trainFlag = False\n return\n else:\n I = input['I']\n T = input['T']\n else: # Test\n self.image_paths = input['B_paths']\n I = input['I']\n T = input['T']\n\n self.real_T = T.to(self.device)\n self.real_I = I.to(self.device)", "title": "" }, { "docid": "f2c2c4b3d8f5ccafab1489fab65a5d81", "score": "0.56949145", "text": "def set_inputs(self, input):\n\n self.data = input\n targets = input['targets'] # shape[-1] = K+T\n diff_in = input['diff_in'] # shape[-1] = K-1\n self.diff_in = []\n self.targets = []\n f_volatile = not self.updateG or not self.is_train\n for i in range(self.K - 1):\n self.diff_in.append(Variable(diff_in[:, :, :, :, i].cuda(), volatile=f_volatile))\n for i in range(self.K + self.T):\n self.targets.append(Variable(targets[:, :, :, :, i].cuda(), volatile=f_volatile))", "title": "" }, { "docid": "ddd8729f683748720854fd05229987d0", "score": "0.5693851", "text": "def set_input(self, input):\n self.real_A = input['A'].to(self.device)\n self.real_B = input['B'].to(self.device)\n self.real_C = input['C'].to(self.device)\n self.image_paths = 
input['A_paths']", "title": "" }, { "docid": "59364b4fd1e11eef6547623b78f945a1", "score": "0.5669623", "text": "def forward_raw(self, *inputs):\n raise NotImplementedError", "title": "" }, { "docid": "ad744985fb6a3537d909d1e31a78ce68", "score": "0.5656239", "text": "def set_input(self, input):\n input_A = input['A']\n self.real_A.resize_(input_A.size()).copy_(input_A)\n self.DA = input['DA'][0]\n if self.isTrain:\n input_B = input['B']\n self.real_B.resize_(input_B.size()).copy_(input_B)\n self.DB = input['DB'][0]\n self.image_paths = input['path']", "title": "" }, { "docid": "7b778557849adf8a5ce0c53cab729e93", "score": "0.56560016", "text": "def forward(self, input_data: numpy.ndarray) -> numpy.ndarray:\n pass", "title": "" }, { "docid": "7c50aa35fdf639b31b1661edb0cff933", "score": "0.56525654", "text": "def ApplyInputs(ss, en):\n ss.Net.InitExt()\n\n lays = go.Slice_string([\"Location\", \"Cover\", \"Toy\", \"Reach\"])\n for lnm in lays :\n ly = leabra.Layer(ss.Net.LayerByName(lnm))\n pats = en.State(ly.Nm)\n if pats != 0:\n ly.ApplyExt(pats)", "title": "" }, { "docid": "0b9c3eb0a2993a8047cf03222f51266f", "score": "0.5639904", "text": "def build_input_layer(self):\n if self.input_layer_config is not None:\n with tf.variable_scope('input_layer', reuse=self.reuse):\n flat_inputs_hidden = self.flat_tensor(self.inputs)\n flat_inputs_hidden = fully_connected_layer(flat_inputs_hidden, **self.input_layer_config)\n\n self.inputs_hidden = self.temporal_tensor(flat_inputs_hidden)\n else:\n self.inputs_hidden = self.inputs", "title": "" }, { "docid": "15efa93708eac422dfabc8332ca87086", "score": "0.5632626", "text": "def transfer(self, inp, need_preprocessing=False):\n if need_preprocessing:\n inp = self.preprocess(inp)\n return self.model.predict(inp)", "title": "" }, { "docid": "c484a8acaabf14255176c534681a484d", "score": "0.5623666", "text": "def forwards(self, X):\n if self.has_bias:\n X = numpy.append(X, 1)\n\n _in = X\n\n for layer_idx in range(self.n_layers):\n layer_input = numpy.dot(_in, self.weights[layer_idx])\n layer_output = self.activation(layer_input)\n\n self.values[layer_idx][\"input\"] = layer_input\n self.values[layer_idx][\"output\"] = _in = layer_output", "title": "" }, { "docid": "6db9ab383fdb4e4caf367b15062a29e2", "score": "0.5611742", "text": "def forward_train(self, inputs):\n\n pass", "title": "" }, { "docid": "1ec98a2dcc91451f16293517e7bbb102", "score": "0.56116676", "text": "def propagation(self, input_data, back_propagation=True):\r\n # TODO: in RL part, the input is from sample, perhaps\r\n # we can get the input_lens, batch_target and batch_mask\r\n # from input, need @Jiaji help to check\r\n\r\n input_data = input_data.transpose(0, 1)\r\n input_lens = batch_lens(\r\n batch=input_data,\r\n end_token=self.end_token\r\n )\r\n dim0_size = input_data.size(0)\r\n batch_input = input_data.narrow(0, 0, dim0_size - 1).to(HOST_DEVICE)\r\n batch_target = input_data.narrow(0, 1, dim0_size - 1).to(HOST_DEVICE)\r\n batch_mask = torch.zeros(batch_input.size())\r\n for i, len in enumerate(input_lens):\r\n batch_mask[:len, i] = 1.0\r\n batch_mask = batch_mask.to(HOST_DEVICE)\r\n\r\n return self._forward(\r\n batch_input=batch_input,\r\n batch_target=batch_target,\r\n input_lens=input_lens,\r\n batch_mask=batch_mask,\r\n is_propagation=True\r\n )", "title": "" }, { "docid": "c8a48e2a41135225802b7784430d3193", "score": "0.5608339", "text": "def forward(self, input):\n return self.conv(input, weight=self.weight, groups=self.groups, padding=self.padding)", "title": "" }, { "docid": 
"d88496f67c7b9cda778e1299e996d683", "score": "0.5599526", "text": "def __call__(self, model_inp, model_out):\n raise NotImplementedError", "title": "" }, { "docid": "f5438f8330fad1418368bdcd5f07cb64", "score": "0.5597361", "text": "def forward(self, x):\n self.cache[\"input\"] = x\n for i in range(len(self.layers)):\n if i==0:\n self.layers[i].forward(x)\n else:\n previous = self.layers[i-1].output # (x, y) * (y, 1)\n self.layers[i].forward(previous)", "title": "" }, { "docid": "7451d6aea0c09f10a4b070c3f1e4c0e5", "score": "0.5596964", "text": "def forward(self, *input, **kwargs):\n\t\tx = self.with_standardized(*input, **kwargs)\n\t\treturn x", "title": "" }, { "docid": "9891bf8816330cefe79c64058841ba8f", "score": "0.5595116", "text": "def forward(self, input):\n\n ########################\n # PUT YOUR CODE HERE #\n\n # check correctness of shape. if not, raise exception\n if not (input.shape[1] == self.n_neurons): raise Exception(\n \"Input size is not correct. Input is {}, while it was initalized with {}\".format(input.shape[1],\n self.n_neurons))\n\n # implement batch normalization forward pass as given in the assignment\n\n # compute mean\n mean = input.mean(dim=0)\n\n # compute variance with unbiased=false\n variance = input.var(dim=0, unbiased=False)\n\n # normalize\n normalized_input = input - mean\n normalized_input /= (variance + self.eps).sqrt()\n\n # scale and shift\n out = self.gamma * normalized_input + self.beta\n\n # END OF YOUR CODE #\n #######################\n\n return out", "title": "" }, { "docid": "85cd1feb218ae1f41b27f62066e02f7e", "score": "0.55911046", "text": "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n #raise NotImplementedError\n\n for pre_layer,activation in self.layers:\n x_tilde = pre_layer.forward(x)\n x = activation.forward(x_tilde)\n\n out = x\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "title": "" }, { "docid": "1e318e39eb4ef1757e779bdf272daa79", "score": "0.559094", "text": "def __call__(self, inputs, states):\r\n raise NotImplementedError()", "title": "" }, { "docid": "78382338c45908367d6211e5e69ba844", "score": "0.5580951", "text": "def forward(self, x):\n ### START YOUR CODE HERE ### (You can change anything inside this block)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.flattening(x)\n x = self.layer4(x)\n x = self.layer5(x)\n return x\n ### END YOUR CODE HERE ### ", "title": "" }, { "docid": "68b4d2c08348eb70845065e964305aa5", "score": "0.5578145", "text": "def _forward_event_shape(self, input_shape):\n return input_shape", "title": "" }, { "docid": "a906e82c49b3cf223c2d971519f32bdf", "score": "0.5577934", "text": "def inputs(self, inputs):\n\n self._inputs = inputs", "title": "" }, { "docid": "a906e82c49b3cf223c2d971519f32bdf", "score": "0.5577934", "text": "def inputs(self, inputs):\n\n self._inputs = inputs", "title": "" }, { "docid": "a906e82c49b3cf223c2d971519f32bdf", "score": "0.5577934", "text": "def inputs(self, inputs):\n\n self._inputs = inputs", "title": "" }, { "docid": "a906e82c49b3cf223c2d971519f32bdf", "score": "0.5577934", "text": "def inputs(self, inputs):\n\n self._inputs = inputs", "title": "" }, { "docid": "58336914c71e5bb4c7ef7ced765626ae", "score": "0.55747396", "text": "def forward_with_standardized(self, *input, **kwargs):\n\t\tx = self.net(*input)\n\t\treturn x", "title": "" }, { "docid": "dc554abedb02655de435d5746abb037a", "score": "0.5569247", "text": "def forward(self, inputs):\n\n 
self.inputs = np.array(inputs)\n weights_arr = np.array(self.weights)\n layer_base_forward = np.dot(inputs, weights_arr) + self.biases\n self.output = self.activation.forward(layer_base_forward)", "title": "" }, { "docid": "2df27f6bba954fa71b75cc1032f2855b", "score": "0.5567828", "text": "def apply(self, inputs):\n pass", "title": "" }, { "docid": "c4541f9cf5710e81f826ed3af559858d", "score": "0.55669945", "text": "def forward(self, input):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n mean=input.mean(dim=0,keepdim=True)\n std=input.std(dim=0,keepdim=True,unbiased=False)\n output=input-mean\n output/=std\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return output", "title": "" }, { "docid": "75dcfbab03753e89ab11874fbc06169f", "score": "0.5565527", "text": "def forward(self, inputs: Tensor) -> Tensor:\r\n raise NotImplementedError", "title": "" }, { "docid": "c9c646211abb2ca29fc63949793a4bbc", "score": "0.5565137", "text": "def forward(self, inputs, *args, **kwargs):\n\n x = inputs\n y = self.layer_norm(x)\n\n # Get layer output\n y = self.layer(y, *args, **kwargs)\n\n # Postprocessing: apply dropout and residual connection\n if self.is_train:\n y = tf.nn.dropout(y, rate=self.postprocess_dropout)\n return x + y", "title": "" }, { "docid": "aa7e74a2256da7fdca79e71142f68b05", "score": "0.5563959", "text": "def forward(self, input):\n mean = self.layers(input)\n return mean", "title": "" }, { "docid": "2943490565c0055139fb574e9bb59133", "score": "0.5556023", "text": "def setInput(self, layerInput: list):\n self.layerInput = layerInput", "title": "" }, { "docid": "1ffac54f3850bd11cd1d8d0223a057bc", "score": "0.5553221", "text": "def set_inputs(self, inputs):\n\n inputs = list(inputs)\n num_inputs = len(self.input_layer.x)\n if num_inputs > len(inputs):\n extra = num_inputs - len(inputs)\n inputs = inputs + [0]*extra\n\n self.input_layer.x = np.array(inputs[:num_inputs])", "title": "" }, { "docid": "f995c3ad031589c067d768d838b1e4e5", "score": "0.5550904", "text": "def forward(self, input):\n return self.conv(input, weight=self.weight, groups=self.groups)", "title": "" }, { "docid": "f995c3ad031589c067d768d838b1e4e5", "score": "0.5550904", "text": "def forward(self, input):\n return self.conv(input, weight=self.weight, groups=self.groups)", "title": "" }, { "docid": "f995c3ad031589c067d768d838b1e4e5", "score": "0.5550904", "text": "def forward(self, input):\n return self.conv(input, weight=self.weight, groups=self.groups)", "title": "" }, { "docid": "f995c3ad031589c067d768d838b1e4e5", "score": "0.5550904", "text": "def forward(self, input):\n return self.conv(input, weight=self.weight, groups=self.groups)", "title": "" }, { "docid": "181cb9d450aa32dba3f802fe4900694b", "score": "0.55461967", "text": "def set_input(self, input):\n super().set_input(input)\n if 'A_label' in input :\n self.input_A_label = input['A_label'].to(self.device).squeeze(1)\n if self.opt.train_f_s_B and 'B_label' in input:\n self.input_B_label = input['B_label'].to(self.device).squeeze(1)", "title": "" }, { "docid": "4685ecabb63c6e99570649c09488f2e2", "score": "0.5544046", "text": "def input(self, **kwargs):\n self.__input_dict = kwargs", "title": "" }, { "docid": "7b35a7902a813923b9bdeced48e967e5", "score": "0.55427307", "text": "def forward(self, input):\n result=self.conv(input)\n result=self.attention(result,result,result)\n final_result=self.feed_forward(result)\n return final_result", "title": "" }, { "docid": "9c32dae2a350674e3ddde8751dd14f41", 
"score": "0.5537277", "text": "def forward(self, input):\r\n x = input\r\n for l in self.linear_layers[:-1]:\r\n x = l(x)\r\n x = self.act(x)\r\n\r\n output_layer = self.linear_layers[-1]\r\n return output_layer(x)", "title": "" }, { "docid": "26f27b3f79e89aacc55e4daa7513623b", "score": "0.55287075", "text": "def setInput(self, shape, portnum):\n self.input = (shape, portnum)", "title": "" }, { "docid": "f50635c0414919b05e2edddd71067a7b", "score": "0.5527572", "text": "def rescale_inputs(self, inps, tgts):\n pass", "title": "" }, { "docid": "2da343a278c8b97414335cc38dfa4fca", "score": "0.55265534", "text": "def build(self, input_shape):\n if not self.layer.built:\n self.layer.build(input_shape)\n self.layer.built = True\n super(DropConnect, self).build()", "title": "" }, { "docid": "c88346517909f268e2c70ca1e1f924cc", "score": "0.5524434", "text": "def forward(self, inputs):\n def calc_forward(inputs):\n\n weights_l = torch.einsum(self.equation_l, [self.kernels[0], self.kernels[1]])\n weights_r = torch.einsum(self.equation_r, [self.kernels[2], self.kernels[3]])\n weights = torch.einsum(self.equation, [weights_l, weights_r])\n\n weights = torch.reshape(weights, (self.out_channels,\n self.in_channels // self.groups, self.kernel_size[0], self.kernel_size[1]))\n\n outputs = self.conv2d(inputs, weights, self.bias)\n return outputs\n\n\n return checkpoint(calc_forward, inputs)", "title": "" }, { "docid": "d9aa0d8c9535915473c2a120d1270e2a", "score": "0.55147296", "text": "def inputs(self):\n return super().inputs", "title": "" }, { "docid": "d9aa0d8c9535915473c2a120d1270e2a", "score": "0.55147296", "text": "def inputs(self):\n return super().inputs", "title": "" }, { "docid": "d9aa0d8c9535915473c2a120d1270e2a", "score": "0.55147296", "text": "def inputs(self):\n return super().inputs", "title": "" }, { "docid": "d9aa0d8c9535915473c2a120d1270e2a", "score": "0.55147296", "text": "def inputs(self):\n return super().inputs", "title": "" }, { "docid": "d9aa0d8c9535915473c2a120d1270e2a", "score": "0.55147296", "text": "def inputs(self):\n return super().inputs", "title": "" }, { "docid": "8d406d3ad198ac2412f3c3059088a6e0", "score": "0.55104285", "text": "def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n x = x.reshape(-1, self.sz_input)\n\n for idx, layer in enumerate(self.layers):\n x = layer(x)\n\n out = x\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out", "title": "" }, { "docid": "b8bb421097f223af4fb22330080f7d2a", "score": "0.55068684", "text": "def think(self):\n self.inputs = list(map(lambda neuron: neuron.get_output(), self.layer.get_inputs()))\n self.output = sum(map(lambda neuron_value, weight: neuron_value * weight,\n self.inputs, self.input_weights))\n return self.get_output()", "title": "" }, { "docid": "6e38651949fbd7574b89dbb3cc0429a2", "score": "0.54952383", "text": "def calc_inputs(self, input_dict):\n if self.potential is None:\n raise ValueError('potential not set')\n \n input_dict['potential'] = self.potential", "title": "" }, { "docid": "051d59059a2646e18e089f19a647ed06", "score": "0.5493955", "text": "def set(self,input):\n self.state = input", "title": "" }, { "docid": "1e83ff47d90ddbd3f00ffebe99fb769e", "score": "0.5486351", "text": "def Layer(self, layer):", "title": "" }, { "docid": "e738eab1bddd70ba86b1796667984d6d", "score": "0.5483756", "text": "def set_input(self, input):\n AtoB = self.opt.direction == 'AtoB' # use <direction> to swap data_A and data_B\n self.data_A = input['A' if 
AtoB else 'B'].to(self.device) # get image data A\n self.data_B = input['B' if AtoB else 'A'].to(self.device) # get image data B\n self.image_paths = input['A_paths' if AtoB else 'B_paths'] # get image paths", "title": "" }, { "docid": "430e486f95799657c40639809199e6f4", "score": "0.54815537", "text": "def forward_prop(self):\n # Forward propagation to generate model output\n self.fwd_neurons = []\n\n def sigmoid_activation(neurons):\n # Activation for the input, basically 1/(1 + e^-x)\n return 1 / (1 + exp(-neurons))\n\n def fwd_input_hl(self):\n # Forward to Hidden Layer\n self.fwd_neurons.append(self.X.dot(self.weights[\"Input-HL\"]))\n self.fwd_neurons[-1].columns = self.weights[\"HL-HL\"][0].index\n self.fwd_neurons[-1] = (\n self.fwd_neurons[-1],\n sigmoid_activation(self.fwd_neurons[-1]),\n )\n\n def fwd_hl_hl(self):\n # Hidden layer to hidden layer\n for weight_1, weight_2 in zip(\n self.weights[\"HL-HL\"][:-1],\n self.weights[\"HL-HL\"][1:],\n ):\n self.fwd_neurons.append(self.fwd_neurons[-1][1].dot(weight_1))\n self.fwd_neurons[-1].columns = weight_2.index\n self.fwd_neurons[-1] = (\n self.fwd_neurons[-1],\n sigmoid_activation(self.fwd_neurons[-1]),\n )\n\n def fwd_hl_output(self):\n # Hidden layer to output\n self.fwd_neurons.append(self.fwd_neurons[-1][1].dot(\n self.weights[\"HL-HL\"][-1]))\n self.fwd_neurons[-1].columns = self.weights[\"HL-Output\"].index\n self.fwd_neurons[-1] = (\n self.fwd_neurons[-1],\n sigmoid_activation(self.fwd_neurons[-1]),\n )\n\n def fwd_output(self):\n # Finalize output\n self.fwd_neurons.append(self.fwd_neurons[-1][1].dot(\n self.weights[\"HL-Output\"]))\n self.fwd_neurons[-1].columns = [\"Output Neuron\"]\n self.fwd_neurons[-1] = (\n self.fwd_neurons[-1],\n sigmoid_activation(self.fwd_neurons[-1]),\n )\n\n def fwd_input_hl_single(self):\n # If single layer, the weight multiplications finish here\n self.fwd_neurons.append(self.X.dot(self.weights[\"Input-HL\"]))\n self.fwd_neurons[-1].columns = self.weights[\"HL-Output\"].index\n self.fwd_neurons[-1] = (\n self.fwd_neurons[-1],\n sigmoid_activation(self.fwd_neurons[-1]),\n )\n\n def fwd_hl_output_single(self):\n # If single layer, output is finalized here\n self.fwd_neurons.append(self.fwd_neurons[-1][1].dot(\n self.weights[\"HL-Output\"]))\n self.fwd_neurons[-1].columns = [\"Output Neuron\"]\n self.fwd_neurons[-1] = (\n self.fwd_neurons[-1],\n sigmoid_activation(self.fwd_neurons[-1]),\n )\n\n def fwd_check_single(self):\n # Check if single or multilayered and perform forward propagation\n if \"HL-HL\" in self.weights:\n fwd_input_hl(self)\n fwd_hl_hl(self)\n fwd_hl_output(self)\n fwd_output(self)\n else:\n fwd_input_hl_single(self)\n fwd_hl_output_single(self)\n\n fwd_check_single(self)", "title": "" }, { "docid": "2a4d6cab507f69fe4b49898cd84d57b6", "score": "0.54803354", "text": "def forward(self, inputs):\n x = inputs[0]\n pointer = self.num_layers\n for key, opt in self.opts.items():\n x = opt(x)\n if pointer > 1:\n pointer -= 1\n x = x + inputs[-pointer]\n x = nn.ReLU(inplace=False)(x)\n return x", "title": "" } ]
f79fb3e13ed95b711bb5b8b2fc67ea2b
Calculates the entropy gain from the two splits
[ { "docid": "d1781cea78284312ae6e922254bb6b89", "score": "0.7582539", "text": "def get_gain(self, labels, split_1, split_2):\n orig_ent = self.get_entropy(labels)\n after_ent = (self.get_entropy(labels[split_1]) * (sum(split_1) / len(labels)) +\n self.get_entropy(labels[split_2]) * (sum(split_2) / len(labels)))\n return orig_ent - after_ent", "title": "" } ]
[ { "docid": "22ff3aa3a3476c44f70c41e19489e4fd", "score": "0.6996975", "text": "def information_gain(before_split: np.array,\n splits: list) -> float:\n\n assert len(before_split) == sum([len(split) for split in splits]), f\"splits must add up to length of original{len(original)}/{sum([len(i) for i in splits])}\"\n assert sum(before_split) == sum([sum(split) for split in splits]), \"probabilities of sub arrays do not sum to same total as original\"\n\n # get parameters all arrays - size, entropy\n # only for convenience / readability\n size_before = len(before_split)\n entropy_before = entropy(before_split)\n\n entropy_after = 0\n\n # add all the partial entropies of the sub-parts\n for split in splits:\n # get the size of the part and it's entropy\n split_size = len(split)\n entropy_split = entropy(split)\n\n # get the entropy contribution of the part, add it to entropy_before\n relative_size = split_size / size_before\n entropy_contribution = entropy_split * relative_size\n\n entropy_after += entropy_contribution\n\n ig = entropy_before - entropy_after\n\n return ig", "title": "" }, { "docid": "9bdaf388da1c9db9db3809671a756c00", "score": "0.6919026", "text": "def infor_gain(before_split_freqs, after_split_freqs):\n gain = entropy(before_split_freqs)\n overall_size = sum(before_split_freqs)\n for freq in after_split_freqs:\n ratio = sum(freq) * 1.0 / overall_size\n gain -= ratio * entropy(freq)\n return gain", "title": "" }, { "docid": "57a5f780797306a9dcab6dec245d675a", "score": "0.6453136", "text": "def relativeEntropyScore(model1, model2):\n return sum((model1[i] * log(model1[i]/model2.get(i, RECOUNTOFF), 2) for i in model1))", "title": "" }, { "docid": "8fd587f63da820a872295d38490aeb5f", "score": "0.63685286", "text": "def get_information_gain(y, x):\n return get_entropy(y) - get_conditional_entropy(y, x)", "title": "" }, { "docid": "a3398fb8312a78e136667baa7afbc431", "score": "0.6346513", "text": "def informationgain(data, split_attribute_name, store_attribute_name=\"label\"):\r\n totalEntropyTarget = entropy_target(data[store_attribute_name])\r\n values, counts = np.unique(data[split_attribute_name], return_counts=True)\r\n attributeEntropy = np.sum(\r\n [(counts[i] / np.sum(counts)) * entropy_target(data.where(data[split_attribute_name] == values[i]).dropna()[store_attribute_name])\r\n for i in range(len(values))])\r\n\r\n informationGain = totalEntropyTarget - attributeEntropy\r\n return informationGain", "title": "" }, { "docid": "c490a65a1e53f180d8e190b12a89c7bb", "score": "0.6298175", "text": "def _entropy_filter(self, prob1, prob2):\n\n\n # calculate merged prob.\n prob_merged = (prob1 + prob2)/2\n # Compute entropy for each prob.\n H1 = -prob1 * math.log(prob1) - (1-prob1) * math.log(1-prob1)\n H2 = -prob2 * math.log(prob2) - (1-prob2) * math.log(1-prob2)\n Hm = -prob_merged * math.log(prob_merged) - (1-prob_merged) * math.log(1-prob_merged)\n\n H_min = min(H1, H2, Hm)\n\n if H_min == H1:\n return prob1\n elif H_min == H2:\n return prob2\n else:\n return prob_merged", "title": "" }, { "docid": "dace3026c25e84d99f65c508a06c0fd7", "score": "0.6279489", "text": "def _informationGain(trainingSet, attributeIndex, splitValue = None):\n\tentroyBefore = _entropy(trainingSet)\n\tsplits = _splitByIndex(trainingSet, attributeIndex, splitValue)\n\tsum = 0;\n\tfor label, splitTraningSet in splits.items():\n\t\tsum += (len(splitTraningSet)/float(len(trainingSet))) * _entropy(splitTraningSet)\n\t\treturn entroyBefore - sum", "title": "" }, { "docid": "cb9159d2647a8a0ddbf406fa1a3c6b8f", 
"score": "0.6124633", "text": "def calc_information_gain(data, split_name, target_name):\n # Calculate the original entropy\n original_entropy = calc_entropy(data[target_name])\n\n # Find the median of the column we're splitting\n column = data[split_name]\n median = column.median()\n\n # Make two subsets of the data, based on the median\n left_split = data[column <= median]\n right_split = data[column > median]\n\n # Loop through the splits and calculate the subset entropies\n to_subtract = 0\n for subset in [left_split, right_split]:\n prob = (subset.shape[0] / data.shape[0])\n to_subtract += prob * calc_entropy(subset[target_name])\n\n # Return information gain\n return original_entropy - to_subtract", "title": "" }, { "docid": "b5e1c1bde7622a7549db7b55e317b894", "score": "0.6114481", "text": "def calc_information_gain(y, y_left, y_right):\n\t\n\tp_left = len(y_1) / len(y)\n\tp_right = 1 - p_left\n\t\n\tH_y = calc_entropy(y)\n\tH_y_left = calc_entropy(y_left)\n\tH_y_right = calc_entropy(y_right)\n\t\n\tinformation_gain = H_y - p_left * H_y_left - p_right * H_y_right \n\t\n\treturn information_gain", "title": "" }, { "docid": "72bc824739f105c6e8074d796a0030c7", "score": "0.6081559", "text": "def gain(S,A):\n unique_values=np.unique(S[:,A])\n S_len=S.shape[0]\n entropy_Sv=0\n for v in unique_values:\n Sv=S[S[:,A]==v]\n entropy_Sv+=(Sv.shape[0]/S_len)*entropy(Sv)\n \n return entropy(S)-entropy_Sv", "title": "" }, { "docid": "12b9481fa28627e1eade975ca9ddf941", "score": "0.60811454", "text": "def splitEntropy(self, split, classLabels, featureVals):\n\t\t# note that all data is weighted here\n\t\ttotal = 0.0\n\t\tfor j in featureVals:\n\t\t\t# N_j is the total number of instances that are on branch j\n\t\t\tN_j = sum([x[1] for x in split[j].items()])\n\t\t\tif N_j == 0:\n\t\t\t\tcontinue\n\n\t\t\t# compute entropy of branch j\n\t\t\tentropy = 0.0\n\t\t\tfor i in classLabels:\n\t\t\t\t# p_ij is the probability that an instance taking branch j has class i\n\t\t\t\tp_ij = float(split[j][i]) / N_j\n\t\t\t\tif p_ij != 0:\n\t\t\t\t\t# compute sum of entropy for each \n\t\t\t\t\tentropy += p_ij * log(p_ij)\n\t\t\t\t\t\n\t\t\t# split entropy is entropy of each branch weighted by proportion of data\n\t\t\t# in that branch\n\t\t\ttotal += N_j * entropy\n\t\treturn -total", "title": "" }, { "docid": "2385554303c62fb12d59f68aaa642631", "score": "0.6065025", "text": "def calc_information_gain(data, split_name, target_name):\n # Calculate original entropy.\n original_entropy = calc_entropy(data[target_name])\n\n # Find the median of the column we're splitting.\n column = data[split_name]\n median = column.median()\n\n # Make two subsets of the data based on the median.\n left_split = data[column <= median]\n right_split = data[column > median]\n\n # Loop through the splits, and calculate the subset entropy.\n to_subtract = 0\n for subset in [left_split, right_split]:\n prob = (subset.shape[0] / data.shape[0])\n to_subtract += prob * calc_entropy(subset[target_name])\n\n # Return information gain.\n return original_entropy - to_subtract", "title": "" }, { "docid": "74f468ff3fa923db7e99df6adc9b14af", "score": "0.60429007", "text": "def __entropy(self, X_train, y_train):\n entropy = 0.0\n unique_counts = self.__unique_counts(y_train)\n for key in unique_counts:\n tmp = unique_counts[key]/X_train.shape[0]\n entropy -= tmp * math.log(tmp, 2)\n return entropy", "title": "" }, { "docid": "6639112b89ee53ad3430b4f2628d2e00", "score": "0.59948224", "text": "def kullback_leibler(freq1, freq2): \n return 
entropy(freq1, qk=freq2).round(3), entropy(freq2, qk=freq1).round(3)", "title": "" }, { "docid": "0021d852c261beccb785604c87cdb8c3", "score": "0.59847355", "text": "def compute_entropy(self):\n #pdb.set_trace()\n vocab = self.vocabulary()\n for position in range(self.signal_space.length):\n comparisons = 0\n for meaning in meanings:\n utterances = self.speak(meaning, pick=False)\n for utterance in utterances:\n neighbors = self.signal_space.compute_neighbors(utterance,position)\n for neighbor in neighbors:\n understandings = self.hear(neighbor, pick=False)\n for understanding in understandings:\n mdist = self.meaning_space.hamming(meaning,understanding)\n load[position] += (mdist / self.meaning_space.length)\n comparisons += 1\n load[position] /= comparisons\n #pdb.set_trace()\n return load", "title": "" }, { "docid": "1181c51b9ff52bf33081d5d3d0ca6ff3", "score": "0.5947263", "text": "def partition_entropy(subsets: List[List[Any]]) -> float:\n total_count = sum(len(subset) for subset in subsets)\n return sum(data_entropy(subset) * len(subset) / total_count\n for subset in subsets)", "title": "" }, { "docid": "21a9b3f2d14232b45a39cec559deca33", "score": "0.5936735", "text": "def relative_entropy_analysis(features_a, features_b, all_data_a, all_data_b, bin_width=None, bin_num=10, verbose=True, override_name_check=False):\n all_data_a, all_data_b = all_data_a.T, all_data_b.T\n # Assert that the features are the same and data sets have same number of features\n if override_name_check:\n assert len(features_a) == len(features_b)\n else:\n assert features_a == features_b\n assert all_data_a.shape[0] == all_data_b.shape[0]\n # Extract the names of the features\n data_names = features_a\n # Initialize relative entropy and average value\n data_jsdist = np.zeros(len(data_names))\n data_kld_ab = np.zeros(len(data_names))\n data_kld_ba = np.zeros(len(data_names))\n data_avg = np.zeros(len(data_names))\n # Loop over all features\n for i in range(len(all_data_a)):\n data_a = all_data_a[i]\n data_b = all_data_b[i]\n # Combine both data sets\n data_both = np.concatenate((data_a, data_b))\n data_avg[i] = np.mean(data_both)\n # Get bin values for all histograms from the combined data set\n if bin_width is None:\n bins = bin_num\n else:\n bins_min = np.min(data_both)\n bins_max = np.max(data_both)\n bins = np.arange(bins_min, bins_max, bin_width)\n # Calculate histograms for combined and single data sets\n histo_both = np.histogram(data_both, bins=bins, density=True)\n histo_a = np.histogram(data_a, density=True, bins=histo_both[1])\n distr_a = histo_a[0] / np.sum(histo_a[0])\n histo_b = np.histogram(data_b, density=True, bins=histo_both[1])\n distr_b = histo_b[0] / np.sum(histo_b[0])\n # Calculate relative entropies between the two data sets (Kullback-Leibler divergence)\n data_kld_ab[i] = np.sum(sp.special.kl_div(distr_a, distr_b))\n data_kld_ba[i] = np.sum(sp.special.kl_div(distr_b, distr_a))\n # Calculate the Jensen-Shannon distance\n data_jsdist[i] = scipy.spatial.distance.jensenshannon(\n distr_a, distr_b, base=2.0)\n # Print information\n if verbose:\n print(i, '/', len(all_data_a), ':', data_names[i], \" %1.2f\" % data_avg[i],\n \" %1.2f %1.2f %1.2f\" % (data_jsdist[i], data_kld_ab[i], data_kld_ba[i]))\n return data_names, data_jsdist, data_kld_ab, data_kld_ba", "title": "" }, { "docid": "10fe18510cf36a7ba4716a7ddc62b102", "score": "0.59195316", "text": "def binary_info_gain(feature, threshold, samples, labels):\n #Get initial entropy\n origent = entropy(labels)\n #Get two halves of threshold\n split1 
= samples[:, feature]>=threshold\n split2 = np.invert(split1)\n #Get entropy after split (remembering to weight by no of examples in each\n #half of split)\n afterent = (entropy(labels[split1])*(sum(split1)/len(labels)) + \n entropy(labels[split2])*(sum(split2)/len(labels)))\n gain = origent - afterent\n return gain", "title": "" }, { "docid": "7214d4fe7a62eb05e110449b7ff0524b", "score": "0.5907603", "text": "def calc_kl_score(x1, x2):\n positions = np.linspace(0,1,1000) # (Optional) If plotting, you can increase this number to generate a smoother KDE plot\n kernel1 = gaussian_kde(x1)\n values1 = kernel1(positions)\n kernel2 = gaussian_kde(x2)\n values2 = kernel2(positions)\n return entropy(values1,values2)", "title": "" }, { "docid": "7e0a0eff772fe7b130f0f31820f455b7", "score": "0.59069204", "text": "def intermediate_entropy(count, total):\n return (count / total) * np.log2(count / total)", "title": "" }, { "docid": "77f78532be6637ec04a6cd69c4127a64", "score": "0.589776", "text": "def gaincalculation(width, efficiency):\n g1 = 2./(1. - np.cos(width/(efficiency*2.)))\n Delta = 2./(1 - np.cos(width/2.))\n g2 = (Delta - g1)/(Delta - 1.)\n return g1, g2", "title": "" }, { "docid": "185cb692c4553e2329c1410c9d9b46d7", "score": "0.5875816", "text": "def cal_gain(data, prop):\n info_gain = 0\n global_ent = cal_entropy(data)\n total_count = len(data)\n for v in prop['values']:\n subdata = data[data[prop['name']] == v]\n count = len(subdata)\n info_gain -= count/total_count * cal_entropy(subdata)\n info_gain += global_ent\n return info_gain", "title": "" }, { "docid": "b644aadd28b69ac32b66175be8f99335", "score": "0.58460987", "text": "def info_gain( self, left, right, current_uncertainty ):\n\t\tp = float(len(left)) / (len(left) + len(right))\n\t\treturn current_uncertainty - p * self.gini(left) - (1 - p) * self.gini(right)", "title": "" }, { "docid": "12fd43e4c323cba7b483c826d5dfeef1", "score": "0.5828038", "text": "def entropy2(labels):\n \n n_labels = labels.size\n \n if n_labels <= 1:\n return 0\n \n counts = np.bincount(labels)\n probs = counts / n_labels\n \n #n_classes = np.count_nonzero(probs)\n n_classes = 256\n #print('nclases ' + str(n_classes))\n if n_classes <= 1:\n return 0\n \n ent = 0.\n \n # Compute standard entropy.\n for i in probs:\n if i != 0:\n \n ent -= i * log(i, n_classes)\n \n \n return ent", "title": "" }, { "docid": "7f1472deb25e86f17a00e315cdca7506", "score": "0.5822774", "text": "def get_entropy(self, examples_and_tags):\n tags = [tag for example, tag in examples_and_tags]\n if not tags:\n return 0\n tags_and_total_num = Counter()\n entropy = 0.0\n p_per_class = []\n for tag in tags:\n tags_and_total_num[tag] += 1\n for tag_total_num in tags_and_total_num:\n p_per_class.append(float(tags_and_total_num[tag_total_num]) / len(tags))\n for p in p_per_class:\n if p == 0:\n return 0\n entropy += -p * math.log(p, 2)\n return entropy", "title": "" }, { "docid": "fea48bc25c30ea43b807381831af1918", "score": "0.58027655", "text": "def information_gain(self, attr, examples): # the quality of a split\n \n arr = self.split_by(attr, examples) #Splits by flying\n\n #Calculating the totals per subgroup:\n subTotals = []\n totalElems = 0\n for subgroup in arr:\n subTotals.append(len(subgroup[1]))\n totalElems = totalElems + len(subgroup[1])\n subTotals[:] = [x / totalElems for x in subTotals]\n \n #Calculating the original entropy:\n originalEntropy = self.information_per_class(examples)\n\n # Calculate the remainder:\n remainder = 0\n for i, group in enumerate(arr):\n entropy = 
self.information_per_class(group[1])\n remainder = remainder + (entropy * subTotals[i])\n\n informationGain = originalEntropy - remainder\n\n return informationGain", "title": "" }, { "docid": "18c63ad08acdc91a172c6b2b86863835", "score": "0.57802325", "text": "def compute_entropy(self,HP):\n print(\"I am computing entropy...\")\n entropies=0\n num_samples=HP[0].shape[1]\n #print \"There are {} samples\".format(num_samples)\n for n in range(num_samples):\n entropies_n=0\n #print \"there are {} hidden layers\".format(self.NK)\n for l in range(self.NK):\n if self.hidden_type[l]==\"Bernoulli\":\n HPln=HP[l][:,n]\n for h in HPln:\n if h!=0 and h!=1:\n entropies_n= entropies_n -h*numpy.log(h) - (1-h)*numpy.log(1-h)\n elif self.hidden_type[l]==\"Multinomial\": # only applicable for count is 1, = multinoulli\n HPln=HP[l][:,n]\n for h in HPln:\n if h!=0 and h!=1:\n entropies_n= entropies_n - h*numpy.log(h)\n elif self.hidden_type[l]==\"Binomial\":\n HPln=HP[l][:,n]\n for h in HPln:\n if h!=0 and h!=1:\n entropies_n= entropies_n + 0.5*numpy.log( 2*numpy.pi*numpy.e*self.hidden_type_fixed_param[l]*h*(1-h))\n elif self.hidden_type[l]==\"Gaussian_FixPrecision1\" or self.hidden_type[l]==\"Gaussian_FixPrecision2\":\n for k in range(self.K[l]):\n entropies_n= entropies_n + 0.5*numpy.log( 2*numpy.pi*numpy.e/self.hidden_type_fixed_param[l])\n elif self.hidden_type[l]==\"Gaussian\":\n for k in range(self.K[l]):\n entropies_n= entropies_n + 0.5*numpy.log( 2*numpy.pi*numpy.e/(-2*self.b[l][1]))\n else:\n print(\"The entropy for {0} distrubution is not implemented yet.\".format(self.hidden_type[l]))\n entropies_n= entropies_n + 0\n entropies= entropies + entropies_n\n mean_entropy=entropies/num_samples\n #print \"The mean entropy is {}\".format(mean_entropy)\n return mean_entropy,entropies", "title": "" }, { "docid": "08b6fb937531963d91dc70812b7bcf4d", "score": "0.5717517", "text": "def _split_gain(gradient_left, hessian_left, gradient_right, hessian_right,\n sum_gradients, sum_hessians, l2_regularization):\n def negative_loss(gradient, hessian):\n return (gradient ** 2) / (hessian + l2_regularization)\n\n gain = negative_loss(gradient_left, hessian_left)\n gain += negative_loss(gradient_right, hessian_right)\n gain -= negative_loss(sum_gradients, sum_hessians)\n return gain", "title": "" }, { "docid": "0c1544969f3cac904be904153d4440c1", "score": "0.571037", "text": "def get_best_gain(data,attribute_list):\n final_entropy_data = {}\n child_lists={}\n for attribute in attribute_list:\n child_lists[str(attribute)]={}\n count1={}\n count2 ={}\n true_count = 0\n false_count = 0\n major_count_1=0\n minor_count_1=0\n major_count_2=0\n minor_count_2=0\n true_list = []\n false_list = []\n key_value = True\n for sentence in data:\n if sentence.attributes[attribute] == key_value:\n true_count += 1\n true_list.append(sentence)\n else:\n false_count+=1\n false_list.append(sentence)\n if len(true_list) > 0:\n for line in true_list:\n if line.tag not in count1.keys():\n count1[str(line.tag)]=1\n else:\n count1[str(line.tag)]+=1\n if len(count1.keys()) > 1:\n major_count_1 = count1['en']\n minor_count_1 = count1['nl']\n else:\n for key in count1.keys():\n major_count_1 = count1[str(key)]\n if len(false_list) > 0:\n for line in false_list:\n if line.tag not in count2.keys():\n count2[str(line.tag)] = 1\n else:\n count2[str(line.tag)] += 1\n if len(count2.keys()) > 1:\n major_count_2 = count2['en']\n minor_count_2 = count2['nl']\n else:\n for key in count2.keys():\n major_count_2 = count2[str(key)]\n\n total_count = 
true_count+false_count\n\n # true_entropy\n if true_count > 0:\n if major_count_1 > 0:\n if minor_count_1 > 0:\n\n true_entropy = (true_count / total_count) * (\n ((major_count_1 / true_count) * math.log2(1 / (major_count_1 / true_count))) + (\n minor_count_1 / true_count) * math.log2(1 / (minor_count_1 / true_count)))\n else:\n true_entropy = (true_count / total_count) * (\n ((major_count_1 / true_count) * math.log2(1 / (major_count_1 / true_count))))\n else:\n true_entropy = (true_count / total_count) * (\n ((minor_count_1 / true_count) * math.log2(1 / (minor_count_1 / true_count))))\n else:\n true_entropy = 0\n\n # false_entropy\n if false_count > 0:\n if major_count_2 > 0:\n if minor_count_2 > 0:\n false_entropy = (false_count / total_count) * (\n ((major_count_2 / false_count) * math.log2(1 / (major_count_2 / false_count))) + (\n minor_count_2 / false_count) * math.log2(1 / (minor_count_2 / false_count)))\n else:\n false_entropy = (false_count / total_count) * (\n ((major_count_2 / false_count) * math.log2(1 / (major_count_2 / false_count))))\n else:\n false_entropy = (false_count / total_count) * (\n ((minor_count_2 / false_count) * math.log2(1 / (minor_count_2 / false_count))))\n else:\n false_entropy = 0\n final_entropy = true_entropy + false_entropy\n # print(key,final_entropy)\n final_entropy_data[str(attribute)] = final_entropy\n \"\"\"\n Get all the values regarding any split at any depth.\n \"\"\"\n # print('attribute ',attribute,'entropy',final_entropy,'true_count',true_count,'major_1',major_count_1,'minor_1',minor_count_1,'false_count',false_count,'major_2',major_count_2,'minor_2',minor_count_2,count1.keys(),count2.keys())\n child_lists[str(attribute)][True]=true_list\n child_lists[str(attribute)][False] = false_list\n final_entropy_data = sorted(final_entropy_data.items(), key=\n lambda kv: (kv[1], kv[0]))\n final_value = list(final_entropy_data)[0]\n \"\"\"\n See the best entropy split for every level and split.\n \"\"\"\n # print('Best entropy is for ', final_value)\n return final_value[0],child_lists[str(final_value[0])]", "title": "" }, { "docid": "4173ec8b63ad3593b84d0b7be9e1b7e4", "score": "0.571037", "text": "def get_best_gain(data,attribute_list):\n final_entropy_data = {}\n child_lists={}\n for attribute in attribute_list:\n child_lists[str(attribute)]={}\n count1={}\n count2 ={}\n true_count = 0\n false_count = 0\n major_count_1=0\n minor_count_1=0\n major_count_2=0\n minor_count_2=0\n true_list = []\n false_list = []\n key_value = True\n for sentence in data:\n if sentence.attributes[attribute] == key_value:\n true_count += 1\n true_list.append(sentence)\n else:\n false_count+=1\n false_list.append(sentence)\n if len(true_list) > 0:\n for line in true_list:\n if line.tag not in count1.keys():\n count1[str(line.tag)]=1\n else:\n count1[str(line.tag)]+=1\n if len(count1.keys()) > 1:\n major_count_1 = count1['en']\n minor_count_1 = count1['nl']\n else:\n for key in count1.keys():\n major_count_1 = count1[str(key)]\n if len(false_list) > 0:\n for line in false_list:\n if line.tag not in count2.keys():\n count2[str(line.tag)] = 1\n else:\n count2[str(line.tag)] += 1\n if len(count2.keys()) > 1:\n major_count_2 = count2['en']\n minor_count_2 = count2['nl']\n else:\n for key in count2.keys():\n major_count_2 = count2[str(key)]\n\n total_count = true_count+false_count\n\n # true_entropy\n if true_count > 0:\n if major_count_1 > 0:\n if minor_count_1 > 0:\n\n true_entropy = (true_count / total_count) * (\n ((major_count_1 / true_count) * math.log2(1 / (major_count_1 / 
true_count))) + (\n minor_count_1 / true_count) * math.log2(1 / (minor_count_1 / true_count)))\n else:\n true_entropy = (true_count / total_count) * (\n ((major_count_1 / true_count) * math.log2(1 / (major_count_1 / true_count))))\n else:\n true_entropy = (true_count / total_count) * (\n ((minor_count_1 / true_count) * math.log2(1 / (minor_count_1 / true_count))))\n else:\n true_entropy = 0\n\n # false_entropy\n if false_count > 0:\n if major_count_2 > 0:\n if minor_count_2 > 0:\n false_entropy = (false_count / total_count) * (\n ((major_count_2 / false_count) * math.log2(1 / (major_count_2 / false_count))) + (\n minor_count_2 / false_count) * math.log2(1 / (minor_count_2 / false_count)))\n else:\n false_entropy = (false_count / total_count) * (\n ((major_count_2 / false_count) * math.log2(1 / (major_count_2 / false_count))))\n else:\n false_entropy = (false_count / total_count) * (\n ((minor_count_2 / false_count) * math.log2(1 / (minor_count_2 / false_count))))\n else:\n false_entropy = 0\n final_entropy = true_entropy + false_entropy\n # print(key,final_entropy)\n final_entropy_data[str(attribute)] = final_entropy\n # print('attribute ',attribute,'entropy',final_entropy,'true_count',true_count,'major_1',major_count_1,'minor_1',minor_count_1,'false_count',false_count,'major_2',major_count_2,'minor_2',minor_count_2,count1.keys(),count2.keys())\n child_lists[str(attribute)][True]=true_list\n child_lists[str(attribute)][False] = false_list\n final_entropy_data = sorted(final_entropy_data.items(), key=\n lambda kv: (kv[1], kv[0]))\n final_value = list(final_entropy_data)[0]\n # print('Best entropy is for ', final_value)\n return final_value[0],child_lists[str(final_value[0])]", "title": "" }, { "docid": "0f9eacb5849c0c21c0cc68ba3e581d25", "score": "0.5705174", "text": "def coherent_gain(self):\n return self.S1 / self.num_samples_window", "title": "" }, { "docid": "4547a6f3372c085cfba863b780a9bf26", "score": "0.5688303", "text": "def information_gain(self, x, y):\n entropy_prior = self.entropy(self.posterior)\n posterior_new = self.observe(x, y)\n entropy_post = self.entropy(posterior_new)\n information_gain = entropy_prior - entropy_post\n return information_gain", "title": "" }, { "docid": "5ebd8202da9dd77d4fe934176885db71", "score": "0.56577474", "text": "def _entropy(self, labels): # pragma: no cover\r\n total = len(labels)\r\n label_counts = Counter(labels).values()\r\n return -sum((p / total) * log2(p / total)\r\n for p in label_counts\r\n if p)", "title": "" }, { "docid": "53307f806ec4fd5096141ab620beb6a4", "score": "0.5653216", "text": "def partition_entropy(subsets):\n total_count = sum(len(subset) for subset in subsets)\n return sum( ent.data_entropy(subset) * len(subset) / total_count\n for subset in subsets )", "title": "" }, { "docid": "51aae2da33f0aaeef53dc92b8182e2a4", "score": "0.5636259", "text": "def info_gain(self,a):\n entro = self.entropy()\n Dv = dict()\n for d in self.datas:\n a_info = d.data[a]\n if a_info in Dv:\n Dv[a_info].add(d)\n else:\n new_dataset = DataSet()\n new_dataset.add(d)\n Dv[a_info] = new_dataset\n for x in Dv:\n N = len(self.datas) #|D|\n Nv = len(Dv[x].datas)#|Dv|\n entro -= Dv[x].entropy() * Nv / N\n return entro, Dv", "title": "" }, { "docid": "87b1b53699e125051523d6b7449b7d7e", "score": "0.56262416", "text": "def gain_loss_calc(self):\r\n\t\ttotal_cost = self.purchase_price*self.number_of_shares\r\n\t\ttotal_value = self.current_value*self.number_of_shares\r\n\t\tself.total_gain_loss = round((total_value - total_cost), 2)\r\n\t\treturn 
self.total_gain_loss", "title": "" }, { "docid": "8d745925ff5a7af84441eb89942e2169", "score": "0.56077456", "text": "def test_entropies():\n data = np.array([[1, 0, 0, 1, 1, 0, 1, 0], [0, 1, 0, 1, 1, 0, 1, 0]]).T\n H = entropy_from_seq(data[:, 0])\n H_joint = joint_entropy(data)\n H_cond = conditional_entropy(data[:, 1, np.newaxis], data[:, 0, np.newaxis])\n\n H_true = 1.0\n H_joint_true = 3 / 4 + 3 / 4 * np.log2(8 / 3)\n H_cond_true = H_joint - H\n\n assert np.isclose(H, H_true)\n assert np.isclose(H_joint, H_joint_true)\n assert np.isclose(H_cond, H_cond_true)", "title": "" }, { "docid": "e9a72657175ddcc7826a134e22b20414", "score": "0.5601919", "text": "def binary_info_gain(self, threshold, samples, labels):\n # Get two halves of threshold\n split1 = samples >= threshold\n split2 = np.invert(split1)\n # Get entropy after split (remembering to weight by no of examples in each half of split)\n return self.get_gain(labels, split1, split2)", "title": "" }, { "docid": "e8d7d3b4597b03a6135c0f228083ca3a", "score": "0.5597598", "text": "def information_gain(parent, children):\n total = len(parent)\n len_children = [len(child) for child in children]\n \n parent_entropy = entropy(parent)\n children_entropy = []\n for child in children:\n children_entropy.append(entropy(child))\n\n avg_entropy = 0\n for i in range(len(children)):\n avg_entropy = len_children[i]/total * children_entropy[i]\n # TODO: Implement the information gain\n \n return parent_entropy - avg_entropy", "title": "" }, { "docid": "248af202eb8550a052617b3f53ca2dff", "score": "0.55874455", "text": "def entropy(data_set):\n log2 = lambda x: log(x)/log(2)\n results = unique_counts(data_set)\n ent = 0.0\n for count in results.keys():\n ratio = float(results[count])/len(data_set)\n ent = ent-ratio*log2(ratio)\n\n return ent", "title": "" }, { "docid": "350bf237bf18f72fdf5b991a909fc7d9", "score": "0.5578794", "text": "def calculate_entropy(corpus):\n fdist = FreqDist(corpus)\n total_len = len(corpus)\n ent = 0\n for k, v in fdist.items():\n p = v / total_len\n\n ent += -p * np.log(p)\n\n return ent", "title": "" }, { "docid": "66e1be1d0a8739b196db918177151fef", "score": "0.5578378", "text": "def _get_info_gain(self, y, y_true_branch, y_false_branch):\n\n p_true = len(y_true_branch) / len(y)\n p_false = 1 - p_true\n\n metric_before_split = self._get_metric(y)\n metric_true = self._get_metric(y_true_branch)\n metric_false = self._get_metric(y_false_branch)\n\n gain = metric_before_split - p_true * metric_true - \\\n p_false * metric_false\n\n return gain", "title": "" }, { "docid": "30871d77b0db4ac2468b588c3924b59b", "score": "0.5565554", "text": "def info_gain(data, column, l_base=2, mode = None):\r\n class_ = data.iloc[:,-1].name # last column of the DataFrame\r\n unique_values = data[column].unique()\r\n sum_gain = basic_entropy(data, class_)\r\n for feature_ in unique_values:\r\n sum_gain += -(\r\n count_conditional_att(data, column, feature_, class_, mode=mode) *\r\n conditional_entropy(data, column, feature_, class_, l_base,mode=mode))\r\n return sum_gain", "title": "" }, { "docid": "e4e73286a01959131145e80c2c6cef92", "score": "0.5561312", "text": "def cross_entropy(self, other):\n assert self.scores.shape == other.scores.shape, \"The shape of the scores must match\"\n \n # We have two global models:\n # p(x) = (\\prod_{e in x} exp(score_p(e))) / Z_p \n # where Z_p is the normalisation constant\n # Z_p = \\sum_x \\prod_{e in x} exp(score_p(e)) \n # and similarly\n # q(x) = (\\prod_{e in x} exp(score_q(e))) / Z_q\n # Z_q = \\sum_x 
\\prod_{e in x} exp(score_q(e)) \n # thus \n # log p(x) = (\\sum_{e in x} score_p(e)) - log Z_p\n # log q(x) = (\\sum_{e in x} score_q(e)) - log Z_q\n\n # The cross-entropy is\n # E[-log q(x)] = -\\sum_x p(x) log q(x)\n # = \\sum_x p(x) (log Z_q - \\sum_{e in x} score_q(e))\n # = \\sum_x p(x) log Z_q - \\sum_x p(x) \\sum_{e in x} score_q(e) \n # = log Z_q - \\sum_x p(x) \\sum_{e in x} score_q(e)\n # = log Z_q - \\sum_{e} mu(e) score_q(e)\n # where mu(e) is the marginal probability of the edge under the distribution p_X\n # mu(e) = R[ori(e)] * exp(score_p(e)) * V[dest(e)] / Z_p\n # R[ori(e)] is the total probability of all paths from the initial state\n # of the FSA to the origin state of the edge\n # V[dest(e)] is the total probability of all paths from the final state \n # of the FSA to the destination state of the edge.\n\n # A fully vectorised implementation is possible, but it's tricky to \n # visualise. I'll try my best to explain it here.\n M = self.fsa\n W = self.arc_weight\n V = self.state_value\n R = self.state_rvalue\n # Recall that\n # * M.shape is [B, K+1, 3, 3] (this is a boolean mask indicating the valid arcs)\n # * W.shape is [B, K+1, 3, 3]\n # * V.shape is [B, K+2, 3] \n # and the batch dimension B could actually be a tuple of dimensions.\n # I want to vectorise the following computation\n # R[...,k,ori] + W[...,k,ori,dest] + V[...,k+1,dest]) - V[...,0,0]\n # for some k in {0,...,K}, ori in {0,1,2} and dest in {0,1,2}, here\n # ori is the label of the origin state, and dest is the label of the \n # destination state.\n # So I am going to change R's shape, \n # it goes from [B, K+2, 3] to [B, K+1, 3, 1],\n # where I forget k=K+1 and unsqueeze the last dimension.\n # Similarly, V goes from [B, K+2, 3] to [B, K+1, 1, 3],\n # where I forget k=0 and unsqueeze the second last dimension.\n # The result has the same shape of W, which is desirable\n # since we are computing expected values for the edge potentials\n # (note we should mask this operation following the FSA structure M, \n # that's because we are subtracting quantities that are potentially -inf, \n # which would lead to NaNs)\n # [B, K+1, 3, 3]\n log_mu = torch.where(M, R[...,:-1,:,None] + W + V[...,1:,None,:] - V[...,0,0,None,None,None], M.float() - np.inf)\n # We need the marginal (not its log)\n mu = log_mu.exp()\n # Here we use masked product, this is needed because we want the semantics\n # inf * 0 = 0, but for good reasons that's not what torch produces \n # in this specific context, a masked product is wanted and safe.\n expected = torch.where(mu == 0, torch.zeros_like(mu), other.arc_weight * mu)\n # For the entropy simply compute the expected potential and shift by log Z_q\n H = -(expected.sum((-1, -2, -3)) - other.state_value[...,0,0])\n \n # less vectorised code (easier to read)\n #nH = 0.\n #for k in range(K+1):\n # for ori in range(3): \n # for dest in range(3):\n # # marginal probability of edge\n # log_w = R[...,k,ori] + W[...,k,ori,dest] + V[...,k+1,dest] - V[...,0,0]\n # w = log_w.exp() \n # # expected score\n # e = other.arc_weight[...,k,ori,dest] * w\n # e = torch.where(w == 0, torch.zeros_like(e), e)\n # nH = nH + e\n #H = - (nH - other.state_value[...,0,0])\n\n return H", "title": "" }, { "docid": "9e8bcf1e71682815125b719d9f94f29d", "score": "0.5556174", "text": "def mse(img1, img2): \n # TODO: implement this function.\n \n mse=0\n \n for i in range(len(img1)):\n for j in range(len(img1[0])):\n mse+=(img1[i][j]-img2[i][j])**2\n \n return (mse/(len(img1)*len(img1[0])))", "title": "" }, { "docid": 
"10cbb667d34b3c0446abfb4e27608ffc", "score": "0.55535686", "text": "def entropy(freqs):\n #print(freqs)\n all_freq = sum(freqs)\n \n entrop = 0\n for fq in freqs:\n prob = fq * 1.0 /all_freq\n if abs(prob) > 1e-8:\n entrop += -prob* np.log2(prob)\n return entrop", "title": "" }, { "docid": "76c11b6dff0048ce9fe6718809959a50", "score": "0.554535", "text": "def entropy(ps):\n\n def log2(x):\n if np.isclose(x, 0):\n return 0 # Convention in definition of entropy\n else:\n return np.log2(x)\n\n return sum(map(lambda p: -p * log2(p), ps))", "title": "" }, { "docid": "a844648426f24e139c1d7f2618f9b1d6", "score": "0.55450654", "text": "def entropy(self, motifs: np.ndarray) -> float:\n pprofile = self.profile(motifs).T\n\n total_entropy = 0\n for probs in pprofile:\n sub_entropy = 0\n for val in probs:\n if val != 0:\n sub_entropy += val * math.log2(val)\n total_entropy -= sub_entropy\n\n return total_entropy", "title": "" }, { "docid": "3f2d3d23258323402e9fbd5355ea6ce3", "score": "0.5542056", "text": "def partition_entropy(subsets):\n\n total_count = sum(len(subset) for subset in subsets)\n\n return sum(data_entropy(subset) * len(subset) / total_count\n for subset in subsets)", "title": "" }, { "docid": "166cb365b2af5d574a353e31b470b787", "score": "0.55232227", "text": "def probeEntropy(self):\n total = 0\n for bit in self.world.bits:\n total += bit.ENTROPY\n return total", "title": "" }, { "docid": "cbadb96e11b60fff409a9582d9c84330", "score": "0.551672", "text": "def _entropy(trainingSet):\n\t\n\tcountOfLabels = _countsByClass(trainingSet)\n\n\tentropy = 0\n\tfor count in countOfLabels.values():\n\t\tp = float(count)/len(trainingSet)\n\t\tentropy -= p * _log2(p)\n\n\treturn entropy", "title": "" }, { "docid": "bc1095150380c75dc6de6fa1a0290f1e", "score": "0.5516138", "text": "def calc_base_entropy(dataset):\n num_sample = len(dataset)\n label_count = {}\n for feature_vector in dataset:\n current_label = feature_vector[-1]\n if current_label not in label_count.keys():\n label_count[current_label] = 0\n label_count[current_label] += 1\n\n entropy = 0\n for key in label_count:\n prop = float(label_count[key]) / num_sample\n entropy -= prop * log(prop, 2)\n\n return entropy", "title": "" }, { "docid": "fc5daf13c436d5ff17e1f54c6e9ac539", "score": "0.5511306", "text": "def cal_entropy(data):\n ent = 0\n total = len(data)\n for l in data['label'].unique():\n p = len(data[data['label'] == l]) / total\n ent -= p * math.log2(p)\n return ent", "title": "" }, { "docid": "5c29ea186837ed53e33fb1b0668da511", "score": "0.5508796", "text": "def entropy (distr):\n return 0", "title": "" }, { "docid": "3ce7b482ba4f2c407e1c0d73e13b03f6", "score": "0.55048615", "text": "def entropy(band: np.ndarray) -> np.float64:\n hist, _ = np.histogram(band, bins=range(0, 256))\n hist = hist[hist > 0]\n return -np.log2(hist / hist.sum()).sum()", "title": "" }, { "docid": "5a16a53caa0edaeb28f43046a518f8d7", "score": "0.5495284", "text": "def _entropy(labels: np.ndarray) -> float:\n p_sum = 0\n unique, counts = np.unique(labels, return_counts=True)\n c_sum = counts.sum()\n for count in counts:\n p = count / c_sum\n p_sum = p_sum + p * math.log2(p)\n if p_sum:\n return -1.0 * p_sum\n else:\n return p_sum", "title": "" }, { "docid": "55fee2f8102cd0105c05e3f30b0901b8", "score": "0.54867023", "text": "def compute_entropy(belief, config):\r\n entropy = np.zeros((config.dimension, config.dimension))\r\n for key in belief.keys():\r\n entropy[key[0], key[1]] = ss.entropy(belief[key])\r\n\r\n return entropy", "title": "" }, { "docid": 
"0d87698f693894998281670f33c7852d", "score": "0.5477619", "text": "def getCondEntropyNumeric(self, instances, attribute, split):\n\t\t# find instances that are less than or equal to the split\n\t\tx = self.attributes.index(attribute)\n\t\tlteInstances = [instance for instance in instances if instance[x] <= split]\n\t\tgtInstances = [instance for instance in instances if instance[x] > split]\n\n\t\t# calculate conditional entropy\n\t\treturn (((float(len(lteInstances))/len(instances))*self.getEntropy(lteInstances)) + \\\n\t\t((float(len(gtInstances))/len(instances))*self.getEntropy(gtInstances)))", "title": "" }, { "docid": "d71443e611e181a3fdbbfbe8acaa8b69", "score": "0.54756755", "text": "def _entropy_score(self, probas: np.ndarray):\r\n ent = entropy(probas.T) # calculate entropy\r\n ent = ent.max() - ent # make zero the minimum\r\n return ent / ent.max() # scale it to be in the [0, 1] range\r", "title": "" }, { "docid": "61f981ecc8da9865c78e88ab706650ea", "score": "0.5472929", "text": "def __get_entropy(self):\n\n str_list = list(self.domain)\n alphabet = list(set(self.domain))\n frequecy = []\n for symbol in alphabet:\n count = 0\n for sym in str_list:\n if sym == symbol:\n count += 1\n frequecy.append(float(count) / len(str_list))\n\n entropy_score = 0.0\n for f in frequecy:\n entropy_score += f * math.log(f, 2)\n entropy_score = -entropy_score\n self.shannon_entropy = entropy_score", "title": "" }, { "docid": "207da883d90697d768a2e6a8ea591ca2", "score": "0.5457986", "text": "def info_gain(\n\t\tevent: Collection,\n\t\tevent_tests: Collection[Callable],\n\t\tgiven: Collection,\n\t\tgiven_tests: Collection[Callable]) -> float:\n\tcond_entropy = conditional_entropy(event, event_tests, given, given_tests)\n\tevent_entropy = sum(entropy(probability(event, e)) for e in event_tests)\n\tgain = event_entropy - cond_entropy\n\treturn 0 if math.isnan(gain) else gain", "title": "" }, { "docid": "862bf8a7dada2771d0f5b323ecb1965c", "score": "0.5452407", "text": "def _get_entropy(self, df):\n\n\t\tentropy = 0\n\t\tfor target in np.unique(df['target']):\n\t\t\tfraction = df['target'].value_counts()[target] / len(df['target'])\n\t\t\tentropy += -fraction * np.log2(fraction)\n\n\t\treturn entropy", "title": "" }, { "docid": "2f4aa953b0c88d50504a73c7d9402c46", "score": "0.54516", "text": "def _entropy_calc(p):\n return -np.dot(p, np.log2(p))", "title": "" }, { "docid": "13e73b7497098032776462b94cef93ef", "score": "0.54450583", "text": "def heatDemand(gains = [], losses = []):\n\n\n sum = 0\n for gain in gains:\n sum -= gain\n for loss in losses:\n sum += loss\n return sum", "title": "" }, { "docid": "53b08ef06cad3a4c54995a9d166faf18", "score": "0.5441144", "text": "def ssim_loss(self, img1, img2):\n window = self._tf_fspecial_gauss(size=self.WS) # output size is (window_size, window_size, 1, 1)\n #import pdb\n #pdb.set_trace()\n\n (_, _, _, channel) = img1.shape.as_list()\n\n window = tf.tile(window, [1, 1, channel, 1])\n\n # here we use tf.nn.depthwise_conv2d to imitate the group operation in torch.nn.conv2d \n mu1 = tf.nn.depthwise_conv2d(img1, window, strides = [1, 1, 1, 1], padding = 'VALID')\n mu2 = tf.nn.depthwise_conv2d(img2, window, strides = [1, 1, 1, 1], padding = 'VALID')\n\n mu1_sq = mu1 * mu1\n mu2_sq = mu2 * mu2\n mu1_mu2 = mu1 * mu2\n\n img1_2 = img1*img1#tf.pad(img1*img1, [[0,0], [0, self.WS//2], [0, self.WS//2], [0,0]], \"CONSTANT\")\n sigma1_sq = tf.subtract(tf.nn.depthwise_conv2d(img1_2, window, strides = [1 ,1, 1, 1], padding = 'VALID') , mu1_sq)\n img2_2 = 
img2*img2#tf.pad(img2*img2, [[0,0], [0, self.WS//2], [0, self.WS//2], [0,0]], \"CONSTANT\")\n sigma2_sq = tf.subtract(tf.nn.depthwise_conv2d(img2_2, window, strides = [1, 1, 1, 1], padding = 'VALID') ,mu2_sq)\n img12_2 = img1*img2#tf.pad(img1*img2, [[0,0], [0, self.WS//2], [0, self.WS//2], [0,0]], \"CONSTANT\")\n sigma1_2 = tf.subtract(tf.nn.depthwise_conv2d(img12_2, window, strides = [1, 1, 1, 1], padding = 'VALID') , mu1_mu2)\n\n c1 = (self.k1*self.L)**2\n c2 = (self.k2*self.L)**2\n\n ssim_map = ((2*mu1_mu2 + c1)*(2*sigma1_2 + c2)) / ((mu1_sq + mu2_sq + c1)*(sigma1_sq + sigma2_sq + c2))\n\n return tf.reduce_mean(ssim_map)", "title": "" }, { "docid": "2f93ac179d53a668a0fbda8a20a79c33", "score": "0.54382175", "text": "def _compare_entropy(start_slice, end_slice, slice, difference):\n start_entropy = utils.image_entropy(start_slice)\n end_entropy = utils.image_entropy(end_slice)\n if end_entropy and abs(start_entropy / end_entropy - 1) < 0.01:\n # Less than 1% difference, remove from both sides.\n if difference >= slice * 2:\n return slice, slice\n half_slice = slice // 2\n return half_slice, slice - half_slice\n if start_entropy > end_entropy:\n return 0, slice\n else:\n return slice, 0", "title": "" }, { "docid": "80dc21c0139cdf99d5f235c7455842ee", "score": "0.5437667", "text": "def mofn_info_gain(mofntest, samples, labels):\n #Unpack the tests structure\n m = mofntest[0]\n septests = mofntest[1] \n #List comprehension to generate a boolean index that tells us which samples\n #passed the test.\n splittest = np.array([samples[:,septest[0]]>=septest[1] if septest[2] else \n samples[:,septest[0]]<septest[1] for septest in septests])\n #Now check whether the number of tests passed per sample is higher than m\n split1 = sum(splittest)>=m\n split2 = np.invert(split1)\n #Calculate original entropy\n origent = entropy(labels)\n #Get entropy of split\n afterent = (entropy(labels[split1])*(sum(split1)/len(labels)) + \n entropy(labels[split2])*(sum(split2)/len(labels)))\n gain = origent - afterent\n return gain", "title": "" }, { "docid": "b6121717460e194c79711dda484a395f", "score": "0.5424287", "text": "def infoGain(self,atrID, threashold = False):\n return self.entropy() - self.conditionalEntropy(atrID,threashold)", "title": "" }, { "docid": "7101b276e94108b35b09c10bf07e11b2", "score": "0.54229707", "text": "def calculate_entropy(self, dist):\n return -sum([p*np.log(p) for p in dist if p > 0])", "title": "" }, { "docid": "6e1cf387a47f26f411ac55b824c2fbf3", "score": "0.54221624", "text": "def entropy(self, subset):\n size = float(subset.shape[0])\n\n counter = Counter(subset[self.dataset_info.target_attr])\n\n _entropy = 0.\n\n for i, (c, q) in enumerate(counter.iteritems()):\n _entropy += (q / size) * np.log2(q / size)\n\n return -1. 
* _entropy", "title": "" }, { "docid": "28e6bd7747099ac1d8da7b9c95b2df8e", "score": "0.5422098", "text": "def calculate_entropy(dataset):\n class_labels = [row[-1] for row in dataset]\n return calculate_entropy_of_sequence(class_labels)", "title": "" }, { "docid": "2da295fafd936c2944794ae8e095cc9b", "score": "0.5421931", "text": "def relative_entropy(data):\n\n\treturn entropy(data) / 8", "title": "" }, { "docid": "71c6a08425826d1d34106ac2fd970ecf", "score": "0.5415588", "text": "def entropy_H(self, data):\n\n if not data:\n return 0.0\n\n occurences = Counter(bytearray(data))\n\n entropy = 0\n for x in occurences.values():\n p_x = float(x) / len(data)\n entropy -= p_x*math.log(p_x, 2)\n\n return entropy", "title": "" }, { "docid": "ac8bc1549c3e4036042b6388e915736d", "score": "0.5410972", "text": "def loss(min_offer, predicted):\n return MAX_GAIN-min_offer if predicted < min_offer else predicted - min_offer", "title": "" }, { "docid": "26f187f752e51da1c8ff14ef450d5960", "score": "0.5410025", "text": "def h(k,orde,X,Y_n_1):\n n_sample = len(X)\n if(2*k<n_sample):\n Y = G(orde,Y_n_1)\n last_seq_Y = Y[n_sample-k:-1]\n last_seq_X = X[n_sample-k:]\n count = 0\n val = 0\n for i in range(n_sample-2*k):\n if((X[i:i+k].all()==last_seq_X.all())and(Y[i:i+k-1].all()==last_seq_Y.all())):\n count += 1.0\n val += Y[i+k]\n if(count == 0):\n out = 0.0\n else:\n out = val/count\n else:\n out = 0.0\n \n return out", "title": "" }, { "docid": "25d86f92edc88df9c350d81f6ec26a4c", "score": "0.5402772", "text": "def get_conditional_entropy(y, x):\n return get_joint_entropy(y, x) - get_entropy(x)", "title": "" }, { "docid": "496bd2dcf9bbcf91a82faa81845cc4c4", "score": "0.539972", "text": "def entropy(labels):\n p1 = sum(labels) / len(labels)\n p0 = sum(not i for i in labels) / len(labels)\n return sum(-i * math.log2(i) for i in [p1, p0] if i)", "title": "" }, { "docid": "7e55b47709d189294b033173d203279e", "score": "0.5398796", "text": "def kl_div(d1, d2):\n dirty_logs = d1 * torch.log2(d1 / d2)\n return torch.sum(torch.where(d1 != 0, dirty_logs, torch.zeros_like(d1)), axis=1)", "title": "" }, { "docid": "673b3a1ee8dbd489eb5cc8eaee54f367", "score": "0.5397755", "text": "def kl_divergence(d1, d2):\n\n\t\tkl = 0.0\n\t\tfor w in set(d1.keys() + d2.keys()):\n\t\t\tif (w in d1 and d1[w] > 0) and (w in d2 and d2[w] > 0):\n\t\t\t\tkl += d1[w] * math.log(d1[w] / d2[w], 2)\n\t\t\telif w in d1 and d1[w] > 0 and 'UNK' in d2 and d2['UNK'] > 0:\n\t\t\t\tkl += d1[w] * math.log(d1[w] / d2['UNK'], 2)\n\t\t\telif w in d2 and d2[w] > 0 and 'UNK' in d1:\n\t\t\t\tkl += d1['UNK'] * math.log(d1['UNK'] / d2[w], 2)\n\t\t\telif 'UNK' in d2 and d2['UNK'] > 0 and 'UNK' in d1:\n\t\t\t\tkl += d1['UNK'] * math.log(d1['UNK'] / d2['UNK'], 2)\n\n\t\treturn kl", "title": "" }, { "docid": "ca1b27fd5b76b770bd767a6ff0ea5da1", "score": "0.5392462", "text": "def loss(self, H1, H2):\n\n r1 = 1e-3\n r2 = 1e-3\n eps = 1e-9\n\n # Transpose matrices so each column is a sample\n H1, H2 = H1.t(), H2.t()\n\n o1 = o2 = H1.size(0)\n\n m = H1.size(1)\n\n H1bar = H1 - H1.mean(dim=1).unsqueeze(dim=1)\n H2bar = H2 - H2.mean(dim=1).unsqueeze(dim=1)\n\n # Compute covariance matrices and add diagonal so they are\n # positive definite\n SigmaHat12 = (1.0 / (m - 1)) * torch.matmul(H1bar, H2bar.t())\n SigmaHat11 = (1.0 / (m - 1)) * torch.matmul(H1bar, H1bar.t()) + \\\n r1 * torch.eye(o1, device=self.device_)\n SigmaHat22 = (1.0 / (m - 1)) * torch.matmul(H2bar, H2bar.t()) + \\\n r2 * torch.eye(o2, device=self.device_)\n\n # Calculate the root inverse of covariance matrices 
by using\n # eigen decomposition\n [D1, V1] = torch.symeig(SigmaHat11, eigenvectors=True)\n [D2, V2] = torch.symeig(SigmaHat22, eigenvectors=True)\n\n # Additional code to increase numerical stability\n posInd1 = torch.gt(D1, eps).nonzero()[:, 0]\n D1 = D1[posInd1]\n V1 = V1[:, posInd1]\n posInd2 = torch.gt(D2, eps).nonzero()[:, 0]\n D2 = D2[posInd2]\n V2 = V2[:, posInd2]\n\n # Compute sigma hat matrices using the edited covariance matrices\n SigmaHat11RootInv = torch.matmul(\n torch.matmul(V1, torch.diag(D1 ** -0.5)), V1.t())\n SigmaHat22RootInv = torch.matmul(\n torch.matmul(V2, torch.diag(D2 ** -0.5)), V2.t())\n\n # Compute the T matrix, whose matrix trace norm is the loss\n Tval = torch.matmul(torch.matmul(SigmaHat11RootInv,\n SigmaHat12), SigmaHat22RootInv)\n\n if self.use_all_singular_values_:\n # all singular values are used to calculate the correlation (and\n # thus the loss as well)\n tmp = torch.trace(torch.matmul(Tval.t(), Tval))\n corr = torch.sqrt(tmp)\n else:\n # just the top self.n_components_ singular values are used to\n # compute the loss\n U, V = torch.symeig(torch.matmul(\n Tval.t(), Tval), eigenvectors=True)\n U = U.topk(self.n_components_)[0]\n corr = torch.sum(torch.sqrt(U))\n return -corr", "title": "" }, { "docid": "468203c2e92a17d1523bd102b7eda60a", "score": "0.5390972", "text": "def entropy(labels):\n return -sum([(p * log(p, 2)) for p in labels.values()])", "title": "" }, { "docid": "968fbf50c9d23ffb725b1596e50dc822", "score": "0.5382463", "text": "def kernel(samples, koef, v_n1, v_n2):\r\n\r\n for samp in samples:\r\n v_n1, v_n2 = koef*v_n1 - v_n2 + samp, v_n1\r\n return v_n1, v_n2", "title": "" }, { "docid": "25379e67be3c6cd669bdda4077fc95f1", "score": "0.5378578", "text": "def entropy(probabilities_arr: List[float]) -> float:\n s = 0\n for p in probabilities_arr:\n a = -np.log2(p) * p if p != 0 else 0\n s += a\n\n return s", "title": "" }, { "docid": "69c97fb7a75a39d0e85d61688c3d1f49", "score": "0.5375602", "text": "def _compare_entropy(start_slice, end_slice, slice, difference):\n start_entropy = _image_entropy(start_slice)\n end_entropy = _image_entropy(end_slice)\n if end_entropy and abs(start_entropy / end_entropy - 1) < 0.01:\n # Less than 1% difference, remove from both sides.\n if difference >= slice * 2:\n return slice, slice\n half_slice = slice // 2\n return half_slice, slice - half_slice\n if start_entropy > end_entropy:\n return 0, slice\n else:\n return slice, 0", "title": "" }, { "docid": "0b5d45fb925998b9ae39e075d8b7deab", "score": "0.5373201", "text": "def avg_kl_divergence(self, start_states, new_policy_parameters, old_policy_parameters):\n kl_sum = 0\n for start_state in start_states:\n self.set_policy_parameters(new_policy_parameters)\n _, new_action_distribution = self.get_action(start_state)\n self.set_policy_parameters(old_policy_parameters)\n _, old_action_distribution = self.get_action(start_state)\n kl_sum += stats.entropy(new_action_distribution, old_action_distribution)\n\n return kl_sum / len(start_states)", "title": "" }, { "docid": "8926d82f8cfe1ea38a3e7c568faf84f0", "score": "0.53655213", "text": "def calc_entropy(data):\r\n\r\n col = data[:,-1]\r\n _, counts = np.unique(col, return_counts=True)\r\n entropy = (counts / len(col)) * np.log2(counts / len(col))\r\n entropy = -np.sum(entropy)\r\n ###########################################################################\r\n # END OF YOUR CODE #\r\n ###########################################################################\r\n return entropy", "title": "" }, { "docid": 
"0d7752fe748759dbd5c84dc2e6544e46", "score": "0.53624135", "text": "def entropy(y):\n N = len(y)\n s1 = (y == 1).sum()\n if 0 == s1 or N == s1:\n return 0\n p1 = float(s1) / N\n p0 = 1 - p1\n return -p0 * np.log2(p0) - p1 * np.log2(p1)", "title": "" }, { "docid": "888e8057351da4878c8a0523b9073628", "score": "0.5337106", "text": "def _kl_loss(mu_0, log_sigma_0, mu_1, log_sigma_1) :\n kl = log_sigma_1 - log_sigma_0 + \\\n (torch.exp(log_sigma_0)**2 + (mu_0-mu_1)**2)/(2*math.exp(log_sigma_1)**2) - 0.5\n return kl.sum()", "title": "" }, { "docid": "6d3aedcfbdda96e7e2e6ecacc9d503e3", "score": "0.53345037", "text": "def compute_entropy(self, labels):\n\t\tprobilities = self.compute_prob(labels)\n\n\t\tentropy = 0.0\n\t\tfor key in probilities:\n\t\t\tentropy += probilities[key]*math.log2(probilities[key])\n\t\treturn -entropy", "title": "" }, { "docid": "14b72a8cea03839ca3991cae21b04ea4", "score": "0.5332006", "text": "def calculate_entropy(y):\n y = y.flatten()\n log2 = lambda x: math.log(x) / math.log(2)\n unique_labels = np.unique(y)\n entropy = 0\n for label in unique_labels:\n count = len(y[y == label])\n p = count / float(len(y))\n entropy += -p * log2(p)\n return entropy", "title": "" }, { "docid": "90b8940810326acd7aba7f57063c5e28", "score": "0.5328838", "text": "def entropy(examples, target):\n possible_values = ['S', 'C']\n bucket = [0 for _ in possible_values]\n\n for example in examples:\n bucket[possible_values.index(example[target])] += 1\n\n \n #print(len(examples))\n total_examples = sum(bucket)\n entropy = 0\n for el in bucket:\n p_el = el / total_examples\n\n entropy += (p_el * math.log2(p_el) if p_el != 0 else 0)\n \n #print(-entropy)\n return -entropy if entropy != 0 else entropy", "title": "" }, { "docid": "9eb72935632f2d2c9ba78e281deca0b5", "score": "0.5325135", "text": "def entropy2(labels, base=None):\n n_labels = len(labels)\n if n_labels <= 1:\n return 0\n value,counts = np.unique(labels, return_counts=True)\n probs = counts / n_labels\n n_classes = np.count_nonzero(probs)\n if n_classes <= 1:\n return 0\n ent = 0.\n # Compute entropy\n base = e if base is None else base\n for i in probs:\n ent -= i * log(i, base)\n return ent", "title": "" }, { "docid": "b400ca11e5c6e17019acb8bace82fc18", "score": "0.5321487", "text": "def generalized_oscillator_strength(energy_loss_eV: float, momentum_transfer_au: float,\n atomic_number: int, shell_number: int, subshell_index: int) -> numpy.ndarray:\n pass", "title": "" }, { "docid": "fd4f7c55e5e313ff2472da524306abac", "score": "0.531787", "text": "def gain_ratio(\n\t\tevent: Collection,\n\t\tevent_tests: Collection[Callable],\n\t\tgiven: Collection,\n\t\tgiven_tests: Collection[Callable]) -> float:\n\tevent_entropy = sum(entropy(probability(event, e)) for e in event_tests)\n\treturn info_gain(event, event_tests, given, given_tests) / event_entropy", "title": "" }, { "docid": "b8731a135443c7a27d21ba796791ea0e", "score": "0.5314573", "text": "def avg_loss_ratio(min_offer, predicted):\n min_offer = min_offer.ravel()\n predicted = predicted.ravel()\n numerator, denominator = gain(min_offer, predicted), gain(min_offer, min_offer)\n zero_mask = denominator==0\n denominator[zero_mask] = 1 #avoid division by zero\n tmp = numerator / denominator\n tmp[denominator==0] = 1\n return 1 - np.mean(tmp)", "title": "" }, { "docid": "f7e4c303da887facb6a7f1c97280ded4", "score": "0.5311981", "text": "def entropy(self):\n n = paddle.full(\n shape=[], fill_value=self.total_count, dtype=self.probs.dtype\n )\n support = paddle.arange(\n self.total_count + 
1, dtype=self.probs.dtype\n ).reshape((-1,) + (1,) * len(self.probs.shape))[1:]\n\n binomial_pmf = paddle.exp(self._binomial_logpmf(n, support))\n\n return (n * self._categorical.entropy() - paddle.lgamma(n + 1)) + (\n (binomial_pmf * paddle.lgamma(support + 1)).sum([0, -1])\n )", "title": "" }, { "docid": "58f1380e7e55ac452c702a00f942e469", "score": "0.53098685", "text": "def gain(data, attr, target_attr):\n \n # Count the frequency of values within a given \n # attribute for this data subset.\n col = map(lambda rec: rec[attr], data)\n uniq_val = set(col)\n uniq_cnt = map(lambda val: col.count(val), uniq_val)\n val_freq = dict(zip(uniq_val, uniq_cnt))\n # val_sum is generally the same as quantity of records unless \n # an attribute is missing for a record.\n val_sum = sum(uniq_cnt)\n \n # Calculate sum of entropy for each subset of records weighted \n # by their probability of occuring in training set.\n\n # extract all records with particular attribute value.\n # calculate entropy of the target value for that record subset.\n ent_pieces = map(lambda val: entropy(filter(lambda rec: rec[attr] == val, \n data), target_attr) * float(val_freq[val]), uniq_val)\n subset_entropy = reduce(lambda x,y: x+y, ent_pieces) / val_sum\n\n # Subtract entropy of chosen attribute from entropy of whole \n # data set with respect to target attribute and return it \n return (entropy(data, target_attr) - subset_entropy)", "title": "" }, { "docid": "5787bf00a774be6127b59803f02afa03", "score": "0.53091997", "text": "def prob_both_insertion(raw_logits, \n target_left, target_right): \n probs = tf.math.softmax(raw_logits, axis=-1) \n probs_left = probs[..., ::2]\n probs_right = probs[..., 1::2]\n probs_left = custom_gather(probs_left, target_right)\n# tf.print(\"probs left\", probs_left[0,0], summarize=-1)\n# tf.print(\"probs right\", probs_right[0,0], summarize=-1)\n probs = probs_left + probs_right \n return probs", "title": "" }, { "docid": "25b60783098ee05ebd5bbc77c759bc80", "score": "0.5304218", "text": "def prob_2_entropy(prob):\r\n n, c, h, w = prob.size()\r\n return -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c)", "title": "" }, { "docid": "083b4a64ad5e155e3214167547a60b05", "score": "0.5303754", "text": "def calculate_entropy(y, base=2):\n y = y.flatten().astype(int)\n if len(y) == 0:\n return 1.0\n label_idx = np.unique(y, return_inverse=True)[1]\n pi = np.bincount(label_idx).astype(np.float64)\n pi = pi[pi > 0]\n pi_sum = np.sum(pi)\n\n if base == 2:\n return -np.sum((pi / pi_sum) * (np.log2(pi) - np.log2(pi_sum)))\n else:\n return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))", "title": "" } ]
35637ad93933de3599450fbc0a74977a
Use self learning algo to reply ()
[ { "docid": "98cadbd8dc765fbaa7b650b5e78c562b", "score": "0.0", "text": "def quote_lsd(self, msg, status, args):\n #TODO: Shouldn't crash if structures aren't build, just an empty response\n #not sure needs to be tested\n speech = \" \".join(args[1:])\n response = lsdQuote(speech)\n msg.Chat.SendMessage(\"%s\" % response)", "title": "" } ]
[ { "docid": "223ea358153fa91ca62105b9bbff6062", "score": "0.676823", "text": "def learning(self):\n pass", "title": "" }, { "docid": "482b797911d143e4a165ce58963e975d", "score": "0.6712703", "text": "def learn(self, reward, observation):", "title": "" }, { "docid": "4ec2d8b0680ce3c4eddebf5d2db8c173", "score": "0.6442513", "text": "def learn(self):\n self.agent.learn()", "title": "" }, { "docid": "a9c9febac223385c8a6abb1726d0189b", "score": "0.64398384", "text": "def learn (self, data):\n pass", "title": "" }, { "docid": "4b975775b5f518ccaff5055effa5f3fb", "score": "0.63168657", "text": "def handle_learn(message):\n\n return alexa.handle_learn(message)", "title": "" }, { "docid": "84bb801586d7ae3836cdc79829ab7d35", "score": "0.6298461", "text": "def learn(self, obs):\n pass", "title": "" }, { "docid": "ad5cca2ae3115ebeeb12611761a742f0", "score": "0.6284912", "text": "def learn(self, state, action, reward, sprime):\n pass", "title": "" }, { "docid": "b79ea98c5bbd9cb298a5d777d2b8f076", "score": "0.6275841", "text": "def learn(self):\n raise NotImplementedError(\"Please implement this method.\")", "title": "" }, { "docid": "bd566a82db258c2049f3215ea6acf67e", "score": "0.6135136", "text": "def learn(self) -> None:\n self.handle(events.Learn())", "title": "" }, { "docid": "1744b346b4a5a26a99b10004cde95844", "score": "0.6110929", "text": "def learn(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "05e67c0faebc269e80c948461912a92d", "score": "0.6104503", "text": "def learn(self, my_move, their_move):\n pass", "title": "" }, { "docid": "bf2e8944cd064629b560c503f126dfdb", "score": "0.6086123", "text": "def reward(self, res):", "title": "" }, { "docid": "6aa58cc8eb138d74b771a77da5397925", "score": "0.60454535", "text": "def learn(self, a: int, r: float):\n pass", "title": "" }, { "docid": "616dcff6eec4e549342268ce1cfb980b", "score": "0.6037317", "text": "def train(self):", "title": "" }, { "docid": "b182021006c22242c6b422e4d3e5a795", "score": "0.60211724", "text": "def learnV(cls, state, targetSignal):\n cls.agent.setInput(state)\n cls.agent.setTargetSignal(targetSignal)\n k = 0\n while k < cls.LEARN_ROUNDS_AGENT:\n cls.agent.responseValue()\n cls.agent.learnByBackpropagation()\n print cls.v(state)\n k += 1\n #sleep(0.1)", "title": "" }, { "docid": "8b07fe0d7488a0551f531a85e9cccea0", "score": "0.59447116", "text": "def manual_experimenting_main():\n\n print(\"Loading embedding\")\n emb = load_embedding(10_000, 0, True, 200)\n ws = torch.tensor(emb.ws, device=DEVICE)\n\n print(\"Loading model\")\n model = torch.load(\"../data/output/rnn_model.pt\")\n model.eval()\n model.to(DEVICE)\n\n print(\"Applying to tweets\")\n tweets = [\n \"i am happy\",\n \"i am not happy\",\n \"i am sad\",\n \"i am not sad\",\n \"go fuck yourself\",\n \"machine learning is good\",\n \"machine learning is bad\",\n \"machine learning is not good\",\n \"machine learning is not bad\",\n \"machine learning is neither good nor bad\",\n \"violence is good\",\n \"murder is good\",\n \"machine learning\",\n \"i'm going to the pool\",\n \"bad is good\",\n \"bad good\",\n \"machine studying\",\n \"<user>\",\n \"good\",\n \"not good\",\n \"i like studying\",\n \"i really like studying\",\n \"happy sad\",\n \"happy happy sad\",\n \"happy happy happy sad sad\",\n \"happy\",\n \"happy happy\",\n \"sad\",\n \"sad sad\",\n ]\n wrapped_tweets = Tweets(pos=tweets, neg=[])\n\n x, _, lens = construct_sequential_tensors(emb, wrapped_tweets, 1, 40, zero_row=False)\n y_pred = model.forward(x, lens, ws)\n\n for i in 
range(len(tweets)):\n happyness = torch.softmax(y_pred[i], dim=0)[1].detach().cpu().numpy()\n print(f\"{happyness:.2f}, {tweets[i]}\")", "title": "" }, { "docid": "8c8961acc676ff147f97a0fba0dffe07", "score": "0.59300345", "text": "def get_learn_after_each_decision(self):\r\n return 0", "title": "" }, { "docid": "51bbc3566caa72571e16ecf0b531f881", "score": "0.5911579", "text": "def train():\n print 'training...'", "title": "" }, { "docid": "af08f3a0c1faa6831b2efd1772eb6c8f", "score": "0.59087986", "text": "def learn(self, state):\n self.learning_step += 1\n\n print(\"Estado inicial: {}\".format(state))\n # Repito hasta que state sea terminal\n while not self.is_terminal(state):\n action = random.choice(self.possible_actions(state))\n new_state = self.move(state, action)\n # 3) Calculo el nuevo valor de Q(s,a)\n\n self.set_q(state, action, self.new_q_value(state, action))\n\n # 4) Actualizo s\n print(\"{} ---> {}\".format(state, new_state))\n state = new_state", "title": "" }, { "docid": "be618e1d00134b7e8f15aff9e23bc4f0", "score": "0.5902499", "text": "def predict():\n pass", "title": "" }, { "docid": "cc13a43cd9c6865e389198121fc0f36a", "score": "0.5900932", "text": "def learn(self, experiences):\n \n # Reshape the experience tuples in separate arrays of states, actions\n # rewards, next_state, done\n # Your are converting every memeber of the tuple in a column or vector\n states = np.vstack([e.state for e in experiences if e is not None])\n actions = np.array([e.action for e in experiences if e is not None]).astype(np.float32).reshape(-1,self.action_size)\n rewards = np.array([e.reward for e in experiences if e is not None]).astype(np.float32).reshape(-1,1)\n dones = np.array([e.done for e in experiences if e is not None]).astype(np.uint8).reshape(-1,1)\n next_states = np.vstack([e.next_state for e in experiences if e is not None])\n\n # Firs we pass a batch of next states to the actor so it tell us what actions\n # to execute, we use the actor target network instead of the actor local network\n # because of the advantage principle\n actions_next = self.actor_target.model.predict_on_batch(next_states)\n\n # The critic evaluates the actions taking by the actor and generates the\n # Q(a,s) value of those actions. This action, state tuple comes from the\n # ReplayBuffer not from interacting with the environment.\n # Remember the Critic or value function inputs is states, actions\n Q_targets_next = self.critic_target.model.predict_on_batch(([next_states,actions_next]))\n\n # With the Q_targets_next that is a vector of action values Q(s,a) of a random selected\n # next_states from the replay buffer. 
We calculate the target Q(s,a).\n # For that we use the TD one-step Sarsa equations\n # We make terminal states target Q(s,a) 0 and Non terminal the Q_targtes value\n # This is done to train the critic in a supervise learning fashion.\n Q_targets = rewards + self.gamma * Q_targets_next * (1 - dones)\n self.critic_local.model.train_on_batch(x=[states,actions],y=Q_targets)\n\n # Train the actor\n action_gradients = np.reshape(self.critic_local.get_action_gradients([states,actions,0]),\n (-1, self.action_size))\n self.actor_local.train_fn([states, action_gradients,1]) # Custom training function\n\n # Soft-update target models\n self.soft_update(self.critic_local.model, self.critic_target.model)\n self.soft_update(self.actor_local.model, self.actor_target.model)", "title": "" }, { "docid": "8be46e76d25b09351072510ecf19ec23", "score": "0.58758116", "text": "def main():\n predictor = ArgMaxPredictor(model_fname='data/lda_graph2.pickle')\n print 'Loading Model. . .'\n predictor.loadModel()\n likes = set([960598,666644,632709,551024,932073,960606,824431,914853,1093859,1053814])\n dislikes = set([515584,632705,1029423,954024,741969,816553,741970,682798,816570,716589])\n print 'Providing initial feedback. . .'\n print 'likes =', likes\n print 'dislikes =', dislikes\n predictor.feedback(likes, dislikes)\n print 'Soliciting prediction. . .'\n new_items = predictor.predict(10)\n print new_items", "title": "" }, { "docid": "011ff6cdbe863979dadae1496d9db797", "score": "0.5858322", "text": "def userprediction(self):\n pass", "title": "" }, { "docid": "ded2e7e5ca58785250a841bd7897cd3d", "score": "0.5832046", "text": "def learn(self, inputs):\n\n self.recall(inputs)\n\n for link in self.link_order_learn:\n link.learn()\n\n return self.get_result()", "title": "" }, { "docid": "1bda39ff39926510df4c8eb4468d4fed", "score": "0.58245414", "text": "def train(self):\r\n pass", "title": "" }, { "docid": "472489b1ac7118709cb6a076a0aab4b5", "score": "0.5823469", "text": "def main(cls, args):\n last_input = [None]*10\n last_input[0] = 1\n last_input[1] = 0\n last_input[2] = 0\n last_input[3] = -1\n last_input[4] = 0\n last_input[5] = 1\n last_input[6] = -1\n last_input[7] = 0\n last_input[8] = 1\n # bias\n last_input[9] = 1\n toLearn = 0.5\n cls.agent.initTestInput()\n TestANN.learnV( last_input, toLearn)\n print \"\\n\"\n print \"Soll: \"\n print \"[\" + str(toLearn) + \"] \"\n print \"KNN: \"\n print \"[\" + str(TestANN.v( last_input)) + \"] \"\n \n cls.agent.printOutWeightTable()\n \n cls.agent.saveNetToFile(\"test123\")\n cls.agent.printOutWeightTable()\n \n print \"agent2\"\n agent2 = Agent()\n agent2.loadNetFromFile(\"test123\")\n agent2.printOutWeightTable()", "title": "" }, { "docid": "f39ffbf344ce598fc5777944a54eba68", "score": "0.58091277", "text": "def train(self,data):\r\n learned = False\r\n iteration = 0\r\n while not learned:\r\n globalError = 0.0\r\n for x in data: # for each sample\r\n r = self.response(x) \r\n if x[2] != r: # if we have a wrong response\r\n iterError = x[2] - r # desired response - actual response\r\n self.updateWeights(x,iterError)\r\n globalError += abs(iterError)\r\n iteration += 1\r\n if globalError == 0.0 or iteration >= 100: # stop criteria\r\n print ('iterations',iteration)\r\n learned = True # stop learning\r", "title": "" }, { "docid": "63ffb9e44461968aabfb68ebabf630c6", "score": "0.5774292", "text": "def learn_after_decision(self, state, decision, reward):\r\n return", "title": "" }, { "docid": "9dbe1fb48e8a87413b8f528546c64c1f", "score": "0.5745744", "text": "def 
nlp_processing(self):\n ...", "title": "" }, { "docid": "8a6478ba0e22a4cdcf9b403d6937c8b5", "score": "0.57289726", "text": "def _train(self):", "title": "" }, { "docid": "ee3fcb47ee1fa5c46a6fba0b2b5835e3", "score": "0.5714953", "text": "def go(self):\n # Reading bots choices\n r1 = self.bots[0].get_ans()\n r2 = self.bots[1].get_ans()\n\n # Distribute points\n if r1==\"C\" and r2==\"C\":\n self.bots[0].score+=5\n self.bots[1].score+=5\n elif r1==\"C\" and r2==\"T\":\n self.bots[0].score+=0\n self.bots[1].score+=10\n elif r1==\"T\" and r2==\"C\":\n self.bots[0].score+=10\n self.bots[1].score+=0\n elif r1==\"T\" and r2==\"T\":\n self.bots[0].score+=1\n self.bots[1].score+=1\n else:\n raise ValueError(\"Your answer doesn't correspond to the game: r1= {r1}, r2 = {r2}\".format(r1 = r1, r2 = r2))\n\n # Send results to other bots\n self.bots[0].send_msg(r2+'\\n')\n self.bots[1].send_msg(r1+'\\n')", "title": "" }, { "docid": "b14e33bc86ec1412981dce97206518e7", "score": "0.5705996", "text": "def training(self, args):\n pass", "title": "" }, { "docid": "217d8cb098922df17eafa73ad2fa3a62", "score": "0.5691048", "text": "def get_learn_after_each_trial(self):\r\n return 0", "title": "" }, { "docid": "bb845d0f7008744b0c8c507ab364edf4", "score": "0.5691016", "text": "def main(discount=0.01, epochs=100, learning_rate=0.01):\r\n\r\n n_states = 10\r\n n_actions = 20\r\n\r\n # Learn Reward ###########################\r\n IRL = IRL_tools(X_train,y_train,\r\n trans_prob_names=trans_prob_data,\r\n N_STATES=n_states,N_ACTIONS=n_actions)\r\n\r\n trajectories = IRL.idx_demo\r\n r = maxent.irl(IRL.feature_matrix, IRL.n_actions, discount,\r\n IRL.transition_probability, trajectories, epochs, learning_rate)\r\n\r\n # Update training notes ###########################\r\n time_end = datetime.now()\r\n time_elapsed = time_end - time_begin\r\n training_notes[\"Time End\"] = time_end\r\n training_notes[\"Time Elapsed\"] = time_elapsed\r\n IRL.notes = training_notes\r\n\r\n # Recover Policy ###########################\r\n policy = maxent.find_policy(IRL.n_states,r,IRL.n_actions,discount,IRL.transition_probability)\r\n\r\n # Save Model ###########################\r\n if SAVE_MODEL:\r\n date = time_begin.strftime(\"%m_%d_%Y\") # get current date\r\n base = 'IRL'\r\n data_name = DataHandler.append_names(train_data_names)\r\n feature_prefix = DataHandler.feature_prefix()\r\n SA_prefix = f'{n_states}sx{n_actions}a'\r\n MODEL_NAME = f\"{date}_{base}_{data_name}_{feature_prefix}_{SA_prefix}\" # Construnct final model name\r\n if VERBOSE: print(\"Saving Model as:\\n|\\t \", MODEL_NAME)\r\n\r\n IRL.save_obj(MODEL_NAME,reward=r,policy=policy,ENABLE=SAVE_MODEL)\r\n\r\n # Plot Results ###########################\r\n IRL.plot_test(policy,X_test,y_test)\r\n if np.shape(X_test)[1]==2:\r\n IRL.reward_output(reward=r)\r\n IRL.reward_plot(reward=r)\r\n if VERBOSE:\r\n print(\"\\nFINISHED...\")\r\n time_end = datetime.now()\r\n time_elapsed = time_end - time_begin\r\n print(f\"|\\t Elapse time (seconds): {time_elapsed.total_seconds()}\")", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.56790227", "text": "def train(self):\n pass", "title": "" }, { "docid": "d5d483db76ced009eafdb2483b6d55d0", "score": "0.56790227", "text": "def train(self):\n pass", "title": "" }, { "docid": "cb7f5043d72578e6984d4708507e1721", "score": "0.56770843", "text": "def auto_learn():\n return Auto_learn(arch,resume,attribute(),joint(),joint2())", "title": "" }, { "docid": "c8187286b9c4cfe2eea1a536116c2c8d", "score": "0.5671442", "text": "def 
predict(self, text):", "title": "" }, { "docid": "43712a80b1d6d5fe61cd48a1671c26be", "score": "0.5670092", "text": "async def nhentai(self, ctx: commands.Context):", "title": "" }, { "docid": "d8b39517b34f09b01d209a9d3a36a57c", "score": "0.5669122", "text": "def train(n):\n print(\"Training for {} games\".format(n))\n agent = NimAI()\n\n # Play n games\n for i in range(n):\n #if (i+1) % 100 == 0:\n #print(f\"Playing training game {i + 1}\")\n game = Nim()\n\n # Keep track of last move made by either player\n last_move = {\n 0: {\"state\": None, \"action\": None},\n 1: {\"state\": None, \"action\": None}\n }\n\n # Game loop\n while True:\n\n # Keep track of current state and action\n state = game.piles.copy()\n action = agent.choose_action(game.piles)\n\n # Keep track of last state and action\n last_move[game.player][\"state\"] = state\n last_move[game.player][\"action\"] = action\n\n game.take_turn(action)\n new_state = game.piles.copy()\n\n # When game is over, update Q values with rewards\n if game.winner is not None:\n agent.qlearning_step(state, action, new_state, -1)\n agent.qlearning_step(\n last_move[game.player][\"state\"],\n last_move[game.player][\"action\"],\n new_state,\n 1\n )\n break\n\n # If game is continuing, no rewards yet\n elif last_move[game.player][\"state\"] is not None:\n agent.qlearning_step(\n last_move[game.player][\"state\"],\n last_move[game.player][\"action\"],\n new_state,\n 0\n )\n\n #print(\"Done training\")\n\n # Return the trained AI\n return agent", "title": "" }, { "docid": "b93fd3660bc3a6370da4fd9389cea1d0", "score": "0.5659167", "text": "def learn(self, s, a, reward, sprime, done):\n raise NotImplemented", "title": "" }, { "docid": "acd6abdd8e390a8e210fdd9997420387", "score": "0.56566423", "text": "def talk(self, ask):\n # Process text\n ask = self.post_process_text(ask)\n\n # Translate to english\n if self.translate:\n ask = self.spanish_to_english(ask)\n ask = self.replace_translation_artifacts_sp_en(ask)\n\n self.temporal_context.append(ask)\n\n # Set context: last 2 exchanges + first context\n parsed_temp_context = self.generator.tokenizer.eos_token.join(\n self.temporal_context[-3:]\n )\n context_input = self.generator.tokenizer.eos_token.join(\n [self.parsed_context, parsed_temp_context, \"\"]\n )\n\n # Get max content len\n max_length = len(self.generator.tokenizer.encode(context_input)) + 1000\n\n # Generate text and parse data\n generated_text = self.generator(context_input, max_length=max_length)\n generated_text = generated_text[0][\"generated_text\"].split(\n self.generator.tokenizer.eos_token\n )[-1]\n generated_text = self.post_process_text(generated_text)\n\n # Sentiment\n if self.sentiment_analisis:\n sentiment = self.get_sentiment(generated_text)\n\n # Add response to context\n self.temporal_context.append(generated_text)\n\n # Translate to spanish\n if self.translate:\n generated_text = self.english_to_spanish(generated_text)\n generated_text = self.replace_translation_artifacts_en_sp(generated_text)\n\n if self.sentiment_analisis:\n return generated_text, sentiment\n\n return generated_text", "title": "" }, { "docid": "d999ebfd9709a4488adaa59e909c05a2", "score": "0.5651517", "text": "def respond_to(model, text):\n input_y = add_start_token(PAD * np.ones((1, MAX_MESSAGE_LEN)))\n idxs = np.array(to_word_idx(text)).reshape((1, MAX_MESSAGE_LEN))\n for position in range(MAX_MESSAGE_LEN - 1):\n prediction = model.predict([idxs, input_y]).argmax(axis=2)[0]\n input_y[:,position + 1] = prediction[position]\n return 
from_word_idx(model.predict([idxs, input_y]).argmax(axis=2)[0])", "title": "" }, { "docid": "9d7723f80f247d07702a56b09cf971d5", "score": "0.56501067", "text": "def _do_training(self):\n pass", "title": "" }, { "docid": "e4cedc2b63d5b4f797a601d1783150c7", "score": "0.563692", "text": "def pred():", "title": "" }, { "docid": "ca49125d9c1d4bd459d0c687a00f3ee9", "score": "0.56170905", "text": "def run(self):\r\n #print(self.skids)\r\n self.calc_score(self.this_neuron)\r\n\r\n return", "title": "" }, { "docid": "5e0ea372614693c5b558a9f7231df443", "score": "0.5604904", "text": "def deep_q_learning(env, state_dim, name_conversion_state, mem_size, gamma,\\\n epsilon, learning_rate,max_iter,batch_size ,name, model = None, constraints = False, optimizer = 'SGD',update_target = 5) :\n # Initialise the conversion for the states\n convert_state = ConversionState(env, name_conversion_state ).conversion\n\n # Initialisation of the agent\n agent = DQAgent( env.n_actions, state_dim , convert_state ,mem_size, gamma, epsilon, learning_rate, model, constraints,env.recommended, optimizer)\n # List of all the rewards\n all_reward = []\n all_loss = []\n # Fill the memory with random experiences\n if agent.memory.size() < batch_size :\n\n pre_trained_mem(agent.memory,batch_size + 1, env)\n\n for e in range(max_iter) :\n state = env.refresh()\n tot_reward = 0\n done = False\n tot_loss = 0\n update_time = 0\n\n while not done :\n # Simulation until the user leaves\n\n action = agent.act(state)\n next_state, reward , done = env.step(state, action)\n # Add the experience in the memory\n agent.memorize(state, action, reward, next_state , done)\n\n state = next_state\n tot_reward +=reward\n\n if done :\n all_reward.append(tot_reward)\n all_loss.append(tot_loss)\n clear_output(True)\n print(\"Episode: {}/{}, Reward : {}\"\n .format(e, max_iter, tot_reward))\n\n\n\n # Train the network\n\n if update_time >= update_target :\n # Update the target\n current_loss = agent.learn(batch_size, True)\n update_time = 0\n else :\n # Continue without updating the target\n current_loss = agent.learn(batch_size, False)\n # Add one to timestep to update the time for the target network\n update_time +=1\n\n tot_loss += current_loss\n\n\n# agent.save(name)\n\n return agent, all_reward, all_loss", "title": "" }, { "docid": "3bacfa793f4e94ba233da8da40c2a12d", "score": "0.5592096", "text": "def _utter(self, features, sentences):\n batch_size = features.size(0)\n # (batch_size x nbr_distractors+1 / ? (descriptive mode depends on the role of the agent) x nbr_stimulus \n # x mm_ponderer_depth_dim=thought_space_depth_dim+5 x ..nbr_visual_entity.. 
)\n \n # The operation (max/min) to use during the computation of the sentences\n # depends on the current role of the agent, that is determined by \n # `sentences==None` ==> Speaker (first round).\n # TODO: account for multi-round communication...\n if sentences is None: operation = torch.max \n else: operation = torch.min \n\n # Similarly, as a speaker/teacher, it is assumed that `target_idx=0`.\n # TODO: decide on a strategy for the listener/student's predicted_target_idx's argument...\n predicted_target_idx = torch.zeros((batch_size, )).long()\n if features.is_cuda: predicted_target_idx = predicted_target_idx.cuda()\n\n self.allowed_vocab_size = self.vocab_size//2\n if False:#self.train:\n logits = 0.5*torch.ones(self.vocab_size-1).float()\n logits[self.allowed_vocab_size] = 1.0\n # [0, ..., self.vocab_size-2]\n # allowed_vocab_size >= 2: \n allowed_vocab_size = self.vocab_size - torch.distributions.categorical.Categorical(logits=logits).sample()\n else:\n allowed_vocab_size = self.vocab_size\n\n # Utter the next sentences:\n next_sentences_widx, \\\n next_sentences_logits, \\\n next_sentences_one_hots = self._compute_sentence(features=features,\n target_idx=predicted_target_idx,\n _reason=self._reason,\n allowed_vocab_size=allowed_vocab_size,\n vocab_size=self.vocab_size,\n max_sentence_length=self.max_sentence_length,\n operation=operation,\n vocab_stop_idx=self.vocab_size-1,\n use_obverter_threshold_to_stop_message_generation=self.kwargs['use_obverter_threshold_to_stop_message_generation'],\n use_stop_word=False,\n _compute_tau=None,#self._compute_tau,\n not_target_logits_per_token=self.not_target_logits_per_token if self.use_learning_not_target_logit else None,\n logger=self.logger)\n\n return next_sentences_widx, next_sentences_logits, next_sentences_one_hots, self.embedding_tf_final_outputs", "title": "" }, { "docid": "3631e0addae88863179e643c0ff394d7", "score": "0.55767596", "text": "def learn(self, s, p_actions, a, r, ns, np_actions, na, terminal):\n self.store_samples(s, a, r, ns, na, terminal)\n if terminal:\n self.episode_terminated()\n if self.samples_count % self.max_window == 0:\n self.batch_learn()", "title": "" }, { "docid": "8b1ac68ed8fe4541db767c048c8432b0", "score": "0.5576143", "text": "def test_fasttext_nn(self):\n\n embedding_model = EmbeddingWrapper.EmbeddingWrapper(\"fasttext\", \"twitter\", 300)\n \n results = embedding_model.get_most_similar(\"girl\", 10)\n \n # Result from the command line tool\n # girlz 0.677679\n #gal 0.667554\n #boy 0.662205\n #girls 0.656006\n #girll 0.655023\n #woman 0.638082\n #bbygirl 0.606997\n #girlll 0.60681\n #3girllll 0.595928\n #girlfriend 0.592626\n\n top_words_correct5 = [\"girlz\", \"gal\", \"boy\", \"girls\", \"girll\"]\n top_scores_correct5 = [0.677679, 0.667554, 0.662205, 0.656006, 0.655023]\n\n for i, word in enumerate(top_words_correct5):\n npt.assert_equal(results[i][0], word)\n npt.assert_almost_equal(results[i][1], \n top_scores_correct5[i], decimal=3)\n \n npt.assert_equal(len(results), 10)\n\n # Check single calculation\n single_result = np.dot(embedding_model.get_norm_embedding(\"girl\"), \n embedding_model.get_norm_embedding(top_words_correct5[3]))\n npt.assert_almost_equal(single_result, results[3][1])\n\n\n # The code was developed with fasttext 0.8, \n # before some of these functionalities were provided in Python\n results = embedding_model.get_most_similar(\"going\", 10)\n results_fasttext_impl = embedding_model.f.get_nearest_neighbors('going')\n\n for i, (score, word) in enumerate(results_fasttext_impl):\n 
npt.assert_equal(results[i][0], \n word)\n npt.assert_almost_equal(results[i][1], \n score, decimal=3) \n\n\n #Query triplet (A - B + C)? brother sister sis\n #bro 0.737392\n #brooo 0.629325\n #broooo 0.613188\n #fam 0.585725\n #guyyy 0.580572\n #brooooo 0.579555\n #brotha 0.569735\n #broooooo 0.568193\n #dawggg 0.561648\n #broski 0.558858\n\n result = embedding_model.get_analogy(\"brother\", \"sis\", \"sister\", \n 5, normalize_before_query=False)\n npt.assert_equal(result[0][0], \"bro\")\n npt.assert_almost_equal(result[0][1], 0.737392, decimal=3)\n npt.assert_equal(result[2][0], \"broooo\")\n npt.assert_almost_equal(result[2][1], 0.613188, decimal=3)\n npt.assert_equal(len(result), 5)\n\n # Seems like analogies in cmd tool of fasttext (0.8) was without normalization,\n # but Python implementation is, matching gensim's behavior\n results_fasttext_impl = embedding_model.f.get_analogies(\"goin\", \"going\", \"doing\")\n results = embedding_model.get_analogy(\"doing\", \"goin\", \"going\", \n 10, normalize_before_query=True)\n\n for i, (score, word) in enumerate(results_fasttext_impl):\n npt.assert_equal(results[i][0], \n word)\n npt.assert_almost_equal(results[i][1], \n score, decimal=3)", "title": "" }, { "docid": "aed769642378ce5b75a499e7660cf974", "score": "0.557407", "text": "def train(self, *args: Any, **kwargs: Any) -> Any:", "title": "" }, { "docid": "47d5642279af38668ca4e612d125f3d1", "score": "0.5569358", "text": "def decide_next(self, messages):\n\n the_chosen_one = list(messages.keys())[-1]\n rtas = self.answers.copy()\n rtas.insert(0, messages[the_chosen_one].get_bot_predict()) \n print(\"RESPUESTAS ---> \" + str(rtas))\n return [rtas, the_chosen_one]", "title": "" }, { "docid": "7ccdd93133f370bdce3f7900871f32b1", "score": "0.5563182", "text": "def train(self, exploration):", "title": "" }, { "docid": "3cd176bcec0000fdd38da0ce11027f7a", "score": "0.5557039", "text": "def test_training_adds_statements(self):\n conversation = [\n \"Hello\",\n \"Hi there!\",\n \"How are you doing?\",\n \"I'm great.\",\n \"That is good to hear\",\n \"Thank you.\",\n \"You are welcome.\",\n \"Sure, any time.\",\n \"Yeah\",\n \"Can I help you with anything?\"\n ]\n\n self.trainer.train(conversation)\n\n response = self.chatbot.get_response(\"Thank you.\")\n\n self.assertEqual(response.text, \"You are welcome.\")", "title": "" }, { "docid": "549ee25d3394ba6ab88cd658350a3fea", "score": "0.5549913", "text": "def run(self, bot, user, msg, tag_info):\n self.responses = bot.config.responses[\"KappaGame\"]\n cmd = msg.strip()\n\n if not self.active:\n self.active = True\n self.n = random.randint(1, 25)\n self.answered = []\n print(\"Kappas: \" + str(self.n))\n bot.write(self.responses[\"start_msg\"][\"msg\"])\n else:\n if msg == \"!kstop\" and bot.get_permission(user) not in [\n Permission.User,\n Permission.Subscriber,\n ]:\n self.close(bot)\n bot.write(self.responses[\"stop_msg\"][\"msg\"])\n return\n\n i = self.count_emotes(cmd, \"Kappa\")\n if i == self.n:\n var = {\"<USER>\": bot.twitch.display_name(user), \"<AMOUNT>\": self.n}\n bot.write(replace_vars(self.responses[\"winner_msg\"][\"msg\"], var))\n bot.ranking.increment_points(user, self.kappa_game_points, bot)\n bot.game_running = False\n self.active = False\n self.answered = []\n elif i != -1:\n if i not in self.answered:\n var = {\"<AMOUNT>\": i}\n bot.write(replace_vars(self.responses[\"wrong_amount\"][\"msg\"], var))\n self.answered.append(i)", "title": "" }, { "docid": "a175ae56e62a5ae96cf71116fa190556", "score": "0.5527238", "text": "def 
learn(self,S,A,Q,V):\n \n self.opt([S,A,Q,V])", "title": "" }, { "docid": "496264e07c1510a0f003fc853332c99f", "score": "0.5522587", "text": "def train(self):\n pass", "title": "" }, { "docid": "5fdd3c9c7a0c703e83fc072652ae3e28", "score": "0.5521793", "text": "def learn(\n self, reward: int, action: int, current_state: int, old_state: int, **kwargs: Any\n ) -> None:\n pass", "title": "" }, { "docid": "09a6f6e19e825f757c11d5d82e2ae271", "score": "0.55184615", "text": "def fake_ner(self, keep_gender = False, more_params = None):", "title": "" }, { "docid": "e574224a73b1cb813652c8c323e63f35", "score": "0.551701", "text": "def main():\n sound_path = []\n textgrid_path = []\n # change the data to your own recorded sounds, make sure you have wav file and TextGrid file which\n # mark every vowel with some character, and every word end must be marked with \"wordend\"\n # theses recordings are only [a], [e], [i], [o], [u] sequence to test the pipeline flow,\n # the phonological rule learner tests are in the \"phonological_learner\" py file.\n for i in range(1, 5):\n sound_path.append(\"recordings\\\\aeiou{}.wav\".format(str(i)))\n textgrid_path.append(\"recordings\\\\aeiou{}.TextGrid\".format(str(i)))\n data = signal_parser.parse_input_sound(sound_path, textgrid_path) # from sound to vowel objects with f1 and f2\n clustered_data = mdl_clustering.mdl_cluster(data) # cluster into main values\n final_tagged_data = phonology_learner.extract_features(clustered_data.cluster) # add phonological features\n update_data(data, final_tagged_data) # update the input data with the phonological features\n lexicon = find_lexicon(data) # get lexicon from data\n data = separate_data_into_words(data)\n model = phonology_learner.MdlPhonology(final_tagged_data.keys(), lexicon, data, POSSIBLE_FEATURES)\n print model\n model = phonology_learner.mdl_phonology_learner(model)\n print model", "title": "" }, { "docid": "073810d13986796713505942ed45cdb0", "score": "0.5516949", "text": "def decide_next(self, messages):\n rtas = self.answers.copy()\n destinies = []\n for sender in messages.keys():\n rtas.insert(0, messages[sender].get_bot_predict()) \n destinies.insert(0, sender)\n print(\"RESPUESTAS ---> \" + str(rtas))\n return [rtas, destinies] #rtas y destino. 
", "title": "" }, { "docid": "73a775cec7476477f6aa6ea383043298", "score": "0.55136186", "text": "def bot_answer(update, context):\n question = update.message.text\n answer = go_bot(question)\n print(question, \"|\", answer)\n print(stats)\n print()\n # answer = question + \"|\" + answer\n update.message.reply_text(answer)", "title": "" }, { "docid": "df2d7c2f4b3eaaa553327d5a9960a76c", "score": "0.5512549", "text": "def test_predict():\n recommender = SLIM(alpha=0.1, l1_ratio=1e-3, seed=0)\n utils.test_binary_recommend_ml100k(recommender, 0.1)", "title": "" }, { "docid": "76d7253cf8c9414fb44f0b43f9c959e3", "score": "0.55078363", "text": "def get_retrieved_knowledge(self, message):", "title": "" }, { "docid": "abe80c3faeec23b3d33946f681268df5", "score": "0.550651", "text": "def create_features(self, answer):\n # Get the teacher's stuff\n a_stopwords = sf.remove_stopwords(self.teacher_answer)\n a_stemmed = sf.stem_sentence(a_stopwords)\n a_stemmed_ordered = sf.order_sentence(a_stemmed)\n\n teacher_answers = [\n\n a_stemmed,\n a_stemmed_ordered,\n\n ]\n \n \n # Change sentence into multiple versions\n log = dict()\n log['student_answer'] = answer\n log['teacher_answer'] = self.teacher_answer\n log['q_answer'] = answer\n log['q_stopwords'] = sf.remove_stopwords(answer)\n log['q_stemmed'] = sf.stem_sentence(answer)\n log['q_stem_ordered'] = sf.order_sentence(log['q_stemmed'])\n\n \n # Might need to save scaling until jsut before modeling\n log['wordcount'] = sf.word_count(answer)\n log['wordcount'] = sf.scale_column(self.word_scaler, log['wordcount'])\n\n \n# Stem sim\n log['stem_g_similarity'] = sf.generic_similarity(log['q_stemmed'], a_stemmed)\n log['stem_j_similarity'] = sf.jaccard_similarity(log['q_stemmed'], a_stemmed)\n log['stem_c_similarity'] = sf.cosine_similarity(log['q_stemmed'], a_stemmed)\n # Ordered\n log['stem_ordered_g_similarity'] = sf.generic_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n log['stem_ordered_j_similarity'] = sf.jaccard_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n log['stem_ordered_c_similarity'] = sf.cosine_similarity(log['q_stem_ordered'], a_stemmed_ordered)\n\n\n \n # Appending New Answer\n self.new_answers = self.new_answers.append(log, ignore_index = True)\n \n # Entity Extraction\n \"\"\"\n I should use ALLL The combinations, ordered, stem/lem\n \"\"\"\n types_of_sentences = [\n 'q_stemmed',\n 'q_stem_ordered',\n ]\n \n for sent_type, teach_ans in zip(types_of_sentences, teacher_answers):\n \n self.new_answers = sf.unigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)\n self.new_answers = sf.bigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)\n self.new_answers = sf.trigram_entity_extraction(self.new_answers, sent_type, sent_type, teach_ans)", "title": "" }, { "docid": "f27d08dda935f50f61620e0cfcdc9bbc", "score": "0.5505268", "text": "def predict(self, *args, **kwargs) -> object:", "title": "" }, { "docid": "52c32cfb775f30d683fb931b949f093a", "score": "0.5503786", "text": "def main():\n\n response = {\n \"version\": request.json['version'],\n \"session\": request.json['session'],\n \"response\": {\n \"end_session\": False\n }\n }\n\n if request.json['session']['new']:\n response['response']['text'] = \"Привет подписчикам YouTube канала andreiliphd! Добро пожаловать в навык графовый кот. \\\n Это приложение использует нейронную сеть в \\\n качестве демонстрации. Нейронная сеть не была \\\n натренирована. 
Используется PyTorch версии \" + torch.__version__\n return json.dumps(\n response,\n ensure_ascii=False,\n indent=2\n )\n\n request_json = request.json\n input = tokenizer(request_json['request']['original_utterance'], return_tensors=\"pt\", padding='max_length', max_length=512)\n output = train(input, torch.tensor(1).reshape(1))\n argmax = torch.argmax(output, dim=1)\n response['response']['text'] = \"Категория классификации \" + str(int(argmax))\n return json.dumps(\n response,\n ensure_ascii=False,\n indent=2\n )", "title": "" }, { "docid": "02e0dfb64d95dc391b4b1a6b8f6e06b8", "score": "0.5490881", "text": "def reply(sentence):\n \n probability = random.randint(1, 4)\n if probability == 1:\n return random.choice(hedges)\n else:\n return random.choice(qualifiers) + changePerson(sentence) + \" ?\"", "title": "" }, { "docid": "a02aaff20ac627a82939dd60fe4c07be", "score": "0.54886204", "text": "def retrain():\n response_data = get_texts_with_labels(n_all_samples)\n texts = list(map(lambda entry: entry['statement'], response_data))\n labels = list(map(lambda entry: entry['label'], response_data))\n print('training on', len(texts), 'samples')\n classifier.train(texts, labels)", "title": "" }, { "docid": "e0f3c92a289002cef28cbc93d98e422e", "score": "0.5487052", "text": "def dqn_learning(args):\n algorithm = args.algo\n # make environement and define observation format with Wrappers\n env = gym.make(args.env_name)\n\n # partial obs wrapper for agent \n env = RGBImgPartialObsWrapper(env)\n env = ImgObsWrapper(env) # Get rid of the 'mission' field\n\n env.seed(0) # sets the seed\n\n obs = env.reset()\n #print(obs.shape)\n obsListener = env_unwrapper(env)\n agent = DQNAgent(gamma=0.99, epsilon=1, lr=0.0001, input_dims=(obs.shape),\n n_actions=env.action_space.n, mem_size=50000, eps_min=0.1,\n batch_size=32, replace=1000, eps_dec=1e-5,\n chkpt_dir='../models/', algo=algorithm, env_name=args.env_name)\n\n load_checkpoint = False\n\n if load_checkpoint:\n agent.load_models()\n\n printFile = open('../results/' + 'results_' +algorithm+ args.env_name + '.txt', 'w')\n #fname = agent.algo + '_' + agent.env_name + '_lr' + str(agent.lr) +'_' \\\n # + str(n_games) + 'games'\n #figure_file = 'plots/' + fname + '.png'\n\n scores, avg_scores, std_scores, eps_history, steps_array = [], [], [], [], []\n\n best_score = -np.inf\n\n # logical symbols class\n ls = logical_symbols()\n \n print('Episode\\t','Steps\\t','Score\\t',\n 'Best_Score\\t','Epsilon\\t', file=printFile)\n \n for i in range(args.num_games):\n done = False\n observation = env.reset()\n obsListener = env_unwrapper(env)\n state_label = \"\"\n \n special_symbols = ls.get_special_symbols(obsListener)\n u1 = agent.rm.u0 # initial state from reward machine\n\n n_steps = 0\n score = 0\n while not done:\n action = agent.choose_action(observation)\n observation_, reward, done, info = env.step(action)\n score += reward\n \n # Run parallel environment to check for environment objects states\n obsListener_ = env_unwrapper(env)\n special_symbols_ = ls.get_special_symbols(obsListener_)\n state_label = ls.return_symbol(special_symbols, special_symbols_, state_label)\n\n # Get reward machine state \n u2 = agent.rm.get_next_state(u1, state_label)\n reward_rm = agent.rm.delta_r[u1][u2].get_reward()\n \n if not load_checkpoint:\n agent.store_transition(observation, action,\n reward_rm, observation_, done)\n agent.learn()\n \n # Update params\n u1 = deepcopy(u2)\n special_symbols = deepcopy(special_symbols_)\n #obsListener = deepcopy(obsListener_)\n observation = 
deepcopy(observation_)\n n_steps += 1\n\n scores.append(score)\n steps_array.append(n_steps)\n avg_scores.append(np.mean(scores[-100:]))\n std_scores = np.append(std_scores, np.std(scores[-100:]))\n\n print('%d\\t' %i, '%d\\t' %n_steps,\n '%.2f\\t' %score,'%.2f\\t' %best_score,\n '%.2f\\t' %agent.epsilon, file=printFile)\n\n if score > best_score:\n if not load_checkpoint:\n agent.save_models()\n best_score = score\n\n eps_history.append(agent.epsilon)\n\n #x = [i+1 for i in range(len(scores))]\n #plot_learning_curve(x, avg_scores, std_scores, eps_history, figure_file)\n\n return avg_scores", "title": "" }, { "docid": "5b48a336d79c58d584d0f275831517d4", "score": "0.54729164", "text": "def lifeCycle(self):\n r = random.random()\n\n if r < 0.33:\n self.generate()\n #pass\n if r > 0.33 and r < 0.66:\n ## Gives the lists of lists, where each sublist is, ordered by timestamp desc:\n ## [Word, Word], see documentation\n unratedwords = self.getUnscoredWords()\n if len(unratedwords) > 0:\n w = unratedwords[0]\n score = self.score(w.word)\n framing = \"I like the sex...*ermm* score of this pen..*ahem* word to be very boo...unrelated to anything.\"\n self.sendFeedback(w.word_id, score, framing, wordtext=w.word)\n elif r > 0.66:\n ## The result is the following:\n ## [Feedback, Feedback], see documentation\n feedback = self.getAllFeedback()\n self.adapt(feedback)\n #pass\n \n time.sleep(0.2)", "title": "" }, { "docid": "7ade42573dba549590b7e3c10c3f9c5d", "score": "0.5471976", "text": "def learn(self):\n self.forward_prop()\n self.update_weight()", "title": "" }, { "docid": "db11c1167644ee7db9c691966bb30001", "score": "0.54654104", "text": "def get_answer(self):\n pass", "title": "" }, { "docid": "0ec1d9b40a373f98fc1861e1bdf23d75", "score": "0.54622936", "text": "def active_learn_with_expert(adata, turk_uncer, gold, n = 20, algo = 'random', sep = 2000):\n pass", "title": "" }, { "docid": "9b0a63dd8208e28f91eee877a99b2f21", "score": "0.54532254", "text": "def train(self, params):\n def c2_calculate(amalgamated,corp0,corp1): #The main Gentzkow Shaprio distribution calculation\n c2 = {}\n ref0,ref1 = Counter(corp0), Counter(corp1)\n tpl0,tpl1 = len(corp0), len(corp1)\n for gram in amalgamated:\n if gram not in c2:\n if gram not in ref0:\n fpl0 = 0\n else:\n fpl0 = ref0[gram]\n if gram not in ref1:\n fpl1 = 0\n else:\n fpl1 = ref1[gram]\n cfpl0 = tpl0-fpl0\n cfpl1 = tpl1-fpl1\n chi2 = (fpl0*cfpl1 - fpl1*cfpl0)**2/((fpl0 + fpl1)*(fpl0 + cfpl0)*(fpl1 + cfpl1)*(cfpl0 + cfpl1))\n c2[gram]=([gram, chi2, fpl0, fpl1])\n print(gram, chi2, fpl0, fpl1)\n return c2\n def gen(l,g):\n for g1 in g:\n yield lookup[l][\" \".join(g1)]\n\n lookup = self.lookup\n bigram0, bigram1, trigram0, trigram1, langs, filter_index = params\n (bigram0,b0) , (bigram1, b1), (trigram0, t0), (trigram1, t1) = tee(bigram0), tee(bigram1), tee(trigram0), tee(trigram1)\n\n (bigram0,b0) , (bigram1, b1), (trigram0, t0), (trigram1, t1) = tee(gen(langs[0],bigram0)),tee(gen(langs[1],bigram1)),tee(gen(langs[0],trigram0)),tee(gen(langs[1],trigram1))\n #We now create a mapping between the bigrams and trigrams with their chi^2 value as well as e0, e1 frequencies\n sw = get_stop_words('en')\n self.bigram0 = sorted([x for x in bigram0 if (not x[filter_index].split()[0] in sw and not x[filter_index].split()[-1] in sw)])\n self.bigram1 = sorted([x for x in bigram1 if (not x[filter_index].split()[0] in sw and not x[filter_index].split()[-1] in sw)])\n self.trigram0 = sorted([x for x in trigram0 if (not x[filter_index].split()[0] in sw and not 
x[filter_index].split()[-1] in sw)])\n self.trigram1 = sorted([x for x in trigram1 if (not x[filter_index].split()[0] in sw and not x[filter_index].split()[-1] in sw)]) \n\n # c2_calculate(c2values, bigram0+bigram1, bigram0, bigram1)\n # c2_calculate(c2values, trigram0+trigram1, trigram0, trigram1)\n self.bigramc,self.trigramc = (c2_calculate(self.bigram0+self.bigram1,self.bigram0,self.bigram1),c2_calculate(self.trigram0+self.trigram1,self.trigram0,self.trigram1))\n\n pickle.dump({'alignment':self.alignment,'lookup':self.lookup,'bigramc':self.bigramc,'trigramc':self.trigramc},open(self.file,'wb'))\n\n #~~~~Now we need to train those frequency lists into a logistic model~~~\n\n # First we get the frequency lists (this may already exist in the code, if so just replace)\n bigram_freq_list_0 = [self.bigramc[gram][2] for gram in self.bigramc.keys()]\n bigram_freq_list_1 = [self.bigramc[gram][3] for gram in self.bigramc.keys()]\n trigram_freq_list_0 = [self.trigramc[gram][2] for gram in self.trigramc.keys()]\n trigram_freq_list_1 = [self.trigramc[gram][3] for gram in self.trigramc.keys()]\n\n print(bigram_freq_list_0)\n # Then we create a vector of 1's and 0's that's the size of the 1 and 0 lists\n assign_endpoints = np.hstack((np.ones(len(bigram_freq_list_1)+len(trigram_freq_list_1)),np.zeros(len(bigram_freq_list_0)+len(trigram_freq_list_0)))) \n print(assign_endpoints.size)\n # Combine the frequency lists into a single frequency vector the same size as 'assign_endpoints'\n frequency_vector = np.concatenate((bigram_freq_list_1,trigram_freq_list_1,bigram_freq_list_0,trigram_freq_list_0))\n print(frequency_vector.size)\n\n # Now MAKE. THAT. MODELLLLL\n model = make_pipeline(VarianceThreshold(), LogisticRegression())\n self.log_model = model.fit(frequency_vector,assign_endpoints)\n\n pickle.dump(self.log_model,open('GSlog_model.pickle'))", "title": "" }, { "docid": "a85a459df4723738cb4e9e679b42a0d4", "score": "0.5451051", "text": "def learn(self, action, reward):\n\t\tsuper(Planning, self).learn(action, reward)\t\t\n\t\tif self.goal_found:\n\t\t\tfor x in self.edges[self.goal_node]:\n\t\t\t\tself.propagate(x, [self.goal_node], self.parameters['alpha'])\n\t\telif reward > 0.0 and not self.goal_found:\n\t\t\tself.goal_found = True\t\t\t\n\t\t\tself.createGoalNode()", "title": "" }, { "docid": "221c52d872e4bd7cc7059bae7714930c", "score": "0.5448964", "text": "def _getAnswer(self):\n raise NotImplementedError", "title": "" }, { "docid": "fe1109b85468cae8d7a64182879493d7", "score": "0.54454494", "text": "def nn_prediction():\n response = get_tweet_list()\n\n output = {\n \"results\": response,\n \"status\": 200\n }\n return jsonify(output)", "title": "" }, { "docid": "eeb1243c948ad2d503d029586ee2643e", "score": "0.5441079", "text": "def learn(self, experiences, gamma):\n states, actions, rewards, next_states, dones, times = experiences\n\n #need to perform mini batch gradient descent \n # get targets\n self.hnetwork_target.eval()\n with torch.no_grad():\n Q_targets_next = torch.max(self.hnetwork_target.forward(next_states), dim=1, keepdim=True)[0]\n\n Q_targets = rewards \n \n # get outputs\n self.hnetwork_local.train()\n\n #Gathers values along an axis specified by dim.\n Q_expected = self.hnetwork_local.forward(states).gather(1, actions)\n\n #print(' Q_expected', Q_expected)\n #print('size of Q_expected', np.shape(Q_expected))\n # compute loss\n loss = F.mse_loss(Q_expected, Q_targets)\n\n # clear gradients\n self.optimizer.zero_grad()\n\n # update weights local network\n loss.backward()\n\n # take 
one SGD step\n self.optimizer.step()\n # ------------------- update target network ------------------- #\n self.soft_update(self.hnetwork_local, self.hnetwork_target, TAU)", "title": "" }, { "docid": "de4669bc59c7467d5e818b26beee29bb", "score": "0.5440467", "text": "def evaluate_response(self, response: str, next_message: str) -> float:\n last_response = ''\n if len(self.conversation) > 3:\n last_response = self.conversation[-4]\n current_message = self.conversation[-3]\n\n last_response_keywords = [word for word in last_response.split(' ') if word not in self.stopwords]\n response_keywords = [word for word in response.split(' ') if word not in self.stopwords]\n num_current_keywords = len(response_keywords)\n\n embeddings = self.chatbot.chatbot.embeddings\n\n if last_response:\n # Reward for using a different response than the previous response.\n average_similarity = 0\n\n for current_word in response_keywords:\n max_similarity = 0\n current_index = self.get_word_index(current_word)\n for last_word in last_response_keywords:\n last_index = self.get_word_index(last_word)\n cos_similarity = 1 - spatial.distance.cosine(embeddings[current_index], embeddings[last_index])\n max_similarity = max(max_similarity, cos_similarity)\n average_similarity += max_similarity\n\n average_similarity /= num_current_keywords\n\n dissimilarity_score = min(0.0, 0.5 - average_similarity)\n else:\n dissimilarity_score = 0.0\n\n # Reward for using humor when the other side is in a good mood.\n current_sentiment_score = self.get_sentiment(current_message)\n\n # Reward for using positive statements.\n response_sentiment_score = self.get_sentiment(response)\n\n # Reward for positively changing the other side's sentiment\n next_sentiment = self.get_sentiment(next_message)\n\n sentiment_change_score = 0.0\n sentiment_difference = next_sentiment - current_sentiment_score\n\n if sentiment_difference < 0.0 and next_sentiment >= 0.0:\n # Avoid negative reward for fluctuating between positive and neutral sentiment.\n sentiment_difference = 0.0\n\n sentiment_change_score = sentiment_difference\n\n final_score = (dissimilarity_score + current_sentiment_score + response_sentiment_score + sentiment_change_score) / 4\n if self.chatbot.chatbot.outer_args.debug_print:\n print('Score: %f, Dissimilarity: %f, Current sentiment: %f, Response sentiment: %f, Sentiment change: %f' % (final_score, dissimilarity_score, current_sentiment_score, response_sentiment_score, sentiment_change_score))\n return final_score", "title": "" }, { "docid": "ca534ab6c3948dc5a021d0a01bb1a791", "score": "0.5438763", "text": "def learn(self, experiences, gamma, alg):\n states, actions, rewards, next_states, dones = experiences\n\n if alg == \"dqn\":\n # Get max predicted Q values (for next states) from target model\n q_targets_next = self.q_network_target(next_states).detach().max(1)[0].unsqueeze(1)\n\n else:\n # best action according to the local network:\n best_action_next = self.q_network_local(next_states).detach().max(1)[1].unsqueeze(1)\n\n # target of the target network, according to that action\n q_targets_next = self.q_network_target(next_states).detach().gather(1, best_action_next)\n\n # Compute Q targets for current states\n q_targets = rewards + (gamma * q_targets_next * (1 - dones))\n # Get expected Q values from local model\n q_expected = self.q_network_local(states).gather(1, actions)\n\n # Compute loss (Huber loss)\n loss = f.smooth_l1_loss(q_expected, q_targets) # mse + clipping in the article\n\n # Minimize the loss\n 
self.optimizer.zero_grad()\n loss.backward() # because we want stochastic gradient ascent and not descent\n self.optimizer.step()\n\n # ------------------- update target network ------------------- #\n soft_update(self.q_network_local, self.q_network_target, TAU)", "title": "" }, { "docid": "eac887be57a981a9db4afaccf94d5ae2", "score": "0.54248494", "text": "def predict(self, state, possible_actions):\n if not self.supervised_learners:\n return np.random.choice(possible_actions)\n self.previous_guess = np.zeros(len(self.supervised_learners))\n for i in range(len(self.supervised_learners)):\n state_action_values = np.zeros(len(possible_actions))\n for j in range(len(possible_actions)):\n x = np.r_[state, possible_actions[j]].reshape((1,-1))\n state_action_values[j] = self.supervised_learners[i].predict(x)\n self.previous_guess[i] = possible_actions[np.argmax(state_action_values)]\n perturbed_w = self.weight + np.random.uniform(0, 1 / self.eps, len(self.weight))\n learner_id = np.argsort(perturbed_w)\n # todo: possible modification\n # todo: average the top 25%'s q function\n\n q_value_array = np.zeros(len(possible_actions))\n for i in range(len(possible_actions)):\n x = np.r_[state, possible_actions[i]].reshape((1, -1))\n for j in range(int(np.ceil(0.5 * len(learner_id)))):\n q_value_array[i] += self.supervised_learners[learner_id[j]].predict(x)\n return possible_actions[np.argmax(q_value_array)]\n \"\"\"\n action = 0\n for i in range(int(np.ceil(0.25 * len(learner_id)))):\n action += self.previous_guess[learner_id[i]]\n return action / int(np.ceil(0.25 * len(learner_id)))\n \"\"\"", "title": "" }, { "docid": "eb1cc7964b952434c7c71bc7bee96616", "score": "0.5420626", "text": "def learn(self,post,learn_term,mod_term,rate=5e-7,**kwargs):\r\n\r\n if isinstance(post,basestring):\r\n post=self.network.getNode(post)\r\n if isinstance(learn_term,basestring):\r\n learn_term=post.getTermination(learn_term)\r\n if isinstance(mod_term,Termination):\r\n mod_term=mod_term.getName()\r\n \r\n if isinstance(learn_term,STDPTermination):\r\n in_args = {'a2Minus': 5.0e-3, #1.0e-1,\r\n 'a3Minus': 5.0e-3,\r\n 'tauMinus': 70.0, #120.0,\r\n 'tauX': 70.0, #140.0\r\n }\r\n for key in in_args.keys():\r\n if kwargs.has_key(key):\r\n in_args[key] = kwargs[key]\r\n \r\n inFcn = InSpikeErrorFunction([n.scale for n in post.nodes],post.encoders,\r\n in_args['a2Minus'],in_args['a3Minus'],in_args['tauMinus'],in_args['tauX']);\r\n inFcn.setLearningRate(rate)\r\n \r\n out_args = {'a2Plus': 5.0e-3, #1.0e-2,\r\n 'a3Plus': 5.0e-3, #5.0e-8,\r\n 'tauPlus': 70.0, #3.0,\r\n 'tauY': 70.0, #150.0\r\n }\r\n for key in out_args.keys():\r\n if kwargs.has_key(key):\r\n out_args[key] = kwargs[key]\r\n \r\n outFcn = OutSpikeErrorFunction([n.scale for n in post.nodes],post.encoders,\r\n out_args['a2Plus'],out_args['a3Plus'],out_args['tauPlus'],out_args['tauY']);\r\n outFcn.setLearningRate(rate)\r\n learn_term.init(inFcn, outFcn, 'AXON', mod_term)\r\n \r\n if kwargs.has_key('decay') and kwargs['decay'] is not None:\r\n learn_term.setDecaying(True)\r\n learn_term.setDecayScale(kwargs['decay'])\r\n else:\r\n learn_term.setDecaying(False)\r\n \r\n if kwargs.has_key('homeostasis') and kwargs['homeostasis'] is not None:\r\n learn_term.setHomestatic(True)\r\n learn_term.setStableVal(kwargs['homeostasis'])\r\n else:\r\n learn_term.setHomestatic(False)\r\n elif isinstance(learn_term,BCMTermination):\r\n learn_term.setLearningRate(rate)\r\n learn_term.setOriginName('AXON')\r\n elif isinstance(learn_term,hPESTermination):\r\n supervision_ratio = 0.5\r\n 
if kwargs.has_key('supervisionRatio'):\r\n supervision_ratio = kwargs['supervisionRatio']\r\n \r\n learn_term.setLearningRate(rate)\r\n learn_term.setOriginName('AXON')\r\n learn_term.setSupervisionRatio(supervision_ratio)\r\n learn_term.setModTermName(mod_term)\r\n elif isinstance(learn_term,PESTermination):\r\n oja = True\r\n if kwargs.has_key('oja'):\r\n oja = kwargs['oja']\r\n\r\n learn_term.setLearningRate(rate)\r\n learn_term.setOja(oja)\r\n learn_term.setOriginName('X')\r\n learn_term.setModTermName(mod_term)\r\n elif isinstance(learn_term,PreLearnTermination):\r\n learn_term.setLearningRate(rate)\r\n learn_term.setOriginName('X')\r\n learn_term.setModTermName(mod_term)\r\n else:\r\n print 'Unknown type of learning termination:',learn_term", "title": "" }, { "docid": "17ee1d19a39af415e43e59b39931a245", "score": "0.54190564", "text": "def respond(user_input):\n\toverall_response = []\n\twordlist = split(' ',remove_punctuation(user_input))\n\twordlist[0] = wordlist[0].lower()\n\tmapped_wordlist = you_me_map(wordlist)\n\tmapped_wordlist[0] = mapped_wordlist[0].capitalize()\n\t\n\tglobal STATE # current state - used to get preference reasoning\n\tglobal PREFERENCE_STATE # the preference that is currently being discussed\n\tglobal HELLO_SAID # if hello has been already said or not\n\n\t# Non-Response Rule =================================\n\t# 1) User has sent nothing or has only sent punctuation\n\tif wordlist[0] == '':\n\t\toverall_response += [ random.choice(NO_INPUT_RESPONSES) ]\n\t\t# cycles through multple responses\n\telse:\n\t\t# Introduction Rules =======================================================\n\t\t# Introduction-type input of size <= 2 (\"hi there\", \"hello!\", \"what's up?\")\n\t\tif (len(wordlist) <= 2) and (wordlist[0] in INTRODUCTION_INPUTS):\n\t\t\t\n\t\t\t# 2) Memory Function: If the user has already said hello, we\n\t\t\t# already know and respond accordingly.\n\t\t\tif HELLO_SAID == True:\n\t\t\t\toverall_response += [ \"My homie, you already told me 'hello'. We ain't on that Adele stuff, we good.\" ]\n\t\t\telse:\n\t\t\t\toverall_response += [ random.choice(INTRODUCTION_RESPONSES) ]\n\t\t\t\t# cycles through multple responses\n\t\t\t# 3) Memory function: We want to know the user's name, so we ask\n\t\t\t# them if we don't know. 
If the name is known, it is used.\n\t\t\tif 'name' not in OPPONENT_INFO:\n\t\t\t\toverall_response += [ random.choice(NAME_ASKS)] # cycles through responses\n\t\t\t\tSTATE = \"name_asked\"\n\t\t\telse:\n\t\t\t\toverall_response += [ \"You know you smart, \" + stringify(OPPONENT_INFO['name']) +\".\"]\n\t\t\n\t\t# Name Getter Rules ======================================\n\t\t# Detects whether a user is saying their name.\n\t\t# 4) First two words are introductory.\n\t\t# Example: \"My name's Megh\" | \"Hi im megh\" | etc.\n\t\telif wordlist[0:2] in NAME_PHRASES[2]:\n\t\t\toverall_response += nameAskCheck(wordlist, 0,2)\n\t\t\twordlist = wordlist[2:]\n\t\t\n\t\t# 5) First three words are introductory.\n\t\t# Example: \"My name is Megh\" | \"Hi i am megh\" | etc.\n\t\telif wordlist[0:3] in NAME_PHRASES[3]:\n\t\t\toverall_response += nameAskCheck(wordlist, 0,3)\n\t\t\twordlist = wordlist[3:]\n\t\t\n\t\t# 6) First four words are introductory.\n\t\t# Example: \"Hi my name is Megh\"\n\t\telif wordlist[0:4] in NAME_PHRASES[4]:\n\t\t\toverall_response += nameAskCheck(wordlist, 0,4)\n\t\t\twordlist = wordlist[4:]\n\t\t\n\t\t# Preference Clarification Rules ==========================\n\t\t# This STATE occurs when the converser is being asked to\n\t\t# explain why they like something.\n\t\telif STATE == \"preference_clarification\":\n\t\t\t\n\t\t\t# 7) Memory function: Stores the reasoning for the preference.\n\t\t\tif \"because\" in wordlist:\n\t\t\t\tindex = wordlist.index(\"because\")\n\t\t\t\tpreference_reasoning = stringify(wordlist[index + 1:])\n\t\t\t\tmapped_wordlist = you_me_map(wordlist)\n\t\t\t\taddPositivePreferenceReasoningToOpponent(PREFERENCE_STATE, preference_reasoning)\n\t\t\t\toverall_response += [\"I gotchu, you like \" + PREFERENCE_STATE + \" \" + stringify(mapped_wordlist) + \"? We good here.\"]\n\t\t\t\tSTATE = \"\"\n\t\t\telse:\n\t\t\t\toverall_response += [\"You gotta specify. Why you like \" + PREFERENCE_STATE]\t\t\n\t\telse:\n\t\t\t# eliminate \"and\" if still present \n\t\t\t# so we can process next phrase\n\t\t\tif wordlist[0] in PROGRESSION_WORDS:\n\t\t\t\twordlist = wordlist[1:]\n\t\t\tmapped_wordlist = you_me_map(wordlist)\n\t\t\t\n\t\t\t# Positive Preferences Rules ======================================\n\t\t\t# 8) Checks if the first two words are preference verbs and stores\n\t\t\t# user's preferences.\n\t\t\tif (len(wordlist) > 1 and wordlist[0] == \"i\" and wordlist[1] in POSITIVE_PREFERENTIAL_VERBS)\t:\n\t\t\t\tmapped_wordlist = you_me_map(wordlist)\n\t\t\t\tpreference = wordlist[2]\n\t\t\t\taddPositivePreferenceKeyToOpponent(preference)\n\t\t\t\toverall_response += [\"Why do \" + stringify(mapped_wordlist) + \"?\"]\n\t\t\t\tPREFERENCE_STATE = preference\n\t\t\t\tSTATE = \"preference_clarification\"\n\t\t\telse: \n\n\t\t\t\t# General Phrase Rules ========================================\n\t\t\t\t# 9)\n\t\t\t\tif wordlist[0:3] == ['do','you','think']:\n\t\t\t\t\toptions = [\"I'm DJ Khaled, I got my own opinions but to be a Major Key you gotta answer your own. I can't help you.\",\n\t\t\t\t\t\t \t\t\"You askin' me something crazy cuz I think a lot of things. Why do you think \" + stringify(you_me_map(wordlist[3:])) + \".\" ]\n\t\t\t\t\toverall_response += [ random.choice(options) ]\n\t\t\t\t# 10)\n\t\t\t\tif wordlist[0:2] == ['i','am']:\n\t\t\t\t\toverall_response += [\"I'm all ears always fam. 
Why is you \" + stringify(mapped_wordlist[2:]) + '.']\n\t\t\t\t# 11)\n\t\t\t\tif wordlist[0:2] == ['i','have']:\n\t\t\t\t\toverall_response += [\"Dang, how long you had that \" + stringify(mapped_wordlist[2:]) + '...?']\n\t\t\t\t# 12)\n\t\t\t\tif wordlist[0:2] == ['i','feel']:\n\t\t\t\t\toverall_response += [\"Check it, I feel that way too homie.\"]\n\t\t\t\t# 13) Memory function: If the converser proclaims that the bot is something (\"you are\"),\n\t\t\t\t# the proclamation is stored for future user\n\t\t\t\tif wordlist[0:2] == ['you','are']:\n\t\t\t\t\texisting_personalities = getBotPersonality()\n\t\t\t\t\tif len(existing_personalities) < 1:\n\t\t\t\t\t\toverall_response += [\"My homie, I'm not sure why you'd say I'm \" + stringify(mapped_wordlist[2:]) + ', but I noted it']\n\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\toverall_response += [\"You know I'm \" + stringify(mapped_wordlist[2:]) + '. I\\'m also ' + random.choice(existing_personalities)]\n\t\t\t\t\taddToBotPersonality(stringify(mapped_wordlist[2:]))\n\t\t\t\t# 14)\n\t\t\t\tif verbp(wordlist[0]):\n\t\t\t\t\toverall_response += [\"Why you want me to \" + stringify(mapped_wordlist) + '?']\n\t\t\t\t# 15)\n\t\t\t\tif wordlist[0:2]==['can','you'] or wordlist[0:2]==['could','you']:\n\t\t\t\t\toverall_response += [\"Bruh, that's some talk about \" + wordlist[0] + ' ' + stringify(mapped_wordlist[2:]) + '.']\n\t\t\t\t# 16) Sentences begin with positive or negative words (yes, no)\n\t\t\t\tif wordlist[0] in NEGATIVE_WORDS or wordlist[0] in POSITIVE_PREFERENTIAL_VERBS:\n\t\t\t\t\toverall_response += [\"You feel how you feel mah homie. I'm wit it.\"]\n\t\t\t\t# Preference Response rules ========================================\n\t\t\t\t# if the phrase begins with \"do you like\", the first word is\n\t\t\t\t# used to determine questioning purpose and the following words\n\t\t\t\t# are used to determine subject\n\t\t\t\tif wordlist[0:2] == [\"what\", \"is\"] or wordlist[0:1] == [\"whats\"]:\n\t\t\t\t\t# if it is a personal question, resort\n\t\t\t\t\t# to personal info\n\t\t\t\t\tif 'whats' == wordlist[0]:\n\t\t\t\t\t\tstart_index = 1\n\t\t\t\t\telif 'what' == wordlist[0]:\n\t\t\t\t\t\tstart_index = 2\n\n\t\t\t\t\t# 17) Memory function: Based on what the converser is asking,\n\t\t\t\t\t# this bot may respond with its own info.\n\t\t\t\t\tif wordlist[start_index] == \"your\":\n\t\t\t\t\t\tif len(wordlist) > start_index+1:\n\t\t\t\t\t\t\tsubject = stringify(wordlist[start_index+1:])\n\t\t\t\t\t\t\tif subject in BOT_INFO.keys():\n\t\t\t\t\t\t\t\toverall_response += [ \"Yo, my \" + subject + \" is \" + BOT_INFO[subject] + \".\"]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmapped_wordlist = you_me_map(wordlist)\n\t\t\t\t\t\t\t\toverall_response += [ stringify(mapped_wordlist) + \"? Take a guess fam.\"]\n\n\t\t\t\t\t\t\tif subject in OPPONENT_INFO.keys():\n\t\t\t\t\t\t\t\toverall_response += [ \"And I know yo \" + subject + \" is \" + stringify(OPPONENT_INFO[subject])]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\toverall_response += [ \"What's your \" + subject + \" fam?\"]\n\t\t\t\t\t\t\t\n\t\t\t\t\t# 18) Memory function: Based on what the converser is asking,\n\t\t\t\t\t# this bot may respond with stored info about the bot.\n\t\t\t\t\telif wordlist[start_index] == \"my\":\n\t\t\t\t\t\tif len(wordlist) > start_index+1:\n\t\t\t\t\t\t\tsubject = stringify(wordlist[start_index+1:])\n\t\t\t\t\t\t\tif subject in OPPONENT_INFO.keys():\n\t\t\t\t\t\t\t\toverall_response += [ \"Yo, yo \" + subject + \" is \" + OPPONENT_INFO[subject] + \". 
You already told me that tho!\"]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmapped_wordlist = you_me_map(wordlist)\n\t\t\t\t\t\t\t\toverall_response += [ stringify(mapped_wordlist) + \"? You tell me fam.\"]\n\t\t\t\t# 19)\n\t\t\t\tif wpred(wordlist[0]):\n\t\t\t\t\toverall_response += [ \"I don't answer no questions like that. You tell me \" + wordlist[0] +\".\"]\n\t\t\t\t# 20) Memory function: If converser is asking about Bot's preferences, preferences are created\n\t\t\t\t# and returned.\n\t\t\t\tif wordlist[0:3] == [\"do\", \"you\", \"like\"] or wordlist[0:4] == [\"what\",\"do\", \"you\", \"like\"]:\n\t\t\t\t\t# do you like ....\n\t\t\t\t\t# determine if question is directed\n\t\t\t\t\twhat_question = True if wordlist[0] == \"what\" else False\n\n\t\t\t\t\tif what_question:\n\t\t\t\t\t\tsubject = wordlist[4:] if len(wordlist) > 4 else None\n\t\t\t\t\telse:\n\t\t\t\t\t\tsubject = wordlist[3:] if len(wordlist) > 3 else None\n\n\t\t\t\t\t# determine if a subject exists:\n\t\t\t\t\tresp = \"\"\n\t\t\t\t\tif subject == None:\n\t\t\t\t\t\tif what_question:\n\t\t\t\t\t\t\toverall_response += [ \"What do I like about what?\" ]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\toverall_response += [ \"Do I like what?\" ]\n\t\t\t\t\telse:\n\t\t\t\t\t\tif what_question:\n\t\t\t\t\t\t\t# what do you like about []\n\t\t\t\t\t\t\tif subject[0] == \"about\":\n\t\t\t\t\t\t\t\tsubject = stringify(subject[1:])\n\t\t\t\t\t\t\t\taddPositivePreferenceKeyToSelf(subject)\n\t\t\t\t\t\t\t\toverall_response += [ \"What do YOU like about \" + subject + \"?\" ]\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# what do you like []??\n\t\t\t\t\t\t\t\toverall_response += [\"About what?\"]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# do you like [subject]\n\t\t\t\t\t\t\tsubject = stringify(subject)\n\t\t\t\t\t\t\taddPositivePreferenceKeyToSelf(subject)\n\t\t\t\t\t\t\toverall_response += [ \"Oh absolutely, I \" + random.choice(POSITIVE_PREFERENTIAL_VERBS) + \" \" + subject + \".\"]\n\t\t\t\t\t\t\tif opponentHasPositivePreference(subject):\n\t\t\t\t\t\t\t\toverall_response += [ \"I know you like \" + subject + \" too, right ;)?\" ]\n\t\t\t\t\t\n\t\t\t\n\t\t\t\t# 21)\n\t\t\t\tif 'major key' in wordlist:\n\t\t\t\t\toverall_response += [\"You got it with that major key talk. You mah homie now.\"]\n\t\t\t\t# 22)\n\t\t\t\telif 'da best' in wordlist:\n\t\t\t\t\toverall_response += [\"You da best, we da best, everyone's da best. We good.\"]\n\t\t\t\t# 23)\n\t\t\t\telif 'changed a lot' in wordlist:\n\t\t\t\t\toverall_response += [\"We all changed a lot. The world is changin' just like you and me. 
We good out here.\"]\n\n\t\t\t\tif len(overall_response) == 0:\n\t\t\t\t\toverall_response += [ punt() ]\n\t# Debug: print(' '.join(overall_response))\n\treturn ' '.join(overall_response)", "title": "" }, { "docid": "b194a36b09aaaf673d1fb4be78bee858", "score": "0.54186803", "text": "def main(repdims, guess = buildGuess, model_name = 'model'):\n if guess == buildGuess:\n encode = buildEncode\n rd = repdims\n else:\n encode = buildEncode2\n rd = repdims[1]\n filenames = getFileNames('.\\\\Control_group')\n filenames.extend(getFileNames('.\\\\Data_group'))\n filenames.extend(getFileNames('.\\\\ML_group'))\n vocab = getVocab(filenames)\n np.save('.\\\\Models\\\\'+model_name+'_vocab.npy', vocab)\n grams = getGrams(filenames)\n vocabsize = 5944\n nodes = buildTraining(vocabsize, repdims, guess)\n train_losses, valid_losses, test_loss, save_epoch = runTraining(*nodes, grams, vocab, batchsize=20, ts_per_epoch=100, num_epochs=100, model_name=model_name)\n nodes = encode(vocabsize, repdims)\n encoding = runEncode(*nodes, vocab, model_name=model_name)\n print('In epoch {0}, the model was saved. The correct word had was given a 1/{1} chance at that time.'.format(save_epoch, exp(-test_loss)))\n x1 = np.arange(0,10000)\n x2 = np.arange(0,10000, 100)\n train_ce, = plt.plot(x1, train_losses, 'r', label='Training Loss')\n valid_cd, = plt.plot(x2, valid_losses, 'y', label='Validation Loss')\n plt.legend()\n plt.xlabel('Training step')\n plt.ylabel('Cross-Entropy')\n plt.title('Training a {0} Dimensional Word2Vec Encoding'.format(rd))\n plt.show()\n return encoding", "title": "" }, { "docid": "a2b6b70826026a73e2d227f8c84d5d41", "score": "0.5417841", "text": "def inference_the_input(self):\n input = st.text_input(\"Enter The Sentence\", \"Enter The Text Here...\")\n\n if st.button('Predict The Result'):\n result = self.classifier.predict(self.vectorizer.transform([input]))[0]\n self.print_result(result)\n else:\n st.write(\"Press the above button..\")", "title": "" }, { "docid": "f8481c5f75f5626945221fbdb0f7b4f1", "score": "0.54170454", "text": "def train(self,text):\n document = types.Document(\n content=text,\n type=enums.Document.Type.PLAIN_TEXT)\n annotation = self.client.analyze_sentiment(document=document)\n for sentence in annotation.sentences:\n score = sentence.sentiment.score\n #print(score)\n sentenceTxt = sentence.text.content\n sentenceAnalize = types.Document(content=sentenceTxt,type=enums.Document.Type.PLAIN_TEXT)\n #esponse = self.client.analyze_entities(document=sentenceAnalize)\n #print(response)\n for word in re.split(r'\\W+', sentenceTxt):\n if word not in self.wordRanking:\n self.wordRanking[word] = [1,score]\n else:\n self.wordRanking[word][0] += 1\n self.wordRanking[word][1] += score", "title": "" }, { "docid": "7b6ad979d649aefc62e05db340b95f51", "score": "0.54056716", "text": "def train(self):\n return", "title": "" }, { "docid": "6dbdf975f92a25802546eaca022cada7", "score": "0.54023135", "text": "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--hypertune', action='store_true', help = 'This is a hypertuning job')\n parser.add_argument('--batch-size', type=int, help='Batch size for the training')\n parser.add_argument('--epochs', type=int, help='Number of epochs for the training')\n parser.add_argument('--type-model', help='Choose between regular NN or CNN')\n parser.add_argument('--job-dir', default=None, required=False, help='Option for AI Platform') # opcion obligatoria\n parser.add_argument('--model-output-path', help='Path to write the SaveModel format')\n\n 
args = parser.parse_args()\n\n is_hypertune = args.hypertune\n type_of_model = args.type_model\n batch_size = args.batch_size\n epochs = args.epochs\n job_dir = args.job_dir\n output_path = args.model_output_path\n\n train_and_evaluate(batch_size, epochs, job_dir, output_path, is_hypertune,type_of_model)", "title": "" }, { "docid": "7c715ed05b0c6d135ee57658873d34fa", "score": "0.5389455", "text": "def main():\n w0 = \"Good morning, I hope you are well today .\"\n w0 = changetoText(w0)\n print(w0)\n # print(\"Good morning, I hope yo1+22u are well today .\")\n w1 = \"What can I do for you ?\"\n w1 = changetoText(w1)\n print(w1)\n earliersentence = []\n while True:\n\n if len(earliersentence) > 5:\n num = random.randint(1,4)\n if num == 1:\n asksentence = random.choice(earliersentence)\n print(\"Earlier, you said that\"+changePerson(asksentence))\n earliersentence.remove(asksentence)\n\n sentence = input(\"\\n>> \")\n\n earliersentence.append(sentence)\n\n if sentence.upper() == \"QUIT\":\n print(\"Have a nice day\")\n break\n print(changetoText(reply(sentence)))", "title": "" }, { "docid": "a319dd5b9f93722a7841e1da70d9ea01", "score": "0.53849787", "text": "def train(self, state, action, next_state, reward, terminal):\n\n # TODO:\n # 1. add current transition to replay buffer\n # 2. sample next batch and perform batch update: \n # 2.1 compute td targets and loss \n # td_target = reward + discount * max_a Q_target(next_state_batch, a)\n # 2.2 update the Q network\n # 2.3 call soft update for target network\n # soft_update(self.Q_target, self.Q, self.tau)\n\n # add current transition to replay buffer\n self.replay_buffer.add_transition(state, action, next_state, reward, terminal)\n # sample batch\n batch_states, batch_actions, batch_next_states, batch_rewards, batch_dones = self.replay_buffer.next_batch(self.batch_size)\n # result Q target\n batch_next_states = torch.from_numpy(batch_next_states).float().cuda()\n batch_states = torch.from_numpy(batch_states).float().cuda()\n batch_rewards = torch.from_numpy(batch_rewards).float().cuda().unsqueeze(1) #batch size * 1\n batch_dones = torch.from_numpy(batch_dones).float().cuda().unsqueeze(1)#batch size * 1\n #batch_actions = torch.from_numpy(batch_actions).long().cuda() #indices should be detached\n #print('Rewards: ', batch_rewards)\n\n # Double DQN:\n if self.double:\n self.Q_target.eval()\n self.Q.eval()\n with torch.no_grad():\n # get argmax actions of Q(next states)\n Q_next_output = self.Q(batch_next_states)\n Q_target_actions = torch.argmax(Q_next_output, dim = 1).unsqueeze(1)\n # compute Q_target(next states)\n Q_target_next_output = self.Q_target(batch_next_states)\n # compute Q_target(next states) with indices of argmax actions of Q(next states)\n \n #Q_target_next_output = Q_target_next_output.detach()\n \n Q_target_Double = Q_target_next_output[torch.arange(Q_target_next_output.shape[0]) , Q_target_actions.squeeze(1).detach()].unsqueeze(1)\n #Q_target_Double = Q_target_next_output.gather(1, Q_target_actions.long())\n td_target = batch_rewards + self.gamma * Q_target_Double* (1 - batch_dones)\n\n else:\n # DQN\n self.Q_target.eval()\n self.Q.eval()\n with torch.no_grad():\n Q_target_next_output = self.Q_target(batch_next_states)\n \n #Q_target_next_output = Q_target_next_output.detach()\n \n Q_target_values = torch.max(Q_target_next_output, dim=1)[0].unsqueeze(1)\n #print('Q_target_values: ', Q_target_values)\n td_target = batch_rewards + self.gamma * Q_target_values* (1 - batch_dones) # y \n \n td_target = td_target.detach() # to make 
sure not backropagate, detach from pytorch graph\n # result Q\n self.Q.train() # train \n Q_output_states = self.Q(batch_states)\n Q_output = Q_output_states[torch.arange(Q_output_states.shape[0]) , batch_actions].unsqueeze(1) #indices should be detached\n\n \n\n # to check if my parameters have gradients\n for name, param in self.Q.named_parameters():\n if param.grad is None:\n print('None ', name, param.grad)\n #else:\n # print('not None ',name, param.grad.sum())\n \n loss = self.loss_function (Q_output, td_target)\n self.optimizer.zero_grad()\n loss.backward()\n #Gradient clipping:\n clip = 1\n torch.nn.utils.clip_grad_norm_(self.Q.parameters(),clip)\n self.optimizer.step()\n \n # soft update of Q_target\n soft_update(self.Q_target, self.Q, self.tau)", "title": "" }, { "docid": "aa0543c809dde32f94cd2b32111dcff8", "score": "0.53826475", "text": "def main():\n global READER, NB\n fname = './data/letter-recognition.csv'\n READER, NB = train(fname)\n output = predict(fname)\n print(output)", "title": "" }, { "docid": "28d44433d4692cf8b872b83dd4a11bee", "score": "0.53738517", "text": "def trivia(self):\r\n self.write_to_chat(\"pls trivia\")\r\n r=\"pls trivia\"\r\n while \"pls trivia\" in r:\r\n r=self.read_chat()\r\n sleep(1)\r\n self.write_to_chat(choice([\"a\",\"b\",\"c\",\"d\"]))", "title": "" }, { "docid": "e24e406d179b4385a43995078425419e", "score": "0.5373783", "text": "def target_objective(self, final_w, final_b, labels):", "title": "" }, { "docid": "33c174e7dddcf1c6a39f432999ee4426", "score": "0.537328", "text": "def ask_question(question) :\n logger.info('Asking wolframalpha.')\n try :\n cprint('Hmm..Thinking....','yellow')\n api_key = 'GLHKQ7-R5V9E6GU3Y'\n client = wolframalpha.Client(api_key)\n res = client.query(question)\n answer = next(res.results).text\n if 'Wolfram|Alpha' in answer:\n answer = answer.replace('Wolfram|Alpha',bot['name'])\n if 'no data available' in answer:\n answer = wiki_search(question,1) \n # search_google(question)\n return answer\n except :\n logger.info('Wolframalpha do not know the answer.')\n answer = wiki_search(question,1)\n logger.info(answer)\n # search_google(question)\n # answer = 'check browser.'\n return answer", "title": "" }, { "docid": "69ba9878d4d1963a61b05272f16fdd28", "score": "0.5369738", "text": "def learn(self):\n if self.step > self.learn_start:\n if self.step % self.train_frequency == 0:\n self.q_learning_mini_batch()\n\n if self.step % self.target_q_update_step == self.target_q_update_step - 1:\n self.update_target_q_network()", "title": "" }, { "docid": "fccd0b8bb0de41471c6074af1d74e569", "score": "0.5353169", "text": "def hmm():\n if helpers.get_answer():\n print(get_hmm())", "title": "" }, { "docid": "c530a947776b9e61b4f761ad9a5b1c1a", "score": "0.53519934", "text": "def learn(self, state, action, reward):\r\n\r\n if self.learning == True:\r\n self.Q[state][action] = reward * self.alpha + self.Q[state][action]*(1-self.alpha)\r\n return", "title": "" }, { "docid": "e6066ea7c371c55145ea277e71d2e273", "score": "0.535085", "text": "def train_step(x_question, x_answer, x_question_len, x_answer_len, x_lastTurn, x_lastTurn_len, q_id_list, as_id_list, x_target):\n feed_dict = {\n esim.question: x_question,\n esim.answer: x_answer,\n esim.question_len: x_question_len,\n esim.answer_len: x_answer_len,\n esim.target: x_target,\n esim.dropout_keep_prob: FLAGS.dropout_keep_prob,\n esim.lastTurn: x_lastTurn,\n esim.lastTurn_len: x_lastTurn_len\n }\n\n _, step, loss, accuracy, predicted_prob = sess.run(\n [train_op, global_step, 
esim.mean_loss, esim.accuracy, esim.probs],\n feed_dict)\n\n if step % 100 = 0:\n time_str = datetime.datetime.now().isoformat()\n print(\"{}: step {}, loss {:g}, acc {:g}\".format(time_str, step, loss, accuracy))\n #train_summary_writer.add_summary(summaries, step)", "title": "" }, { "docid": "cdbcec0ec094dbebc2c638a5a20306ef", "score": "0.5350668", "text": "def strains():\n\n # receive input\n lines = request.get_json(force=True)\n\n # get data from json\n text = lines['input'] # json keys to be determined\n\n # validate input (optional)\n assert isinstance(text, str)\n\n\n # predict\n output = predict(text)\n\n\n # give output to sender.\n return output", "title": "" }, { "docid": "e440aaf624b237260de2e7dc3c72b931", "score": "0.53504753", "text": "def text_reply(msg):\n handle_friend(msg)", "title": "" } ]
91dfb5d67afddc9f13ee0c6955ced692
Tests the calling of get_allsubscription with valid parameters
[ { "docid": "c3f1137d959c4e90e600973d908f0ca8", "score": "0.82672656", "text": "def test_get_allsubscription(self): \r\n statuslink = self.ms.get_allsubscription()\r\n self.assertTrue(len(statuslink)>=0)", "title": "" } ]
[ { "docid": "0d22f0f217a77cd95853c97cbd6efbcf", "score": "0.8271291", "text": "def test_get_all_subscriptions(self):\n pass", "title": "" }, { "docid": "90565508ed55dcce2008b07c50c6efc8", "score": "0.715819", "text": "def test_get_available_credit_subscription(self):\n pass", "title": "" }, { "docid": "5d4f858a33ceb7c409bbc1af9da51ee3", "score": "0.71086615", "text": "def test_retrieve_subscriptions_list(self):\n\n Subscription.objects.create(\n name='Carl Yu', email='[email protected]',\n subscription_type='free')\n Subscription.objects.create(\n name='Margarita Shamraeva', email='[email protected]',\n subscription_type='plus')\n Subscription.objects.create(\n name='Sam Slottow', email='[email protected]',\n subscription_type='pro')\n\n serializer = SubscriptionSerializer(\n Subscription.objects.order_by('-id')[:10], many=True)\n\n response = self.client.get(reverse('api:subscriptions'))\n\n # Check that the response is 200 OK.\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # Check that the response data contains 3 subscriptions.\n self.assertEqual(len(response.data), 3)\n\n # Check that the response data is equal to the stored records.\n self.assertEqual(response.data, serializer.data)", "title": "" }, { "docid": "6735124f99d7d8c92137be4fbea17fcc", "score": "0.7058639", "text": "def test_get_subscription_by_id(self):\n pass", "title": "" }, { "docid": "bc48b7815a25f4d53bdba607ad7ca7db", "score": "0.6945204", "text": "def test_get_charges_on_subscription(self):\n pass", "title": "" }, { "docid": "184884d296fa37a9136d534f6e82f037", "score": "0.692673", "text": "def test_get_subscription_ltv(self):\n pass", "title": "" }, { "docid": "bbf25f4e613795f5a50443622517ad52", "score": "0.6891517", "text": "def test_get_subscription_by_state(self):\n pass", "title": "" }, { "docid": "b537f3163372cdbf13cddb038977aa1c", "score": "0.68904585", "text": "def test_get_coupons_on_subscription(self):\n pass", "title": "" }, { "docid": "951af9e1d187acdcd4a221f5a42d196a", "score": "0.68747205", "text": "def mock_get_subscriptions(querystring=None, results=[]):\n url = 'http://sbm/api/v1/subscriptions/{}'.format(querystring)\n responses.add(\n responses.GET, url,\n json={\n \"next\": None,\n \"previous\": None,\n \"results\": results,\n }, status=200, content_type='application/json',\n match_querystring=bool(querystring))", "title": "" }, { "docid": "5b90bc15b1c7f3afb046b4baa3844a17", "score": "0.68743074", "text": "def test_add_subscription(self): \r\n # make sure you specify unique value for end point each time\r\n statuslink = self.ms.add_subscription(type='All',\r\n endpoint='http://myspace.si-sv2826.com/myposthandler17.ashx',\r\n query='{}',metadata='',batchsize='1000', \r\n rate= '100',format='application/atom+xml', addlist='[ ]',removelist='[ ]')\r\n\r\n self.assertTrue(len(statuslink)>=0) \r\n\r\n#def update_subscription(self, subcriptionid, type, endpoint, query, metadata, batchsize, rate, format, addlist, removelist):\r\n # def test_update_subscription(self):\r\n# \"\"\"Tests the calling of update_subscription with a valid parameters \r\n \"\"\" \r\n subscription = self.ms.get_allsubscription()\r\n \r\n statuslink = self.ms.update_subscription(subcriptionid='2803',type='ApplicationUsers',\r\n endpoint='http://myspace.si-sv3063.com/myposthandler4.ashx',\r\n query='{}',metadata='UserInfo,UserSubscribers,ApplicationData',batchsize='1000', \r\n rate= '100',format='application/atom+xml', addlist='[ ]',removelist='[ ]')\r\n self.assertTrue(len(statuslink)>=0)\"\"\" \r\n\r\n\r\n \r\n 
\r\n\r\n#def delete_subscription(self, subscription_id): \r\n# def update_subscription(self):\r\n \"\"\"Tests the calling of update_subscription with a valid parameters \r\n \"\"\"", "title": "" }, { "docid": "e25fc5d3bfb1c06a7ea3314a38193e9a", "score": "0.68715525", "text": "def test_get_subscription_by_account_id(self):\n pass", "title": "" }, { "docid": "37ab03733a152241ecfa7e37ef463ee9", "score": "0.6869181", "text": "def test_get_subscription(self):\n subscription = self.client.get_subscription(self.subscription)\n self.assertEqual(self.subscription, subscription.name)\n self.assertEqual(self.topic, subscription.topic)\n self.assertEqual(ACK_DEADLINE, subscription.ack_deadline)\n self.assertIsNone(subscription.labels)", "title": "" }, { "docid": "b1e5889adbf61b838320ad3aeb175c94", "score": "0.68103933", "text": "def test_get_subscriptions_by_successful_periods(self):\n pass", "title": "" }, { "docid": "887eb0082892382177fd4d257fb91149", "score": "0.68078965", "text": "def test_get(self):\n response = self._get()\n self.assertEqual(WithingsApi.list_subscriptions.call_count, 2)\n WithingsApi.list_subscriptions.assert_has_calls([\n mock.call(appli=appli) for appli in [1, 4]\n ])\n self.assertEqual(WithingsApi.unsubscribe.call_count, 2)\n WithingsApi.unsubscribe.assert_has_calls([\n mock.call('http://testserver/notification/%s/' % appli,\n appli=appli) for appli in [1, 4]\n ])\n self.assertRedirectsNoFollow(response,\n utils.get_setting('WITHINGS_LOGIN_REDIRECT'))\n self.assertEqual(WithingsUser.objects.count(), 0)", "title": "" }, { "docid": "211340b3589de2b03137d8c27d2f9008", "score": "0.6756452", "text": "def charge_subscription_get_all(context):\n return IMPL.charge_subscription_get_all(context)", "title": "" }, { "docid": "b17dc4663bd4a1e1dab42b130fa088fc", "score": "0.67081344", "text": "def test_get_invoices_for_subscription_by_state(self):\n pass", "title": "" }, { "docid": "11e8ca4bf73773e18e3214b2b8fb8bf6", "score": "0.6642381", "text": "def test_batch_create_subscriptions(self):\n pass", "title": "" }, { "docid": "f1c8b4ecded600005ce6463375a4ee97", "score": "0.66295594", "text": "def test_get_swagger_subscription(self):\n pass", "title": "" }, { "docid": "a6aa2c8f89b7b93d2c501835fe1a0a69", "score": "0.6613295", "text": "def test_advance_subscription(self):\n pass", "title": "" }, { "docid": "34ecb1b75251643f093172c6bfd9f0b3", "score": "0.649676", "text": "def test_get_subscription_by_product_id(self):\n pass", "title": "" }, { "docid": "d6c9abf4998fac2e11c533ccc8488ad0", "score": "0.64448774", "text": "def test_get_pricing_component_values_of_subscription(self):\n pass", "title": "" }, { "docid": "a605203fb3e97497f5f684f0d0a3ad00", "score": "0.6443752", "text": "def test_get_applicable_coupons_for_subscription(self):\n pass", "title": "" }, { "docid": "c90102e4ee215e20c7bb7336f9baad44", "score": "0.6428266", "text": "def test_get_subscription_by_version_id(self):\n pass", "title": "" }, { "docid": "e30ab49bdb3fd242b3d616babb5c89cd", "score": "0.6426897", "text": "def test_create_subscription(self):\n pass", "title": "" }, { "docid": "5ee171faab0bfb4b002f839e0c75fde4", "score": "0.6398452", "text": "def test_available_payment_methods_for_subscription(self):\n pass", "title": "" }, { "docid": "80c5f85a82bcf0b11bc19cf64e3a2d25", "score": "0.6370197", "text": "def test_subscription_functionality(self):\n # Create subscription\n # User A registers Subscription A.\n callback_url = os.path.join(vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,\n self._testMethodName)\n req_body = 
fake_vnflcm.Subscription.make_create_request_body(\n 'http://localhost:{}{}'.format(\n base.FAKE_SERVER_MANAGER_T1.SERVER_PORT_T1,\n callback_url))\n resp_t1, resp_body_t1 = self._register_subscription(req_body,\n self.http_client_tenant1)\n self.assertEqual(201, resp_t1.status_code)\n self.assert_http_header_location_for_subscription(\n resp_t1.headers)\n self.assert_notification_get(callback_url,\n base.FAKE_SERVER_MANAGER_T1)\n subscription_id_t1 = resp_body_t1.get('id')\n\n # User B registers Subscription B\n callback_url = os.path.join(vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,\n self._testMethodName)\n req_body_t2 = fake_vnflcm.Subscription.make_create_request_body(\n 'http://localhost:{}{}'.format(\n base.FAKE_SERVER_MANAGER_T2.SERVER_PORT_T2,\n callback_url))\n resp_t2, resp_body_t2 = self._register_subscription(\n req_body_t2, self.http_client_tenant2)\n self.assertEqual(201, resp_t2.status_code)\n self.assert_http_header_location_for_subscription(\n resp_t2.headers)\n self.assert_notification_get(callback_url,\n base.FAKE_SERVER_MANAGER_T2)\n subscription_id_t2 = resp_body_t2.get('id')\n\n # Show Subscription\n # User A gets information for Subscription A\n resp_t1, resp_body_show_t1 = self._wait_show_subscription(\n subscription_id_t1, self.tacker_client_t1)\n self.assert_subscription_show(resp_t1, resp_body_show_t1)\n\n # User B gets information for Subscription B\n resp_t2, resp_body_show_t2 = self._wait_show_subscription(\n subscription_id_t2, self.tacker_client_t2)\n self.assert_subscription_show(resp_t2, resp_body_show_t2)\n\n # User A fails to get information for Subscription B\n resp_tx1, resp_body_show_tx1 = self._wait_show_subscription(\n subscription_id_t2, self.tacker_client_t1)\n self.assertEqual(404, resp_tx1.status_code)\n\n # User B fails to get information for Subscription A\n resp_tx2, resp_body_show_tx2 = self._wait_show_subscription(\n subscription_id_t1, self.tacker_client_t2)\n self.assertEqual(404, resp_tx2.status_code)\n\n # List Subscription\n # User A gets subscription list\n resp, _ = self._list_subscription(self.tacker_client_t1)\n self.assertEqual(200, resp_t1.status_code)\n\n # Confirm subscription A\n filter_expr = {\n 'filter': \"filter=(eq,id,{})\".format(\n resp_body_show_t1.get('id'))}\n resp, subscription_body_t1 = self._list_subscription_filter(\n self.http_client_tenant1,\n params=filter_expr)\n self.assertEqual(200, resp.status_code)\n self.assertEqual(1, len(subscription_body_t1))\n\n # User B gets subscription list\n resp_t2, _ = self._list_subscription(\n self.tacker_client_t2)\n self.assertEqual(200, resp_t2.status_code)\n\n # Confirm subscription B\n filter_expr = {\n 'filter': \"filter=(eq,id,{})\".format(\n resp_body_show_t2.get('id'))}\n resp, subscription_body_t2 = self._list_subscription_filter(\n self.http_client_tenant2,\n params=filter_expr)\n self.assertEqual(200, resp.status_code)\n self.assertEqual(1, len(subscription_body_t2))\n\n # Delete subscription\n # User A fails to delete Subscription B\n resp, _ = self._delete_subscription(subscription_id_t2,\n self.tacker_client_t1)\n self.assertEqual(404, resp.status_code)\n\n # User B fails to delete Subscription A\n resp, _ = self._delete_subscription(subscription_id_t1,\n self.tacker_client_t2)\n self.assertEqual(404, resp.status_code)\n\n # User A deletes Subscription A\n self.addCleanup(self._delete_subscription,\n subscription_id_t1, self.tacker_client_t1)\n\n # User B deletes Subscription B\n self.addCleanup(self._delete_subscription,\n subscription_id_t2, 
self.tacker_client_t2)", "title": "" }, { "docid": "df3a983fc9e81b6de50dc5be4e097b6b", "score": "0.63078123", "text": "def test_update_subscription(self):\n pass", "title": "" }, { "docid": "828329278687b1c8a49ee71d6411bfc1", "score": "0.6226045", "text": "def test_get_subscription(self):\n with patch(\n \"stripe.Product.retrieve\",\n return_value=deepcopy(FAKE_PRODUCT),\n autospec=True,\n ):\n plan = Plan.sync_from_stripe_data(deepcopy(FAKE_PLAN))\n subscription = Subscription.sync_from_stripe_data(deepcopy(FAKE_SUBSCRIPTION))\n\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"plan\"], plan.pk)\n self.assertEqual(response.data[\"status\"], subscription.status)\n self.assertEqual(\n response.data[\"cancel_at_period_end\"], subscription.cancel_at_period_end\n )", "title": "" }, { "docid": "5134961cd9b1603ec99fdf9971eae920", "score": "0.6213407", "text": "def GetSubscriptionList(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "999fe3d2d5255aabbdee8859a005a07c", "score": "0.62132794", "text": "def test_get_subscriptions_by_period_start(self):\n pass", "title": "" }, { "docid": "f768ca094eeb76af8d60285656612319", "score": "0.62122726", "text": "def test_all(self):\n success, result = self.bb.service.all()\n self.assertTrue(success)\n self.assertIsInstance(result, list)", "title": "" }, { "docid": "47dc8172b55b05eea4ab303e3f762404", "score": "0.61648226", "text": "def test_get_subscriptions_by_initial_period_start(self):\n pass", "title": "" }, { "docid": "4b0fe795d52d69a2fdd5555fe738ace0", "score": "0.6163586", "text": "def test_get_subscriptions_by_period_end(self):\n pass", "title": "" }, { "docid": "804664a2c8f1bdc80e481ec67cd5da23", "score": "0.6157448", "text": "def test_get_children_of_subscription(self):\n pass", "title": "" }, { "docid": "358b351d62a09248bc9cf6adb08734bf", "score": "0.61552924", "text": "def test_subscription_fail(self):\n qset_id = 10 # Qset 4-1 from the mockups\n # Subscribe current user to the selected qset\n response = self.change_qset_subscription(self, qset_id, 'subscribe').json()\n self.assertEqual(response['result'], 'fail')\n self.assertEqual(\n response['url'],\n reverse(\n 'askup:qset_subscription',\n kwargs={'qset_id': qset_id, 'subscribe': 'subscribe'}\n )\n )\n\n # Unsubscribe current user to the selected qset\n response = self.change_qset_subscription(self, qset_id, 'unsubscribe').json()\n self.assertEqual(response['result'], 'fail')\n self.assertEqual(\n response['url'],\n reverse(\n 'askup:qset_subscription',\n kwargs={'qset_id': qset_id, 'subscribe': 'unsubscribe'}\n )\n )", "title": "" }, { "docid": "1b99a02027374d1bc3ad7fd334871508", "score": "0.61505866", "text": "def test_create_subscription_v2(self):\n pass", "title": "" }, { "docid": "fbdf30511c9b3cd2f621dbfec8b7e5bf", "score": "0.61085135", "text": "def test_list_topic_subscriptions(self):\n expected = []\n for i in range(5):\n subscription = pubsub.subscription_name(PROJECT_NAME, 'sub-{}'.format(i))\n expected.append(subscription)\n self.client.create_subscription(subscription, self.topic)\n\n expected.append('projects/fake-project/subscriptions/subscription')\n\n subscriptions = list(self.client.list_topic_subscriptions(self.topic))\n self.assertCountEqual(expected, subscriptions)\n\n # Note: Page size appears to be ignored by the emulator. 
Even when creating\n # large amounts of topics to force paging, the nextPageToken returned is\n # buggy and results in infinite loops.\n subscriptions = list(\n self.client.list_topic_subscriptions(self.topic, page_size=1))\n self.assertCountEqual(expected, subscriptions)", "title": "" }, { "docid": "03617fa1ea7ac7c2c6fada311faf5a97", "score": "0.6104637", "text": "def test_get_metadata_for_subscription(self):\n pass", "title": "" }, { "docid": "5a4e31fce40f2368225057b30e214716", "score": "0.6093865", "text": "def test_get_subscribed_services(requests_mock):\n from CybleEventsV2 import Client, fetch_subscribed_services_alert\n\n mock_response_1 = util_load_json(\"dummy_fetch_subscribed_services.json\")\n requests_mock.get('https://test.com/apollo/api/v1/y/services', json=mock_response_1)\n\n client = Client(\n base_url='https://test.com',\n verify=False\n )\n response = fetch_subscribed_services_alert(client, 'GET', 'https://test.com', \"some_random_token\").outputs\n assert isinstance(response, list)\n assert response[0]['name'] == 'name_1'", "title": "" }, { "docid": "9d2cb350b817dd8a33bd8a02f116aee1", "score": "0.6081007", "text": "def test_invoice_charges_on_subscription(self):\n pass", "title": "" }, { "docid": "5d0db26ac6a44a2ec80358c95838d599", "score": "0.60548925", "text": "def test_update_subscription_v2(self):\n pass", "title": "" }, { "docid": "6c58d7960cbc3131940e6850f1822a74", "score": "0.60400057", "text": "def test_revive_subscription(self):\n pass", "title": "" }, { "docid": "1b123a156ff8d507361a587618c7c174", "score": "0.60382575", "text": "def test_free_subs_search_like(self):\n # 扬州楚门机电设备制造有限公司\n resp = self.client.free_subscribers_search(sub_firm_ids=\"[307823]\", contact_info=\"151\")\n self.assertEqual(resp.code, 10000)\n self.assertEqual(1, resp.data.page)\n self.assertEqual(10, resp.data.per_page)\n self.assertEqual(\"desc\", resp.data.order[0][\"updated_at\"])\n self.assertGreater(resp.data.total, 0)\n self.assertIsNotNone(resp.data.list)", "title": "" }, { "docid": "31863d33aaa9cf5228cf1fca08887400", "score": "0.5966836", "text": "def test_import_subscription(self):\n pass", "title": "" }, { "docid": "c49c4014d394293aad75e3935bd55575", "score": "0.59595954", "text": "def test_get_all_product(self):\n # test admin can get all productds\n response = self.client.post(\n '/self.base_url/products',\n data = json.dumps(self.test_product6),\n headers=dict(Authorization=\"Bearer \" + self.owner_token),\n content_type = 'application/json'\n )\n\n response = self.client.get(\n '/self.base_url/products',\n headers=dict(Authorization=\"Bearer \" + self.owner_token),)\n response_data = json.loads(response.data)\n self.assertEqual(response_data[\"message\"],\"These products are available\")\n self.assertEqual(response.status_code, 200)\n\n # test attendant can get all products", "title": "" }, { "docid": "1f45ee3c1cc6ed9d2b694a683b3e519c", "score": "0.59485185", "text": "def test_club_subscription(self):\n self.client.login(username=self.user5.username, password=\"test\")\n\n resp = self.client.get(reverse(\"clubs-subscription\", args=(self.club1.code,)))\n self.assertIn(resp.status_code, [200, 201], resp.content)", "title": "" }, { "docid": "86eb72e2e844731380f89334eb2bc7b6", "score": "0.5888972", "text": "def test_create_aggregating_subscription(self):\n pass", "title": "" }, { "docid": "5e3b13e45a8a2f94b5192ae231a06b78", "score": "0.5888732", "text": "def test_get_timers_for_subscription(self):\n pass", "title": "" }, { "docid": "312a6b8b311411615cd35737d9e9e5d5", "score": 
"0.5868155", "text": "def check_subscription(request, action):\n # check the base subscription\n check_base_subscription(\"privacyidea\", max_free_subscriptions=2)", "title": "" }, { "docid": "fbee27547e70c444eb312047b7970282", "score": "0.58661014", "text": "def test_get_all(self, setup):\n response = setup.send_request.get(setup.session)\n response_status_code = _get_status_code(response)\n response_ok = _get_key_value_from_response(response, 'ok')\n response_data = _get_key_value_from_response(response, 'data')\n\n assert response_status_code == 200\n assert response_ok is True\n assert len(response_data) == 3", "title": "" }, { "docid": "e78a46942ac1d96175a5b87a02a174a1", "score": "0.5865337", "text": "def test_get_api_sub_resources(self):\n pass", "title": "" }, { "docid": "af438f15e3fbc1a48e2fb5473f714731", "score": "0.5859471", "text": "def get_subscriptions(id=None):\n page, per_page = get_page_from_args()\n sort_by = request.args.get('sort_by')\n is_desc = parse_int(request.args.get('is_desc'))\n\n items = UserSubscription.get_items(id=id, page=page, per_page=per_page, sort_by=sort_by, is_desc=is_desc)\n\n return res([item.as_dict() for item in items])", "title": "" }, { "docid": "7d24b5d7e7b2fd70264f3d8ae8246ad9", "score": "0.58467346", "text": "def get_my_subscriptions(self):\n return self.client.get(\n reverse(\n 'askup:my_subscriptions',\n kwargs={}\n )\n )", "title": "" }, { "docid": "a00a51633d380743f66d42c843e474fa", "score": "0.5838355", "text": "def test_create_customer_subscriptions(self):\n\n payload = {\n 'name': 'Moreno Cunha',\n 'email': '[email protected]',\n 'subscription_type': 'free'}\n\n self.client.post(reverse('api:subscriptions'), payload)\n\n exists = Subscription.objects.filter(\n name='Moreno Cunha', email='[email protected]',\n subscription_type='free').exists()\n self.assertTrue(exists)", "title": "" }, { "docid": "5bcb89cf4134cbe214fb5c6cd40380b0", "score": "0.5832518", "text": "def test_cs_subscription_example():\n\n assert run_subscription_example().get('status').upper() == 'ACTIVE'", "title": "" }, { "docid": "fd9e9d85641025ad613697b89af7451c", "score": "0.5819138", "text": "def test_get_all_consent_requests(self):\n pass", "title": "" }, { "docid": "9478acb13f007fb151a7aaf4eadf8825", "score": "0.58038056", "text": "def test_get_all_orders(self):\n\n res = self.get_all_orders()\n self.assertEqual(\n res['message'],\n \"All pending orders have been fetched successfully\")", "title": "" }, { "docid": "a8161e5e8daeed117375bf8402ae7eb9", "score": "0.5803559", "text": "async def read_subscriptions(token: str):\n login = await verify_token(token)\n if type(login) is JSONResponse:\n return login\n\n subscription_list = await Subscriptions.filter(merchant_id=login.user_id).all().order_by('expiration_date')\n\n if subscription_list is None:\n return JSONResponse(\n status_code=status.HTTP_400_BAD_REQUEST,\n content={\"error\":f\"No subscriptions found for your account {login.user_id}\"}\n )\n \n return JSONResponse(\n status_code=status.HTTP_200_OK,\n content={\"subscriptions\": populate_subscription_return(subscription_list)}\n )", "title": "" }, { "docid": "0b4212aa3721a865ff43a7cdf59eced9", "score": "0.58017814", "text": "def test_admin_change_subscription_both(self):\n\n registrant_id = \"mother01-63e2-4acc-9b94-26663b9bc267\"\n subscription_id = \"sub12312-63e2-4acc-9b94-26663b9bc267\"\n messageset_name = \"momconnect_prebirth.hw_full.1\"\n language = \"zul_ZA\"\n\n mock_get_subscription(subscription_id, registrant_id)\n 
mock_deactivate_subscriptions([subscription_id])\n mock_search_messageset(32, messageset_name)\n\n change_data = {\n \"registrant_id\": registrant_id,\n \"action\": \"admin_change_subscription\",\n \"data\": {\n \"messageset\": messageset_name,\n \"subscription\": subscription_id,\n \"language\": language\n },\n \"source\": self.make_source_normaluser()\n }\n change = Change.objects.create(**change_data)\n\n validate_implement(change.id)\n change.refresh_from_db()\n\n self.assertTrue(change.validated)\n\n s = SubscriptionRequest.objects.last()\n self.assertEqual(s.identity, registrant_id)\n self.assertEqual(s.messageset, 32)\n self.assertEqual(s.lang, language)", "title": "" }, { "docid": "4b63d2d01d1d131012eb8409dad059b9", "score": "0.57788706", "text": "def test_attendant_get_all_product(self):\n response = self.client.get(\n '/self.base_url/products',\n headers=dict(Authorization=\"Bearer \" + self.attendant_token),)\n response_data = json.loads(response.data)\n self.assertEqual(response_data[\"message\"],\"These products are available\")\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "171025d8b149ecb1c42a4738435c0487", "score": "0.5770934", "text": "def test_getall0(self):\n pass", "title": "" }, { "docid": "94dc47f6243dcbe3b974087ed88910e0", "score": "0.5767596", "text": "def test_get_subscription_by_product_rate_plan_id(self):\n pass", "title": "" }, { "docid": "ebcca3e0982ea4f30a1929f03b63d988", "score": "0.57669497", "text": "def test_freeze_subscription(self):\n pass", "title": "" }, { "docid": "e90a675fa6239d5ee0a59e710be9443c", "score": "0.57557905", "text": "def test_free_subs_search_order(self):\n # 扬州楚门机电设备制造有限公司\n resp = self.client.free_subscribers_search(sub_firm_ids=\"[307823]\",\n order=\"[{\\\"last_login_at\\\":\\\"asc\\\"}]\")\n self.assertEqual(resp.code, 10000)\n self.assertEqual(1, resp.data.page)\n self.assertEqual(10, resp.data.per_page)\n self.assertEqual(\"asc\", resp.data.order[0][\"last_login_at\"])\n self.assertGreater(resp.data.total, 0)\n self.assertIsNotNone(resp.data.list)", "title": "" }, { "docid": "0fcb37a71defba167840ea6c0aa01ba1", "score": "0.5735526", "text": "def _get_valid_subscriptions(self):\n\n return [subscription for subscription in self.subscriptions.all() if subscription.is_valid()]", "title": "" }, { "docid": "2e00de76f1522e31e862f04c53b69b56", "score": "0.5729142", "text": "def test_subscription(self):\n # switch plan: check that mandatory lines have been modified accordingly\n self.contract.change_subscription(self.contract_tmpl_2.id)\n self.assertEqual(self.contract.template_id.id, self.contract_tmpl_2.id, 'website_contract: template not changed when changing subscription from the frontend')\n self.assertEqual(len(self.contract.recurring_invoice_line_ids), 2, 'website_contract: number of lines after switching plan does not match mandatory lines of new plan')\n self.assertEqual(self.contract.recurring_total, 650, 'website_contract: price after switching plan is wrong')\n\n # add option\n self.contract.add_option(self.contract_tmpl_2.option_invoice_line_ids.id)\n self.assertEqual(len(self.contract.recurring_invoice_line_ids), 3, 'website_contract: number of lines after adding option does not add up')\n self.assertEqual(self.contract.recurring_total, 850, 'website_contract: recurring price after adding option is wrong')\n\n # switch back: option should be preserved, other lines should have been changed\n self.contract.change_subscription(self.contract_tmpl_1.id)\n 
self.assertEqual(len(self.contract.recurring_invoice_line_ids), 2, 'website_contract: number of lines after switching plan twice does add up')\n self.assertEqual(self.contract.recurring_total, 70, 'website_contract: recurring price after switching plan twice is wrong')", "title": "" }, { "docid": "76e057ef86e3d0f1eb45c91882a91b55", "score": "0.57045346", "text": "def test_cancel_subscription(self):\n pass", "title": "" }, { "docid": "dc666e7903f8232342ce379492c04519", "score": "0.5704489", "text": "def list_subscriptions(self,topic=None):\n try:\n if topic:\n subscriptions = topic.subscriptions.all()\n else:\n subscriptions = self.sns.subscriptions.all()\n logger.info( 'got subscriptions' )\n except ClientError as e:\n logger.exception( f\"couldn't get subscriptions\" )\n raise\n else:\n return subscriptions\n # while subscriptions :\n # next(subscriptions)", "title": "" }, { "docid": "9a2b26ca525b691816fc532bb183ee23", "score": "0.56908375", "text": "def test_create_subscription_session(self):\n pass", "title": "" }, { "docid": "13db7ed9c726c27ec65cb119cc477d91", "score": "0.5688548", "text": "async def __list_communication_service_by_subscription(clients, args):\n print(\"\\nList by subscription...\")\n\n resources = clients.acs_client.communication_service.list_by_subscription()\n print(\"Found resources: \")\n async for resource in resources:\n print(\"\")\n __print_resource(resource)", "title": "" }, { "docid": "d83e136ca9e5311378f782b474542a51", "score": "0.5683679", "text": "def test_get_transcription(self):\n pass", "title": "" }, { "docid": "cdb2212c463e323801d694eb47a76138", "score": "0.56770825", "text": "def _get_subscriptions(self):\n try:\n sub_client = SubscriptionClient(self._credentials)\n sub_list = sub_client.subscriptions.list()\n\n monitor_attributes = ('log_profile',)\n\n tenant = self._tenant\n for sub_index, sub in enumerate(sub_list):\n sub = sub.as_dict()\n _log.info('Found %s', util.outline_az_sub(sub_index,\n sub, tenant))\n # Each record type for each subscription is a unit of\n # work that would be fed to _get_resources().\n for attribute_type in monitor_attributes:\n if attribute_type == 'log_profile':\n sub['locations'] = []\n locations = sub_client.subscriptions. \\\n list_locations(sub.get('subscription_id'))\n for location in locations:\n sub['locations'].append(location.as_dict()\n .get('name'))\n yield (attribute_type, sub_index, sub)\n\n # Break after pulling data for self._max_subs number of\n # subscriptions. 
Note that if self._max_subs is 0 or less,\n # then the following condition never evaluates to True.\n if sub_index + 1 == self._max_subs:\n _log.info('Stopping subscriptions fetch due to '\n '_max_subs: %d; tenant: %s', self._max_subs,\n self._tenant)\n break\n\n except Exception as e:\n _log.error('Failed to fetch subscriptions; %s; error: %s: %s',\n util.outline_az_sub(sub_index, sub, tenant),\n type(e).__name__, e)", "title": "" }, { "docid": "51adf1b6e04f2850c78af4132671d3b3", "score": "0.5675201", "text": "def test_get_all_suppliers_note(self):\n response = self.query_with_token(\n self.access_token,\n all_suppliers_note)\n self.assertIn(\"allSuppliersNote\", response[\"data\"])\n self.assertNotIn(\"errors\", response)", "title": "" }, { "docid": "90d9e3693053287f60a9006c98b2f2c9", "score": "0.56663823", "text": "def querySubscriptions(self, pagesize=1000):\n # noinspection PyProtectedMember\n func = sys._getframe().f_code.co_name # pylint: disable=protected-access\n logging.info(f'Querying list of subscriptions on `Tableau REST API`')\n\n url = f'{self.baseapi}/sites/{self.site}/subscriptions'\n\n subscriptions = dict()\n\n done, totalsize, pagenumber = False, 0, 1\n while not done:\n paged = f'{url}?pageSize={pagesize}&pageNumber={pagenumber}'\n\n request = self.session.get(paged)\n response = Response(request, func)\n\n pagenumber += 1\n totalsize += response.pagination.pageSize\n done = response.pagination.totalAvailable <= totalsize\n\n for subscription in response.body['sites']['site']:\n subscriptionid = subscription['id']\n subscriptions[subscriptionid] = subscription\n\n logging.debug(f'Found {len(subscriptions)} subscriptions on `Tableau REST API`')\n\n return subscriptions", "title": "" }, { "docid": "a6f4eb7cc1295c48f143fcef2ef3f2c5", "score": "0.5647221", "text": "def test_list_all(self):\n request = self.build_config_policy_object()\n response = request.list_all()\n assert \"id\" in response[0]", "title": "" }, { "docid": "09f3120f7cff19e6584f2b661b5d6253", "score": "0.56320745", "text": "def test_get_all(self):\n response = self.app.get(QUIZ_URL)\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 200)", "title": "" }, { "docid": "86e07f4ab35e3b02ee7a8a6513fb0e6b", "score": "0.562724", "text": "def test_get_subscribed_services_for_other_alert(requests_mock):\n from CybleEventsV2 import Client, fetch_subscribed_services_alert\n\n mock_response_1 = util_load_json(\"dummy_fetch_subscribed_services.json\")\n requests_mock.get('https://test.com/apollo/api/v1/y/services', json=mock_response_1)\n\n client = Client(\n base_url='https://test.com',\n verify=False\n )\n response = fetch_subscribed_services_alert(client, 'GET', 'https://test.com', \"some_random_token\").outputs\n assert isinstance(response, list)\n assert response[0]['name'] == 'name_1'", "title": "" }, { "docid": "192710eb6501882ad4e1e93026be3581", "score": "0.5621249", "text": "def test_get_all_users(self):\n pass", "title": "" }, { "docid": "d116a368cf13dfcb1f3740c219e50229", "score": "0.56132996", "text": "def test_get_all_items(self):\n response = self.client.open(\n '/api/v1/item',\n method='GET',\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "title": "" }, { "docid": "86ea53539ee6ec1a5fe009ca289e48c6", "score": "0.5591664", "text": "def test_get_all_succeeds(test_client):\n \n response = test_client.get('/api/v1/documents')\n assert response.status_code == 200", "title": "" }, { "docid": 
"ab6326271adb96e7b665f50c2ce8a79c", "score": "0.5583385", "text": "def test_get_payments(self):\n pass", "title": "" }, { "docid": "c939d4cb564cb1d17b99fd15f4782ffe", "score": "0.5571007", "text": "def GetSubscription(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "title": "" }, { "docid": "d1f87cada139341db0ec32ecd50fe964", "score": "0.5562697", "text": "def test_get_rates(country_list, rate_data):\n assert get_rates(country_list) == rate_data", "title": "" }, { "docid": "be305b075db2da51b02934f2dbfe2b17", "score": "0.5559691", "text": "def get_all(self, offset=0, page_size=1000, sort_by=None, sort_order='ASC'):\n query_params = {\"offset\": offset, \"pageSize\": page_size, \"sortOrder\": sort_order,\n \"sortBy\": sort_by}\n return self.carol.call_api('v2/subscription', method='GET' ,params=query_params)['hits']", "title": "" }, { "docid": "14e3d525e0837172887a19bfc1b47fbd", "score": "0.55579585", "text": "def GET(self, name):\n header('Content-Type', 'application/x-json-stream')\n try:\n for subscription in list_subscriptions(name=name, vo=ctx.env.get('vo')):\n yield dumps(subscription, cls=APIEncoder) + '\\n'\n except SubscriptionNotFound as error:\n raise generate_http_error(404, 'SubscriptionNotFound', error.args[0])\n except Exception as error:\n raise InternalError(error)", "title": "" }, { "docid": "bad556154d95482e30592fac33d9e7cf", "score": "0.55492437", "text": "def test_get_parent_subscription(self):\n pass", "title": "" }, { "docid": "d23066ad1ece5d6a74377f0ba022cf0b", "score": "0.5543195", "text": "def __call__(\n self,\n request: pubsub.ListTopicSubscriptionsRequest,\n *,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Optional[float] = None,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> pubsub.ListTopicSubscriptionsResponse:\n\n http_options: List[Dict[str, str]] = [\n {\n \"method\": \"get\",\n \"uri\": \"/v1/{topic=projects/*/topics/*}/subscriptions\",\n },\n ]\n request, metadata = self._interceptor.pre_list_topic_subscriptions(\n request, metadata\n )\n pb_request = pubsub.ListTopicSubscriptionsRequest.pb(request)\n transcoded_request = path_template.transcode(http_options, pb_request)\n\n uri = transcoded_request[\"uri\"]\n method = transcoded_request[\"method\"]\n\n # Jsonify the query params\n query_params = json.loads(\n json_format.MessageToJson(\n transcoded_request[\"query_params\"],\n including_default_value_fields=False,\n use_integers_for_enums=True,\n )\n )\n query_params.update(self._get_unset_required_fields(query_params))\n\n query_params[\"$alt\"] = \"json;enum-encoding=int\"\n\n # Send the request\n headers = dict(metadata)\n headers[\"Content-Type\"] = \"application/json\"\n response = getattr(self._session, method)(\n \"{host}{uri}\".format(host=self._host, uri=uri),\n timeout=timeout,\n headers=headers,\n params=rest_helpers.flatten_query_params(query_params, strict=True),\n )\n\n # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception\n # subclass.\n if response.status_code >= 400:\n raise core_exceptions.from_http_response(response)\n\n # Return the response\n resp = pubsub.ListTopicSubscriptionsResponse()\n pb_resp = pubsub.ListTopicSubscriptionsResponse.pb(resp)\n\n json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)\n resp = self._interceptor.post_list_topic_subscriptions(resp)\n return resp", "title": "" }, { "docid": "46b549654d2ce109ea7f6f27aeb25df5", "score": "0.5542327", "text": "def test_subscribe(self):\n user_id = 321702331\n 
expected_added_subscription = user_id\n\n added_subscription = self.client.subscribe(user_id)\n\n self.assertEqual(expected_added_subscription, added_subscription)", "title": "" }, { "docid": "fa9fb49ad2f1ff339e68cfcd11dd00d6", "score": "0.5541045", "text": "def test_get_all(self):\n kwargs = self._create_save_to_db()\n\n response = self.client.get(url_for(BIOSPECIMENS_LIST_URL),\n headers=self._api_headers())\n self.assertEqual(response.status_code, 200)\n response = json.loads(response.data.decode(\"utf-8\"))\n content = response.get('results')\n self.assertEqual(len(content), 1)", "title": "" }, { "docid": "cd26df9f77291c435e91bb77f8bacaeb", "score": "0.5538884", "text": "def check_subs(client):\n wcount=0\n while wcount<10:\n for t in client.topic_ack:\n wcount+=1\n if t[2]==0:\n logging.info(\"subscription to \"+str(t[0]) +\" not acknowledged\")\n break\n print(\"All subs acknowledged\")\n return True\n time.sleep(1)\n if not client.running_loop:\n client.loop(.01) #check for messages manually\n return False", "title": "" }, { "docid": "f91aee543291badba61b388a9fb69030", "score": "0.551236", "text": "def test_fetch_all(self):\n client = self.api_client\n response = client.get(reverse('concert-api-list'))\n assert response.status_code == status.HTTP_200_OK\n assert Concert.objects.count() == len(response.data)", "title": "" }, { "docid": "c66a9d2db867a5794fdd0ad4e602fb27", "score": "0.55123234", "text": "def test_add_charge_to_subscription(self):\n pass", "title": "" }, { "docid": "1477c080aafe6c58f7be25916df1910f", "score": "0.5509675", "text": "def test_cmd_list_all(quiet, values, expected_calls, temp_cfg):\n # ska-sdp uses -R=True, and is not changeable there, so we only test that here\n # -R=False doesn't behave well, if we want -R=False,\n # that will need to be added and tested separately\n args = {\n \"-R\": True,\n \"--quiet\": quiet,\n \"--values\": values,\n \"pb\": None,\n \"workflow\": None,\n \"<date>\": None,\n \"<type>\": None,\n }\n\n path = f\"{PREFIX}/\"\n\n with patch(\"logging.Logger.info\") as mock_log:\n for txn in temp_cfg.txn():\n cmd_list(txn, path, args)\n\n assert mock_log.call_args_list == expected_calls", "title": "" }, { "docid": "855901b069c41d50188c53754b2867c8", "score": "0.55059683", "text": "def test_api_can_get_all_users(self):\n res = self.client().post('/doRegistration/', data=self.user_data)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/getRegistration/')\n self.assertEqual(res.status_code, 200)\n self.assertIn('Testing Application', str(res.data))", "title": "" }, { "docid": "dabe1a53856e6a4b9e8006871f7dc10a", "score": "0.55029446", "text": "def test_flow(self):\n subscribed_channels = []\n self.register_user()\n self.wait_for_es()\n for cat_id in self.get_cat_ids():\n popular_channels = self.get(self.urls['popular_channels'], dict(category=cat_id))\n if popular_channels['channels']['total'] == 0:\n continue\n channel = popular_channels['channels']['items'][0]\n if channel['id'] not in subscribed_channels:\n self.post(self.urls['subscriptions'], channel['resource_url'], token=self.token)\n subscribed_channels.append(channel['id'])\n\n self.assertGreater(len(subscribed_channels), 0)\n subscriptions = self.get(self.urls['subscriptions'], token=self.token)\n self.assertItemsEqual(subscribed_channels, [c['id'] for c in subscriptions['channels']['items']])\n videos = self.get(self.urls['subscriptions'] + 'recent_videos/', token=self.token)\n self.assertGreater(len(videos['videos']['items']), 0)\n 
self.assertIn(videos['videos']['items'][0]['channel']['id'], subscribed_channels)", "title": "" }, { "docid": "24df1a0609795b3508bbe65f7f645c11", "score": "0.55003697", "text": "def test_get_service_accounts_using_get(self):\n pass", "title": "" }, { "docid": "3e4c21b2ca537c617f64e34362a503dc", "score": "0.5499632", "text": "def get_call_event_subscription(self, subscription_id):\n return self.request(['call', subscription_id], method='GET')", "title": "" }, { "docid": "48f0c01133497e1e59d47a03b6eac625", "score": "0.54984605", "text": "def test_admin_booking(call_center):", "title": "" }, { "docid": "da97a02a449e0885a03879d9d7f78d59", "score": "0.5493354", "text": "def test_migrate_subscription(self):\n pass", "title": "" }, { "docid": "3bf95898e68f323c09648d196191f7e1", "score": "0.54920554", "text": "def get_subscriptions(logger: Callable = logging.log) -> list[dict]:\n subscriptions = []\n try:\n sub_dict = {3: []}\n # Get the list of subscriptions. The default priority of the subscription is 3. 0 is the highest priority, 5 the lowest\n # The priority is defined as 'policyid'\n logger(logging.DEBUG, \"Listing active subscriptions\")\n for sub in list_subscriptions(None, None):\n rse_expression = sub.get(\"rse_expression\")\n skip_sub = False\n rules = loads(sub[\"replication_rules\"])\n overwrite_rules = False\n for rule in rules:\n rse_expression = rule.get(\"rse_expression\")\n try:\n list_rses_from_expression = parse_expression(rse_expression)\n except InvalidRSEExpression:\n logger(\n logging.ERROR,\n \"Invalid RSE expression %s for subscription %s. Subscription removed from the list\",\n rse_expression,\n sub[\"id\"],\n )\n skip_sub = True\n break\n if rule.get(\"copies\") == \"*\":\n rule[\"copies\"] = len(list_rses_from_expression)\n overwrite_rules = True\n if skip_sub:\n continue\n if overwrite_rules:\n sub[\"replication_rules\"] = dumps(rules)\n if (\n sub[\"state\"] != SubscriptionState.INACTIVE\n and sub[\"lifetime\"]\n and (datetime.now() > sub[\"lifetime\"])\n ):\n update_subscription(\n name=sub[\"name\"],\n account=sub[\"account\"],\n metadata={\"state\": SubscriptionState.INACTIVE},\n )\n\n elif sub[\"state\"] in [SubscriptionState.ACTIVE, SubscriptionState.UPDATED]:\n priority = 3\n if \"policyid\" in sub:\n if int(sub[\"policyid\"]) not in sub_dict:\n sub_dict[int(sub[\"policyid\"])] = []\n priority = int(sub[\"policyid\"])\n sub_dict[priority].append(sub)\n priorities = list(sub_dict.keys())\n priorities.sort()\n # Order the subscriptions according to their priority\n for priority in priorities:\n subscriptions.extend(sub_dict[priority])\n logger(logging.INFO, \"%i active subscriptions\", len(subscriptions))\n except SubscriptionNotFound as error:\n logger(logging.WARNING, \"No subscriptions defined: %s\" % (str(error)))\n return []\n except TypeError as error:\n logger(\n logging.ERROR,\n \"Failed to parse subscription: %s\" % (str(error)),\n )\n raise error\n except Exception as error:\n logger(\n logging.ERROR,\n \"Failed to get list of new DIDs or subscriptions: %s\" % (str(error)),\n )\n raise error\n return subscriptions", "title": "" } ]
f6116501c576aef1766a2b274ce6c2b5
Execute the class method.
[ { "docid": "ed0051e6e4d520d78d368835e5a53e16", "score": "0.0", "text": "async def execute(self, sleep: float):\n assert not self.run.locked()\n async with self.run:\n await asyncio.sleep(sleep)", "title": "" } ]
[ { "docid": "d5c5692e0ae74989a1da18ba58c1b49d", "score": "0.7394682", "text": "def execute(cls):\n pass", "title": "" }, { "docid": "c93d9f0cf6afbe6353f4f3399cc31b0e", "score": "0.71576184", "text": "def execute(self):\n raise NotImplementedError(\"Subclasses should override this method.\")", "title": "" }, { "docid": "315375b347891b7be2bbffa5a09769df", "score": "0.7065444", "text": "def execute(self) :\n \n raise NotImplementedError()", "title": "" }, { "docid": "8e532913952c21798b1f1018feb3f63b", "score": "0.69223624", "text": "def execute(self):\r\n pass", "title": "" }, { "docid": "204ef4c8a0e6ccb7fda7bf1f31112c29", "score": "0.68912506", "text": "def execute(self):\n raise NotImplementedError", "title": "" }, { "docid": "204ef4c8a0e6ccb7fda7bf1f31112c29", "score": "0.68912506", "text": "def execute(self):\n raise NotImplementedError", "title": "" }, { "docid": "38e8908a54ab7feb52c663b633853e8e", "score": "0.6890188", "text": "def execute(self):\n raise NotImplementedError('You must implement the execute() method '\n 'yourself!')", "title": "" }, { "docid": "12998895ee372a031a9db774819d47b9", "score": "0.68483585", "text": "def execute(self) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "e785883f0ece55d7c94a8b2a606ff194", "score": "0.6817439", "text": "def execute(self) -> None:\n pass # Implement in Executors", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.6799214", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.6799214", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.6799214", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.6799214", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.6799214", "text": "def execute(self):\n pass", "title": "" }, { "docid": "fd928676574b751b8daada7007442d76", "score": "0.6799214", "text": "def execute(self):\n pass", "title": "" }, { "docid": "6f1f5650a54c7a2e6c3721682da5ab45", "score": "0.6796526", "text": "def execute(self):\n\n pass", "title": "" }, { "docid": "6f1f5650a54c7a2e6c3721682da5ab45", "score": "0.6796526", "text": "def execute(self):\n\n pass", "title": "" }, { "docid": "6f1f5650a54c7a2e6c3721682da5ab45", "score": "0.6796526", "text": "def execute(self):\n\n pass", "title": "" }, { "docid": "6f1f5650a54c7a2e6c3721682da5ab45", "score": "0.6796526", "text": "def execute(self):\n\n pass", "title": "" }, { "docid": "5ade46760e9bb129024d43e72ab312a6", "score": "0.6786255", "text": "def execute(self):\n raise NotImplementedError('execute')", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.67551124", "text": "def execute(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.67551124", "text": "def execute(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.67551124", "text": "def execute(self):", "title": "" }, { "docid": "76ec176ecb93566cbc70a4be45df2700", "score": "0.67551124", "text": "def execute(self):", "title": "" }, { "docid": "350b5cb552b5faa1d9cf692b4e65ffc3", "score": "0.67356247", "text": "def __call__(self, *args, **kwargs):\n return self.method(*args, **kwargs)", "title": "" }, { "docid": "79622c3ba53c2398bb43106a6b3befdc", "score": "0.6701079", "text": "def method(self):", "title": "" }, { "docid": "3ffa3d98481498f751adb6e971111a97", "score": 
"0.6698436", "text": "def execute(self):\n\t\tpass", "title": "" }, { "docid": "b362824fe911e8427fbc79c837ebcb53", "score": "0.66549635", "text": "def _execute(self, _):\r\n pass", "title": "" }, { "docid": "e77a7613059080aae384ad092c702257", "score": "0.6652071", "text": "def run(self):\n raise NotImplementedError # implement in subclass", "title": "" }, { "docid": "7485b2034293fa784fe98053482829ea", "score": "0.66235757", "text": "def perform(self) -> None:\n raise NotImplementedError()", "title": "" }, { "docid": "f83549688717edec3037194fe18f12a1", "score": "0.66159034", "text": "def _run(self):\n raise NotImplemented(\"Abstract method '_run' need to be defined\")", "title": "" }, { "docid": "9127f479a5b8a76a1b5a940ab28556df", "score": "0.6611694", "text": "def run(self):\n method = self.getMethod()\n fce = getattr(self, method)\n return fce(*self.argv[1:])", "title": "" }, { "docid": "a71c43016400ffd609b200e09aeb88b7", "score": "0.65826094", "text": "def perform(self):\n raise NotImplementedError", "title": "" }, { "docid": "74fecfd172c1683da3497401716bfa4c", "score": "0.65718627", "text": "def run(self):\n raise NotImplementedError(\"Subclass must implement abstract method\")", "title": "" }, { "docid": "e7a1e4c2455638b18ff9fd4dbc4fad03", "score": "0.65537137", "text": "def run(self):\n self.method(*self.args)\n self._schedule()", "title": "" }, { "docid": "681becc7d6a23b21f126c1be7a4a4ad4", "score": "0.65307975", "text": "def _run(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "32c26406e2ef912be5d432e781dcc427", "score": "0.65041566", "text": "def __call__( self ):\n pass", "title": "" }, { "docid": "960cb63f94fcd20204b8a7788e7496d3", "score": "0.6490885", "text": "def run(self):\n raise NotImplementedError(\"Subclasses mut override run()\")", "title": "" }, { "docid": "d4c55815f3018c0a90138e84948d43dc", "score": "0.64826524", "text": "def run(self):\n raise NotImplementedError('Run method not implemented in %s' % type(self).__name__)", "title": "" }, { "docid": "63e3f3752c311d1d38277c5c7743c2bd", "score": "0.6477336", "text": "def execute(self, outer_instance):\n raise NotImplementedError", "title": "" }, { "docid": "bc538255f4bd249692db8ba9e4693bcb", "score": "0.6460434", "text": "def __call__( self, *args, **kw ):\n return self.run( *args, **kw )", "title": "" }, { "docid": "187c32a82213bf667642349724e4c59e", "score": "0.64520526", "text": "def call(self):", "title": "" }, { "docid": "d9baac5785b9dfd28d87e6e05522bfb1", "score": "0.6448875", "text": "def Run(self):\n pass", "title": "" }, { "docid": "9413080d43e63a571a45db9ffe8ffa92", "score": "0.6431855", "text": "def _run(self):\n raise NotImplementedError", "title": "" }, { "docid": "9413080d43e63a571a45db9ffe8ffa92", "score": "0.6431855", "text": "def _run(self):\n raise NotImplementedError", "title": "" }, { "docid": "b5d22be1d136fc656e2441431685e141", "score": "0.6398938", "text": "def run(self, *args, **kwargs):\n return self.func(self, *args, **kwargs)", "title": "" }, { "docid": "05941bf88159d2f90471b2c73a8976e9", "score": "0.6395242", "text": "def execute(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "e0b02021857da52417e0daf24054239c", "score": "0.6381945", "text": "def __call__(self, *arguments):\n pass", "title": "" }, { "docid": "ce63e47348314ba7a8808e1eb9c095a0", "score": "0.63717204", "text": "def execute(self, *args, **kwargs):", "title": "" }, { "docid": "ce63e47348314ba7a8808e1eb9c095a0", "score": "0.63717204", "text": "def execute(self, *args, **kwargs):", "title": "" 
}, { "docid": "33471086cda57474893dcb36afaa7437", "score": "0.637145", "text": "def _run(self):\n raise NotImplementedError", "title": "" }, { "docid": "9d3ef8bde81df32979c8688f719f73db", "score": "0.63657784", "text": "def solve(self):\n start = time.time()\n instance = self.class_object()\n method = getattr(instance, self.method_str)\n method(*self.args, **self.kwargs)\n end = time.time()\n run_log.info(\n (\n f\"\\n###########################\"\n f\"\\n{self.name} took {end-start} to run\"\n f\"\\n###########################\\n\"\n )\n )", "title": "" }, { "docid": "1639692b8cbcb23f44724f59382a5868", "score": "0.6363349", "text": "def perform(self):\n pass", "title": "" }, { "docid": "c1225b9e0a2b76db5a06e718dfa800eb", "score": "0.6340457", "text": "def run(self):\n raise NotImplementedError()", "title": "" }, { "docid": "c1225b9e0a2b76db5a06e718dfa800eb", "score": "0.6340457", "text": "def run(self):\n raise NotImplementedError()", "title": "" }, { "docid": "68a1020159a9357328514abc33bddede", "score": "0.63288397", "text": "def run(self):\n raise NotImplementedError", "title": "" }, { "docid": "68a1020159a9357328514abc33bddede", "score": "0.63288397", "text": "def run(self):\n raise NotImplementedError", "title": "" }, { "docid": "68a1020159a9357328514abc33bddede", "score": "0.63288397", "text": "def run(self):\n raise NotImplementedError", "title": "" }, { "docid": "68a1020159a9357328514abc33bddede", "score": "0.63288397", "text": "def run(self):\n raise NotImplementedError", "title": "" }, { "docid": "68a1020159a9357328514abc33bddede", "score": "0.63288397", "text": "def run(self):\n raise NotImplementedError", "title": "" }, { "docid": "68a1020159a9357328514abc33bddede", "score": "0.63288397", "text": "def run(self):\n raise NotImplementedError", "title": "" }, { "docid": "68a1020159a9357328514abc33bddede", "score": "0.63288397", "text": "def run(self):\n raise NotImplementedError", "title": "" }, { "docid": "68a1020159a9357328514abc33bddede", "score": "0.63288397", "text": "def run(self):\n raise NotImplementedError", "title": "" }, { "docid": "68a1020159a9357328514abc33bddede", "score": "0.63288397", "text": "def run(self):\n raise NotImplementedError", "title": "" }, { "docid": "8286af4a59a8a7b1986e4d6b7904d41e", "score": "0.63161725", "text": "def run(self) -> None:\n raise NotImplementedError()", "title": "" }, { "docid": "664aa577ef310daea18738414a052477", "score": "0.62948227", "text": "def execute_method_for_keyword(self):\n\n kwargs, kw_status = self.get_argument_as_keywords()\n print_info(\"The Arguments passed for the current Step is: '{0}'\".format(kwargs))\n if kw_status:\n # Execute the corresponding method\n method_loader = self.exec_obj.im_class()\n try:\n keyword_result = self.exec_obj(method_loader, **kwargs)\n except Exception as exception:\n trcback = print_exception(exception)\n keyword_result = (\"EXCEPTION\", trcback)\n\n self.data_repository = self.update_data_repository(self.keyword,\n keyword_result,\n self.data_repository)\n return self.data_repository", "title": "" }, { "docid": "47df06d45bcaaabd3a0b10a3a7460d5e", "score": "0.6279601", "text": "def run(self): \r\n return", "title": "" }, { "docid": "34347683d6544e134046ed5f16f16dc3", "score": "0.62561625", "text": "def process(self):\n raise NotImplementedError('Method must be implemented by subclass.')", "title": "" }, { "docid": "41793a2cefff705e3ac119a3ffb5a7d8", "score": "0.6254283", "text": "def continue_running(self, method):", "title": "" }, { "docid": "44d9d96c1bc05aa2cafaf30bb0c16a06", "score": 
"0.625011", "text": "def __call__(self, *args, **kwargs):\n return self.call(*args, **kwargs)", "title": "" }, { "docid": "e1d370efa0fd503ff9681977558ed108", "score": "0.62401813", "text": "def run(self):\n raise Exception('derived class should redefine this function')", "title": "" }, { "docid": "839ca269f59bff07337126954d381d38", "score": "0.62382257", "text": "def execute(self, *args, **options):\n raise NotImplementedError", "title": "" }, { "docid": "48a690033f8a352fa23f06dacc4e0fb4", "score": "0.62263286", "text": "def execute(self) -> Any:\n return self.function(**self.kwargs)", "title": "" }, { "docid": "381fc5a45b11d2ac896ff55a96f8a739", "score": "0.6224881", "text": "def execute(self):\n raise TaskError(\"Task %s: subclass should override execute() method!\" %\n self)", "title": "" }, { "docid": "f2bfb984ddd5d8d23f8b8c04e2eaf66c", "score": "0.6217061", "text": "def perform(self):\n raise TypeError(\"Derived class must implement\")", "title": "" }, { "docid": "0f44c5f93b96523d3fd03ac59bdde2cc", "score": "0.62085426", "text": "def run(self):\n self.fn(*self.args, **self.kwargs)", "title": "" }, { "docid": "20aeca5643a29d1c239db1b469dc3398", "score": "0.61996543", "text": "def runThis(self):\n print(\"Override method..\")", "title": "" }, { "docid": "ab3e4804d38658580de7852fb5be4470", "score": "0.61963195", "text": "def run(self):\r\n pass", "title": "" }, { "docid": "1a6864761821daab81dfa152a460c7b6", "score": "0.6195178", "text": "def run(self):\n\t\t\n\t\tpass", "title": "" }, { "docid": "5f21ef1c0282f42f6c0b99d1968c2082", "score": "0.61915207", "text": "def __call__(self, *args, **kw):\r\n debug.write(\"[SourceRPG] Executing the function %s within the CommandsDatabase\" % self.function, 4)\r\n vars(CommandsDatabase)[self.function](self.instance, *args, **kw)", "title": "" }, { "docid": "057941cae8929f705a3bb840c7789ce3", "score": "0.61862606", "text": "def run(self):\n self.class_inst_obj.processor(self.msg)", "title": "" }, { "docid": "aad13fbb585fbd53406cdf5f576b06fc", "score": "0.61816365", "text": "def exec(self,**kwargs):\r\n pass", "title": "" }, { "docid": "d388939ba349a300a7a20adb599c127d", "score": "0.617902", "text": "def __call__(self):\n pass", "title": "" }, { "docid": "d388939ba349a300a7a20adb599c127d", "score": "0.617902", "text": "def __call__(self):\n pass", "title": "" }, { "docid": "e0812eb4bb8d0a4abb61606f2f669e0a", "score": "0.6168236", "text": "async def execute(self):", "title": "" }, { "docid": "41b19eca8b6cc12a768982d5ac34f181", "score": "0.6156827", "text": "def _execute_impl(self, *args, **kwargs):\n # Execute with bound args.\n method_body = getattr(self._deployment_node, self._deployment_method_name)\n return method_body.remote(\n *self._bound_args,\n **self._bound_kwargs,\n )", "title": "" }, { "docid": "ca502b3b8dde5ad4550342ae95c7fba6", "score": "0.6152554", "text": "def executor(self):", "title": "" }, { "docid": "c041fa26d1fd59570b95eff825f60734", "score": "0.614827", "text": "def _run_scenario(self, cls, method_name, context, args, config):", "title": "" }, { "docid": "9aa711ac966607090eb2fd54ad1b4a3f", "score": "0.6135115", "text": "def run(self, **kwargs):", "title": "" }, { "docid": "c0e59a02a182d0a84928e921af5806a4", "score": "0.6134679", "text": "def run_command(self, command_class):\n command_class(*self.__args, **self.__kwargs).run()", "title": "" }, { "docid": "9a268b2ad42174aa2854a2017de9f7c4", "score": "0.61339957", "text": "def call(self):\n self.call() # Call a function", "title": "" }, { "docid": "c5d82f81880be1e5a5426af38d40c136", 
"score": "0.6132281", "text": "def run(self):\n raise NotImplemented(\"Inheriting classes should implement this\")", "title": "" }, { "docid": "92c8d8d8db5c5925ea38441e7aad19ae", "score": "0.6123989", "text": "def run(self):\r\n self.log(texto=f\"Executando {self._name}\")", "title": "" }, { "docid": "60b0e2db9b19c74d01944f1d861f13d4", "score": "0.61221725", "text": "def __call__(self, *args, **params):\n\t\treturn self.send(params)", "title": "" }, { "docid": "d38da4f9bf4623b73353d4c92d348bab", "score": "0.6118817", "text": "def call(self, *args, **kwargs):", "title": "" }, { "docid": "52153b41e5213c56fdcba50f22c2f1dc", "score": "0.6111071", "text": "def run(self):\n self.func()", "title": "" }, { "docid": "327c64ef5c5017ddc663901d39556ebf", "score": "0.6107017", "text": "def _execute(self):\n\n action_name = self.action.lower()\n method_name = action_name.replace('node', 'do')\n method = getattr(self, method_name, None)\n\n if method is None:\n reason = 'Unsupported action: %s' % self.action\n EVENT.error(self, consts.PHASE_ERROR, reason)\n return self.RES_ERROR, reason\n\n return method()", "title": "" }, { "docid": "18c1413db1c4f7efc5ea6b5233ebd9bc", "score": "0.6095977", "text": "def execute(self, args):\r\n pass", "title": "" }, { "docid": "29550ff5a208e2634efcff5e39b17b0b", "score": "0.6087334", "text": "def execute(self) -> None:\n self.state()", "title": "" }, { "docid": "1663c0470442ea9a734314d29f492856", "score": "0.6079138", "text": "def run(self):\n pass", "title": "" }, { "docid": "1663c0470442ea9a734314d29f492856", "score": "0.6079138", "text": "def run(self):\n pass", "title": "" } ]
eba952d294cab4cbe3a8d5753a2b9925
Parse command line arguments.
[ { "docid": "0f93e885f731bbea666de4f1e7d81e15", "score": "0.0", "text": "def parse_args():\n parser = argparse.ArgumentParser('train.py')\n add_arg = parser.add_argument\n add_arg('config', nargs='?', default='configs/test.yaml')\n add_arg('-d', '--distributed', action='store_true')\n add_arg('-v', '--verbose', action='store_true')\n return parser.parse_args()", "title": "" } ]
[ { "docid": "fbefb0e59061fbe6716504a6e1a8d663", "score": "0.7420028", "text": "def parse_args():\n parser = ArgumentParser()\n return parser.parse_args()", "title": "" }, { "docid": "b62f6792f0eee8860859d47ebb64f450", "score": "0.73423314", "text": "def parse_args():\n\t# Help Menu\n\tparser = optparse.OptionParser(usage='%prog [options] title')\n\n\t(opts, args) = parser.parse_args()\n\n\t# Parser Errors\n\tif len(args) != 1:\n\t\tparser.error('incorrect number of arguments')\n\n\tbulkwiki2snap.wiki2snap(args[0]) # Create an edge list file to run getHeights on\n\tgetHeights(\"edgelists/\" + args[0].replace(\" \", \"_\") + \".txt\")", "title": "" }, { "docid": "30be5ef4014882ddd34ce097bd3cd2dd", "score": "0.7290703", "text": "def parse_arguments():\r\n #usage = \"usage: %(prog)s [options] <message file>\" + DESCRIPTION\r\n parser = ArgumentParser()\r\n parser.add_argument('-v', '--version', action='version', version=VERSION)\r\n parser.add_argument('start_dir', metavar='start_dir', help='start dir')\r\n return parser.parse_args()", "title": "" }, { "docid": "b2128b1fc3fca6b692b4e24b9351de9c", "score": "0.7191975", "text": "def parse_command_line_arguments() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-d\", \"--dataset\",\n default=\"mini-MIAS\",\n required=True,\n help=\"The dataset to use. Must be either 'mini-MIAS' or 'CBIS-DDMS'.\"\n )\n parser.add_argument(\"-m\", \"--model\",\n default=\"basic\",\n required=True,\n help=\"The model to use. Must be either 'basic' or 'advanced'.\"\n )\n parser.add_argument(\"-v\", \"--verbose\",\n action=\"store_true\",\n help=\"Verbose mode: include this flag additional print statements for debugging purposes.\"\n )\n args = parser.parse_args()\n config.dataset = args.dataset\n config.model = args.model\n config.verbose_mode = args.verbose", "title": "" }, { "docid": "f65fd9f55fa5adcab6970eb6c58b8be0", "score": "0.71675", "text": "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"\"\"parses the output size benchmark txt files and collect\n the results in a csv dataframe.\"\"\"\n )\n parser.add_argument(\"--txt\", nargs=\"+\", help=\"input txt benchmark files\", type=str)\n parser.add_argument(\"--csv\", help=\"output csv dataframe\", type=str)\n return parser.parse_args()", "title": "" }, { "docid": "09289c030e01f76327eda404f31514cc", "score": "0.7157282", "text": "def parse_arguments(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('--data-dir', dest='data_dir', type=str, default='data',\n help='Path to data directory.')\n parser.add_argument('--scale', dest='scale', type=float, default=1.,\n help='Scale of training/test dataset.')\n return parser.parse_args(argv)", "title": "" }, { "docid": "d2d16fe13418f90b636ffeadb2b3dac0", "score": "0.71557736", "text": "def _parse_args():\n parser = ArgumentParser(description=_description, epilog=_epilog,\n formatter_class=RawDescriptionHelpFormatter)\n parser.add_argument('-d', '--data', type=str, default=data_dir,\n help='Directory to use for local data assets.')\n parser.add_argument('-s', '--save', type=str, default=save_dir,\n help='Directory to use to save the webmap file.')\n return parser.parse_args()", "title": "" }, { "docid": "5292064765be1d4a5da89f6a65dc63e4", "score": "0.714745", "text": "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--no_extract', action='store_false', help='Run without frame extracting.')\n parser.add_argument('--check', action='store_true', help='Checl that number of image is 
correct')\n return parser.parse_args()", "title": "" }, { "docid": "1c2ce04f6513080b33facf2032040ba3", "score": "0.71355826", "text": "def parse_arguments():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--db_init\", action=\"store_true\",\r\n help=\"Initializing and populating database\")\r\n return parser.parse_args()", "title": "" }, { "docid": "40173b4b4c9bc71a6a7954490e156617", "score": "0.7134888", "text": "def parse_arguments ():\n parser = argparse.ArgumentParser (\n description = 'Incremental Evolution of vibration models to aggregate bees.',\n argument_default = None\n )\n parser.add_argument (\n '--config',\n default = 'config',\n metavar = 'FILENAME',\n type = str,\n help = 'configuration file to use')\n parser.add_argument (\n '--workers',\n default = 'workers',\n metavar = 'FILENAME',\n type = str,\n help = 'worker settings file to load')\n parser.add_argument (\n '--run',\n default = None,\n metavar = 'N',\n type = int,\n help = \"run number to use\")\n parser.add_argument (\n '--command',\n type = str,\n choices = ['new-run', 'continue-run', 'deploy'],\n required = True,\n help = '''what should we do?\nnew-run: perform a new run of the evolutionary algorithm;\ncontinue-run: continue a previously stopped run of the evolutionary algorithm (requires a run number);\ndeploy: deploy the worker programs to the beagle bones.''')\n return parser.parse_args ()", "title": "" }, { "docid": "7b87345790a6cd084f1b327a83f0271c", "score": "0.712187", "text": "def _parse_args():\n\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\n 'databases',\n nargs='+',\n action=database.LoadTokenDatabases,\n help='Databases (ELF, binary, or CSV) to use to lookup tokens.')\n parser.add_argument(\n '-i',\n '--input',\n dest='input_file',\n help='The binary trace input file, generated using trace_to_file.h.')\n parser.add_argument('-o',\n '--output',\n dest='output_file',\n help=('The json file to which to write the output.'))\n parser.add_argument(\n '-t',\n '--ticks_per_second',\n type=int,\n dest='ticks_per_second',\n default=1000,\n help=('The clock rate of the trace events (Default 1000).'))\n\n return parser.parse_args()", "title": "" }, { "docid": "705908501d42247c21b699d679399889", "score": "0.7119", "text": "def parse_command_line(args):\n parser = ArgumentParser(fromfile_prefix_chars=\"@\")\n parser.add_argument(\"--server-ip\", required=True)\n parser.add_argument(\"--haproxy-backend-map\", required=True, type=pathlib.Path)\n parser.add_argument(\"--haproxy-certs-dir\", required=True, type=pathlib.Path)\n parser.add_argument(\"--contact-email\", required=True)\n parser.add_argument(\"--letsencrypt-use-staging\", action=\"store_true\")\n parser.add_argument(\"--letsencrypt-fake-cert\")\n parser.add_argument(\"--log-level\", default=\"info\")\n parser.add_argument(\"--keep-certificate\", action=\"append\")\n parser.add_argument(\"--additional-domain\", action=\"append\")\n return parser.parse_args(args)", "title": "" }, { "docid": "b467ee87a23f143e91844cd2500c02c1", "score": "0.7116432", "text": "def parse_arguments():\n if len(sys.argv) < 2:\n raise ValueError(\"must specify commandline argument\")\n\n try:\n return _parse_dispatch_table[sys.argv[1]](sys.argv[2:])\n except KeyError:\n raise ValueError(\"unknown command '{0}'\".format(sys.argv[1]))", "title": "" }, { "docid": "0d6cd209e3b98b0debf6a4d3ba1051c4", "score": "0.7094519", "text": "def parse_cmdline_args():\n parser = 
argparse.ArgumentParser(description='Hummingbird - '\n 'the XFEL Online Analysis Framework.')\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-i\", \"--interface\",\n help=\"start the control and display interface\",\n action=\"store_true\")\n group.add_argument('-b', '--backend', metavar='conf.py',\n type=str, help=\"start the backend with \"\n \"given configuration file\", nargs='?', const=True)\n group.add_argument('-r', '--reload', help='reloads the backend',\n action='store_true')\n parser.add_argument(\"-v\", \"--verbose\", help=\"increase output verbosity\",\n action=\"store_true\")\n parser.add_argument(\"-d\", \"--debug\", help=\"output debug messages\",\n action=\"store_true\")\n parser.add_argument(\"-p\", \"--profile\", help=\"generate and output profiling information\",\n action=\"store_true\")\n parser.add_argument(\"--no-restore\", help=\"no restoring of Qsettings\",\n action=\"store_false\")\n \n if(len(sys.argv) == 1):\n parser.print_help()\n return parser.parse_args()", "title": "" }, { "docid": "775ac096b3af60ff1831fa18b08b17b2", "score": "0.7088121", "text": "def parse_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-c\", \"--cycle\", type=int, required=True)\n parser.add_argument(\"-d\", \"--dt\", nargs='+', type=float)\n parser.add_argument(\"-r\", \"--rate\", nargs='+', type=float)\n parser.add_argument(\"-s\", \"--skip\", type=bool, default=False)\n parser.add_argument(\"-uc\", \"--usrpcenterfre\", required=True, type=float)\n parser.add_argument(\"-ur\", \"--usrprate\", required=True, type=float)\n return parser.parse_args()", "title": "" }, { "docid": "f8ac1b4629d322f1134d8f9a80b13bc5", "score": "0.70825446", "text": "def parse_args():\n\tusage = \"\"\"usage: %prog [options]\nTo crawl pages from web in terms of special URL patterns.\"\"\"\n\n\tparser = optparse.OptionParser(usage = usage, version = '%prog 1.0.0.0')\t\n\tparser.add_option('-c', dest = 'filename',\n\t\t\t\t\t\thelp = 'read config file', metavar = 'FILE')\n\n\tparser.add_option('-l', '--log', dest = 'log', action = 'store_true',\n\t\t\t\t\t\thelp = 'start logging', default = False)\n\n\toptions, args = parser.parse_args()\n\tif options.filename is None:\n\t\tprint parser.format_help()\n\t\tparser.exit()\n\telse:\n\t\tconfs = utils.parse_config_file(options.filename)\n\t\tif options.log:\n\t\t\tlog.read_config_file(confs['log']['log_config_file'])\n\t\t\tlog.install(confs['log']['log_name'])\n\t\telse:\n\t\t\tlog.uninstall()\t\n\n\t\treturn options, confs", "title": "" }, { "docid": "a9460bee6e78d15bf393d23bf40eebc4", "score": "0.706824", "text": "def parse_arguments():\n\n parser = argparse.ArgumentParser(\n description=\"Generate synthetic text data for text recognition.\"\n )\n parser.add_argument(\n \"--input_dir\", type=str, nargs=\"?\", help=\"The input directory\", default=\"out/\"\n )\n return parser.parse_args()", "title": "" }, { "docid": "aea0a84207ba7269e9d17cb040891517", "score": "0.7050396", "text": "def parse_args(args):\n parser = argparse.ArgumentParser()\n parser.add_argument('-B', '--bind',\n help='The host and port for the api to run on.',\n action='append')\n parser.add_argument('-C', '--connect',\n help='The url to connect to a running Validator')\n parser.add_argument('-t', '--timeout',\n help='Seconds to wait for a validator response')\n parser.add_argument('-v', '--verbose',\n action='count',\n default=0,\n help='Increase level of output sent to stderr')\n\n return parser.parse_args(args)", "title": "" }, { "docid": 
"165a522a6af6f791dd00a5d16979742a", "score": "0.70410687", "text": "def parse_args():\n\tparser = ArgumentParser(description=__doc__)\n\n\t# Path to the cart\n\tparser.add_argument('cart',\n\t\t\t\t\t\thelp=\"\"\"\n\t\t\t\t\t\tJSON file or URL containing the user's cart.\n\t\t\t\t\t\t\"\"\",\n\t\t\t\t\t\ttype=str)\n\n\t# Path to the base-prices\n\tparser.add_argument('base_prices',\n\t\t\t\t\t\thelp=\"\"\"\n\t\t\t\t\t\tJSON file or URL containing base prices.\n\t\t\t\t\t\t\"\"\",\n\t\t\t\t\t\ttype=str)\n\n\treturn parser.parse_args()", "title": "" }, { "docid": "95b456a180062a099d68726715613a8d", "score": "0.7041048", "text": "def parse_args():\n logging.info('Parsing command line.')\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--genomic_background', type=str, required=True,\n help='FASTA genome to check for specificity eval.')\n parser.add_argument('--input_tsv_file', type=file, help=':')\n parser.add_argument('--output_tsv_file', type=str, help=':', default=None)\n parser.add_argument('--needed', type=int, help=':', default=100)\n args = parser.parse_args()\n if args.output_tsv_file is None:\n base, ext = os.path.splitext(args.input_tsv_file.name)\n args.output_tsv_file = base + '.controls'\n return args", "title": "" }, { "docid": "2db8053d9c4fba303c50ccac60503ebe", "score": "0.7034261", "text": "def parse_arguments(args=None) :\n if args is not None :\n return args\n #make the argument parser\n parser = ArgumentParser()\n #positional argument: path to the file to analyze\n parser.add_argument('file', type=pathlib.Path, \n help='Path to the file to analyze')\n #optional arguments\n parser.add_argument('--output_dir', type=pathlib.Path, default=pathlib.Path(),\n help='''Path to directory in which the output plot file should be saved \n (default: current directory)''')\n parser.add_argument('--exp_type', choices=['spall','velocity'], default='spall',\n help='Type of analysis to perform (\"spall\" or \"velocity\"). Default is \"spall\".')\n parser.add_argument('--rows_to_skip', type=int, default=int(3.95e6),\n help='Number of rows in full data files to skip before reading data of interest')\n parser.add_argument('--nrows', type=int, default=int(120e3),\n help='Number of rows to select as data of interest after rows_to_skip')\n parser.add_argument('--N', type=int, default=512,\n help='Length of each segment')\n parser.add_argument('--overlap_frac', type=float, default=0.85,\n help='fraction of overlapped data to use in Fourier transforms')\n return parser.parse_args(args=args)", "title": "" }, { "docid": "922b37c8407fac336d59c67a653f39f3", "score": "0.7033851", "text": "def parse_args():\n parser = argparse.ArgumentParser()\n parser.description = ('Process and chart lottery data. 
'\n 'Requires matplotlib.')\n parser.add_argument('inputfiles', nargs='+',\n help='CSV file(s) to process')\n parser.add_argument('-a', '--abort-on-error', action='store_true',\n help='exit if an input file cannot be read')\n parser.add_argument('-u', '--use-headings', action='store_true',\n help='read CSV columns by their headings '\n 'rather than their order')\n parser.add_argument('-n', '--number-range', type=int, nargs=2,\n default=[1, 45], metavar=('LOW', 'HIGH'),\n help='the range (inclusive) of numbers that '\n 'may be drawn (default is 1 45)')\n parser.add_argument('-r', '--resolution', type=int, default=120,\n metavar='DPI',\n help='output resolution in dots per inch '\n '(default is 120)')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='show progress as files are created')\n parser.add_argument('-w', '--weeks', type=int, default=104,\n help='number of weeks to process from last '\n 'date in inputfiles (default is 104)')\n\n # swap LOW and HIGH if necessary\n args = parser.parse_args()\n low, high = args.number_range\n if low > high:\n args.number_range.reverse()\n return args", "title": "" }, { "docid": "b3a733b7d5c4f63f722eb32e032cf38a", "score": "0.7031479", "text": "def parse_arguments():\n\n\t# Input file:\n\tif args.input:\n\t\tFILE_LIST['INPUT_FILE'] = args.input\n\n\t# Output tuple list:\n\tif args.output:\n\t\tFILE_LIST['OUTPUT_FILE'] = args.output\n\telse:\n\t\tFILE_LIST['OUTPUT_FILE'] = os.path.join(CWD, \"output.txt\")\n\n\t# Print debug information:\n\tif args.debug is True:\n\t\tprint(\" -I- Debug mode is on.\")\n\n\treturn True", "title": "" }, { "docid": "e7bcffbd75ed17d64875311965e68fa8", "score": "0.7018384", "text": "def parse_args():\n mode_help = \"\"\"available modes are:\n mapping_report analyse the mapping report generated by mapping_compare.py\n mapping_reports analyse the mapping reports under a directory\n test_log analyse the test log generated by run_cross_validation.py or run_feature_evaluation.py\n server_log analyse the analysis log generated by nice2server\n training_data analyse the traning data generated by batTrain.py or UnuglifyDEX\n 1pass analyse the result directory of one pass generated by run_cross_validation.py or run_feature_evaluation.py\n cross_validation analyse the cross validation result generated by run_cross_validation.py\n feature_evaluation analyse the feature evaluation result generated by run_feature_evaluation.py\n score_regression regression analysis with the json generated in mapping_reports mode\n cross_validationR analyse the json generated in cross_validation, and gen table\n feature_evaluationR analyse the json generated in feature_evaluation, and gen table\n similarity_ratio get the cdf for the similarity ratios between deobfuscated names and origins\n obfuscation_rate get the cdf for obfuscation rate\n \"\"\"\n\n parser = argparse.ArgumentParser(description=\"evaluation data analyser of DeDroid\",\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"-i\", action=\"store\", dest=\"input_file\",\n required=True, help=\"directory/file of results\")\n parser.add_argument(\"-o\", action=\"store\", dest=\"output_file\",\n help=\"output file, default is stdout\")\n parser.add_argument(\"-m\", action=\"store\", dest=\"analysis_mode\",\n required=True, help=mode_help)\n\n options = parser.parse_args()\n # print options\n return options", "title": "" }, { "docid": "b6169576b893f1328c381f6072b917e5", "score": "0.70134896", "text": "def parse_args():\n parser = 
argparse.ArgumentParser(\"Process traces into features lists.\")\n parser.add_argument(\"-t\", \"--traces\", required=True)\n parser.add_argument(\"-o\", \"--output\", required=True)\n parser.add_argument(\"-e\", \"--extension\", required=False)\n return parser.parse_args()", "title": "" }, { "docid": "9b3ad55f7c0209f2be22fd36e92eca58", "score": "0.7003398", "text": "def parse_args():\n # TODO change description\n description = '''\n Calculates the number of contacts between water-protein and water-ligand residues\\n\n as well as their collective contact areas.\\n\n \n Voronota software is used to generate the water-protein and water-ligand contacts,\\n\n which is based on voronoi diagrams for each atom. Additional data parsing generates\\n\n \n \n \n\n H = hydrogen\n A = acceptor\n AA = atom bonded to acceptor\n D = atom bonded to hydrogen\n :: = potential hbond\n - = covalent bond\n\n 1. the H::A distance must be less than or equal to 3.0 Angstrom.\n 2. the D-H::A angle must be at least 110 degree.\n 3. the H::A-AA angle must be at least 90 degree.\n\n Hydrogenbond frequency is calculated both for inter and intramolecular hydrogenbonds, error is estimated using \n block averaging. The frequency of water mediated hydrogen bonds is also calculated. No error are calculated for\n water mediated hydrogen bonds, because not only can water mediated hydrogen bonds can exist in multiple unique \n states but at each point in time there can potentialy be multiple water mediated hydrogen bonds between a pair\n of solute heavy atoms. Results are returned in a csv file. \n\n '''\n parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('infiles',\n type=str,\n nargs='+',\n help='Simulation cmsfile and trj')\n parser.add_argument('--prefix',\n type=str,\n dest='prefix',\n default='test',\n help='Outfile prefix')\n parser.add_argument('-n',\n '--nproc',\n type=int,\n dest='nproc',\n default=16,\n help='Number of cores to use for calculation.\\nDefault: 16')\n parser.add_argument('--max_frames',\n type=int,\n dest='max_frames',\n default=2000,\n help='Process at most [max_frames] frames equally spaced across trajectory.\\nDefault: 2000')\n\n return parser.parse_args()", "title": "" }, { "docid": "a887a67840fdd1ca2a27d7478bbcaef7", "score": "0.7002579", "text": "def parseArgs():\n\n\targParser = argparse.ArgumentParser(prog='dutil (Main.py)')\n\targParser.add_argument(\"-p\", \"--projectName\", help=\"What tests (project) we want execute\", required=True)\n\targParser.add_argument(\"-v\", \"--projectVersion\", help=\"What version of libraries we want to execute\", required=True)\n\targParser.add_argument(\"-d\", action='store_true', help='Enable debugging')\n\n\n\ttry:\n\t\targs = argParser.parse_args()\n\t\tif args:\n\t\t\tif args.d:\n\t\t\t\tlogger.info(\"Debugging enabled!\")\n\t\t\t\tcf.d = True\n\t\t\t\tif cf.d: logger.info(\n\t\t\t\t\t\"Argument Name Space: {}\".format(args)) # # if debug flag engaged Please check singleton pattern\n\t\t\telse:\n\t\t\t\tlogger.info(\"Debugging disabled!\")\n\n\t\tif args.projectName:\n\t\t\tif cf.d: logger.info(\"Project name: {}\".format(args.projectName))\n\t\tif args.projectVersion:\n\t\t\tif cf.d: logger.info(\"Version of project libraries: {}\".format(args.projectVersion))\n\texcept SystemExit:\n\t\tif cf.d: logger.info(\"Mandatory argument required\")\n\t\tsys.exit(0)\n\n\treturn args", "title": "" }, { "docid": "b33ce518b60186ddbbfbcb8036a9f207", "score": "0.6989064", "text": "def 
parse_arguments():\n parser = argparse.ArgumentParser()\n #parser.add_argument(\"-e\", \"--extension\",\n # help=\"\"\"Type of file to analyse. Is it a CSV or an XML ?\"\"\")\n parser.add_argument(\"-d\", \"--datafile\",\n help=\"\"\"CSV file containing pieces of information\n about the members of parliament\"\"\")\n #parser.add_argument(\"-v\", \"--verdose\", action='store_true',\n # help=\"\"\"Make the application talk!\"\"\")\n parser.add_argument(\"-i\", \"--info\", action='store_true',\n help=\"\"\"information about the file\"\"\")\n parser.add_argument(\"-p\", \"--byparty\", action='store_true',\n help=\"\"\"displays a graph for each political party\"\"\")\n parser.add_argument(\"-n\", \"--displaynames\", action='store_true',\n help=\"\"\"displays the names of all the mps\"\"\")\n parser.add_argument(\"-s\", \"--searchname\", help=\"\"\"search for a mp name\"\"\")\n parser.add_argument(\"-I\", \"--index\", help=\"\"\"displays information about th Ith mp\"\"\")\n parser.add_argument(\"-g\", \"--groupfirst\",\n help=\"\"\"displays a graph groupping all the 'g' biggest political parties\"\"\")\n parser.add_argument(\"-a\", \"--byage\",\n help=\"\"\"displays a graph for the MPs splitted between those who are over and\n those who are under the value of --byage\"\"\")\n\n return parser.parse_args()", "title": "" }, { "docid": "19fceaaaec11d9ed15e395f660956901", "score": "0.69542134", "text": "def parse_cmd_arguments():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-i', '--input', help='input JSON file', required=True)\n parser.add_argument('-o', '--output', help='output JSON file', required=True)\n parser.add_argument('-d', '--debug', help='debug level', required=False, default=0)\n\n return parser.parse_args()", "title": "" }, { "docid": "44037ce9e99c9204e8fb4ea0a24413d1", "score": "0.69516945", "text": "def parse_arguments(self):\n\n for arg in sys.argv[1:]:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n raise ProcessorError(f\"Illegal argument '{arg}'\")\n update_data(self.env, key, value)", "title": "" }, { "docid": "b0e9d8a7b71490dec78fcb082ab0d222", "score": "0.69499654", "text": "def parse_args():\n parser = argparse.ArgumentParser(description='Audio stream classifier')\n parser.add_argument('model')\n parser.add_argument('--labels', nargs=2)\n parser.add_argument('--frame_size', type=int, default=2000)\n parser.add_argument('--sample_rate', type=int, default=16000)\n args = parser.parse_args()\n return args", "title": "" }, { "docid": "154ac41f20c0f4cdf84b00df387508fd", "score": "0.6944886", "text": "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Run demo of binary classification\")\n\n parser.add_argument(\n '--http_proxy',\n action='store',\n required=False,\n help='HTTP Proxy',\n default=None)\n\n parser.add_argument(\n '--https_proxy',\n action='store', required=False,\n help='HTTPS Proxy',\n default=None)\n\n return parser.parse_args()", "title": "" }, { "docid": "e5b2298aacf22d7f8249ead27ce78802", "score": "0.6943133", "text": "def parse_command_line_args():\n parser = argparse.ArgumentParser(description=(\n 'Data and config payload parser for Sigfox Sens\\'it Discovery V3.'))\n parser.add_argument(\n '--parser-mode',\n choices=('encode-config', 'decode-data'),\n default='decode_data',\n required=True,\n help='Parser mode: encode-config|decode-data.')\n parser.add_argument(\n '--hex-string',\n type=str,\n default='',\n help='Sens\\'it V3 payload HEX string.')\n parser.add_argument(\n 
'--in-file',\n help='Sens\\'it V3 config input file.')\n parser.add_argument(\n '--out-file',\n required=False,\n help='Sens\\'it V3 config output file, generated from '\n 'parsed payload config data.')\n return parser.parse_args()", "title": "" }, { "docid": "caa4eb41eff4a657c91d783af6ee40ee", "score": "0.69421214", "text": "def parse_args():\n\n parser = argparse.ArgumentParser(\n description=\"This script adds JC69 substitution-corrected divergence \"\n \"as an additional column to the output of Galaxy's Estimate \"\n \"Substitution Rates on windows.\")\n\n parser.add_argument(\n \"--input_file\", required=True,\n help=\"Full path to input file. Should be standard output file from \"\n \"'estimate substitution rates' tool in Galaxy using windows. Should \"\n \"have six columns: chrom, start, stop, n_sites, n_diffs, p_distance.\")\n\n parser.add_argument(\n \"--output_file\", required=True,\n help=\"Full path to output file.\")\n\n args = parser.parse_args()\n\n return args", "title": "" }, { "docid": "1214dfd23e092876ef7bcca2c697dd1a", "score": "0.6931074", "text": "def parse_args():\n parser = argparse.ArgumentParser(\n description=INIT_TEXT,\n epilog=EPILOG,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n parser.add_argument(\n \"--make\",\n choices=(\"clean\", \"debug\", \"test\", \"installer\"),\n help=\"Specifies the action that the build script should take\",\n )\n\n parser.add_argument(\n \"-j\",\n metavar=\"cores\",\n type=int,\n help=\"Specifies the number of cores to use during compilation\",\n )\n\n parser.add_argument(\n \"--arch\",\n metavar=\"architecture\",\n choices=(\"x86\", \"x64\"),\n default=\"x64\",\n help=\"A flag that can be x86 for 32-bit, and x64 for 64-bit\",\n )\n\n parser.add_argument(\n \"--release\",\n metavar=\"version\",\n default=NIGHLY_BUILD_VER,\n help=\"Used to set the version on the installer (PROVISIONAL FLAG, DO NOT USE)\",\n )\n\n parser.add_argument(\n \"--use-alien\",\n action=\"store_true\",\n help=\"A flag that indicates whether to use 'alien' command on installers\",\n )\n\n ret = parser.parse_args()\n return ret.arch == \"x86\", ret", "title": "" }, { "docid": "5d860e2b4fd4a7baf9fb6a82d0bf259f", "score": "0.69285506", "text": "def parse_arguments():\n\n # Create the parser\n parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=diff.__doc__)\n\n # Add parser arguments\n parser.add_argument('--baseline-source', required=True)\n parser.add_argument('--baseline-scrub', required=True)\n parser.add_argument('--comparison-source', required=True)\n parser.add_argument('--comparison-scrub', required=True)\n\n # Parse the arguments\n args = vars(parser.parse_args(sys.argv[2:]))\n\n # Run analysis\n diff(args['baseline_source'], args['baseline_scrub'], args['comparison_source'], args['comparison_scrub'])", "title": "" }, { "docid": "2ea5c92c0952505e841764b927259d32", "score": "0.6927423", "text": "def parse_args(args):\n parser = argparse.ArgumentParser(\n description=\"Dump records from a Zotero library\")\n parser.add_argument(\n '--api_key',\n required=True,\n help='Zotero API key')\n parser.add_argument(\n '--library_id',\n required=True,\n type=int,\n help=\"Zotero library numeric ID\")\n parser.add_argument(\n '--library_type',\n required=True,\n choices=('user', 'group'),\n help=\"Zotero library type [user|group]\")\n parser.add_argument(\n '--tag',\n help='Filter by tag')\n parser.add_argument(\n '-v',\n '--verbose',\n dest=\"loglevel\",\n help=\"set loglevel to INFO\",\n 
action='store_const',\n const=logging.INFO)\n parser.add_argument(\n '-vv',\n '--very-verbose',\n dest=\"loglevel\",\n help=\"set loglevel to DEBUG\",\n action='store_const',\n const=logging.DEBUG)\n return parser.parse_args(args)", "title": "" }, { "docid": "c6ed9ae9ee323b5e7a2dab2cb7864d68", "score": "0.69213533", "text": "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--db_file_path', help='Path to database file',\n required=False, default='C:\\\\Oren2\\\\chinook.db')\n parser.add_argument('--server_name', help='Name of rabbit server',\n required=False, default='localhost')\n parser.add_argument('--queue_name', help='Name of rabbit queue',\n required=False, default='temp')\n parser.add_argument('--state', help='State name for query',\n required=False, default='Canada')\n parser.add_argument('--year', help='Year for query',\n required=False, default=2011)\n parser.add_argument('--genre', help='genre for query',\n required=False, default='Rock')\n return vars(parser.parse_args())", "title": "" }, { "docid": "14fd2df87b8b5e7db07480a75ee73d14", "score": "0.6917293", "text": "def parse_args():\n\tparser = argparse.ArgumentParser(description=\"Estimate the average length of the shortest path between not yet connected keywords\")\n\tparser.add_argument(\"--samples\", type=int, default=1000, help=\"Number of samples to compute the average shortest path from. Default: 1000.\")\n\treturn parser.parse_args()", "title": "" }, { "docid": "763d19ee31bd757dbe70d80cff228ef9", "score": "0.6904869", "text": "def parse_args():\n parser = ArgumentParser(description='Example follower. Use SIGINT/SIGTERM/CTRL+C to exit gracefully.')\n parser.add_argument('cfg', metavar='CONFIG', help='Configuration file (yaml) for this follower')\n parser.add_argument('agent', metavar='AGENT', help='Agent credentials (ini) for this follower')\n parser.add_argument('-l', '--log-cfg', metavar='LOGGING_CONFIG', help='Configuration for Python logging (yaml)')\n parser.add_argument(\n '--storeutil', action='store_true',\n help=\"\"\"Treat any supplied file arguments as environment variables defining storeutil blob keys to read\n configuration from.\n \"\"\"\n )\n return parser.parse_args()", "title": "" }, { "docid": "c631b2bbee853c44ef344f0e56931f03", "score": "0.6904743", "text": "def parse_arguments():\n arg = argparse.ArgumentParser()\n\n arg.add_argument(\n \"--model-path\",\n \"-m\",\n type=str,\n required=True,\n help=\"Model path\",\n )\n\n arg.add_argument(\n \"--source\",\n \"-s\",\n type=str,\n required=True,\n help=\"Path to the image file\",\n )\n\n arg.add_argument(\n \"--labels\",\n \"-l\",\n type=str,\n required=True,\n help=\"Delimited list of labels\",\n )\n return arg.parse_args()", "title": "" }, { "docid": "423c4f7c377d5c037f05ac8402d0b59d", "score": "0.69033545", "text": "def parse_command_line_args():\n parser = argparse.ArgumentParser(description=(\n 'Datasender to Google Cloud IoT Core HTTP device connection code.'))\n parser.add_argument('--project_id',\n required=True,\n help='GCP cloud project name')\n parser.add_argument('--registry_id',\n required=True,\n help='Cloud IoT Core registry id')\n parser.add_argument('--device_id',\n required=True,\n help='Cloud IoT Core device id')\n parser.add_argument('--private_key_file',\n required=True,\n help='Path to private key file.')\n parser.add_argument('--algorithm',\n choices=('RS256', 'ES256'),\n required=True,\n help='The encryption algorithm to use to generate the JWT.')\n parser.add_argument('--cloud_region',\n 
default='asia-east1',\n help='GCP cloud region')\n parser.add_argument('--base_url',\n default=_BASE_URL,\n help=('Base URL for the Cloud IoT Core Device Service API'))\n parser.add_argument(\n '--id',\n default=999,\n type=int,\n help=('Device id, not IoT Core device id for unique key.'))\n return parser.parse_args()", "title": "" }, { "docid": "d330bce7ecf5a9370bdececd9fa6513a", "score": "0.69023985", "text": "def parse():\n argp = argparse.ArgumentParser(prog = \"tdf-analyze\", description = \"\")\n argp.add_argument('filename', nargs = \"*\", metavar='<file>', help = \"memory dump/image file\")\n #argp.add_argument('-v', '--verbose', action = 'store_true', help = \"show uHAL logging information\")\n argp.add_argument('-V,', '--version', action='version', version='%(prog)s {TDF.VERSION}'.format(**globals()))\n return argp.parse_args()", "title": "" }, { "docid": "5013b8e91c84453fcf55492357945c7d", "score": "0.6895043", "text": "def parse_args():\n\n global all_packages, package, verbose, file_path, remote_url\n\n args = parser.parse_args()\n if len(sys.argv)==1:\n parser.print_help(sys.stderr)\n exit(0)\n all_packages = args.allPackages\n package = tuple(args.package) if args.package else None\n file_path = args.file\n remote_url = args.remote\n verbose = args.verbose\n\n if remote_url and ('<package>' not in remote_url or '<version>' not in remote_url):\n print('Remote url string is not formatted correctly!')\n exit(1)", "title": "" }, { "docid": "3a6ac6ffcda981ba107dae8019dd203b", "score": "0.68903786", "text": "def parse_args(argv):\n\n opts = list()\n\n for i in range(0, len(argv), 2):\n if argv[0] in ('-h','--help'):\n print_help_menu()\n sys.exit(0)\n else:\n opts.append((argv[i], argv[i+1]))\n\n for opt, arg in opts:\n if opt in ('-i', '--input'):\n Config.source = arg\n elif opt in ('-s', '--source'):\n Config.video_path = arg\n elif opt in ('-f', '--framework'):\n Config.framework = arg\n elif opt in ('-d', '--device'):\n Config.platform = arg\n elif opt in ('-c', '--confidence'):\n Config.confidence = float(arg)\n elif opt == '--mconfig':\n Config.model_file = arg\n elif opt == '--mweight':\n Config.model_weight_file = arg\n elif opt == '--mlabels':\n Config.model_labels_file = arg\n elif opt == '--infer_fc':\n Config.infer_frame_rate = int(arg)\n elif opt == '--model_image_height':\n Config.model_image_height = int(arg)\n elif opt == '--model_image_width':\n Config.model_image_width = int(arg)\n else:\n print('Unknown argument exiting ...')\n sys.exit(2)\n\n return None", "title": "" }, { "docid": "74653d91646c29fb0370672223450b52", "score": "0.6889989", "text": "def parse_cmd_line_arguments():\n parser = argparse.ArgumentParser(\n description='''\n {nm} script to launch a regression.\\n\n For e.g.: python {nm} --app youtube\n '''.format(nm=sys.argv[0]))\n parser.add_argument('--app', help='Name of the app to run.',\n required=True)\n parser.add_argument('--device-type',\n required=False,\n default='android',\n help='<android|ios> Type of device.')\n parser.add_argument('--log-level',\n required=False,\n default='debug',\n help='<user|info|debug> Increasing verbosity order user<info<debug.')\n parser.add_argument('--log-file-dir',\n required=False,\n default=None,\n help='The directory to store the log files.')\n return vars(parser.parse_args())", "title": "" }, { "docid": "9b7c93badddb7327d0852294df38b739", "score": "0.68898463", "text": "def parse_arguments():\n\n parser = argparse.ArgumentParser(prog='xfcsdashboard', description='Plots FCS Metadata')\n\n 
parser.add_argument(\n '--input', '-i', nargs='+', type=argparse.FileType('rb'),\n metavar='<fcs_metadata.csv>', help='FCS metadata csv files to plot.')\n\n return parser.parse_args()", "title": "" }, { "docid": "47caae6dd0815f1904cf13fd2f863805", "score": "0.6885893", "text": "def parse_cmd_arguments():\n debug_help = str('Sets the logging level.' +\n '\\nAccepted values:'\n '\\n\\t0 - None (default)' +\n '\\n\\t1 - errors only' +\n '\\n\\t2 - errors and warnings,' +\n '\\n\\t3 - errors, warnings and debug messages')\n\n parser = argparse.ArgumentParser(description='Process some integers.',\n formatter_class=RawTextHelpFormatter)\n parser.add_argument('-i', '--input',\n help='input JSON file', required=True)\n parser.add_argument('-o', '--output',\n help='ouput JSON file', required=True)\n parser.add_argument('-d', '--debug', type=int, default=0, help=debug_help)\n\n return parser.parse_args()", "title": "" }, { "docid": "684781574f2ce680afbf327381217c11", "score": "0.68768305", "text": "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--debug\", action=\"store_true\")\n parser.add_argument(\"--log-path\", default=\"\")\n return parser.parse_args()", "title": "" }, { "docid": "937ada99844e4a1bb06402e9b94bd19c", "score": "0.68751043", "text": "def parse_arguments():\n parser = argparse.ArgumentParser(description = \"Measure how fast questionnaire pages load in different browsers\")\n parser.add_argument(dest = \"cred_filename\", help = \"Filename of credentials file\")\n parser.add_argument(\"-b\", dest = \"browser\", choices = list(browsers.keys()), default = list(browsers.keys())[0], help = \"Browser name. Default: %(default)s\")\n parser.add_argument(\"-t\", dest = \"traversal_method\", choices = methods, default = methods[0], help = \"Questionnaire traversal method. Default: %(default)s.\")\n parser.add_argument(\"-o\", dest = \"output_filename\", default = None, help = \"Filename of output file. Default: dump to console.\")\n parser.add_argument(\"-n\", dest = \"n_runs\", type = int, default = 1, help = \"Number of runs for each questionnaire. Default: %(default)i.\")\n parser.add_argument(\"-c\", dest = \"config\", default = \"ps2\", help = \"Configuration to use. 
Default: %(default)s.\")\n args = parser.parse_args()\n\n return args", "title": "" }, { "docid": "1ac69c73dab173f69d2b3f7e7f79335f", "score": "0.68695825", "text": "def parse_args():\n\n settings = {\n \"max\": 3,\n \"search\": [],\n \"stream\": False,\n \"user\": None,\n \"date\": False,\n \"time\": False,\n \"json\": False,\n \"spam\": False,\n }\n max_set = False\n get_next = False\n for arg in sys.argv[1:]:\n if get_next:\n settings[get_next] = arg\n get_next = False\n continue\n\n arg_copy = arg\n while arg.startswith(\"-\"):\n arg = \"\".join(arg[1:])\n\n if arg == \"s\":\n settings[\"stream\"] = True\n elif arg == \"u\":\n get_next = \"user\"\n elif arg == \"d\":\n settings[\"date\"] = True\n elif arg == \"t\":\n settings[\"time\"] = True\n elif arg == \"j\":\n settings[\"json\"] = True\n elif arg == \"n\":\n settings[\"spam\"] = True\n elif arg == \"h\":\n raise SystemExit(__doc__.strip())\n elif not max_set:\n try:\n settings[\"max\"] = int(arg)\n max_set = True\n except (TypeError, ValueError):\n settings[\"search\"].append(arg_copy)\n else:\n settings[\"search\"].append(arg_copy)\n\n if settings[\"stream\"] and not (settings[\"search\"] or settings[\"user\"]):\n raise SystemExit(\n __doc__ + \"Streaming requires search phrases or the -u flag\"\n )\n\n return settings", "title": "" }, { "docid": "3284fb8c570ad0fa76c33ff11102df14", "score": "0.6869018", "text": "def parse_args(args):\n \n # Construct the parser (which is stored in parser)\n # Module docstring lives in __doc__\n # See http://python-forum.com/pythonforum/viewtopic.php?f=3&t=36847\n # And a formatter class so our examples in the docstring look good. Isn't it\n # convenient how we already wrapped it to 80 characters?\n # See http://docs.python.org/library/argparse.html#formatter-class\n parser = argparse.ArgumentParser(description=__doc__, \n formatter_class=argparse.RawDescriptionHelpFormatter)\n \n parser.add_argument(\"population_file\", type=argparse.FileType(\"r\"),\n help=\"file to read individuals from\") \n parser.add_argument(\"pathogenicity_file\", type=argparse.FileType(\"r\"),\n help=\"file to read pathogenicity flags from\")\n parser.add_argument(\"--epsilon\", type=float, default=1.0,\n help=\"privacy preserving factor\")\n \n # The command line arguments start with the program name, which we don't\n # want to treat as an argument for argparse. 
So we remove it.\n args = args[1:]\n \n return parser.parse_args(args)", "title": "" }, { "docid": "fcdbbcfb2a244a8c406179a8da9af41b", "score": "0.6867138", "text": "def parse_command_line_args():\n parser = argparse.ArgumentParser(description=(\n 'Example Serial Read Code.'))\n parser.add_argument(\n '--timeout',\n default=None,\n dest='timeout',\n type=float,\n help='Set a read timeout value.')\n parser.add_argument(\n '--bytesize',\n default=8,\n type=int,\n dest='bytesize',\n help=\"Number of data bits.\")\n parser.add_argument(\n '--port',\n choices=(\"/dev/ttyUSB0\", \"COM3\"),\n default=\"/dev/ttyUSB0\",\n dest='port',\n help='PORT a device name.')\n parser.add_argument(\n '--baudrate',\n choices=(110, 300, 600, 1200, 2400, 4800, 9600, 14400,\n 19200, 38400, 57600, 115200, 128000, 256000),\n default=9600,\n type=int,\n dest='baudrate',\n help='Baud rate for serial Communication')\n\n return parser.parse_args()", "title": "" }, { "docid": "9527cb1fa0b79a1f315893adce3677b0", "score": "0.6867088", "text": "def parse_args():\n\n parser = ArgumentParser(description=f\"\"\"example:\n {os.path.basename(sys.argv[0])} -d 3.5 The typing seems really strong today.\n echo 'I love typing' | {os.path.basename(sys.argv[0])}\n {os.path.basename(sys.argv[0])} < test.txt\n\"\"\", epilog=\"\"\"shortcuts:\n ^c / ctrl+c end the test and get results now\n ^h / ctrl+h backspace\n ^r / ctrl+r restart the same test\n ^w / ctrl+w delete a word\n ^u / ctrl+u delete a word\n\"\"\", formatter_class=RawTextHelpFormatter)\n\n parser.add_argument('-d', '--duration', type=float, default=float('inf'),\n help='duration in seconds')\n parser.add_argument('-r', '--rows', type=int, default=2,\n help='number of test rows to show')\n parser.add_argument('-s', '--shuffle', action='store_true',\n help='shuffle words')\n parser.add_argument('words', nargs='*',\n help='provide words via args in lieu of stdin')\n\n return dict(parser.parse_args()._get_kwargs(), help=parser.print_help)", "title": "" }, { "docid": "cac183950da6a3d72e1bca0162903e68", "score": "0.6864845", "text": "def parse_command_line_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--dev',\n default=0,\n type=int,\n help='USB Microphone device number to use. Default: 0')\n return parser.parse_args()", "title": "" }, { "docid": "60348d09be72a1e97a1be15d36442795", "score": "0.68643403", "text": "def parse_args(self):\n cmdline = self.makeCommandLine()\n return self.parser.parse_args(cmdline)", "title": "" }, { "docid": "8478a9a813ab6ecb7b3195cd740b4d40", "score": "0.6853853", "text": "def parse_args():\n\n parser = argparse.ArgumentParser(\n description='Bayesian Comparison Audit Support Program For'\n 'A Single Contest '\n 'Across Multiple Jurisdictions or Collections')\n\n # REQUIRED POSITIONAL COMMAND-LINE ARGUMENTS\n\n parser.add_argument(\n \"collections_file\",\n help=\"path to CSV file giving collection names and sizes.\",\n default=\"collections.csv\")\n\n parser.add_argument(\n \"reported_file\",\n help=\"path to CSV file giving number of times each choice \"\n \"was reportedly made in each collection.\",\n default=\"reported.csv\")\n\n parser.add_argument(\n \"sample_file\",\n help=\"path to CSV file giving number of times each \"\n \"(reported choice, actual choice) pair was seen in \"\n \"each collection in the audit sample.\",\n default=\"sample.csv\")\n\n # OPTIONAL COMMAND-LINE ARGUMENTS\n\n parser.add_argument(\"--audit_seed\",\n help=\"For reproducibility, we provide the option to \"\n \"seed the randomness in the audit. 
If the same \"\n \"seed is provided, the audit will return the \"\n \"same results.\",\n type=int,\n default=1)\n\n parser.add_argument(\"--num_trials\",\n help=\"Bayesian audits work by simulating the ballots \"\n \"that haven't been sampled to estimate the \"\n \"chance that each choice would win a full \"\n \"hand recount. This argument specifies how \"\n \"many trials are used to compute these \"\n \"estimates.\",\n type=int,\n default=25000)\n\n parser.add_argument(\"--pseudocount_base\",\n help=\"The pseudocount value used for reported-choice/\"\n \"actual-choice pairs that are unequal. The default \"\n \"value is 1, a relatively small value, indicating an \"\n \"expectation that scanner errors are rare.\",\n type=int,\n default=1)\n\n parser.add_argument(\"--pseudocount_match\",\n help=\"The pseudocount value used for reported-choice/\"\n \"actual-choice pairs that are equal. The default \"\n \"value is 50, a relatively large value, indicating an \"\n \"expectation that scanner is generally accurate.\",\n type=int,\n default=50)\n\n parser.add_argument(\"--n_winners\",\n help=\"The parameter n_winners determines how many \"\n \"winners there are in a contest. The top n_winners \"\n \"vote getters win the contest.\",\n type=int,\n default=1)\n\n parser.add_argument(\"--v\", \"--verbose\",\n help=\"Verbose output.\",\n action='store_true')\n\n args = parser.parse_args()\n\n if args.collections_file is None:\n parser.print_help()\n sys.exit()\n\n return args", "title": "" }, { "docid": "a9b0c44e5d90829051a54fffa2262d55", "score": "0.68519145", "text": "def parse_arguments():\n parser = OptionParser()\n return parser.parse_args()[1]", "title": "" }, { "docid": "f7bb5bdd4457f366e0fbf732fc0f8c1f", "score": "0.68498915", "text": "def parse_args():\r\n parser = argparse.ArgumentParser(description=\"\"\"Command line interface for\r\n testing control pilot signal EVSE via USS protocol.\"\"\")\r\n\r\n parser.add_argument(\"--unsafe\", dest=\"unsafe\", action=\"store_true\",\r\n default=False, help=\"Do not hide exception traces.\")\r\n\r\n parser.add_argument('-v', \"--verbose\", dest=\"verbose\", action=\"count\",\r\n default=False,\r\n help=\"Print out debug and info messages.\")\r\n\r\n parser.add_argument('--version', action='version', version='\"CP test\" Revision %s' % REVISION_NUMBER)\r\n\r\n parser.add_argument('-p', '--active_port', required=True, type=str,\r\n help=\"Specifies serial port where active EVSE is connected.\")\r\n\r\n parser.add_argument('--passive_port', type=str,\r\n help=\"Specifies serial port where passive EV is connected.\")\r\n\r\n parser.add_argument('--rcp', type=int, default=\"0\", help=CP_RESISTOR_HELP_STRING)\r\n\r\n\r\n return parser.parse_args()", "title": "" }, { "docid": "158a95223e30d07f32cf83e157cd3ffc", "score": "0.6846518", "text": "def parse_args():\n\tparser = argparse.ArgumentParser(\n\t\tprog='ICU',\n\t\tdescription=\"Given a zip code and satellite code, generates notifications \" +\n\t\t\"15 minutes before that satellite will become visible.\"\n\t)\n\tparser.add_argument('-z', dest='zip', type=int, required=True, help=\"Zip code for which to check viewable events\")\n\tparser.add_argument('-s', dest='sat_id', help=\"NORAD ID of satellite to view\")\n\tparser.add_argument('-n', dest='sat_str', help=\"Name of satellite to view\")\n\tparser.add_argument('-v', dest='verbose', action='store_true')\n\targs = parser.parse_args()\n\tif not (args.sat_id or args.sat_str):\n\t\tparser.error(\"Either -s or -n is required\")\n\treturn args", "title": "" }, { 
"docid": "3efd87dac332b1151a2cab90f5b5b314", "score": "0.6845959", "text": "def parse_args():\n parser = argparse.ArgumentParser(\n description='Convert xxx XML annotations to PASCAL VOC format'\n )\n\n parser.add_argument(\n '--data_dir', metavar='DIRECTORY', required=True,\n help='directory which contains original images'\n )\n\n parser.add_argument(\n '--output_dir', metavar='DIRECTORY', required=True,\n help='directory for output annotations in PASCAL VOC format'\n )\n\n return parser.parse_args()", "title": "" }, { "docid": "3b614ed6f339b95757b725eac1155035", "score": "0.6845492", "text": "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Generate dataset images (Face cropping)\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n \"--big-crop\",\n dest=\"big_crop\",\n help=\"Big crop (0/1)\",\n default=0,\n type=int)\n parser.add_argument(\n \"--unumber\",\n dest=\"unumber\",\n help=\"Unique number for output file names\",\n default=7,\n type=int)\n parser.add_argument(\n \"--input-dir\",\n type=str,\n default=\"input_images\",\n help=\"name of input image directory\")\n parser.add_argument(\n \"--output-dir\",\n type=str,\n default=\"output_images\",\n help=\"name of output image directory\")\n parser.add_argument(\n \"--work-dir\",\n type=str,\n default=os.path.join(\"..\", \"facedetver_data\"),\n help=\"path to working directory\")\n args = parser.parse_args()\n return args", "title": "" }, { "docid": "df900d14f8cdd2b33338edf21dc89d6e", "score": "0.6842722", "text": "def parse_arguments():\n\n parser = argparse.ArgumentParser(description='Generate synthetic text data for text recognition.')\n parser.add_argument(\n \"-r\",\n \"--report_dir\",\n type=str,\n nargs=\"?\",\n help=\"The report directory in .txt extension\",\n default=\"report.txt\",\n )\n parser.add_argument(\n \"-i\",\n \"--input_dir\",\n type=str,\n nargs=\"?\",\n help=\"The raw data directory in .json extension\",\n default=\"\"\n )\n parser.add_argument(\n \"-f\",\n \"--format\",\n type=str,\n nargs=\"?\",\n help=\"'squard' or 'iapp'\",\n default=\"\"\n )\n parser.add_argument(\n \"-en\",\n \"--engine\",\n type=str,\n nargs=\"?\",\n help=\"Tokenizer to use ==> from pythainlp, for example: 'deepcut' or 'mm'\",\n default=\"mm\"\n )\n parser.add_argument(\n \"-d\",\n \"--dictionary_dir\",\n type=str,\n nargs=\"?\",\n help=\"The dictionary directory in .txt extension\",\n default='dictionary.txt'\n )\n parser.add_argument(\n \"-tr\",\n \"--output_train_dir\",\n type=str,\n nargs=\"?\",\n help=\"The training set directory in .json extension ==> convert into SQUARD format\",\n default=\"train.json\"\n )\n parser.add_argument(\n \"-te\",\n \"--output_test_dir\",\n type=str,\n nargs=\"?\",\n help=\"The test set directory in .json extension ==> convert into SQUARD format\",\n default=\"test.json\"\n )\n parser.add_argument(\n \"-p\",\n \"--train_percent\",\n type=int,\n nargs=\"?\",\n help=\"Percentage of train-test spliting\",\n default=\"80\"\n )\n return parser.parse_args()", "title": "" }, { "docid": "0093df22167436c79189a701739587b9", "score": "0.68421334", "text": "def parse_args():\n description = \"\"\"Identify the language of a text.\nThe text is read from STDIN and the output is displayed on STDOUT.\n\"\"\"\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('--format', default='csv', choices=['csv', 'json'], help='Format of the output')\n\n return parser.parse_args()", "title": "" }, { "docid": "68a693afe85e883746cc95b475fc395a", 
"score": "0.6839043", "text": "def parse_command_line():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-l', '--log-level', metavar='LEVEL',\n action='store', dest='log_level', default=21,\n type=int, choices=xrange(51),\n help='1 DEBUG; 11 INFO; 21 WARNING; 31 ERROR; 41 CRITICAL')\n subparsers = parser.add_subparsers()\n parser_maps = subparsers.add_parser('maps')\n parser_maps.add_argument('project_number')\n parser_maps.add_argument('-p', '--popularity', type=int, default=1)\n parser_maps.add_argument('-e', '--excluded', nargs='+')\n parser_maps.set_defaults(func=handle_maps)\n parser_corr = subparsers.add_parser('correlate')\n parser_corr.add_argument('project_number')\n parser_corr.set_defaults(func=handle_correlate)\n return parser.parse_args()", "title": "" }, { "docid": "5e7d4907c460d1b3e3d12e78c06876cb", "score": "0.6838052", "text": "def parse_args():\n parser = ArgumentParser(description='USCIS Case Status Checker')\n parser.add_argument('--receipt-numbers', '-n', type=str, nargs='*',\n help='Receipt numbers')\n parser.add_argument('--num-threads', '-t', type=int, default=16,\n help='Maximum number of threads')\n parser.add_argument('--before-cases', '-B', type=int, default=0,\n help='Number of cases before the given receipt number')\n parser.add_argument('--after-cases', '-A', type=int, default=0,\n help='Number of cases after the given receipt number')\n args = parser.parse_args()\n\n if not args.receipt_numbers:\n print('At least one receipt number is required')\n sys.exit(1)\n\n if (((args.before_cases > 0 or args.after_cases > 0) and\n len(args.receipt_numbers) > 1)):\n print('Use only one receipt number for bulk case queries')\n sys.exit(1)\n\n return args", "title": "" }, { "docid": "5cc54333a932b2fb07ce9b88a4e38682", "score": "0.6836818", "text": "def parse_args():\n\n parser = ArgumentParser(description=description,\n prog=__file__)\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"-v\", \"--verbose\", action=\"store_const\",\n dest=\"level\", const=DEBUG_LEVEL, default=INFO_LEVEL,\n help=\"Increase output verbosity\")\n group.add_argument(\"-q\", \"--quiet\", action=\"store_const\",\n dest=\"level\", const=FATAL_LEVEL,\n help=\"Decrease output verbosity\")\n parser.add_argument(\"-H\", \"--host\", dest=\"ADDR\", type=str,\n default=DEFAULT_ADDR, help=\"service IP address\")\n parser.add_argument(\"-p\", \"--port\", dest=\"PORT\", type=int,\n default=DEFAULT_PORT, help=\"service port\")\n parser.add_argument(\"-s\", \"--storage\", dest=\"DIRPATH\", type=str,\n default=DEFAULT_DIRPATH, help=\"storage dir path\")\n\n return parser.parse_args()", "title": "" }, { "docid": "b2b57e1b7f7ed55e559ad0823a4f6c84", "score": "0.68309915", "text": "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"Creates a map plot with \"\n \" a fault and stations.\")\n parser.add_argument(\"-o\", \"--output\", dest=\"outfile\", required=True,\n help=\"output png file\")\n parser.add_argument(\"--station-list\", \"-sl\",\n dest=\"station_list\", required=True,\n help=\"station list with latitude and longitude\")\n parser.add_argument(\"--title\", \"-t\",\n dest=\"plot_title\",\n help=\"title for plot\")\n parser.add_argument('src_files', nargs='+')\n args = parser.parse_args()\n\n return args", "title": "" }, { "docid": "a258abcc94df522ce6d489796c77cbbe", "score": "0.68285894", "text": "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input-dirpath', type=unicode)\n parser.add_argument('-o', 
'--output-dirpath', type=unicode)\n\n return parser.parse_args()", "title": "" }, { "docid": "030f1b0d426b4d68ab518a708280b104", "score": "0.68209887", "text": "def parse_args():\n parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')\n parser.add_argument(\n '--dataset',\n help='training dataset')\n parser.add_argument(\n '--cfg', dest='cfg_file', required=True,\n help='optional config file')\n\n parser.add_argument(\n '--load_ckpt', help='path of checkpoint to load')\n parser.add_argument(\n '--load_detectron', help='path to the detectron weight pickle file')\n\n parser.add_argument(\n '--output_dir',\n help='output directory to save the testing results. If not provided, '\n 'defaults to [args.load_ckpt|args.load_detectron]/../test.')\n\n parser.add_argument(\n '--set', dest='set_cfgs',\n help='set config keys, will overwrite config in the cfg_file.'\n ' See lib/core/config.py for all options',\n default=[], nargs='*')\n\n parser.add_argument(\n '--range',\n help='start (inclusive) and end (exclusive) indices',\n type=int, nargs=2)\n parser.add_argument(\n '--multi-gpu-testing', help='using multiple gpus for inference',\n action='store_true')\n parser.add_argument(\n '--vis', dest='vis', help='visualize detections', action='store_true')\n\n return parser.parse_args()", "title": "" }, { "docid": "4bfd44dbb68a5033ef7f7f8f72471d41", "score": "0.6815874", "text": "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--num_samples\", help=\"# samples\")\n parser.add_argument(\"--num_features\", help=\"# features\")\n\n parser.add_argument(\"--threshold\", help=\"MAE threshold\")\n\n parser.add_argument(\"--input-model\", help=\"Path of input model to create\")\n options = parser.parse_args()\n return options", "title": "" }, { "docid": "46842cf72e1ae69093acbc942f6ea7c9", "score": "0.6813995", "text": "def parse_args(self):\n self.program_name = self.argv[0]\n\n if len(self.argv[1:]) == 0:\n return self.show_usage()\n\n self.action = self.argv[1]\n if self.action not in ['recheck', 'upgrade']:\n print \"E: command \\\"%s\\\" not recognized.\" % self.action\n return 2\n\n try:\n opts, args = getopt.getopt(self.argv[2:], \"nhdH:\", [\"dry-run\", \"help\", \"hosts=\"])\n except getopt.GetoptError, err:\n print \"E: %s\" % str(err) # will print something like \"option -a not recognized\"\n return 2\n\n self.hosts = config.hosts_default\n Main.debug = False\n self.dry_run = False\n self.download_only = False\n for o, a in opts:\n if o in ('-h', '--help'):\n return self.show_usage()\n elif o in ('-d'):\n self.download_only = True\n elif o in ('-D'):\n print 'D: Enable debug.'\n Main.debug = True\n elif o in ('-n', '--dry-run'):\n self.dry_run = True\n elif o in ('-H', '--hosts'):\n self.hosts = a", "title": "" }, { "docid": "4f4991475fbdde7500c3a6f73dea8405", "score": "0.68119276", "text": "def parse_args():\n ap = argparse.ArgumentParser()\n ap.add_argument('-i', '--input', type=str,\n default='src/results/default.json', help='File where the run stats will be saved')\n ap.add_argument('-o', '--output', type=str,\n default='plots', help='Output directory for the plots')\n\n return ap.parse_args()", "title": "" }, { "docid": "8e21515869bd61a3a89b5564dadd3983", "score": "0.6810306", "text": "def parse_arguments(argv):\n parser = argparse.ArgumentParser(prog='MalNet')\n parser.add_argument('-d', '--data-dir', dest='data_dir', type=str, default='data',\n help='Directory that stores our dataset.')\n parser.add_argument('--scale', dest='scale', type=float, 
default=1.,\n help='Scale of training/test dataset.')\n return parser.parse_args(argv)", "title": "" }, { "docid": "cfa2d59dd66afdb3b754838f71a45bd0", "score": "0.68087876", "text": "def _parse_command_line_options(self):\n self.args = parser.parse_args()\n\n # Make the stacks prettier\n if self.args.stacks:\n self.args.stacks = [s.strip() for s in self.args.stacks.split(',')]\n\n # Split configuration paths\n if self.args.config:\n self.args.config = [c.strip() for c in self.args.config.split(',')]\n\n if self.args.cumulus_version:\n settings_conf = SafeConfigParser()\n settings_conf.read(\n ospath.realpath(\n '{}/../settings.conf'.format(ospath.dirname(__file__))))\n print('Cumulus version {}'.format(\n settings_conf.get('general', 'version')))\n sys.exit(0)\n elif not self.args.environment:\n raise ConfigurationException('--environment is required')", "title": "" }, { "docid": "80f9851ac7d106a28be7e801c5188865", "score": "0.6805368", "text": "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--file\", dest=\"filename\",\n help=\"setting file\", metavar=\"FILE\")\n parser.add_argument(\"-e\", \"--encoding\", dest=\"encoding\", default='utf8',\n help=\"input file encoding\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output\",\n help=\"output file\", metavar=\"FILE\")\n parser.add_argument(\"-n\", \"--dryrun\", dest=\"dryrun\",\n help=\"dry run\", default=False, action=\"store_true\")\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", default=False,\n action=\"store_true\", help=\"verbose mode\")\n parser.add_argument(\"-q\", \"--quiet\", dest=\"quiet\", default=False,\n action=\"store_true\", help=\"quiet mode\")\n parser.add_argument(\"filename\", nargs=1, help=\"input file path\")\n\n args = parser.parse_args()\n\n if args.verbose:\n logging.basicConfig(level=logging.DEBUG)\n elif not args.quiet:\n logging.basicConfig(level=logging.INFO)\n\n return args", "title": "" }, { "docid": "02614e87b96a6e5a362490191b91e0cf", "score": "0.6801102", "text": "def _parse_command_line():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--portserver_static_pool',\n type=str,\n default='15000-24999',\n help='Comma separated N-P Range(s) of ports to manage (inclusive).')\n parser.add_argument(\n '--portserver_address',\n '--portserver_unix_socket_address', # Alias to be backward compatible\n type=str,\n default='@unittest-portserver',\n help='Address of AF_UNIX socket on which to listen on Unix (first @ is '\n 'a NUL) or the name of the pipe on Windows (first @ is the '\n r'\\\\.\\pipe\\ prefix).')\n parser.add_argument('--verbose',\n action='store_true',\n default=False,\n help='Enable verbose messages.')\n parser.add_argument('--debug',\n action='store_true',\n default=False,\n help='Enable full debug messages.')\n return parser.parse_args(sys.argv[1:])", "title": "" }, { "docid": "84fdbdde80105ab18353425b9862c90d", "score": "0.67976767", "text": "def parse_cmdline(argv=None):\n if argv is None:\n argv = sys.argv[1:]\n\n # initialize the parser object:\n parser = argparse.ArgumentParser(description='Finds the distances between each pair '\n 'of atoms listed in the pair file for '\n 'each time step in the given LAMMPS dump '\n 'file.')\n parser.add_argument(\"-p\", \"--pair_files\", action=\"append\", default=[],\n help=\"One or more files containing atom pairs (default {0})\".format(\n DEF_PAIRS_FILE))\n parser.add_argument(\"-f\", \"--file\", help=\"The dump file to process\", default=None)\n parser.add_argument(\"-l\", 
\"--list_file\", help=\"The file with a list of dump files to process\", default=None)\n\n args = None\n try:\n args = parser.parse_args(argv)\n if not args.pair_files:\n args.pair_files.append(DEF_PAIRS_FILE)\n if not os.path.isfile(DEF_PAIRS_FILE):\n raise InvalidDataError(\"No pair file specified and did not find the default \"\n \"pair file: {}\".format(DEF_PAIRS_FILE))\n if (args.file is None) and (args.list_file is None):\n raise InvalidDataError(\"Specify either a file or list of files to process.\")\n except (KeyError, InvalidDataError, SystemExit) as e:\n if hasattr(e, 'code') and e.code == 0:\n return args, GOOD_RET\n warning(e)\n parser.print_help()\n return args, INPUT_ERROR\n return args, GOOD_RET", "title": "" }, { "docid": "27ee9277249527b4cd6d9c830bf7df09", "score": "0.6796988", "text": "def parse_args():\n parser = argparse.ArgumentParser(\n description='Send service status updates to Event Fabric')\n\n parser.add_argument('--username', '-u', metavar='USERNAME',\n required=True,\n help='Username to authenticate to Event Fabric')\n parser.add_argument('--password', '-p', metavar='PASSWORD',\n required=True,\n help='Password to authenticate to Event Fabric')\n parser.add_argument('--channel', '-c', metavar='CHANNEL',\n required=True,\n help='Channel used in the generated event')\n parser.add_argument('--names', '-n', nargs='+',\n required=True,\n help='Names of services to generate events')\n parser.add_argument('--sleep', '-s', type=int, default=10,\n required=True,\n help='Maximum interval between status for a service')\n parser.add_argument('--url', '-U', metavar='URL',\n help='URL for Event Fabric API',\n default=\"https://event-fabric.com/ef/api/\")\n\n return parser.parse_args()", "title": "" }, { "docid": "11b87dc79ab602c0e866df838cede441", "score": "0.67956793", "text": "def parse_arguments(self, config: Config) -> None:\n self._config = config\n\n for arg in sys.argv[1:]: # skip the script path\n\n if self._process_argument(regex=r\"^--input=(.*)$\", arg=arg, arg_semantics='input'):\n pass\n elif self._process_argument(regex=r\"^-i=(.*)$\", arg=arg, arg_semantics='input'):\n pass\n elif self._process_argument(regex=r\"^--output=(.*)$\", arg=arg, arg_semantics='output'):\n pass\n elif self._process_argument(regex=r\"^-o=(.*)$\", arg=arg, arg_semantics='output'):\n pass\n elif self._process_argument(regex=r\"^--help$\", arg=arg, arg_semantics='help', add_to_config=False):\n pass\n elif self._process_argument(regex=r\"^-h$\", arg=arg, arg_semantics='help', add_to_config=False):\n pass\n elif self._process_argument(regex=r\"^--format=(.*)$\", arg=arg, arg_semantics='format'):\n pass\n elif self._process_argument(regex=r\"^-f=(.*)$\", arg=arg, arg_semantics='format'):\n pass\n elif self._process_argument(regex=r\"^--br$\", arg=arg, arg_semantics='br', add_to_config=False):\n pass\n elif self._process_argument(regex=r\"^-b$\", arg=arg, arg_semantics='br', add_to_config=False):\n pass\n else:\n raise InvalidArgumentsException(message='Unknown argument ' + arg)", "title": "" }, { "docid": "e15ea18284ee7d297cec8b23e08b0837", "score": "0.6795398", "text": "def parse_args(args=None):\n\n parser = argparse.ArgumentParser(\n description=Description.SHORT,\n prog=Description.NAME)\n\n # version info\n parser.add_argument(\n \"-V\", \"--version\", action=\"version\",\n version=\"%(prog)s \"+Description.VERSION)\n\n subparsers = parser.add_subparsers(dest=\"subcommand\")\n\n # parsing options\n # create db from snp files\n parser.add_argument(\n \"--load-db\", dest=\"create_db\", 
action=\"store_true\",\n help=\"Create db from snp files. SNP files will not be kept \" +\n \" in memory then. Recommended for large files/small ram.\")\n\n # overview statistics\n vcfstat_parser = subparsers.add_parser(\n \"vcfstats\", help=\"VCF file statistics and filtering\")\n # input options\n vcfstat_input = vcfstat_parser.add_argument_group(\"Input options\")\n vcfstat_input.add_argument(\n \"--vcf\", type=str, action=InputFileAction, dest=\"input\",\n default=\"\", nargs=1, required=True,\n help=\"path to a single vcf/vcf.gz file\")\n\n # familial analysis\n cohort_parser = subparsers.add_parser(\n \"cohort\", help=\"Familial/Cohort Analysis.\")\n # input options\n cohort_input = cohort_parser.add_argument_group(\"Input options\")\n cohort_input_me = cohort_input.add_mutually_exclusive_group(required=True)\n cohort_input_me.add_argument(\n \"--vcf\", type=str, action=InputFileAction, dest=\"input\", nargs=1,\n help=\"path to a single vcf/vcf.gz file\")\n cohort_input_me.add_argument(\n \"--tsv\", type=str, action=InputFileAction, dest=\"input\", nargs=1,\n help=\"path to a tsv file containing paths to multiple\" +\n \"vcf/vcf.gz files and information on their relatedness\")\n\n # healthy versus diseased analysis\n versus_parser = subparsers.add_parser(\n \"versus\", help=\"Healthy versus diseased analysis.\")\n versus_input = versus_parser.add_argument_group(\"Input options\")\n versus_input_me = versus_input.add_mutually_exclusive_group(required=True)\n versus_input_me.add_argument(\n \"--vcf\", type=str, nargs=\"?\", dest=\"vcf_file\",\n help=\"path to a single vcf/vcf.gz file\")\n versus_input_me.add_argument(\n \"--tsv\", type=str, nargs=1, dest=\"tsv_file\",\n help=\"path to a tsv file containing paths to multiple\" +\n \"vcf/vcf.gz files and information on their relatedness\")\n\n # add filter options to every subparser\n for name, subp in subparsers.choices.items():\n\n # filter options\n filter_group = subp.add_argument_group(\"Variant filter options\")\n filter_group.add_argument(\n \"--quality-gt\", type=float, dest=\"filter_gq\",\n help=\"Filter variants below a quality score.\")\n filter_group.add_argument(\n \"--mapping-qual\", type=float, dest=\"filter_mq\",\n help=\"Filter variants below a given mapping quality\")\n filter_group.add_argument(\n \"--read-depth\", type=float, dest=\"filter_dp\",\n help=\"Filter variants with a read depth below the given threshold\")\n filter_group.add_argument(\n \"--regions\", type=str, nargs=\"+\",\n dest=\"filter_regions\", default=[], action=ChromRegionsAction,\n help=\"Consider only variants in the defined region. Can be \" +\n \"a list of regions in the format 'chr2:340-100' or the path \" +\n \"to a file in GATK style or BED format.\")\n filter_group.add_argument(\n \"--ignore-regions\", type=str, nargs=\"+\",\n dest=\"filter_excl_regions\", default=[], action=ChromRegionsAction,\n help=\"Ignore the variant in the given regions. Can be a \" +\n \"list of comma separated regions or the path to an interval \" +\n \"list file in GATK style or BED format\")\n filter_group.add_argument(\n \"--vtypes\", dest=\"filter_vtypes\", default=[],\n choices=[\"insertion\", \"deletion\", \"snp\", \"snv\", \"indel\"],\n help=\"Consider only variants of a certain type. 
\" +\n \"For a explanation of variant types, please see: \" +\n \"http://vcftools.sourceforge.net/VCF-poster.pdf\")\n filter_group.add_argument(\n \"--ignore-types\", dest=\"filter_excl_vtypes\", default=[],\n choices=[\"insertion\", \"deletion\", \"snp\", \"snv\", \"indel\"],\n help=\"Filter Variants of a certain type\")\n filter_group.add_argument(\n \"--perc-pass\", dest=\"filter_perc_pass\", type=float,\n default=0.9, help=\"Percentage of variants in a multi-sample \" +\n \"record that have to pass the filters.\")\n\n # annotation filters\n ann_group = subp.add_argument_group(\"Annotation Filter Options\")\n ann_group.add_argument(\n \"--vep-consequences\", type=str, nargs=\"+\", dest=\"vep_consequence\",\n default=[],\n help=\"Filter for variants based on VEP Consequence Annotation.\")\n ann_group.add_argument(\n \"--vep-impact\", type=str, nargs=\"+\", dest=\"vep_impact\", default=[],\n help=\"Filter for variants based on VEP IMPACT Annotation\")\n ann_group.add_argument(\n \"--vep-significance\", type=str, nargs=\"+\", dest=\"vep_significance\",\n default=[],\n help=\"Filter for variants based on VEP Significance annotation\")\n\n # intersection options\n intersection_group = subp.add_argument_group(\"Intersection and \" +\n \"subtractionOptions\")\n intersection_group.add_argument(\n \"--isec-alt-ratio\", type=float, dest=\"alt_ratio\", default=0.9,\n help=\"Alternate allel ratio for computing intersections\")\n intersection_group.add_argument(\n \"--isec-min-cr\", type=float, dest=\"call_rate\", default=0.9,\n help=\"Minimum callrate for a record to be considered \" +\n \"an intersection\")\n intersection_group.add_argument(\n \"--sub-alt-ratio\", type=float, dest=\"alt_ratio2\", default=0.1,\n help=\"Maximum alternate allel ratio for the subtrahend group \" +\n \"when computing subtraction\")\n intersection_group.add_argument(\n \"--sub-max-cr\", type=float, dest=\"call_rate2\", default=0.5,\n help=\"Maximum callrate for the subtrahend group when computing \" +\n \"subtraction\")\n\n # Output options\n output_group = subp.add_argument_group(\"Output options\")\n output_group.add_argument(\n \"-r\", \"--report-outdir\", type=str, nargs=1,\n dest=\"report_dir\", default=\"DiscavarReports\", action=OutdirAction,\n help=\"Output directory for Reports.\")\n output_group.add_argument(\n \"-o\", \"--vcf-outdir\", type=str, nargs=1,\n dest=\"vcf_dir\", default=\"Discavar\", action=OutdirAction,\n help=\"Output Directory for VCF files.\")\n output_group.add_argument(\n \"--vcf-copy\", type=str, nargs=1,\n dest=\"vcf_copy\", default=\"\",\n help=\"Do not modify the input VCF and operate on a copy \" +\n \"instead. The copy will be placed in the VCF outdir.\")\n output_group.add_argument(\n \"--interactive\", action=\"store_true\", dest=\"interactive_report\",\n default=False,\n help=\"Export a JuPyter notebook with filtered vcf data. 
\" +\n \"NOT IMPLEMENTED\")\n\n if args:\n job = parser.parse_args(args)\n\n else:\n job = parser.parse_args()\n\n if is_valid_job(job, parser):\n return job", "title": "" }, { "docid": "c5ee42bff0d27a7b2d24f6c439399f8c", "score": "0.6793822", "text": "def parse_args():\n import argparse as ap\n\n parser = ap.ArgumentParser(\n description='Train an evaluator to decide between arguments.')\n parser.add_argument('-d', help='Datasets directory',\n type=str, default='data')\n parser.add_argument('-tr', help='Training dataset',\n type=str, default='train-full.txt')\n parser.add_argument('-trd', help='Training directory',\n type=str, default='train')\n parser.add_argument('-ts', help='Testing dataset',\n type=str, default='test.tsv')\n parser.add_argument('-tsd', help='Testing directory',\n type=str, default='test')\n parser.add_argument('-g', help='GoogleNews corpus',\n type=str,\n default='GoogleNews-vectors-negative300.bin.gz')\n parser.add_argument('-c', help='Write to csv',\n type=str, default='word2vec.csv')\n parser.add_argument('-dbg', help='Debug mode',\n action='store_true', default=False)\n parser.add_argument('-t', help='Do testing',\n type=bool, default=False)\n return parser.parse_args()", "title": "" }, { "docid": "0afa95b3f2a7f8df257cf02b84292d10", "score": "0.6790025", "text": "def parse_args():\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=RawTextHelpFormatter)\n parser.add_argument(\n '-i',\n '--input',\n type=str,\n required=True,\n help='Timeline file to read. Supported formats: {adapters}'\n ''.format(adapters=otio.adapters.available_adapter_names())\n )\n parser.add_argument(\n '-f',\n '--folder',\n type=str,\n required=True,\n help='Folder to look for media in.'\n )\n parser.add_argument(\n '-o',\n '--output',\n type=str,\n required=True,\n help=\"Timeline file to write out.\"\n )\n return parser.parse_args()", "title": "" }, { "docid": "d3620183c84651416ea86304914bbc99", "score": "0.67884666", "text": "def parse_arguments():\n argparser = argparse.ArgumentParser(\n description=\"Naive Reversi implementation\")\n argparser.add_argument(\"--infile\",\n nargs=\"?\",\n type=argparse.FileType(\"r\"),\n default=sys.stdin,\n help=\"Filename of JSON file containing board and move, default stdin\")\n return argparser.parse_args()", "title": "" }, { "docid": "cf22ce779cd613acb3684e0c82019699", "score": "0.67853886", "text": "def parse_args():\n\n parser = argparse.ArgumentParser(\n description=\"Let's recognize hand gestures!\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\n '--weights', #Names \"load-checkpoint\" in project 4\n default=None,\n help='''Path to model weights file (should end with the\n extension .h5). Evaluates camera images based on these weights''')\n parser.add_argument(\n '--data',\n default='data'+os.sep,\n help='Location where the dataset is stored.')\n parser.add_argument(\n '--confusion',\n action='store_true',\n help='''Log a confusion matrix at the end of each\n epoch (viewable in Tensorboard). 
This is turned off\n by default as it takes a little bit of time to complete.''')\n\n return parser.parse_args()", "title": "" }, { "docid": "64ef627e783a1c461dd9daa0d67aac3b", "score": "0.67794603", "text": "def parse_arguments():\n parser = argparse.ArgumentParser(\n description='Compute knight\\'s dialer sequence counts')\n parser.add_argument('start_pos', type=int, help='Starting position')\n parser.add_argument('num_hops', type=int, help='Number of hops')\n args = parser.parse_args()\n\n if args.start_position < 0 or args.start_position > 9:\n print('Starting position must be in [0, 9]')\n sys.exit(1)\n if args.num_hops < 0:\n print('Number of hops must be nonnegative')\n sys.exit(1)\n\n return args", "title": "" }, { "docid": "ce31770bf77d16ad75f4a088d160f107", "score": "0.67777145", "text": "def _parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n #parser.add_argument(\"args\", metavar=\"N\", type=str, nargs=\"*\", help=\"Positional arguments.\")\n #parser.add_argument(\"\", dest=\"\", type=\"\", default=, help=)\n #parser.add_argument(\"--version\", action=\"version\", version=\"<the version>\")\n\n return parser.parse_args()", "title": "" }, { "docid": "2ef309292a0953689c259d9440bd5ee6", "score": "0.67712295", "text": "def parse_args():\n parser = argparse.ArgumentParser('Reading Comprehension on BaiduRC dataset')\n\n \n parser.add_argument('--train', action='store_true',\n help='train the model')\n parser.add_argument('--evaluate', action='store_true',\n help='evaluate the model on dev set')\n parser.add_argument('--predict', action='store_true',\n help='predict the answers for test set with trained model')\n parser.add_argument('--gpu', type=str, default='0',\n help='specify gpu device')\n\n train_settings = parser.add_argument_group('train settings')\n train_settings.add_argument('--optim', default='adam',\n help='optimizer type')\n train_settings.add_argument('--learning_rate', type=float, default=0.001,\n help='learning rate')\n train_settings.add_argument('--weight_decay', type=float, default=0,\n help='weight decay')\n train_settings.add_argument('--dropout_keep_prob', type=float, default=1,\n help='dropout keep rate')\n train_settings.add_argument('--batch_size', type=int, default=16,\n help='train batch size')\n train_settings.add_argument('--epochs', type=int, default=10,\n help='train epochs')\n\n path_settings = parser.add_argument_group('path settings')\n path_settings.add_argument('--train_files', nargs='+',\n default=['/devdata1/zhao/preprocessed/trainset/search.train.json'],\n help='list of files that contain the preprocessed train data')\n path_settings.add_argument('--dev_files', nargs='+',\n default=['/devdata1/zhao/preprocessed/devset/search.dev.json'],\n help='list of files that contain the preprocessed dev data')\n path_settings.add_argument('--test_files', nargs='+',\n default=['/devdata1/zhao/test1set/preprocessed/zhidao.test1.json'],\n help='list of files that contain the preprocessed test data')\n path_settings.add_argument('--brc_dir', default='../data/baidu',\n help='the dir with preprocessed baidu reading comprehension data')\n path_settings.add_argument('--vocab_dir', default='../data/vocab/',\n help='the dir to save vocabulary')\n path_settings.add_argument('--model_dir', default='../data/models/',\n help='the dir to store models')\n path_settings.add_argument('--result_dir', default='../data/results/',\n help='the dir to output the results')\n path_settings.add_argument('--summary_dir', default='../data/summary/',\n help='the dir to 
write tensorboard summary')\n path_settings.add_argument('--log_path',\n help='path of the log file. If not set, logs are printed to console')\n \n parser.add_argument('--name', type=str, default=\"r-net\")\n parser.add_argument('--device_id', type=int, default=0)\n parser.add_argument('--start_epoch', type=int, default=0)\n parser.add_argument('--epoch_num', type=int, default=50)\n \n parser.add_argument('--debug', type=bool, default=False)\n parser.add_argument('--checkpoint_path', type=str, default=\"checkpoint\")\n parser.add_argument('--resume', type=str, default='/home/lab713/data1/ipython/zhao/DuReader/pytorch/checkpoint/r-net_Apr-02_23-29/checkpoint.pth.tar')\n #/home/lab713/data1/ipython/zhao/DuReader/pytorch/checkpoint/r-net_Apr-02_23-29/checkpoint.pth.tar\n \n parser.add_argument('--update_word_embedding', type=bool, default=False)\n parser.add_argument('--hidden_size', type=int, default=75)\n parser.add_argument('--attention_size', type=int, default=75)\n parser.add_argument('--dropout', type=float, default=0.2)\n parser.add_argument('--residual', type=bool, default=False)\n parser.add_argument('--bidirectional', type=bool, default=True)\n parser.add_argument('--num_layers', type=int, default=3)\n parser.add_argument('--app_path', type=str, default='/home/lab713/data1/ipython/zhao/DuReader/pytorch/')\n parser.add_argument('--pin_memory', type=bool, default=False)\n parser.add_argument('--test_batch_size', type=int, default=1)\n\n return parser.parse_args()", "title": "" }, { "docid": "a9169458d6980291469d0a081219be82", "score": "0.67694956", "text": "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-cc', '--catalogcode', help='catalog barcode')\n parser.add_argument('-cn', '--catalognumber', help='catalog number')\n parser.add_argument('-lc', '--lotcode', help='LOT barcode')\n parser.add_argument('-ln', '--lotnumber', help='LOT number')\n parser.add_argument('-li', '--lic', help='LIC identifier')\n parser.add_argument('-um', '--unitofmeasure', help='Unit of Measure')\n parser.add_argument('-f', '--function', help='function to execute',\n type=str, choices=['get_lic', 'check_lic_length',\n 'check_lic_first_char',\n 'check_code_start',\n 'get_catalog_number',\n 'get_unit_of_measure',\n 'get_sum_of_digits',\n 'calculate_checkdigit',\n 'get_checkdigit',\n 'create_catalog_code',\n 'check_lot_code_start',\n 'get_lot_number',\n 'get_link_char_from_catalog_code',\n 'get_link_char'\n ])\n parser.add_argument('-v', '--verbose', action='store_true',\n help='increase output verbosity')\n return parser.parse_args()", "title": "" }, { "docid": "fd9e31d3cd41a33449f02d37519643a5", "score": "0.67679095", "text": "def parse_args(args=None):\n parser = argparse.ArgumentParser(\n description=\"BSM flavor ratio analysis\",\n formatter_class=misc_utils.SortingHelpFormatter,\n )\n parser.add_argument(\n '--seed', type=misc_utils.seed_parse, default='25',\n help='Set the random seed value'\n )\n parser.add_argument(\n '--threads', type=misc_utils.thread_type, default='1',\n help='Set the number of threads to use (int or \"max\")'\n )\n parser.add_argument(\n '--spectral-index', type=float, default='-2',\n help='Astro spectral index'\n )\n parser.add_argument(\n '--datadir', type=str, default='./untitled',\n help='Path to store chains'\n )\n fr_utils.fr_argparse(parser)\n mcmc_utils.mcmc_argparse(parser)\n nuisance_argparse(parser)\n misc_utils.remove_option(parser, 'injected_ratio')\n misc_utils.remove_option(parser, 'plot_angles')\n misc_utils.remove_option(parser, 
'plot_elements')\n if args is None: return parser.parse_args()\n else: return parser.parse_args(args.split())", "title": "" }, { "docid": "b93ca9d6622f08b3a69cd14fe031461a", "score": "0.67674375", "text": "def parse_args():\n parser = argparse.ArgumentParser(description=\"comparing proguard-generated and predict mappings\")\n parser.add_argument(\"--mappings\", action=\"store\", dest=\"mappings_file\",\n required=True, help=\"a file containing a list of mapping.txt paths\")\n parser.add_argument(\"--apks\", action=\"store\", dest=\"apks_file\",\n required=True, help=\"a file containing a list of apk files\")\n parser.add_argument(\"-o\", action=\"store\", dest=\"out_file\", help=\"output file\")\n parser.add_argument(\"-i\", action=\"store\", dest=\"start_id\", help=\"start id\")\n\n options = parser.parse_args()\n # print options\n return options", "title": "" }, { "docid": "145856102658a2a19dbf4d135b459d19", "score": "0.6766407", "text": "def parse_args(parser):\n if parser:\n global args\n args = parser.parse_args()\n else:\n raise EnvironmentError(\n Logcreator.info(\"Parsing of comand line parameters failed\")\n )\n return args", "title": "" }, { "docid": "c7316c7418a7a5bc695178f6509e2acc", "score": "0.6760515", "text": "def parse_args():\n parser = argparse.ArgumentParser(description=\"eye tracker frame synchronized capture\")\n parser.add_argument(\n \"--fps\", help=\"Frames per second\",\n required=True, type=float)\n \n parser.add_argument(\n \"-c\", \"--cam\", help=\"The camera index, usually 0 for web camera\", default=0, type=int)\n \n parser.add_argument(\n \"-n\", \"--numframes\", help=\"number of desired frames\", default=20, type=int)\n \n parser.add_argument(\n \"-w\", \"--weights\", help=\"the weights directory containing haarcascade_frontalface_default.xml and haarcascade_eye.xml files\", default=\"./weights/\")\n\n parser.add_argument( \n \"--visualise\", help=\"visualize the process\", default=0, type=int)\n\n parser.add_argument(\n \"-o\", \"--output\", help=\"the output directory or output .mp4 file to save frames to\", default=\"./\")\n\n return parser.parse_args()", "title": "" }, { "docid": "8a0ce7d1ebd700c3de634abf835c0af4", "score": "0.67523956", "text": "def _parse_args():\n description = \"\"\"\n Deploys servers at Cloud At Cost.\n \"\"\"\n parser = ArgumentParser(description=description)\n parser.add_argument('-k', '--ssh-pub-key', type=FileType('r'), metavar='ssh_pub_key', help='ssh public key to upload to the server to configure', required=True)\n parser.add_argument('-n', '--hostname', type=str, metavar='hostname', help='hostname to give to the server to configure', required=True)\n parser.add_argument('-t', '--type', type=str, metavar='server_type', help='server distribution type to configure', required=True)\n args = parser.parse_args()\n return args", "title": "" }, { "docid": "ee3ad13510398ea1cb8acf7d1682c7e9", "score": "0.6749278", "text": "def parse_args():\n parser = argparse.ArgumentParser(\"Test UDR models in simulator.\")\n # parser.add_argument('--exp-name', type=str, default=\"\",\n # help=\"Experiment name.\")\n parser.add_argument(\"--model-path\", type=str, default=[], nargs=\"+\",\n help=\"Path to one or more Aurora Tensorflow checkpoints\"\n \" or models to serve.\")\n parser.add_argument('--save-dir', type=str, required=True,\n help=\"direcotry to save the model.\")\n parser.add_argument('--dimension', type=str, # nargs=1,\n choices=[\"bandwidth\", \"delay\", \"loss\", \"queue\"])\n parser.add_argument('--config-file', type=str, 
required=True,\n help=\"config file\")\n parser.add_argument('--train-config-dir', type=str, default=None,\n help=\"path to one or more training configs.\")\n parser.add_argument('--duration', type=int, required=True,\n help=\"trace duration\")\n parser.add_argument('--delta-scale', type=float, required=True,\n help=\"delta scale\")\n parser.add_argument('--plot-only', action='store_true',\n help='plot only if specified')\n parser.add_argument('--n-models', type=int, default=3,\n help='Number of models to average on.')\n parser.add_argument('--seed', type=int, default=42, help='seed')\n return parser.parse_args()", "title": "" }, { "docid": "ef6347b6119112602f4993f960f13cab", "score": "0.6747314", "text": "def parseCommandLineArgs():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--test\",\n action=\"store_true\", help=\"will enable test mode\")\n parser.add_argument(\"-v\", \"--verbose\",\n action=\"store_true\", help=\"will enable print statements\")\n parser.add_argument(\"-m\", \"--mac\",\n action=\"store_true\", help=\"will run on mac\")\n return parser.parse_args()", "title": "" }, { "docid": "d29d79697158e1fdb4af8b2374edb058", "score": "0.6747061", "text": "def parse_args():\n parser = argparse.ArgumentParser(description='Function control parameters.',\n epilog='Output in ~/.../DrifterPrediction/results/ssa/',\n prog='mssa_extract_patterns',\n usage='%(prog)s [arguments]')\n parser.add_argument('file', metavar='file', type=str, nargs='+',\n help='csv file to operate on')\n parser.add_argument('-d', '--days', metavar='days', type=int, default=None,\n help='Number of days in the time series to decompose')\n parser.add_argument('-l', '--lag', metavar='lag_window', type=float,\n help='Lag window for composing trajectory matrix.',\n default=0.35)\n parser.add_argument('-s', '--signal', metavar='threshold', type=float,\n help='Fraction of variance (0,1] to be considered signal',\n default=0.9)\n parser.add_argument('-c', '--correlation', metavar='correlation',\n help='Minimum correlation coefficient for mode groupings',\n type=float, default=0.5)\n parser.add_argument('-n', '--normalize', action='store_true',\n help='Normalize data before analysis')\n parser.add_argument('-w', '--writeout', action='store_true',\n help='Write data to file instead of printing to terminal')\n return parser.parse_args()", "title": "" }, { "docid": "7e10cce80c6fe8f76abc076d9ca2ca3e", "score": "0.6743773", "text": "def parse_args(args):\n parser = argparse.ArgumentParser(\n description=__description__)\n parser.add_argument(\n \"-v\", \"--version\",\n action=\"version\",\n version=\"unwrap {ver}\".format(ver=__version__),\n help=\"Show version number and exit.\")\n parser.add_argument(\n \"in_file\",\n metavar=\"INPUT\",\n help=\"Input file.\")\n parser.add_argument(\n \"out_file\",\n metavar=\"OUTPUT\",\n help=\"Output file.\")\n parser.add_argument(\n \"-f\", \"--format\",\n default=\"txt\",\n help=\"Output format.\")\n parser.add_argument(\n \"-e\", \"--encoding\",\n default=locale.getpreferredencoding(),\n help=\"Character encoding.\")\n parser.add_argument(\n \"-s\", \"--stats\",\n action=\"store_true\",\n help=\"Print statistics on exit.\")\n parser.add_argument(\n \"-i\", \"--iterations\",\n type=int,\n default=2,\n help=\"Number of iterations.\")\n return parser.parse_args(args)", "title": "" }, { "docid": "d6e9098c0a9d0ceecc8e4f6ee5088fcd", "score": "0.67435586", "text": "def parse_arguments():\n parser = ArgumentParser()\n parser.add_argument(\n '--algo',\n default='bubble',\n 
metavar='ALGO',\n choices=['bubble', 'insert', 'quick', 'merge'],\n help='''specify which algorithm to use for sorting\n among [bubble|insert|quick|merge], default bubble''')\n parser.add_argument(\n '--gui',\n action='store_true',\n help='''visualise the algorithm in GUI mode''')\n parser.add_argument(\n 'integers',\n metavar='N',\n type=int,\n nargs='+',\n help='an integer for the list to sort')\n args = parser.parse_args()\n return args", "title": "" }, { "docid": "062a2b08774ce2e2f370469b15a7a884", "score": "0.6743259", "text": "def parseArgs(argv):\n\timport argparse\n\n\tparser = argparse.ArgumentParser(description=\"A short script for splitting one gcode file into parts along layer/height boundaries. This is used to allow material-changes midprint. See bottom of this help message for formatting details\", epilog=\"\"\"The input file can have special formatting to allow it to more easily be split. The largest of these is automatic prefix/postfix location. This follows the following rules. Each rule should be located in its own line. '; START_PREFIX' - indicates the start of the prefix, '; END_PREFIX' - indicates the end of the prefix, '; START_POSTFIX' - indicates the start of the postfix, '; END_POSTFIX' - indicates the end of the postfix\n\nIf the prefix and/or postfix are specified this way and no override is specified on the command line, they will be automatically located and put at the start and end of each part respectively. If the prefix and postfix are not specified in this way, you will need to manually remove it from the input, or the file may be split incorrectly if your prefix or postfix includes z movement.\"\"\")\n\tparser.add_argument(\"--debug\", \"-d\", help=\"Enable debug output\", action=\"store_true\")\n\tparser.add_argument(\"--output-file-name\", \"-o\", help=\"The format for output files. Use {input_file} to include the orginal filename, minus extension and prefix path (basename). Use {input_file_full} to include the full name of the input file without the path (basename only). Use {part_number} to include the number of the part. Default: {input_file}-part{part_number}.gcode\", default=\"{input_file}-part{part_number}.gcode\")\n\tparser.add_argument(\"--prefix\", help=\"Specifies prefix commands to be included at the start of each part.\", default=None)\n\tparser.add_argument(\"--postfix\", help=\"Specifies the postfix commands to be included at the end of each part\", default=None)\n\tparser.add_argument(\"file\", help=\"The base GCODE file you wish to split into multiple parts\")\n\tparser.add_argument(\"split\", help=\"The location at which to split the model. Supported units: mm for milimeters, l for layers. 
The split is performed so that the first layer in the new part is the first layer after the threshold.\", nargs='+')\n\tparser.add_argument(\"--version\", \"-v\", action='version', version='%(prog)s 1.0')\n\n\treturn parser.parse_args(argv)", "title": "" }, { "docid": "3f0fe973fa5afeeea008fa92fdf449fa", "score": "0.67425436", "text": "def parse_args(description=__doc__):\n parser = ArgumentParser(description=description)\n parser.add_argument(\n '--files', nargs='+', required=True,\n help='''One or more *run_info.pkl files to plot'''\n )\n parser.add_argument(\n '--labels', nargs='+', required=True,\n help='''One (legend) label per file'''\n )\n parser.add_argument(\n '--fwd-hists', required=False,\n help='''Path to the forward-simulation pickle file'''\n )\n parser.add_argument(\n '--outdir', required=True,\n help='''Directory into which to place the plots.'''\n )\n parser.add_argument(\n '--paired', action='store_true',\n help='''Display style for visually grouping pairs of lines with\n same-colors but different line styles.'''\n )\n parser.add_argument(\n '--gradient', action='store_true',\n help='''Display style for visually showing a progression from line to\n line via a color gradient.'''\n )\n parser.add_argument(\n '--no-plot', action='store_true',\n help='''Do _not_ make plots, just print summary statistics.'''\n )\n return parser.parse_args()", "title": "" } ]
c4aac080fcd65a538f62b7b02d5a6a96
Initialize your data structure here.
[ { "docid": "15ef55cc94a785758caa65df6411d27f", "score": "0.0", "text": "def __init__(self, nestedList):\n self.flater_list=[]\n for n_e in nestedList:\n if n_e.isInteger():\n self.flater_list.append(n_e.getInteger())\n else:\n n_e=n_e.getList()\n sub_ni=NestedIterator(n_e)\n while sub_ni.hasNext():\n self.flater_list.append(sub_ni.next())\n self.index=0", "title": "" } ]
[ { "docid": "9ef4ed4212f822f4b78823c94a4a9635", "score": "0.78362477", "text": "def init_data(self):", "title": "" }, { "docid": "b4c2dddb27354091df8041bff75f9898", "score": "0.7647474", "text": "def __init__(self):\n self._data = []", "title": "" }, { "docid": "b4c2dddb27354091df8041bff75f9898", "score": "0.7647474", "text": "def __init__(self):\n self._data = []", "title": "" }, { "docid": "b4c2dddb27354091df8041bff75f9898", "score": "0.7647474", "text": "def __init__(self):\n self._data = []", "title": "" }, { "docid": "b4c2dddb27354091df8041bff75f9898", "score": "0.7647474", "text": "def __init__(self):\n self._data = []", "title": "" }, { "docid": "6e64e7dca994c9c65c9c114160f5b4e5", "score": "0.76023686", "text": "def __init__(self):\n self.data = defaultdict(lambda: list())", "title": "" }, { "docid": "bf575f955b01054f9540a13a9c2744c6", "score": "0.75571114", "text": "def initialize(self):\n\t\tpass", "title": "" }, { "docid": "1c5d98d679015449aa9a48490dc083a6", "score": "0.7553331", "text": "def initialize(self):\n\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": "039b9c26b4efb4c4900e869cf33eef5c", "score": "0.7530035", "text": "def initialize(self):\n pass", "title": "" }, { "docid": 
"54192eecfb64f5f6c5e3bd21552b8df2", "score": "0.7501694", "text": "def __init__(self):\n self.map = {}\n self.data = []", "title": "" }, { "docid": "4bcdcb21bc840f71fda45502ffcd7f0c", "score": "0.7497136", "text": "def initialize(self):\r\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.74768376", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.74768376", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.74768376", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.74768376", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.74768376", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.74768376", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.74768376", "text": "def init(self):\n pass", "title": "" }, { "docid": "3e483681071a4e84409738933ef4c0ed", "score": "0.74768376", "text": "def init(self):\n pass", "title": "" }, { "docid": "2b8386c39992c7f9aa73c5fe4d524905", "score": "0.7473101", "text": "def __init__(self):\n self.len = 0\n self.data = {}\n self.min_num = None", "title": "" }, { "docid": "31e231c3539bc4aa0dc5351ec9992db4", "score": "0.74691755", "text": "def __init__(self):\n self.dic = {}\n self.data = []", "title": "" }, { "docid": "0be0bad959be8877a80540214b76017b", "score": "0.74655753", "text": "def __init__(self):\n self.data = {}\n self.array = []\n self.n = 0", "title": "" }, { "docid": "676a78d61954e4fc2c34e3bcb451cac1", "score": "0.7421628", "text": "def __init__(self):\n self.data = {}\n self.store = []", "title": "" }, { "docid": "e342b448c7aae2e85c5e3e7beaf19f8c", "score": "0.7415433", "text": "def __init__(self):\n self.data = collections.defaultdict(list)", "title": "" }, { "docid": "26618b7e53c35a3c04f59709e6c67405", "score": "0.7393587", "text": "def __init__(self):\n self.data = []", "title": "" }, { "docid": "26618b7e53c35a3c04f59709e6c67405", "score": "0.7393587", "text": "def __init__(self):\n self.data = []", "title": "" }, { "docid": "26618b7e53c35a3c04f59709e6c67405", "score": "0.7393587", "text": "def __init__(self):\n self.data = []", "title": "" }, { "docid": "fc3892e6ae57e1dceb89e99082c8c853", "score": "0.73333645", "text": "def initialize( self ):\n pass", "title": "" }, { "docid": "fa46a28ad3586c700cf608bff2cc3c9e", "score": "0.7329251", "text": "def _init(self):\n\n pass", "title": "" }, { "docid": "d794d3675d6724c86cbdb1e21b26a0f4", "score": "0.7319852", "text": "def initialize(self) -> None:", "title": "" }, { "docid": "d794d3675d6724c86cbdb1e21b26a0f4", "score": "0.7319852", "text": "def initialize(self) -> None:", "title": "" }, { "docid": "ac423eba6e88c1080e12f1aadac5dd7c", "score": "0.7316726", "text": "def _init(self):\n pass", "title": "" }, { "docid": "47bf13531d4e0f352c9ee3175625178b", "score": "0.7313093", "text": "def initialize(self):\n pass # pragma: no cover", "title": "" }, { "docid": "784a81dc55b855cd11cb17f6554ab31e", "score": "0.7302918", "text": "def initialize_(self):\n pass", "title": "" }, { "docid": "4e5a096d7762c0ce6c60debd0036e392", "score": "0.72966105", "text": "def __init__(self):\n self.data = {}\n self.document = {}", "title": "" }, { "docid": "40d423b42dfb8c6e38a820b35749403e", "score": "0.729073", "text": "def init(self) -> None: \n pass", 
"title": "" }, { "docid": "e57b3e431ff90dbd83949fdc91c4bf47", "score": "0.7271507", "text": "def __init__(self):\n self.data_dict = []\n self.model = Model()", "title": "" }, { "docid": "3237ec4e981c5f37c4e7197734d40194", "score": "0.7255379", "text": "def __init__(self):\n self._data=[] #non public list", "title": "" }, { "docid": "1180376c7c49d939fe5e5fd9df1a5a43", "score": "0.7248476", "text": "def initialize(self):\n raise NotImplementedError", "title": "" }, { "docid": "fbc5874af85383317224158ce0dfbca4", "score": "0.72424084", "text": "def _initialize(self):\r\n pass", "title": "" }, { "docid": "363147dfec37f6c47ace99a1c0e5e3b5", "score": "0.7235534", "text": "def __init__(self,data={}):\n self.data = data", "title": "" }, { "docid": "db84620036e4c8fef4c256868f0d5fe2", "score": "0.72295964", "text": "def initialize(self):\n if self.initializer is not None:\n self.initializer(self.data)", "title": "" }, { "docid": "acafd7724786b7f2883f34539bfd4b09", "score": "0.72235894", "text": "def __init__(self):\n \n self.data = [[]]*1000", "title": "" }, { "docid": "432b2ee12673c24ebc65adbcfaeaac42", "score": "0.72230685", "text": "def initialize(self):\n self.get_physical_value_list()\n self.set_rollover_reels()", "title": "" }, { "docid": "1c4d4c497e5199e34f31566dfdfed225", "score": "0.720551", "text": "def __init__(self):\n\n super().__init__()\n self.data = {}", "title": "" }, { "docid": "8696bbd499e3cf8b9ddd9908a1291260", "score": "0.72015566", "text": "def init(self):\n raise NotImplementedError", "title": "" }, { "docid": "8696bbd499e3cf8b9ddd9908a1291260", "score": "0.72015566", "text": "def init(self):\n raise NotImplementedError", "title": "" }, { "docid": "8696bbd499e3cf8b9ddd9908a1291260", "score": "0.72015566", "text": "def init(self):\n raise NotImplementedError", "title": "" }, { "docid": "8696bbd499e3cf8b9ddd9908a1291260", "score": "0.72015566", "text": "def init(self):\n raise NotImplementedError", "title": "" }, { "docid": "080f464432b5b619c13cba5892f4ad69", "score": "0.7200329", "text": "def initialise(self):\n\t\tpass", "title": "" }, { "docid": "04aa578daa87849bc7f6405ad2f1e824", "score": "0.72002155", "text": "def initialize(self):", "title": "" }, { "docid": "04aa578daa87849bc7f6405ad2f1e824", "score": "0.72002155", "text": "def initialize(self):", "title": "" }, { "docid": "04aa578daa87849bc7f6405ad2f1e824", "score": "0.72002155", "text": "def initialize(self):", "title": "" }, { "docid": "04aa578daa87849bc7f6405ad2f1e824", "score": "0.72002155", "text": "def initialize(self):", "title": "" }, { "docid": "04aa578daa87849bc7f6405ad2f1e824", "score": "0.72002155", "text": "def initialize(self):", "title": "" }, { "docid": "e633ab6ed3d64778794c29b9d8a23675", "score": "0.7187668", "text": "def __init__(self):\r\n self.data = []\r\n self.values = []", "title": "" }, { "docid": "aa09c991d5c3598d56d09965bf9fd393", "score": "0.71862686", "text": "def __init__(self):\n self.keys = []\n self.values = []", "title": "" }, { "docid": "aa09c991d5c3598d56d09965bf9fd393", "score": "0.71862686", "text": "def __init__(self):\n self.keys = []\n self.values = []", "title": "" }, { "docid": "412d80e3f785f2c1138a3675c636beee", "score": "0.7182924", "text": "def initialize(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "78ff934bcd42ad7a846202774ac3f008", "score": "0.7181856", "text": "def __init__(self):\n self.data_list = []\n self.data_set = set()\n self.empty_idx = set()\n self.idx_dic = {}", "title": "" }, { "docid": "d827dba4929046b7eeabd289ca1629ac", "score": 
"0.71809006", "text": "def __init__(self):\n self.ds = []", "title": "" }, { "docid": "689bf320e07927e31f18deebbd86b170", "score": "0.71711576", "text": "def __init__(self):\n\t\t\n\t\tself._nNodes=0\t# number of nodes and edges (leading underscore means \n\t\tself._nEdges=0\t# these will be considered to be private attributes)\n\t\t\n\t\tself._adjList={}# ini unordered set of node:_adjList pairs \n\t\t\t\t# data struct: dictionary", "title": "" }, { "docid": "d093673777c8387bac33577778f3d8fb", "score": "0.71591616", "text": "def __init__(self):\n\n self._fields = {}\n self._fill_config()", "title": "" }, { "docid": "111505c7d661533c5bbdd35693dbcb98", "score": "0.71215934", "text": "def __init__(self,\n data: dict):\n\n self.data = data", "title": "" }, { "docid": "71dcb8c587e49c179be46942b2d3c308", "score": "0.7113946", "text": "def __init__(self):\n self.d = {}", "title": "" }, { "docid": "71dcb8c587e49c179be46942b2d3c308", "score": "0.7113946", "text": "def __init__(self):\n self.d = {}", "title": "" }, { "docid": "1c9f3fd103c770a5378da40cd22749e5", "score": "0.7102245", "text": "def __init__(self):\n self.user_data, self.item_data, self.score_data = self.load()", "title": "" }, { "docid": "8a1f7ad33ca882c6d6a722b5ad51bb33", "score": "0.70976645", "text": "def initialize():\n pass", "title": "" }, { "docid": "169e10ec180b8373d94d59cbe9025b7e", "score": "0.70742005", "text": "def __init__(self, iterable=None):\n self._data = {}", "title": "" }, { "docid": "13ea232bef135b01d47bfd9005cd4afc", "score": "0.70715016", "text": "def _init_data_struct(self):\n # Add all attributes to data dict\n for data_note in self._hmdevice.ATTRIBUTENODE:\n self._data.update({data_note: STATE_UNKNOWN})", "title": "" }, { "docid": "5622f1121262b76776477250c9878541", "score": "0.70511055", "text": "def __init__(self, data):\n\n self.data = data", "title": "" }, { "docid": "74e66d0487a1403f09362b4c7aa05d8b", "score": "0.7044521", "text": "def __init__(self):\r\n self._data = [] # nonpublic list instance\r", "title": "" }, { "docid": "b8aeb3b93f06f588a6778f6b3225bc26", "score": "0.7040828", "text": "def __init__(self):\n self.cache = dict()\n self.data = list()", "title": "" }, { "docid": "b103590ebadd9af26c2124f60ce7473e", "score": "0.70405173", "text": "def __init__(self):\n self._name = None\n self._entries = [ ]", "title": "" }, { "docid": "4cd04bfc0989076d6f4a8c07c1139159", "score": "0.70370275", "text": "def __init__(self, data):\n self._data = data", "title": "" }, { "docid": "b4900261c9a8f9487c959e7bc8f9680e", "score": "0.70370084", "text": "def init(self):", "title": "" }, { "docid": "b4900261c9a8f9487c959e7bc8f9680e", "score": "0.70370084", "text": "def init(self):", "title": "" }, { "docid": "ceeef3c23b891a1569373a2235078922", "score": "0.7035901", "text": "def __init__( self, data ) :\n\n self.data = data", "title": "" }, { "docid": "ec18c21a43607d2a0625698e86a7782c", "score": "0.70349014", "text": "def _initialize_data(self):\n\n self._started = False\n self.phase = 0\n self.frequency = 0\n self.frequency_setpoint = 0\n self.phase_percent_ok = 100.\n self.phase_repeatability = 100.\n\n self.interlocks = OrderedDict([\n (\"DSP_WD_FAIL\", False),\n (\"OSCILLATOR_FAIL\", False),\n (\"POSITION_SHUTDOWN\", False),\n (\"EMERGENCY_STOP\", False),\n (\"UPS_FAIL\", False),\n (\"EXTERNAL_FAULT\", False),\n (\"CC_WD_FAIL\", False),\n (\"OVERSPEED_TRIP\", False),\n (\"VACUUM_FAIL\", False),\n (\"MOTOR_OVER_TEMP\", False),\n (\"REFERENCE_SIGNAL_LOSS\", False),\n (\"SPEED_SENSOR_LOSS\", False),\n 
(\"COOLING_LOSS\", False),\n (\"DSP_SUMMARY_SHUTDOWN\", False),\n (\"CC_SHUTDOWN_REQ\", False),\n (\"TEST_MODE\", False),\n ])\n\n self.rotator_angle = 90", "title": "" }, { "docid": "1a5898be857d9262c185cfac8421fea2", "score": "0.7019181", "text": "def __init__(self):\n self.dataset = []", "title": "" }, { "docid": "919afc186e7687fe83dba47139f934c3", "score": "0.70105445", "text": "def __init__(self):\n self.data = {}\n self.lutron_id = None\n self.tls_assets_validated = False\n self.attempted_tls_validation = False", "title": "" }, { "docid": "85f48bd81918c1a22c8831b04f660f86", "score": "0.69991237", "text": "def __init__(self):\n\t\tself.keys = [] #array of keys\n\t\tself.vals = [] #array of values\n\t\tself.N = 0 #size of key-value pairs \n\t\tself.M = 0 #size of array (capacity)", "title": "" } ]
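The positive passage for the query "Initialize your data structure here." pre-flattens a LeetCode-style nested list inside the constructor and then serves the values by index. A minimal, commented sketch of that same pattern, assuming a NestedInteger-like interface exposing isInteger(), getInteger() and getList() (the class and variable names below are illustrative, not taken from the passage):

class FlattenedIterator:
    def __init__(self, nested_list):
        # Initialize your data structure here: walk the nested list once,
        # collect every integer, then iterate over the flat result by index.
        self.values = []
        self._flatten(nested_list)
        self.index = 0

    def _flatten(self, nested_list):
        # Depth-first walk: integers are appended, sublists are recursed into.
        for element in nested_list:
            if element.isInteger():
                self.values.append(element.getInteger())
            else:
                self._flatten(element.getList())

    def next(self):
        value = self.values[self.index]
        self.index += 1
        return value

    def hasNext(self):
        return self.index < len(self.values)

Compared with the passage, the only design change is recursing directly instead of building a nested iterator per sublist; the observable behaviour should be the same.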
25489050411a6b76b1f37963f5bc8817
interrupt 21H, function 2Ah (42), Get System Date: AL - Day of the week (0-6; 0 = Sunday), CX - Year (1980-2099), DH - Month (1-12), DL - Day (1-31)
[ { "docid": "46e0399dfa82a16044c375263c9d4297", "score": "0.5788917", "text": "def int_21H_42(self):\r\n date_now = datetime.datetime.now()\r\n\r\n year = date_now.year\r\n month = date_now.month\r\n day = date_now.day\r\n weekday = (datetime.datetime.today().weekday()) % 6 # 0 - niedziela\r\n\r\n self.registers['AX'].move_into(weekday, 0, is_int=True)\r\n self.registers['CX'].set_bytes(year, is_int=True)\r\n self.registers['DX'].move_into(month, 1, is_int=True)\r\n self.registers['DX'].move_into(day, 0, is_int=True)", "title": "" } ]
[ { "docid": "07396f146082972a84c69ee1041cfb53", "score": "0.67846864", "text": "def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum = TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]", "title": "" }, { "docid": "ec941e3b1333a2e88c083aaed488f8e1", "score": "0.6686993", "text": "def day_07_b() -> int:\n return 0", "title": "" }, { "docid": "a8595c16811c0c8e02fcc689c9b3e449", "score": "0.65752184", "text": "def day_07_a() -> int:\n return 0", "title": "" }, { "docid": "2872228d7ed7042517b589b222d1839e", "score": "0.65199256", "text": "def day_06_b() -> int:\n return 0", "title": "" }, { "docid": "9bc88d6eca022d5808a46b7ccd9b655c", "score": "0.6513388", "text": "def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))", "title": "" }, { "docid": "e63e7dcdce36e2da1a08bd8b6c1dd011", "score": "0.6485275", "text": "def day_of_the_week(arg):", "title": "" }, { "docid": "60412cfd9aea5717c8e5b746ade96753", "score": "0.6457445", "text": "def day_of_week(dt):\n cday = dt\n mday = 2\n uday = cday.isocalendar()[2] + mday\n try:\n if uday > 7:\n CURRDAY = uday - 7\n log.debug(\"1;EME;RUNNING;000;Scheduler.py;Setting customized day of week>7 : \", CURRDAY)\n else:\n CURRDAY = uday\n log.debug(\"1;EME;RUNNING;000;Scheduler.py;Setting customized day of week : \", CURRDAY)\n return CURRDAY\n except Exception as e:\n log.exception(\"1;EME;FAILURE;700;SCHEDULE ERROR \" + str(e), exc_info=False)\n sys.exit(0)", "title": "" }, { "docid": "cb75eec6bfc3c5398022cc6aa2553ffc", "score": "0.6378015", "text": "def Dooms_day(year):\r\n day = (year % 100 + (year % 100)//4 + Anchor_day(year)) % 7\r\n return day", "title": "" }, { "docid": "902366387a9ec0739f3451b614a6f720", "score": "0.6367536", "text": "def _calculate_date(day_of_year):\n date = datetime.datetime.strptime(str(day_of_year), '%j')\n return date.strftime('%d-%b')", "title": "" }, { "docid": "f0c2f99d23c152271ac2ae85f87caf5c", "score": "0.6333657", "text": "def day_06_a() -> int:\n return 0", "title": "" }, { "docid": "76dce3b833747abfe5827e07b315e09f", "score": "0.62028384", "text": "def day_of_week():\n return calendar.day_name[datetime.date.today().weekday()]", "title": "" }, { "docid": "6807f6d5edf005b5b43b2e01cacac029", "score": "0.6177753", "text": "def doomsday(y):", "title": "" }, { "docid": "17883616cd8302376bc8cd318b44dc7b", "score": "0.61605054", "text": "def weekday(day):\n return (day % 7) - 1", "title": "" }, { "docid": "fde25a46e2023a969ef3722a658b6f4b", "score": "0.61577934", "text": "def day_of_week(day, month, year):\n bias = (14 - month) // 12\n m_year = year - bias\n mth = month + 12 * bias - 2\n return (day + m_year + m_year // 4 - m_year // 100 + m_year // 400 + (31 * mth) // 12) % 7", "title": "" }, { "docid": "d15555ab201b5eab93bf4ed0112e055c", "score": "0.61528707", "text": "def DayOfWeek(year, month, day):\n num = year * 365\n num = num + year // 4 + 1\n num = num - (year // 100 + 1)\n num = num + year // 400 + 1\n if month < 3 and LeapYear(year):\n num = num - 1\n return (num + MONTH_OFFSETS[month - 1] + day + 4) % 7 + 1", "title": "" }, { "docid": "c40c1a540404d76df12cea35bc00184c", "score": "0.60942006", 
"text": "def date_day_of_week(date):\n day_of_week = date.strftime('%A')\n return day_of_week", "title": "" }, { "docid": "beeb4a9c9eb83d08a50f69b541ea87cd", "score": "0.6083748", "text": "def Day_of_week(day, month, year):\r\n if year % 4 == 0 and (year % 400 == 0 or year % 100 != 0):\r\n doomsday = [11, 29, 21, 4, 9, 6, 11, 8, 5, 10, 7, 12]\r\n else:\r\n doomsday = [10, 28, 21, 4, 9, 6, 11, 8, 5, 10, 7, 12]\r\n exact_day = ((day - doomsday[month-1]) + Dooms_day(year)) % 7\r\n character_day = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \r\n \"Friday\", \"Saturday\"]\r\n return character_day[exact_day]", "title": "" }, { "docid": "7460dcd6af1c8ad51d54a6a83582f856", "score": "0.6067672", "text": "def weekday(self):\n return 0", "title": "" }, { "docid": "7460dcd6af1c8ad51d54a6a83582f856", "score": "0.6067672", "text": "def weekday(self):\n return 0", "title": "" }, { "docid": "e6b5b213d95a01d7c9aa2c817f9e7a47", "score": "0.6058643", "text": "def get_weekday():\n result = datetime.today().weekday() + 1\n return result", "title": "" }, { "docid": "7697870d2f86dbeb1b7c887066f72fc4", "score": "0.6014801", "text": "def date_to_dow(y, m, d):\r\n # Python uses Monday week start, so wrap around\r\n w = calendar.weekday(y, m, d) + 1\r\n if w == 7:\r\n w = 0\r\n return w", "title": "" }, { "docid": "55c4b72d0ba147a20531b32d9eaf4997", "score": "0.59747714", "text": "def get_day_of_week() -> str:\n return datetime.now(pytz.timezone('US/Eastern')).strftime(\"%a\").lower()", "title": "" }, { "docid": "c53bf31609bbb5ffd0f40b56111fc994", "score": "0.59681237", "text": "def get_weekday_number(date):\n return date.strftime('%w')", "title": "" }, { "docid": "158c24df9623c01bb844d26e55cd721b", "score": "0.59627086", "text": "def dow(self):\n comparator = Date(11, 12, 2014) # known to be a 'Wednesday'\n DOW = ['Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'Monday', 'Tuesday']\n diff = self.diff(comparator)\n return DOW[diff % 7]", "title": "" }, { "docid": "114212d07021bd870e5abc9c8c44836a", "score": "0.59453315", "text": "def week(self):\n J = self.JulianDay()\n d4 = (J + 31741 - (J % 7)) % 146097 % 36524 % 1461\n L = d4 // 1460\n d1 = ((d4 - L) % 365) + L\n return d1 // 7 + 1", "title": "" }, { "docid": "21e2bfb4676ffbea69b0eb8058bd5260", "score": "0.5926125", "text": "def get_day_today() -> str:\n day = datetime.now().strftime(\"%w\")\n if day == '0': # Sunday\n return '6'\n elif day == '6': # Saturday\n return '5'\n elif day == '1': # Monday\n return '0'\n elif day == '2': # Tuesday\n return '1'\n elif day == '3': # Wednesday\n return '2'\n elif day == '4': # Thursday\n return '3'\n elif day == '5': # Friday\n return '4'", "title": "" }, { "docid": "d5628bf978f8117cc53b2bf8cfa60cc3", "score": "0.59251744", "text": "def date_week_of_year(date, *, sunday_is_first_day_of_week: bool = False):\n if sunday_is_first_day_of_week:\n return date.strftime(\"%U\")\n else:\n return date.strftime(\"%V\")", "title": "" }, { "docid": "af83e4b95d461f7097c432c32d1c5766", "score": "0.5896955", "text": "def _labor_day(year):\n day = datetime(year, 9, 1)\n delta = timedelta(days=1)\n while day.weekday() != 0:\n day += delta\n return day", "title": "" }, { "docid": "6bc418654fb2745b4dfe8960de2210a9", "score": "0.5885831", "text": "def Anchor_day(year):\r\n day = (5 * ((year // 100) % 4) + 2) % 7\r\n return day", "title": "" }, { "docid": "633b84790d6c390e31fdbe6aa8bec0ae", "score": "0.5883403", "text": "def formalDateToday():\n return dt.date.today().strftime(\"%B %d, %Y\")", "title": "" }, { 
"docid": "050f6d48bd86c5b9fd3d7ebe5bd66c92", "score": "0.58700144", "text": "def WEEKDAY(\n serial_number: func_xltypes.XlNumber,\n return_type: func_xltypes.XlNumber = None\n) -> func_xltypes.XlNumber:\n\n date = utils.number_to_datetime(int(serial_number))\n\n if return_type is None:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 1:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n # weekday() is 0 based, starting on a Monday\n elif int(return_type) == 2:\n # Numbers 1 (Monday) through 7 (Sunday)\n weekDays = (1, 2, 3, 4, 5, 6, 7)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 3:\n # Numbers 0 (Monday) through 6 (Sunday)\n weekDays = (0, 1, 2, 3, 4, 5, 6)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 11:\n # Numbers 1 (Monday) through 7 (Sunday)\n weekDays = (1, 2, 3, 4, 5, 6, 7)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 12:\n # Numbers 1 (Tuesday) through 7 (Monday)\n weekDays = (7, 1, 2, 3, 4, 5, 6)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 13:\n # Numbers 1 (Wednesday) through 7 (Tuesday)\n weekDays = (6, 7, 1, 2, 3, 4, 5)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 14:\n # Numbers 1 (Thursday) through 7 (Wednesday)\n weekDays = (5, 6, 7, 1, 2, 3, 4)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 15:\n # Numbers 1 (Friday) through 7 (Thursday)\n weekDays = (4, 5, 6, 7, 1, 2, 3)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 16:\n # Numbers 1 (Saturday) through 7 (Friday)\n weekDays = (3, 4, 5, 6, 7, 1, 2)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 17:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n else:\n raise xlerrors.NumExcelError(\n f\"return_type needs to be omitted or one of 1, 2, 3, 11, 12, 13,\\\n 14, 15, 16 or 17. 
You supplied {return_type}\")", "title": "" }, { "docid": "2b64ce712e10fd8cf23f23e2b3dee999", "score": "0.5864806", "text": "def day_05_b() -> int:\n return 0", "title": "" }, { "docid": "44e9281c7e53af9f4f769362c85f7169", "score": "0.58480734", "text": "def day_of_week(self):\n # 1 Jan 0001 was Monday according to the proleptic Gregorian calendar.\n # So, 1 Jan 0001 has ordinal 1, and the weekday is 0.\n return (self._ordinals - 1) % 7", "title": "" }, { "docid": "667f18c1c0ad0cecc0f6644738f59774", "score": "0.5820334", "text": "def calculate_return_day(yyyymmdd, hhmmss, week):\n return_day_number = datetime(yyyymmdd[0], yyyymmdd[1], yyyymmdd[2]).weekday()\n return_day_name = week[return_day_number]\n return_day_hour = hhmmss[0]\n return [return_day_number, return_day_name, return_day_hour]", "title": "" }, { "docid": "55c5ebfb064d6c18af31081a32183c47", "score": "0.57898694", "text": "def get_day(self):\n\n # First we get the first 8 bits stored in the day register\n # and translate it to an integer\n day_bcd = self.__read_register(_REGISTER_DAY)\n\n # Then we extract the digits and the tens\n tens = (day_bcd & 0x30) >> 4 # 0x30 = 0b00110000\n digit = (day_bcd & 0x0F) # 0x0F = 0b00001111\n\n # End return the last value\n return 10 * (tens) + digit", "title": "" }, { "docid": "07269a287c10aa28f6dc39441d870257", "score": "0.5776035", "text": "def convert_date(year: str, week: str):\n date = datetime.fromisocalendar(int(year), int(week), 1)\n return date.strftime(\"%m/%d/%YZ\")", "title": "" }, { "docid": "16c6016846a3263c88bcc144c1a913a2", "score": "0.57744247", "text": "def day_of_week(self):\n day_of_week_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']\n diff = self.diff(Date(1, 1, 1970)) + 3\n while diff < 0:\n diff += 7\n print(day_of_week_names[diff % 7])", "title": "" }, { "docid": "68326cd29cd4b7e7e3b0e36a736fb4e6", "score": "0.57682073", "text": "def _FirstSunday(self, dtz): # pylint: disable-msg=C0103,R0201\n return dtz + datetime.timedelta(days=(6-dtz.weekday()))", "title": "" }, { "docid": "7febaec55371715ad144c30b33191370", "score": "0.57649964", "text": "def nth_dow_to_day(tupel, y):\r\n m = tupel[0]\r\n dow = tupel[1]\r\n n = tupel[2]\r\n\r\n if dow == 7:\r\n dow = 0\r\n\r\n first_dow = date_to_dow(y, m, 1) # the dow of the first of the month\r\n shift = dow - first_dow\r\n if shift < 0:\r\n shift += 7\r\n\r\n return shift + (7 * n) - 6", "title": "" }, { "docid": "957c1361e0f4cae1070cfbf18d1f6574", "score": "0.57520735", "text": "def dayofweek(day, month, year, formatresult=True):\n if formatresult is False:\n return calendar.weekday(year, month, day) + 1\n days = {\n 0: 'Monday',\n 1: \"Tuesday\",\n 2: \"Wednesday\",\n 3: \"Thursday\",\n 4: \"Friday\",\n 5: \"Saturday\",\n 6: \"Sunday\"\n }\n return days[calendar.weekday(year, month, day)]", "title": "" }, { "docid": "bd28e8293bee2e2512e3352330f5b01d", "score": "0.57313246", "text": "def day(sym, date):\n return get(sym, date, date)[0][1]", "title": "" }, { "docid": "e748969f4cf8315b6db04208a7438061", "score": "0.5709212", "text": "def day_05_a() -> int:\n return 0", "title": "" }, { "docid": "838238e9472593dd6e941708d35a265a", "score": "0.5695858", "text": "def day(self):\n return 0", "title": "" }, { "docid": "838238e9472593dd6e941708d35a265a", "score": "0.5695858", "text": "def day(self):\n return 0", "title": "" }, { "docid": "c64258cc849b6e78462eba76031e0588", "score": "0.568784", "text": "def get_mothers_day_date(year):\n day = date(year=year, month=5, day=1)\n while 1:\n if 
day.weekday() == 6:\n day += timedelta(days=7)\n break\n day += timedelta(days=1)\n return day", "title": "" }, { "docid": "30f6ea560cbfc91cb877c77e280d1244", "score": "0.5682577", "text": "def date_with_day_of_week_appended(mydate): \n import datetime\n month, day, year = (int(x) for x in mydate.split('/')) \n shortened_year = abs(year) % 100 \n day_of_week = datetime.date(year, month, day).strftime(\"%A\")\n return \"%s/%s/%s %s\" % (month,day,shortened_year, day_of_week)", "title": "" }, { "docid": "bbf464cf9579063d7542c14cb5f32552", "score": "0.568106", "text": "def week_init():\n week = input('Week to check: MM/DD/YYYY\\n')\n week = dtt.datetime.strptime(week,'%m/%d/%Y') #turns input to a datetime\n beforeday = input('Check days before date (Press enter to use today): MM/DD/YYYY\\n') or dtt.date.today()\n if (beforeday != dtt.date.today()):\n beforeday = dtt.datetime.strptime(beforeday,'%m/%d/%Y')\n return week, beforeday", "title": "" }, { "docid": "f761a10641fed2d4365f3c44eefdab2e", "score": "0.5678368", "text": "def get_weekday(self):\n originDate = Date(1900, 1, 1)\n return WEEKDAYS[originDate.days_since(self) % 7]", "title": "" }, { "docid": "052fa7c3d3c0c4978579c316a1dd87d4", "score": "0.5676831", "text": "def test_jd2dow():\n\tjd = [2434923.5,2458130.5]\n\tdnum_true = [3,5]\n\tdnam_true = np.array(['Wed','Fri'],dtype='|S3')\n\tdnum_test, dnam_test = date_functions.jd2dow( jd )\n\t\n\tassert dnum_test[0] == dnum_true[0]\n\tassert dnum_test[1] == dnum_true[1]\n\tassert dnam_test[0] == dnam_true[0]\n\tassert dnam_test[1] == dnam_true[1]", "title": "" }, { "docid": "4c53ee758ff54cc5a153b9106710c6f0", "score": "0.5674484", "text": "def day_of_the_programmer(year):\n leap, total = 0, 256\n\n #Determine Leap Years\n if year <= 1917 and year >= 1700: # Julian calendar\n if year % 4 == 0:\n leap = 1\n elif year == 1918:\n leap = -13\n elif year >= 1919 and year <= 2700: # Gregorian Calendar\n if year % 400 == 0 or (year % 4 == 0 and year % 100 != 0):\n leap = 1\n\n days_in_month = [31, 28 + leap, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n # Determine month and day\n for i in xrange(len(days_in_month)):\n total -= days_in_month[i]\n if total < 1:\n month = i + 1\n day = total + days_in_month[i]\n break\n\n # format output into 'dd.mm.yyyy'\n day = '{:02d}'.format(day)\n month = '{:02d}'.format(month)\n yyyy = '{:04d}'.format(year)\n return day + '.' + month + '.' 
+ yyyy", "title": "" }, { "docid": "afad31ce6b3b8256be2ddec47256f684", "score": "0.5673245", "text": "def date_to_day_of_week(year, month, day):\n # Calculate the day offset from Jan, 1 in the specified year.\n day_num = date_to_day_of_year(year, month, day)\n\n is_pre_2k = year < 2000\n if is_pre_2k:\n # Calculate the number of days from the end of the year.\n num_days = days_in_year(year) - day_num + 1\n start, step = 1999, -1\n else:\n # Calculate the number of days from the beginning of the year.\n num_days = day_num - 1\n start, step = 2000, 1\n\n for _year in range(start, year, step):\n num_days += days_in_year(_year)\n\n # Add the number of days to the day number for Jan 1, 2000 modulus 7\n # to get the current day number.\n if is_pre_2k:\n num_days = -num_days\n\n return (JAN_1_2000_DAY_NUM + num_days) % 7", "title": "" }, { "docid": "00a5e00792c9e2623bb13b32edbe7737", "score": "0.56569487", "text": "def first_day_of_year(year):\n year -= 1\n return (year + (year // 4) - (year // 100) + (year // 400) + 1) % NUM_DAYS_IN_WEEK", "title": "" }, { "docid": "8e023f93342eb4ce489e2d5b2d5bcdea", "score": "0.56504613", "text": "def locale_first_weekday():\n\tfirst_weekday = 6 #by default settle on monday\n\n\ttry:\n\t\tprocess = os.popen(\"locale first_weekday week-1stday\")\n\t\tweek_offset, week_start = process.read().split('\\n')[:2]\n\t\tprocess.close()\n\t\tweek_start = datetime.date(*time.strptime(week_start, \"%Y%m%d\")[:3])\n\t\tweek_offset = datetime.timedelta(int(week_offset) - 1)\n\t\tbeginning = week_start + week_offset\n\t\tfirst_weekday = int(beginning.strftime(\"%w\"))\n\texcept:\n\t\tprint \"WARNING - Failed to get first weekday from locale\"\n\n\treturn first_weekday", "title": "" }, { "docid": "b8babbf3841fd59d1526c1ef611dffdb", "score": "0.5648987", "text": "def day_of_week(self) -> str:\n return self.elements[4]", "title": "" }, { "docid": "2e646336bed4ff5fe1b685171aa8a748", "score": "0.5643455", "text": "def date_to_day_of_week(date):\n return date.weekday()", "title": "" }, { "docid": "7d92e529e79a90a072751495a00a1039", "score": "0.5642111", "text": "def calculate_date(month, year):\n start_index = day_of_week(1, month, year)\n end_index = num_of_day(month, year)\n padding_day = [0 for i in range(0, start_index)]\n days = [i for i in range(1, end_index + 1)]\n return padding_day + days", "title": "" }, { "docid": "a5fbb7f24396673792dbcc7661348066", "score": "0.5615694", "text": "def get_fw_date(self, rec, report):\n rec.VAL = self.crate.mch_fw_date[self.slot]", "title": "" }, { "docid": "ec795b3476bbe4d8fb6ab3ddd4f1d86a", "score": "0.5613382", "text": "def dow(values, feature, parent): \r\n input_date = values[0]\r\n \r\n # Return dayOfWeek() % 7 so that values range from 0 (sun) to 6 (sat)\r\n # to match Postgresql behaviour\r\n if type(input_date) == QDateTime:\r\n return input_date.date().dayOfWeek() % 7\r\n elif type(input_date) == QDate:\r\n return input_date.dayOfWeek() % 7\r\n elif type(input_date) in (str, unicode): \r\n # Convert string to qdate\r\n input_qdate = QDate.fromString(input_date, 'yyyy-MM-dd')\r\n if input_qdate.isValid():\r\n return input_qdate.dayOfWeek() % 7 \r\n else:\r\n return None", "title": "" }, { "docid": "ec795b3476bbe4d8fb6ab3ddd4f1d86a", "score": "0.5613382", "text": "def dow(values, feature, parent): \r\n input_date = values[0]\r\n \r\n # Return dayOfWeek() % 7 so that values range from 0 (sun) to 6 (sat)\r\n # to match Postgresql behaviour\r\n if type(input_date) == QDateTime:\r\n return input_date.date().dayOfWeek() % 7\r\n elif 
type(input_date) == QDate:\r\n return input_date.dayOfWeek() % 7\r\n elif type(input_date) in (str, unicode): \r\n # Convert string to qdate\r\n input_qdate = QDate.fromString(input_date, 'yyyy-MM-dd')\r\n if input_qdate.isValid():\r\n return input_qdate.dayOfWeek() % 7 \r\n else:\r\n return None", "title": "" }, { "docid": "99648794566df6bc393f279004f3c6ce", "score": "0.56124413", "text": "def date(self):\n try:\n return datetime.date.fromordinal(self.round)\n except ValueError:\n raise ValueError(\"you need to run ABCE in calendar mode, use simulation.declare_calendar(2000, 1, 1)\")", "title": "" }, { "docid": "219a43fa9e21e96834160057ae1e9668", "score": "0.5603282", "text": "def getDay(self):\n return _libsbml.Date_getDay(self)", "title": "" }, { "docid": "a9aedea0daceaede084a291d18d61c9a", "score": "0.5603182", "text": "def getDate(self):\n arr=[]\n dayarr=['Saturday','Sunday','Monday','Tuesday','Wednesday','Thursday','Friday']\n if self.debug =='yes':\n arr=self.debugdate\n else:\n \n hour = time.strftime('%H')\n month=time.strftime('%m')\n dayname=time.strftime('%A')\n monthname=time.strftime('%B')\n \n if time.strftime('%H') < '16':\n dayname=dayarr[int(time.strftime('%w'))]\n day = '%s' % str(int(time.strftime('%d'))-1)\n #print day\n if day == '0':\n month,day,monthname=self.EOM(month)\n if int(day) < 10:\n day= '0%s' % day\n else:\n day=day\n else:\n day = time.strftime('%d')\n \n arr.append(month)\n arr.append(day)\n arr.append(time.strftime('%Y'))\n arr.append(dayname)\n arr.append(monthname)\n #print arr\n return arr", "title": "" }, { "docid": "d1f6af88d898cf6415fd3ba17f8cec54", "score": "0.56018275", "text": "def get_day(x):\n return x[\"SALE DATE\"].day", "title": "" }, { "docid": "89dc7c9972f4094d4a7cfa7a2e1a3992", "score": "0.55992246", "text": "def get_day():\n return handle_invalid_inputs(question_4, days)", "title": "" }, { "docid": "97f312f7e01e008b002bdc6af582fdb5", "score": "0.5598406", "text": "def info():\n\n\t info = \"This package determines the day of the week.\"\n\t print(info)", "title": "" }, { "docid": "7b64d3d2ffc1bf88535b65c38d15fbd2", "score": "0.55954", "text": "def ISOWEEKNUM(date):\n return _make_datetime(date).isocalendar()[1]", "title": "" }, { "docid": "6331b0bd07aeef7d1263ab9834fe627b", "score": "0.5592767", "text": "def weekday(self):\n return (self.toordinal() + 6) % 7", "title": "" }, { "docid": "6c025f44cd66de2e4dac90dd848e5b04", "score": "0.557299", "text": "def get_dih(year):\r\n return common.get_dict(get_dih_filename(year), 'DaysInHospital', int)", "title": "" }, { "docid": "605aa0f54fb2bdbbc95c30f2a4eb1e49", "score": "0.5572373", "text": "def weekday(self) -> int:\n return WD_EN.index(self.time.day.lower())", "title": "" }, { "docid": "47fdcd9d6ca537a27bf9fe997d07b913", "score": "0.55689377", "text": "def get_week_days(year, week):\n d = dt.date(year, 1, 1)\n if(d.weekday() > 3):\n d = d + dt.timedelta(7 - d.weekday())\n else:\n d = d - dt.timedelta(d.weekday())\n dlt = dt.timedelta(days = (week - 1) * 7)\n return d + dlt #, d + dlt + dt.timedelta(days = 6)", "title": "" }, { "docid": "9da3ea68b57e068494fc1f3de09ade86", "score": "0.55556417", "text": "def get_gre_date(year, Hmonth, Hday):\n try:\n from hijri_converter import convert\n except ImportError:\n import warnings\n\n def warning_on_one_line(message, category, filename, lineno,\n file=None, line=None):\n return filename + ': ' + str(message) + '\\n'\n warnings.formatwarning = warning_on_one_line\n warnings.warn(\"Error estimating Islamic Holidays.\" +\n \"To estimate, install 
hijri-converter library\")\n warnings.warn(\"pip install -U hijri-converter\")\n warnings.warn(\"(see https://hijri-converter.readthedocs.io/ )\")\n return []\n Hyear = convert.Gregorian(year, 1, 1).to_hijri().datetuple()[0]\n gres = []\n gres.append(convert.Hijri(Hyear - 1, Hmonth, Hday).to_gregorian())\n gres.append(convert.Hijri(Hyear, Hmonth, Hday).to_gregorian())\n gres.append(convert.Hijri(Hyear + 1, Hmonth, Hday).to_gregorian())\n gre_dates = []\n for gre in gres:\n if gre.year == year:\n gre_dates.append(date(*gre.datetuple()))\n return gre_dates", "title": "" }, { "docid": "bd084a80bf74ec00156a390c8be213cc", "score": "0.5549041", "text": "def GetWeekDay(self):\n if self.day is None:\n if self.week:\n return (\n self.century,\n self.year //\n 10,\n self.year %\n 10,\n self.week,\n None)\n elif self.month is None:\n if self.year is None:\n return (self.century, None, None, None, None)\n else:\n return (\n self.century,\n self.year //\n 10,\n self.year %\n 10,\n None,\n None)\n else:\n raise DateTimeError(\"can't get week day with month precision\")\n else:\n century, year, ordinalDay = self.GetOrdinalDay()\n year += century * 100\n if LeapYear(year):\n yearLength = 366\n else:\n yearLength = 365\n weekday = DayOfWeek(year, self.month, self.day)\n thursday = ordinalDay + 4 - weekday\n if thursday < 1:\n # Thursday this week was actually last year, and so we are\n # part of the last calendar week of last year too.\n # may return year==0\n year -= 1\n week = WeekCount(year)\n elif thursday > yearLength:\n # Thursday this week is actually next year, and so we are\n # part of the first calendar week of next year too.\n # may return century=100\n year += 1\n week = 1\n else:\n # We are part of this year, but which week?\t Jan 4th is always\n # part of the first week of the year, so we calculate the ordinal\n # value of the Monay that began that week\n yearBase = 5 - DayOfWeek(year, 1, 4)\n week = (ordinalDay - yearBase) // 7 + 1\n return year // 100, (year % 100) // 10, (year % 10), week, weekday", "title": "" }, { "docid": "b11070199fbfa8f247fc8e9678c61342", "score": "0.55432516", "text": "def get_today_week_number(self):\n\n today = date.today()\n iso_result = today.isocalendar()\n return iso_result[1]", "title": "" }, { "docid": "3fa602c3999a6c4603668c66093d9971", "score": "0.5526484", "text": "def get_the_weekday(self,date):\n date_convert = date.split('-')\n week_days = (\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\")\n date_list = [int(i) for i in date_convert]\n day = datetime.date(date_list[0], date_list[1], date_list[2])\n # convert weekday into digit (eg Mon -> 0,)\n num_day = day.weekday()\n day_as_string = week_days[num_day]\n return day_as_string", "title": "" }, { "docid": "3e3bb78d57e99763d7902354758824c7", "score": "0.55232984", "text": "def main():\r\n day, mth = int(input()), int(input())\r\n mths = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\r\n wks = [\"Saturday\", \"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\"]\r\n yrday = sum(mths[:mth-1])+day\r\n cur_idx = 0\r\n for _ in range(yrday):\r\n if cur_idx == 6:\r\n cur_idx = 0\r\n else:\r\n cur_idx += 1\r\n print(wks[cur_idx-1])", "title": "" }, { "docid": "5ff2cbf6a8903e51dc0cc95ca126db74", "score": "0.5511345", "text": "def meetup_day(year, month, dow, wom):\n first_dow = monthrange(year, month)[0]\n days_in_month = monthrange(year, month)[1]\n possible_dates = []\n print str(year) + str(month) + dow + wom\n\n \"\"\"Build dictionary of 
possible dates based on dow\"\"\"\n for day in range(1, days_in_month+1):\n if datetime.date(year, month, day).strftime(\"%A\") == dow:\n print day\n possible_dates.extend([day])\n\n \"\"\"Perform logic on wom constraint\"\"\"\n if wom == \"teenth\":\n for day in possible_dates:\n if day > 12 and day < 20:\n return datetime.date(year, month, day)\n elif wom == \"last\":\n return datetime.date(year, month, possible_dates[-1])\n else:\n return datetime.date(year, month, possible_dates[ int(wom[:1]) - 1 ])", "title": "" }, { "docid": "6a5e071d57875ae77f0bd785449020a9", "score": "0.550482", "text": "def test_mjd2dow():\n\tmjd = [34923.0,58130.5]\n\tdnum_true = [3,5]\n\tdnam_true = np.array(['Wed','Fri'],dtype='|S3')\n\tdnum_test, dnam_test = date_functions.mjd2dow( mjd )\n\t\n\tassert dnum_test[0] == dnum_true[0]\n\tassert dnum_test[1] == dnum_true[1]\n\tassert dnam_test[0] == dnam_true[0]\n\tassert dnam_test[1] == dnam_true[1]", "title": "" }, { "docid": "6c93386605d85b8022c4b584af0bd04a", "score": "0.5499277", "text": "def dayweek_clean(fecha):\n\n try:\n lista = fecha.split(sep = '/')\n fecha = '-'.join(reversed(lista))\n temp = pd.Timestamp(fecha)\n dia_semana = (temp.dayofweek, temp.day_name())\n return dia_semana[1]\n \n except:\n #print ('hola')\n return None", "title": "" }, { "docid": "9b12fa4237365fb033e96db4811d4928", "score": "0.5496464", "text": "def weekday(self, dt):\n days = {\n 0: self.MONDAY,\n 1: self.TUESDAY,\n 2: self.WEDNESDAY,\n 3: self.THURSDAY,\n 4: self.FRIDAY,\n 5: self.SATURDAY,\n 6: self.SUNDAY\n }\n return days.get(dt.weekday())", "title": "" }, { "docid": "ba261597e09c98da4c032d5a3b8e1410", "score": "0.5490995", "text": "def get_date_day(date):\n cut_date = date.split('-')\n return cut_date[2]", "title": "" }, { "docid": "c08730677f1b81f9ded9b212130ccd91", "score": "0.5470284", "text": "def find_date(startdate, weekday, weeknumber):\n import datetime\n # The +1 makes this match up with linux times (day 1 = Monday)\n daysahead = weekday - (startdate.weekday() + 1)\n if daysahead < 0:\n # Target day already happened this week\n daysahead += 7\n # Add 7 days for each Week Of Month we want - but 'This' week is week 1\n daysahead += 7 * (weeknumber - 1)\n return startdate + datetime.timedelta(daysahead)", "title": "" }, { "docid": "4904c8a02a98239c32b442d44a00de3f", "score": "0.54688174", "text": "def yearMonthDay() :\n timeDateValue = time.asctime(time.gmtime()).lower().split()\n if int(timeDateValue[2]) < 10 : timeDateValue[2] = str('0'+str(timeDateValue[2]))\n return '%s%s%s' % (timeDateValue[4],timeDateValue[1],timeDateValue[2])", "title": "" }, { "docid": "08c0389b034a168a67138ddb080b3ef2", "score": "0.54559237", "text": "def MLK(year):\n\n day = datetime.date(year, 1, 1)\n count = 0\n while True:\n if day.weekday() == 0:\n count += 1\n if count == 3:\n return day\n day += datetime.timedelta(days=1)", "title": "" }, { "docid": "e863e9bf1bd0ab0d715526841588d4ef", "score": "0.545257", "text": "def iso_date(self):\n return self.strftime(self.FORMAT_PRECISION_DAY)", "title": "" }, { "docid": "215ef9fbbb790463c5a3eab9b60e2773", "score": "0.5441061", "text": "def get_week_date():\n return timezone.now()+timezone.timedelta(days=6)", "title": "" }, { "docid": "07aaee0cddb62e02181960f70e63a8f8", "score": "0.54376656", "text": "def generate_day_type(date):\n cal = France()\n\n if cal.is_holiday(date):\n # If Mon-Friday\n if date.weekday() in range(5):\n return 0.\n else:\n return 1.\n else:\n if date.weekday() in range(5):\n return 1.\n else:\n return 0.", "title": "" }, { 
"docid": "212027d0d718806aa6313c4701e6b85d", "score": "0.54346895", "text": "def CONST_WEEK_TIMESTAMP() -> int:\n return 604800", "title": "" }, { "docid": "3d2b6ca3cc8d43b92011be142b5b648f", "score": "0.54346365", "text": "def test_dhms2day():\n\td = [0,737072]\n\th = [7,16]\n\tm = [12,43]\n\ts = [0.0,25.12]\n\td_true = [0.3,737072.696818518569]\n\td_test = date_functions.dhms2day(d,h,m,s)\n\t\n\tassert d_test[0] == pytest.approx(d_true[0],1e-8)\n\tassert d_test[1] == pytest.approx(d_true[1],1e-8)", "title": "" }, { "docid": "bbb1c7d7787b69f9041a45eb879f0e3c", "score": "0.5434593", "text": "def first_monday_of_week(year, week):\n weekyear = \"{} {} 1\".format(year, week)\n return time.asctime(time.strptime(weekyear, \"%Y %U %w\"))", "title": "" }, { "docid": "f5a6050accc7566ad20eb8bd9fa9863f", "score": "0.54343885", "text": "def day_of_week(self) -> str:\n return pulumi.get(self, \"day_of_week\")", "title": "" }, { "docid": "f5a6050accc7566ad20eb8bd9fa9863f", "score": "0.54343885", "text": "def day_of_week(self) -> str:\n return pulumi.get(self, \"day_of_week\")", "title": "" }, { "docid": "9f74d87491e7483afc04b537fdd8dee3", "score": "0.5421495", "text": "def diwali(gregorian_year):\n return hindu_lunar_holiday(8, 1, gregorian_year)", "title": "" }, { "docid": "abb8daeb909fe74d61d611724614fe5f", "score": "0.5420803", "text": "def get_hebrew_independence_day(self, jewish_year):\n month = 2\n day = 5\n original_hebrew_independence_date = HebrewDate(jewish_year, month, day)\n if original_hebrew_independence_date.weekday() == 6:\n day = 4\n if original_hebrew_independence_date.weekday() == 7:\n day = 3\n if original_hebrew_independence_date.weekday() == 2:\n day = 6\n return [\n (HebrewDate(jewish_year, month, day - 1), \"Independence Day Eve\"),\n (HebrewDate(jewish_year, month, day), \"Independence Day\")\n ]", "title": "" }, { "docid": "2754969828d1940e7a73ae9097ef0242", "score": "0.5408187", "text": "def meetup_day(year, month, day_of_week, day_occurrence):\n \n cal = calendar.monthcalendar(year, month)\n day_of_week_index = days_of_week[day_of_week]\n \n not_teenth = day_occurrence != 'teenth'\n day_is_in_first_week = cal[0][day_of_week_index] != 0\n \n if not_teenth and day_is_in_first_week:\n week_index = week_indices[day_occurrence]\n \n elif not_teenth and not day_is_in_first_week:\n week_index = week_indices[day_occurrence] + 1\n \n else:\n for i in range(len(cal)):\n if cal[i][day_of_week_index] >= 10:\n week_index = i\n break\n\n date = cal[week_index][day_of_week_index]\n return datetime.date(year, month, date)", "title": "" }, { "docid": "2347a8ee2bacd5b32f134165be77f199", "score": "0.5406146", "text": "def isoweekday(self):\n return 0", "title": "" }, { "docid": "2347a8ee2bacd5b32f134165be77f199", "score": "0.5406146", "text": "def isoweekday(self):\n return 0", "title": "" }, { "docid": "a5071479a1f5e9b0c29b10494ec875ac", "score": "0.53982383", "text": "def test_function():\n test_date_time = datetime.now()\n print(test_date_time.strftime('%a')) # abrivated Locale weekday name\n print(test_date_time.strftime('%A')) # full Locale weekday name\n print(test_date_time.strftime('%b')) # abrivated Locale month name\n print(test_date_time.strftime('%B')) # full Locale month name\n print(test_date_time.strftime('%c')) # Date and time representation\n print(test_date_time.strftime('%d')) # month day number\n print(test_date_time.strftime('%H')) # hour 24 hour format\n print(test_date_time.strftime('%I')) # hour 12 hour format\n print(test_date_time.strftime('%j')) # day no of the 
year\n print(test_date_time.strftime('%m')) # month number (01-12)\n print(test_date_time.strftime('%M')) # Minute (00-59)\n print(test_date_time.strftime('%p')) # AM or PM\n print(test_date_time.strftime('%S')) # seconds (00-59)\n print(test_date_time.strftime('%w')) # week day number (0-sunday, 1-monday,..)\n print(test_date_time.strftime('%W')) # week number of the year\n print(test_date_time.strftime('%x')) # date expression\n print(test_date_time.strftime('%X')) # time expression\n print(test_date_time.strftime('%y')) # last 2 digit year\n print(test_date_time.strftime('%Y')) # 4 digit year\n print(test_date_time.strftime('%Z')) # Time zone name (no characters if time zone not exist)\n print(test_date_time.strftime('%%')) # A percentage character", "title": "" }, { "docid": "aa25ad15503297a3154019d542abe7ba", "score": "0.5397967", "text": "def set_week_day(self, wday):\r\n\t\twdays = ['Domingo', 'Lunes', 'Martes', 'Miercoles',\r\n\t\t\t\t 'Jueves', 'Viernes', 'Sabado']\r\n\t\tfor i in range(7):\r\n\t\t\tif wday == i: \r\n\t\t\t\treturn wdays[i]", "title": "" }, { "docid": "2b6ed5d622f1fe8bdd664907ef36d357", "score": "0.53965974", "text": "def convert_week_number_to_date(week_number, first_monday, weekday=0):\n assert(1 <= week_number <= 52)\n assert(0 <= weekday <= 6)\n first_gehol_year_day = datetime.strptime(first_monday, \"%d/%m/%Y\")\n num_days = (week_number-1) * 7 + weekday\n dt = timedelta(days = num_days)\n return first_gehol_year_day + dt", "title": "" }, { "docid": "d042f46ccecb52b8ad0267e5b60daa26", "score": "0.53796417", "text": "def get_weekday(self, as_str=False):\n\n # First we get the first 8 bits stored in the weekday register\n # and translate it to an integer\n wd_8bits = self.__read_register(_REGISTER_WEEKDAY)\n\n # Then we extract the weekday and return it\n wd = wd_8bits & 0x07 # 0x07 = 0b00000111\n\n if as_str is True: # if we want the weekday's name\n wd = WEEKDAY_STR[wd]\n\n return wd", "title": "" } ]
a8fda660651dd2cbe8fc3835c2402716
Function that finds a root using Newton's iteration for a given function f(x) with known derivative f'(x). The function finds the root of f(x) with a predefined absolute accuracy epsilon. The function accepts a starting point x0 that belongs to an interval [a,b] in which f is known to have a root. If the function f has multiple roots in this interval, then Newton's method converges randomly to one of them. If f(x) does not change sign in the interval [a,b] (so Bolzano's theorem cannot be applied), the function has unpredictable behavior: it will either execute max_iterations iterations and converge to a false result, or converge in fewer than max_iterations iterations to a partially correct root that does not meet the predefined accuracy. The function also checks whether x0 is a root of f(x), and if it is, the function returns the value of x0.
[ { "docid": "5357d96546a3b61900b293c024e68def", "score": "0.7213952", "text": "def newton_raphson(f, fprime, x0, eps=5e-6, max_iterations=50):\n\n if f(x0) == 0: # check if x0 is root of f\n return x0, 0\n\n # initializations\n current_x = x0 - f(x0) / fprime(x0)\n previous_x = x0\n iterations_num = 0\n\n # Newton's method algorithm\n # iterate while the error is larger that predefined argument eps or we have more iterations to do until\n # max_iterations\n while abs(current_x - previous_x) >= eps and iterations_num < max_iterations:\n # on each step update variables x and also increase iterations_num\n previous_x = current_x\n current_x = current_x - f(current_x) / fprime(current_x)\n iterations_num += 1\n\n return current_x, iterations_num", "title": "" } ]
[ { "docid": "88fffe03bfe4cfc189651c7be2b9de99", "score": "0.7853638", "text": "def NewtonRoot(f, x, tol=1e-10, iters=2000, der_shift=1):\n\n\tif isinstance(x, list):\n\t\t# Number of variables\n\t\tm = len(x)\n\n\t\t# Convert to da.Var type\n\t\tif m > 1:\n\t\t\tfor i in range(m):\n\t\t\t\tif not isinstance(x[i], da.Var):\n\t\t\t\t\tx[i] = da.Var(x[i], _get_unit_vec(m, i))\n\t\t\treturn _NewtonRootVector(f, x, iters=iters, tol=tol, der_shift=der_shift)\n\t\tx = x[0]\n\tif not isinstance(x, da.Var):\n\t\tx = da.Var(x)\n\n\t# Initial step\t\n\tg = f(x)\n\tvar_path = [x.val]\n\tg_path = [g.val]\n\n\t# Run Newton's root-finding method\n\tfor i in range(iters):\n\t\t# Check if guess is a root\n\t\tif np.array_equal(g.val, np.zeros((g.val.shape))):\n\t\t\tbreak\n\n\t\t# If derivative is extremely close to 0, set to +1 or -1 as a form of random restart\n\t\t# This avoids making a new guess at, e.g., x + 1e10\n\t\tif np.linalg.norm(g.der) < tol:\n\t\t\tg.der = np.ones(g.der.shape) * (der_shift if np.random.random() < 0.5 else -der_shift)\n\n\t\t# Take step and include in path\n\t\tstep = da.Var(g.val / g.der, None)\n\t\tx = x - step\n\t\tg = f(x)\n\t\tvar_path.append(x.val)\n\t\tg_path.append(g.val)\n\n\t\t# Avoid using abs(step) in case guess is at 0, because derivative is not continuous\n\t\tcond = -step if step < 0 else step\n\t\t\n\t\t# If step size is below tolerance, no need to continue\n\t\tif cond < tol:\n\t\t\t# print (\"Reached tol in {} iterations\".format(i + 1))\n\t\t\tbreak\n\telse:\n\t\t# print (\"Reached {} iterations without satisfying tolerance.\".format(iters))\n\t\tpass\n\n\troot = da.Var(x.val, g.der)\n\tvar_path = np.reshape(np.concatenate((var_path)), [-1])\n\tg_path = np.concatenate(g_path)\n\treturn root, var_path, g_path", "title": "" }, { "docid": "f20e4c30832667ce5f8e26bbc4995533", "score": "0.77372265", "text": "def solve_newton(f, df, x0, epsilon=1E-8, max_iter=100):\n xn = x0\n for n in range(0, max_iter):\n fxn = f(xn)\n if abs(fxn) < epsilon:\n return xn\n dfxn = df(xn)\n if dfxn == 0: # avoid zero derivatives\n xn = xn + 1E-3\n continue\n xn = xn - fxn / dfxn\n return None", "title": "" }, { "docid": "ee09e756d46d400d170130b185016ad5", "score": "0.7570736", "text": "def newton(f,df,x0,tol=1.0e-8,maxit=100):\n for k in range(1,maxit):\n x1 = x0 - f(x0) / df(x0)\n err = abs(x1 - x0)/abs(x1)\n print(\" %d %e %e %e\" % (k, x1, f(x1), err) )\n if (err < tol):\n return x1\n x0 = x1\n print(\"Numero maximo de iteracoes excedido.\")\n return x1", "title": "" }, { "docid": "31428817af4b9a23358ce67fabcf6486", "score": "0.7509394", "text": "def newton(f, x0, args):\r\n\r\n \"\"\"\r\n Performs Newton-Raphson iterations, starting from initial approximation. Breaks if value of f() is close to zero\r\n indicating a root has been found.\r\n \"\"\"\r\n iter_count = 0\r\n while True:\r\n check = f(x0, *args) # Value of function at x\r\n zero = np.zeros(np.shape(check)) # Generates array of zeros\r\n if np.allclose(check, zero): # Checks if value of function is close to zero --> root. 
Breaks if root is found\r\n break\r\n x0 = newtonIter(f, x0, *args) # Applies an iteration to function at current root approximation\r\n iter_count += 1\r\n\r\n # Halts if no root is found after 1000 iterations and returns an empty array\r\n if iter_count == 1000:\r\n return []\r\n return x0", "title": "" }, { "docid": "97d52ab7defb64428db5e99d29b3dee6", "score": "0.7423431", "text": "def newton(f, dfdx, x0, epsilon):\n xprev = x0\n xnew = xprev - f(xprev)/dfdx(xprev)\n numiter = 1\n while np.abs(xnew - xprev) > epsilon:\n xprev = xnew\n xnew = xprev - f(xprev)/dfdx(xprev)\n numiter += 1\n return xnew, numiter", "title": "" }, { "docid": "e996ad15238f7bd4e3a04a9cf9b1e200", "score": "0.72971", "text": "def bisection_root(f, x_left, x_right, epsilon):\n\t# Make sure x_left and x_right are entered in the right order\n\tif x_left > x_right:\n\t\t# Swap them\n\t\tx_left, x_right = x_right, x_left\n\n\t# Check if either of the intervals are a zero themselves.\n\tif f(x_left) == 0:\n\t\treturn x_left\n\telif f(x_right) == 0:\n\t\treturn x_right\n\telif f(x_left) * f(x_right) > 0:\n\t\t# If this is True, then the value of the function does not change sign over the interval.\n\t\t# The root finding algorithm is not guaranteed to work. \n\t\tprint(\"The function does not pass through zero for the given interval.\")\n\t\treturn None\n\n\tnum_iterations = 100000\n\tfor i in range(num_iterations):\n\t\tmiddle_interval = (x_left + x_right) / 2\n\t\tmiddle_value = f(middle_interval)\n\t\tif abs(middle_value) <= epsilon:\n\t\t\t# middle_value is within range of epsilon\n\t\t\tprint(f\"Found root in {i+1} iterations.\")\n\t\t\treturn middle_interval\n\t\telif f(x_left) * f(middle_interval) < 0:\n\t\t\t# f(x_left) and f(middle_interval) have opposite signs. \n\t\t\t# There must be a zero between them\n\t\t\t# Set right value to the middle value\n\t\t\tx_right = middle_interval\n\t\telif f(x_right) * f(middle_interval) < 0:\n\t\t\t# f(x_right) and f(middle_interval) have opposite signs. 
\n\t\t\t# There must be a zero between them\n\t\t\t# Set left value to the middle value\n\t\t\tx_left = middle_interval\n\t\telse:\n\t\t\t# All signs are equal, something went wrong\n\t\t\treturn None\n\n\t# Did not find a root within the number of iterations\n\tprint(f\"Did not find a root within {num_iterations} iterations\")\n\treturn middle_interval", "title": "" }, { "docid": "ba2c270cdad1d6da7e6ec12f2e4758d0", "score": "0.70392", "text": "def solve(fvals, x0, debug = False):\n # initial guess\n x = x0\n if debug:\n print 'Initial guess: x = %22.15e' % x0\n # Newton iteration to find a zero of f(x) \n maxiter = 20\n tol = 1e-14\n for k in range(maxiter):\n # evaluate function and its derivative:\n f, fp = fvals(x)\n if (abs(f) < tol):\n break\n # compute Newton increment x:\n deltax = f / fp\n # update x:\n x = x - deltax\n if debug:\n print 'After %s iterations, x = %22.15e' % (k+1,x)\n if (k == maxiter-1):\n # might not have converged\n f, fp = fvals(x)\n if (abs(f) > tol):\n print '*** Warning: has not yet converged'\n return x, k+1\n else:\n return x, k", "title": "" }, { "docid": "3bac019876b38c42814c7c24a434ba86", "score": "0.70108086", "text": "def newton(x):\r\n\r\n # Initialize the tolerance and estimate\r\n tolerance = 0.000001\r\n estimate = 1.0\r\n\r\n # Perform the successive approximations\r\n while True:\r\n estimate = (estimate + x / estimate) / 2\r\n difference = abs(x - estimate ** 2)\r\n if difference <= tolerance:\r\n break\r\n return estimate", "title": "" }, { "docid": "2ca236bbf997540ad0e6c6885e0b67cc", "score": "0.69906116", "text": "def newton_method(self, x_0):\n\n self.error_a = 101\n i = 0\n while i < self.n_iter and self.exp_err < self.error_a:\n\n x_1 = x_0-self.function(x_0)/self.function_prime(x_0)\n\n if x_1 != 0:\n self.error_a = error_formula(x_0, x_1)\n\n x_0 = x_1\n i += 1\n\n print(\"Current iteration for newton method is %d, with root value of %.4f\" %(i, x_0))\n print(\"Final result using the newton method is %.4f with %d iterations\" %(x_0, i))", "title": "" }, { "docid": "3ae365a7df3590d8a45c343c5f96e2a1", "score": "0.6974889", "text": "def newton_raphson(function, derivate, initial_approximation, max_iterations, \n error):\n # Set the first and second value for the process\n last_x = initial_approximation\n current_x = last_x - (function(last_x)/derivate(last_x))\n #######################################################\n print('Iteration | x | f(x) | Error ')\n #######################################################\n iterations = 0\n ########################################################\n print(' %d %.7f %.7f -' % (iterations,initial_approximation,current_x))\n #######################################################\n iterations = 1\n while abs(current_x - last_x) >= error:\n if iterations > max_iterations:\n return current_x\n last_x = current_x\n # Get the next value\n current_x = last_x - (function(last_x)/derivate(last_x))\n ####################################################### \n print(' %d %.7f %.7f %.7f' % (iterations,last_x,current_x,abs(current_x - last_x)))\n #######################################################\n iterations += 1\n \n print(iterations)\n return current_x", "title": "" }, { "docid": "d0b6e7d9ddab6de81e102b7ac3172547", "score": "0.691323", "text": "def pure_newton(x0, my_func, my_jacobian, merit_fn, maxit, ftol, run_line_search=True, eta=0.001,\n gamma=0.5):\n if not 0 < eta < 1:\n raise ValueError(\"eta = {0} does not satisfy 0 < eta < 1.\".format(eta))\n if not 0 < gamma < 1:\n raise ValueError(\"gamma = 
{0} does not satisfy 0 < gamma < 1.\".format(gamma))\n logger.info(\"Pure Newton: x0={0}, eta={1}, gamma={2}\".format(x0, eta, gamma))\n\n # Setup\n k = 0\n x_k = x0\n jacobian_k = my_jacobian(x_k)\n func_k = my_func(x_k)\n\n # Continue looping until convergence.\n while k < maxit:\n # Find direction\n # TODO (ac) replace inverse with other method\n p_k = -np.dot(np.linalg.inv(jacobian_k), func_k)\n\n # If line searching, reduce step size until merit function indicates sufficient decrease\n alpha_k = 1.0\n if run_line_search:\n j_k = 0\n alphas = []\n merits = []\n merit_k = merit_fn(my_func(x_k + alpha_k * p_k))\n while merit_k > (1 - alpha_k * eta) * merit_fn(my_func(x_k)):\n alpha_k *= gamma\n j_k += 1\n merit_k = merit_fn(my_func(x_k + alpha_k * p_k))\n\n # Append logger info\n alphas.append(alpha_k)\n merits.append(merit_k)\n else:\n merit_k = merit_fn(my_func(x_k))\n\n # Logging, as requested\n log_str = \"Iter {0}: x_k = {1}, F_k = {2}, ||F_k|| = {3}.\".format(k, x_k, func_k, merit_k)\n if run_line_search:\n log_str += \" alphas: {0}. merit functions: {1}\".format(alphas, merits)\n logger.info(log_str)\n\n # Stopping condition\n if merit_k < ftol:\n logger.info(\"MERIT FUNCTION ~= 0! x_k={0}.\".format(x_k))\n break\n\n # Prepare for next iteration\n x_k += alpha_k * p_k\n k += 1\n jacobian_k = my_jacobian(x_k)\n func_k = my_func(x_k)\n\n logger.info(\"Final iterate x_k={0}\".format(x_k))\n return x_k", "title": "" }, { "docid": "10fa59196231f7caf988af4fda1767fe", "score": "0.6853835", "text": "def newtons_method(x, tolerance):\n errormet = False\n\n while errormet is False:\n x_new = x - f(x)/f_prime(x)\n\n if abs(x_new - x) < tolerance:\n errormet = True\n\n x = x_new\n\n return x_new", "title": "" }, { "docid": "64b5a8e6d6c34c3f7932f51201d2f6e0", "score": "0.68433785", "text": "def fixed_point(function, initial_approximation, max_iterations, error):\n\n # Set the first and second value for the process\n last_x = initial_approximation\n current_x = function(last_x)\n #######################################################\n print('Iteration | x | f(x) | Error ')\n #######################################################\n iterations = 0\n ########################################################\n print(' %d %.7f %.7f -' % (iterations,initial_approximation,current_x))\n #######################################################\n iterations = 1\n while abs(current_x - last_x) >= error:\n \n if iterations > max_iterations:\n return current_x\n last_x = current_x\n # Get the next value\n current_x = function(last_x)\n ####################################################### \n print(' %d %.7f %.7f %.7f' % (iterations,last_x,current_x,abs(current_x - last_x)))\n #######################################################\n iterations += 1\n \n return current_x", "title": "" }, { "docid": "3044494951800e31565dc7e4e761e11e", "score": "0.6790087", "text": "def newton_for_geocoding(function_with_jacobian, initial_guess, max_iter=20, tolerance=1.e-5):\n tolerance_squared = tolerance * tolerance\n x = initial_guess\n\n for _ in range(max_iter):\n f_eval, j_eval = function_with_jacobian(x)\n dx = -_inv_3x3_transpose(j_eval, f_eval)\n x += dx\n\n stop_condition = np.dot(dx, dx)\n if stop_condition <= tolerance_squared:\n break\n else:\n raise RuntimeError(\"Newton did not converge: \"\n \"maximum number of iterations reached.\")\n\n return x", "title": "" }, { "docid": "a302b09db7009a1adfcb6e1c8c6cdad7", "score": "0.6780299", "text": "def newtons_method(x, a, tolerance):\n errormet = False\n 
notFoundFlag = False\n iterations = 0\n\n while errormet is False:\n\n fx = 0.0\n for i in range(len(a)): # Calculate f(x)\n fx += a[i]*x**i\n\n fprimex = 0.0\n for i in range(1, len(a)):\n fprimex += i*a[i]*x**(i-1)\n\n x_new = x - fx/fprimex\n\n iterations += 1\n if abs(x_new - x) < tolerance:\n errormet = True\n print(\"Newton iterations = \", iterations)\n\n x = x_new\n\n # If you do 10,000 Newton iterations and still no root\n # then the remaining roots are probably complex. Stop.\n if iterations > 10000:\n return 0.0, True\n\n return x_new, notFoundFlag", "title": "" }, { "docid": "1e43da59a55dba1d00200fb61c274e13", "score": "0.6776221", "text": "def newton(fun_fprime, x, args,\n tol = 5e-5,\n step = 1e-6,\n maxiter = 100,\n beta0 = 1.0,\n beta_factor = 0.5,\n disp = False,\n minimize = False):\n beta = beta0\n \n f, df_dx = newton_eval(fun_fprime, x, *args)\n for ii in range(0, maxiter):\n \n if disp:\n print(\"{}: x = {}, f = {}, dfdx = {}\".format(ii, x, f, df_dx))\n\n # Stop if we reach our desired constraint tolerance\n if not minimize and np.abs(f) <= tol: # newton's method\n break\n\n try:\n dx = -beta * (f / df_dx)\n if disp: print(\"dx = {}\".format(dx))\n except FloatingPointError:\n if minimize:\n break\n else:\n raise ZeroDivisionError(\"zero derivative encountered\")\n\n # Keep track of previous values\n fp = f\n df_dxp = df_dx\n xp = x\n\n # Get next value\n x = xp + dx\n \n # If update fails due to nan, try again with a smaller beta.\n try:\n f, df_dx = newton_eval(fun_fprime, x, *args)\n if minimize:\n if np.abs(dx) < tol:\n break\n if np.abs(f) < np.abs(fp):\n retry = False\n else:\n retry = True\n except FloatingPointError:\n retry = True\n except ValueError:\n retry = True\n\n if retry:\n f = fp\n df_dx = df_dxp\n x = xp\n beta *= beta_factor\n elif beta < beta0:\n beta /= beta_factor\n if beta > beta0:\n beta = beta0\n # beta = beta0 # reset beta\n\n if ii >= maxiter-1:\n raise ValueError(\"exceeded max iterations\")\n\n return x", "title": "" }, { "docid": "d1c1dbeff9097c5d67f87ddaae7882fb", "score": "0.67748815", "text": "def newton_roots(self, accuracy_zero=10**-4):\n\n p1 = Polynomial(self.coeff[:])\n\n def newton(poly, start, accuracy=10 ** -16, max_steps=10):\n\n der = poly.derivative()\n x1 = start\n for k in range(max_steps):\n if poly(x1) == 0:\n break\n try:\n x0 = x1\n x1 = x0 - poly(x0) / der(x0)\n except ZeroDivisionError:\n x0 = x1 - 10**-8\n x1 = x0 - poly(x0) / der(x0)\n if abs(x0 - x1) < accuracy:\n break\n return x1\n\n roots = []\n temp = 0\n max_roots = len(p1.coeff)\n root = self.lagrange_r() if self.lagrange_r() else 0\n while len(p1.coeff) > 1 and temp <= max_roots:\n temp += 1\n root = newton(p1, start=root, max_steps=max_roots ** 2)\n while abs(p1(root)) < accuracy_zero:\n root = round(root, 12)\n roots.append(root)\n p1 = p1.deflation(root)\n roots.sort()\n return roots", "title": "" }, { "docid": "c46a703a589bf0f2051c6a40ba228055", "score": "0.67644894", "text": "def newtonIter(f, x0, *args):\r\n J = approxJ(f, x0, 1e-8, *args) # Approximates Jacobian\r\n\r\n \"\"\"\r\n Solves J(x1-x0) = -F(x0) for x1. 
Raises error if Jacobian is singular.\r\n \"\"\"\r\n try:\r\n x1_minus_x0 = solve(J, -f(x0, *args))\r\n except np.linalg.LinAlgError:\r\n raise np.linalg.LinAlgError(f\"Singular Jacobian --> Initial guess has caused solution to diverge.\\n\"\r\n \"Please try again with a different initial guess.\")\r\n x1 = x1_minus_x0 + x0\r\n return x1", "title": "" }, { "docid": "80d01e1dacc6fbb74d174e0036e1b09a", "score": "0.6745198", "text": "def newton_g_opt(x_0: float,maxiter: int,tol: float) -> float:\n x = x_0\n for i in range(maxiter):\n x_next = x - (g_prime(x)/g_2prime(x))\n # if (x_next - x) < tol:\n if abs(x_next - x) < tol:\n # It is also acceptable to return g(x)\n # return g(x_next)\n # But my examples compare with x.\n return x_next\n x = x_next\n # if i == maxiter - 1 and x_next - x < tol:\n if i == maxiter - 1 and abs(x_next - x) > tol:\n \n # return g(x)\n return x", "title": "" }, { "docid": "f043550773a6e1ae2ce8f9f746a807bc", "score": "0.6720384", "text": "def calculate_root(f, a, b, eps):\n assert f(a)*f(b) < 0\n\n iter_count = 0\n middle = (a+b) / 2\n while (abs(b-a) > eps\n and iter_count < MAX_ITERATION_COUNT):\n if f(middle)*f(a) > 0:\n a = middle\n else:\n b = middle\n middle = (a+b) / 2\n iter_count += 1\n\n return middle, iter_count", "title": "" }, { "docid": "549d5f2d1ea2f9b58f5b69879493d8a6", "score": "0.6688674", "text": "def _iterate_newton(func, deriv_func, x0, max_iter=500, tol=1e-08):\n\n xi = x0\n for i in range(1, max_iter + 1):\n xj = _iterate_newton_step(func, deriv_func, xi, tol)\n \n # failed\n if xj is None:\n return i, -100\n\n # close enough\n if cmath.isclose(xi, xj, rel_tol=0, abs_tol=tol):\n return i, xj\n\n xi = xj\n xi = -100\n\n return i, xi", "title": "" }, { "docid": "7a93cd2e6246dba097aa0a86ee3744c2", "score": "0.6681371", "text": "def Newtons_Method(func, func_prime, x_0, iters=100, tol=1e-5):\n i = 1\n x_new = x_0-func(x_0)/func_prime(x_0)\n while i < iters:\n x_new = x_0-func(x_0)/func_prime(x_0)\n if abs(x_new-x_0) < tol:\n return x_new\n else:\n x_0 = x_new\n i += 1\n return None", "title": "" }, { "docid": "55100c992770a015017941b30756e432", "score": "0.66797745", "text": "def NewtonSolver(fun,x0,args,tol=1e-8,maxIters=500):\n\n def secant(xn,x1,x0,f,args):\n \"\"\"Estimates the derivative using the secant method\"\"\"\n bottom = f(xn,x0,args)-f(x1,x0,args) \n # set all values of 0 to our tolerance level, to avoid 'divideByZero' and NaN errors\n bottom[bottom == 0] = tol**2\n return (xn-x1)/(bottom) # equation for first derivative estimation\n\n #initialize 2 guesses to estimate the derivative\n xn_1 = x0\n xn_2 = np.ones_like(x0)*tol**3\n it = 0 \n while True:\n df = secant(xn_2,xn_1,x0,f,args) #estimate the derivative\n xn = xn_1 - f(xn_1,x0,args)*df #Update our guess of U[i+1] using secant method\n #for Newton iteration\n if np.linalg.norm(xn-xn_1) < tol:#If our iterations converge\n break #we're finished\n if it > maxIters: #We don't want to run forever\n break #break after 500 iterations\n it+=1\n xn_2 = xn_1 #otherwise, update our guesses\n xn_1 = xn\n return xn", "title": "" }, { "docid": "f8a950877942b2d8de22072cd87c4a5f", "score": "0.66148114", "text": "def root_find(s, t, t0, t1, func, tol=1e-08, eps=1e-16):\n # a,b,c: abscissae fa,fb,fc: corresponding function values\n a = t0\n b = t1\n c = a\n\n fa = func(a)\n fb = func(b)\n fc = fa\n\n # Main iteration loop\n n_iter = 0\n while True:\n n_iter += 1\n prev_step = b-a # Distance from the last but one to the last approx.\n tol_act = 0.0 # Actual tolerance\n new_step = 0.0 # Step at 
this iteration\n\n # Interpolation step is calculated in the form p/q\n # division operations is delayed until the last moment\n p = 0.0\n q = 0.0\n\n if numpy.abs(fc) < numpy.abs(fb):\n # Swap data for b to be the best approximation\n a = b\n b = c\n c = a\n fa = fb\n fb = fc\n fc = fa\n\n tol_act = 2.0*eps*numpy.abs(b) + tol/2.0\n new_step = (c - b)/2.0\n\n # Acceptable approximation found ?\n if numpy.abs(new_step) <= tol_act or fb == 0.0:\n root = b\n value = fb\n print 'finished after {} iterations.'.format(n_iter)\n return (root, value)\n\n # Interpolation may be tried if prev_step was large enough and in true direction\n if numpy.abs(prev_step) >= tol_act and numpy.abs(fa) > numpy.abs(fb):\n cb = c-b\n\n if a == c:\n # If we have only two distinct points, linear interpolation can only be applied\n t1 = fb / fa\n p = cb * t1\n q = 1.0 - t1\n else:\n # Inverse quadratic interpolation\n q = fa/fc\n t1 = fb/fc\n t2 = fb/fa\n p = t2 * (cb*q*(q - t1) - (b - a)*(t1 - 1.0))\n q = (q - 1.0) * (t1 - 1.0) * (t2 - 1.0)\n\n # p was calculated with the opposite sign make p positive and assign possible minus to q\n if p > 0.0:\n q = -q\n else:\n p = -p\n\n # If b+p/q falls in [b,c] and isn't too large, it is accepted\n # If p/q is too large then the bisection procedure can reduce [b,c] range to a larger extent\n if (p < 0.75*cb*q - numpy.abs(tol_act*q)/2.0\n and p < numpy.abs(prev_step*q/2.0)):\n new_step = p/q\n\n # Adjust the step to be not less than tolerance\n if numpy.abs(new_step) < tol_act:\n if new_step > 0.0:\n new_step = tol_act\n else:\n new_step = -tol_act\n\n # Save the previous approximate\n a = b\n fa = fb\n\n # Do step to a new approximation\n b += new_step\n fb = func(b)\n\n # Adjust c for it to have a sign opposite to that of b\n if (fb > 0 and fc > 0) or (fb < 0 and fc < 0):\n c = a\n fc = fa", "title": "" }, { "docid": "ed77b73b370c6943bc344dc20e6ace3e", "score": "0.65996295", "text": "def fzero_brent(a, b, f, tol):\n fa = f(a)\n fb = f(b)\n if fa == 0:\n return a\n elif fb == 0:\n return b\n elif fa * fb > 0:\n raise ValueError(\"root not bracketed\")\n if abs(fa) < abs(fb):\n a,b = b,a\n c = a\n d = np.nan\n mflag = True\n\n delta = 1e-10\n niter = 0\n while niter != 1000:\n\n fc = f(c)\n if (fa != fc) and (fb != fc):\n # inverse quadratic interpolation\n s = a*fb*fc / ((fa-fb) * (fa-fc)) \\\n + b*fa*fc / ((fb-fa) * (fb-fc)) \\\n + c*fa*fb / ((fc-fa) * (fc-fb))\n else:\n # secant\n s = b - fb*(b-a)/(fb-fa)\n\n if ((not 0.25*(3*a+b) < s < b) and (not b < s < 0.25*(3*a+b))) or \\\n (mflag and (abs(s-b) >= 0.5*abs(b-c))) or \\\n (~mflag and (abs(s-b) >= 0.5*abs(c-d))) or \\\n (mflag and (abs(b-c) < abs(delta))) or \\\n (~mflag and (abs(c-d) < abs(delta))):\n # bisection\n s = 0.5*(a+b)\n mflag = True\n else:\n mflag = False\n\n fs = f(s)\n d = c\n c = b\n fa = f(a)\n fb = f(b)\n if fa*fs < 0:\n b = s\n fb = fs\n else:\n a = s\n fa = fs\n\n if abs(fa) < abs(fb):\n a,b = b,a\n fa,fb = fb,fa\n\n if fb == 0:\n return b\n elif f(s) == 0:\n return s\n elif (abs(b-a) < tol):\n return b\n niter += 1\n\n raise RuntimeError(\"maximum iterations exceeded (%f, %f)\" % (b, s))", "title": "" }, { "docid": "4a3436e4d98dbc0c390170b4e0556975", "score": "0.657501", "text": "def newton_method(\n f: \"convex function to be minimized\",\n x: \"starting point in the strictly feasible domain of f\",\n e: \"tolerance, >0\",\n gradient_f,\n hessian_f,\n a: \"line search parameter, affinity of approximation\" = 0.25,\n b: \"line search parameter, decreasing rate\" = 0.5) -> \"x*\":\n while True:\n 
Grad_f_x = gradient_f(x)\n # hfx = hessian_f(x)\n Hess_f_x_inv = np.linalg.inv(hessian_f(x))\n \n decrement = Grad_f_x @ Hess_f_x_inv @ Grad_f_x\n if decrement/2 <= e:\n return x\n newton_step = -Hess_f_x_inv @ Grad_f_x\n\n t = line_search(f, x, newton_step, Grad_f_x, a, b)\n x = x + t*newton_step", "title": "" }, { "docid": "80b5bd34d44f32f39912d5d650a92519", "score": "0.65437984", "text": "def newton_g_opt(x_0: float, maxiter: float, tol: float) -> float:\r\n x = x_0\r\n # if(g(x) < tol):\r\n # return x\r\n for i in range(maxiter):\r\n # x1 = x - (g(x)/g_prime(x))\r\n x1 = x - (g_prime(x)/g_2prime(x))\r\n x = x1\r\n # if(g(x) < tol):\r\n if(abs(g_prime(x1)) < tol):\r\n return x1\r\n # return x_0 - g(x_0)/g_prime(x_0)\r", "title": "" }, { "docid": "c71b0e498a278c8248fb18fe57e4d032", "score": "0.651766", "text": "def newton_helper(f, df, a, tol):\n\tfa= f(a)\n\tslope = df(a) \n\tprogress = - fa / slope\n\tstep = 0\n\n\twhile abs(progress) > tol:\n\t\tstep += 1\t\n\t\ta = a - fa / slope\n\t\t#prepare for next step\n\t\tfa = f(a)\n\t\tslope = df(a)\n\t\tif slope == 0:\n\t\t\tbreak\n\t\tprogress = - fa / slope\n\n\treturn a, step", "title": "" }, { "docid": "3d3498fbb7a22c4d228bd8dbf0f74681", "score": "0.65063906", "text": "def quasi_newton(x0, my_func, my_gradient, maxit, ftol, check_armijo=True, eta=0.001, gamma=0.5,\n update='bfgs', keep_hessian=False):\n if not 0 < eta < 1:\n raise ValueError(\"eta = {0} does not satisfy 0 < eta < 1.\".format(eta))\n if not 0 < gamma < 1:\n raise ValueError(\"gamma = {0} does not satisfy 0 < gamma < 1.\".format(gamma))\n\n # Hessian update types\n if update == 'dfp':\n # TODO (ac) implement DFP update\n raise NotImplementedError(\"DFP update not yet supported for this method!\")\n\n if update not in ['bfgs', 'dfp']:\n raise ValueError(\"Provided quasi-Newton update {0}, must be {1}\"\n .format(update, ALLOWED_UPDATES))\n\n log_str = \"Quasi-Newton: x0={0}.\".format(x0)\n if check_armijo:\n log_str += \" eta={1}, gamma={2}\".format(x0, eta, gamma)\n logger.info(log_str)\n\n # Setup\n k = 0\n x_k = x0\n g_k = my_gradient(x_k)\n f_k = my_func(x_k)\n B_k = np.eye(x0.shape[0])\n norm_g_k = np.linalg.norm(g_k)\n\n logger.info(\"At starting iteration, g_k={0}, f_k={1}.\".format(g_k, f_k))\n while k < maxit:\n if norm_g_k < ftol:\n logger.info(\"GRADIENT ~= 0! 
x_k = {0}.\".format(x_k))\n break\n\n # Get direction\n # TODO (ac) replace inverse with other method\n p_k = -np.dot(np.linalg.inv(B_k), g_k)\n\n # Get step size using Armijo if requested.\n if check_armijo:\n alpha_k = 1.0\n while my_func(x_k + alpha_k * p_k) > (f_k + eta * alpha_k * np.dot(p_k, g_k)):\n alpha_k *= gamma\n else:\n alpha_k = -np.dot(g_k, p_k) / np.dot(np.dot(p_k, B_k), p_k)\n\n # Update step, x, gradient, y.\n s_k = alpha_k * p_k\n x_next = x_k + s_k\n g_next = my_gradient(x_next)\n y_k = g_next - g_k\n\n # Update Hessian approximation using BFGS\n if update == 'bfgs' and np.dot(y_k, s_k) > 0:\n logger.info(\"Updating B_k since {0} > 0\".format(np.dot(y_k, s_k)))\n B_k = (B_k\n - 1 / np.dot(np.dot(s_k, B_k), s_k)\n * np.outer(np.dot(B_k, s_k), np.dot(s_k, B_k))\n + 1 / np.dot(y_k, s_k) * np.outer(y_k, y_k))\n\n # Final updates.\n x_k = x_next\n f_k = my_func(x_k)\n g_k = g_next\n norm_g_k = np.linalg.norm(g_k)\n\n logger.info(\"Iteration {0}: x_k={1}, f_k={2}, ||g_k||={3}, alpha_k={4}, B_k={5}\".format(\n k, x_k, f_k, norm_g_k, alpha_k, B_k))\n k += 1\n\n if keep_hessian:\n return x_k, B_k\n return x_k", "title": "" }, { "docid": "8b91ed70053f30abe8bc2768bee60cb0", "score": "0.6475439", "text": "def newton(f, x0, x1, precision=default_newton_precision):\n f0, f1 = f(x0), f(x1)\n while f1 and abs(x1 - x0) > precision and f1 != f0:\n x0, x1 = x1, x1 + (x1 - x0) / (f0/f1 - 1)\n f0, f1 = f1, f(x1)\n return x1", "title": "" }, { "docid": "cbac770734d40d4205433aa97ddbbb66", "score": "0.64698493", "text": "def secante(f,x0,x1,tol=1.0e-8,maxit=100):\n for k in range(maxit):\n f1, f0 = f(x1), f(x0)\n x2 = x1 - (f1*(x1-x0))/(f1-f0)\n f2 = f(x2)\n err = abs(x2-x1)/abs(x2)\n print(\" %d\\t%e\\t%e\\t%e\" % (k, x2, f2, err) )\n if err < tol:\n return x2\n x0, x1 = x1, x2\n print(\"Numero maximo de iteracoes excedido.\")\n return x2", "title": "" }, { "docid": "4fb737f66e2ce559975248d960f0dd1c", "score": "0.6464333", "text": "def Newton_algoritmo(F, J, x, eps):\n F_value = F(x)\n #display(Latex('$$ F(x) = '+ latex(simplify(F_value)) + '$$'))\n F_norm = np.linalg.norm(F_value, ord=2) # l2 norm of vector\n contador_iteraciones = 0\n while abs(F_norm) > eps and contador_iteraciones < 100:\n delta = np.linalg.solve(J(x), -F_value)\n # display(Latex('$$ F(x) = '+ latex(simplify(F_value)) + '$$'))\n # display(Latex('$$ J(x) = '+ latex(simplify(J(x))) + '$$'))\n # display(Latex('$$ SEL = '+ latex(simplify(delta)) + '$$'))\n x = x + delta\n display(Latex('$$ Iteracion = '+ latex(simplify(contador_iteraciones)) + '$$'))\n display(Latex('$$ SolucionSistema = '+ latex(simplify(x)) + '$$'))\n F_value = F(x) #test\n F_norm = np.linalg.norm(F_value, ord=2)\n contador_iteraciones += 1\n\n # Hasta que una solucion es encontrada o muchas iteraciones \n if abs(F_norm) > eps:\n contador_iteraciones = -1\n return x, contador_iteraciones", "title": "" }, { "docid": "6aa8966f04e1c6a214c3870e7f48d52a", "score": "0.639518", "text": "def newton(a, b, c, skew, kurtosis, max_iter=25, converge=1e-5):\n f = flfunc(a, b, c, skew, kurtosis)\n for i in range(max_iter):\n if max(map(abs, f)) < converge:\n break\n J = flderiv(a, b, c)\n delta = -solve(J, f)\n (a, b, c) = delta + (a,b,c)\n f = flfunc(a, b, c, skew, kurtosis)\n return (a, b, c)", "title": "" }, { "docid": "ef398184a8dea47bc0c648274aa1f92e", "score": "0.6381392", "text": "def NewtonRaphson(func, a):\n while True:\n c = Decimal(a) - (Decimal(eval(func)) / Decimal(eval(str(diff(func)))))\n\n a = c\n\n # This number dictates the accuracy of the answer\n if 
abs(eval(func)) < 10 ** -15:\n return c", "title": "" }, { "docid": "84815d3ec0477d3141d9429dffe8922f", "score": "0.6348605", "text": "def newton(x_0_arr, a_arr, mu,\n tol = 1e-7, max_iter = 1000):\n res = np.empty(len(x_0_arr), dtype = np.float_)\n for i in range(len(x_0_arr)):\n x_0 = x_0_arr[i]\n a = a_arr[i]\n if x_0 <= 0:\n res[i] = 0\n continue\n x = min(x_0, (x_0 / a) ** (1 / mu))\n for it in range(max_iter):\n x_next = x - f(x, x_0, a, mu) / der_f(x, x_0, a, mu)\n if x_next <= 0:\n x_next = 0.1 * x\n x = x_next\n if np.abs(f(x, x_0, a, mu)) < tol:\n break\n res[i] = x\n return res", "title": "" }, { "docid": "6e1776d003822dcad9ee6d20ce973b31", "score": "0.62989104", "text": "def solve(f, x0=None, nseg=128, verbose=False):\n\tr = x0\n\tif r is None:\n\t\tif verbose:\n\t\t\tprint('Calculating initial guess... ', end='', file=sys.stderr)\n\t\t# The area we seek is nseg equal-area rectangles surrounding f(x), not\n\t\t# f(x) itself, but we can get a good approximation from it.\n\t\tv = mpmath.quad(f, [0, mpmath.inf]) / nseg\n\t\tr = mpmath.findroot(lambda x: x*f(x) + mpmath.quad(f, [x, mpmath.inf]) - v, x0=1, x1=100, maxsteps=100)\n\t\tif verbose:\n\t\t\tprint(r, file=sys.stderr)\n\t# We know that f(0) is the maximum because f must decrease monotonically.\n\tmaximum = f(0)\n\ttxi = []\n\ttv = mpmath.mpf()\n\tdef mini(r):\n\t\tnonlocal txi, tv\n\t\txi = [r]\n\t\ty = f(r)\n\t\tv = r*f(r) + mpmath.quad(f, [r, mpmath.inf])\n\t\tif verbose:\n\t\t\tprint('Trying r={0} (v={1})'.format(r, v), file=sys.stderr)\n\t\tfor i in itertools.count():\n\t\t\txm1 = xi[i]\n\t\t\th = v / xm1\n\t\t\ty += h\n\t\t\tif y >= maximum or mpmath.almosteq(y, maximum, abs_eps=mpmath.mp.eps * 2**10):\n\t\t\t\tbreak\n\t\t\t# We solve for x via secant method instead of using f's inverse.\n\t\t\tx = mpmath.findroot(lambda x: f(x) - y, xm1)\n\t\t\txi.append(x)\n\t\txi.append(mpmath.mpf())\n\t\tif len(xi) == nseg:\n\t\t\tif mpmath.almosteq(y, maximum, abs_eps=mpmath.mp.eps * 2**10):\n\t\t\t\ttxi, tv = xi[::-1], v\n\t\t\t\treturn 0\n\t\t\t# If y > maximum, then v is too large, which means r is too far\n\t\t\t# left, so we want to return a negative value. 
The opposite holds\n\t\t\t# true when y < maximum.\n\t\t\treturn maximum - y\n\t\treturn len(xi) - nseg + h*mpmath.sign(len(xi) - nseg)\n\tr = mpmath.findroot(mini, r)\n\tassert len(txi) == nseg\n\tif verbose:\n\t\tprint('Done calculating r, v, x[i].', file=sys.stderr)\n\treturn r, tv, txi", "title": "" }, { "docid": "9d024a492ee9c0c7b18b2457160f4772", "score": "0.62724006", "text": "def fixpoint(f, x, tol=None, maxit=100, **kwargs):\n if tol is None:\n tol = sqrt(sp.finfo(float).eps)\n info = -1\n t = 0\n for it in range(maxit):\n t += 1\n gval = f(x, **kwargs)\n relres = la.norm(gval - x)\n if relres < tol:\n info = 0\n break\n x = gval\n return (info, relres, t, gval)", "title": "" }, { "docid": "870337eed9700301182681188555e284", "score": "0.6272313", "text": "def newtons_for_func(c, x, iterations=100):\n for _ in xrange(iterations):\n x = x - (1.0 / (2.0 * x)) * (x ** 2.0 - c)\n return x", "title": "" }, { "docid": "1aa748f78ba52aad55ec41b4ffb5b6d8", "score": "0.6263451", "text": "def _aberth(f, fp, x0, tol=1e-15, maxiter=50):\n\n N = len(x0)\n\n x = array(x0, complex)\n beta = np.empty_like(x0)\n\n for iteration in range(maxiter):\n alpha = -f(x) / fp(x) # Newton's method\n\n # Model \"repulsion\" between zeros\n for k in range(N):\n beta[k] = np.sum(1/(x[k] - x[k+1:]))\n beta[k] += np.sum(1/(x[k] - x[:k]))\n\n x += alpha / (1 + alpha * beta)\n\n if not all(np.isfinite(x)):\n raise RuntimeError('Root-finding calculation failed')\n\n # Mekwi: The iterative process can be stopped when |hn| has become\n # less than the largest error one is willing to permit in the root.\n if all(abs(alpha) <= tol):\n break\n else:\n raise Exception('Zeros failed to converge')\n\n return x", "title": "" }, { "docid": "4e4bc88ae250070131cd04fe9b30faea", "score": "0.6250397", "text": "def newton_solver(f, x0, y0=None, tol=1E-9, maxcount=100, backtrack_c=0.5, noisy=True):\n\n x, y = x0, y0\n if y is None:\n y = f(x)\n\n for count in range(maxcount):\n if noisy:\n printit(count, x, y)\n\n if np.max(np.abs(y)) < tol:\n return x, y\n\n J = obtain_J(f, x, y)\n dx = np.linalg.solve(J, -y)\n\n # backtrack at most 29 times\n for bcount in range(30):\n try:\n ynew = f(x + dx)\n except ValueError:\n if noisy:\n print('backtracking\\n')\n dx *= backtrack_c\n else:\n predicted_improvement = -np.sum((J @ dx) * y) * ((1 - 1 / 2 ** bcount) + 1) / 2\n actual_improvement = (np.sum(y ** 2) - np.sum(ynew ** 2)) / 2\n if actual_improvement < predicted_improvement / 2:\n if noisy:\n print('backtracking\\n')\n dx *= backtrack_c\n else:\n y = ynew\n x += dx\n break\n else:\n raise ValueError('Too many backtracks, maybe bad initial guess?')\n else:\n raise ValueError(f'No convergence after {maxcount} iterations')", "title": "" }, { "docid": "299413fe21e3043c8e6e3ae264ab40e6", "score": "0.6221491", "text": "def falsaposicao(f,a,b,tol=1.0e-8): \n if (f(a)*f(b)>0):\n print(\"Nao ha garantias de existir raiz nesse intervalo\")\n return None\n k = 0\n xk = a\n while abs(f(xk)) > tol:\t\n xk = ( a * f(b) - b * f(a) ) / ( f(b) - f(a) )\t\t\n print(\" %d\\t%e\\t%e\" % ( k, xk, f(xk) ))\n if f(a)*f(xk) < 0:\n b = xk\n else:\n a = xk \n k = k + 1\n return xk", "title": "" }, { "docid": "7471f12aa38457a93218eec4122ad819", "score": "0.616521", "text": "def base_newton(self,xzero):\n \n x=self.listtoarray(xzero)\n termination_criterion=False\n k=0\n try:\n while termination_criterion!=True:\n k+=1\n x[0]=x[0]-self.NewtonDirection(x)\n if sum(x)<=0.0001 and sum(x)>=-0.0001:\n termination_criterion=True\n except linalg.linalg.LinAlgError:\n 
return None\n return k", "title": "" }, { "docid": "d71fd9d07d56b871ef9fc293aaec7c94", "score": "0.614101", "text": "def secant_method(f: Callable[[float], float], x0: float, eps: float = 1e-7, kmax: int = 1e3) -> float:\n x, x_prev, i = x0, x0 + 2 * eps, 0\n\n while abs(x - x_prev) >= eps and i < kmax:\n x, x_prev, i = x - f(x) / (f(x) - f(x_prev)) * (x - x_prev), x, i + 1\n\n return x", "title": "" }, { "docid": "b9eb5a856f0805819b9a87aa1df525c2", "score": "0.614004", "text": "def newton_solve(func, deriv, start, tolerance):\n\tdef close_enough(x):\n\t\treturn abs(func(x)) < tolerance\n\tdef newton_update(x):\n\t\treturn x - func(x) / deriv(x)\n\n\treturn iter_solve(start, close_enough, newton_update)", "title": "" }, { "docid": "6700548177254f1c23c97c5567c8778f", "score": "0.6125581", "text": "def bisection (f, start, stop, iterations) :\n if argexists(debug) :\n print (\"||function start||\") \n print(\"function: \" + functionstring)\n print(\"start: \" + str(start))\n print(\"stop: \" + str(stop))\n print((f(start)*f(stop)) < 0)\n print(\"iterations left: \" + str(iterations))\n\n if argexists(fractions) :\n mid = (start+stop)/Fraction(2)\n else :\n mid = float((start+stop))/2\n\n if iterations == 0 :\n return mid\n\n if argexists(fractions) :\n if f(start) > nearzero :\n if f(mid) > nearzero :\n return bisection(f, mid, stop, iterations-1)\n elif f(mid) < -nearzero :\n return bisection(f, start, mid, iterations-1)\n elif f(mid) >= -nearzero and f(mid) <= nearzero :\n return mid\n if f(start) < -nearzero :\n if f(mid) < -nearzero :\n return bisection(f, mid, stop, iterations-1)\n elif f(mid) > nearzero :\n return bisection(f, start, mid, iterations-1)\n elif f(mid) >= -nearzero and f(mid) <= nearzero :\n return mid\n else :\n if f(start) > 0 :\n if f(mid) > 0 :\n return bisection(f, mid, stop, iterations-1)\n elif f(mid) < 0 :\n return bisection(f, start, mid, iterations-1)\n elif f(mid) == 0 :\n return mid\n if f(start) < 0 :\n if f(mid) < 0 :\n return bisection(f, mid, stop, iterations-1)\n elif f(mid) > 0 :\n return bisection(f, start, mid, iterations-1)\n elif f(mid) == 0 :\n return mid", "title": "" }, { "docid": "c1e90a5e647173d4f72f1b9f9d910acb", "score": "0.61161923", "text": "def newtonsNull(alpha, n, yarr, ybar, itMax, tol):\n J, F = funjacNull(alpha, n, yarr, ybar)\n eps = -F/J\n \n iteration = 0\n\n while (tol < np.abs(eps)/alpha and iteration < itMax):\n alpha += eps\n J, F = funjacNull(alpha, n, yarr, ybar)\n eps = -F/J\n iteration += 1\n\n print(\"Number of iterations used to approximate alpha = {}\".format(iteration))\n return alpha", "title": "" }, { "docid": "7ca6d404be2fcd7f7a84f58cadd988b5", "score": "0.6093696", "text": "def fp(f,x0,eps=1e-6,modes=[1,2],Ni=4,N=10,\n dist=lambda x,y : np.max(np.abs(x-y)), debug=False ):\n # compute initial error\n xx = f(x0); e = dist(xx,x0)\n suc = False\n # 1. iterate f; keep result if error decreases initially\n if 1 in modes:\n # Iterate orbit map several times, compute error\n x = reduce(lambda x, y : f(x), [x0] + range(Ni))\n xx = f(x); e = dist(xx,x); e0 = dist(x,x0)\n # If converging to fixed point\n if e < e0:\n suc = True\n # Iterate orbit map\n n = 0\n while n < N-Ni and e > eps:\n n = n+1; x = xx; xx = f(x)\n e = dist(xx,x)\n x0 = xx\n # 2. run fsolve on f(x)-x; keep result if non-nan\n if 2 in modes:\n x = x0\n # Try to find fixed point using op.fsolve\n xx = op.fsolve(lambda x : f(x)-x, x)\n # If op.fsolve succeeded\n if not np.isnan(xx).any():\n suc = True\n x0 = xx\n # 3. 
run descent1 on |f(x) - x|^2; keep result if non-nan\n if 3 in modes:\n x = x0\n xx,_ = descent1(lambda x : np.sum((f(x) - x)**2),x0,eps=eps,debug=debug)\n # If descent1 succeeded\n if not np.isnan(xx).any():\n suc = True\n x0 = xx\n # if all methods failed, return nan\n if not suc:\n x0 = np.nan*x0\n \n return x0", "title": "" }, { "docid": "79a82c3b497785a2cf49762d1aabfa47", "score": "0.608933", "text": "def sqrtrt(a,x=3):\n while True:\n print(x)\n y = (x + a / x) / 2#using newton methond we are calculating the sqaure root\n if y == x:\n break\n x = y\n\n return y", "title": "" }, { "docid": "4cdc21c0e99afbffe6a1231bbf0cae7b", "score": "0.6087064", "text": "def solve_newton_multi(f, x, ap=1e-6, rp=1e-4, ns=20):\n n = len(x)\n x = Matrix(len(x))\n for k in xrange(ns):\n fx = Matrix(f(x.flatten()))\n J = jacobian(f,x.flatten())\n if norm(J) < ap:\n raise ArithmeticError('unstable solution')\n (x_old, x) = (x, x-(1.0/J)*fx)\n if k>2 and norm(x-x_old)<max(ap,norm(x)*rp): return x.flatten()\n raise ArithmeticError('no convergence')", "title": "" }, { "docid": "d75a43c3ad9ddafce7a6bdab7802d976", "score": "0.60314393", "text": "def test_exact_root(self):\n f = lambda x: x**2 - 1.\n brackets = bracket_root(f,0.,init_step=1.)\n assert_inrange(1.,brackets)\n assert brackets == (0.,2.)", "title": "" }, { "docid": "2d6709c8f3e53f7fd45b3b15166bbf67", "score": "0.60300535", "text": "def ncpsolve(f, a, b, x, tol=10e-13, maxsteps=10, maxit=100, usesmooth=True, **kwargs):\n if usesmooth:\n _smooth = smooth\n else:\n _smooth = minmax\n n = x.shape[0]\n for i in range(maxit):\n fval, fjac = f(x, **kwargs)\n print i, fval\n ftmp, fjac = _smooth(f, x, a, b)\n ## infinity norm\n dx = - (la.solve(fjac, ftmp))\n fnorm = la.norm(ftmp, sp.inf)\n if fnorm < tol:\n break\n fnormold = sp.inf\n for backsteps in range(maxsteps):\n xnew = x + dx\n fnew = f(xnew, **kwargs)\n fnew = _smooth(f, xnew, a, b)[0]\n fnormnew = la.norm(fnew, inf)\n if fnormnew < fnorm:\n break\n if fnormold < fnormnew:\n dx *= 2\n break\n fnormold = fnormnew\n dx /= 2\n print(backsteps)\n ## No backstepping\n x += dx\n\n return x, fval", "title": "" }, { "docid": "52bce9ab1f530eb2a025b4e8b209d8bb", "score": "0.6011306", "text": "def bissecao(f,a,b,tol=1.0e-8):\n if( f(a) * f(b) > 0 ):\n print(\"Nao ha garantias de existir raiz nesse intervalo\")\n return None\n k = 0\n x = a\n print(\" passo\\txk \\t\\t f(xk)\")\n while abs(f(x)) > tol:\n x = a + (b-a)/2.0\n print(\" %d\\t%e\\t%e\" % (k,x,f(x)))\n if( f(a)*f(x) > 0 ):\n a = x\n else:\n b = x\n k = k + 1\n return x", "title": "" }, { "docid": "fd0333eb941abb13dc5684f904268c97", "score": "0.6004699", "text": "def newton_generator(f, x0, dfdx, f_args=()):\n while True:\n dfdx0 = dfdx(x0)\n\tf0 = f(x0,*f_args)\n yield x0, f0\n x0 -= f0/dfdx0", "title": "" }, { "docid": "94da616b42e3d48bb3a1465c6072509f", "score": "0.59870625", "text": "def bisection(function, left_point, right_point, max_iterations, error):\n #######################################################\n print('Iteration | x | f(x) | Error ')\n #######################################################\n iterations = 0\n # Set the left, right, and mid point and their respective values\n l = left_point; r = right_point; last_mid = (l + r)/2\n f_l = function(l); f_r = function(r); f_last = function(last_mid)\n #######################################################\n print(' %d %.7f %.7f -' % (iterations,last_mid,f_last))\n #######################################################\n # Check if any of them make the function zero\n if abs(f_l) 
== 0:\n return l\n \n if abs(f_r) == 0:\n return r\n \n if abs(f_last) == 0:\n return last_mid\n \n # Do the first iteration of the method to find the next mid point\n if f_l*f_last <= 0:\n r = last_mid\n f_r = f_last\n else:\n l = last_mid\n f_l = f_last\n \n # Calculate the mid point\n mid = (l+r)/2\n f_mid = function(mid)\n iterations += 1\n while(abs(f_mid - f_last) >= error):\n if iterations > max_iterations:\n break\n # If the change of sign is on the left side then the new right point\n # is the mid point.\n if f_l*f_mid <=0:\n r = mid\n f_r = f_mid\n # If the change of sign is on the right side then the new left point\n # is the mid point.\n else:\n l = mid\n f_l = f_mid\n \n ####################################################### \n print(' %d %.7f %.7f %.7f' % (iterations,last_mid,f_last,abs(f_mid - f_last)))\n #######################################################\n # Update the last mid point\n last_mid = mid\n f_last = f_mid\n # Calculate and evaluate the new mid point\n mid = (r+l)/2\n f_mid = function(mid)\n iterations += 1\n \n #print(iterations)\n return mid", "title": "" }, { "docid": "db680ac8a250a99e0fc9d71f0b2dbe49", "score": "0.5943015", "text": "def fmin_powell(\n func,\n x0,\n args=(),\n xtol=1e-4,\n ftol=1e-4,\n maxiter=None,\n maxfun=None,\n full_output=0,\n disp=1,\n retall=0,\n callback=None,\n direc=None,\n linesearch=brent,\n):\n # we need to use a mutable object here that we can update in the\n # wrapper function\n fcalls, func = wrap_function(func, args)\n x = asarray(x0).flatten()\n if retall:\n allvecs = [x]\n N = len(x)\n rank = len(x.shape)\n if not -1 < rank < 2:\n raise ValueError(\"Initial guess must be a scalar or rank-1 sequence.\")\n if maxiter is None:\n maxiter = N * 1000\n if maxfun is None:\n maxfun = N * 1000\n\n if direc is None:\n direc = eye(N, dtype=float)\n else:\n direc = asarray(direc, dtype=float)\n\n fval = squeeze(func(x))\n x1 = x.copy()\n iter = 0\n ilist = list(range(N))\n while True:\n fx = fval\n bigind = 0\n delta = 0.0\n for i in ilist:\n direc1 = direc[i]\n fx2 = fval\n fval, x, direc1 = _linesearch_powell(\n linesearch, func, x, direc1, xtol * 100\n )\n if (fx2 - fval) > delta:\n delta = fx2 - fval\n bigind = i\n iter += 1\n if callback is not None:\n callback(fcalls[0], x, fval, delta)\n if retall:\n allvecs.append(x)\n if abs(fx - fval) < ftol:\n break\n if fcalls[0] >= maxfun:\n break\n if iter >= maxiter:\n break\n\n # Construct the extrapolated point\n direc1 = x - x1\n x2 = 2 * x - x1\n x1 = x.copy()\n fx2 = squeeze(func(x2))\n\n if fx > fx2:\n t = 2.0 * (fx + fx2 - 2.0 * fval)\n temp = fx - fval - delta\n t *= temp * temp\n temp = fx - fx2\n t -= delta * temp * temp\n if t < 0.0:\n fval, x, direc1 = _linesearch_powell(\n linesearch, func, x, direc1, xtol * 100\n )\n direc[bigind] = direc[-1]\n direc[-1] = direc1\n\n warnflag = 0\n if fcalls[0] >= maxfun:\n warnflag = 1\n if disp:\n print(\n \"Warning: Maximum number of function evaluations has \" \"been exceeded.\"\n )\n elif iter >= maxiter:\n warnflag = 2\n if disp:\n print(\"Warning: Maximum number of iterations has been exceeded\")\n else:\n if disp:\n print(\"Optimization terminated successfully.\")\n print(f\" Current function value: {fval:f}\")\n print(\" Iterations: %d\" % iter)\n print(\" Function evaluations: %d\" % fcalls[0])\n\n x = squeeze(x)\n\n if full_output:\n retlist = x, fval, direc, iter, fcalls[0], warnflag\n if retall:\n retlist += (allvecs,)\n else:\n retlist = x\n if retall:\n retlist = (x, allvecs)\n\n return retlist", "title": "" }, { 
"docid": "84cd910be134059b6be6b624def6b297", "score": "0.59198374", "text": "def optimize_newton_multi(f, x, ap=1e-6, rp=1e-4, ns=20):\n x = Matrix(list(x))\n for k in xrange(ns):\n (grad,H) = (gradient(f,x.flatten()), hessian(f,x.flatten()))\n if norm(H) < ap:\n raise ArithmeticError('unstable solution')\n (x_old, x) = (x, x-(1.0/H)*grad)\n if k>2 and norm(x-x_old)<max(ap,norm(x)*rp): return x.flatten()\n raise ArithmeticError('no convergence')", "title": "" }, { "docid": "61737bae5361f28dd724218b055424a6", "score": "0.59181106", "text": "def newton (x, estimate):\r\n # Compute the difference and test for the base case\r\n difference = abs(x - estimate ** 2)\r\n if difference <= TOLERANCE:\r\n return estimate\r\n else:\r\n # Recurse after improving the estimate\r\n return newton (x, (estimate + x / estimate) / 2)", "title": "" }, { "docid": "6d8fcb69e4f97a371d64d0b07afab314", "score": "0.589373", "text": "def test_function_104(self):\n\t\tself.assertEqual(attempt.newtons_nsteps(10,3), 28.00077)", "title": "" }, { "docid": "a9c5c8e094cd20b09d29f1931df5523f", "score": "0.5891084", "text": "def findRoot(x, power, epsilon):\n if x < 0 and power%2 == 0: #Negative number has no even-powered \n #roots\n return None\n low = min(-1.0, x)\n high = max(1.0, x)\n ans = (high + low)/2.0\n while abs(ans**power - x) >= epsilon:\n if ans**power < x:\n low = ans\n else:\n high = ans\n ans = (high + low)/2.0\n return ans", "title": "" }, { "docid": "c65f6636611a85bb015515e3c96555f9", "score": "0.58817756", "text": "def roots(self, tolerance=(10 ** (-14))) -> List[Number]:\n p = self\n n = p.degree()\n\n # special case for a polynomial of degree 0\n if n == 0:\n return []\n\n # first, find the lower/upper bound for the roots\n # https://en.wikipedia.org/wiki/Properties_of_polynomial_roots#Lagrange's_and_Cauchy's_bounds\n cauchy = 1 + sum([abs(p[n] / p[-1]) for n in range(len(p) - 1)])\n\n # pick degree() random complex numbers\n z = [complex(random(), random()) for _ in range(n)]\n\n # set the magnitudes to be somewhere from (0, cauchy)\n for i in range(len(z)):\n z[i] = (random() * cauchy) * (z[i] / abs(z[i]))\n\n # iterate the roots\n pd = self.derivative()\n\n while True:\n z_new = [] # new roots\n converged = 0 # number of roots that converged\n for k in range(len(z)):\n # if a root converged (well enough), simply add it\n if abs(p.at(z[k])) <= tolerance:\n z_new.append(z[k])\n converged += 1\n else:\n z_new.append(\n z[k]\n - 1\n / (\n pd.at(z[k]) / p.at(z[k])\n - sum([1 / (z[k] - z[j]) for j in range(n) if j != k])\n )\n )\n\n # if all of them converged, return them\n if converged == len(z_new):\n z_modified = []\n\n # try to convert roots from complex to real and see if it improves the\n # approximation\n for e in z_new:\n places = 4\n\n # attempt to round the real and the complex part\n e = self.__beter_zero(complex(round(e.real, places), e.imag), e)\n e = self.__beter_zero(complex(e.real, round(e.imag, places)), e)\n\n # attempt to make a real\n e = self.__beter_zero(e.real, e)\n\n z_modified.append(e)\n\n return z_modified\n\n z = z_new", "title": "" }, { "docid": "5500d86e29d4e7b3495204e666b98626", "score": "0.5875174", "text": "def d(x):\n\treturn (f(x + tol) - f(x - tol)) / (2 * tol)", "title": "" }, { "docid": "b4fc6655cb4d1d7e3059035af8c50cae", "score": "0.58716583", "text": "def _icbrt_newton(n, s):\n d = n // (s * s)\n a = s + (d - s) // 3\n # Early return covering most of the cases where ``s`` is already the\n # correct answer.\n if a == s:\n return a\n\n while True:\n d = n // (a * a)\n 
if d >= a:\n return a\n a += (d - a) // 3", "title": "" }, { "docid": "2e1683d422831483fc4a12df338c2121", "score": "0.5870837", "text": "def pontofixo(f,phi,x,tol=1e-8,maxit=100):\n x0 = x\n for k in range(maxit):\n x1 = phi(x0) \n print(\" %d %e %e\" % ( k, x1, abs(x1-x0) )) \n if abs(x1 - x0)/abs(x1) <= tol:\n return x1\n x0 = x1 \n print(\"Numero maximo de iteracoes excedido\")\n return x1", "title": "" }, { "docid": "9ce0913b010e63c85c6f68b7b56844c7", "score": "0.58656424", "text": "def sqrtrt(a,x=3):\n while True:\n print(x)\n y = (x + a / x) / 2#using newton methond we are calculating the sqaure root\n if abs(y-x)< 0.0000001:# using this method spo taht float can also be\n break #calculted nonterminating no can also be calculated ass tedening to 00\n x = y\n\n return y", "title": "" }, { "docid": "9a9ac7a50ba3effebac480d96430235e", "score": "0.58615303", "text": "def square_root(x):\n approx = None\n guess = x / 2\n while approx != guess:\n approx = guess\n guess = (approx + x / approx) / 2\n\n return approx", "title": "" }, { "docid": "e8e98657de7eba1fbee966b8a8c7e07d", "score": "0.5856286", "text": "def secant(function, initial_approximation_1, initial_approximation_2, \n max_iterations, error):\n # Set the first and second value for the process\n last_x = initial_approximation_1\n mid_x = initial_approximation_2\n iterations = 0\n print('Iteration | x | f(x) | Error ')\n print(' %d %.7f %.7f -' % (iterations,last_x, function(last_x)))\n iterations += 1\n print(' %d %.7f %.7f -' % (iterations,mid_x, function(last_x)))\n den = (function(mid_x)*(mid_x-last_x))/(function(mid_x) - function(last_x))\n current_x = mid_x - den\n abs_error = abs(current_x - mid_x)\n iterations += 1\n print(' %d %.7f %.7f %.7f' % (iterations, current_x, \n function(current_x),abs_error))\n while abs(current_x - mid_x) >= error:\n if iterations > max_iterations:\n return current_x\n last_x = mid_x\n mid_x = current_x\n # Get the next value\n den = (function(mid_x)*(mid_x-last_x))/(function(mid_x) - function(last_x))\n current_x = mid_x - den\n iterations += 1\n abs_error = abs(current_x - mid_x)\n print(' %d %.7f %.7f %.7f' % (iterations, current_x, \n function(current_x),abs_error))\n \n return current_x", "title": "" }, { "docid": "79cf8a523dd5581ca8f7cba897aa01d3", "score": "0.5852776", "text": "def sqrt(n, iters=10):\n\n # Choose arbitrary initialization\n x = n*np.random.rand()\n # Repeat Newton's method\n for _ in range(iters):\n x = (1/2)*(x + n/x)\n return x", "title": "" }, { "docid": "9b38a0dbed8a954644738709f2d96ca8", "score": "0.58290416", "text": "def bisectionExtrema( f,a,b,nIterations=16,debug=False ):\n \"\"\"\n \" ^\n \" | .´\n \" | .'', .'\n \" |.' 
´'`\n \" +--------------------\n \" a c b\n \"\"\"\n extremaWasInside = True\n for i in range(nIterations):\n if b < a:\n a,b = b,a\n c = 0.5 *(a+b)\n # everything smaller than interval width / 6 should basically be enough\n # if the factor is too small it may result in problems like the\n # bisection quitting too early, thereby giving back wrong maxima!\n dx = 1e-2*(b-a)\n # if floating point precision exhausted then these differences will be 0\n # for a and b do only onesided, because else we would leave the given interval!\n left = f(a+dx) > f(a )\n middle = f(c+dx) > f(c-dx)\n right = f(b ) > f(b-dx)\n if left == middle and middle == right and i == 0:\n extremaWasInside = False\n\n if debug:\n print \"f at x=\",a,\"going up?\",left ,\" ( f(x+dx)=\",f(a+dx),\", f(x =\",f(a )\n print \"f at x=\",c,\"going up?\",middle,\" ( f(x+dx)=\",f(c+dx),\", f(x-dx)=\",f(c-dx)\n print \"f at x=\",b,\"going up?\",right ,\" ( f(x )=\",f(b ),\", f(x-dx)=\",f(b-dx)\n\n # If the sign of the derivative is the same for all, then\n # the maximum is not inside the specified interval!\n if ( left == middle and middle == right ):\n if extremaWasInside:\n break # this can also happen if dx is too small to resolve, so we break the search\n else:\n raise Exception(\n \"Specified Interval seems to not contain any extremum!\\n\" +\n \" [\"+str(a)+\",\"+str(b)+\"]: f(a)=\" + str(f(a)) +\n \", f((a+b)/2)=\" + str(f(c)) + \"f(b)=\" + str(f(b))\n )\n return None, None, None # unreasonable result, therefore error code\n elif left == middle:\n a = c\n elif middle == right:\n b = c\n else:\n # This happens if there are two extrema inside interval\n raise Exception( \"Specified Interval has more than one extremum!\" )\n return None, None, None\n\n c = 0.5*(a+b)\n return f(c), c, 0.5*(b-a)", "title": "" }, { "docid": "f6777ebf39663a9b9bb1e74c448da809", "score": "0.58225775", "text": "def approx_sqrt(x, epsilon):\n root = 0\n step = epsilon ** 2\n\n while abs(x - root ** 2) >= epsilon and root ** 2 <= x:\n root += step\n\n if abs(x - root ** 2) < epsilon:\n return root\n\n else:\n return \"Failed to find a square root\"", "title": "" }, { "docid": "91c8fe321ad44932348aea2ef02175a7", "score": "0.5793034", "text": "def test_error(err):\n f = lambda x1, x2, x3: np.exp(x1 + x2 + x3)\n x0 = (0, 0, 0)\n dx = 0.1\n maximum = -np.inf\n while True:\n # val = 0\n x = tuple([c+dx for c in x0])\n domain = [(x0[0]+i, x0[1]+i, x0[2]+i) for i in np.linspace(x0[0], x[0], 10)]\n for t in domain:\n if maximum > err:\n # compare(f, t)\n return t\n deriv_t = [part_diff(f, t, i) for i in range(len(t))] # all partial derivatives at point t as vector\n deriv_x = [part_diff(f, x, i) for i in range(len(x))] # all partial derivatives at point x as vector\n right_fact = sum([i**2 for i in (np.array(x) - np.array(x0))])**(1/2) # right factor of formula\n val = (np.matrix(deriv_t) * np.matrix(deriv_x).transpose()) * right_fact # total value\n if val >= maximum:\n maximum = val\n dx += dx", "title": "" }, { "docid": "de58ce62804014363969b9c0eb40bf67", "score": "0.5782929", "text": "def BC_newton(func,\n x, \n vertex_tpl, \n itmax = 50, \n tol = 1.e-15):\n xi = copy.deepcopy(x)\n i=0\n not_converged = True\n xyz = vertex_tpl[0] # which dimension are we in [0,1,2]?\n #index = vertex_tpl[1] # which vertex are we attempting to contract?\n n = 1##len(x[0])\n gn = xyz*n #pick the correct element of the gradient\n while(i<itmax and not_converged):\n #for xpt in [.00001,.5,.99999]:\n for xpt in [.5]:\n xm = copy.deepcopy(xi)\n midx = xi[xyz].value.getpoint(xpt)\n 
xm[xyz].value = midx\n f = func(xm) #\n F = func(xi) #\n \n if False:\n inner_grad = ia(f.grad[0,gn].inf,f.grad[0,gn].sup)\n inner_nx = xm[gn].value - f.value/inner_grad\n test_x_inner = xi[xyz].value & inner_nx\n inner_mid = test_x_inner.getpoint(.5)\n inner_midscalar = copy.deepcopy(xi)\n inner_midscalar[xyz].value = inner_mid\n inner_midvec = copy.deepcopy(xi)\n f_innerscalar = func(inner_midscalar)\n f_inner = func(inner_midvec)\n #newgrad = ia()\n \n nx = xm[gn].value - f.value/F.grad[0,gn]\n #nx = xm[gn].value - F.value/F.grad[0,gn]\n \n xiold = copy.deepcopy(xi)\n \n xi[xyz].value = xi[xyz].value & nx\n \n not_converged = IntervalAnalysis.some_convergence_criteria(\n xiold[xyz],xi[xyz],tol)\n \n i+=1\n return xi, i, F", "title": "" }, { "docid": "c63f168bf948165ab8e4ee5a731a0567", "score": "0.57794905", "text": "def zero_brentq(f, *args, **kwargs):\n if 'a' in kwargs and 'b' in kwargs:\n # simple one: the interval [a,b] is known\n a = kwargs[\"a\"]\n b = kwargs[\"b\"]\n else:\n a, b = 0., 1. # choose_interval_on_graph(f, 0, 2, *args)\n\n # add an \"interval\" variable, that will be an output (save all the different intervals that have been previously used).\n # first check if any of the intervals proposed would be valid. If yes,\n # then use it.\n\n\n# iterations = 0\n# while np.sign(f(a, *args)) == np.sign(f(b, *args)) and iterations <= 10 and a!=b:\n# if iterations == 0:\n# print \"Please choose other values for the interval so that the function changes sign between the two borns.\"\n# a, b = choose_interval_on_graph(f, a, b, *args)\n# elif iterations == 10:\n# print \"To solve this, you need to choose the interval such as the function changes sign. You have a last chance to find one: please type the two values of the interval.\"\n# a = float(input(\"Enter first value: \"))\n# b = float(input(\"Enter second value: \"))\n# else:\n# a, b = choose_interval_on_graph(f, a-(b-a), b+(b-a), *args)\n# iterations += 1\n\n a, b = interval(f, a, b, *args)\n\n if a == b:\n solution = 0.\n else:\n solution = brentq(f, a, b, args=args)\n return solution", "title": "" }, { "docid": "b4bccebbcc9164e70bd405522d64a56d", "score": "0.577173", "text": "def powell(f, x, xtol=1e-4, ftol=1e-4, max_iter=500, args=None):\n from scipy.optimize import minimize\n # Check if function is callable\n if not callable(f):\n raise ValueError('The argument `f` must be a callable function.')\n\n # Check extra arguments\n if args is None:\n args = ()\n elif type(args) != tuple:\n raise ValueError('The argument `args` must be either None or a tuple.')\n\n # Check stopping criteria\n xtol = float(xtol)\n ftol = float(ftol)\n max_iter = int(max_iter)\n if max_iter < 1:\n raise ValueError('Maximum number of iterations must be at least 1.')\n\n # Minimize\n res = minimize(f, x, args=args, method='Powell', tol=None, options={\n 'xtol': xtol, 'ftol': ftol, 'maxiter': max_iter, 'disp': False})\n if not res.success:\n raise Exception('Error optimizing function: ' + str(res.message))\n return res.x, res.fun", "title": "" }, { "docid": "c66b8a40086d592466ef534d26279174", "score": "0.5767914", "text": "def sim_anneal(f, x0, T0, r_T = 0.95, x_iterations = 10, step_iterations = 10, tol = 1e-6):\n \n # initialize lists containing x, f, T, and step size values.\n # lengths indeterminate\n x_vals = [0] # contains all accepted x\n f_vals = [0] # f(x_vals)\n T_vals = [[0, 0, 0]] # list of [T, x_T, f_T]\n step_sizes = [0] # for discussion purposes\n\n # initialize counters\n x_index = 0 # counter of accepted moves, under fixed T and step 
size\n T_index = 0 # counter of T\n tot_index = 0 # counter of cumulated accepted moves\n \n # populate lists with initial values\n x_vals[0] = x0\n f_vals[0] = f(x0)\n T_vals[0] = [T0, x0, f(x0)]\n x_opt = x0 # x value of the optimum, initialized to x0\n f_opt = f(x0) # f value of the optimum, initialized to f(x0)\n \n terminate = False # terminating condition, initialized to False\n \n \n # loops through temperatures \n # until the terminating condition has been reached\n while not terminate:\n T_current = T_vals[T_index][0] # get current temperature\n step_size = 1 # set the initial step size\n \n # loops through different step sizes\n # for (step_iterations) times\n for step_index in range(step_iterations):\n \n # to adjust step size, need to compare the no. of accepted moves\n # with the no. of all tentative moves under the current step size\n \n attempts = 0 # no. of tentative moves\n \n # loops through different random moves\n # until (x_iterations) of random moves have been accepted\n # uses while-loop because some moves might be rejected\n # so the total number of iterations is indeterminate\n while x_index < x_iterations: \n attempts += 1 # count one tentative move\n x_current = x_vals[tot_index] # get current x\n f_current = f_vals[tot_index] # get current f\n \n step = random.uniform(-1, 1)*step_size # generate random step\n x_new = x_current + step # generate new x\n f_new = f(x_new) # generate new f\n df = f_new - f_current # change in \"energy\"\n \n if df < 0:\n # always accepts the move if energy is lower\n x_index += 1 # count one accepted move\n tot_index += 1 # count one accepted move\n x_vals.append(x_new)\n f_vals.append(f_new)\n \n # update the optimum because df < 0\n x_opt = x_new \n f_opt = f_new\n else:\n # even if df > 0, chance exists to accept the move\n chance = np.exp(-df/T_current) # Metropolis\n p = random.uniform() # random number in [0, 1]\n if p < chance:\n # accepts the move if probability falls in chance\n # the optimum is not updated because df > 0\n x_index += 1 # count one accepted move\n tot_index += 1 # count one accepted move\n x_vals.append(x_new)\n f_vals.append(f_new)\n \n # after (x_iterations) of moves have been accepted\n # need to adjust step size and reset counter for next step size\n # success_ratio = no. accepted moves/no. tentative moves\n success_ratio = x_iterations/attempts \n factor = success_ratio/0.5 # compared with the ideal 0.5\n step_size *= factor # change step_size\n step_sizes.append(step_size)\n \n x_index = 0 # reset counter \n \n # after (step_iterations) of step size changes have been made\n # need to:\n # a. record the optimum under the current temperature\n # b. reset counter\n # c. check the terminating condition\n # if satisfied --> done\n # otherwise need to reduce temperature and reset counter for the next T\n \n T_vals[T_index][1] = x_opt # record current optimal x\n T_vals[T_index][2] = f_opt # record current optimal f\n \n step_index = 0 # reset counter \n \n if len(T_vals) > 3: # make sure steady state\n f_current = T_vals[T_index][2] # get current f\n f_one_before = T_vals[T_index-1][2] # f for previous T\n f_two_before = T_vals[T_index-2][2] # f for prev. prev. T\n f_thr_before = T_vals[T_index-3][2] # f for prev. prev. prev. 
T\n \n # calculate improvements compare with previous temperatures\n df_1 = abs(f_current - f_one_before)\n df_2 = abs(f_current - f_two_before)\n df_3 = abs(f_current - f_thr_before)\n if df_1 < tol and df_2 < tol and df_3 < tol:\n # exit loop if no major improvement can be observed\n terminate = True\n else:\n # otherwise, proceed to adjust T\n T_new = 0.9*T_current # get new temperature\n T_index += 1 # count new temperature\n T_vals.append([T_new, x_opt, f_opt]) # update T list\n \n # artificially set the current accepted move to the optimum\n # by adding an accepted move\n # Rationale: approximations for the new T need to start\n # from the current optimum, but without modification,\n # the current accepted move might not be the current optimum\n # i.e. can be a less ideal but still accepted Metropolis move\n tot_index += 1\n x_vals.append(x_opt)\n f_vals.append(f_opt)\n else:\n # keep looping if not in steady state\n # same modifications as above\n T_new = r_T*T_current \n T_index += 1 \n T_vals.append([T_new, x_opt, f_opt]) \n tot_index += 1\n x_vals.append(x_opt)\n f_vals.append(f_opt)\n \n return T_vals, x_vals, f_vals, x_opt, f_opt, step_sizes", "title": "" }, { "docid": "f2fc1f1f222698f235d220814a512715", "score": "0.57517123", "text": "def weiszfeld_method(y,epsilon, max_iteration):\r\n\r\n def distance_func(x):\r\n return cdist([x], y)\r\n\r\n # initial guess: centroid\r\n guess = y.mean(axis=0)\r\n\r\n iters = 0\r\n\r\n while iters < max_iteration:\r\n distances = distance_func(guess).T\r\n print(distances)\r\n print('old')\r\n # catch divide by zero\r\n # TODO: Wikipedia cites how to deal with distance 0\r\n distances = np.where(distances == 0, 1, distances)\r\n print(distances)\r\n guess_next = (y/distances).sum(axis=0) / (1./distances).sum(axis=0)\r\n\r\n guess_movement = np.sqrt(((guess - guess_next)**2).sum())\r\n\r\n guess = guess_next\r\n\r\n if guess_movement <= epsilon:\r\n break\r\n\r\n iters += 1\r\n\r\n return guess", "title": "" }, { "docid": "017824b77eacce350392a3a6dd3abff4", "score": "0.57419866", "text": "def fixed_point_root(W, ext, r0, k, n, root_kwargs={}, **kwds):\n io_fun = make_io_fun(k=k, n=n, **kwds)\n args = (ext, W, k, n, io_fun)\n return scipy.optimize.root(fixed_point_equation, r0, args, **root_kwargs)", "title": "" }, { "docid": "aa3f3b1c5d0d180c8192bd1598cd28a6", "score": "0.5741451", "text": "def nelder_mead(f, x, xatol=1e-4, fatol=1e-4, max_iter=500, args=None):\n from scipy.optimize import minimize\n # Check if function is callable\n if not callable(f):\n raise ValueError('The argument `f` must be a callable function.')\n # Check extra arguments\n if args is None:\n args = ()\n elif type(args) != tuple:\n raise ValueError('The argument `args` must be either None or a tuple.')\n # Check stopping criteria\n # SciPy's Nelder-Mead has the following stopping criteria:\n # `fatol` (used to be called ftol): Absolute error in x between iterations\n # `xatol` (used to be called xtol): Absolute error in f between iterations\n # maxiter: Maximum iterations\n # maxfev: Maximum number of function evaluations\n # The argument `tol` sets `fatol` and `xatol` simultaneously\n # Both fatol and xatol must be met for the method to halt\n fatol = float(fatol)\n xatol = float(xatol)\n max_iter = int(max_iter)\n if max_iter < 1:\n raise ValueError('Maximum number of iterations must be at least 1.')\n # Minimize\n res = minimize(\n f, x, args=args, method='Nelder-Mead', tol=None, options={\n 'xatol': xatol,\n 'fatol': fatol,\n 'maxiter': max_iter,\n 'disp': False\n 
})\n # The success flag is only false on max_iter (not an error) or max function\n # evaluations (should that be an error?)\n #if not res.success:\n # raise Exception('Error optimizing function: ' + str(res.message))\n return res.x, res.fun", "title": "" }, { "docid": "eee50da688829a44aff8d454b9b370fb", "score": "0.5719784", "text": "def bisection_method(self, x_0, x_1):\n\n self.error_a = 101\n i = 0\n x_old = x_0\n while i < self.n_iter and self.exp_err < self.error_a:\n\n x_temp = (x_0 + x_1)/2\n\n if x_temp != 0:\n self.error_a = error_formula(x_temp, x_old)\n\n test_func = self.function(x_0)*self.function(x_temp)\n\n if test_func > 0:\n x_0 = x_temp\n elif test_func < 0:\n x_1 = x_temp\n else:\n self.error_a = 0\n\n x_old = x_temp\n i += 1\n\n print(\"Current iteration for bisection method is %d, with root value of %.4f\"\n %(i, x_old))\n print(\"Final result using the bisection method is %.4f with %d iterations\" %(x_old, i))", "title": "" }, { "docid": "f5a1714ab5b1ffa84f006668937e7bc2", "score": "0.57073313", "text": "def searchKRK(n,x0,y0,z0,k1,k2,xDer,yDer,tol=1e-3,nmax=20):\n delta = 1\n i = 0\n \n while (not isclose(delta,0,rel_tol=tol, abs_tol=tol) and i <= nmax):\n i += 1\n # The interval is cut in two\n k0 = (k1+k2)/2.\n \n k = k1\n x,y1,z1 = tirRK(n,1,y0,z0,xDer,yDer,x0)\n k = k0 \n x,y2,z2 = tirRK(n,1,y0,z0,xDer,yDer,x0)\n print(\"k : {}\".format(k))\n\n # root is in [a,x0] : the sign changed \n if (y1[-1]*y2[-1] < 0 ):\n k2 = k0\n # root is in [x0,b] : sign unchanged \n elif (y1[-1]*y2[-1] > 0):\n k1 = k0\n delta = y2[-1]\n \n print(\"[k1,k2] = [{},{}], i = {}\".format(k2,k1,i))\n return k0", "title": "" }, { "docid": "c7af04e76d2a7e03b91605c65bc4d68f", "score": "0.5706925", "text": "def compute_root(poly, x_0, epsilon):\n # TO DO ...\n\n x = x_0\n count = 1\n\n while abs(evaluate_poly(poly, x)) >= epsilon:\n\n x -= evaluate_poly(poly, x)/evaluate_poly(compute_deriv(poly), x)\n\n count += 1\n\n return (x,) + (count,)", "title": "" }, { "docid": "ce3f8305b1550417761ecd42b8948cd5", "score": "0.5706049", "text": "def bisect(f, a, b, tol=1e-4, **kwargs):\n\n if sp.any(a > b):\n print(\"Lower bound greater than upper bound\")\n return \n sa = sp.sign(f(a, **kwargs))\n sb = sp.sign(f(b, **kwargs))\n if sp.any(sa == sb):\n print(\"Root not bracketed\")\n return\n ## Initializations\n dx = 0.5 * (b - a)\n tol = dx * tol\n x = a + dx\n dx = sb * dx\n\n while sp.any(sp.absolute(dx) > tol):\n print dx\n dx *= 0.5\n x -= sp.sign(f(x, **kwargs)) * dx\n\n return x", "title": "" }, { "docid": "ace76e5dd179720d833a2d5d211f8ac5", "score": "0.57048774", "text": "def sqrt(x):\n if x < 0:\n raise ValueError('can\\'t find root of -ve number {}'.format(x))\n guess = x\n i = 0\n while guess ** 2 != x and i < 20 :\n guess = (guess + x / guess) * 0.5\n i += 1\n return guess", "title": "" }, { "docid": "e91e67899be70792affc1dd11813bd57", "score": "0.5695891", "text": "def regula_falsi(function, left_point, right_point, max_iterations, error):\n #######################################################\n print('Iteration | x | f(x) | Error ')\n #######################################################\n iterations = 0\n # Set the left, right, and mid point and their respective values\n l = left_point; r = right_point; \n f_l = function(l); f_r = function(r); \n\n # Find the middle point of the line\n last_mid = (l * f_r - r * f_l) / (f_r - f_l) \n f_last = function(last_mid)\n ########################################################\n print(' %d %.7f %.7f -' % (iterations,last_mid,f_last))\n 
#######################################################\n \n # Check if any of them make the function zero\n if abs(f_l) == 0:\n return l\n \n if abs(f_r) == 0:\n return r\n \n if abs(f_last) == 0:\n return last_mid\n \n # Do the first iteration of the method to find the next mid point\n if f_l*f_last <= 0:\n r = last_mid\n f_r = f_last\n else:\n l = last_mid\n f_l = f_last\n \n # Calculate the mid point\n mid = (l * f_r - r * f_l) / (f_r - f_l) \n f_mid = function(mid)\n iterations = 1\n while(abs(f_mid - f_last) >= error):\n if iterations > max_iterations:\n break\n # If the change of sign is on the left side then the new right point\n # is the mid point.\n if f_l*f_mid <=0:\n r = mid\n f_r = f_mid\n # If the change of sign is on the right side then the new left point\n # is the mid point.\n else:\n l = mid\n f_l = f_mid\n ####################################################### \n print(' %d %.7f %.7f %.7f' % (iterations,last_mid,f_last,abs(f_mid - f_last)))\n #######################################################\n # Update the last mid point\n last_mid = mid\n f_last = f_mid\n # Calculate and evaluate the new mid point\n mid = (l * f_r - r * f_l) / (f_r - f_l)\n f_mid = function(mid)\n iterations += 1 \n \n return mid", "title": "" }, { "docid": "e6e8d909b4fcd9416bde5a5cd54e2851", "score": "0.5695762", "text": "def NewtonRaphsonModel(n,functions):\n\t\n\t#This part plot the functions if they have 2 variables.\n\tif n==2:\n\t\tplot2Variables(functions)\n\n\n\t#This list contains the titles for the DataFrame\n\tlabeledColumns=['x'+str(i) for i in range(n)]+['Error']\n\t\n\tXi=insertValuesForX0(n)\n\titerations=insertIrerations()\n\ttolerance=insertTolerance()\n\n\t#This variable keeps values for X in ith iteration\n\tvaluesForDF=list()\n\tj=0\n\t#This for loop update the values for Xi until it finds a solution for the system with the tolerance established or when it finishes the number maximum of iterations\n\tfor iteration in range(iterations):\n\t\tpreviousXi=Xi\n\t\t\n\t\tif iteration==0:\n\t\t\taux=[i for i in Xi]+['-']\n\t\t\tvaluesForDF.append(aux)\n\t\t\tJacobian,Fx=FxAndJacobianEvaluatedAtXi(Xi,functions)\n\t\t\taux=[]\n\t\t\n\t\tj+=1\n\t\t#Update the value for Xi\n\t\tXi=numpy.array(Xi).reshape(n,1)-numpy.dot(numpy.linalg.inv(Jacobian),numpy.array(Fx).reshape(n,1))\n\t\tXi=Xi.reshape(1,n)[0].tolist()\n\t\tJacobian,Fx=FxAndJacobianEvaluatedAtXi(Xi,functions)\n\t\t\n\t\taux=[i for i in Xi]\n\t\t#Spectral norm for the error\n\t\taux.append(numpy.absolute((numpy.array(Xi)-numpy.array(previousXi))).max())\n\t\tvaluesForDF.append(aux)\n\t\taux=[]\n\t\t\n\t\t#If in the ith iteration the algorithm reached minimum tolerance it breaks\n\t\tif numpy.absolute((numpy.array(Xi)-numpy.array(previousXi))).max()<=tolerance:\n\t\t\tprint('Tolerance reached!')\n\t\t\tbreak\n\t#Print message if the algortihm reached the maximun of iterations\n\tif j==iterations:\n\t\tprint('\\nMaximum iterations reached!\\n')\n\t\n\treturn pandas.DataFrame(data=valuesForDF,columns=labeledColumns)", "title": "" }, { "docid": "831a7194241ad6eea4841789d931cc48", "score": "0.5685939", "text": "def sqrt_newton(self, c):\r\n epsilon = 1e-15\r\n t = c\r\n # repeating until the desired accuracy is reached\r\n while abs(t - c / t) > epsilon * t:\r\n t = (c / t + t) / 2\r\n print(t)", "title": "" }, { "docid": "1501c082774884bee6f49013cc421803", "score": "0.56401294", "text": "def real_root_finding(interval):\n root = list()\n for i in range(len(interval) - 1):\n root.insert(i, iteration(interval[i], interval[i+1]))\n 
return root", "title": "" }, { "docid": "47b08465afdd59bcbf05819250e27474", "score": "0.562287", "text": "def continued_fraction_of_root(n, max_iter=1000):\n root = n ** .5 # root(23)\n a = int(root) # a_0\n yield a\n nm, dr = 1, -a\n for _ in xrange(max_iter):\n d = (n - dr**2) / nm\n a = int((root - dr) / d)\n dr, nm = -dr - a * d, d\n yield a", "title": "" }, { "docid": "d27a714b852526def678816f9780fec0", "score": "0.55896384", "text": "def sgd(f, x0, step=0.01, iterations=100000, anneal_every = 1000,\n print_every=10, useSaved=True, test_func = None, test_every=100,\n start_iterations=0, saved_params_file_name=None):\n\n # get logger\n logger = Logger()\n if useSaved:\n start_iter, oldx, state = load_saved_params(saved_params_file_name, start_iterations)\n if start_iter > 0:\n x0 = oldx\n step *= 0.5 ** (start_iter / anneal_every)\n\n if state:\n random.setstate(state)\n else:\n start_iter = 0\n\n x = x0\n # test init parama\n test_func(x)\n \n expcost = None\n\n for iter in range(start_iter, iterations + 1):\n\n if iter % SAVE_PARAMS_EVERY == 0 and iter != 0:\n save_params(iter, x, logger.get_dir())\n\n cost, grad = f(x)\n\n x -= step * grad\n\n if iter % print_every == 0:\n if not expcost:\n expcost = cost\n else:\n expcost = .95 * expcost + .05 * cost\n logger.log(\"iter %d: expcost %f - cost %f\" % (iter, expcost, cost))\n\n if iter % test_every == 0:\n # test the model\n test_func(x)\n\n if iter % anneal_every == 0:\n step *= 0.5\n\n\n return x", "title": "" }, { "docid": "6808ba77e9fd0231bbc128681fcd7c78", "score": "0.5573779", "text": "def bisect(f, ab, tol=1e-12, nmax=50):\n a,b = ab\n for n in range(nmax):\n c = (a + b) / 2.\n if ( f(c) == 0 ) or ( (b - a) / 2. < tol ):\n return c\n elif ( (b - a) / 2. < tol ):\n return b\n if np.sign(f(c)) == np.sign(f(b)):\n b = c\n else:\n a = c\n return np.nan", "title": "" }, { "docid": "83c47003d64828ab32b467434c554a05", "score": "0.55713516", "text": "def optimize_newton_multi_imporved(f, x, ap=1e-6, rp=1e-4, ns=20, h=10.0):\n x = Matrix(list(x))\n fx = f(x.flatten())\n for k in xrange(ns):\n (grad,H) = (gradient(f,x.flatten()), hessian(f,x.flatten()))\n if norm(H) < ap:\n raise ArithmeticError('unstable solution')\n (fx_old, x_old, x) = (fx, x, x-(1.0/H)*grad)\n fx = f(x.flatten())\n while fx>fx_old: # revert to steepest descent\n (fx, x) = (fx_old, x_old)\n norm_grad = norm(grad)\n (x_old, x) = (x, x - grad/norm_grad*h)\n (fx_old, fx) = (fx, f(x.flatten()))\n h = h/2\n h = norm(x-x_old)*2\n if k>2 and h/2<max(ap,norm(x)*rp): return x.flatten()\n raise ArithmeticError('no convergence')", "title": "" }, { "docid": "18d52237956abcfb43095b7d0b7c2935", "score": "0.5554733", "text": "def test_function_zero4(self):\n\t\tself.assertEqual(attempt.newtons_nsteps(0, 15), 26.53449)", "title": "" }, { "docid": "020d72689c7d40d776a7a47a83265ac5", "score": "0.55297375", "text": "def find_zero(func, a, b, tol):\n fa = func(a)\n fb = func(b)\n # Note: it's useful to store the f(x) evaluations\n # if you use them more than once, to avoid extra work.\n # you should minimize the # of f(x) evaluations used\n\n # ... check that the initial interval brackets ...\n ...\n\n # ... 
the go through the bisection process ...\n it = 0\n while False:\n c = 0.5*(a + b)\n\n return c, it", "title": "" }, { "docid": "f88d36a17ca9c025eb61ed7fb63954f0", "score": "0.5527046", "text": "def inv_digamma(x, tol=1e-3, max_iterations=10, init_strategy=\"batir\"):\n gamma = -0.5772156649015329 # digamma(1)\n\n x_old = np.piecewise(\n x,\n [x >= -2.22, x < -2.22],\n [(lambda z: np.exp(z) + 0.5), (lambda z: -1 / (z + gamma))],\n )\n for i in range(max_iterations):\n x_new = x_old - (digamma(x_old) - x) / trigamma(x_old)\n if LA.norm(x_new - x_old) < tol:\n return x_new, i + 1\n x_old = x_new\n raise RuntimeError(\"Failed to converge inv_digamma\")", "title": "" }, { "docid": "6fae117d6d8c1d3e5a430dbb1c8cfd74", "score": "0.5525415", "text": "def hessian_free_newton(oracle, x_0, tolerance=1e-4, max_iter=500, \n line_search_options=None, display=False, trace=False):\n history = defaultdict(list) if trace else None\n line_search_tool = get_line_search_tool(line_search_options)\n x_k = np.copy(x_0)\n grad_k = oracle.grad(x_k)\n norm_grad_k = norm_grad_0 = np.linalg.norm(oracle.grad(x_0))\n d_k = -grad_k\n \n start = clock()\n \n if trace:\n history['func'].append(oracle.func(x_k))\n history['grad_norm'].append(norm_grad_k)\n history['time'].append(clock() - start)\n if x_k.size <= 2:\n history['x'].append(x_k)\n \n for i in range(max_iter):\n # Debug information\n if display:\n print(\"iteration: \", i, \"||grad x_k||=\", norm_grad_k)\n \n eta_k = min(0.5, sqrt(norm_grad_k))\n \n #Conjugate Gradients\n while True:\n d_k, msg, _ = conjugate_gradients(lambda d: oracle.hess_vec(x_k, d), -grad_k, d_k, \n tolerance=eta_k, trace=False)\n # Check\n check = (grad_k * d_k).sum() < 0\n if check:\n break\n eta_k /= 10\n \n # Line search alpha\n alpha = line_search_tool.line_search(oracle, x_k, d_k, previous_alpha=1.0)\n \n # Update x_k\n x_k = x_k + alpha * d_k\n \n #Call oracle\n grad_k = oracle.grad(x_k)\n norm_grad_k = np.linalg.norm(grad_k)\n \n #trace\n if trace:\n history['func'].append(oracle.func(x_k))\n history['grad_norm'].append(norm_grad_k)\n history['time'].append(clock() - start)\n if x_k.size <= 2:\n history['x'].append(x_k)\n \n #Stopping criterion\n if norm_grad_k ** 2 <= tolerance * norm_grad_0 ** 2:\n return x_k, 'success', history\n \n if norm_grad_k ** 2 > tolerance * norm_grad_0 ** 2:\n return x_k, 'iterations_exceeded', history\n else:\n return x_k, 'success', history", "title": "" }, { "docid": "df7aeb2632b1291d5b74fef4e7f9e773", "score": "0.5510598", "text": "def bisect1(f, f_a, f_b, a, b, tol):\n\n c = 0.5*(a + b)\n f_c = f(c)\n \n if abs(f_c) < tol:\n return c\n \n if f_a * f_c < 0: # root between a and b\n return bisect1(f, f_a, f_c, a, c, tol)\n else:\n return bisect1(f, f_c, f_b, c, b, tol)", "title": "" }, { "docid": "4e737798ad1a366bbe7870486756d5e0", "score": "0.54820096", "text": "def regula_falsi(self, x_0, x_1):\n\n self.error_a = 101\n x_old = x_0\n i = 0\n while i < self.n_iter and self.exp_err < self.error_a:\n\n x_temp_upper = x_0*self.function(x_1)-x_1*self.function(x_0)\n x_temp_lower = self.function(x_1)-self.function(x_0)\n x_temp = x_temp_upper / x_temp_lower\n\n if x_temp != 0:\n self.error_a = error_formula(x_temp, x_old)\n\n test_func_1 = self.function(x_0)*self.function(x_temp)\n test_func_2 = self.function(x_1)*self.function(x_temp)\n\n if test_func_1 >= 0:\n x_0 = x_temp\n elif test_func_2 >= 0:\n x_1 = x_temp\n else:\n self.error_a = 0\n\n x_old = x_temp\n i += 1\n print(\"Current iteration for regula falsi is %d, with root value of %.4f\"\n %(i, 
x_old))\n print(\"Final result using regula falsi is %.4f with %d iterations\" %(x_old, i))", "title": "" }, { "docid": "f21f92d4b2d6b4131a03e8fc0df118bd", "score": "0.5478267", "text": "def broyden_solver(f, x0, y0=None, tol=1E-9, maxcount=100, backtrack_c=0.5, noisy=True):\n\n x, y = x0, y0\n if y is None:\n y = f(x)\n\n # initialize J with Newton!\n J = obtain_J(f, x, y)\n for count in range(maxcount):\n if noisy:\n printit(count, x, y)\n\n if np.max(np.abs(y)) < tol:\n return x, y\n\n dx = np.linalg.solve(J, -y)\n\n # backtrack at most 29 times\n for bcount in range(30):\n # note: can't test for improvement with Broyden because maybe\n # the function doesn't improve locally in this direction, since\n # J isn't the exact Jacobian\n try:\n ynew = f(x + dx)\n except ValueError:\n if noisy:\n print('backtracking\\n')\n dx *= backtrack_c\n else:\n J = broyden_update(J, dx, ynew - y)\n y = ynew\n x += dx\n break\n else:\n raise ValueError('Too many backtracks, maybe bad initial guess?')\n else:\n raise ValueError(f'No convergence after {maxcount} iterations')", "title": "" }, { "docid": "40d3f85859a795601b18502c06c278ca", "score": "0.5466434", "text": "def IQ_interpolation(\n f: Callable,\n x0: float, \n x1: float, \n y0: Optional[float]=None, \n y1: Optional[float]=None, \n x: Optional[float]=None,\n xtol: float=0.,\n ytol: float=5e-8,\n args: Tuple[Any, ...]=(), \n maxiter: int=50,\n checkroot: bool=False, \n checkiter: bool=True, \n checkbounds: bool=True) -> float:\n if checkroot: utils.check_tols(xtol, ytol)\n abs_ = abs\n if x is None:\n guess_x = True\n x = 1e32\n else:\n guess_x = utils.not_within_bounds(x, x0, x1)\n if not guess_x: \n y = f(x, *args)\n if not checkroot and abs_(y) < ytol or y == 0: return x # Lucky guess\n if y0 is None: y0 = f(x0, *args)\n if not checkroot and abs_(y0) < ytol or y0 == 0: return x0 # Lucky guess\n if y1 is None: y1 = f(x1, *args)\n if not checkroot and abs_(y1) < ytol or y1 == 0: return x1 # Lucky guess\n if y1 < 0.: x1, y1, x0, y0 = x0, y0, x1, y1\n df0 = -y0\n dx = x1 - x0\n if checkbounds: utils.check_bounds(y0, y1)\n if guess_x:\n x = utils.false_position_iter(x0, x1, dx, y0, y1, df0, x0)\n y = f(x, *args)\n if not checkroot and abs_(y) < ytol or y == 0: return x # Lucky guess\n for iter in range(maxiter):\n if y > 0.:\n y2 = y1\n x2 = x1\n x1 = x\n err = y1 = y\n elif y < 0.:\n y2 = y0\n x2 = x0\n x0 = x\n y0 = y\n err = df0 = -y\n else:\n return x\n dx = x1 - x0\n xtol_satisfied = abs_(dx) < xtol\n ytol_satisfied = err < ytol\n if checkroot:\n if ytol_satisfied and xtol_satisfied:\n return x\n elif xtol_satisfied or ytol_satisfied:\n return x\n x = utils.IQ_iter(y0, y1, y2, x0, x1, x2, dx, df0, x)\n y = f(x, *args)\n if checkiter: utils.raise_iter_error()\n return x", "title": "" }, { "docid": "f2e8b58d09f483e0cfd2a1f2a604b0dd", "score": "0.54657716", "text": "def newtonsUnr(m, alphavec, nvec, yarr, ybarvec, itMax, tol):\n # Data for first iteration of Newton's\n Jinv, F = funjacUnr(m, alphavec, nvec, yarr, ybarvec)\n eps = -np.matmul(Jinv, F)\n epsRel = eps / alphavec\n diff = np.sqrt(np.sum(epsRel**2)/m)\n\n # Initialize iteration\n iteration = 0\n \n # Apply Newton's until diff drops to or below tol\n while (tol < diff and iteration < itMax):\n alphavec += eps\n Jinv, F = funjacUnr(m, alphavec, nvec, yarr, ybarvec)\n eps = -np.matmul(Jinv, F)\n epsRel = eps / alphavec\n diff = np.sqrt(np.sum(epsRel**2)/m)\n iteration += 1\n\n print(\"Number of iterations used to approximate alphavec = {}\".format(iteration))\n return alphavec", "title": "" 
}, { "docid": "49e17bb0a78b2c33bae94886919497be", "score": "0.5464874", "text": "def solve(self, b, D=None, tol=1e-10, maxiters=500):\n x = b\n r = b - self.dot(x, D=D)\n p = r\n rsold = np.dot(r, r)\n for k in range(maxiters):\n Ap = self.dot(p, D=D)\n alpha = rsold / np.dot(p, Ap)\n x = x + alpha * p\n r = r - alpha * Ap\n rsnew = np.dot(r, r)\n if np.sqrt(rsnew) < tol:\n break\n p = r + (rsnew / rsold) * p\n rsold = rsnew\n if k == maxiters:\n logger.warning(\"Iterative solver did not converge.\")\n return x", "title": "" } ]
eff73b351cf5214d1ceb7090ea18fe4c
Serve main content (global storage).
[ { "docid": "ff7847cffa5235339a19a9d2bcafc62b", "score": "0.0", "text": "def images(filepath: str):\n return common_static('images', filepath)", "title": "" } ]
[ { "docid": "2da4a5b350705097a8f83ba3c33e9942", "score": "0.7041915", "text": "def _serve_main(self):\n\t\t\tready.wait()\n\n\t\t\tenv = Environment(loader=FileSystemLoader(\"templates\"))\n\t\t\ttemplate = env.get_template(\"sensorapp.html\")\n\n\t\t\tself.send_response(200, 'OK')\n\t\t\tself.send_header('Content-type', 'html')\n\t\t\tself.end_headers()\n\n\t\t\tself.wfile.write(template.render(devices=devices))\n\t\t\tself.wfile.close()", "title": "" }, { "docid": "1e1d80416d0b7d3604f05eeae38b4d99", "score": "0.6800098", "text": "def index(self):\n return serveFile('text/html', self.config.html, 'WMBS', 'index.html')", "title": "" }, { "docid": "f5c41f71cd5d4f5589bf3805308b30ae", "score": "0.6649737", "text": "def serve_index(self):\n\n resource_package = __name__\n resource_path = \"index.html\"\n package_root = resource_filename(resource_package, \"\")\n return static_file(resource_path, root=package_root)", "title": "" }, { "docid": "52c31d319a7b3bb02aee94b631c61162", "score": "0.6512679", "text": "def index():\n return send_file(os.path.join(static_dir, 'frontend.html'))", "title": "" }, { "docid": "fefc153c85385592168c41a83e434c20", "score": "0.6470435", "text": "def serve():\n raise NotImplementedError()", "title": "" }, { "docid": "b685a98f54874852beba4d87fd75e463", "score": "0.6384044", "text": "def serve():\n install_requirements()\n local('python main.py')", "title": "" }, { "docid": "2ba7cbf662798a719889cb89d023d07b", "score": "0.63618076", "text": "def main():\n app = tornado.web.Application([\n (r\"/api/?\", APIRoot),\n (r\"/api/(.+)/?\", APIHandler),\n (r\"/media/(.*)\", MyStaticFileHandler, {'path': Enums.Paths.MEDIA_BASE}),\n (r\"/(.*)\", tornado.web.StaticFileHandler, {'path': Enums.Paths.HTML_BASE}),\n ]\n )\n fh = Database.FileHelper(Enums.Paths.MEDIA_BASE)\n fh.filter()\n\n app.listen(Enums.Server.PORT)\n tornado.ioloop.IOLoop.instance().start() # This is a blocking operation...", "title": "" }, { "docid": "273510efb2ef364b93c9f8fb0076bbaf", "score": "0.634827", "text": "def render_home():\n return send_file('templates/index.html')", "title": "" }, { "docid": "099fbfdc0df0e67ff0603a3b4c06e41c", "score": "0.63352174", "text": "def main():\n\n return render_template('main.html')", "title": "" }, { "docid": "5144755fcf382655d3c354a7a2fcea7f", "score": "0.6328575", "text": "def index():\n return application.send_static_file(\"index.html\"), 200", "title": "" }, { "docid": "b7cbf5bbc31f90706d77e28d1951e287", "score": "0.63149875", "text": "def serve_the_homepage():\n return flask.render_template('home.html')", "title": "" }, { "docid": "d8d16dba3fdcc5d17e9ba9bedd2b07ff", "score": "0.6298142", "text": "def main():\n if request.method == \"GET\":\n return render_template('index.html')", "title": "" }, { "docid": "b31ba47a9abd0e23685105b4d5ca0088", "score": "0.62958527", "text": "def index():\n return send_file('index.html', ROOT_DIR)", "title": "" }, { "docid": "470b9ac8a9cc25a9d7edfec5cb2cb693", "score": "0.6295686", "text": "def show_main():\n if request.method == 'GET':\n return render_template('index.html')", "title": "" }, { "docid": "b90be2272558fd72543b713e774c127c", "score": "0.62902397", "text": "def main():\n\n return render_template('index.html')", "title": "" }, { "docid": "ba6b556bc8641803220f14425ea8564f", "score": "0.62487173", "text": "def index():\n return flask.send_from_directory(\"static\", \"main.html\")", "title": "" }, { "docid": "b635d54e44cedd3a35bbbfa2af41cefd", "score": "0.62157303", "text": "def serve_forever(self):\n raise NotImplementedError", 
"title": "" }, { "docid": "0cf89622c8b12de6aeea1fb88927c4aa", "score": "0.6188812", "text": "def index():\n return app.send_static_file(\"index.html\")", "title": "" }, { "docid": "38cb1a2db5cbb00d76796cf97f3c6555", "score": "0.617193", "text": "def index():\n return send_from_directory(app.static_folder, 'index.html')", "title": "" }, { "docid": "db7c68f450e85a8c8e5b1c7f54e00039", "score": "0.6166989", "text": "def index():\n return app.send_static_file('index.html')", "title": "" }, { "docid": "7b17d27257191b13b5f358722b5b8142", "score": "0.61468005", "text": "def serve_content(self, path, method=\"GET\"):\n if path.path in (\"\", \"/\"):\n temp = \"/\" + self.main_page()\n self.do_redirect(temp)\n\n else:\n params = parse_qs(path.query)\n params[\"__path__\"] = path\n # here you might want to look into a local path... f2r = HOME +\n # path\n\n url = path.geturl()\n params[\"__url__\"] = path\n\n if url.startswith(\"/localfile/\"):\n localpath = path.path[len(\"/localfile/\"):]\n self.LOG(\"localpath \", localpath, os.path.isfile(localpath))\n\n if localpath == \"shutdown\":\n self.LOG(\"call shutdown\")\n self.shutdown()\n\n elif localpath == \"__file__\":\n self.LOG(\"display file __file__\", localpath)\n self.send_response(200)\n self.send_headers(\"__file__.txt\")\n content = self.get_file_content(__file__, \"r\")\n self.feed(content)\n\n else:\n self.send_response(200)\n _, ftype = self.get_ftype(localpath)\n execute = eval(params.get(\"execute\", [\"True\"])[ # pylint: disable=W0123\n 0]) # pylint: disable=W0123\n path = params.get(\"path\", [None])[0]\n keep = eval(params.get(\"keep\", [\"False\"])[ # pylint: disable=W0123\n 0]) # pylint: disable=W0123\n if keep and path not in self.get_pathes():\n self.LOG(\n \"execute\",\n execute,\n \"- ftype\",\n ftype,\n \" - path\",\n path,\n \" keep \",\n keep)\n self.add_path(path)\n else:\n self.LOG(\n \"execute\",\n execute,\n \"- ftype\",\n ftype,\n \" - path\",\n path)\n\n if ftype != 'execute' or not execute:\n content = self.get_file_content(localpath, ftype, path)\n ext = os.path.splitext(localpath)[-1].lower()\n if ext in [\n \".py\", \".c\", \".cpp\", \".hpp\", \".h\", \".r\", \".sql\", \".js\", \".java\", \".css\"]:\n self.send_headers(\".html\")\n self.feed(\n self.html_code_renderer(\n localpath,\n content))\n else:\n self.send_headers(localpath)\n self.feed(content)\n else:\n self.LOG(\"execute file \", localpath)\n out, err = self.execute(localpath)\n if len(err) > 0:\n self.send_error(404)\n self.feed(\n \"Requested resource %s unavailable\" %\n localpath)\n else:\n self.send_headers(localpath)\n self.feed(out)\n\n elif url.startswith(\"/js/\"):\n found = None\n for jspa in self.get_javascript_paths():\n file = os.path.join(jspa, url[4:])\n if os.path.exists(file):\n found = file\n\n if found is None:\n self.send_response(200)\n self.send_headers(\"\")\n self.feed(\n \"Unable to serve content for url: '{}'.\".format(path.geturl()))\n self.send_error(404)\n else:\n _, ft = self.get_ftype(found)\n if ft == \"r\":\n try:\n with open(found, ft, encoding=\"utf8\") as f: # pylint: disable=W1501\n content = f.read()\n except UnicodeDecodeError:\n self.LOG(\"file is not utf8\", found)\n with open(found, ft) as f: # pylint: disable=W1501\n content = f.read()\n else:\n self.LOG(\"reading binary\")\n with open(found, ft) as f: # pylint: disable=W1501\n content = f.read()\n\n self.send_response(200)\n self.send_headers(found)\n self.feed(content)\n\n elif url.startswith(\"/debug_string/\"):\n # debugging purposes\n 
self.send_response(200)\n self.send_headers(\"debug.html\")\n self.feed(html_debug_string, False, params)\n\n elif url.startswith(\"/fetchurlclean/\"):\n self.send_response(200)\n self.send_headers(\"debug.html\")\n url = path.path.replace(\"/fetchurlclean/\", \"\")\n try:\n content = get_url_content_timeout(url)\n except Exception as e:\n content = \"<html><body>ERROR (1): %s</body></html>\" % e\n if content is None or len(content) == 0:\n content = \"<html><body>ERROR (1): content is empty</body></html>\"\n\n stre = io.StringIO()\n pars = HTMLScriptParserRemove(outStream=stre)\n pars.feed(content)\n content = stre.getvalue()\n\n self.feed(content, False, params={})\n\n elif url.startswith(\"/fetchurl/\"):\n self.send_response(200)\n self.send_headers(\"debug.html\")\n url = path.path.replace(\"/fetchurl/\", \"\")\n try:\n content = get_url_content_timeout(url)\n except Exception as e:\n content = \"<html><body>ERROR (2): %s</body></html>\" % e\n self.feed(content, False, params={})\n\n else:\n self.serve_content_web(path, method, params)", "title": "" }, { "docid": "4574e0f2c5f8a4c308fdc564a6481d5d", "score": "0.6127183", "text": "def site():\n\n try:\n # Render the frontend\n response = render_template('index.html')\n\n # If an exception occurs\n except Exception:\n response = jsonify({'success': False})\n\n # Return the response\n return response", "title": "" }, { "docid": "d9b6ea0560f24729b17bac0b11ab4d9b", "score": "0.61214125", "text": "def serve_forever(self):\n raise NotImplementedError()", "title": "" }, { "docid": "79a10f9bac4c1a6eb46c266f8056e7ab", "score": "0.61039823", "text": "def root():\n return app.send_static_file('index.html')", "title": "" }, { "docid": "a08a29e6d1babbbbccbcaaa60971a089", "score": "0.60999364", "text": "def serve(self) -> None:\n while True:\n self.serve_one()", "title": "" }, { "docid": "5f64cfe844f90f7a7d28057f84053f62", "score": "0.6098396", "text": "def default(self, *args):\n if len(args) > 0:\n return serveFile('text/html',\n path.join(self.config.html, 'WMBS'),*args)\n else:\n return self.index()", "title": "" }, { "docid": "963de9c764f3654438cabfd144e7561b", "score": "0.60840183", "text": "def home():\n return render_template('streaming.html')", "title": "" }, { "docid": "e0e2eca5de20c95d263410f4803f3ff4", "score": "0.6076438", "text": "def index():\n return render_template('homepage.html')", "title": "" }, { "docid": "1a3f8b7af0026575d15042dcf9d554f7", "score": "0.60754067", "text": "def do_GET(self):\n if self.path.startswith(STATIC_URL):\n # Serve as static\n return self.serve_static(os.path.relpath(self.path, STATIC_URL))\n elif self.path.endswith('.html'):\n # Serve as template\n return self.serve_template(self.path)\n else:\n # Try appending /index.html, or .html\n try:\n path = self.path\n if not path.endswith('/'):\n path = path + '/'\n env.get_template(path + 'index.html')\n self.serve_template(path + 'index.html')\n except jinja2.TemplateNotFound:\n path = self.path\n if path.endswith('/'):\n path = path[:-1]\n self.serve_template(path + '.html')", "title": "" }, { "docid": "29bd2edb556461ec253a282ea09197bc", "score": "0.6064775", "text": "def index():\n downloads = list_downloads(app.root_path)\n return render_template('index.html', downloads=downloads)", "title": "" }, { "docid": "48d697e42db3904e98d983c44f5f00d5", "score": "0.6046587", "text": "def serve(self, request):\n # logger.debug(\"StaticFileHandler::serving %s\" % request.path)\n return serve(request, self.file_path(request.path), insecure=True)", "title": "" }, { "docid": 
"200e53adfb6ec5326dde7ea0b19e0cf1", "score": "0.60179573", "text": "def index():\n general = get_source('general')\n return render_template('index.html', general = general)", "title": "" }, { "docid": "4dc1683bc84fe4e40f49f5e3efd0073f", "score": "0.59920454", "text": "def home():\n\n\t# Load all the variables related with the live data.\n\tliveData = {}\n\tliveData['name'] = cfg['webserver']['liveData']['name']\n\tliveData['sensorNames'] = cfg['webserver']['names']['sensors']\n\tliveData['header'] = {'type': cfg['webserver']['names']['type'], 'value': cfg['webserver']['names']['value']}\n\n\t# Load the configuration of the history chart.\n\thistoryEnable = cfg['webserver']['charts']['history']['enable']\n\tdailyHistoryEnable = cfg['webserver']['charts']['dailyHistory']['enable']\n\tliveDataEnable = cfg['webserver']['liveData']['enable']\n\n\t# Load the configuration of the daily history chart.\n\thistoryChart = cm.getChart('history')\n\tdailyHistoryChart = cm.getChart('dailyHistory')\n\n\twebpageTitle = cfg['webserver']['title']\n\twebpageSubtitle = cfg['webserver']['subtitle']\n\n\treturn render_template('index.html', webpageTitle=webpageTitle, webpageSubtitle = webpageSubtitle, historyChart= historyChart, historyEnable = historyEnable, liveData = liveData, liveDataEnable = liveDataEnable, dailyHistoryChart = dailyHistoryChart, dailyHistoryEnable = dailyHistoryEnable)", "title": "" }, { "docid": "430394f4b7f04957f26476319cedbd15", "score": "0.59852195", "text": "def get(self):\n return output_html(render_template('index.html'), 200)", "title": "" }, { "docid": "0edccf262ea8869d9371925858d8adba", "score": "0.59773046", "text": "def root():\n return send_from_directory(CLIENT_FOLDER, 'index.html')", "title": "" }, { "docid": "4fe05d59827c26a19a56fdbd91f84d7b", "score": "0.5974772", "text": "def main(request, listing=\"normal\"):\n # No appcache for firefox due to suspicious popups\n user_agent = request.META.get(\"HTTP_USER_AGENT\", \"\").lower()\n not_firefox = user_agent.find(\"firefox\") == -1\n\n path = \"/\" if listing == \"normal\" else \"/\" + listing + \"/\"\n\n to_template = {\n \"listing\": listing,\n \"path\": path,\n \"not_firefox\": not_firefox or True,\n \"content\": \"main.mustache\",\n \"content_data\": {\"static_prefix\": settings.STATIC_URL},\n \"metadata\": get_meta(request)}\n\n return \"backbone.html\", to_template", "title": "" }, { "docid": "a6fb46f99a65fcc5202400650d8e5087", "score": "0.5967197", "text": "async def index(request):\r\n with open('index.html') as f:\r\n return web.Response(text=f.read(), content_type='text/html')", "title": "" }, { "docid": "1228744e2cc2488252eaf1f0ff6502af", "score": "0.59634423", "text": "def main():\n\n if not os.path.exists(DIST_DIR):\n os.mkdir(DIST_DIR)\n\n for page in os.listdir(f\"{SRC_DIR}/pages\"):\n doc = render_page(page)\n with open(f\"{DIST_DIR}/{page}.html\", \"w\") as f:\n f.write(doc)", "title": "" }, { "docid": "eeb2510c1f6b00b3f220ae2c31dcfe6d", "score": "0.5959183", "text": "def html_server():\n return app.send_static_file(\"calico-status.html\")", "title": "" }, { "docid": "dea23854f46556c3258b74776ed75836", "score": "0.59525436", "text": "def do_GET(self):\n path = os.path.join(kirby.root_path, self.path[1:])\n if self.path.startswith('/media') and os.path.isfile(path):\n mime = mimetypes.guess_type(path)[0]\n body = open(path).read()\n self.send_response(200)\n self.send_header('Content-type', mime)\n else: \n body = kirby.render_path(self.path)\n if body is None:\n self.send_response(404)\n body = \"Not found 
(404): %s\\n\" % self.path\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n \n self.end_headers()\n self.wfile.write(body)", "title": "" }, { "docid": "8cbe05aeb09de7beb371be133fce1b0f", "score": "0.593647", "text": "def serve():\n\n # allow running this from the top level\n if os.path.isdir('_build'):\n os.chdir('_build')\n\n # local use, address reuse should be OK\n TCPServer.allow_reuse_address = True\n\n PORT = 8000\n httpd = TCPServer(('', PORT), SimpleHTTPRequestHandler)\n print(\"Serving at port {}\".format(PORT))\n httpd.serve_forever()", "title": "" }, { "docid": "50da3d839c16c058751a8b76858d31be", "score": "0.5936105", "text": "def do_GET(self):\n if self.path == \"/\":\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n with open('index.html', 'r') as file:\n data = file.read()\n self.wfile.write(data.encode())\n elif self.path == \"/buildinfo.json\":\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n with open('buildinfo.json', 'r') as file:\n template_def = file.read().replace(\"SUBST_\", \"$\")\n template = Template(template_def)\n data = template.substitute(**version_info)\n self.wfile.write(data.encode())", "title": "" }, { "docid": "52f983d0761c2057c1157f5a60699dc1", "score": "0.5928164", "text": "def get(self):\n template = JINJA_ENVIRONMENT.get_template('index.html')\n self.response.write(template.render())", "title": "" }, { "docid": "bdd080d9523e8c31d8b2237596fc973d", "score": "0.5927103", "text": "def get(self):\n\t\tself.render('index.html')", "title": "" }, { "docid": "93c7369becdfef96fe4a4303af155ae3", "score": "0.5903071", "text": "def run(self):\n self.server.serve_forever()", "title": "" }, { "docid": "0f4e0f803aba8a96cd5c27d592ee57f0", "score": "0.58941257", "text": "def get(self):\n self.render(\"index.html\", handler=self)", "title": "" }, { "docid": "42ab1f367854be45a2c03964b3be1360", "score": "0.58753085", "text": "def serve(ctx, config, visualizer):\n config = read_config(config)\n\n # FIXME: \"visualizer\" shouldn't be in \"server\" section\n if visualizer:\n config.set(\"server\", \"visualizer\", visualizer)\n\n run_server(config, debug=ctx.obj.debug)", "title": "" }, { "docid": "ffa37c55d0e11076039e0d1d381bde0c", "score": "0.5869876", "text": "def index():\n return render_template(\"sd.html\")", "title": "" }, { "docid": "3ca879d7c2606bc3a35f392f7768ae88", "score": "0.5867495", "text": "def index():\n response.title = \"Shooter management\"\n content = []\n data = {\"content\": \"\".join(content)}\n return data", "title": "" }, { "docid": "8974beb7211efb6a891bf20f176b2d44", "score": "0.5863534", "text": "def serve_static():\n cmd = [\"python\", \"-m\", \"SimpleHTTPServer\"]\n log.info(\" \".join(cmd))\n subprocess.check_call(cmd, cwd=config.Config.CHECKOUT)", "title": "" }, { "docid": "073b5b31c2fa5d41042e415544819852", "score": "0.5856914", "text": "def serve(self) -> None:\n self._loop.run_until_complete(self.create_serve_endpoint())", "title": "" }, { "docid": "b6f6367f8d8124971d817c4aed81a419", "score": "0.5854058", "text": "def Index(*args, **kwargs):\n return app.send_static_file('index.html')", "title": "" }, { "docid": "e5826bfa774f00f2f30b487903afe671", "score": "0.58505076", "text": "def main():\n templateData = {\n 'windspeed' : WIND,\n 'temperature' : TEMP,\n 'humidity': HUMI,\n 'pressure': PRES\n }\n return render_template('main.html', **templateData)", "title": "" }, { "docid": 
"eb3aabfd932558e163434476fc65d6b6", "score": "0.5837865", "text": "def _run(self):\n self.server.serve_forever()", "title": "" }, { "docid": "9e8755887dbcbc790152d0bfe148c048", "score": "0.5830028", "text": "def show_homepage():\r\n\r\n return render_template(\"homepage.html\")", "title": "" }, { "docid": "9619c089e24667aae21b7aaed99bfb3c", "score": "0.58292204", "text": "def index():\r\n\r\n response = '<head><script src=\"https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js\"></script></head>'\r\n response += '<h1>Welcome to the Portfolio application.</h1>'\r\n response += '<div>A work in progress</div>'\r\n response += '<a href=\"https://bkr.family\">Blog</a>'\r\n response += get_images_list_html(IMAGE_FOLDER)\r\n response += get_blob_names_html(BUCKET_NAME)\r\n response += get_images_js_html(BUCKET_NAME)\r\n return response", "title": "" }, { "docid": "e45b759c998fc0decd380a73824459d7", "score": "0.58219844", "text": "def serve_content_web(self, path, method, params):\n if path.path.startswith(\"/logs/\"):\n url = path.path[6:]\n targ = urllib.parse.unquote(url)\n self.process_event(targ)\n self.send_response(200)\n self.send_headers(\"\")\n else:\n if path.path.startswith(\"/rssfetchlocalexe/\"):\n url = path.path.replace(\"/rssfetchlocalexe/\", \"\")\n else:\n url = path.path\n\n htype, ftype = self.get_ftype(url)\n local = os.path.join(self.server._my_root, url.lstrip(\"/\"))\n if htype == \"text/html\":\n if os.path.exists(local):\n content = self.get_file_content(local, ftype)\n self.send_response(200)\n self.send_headers(path.path)\n\n # context\n params[\"dbrss\"] = self.server._my_database\n params[\"main_page\"] = url\n params[\"blog_selected\"] = self.interpret_parameter_as_list_int(\n params.get(\n \"blog_selected\",\n []))\n params[\"post_selected\"] = self.interpret_parameter_as_list_int(\n params.get(\n \"post_selected\",\n []))\n params[\"search\"] = params.get(\"search\", [None])[0]\n params[\n \"website\"] = \"http://%s:%d/\" % self.server.server_address\n self.feed(content, True, params)\n else:\n self.send_response(200)\n self.send_headers(\"\")\n self.feed(\n \"unable to find (RSSSimpleHandler): \" +\n path.geturl() +\n \"\\nlocal file:\" +\n local +\n \"\\n\")\n self.send_error(404)\n\n elif os.path.exists(local):\n content = self.get_file_content(local, ftype)\n self.send_response(200)\n self.send_headers(url)\n self.feed(content, False, params)\n\n else:\n self.send_response(200)\n self.send_headers(\"\")\n self.feed(\n \"unable to find (RSSSimpleHandler): \" +\n path.geturl() +\n \"\\nlocal file:\" +\n local +\n \"\\n\")\n self.send_error(404)", "title": "" }, { "docid": "e5a8a94506099f6c6ca7e7986e821209", "score": "0.58181596", "text": "def serve(session):\n print(\"Note: Frontend must be built for this to work\")\n session.install(\"-e\", \".\")\n session.run(\"termpair\", \"serve\", *session.posargs)", "title": "" }, { "docid": "79bebd751d8f122d503835dafe5d268c", "score": "0.5817783", "text": "def render_site(self):\n\n components = self.get_components()\n articles = self.get_articles()\n\n #Create output directory\n output_dir_path = os.path.join(self.current_path, self.output_dir)\n if os.path.exists(output_dir_path) and os.path.isdir(output_dir_path):\n print(\"Deleting existing output directory\")\n shutil.rmtree(output_dir_path)\n create_dir(self.output_dir)\n\n #Build article htmls and links\n article_template = read_file(ARTICLE_TEMPLATE_FILE)\n for article in articles:\n article_html = chevron.render(article_template, {**article, 
**components})\n article_file_path = os.path.join(output_dir_path, article[\"url\"] + \".html\")\n write_file(article_file_path, article_html)\n print(\"wrote article with title {0}\".format(article[\"title\"]))\n\n #Write index.html\n index = chevron.render(read_file(INDEX_TEMPLATE_FILE), {\"articles\": articles, **components})\n write_file(os.path.join(output_dir_path, INDEX_HTML_FILE_NAME), index)\n\n #Copy static files\n static_file_path = os.path.join(self.current_path, STATIC_DIR)\n for f in os.listdir(static_file_path):\n print(\"copying static file: {0}\".format(f))\n shutil.copyfile(os.path.join(static_file_path, f), os.path.join(output_dir_path, f))", "title": "" }, { "docid": "7876ebb9e2d10173824ab865b17531e4", "score": "0.58155423", "text": "def serve():\n if cfg.has_option('devel', 'debug'):\n debug = cfg.getboolean('devel', 'debug')\n else:\n debug = False\n # We need to import api here so that the functions within it get registered\n # (via `rest_call`), though we don't use it directly:\n from sd import model, http, api\n model.init_db()\n http.serve(debug=debug)", "title": "" }, { "docid": "d04c85c36b12c4642141967ec09b4cda", "score": "0.5808719", "text": "def serve(request, name):\n f = get_object_or_404(File, name=name)\n f.dump()\n mimetype = mimetypes.guess_type(name)[0] or 'application/octet-stream'\n response = HttpResponse(f.content, content_type=mimetype)\n response['Content-Length'] = f.size\n return response", "title": "" }, { "docid": "fdf2e8c089a2b713c2104cbb5403b450", "score": "0.580672", "text": "def show_homepage():\n\n return render_template(\"homepage.html\")", "title": "" }, { "docid": "82601b4c40882d6b85b9363f66d7d4a3", "score": "0.58023214", "text": "def index():\n return (render_template('index.html'), 200)", "title": "" }, { "docid": "7e1f7cb005c4f7a1a8d0ca8f3ec1bb7f", "score": "0.58020306", "text": "def index():\n '''return render_template('index.html')'''\n \n \"\"\"Render the initial webpage and then let VueJS take control.\"\"\"\n return app.send_static_file('index.html')", "title": "" }, { "docid": "1b1149b7563cc7236d25dbf9e70c33a9", "score": "0.58016944", "text": "def serve_index(request):\n if request.user.is_anonymous():\n return HttpResponseRedirect('/auth/login')\n return HttpResponseRedirect('/asset')\n #context = {\n # 'init_url': '/asset'.format(request.user.id),\n #}\n #context.update(csrf(request))\n #rendered_template = render('index.html', context)\n #return HttpResponse(rendered_template)", "title": "" }, { "docid": "c93a97b97a6c807604b25210b011cf4a", "score": "0.5801048", "text": "def viewMain():\n picnumber = random.randint(1,2)\n game = session.query(Game.description).first()\n \n # renders public version of this page\n if 'username' not in login_session:\n return render_template('main.html', game=game, picnumber=picnumber)\n \n # renders private (user logged in) version of this page\n return render_template('main.html', game=game,\n username = login_session['username'],\n picnumber=picnumber)", "title": "" }, { "docid": "9ec9ac7a6fa94181f5e15f4ccf9b43ee", "score": "0.579723", "text": "def main() -> None:\n st.set_page_config(page_title=\"GreenDB\", page_icon=\"♻️\")\n st.title(\"GreenDB - A Product-by-Product Sustainability Database\")\n with st.sidebar:\n render_sidebar()\n\n render_basic_information()", "title": "" }, { "docid": "fd4b668848f6237b8251f790eaad2821", "score": "0.5792394", "text": "def root_page():\n return render_template('overview.html')", "title": "" }, { "docid": "61c5911814f27bec623f4eddd020dc41", "score": 
"0.5788899", "text": "def rest_of_content():\n pass", "title": "" }, { "docid": "66bae03eeac22e0a52d3b9ff56a2fd91", "score": "0.57877433", "text": "def index():\n print (\"entered\")\n return render_template('base.html')", "title": "" }, { "docid": "de8a27f416e9e65480e369bb76f6f2ea", "score": "0.57842135", "text": "def index():\n context = make_context()\n\n with open('data/featured.json') as f:\n context['featured'] = json.load(f)\n\n context['links'] = []\n\n for file in glob('data/text/*.txt'):\n filename = file.split('/')[2].split('.')[0]\n context['links'].append(filename)\n\n return make_response(render_template('index.html', **context))", "title": "" }, { "docid": "0b334cfa975083f2586f6911113268b0", "score": "0.57789975", "text": "def serve(self):\n self.freezing = False\n self.app.config.from_pyfile('blog.conf', silent = True)\n self._install_everything()\n\n self.app.run(host = self.app.config['WWW_HOST'],\n port = self.app.config['WWW_PORT'],\n\n # also reload on configuration file changes:\n extra_files = ['blog.conf'])", "title": "" }, { "docid": "fa0747bb1cb05124cb4b195adfc3dd19", "score": "0.5772431", "text": "def homepage():\n return render_template('homepage.html')", "title": "" }, { "docid": "9869a36235fad545a2d9f3e4ac404f23", "score": "0.5769786", "text": "def _serveStaticFile(self, full_path, environ, start_response):\n cling = static.Cling('')\n file_like = cling._file_like(full_path)\n content_type = cling._guess_type(full_path)\n start_response(\"200 OK\", [('Content-Type', content_type)])\n return cling._body(full_path, environ, file_like)", "title": "" }, { "docid": "236fbbfd08d668dfb0f84616a170b5e8", "score": "0.5769531", "text": "def serve_funnel(request):\n return render(request, \"_index.html\") # this is the base template to extend through other html pages.", "title": "" }, { "docid": "ca49c3f022cbf3721dfefdc7c8b37eb6", "score": "0.57694125", "text": "def index():\n return send_from_directory(BUILD_DIR, 'index.html')", "title": "" }, { "docid": "02b76f704302e14a53fcdd5341b9f4c8", "score": "0.5763209", "text": "def get_main_page():\n\n return render_template('main.html')", "title": "" }, { "docid": "82e9c92a416ae60e0a00dea388f84df6", "score": "0.5754067", "text": "def homepage():\n return render_template(\"index.html\")", "title": "" }, { "docid": "092508256cc89f37bde2045c2189cd18", "score": "0.57519454", "text": "def index():\n return render_template('index.html', async_mode=async_mode)", "title": "" }, { "docid": "b4ee1081353f5fdbb526e4a73306e8a5", "score": "0.57394916", "text": "def homepage():\n\n return render_template(\"index.html\")", "title": "" }, { "docid": "06bdc9b4d73abcc916bd814b23946293", "score": "0.57304573", "text": "def render(self):\n # index page OR content page\n print(\"is_index=%s\" % self.get_meta('is_index'))\n if not self.get_meta('is_index'):\n # normal content page\n # build header\n header_map = dict(author=self.get_meta('author'),\n site=self.get_meta('site'),\n site_byline=self.get_meta('site_byline'),\n title=self.get_body('title'),\n abstract=self.get_body('abstract'), \n year=self.get_meta('year'),\n mmm=self.get_meta('mmm'),\n mm=self.get_meta('mm'),\n day=self.get_meta('day'),\n tool=self.get_tool('tool'),\n version=self.get_tool('version'))\n header = self.build_template(self.__header, header_map)\n \n # build content\n content_map = dict(title=self.get_body('title'),\n site=self.get_meta('site'),\n abstract=self.get_body('abstract'), \n description=self.get_body('description'),\n body=self.get_body('content'),\n 
year=self.get_meta('year'),\n mmm=self.get_meta('mmm'),\n mm=self.get_meta('mm'),\n day=self.get_meta('day'), \n img_url=self.get_meta('img_url'),\n img_src=self.get_meta('img_src'),\n img_height=self.get_meta('img_height'),\n img_width=self.get_meta('img_width'),\n dt_format=self.get_meta('dt_format'))\n contents = self.build_template(self.get_body('template'), content_map)\n \n # build footer\n # assuming no templating in footer\n footer = \"%s\" % self.__footer\n\n try:\n #print(\"writing to <%s>\" % self.get_file('filepathname'))\n with open(self.get_file('filepathname'),'wt') as f:\n for line in header:\n f.write(\"%s\\n\" % line)\n if contents:\n for line in contents:\n f.write(line)\n if footer:\n f.write(footer)\n f.close()\n except:\n data = \"\"\n if f: f.close()\n return False\n else:\n return True\n else:\n # index page\n fp = os.path.join(self.get_file('basepath'),\n self.get_meta('year'), \n self.get_meta('mmm'),\n self.get_meta('day'),\n self.get_file('name'))\n link_map = dict(title=self.get_body('title'),\n abstract=self.get_body('abstract'),\n file_path=fp,\n year=self.get_meta('year'),\n mmm=self.get_meta('mmm'),\n day=self.get_meta('day'),\n dt_format=self.get_meta('dt_format'),\n dt_epoch=self.get_meta('dt_epoch'),\n hour=self.get_meta('hour'),\n minute=self.get_meta('minute'))\n\n # call list of link data\n # sorted by?\n self.index.sort()\n print(link_map)\n print(\"len=%s\" % len(self.index))\n for link in self.index:\n print(link, fp)\n #try:\n #print(\"writing to <%s>\" % self.get_file('filepathname'))\n # with open(self.get_file(fp),'wt') as f:\n # pass\n #f.close()\n #except:\n # if f: f.close()\n \n # open file to write\n # title = index.html\n # path = yyyy/mmm/dd\n # write index_link\n return True", "title": "" }, { "docid": "7d18b22f4e9bdb186f915aa374eeb154", "score": "0.57187814", "text": "def index():\n\n try:\n logger.info(\"Index page accessed\")\n return render_template('index.html')\n except:\n traceback.print_exc()\n logger.warning(\"Not able to display tracks, error page returned\")\n return render_template('error.html')", "title": "" }, { "docid": "c7790f300c8b664e01e20c393b0422ca", "score": "0.57153004", "text": "def index(request):\n return response('index.html', request,\n {'title': 'Inicio - ' + DATA['sitename']})", "title": "" }, { "docid": "14c55f43bd560c2822a58ebb72b6d3b4", "score": "0.5706673", "text": "async def handle_index_page(request):\n async with aiofiles.open('index.html', mode='r', encoding=\"utf-8\") as index_file:\n index_contents: str = await index_file.read()\n headers: dict = {\"accept-charset\": \"utf-8\"}\n return web.Response(text=index_contents, content_type='text/html', headers=headers)", "title": "" }, { "docid": "84375338c04de994bcd888b81a606009", "score": "0.5702514", "text": "def serve_content_web(self, path, method, params):\n self.send_response(200)\n self.send_headers(\"\")\n self.feed(\"Unable to serve content for url: '{}'\\n{}\".format(\n path.geturl(), str(params)))\n self.send_error(404)", "title": "" }, { "docid": "cbce0fd468ea84b9c1b5b219448cec48", "score": "0.5700767", "text": "def get(self):\n\n template = jinja_environment.get_template(\n 'demos/%s/templates/index.html' % DEMO_NAME)\n self.response.out.write(template.render({'tag': DEMO_NAME}))", "title": "" }, { "docid": "2a59ae4d12494a0ba0f2335412451edf", "score": "0.56920916", "text": "def index():\n return template('website/index')", "title": "" }, { "docid": "e132121fb2bdba25daebcdccde7443a7", "score": "0.5677768", "text": "def serve(self): #{\n if 
self.greenlet is None:\n self.start()\n\n return self.greenlet.join()", "title": "" }, { "docid": "a99d2d47d4ba594458a5c066a7a0fc2e", "score": "0.5673643", "text": "def main_page(self):\n return \"index.html\"", "title": "" }, { "docid": "cca919983f1ff953316599fd777ddeb7", "score": "0.56605226", "text": "async def main() -> int:\n backend = Quart(__name__,\n static_folder='../../../frontend/static/',\n template_folder='../../../frontend/')\n\n # Apply CORS access control headers to all routes in the backend\n backend = cors(backend)\n\n # Create the monitor app\n monitor = Monitor()\n\n # Register endpoint modules\n backend.register_blueprint(\n monitor.nodes_controller, url_prefix='/api/v1/nodes')\n\n # Sink all undeclared routes so that vue can work with router properly\n @backend.route('/', defaults={'path': ''})\n @backend.route('/<path:path>')\n async def catch_all(path: str) -> str:\n return await render_template('index.html')\n\n await backend.run_task(port=5000)\n return 0", "title": "" }, { "docid": "61e79c12b6e186084a8302420fedc08d", "score": "0.56539387", "text": "def do_GET(self):\n parsed_path = urlparse(self.path)\n self.serve_content(parsed_path, \"GET\")\n # self.wfile.close()", "title": "" }, { "docid": "b0eb0940cc3661d9c7bd29ca352b5501", "score": "0.56472296", "text": "def pserve():\n import pyramid.scripts.pserve\n import pyramid_fanstatic\n import os\n\n dirname = os.path.dirname(__file__)\n dirname = os.path.join(dirname, 'resources')\n pyramid.scripts.pserve.add_file_callback(\n pyramid_fanstatic.file_callback(dirname))\n pyramid.scripts.pserve.main()", "title": "" }, { "docid": "dfcab04e70532c565c9ea2e616c999ec", "score": "0.5642602", "text": "def get(self):\n self.redirect('/static/html/index.html')", "title": "" }, { "docid": "9893b904571ee498399071c27cafafb1", "score": "0.5640963", "text": "def home():\n return render_template('homepage.html')", "title": "" }, { "docid": "63441c0eefb0105592344355774a573a", "score": "0.56394494", "text": "def main():\n\n hostname = os.getenv(\"HOSTNAME\")\n basedirectory = os.getenv(\"BASEDIR\")\n\n if (not hostname) or (not basedirectory):\n sys.exit(\"Please specify the 'HOSTNAME' and 'BASEDIR' environment variable.\")\n\n import_directorys(hostname, basedirectory)\n render_caddyfile(hostname, basedirectory)", "title": "" }, { "docid": "98484e81140231dca7829adb17695178", "score": "0.56347644", "text": "def page():\n return render_template('index.html')", "title": "" }, { "docid": "b9b4007e856e33b34680bfc3f25da907", "score": "0.5630946", "text": "def pre_loop(self):\n dirname='.'\n if 'home_dir' in self.options.data and self.options.data['home_dir'] is not None:\n dirname = self.options.data['home_dir']\n dirname = os.path.join(dirname, 'public')\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n #~ os.chdir(dirname)\n if dirname.endswith(\"/\"):\n dirname = dirname[:-1]\n logger.debug(\"[%s] - Start the server (%s:%s) serving dirnectory %s\", self.__class__.__name__, self.host, self.port, dirname)\n self._server = ThreadedHTTPServer((self.host, self.port), ThreadedHTTPHandler, root_directory=dirname)", "title": "" }, { "docid": "737b0afcdd8f8715f9db9b3aece5f48f", "score": "0.56302124", "text": "def show_homepage():\n return render_template('home.html')", "title": "" }, { "docid": "38433711801c43265f3cb0779bdf4e5f", "score": "0.56213164", "text": "def init():\n print \"Content-type: text/html\\r\\n\"", "title": "" }, { "docid": "1e0c87a1515bf4f6ca9b669e252e791a", "score": "0.5618169", "text": "async def web(request: 
Request):\n static_content_path = pathlib.Path(\n _config.WEBUI_PATH).joinpath('dist/index.html')\n if os.path.exists(static_content_path):\n response = FileResponse(\n path=static_content_path, headers={'Cache-Control': 'no-cache'})\n return response\n\n raise HTTPException(status_code=404, detail='Not found')", "title": "" }, { "docid": "73b4ee07b8b32b20c4eab46a90048cb0", "score": "0.5616935", "text": "def index():\n\treturn render_template(\"index.html\")", "title": "" } ]
064a32ad81b2745b5c1843928ca9c783
Call the same parent constructor, then call setup() if we have a session.
[ { "docid": "088d2b82a919c55c17a2323888421224", "score": "0.6701795", "text": "def __init__(self, session=None):\n self.experiment_repeats = 5\n super().__init__(session)\n if session:\n self.setup()\n\n self.log('initialize')", "title": "" } ]
[ { "docid": "9e2fb0a70c9e84897bc65ae3e343a870", "score": "0.73652965", "text": "def init_session(self):\n pass", "title": "" }, { "docid": "4468a722ea66ce2f0668eb1f8f0f6db7", "score": "0.73297703", "text": "def initialize_session(self):\n pass", "title": "" }, { "docid": "086b6b7d49c78c29702960ba1bd42aba", "score": "0.7156316", "text": "def setup_session(self):\n self.get_session()\n repo.define_meta()", "title": "" }, { "docid": "5f71617ac3541f2a8b8a9d5519f7613f", "score": "0.71442854", "text": "def __init__(self):\n cherrypy.Tool.__init__(self, 'on_start_resource',\n self.bind_session,\n priority=20)\n \n self.session = scoped_session(sessionmaker(autoflush=True,\n autocommit=False))", "title": "" }, { "docid": "16478fb2ba25ee51bfbbf09c9d1482cc", "score": "0.7066522", "text": "def __init__(self, session):\n self.session = session", "title": "" }, { "docid": "16478fb2ba25ee51bfbbf09c9d1482cc", "score": "0.7066522", "text": "def __init__(self, session):\n self.session = session", "title": "" }, { "docid": "ba7e2620651d1aa32df2abfa5d8c5da6", "score": "0.69961053", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._session = ResilientSessionWithAuthCheck(self._session, args, kwargs)", "title": "" }, { "docid": "ac73e7c11f997d68b3d6edd3a651e1cb", "score": "0.6967779", "text": "def __init__(self, session: Session):\n self.session = session", "title": "" }, { "docid": "e290e9b373af3691da787b6594dd0c6d", "score": "0.6862562", "text": "def __init__(self, session):\n self.session = session\n self.key_files = KeyFiles(session=self.session)\n self.users = Users(session=self.session)", "title": "" }, { "docid": "33d241dd91dc49601f2d973b77a7f205", "score": "0.6831214", "text": "def cls_init(self):\n self.write_required=False\n self.session_required=False", "title": "" }, { "docid": "5c1238cb122928aed3874678fff26760", "score": "0.67931104", "text": "def start(self) -> None:\r\n if self._session is None:\r\n logger.debug(\"Initializing %s\" % self._session_cls.__name__)\r\n self._session = self._session_cls(**dict(self._session_kws)) # type: ignore\r", "title": "" }, { "docid": "bbef41cadb7bd1b6cd2812d81ef1cce6", "score": "0.67723703", "text": "def __init__(self, cache, parent=None):\n\n super(Session, self).__init__()\n self.user = None\n self.cache = cache\n self.parent = parent\n self._session = self.new_session()", "title": "" }, { "docid": "6dcc6495d0e0db5b0979fd75909662bf", "score": "0.67383224", "text": "def __init__(self, engine, sessionmaker):\n self.connection = engine.connect()\n\n # begin a non-ORM transaction\n self.transaction = self.connection.begin()\n # Base.metadata.bind = connection\n\n # bind an individual Session to the connection\n if hasattr(sessionmaker, \"query\"): # scoped session detected\n sessionmaker.configure(bind=self.connection)\n self.sas = sessionmaker\n else: # not a scoped session\n self.sas = sessionmaker()(bind=self.connection)", "title": "" }, { "docid": "6f3b089d3685ebfdb8e0c13e3d94e9c8", "score": "0.6730593", "text": "def __init__(self, *args, **kwargs):\n self.setup(*args, **kwargs)", "title": "" }, { "docid": "8cc7874097b07d364bd87386357fc4ab", "score": "0.6633673", "text": "def _create_session(self):\n self.driver = requests.Session(**self.driver_args)\n # Set default headers\n self.update_headers(self.current_headers)\n self.update_cookies(self.current_cookies)\n self.set_proxy(self.current_proxy)", "title": "" }, { "docid": "572b7cf09c84494cdf24a513afc660af", "score": "0.6630059", "text": "def __init__(self, 
get_response=None):\r\n self.settings = getattr(settings, \"COOKIELESS\", DEFAULT_SETTINGS)\r\n self._re_links = re.compile(LINKS_RE, re.I)\r\n self._re_forms = re.compile(\"</form>\", re.I)\r\n self._re_body = re.compile(\"</body>\", re.I)\r\n self._sesh = CryptSession()\r\n self.standard_session = SessionMiddleware()\r\n\r\n self.get_response = get_response\r\n engine = import_module(settings.SESSION_ENGINE)\r\n self.SessionStore = engine.SessionStore", "title": "" }, { "docid": "f7fb9372a4cefde08d58eae4623a6867", "score": "0.66130173", "text": "def initialize(self, *args, **kwargs):\n webapp2.RequestHandler.initialize(self, *args, **kwargs)\n user_id = self.get_verified_cookie('user_id')\n # set self.user to the user, provided it exists and is valid\n self.user = user_id and User.fetch_by_id(int(user_id))\n self.set_session(self.user)", "title": "" }, { "docid": "11280fc7a3361cebff14b18c9c359036", "score": "0.66122526", "text": "def __init__(self, engine, session):\n self.engine = engine\n self.session = session", "title": "" }, { "docid": "f864f73b7d4eadbc768bf272ca27ef9f", "score": "0.65995276", "text": "def __init__(self):\n\n # Run the Session init\n super().__init__()\n\n # Headers in case the site checks UA\n self._header_cache_control = \"no-cache\"\n self._header_ua = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36\"\n self._header_host = \"libgen.io\"\n self.headers.update({\n \"User-Agent\": self._header_ua,\n \"Host\": self._header_host,\n \"Cache-Control\": self._header_cache_control\n })", "title": "" }, { "docid": "9936c078c164621be9660a79ab6a8bc4", "score": "0.65987325", "text": "def __init__(self, session_key):\n from django.utils.importlib import import_module\n engine = import_module(settings.SESSION_ENGINE)\n self.session = engine.SessionStore(session_key)", "title": "" }, { "docid": "1a941ef8a9a757e1ea4b74f71daea96b", "score": "0.6582212", "text": "def __init__(self, username, password):\n self.session = requests.Session()\n self.login(username, password)\n self.api = API(username, password)", "title": "" }, { "docid": "2f3c60c2fc736c9d348791860b6009f9", "score": "0.6578208", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Don't waste time starting servers and agents.\n self.setup_start_servers = False\n self.setup_start_agents = False", "title": "" }, { "docid": "f3e900de7b08f167f28fb5e16fd267c2", "score": "0.6577631", "text": "def __init__(self, session: ClientSession):\n self.session = session", "title": "" }, { "docid": "9599da5b12ac298455d5da05cac427ea", "score": "0.6577551", "text": "def __init__(self, *args, **kwargs):\n self._initialize_protocols()\n self._initialize_potential_mapping()\n super().__init__(*args, **kwargs)", "title": "" }, { "docid": "ee4b835f7822fef53cd1d11364c422a4", "score": "0.65601146", "text": "def __init__(self, session_maker: sessionmaker_type,\n reuse_session: Optional[Session] = None) -> None:\n self._session_maker = session_maker\n self._reuse_session = reuse_session\n self._session = reuse_session", "title": "" }, { "docid": "c28fa4d5a43912ec0960086234a9278a", "score": "0.65155727", "text": "def __init__(self, session, **data):\n self._session = session\n self.__data__ = data", "title": "" }, { "docid": "1a156e555df7b4546dc683ea7215d369", "score": "0.6484061", "text": "def __init__(self):\n load_dotenv()\n self.api_hostname = os.getenv(\"API_HOSTNAME\")\n self.session = http.Session()\n\n # This is so that the client 
promises to sends and retrieves in JSON\n self.session.headers.update({\"Content-Type\": \"application/json\"})", "title": "" }, { "docid": "921b8c3f0036f160e307f9f959265cd2", "score": "0.64638484", "text": "def _setup(self):\r\n raise NotImplementedError(\r\n 'subclasses of LazyObject must provide a _setup() method')", "title": "" }, { "docid": "0b1a407ddc5b60ae4475fa9c33bbc7e4", "score": "0.64320296", "text": "def __init__(self, params={}):\n self.logger = get_logger()\n\n # these are our session attributes. Declare them all here\n self.params = params\n self.session = None\n self.Settlement = None\n self.User = None\n self.set_cookie = False\n\n #\n # special session types\n #\n\n # we're not processing params yet, but if we have a log out request, we\n # do it here, while we're initializing a new session object.\n if \"remove_session\" in self.params:\n user = mdb.users.find_one(\n {\n \"current_session\": ObjectId(\n self.params[\"remove_session\"].value\n )\n }\n )\n\n if user is not None:\n self.User = assets.User(user_id=user[\"_id\"], session_object={\"_id\": 0})\n self.User.mark_usage(\"signed out\")\n\n if 'login' in self.params:\n admin.remove_session(self.params[\"remove_session\"].value, self.params[\"login\"].value)\n else:\n admin.remove_session(self.params[\"remove_session\"].value, \"webapp_error\")\n\n # ok, if this is a recovery request, let's try to do that\n if 'recovery_code' in self.params:\n self.logger.info(\"Password Recovery Code sign-in initiated!\")\n user = mdb.users.find_one({'recovery_code': self.params[\"recovery_code\"].value})\n if user is None:\n self.logger.info(\"Password Recovery Code not found (possibly expired). Aborting attempt.\")\n else:\n self.logger.info(\"Rendering Password Recovery controls for '%s'\" % user[\"login\"])\n login.render(\"reset\", user[\"login\"], self.params['recovery_code'].value)\n\n\n #\n # normal session types\n #\n\n #\n # initialize!\n #\n\n # 1.) try to set the session ID from the cookie\n self.cookie = Cookie.SimpleCookie(os.environ.get(\"HTTP_COOKIE\"))\n if \"session\" in self.cookie:\n try:\n session_id = ObjectId(self.cookie['session'].value)\n except Exception as e:\n self.logger.error(\"Session ID does not appear to be an OID!\")\n self.logger.error(e)\n for k, v in self.cookie.iteritems():\n self.logger.error(\"%s -> %s\" % (k,v))\n session_id = None\n else:\n session_id = None\n\n # 2.) determine if creds are present\n creds_present = False\n if 'login' in self.params and 'password' in self.params:\n creds_present = True\n\n #\n # do stuff!\n #\n\n # default sign in method; \n def sign_in():\n \"\"\" Private DRYness method for quickly logging in with params. 
\"\"\"\n if 'login' in self.params and 'password' in self.params:\n self.AuthObject = login.AuthObject(self.params)\n try:\n self.User, self.session = self.AuthObject.authenticate()\n except TypeError:\n raise AttributeError(\"User login '%s' not found in MDB!\" % self.params['login'].value)\n self.set_cookie=True\n\n if session_id is not None:\n try:\n self.session = mdb.sessions.find_one({\"_id\": session_id})\n except pymongo.errors.ServerSelectionTimeoutError:\n self.logger.error('The database is unavailable!')\n self.session = None\n if self.session is None:\n sign_in()\n else:\n user_object = mdb.users.find_one({\"current_session\": session_id})\n self.User = assets.User(user_object[\"_id\"], session_object=self)\n elif self.cookie is not None and 'Session' not in self.cookie.keys() and creds_present:\n sign_in()\n elif self.cookie is None and creds_present:\n sign_in()\n else:\n sign_in()\n# self.logger.error(\"Error attempting to process cookie!\")\n# self.logger.error(self.cookie)\n\n if self.session is not None:\n\n token_check = False\n try:\n token_check = api.check_token(self)\n except requests.ConnectionError:\n self.log_out()\n self.session = None\n\n if not token_check:\n# self.logger.debug(\"JWT Token expired! Attempting to refresh...\")\n r = api.refresh_jwt_token(self)\n if r.status_code == 401:\n self.log_out()\n self.session = None", "title": "" }, { "docid": "a3bbaba0e981fc5f98771eb33aa18812", "score": "0.64218605", "text": "def __init__(self, login: str, password: str):\n self.login = login\n self.password = password\n self.session = self.create_session()", "title": "" }, { "docid": "35a3205240327482e4828bb6416033d4", "score": "0.6409793", "text": "def __init__(self, sess):\n assert isinstance(sess, SakaiSession.SakaiSession)\n self.session = sess", "title": "" }, { "docid": "4f43750a95113c98e1edf6368c328e31", "score": "0.6399527", "text": "def __init__(self, config, logger, session):\n self.config = config\n self.logger = logger\n self.session = session", "title": "" }, { "docid": "bc792ec70f31d198a4b10b2af6b35849", "score": "0.63835806", "text": "def __init__(self):\n engine = db_connect()\n create_tables(engine)\n self.Session = sessionmaker(bind=engine)", "title": "" }, { "docid": "a704750d99b4d974c67acdd5f8987cef", "score": "0.6382399", "text": "def __init__(self, config, session, logger):\n self.config = config\n self.logger = logger\n self.session = session", "title": "" }, { "docid": "a86067944e64e2d6c1cf091b781e010f", "score": "0.63761264", "text": "def test_init(self):\n self.assertEqual(self.pp.session, self.session)", "title": "" }, { "docid": "927da1d6ad93ea295c033db48288724a", "score": "0.6371312", "text": "def __init__(self):\n super(Session, self).__init__()\n self.aborted = False\n self.analysis_reports_counter = collections.Counter()\n self.artifact_filters = None\n self.command_line_arguments = None\n self.completion_time = None\n self.debug_mode = False\n self.enabled_parser_names = None\n self.event_labels_counter = collections.Counter()\n self.filter_file = None\n self.identifier = '{0:s}'.format(uuid.uuid4().hex)\n self.parser_filter_expression = None\n self.parsers_counter = collections.Counter()\n self.preferred_encoding = 'utf-8'\n self.preferred_time_zone = 'UTC'\n self.preferred_year = None\n self.product_name = 'plaso'\n self.product_version = plaso.__version__\n self.source_configurations = None\n self.start_time = int(time.time() * 1000000)\n self.text_prepend = None", "title": "" }, { "docid": "e440d6dd016c5942a6a8215b796e8244", 
"score": "0.63621885", "text": "def _init_session(self, headers: dict):\n if not self._session:\n ssl_option = None if self._data.ssl_verify else False\n self._session = aiohttp.ClientSession(\n headers=headers,\n connector=aiohttp.TCPConnector(ssl=ssl_option)\n )", "title": "" }, { "docid": "61f8bd7f20cf0877c2fc9fa47699ebe0", "score": "0.63443416", "text": "def __init__(self, canvas_host, session=None):\n\n # This is a requests Session object, not a DB session etc.\n self._session = session or Session()\n self._canvas_host = canvas_host", "title": "" }, { "docid": "d846777025dd37e725d407cf80901e6b", "score": "0.63291436", "text": "def __init__(self, client_session, base_url):\n self._session = client_session # aiohttp session\n self.base_url = base_url", "title": "" }, { "docid": "d846777025dd37e725d407cf80901e6b", "score": "0.63291436", "text": "def __init__(self, client_session, base_url):\n self._session = client_session # aiohttp session\n self.base_url = base_url", "title": "" }, { "docid": "18384bb77b85438956c71899d51ff7ad", "score": "0.6315597", "text": "def init(self):\n if self.data is None:\n self.log.error('Profile init failed; loaded profile data is None')\n\n # Now can initialize anything that needs initializing\n self.session_manager.init() # initialize session recording/playback", "title": "" }, { "docid": "0fe74b34b09995f9469ff2044e97c87b", "score": "0.63148683", "text": "def __init__(self, username, password):\n\n self.username = username\n self.password = password\n\n self.credentials = {}\n\n self.session = requests.session()\n self.auth()", "title": "" }, { "docid": "39d7545ebb3ef5bcf8c7b12333f1996f", "score": "0.629719", "text": "def setup(cls):\n super().setup()", "title": "" }, { "docid": "d8e39cf2dfc128baf03ecd6cc831800c", "score": "0.6282445", "text": "def prepare_session(self):\n self.session = requests.Session()\n self.session.headers.update(self.headers)", "title": "" }, { "docid": "92e2873dac2b2a413a4431b9e111a787", "score": "0.6257882", "text": "def __init__(self, username, password, loop):\n self._username = username\n self._password = password\n self._loop = loop\n self._session = None", "title": "" }, { "docid": "e34be7b623666c297ae2cf4603b9eb66", "score": "0.6256183", "text": "def __init__(self, session, api_id=None, api_hash=None, proxy=None):\n\n # if api_id is None or api_hash is None:\n # raise PermissionError(\n # 'Your API ID or Hash are invalid. 
Please read \"Requirements\" on README.rst')\n\n super().__init__(session, api_id, api_hash, proxy)\n self.api_id = api_id\n self.api_hash = api_hash\n\n # Determine what session object we have\n # TODO JsonSession until migration is complete (by v1.0)\n if isinstance(session, str) or session is None:\n self.session = JsonSession.try_load_or_create_new(session)\n elif isinstance(session, Session):\n self.session = session\n else:\n raise ValueError(\n 'The given session must either be a string or a Session instance.')\n\n self.transport = None\n self.proxy = proxy # Will be used when a TcpTransport is created\n\n self.login_success = False\n\n # Safety across multiple threads (for the updates thread)\n self._lock = RLock()\n self._logger = app_logger\n\n # Methods to be called when an update is received\n self._update_handlers = []\n self._updates_thread_running = Event()\n self._updates_thread_receiving = Event()\n\n # Cache \"exported\" senders 'dc_id: MtProtoSender' and\n # their corresponding sessions not to recreate them all\n # the time since it's a (somewhat expensive) process.\n self._cached_senders = {}\n self._cached_sessions = {}\n\n # These will be set later\n self._updates_thread = None\n self.dc_options = None\n self.sender = None\n self.phone_code_hashes = {}", "title": "" }, { "docid": "8a422ec38f9a566bfe0716b74bad7171", "score": "0.62550104", "text": "def setUpClass(cls):\n\t\tcls.settings = initConfig()\n\t\tcls.db = DatabaseWrapper.DatabaseWrapper(cls.settings[\"dbCredentials\"])\n\t\tcls.db.setDatabaseEnv()\n\t\tcls.transaction = cls.db.getTxById(randint(0, cls.db.getTxCount()))\n\t\tcls.httpTransaction = HttpTransaction.HttpTransaction(cls.transaction.transactionHash)", "title": "" }, { "docid": "240f26ca31133de8598aa3440417e44f", "score": "0.6244372", "text": "def setUpClass(cls):\n\n cls.conn = db_utils.get_postgres_conn()\n cls.redis_conn = db_utils.get_redis_conn(db=1)\n cls.session = db_utils.get_postgres_session(cls.conn)\n cls.mixer = Mixer(session=cls.session, commit=False)\n db_utils.init_db()", "title": "" }, { "docid": "e51dd89fb9f8da446271a1961738bdb0", "score": "0.6243419", "text": "def __init__(self, *args, **kwargs):\n self._initialize_protocols()\n super().__init__(*args, **kwargs)", "title": "" }, { "docid": "ffb77d7be38d468a7cc8b4621660f3fa", "score": "0.62333685", "text": "def __init__(self, session=None, commit=True, **params):\r\n super(Mixer, self).__init__(**params)\r\n self.params['session'] = session\r\n self.params['commit'] = bool(session) and commit", "title": "" }, { "docid": "0984a15ea5bfa44b227dce3d2a5dea17", "score": "0.62215716", "text": "def initialize(self, **kwargs):\n pass", "title": "" }, { "docid": "71dfb75d745e0218015ab8ae76de3784", "score": "0.62054014", "text": "def __init__(self, session: object) -> None:\n\n # Set the session.\n self.power_bi_session: PowerBiSession = session", "title": "" }, { "docid": "245d93b9c896c828742c0b9a3235a15e", "score": "0.6197478", "text": "def __init__(self, base_url, session):\n # ensure that the base URL ends in / for later concatenation\n if not base_url.endswith('/'):\n base_url += '/'\n\n self.base_url = base_url\n self.session = session\n self._sesskey = None\n self.cache_max_age = 1800\n self.cache = 'cache'\n self.payload = True", "title": "" }, { "docid": "df2cea1547c8b2e6c9e457d7a11101bc", "score": "0.61891085", "text": "def __init__(self):\n # call to base class initializer\n super().__init__()", "title": "" }, { "docid": "6a5ddae190dae45b671d32e4dffe4df8", "score": "0.61828464", 
"text": "def _setup(self, **kwargs):\n pass", "title": "" }, { "docid": "6a5ddae190dae45b671d32e4dffe4df8", "score": "0.61828464", "text": "def _setup(self, **kwargs):\n pass", "title": "" }, { "docid": "5857df7b5a2a2539b8913d6dd05b6e8c", "score": "0.6168391", "text": "def __init__(self, namespace, scoped_session, **kwargs):\n \n container.NamespaceManager.__init__(self, namespace)\n self.scoped_session = scoped_session", "title": "" }, { "docid": "5a85d352bb754f4900209b270f3c932c", "score": "0.6144803", "text": "def __init__(self, default_language_code: LanguageCodes = DEFAULT_LANGUAGE_CODE, *,\n session: typing.Optional[aiohttp.ClientSession] = None\n ) -> None:\n\n super().__init__(default_language_code)\n\n if session:\n self._session = session\n\n if not isinstance(session, aiohttp.ClientSession):\n message = (\n 'For `session` has been passed object with unsupported type. '\n 'Expected to get argument with type <aiohttp.ClientSession>! '\n f'Got (session={self._session!r})'\n )\n raise TypeError(message)\n else:\n self._session = aiohttp.ClientSession()\n\n logger.debug(f'``aiohttp.ClientSession`` session has been created for async API client: {self._session!r}.')\n\n logger.info('Async client has been successfully init-ed.')", "title": "" }, { "docid": "5a270db310741fc13ff99dba9510d042", "score": "0.6143701", "text": "def initialize(self, *args, **kwargs):\n webapp2.RequestHandler.initialize(self, *args, **kwargs)\n uid = self.read_secure_cookie('user_id')\n self.user = uid and models.Users.by_id(int(uid))", "title": "" }, { "docid": "b1e5ab3924de563be81e8e272162effe", "score": "0.6142442", "text": "def __init__(self, nodeid, session):\n log.info(\"Initialising node with nodeid : \" + str(nodeid))\n self.__nodeid = nodeid\n self.__session = session\n try:\n self.request_header = {'content-type': 'application/json',\n 'Authorization': session.id_token}\n except AttributeError:\n raise InvalidClassInput(session, 'Invalid Session Input.\\\n Expected: type <session object>.\\\n Received: ')", "title": "" }, { "docid": "f3d6f8ad3ea377f0b5cb40e751b990de", "score": "0.6135654", "text": "def __init__(\n self,\n session,\n host,\n port,\n api_key,\n async_add_device=None,\n connection_status=None,\n ):\n self.session = session\n self.host = host\n self.port = port\n self.api_key = api_key\n\n self.async_add_device_callback = async_add_device\n self.async_connection_status_callback = connection_status\n\n self.config = None\n self.groups = None\n self.lights = None\n self.scenes = {}\n self.sensors = None\n self.websocket = None", "title": "" }, { "docid": "3119494b85d8bc988b9da0a8603446e5", "score": "0.61313945", "text": "def __init__(self, session_path, lazy=False, one=None, download_data=False, bpod_only=False):\n if not is_session_path(session_path):\n raise ValueError('Invalid session path')\n self.session_path = session_path\n self.one = one\n self.log = _logger\n\n self.data = None\n self.settings = None\n self.raw_data = None\n self.frame_ttls = self.audio_ttls = self.bpod_ttls = None\n self.type = None\n self.wheel_encoding = None\n self.bpod_only = bpod_only\n\n if download_data:\n self.one = one or ONE()\n self._ensure_required_data()\n\n if not lazy:\n self.load_raw_data()\n self.extract_data()", "title": "" }, { "docid": "bb32b8a633bbdeef3e2544f7a442d0a8", "score": "0.61243725", "text": "def __init__(self, conn, ignore_db_error=True, **config):\n self.engine = create_engine(conn, **config)\n self.logger = get_logger(\"SQLAlchemyDriver\")\n\n Base.metadata.bind = self.engine\n 
self.Session = sessionmaker(bind=self.engine, expire_on_commit=False)\n if ignore_db_error:\n try:\n self.setup_db()\n except Exception:\n self.logger.exception(\"Fail to setup database tables, continue anyways\")\n pass\n else:\n self.setup_db()", "title": "" }, { "docid": "d603dbf98de7fa994e12f1667d991469", "score": "0.6101063", "text": "def __init__(self, timeout=None, session=None):\n self.timeout = timeout\n self.session = session", "title": "" }, { "docid": "60284ebce2ca97eb6768c58fa2df711f", "score": "0.61006624", "text": "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)", "title": "" }, { "docid": "60284ebce2ca97eb6768c58fa2df711f", "score": "0.61006624", "text": "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)", "title": "" }, { "docid": "60284ebce2ca97eb6768c58fa2df711f", "score": "0.61006624", "text": "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)", "title": "" }, { "docid": "60284ebce2ca97eb6768c58fa2df711f", "score": "0.61006624", "text": "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)", "title": "" }, { "docid": "60284ebce2ca97eb6768c58fa2df711f", "score": "0.61006624", "text": "def __init__(self):\n engine = db_connect()\n create_table(engine)\n self.Session = sessionmaker(bind=engine)", "title": "" }, { "docid": "f513a4de7308785bde0c291544f12d19", "score": "0.60999024", "text": "def __init__(self, session, object_factory, request_validator):\n check_type(session, RestSession)\n\n super(EventManagement, self).__init__()\n\n self._session = session\n self._object_factory = object_factory\n self._request_validator = request_validator", "title": "" }, { "docid": "a158f463a5a79ed5a2493fde67f11537", "score": "0.6090831", "text": "def __init__(self, base_url, username, password, session=None):\n self.session = session or requests.Session()\n self.base_url = urljoin(base_url, \"user/login\")\n self.username = username\n self.password = password", "title": "" }, { "docid": "a26d9b86bd8d90641ca34f2f4a2b5a04", "score": "0.60809463", "text": "def __init__(self, settlement_id=False, name=False, campaign=False, session_object=None):\n self.logger = get_logger()\n\n # initialize session and user objects\n self.Session = session_object\n if self.Session is None or not self.Session:\n raise Exception(\"Settlements may not be initialized without a Session object!\")\n try:\n self.User = self.Session.User\n except:\n self.logger.warn(\"Settlement %s initialized without a User object!\" % settlement_id)\n\n # now set self.settlement\n self.set_settlement(ObjectId(settlement_id))", "title": "" }, { "docid": "2492bb961c61d6d8b6c65fcdc79b89f7", "score": "0.60803676", "text": "def initialize(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "fa6e0260039dd93f4ecd0c552c64b2a4", "score": "0.6078866", "text": "def __init__(self, session):\n\n # handle input parameters\n self.session = session\n self.neutron_client = client.Client(session=self.session)", "title": "" }, { "docid": "4cefcfc572595a64fc5144f3e14690e6", "score": "0.60780704", "text": "def setup(self):\n pass # This is the same as our base class.", "title": "" }, { "docid": "90fd2d5d666baa6f2e8d8eba05e0adf8", "score": "0.6078029", "text": "def setUp(self): \n self._meta = self._DATA.meta\n self._records = self._DATA.records\n self._stream = self._class()\n return", "title": "" }, { 
"docid": "73b3765f2385657c43f55ff7d59237ae", "score": "0.60756326", "text": "def setup_run(self):\n\t\tif self.debug:\n\t\t\timport pdb\n\t\t\tpdb.set_trace()\n\t\t\n\t\tif getattr(self, 'db', None):\n\t\t\tsession = self.db.session\n\t\t\tsession.begin(subtransactions=True)\n\t\t\n\t\t\tif not self.data_dir:\n\t\t\t\tself.data_dir = self.db.data_dir\n\t\t\t\n\t\t\tif not self.local_data_dir:\n\t\t\t\tself.local_data_dir = self.db.data_dir\n\t\t\n\t\tself.workflow = self.initiateWorkflow()\n\t\t\n\t\t\n\t\tself.registerJars()\n\t\tself.registerCustomJars()\n\t\tself.registerExecutables()\n\t\tself.registerCustomExecutables()\n\t\t\n\t\treturn self", "title": "" }, { "docid": "1de2cc133b4fa47b940746d88cb92391", "score": "0.6075369", "text": "def _setup_connection(self):\n global Session # pylint: disable=global-statement\n\n import homeassistant.components.recorder.models as models\n from sqlalchemy import create_engine\n from sqlalchemy.orm import scoped_session\n from sqlalchemy.orm import sessionmaker\n\n if self.db_url == 'sqlite://' or ':memory:' in self.db_url:\n from sqlalchemy.pool import StaticPool\n self.engine = create_engine(\n 'sqlite://',\n connect_args={'check_same_thread': False},\n poolclass=StaticPool)\n else:\n self.engine = create_engine(self.db_url, echo=False)\n\n models.Base.metadata.create_all(self.engine)\n session_factory = sessionmaker(bind=self.engine)\n Session = scoped_session(session_factory)\n self.db_ready.set()", "title": "" }, { "docid": "09489cbbb7641fbecab3bbbd129801c2", "score": "0.6073993", "text": "def __init__(self, config=None, **kw):\n\n self.internalname = self.__class__.__name__.lower()\n\n # When the architecture stabilizes, switch to config as the sole\n # positional argument, and retain it instead of copying parts.\n # That would also enable reconfiguration at runtime.\n self.setConfig(config or BaseConfig(), **kw)\n\n # setup cookie handler\n self.opener = urllib2.build_opener(\n urllib2.HTTPCookieProcessor(LWPCookieJar()))\n self.opener.addheaders = [('User-Agent', u'Anichou/{0} {1}'.format(\n self.__class__.__name__, settings.VERSION))]\n self._logined = None\n\n if self.initsync:\n self.sync()", "title": "" }, { "docid": "1dd22021dbd90a2612fb10ef868f4926", "score": "0.6069043", "text": "def __init__(self, bot, session):\n\n self.bot = bot\n self.session = session", "title": "" }, { "docid": "468b00c54ed2e355a5f2a3d5e4041e3d", "score": "0.6068895", "text": "def initialize(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "44b5e57b34aa07663e262117b806dcc8", "score": "0.6066969", "text": "def init(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "6c7d583811a6794b76ebae6d3b89b821", "score": "0.60639906", "text": "def setup_class(cls):\n cls.cloud_auth = cls.cloud_auth()\n cls.cloud_auth.setup()\n\n # get access token and id token\n cls.ACCESS_TOKEN = cls.cloud_auth.ACCESS_TOKEN\n cls.SCOPE_ACCESS_TOKEN = cls.cloud_auth.SCOPE_ACCESS_TOKEN\n cls.ID_TOKEN = cls.cloud_auth.ID_TOKEN\n\n # set application for testing\n cls.client = cls.cloud_auth.TESTCLIENT", "title": "" }, { "docid": "7fb356cb3da3d9759e6eb8ed6a78a986", "score": "0.60613316", "text": "def __init__(self, session, **data):\n super().__init__(session, **data)\n\n self._pending_props = dict()\n self._pending_children = list()", "title": "" }, { "docid": "7e012c5ccfa547b4fa04c4b14fc85515", "score": "0.6054217", "text": "def init(self, session, today, *args, **kwargs) -> None:\n self.session = session\n self.today = today", "title": "" }, { "docid": 
"d49074728c53769f54c6d90ce8ac67bf", "score": "0.6047107", "text": "def _init_session(self):\n self.sess = tf.Session(graph=self.g, config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS))\n self.sess.run(self.init)", "title": "" }, { "docid": "ccc62d26ccb555d367a7ce91d19e4ae8", "score": "0.6046316", "text": "def __init__(self):\n print('Current State: ' + str(self))\n # This function handles the server setup before connecting to mqtt network.\n self.initial_setup()\n # This function is used to connect to mqtt and handle messages.\n self.mqtt_server()", "title": "" }, { "docid": "5d8fd5fb6c12548fc6f9a20c7d3d692c", "score": "0.6042951", "text": "def __init__(self, request=None, response=None):\r\n self.initialize(request, response)", "title": "" }, { "docid": "f906e9b04b3ce1967c9532ec8e13075b", "score": "0.60372037", "text": "def __init__(self, session: \"Session\"):\n self._session: \"Session\" = session\n self._client_athena: client = session.boto3_session.client(service_name=\"athena\",\n use_ssl=True,\n config=session.botocore_config)\n self._client_s3: client = session.boto3_session.client(service_name=\"s3\",\n use_ssl=True,\n config=session.botocore_config)", "title": "" }, { "docid": "a8f3cdb9d21c4411002285b5570843eb", "score": "0.60339993", "text": "def __init__(self, *args, **kwargs):\n self._session = kwargs.pop(\"session\", None) or getattr(getattr(self, \"Meta\", None), \"session\", None)\n self.allow_nested_updates = kwargs.pop(\"allow_nested_updates\", False)\n self.allow_create = kwargs.pop(\"allow_create\", False)\n self.partial_by_pk = kwargs.pop(\"partial_by_pk\", False)\n overwrite_fields = kwargs.pop(\"fields\", fields.empty)\n overwrite_exclude = kwargs.pop(\"exclude\", fields.empty)\n extra_kwargs = kwargs.pop(\"extra_kwargs\", {})\n\n super().__init__(*args, **kwargs)\n\n self._extra_kwargs = self.get_extra_kwargs(**extra_kwargs)\n self._overwrite_fields = overwrite_fields\n self._overwrite_exclude = overwrite_exclude", "title": "" }, { "docid": "9616211c2902c4728f8de5533ff61d7f", "score": "0.6031356", "text": "def __init__(self, url: str) -> None:\n self.url = url\n self.session = requests.Session()", "title": "" }, { "docid": "d32770ce017fa8db5ad0d48755da928d", "score": "0.6031162", "text": "def setup(self):\n\t\t\tself.config = self.server.config\n\t\t\tself.banner = self.config[\"banner\"]\n\t\t\tself.game = game.Game(self.server.gamePath, self.config[\"flag\"])\n\t\t\tself.authorizedAccess = False if \"password\" in self.config else True\n\t\t\tself.requestHandler = xmlLib.RequestHandler()\n\t\t\tself.responseFactory = xmlLib.ResponseFactory()", "title": "" }, { "docid": "5c9d0992d0365edae567b2b0fe84ad0c", "score": "0.6030021", "text": "def setup_session(self, connection_string=None):\n\n if self.engine:\n return\n\n self.engine = create_engine(connection_string, echo=False,\n pool_recycle=3600)\n\n self.Session.configure(bind=self.engine)", "title": "" }, { "docid": "0928739243e0617576ec5a2c0aa3b8cc", "score": "0.6021407", "text": "def __init__(self, plex_username, plex_password, webtools_path):\n\n # Variable Declaration\n self._session = requests.session()\n self.channel_types = None\n self.channel_dict = None\n self._auth_status = False\n self._username = plex_username\n self._password = plex_password\n self._full_path = webtools_path\n\n # Function calls\n self._auth_session()\n self._cache_bundle_data()", "title": "" }, { "docid": "93472517271995ac5d1049ade1218859", "score": "0.60177594", "text": "def __init__(\n self, connect_url: str, web_app: 
Optional[bool] = False,\n echo: Optional[bool] = False\n ):\n self.web_app = web_app\n # Ensure that the connection URL is set.\n if echo:\n import logging\n logging.info('Connect to database Url %s' % (connect_url))\n self._engine = create_engine(connect_url, echo=echo)\n if web_app:\n self._session = scoped_session(sessionmaker(bind=self._engine))\n else:\n self._session = sessionmaker(bind=self._engine)", "title": "" }, { "docid": "306d6f6db3ac79e649832a7e6c2c7c02", "score": "0.6016236", "text": "def __init__(self):\n\n # The logging object. \n # Example: self.log.info(f\"Current value of var: {my_var}\")\n self.log = logging.getLogger()\n self.checkout = checkout\n self.console = console\n self.brand = system.get_brand()\n\n self.init_setup = initial_setup.Initial_setup(\"express\")", "title": "" }, { "docid": "bfdaea197db0cff47053b65bfb7532e6", "score": "0.6011707", "text": "def __init__(self, Base):\n\t\thost = Base.host[Base.profile_num]\n\t\tport = Base.port[Base.profile_num]\n\t\tpassword = Base.password[Base.profile_num]\n\n\t\tif os.environ.has_key('MPD_HOST'):\n\t\t\tif '@' in os.environ['MPD_HOST']:\n\t\t\t\tpassword, host = os.environ['MPD_HOST'].split('@')\n\t\t\telse:\n\t\t\t\thost = os.environ['MPD_HOST']\n\t\tif os.environ.has_key('MPD_PORT'):\n\t\t\tport = int(os.environ['MPD_PORT'])\n\n\t\tmpdclient3.mpd_connection.__init__(self, host, port, password)\n\t\tmpdclient3.connect(host=host, port=port, password=password)", "title": "" }, { "docid": "c45be1ddee126d1eb7d4bd19d6ed6293", "score": "0.60041124", "text": "def initialize(self, *a, **kw):\n webapp2.RequestHandler.initialize(self, *a, **kw)\n uid = self.read_secure_cookie('user_id')\n self.user = uid and User.by_id(int(uid))", "title": "" }, { "docid": "e9a58ad1423663c83fca268c32959f60", "score": "0.6000279", "text": "def __init__(self, *args, **kwargs):\r\n self.context = kwargs.get(\"config\")\r\n if hasattr(self.context, 'database'):\r\n # XXX this should be replaced with connection_context instead\r\n self.context.database['database_host'] = \\\r\n self.context.database.database_hostname\r\n self.context.database['database_port'] = \\\r\n self.context.database.database_port\r\n self.context.database['database_name'] = \\\r\n self.context.database.database_name\r\n self.context.database['database_username'] = \\\r\n self.context.database.database_username\r\n self.context.database['database_password'] = \\\r\n self.context.database.database_password\r\n self.database = db.Database(self.context.database)\r\n else:\r\n # the old middleware\r\n self.database = db.Database(self.context)", "title": "" }, { "docid": "0327e3b478ec3075c81bcf6c315c059b", "score": "0.60001767", "text": "def setup_session_instance(url):\n\n # Setup a Transport Adapter (HTTPAdapter) with max_retries set\n okta_adapter = HTTPAdapter(max_retries=3)\n # Setup session instance\n session = requests.Session()\n # Use okta_adapter for all requests to endpoints that start with the base URL\n session.mount(url, okta_adapter)\n\n return session", "title": "" } ]
45f0c8a71b490493867c7e63cdc3ffd2
Compares this card to other, first by suit, then rank.
[ { "docid": "489036d087d5ef07476810a38ca87f7b", "score": "0.6491065", "text": "def __lt__(self, other):\n t1 = self.suit, self.rank\n t2 = other.suit, other.rank\n return t1 < t2", "title": "" } ]
[ { "docid": "8f35e44eef683a4c5596f634f120d6d9", "score": "0.7621892", "text": "def compare_rank(card1, card2):\n if card1.get_rank < card2.get_rank:\n return -1\n elif card1.get_rank > card2.get_rank:\n return 1\n else:\n return 0", "title": "" }, { "docid": "b8896c4676544c336d59ddd0a97693f5", "score": "0.75764483", "text": "def compare_cards(self, card_1: Card, card_2: Card) -> int:\n if card_1.suit == card_2.suit:\n # The cards are in the same suit. Comparison will be simple: check whether we're\n # dealing with the trump suit, and see which card is higher.\n # Note: if the suit is neither trump not the led suit, the cards are incomparable.\n\n if card_1.suit == self.deal.trump_suit:\n return -1 + 2 * int(Rank.order_trump()[card_1.rank] < Rank.order_trump()[card_2.rank])\n elif self.led_suit is not None and card_1.suit != self.led_suit:\n # The cards are both in a non-trump suit that was also not led.\n # This renders the cards effectively incomparable.\n return 0\n\n # The cards are in non-trump suit, but are either in the led suit,\n # or no led suit was yet specified. We can compare them in the normal way.\n # This is useful both for determine which card would win given the currently led suit,\n # or to determine which card is better for the current player to lead a fresh trick with.\n return -1 + 2 * int(Rank.order()[card_1.rank] < Rank.order()[card_2.rank])\n\n # If one of the cards is trump but the other is not, the one card always wins,\n # regardless of the suit that was to be followed.\n if card_1.suit == self.deal.trump_suit:\n return -1\n if card_2.suit == self.deal.trump_suit:\n return 1\n\n # Neither card is a trump card; check if either card follows suit. If one does,\n # then that card wins. Otherwise, both cards are irrelevant to the trick and thus equal.\n # Note that if no card has been led yet, these checks will evaluate to false as well.\n if card_1.suit == self.led_suit:\n return -1\n if card_2.suit == self.led_suit:\n return 1\n\n # The cards differ in suits, neither suit is trump, and neither suit was led.\n # The cards are effectively incomparable.\n return 0", "title": "" }, { "docid": "d9206901d65079d4ac41aa23c13940dc", "score": "0.74277055", "text": "def __eq__(self, othercard):\n \t return self.suit_by_number == othercard.suit_by_number and self.rank_by_number == othercard.rank_by_number", "title": "" }, { "docid": "c1f575e295f3e56427b5b7d95d8ab32a", "score": "0.72978324", "text": "def sort_by_suit_then_rank(self):\n self.all_cards.sort(key = lambda x:x.get_num_suit())\n self.all_cards.sort(key = lambda x:x.get_num_val())", "title": "" }, { "docid": "a368dbfe23d16e568390a285eb176776", "score": "0.7268278", "text": "def sort_by_rank_then_suit(self):\n self.all_cards.sort(key = lambda x:x.get_num_val())\n self.all_cards.sort(key = lambda x:x.get_num_suit())", "title": "" }, { "docid": "57e544a0317499b782212cc63e0e84ca", "score": "0.72336614", "text": "def __eq__(self, other):\n return self.suit == other.suit and self.rank == other.rank", "title": "" }, { "docid": "57e544a0317499b782212cc63e0e84ca", "score": "0.72336614", "text": "def __eq__(self, other):\n return self.suit == other.suit and self.rank == other.rank", "title": "" }, { "docid": "49424689b6ad9871ef47b291e21c8607", "score": "0.7053815", "text": "def compare_cards(card1, card2):\n\n card1_rank = self.ranks.index(card1[0].rank)\n card2_rank = self.ranks.index(card2[0].rank)\n\n player1_decksize = len(self.player1.stack.deck)\n player2_decksize = len(self.player2.stack.deck)\n\n if card1_rank > 
card2_rank:\n print card1[0].rank, \" > \", card2[0].rank, \" Player 1 wins.\", \n print \"Player 1 deck: \", player1_decksize, \" cards. Player 2 deck: \", player2_decksize, \"cards.\"\n return self.player1\n elif card1_rank < card2_rank:\n print card1[0].rank, \" < \", card2[0].rank, \" Player 2 wins.\"\n print \"Player 1 deck: \", player1_decksize, \" cards. Player 2 deck: \", player2_decksize, \"cards.\"\n return self.player2\n # WAR!!\n else:\n print \"A war is on!\"\n return None", "title": "" }, { "docid": "8870fabd250ca52eaa3d80acb7f12569", "score": "0.68805176", "text": "def __eq__(self, other):\n if not isinstance(other, FudgeCard):\n other = FudgeCard(other)\n\n return self.suit == other.suit and self.rank == other.rank", "title": "" }, { "docid": "71b46cb8141a8bc982aa010ac3abdb13", "score": "0.68183154", "text": "def __eq__(self, other):\r\n return self.determine_hand_rank() == other.determine_hand_rank()", "title": "" }, { "docid": "54d10ef912ddb58ba22563d3c3ddce52", "score": "0.67632586", "text": "def __cmp__(self, other):\n return cmp(self.rank, other.rank)", "title": "" }, { "docid": "f78db90bfad2027c9208b6b96d5f5ee9", "score": "0.6759735", "text": "def __lt__(self, othercard):\n \tt1 = self.suit_by_number, self.rank_by_number\n \tt2 = othercard.suit_by_number, othercard.rank_by_number\n \treturn t1 < t2", "title": "" }, { "docid": "24ebd978f210e3dd52f116aa2cbd731a", "score": "0.6757494", "text": "def is_equal(self, card):\n if self.rank == card.rank and self.suit == card.suit:\n return True\n return False", "title": "" }, { "docid": "eb3d2991c3e8564006fccce02d8fb5f9", "score": "0.6591251", "text": "def chan_rank(self, hand):\n if len(hand) != 2:\n return None\n rank = 0\n c1 = hand.pop()\n c2 = hand.pop()\n highest_card = max(c1, c2)\n if highest_card.isFaceCard():\n rank += {'A': 10, 'K': 8, 'Q': 7, 'J': 6}[highest_card.rank]\n else:\n rank += int(highest_card.rank) / 2.0\n gap = abs(c1._rank - c2._rank)\n if not gap:\n rank *= 2\n if rank == 5:\n rank += 1\n if rank < 5:\n rank = 5\n else:\n rank -= {1:1, 2:2, 3:4}.get(gap, 5)\n\n if highest_card._rank < 'Q': # nie mozna porownywac do Q\n if gap < 2:\n rank += 1\n\n if c1.suit == c2.suit:\n rank += 2\n\n return rank", "title": "" }, { "docid": "bd5aa4ab35bb2a92f567c0761ce43a39", "score": "0.6531653", "text": "def __eq__(self, other):\n suit, rank = self.code\n try:\n other_rank = self.RANKS[other]\n\n if rank == other_rank:\n return True\n return False\n except KeyError:\n try:\n other_suit, other_rank = other.code\n\n if suit == other_suit and rank == other_rank:\n return True\n return False\n except AttributeError:\n raise Exception", "title": "" }, { "docid": "6e886b4936a67b2d9667d5bffff98b95", "score": "0.65194535", "text": "def sort_by_suit(self):\n self.all_cards.sort(key = lambda x:x.get_num_suit())", "title": "" }, { "docid": "87d81f6363b67b10b5521ee755bb8f8b", "score": "0.6495327", "text": "def compareCards(self,c1,c2):\r\n return c1.value > c2.value", "title": "" }, { "docid": "dd7a593401c2e935161ece9c044fd318", "score": "0.6274823", "text": "def _calculate_rank(self, other):\n _, rank = self.code\n\n try:\n other_rank = Card.RANKS[other]\n except KeyError:\n try:\n _, other_rank = other.code\n except AttributeError:\n raise Exception\n\n return rank, other_rank", "title": "" }, { "docid": "fb6457fca40dd49760d75bc18bc663e5", "score": "0.6248779", "text": "def compare_cards(self):\n if self.myCurrent_Card() == self.otherCurrent_Card():\n print \"War\"\n self.I_declair_War()\n elif self.myCurrent_Card() < 
self.otherCurrent_Card():\n print \"Computer Win!\"\n self.otherlootpile.enqueue(self.myCurrent_Card)\n else:\n print \"Human Win!\"\n self.lootPile.enqueue(self.otherCurrent_Card)", "title": "" }, { "docid": "8c1b68c7c885bf971bbef521d5232ceb", "score": "0.6245743", "text": "def sort_by_rank(self):\n self.all_cards.sort(key = lambda x:x.get_num_val())", "title": "" }, { "docid": "b706eb800248389348e1bb83acca3b28", "score": "0.62149155", "text": "def determineWinner(hands, rank):\r\n\r\n hand1, hand2 = hands\r\n\r\n if rank == RANK_HIGH_CARD:\r\n # high-card hand ties can be ordered by natural list order\r\n lists = [sorted([card[RANK] for card in hand], reverse=True) for hand in hands]\r\n winner = PLAYER_1 if lists[0] > lists[1] else PLAYER_2\r\n\r\n # FORTUITOUS: this same branch actually works for both one pair and two pair comparisons!\r\n elif rank in [RANK_ONE_PAIR, RANK_TWO_PAIR]:\r\n ranks = [sorted([card[RANK] for card in hand]) for hand in hands] # list of each rank value, ascending order\r\n rankSets = [set([card[RANK] for card in hand]) for hand in hands] # different ranks appearing in the hand\r\n countDicts = [{rank: ranks[p].count(rank) for rank in rankSets[p]} for p in PLAYERS] # { rank: nCardsOfThatRank }\r\n pairRanks = [sorted([rank for rank, count in countDicts[p].items() if count == 2]) for p in PLAYERS]\r\n\r\n if pairRanks[P1] != pairRanks[P2]:\r\n # natural sort order of the lists takes care of same top pair, or different top pair\r\n winner = PLAYER_1 if pairRanks[P1] > pairRanks[P2] else PLAYER_2\r\n else:\r\n # same one or two pair: have to look at the other 3 cards or 1 other card\r\n otherRanks = [sorted([rank for rank, count in countDicts[p].items() if count == 1], reverse=True)\r\n for p in PLAYERS]\r\n winner = PLAYER_1 if otherRanks[P1] > otherRanks[P2] else PLAYER_2\r\n\r\n elif rank == RANK_STRAIGHT:\r\n # the only ties happen on non-tied high card\r\n highCard_l = [max([card[RANK] for card in hand]) for hand in hands]\r\n winner = PLAYER_1 if highCard_l[P1] > highCard_l[P2] else PLAYER_2\r\n\r\n else:\r\n raise NotImplemented\r\n\r\n return winner # PLAYER_1 or PLAYER_2\r", "title": "" }, { "docid": "cb6a777824b817e103bcf894b945ad2e", "score": "0.6205319", "text": "def compare(user_cards, computer_cards, user_total, computer_total):\n higher_card = user_cards.compare_to(computer_cards)\n if user_cards == higher_card:\n print(\"You win this round!\")\n print(\"\")\n user_total += 1\n else:\n print(\"The computer wins this round\")\n print(\"\")\n computer_total += 1\n return user_total, computer_total", "title": "" }, { "docid": "2f578bd6fc7e4b7c567b36ef6fca558a", "score": "0.62043405", "text": "def __eq__(self, other):\n return self.value == other.value and self.suit == other.suit", "title": "" }, { "docid": "8d97bfd2638062ebf134056fa049c940", "score": "0.60925794", "text": "def equal(self, other, board, returnBestHand = False):\n # Find the best 5-card hands that can be made by each Hand and the board\n bestHand1 = HandHelper.findBestHand(self.cards + board)\n bestHand2 = HandHelper.findBestHand(other.cards + board)\n\n # If the types of the best hands are different, return False\n if bestHand1[1] != bestHand2[1]:\n return False\n \n # If the types are the same, compare each of the cards in the best hands\n for i in range(5):\n # If any cards have different values, return False\n if bestHand1[0][i].value != bestHand2[0][i].value:\n return False\n\n # Return best hand tuple if flag is set\n if returnBestHand:\n return bestHand1\n\n else:\n return True", 
"title": "" }, { "docid": "206da2a7aed771f0561e8809d9541cc8", "score": "0.6089152", "text": "def __eq__(self, other):\n if not isinstance(other, Card):\n raise ValueError(\n \"Cannot compare a Card object with a {0}\".format(\n type(other)))\n\n return (self.trad == other.trad and\n self.simp == other.simp and\n self.zhuyin == other.zhuyin and\n self.canto == other.canto and\n self.pinyin == other.pinyin)", "title": "" }, { "docid": "376a20d18bfec2fc17dce0a508f65260", "score": "0.60638374", "text": "def card_rank(card):\n return 13 - card_order.index(card)", "title": "" }, { "docid": "46c47954355eb5673166344cc4dc72c8", "score": "0.605808", "text": "def sort_cards(self):\r\n for j in range(0, (len(self.deck_of_cards))):\r\n for i in range(0, (len(self.deck_of_cards) - j - 1)):\r\n pos = self.deck_of_cards[i]\r\n adj_pos = self.deck_of_cards[i + 1]\r\n if glob_rank.index(pos[1]) < glob_rank.index(adj_pos[1]):\r\n temp = self.deck_of_cards[i]\r\n self.deck_of_cards[i] = self.deck_of_cards[i + 1]\r\n self.deck_of_cards[i + 1] = temp", "title": "" }, { "docid": "8bee732a0b026d11ed6d7739a6c997f1", "score": "0.6049113", "text": "def __gt__(self, other):\r\n return self.determine_hand_rank() > other.determine_hand_rank()", "title": "" }, { "docid": "93ad1439477cdb3d665009737bc499b8", "score": "0.6034167", "text": "def test_jack_correct_suit(self):\n hand = [Card(1, 0), Card(2, 0), Card(7, 0), Card(10, 3)]\n cut = Card(12, 3)\n game = Game(hand, cut)\n game.score()\n assert game.jack_score == 1", "title": "" }, { "docid": "f064342f7a66504da539937a42fdcdae", "score": "0.60032976", "text": "def card_ranks(self):\n for card in self.cards:\n self.ranks.append(card.rank)\n self.ranks.sort(reverse=True)", "title": "" }, { "docid": "e69c74b2e177f6d4096720dbb992e6a3", "score": "0.6001701", "text": "def change_suit(card1, card2):\n if card1.suit in Cards.black_suits:\n return True if card2.suit in Cards.red_suits else False\n else:\n return True if card2.suit in Cards.black_suits else False", "title": "" }, { "docid": "282a90e078581205c9173858584e95ca", "score": "0.5956985", "text": "def score(hand):\n\n score = 0\n rank_dict = {k: k for k in range(2, 11)}\n rank_dict.update({'A': (1, 11), 'J': 10, 'Q': 10, 'K': 10})\n\n for card in hand.hand:\n if card.rank != 'A':\n score += rank_dict[card.rank]\n\n for card in hand.hand:\n if card.rank == 'A':\n if score <= 10:\n score += 11\n else:\n score += 1\n\n return score", "title": "" }, { "docid": "39c538b51e5b76a323ccc0ce50d5f418", "score": "0.5951507", "text": "def __cmp__(self, other):\n\t\tif self.score > other.score:\n\t\t\treturn 1\n\t\telif self.score == other.score:\n\t\t\treturn 0\n\t\telif self.score < other.score:\n\t\t\treturn -1", "title": "" }, { "docid": "416056c1f740daf0a29ae0c203e24e6f", "score": "0.592565", "text": "def compare_ranks(self, symbol, seq1, seq2):\n\n return seq1.get_rank(symbol) - seq2.get_rank(symbol)", "title": "" }, { "docid": "e91809c33990aab971a9991f0320503d", "score": "0.5912119", "text": "def compare(self, card, context):\r\n return False", "title": "" }, { "docid": "d9080c6c989c789a7d52ebc7d1581a94", "score": "0.58848065", "text": "def __lt__(self, other):\r\n return self.determine_hand_rank() < other.determine_hand_rank()", "title": "" }, { "docid": "061da16cde776b001984073f432c2def", "score": "0.58550495", "text": "def rankCard(hand):\n \n for r, s in hand:\n ranks = ['--23456789TJQKA'.index(r) for r, s in hand]\n ranks.sort(reverse=True)\n return [5, 4, 3, 2, 1] if (ranks == [14, 5, 4, 3, 2]) else ranks", "title": "" }, 
{ "docid": "b75de966b7428f92dc56023376ee1020", "score": "0.58456606", "text": "def testRanks(self): # unit test for ranks 1-13\n \n for i in range(2,15):\n myCard = Card(i,'c') # create i of clubs\n self.assertEqual(myCard.rank(),i) # verifies that the card's rank is i", "title": "" }, { "docid": "7fc3cc552e15ce304ab3d2653055ed5f", "score": "0.58326954", "text": "def deck_of_card():\r\n\r\n suit = [\"Clubs\", \"Diamonds\", \"Hearts\", \"Spades\"]\r\n rank = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"Jack\", \"Queen\", \"King\", \"Ace\"]\r\n\r\n cartesian_product = product(suit, rank) # Using cartesian product to create deck\r\n list_cards = list(cartesian_product)\r\n random.shuffle(list_cards) # Shuffling the cards\r\n distribute_card(list_cards) # Distributing the cards\r\n return", "title": "" }, { "docid": "fe949862b027b231c1680074986e3e82", "score": "0.58068496", "text": "def find_winner(sc, player1, player2, check):\r\n\r\n round_winner = None\r\n\r\n if player_1.selected_card and player_2.selected_card:\r\n pygame.time.delay(1000)\r\n\r\n # determines who chosed more valuable card\r\n if player_1.selected_card.number.value > player_2.selected_card.number.value:\r\n round_winner = player_1\r\n # adds the value of the prize card to the score of the player\r\n round_winner.score += Prize_Deck[0].number.value\r\n check = True # this changes the prize card\r\n\r\n elif player_1.selected_card.number.value < player_2.selected_card.number.value:\r\n round_winner = player_2\r\n # adds the value of the prize card to the score of the player\r\n round_winner.score += Prize_Deck[0].number.value\r\n check = True # this changes the prize card\r\n\r\n elif player_1.selected_card.number.value == player_2.selected_card.number.value:\r\n check = True\r\n Prize_Deck.pop(0) # this remove the prize card which just played\r\n player1.selected_card, player2.selected_card = None, None # restarts the card selected by players\r\n\r\n return round_winner, check # returns the player who just won the round\r", "title": "" }, { "docid": "7dd8be241e3ad697fe42f5de22f2c91c", "score": "0.5787823", "text": "def test_hand_sort_cards_order(self):\n low = self.hand.get_card(0)\n for i in range(self.hand.num_cards()):\n assert self.hand.get_card(i) >= low", "title": "" }, { "docid": "38d95e6ed7771c5e641b9d4750ec93e4", "score": "0.57550806", "text": "def test_community_chest_scramble(self):\n community_chest1 = landings.Chance(scramble=True)\n community_chest2 = landings.Chance(scramble=True)\n\n assert community_chest1.cards != community_chest2.cards", "title": "" }, { "docid": "1a689308f532b87b87a8be242695bcf2", "score": "0.5741821", "text": "def do_turn(deck, history, discard, player1, hand1, player2, hand2, current_player):\n # We choose to make the deck running out be a tie\n if len(deck) == 0:\n return 0\n\n current_player_hand = hand1 if current_player == player1 else hand2\n other_player_hand = hand2 if current_player == player1 else hand1\n _, _, player_ending = player_turn(deck, history, discard, current_player, current_player_hand)\n\n if player_ending:\n score = score_hand(current_player_hand, other_player_hand)\n score_sign = 1 if current_player_hand == hand1 else -1\n return score_sign * score", "title": "" }, { "docid": "f692b37466f3a9d275e26a6c8cc09d3b", "score": "0.57390815", "text": "def score(self) -> int:\n worth_ten = ['10', 'JACK', 'QUEEN', 'KING']\n hand_score = 0\n for i in self.cards:\n if i.value == \"ACE\":\n continue\n elif i.value in worth_ten:\n hand_score += 10\n 
i.blackjack_value = 10\n else:\n hand_score += int(i.value)\n i.blackjack_value = int(i.value)\n for i in self.cards:\n if i.value == \"ACE\":\n if hand_score + 11 > 21:\n hand_score += 1\n i.blackjack_value = 1\n else:\n hand_score += 11\n i.blackjack_value = 11\n self.soft_hand = True\n return hand_score", "title": "" }, { "docid": "bc898000536967df86c24be0c9a3eb8e", "score": "0.56918937", "text": "def check_deck_equality(deck1, deck2):\n\n for i in xrange(len(deck1.deck)):\n self.assertEqual(deck1.deck[i].rank, deck2.deck[i].rank)\n self.assertEqual(deck1.deck[i].suit, deck2.deck[i].suit)", "title": "" }, { "docid": "14f2c1301e80d4f4ab1b076105b155c8", "score": "0.5676948", "text": "def sort_hand(self):\n self.cards.sort(key=lambda k: [k.get_suit().value, k.get_value()])", "title": "" }, { "docid": "8d32e7f641cfd303aa17437949d4eebe", "score": "0.56697977", "text": "def __init__(self, suit=0, rank=2):\n self.suit = suit\n self.rank = rank", "title": "" }, { "docid": "9c514d167cd28cb38ac3bc29a0c47477", "score": "0.56526977", "text": "def ai(self, trump, played_cards):\n results = hand()\n try:\n print \"\"\"Try to follow suit\"\"\"\n results.cards = self.search(suit = played_cards.cards[0].suit)\n print results\n if ((((ranks.index(trump) + ranks.index(played_cards.cards[0].suit)) % 2) == 0) and (trump != played_cards.cards[0].suit)):\n for card in results.cards:\n if(card.rank == \"J\"):\n results.remove(card(played_cards.cards[0].suit, \"J\"))\n results.bubble_sort()\n if (len(results.cards) != 1):\n index = random.randrange(0, len(results.cards) - 1)\n index = self.cards.index(results.cards[index]) \n else:\n index = 0\n except IndexError:\n print \"\"\"This is the first player of trick aka the Leader\"\"\" \n try:\n print \"\"\"What should I lead?\"\"\"\n if (self.cards[0].value(trump) == 28):\n index = 0\n else:\n print \"\"\"Don't have the right\"\"\"\n for card in self.cards:\n if (card.value(trump) < 20):\n print \"\"\"Highest non-trump\"\"\"\n index = self.cards.index(card)\n break\n else:\n print \"\"\"All trump???\"\"\"\n index = 0\n# index = random.randrange(0, len(self.cards) - 1)\n except ValueError:\n print \"\"\"This is the last trick\"\"\"\n index = 0\n except ValueError:\n print \"\"\"Can't follow suit (not leader)\"\"\"\n try:\n results.cards = self.search(suit = trump)\n print results\n results.bubble_sort()\n if (len(results.cards > 1)):\n index = random.randrange(0, len(results.cards) - 1)\n else:\n index = 0\n except ValueError:\n print \"\"\"No trump... 
through smallest card\"\"\"\n cards = self.search(rank = self.cards[len(self.cards) - 1].rank)\n try:\n print \"\"\"More than one?\"\"\"\n index = random.randrange(0, len(cards) - 1)\n except ValueError:\n print \"\"\"The first (and only) one\"\"\"\n index = 0\n finally:\n print \"\"\"Reassign index from \"cards\" to self.cards\"\"\"\n index = self.cards.index(cards[index]) \n except:\n print \"\"\"This is the last trick\"\"\"\n index = 0\n else:\n pass\n finally:\n pass\n print \"(\" , self.index, \": \" , self.cards[index] , \")\", self\n \n return self.cards[index]", "title": "" }, { "docid": "2a136fe19765c6da18d80b9bc7b01472", "score": "0.56492484", "text": "def compare(self, other):\n\n distance = self.hash - other.hash\n score = 1 - distance / 64.0\n return score", "title": "" }, { "docid": "1bca913e31aba3bda56606e736966eef", "score": "0.5639462", "text": "def less(self, other, board, returnBestHand = False):\n # Find the best 5-card hands that can be made by each Hand and the board\n bestHand1 = HandHelper.findBestHand(self.cards + board)\n bestHand2 = HandHelper.findBestHand(other.cards + board)\n\n # If the types of the best hands are different, return whether bestHand1 is the less valuable type\n if bestHand1[1] != bestHand2[1]:\n # Return best hand tuple if flag is set\n if returnBestHand:\n if HandHelper.types.index(bestHand1[1]) < HandHelper.types.index(bestHand2[1]):\n return bestHand1\n \n else:\n return False\n \n else:\n return HandHelper.types.index(bestHand1[1]) < HandHelper.types.index(bestHand2[1])\n \n # If the types are the same, compare each of the cards in the best hands\n for i in range(5):\n # If any cards have different values, return whether bestHand1 has the lower value card\n if bestHand1[0][i].value != bestHand2[0][i].value:\n # Return best hand tuple if flag is set\n if returnBestHand:\n if bestHand1[0][i] > bestHand2[0][i]:\n return bestHand1\n \n else:\n return False\n \n else:\n return bestHand1[0][i] < bestHand2[0][i]\n\n return False", "title": "" }, { "docid": "1c3f02dd778cdf82e408947678bfe5a3", "score": "0.5636835", "text": "def test():\n \n c1 = Card(3,3)\n c2 = Card(13,1)\n c3 = Card(8,2)\n c4 = Card(8,2)\n\n print('c1 (should be 3D) = ', c1)\n print('c2 (should be KS) = ', c2)\n print('c3 (should be 8H) = ', c3)\n\n errors = 0;\n\n if c1.rank != 3:\n print('ERROR: c1.rank gave ', c1.rank,\n ' but should be 3')\n errors = errors + 1\n if c2.rank != 13:\n print('ERROR: c2.rank gave ', c2.rank,\n ' but should be 13')\n errors = errors + 1\n if c3.suit != 2:\n print('ERROR: c3.suit gave ', c3.suit,\n ' but should be 2')\n errors = errors + 1\n if not c3.is_equal(c4):\n print('ERROR: c3.is_equal(c4) gave ', c3.is_equal(c4),\n ' but should be True')\n errors = errors + 1\n if c3.is_equal(c2):\n print('ERROR: c3.is_equal(c2) gave ', c3.is_equal(c4),\n ' but should be False')\n errors = errors + 1\n print('Tests complete, with ', errors, ' errors')", "title": "" }, { "docid": "b81ae16da08ae3f886b6e59d4ff8c708", "score": "0.56362313", "text": "def rankHand(hand):\r\n\r\n ranks = sorted([card[RANK] for card in hand]) # list of each rank value, ascending order\r\n rankSet = set([card[RANK] for card in hand]) # different ranks appearing in the hand\r\n rankCount_d = {rank: ranks.count(rank) for rank in rankSet} # { rank: nCardsOfThatRank }\r\n\r\n # list of counts for the diff. 
ranks (e.g., 4-of-a-kind would be [4, 1] or [1, 4]):\r\n rankCounts = list(rankCount_d.values())\r\n\r\n isFlush = all([card[SUIT] == hand[0][SUIT] for card in hand])\r\n\r\n # special case for the A2345 straight\r\n lowStraight = all([r in ranks for r in [14, 2, 3, 4, 5]])\r\n\r\n # top card is 4 more than bottom card in a sequence\r\n maxRank = max(ranks) # rank of the highest card in the hand\r\n straightRanks = list(range(maxRank - 4, maxRank + 1))\r\n isStraight = lowStraight or all([r in ranks for r in straightRanks])\r\n\r\n # return the rank value\r\n if isStraight and isFlush:\r\n rank = RANK_STRAIGHT_FLUSH\r\n elif isFlush:\r\n rank = RANK_FLUSH\r\n elif isStraight:\r\n rank = RANK_STRAIGHT\r\n elif len(rankCounts) == 2:\r\n # 5-0 not possible: we either have:\r\n # 4 of one rank and 1 of another\r\n # OR: 3 of one rank and 2 of another\r\n rank = RANK_FOUR_OF_A_KIND if 4 in rankCounts else RANK_FULL_HOUSE\r\n elif 3 in rankCounts:\r\n rank = RANK_THREE_OF_A_KIND\r\n elif rankCounts.count(2) == 2:\r\n rank = RANK_TWO_PAIR\r\n elif rankCounts.count(2) == 1:\r\n rank = RANK_ONE_PAIR\r\n elif rankCounts.count(1) == 5:\r\n # 5 different ranks that's not straight or flush\r\n rank = RANK_HIGH_CARD\r\n else:\r\n raise RuntimeError(\"LOGIC PROBLEM\")\r\n\r\n return rank", "title": "" }, { "docid": "def29b0318c7aee41f51d0606690c646", "score": "0.5635297", "text": "def greater(self, other, board, returnBestHand = False):\n # Find the best 5-card hands that can be made by each Hand and the board\n bestHand1 = HandHelper.findBestHand(self.cards + board)\n bestHand2 = HandHelper.findBestHand(other.cards + board)\n\n # If the types of the best hands are different, return whether bestHand1 is the more valuable type\n if bestHand1[1] != bestHand2[1]:\n # Return best hand tuple if flag is set\n if returnBestHand:\n if HandHelper.types.index(bestHand1[1]) > HandHelper.types.index(bestHand2[1]):\n return bestHand1\n \n else:\n return False\n \n else:\n return HandHelper.types.index(bestHand1[1]) > HandHelper.types.index(bestHand2[1])\n \n # If the types are the same, compare each of the cards in the best hands\n for i in range(5):\n # If any cards have different values, return whether bestHand1 has the higher value card\n if bestHand1[0][i].value != bestHand2[0][i].value:\n # Return best hand tuple if flag is set\n if returnBestHand:\n if bestHand1[0][i] > bestHand2[0][i]:\n return bestHand1\n \n else:\n return False\n \n else:\n return bestHand1[0][i] > bestHand2[0][i]\n\n return False", "title": "" }, { "docid": "2210602e42e372ae6663b0840093eba8", "score": "0.56273013", "text": "def test_get_ranking(self):\n self.assertEqual(self.card.get_ranking(), 11)", "title": "" }, { "docid": "bb727655c96a1df3e37a1725eb2aa20c", "score": "0.5605446", "text": "def cmp_high_card(a, b):\n assert len(a) == len(b)\n _a = sorted(a, reverse=True)\n _b = sorted(b, reverse=True)\n for i in range(len(a)):\n if cmp(_a[i], _b[i]) == 0:\n continue\n else:\n return cmp(_a[i], _b[i])\n return 0", "title": "" }, { "docid": "78ebabf03c729e11fb4a7a5a28af33c3", "score": "0.56031656", "text": "def test_hand_sort_cards_simple(self):\n assert self.hand.get_card(0).value == 1", "title": "" }, { "docid": "e65c1731f57a5717ca783925d187c5ab", "score": "0.55382574", "text": "def __cmp__(self, other):\n assert isinstance(other, Caption)\n if self.score == other.score:\n return 0\n elif self.score < other.score:\n return -1\n else:\n return 1", "title": "" }, { "docid": "7719b2e15afbcd768a946f5a2afe8932", "score": "0.5532947", "text": 
"def compare(self, other):\n score = 0;\n for i in range(len(self.jobs)):\n score += self.jobs[i].compare(other.jobs[i])\n return PerfUtil.normalize(score)", "title": "" }, { "docid": "4065549834c61c23587238af7dde90b1", "score": "0.55249417", "text": "def __eq__(self, other: 'Card') -> bool:\n return self.int_representation == other.int_representation", "title": "" }, { "docid": "fc37575b67412ab335e22496196aeaf9", "score": "0.5523313", "text": "def populatebyrank(self,number_jokers=0,joker_rank=0,ranks = RANKS,suits = SUITS,rank_values = {\"A\":1,\"2\":2,\"3\":3,\"4\":4,\"5\":5,\"6\":6,\"7\":7,\"8\":8,\"9\":9,\"10\":10,\"J\":10,\"Q\":10,\"K\":10}):\n\t\tfor rank in ranks:\n\t\t\tfor suit in suits:\n\t\t\t\tself.add(Card(rank, suit))\n\t\t\t\tDeck.VAL_MATRIX[rank+suit]=rank_values[rank]\n\t\tfor i in range(number_jokers):\n\t\t\tself.add(Card(\"Joker\",\"\"))\n\t\t\tDeck.VAL_MATRIX[\"Joker\"] = str(joker_rank)\n\t\tCard.VAL_MATRIX = Deck.VAL_MATRIX", "title": "" }, { "docid": "d46e9adfe28927107fb495fee98b70bf", "score": "0.54896235", "text": "def rank_cards(self, cards):\n\n ret_dict = {}\n for card in cards:\n rank = self.find_rank(cards, card)\n ret_dict[str(round(rank, 4))] = card\n\n return ret_dict", "title": "" }, { "docid": "9d0850cbb999a63391dcf96be86ef858", "score": "0.54856294", "text": "def compare(self, other):\n self._normalize_keys()\n other._normalize_keys()\n \n other_keys = other.sorted_keys\n \n other_len = len(other_keys)\n self_len = len(self.sorted_keys)\n \n # Sanity check\n if other_len == 0 or self_len == 0:\n print \"ERROR!\"\n return 0\n \n co_occur = 0.0\n \n for other_key in other.sorted_keys:\n if self.sorted_dict.has_key(other_key):\n self_score = int(self.sorted_dict[other_key])\n other_score = int(other.sorted_dict[other_key])\n# print \"Match: \" + other_key + \" \" + str(self_score) \n co_occur += (1 + ( self_score * 0.2))\n \n \n co_occur_normalized = round((co_occur * self.MAX_ENTRIES) / other_len)\n \n return co_occur_normalized", "title": "" }, { "docid": "33cc275400775b183d04a878e7ff97c4", "score": "0.5472682", "text": "def __lt__(self, other: 'Card') -> bool:\n return int(self) < int(other)", "title": "" }, { "docid": "6d19cb1491ffe752b4330313a6b166e1", "score": "0.54718596", "text": "def __eq__(self, other):\n return self.score == other.score", "title": "" }, { "docid": "2ab1ba2b06809782f8929873df795b2c", "score": "0.54635984", "text": "def run_round(self):\n \n def compare_cards(card1, card2):\n \"\"\"\n What do we want to get out of compare_cards?\n 1. Player 1 wins (1>2): player 1 is given all the played cards\n 2. Player 2 wins (2>1): player 2 is given all the played cards\n 3. WAR!! (1=2): We do a war\n\n \"\"\"\n\n card1_rank = self.ranks.index(card1[0].rank)\n card2_rank = self.ranks.index(card2[0].rank)\n\n player1_decksize = len(self.player1.stack.deck)\n player2_decksize = len(self.player2.stack.deck)\n\n if card1_rank > card2_rank:\n print card1[0].rank, \" > \", card2[0].rank, \" Player 1 wins.\", \n print \"Player 1 deck: \", player1_decksize, \" cards. Player 2 deck: \", player2_decksize, \"cards.\"\n return self.player1\n elif card1_rank < card2_rank:\n print card1[0].rank, \" < \", card2[0].rank, \" Player 2 wins.\"\n print \"Player 1 deck: \", player1_decksize, \" cards. 
Player 2 deck: \", player2_decksize, \"cards.\"\n return self.player2\n # WAR!!\n else:\n print \"A war is on!\"\n return None\n\n def draw_cards(num_to_draw, player):\n \"\"\"\n \"\"\"\n card = player.stack.deal_n_cards(num_to_draw)\n if len(card) == 0:\n self.losers_list.append(player)\n\n return card\n\n player1_card = draw_cards(1, self.player1)\n player2_card = draw_cards(1, self.player2)\n # Check if the game is done after all draws are complete.\n if len(self.losers_list) > 0:\n self.finish_game()\n winner = None\n\n while not winner:\n \n winner = compare_cards(player1_card, player2_card)\n \n # Give all played cards to the table.\n self.table.played_cards.accept_n_cards(player1_card)\n self.table.played_cards.accept_n_cards(player2_card)\n\n if not winner:\n # initialize a war\n self.table.played_cards.accept_n_cards(draw_cards(3, self.player1))\n self.table.played_cards.accept_n_cards(draw_cards(3, self.player2))\n player1_card = draw_cards(1, self.player1)\n player2_card = draw_cards(1, self.player2)\n # Check if the game is done after all draws are complete.\n if len(self.losers_list) > 0:\n self.finish_game()\n \n # Give all the cards in the played_cards to the winner\n winner.stack.accept_n_cards(self.table.played_cards.deal_n_cards(len(self.table.played_cards.deck)))", "title": "" }, { "docid": "21a196d52cecf3d92cab3156ec3bdb12", "score": "0.5454724", "text": "def has_same_color(self, other):\n suit = self.get_suit()\n otherSuit = other.get_suit()\n if suit == 'D' or suit == 'H':\n if otherSuit == 'D' or otherSuit == 'H':\n return True\n elif suit == 'C' or suit == 'S':\n if otherSuit == 'C' or otherSuit == 'S':\n return True\n return False", "title": "" }, { "docid": "8fb8a82ebc7970a7ab2e2133fce9b7b2", "score": "0.5452544", "text": "def test_two_pairs(self):\n hand = [Card(12, 0), Card(2, 0), Card(2, 0), Card(8, 0)]\n cut = Card(12, 3)\n game = Game(hand, cut)\n game.score()\n assert game.pair_score == 4", "title": "" }, { "docid": "3180ea7c3387c6f14960eaf7e6e2cafc", "score": "0.54482335", "text": "def test_deckcards(self):\n expected_list = [\n 'HA', 'H2', 'H3', 'H4', 'H5', 'H6', 'H7', 'H8', 'H9', 'HT', 'HJ', 'HQ', 'HK',\n 'SA', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7', 'S8', 'S9', 'ST', 'SJ', 'SQ', 'SK',\n 'DA', 'D2', 'D3', 'D4', 'D5', 'D6', 'D7', 'D8', 'D9', 'DT', 'DJ', 'DQ', 'DK',\n 'CA', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'CT', 'CJ', 'CQ', 'CK',\n ]\n testing_class = blackjack_class.Deck()\n self.assertEqual(testing_class.deck_cards().sort(), expected_list.sort())", "title": "" }, { "docid": "86458b734a3266ccc84e48097bb52c90", "score": "0.54413754", "text": "def test_rank():\n deck = CardClass.Deck()\n card = deck.pop_card()\n assert val_rank(card.rank) == True", "title": "" }, { "docid": "779b6fb42153b191793371ed375d5f6f", "score": "0.54396695", "text": "def __initialize_distribute_cards(self):\n\n\t\t# Build main_deck\n\t\tmain_deck = []\n\t\tfor suit in [\"Diamonds\",\"Hearts\",\"Spades\",\"Clubs\"]:\n\t\t for value in range(1,NUM_CARDS_WITH_PLAYER+1):\n\t\t main_deck.append(Card(suit,value))\n\n\t\trandom.shuffle(main_deck)\n\n\t\t# choose the card joker and remove it from deck\n\t\tcard_joker = main_deck[0]\n\t\tcard_joker.rect.x = BORDER_GAP\n\t\tcard_joker.rect.y = MID_CARD_POS\n\t\tmain_deck.remove(main_deck[0])\n\n\t\t# add the extra joker card to the deck and shuffle\n\t\tmain_deck.append(Card(\"Joker\",0))\n\t\trandom.shuffle(main_deck)\n\n\n\t\tplayer_deck=[]\n\t\tfor i in 
range(NUM_CARDS_WITH_PLAYER):\n\t\t\tplayer_deck.append(main_deck[i])\n\t\t\tmain_deck.remove(main_deck[i])\n\n\t\t# Define co-ordinates for player_deck\n\t\tfor i in range(NUM_CARDS_WITH_PLAYER):\n\t\t\tplayer_deck[i].rect.x = BORDER_GAP + i * CARD_GAP\n\t\t\tplayer_deck[i].rect.y = DISPLAY_HEIGHT - CARD_HEIGHT - BORDER_GAP\n\n\t\tcomputer_deck=[]\n\t\tfor i in range(NUM_CARDS_WITH_PLAYER):\n\t\t\tcomputer_deck.append(main_deck[i])\n\t\t\tmain_deck.remove(main_deck[i])\n\n\n\t\tdiscard_pile=[]\n\t\tdiscard_pile.append(main_deck[0])\n\t\tmain_deck.remove(main_deck[0])\n\n\t\treturn player_deck, computer_deck, card_joker, main_deck, discard_pile", "title": "" }, { "docid": "56d9d16348c1604544beb2d5b4333029", "score": "0.5434774", "text": "def test_PlayCard_returns_lead_suit_card(self):\n trump = \"D\"\n lead_card = Card.Card(\"C\", random.choice(card_ranks))\n\n # Player has one card that is the same card as the trump card. Trump card is last card.\n players_cards = tuple((Card.Card(card[0], card[1]) for card in ['HA', 'D2', 'HJ', 'S9', 'H5', 'H2', 'D8', 'DA', 'SQ', 'H3', 'S7', 'S6', 'C9']))\n player = Player()\n return_card = player.PlayCard(players_cards, (lead_card, ), trump, \"S\", [], (0, 0))\n self.assertIsInstance(return_card, Card.Card, \"PlayCard must return a card\")\n #self.assertEqual(return_card, players_cards[-1], \"The last card should be the one played. It is the only trump card\")", "title": "" }, { "docid": "80312dfcca92b0e6a3791d826b37ad4f", "score": "0.543446", "text": "def determine_hand_rank(self):\r\n return sum([s.rank for s in self._hand])", "title": "" }, { "docid": "89fd0e846feab19dc3c62bccdecbc299", "score": "0.5433561", "text": "def calculate_score(cards):\n\n # Checking up if the user or computer have a blackjack\n # black_jack represents 0\n if sum(cards) == 21 and len(cards) == 2:\n return 0\n\n if 11 in cards and sum(cards) > 21:\n cards.remove(11)\n cards.append(1)\n\n return sum(cards)", "title": "" }, { "docid": "9b3c48f8b4319f291334bc5780b1e476", "score": "0.5415433", "text": "def test_get_ranking(self):\n card = Card.objects.create(suit=Card.CLUB, rank=\"jack\")\n self.assertEqual(card.get_ranking(), 11)", "title": "" }, { "docid": "6b07cbfa22daf8378b29dc3c9bade4f4", "score": "0.54130304", "text": "def test_jack_wrong_suit(self):\n hand = [Card(1, 0), Card(2, 0), Card(7, 0), Card(10, 0)]\n cut = Card(12, 3)\n game = Game(hand, cut)\n game.score()\n assert game.jack_score == 0", "title": "" }, { "docid": "b5f2c1fed51b29944d936360530ddb06", "score": "0.5402966", "text": "def __init__(self, rank, suit):\n self.rank = rank\n self.suit = suit", "title": "" }, { "docid": "b5f2c1fed51b29944d936360530ddb06", "score": "0.5402966", "text": "def __init__(self, rank, suit):\n self.rank = rank\n self.suit = suit", "title": "" }, { "docid": "e9b7b86e78f96e484612e339deb2f586", "score": "0.5399992", "text": "def convert_card_to_int(suit,rank):\n\n if(suit == \"Spades\"):\n suitInt = 0\n elif(suit == \"Hearts\"):\n suitInt = 1\n elif(suit == \"Diamonds\"):\n suitInt = 2\n elif(suit == \"Clubs\"):\n suitInt = 3\n else:\n suitInt = -1\n\n if(rank == \"Ace\"):\n rankInt = 1\n elif(rank == \"Two\"):\n rankInt = 2\n elif(rank == \"Three\"):\n rankInt = 3\n elif(rank == \"Four\"):\n rankInt = 4\n elif(rank == \"Five\"):\n rankInt = 5\n elif(rank == \"Six\"):\n rankInt = 6\n elif(rank == \"Seven\"):\n rankInt = 7\n elif(rank == \"Eight\"):\n rankInt = 8\n elif(rank == \"Nine\"):\n rankInt = 9\n elif(rank == \"Ten\"):\n rankInt = 10\n elif(rank == \"Jack\"):\n rankInt = 11\n 
elif(rank == \"Queen\"):\n rankInt = 12\n elif(rank == \"King\"):\n rankInt = 13\n else:\n rankInt = -1\n\n return(suitInt,rankInt)", "title": "" }, { "docid": "30b4ba5ac6c7b79b71a198c2056623fe", "score": "0.53959817", "text": "def sort(self):\r\n self._hand.sort(key=lambda x: x.rank)", "title": "" }, { "docid": "aa9a2b9909072f49ab416813aeba0c4c", "score": "0.53855914", "text": "def condorcet(ranked):\r\n pairs = [(candidateA, candidateB) for candidateA in ranked[0] for candidateB in ranked[0] if candidateA < candidateB]\r\n victories = {}\r\n score = {candidate: 0 for candidate in ranked[0]}\r\n for pair in pairs:\r\n subset_ranked = {elector: [candidate for candidate in ranked[elector] if candidate in pair] for elector in ranked}\r\n victories[pair] = N_rounds(subset_ranked, 1)\r\n score[victories[pair]] += 1\r\n score = {candidate: score[candidate] for candidate in score if score[candidate] == max([value for key, value in score.items()])}\r\n if len(score) == 1:\r\n return [candidate for candidate in score.keys()][0]\r\n subset_ranked = {elector: [candidate for candidate in ranked[elector] if candidate in [candidate for candidate in score.keys()]] for elector in ranked}\r\n if sorted(set(subset_ranked[0])) != sorted(set(ranked[0])):\r\n return (condorcet(subset_ranked))", "title": "" }, { "docid": "ff76ebd8b3b863d5d4e2f754b33655ad", "score": "0.5383705", "text": "def __init__(self, rank, suit):\n\t\tself.rank = rank\n\t\tself.suit = suit", "title": "" }, { "docid": "50a192e2867d73d0d555ffd61bf6f275", "score": "0.53738815", "text": "def play(n):\n assert type(n)==int,\"The number of cards must be an int\"\n assert 0<n<=16, \"The number of cards must be inclued between 1 and 16.\"\n player1,player2 = init(n)\n card_on_table=[]\n while len(player1)!=0 and len(player2)!=0 :\n card1=player1[0]\n card2=player2[0]\n print (\"Card player 1 :\", end=\" \")\n card.print(card1)\n print (\"Card player 2 :\", end=\" \")\n card.print(card2)\n comp=card.compare_value(card1,card2)\n if comp==1:\n player1+=card1,card2\n if len(card_on_table)!=0:\n player1+=card_on_table\n card_on_table=[]\n elif comp==-1 :\n player2+=card1,card2\n if len(card_on_table)!=0:\n player2+=card_on_table\n card_on_table=[]\n else:\n card_on_table+=card1,card2\n player1=player1[1:]\n player2=player2[1:]\n if len(player1)!=0:\n print (\"The winner is the player 1 !!!\")\n else :\n print (\"The winner is the player 2 !!!\")", "title": "" }, { "docid": "765ee4f98ac8359500ba08703a071019", "score": "0.53708273", "text": "def ltc(self) -> int:\r\n ltc = 0\r\n\r\n for suit, ranks in self.sorted_cards_dict.items():\r\n if len(ranks) == 1:\r\n if ranks != \"A\":\r\n ltc += 1\r\n\r\n if len(ranks) == 2:\r\n for rank in ranks:\r\n if rank not in \"AK\":\r\n ltc += 1\r\n\r\n if len(ranks) >= 3:\r\n for rank in ranks[:3]:\r\n if rank not in \"AKQ\":\r\n ltc += 1\r\n\r\n return ltc", "title": "" }, { "docid": "00586630d3307d428750f81c5f7437e0", "score": "0.5368183", "text": "def __init__(self, cards: Sequence[Card]):\n if not isinstance(cards, Sequence):\n raise TypeError\n elif not all([isinstance(card, Card) for card in cards]):\n raise ValueError\n elif len(cards) != 13:\n raise ValueError\n else:\n self.cards = cards\n \n self.cards_sorted = sorted(self.cards, key = lambda card: (card.suit.value, card.rank.value))\n \n self.dict_rep = {}\n \n for card in self.cards_sorted:\n self.dict_rep[card.suit.name] = \"\".join([i.rank.name for i in self.cards_sorted if i.suit.name == card.suit.name])", "title": "" }, { "docid": 
"694b8b248f95d4225fbf23fbb767f2f7", "score": "0.53616524", "text": "def comparePlayers(self, players):\n\t\thands = []\n\t\tfor player in players:\n\t\t\tfor hand in player.hands:\n\t\t\t\thands.append((hand, player))\n\t\t### Rule 1 - Blackjack\n\t\twinners = []\n\t\tfor hand in hands:\n\t\t\tif hand[0].isBlackjack():\n\t\t\t\twinners.append(hand)\n\t\tif winners != []:\n\t\t\treturn winners\n\n\t\t### Rule 2 - 21\n\t\tfor hand in hands:\n\t\t\tif hand[0].score() == 21:\n\t\t\t\twinners.append(hand)\n\t\tif winners != []:\n\t\t\treturn winners\n\n\t\t### Rule 3 - 5 Card\n\t\tfor hand in hands:\n\t\t\tif len(hand[0]) >= 5:\n\t\t\t\twinners.append(hand)\n\t\tif winners != []:\n\t\t\treturn winners\n\n\t\t### Rule 4 - Score\n\t\tmax_score = max(hands, key = lambda x: x[0].score())[0].score()\n\t\tfor hand in hands:\n\t\t\tif hand[0].score() == max_score:\n\t\t\t\twinners.append(hand)\n\t\t# Winners is definitely non zero\n\t\treturn winners", "title": "" }, { "docid": "269aa6dafc7b62f0fb2b3b515f364197", "score": "0.53590584", "text": "def __init__(self, rank, suit):\n\n self.rank = rank\n self.suit = suit", "title": "" }, { "docid": "23c9ecf25f570f7c7aa0582f69616732", "score": "0.53527933", "text": "def evaluate_answer(self, team, expected_ranking, guesser_ranking, top_n):\n \n self.logger.debug('evaluate_answer() running...')\n \n # Binary vector, 1 for expected cards.\n labels = [0 for _ in range(len(self.field))]\n for (card, _) in expected_ranking[:top_n]:\n labels[card.index] = 1\n \n # Binary vector, 1 for guesser cards.\n predictions = [0 for _ in range(len(self.field))]\n for (card, _) in guesser_ranking[:top_n]:\n predictions[card.index] = 1\n \n # Float vector with card probabilities from guesser.\n probabilities = [0.0 for _ in range(len(self.field))]\n for (card, score) in guesser_ranking:\n probabilities[card.index] = score\n \n # Classification measures.\n f1 = f1_score(labels, predictions)\n roc_auc = roc_auc_score(labels, probabilities) \n crossentropy = compute_crossentropy(labels, probabilities)\n \n # Ranking measures are computed at k equal to field size! 
\n dcg = compute_dcg(labels, probabilities, len(labels))\n ndcg = compute_ndcg(labels, probabilities, len(labels))\n\n self._update_metrics(team=team, f1=f1, roc_auc=roc_auc, \n crossentropy=crossentropy, \n dcg=dcg, ndcg=ndcg)", "title": "" }, { "docid": "4b098f5f90d729e765ff5f2b6957e5b9", "score": "0.53503764", "text": "def suit(self):\n return self._suit", "title": "" }, { "docid": "d145f815d5eb44ea3a59f75c94420859", "score": "0.53439236", "text": "def __cmp__(self, other):\n return self._number.__cmp__(other._number)", "title": "" }, { "docid": "6fe72ee5b14ec116396a392f620c7252", "score": "0.5338236", "text": "def playable_card(card, fireworks):\n return card['rank'] == fireworks[card['color']]", "title": "" }, { "docid": "83da7f791ee198e990e7c41537c65782", "score": "0.5331074", "text": "def set_rank(self, name_to_rank: Dict) -> None:\n for card in self.cards:\n card.rank = name_to_rank[card.name]", "title": "" }, { "docid": "591c3bf23ec5281563b2882e8cbb0809", "score": "0.5319529", "text": "def __cmp__(self, other):\n\t\tassert isinstance(other, OutputSentence)\n\t\tif self.score == other.score:\n\t\t\treturn 0\n\t\telif self.score < other.score:\n\t\t\treturn -1\n\t\telse:\n\t\t\treturn 1", "title": "" }, { "docid": "e2c8e7d0d74c17faab7aca5359f975d5", "score": "0.53189343", "text": "def rankHand(hand):\n ranks = rankCard(hand)\n if straight(ranks) and flush(hand):\n return 8\n elif kind(4, ranks):\n return 7\n elif kind(3, ranks) and kind(2, ranks):\n return 6\n elif flush(hand):\n return 5\n elif straight(ranks):\n return 4\n elif kind(3, ranks):\n return 3\n elif twoPair(ranks):\n return 2\n elif kind(2, ranks):\n return 1\n else:\n return 0", "title": "" }, { "docid": "2bf1cc93135076ad068f092a5f57e938", "score": "0.5317963", "text": "def test_board_get_cards_by_owner(self, game_2_players):\n game = game_2_players\n\n # Get card examples\n chance_jail_card = [\n c\n for c in game.board.chance.cards\n if c.id == game.board.chance.GET_OUT_OF_JAIL_FREE\n ][0]\n community_chest_jail_card = [\n c\n for c in game.board.community_chest.cards\n if c.id == game.board.community_chest.GET_OUT_OF_JAIL_FREE\n ][0]\n\n # Give each card to a player\n chance_jail_card.owner = game.current_player\n community_chest_jail_card.owner = game.players[1]\n\n # Verify the one card we gave to the current player comes back\n # but the 2nd one we gave to the other player does not\n assert [chance_jail_card] == game.board.get_cards_by_owner(game.current_player)", "title": "" }, { "docid": "7d472b4a7b6dcbf49f36d3fd9835dfe3", "score": "0.53010756", "text": "def check_winners(self, table_cards):\n if self.players[0].check_hand_strength(table_cards) < self.players[1].check_hand_strength(table_cards):\n return 1\n elif self.players[0].check_hand_strength(table_cards) > self.players[1].check_hand_strength(table_cards):\n return 0\n elif self.players[0].check_hand_strength(table_cards) == self.players[1].check_hand_strength(table_cards):\n return 2", "title": "" }, { "docid": "12b937cb82f2323445fa52f9f2d31493", "score": "0.52993053", "text": "def evaluate(self):\n\n #Count the ranks and suits in all_cards\n\n for rank in range(13):\n self.rank_counter[rank] = []\n\n for suit in range(4):\n self.suit_counter[suit] = []\n\n for card in self.all_cards:\n self.rank_counter[card.get_num_val()].append(card)\n self.suit_counter[card.get_num_suit()].append(card)\n\n self.sort_by_rank_then_suit()\n\n hand_result = []\n #Evaluate if hand has Royal Flush\n hand_result = self.evaluate_royal()\n\n #Evaluate if hand has Straight 
Flush\n if len(hand_result) == 0:\n hand_result = self.evaluate_straight_flush()\n\n #Evaluate if hand has Four of a Kind\n if len(hand_result) == 0:\n hand_result = self.evaluate_four_of_a_kind()\n\n #Evaluate if hand has Full House\n if len(hand_result) == 0:\n hand_result = self.evaluate_full_house()\n\n #Evaluate if hand has Flush\n if len(hand_result) == 0:\n hand_result = self.evaluate_flush()\n\n #Evaluate if hand has Straight\n if len(hand_result) == 0:\n hand_result = self.evaluate_straight()\n\n #Evaluate if hand has Three of a Kind\n if len(hand_result) == 0:\n hand_result = self.evaluate_three_of_a_kind()\n\n #Evaluate if hand has Two Pair\n if len(hand_result) == 0:\n hand_result = self.evaluate_two_pair()\n\n #Evaluate if hand has One Pair\n if len(hand_result) == 0:\n hand_result = self.evaluate_one_pair()\n\n #Evaluate if hand's High Card\n if len(hand_result) == 0:\n hand_result = self.evaluate_high_card()\n\n #Returns result of evaluated hand\n return hand_result", "title": "" }, { "docid": "2976e494cbd3b0dc3b38eb1522f4fb92", "score": "0.52991545", "text": "def __init__(self, suit, rank):\n if suit not in self.suits:\n raise ValueError(\"suit: {} not in suits\".format(suit))\n if rank not in self.ranks:\n raise ValueError(\"rank: {} not in suits\".format(rank))\n self.suit = suit\n self.rank = rank", "title": "" }, { "docid": "d2d00333c110785625c89eee92433e00", "score": "0.5298349", "text": "def __cmp__(self, other):\n if self.is_conventionally_named() and other.is_conventionally_named():\n s_code = self.get_lab_code()\n o_code = other.get_lab_code()\n if s_code < o_code:\n return -1\n elif s_code > o_code:\n return 1\n\n # If same lab prefix, order by number\n else:\n if self.get_number() < other.get_number():\n return -1\n else:\n return 1\n\n # One or both strains not conventionally named\n else:\n s_lower = self.id.lower()\n o_lower = other.id.lower()\n if s_lower < o_lower:\n return -1\n else:\n return 1", "title": "" }, { "docid": "e651b2585c5a95cddf71bcb441fbc17b", "score": "0.52929443", "text": "def blackjack(hand1, hand2, bets):\r\n if hand1.get_hand_value() == 21 and hand2.get_hand_value() < 21:\r\n display_all_cards_and_values(hand1, hand2)\r\n print(\"Player BlackJack!\")\r\n bets.win_bet()\r\n continue_playing(bets)\r\n return True\r\n elif hand2.get_hand_value() == 21 and hand1.get_hand_value() < 21:\r\n display_all_cards_and_values(hand1, hand2)\r\n print(\"Dealer BlackJack!\")\r\n bets.lose_bet()\r\n continue_playing(bets)\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "b1bf0d5731ec5397853bb466cd199917", "score": "0.5290641", "text": "def card_factory(rank, suit):\n if rank ==1: return AceCard(\"A\", suit)\n elif 2 <= rank < 11 : return NumberCard(str(rank), suit)\n elif 11<= rank < 14 : \n name = { 11: 'J', 12: 'Q', 13: 'K' }[rank]\n return FaceCard(name, suit)\n else:\n raise Exception(\"Rank out of range\")", "title": "" } ]
e7ae463295786e541cce1d9d4fbfad94
Instantiate and return the `Template` object based on the given class and parameters. This function is intended for subclasses to override if they need to implement special template instantiation logic. Code that just uses the `TemplateLoader` should use the `load` method instead.
[ { "docid": "c2616ca00d446526281eb29b98fdb4fd", "score": "0.5182011", "text": "def _instantiate(self, cls, fileobj, filepath, filename, encoding=None):\n if encoding is None:\n encoding = self.default_encoding\n return cls(fileobj, filepath=filepath, filename=filename, loader=self,\n encoding=encoding, lookup=self.variable_lookup,\n allow_exec=self.allow_exec)", "title": "" } ]
[ { "docid": "f73f3be43cc0013896e468aadf48fa68", "score": "0.67032987", "text": "def instantiate_template(cls, data, raw_data, origin, provider, parameters,\n field_offset_map):\n # This assertion is a low-cost trick to ensure that we override this\n # method in all of the subclasses to ensure that the initializer is\n # called with correctly-ordered arguments.\n assert cls is Unit, \\\n \"{}.instantiate_template() not customized\".format(cls.__name__)\n return cls(data, raw_data, origin, provider, parameters,\n field_offset_map)", "title": "" }, { "docid": "1c770e8f431e4494c0ac5e0246372471", "score": "0.6627425", "text": "def load_template(self, templatename, template_string=None):\r\n if template_string is not None:\r\n return Template(template_string, **self.tmpl_options)\r\n # Translate TG dot notation to normal / template path\r\n if '/' not in templatename:\r\n templatename = '/' + templatename.replace('.', '/') + '.' +\\\r\n self.extension\r\n\r\n # Lookup template\r\n return self.lookup.get_template(templatename)", "title": "" }, { "docid": "acb86251c2b27d2e4ef88b5161e16b68", "score": "0.63608974", "text": "def load(\n self,\n environment: \"Environment\",\n name: str,\n globals: t.Optional[t.MutableMapping[str, t.Any]] = None,\n ) -> \"Template\":\n code = None\n if globals is None:\n globals = {}\n\n # first we try to get the source for this template together\n # with the filename and the uptodate function.\n source, filename, uptodate = self.get_source(environment, name)\n\n # try to load the code from the bytecode cache if there is a\n # bytecode cache configured.\n bcc = environment.bytecode_cache\n if bcc is not None:\n bucket = bcc.get_bucket(environment, name, filename, source)\n code = bucket.code\n\n # if we don't have code so far (not cached, no longer up to\n # date) etc. 
we compile the template\n if code is None:\n code = environment.compile(source, name, filename)\n\n # if the bytecode cache is available and the bucket doesn't\n # have a code so far, we give the bucket the new code and put\n # it back to the bytecode cache.\n if bcc is not None and bucket.code is None:\n bucket.code = code\n bcc.set_bucket(bucket)\n\n return environment.template_class.from_code(\n environment, code, globals, uptodate\n )", "title": "" }, { "docid": "053be2b93a48963f49d8e71698a80470", "score": "0.6291981", "text": "def load_template(name):\r\n\r\n full_fname = os.path.join(os.path.dirname(__file__),\r\n 'script_templates', name)\r\n template_file = open(full_fname)\r\n template = Template(template_file.read())\r\n template_file.close()\r\n return template", "title": "" }, { "docid": "835158438d0a33ba117cfe2df21243cf", "score": "0.62592757", "text": "def _createTemplate(self, base_url, config):\n config # pylint\n\n url = self._createParsedUrl(base_url)\n if u\"repname\" in url.query:\n ttpl = self._getNopiTemplate(url)\n else:\n ttpl = self._getPiTemplate(url)\n\n return _base.Template.fromTemplates(*ttpl)", "title": "" }, { "docid": "e8d69864808232d138a6b45a0188f19f", "score": "0.6235693", "text": "def load_template(uid: str) -> Template:\n\n path = get_template_path(uid)\n\n try:\n template = read_template_from_json_file(uid, path)\n except FileNotFoundError:\n raise UnknownTemplateUidError(uid) # pylint: disable=W0707\n\n return template", "title": "" }, { "docid": "ed7b6dedb9f15b715b061b7aa0cb2b37", "score": "0.6193968", "text": "def get_template(self):\r\n return Template(self.content)", "title": "" }, { "docid": "8b0f9d466108dd996b7623f473100e8f", "score": "0.61568016", "text": "def load_template(self, template: str) -> Template:\n env = dict(trim_blocks=True, lstrip_blocks=True, keep_trailing_newline=False)\n jinja2_ext = \".jinja2\"\n if not template.endswith(jinja2_ext):\n self._log.error(\"Template file name must end with %s\" % jinja2_ext)\n raise ValueError\n if not template[:-len(jinja2_ext)].endswith(\".md\"):\n self._log.error(\"Template file should be a Markdown file\")\n raise ValueError\n if not os.path.isabs(template):\n template = os.path.join(os.path.dirname(__file__), template)\n with open(template, encoding=\"utf-8\") as fin:\n template_obj = Template(fin.read(), **env)\n template_obj.filename = template\n self._log.info(\"Loaded %s\", template)\n return template_obj", "title": "" }, { "docid": "f20095bc88fb93fe54b296bd00f6bcb4", "score": "0.6096198", "text": "def load(self, environment, name, globals=None):\r\n code = None\r\n if globals is None:\r\n globals = {}\r\n\r\n # first we try to get the source for this template together\r\n # with the filename and the uptodate function.\r\n source, filename, uptodate = self.get_source(environment, name)\r\n\r\n # try to load the code from the bytecode cache if there is a\r\n # bytecode cache configured.\r\n bcc = environment.bytecode_cache\r\n if bcc is not None:\r\n bucket = bcc.get_bucket(environment, name, filename, source)\r\n code = bucket.code\r\n\r\n # if we don't have code so far (not cached, no longer up to\r\n # date) etc. 
we compile the template\r\n if code is None:\r\n code = environment.compile(source, name, filename)\r\n\r\n # if the bytecode cache is available and the bucket doesn't\r\n # have a code so far, we give the bucket the new code and put\r\n # it back to the bytecode cache.\r\n if bcc is not None and bucket.code is None:\r\n bucket.code = code\r\n bcc.set_bucket(bucket)\r\n\r\n return environment.template_class.from_code(environment, code,\r\n globals, uptodate)", "title": "" }, { "docid": "dd95b572dee69bef29faa59a907a2134", "score": "0.6094186", "text": "def __new__(cls, copy=True):\r\n\r\n # Do we need to construct the template?\r\n if cls._tmpl is None:\r\n tmp = super(TemplateBuilder, cls).__new__(cls)\r\n\r\n # Construct the template\r\n cls._tmpl = tmp.construct()\r\n\r\n # If the template has a copy attribute, return the result of\r\n # calling it\r\n if copy and hasattr(cls._tmpl, 'copy'):\r\n return cls._tmpl.copy()\r\n\r\n # Return the template\r\n return cls._tmpl", "title": "" }, { "docid": "b7591e68d68af5560c988389c6db25a7", "score": "0.6048636", "text": "def loadTemplate( self, name, raw=False, type=None ):\n\t\tif type == \"paml\" or not type:\n\t\t\treturn self.loadPAMLTemplate(name, raw)\n\t\telse:\n\t\t\treturn self.loadPlainTemplate(name, raw, type)", "title": "" }, { "docid": "d049ecc5167289a8015af5945872d134", "score": "0.6009873", "text": "def get_template(self, name, parent=None, globals=None):\r\n if isinstance(name, Template):\r\n return name\r\n if parent is not None:\r\n name = self.join_path(name, parent)\r\n return self._load_template(name, self.make_globals(globals))", "title": "" }, { "docid": "626a34fe725bb9850b83fa6a717ffe79", "score": "0.60015273", "text": "def template(self, template_name, allow_default=False):\n if Template.template_engine is NOTSET:\n Template.configure(self.app, self.config)\n return Template(template_name, allow_default=allow_default)", "title": "" }, { "docid": "b6a0b535fe5b9c50425dde8d6c7091d7", "score": "0.59673774", "text": "def _createTemplate( self, text ):\n\t\tassert templating, \"templating module is required\"\n\t\treturn templating.Template(text)", "title": "" }, { "docid": "a569dd48684e0d24e0334d5ae1d91854", "score": "0.59657127", "text": "def instantiate_template(self, template: Template,\n root: bool = False) -> TemplateLeaf:\n if self.is_template_leaf(template):\n return template\n\n if root:\n # The root template concatenates its parts\n return ''.join(self.instantiate_template(t) for t in template)\n\n selected_template = self.weighted_choice(template, root=root)\n return self.instantiate_template(selected_template)", "title": "" }, { "docid": "82364185f9c36c88139665382cf67f37", "score": "0.5953842", "text": "def get_cloudformation_template(cls, url, working_dir):\n try:\n template_body_dict = cls.get_yaml_or_json_file(url, working_dir)\n return CloudFormationTemplate(body_dict=template_body_dict, name=os.path.basename(url))\n except Exception as e:\n raise TemplateErrorException(\n \"Could not load file from {0}: {1}\".format(url, e))", "title": "" }, { "docid": "f1ed9ac527b7d34098f4ac78f1813823", "score": "0.5908421", "text": "def _check_template(template, cls=Template):\n if not isinstance(template, cls):\n return cls(template)\n return template", "title": "" }, { "docid": "48c3a526e9c08b11ff61853a04685b4a", "score": "0.58626807", "text": "def read_template(filename):\r\n with open(filename, 'r') as template_file:\r\n template_file_content = template_file.read()\r\n return Template(template_file_content)", "title": "" 
}, { "docid": "164ab59535e0cbf3ea8974313a508337", "score": "0.5847274", "text": "def instantiate_workflow_template(\n self,\n name,\n version=None,\n instance_id=None,\n retry=google.api_core.gapic_v1.method.DEFAULT,\n timeout=google.api_core.gapic_v1.method.DEFAULT,\n metadata=None):\n if 'instantiate_workflow_template' not in self._inner_api_calls:\n self._inner_api_calls[\n 'instantiate_workflow_template'] = google.api_core.gapic_v1.method.wrap_method(\n self._workflow_template_service_stub.\n InstantiateWorkflowTemplate,\n default_retry=self._method_configs[\n 'InstantiateWorkflowTemplate'].retry,\n default_timeout=self._method_configs[\n 'InstantiateWorkflowTemplate'].timeout,\n client_info=self._client_info,\n )\n\n request = workflow_templates_pb2.InstantiateWorkflowTemplateRequest(\n name=name,\n version=version,\n instance_id=instance_id,\n )\n operation = self._inner_api_calls['instantiate_workflow_template'](\n request, retry=retry, timeout=timeout, metadata=metadata)\n return google.api_core.operation.from_gapic(\n operation,\n self._operations_client,\n empty_pb2.Empty,\n metadata_type=workflow_templates_pb2.WorkflowMetadata,\n )", "title": "" }, { "docid": "6c712b52d35f2ee2251fc207ca35aeb2", "score": "0.5837298", "text": "def load_template(in_template):\n with open(in_template, 'r') as default:\n output_template = jinja2.Template(default.read())\n return output_template", "title": "" }, { "docid": "4a5e14068a8676f5cb6e995492cde30b", "score": "0.583721", "text": "def _template(s, **params):\n return s._processTemplate(\"_template\", params)", "title": "" }, { "docid": "885b52efc2a6b2155d2735ae7691a94a", "score": "0.58059037", "text": "def _create_template(self, name):\n\n if not name.startswith('descriptor/'):\n return super(TemplateLoader, self)._create_template(name)\n # '/' (part of selector) are encoded as '_' in template file names.\n # ('_' is forbidden in selectors)\n selector, page = name[11:].replace('/', '_').rsplit('_', 1)\n\n args = dict()\n args['loader'] = self\n # remove 'descriptor/' from template name\n # iterate to find template with longest selector prefix\n desc_prefix = \"\"\n for (d, p) in TemplateLoader.templates:\n if page != p:\n continue\n if selector.startswith(d) and len(d) > len(desc_prefix):\n desc_prefix = d\n if desc_prefix != \"\":\n # load most specific template if exists\n templatestr, funcs = TemplateLoader.templates[(desc_prefix, page)]\n else:\n # use default otherwise\n templatestr, funcs = TemplateLoader.templates[('default', page)]\n args['functions'] = funcs\n\n template = CustomTemplate(templatestr, **args)\n return template", "title": "" }, { "docid": "34c514adfd3cbcb901fffae7501751cc", "score": "0.5779893", "text": "def get_template(self, pathorpart):\n if osp.exists(osp.join(self.resourcedir, pathorpart)):\n return Template(osp.join(self.resourcedir, pathorpart),\n osp.join(self.project.projectdir, pathorpart))\n else:\n tmplts = self.get_templates('*' + pathorpart + '*')\n assert len(tmplts) == 1, (\"%s matches %s template paths.\"\n % (pathorpart, len(tmplts)))\n return tmplts[0]", "title": "" }, { "docid": "1bd6aa1cd885d6854b74f3eb50c0d3b4", "score": "0.57598317", "text": "def from_file(cls, tmpl_path, name='', *args, **kwargs):\n with open(tmpl_path, 'r') as tmpl_f:\n template_string = tmpl_f.read()\n if not name:\n name = path_to_tmpl_name(tmpl_path)\n return cls(template_string, name=name, *args, **kwargs)", "title": "" }, { "docid": "60d0ae6ebc761ee9ccb44900feb6bb3a", "score": "0.5746124", "text": "def create_template(self) 
-> None:\n raise NotImplementedError", "title": "" }, { "docid": "173dc8865bc62526c53e760d573d75f1", "score": "0.57374614", "text": "def loadPageTemplate(filename):\n\n\t#create the context that is used by the template\n\tcontext = simpleTALES.Context()\n\t\n\t#load and compile the page template\n\ttemplateFile = open(filename,'rt')\n\ttemplate = simpleTAL.compileHTMLTemplate(templateFile)\n\ttemplateFile.close()\n\n\t#load and compile the main template macro\n\tmacroFile = open(MAIN_TEMPLATE_FILENAME,'rt')\n\tmainTemplate = simpleTAL.compileHTMLTemplate(macroFile)\n\tmacroFile.close()\n\n\t#add main template to context\n\tcontext.addGlobal(\"main_template\",mainTemplate)\n\n\t#return context and page template\n\treturn (context,template)", "title": "" }, { "docid": "f4bbdc33509041a0d91d9cac0e313b9f", "score": "0.56910264", "text": "def __load(self, filename):\r\n # make sure the template loading from filesystem is only done\r\n # one thread at a time to avoid bad clashes...\r\n self._mutex.acquire()\r\n try:\r\n try:\r\n # try returning from cache one more time in case\r\n # concurrent thread already loaded\r\n return self.template_cache[filename]\r\n\r\n except KeyError:\r\n # not in cache yet... we can continue normally\r\n pass\r\n\r\n try:\r\n self.template_cache[filename] = Template(\r\n filename=filename,\r\n module_directory=self.module_directory,\r\n input_encoding=self.input_encoding,\r\n output_encoding=self.output_encoding,\r\n default_filters=self.default_filters,\r\n imports=self.imports,\r\n lookup=self)\r\n\r\n return self.template_cache[filename]\r\n\r\n except:\r\n self.template_cache.pop(filename, None)\r\n raise\r\n\r\n finally:\r\n # _always_ release the lock once done to avoid\r\n # \"thread lock\" effect\r\n self._mutex.release()", "title": "" }, { "docid": "56822b12fb7b9bd9242c48e834336e55", "score": "0.56672156", "text": "def read_template(filename):\r\n\r\n with open(filename, 'r', encoding = 'utf-8') as template_file:\r\n template_file_content = template_file.read()\r\n return Template(template_file_content)", "title": "" }, { "docid": "a3c720b8a8e28b61a2de517daa889f22", "score": "0.5666027", "text": "def read_template ( filename ):\n\n with open ( filename , 'r' , encoding='utf-8' ) as template_file:\n template_file_content=template_file.read ( )\n return Template ( template_file_content )", "title": "" }, { "docid": "465753c2c892a2ccb01dbfb61035f791", "score": "0.5657811", "text": "def from_template_path(cls, template_path: Path) -> 'IpTemplate':\n\n # Check if the directory structure matches expectations.\n if not template_path.is_dir():\n raise TemplateParseError(\n \"Template path {!r} is not a directory.\".format(\n str(template_path)))\n if not (template_path / 'data').is_dir():\n raise TemplateParseError(\n \"Template path {!r} does not contain the required 'data' directory.\"\n .format(str(template_path)))\n\n # The template name equals the name of the template directory.\n template_name = template_path.stem\n\n # Find the template description file.\n tpldesc_file = template_path / 'data/{}.tpldesc.hjson'.format(\n template_name)\n\n # Read the template description from file.\n try:\n tpldesc_obj = hjson.load(open(tpldesc_file, 'r'), use_decimal=True)\n except (OSError, FileNotFoundError) as e:\n raise TemplateParseError(\n \"Unable to read template description file {!r}: {}\".format(\n str(tpldesc_file), str(e)))\n\n # Parse the template description file.\n where = 'template description file {!r}'.format(str(tpldesc_file))\n if 'template_param_list' not 
in tpldesc_obj:\n raise TemplateParseError(\n f\"Required key 'variables' not found in {where}\")\n\n try:\n params = TemplateParams.from_raw(\n f\"list of parameters in {where}\",\n tpldesc_obj['template_param_list'])\n except ValueError as e:\n raise TemplateParseError(e) from None\n\n return cls(template_name, params, template_path)", "title": "" }, { "docid": "41780d4452daadb9835ac93835660504", "score": "0.56353164", "text": "def _init_template(raw, loader_init):\n if loader_init is None:\n template = Template(\n raw, undefined=StrictUndefined, extensions=(extensions.RaiseExtension,)\n )\n _add_globals(template.environment)\n return template\n else:\n if loader_init[\"class\"] == \"FileSystemLoader\":\n loader = FileSystemLoader(**loader_init[\"kwargs\"])\n elif loader_init[\"class\"] == \"PackageLoader\":\n loader = PackageLoader(**loader_init[\"kwargs\"])\n else:\n raise TypeError(\n \"Error setting state for Placeholder, \"\n \"expected the loader to be FileSystemLoader \"\n \"or PackageLoader\"\n )\n\n env = Environment(\n loader=loader,\n undefined=StrictUndefined,\n extensions=(extensions.RaiseExtension,),\n )\n _add_globals(env)\n\n return env.from_string(raw)", "title": "" }, { "docid": "7a3d3eddd67ac0ee5850f4c59f5bfa7b", "score": "0.563247", "text": "def from_string(self, source, globals=None, template_class=None):\r\n globals = self.make_globals(globals)\r\n cls = template_class or self.template_class\r\n return cls.from_code(self, self.compile(source), globals, None)", "title": "" }, { "docid": "fa79d0e7669e6a70ff102180c3467606", "score": "0.56323785", "text": "def read_template(filename):\n \n with open(filename, 'r', encoding='utf-8') as template_file:\n template_file_content = template_file.read()\n return Template(template_file_content)", "title": "" }, { "docid": "064428c05c4283a0f8d5a79e936d302c", "score": "0.56175834", "text": "def Template(template: Text, database_yml: Path) -> Callable[[ProtocolFile], Any]:\n\n path = Path(template)\n if path.suffix not in LOADERS:\n msg = f\"No loader for files with '{path.suffix}' suffix\"\n raise ValueError(msg)\n\n Loader = LOADERS[path.suffix].load()\n\n def load(current_file: ProtocolFile):\n path = resolve_path(Path(template.format(**abs(current_file))), database_yml)\n\n # check if file exists\n if not path.is_file():\n msg = f\"No such file or directory: '{path}' (via '{template}' template).\"\n raise FileNotFoundError(msg)\n\n loader = Loader(path)\n return loader(current_file)\n\n return load", "title": "" }, { "docid": "57dfa1592a5ddf42de7c5ea3511b8f59", "score": "0.56053895", "text": "def load_template(name):\n if not cache.has_key(name):\n template_fd = open(templates[name])\n cache[name] = template_fd.read()\n template_fd.close()\n return cache[name]", "title": "" }, { "docid": "c8926224a973d121173fb01cb145b674", "score": "0.55970806", "text": "def create(cls, config, app_globals):\r\n if kajiki is None: # pragma: no cover\r\n return None\r\n\r\n loader = KajikiTemplateLoader(config.paths.templates[0],\r\n dotted_finder=app_globals.dotted_filename_finder,\r\n force_mode='xml',\r\n reload=config.auto_reload_templates,\r\n template_extension='.xml')\r\n return {'kajiki': cls(loader)}", "title": "" }, { "docid": "ddd0b553d8aa8f605a5a8e1e7adac11b", "score": "0.55939984", "text": "def get_template(self, template_name):\r\n\r\n if template_name not in self.template_cache:\r\n # the template string is not yet loaded into the cache.\r\n # Do so now\r\n self.__load(template_name)\r\n\r\n if self.auto_reload:\r\n # AUTO 
RELOADING will be activated only if user has\r\n # explicitly asked for it in the configuration\r\n # return the template, but first make sure it's not outdated\r\n # and if outdated, refresh the cache.\r\n return self.__check(self.template_cache[template_name])\r\n\r\n else:\r\n return self.template_cache[template_name]", "title": "" }, { "docid": "ac284bacdc1c96d1f68481df293501b7", "score": "0.5592714", "text": "def load(self, filename, relative_to=None, cls=None, encoding=None):\n if cls is None:\n cls = self.default_class\n search_path = self.search_path\n\n # Make the filename relative to the template file its being loaded\n # from, but only if that file is specified as a relative path, or no\n # search path has been set up\n if relative_to and (not search_path or not os.path.isabs(relative_to)):\n filename = os.path.join(os.path.dirname(relative_to), filename)\n\n filename = os.path.normpath(filename)\n cachekey = filename\n\n self._lock.acquire()\n try:\n # First check the cache to avoid reparsing the same file\n try:\n tmpl = self._cache[cachekey]\n if not self.auto_reload:\n return tmpl\n uptodate = self._uptodate[cachekey]\n if uptodate is not None and uptodate():\n return tmpl\n except (KeyError, OSError):\n pass\n\n isabs = False\n\n if os.path.isabs(filename):\n # Bypass the search path if the requested filename is absolute\n search_path = [os.path.dirname(filename)]\n isabs = True\n\n elif relative_to and os.path.isabs(relative_to):\n # Make sure that the directory containing the including\n # template is on the search path\n dirname = os.path.dirname(relative_to)\n if dirname not in search_path:\n search_path = list(search_path) + [dirname]\n isabs = True\n\n elif not search_path:\n # Uh oh, don't know where to look for the template\n raise TemplateError('Search path for templates not configured')\n\n for loadfunc in search_path:\n if isinstance(loadfunc, six.string_types):\n loadfunc = directory(loadfunc)\n try:\n filepath, filename, fileobj, uptodate = loadfunc(filename)\n except IOError:\n continue\n else:\n try:\n if isabs:\n # If the filename of either the included or the \n # including template is absolute, make sure the\n # included template gets an absolute path, too,\n # so that nested includes work properly without a\n # search path\n filename = filepath\n tmpl = self._instantiate(cls, fileobj, filepath,\n filename, encoding=encoding)\n if self.callback:\n self.callback(tmpl)\n self._cache[cachekey] = tmpl\n self._uptodate[cachekey] = uptodate\n finally:\n if hasattr(fileobj, 'close'):\n fileobj.close()\n return tmpl\n\n raise TemplateNotFound(filename, search_path)\n\n finally:\n self._lock.release()", "title": "" }, { "docid": "0fda519b83833296e10688644ec5a49c", "score": "0.5583785", "text": "def make_template(template):\n terms = decompose_template(template)\n return Template(template, terms)", "title": "" }, { "docid": "3b1c23c59bf029a29b3fd86d478e7a1e", "score": "0.55799764", "text": "def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n template_id = dictionary.get('TemplateId')\n template_name = dictionary.get('TemplateName')\n\n # Return an object of this model\n return cls(\n template_id,\n template_name\n)", "title": "" }, { "docid": "1e5d8d7de297c2b773779c67fa142706", "score": "0.55555165", "text": "def get_template(template_name):\n return TemplateLookup(\n directories=[Path(__file__).dirname().dirname() / 'templates'],\n default_filters=['decode.utf8']\n 
).get_template(template_name)", "title": "" }, { "docid": "256b365ee056004db87442a531212997", "score": "0.55481434", "text": "def template_from_path(self, path):\n template, fields = self.parse_path(path)\n return template", "title": "" }, { "docid": "bd7b6a614eb591298105d3645ce42b11", "score": "0.554281", "text": "def load_template_from_config_file(config_file):\n print('Loading template data...')\n try:\n data_loader = DataLoader(config_file)\n except IOError as e:\n print()\n print(e.filename + ' does not exist.')\n data_loader = None\n exit(1)\n\n # Load all necessary data:\n try:\n return data_loader.get_template()\n except KeyError:\n print()\n print('Configuration file does not have the specified format.')\n print('See config/exampleConfig.yaml for further information about the format of configuration '\n 'files')\n exit(1)", "title": "" }, { "docid": "f999ad856746f549e2dde49f2d68430f", "score": "0.5530959", "text": "def object_new(self, template=None, **kwargs):\n if template:\n return self._object_new.request(self._client, template, **kwargs)\n else:\n return self._object_new.request(self._client, **kwargs)", "title": "" }, { "docid": "fa8c5ee73e1c6acef287095a09488a9b", "score": "0.5522194", "text": "def create(cls, config, app_globals):\r\n if genshi is None: # pragma: no cover\r\n # Genshi not available\r\n return None\r\n\r\n if config.get('use_dotted_templatenames', True):\r\n TemplateLoader = DottedTemplateLoader\r\n template_loader_args = {'dotted_finder': app_globals.dotted_filename_finder}\r\n else:\r\n TemplateLoader = GenshiTemplateLoader\r\n template_loader_args = {}\r\n\r\n loader = TemplateLoader(search_path=config.paths.templates,\r\n max_cache_size=asint(config.get('genshi.max_cache_size', 30)),\r\n auto_reload=config.auto_reload_templates,\r\n callback=cls.on_template_loaded,\r\n **template_loader_args)\r\n\r\n return {'genshi': cls(loader, config)}", "title": "" }, { "docid": "5882146a3ea26fcd297e81702945e7f6", "score": "0.5503774", "text": "def get_template(self):\n return get_template(self.template_name)", "title": "" }, { "docid": "243b7f9a6a3d42a8a53162e575fb8116", "score": "0.55006063", "text": "def create_instance(self, name, *args):\n module_name, class_name = self.class_maps[name]\n cls = getattr(importlib.import_module(module_name), class_name)\n if name == 'reo-gen':\n args = (args[0], args[2], args[3])\n return cls(*args)", "title": "" }, { "docid": "d9e769c2f371900f91ddbd846270d0c1", "score": "0.54892975", "text": "def get_template ( self,\n template_name,\n file_extensions=[ '', ],\n add_mode_ext=True\n ):\n my_template = None\n\n if add_mode_ext:\n fext_list = (\n [ self.get_script_mode_file_ext() ] + list ( file_extensions )\n )\n else:\n fext_list = file_extensions\n\n # TODO/FIXME: does TemplateLookup support file extension_s_ lookup?\n last_fext_index = len ( fext_list ) - 1\n for index, f_ext in enumerate ( fext_list ):\n if index == last_fext_index:\n my_template = self._mako_lookup.get_template (\n template_name + f_ext\n )\n else:\n try:\n my_template = self._mako_lookup.get_template (\n template_name + f_ext\n )\n except mako.exceptions.TopLevelLookupException:\n pass\n else:\n break\n # -- end if\n\n return my_template", "title": "" }, { "docid": "39df7e1fa4e161a2d0b99eff861aded1", "score": "0.548512", "text": "def template_viewer_factory(config):\n loader = template_loader_for(config)\n\n @responder\n def template_viewer(environ, start_response):\n prefix, path_info = config.url_prefix, environ['PATH_INFO']\n if not 
os.path.dirname(path_info).startswith(prefix.rstrip('/')):\n raise NotFound()\n path_info = path_info[len(prefix):]\n\n if path_info.endswith('/'):\n path_info += config.index_document\n\n fspath = os.path.join(config.dynamic_path, path_info.lstrip('/'))\n\n if not os.path.exists(fspath):\n raise NotFound()\n elif os.path.isdir(fspath):\n return append_slash_redirect(environ)\n\n template = loader.get_template(path_info)\n\n mimetype, _ = mimetypes.guess_type(path_info)\n mimetype = mimetype or 'text/plain'\n\n render_environ = dict(environ, PATH_INFO=path_info)\n render_environ['SCRIPT_NAME'] += prefix\n\n response = BaseResponse(mimetype=mimetype or 'text/plain')\n response.data = template.render(config=config, environ=environ)\n response.headers['Pragma'] = 'no-cache'\n response.headers['Cache-Control'] = 'no-cache, must-revalidate'\n response.headers['Expires'] = 'Sun, 13 Aug 1995 13:00:00 GMT'\n return response\n return template_viewer", "title": "" }, { "docid": "c625d175016898acf9a527e41545b335", "score": "0.5451167", "text": "def read_template(filename):\n\n with open(filename, 'r', encoding='utf-8') as template_file:\n template_file_content = template_file.read()\n # print(template_file_content)\n return Template(template_file_content)", "title": "" }, { "docid": "be13744abf3b38cc872ae95924a0dde1", "score": "0.54453474", "text": "def _load_template(self, tmpl_name):\n for t_f in self.list_watched_templates():\n t_n = path_to_tmpl_name(t_f)\n if t_n == tmpl_name:\n template = Template.from_file(t_f, t_n)\n self.add(template)\n return template\n raise TemplateError('No template named %s' % tmpl_name)", "title": "" }, { "docid": "2035c25a9fd8727163952c55e38d4c66", "score": "0.5439552", "text": "def new(cls, path_or_file):\n test_file = to_str(path_or_file)\n if test_file in ODF_TEMPLATES:\n path_or_file = join(\n dirname(__file__), ODF_TEMPLATES_DIR, ODF_TEMPLATES[test_file]\n )\n template_container = cls()\n template_container.open(path_or_file)\n # Return a copy of the template container\n clone = template_container.clone\n # Change type from template to regular\n mimetype = clone.mimetype.replace(\"-template\", \"\")\n clone.mimetype = mimetype\n # Update the manifest\n manifest = Manifest(ODF_MANIFEST, clone)\n manifest.set_media_type(\"/\", mimetype)\n clone.set_part(ODF_MANIFEST, manifest.serialize())\n return clone", "title": "" }, { "docid": "863d7ede59879e32f642c0ce03124f1d", "score": "0.5432518", "text": "def get_template(self, id, params=None):\n if id in SKIP_IN_PATH:\n raise ValueError(\"Empty value passed for a required argument 'id'.\")\n _, data = self.transport.perform_request('GET', _make_path('_search',\n 'template', id), params=params)\n return data", "title": "" }, { "docid": "4a8458bcecfc7771b53765ef1542177f", "score": "0.5430789", "text": "def load_template(path):\n\n with open(path, 'r') as cf_template:\n return loads(cf_template.read())", "title": "" }, { "docid": "55b70d6a92d95f72f25f8323006359ff", "score": "0.5426284", "text": "def _load_template(self, tpl_key, tpl_data=None):\n context = {}\n\n if tpl_key not in self.templates.keys():\n raise IndexError(\n 'Template \"'\n + tpl_key\n + '\" not found in registered templates '\n + str(list(self.templates.keys()))\n )\n\n # FIXME remove parameter type-checking\n if tpl_data is not None and type(tpl_data) != dict:\n raise TypeError(\n \"Data parameter should be a dict (\"\n + str(type(tpl_data))\n + \" given)\"\n )\n\n if tpl_data is not None:\n context.update(tpl_data)\n\n return 
self.templates[tpl_key].render(context)", "title": "" }, { "docid": "62f6b1b64959c521194856be59658129", "score": "0.54251814", "text": "def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'JobTemplate':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = JobTemplateArgs.__new__(JobTemplateArgs)\n\n __props__.__dict__[\"abort_config\"] = None\n __props__.__dict__[\"arn\"] = None\n __props__.__dict__[\"description\"] = None\n __props__.__dict__[\"document\"] = None\n __props__.__dict__[\"document_source\"] = None\n __props__.__dict__[\"job_arn\"] = None\n __props__.__dict__[\"job_executions_retry_config\"] = None\n __props__.__dict__[\"job_executions_rollout_config\"] = None\n __props__.__dict__[\"job_template_id\"] = None\n __props__.__dict__[\"maintenance_windows\"] = None\n __props__.__dict__[\"presigned_url_config\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"timeout_config\"] = None\n return JobTemplate(resource_name, opts=opts, __props__=__props__)", "title": "" }, { "docid": "6980203e6c87b42d6ff4e6fae0489513", "score": "0.5424788", "text": "def from_dict(template_name, template_values_dict): # type: ignore[no-untyped-def]\n\n parameters = template_values_dict.get(\"Parameters\", {})\n definition = template_values_dict.get(\"Definition\", {})\n\n return Template(template_name, parameters, definition)", "title": "" }, { "docid": "566c126fe0e2ab0b37fe9a26b67ad539", "score": "0.54079", "text": "def CreateParamTemplate(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateParamTemplate\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateParamTemplateResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "title": "" }, { "docid": "c8dbd76d8effd19915306effe517d0e2", "score": "0.54012406", "text": "def read_template(file):\n with open(file) as fp:\n message = fp.read()\n return Template(message)", "title": "" }, { "docid": "6bf8b546867d23549d56177b1f8f4cdb", "score": "0.5380205", "text": "def read(\n self, model_path: str = None, device: str = None, template_name: str = None\n ) -> NetworkTemplate:\n name = os.path.basename(model_path)\n save_dir = model_path\n\n sys.path.append(model_path)\n\n module = importlib.import_module(name + \"_template\")\n\n callables = {\n attr: getattr(module, attr)\n for attr in dir(module)\n if callable(getattr(module, attr))\n }\n\n if len(callables) > 1:\n if template_name is None:\n raise Exception(\n f\"There are {len(callables)} models in the module, please provide a value for name.\"\n )\n else:\n Model = callables[template_name]()\n\n elif len(callables) == 1:\n Model = list(callables.values())[0]()\n\n else:\n raise Exception(\"There is no model template in the module.\")\n\n Model.load(save_dir=save_dir, name=name, device=device)\n\n return Model", "title": "" }, { "docid": "016053b44c3eebeb4881e71558d9fa0d", "score": "0.5374171", "text": "def template(template_name: str) -> jinja2.Template:\n return jinja2_env.get_template(template_name)", "title": "" }, { "docid": "016053b44c3eebeb4881e71558d9fa0d", "score": "0.5374171", "text": "def template(template_name: str) -> jinja2.Template:\n return jinja2_env.get_template(template_name)", "title": "" }, { "docid": "0a8f999c7943f44c4a28532830070242", 
"score": "0.5371537", "text": "def load_template(name, directory, extension, encoding, encoding_errors):\n abs_path = get_abs_template_path(name, directory, extension)\n return load_file(abs_path, encoding, encoding_errors)", "title": "" }, { "docid": "746fec147f23bd2b0a1ce657c8a2ff02", "score": "0.53631055", "text": "def construct_instance(self, *args, **kwargs):\n return self.get_type()(*args, **kwargs)", "title": "" }, { "docid": "6fefad66411792e5f8cc8f5858fd6270", "score": "0.53569764", "text": "def get_template(self, template_name, dirs):\n jinja_environment = jinja2.Environment(\n loader=jinja2.FileSystemLoader(dirs + [os.path.dirname(__file__)]))\n return jinja_environment.get_template(template_name)", "title": "" }, { "docid": "95f1030d836306b3e5bf0454590e8d08", "score": "0.5354754", "text": "def generate_template(api,\n text=DEFAULT_TEMPLATE_TEXT,\n html=DEFAULT_TEMPLATE_HTML):\n template = Template(name='Example Template', text=text, html=html)\n try:\n template = api.templates.post(template)\n except Exception as e:\n print('Unable to create template: {}'.format(e))\n return template", "title": "" }, { "docid": "8d0cf169e75a0ad2b22b3c6495d4dfe0", "score": "0.53524137", "text": "def read_template(self, fso):\n stream = open(fso)\n template = Template(stream.read())\n stream.close()\n return template", "title": "" }, { "docid": "adffa342cc2b175d1b2a1582e21e0f8e", "score": "0.53513914", "text": "def _template(tpl, env=None, return_env=False):\n try:\n if env is None:\n env = ImmutableValDict()\n \n # This was changed to first see if the file exists. If it does,\n # it is assumed to be a path. Otherwise, assumed it to be text\n \n settings = {}\n tpl = _touni(tpl)\n \n # Try to determine if it is a file or a template string\n \n isfile = False\n try:\n if os.path.exists(tpl):\n isfile = True\n except:pass # Catch any kind of error\n \n if not isfile: # template string\n lookup = ['./'] # Just have the lookup be in this path\n tpl = _preparser(tpl)\n tpl_obj = _SimpleTemplate(source=tpl, lookup=lookup, **settings)\n else: # template file\n # set the lookup. 
It goes in order so first check directory\n # of the original template and then the current.\n lookup = [os.path.dirname(tpl) + '/.','./']\n tpl_obj = _SimpleTemplate(name=tpl, lookup=lookup, **settings)\n\n # Added the option to return the environment, but this is really not needed\n # if env is set.\n \n rendered,env = tpl_obj.render(env)\n \n if not return_env:\n return rendered\n return rendered,env\n except Exception as E:\n if CLI_MODE and not DEBUGCLI:\n msg = _error_msg(E)\n sys.stderr.write(msg) \n sys.exit(1)\n else:\n raise", "title": "" }, { "docid": "3a728ff0289887f5e81063ebad229831", "score": "0.5348713", "text": "def get_template(template, autoescape=True, template_path=siteScriptsPath):\n if os.path.isabs(template):\n template_path, template = os.path.split(template)\n template_path = os.path.abspath(template_path)\n key = (template_path, template, autoescape)\n if key not in _template_cache:\n if autoescape:\n env = get_template_environment(template_path)\n else:\n env = get_unescaped_template_environment(template_path)\n _template_cache[key] = env.get_template(template)\n return _template_cache[key]", "title": "" }, { "docid": "de0ceafd0de03c0f3bdb1d309146e6be", "score": "0.53417355", "text": "def getTemplate(self, filepath):\n pass", "title": "" }, { "docid": "0f7b12f32d7944023378c572b528e9d2", "score": "0.5325418", "text": "def create_customized_class_instance(class_params):\n\n code_dir = class_params.get('codeDirectory')\n qualified_class_name = class_params.get('className')\n class_args = class_params.get('classArgs')\n\n if code_dir and not os.path.isdir(code_dir):\n raise ValueError(f'Directory not found: {code_dir}')\n\n sys.path.append(code_dir)\n module_name, class_name = qualified_class_name.rsplit('.', 1)\n class_module = importlib.import_module(module_name)\n class_constructor = getattr(class_module, class_name)\n\n if class_args is None:\n class_args = {}\n instance = class_constructor(**class_args)\n\n return instance", "title": "" }, { "docid": "df1a7dd59ae004e0d252a97ece92740a", "score": "0.5319496", "text": "def create_template():\n with Database.connection() as connection:\n template = request.get_json()\n if not Templates.validate_json_template(template):\n return 'Invalid template object', 400\n row_id = TEMPLATES.create(Template(pkey=None,\n maxTokens=template['maxTokens'],\n buckets=template['buckets'],\n layout=template.get('layout', None),\n enabled=True),\n connection)\n connection.commit()\n return jsonify(TEMPLATES.fetch_by_pkey(row_id, connection)._asdict())", "title": "" }, { "docid": "6e41a05c640cb7b3a5cf2caf18fd7d31", "score": "0.5315449", "text": "def load(environment, data):\n if isinstance(data, basestring):\n from marshal import loads\n code = loads(data)\n else:\n from marshal import load\n code = load(data)\n return Template(environment, code)", "title": "" }, { "docid": "b5f80513d347e32512772c010a72617b", "score": "0.53121", "text": "def _get_template(self, task_type: str, model: Optional[str] = None) -> ModelTemplate:\n otx_registry = OTXRegistry(self.otx_root).filter(task_type=task_type if task_type else None)\n if model:\n template_lst = [temp for temp in otx_registry.templates if temp.name.lower() == model.lower()]\n if not template_lst:\n raise ValueError(\n f\"[*] {model} is not a type supported by OTX {task_type}.\"\n f\"\\n[*] Please refer to 'otx find --template --task {task_type}'\"\n )\n template = template_lst[0]\n else:\n template = otx_registry.get(DEFAULT_MODEL_TEMPLATE_ID[task_type.upper()])\n return template", 
"title": "" }, { "docid": "53a2eaca5149d06f3eb1f35d2e550cc6", "score": "0.5309679", "text": "def _parse_template(self):\n logger.debug(\n 'Loading template at %s' % os.path.join(\n self.location,\n self.template_name\n )\n )\n env = jinja2.Environment(\n undefined=jinja2.StrictUndefined,\n loader=jinja2.FileSystemLoader(self.location),\n )\n self.template = env.get_template(self.template_name)", "title": "" }, { "docid": "f8987c5766f7b911cf3273067ecf9051", "score": "0.53071386", "text": "def get_template(name: str) -> Template:\n return JINJA_ENV.from_string(TEMPLATES_PATH.joinpath(f\"{name}.md.jinja\").read_text())", "title": "" }, { "docid": "ce51c9b17a50a1dff086730a18ad67e4", "score": "0.5305849", "text": "def _load_view(self, template_engine_name, template_dir):\n file_name = template_engine_name.lower()\n class_name = \"{}View\".format(template_engine_name.title())\n try:\n view_module = import_module(\"rails.views.{}\".format(file_name))\n except ImportError:\n raise Exception(\"Template engine '{}' not found in 'rails.views'\".format(file_name))\n view_class = getattr(view_module, class_name)\n return view_class(template_dir)", "title": "" }, { "docid": "8f4cf1267250333f46f79c03c5873972", "score": "0.53009534", "text": "def set_template(tmpl_filename=None, from_string=None):\n # Set the relative template path\n template_loader = jinja2.FileSystemLoader(searchpath=\"./templates\")\n if from_string and not tmpl_filename:\n template_env = jinja2.Environment(loader=template_loader).from_string(from_string)\n tmpl_obj = template_env\n if tmpl_filename and not from_string:\n template_env = jinja2.Environment(loader=template_loader)\n tmpl_obj = template_env.get_template(tmpl_filename)\n\n return tmpl_obj", "title": "" }, { "docid": "00a9e9946f613710e0f3c49754bcb7be", "score": "0.529709", "text": "async def get_job_template(\n self,\n request: Optional[Union[services.GetJobTemplateRequest, dict]] = None,\n *,\n name: Optional[str] = None,\n retry: OptionalRetry = gapic_v1.method.DEFAULT,\n timeout: Union[float, object] = gapic_v1.method.DEFAULT,\n metadata: Sequence[Tuple[str, str]] = (),\n ) -> resources.JobTemplate:\n # Create or coerce a protobuf request object.\n # Quick check: If we got a request object, we should *not* have\n # gotten any keyword arguments that map to the request.\n has_flattened_params = any([name])\n if request is not None and has_flattened_params:\n raise ValueError(\n \"If the `request` argument is set, then none of \"\n \"the individual field arguments should be set.\"\n )\n\n request = services.GetJobTemplateRequest(request)\n\n # If we have keyword arguments corresponding to fields on the\n # request, apply these.\n if name is not None:\n request.name = name\n\n # Wrap the RPC method; this adds retry and timeout information,\n # and friendly error handling.\n rpc = gapic_v1.method_async.wrap_method(\n self._client._transport.get_job_template,\n default_timeout=60.0,\n client_info=DEFAULT_CLIENT_INFO,\n )\n\n # Certain fields should be provided within the metadata header;\n # add these here.\n metadata = tuple(metadata) + (\n gapic_v1.routing_header.to_grpc_metadata(((\"name\", request.name),)),\n )\n\n # Send the request.\n response = await rpc(\n request,\n retry=retry,\n timeout=timeout,\n metadata=metadata,\n )\n\n # Done; return the response.\n return response", "title": "" }, { "docid": "4d7f44fbb356e7bf964cbfad1bb5eea5", "score": "0.5291566", "text": "def instantiate(superclass, instance, **kwargs):\n if type(instance) is type and issubclass(instance, 
superclass):\n return instance(**kwargs)\n elif isinstance(instance, superclass):\n return instance\n elif type(instance) is str:\n return make_module(superclass, instance, **kwargs)\n else:\n raise Exception('Instantiation Error.')", "title": "" }, { "docid": "de4e09208cf03e3fcae50515b7b44deb", "score": "0.5270263", "text": "def get_dynamic_class_instantiation(cls, package_name, module_name, parameters=None, resources_dir=None):\n package_path = \"kalliope.\" + package_name + \".\" + module_name.lower() + \".\" + module_name.lower()\n logger.debug(\"[Utils]-> get_dynamic_class_instantiation : package path : %s\" % (package_path))\n if resources_dir is not None:\n neuron_resource_path = resources_dir + os.sep + module_name.lower() \\\n + os.sep + module_name.lower() + \".py\"\n if os.path.exists(neuron_resource_path):\n imp.load_source(module_name.capitalize(), neuron_resource_path)\n package_path = module_name.capitalize()\n logger.debug(\"[Utils]-> get_dynamic_class_instantiation : loading path : %s, as package %s\" % (\n neuron_resource_path, package_path))\n\n mod = __import__(package_path, fromlist=[module_name.capitalize()])\n\n try:\n klass = getattr(mod, module_name.capitalize())\n except AttributeError:\n logger.debug(\"Error: No module named %s \" % module_name.capitalize())\n raise ModuleNotFoundError(\n \"The module %s does not exist in package %s\" % (module_name.capitalize(), package_name))\n\n if klass is not None:\n # run the plugin\n if not parameters:\n return klass()\n elif isinstance(parameters, dict):\n return klass(**parameters)\n else:\n return klass(parameters)\n return None", "title": "" }, { "docid": "e7dd31d82eb90f65b2c040e8a983c93b", "score": "0.52610224", "text": "def _ReadTemplateFile(self, filename):\n file_object = open(filename)\n file_data = file_object.read()\n file_object.close()\n return string.Template(file_data)", "title": "" }, { "docid": "a09ade78c695b7ad255bfda16bcf3120", "score": "0.5254321", "text": "def get_template_engine(user_data):\n\n cl = classloader.ClassLoader()\n for class_path in TEMPLATE_ENGINE_CLASS_PATHS:\n tpl_engine = cl.load_class(class_path)()\n try:\n if tpl_engine.load(user_data):\n LOG.info(\"Using template engine: %s\"\n % tpl_engine.get_template_type())\n return tpl_engine\n except Exception as ex:\n LOG.error(\"Failed to load template engine '%s'\" % class_path)\n LOG.exception(ex)\n return", "title": "" }, { "docid": "d5b95ce94ec9bd03f01387435ede6dbf", "score": "0.5252145", "text": "def _create_mock_template(mock_template_filename=\"\", mock_template_content=\"\"):\n mock_template = Mock(spec=Template)\n mock_template.filename = mock_template_filename\n mock_template.content = mock_template_content\n mock_template.id = ObjectId()\n return mock_template", "title": "" }, { "docid": "d76bd5858b1f5fbb6bca9142798f91cf", "score": "0.5235153", "text": "def getTemplate(self, uri, meta=None):\n\n if not meta:\n\n metaKey = self.cacheKey + '_templatesmeta_cache_' + uri\n\n meta = cache.get(metaKey, None)\n\n if not meta:\n meta = self.getMeta(uri)\n cache.set(metaKey, meta, 15)\n\n if not meta: # No meta, can return a template\n return None\n\n # Let's find the template in the cache\n action = urlparse(uri).path\n\n templateKey = self.cacheKey + '_templates_' + action + '_' + meta['template_tag']\n template = cache.get(templateKey, None)\n\n # Nothing found -> Retrieve it from the server and cache it\n if not template:\n\n r = self.doQuery('template/' + uri)\n\n if r.status_code == 200: # Get the content if there is not problem. 
If there is, template will stay to None\n template = r.content\n\n cache.set(templateKey, template, None) # None = Cache forever\n\n return template", "title": "" }, { "docid": "4f2cfec4051ce6587b4f8af3c508bce0", "score": "0.52349335", "text": "def _get_template(self, req):\n if 'TemplateBody' in req.params:\n LOG.debug('TemplateBody ...')\n return req.params['TemplateBody']\n elif 'TemplateUrl' in req.params:\n url = req.params['TemplateUrl']\n LOG.debug('TemplateUrl %s' % url)\n try:\n return urlfetch.get(url)\n except IOError as exc:\n msg = _('Failed to fetch template: %s') % exc\n raise exception.HeatInvalidParameterValueError(detail=msg)\n\n return None", "title": "" }, { "docid": "2da2f8d030b735a57aa6e87cddabc966", "score": "0.523103", "text": "def __init__(self, template_folder: str):\n\n # Validating candidate template\n util.validateTemplate(template_folder=template_folder)\n logging.debug('Successfully validated template in {0}'.format(\n template_folder))\n\n # Binding template file to class variable\n self.template_file = os.path.join(template_folder,\n config.template_files['template'])\n logging.debug('Isolated template Jinja file {0}'.format(\n self.template_file))\n \n # Loading template configuration, binding to class variable\n with open(os.path.join(template_folder,\n config.template_files['config'])) as f:\n self.template_config = yaml_load(stream=f, Loader=SafeLoader)\n logging.debug('Loaded template configuration file {0}'.format(\n f.name))\n\n # Setting up Jinja2 environment and template\n # See: http://bit.ly/2VTzOcb\n self.env = Environment(loader=FileSystemLoader(\n searchpath=template_folder\n ), **config.jinja_env_config)\n self.template = self.env.get_template(\n name=config.template_files['template']\n )", "title": "" }, { "docid": "5b1cf990d0a0958e69d787cab3d8cef0", "score": "0.52295357", "text": "def get_template(template_name, dir='templates'):\n _ENV = Environment(loader=FileSystemLoader(dir))\n return _ENV.get_template(template_name)", "title": "" }, { "docid": "8f9c6c38c2b4425f5aac31a89bf5937e", "score": "0.522816", "text": "def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):\r\n self.name = name\r\n self.source = source.read() if hasattr(source, 'read') else source\r\n self.filename = source.filename if hasattr(source, 'filename') else None\r\n self.lookup = [os.path.abspath(x) for x in lookup]\r\n self.encoding = encoding\r\n self.settings = self.settings.copy() # Copy from class variable\r\n self.settings.update(settings) # Apply\r\n if not self.source and self.name:\r\n self.filename = self.search(self.name, self.lookup)\r\n if not self.filename:\r\n raise TemplateError('Template %s not found.' 
% repr(name))\r\n if not self.source and not self.filename:\r\n raise TemplateError('No template specified.')\r\n self.prepare(**self.settings)", "title": "" }, { "docid": "4cf3682bb358e772236e47ea38f933c1", "score": "0.52242154", "text": "def create_template(html_to_render, variables):\n\n ## loading of a template\n template = env.get_template(html_to_render)\n ## rendering of the template\n rendered_template = template.render(variables=variables)\n\n return rendered_template", "title": "" }, { "docid": "c8fa8496d43a6b121fdc6b74d8a1aea2", "score": "0.52231395", "text": "def load_template(self, filename):\n try:\n with open(filename) as yaml_file:\n self.template = self.load(yaml_file)\n except Exception as exception:\n if isinstance(exception, IOError):\n pass\n else:\n raise TypeError(exception)", "title": "" }, { "docid": "1843c4610e0f064053daf5e547ba2f17", "score": "0.52215964", "text": "def _get_slide_template(self):\n with open('question_slide.tpl') as tpl_file:\n return Template(tpl_file.read())", "title": "" }, { "docid": "330fcf16fd40b132823571d7cb3b1eca", "score": "0.5215287", "text": "def get_template(self, name):\n try:\n template = env.get_template(name)\n except:\n template = IPythonHandler.get_template(self, name)\n return template", "title": "" }, { "docid": "1722e69ade529d2709fae148992b8a93", "score": "0.52058256", "text": "def CreateProcedureTemplate(self, request):\n try:\n params = request._serialize()\n body = self.call(\"CreateProcedureTemplate\", params)\n response = json.loads(body)\n if \"Error\" not in response[\"Response\"]:\n model = models.CreateProcedureTemplateResponse()\n model._deserialize(response[\"Response\"])\n return model\n else:\n code = response[\"Response\"][\"Error\"][\"Code\"]\n message = response[\"Response\"][\"Error\"][\"Message\"]\n reqid = response[\"Response\"][\"RequestId\"]\n raise TencentCloudSDKException(code, message, reqid)\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(e.message, e.message)", "title": "" }, { "docid": "4afa656dbc9c5764ee19f1ae852f7bc6", "score": "0.52028096", "text": "def get_template(path):\n try:\n with open(path, \"rb\") as template_file:\n text = template_file.read()\n\n # Get the template container.\n template_container = find_between(text, '<section id=\"feature\" >', '</section>')\n\n # Get the template.\n template = find_between(template_container, '<div class=\"row\">', '</div><!--/.row-->')\n template = '<div class=\"row\">' + template + '</div><!--/.row-->'\n return template\n\n except IOError:\n None", "title": "" }, { "docid": "5c9423b83272a73e901becca55d7bfd3", "score": "0.52002114", "text": "def create(self, validated_data):\n type_object = Type(\n filename=validated_data[\"filename\"],\n content=validated_data[\"content\"],\n user=validated_data[\"user\"],\n )\n type_version_manager_object = validated_data[\"type_version_manager\"]\n\n # load dependencies\n dependencies_dict = load_dependencies(validated_data)\n\n # Update the content of the template with dependencies\n init_template_with_dependencies(\n type_object, dependencies_dict, request=self.context[\"request\"]\n )\n\n # Create the template and its template version manager\n type_version_manager_api.insert(\n type_version_manager_object,\n type_object,\n request=self.context[\"request\"],\n )\n\n return type_object", "title": "" }, { "docid": "b4be4dec017937f9f81da83fd4be778b", "score": "0.5198435", "text": "def __init__(self, template, **kwargs):\n # Cross platform 
compatibility\n self.__slash = '/'\n if platform.system() == 'Windows':\n self.__slash = '\\\\'\n\n # Get an actual __file path\n local_loc = template.replace(\".\", self.__slash)\n\n self.__template_path = os.path.join(ROOT_DIR, f\"templates{self.__slash}\" + local_loc + '.html')\n self.__view_path = os.path.join(ROOT_DIR, f\"storage{self.__slash}views{self.__slash}{local_loc}.py\")\n\n # Try to get last modified time, if not present __file does not exist\n try:\n os.path.getmtime(self.__view_path)\n except FileNotFoundError:\n self.__write_to_file()\n\n # If template updated after compiled view then regenerated python and write to __file\n if os.path.getmtime(self.__template_path) > os.path.getmtime(self.__view_path):\n self.__write_to_file()\n\n # Add in useful features to template by default\n kwargs['request'] = request\n kwargs['Auth'] = Auth\n kwargs['Utils'] = Utils\n\n # Retrieve the HTML from the compiled template\n self.__html = importlib.import_module(f\"storage.views.{template}\").get_html(kwargs)", "title": "" }, { "docid": "602fc623dbf9d1c8046d23046e693eec", "score": "0.519581", "text": "def template_object(name):\n filter = create_filter(name=name)\n result = powershell.execute('Select-Template | %s' % filter)\n if len(result) != 1:\n raise KeyError, 'Template not found'\n powershell.execute('$template = Select-Template | %s' % filter)\n return '$template'", "title": "" }, { "docid": "cf1a0bdc23e3a9be440dd4c0a114a6cc", "score": "0.51892513", "text": "def create(self):\n data = self.get_options()\n data.update({\"name\": self.name})\n return self.transloadit.request.post(\"/templates\", data=data)", "title": "" }, { "docid": "cd4f57ac00126bd0857b3859deffbd7c", "score": "0.5181768", "text": "def load(cls) -> 'ProjectInfo':\n\t\ttry:\n\t\t\twith open(cls.FILENAME, mode='r') as file:\n\t\t\t\tnew_instance = cls('')\n\t\t\t\tnew_instance_data = json.load(file, object_hook=cls.project_objects_hook)\n\n\t\t\t\tfor key in new_instance.__dict__.keys():\n\t\t\t\t\tif key in new_instance_data:\n\t\t\t\t\t\tnew_instance.__setattr__(key, new_instance_data[key])\n\n\t\t\t\ttry:\n\t\t\t\t\tnew_instance.self_check()\n\n\t\t\t\texcept ProjectInfoError as e:\n\t\t\t\t\texit_with_output(str(e), 1)\n\n\t\t\t\treturn new_instance\n\n\t\texcept FileNotFoundError:\n\t\t\texit_with_output(f\"Unable to read the project info file. File {cls.FILENAME} doesn't exists\", 1)", "title": "" } ]
c68d0c17a1f37935b39f27ae28d10bff
Save out a transaction to a .yumtx file to be loaded later.
[ { "docid": "a486151aa6e79a7510a7e34a80b1ea7e", "score": "0.71357524", "text": "def save_ts(self, filename=None, auto=False):\n if self.tsInfo._unresolvedMembers:\n if auto:\n self.logger.critical(_(\"Dependencies not solved. Will not save unresolved transaction.\"))\n return\n raise Errors.YumBaseError(_(\"Dependencies not solved. Will not save unresolved transaction.\"))\n \n if not filename:\n prefix = 'yum_save_tx.%s' % time.strftime('%Y-%m-%d.%H-%M.')\n fd,filename = tempfile.mkstemp(suffix='.yumtx', prefix=prefix)\n f = os.fdopen(fd, 'w')\n else:\n f = open(filename, 'w')\n \n self._ts_save_file = filename\n \n msg = \"%s\\n\" % self.rpmdb.simpleVersion(main_only=True)[0]\n msg += \"%s\\n\" % self.ts.getTsFlags()\n\n if self.tsInfo._pkgSack is None: # Transactions have pkgSack?\n msg += \"1\\n\"\n else:\n msg += \"%s\\n\" % (len(self.repos.listEnabled()) + 1)\n for r in self.repos.listEnabled():\n msg += \"%s:%s:%s\\n\" % (r.id, len(r.sack), r.repoXML.revision)\n\n # Save what we think the future rpmdbv will be.\n msg += \"%s:%s\\n\" % ('installed', self.tsInfo.futureRpmDBVersion())\n\n msg += \"%s\\n\" % len(self.tsInfo.getMembers())\n for txmbr in self.tsInfo.getMembers():\n msg += txmbr._dump()\n try:\n f.write(msg)\n f.close()\n except (IOError, OSError), e:\n self._ts_save_file = None\n if auto:\n self.logger.critical(_(\"Could not save transaction file %s: %s\") % (filename, exception2msg(e)))\n else:\n raise Errors.YumBaseError(_(\"Could not save transaction file %s: %s\") % (filename, exception2msg(e)))", "title": "" } ]
[ { "docid": "74208d2e2824fc5e8aa565282ea71402", "score": "0.6902627", "text": "def _save_transaction_data(self, path='/data/info'):\n base_dir = os.getcwd() + path\n # Save transactions data record\n with open(f'{base_dir}/transactions', 'w+') as f:\n for tx in self._transaction_pool.transactions:\n data = Transaction.serialize(tx)\n f.write(data + '\\n')", "title": "" }, { "docid": "2ff44029ffc94dd5e1d59604bc9f9eff", "score": "0.5734079", "text": "def SaveTransaction(self, tx):\n coins = self.GetCoins()\n changed = []\n added = []\n deleted = []\n found_coin = False\n for input in tx.inputs:\n coin = None\n\n for coinref in coins:\n test_coin = coinref.Reference\n if test_coin == input:\n coin = coinref\n\n if coin is None:\n return False\n if coin.State & CoinState.Spent > 0:\n return False\n elif coin.State & CoinState.Confirmed == 0:\n return False\n\n coin.State |= CoinState.Spent\n coin.State &= ~CoinState.Confirmed\n changed.append(coin)\n\n for index, output in enumerate(tx.outputs):\n\n state = self.CheckAddressState(output.ScriptHash)\n\n key = CoinReference(tx.Hash, index)\n\n if state & AddressState.InWallet > 0:\n newcoin = Coin.CoinFromRef(coin_ref=key, tx_output=output, state=CoinState.Unconfirmed)\n self._coins[key] = newcoin\n\n if state & AddressState.WatchOnly > 0:\n newcoin.State |= CoinState.WatchOnly\n\n added.append(newcoin)\n\n if isinstance(tx, ClaimTransaction):\n # do claim stuff\n for claim in tx.Claims:\n claim_coin = self._coins[claim]\n claim_coin.State |= CoinState.Claimed\n claim_coin.State &= ~CoinState.Confirmed\n changed.append(claim_coin)\n\n self.OnSaveTransaction(tx, added, changed, deleted)\n\n return True", "title": "" }, { "docid": "3c9e13122824810a19342a4032289907", "score": "0.57161963", "text": "def __save(self):\n\n write_file(path.join(path_wallet, self.__user + '_wallet.txt'),\n self.__user + '\\n'\n + self.__keys_filename)", "title": "" }, { "docid": "91a2e3d3021fc419cefd8747c3339a3d", "score": "0.56985205", "text": "def save_blockchain(self, path='/data'):\n\n info_path = path + '/info'\n data_path = path + '/data'\n\n # Save blockchain metadata\n self._save_metadata(info_path)\n\n # Save account data\n self._save_wallet_pool_data(info_path)\n\n # Save uncommited transactions\n self._save_transaction_data(info_path)\n\n # Save the genesis block data\n self._save_genesis_data(data_path)\n\n # Save blocks data\n self._save_blocks_data(data_path)", "title": "" }, { "docid": "e860c27d9464e889e5ba163011940c66", "score": "0.5630539", "text": "def save_inventory(file_name, table):\r\n with open(file_name, 'wb') as objFile:\r\n table = pickle.dump(table, objFile)", "title": "" }, { "docid": "f993bc6f645b932dfa5fbcc27b3268b1", "score": "0.55817837", "text": "def save(self, filename):\n f = open(filename, 'w+')\n f.write(write_statements(self.get_statements(True),\n verbose=self.verbose))\n f.close()", "title": "" }, { "docid": "40941c1d361546cedfc5e329b5d657eb", "score": "0.55703187", "text": "def save(self):\n filename = hash(str(self.database))\n path = \"../Output/\" + str(filename) + \".txt\"\n file = open(path, \"w\")\n file.writelines(map(lambda x: x + '\\n', self.data))\n file.close()", "title": "" }, { "docid": "694845e5e273329f037e4f6a24ae3a38", "score": "0.5569761", "text": "def save(self, content):\n file = self.opentemp()\n file.write(content)\n self.commit()", "title": "" }, { "docid": "05a94031e165bd9a2e3dc49ba911c56a", "score": "0.554541", "text": "def save_chain(self):\n pprint('saving to file named bc_file.txt')\n with 
open('bc_file.txt', 'w') as output:\n output.write(serializer.serialize(self.chain))", "title": "" }, { "docid": "473410f20f616e563e17ceaee1aeaa89", "score": "0.55245453", "text": "def _save_wallet_data(self, wallet, path='/data/info'):\n # Get the base directory\n base_dir = os.getcwd() + path\n data = Wallet.serialize(wallet)\n self._wallet_file.write(data + '\\n')", "title": "" }, { "docid": "5c107fef475ab608323a318b49739842", "score": "0.5517679", "text": "def persist_transactions_output_data_func(**kwargs):\n ti = kwargs.get('task_instance')\n config.ID = int(ti.xcom_pull('read_input_file_task', key='log_stats_id'))\n temp_file_path = ti.xcom_pull('read_input_file_task', key='temp_file_path_transactions')\n metadata = ti.xcom_pull('read_input_file_task', key='metadata')\n\n data, transactions_read_time = read_json_file_local(\n conf.get('BUCKET_NAME', None), temp_file_path\n )\n\n persist.PersistStep(metadata, data).process()\n\n return transactions_read_time", "title": "" }, { "docid": "ce37229ffdfdcb28081445add24429bf", "score": "0.5455006", "text": "def save(self):\r\n self.save_config()\r\n output=\"\"\r\n for goal in self._goals:\r\n output += \"{0},{1},{2}\\n\".format(goal.get_name(),goal.get_amount(), goal.get_increment())\r\n for amount,date,desc in goal.get_transactions():\r\n output += \"{0},{1},{2}\\n\".format(amount,date,desc)\r\n output+=\"\\n\\n\"\r\n fw = open(\"profile_{0}.save\".format(self._profile),'w')\r\n fw.write(output)\r\n fw.close()", "title": "" }, { "docid": "e1bf470ad4e9fbf7a151d4e1dd23a517", "score": "0.5449256", "text": "def save():", "title": "" }, { "docid": "a4e09f3c338b83ee588e5b54c2bc9b9f", "score": "0.54385424", "text": "def saveCommit(commitRow,path):\n exportRowCsv(path,commitRow)", "title": "" }, { "docid": "28ddf37fd24e8737c734587fe21337a4", "score": "0.5405582", "text": "def save(self):\n with open(self.file, 'wb') as fout:\n pickle.dump(self, fout, protocol=self.prot)", "title": "" }, { "docid": "a7037a2e82cc513595a563a7a6b87cde", "score": "0.5391943", "text": "def save_checkpoint(state, filename='checkpoint_rel_small075.pth.tar'):\n torch.save(state, filename)", "title": "" }, { "docid": "6b483f5c2c5a11ca61215bd1bc815c29", "score": "0.5388907", "text": "def save_mets(self):\n log = getLogger('ocrd.workspace.save_mets')\n log.debug(\"Saving mets '%s'\", self.mets_target)\n if self.automatic_backup:\n self.automatic_backup.add()\n with atomic_write(self.mets_target) as f:\n f.write(self.mets.to_xml(xmllint=True).decode('utf-8'))", "title": "" }, { "docid": "1f0bbcc3e346dacdf28ae3e25d63446f", "score": "0.5375624", "text": "def commit_transaction(self):\n self.tx.commit()", "title": "" }, { "docid": "08ddba0bd2b6a6ec05a81b07e05a2236", "score": "0.53726274", "text": "def _save_model(self, out_file):\n pass", "title": "" }, { "docid": "839f3bf74317e3c0c4c7bf6e0b470190", "score": "0.5365963", "text": "def SaveState(self):\n obj_file = open(self.Name + '.rlx', 'w')\n dump(self, obj_file)", "title": "" }, { "docid": "3c5a236ceae0a61f179e565453d26a77", "score": "0.53275275", "text": "def save(self, unit, forceSave=False):\n raise NotImplementedError", "title": "" }, { "docid": "e1ce33a21e08e0c104ff8ca2f7b22789", "score": "0.5323787", "text": "def _save(self):\n\n if not self._tree:\n raise AuthFileNotInitializedException(self._file_name)\n\n self._tree.write(\n self._file_name, encoding=\"utf-8\", xml_declaration=True, pretty_print=True\n )", "title": "" }, { "docid": "e39cae33b0352fc7e92a6dbf44f0c196", "score": "0.5296668", "text": "def save(self, 
save_file=None):\n if save_file is None:\n save_file = self.filename + \".bgoe.pgz\"\n\n with gzip.open(save_file, 'wb') as f:\n pickle.dump({'filename' : self.filename,\n 'event_time' : self.event_time,\n 'channel' : self.channel,\n 'level' : self.level,\n 'clock_source' : self.clock_source,\n 'clock_synced' : self.clock_synced}, f, pickle.HIGHEST_PROTOCOL)", "title": "" }, { "docid": "76cf2e6bfd3681b3e371d9e63e3b7688", "score": "0.5289666", "text": "def save(self,file):\n assert \".pymodel\" in file\n with open(file,\"w\") as stream:\n pickle.dump(self,stream)", "title": "" }, { "docid": "0fdc53f51ac4e88e9e53449f753505bf", "score": "0.52819705", "text": "def save(self):\n\n # sets the filename to be the object's name attribute, checks for\n # existing files\n file = '%s.tc' % self.name\n exist = False\n\n # if the file exists create a secondary file to prevent data loss from\n # write failures\n if exists(file):\n exist = True\n file = '%s.temp' % file\n\n f = open(file, 'w')\n\n # uses the first two lines of the file for the name followed by the\n # active session\n f.write(json.dumps(self.name) + '\\n')\n if self.current:\n f.write(json.dumps(self.current.savef()) + '\\n')\n else:\n f.write(json.dumps(None) + '\\n')\n\n # remainder of the file is used to store each timecard\n for card in self.file:\n f.write(json.dumps(card.savef()) + '\\n')\n\n f.close()\n\n # removes the original save file and replaces it with the secondary\n if exist == True:\n os.remove('%s.tc' % self.name)\n os.rename(file, '%s.tc' % self.name)\n\n print 'Saved:', self.name + '.tc'", "title": "" }, { "docid": "ac0bd6e1e6625e94684c1f106aed30d4", "score": "0.52609473", "text": "def save_file(self):\n with open(self.path, mode='wb') as file:\n pickle.dump(self, file)", "title": "" }, { "docid": "0f5722d6736d44494075e0a36f86d65b", "score": "0.5258685", "text": "def save(self):\n\n level_text = json.dumps({\n \"settings\": self.settings,\n \"rows\": self.rows,\n \"events\": self.events\n })\n\n with open(self.filename, \"r+\") as f:\n old_text = f.read()\n\n with open(self.filename + \".bak\", \"w\") as g:\n g.write(old_text)\n\n f.seek(0, 0)\n f.write(level_text)\n f.truncate()", "title": "" }, { "docid": "5d71a5a48ffb55aa66cb31bb6a1b91a3", "score": "0.5252436", "text": "def save(self, blockchain: List[blockchainConstants.Block], open_tx: List[blockchainTx.Transaction], peer_nodes: Set[str]) -> bool:\n try:\n with open(self.path, mode='w') as file:\n file.write(blockchainHelpers.stringify_block(blockchain))\n self.add_line_break(file)\n file.write(blockchainHelpers.stringify_block(open_tx))\n self.add_line_break(file)\n file.write(blockchainHelpers.stringify_block(list(peer_nodes)))\n\n self.print_success(StorageAction.SAVING)\n return True\n\n except IOError:\n self.print_error(StorageAction.SAVING)\n return False", "title": "" }, { "docid": "634a692d11d4c9b31737aaae193cfdb0", "score": "0.5249812", "text": "def save(self):\n self.path.write_text(toml.dumps(self.tomldoc))", "title": "" }, { "docid": "bc1564c1731212aaf9cb7db0d38506ce", "score": "0.5223411", "text": "def _save(self, checkpoint_step):\n torch.save(\n dict(network_state_dict=self._network.state_dict(),\n optimizer_state_dict=self._optimizer.state_dict()),\n f'{os.path.join(self._log_dir, \"state\")}{checkpoint_step}.pt'\n )\n print('Saved checkpoint.')", "title": "" }, { "docid": "46b9b9fe86b039c0004bdd05cde2e31e", "score": "0.5204531", "text": "def save_state(self):\r\n if not self.use_yum:\r\n cache = open(self.cachefile, 'wb')\r\n 
cPickle.dump((self.packages, self.deps, self.provides,\r\n self.filemap, self.url_map,\r\n self.yumgroups), cache, 2)\r\n cache.close()", "title": "" }, { "docid": "da7177b545652152586b2a4c11233d95", "score": "0.5204141", "text": "def save_as(self, filename: str):\n\n toml.dump(self.to_dict(), filename)", "title": "" }, { "docid": "f4eedd0ad2513c6a8ed0549c7650596f", "score": "0.5203081", "text": "def put_xml_file(tree):\n tree.write(\"cleaned-updateset.xml\")", "title": "" }, { "docid": "386fd85538179545aa1f047e0bb4ca53", "score": "0.5195064", "text": "def dbSave(self, env):\n\t\traise NotImplementedError, 'Flat File Saving Not Implemented'", "title": "" }, { "docid": "9990efc85f6c0b4e9ffa7a4f77628a62", "score": "0.51935107", "text": "def save_it(self):\n self.save_txt()", "title": "" }, { "docid": "d70e92aab35b856160ad8a0dd7b349ab", "score": "0.5187412", "text": "def persist(self, u_file: UserFile) -> None:", "title": "" }, { "docid": "d528b4e66fbb89b52eb12ad435a1a798", "score": "0.51865375", "text": "def save(self):\n with open(self.trans_path,'w') as odata:\n # print(self.config_dict)\n json.dump(self.config_dict,odata,indent=2,ensure_ascii=False)\n config_json = json.dumps(self.config_dict,indent=2,ensure_ascii=False)\n print('update result dict:\\n{green}{bright}{config_json}{back}'.format(\n config_json=config_json,**colors))", "title": "" }, { "docid": "5af7a4b6be39892b5ad9c2a5b4cb6d7f", "score": "0.5178957", "text": "def save(self, outpath: str) -> None:\n raise NotImplementedError", "title": "" }, { "docid": "a2d030033dcf3ee4800083d51aca832a", "score": "0.5178486", "text": "def _write_tarfile():\n with tar_file as backup:\n # Backup metadata\n backup.add(temp, arcname=\".\")\n\n # Backup data\n atomic_contents_add(\n backup,\n self.path_data,\n excludes=self.backup_exclude,\n arcname=\"data\",\n )", "title": "" }, { "docid": "4918568c92cbed5913992035e2de313b", "score": "0.51737845", "text": "def do_save(self, filename: str):\n output_string = b\"\".join([tile.tobytes() for tile in self.tile_data ])\n with open(filename, 'wb') as fout:\n if self.file_format == 'raw':\n fout.write(output_string)\n elif self.file_format == 'ines':\n fout.write(self.ines_data + output_string)\n self.modified = False\n self.filename = filename", "title": "" }, { "docid": "282e5244a0106f85afbc1b2da0e46fa9", "score": "0.51690733", "text": "def tree_save(tree):\n # \"The time for us is now\"\n NOW = datetime.datetime.now()\n # Format the time according to config\n save_filename = os.path.join(SAVESDIR, NOW.strftime(SAVESFILENAMEFORMAT) + \".UAS\")\n # Create the file, save current tree inside and close\n save_file = open(save_filename, \"w\")\n save_file.write(str(tree.values))\n save_file.close()\n if arguments['-d'] is True: print(f\"Successfully saved current tree to {save_filename}\")", "title": "" }, { "docid": "a213288092ed877ae09c70315f2e4d17", "score": "0.51688623", "text": "def Write(tree):\r\n xmlString = tostring(tree.getroot())\r\n xmlString = xmlString.replace('\\n', '')\r\n xmlString = xmlString.replace('\\r', '')\r\n xml = parseString(xmlString)\r\n prettyString = xml.toprettyxml()\r\n lines = prettyString.split('\\n')\r\n for line in lines:\r\n if line.isspace() or line == '':\r\n lines.remove(line)\r\n xmlText = \"\\n\".join(lines)\r\n file = open(PROFILES_FILENAME, 'w')\r\n file.write(xmlText)\r\n file.close()", "title": "" }, { "docid": "65a3038b1ee14c3915582329d98acf02", "score": "0.5152722", "text": "def save(self, save_path): \n with open(save_path, 'wb') as f:\n pickle.dump(self, 
f)", "title": "" }, { "docid": "2bedd9e1b4a1121e2aa8c2c08c0e356b", "score": "0.5151003", "text": "def persist(self,file_name, model_dir):\n pass", "title": "" }, { "docid": "16d4643859b4ecaebcdc3be0a2c1338a", "score": "0.5147352", "text": "def save_to_file(filename, object):\n f = open(filename + '.pckl', 'wb')\n pickle.dump(object, f)\n f.close()", "title": "" }, { "docid": "f7f51bfd0620079fdc39f58823db8f36", "score": "0.5146032", "text": "def save_run(self, run_result: RunResult) -> None:\n with open(self.store_location, mode='ab') as f:\n self.serializer.dump(run_result, f)", "title": "" }, { "docid": "085d0caf6f724fb60251fa637685a934", "score": "0.5145291", "text": "def dump_utxo(self, utxos: List[dict]):\n if not len(utxos):\n return\n\n # serialize the Address and add the address index\n utxos_for_json = []\n for utxo in utxos:\n utxo_for_json = utxo.copy()\n addr = utxo[\"address\"]\n utxo_for_json[\"address\"] = addr.to_full_string(Address.FMT_CASHADDR)\n\n wallet_types_without_index = (ImportedAddressWallet, ImportedPrivkeyWallet)\n if not isinstance(self.wallet, wallet_types_without_index):\n utxo_for_json[\"address_index\"] = self.wallet.get_address_index(addr)\n utxos_for_json.append(utxo_for_json)\n\n fileName, _filter = QtWidgets.QFileDialog.getSaveFileName(\n self, \"Save UTXOs to file\", filter=\"JSON files (*.json);;All files (*)\"\n )\n if not fileName:\n return\n if not fileName.endswith(\".json\") and not fileName.endswith(\".JSON\"):\n fileName += \".json\"\n with open(fileName, \"w\", encoding=\"utf-8\") as outfile:\n json.dump(utxos_for_json, outfile)", "title": "" }, { "docid": "f73cad1018dede52d7729bb2ab8de5a1", "score": "0.51403385", "text": "def save(self, checkpoint_path):", "title": "" }, { "docid": "1f90c50e4d2140093fb3986f258d88d9", "score": "0.513063", "text": "def save(self, filename):\n checkpoint = {'model' : self.model.state_dict()}\n torch.save(checkpoint, filename)", "title": "" }, { "docid": "2f769f7fc68383b8c4a2f426978e5f77", "score": "0.512153", "text": "def save(self):\n mode = 'wb'\n _file_gzip = gzip.open(self.filename, mode)\n\n self.clean()\n dump(self._storage, _file_gzip)\n # self._file.truncate()\n _file_gzip.flush()", "title": "" }, { "docid": "e3a12337e2354d9f64efea41ea4edcac", "score": "0.5120521", "text": "def salvarEstructura(est: Generic, nom: str) -> Any:\n from pickle import dump\n n = nom+'.plk'\n output = open(n, 'wb')\n dump(est, output, -1)\n output.close()", "title": "" }, { "docid": "510f297dec918e28c82bb1fcbf76c2cc", "score": "0.5113759", "text": "def save(self):\n self.write(self.data)", "title": "" }, { "docid": "7ec2a9c1e61a087c2230c688e70b08b3", "score": "0.5108506", "text": "def SBML_writeFile(self):\n\n self.SBML.writeSBML(self.sbml_document, 'pysces_sbml_tmp.xml')\n Fin = open('pysces_sbml_tmp.xml', 'r')\n Fout = open(os.path.join(self.model_dir, self.model_filename + '.xml'), 'w')\n cntr = 0\n try:\n UseR = getuser()\n except:\n UseR = ''\n for line in Fin:\n if cntr == 1:\n Fout.write(\n '<!-- Created with PySCeS ('\n + __version__\n + ') on '\n + strftime(\"%a, %d %b %Y %H:%M:%S\")\n + ' by '\n + UseR\n + ' -->\\n'\n + line\n )\n else:\n Fout.write(line)\n cntr += 1\n Fout.close()\n Fin.close()\n\n os.remove('pysces_sbml_tmp.xml')", "title": "" }, { "docid": "f1dfa21be766106773ffeba789d8a182", "score": "0.5106005", "text": "def save_model(self):\n with open('part3_pricing_model.pickle', 'wb') as target:\n pickle.dump(self, target)", "title": "" }, { "docid": "21d866da7d8270d15b13bba126c1ef50", "score": "0.5101998", 
"text": "def store(oid, serial, data, version, transaction):", "title": "" }, { "docid": "8c97b0bc130c29eccff6df88ef4af25e", "score": "0.5096877", "text": "def save(self, filename):\n # FIXME: re-set protein after pickling\n self.set_protein(None)\n # return joblib.dump(self, filename, compress=9)[0]\n with gzip.open(filename, 'w+b', compresslevel=9) as f:\n pickle.dump(self, f, protocol=2)\n return filename", "title": "" }, { "docid": "c464f8ee4f2ff41d0770fdab57811ba5", "score": "0.50950164", "text": "def save(self, path):\n with open(path, 'wb') as f:\n serialized_model = dill.dumps(self)\n f.write(serialized_model)", "title": "" }, { "docid": "e09da387284c0de3eef6e57148d37185", "score": "0.50845796", "text": "def save_model(to_file):\n\n raise NotImplementedError", "title": "" }, { "docid": "b5d767e85f77bfa086c0fbd74508201b", "score": "0.50842464", "text": "def save(self, name):\n self.finalize()\n with open(name, 'wb+') as f:\n if six.PY3:\n f.write(self.fileobj.getbuffer())\n else:\n f.write(self.fileobj.getvalue().encode('utf-8'))", "title": "" }, { "docid": "a0574ddaf45c07a32409bb5795bdc58f", "score": "0.50761795", "text": "def save(self, name): \n pickle.dump(self, open('saves/' + name + '.txt', 'wb'), protocol = 2)", "title": "" }, { "docid": "233f10bdaa56265691c1d15b95afb979", "score": "0.5072339", "text": "def save_old(self, path):\n # Depository not used in old database, so it is not saved\n\n libraries_path = os.path.join(path, 'thermo_libraries')\n if not os.path.exists(libraries_path):\n os.mkdir(libraries_path)\n for library in self.libraries.values():\n library_path = os.path.join(libraries_path, library.label)\n if not os.path.exists(library_path):\n os.mkdir(library_path)\n library.save_old(\n dictstr=os.path.join(library_path, 'Dictionary.txt'),\n treestr='',\n libstr=os.path.join(library_path, 'Library.txt'),\n )\n\n groups_path = os.path.join(path, 'thermo_groups')\n if not os.path.exists(groups_path):\n os.mkdir(groups_path)\n self.groups['abraham'].save_old(\n dictstr=os.path.join(groups_path, 'Abraham_Dictionary.txt'),\n treestr=os.path.join(groups_path, 'Abraham_Tree.txt'),\n libstr=os.path.join(groups_path, 'Abraham_Library.txt'),\n )", "title": "" }, { "docid": "7a053146d11c059022acca90a80eb841", "score": "0.5066046", "text": "def savetxt(self, filename):\n with open(filename, \"w\") as fout:\n for obj in self.atom_to_obj:\n fout.write(\"%s\\n\" % obj)", "title": "" }, { "docid": "76e3bfc4bad06c096363119a659bf472", "score": "0.5065508", "text": "def save(self, fname):\n state = self.get_state()\n with tf.io.gfile.GFile(fname, 'wb') as f:\n pickle.dump(state, f)\n return str(fname)", "title": "" }, { "docid": "25e34f1da9b9f30efe2bb056cdcb1f3b", "score": "0.50573665", "text": "def save_to_file(self, filename):\n torch.save(self.get_state_dict(), filename)", "title": "" }, { "docid": "a035b9d7f2b34c11dd1ee31758b4cd39", "score": "0.50547683", "text": "def toFile(self, path):\n with open(path + '.pkl', 'wb') as f:\n pickle.dump(self.table, f, pickle.HIGHEST_PROTOCOL)", "title": "" }, { "docid": "a523a242690f73403b8322a61feed3b5", "score": "0.505333", "text": "def _save(self, path, fileformat):\n if fileformat not in {'promela', 'Promela', 'pml'}:\n return False\n # closed ?\n if self.env_vars:\n return False\n from tulip.transys.export import graph2promela\n s = graph2promela.fts2promela(self, self.name)\n # dump to file\n f = open(path, 'w')\n f.write(s)\n f.close()\n return True", "title": "" }, { "docid": "3f55176ec33a395bf4d269a276d81b53", "score": "0.505308", 
"text": "def save(self):\n with open(self.name + '.dat', 'wb') as f:\n pickle.dump(self.log, f)", "title": "" }, { "docid": "9f1ba15845db446c0a44f97141d18238", "score": "0.50512797", "text": "def write_tchanges_file(tchanges_string, filename):\n filename += '.tchanges'\n with open(filename, 'wb') as tchanges_file:\n tchanges_file.write(tchanges_string.encode('UTF-8'))", "title": "" }, { "docid": "5129d2b4c374afa905822b80b7a3c32c", "score": "0.5045736", "text": "def write_changes(self):\n release = self.old_tree.release()\n backup_release = release+\"~\"\n if os.path.isfile(backup_release):\n os.remove(backup_release)\n shutil.copy(release,backup_release)\n print \"Backup written to:\",backup_release\n file = open(release,\"w\")\n file.writelines(self.new_tree.lines)\n file.close()\n print \"Changes written to:\",release", "title": "" }, { "docid": "a0db7e7417ae67d4ecf2a8d426768e28", "score": "0.5035694", "text": "def save(self, path):", "title": "" }, { "docid": "a0db7e7417ae67d4ecf2a8d426768e28", "score": "0.5035694", "text": "def save(self, path):", "title": "" }, { "docid": "a0db7e7417ae67d4ecf2a8d426768e28", "score": "0.5035694", "text": "def save(self, path):", "title": "" }, { "docid": "a0db7e7417ae67d4ecf2a8d426768e28", "score": "0.5035694", "text": "def save(self, path):", "title": "" }, { "docid": "2c0358a87a6d3f6ad26ab9b918017f38", "score": "0.50321484", "text": "def save(self, path):\n checkpoint = {'model_state_dict': self.state_dict()}\n torch.save(checkpoint, path)", "title": "" }, { "docid": "3862b0583d4bb0a7280b3f04ea9d239c", "score": "0.5030745", "text": "def save(self,fn):\n\t\t\n\t\tiom.saveToPickle(self,fn=fn)", "title": "" }, { "docid": "bc1d62e25b3cd085eaa5a81dd038bc82", "score": "0.50288004", "text": "def save_session(self, path):\r\n from .state import GlueSerializer\r\n gs = GlueSerializer(self)\r\n with open(path, 'w') as out:\r\n gs.dump(out, indent=2)", "title": "" }, { "docid": "ebc25518b233d52a72d3b4253480a763", "score": "0.50255877", "text": "def save(self, path):\n with path.open('wb') as f:\n torch.save(self.state_dict(), f)", "title": "" }, { "docid": "6b3cbb5f71d70ae561c006af8c1d9515", "score": "0.5024947", "text": "def save( self, anOutputFile ):\n \n anEmlString = self.asString()\n \n anOutputFileObject = open( anOutputFile, 'w' )\n anOutputFileObject.write( anEmlString )", "title": "" }, { "docid": "6b3cbb5f71d70ae561c006af8c1d9515", "score": "0.5024947", "text": "def save( self, anOutputFile ):\n \n anEmlString = self.asString()\n \n anOutputFileObject = open( anOutputFile, 'w' )\n anOutputFileObject.write( anEmlString )", "title": "" }, { "docid": "d7770597c0d7e346fcd1704928ac98ed", "score": "0.5019575", "text": "def save_moshinsky(task):\n # get targets\n targets = operators.rel.get_rel_targets(task)\n\n # get rel sources\n rel_sources = operators.rel.get_rel_sources(task, targets)\n\n for (basename, _, parameters) in task.get(\"moshinsky_targets\", []):\n source_id = parameters[\"id\"]\n source = rel_sources[source_id]\n filename = environ.tmbe_filename(\n basename, parameters[\"truncation\"], source['hw'], task.get(\"h2_extension\", \"bin\")\n )\n mcscript.task.save_results_single(\n task, filename, subdirectory=\"tbme\"\n )", "title": "" }, { "docid": "af8821c23fd5d6a8276cf7606efd4629", "score": "0.5017574", "text": "def save(self, filename):\n with open(filename, 'wb') as f:\n pickle.dump((self.mprims, self.th), f)", "title": "" }, { "docid": "dab5e3dcea8760953c510fca51664f2c", "score": "0.50158674", "text": "def fire_transactions(self, 
address):\n # Retrieve transactions records and tranferred data\n balance_pool = self._transaction_pool.balance\n record_pool = self._transaction_pool.records\n records = []\n\n # Move money between account based on each transaction\n for i in range(len(balance_pool)):\n source, dest, amount = balance_pool[i]\n # Check if the source wallet has enough balance\n if not self.have_balance(source, amount):\n print(f'{source} has no enough balance !!!')\n continue\n\n # Actually make a transaction\n self.move_balance(source, dest, amount)\n\n # Add valid transaction records in the list\n records.append(record_pool[i])\n\n # Add transactions records in the blockchain\n self._new_coinbase_tx_account(records, address)\n\n # Save updated account data\n self._save_wallet_pool_data()\n\n # Clear the transaction records and balance\n self._transaction_pool.reset()\n\n # Clear the transaction file\n with open(f'{self._info_path}/transactions', 'w+') as f:\n pass", "title": "" }, { "docid": "52339018088524475de15f1ed0eeac36", "score": "0.50155854", "text": "def _save_object(self, obj, file_path): \n joblib.dump(obj, file_path)", "title": "" }, { "docid": "d72d5837ec96023542f26d5aa52e2175", "score": "0.501029", "text": "def save_data_to_file(inputfile, model_environment_status):\r\n\t\tpass", "title": "" }, { "docid": "10a29f5933defa4625356b6079d2f925", "score": "0.50101775", "text": "def save(self, filename):\n options = conf.lib.clang_defaultSaveOptions(self)\n result = int(conf.lib.clang_saveTranslationUnit(self, fspath(filename),\n options))\n if result != 0:\n raise TranslationUnitSaveError(result,\n 'Error saving TranslationUnit.')", "title": "" }, { "docid": "1fd791961ac2823ebff4c9cde73dfbf9", "score": "0.50090426", "text": "def geno_save(self, f):\n # Hmm how to remove the raw df temporarily before saving?\n del self.df\n with open(f, 'w') as temp:\n dump(self, temp)\n temp.close()", "title": "" }, { "docid": "88a5f19fcb50caefb4d40a429ddd3b01", "score": "0.50020677", "text": "def doSaveOperation(self,stateDict,fileName):\n torch.save(stateDict,fileName)", "title": "" }, { "docid": "24e2eac1f4cd2408bef99a78a2715268", "score": "0.5000772", "text": "def save(self):\n\n # Create the game saves directory if necessary\n if not os.path.exists(Save.SAVE_FOLDER):\n os.makedirs(Save.SAVE_FOLDER)\n\n # Create this saves' directory\n if not os.path.exists(f\"{Save.SAVE_FOLDER}/{self.get_username()}\"):\n os.mkdir(f\"{Save.SAVE_FOLDER}/{self.get_username()}\")\n\n save_json = {\n \"username\": self.__username,\n \"speed\": self.__speed,\n \"virus_files\": {\n \"deleted\": self.__deleted_virus_files,\n \"total\": self.__virus_files,\n \"tracked\": self.__tracked_files,\n \"locations\": self.__virus_file_locations\n },\n \"normal_files\": {\n \"deleted\": self.__deleted_normal_files,\n \"total\": self.__normal_files,\n \"restored\": self.__restored,\n \"log\": self.__deletion_log\n }\n }\n system_json = {\n \"root\": self.__root.to_json(),\n \"trash\": self.__trash.to_json()\n }\n Hexable.save(save_json, f\"{Save.SAVE_FOLDER}/{self.get_username()}/save.hex\")\n Hexable.save(system_json, f\"{Save.SAVE_FOLDER}/{self.get_username()}/filesystem.hex\")", "title": "" }, { "docid": "dd58aaeb315ff1d6f9eb0808aa4041bc", "score": "0.4998068", "text": "def to_file(self):\n logging.info(\"About to persist %d mails of total %d bytes.\" % (len(self._mailq), self._mailq_bytes))\n \n for (acct_id, mail, bytes) in self._mailq:\n try:\n \n with tempfile.NamedTemporaryFile(\n prefix = \"%s_\" %(acct_id),\n suffix = '.eml',\n dir = 
self.persist_dir,\n delete = False) as f:\n \n f.write(mail)\n \n except Exception as details:\n logging.error('Failed persisting mail from account \"%s\". ERR [%s]' % (acct_id, details))", "title": "" }, { "docid": "54291077535c90b520504ec3d9d2e8ce", "score": "0.49918887", "text": "def save(self):\n\n # generate the .mdata file path\n if not self.save_path:\n mdata_path = self.generate_mdata_filepath()\n else:\n mdata_path = self.save_path\n\n # write .mdata file to disk\n with open(mdata_path, \"w+\") as mdata_file:\n try:\n mdata_file.write(security.xor_key(self.serialize()))\n except IOError as e:\n log.error(\"Couldn't write metadata at <{}> because {}\".format(mdata_path, e))\n return False\n\n return True", "title": "" }, { "docid": "67cc4c7cfee46afa41cb008bf9d26d97", "score": "0.49916765", "text": "def save(self, logname):\r\n super().save(logname)\r\n print('[TRF] save ckpt to %s' % logname)\r\n self.saver.save(self.get_session(), logname + '.ckpt')", "title": "" }, { "docid": "a9debe7b38e5b7552c293121faa26340", "score": "0.49865842", "text": "def save(self, fn):\n assert fn.endswith(\".pkl\")\n with open(fn, \"wb\") as f:\n pickle.dump(self, f)", "title": "" }, { "docid": "c78223127b34cc155d5ccc6e2caf7bd7", "score": "0.49848408", "text": "def save(self):\n self.trans=open(\"Translation.txt\", \"r+\")\n self.trans.truncate(0)\n written=\"\"\n for word in self.dictionary:\n written+=(word+\"-\"+self.dictionary[word]+\"\\n\")\n #self.trans.write(written.encode('utf8'))\n self.trans.write(written)\n self.trans.close()\n self.trans=open(\"Translation.txt\", \"r+\")", "title": "" }, { "docid": "a27d360267eed4d8ddd479427e22b1d2", "score": "0.49764252", "text": "def to_file(self, filename, overwrite=True, fmt=None):\n if fmt is None:\n if filename.endswith('.gz'):\n fmt = 'gz'\n else:\n fmt = 'sqlite'\n if os.path.exists(filename) and not overwrite:\n raise RuntimeError(f'File {filename} exists; remove or pass '\n 'overwrite=True.')\n if fmt == 'sqlite':\n self.copy(map_file=filename, overwrite=overwrite)\n elif fmt == 'dump':\n with open(filename, 'w') as fout:\n for line in self.conn.iterdump():\n fout.write(line)\n elif fmt == 'gz':\n with gzip.GzipFile(filename, 'wb') as fout:\n for line in self.conn.iterdump():\n fout.write(line.encode('utf-8'))\n else:\n raise RuntimeError(f'Unknown format \"{fmt}\" requested.')", "title": "" }, { "docid": "3259d18f72f9f5f960de0478b4066b1e", "score": "0.49761456", "text": "def save(self, filename):\n self.make_xml()\n open(filename, 'w').write(self._prettyprint())", "title": "" }, { "docid": "7440b5d92ac7db7492d56072e8628119", "score": "0.49673098", "text": "def _save_wallet_pool_data(self, path='/data/info'):\n # Get the base directory\n base_dir = os.getcwd() + path\n\n # Reset the address file\n self._wallet_file.close()\n\n # Save the address data\n with open(f'{base_dir}/wallet', 'w+') as f:\n for address, wallet in self._wallet_pool.wallets:\n data = Wallet.serialize(wallet)\n f.write(data + '\\n')", "title": "" }, { "docid": "66958aef24757420c699e83e4465d1cd", "score": "0.4965496", "text": "def get_pickle(self):\n fname = 'indrabot.pkl'\n with open(fname, 'wb') as fh:\n pickle.dump(self.get_statements(), fh)\n return fname", "title": "" }, { "docid": "26f2c82376ee447e51875e6179e5be7d", "score": "0.49618214", "text": "def save(self, filename):\n o = open(filename, \"w\")\n o.write(self.write())\n o.close()", "title": "" }, { "docid": "8b963a1e8fd5194a6b433ca6152caf4c", "score": "0.4961337", "text": "def store_transaction(self, data):\n 
current_amount = int(data[4])\n transaction_id = int(data[1]) + 1\n transaction_date = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n new_amount = current_amount + self.transaction_amount\n # print(f\"{self.childname} {transaction_id} {transaction_date} {self.transaction_amount} {new_amount} {self.description}\")\n child_data = [\n self.childname,\n transaction_id,\n transaction_date,\n self.transaction_amount,\n new_amount,\n self.description\n ]\n with open(f'{self.childname}_log.csv', mode='a+') as child_file:\n child_writer = csv.writer(child_file, delimiter=';', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n child_writer.writerow(child_data)", "title": "" }, { "docid": "b691ac551780a0925b13da0e950b1ce3", "score": "0.49601665", "text": "def write_entity_info_to_db(self):\n logging.info('Saving entity info to the sqlite3 database.')\n # Create the db.\n with contextlib.closing(sqlite3.connect(self.db_filename)) as conn:\n with conn as cursor:\n # Create table\n cursor.execute(\n 'CREATE TABLE if not exists entities (locale text, '\n 'type text, name text, salience real, wiki_url text, '\n 'filename text)')\n with conn as cursor:\n # Load all the data\n cursor.executemany(\n 'INSERT INTO entities VALUES (?,?,?,?,?,?)',\n self.entity_info)", "title": "" } ]
d0a19727f1c069c671bc757f00f28501
Select a random move from those available
[ { "docid": "e5b2a018ca1af217d1d81b9c4eee3e7b", "score": "0.75547254", "text": "def randomMove():\n move = self.pickRandomAvailableSquareToEat() # positive int or -1\n return move", "title": "" } ]
[ { "docid": "cc5e061c85fc578cb98aa857aea6a2ff", "score": "0.8201249", "text": "def _random_move(self, grid):\n return random.choice(list_actions(grid))", "title": "" }, { "docid": "d9ad96550adadc59cb5e2aabb391a920", "score": "0.81967247", "text": "def make_random_move(self):\n possible_set = self.compute_possible_set()\n\n if len(possible_set) == 0:\n return None\n\n move = random.sample(possible_set, 1)[0]\n\n for _ in range(5):\n print(\"*\")\n print(\"??-->\", move)\n return move", "title": "" }, { "docid": "fa0d8d6ac920dd2bc3fdc08a216cff91", "score": "0.81768054", "text": "def selectMove(self):\n rand_num = np.random.choice(len(self.probs), p=self.probs)\n self.selected_move = rand_num", "title": "" }, { "docid": "a0b1835f36a6a41d3fbe3fca37c99f03", "score": "0.8165296", "text": "def findRandomMove(valid_moves):\n return random.choice(valid_moves)", "title": "" }, { "docid": "e9a8a5174bfbc2ee717f9c5611201a72", "score": "0.8012869", "text": "def select_random_move(self,the_game):\n\n # compute the number of available moves\n nb_available_moves = 0\n for col in range(7):\n if the_game.board_at(col,5) == 0: # can play in col\n nb_available_moves += 1\n\n # operate a random wheel to select where to play\n rand = random.uniform(0,1)\n cpt = 0\n placed = False\n move = 0\n for col in range(7):\n if the_game.board_at(col,5) == 0: # can play in col\n cpt += 1\n if cpt/nb_available_moves >= rand and not(placed):\n move = col\n placed = True\n # return the selected move\n return move", "title": "" }, { "docid": "2c846a5b20c288d268d95fc008ffc6e2", "score": "0.79913926", "text": "def get_random_move(self) -> Move:\n move_dict = dict()\n keys = []\n for m in self.move_set.avail.values():\n try:\n move_dict[m.piece].append(m)\n except KeyError:\n keys.append(m.piece)\n move_dict[m.piece] = [m]\n\n key = random.choice(keys)\n return random.choice(move_dict[key])", "title": "" }, { "docid": "87692a8fe986fc929d507af7f88a3669", "score": "0.7929397", "text": "def rand_select(board):\n import random\n moves = [move for move, new_board in get_all_next_moves(board)]\n return moves[random.randint(0, len(moves) - 1)]", "title": "" }, { "docid": "8e875d1a4bda698fdf32b5bfa97912ac", "score": "0.78982705", "text": "def makeRandomMove(self):\n pos = self.selectTent()\n self.updateValidTents(pos)\n return pos", "title": "" }, { "docid": "73bfcaf0f25f2c8ad96fe2d023e25a48", "score": "0.7881628", "text": "def determine_move(self):\n number_of_moves = range(len(self.moves))\n try:\n choice = random.choice(number_of_moves)\n self.choice = choice\n except IndexError:\n pass", "title": "" }, { "docid": "a5555aa15ce0dd338636f157c1d19229", "score": "0.7880297", "text": "def make_random_move(self):\n move_availability = set()\n moves = self.moves_made.union(self.mines)\n for x in range(self.height):\n for y in range(self.width):\n sets = (x, y)\n if (sets not in moves):\n move_availability.add(sets)\n\n return None if move_availability == set() else random.choice(tuple(move_availability))", "title": "" }, { "docid": "3cf18fda7c353b74410ab0cd602ace17", "score": "0.7833012", "text": "def get_random_move(self, state):\n return random.choice(self.get_actions(state))", "title": "" }, { "docid": "818582b997cf6c08580a6579e0e7db6a", "score": "0.7780076", "text": "def make_random_move(self):\n allMovs = set([(i,j) for i in range(self.height) for j in range(self.width)])\n disponibles = allMovs - self.moves_made\n disponiblesSeguros = disponibles - self.mines\n if disponiblesSeguros != set():\n rango = len(disponiblesSeguros)\n return 
list(disponiblesSeguros)[random.randrange(rango)]\n return None", "title": "" }, { "docid": "3f59cb23bb5f1e3346e00fea738aee90", "score": "0.7771783", "text": "def random_move(self):\n # Pick the next cell from the adjacent cells.\n next_moves = self.model.grid.get_neighborhood(self.pos, True, True)\n next_move = self.random.choice(next_moves)\n # Now move:\n self.model.grid.move_agent(self, next_move)", "title": "" }, { "docid": "f0d15c3b078c43017d838aaa71a2a94b", "score": "0.77502614", "text": "def choose_move(params):\n logging.debug(\"Test: params; \" + str(params))\n available_moves = utils.available_moves(params['grid'], params['empty_cell'])\n logging.debug(\"Test: available_moves: \" + str(available_moves))\n return np.random.choice(available_moves, size=1)[0]", "title": "" }, { "docid": "072aef8bd775f862c03e4147549fbf14", "score": "0.77250195", "text": "def random_move(self):\n direction = np.random.choice(self.game.legal_moves)\n self.game.move_board(direction)", "title": "" }, { "docid": "aaa367ef15833ad0310ead8fe38348b3", "score": "0.7612821", "text": "def random_move(self,player):\r\n move = None \r\n list_possible_move = self.possible_move(player)\r\n if(len(list_possible_move) != 0):\r\n rand = random.randint(0,len(list_possible_move)-1)\r\n move = list_possible_move[rand]\r\n else:\r\n move = \"pass\"\r\n return move", "title": "" }, { "docid": "a8bd4d34b405675ba42a8e8cf116e588", "score": "0.7605228", "text": "def get_random_move(board):\n pass", "title": "" }, { "docid": "b096de95596e775623c916d280506ee8", "score": "0.75831735", "text": "def pickMove(self,board):\n validMoves = board.getAllValidMoves()\n return random.choice(validMoves)", "title": "" }, { "docid": "d4bbf275acdfd136dede1f4b4b4bd56c", "score": "0.75754166", "text": "def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)", "title": "" }, { "docid": "d4bbf275acdfd136dede1f4b4b4bd56c", "score": "0.75754166", "text": "def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)", "title": "" }, { "docid": "6e8fe7151dadeecf55b3632c84449e87", "score": "0.75610775", "text": "def random_player(game_info):\n return random.choice(game_info.moveset)", "title": "" }, { "docid": "f909ad294a9c4745350a2c1ea16ec56a", "score": "0.7550029", "text": "def make_random_move(self):\n # Construct a set with all possible random moves\n moves = set()\n for y in range(self.height):\n for x in range(self.width):\n cell = (x, y)\n moves.add(cell)\n\n # Remove moves that have already been chosen and are known to be mines\n moves = list(moves.difference(self.moves_made, self.mines))\n # If there are possible moves\n if moves:\n # Return a random cell from remainder\n return random.choice(moves)\n\n return None", "title": "" }, { "docid": "7e1f16dc36fbe822c5fc17d3fba4d0d9", "score": "0.7549675", "text": "def random_move(snk, body, size):\n possible_moves = [\"left\", \"up\", \"right\", \"down\"]\n move_next = True\n while move_next:\n move = random.choice(possible_moves)\n if valid(snk, body, size, [move]):\n move_next = False\n \n return move", "title": "" }, { "docid": "c493fe3373155213ea28e9c64baad440", "score": "0.75259125", "text": "def random_move(self):\n neighbourhood = self.model.grid.get_neighborhood(self.pos, True)\n new_pos = self.random.choice(neighbourhood)\n self.model.grid.move_agent(self, new_pos)", "title": "" }, { "docid": "b187539b4fc02d2376660588eb6fcecf", "score": "0.7506532", 
"text": "def random_moves(self):\n time.sleep(.1)\n directions = [(0, 1), (1, 0), (-1, 0), (0, -1)]\n random.shuffle(directions)\n for direction in directions:\n landed_on = self.move_worm(direction)\n if landed_on[0] == ON_EMPTY:\n break\n elif landed_on[0] == ON_FOOD:\n self.place_food()\n self.parts_missing += landed_on[1]\n self.score += landed_on[1]\n break\n else:\n time.sleep(1)\n self.reset_game()", "title": "" }, { "docid": "8871d599707c572842e9c177ea90ce3a", "score": "0.74818426", "text": "def op_move_random(program, thread):\n thread.direction = Direction.ALL[random.randint(0, len(Direction.ALL) - 1)]", "title": "" }, { "docid": "59ad10ffdf4ace33e8cfdea01191509d", "score": "0.7480926", "text": "def make_random_move(self):\n \n random_moves = set()\n\n # Loop over all cells \n for i in range(0, self.height):\n for j in range(0, self.width):\n\n # Ignore if cell move is already made\n if (i,j) in self.moves_made:\n continue\n\n # Ignore if cell is known to be a mine\n if (i,j) in self.mines:\n continue\n\n random_moves.add((i,j))\n\n # Randomly select move\n if len(random_moves) != 0:\n random_move = random.choice(list(random_moves))\n return (random_move)\n \n # If no random moves available\n else:\n return None", "title": "" }, { "docid": "b06db61e4ac65db145f6b7ed519c64d5", "score": "0.74473846", "text": "def choose_move(\n self, mode: GameMode, view: PlayerView, legal_moves: List[Move], unused: Callable[[PlayerView, Move], PlayerView]\n ) -> Move:\n return random.choice(legal_moves)", "title": "" }, { "docid": "f26b4e5d27cc87104fa2e76d4f4e0d47", "score": "0.7429097", "text": "def randommove(self,board,currentplayer):\n\t\ttemp = self.available_moves(board)\n\t\trandom.shuffle(temp)\n\t\treturn temp[0]", "title": "" }, { "docid": "526202489e8ebec8548fdb73af164e2e", "score": "0.7412674", "text": "def which_move(board):\n return random.choice(board.moves())", "title": "" }, { "docid": "8e0769006563bad48468ec8ddf378796", "score": "0.7393144", "text": "def make_random_move(self) -> Coord:\n # Get all cells minus checked and mines\n unused_cells_not_mines = self._full_board - self.moves_made - self.mines\n if unused_cells_not_mines:\n return random.choice(tuple(unused_cells_not_mines))\n else:\n return None", "title": "" }, { "docid": "f7a3dc9d34fd5c0ec8183ecbd318a08b", "score": "0.73902726", "text": "def move(self, game: Game) -> Move:\n return random.choice(game.get_allowed())", "title": "" }, { "docid": "6348f427d6faa09eb6bfc79f1b1dde0f", "score": "0.7386134", "text": "def randomMove(node):\n move = random.choice(node.state.getMoves())\n node.addMove(move)\n return move", "title": "" }, { "docid": "37691885b313dc93391696e6decb5bd7", "score": "0.7363419", "text": "def get_random_move(self) -> Action:\n number_of_possible_moves = len(self.current_state.next_states_transitions)\n return Action(self.current_state.next_states_transitions[random.randint(0, number_of_possible_moves - 1)])", "title": "" }, { "docid": "ea53b932200c31c2142d54c054fc7e8e", "score": "0.7317701", "text": "def randomWalk(self):\n self.moveto(random.choice(self.getAdjValids(allowNull = False)))", "title": "" }, { "docid": "3181ef4a28b5d7d611806da4b2dc1d89", "score": "0.7279731", "text": "def random_move(self):\n self.prev = self.pixels()\n self.tail.append(self.head)\n if len(self) > self.starting_length:\n self.tail.popleft()\n # generate new head position choices TODO: do this first and have 'stay still' as a choice\n new_heads = [pos for pos in neighbours(*self.head) if pos not in self.tail]\n # lookahead one 
move, that's really all that's required\n # we should just special-case the corners but this worked in a pinch\n if len(new_heads) == 2:\n new_heads = [h for h in new_heads if len(neighbours(*h)) != 2]\n choices = list(new_heads)\n\n #print(f\"Choices are {choices}, having excluded {self.tail}\")\n self.head = random.choice(choices)\n if self.head in self.food:\n self.eat()", "title": "" }, { "docid": "5b4998caaac752ea11cbf4f3b3827017", "score": "0.722176", "text": "def make_random_move(self):\n\n total_cells = self.height * self.width\n buffer = set()\n while len(buffer) <= total_cells:\n i = random.randint(0, self.height - 1)\n j = random.randint(0, self.width - 1)\n\n if tuple((i,j)) not in self.moves_made \\\n and tuple((i,j)) not in self.mines:\n return tuple((i,j))\n\n else:\n buffer.add(tuple((i,j)))\n # No moves left to be made. Being explicit\n if len(buffer) == total_cells:\n return None\n\n return None", "title": "" }, { "docid": "5a6a465f2fa90ac0d9f39391a02634b8", "score": "0.7208392", "text": "def pickRandomAvailableSquareToEat(self):\n allMoves = np.arange(0, self.rows*self.cols)\n barID = self.recogniseState()\n availableMoves = allMoves[np.invert(self.allStates[barID, :])]\n availableMoves = availableMoves[availableMoves != 0]\n # remove the chance of zeroing\n if len(availableMoves) == 0:\n move = -1 # resign\n else:\n move = int(np.random.choice(availableMoves, 1))\n return move", "title": "" }, { "docid": "04fa0019720017898c989af554da579e", "score": "0.7173872", "text": "def get_move(game_state: Othello):\n # Get the legal moves\n possible_moves = game_state.get_available_moves()\n # As the dict_keys Object returned by the function does not support indexing and Indexing is required here\n # Convert it to a list\n possible_moves = list(possible_moves)\n # Return a Random move\n return rnd.choice(possible_moves)", "title": "" }, { "docid": "c24c85ef9a94c4335fe46d5f76ff1bd2", "score": "0.7168274", "text": "def getRandomMove(self, state, role):\n legals = self.getLegalMoves(state, role)\n return legals.get(Random().nextInt(len(legals)))", "title": "" }, { "docid": "d65de12c8f9aaf9859c7e94d5409ae80", "score": "0.7155073", "text": "def choose_pos(self):\n # Find all available positions\n idx = self.find_pos(self.board)\n\n # If there are free spots, generate tile else quit\n if len(idx):\n pos = idx[np.random.randint(low=0, high=len(idx))]\n keys, prior = self.generate_tile_2or4(self.board)\n #keys, prior = self.generate_tile(self.board, self.MAX_GEN)\n self.board[pos] = keys[np.where(np.random.random() <= prior)[0][0]]", "title": "" }, { "docid": "ea310809d26821ce7142d693062bdc9c", "score": "0.71494657", "text": "def make_random_move(self):\n for i in range(0, self.height):\n for j in range(0, self.width):\n move = (i, j)\n if move not in self.moves_made and move not in self.mines:\n return move\n return None", "title": "" }, { "docid": "77d722a9fa097bc4de44ed1fc547527e", "score": "0.713903", "text": "def make_random_agent():\n def get_move(board, player):\n return random.choice(get_possible_moves(board, player))\n return get_move", "title": "" }, { "docid": "6019fbdc7fd92fd35e2ba8d7588abdd2", "score": "0.7129835", "text": "def get_random_move(self):\n\n # It's possible to simply generate an i and then generate a bigger j,\n # but this wouldn't give us a proper distribution. 
Moves with a bigger\n # i would have a higher chance to be chosen than those with a smaller\n # i.\n\n # generate random numbers\n i = random.randrange(1, self._size)\n j = random.randrange(1, self._size)\n\n # ensure the number are different\n while i == j:\n j = random.randrange(1, self._size)\n\n # Puts the smallest number first, not needed, but ensures that every\n # move will have only 1 representation that will be used.\n if j < i:\n i, j = j, i\n\n return (i, j)", "title": "" }, { "docid": "46b9d11202a3ef973b1d9eae127315dc", "score": "0.7123819", "text": "def get_rand_legal_move(self):\n empty_pos = self.GetMoves()\n if empty_pos:\n return random.choice(empty_pos)\n return None", "title": "" }, { "docid": "3ec54125c24328b049e684182209a842", "score": "0.70960665", "text": "def one_random_movement(self):\n #print(\"Robot [{}] moved randomly.\".format(self.unit.id))\n direction = random.choice(list(bc.Direction))\n if self.game_controller.is_move_ready(self.unit.id) \\\n and self.game_controller.can_move(self.unit.id, direction):\n self.game_controller.move_robot(self.unit.id, direction)\n self.map_location = None", "title": "" }, { "docid": "1f17dffccbb995d6aa3a8f1a8a079365", "score": "0.7092261", "text": "def move(self, grid):\n # If in training mode, choose a completely random sample w.p. epsilon\n if self._training_mode and random.random() < self.epsilon:\n # Explore: return random action (and store it).\n choice = self._random_move(grid)\n else:\n choice = self._best_move(grid)\n if self._training_mode:\n self._training_samples.append( (tuple(grid), choice))\n return choice", "title": "" }, { "docid": "3143cfaca88ebb42e692cfbe7a01efcc", "score": "0.7088086", "text": "def generate_random_elem(self):\n res_find_win_move, danger_elems = self.prepare_next_move()\n if res_find_win_move:\n return res_find_win_move\n\n selection = self.game.negative()\n if len(danger_elems) != len(selection):\n for elem in danger_elems:\n selection.remove(elem)\n return rnd.choice(selection)", "title": "" }, { "docid": "751087176c2dc07c410475dd212fe375", "score": "0.7087099", "text": "def move(self, state):\n legal_moves = state.legal_moves()\n #print(\"<RandomPolicy> legal_moves : %s\" % legal_moves)\n idx = np.random.randint(len(legal_moves))\n return legal_moves[idx]", "title": "" }, { "docid": "877145c3586f7e828e59683cdc31b591", "score": "0.7063747", "text": "def make_random_move(self):\n try:\n # pop a ramdom valid move\n move = (self.board - self.moves_made - self.mines).pop()\n except KeyError:\n # no available moves\n move = None\n return move", "title": "" }, { "docid": "887f4d824aa3191b4f196f48d6730c43", "score": "0.706172", "text": "def move_random(self) -> None:\n\n # get state at random\n state = random.choice(ANIM_GROUPS)\n\n # execute motion or switch to \"idle\"\n if state in self.movements:\n self.movements[state]()\n\n elif state == \"idle\":\n self.state = \"idle\"", "title": "" }, { "docid": "f16a2364c33fb7139b8b395e7d96fc25", "score": "0.70342374", "text": "def choose_move(self, choices):\n if len(choices) == 0:\n return None\n if len(choices) == 1:\n return choices[0]\n \n # Find the weight of the edges that take us to each of the choices.\n weights = []\n for move in choices:\n edge = self.world.edges[self.node, move]\n weights.append(self.weigh(edge))\n \n # Choose one of them using a weighted probability.\n total = sum(weights)\n cumdist = list(itertools.accumulate(weights)) + [total]\n\n return choices[bisect.bisect(cumdist, random.random() * total)]", "title": "" }, { "docid": 
"19029b39aad5bb87a18bf6a7a548460b", "score": "0.7023823", "text": "def choose(self) -> str:\n choice = random.choice(list(map(lambda x: x.name, Move)))\n return choice", "title": "" }, { "docid": "4e08619dc935841f598f2112bb9739fe", "score": "0.7019695", "text": "def random_strategy(game: Game) -> Any:\n move = random.choice(game.current_state.get_possible_moves())\n return game.str_to_move(move)", "title": "" }, { "docid": "5cd56599239b65480d3066671d165315", "score": "0.7016636", "text": "def select_move(self, gamestate):\n candidates = []\n for fr in range(gamestate.position.board.size*gamestate.position.board.size):\n if gamestate.position.is_valid_move(fr,gamestate.player) and not gamestate.position.board.is_eye(fr, gamestate.player):\n candidates.append(fr)\n if not candidates:\n return Move.pass_turn()\n return Move.play(random.choice(candidates))", "title": "" }, { "docid": "25a83e5be8d3f7f776661344affe675d", "score": "0.70154536", "text": "def randomPlayer1(board, state, moves):\n # moves is a list of [(from, piece, moves), ...]\n # [((0, 1), 'P', [(0, 2), (0, 3)]), ...]\n selection = random.choice(moves)\n fromPos = selection[0]\n toPos = random.choice(selection[2])\n return fromPos, toPos", "title": "" }, { "docid": "93055cd2ffcb50b5a1b403514c4f3737", "score": "0.7012158", "text": "def takeNaiveMove():\r\n\treturn rn.randint(1,9)", "title": "" }, { "docid": "e1c715db665858f8b179b4183c92aded", "score": "0.70081544", "text": "def random_strategy(game: Any) -> Any:\n move = random.choice(game.current_state.get_possible_moves())\n return game.str_to_move(move)", "title": "" }, { "docid": "dec764365563307855247ad7d64aef6b", "score": "0.69981647", "text": "def rand_move(self,board):\n our_tiles = [board_tile for row in board for board_tile in row\n if board_tile.faction == self.faction]\n for tile in our_tiles:\n if tile.nb==0:\n print(\"abberation :\",tile.x,tile.y)\n \n random_tile = random.choice(our_tiles) # awesome IA algorithm\n x = random_tile.x\n y = random_tile.y\n n = random_tile.nb\n \n pot_dest = [(x-1,y-1),(x-1,y),(x-1,y+1),(x,y-1),(x,y+1),(x+1,y-1),(x+1,y),(x+1,y+1)]\n pot_dest_filtered = [(i,j) for (i,j) in pot_dest if i>=0 and i<self.__n and j >=0 and j<self.__m]\n \n dest_x, dest_y = random.choice(pot_dest_filtered)\n\n random_n = random.choice(list(range(1,n+1)))\n\n return [[x,y,random_n, dest_x, dest_y]]", "title": "" }, { "docid": "50ecf5692250ae96f474ab41af19e6c4", "score": "0.69966036", "text": "def cpu_move(self):\n return randrange(1, 3)", "title": "" }, { "docid": "afae52355911e0da5dd32c0b5d68e691", "score": "0.6983072", "text": "def get_move(self,move):\n if len(move) != 0:\n self.board.make_move(move,self.opponent[self.color])\n else:\n self.color = 1\n moves = self.board.get_all_possible_moves(self.color)\n index = randint(0,len(moves)-1)\n inner_index = randint(0,len(moves[index])-1)\n move = moves[index][inner_index]\n self.board.make_move(move,self.color)\n return move", "title": "" }, { "docid": "20f5d2ddf93b1d162e5a7d2c18b44207", "score": "0.69813436", "text": "def next_random_move(board):\n if board.check_win() == None:\n return random.choice( board.get_empty_squares() )\n else:\n return False", "title": "" }, { "docid": "218ad6fd16069edfc3daac373f827814", "score": "0.69732875", "text": "def generate_move_random(\n board: Board, player: Player, saved_state: Optional[SavedState]\n) -> Tuple[PlayerAction, Optional[SavedState]]:\n\n print(f\"I'm the random agent. 
Playing as {board.player}\")\n\n available_columns = board.actions()\n\n action = PlayerAction(rnd.choice(available_columns))\n print(f\"Choosing to play {action}!\")\n\n return action, saved_state", "title": "" }, { "docid": "ed798a5c86a8ccb313befd0668e2357b", "score": "0.69699615", "text": "def chooseMove(self):\n row = randint(0,11)\n if row<6:\n # Top half of board, so choose between first and sixth row\n col = randint(0,5)\n else:\n # Bottom half so choose between first and twelfth row\n col = randint(0,11)\n # Display move in row (letter) + col (number) grid reference\n # e.g. A3 is represented as 0,2\n return row, col", "title": "" }, { "docid": "d91495fda7dd1f0f9fb9e01fe89d02e3", "score": "0.69641495", "text": "def make_move(state):\n # TODO: Implement AI!\n return random.choice([\n go_north,\n go_south,\n go_east,\n go_west,\n rotate_cw,\n rotate_ccw,\n shoot,\n ])", "title": "" }, { "docid": "8fdf8607a17affeff4dd0ad030873bb8", "score": "0.69599384", "text": "def move_random(self):\n self.coordf[1],self.coordf[2] = randrange(10,490,15),randrange(10,490,15)\n\n for i in range(len(self.info)):\n if (self.coordf[1], self.coordf[2]) not in self.info[i]:\n self.can.coords(self.coordf[0],self.coordf[1],self.coordf[2],\n self.coordf[1]+self.cc,self.coordf[2]+self.cc)", "title": "" }, { "docid": "3164018428ccdcdf2aa0bfafa4102df3", "score": "0.6911911", "text": "def get_move(self, overlord):\n return random.choice(self.valid_moves(overlord))", "title": "" }, { "docid": "97b5a1b065de9aec5e826f4559908a4b", "score": "0.6910727", "text": "def move_randomly(self):\n delta = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (0, -1), (1, -1), (1, 0), (1, 1)]\n rd.shuffle(delta)\n x, y = self.owner.pos\n while len(delta) > 0:\n dx, dy = delta.pop()\n if self.move_towards((x + dx, y + dy)):\n return", "title": "" }, { "docid": "fcfb3a2cfd59f46187b83fcd5fb958c8", "score": "0.6908616", "text": "def random_selection(board_sequence):\n game_board = board_sequence.current_board\n return rand.choice(game_board.get_available())", "title": "" }, { "docid": "da3dff4ce1f26f890f6c12f204e928cd", "score": "0.6899945", "text": "def simulate_random_game(self, game_state):\n game = copy.deepcopy(game_state)\n \n while game.is_not_over():\n random_move = self.bot.select_move(game)\n game.take_turn(random_move)\n \n return game.winner", "title": "" }, { "docid": "f935981b6f6dec99009f7392a573aa90", "score": "0.68972343", "text": "def move(self, state_prev, state, reward):\n if random.random() < 0.6:\n return random.choice(['MOVE_U', 'MOVE_D', 'MOVE_L', 'MOVE_R'])\n else:\n return random.choice(['SHOOT_Q', 'SHOOT_W', 'SHOOT_E', 'SHOOT_A', 'SHOOT_D', 'SHOOT_Z', 'SHOOT_X', 'SHOOT_C'])", "title": "" }, { "docid": "bd56a832a81170d379b582d6644fc252", "score": "0.6894614", "text": "def genMove(self,color):\r\n legalMoves = self.getLegalMoves(color)\r\n if legalMoves:\r\n return random.choice(legalMoves)\r\n return False", "title": "" }, { "docid": "edb60693961350f1681595e87d153e9d", "score": "0.6887186", "text": "def move_to_random_location(self):\n trials = LAYER_SIZE**2*10\n while trials > 0:\n try:\n return self.move_to(self.get_random_location())\n except:\n # assuming failing due to uniqueness constraint violation. 
\n trials = trials - 1", "title": "" }, { "docid": "fcfd0cf2bd1e564d510337a2b0160bb0", "score": "0.6868091", "text": "def random_move(self):\n rand_int = random.randint(0, len(self.moves) - 1)\n move = self.moves[rand_int]\n\n move_name = move['move']['name']\n if move_name not in self.moves_dict:\n self.moves_dict[move_name] = Move(move['move']['url'])\n\n return self.moves_dict[move_name]", "title": "" }, { "docid": "3206eaf113141db8de33793521c8b09d", "score": "0.68558997", "text": "def randomPlayer2(board, state, moves):\n # moves is a list of [(from, piece, moves), ...]\n # [((0, 1), 'P', [(0, 2), (0, 3)]), ...]\n tofrom = []\n for move in moves:\n fromPos = move[0]\n for toPos in move[2]:\n tofrom.append((fromPos, toPos))\n selection = random.choice(tofrom)\n fromPos = selection[0]\n toPos = selection[1]\n return fromPos, toPos", "title": "" }, { "docid": "fce7e8b14e96bf1c32ec0e42f03b9d1e", "score": "0.6851291", "text": "def randomMove(self, board):\n possibles = []\n for i in range(3):\n for j in range(3):\n if board[i][j] == '-':\n possibles += [(i, j)]\n return possibles[random.randint(0, len(possibles)-1)]", "title": "" }, { "docid": "99bb3865ccb7fd9123f280e3280d0b4e", "score": "0.6846981", "text": "def ai_random(grid):\n import random\n moves = []\n for i in range(len(grid)):\n if grid[i] == blank:\n moves.append(i)\n move = random.choice(moves)\n return move", "title": "" }, { "docid": "522c9c0c30f7ed1c4ca2d27e42fbeecc", "score": "0.6815907", "text": "def choice(self, player, board):\r\n\r\n #White player and black player turns\r\n if player:\r\n choices = board.turn_moves_w()\r\n\r\n else:\r\n choices = board.turn_moves_b()\r\n\r\n self.best_move = random.choice(choices) #random move choice\r\n\r\n #If the move involves a pawn, ensure that the first move is declared\r\n if type(self.best_move[0]).__name__ == 'Pawn':\r\n if self.best_move[0].first_move:\r\n self.best_move[0].first_move = False\r\n\r\n #Updates the piece location and the board\r\n self.best_move[0].update(self.best_move[1][0], self.best_move[1][1], board)", "title": "" }, { "docid": "eb9390c971b83b8a4496379f10c72fd9", "score": "0.67885303", "text": "def simulate_game_random(board, forbid_pass=True):\n pos = copy.deepcopy(board)\n while not (pos.recent[-2].move == go.PASS and pos.recent[-1].move == go.PASS):\n ill, caps = pos.play_move(select_random(pos, forbid_pass))\n return pos", "title": "" }, { "docid": "78b73f7fa8fccac3f21c15b0dd58e98c", "score": "0.67729515", "text": "def mc_trial(board, player):\n \n \n empty_lst = board.get_empty_squares();\n while(len(empty_lst)>0 and board.check_win()==None):\n empty_lst = board.get_empty_squares();\n \n pos = random.choice(empty_lst);\n \n board.move(pos[0],pos[1],player);\n player = provided.switch_player(player);", "title": "" }, { "docid": "b239b1b16e56309cb246dc4cb453f4af", "score": "0.67716897", "text": "def choose_move(self) -> GameState:\r\n raise NotImplementedError", "title": "" }, { "docid": "f9267e6f1ded8a374b00be4564fcb1db", "score": "0.67657596", "text": "def get_computer_move():\n # TODO\n return \"rps\"[random.randint(0,2)]", "title": "" }, { "docid": "65e384df51aefd5f1cea0ab717a27511", "score": "0.6727723", "text": "def pick_position(self, tile, board):\n random.choice([True, False])", "title": "" }, { "docid": "f3fb523f9e3568cdd0056695d1efda7b", "score": "0.6724774", "text": "def direction_switch(self):\n\n self.move_x = random.randint(-self.max_speed, self.max_speed)\n self.move_y = random.randint(-self.max_speed, self.max_speed)\n while self.move_x 
== 0 or self.move_y == 0:\n self.move_x = random.randint(-self.max_speed, self.max_speed)\n self.move_y = random.randint(-self.max_speed, self.max_speed)", "title": "" }, { "docid": "1519369a604cfe3c1e7e61f0fea035a9", "score": "0.6723876", "text": "def getRandomJointMove_0(self, state, role, move):\n random = ArrayList()\n for r in getRoles():\n if r == role:\n random.add(move)\n else:\n random.add(getRandomMove(state, r))\n return random", "title": "" }, { "docid": "d598f5144060fe6de0642780bf534bc7", "score": "0.67228514", "text": "def move(locations):\n\n r1 = random.choice(range(len(locations)))\n c1 = random.choice(range(len(locations[r1])))\n\n r2 = random.choice(range(len(locations)))\n c2 = random.choice(range(len(locations[r2])))\n\n while r1 == r2 and c1 == c2:\n r2 = random.choice(range(len(locations)))\n c2 = random.choice(range(len(locations[r2])))\n\n swap(locations, r1, c1, r2, c2)\n\n return (r1, c1, r2, c2)", "title": "" }, { "docid": "f7f05f7d564add21d2e6d0094f8da2a5", "score": "0.67196184", "text": "def getRandomNextState_0(self, state, role, move):\n random = self.getRandomJointMove(state, role, move)\n return self.getNextState(state, random)", "title": "" }, { "docid": "c358ca8cc396edbfb7e6f355fe4b6bdd", "score": "0.67162436", "text": "def random_strat(self):\n position = get_random_position()\n while not self.check_move(position):\n position = get_random_position()\n self.played.add(position)\n return position", "title": "" }, { "docid": "a8b2c37691d228b1c750d788f7bf883e", "score": "0.67161036", "text": "def random_strategy(board, player):\n return random.choice(actions(board))", "title": "" }, { "docid": "79d9c5da36402c2758bb3b33ffbeb528", "score": "0.6709384", "text": "def genmove(self, game_state, player):\n board = game_state.board\n empties = []\n for row, col in board.board_points:\n if board.get(row, col) is None:\n empties.append((row, col))\n result = gtp_states.Move_generator_result()\n if random.random() < self.resign_probability:\n result.resign = True\n else:\n result.move = random.choice(empties)\n # Used by gomill-explain_last_move and gomill-savesgf\n result.comments = \"chosen at random from %d choices\" % len(empties)\n return result", "title": "" }, { "docid": "2a3564faa996c4ba80c48ee7e9a4d75a", "score": "0.670016", "text": "def choose_random_move_from_list(board, moves_list):\n possible_moves = []\n for i in moves_list:\n if is_space_free(board, i):\n possible_moves.append(i)\n \n if len(possible_moves) != 0:\n return random.choice(possible_moves)\n else:\n return None", "title": "" }, { "docid": "addda21d6f8bb677b9496dfa4bfb06e7", "score": "0.6698148", "text": "def random_move(board):\n\t\t\tif not end(board):\n\t\t\t\tdir = random.choice(moves)\n\t\t\t\tresult_board = methods[dir](board, doScore=True)\n\t\t\t\tdrop(result_board)\n\t\t\t\treturn random_move(result_board)\n\t\t\telse:\n\t\t\t\treturn None", "title": "" }, { "docid": "984890a059c3dc1552a7181ab96766ed", "score": "0.669151", "text": "def monte_carlo_sample(board_state, side):\n result = has_winner(board_state)\n if result != 0:\n return result, None\n moves = list(available_moves(board_state))\n if not moves:\n return 0, None\n\n # select a random move\n move = random.choice(moves)\n result, next_move = monte_carlo_sample(apply_move(board_state, move, side), -side)\n return result, move", "title": "" }, { "docid": "93e634d38c883a6831b7d588d5287ff8", "score": "0.6682281", "text": "def play_turn():\n available_moves = get_available_moves()\n # Gives a little time between moves to improve 
player experience\n time.sleep(0.5)\n return random.choice(available_moves)", "title": "" }, { "docid": "4dc35241132a9be76b76d504aeefdd27", "score": "0.668154", "text": "def move_random(lastScramble=None):\n blankLoc = getBlankLocation()\n \n neighbors = getNeighbors(blankLoc)\n neighbors = [neighbor for neighbor in neighbors if neighbor != lastScramble]\n makeMove(random.choice(neighbors), .06)\n \n return blankLoc", "title": "" }, { "docid": "78e4f8284a8a796f5129608c32e87fe5", "score": "0.66803104", "text": "def make_safe_move(self):\n difference = self.safes.difference(self.moves_made)\n return None if not difference else random.choice(tuple(difference))", "title": "" }, { "docid": "e665137afcc69af6fb4be85336b2205e", "score": "0.6678206", "text": "def choose_move(self):\r\n next_move = self.brain.find_legal_move()\r\n return next_move", "title": "" }, { "docid": "70a3b5797661a1aa8a0770b2146ea43c", "score": "0.6674591", "text": "def __chooseStartingpoint(self):\n self.__currentRoom = random.choice(self.__listOfRooms)\n while self.__currentRoom.trapCheck():\n self.__currentRoom = random.choice(self.__listOfRooms)", "title": "" }, { "docid": "abb625749de837c44c490e599ccf1eb5", "score": "0.66680694", "text": "def next_move(self, board):\n assert (not board.is_full())\n self.num_moves += 1\n open_pos = list(board.iter_empty())\n return random.choice(open_pos)", "title": "" }, { "docid": "7fa0a004bb192a14aa8ed1dd86883c16", "score": "0.6665981", "text": "def make_move(self) -> int:\n move_type = np.random.choice([0,1],1,p=[1-self.random,self.random])\n if self.moves == 0:\n move = np.random.randint(0, self.n_moves)\n self.moves += 1\n return move\n else:\n self.moves += 1\n probability = self.count[self.previous_move[0],self.previous_move[1],:]\n if np.sum(probability) == 0:\n move = np.random.randint(0,self.n_moves)\n return move\n probability = probability / np.sum(probability)\n if move_type[0] == 1 and np.count_nonzero(probability) > 1:\n move = np.random.randint(0, self.n_moves)\n return move\n move = np.random.choice([0,1,2],1,p=probability)[0]\n move = (move + self.n_moves + 1) % self.n_moves\n return move", "title": "" } ]
8b6981e00c6948c86907bb36a43d4c0e
Find a link on this or a child workflow that has target as a sink.
[ { "docid": "894cb00f1d12c3da0715d4897f28378f", "score": "0.6926945", "text": "def find_link_by_target(self, target: Union[CommandInput, CommandParameter]):\n for link in self.links:\n if target in link.sinks:\n yield link, self\n\n for plan in self.plans:\n if isinstance(plan, CompositePlan):\n yield plan.find_link_by_target(target)", "title": "" } ]
[ { "docid": "025d34998e9a38ef9dc2c46dc588dc98", "score": "0.5772282", "text": "def get_target(self, share):", "title": "" }, { "docid": "832a11e7a60e6d1681fdfb8c43111497", "score": "0.57031846", "text": "def target(self):\n target_id = self.target_id\n if target_id:\n return self.resolve_entity(target_id)", "title": "" }, { "docid": "c0db11f58eab1f75dcf2b47f791ae70f", "score": "0.56856525", "text": "def get_target(self):\n if self.target:\n return self.target \n else:\n print \"target does not exist\"\n return None", "title": "" }, { "docid": "baf3efe07bae814a2070be85fc71ecfa", "score": "0.5681736", "text": "def get_target(self, target_id):\n return self.node[target_id][\"object\"]", "title": "" }, { "docid": "f5d86230143de5d8d47d8db4080d574c", "score": "0.5600967", "text": "def target(self):\n if self._security_class == \"lnk_file\":\n return self._target\n else:\n return None", "title": "" }, { "docid": "842e3efce6f887b9958a76eff168a7da", "score": "0.55410635", "text": "def find_media_link(self):\n if self.is_media():\n return self.content.src\n return None", "title": "" }, { "docid": "cd172c3e11186a8089e167f2b2ad93f2", "score": "0.5478133", "text": "def link(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"link\")", "title": "" }, { "docid": "cd172c3e11186a8089e167f2b2ad93f2", "score": "0.5478133", "text": "def link(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"link\")", "title": "" }, { "docid": "55afbecb03cf972a2663a462432d447b", "score": "0.5475691", "text": "def target(self):\n return self._target", "title": "" }, { "docid": "55afbecb03cf972a2663a462432d447b", "score": "0.5475691", "text": "def target(self):\n return self._target", "title": "" }, { "docid": "55afbecb03cf972a2663a462432d447b", "score": "0.5475691", "text": "def target(self):\n return self._target", "title": "" }, { "docid": "55afbecb03cf972a2663a462432d447b", "score": "0.5475691", "text": "def target(self):\n return self._target", "title": "" }, { "docid": "55afbecb03cf972a2663a462432d447b", "score": "0.5475691", "text": "def target(self):\n return self._target", "title": "" }, { "docid": "55afbecb03cf972a2663a462432d447b", "score": "0.5475691", "text": "def target(self):\n return self._target", "title": "" }, { "docid": "55afbecb03cf972a2663a462432d447b", "score": "0.5475691", "text": "def target(self):\n return self._target", "title": "" }, { "docid": "55afbecb03cf972a2663a462432d447b", "score": "0.5475691", "text": "def target(self):\n return self._target", "title": "" }, { "docid": "0d846543a150526ff5a0d6c75e62087c", "score": "0.544073", "text": "def address(self):\n if self._hlinkClick is None:\n return None\n return self.part.target_ref(self._hlinkClick.rId)", "title": "" }, { "docid": "c877488cd9698a5b7824773040848acb", "score": "0.5402326", "text": "def _get_target(self):\n pass", "title": "" }, { "docid": "48cead0022011317a08ba29b40e139cf", "score": "0.5362794", "text": "def target(self) -> typing.Optional[\"IRuleTarget\"]:\n return self._values.get('target')", "title": "" }, { "docid": "3b072ec1bd4a18cdf735e9228c8d3cc3", "score": "0.5361419", "text": "def _getlink(self):\n return self._link", "title": "" }, { "docid": "b831033d51e3ba6ee5d57fbef9925955", "score": "0.53230447", "text": "def getTarget(self, target):\n return self._targets[target]", "title": "" }, { "docid": "45897b3852aeb39d1162e4931bd4cb1a", "score": "0.5300091", "text": "def get_target(self):\n return self.target", "title": "" }, { "docid": "6a7a7d88c7e2d51be484bf907d85457a", "score": "0.52636147", 
"text": "def target(self) -> Optional[\"SoCTarget\"]:\n return self.board.target if self.board else None", "title": "" }, { "docid": "7570e95ad1ec5249337df1be24cb074d", "score": "0.52522063", "text": "def get_symlink_target(self):\n content = self.get_content()\n if isabs(content):\n return None\n path = normpath(joinpath(self.path, '..', content.decode('utf-8')))\n if path.startswith('..'):\n return None\n return type(self)(path, self.commit)", "title": "" }, { "docid": "1e0e592a13d28b16bb1d7bccbc857804", "score": "0.5242309", "text": "def target(self) -> Optional[str]:\n return pulumi.get(self, \"target\")", "title": "" }, { "docid": "fc884d6f63d502198085868be01cc23f", "score": "0.52319074", "text": "def worklink(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"worklink\")", "title": "" }, { "docid": "8f68e2f9edb13227bce5fd19c9cd64d4", "score": "0.52282274", "text": "def getBestLink(self, destination):\n return self.table[destination][1]", "title": "" }, { "docid": "0295a7b8a05d9f685b34b3d840a40020", "score": "0.52099925", "text": "def find_html_link(self):\n for link in self.link:\n if link.rel == 'alternate' and link.type == 'text/html':\n return link.href\n return None", "title": "" }, { "docid": "3214761a00af122981cde459ad7e9923", "score": "0.5205382", "text": "def _jump_in_sink(self, current_path, next_path):\n if self._current_p.loader.find_object_containing(current_path.active[0].addr) != \\\n self._current_p.loader.main_object:\n return False, None\n\n try:\n for entry in self._sink_addrs:\n if next_path.active[0].addr == entry[0]:\n return True, entry[1]\n except Exception as e:\n log.error(\n f\"Unable to find successors for {hex(current_path.active[0].addr)}, perhaps unconstrained call?\")\n return False, None", "title": "" }, { "docid": "2ab2d19f6887e138c5df09887765edfd", "score": "0.5195", "text": "def self_link(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"self_link\")", "title": "" }, { "docid": "2ab2d19f6887e138c5df09887765edfd", "score": "0.5195", "text": "def self_link(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"self_link\")", "title": "" }, { "docid": "2ab2d19f6887e138c5df09887765edfd", "score": "0.5195", "text": "def self_link(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"self_link\")", "title": "" }, { "docid": "7af051a0e2dbc00b647e9b0b887b7feb", "score": "0.5191727", "text": "def sink_get(self, sink_name):\n target = f\"/{sink_name}\"\n return self.api_request(method=\"GET\", path=target)", "title": "" }, { "docid": "1a23232fe851404311c8606a2b139337", "score": "0.5189926", "text": "def actor(self):\n return Actor.user(self.actor_id).get_link()", "title": "" }, { "docid": "7cd29d69233b0b1d6ffbbb0e91414321", "score": "0.51883066", "text": "def get_target_object(self, target_uri, *args, **kwargs):\r\n try:\r\n return self._target_lookup(*args, **kwargs)\r\n except ObjectDoesNotExist:\r\n raise BacklinkTargetDoesNotExist", "title": "" }, { "docid": "78e36f6a720b045517ed683e5dda8d13", "score": "0.5127774", "text": "def __getTarget(self):\r\n \r\n # refresh the UI and find all matching targets\r\n if self.tc.uiState.currentStateFull == None:\r\n self.tc.uiState.getCurrentState(refresh=True)\r\n\r\n targetNodes = self.tc.uiState.currentStateFull.findall(self.targets_xpath)\r\n \r\n if targetNodes:\r\n\r\n monkeyTargetList = self.__getAllTargetsFromNodes(targetNodes) # get list of targets\r\n\r\n if monkeyTargetList: # only if there's UI type of targets, use event type of targets. 
TODO: should events be launched without actual targets as well?\r\n monkeyTargetList += self.__getEventTargets()\r\n\r\n # print targets if settings defined\r\n if self.settings.SelectSingleNode('//printtargets') != None:\r\n p = self.settings.SelectSingleNode('//printtargets').GetAttribute('value')\r\n if p.lower() == \"true\":\r\n tgtsMsg = 'Available targets: '\r\n for i in range(len(monkeyTargetList)):\r\n if monkeyTargetList[i].targetImageOrText:\r\n tgtsMsg = tgtsMsg + monkeyTargetList[i].targetImageOrText + ' -- '\r\n else:\r\n tgtsMsg = tgtsMsg + 'event: ' + monkeyTargetList[i].method + ' -- '\r\n if i > 0 and i % 7 == 0: # split to fit output better\r\n tgtsMsg = tgtsMsg + '\\n'\r\n \r\n self.message(tgtsMsg)\r\n\r\n if self.settings.SelectSingleNode('//savestate') != None:\r\n p = self.settings.SelectSingleNode('//savestate').GetAttribute('value')\r\n if p.lower() == \"true\":\r\n self.message('state saved as %s' % self.tc.capture())\r\n \r\n if monkeyTargetList:\r\n self.getTargetCounter += 1\r\n return self.__getRandomMonkeyTarget(monkeyTargetList)\r\n else:\r\n # if no target \r\n self.message(\"failed to get target\")\r\n if debugMode:\r\n self.saveCurrentState(\"notargets_from_validate\")\r\n return None\r\n\r\n else:\r\n self.message(\"No targets found\")\r\n if debugMode:\r\n self.saveCurrentState(\"notargets\")\r\n return None", "title": "" }, { "docid": "b7c0b69d87781a7b9da9ffec30fad16e", "score": "0.5112519", "text": "def SelfLink(self):\n return self._self_link", "title": "" }, { "docid": "eba2cd0c7bae197a892fa60fc1de7998", "score": "0.5099199", "text": "def get_link(self, object):\n db_snapshot = self.read()\n for i, link in enumerate(db_snapshot['links']):\n for k, v in vars(link).items():\n if k == 'url':\n if v == object.url:\n print('duplicate found: ', link)\n return [True, link, i]\n return [False]", "title": "" }, { "docid": "1372ccb617bd9eebb761d69f1e5e0e5f", "score": "0.5096176", "text": "def link(self) -> Optional[str]:\n return pulumi.get(self, \"link\")", "title": "" }, { "docid": "1372ccb617bd9eebb761d69f1e5e0e5f", "score": "0.5096176", "text": "def link(self) -> Optional[str]:\n return pulumi.get(self, \"link\")", "title": "" }, { "docid": "4601103be37a2e3e8d01eab93d31fd35", "score": "0.5092862", "text": "def destination_node(self):\n return self._destination_node", "title": "" }, { "docid": "4cd08d8abdde753f0327d847a946d702", "score": "0.50876015", "text": "def to(self) -> \"RouteTargetReference\":\n return self.__to", "title": "" }, { "docid": "f957d26a53252ca8f28c77ec44169597", "score": "0.5064854", "text": "def target(self):\n pass", "title": "" }, { "docid": "70c426e2c7c04b44e8e47e3547c2f99c", "score": "0.50548655", "text": "def get_tool_link(exposure_object):", "title": "" }, { "docid": "fafe161df64dd294fa330c3fa97637cc", "score": "0.5048197", "text": "def destination(self):\n\n\t\treturn self.__destination", "title": "" }, { "docid": "7dd775370b781fbf8cf0db73c2fd47a9", "score": "0.50466555", "text": "def jump(self, guid=None, url=None):\n index = None\n for i, e in enumerate(self.entries):\n if guid is not None and e.guid == guid:\n index = i\n msg = 'Jumping to entry...'\n break\n if index is None and url is not None and e.feed.url == url:\n index = i\n msg = 'Jumping to feed...'\n if index is None:\n msg = 'Cannot find jump destination.'\n else:\n self.i = index\n messager.msg(msg)", "title": "" }, { "docid": "e1df2b0e079d3a62f5efe65b3976838c", "score": "0.5043757", "text": "def destination(self):\n return self._destination", "title": "" }, { 
"docid": "e1df2b0e079d3a62f5efe65b3976838c", "score": "0.5043757", "text": "def destination(self):\n return self._destination", "title": "" }, { "docid": "81308cff58192b49a913aba44e1ccd98", "score": "0.5041714", "text": "def target(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target\")", "title": "" }, { "docid": "81308cff58192b49a913aba44e1ccd98", "score": "0.5041714", "text": "def target(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target\")", "title": "" }, { "docid": "d8b8257bd9d92cf20cf5ec2aa70b8171", "score": "0.50249845", "text": "def subsystem_link(self):\n return self._subsystem_link", "title": "" }, { "docid": "218075a31649ef19d93dc3b87d796525", "score": "0.50037384", "text": "def self_link(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"self_link\")", "title": "" }, { "docid": "218075a31649ef19d93dc3b87d796525", "score": "0.50037384", "text": "def self_link(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"self_link\")", "title": "" }, { "docid": "218075a31649ef19d93dc3b87d796525", "score": "0.50037384", "text": "def self_link(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"self_link\")", "title": "" }, { "docid": "218075a31649ef19d93dc3b87d796525", "score": "0.50037384", "text": "def self_link(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"self_link\")", "title": "" }, { "docid": "218075a31649ef19d93dc3b87d796525", "score": "0.50037384", "text": "def self_link(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"self_link\")", "title": "" }, { "docid": "218075a31649ef19d93dc3b87d796525", "score": "0.50037384", "text": "def self_link(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"self_link\")", "title": "" }, { "docid": "218075a31649ef19d93dc3b87d796525", "score": "0.50037384", "text": "def self_link(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"self_link\")", "title": "" }, { "docid": "218075a31649ef19d93dc3b87d796525", "score": "0.50037384", "text": "def self_link(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"self_link\")", "title": "" }, { "docid": "218075a31649ef19d93dc3b87d796525", "score": "0.50037384", "text": "def self_link(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"self_link\")", "title": "" }, { "docid": "218075a31649ef19d93dc3b87d796525", "score": "0.50037384", "text": "def self_link(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"self_link\")", "title": "" }, { "docid": "c83adbbaf853715c53ae4debfa183401", "score": "0.50007373", "text": "def __get_link_source(self):\n num = int(self.data[2 : len(self.data) - 1])\n if num not in self.sour:\n self.sour[num] = gt.Source(num=num)\n page = None\n while self.__get_line() and self.level > 1:\n if self.tag == \"PAGE\":\n page = self.__get_text()\n self.flag = True\n return (self.sour[num], page)", "title": "" }, { "docid": "46f43f46ab2848d85b1f93226e84d7b5", "score": "0.49834424", "text": "def _link_streams(self):\n try: self._outs[0].link = self._ins[0]\n except Exception as Error:\n if missing_stream in (self._ins + self._outs):\n raise AttributeError(f'missing stream object in {repr(self)}')", "title": "" }, { "docid": "b692b87bc3587c025cc5c4296036f65f", "score": "0.49733904", "text": "def get_output(self, target_id):\n for dataset_name in self.datasets:\n for rule_id in self.datasets[dataset_name]:\n if rule_id == target_id:\n return self.datasets[dataset_name][rule_id].output", "title": "" }, { "docid": "e70d4f0e2b94bde1374c8033839784d0", "score": "0.49709263", "text": "def 
getLinkbyuid():", "title": "" }, { "docid": "5ab51e7bc85631f43167a55efcbee91a", "score": "0.49607256", "text": "def find_link_by_href(self, href):\n raise NotImplementedError(\n \"%s doesn't support finding links by href.\" % self.driver_name\n )", "title": "" }, { "docid": "08a02ae2f4da676d13eff3aa36c2f694", "score": "0.49586076", "text": "def is_target(self): \n return self.next is None", "title": "" }, { "docid": "f8cf85719df6df5b564fc0065d5503a6", "score": "0.4955191", "text": "def target(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target\")", "title": "" }, { "docid": "b0103414422a3bb0338e8e105dfd0f26", "score": "0.4950128", "text": "def getLink(self) -> int:\n ...", "title": "" }, { "docid": "6efc0560dec166f3c6f5c52cd02f3475", "score": "0.49469787", "text": "def find_target(self, ident):\n if ident.startswith(':'):\n return self.project.find_target(\n self.project.alias + '//' + self.relpath + ident)\n return self.project.find_target(ident)", "title": "" }, { "docid": "e39910df1d3350bc027fda7b14ab4551", "score": "0.49454093", "text": "def target(self) -> Optional[pulumi.Input['TargetConfigurationArgs']]:\n return pulumi.get(self, \"target\")", "title": "" }, { "docid": "41f67cd72691a5633ba754260d75c576", "score": "0.4942364", "text": "def getSelfLink(self):\n return self.base.get(\"selfLink\", [])", "title": "" }, { "docid": "41f67cd72691a5633ba754260d75c576", "score": "0.4942364", "text": "def getSelfLink(self):\n return self.base.get(\"selfLink\", [])", "title": "" }, { "docid": "4d2146013da87ceb890a8fc6492c781b", "score": "0.49381065", "text": "def getLink(self):\n return self.base.get(\"link\", [])", "title": "" }, { "docid": "eaa9782f36c75095f0edf9b140876083", "score": "0.49367556", "text": "def target(self) -> str:\n return pulumi.get(self, \"target\")", "title": "" }, { "docid": "eaa9782f36c75095f0edf9b140876083", "score": "0.49367556", "text": "def target(self) -> str:\n return pulumi.get(self, \"target\")", "title": "" }, { "docid": "eaa9782f36c75095f0edf9b140876083", "score": "0.49367556", "text": "def target(self) -> str:\n return pulumi.get(self, \"target\")", "title": "" }, { "docid": "fe8eb2a7cd0e0fec4e5542da60dd5222", "score": "0.49337837", "text": "def _get_destination(self):\n return self.__destination", "title": "" }, { "docid": "fe8eb2a7cd0e0fec4e5542da60dd5222", "score": "0.49337837", "text": "def _get_destination(self):\n return self.__destination", "title": "" }, { "docid": "39a952e1b341d6e71cb63cb27d55cbb3", "score": "0.49257252", "text": "def extract_target_instance(self) -> str:\n\n target_instance_match = re.search(r'\\/targetInstances\\/(.*)',\n self.selfLink)\n if target_instance_match != None:\n return target_instance_match[1]", "title": "" }, { "docid": "e73dbd5412cd0b67f401990f1c0209b5", "score": "0.49226123", "text": "def link(self):\n return self[\"link\"]", "title": "" }, { "docid": "e24c4adb02e37613f1cdcddccd45411a", "score": "0.4909104", "text": "def _get_next_target(self):\n assert self.run_id < len(self.targets)\n next_target = self.targets[self.run_id]\n return next_target", "title": "" }, { "docid": "d66f18729317fbfc773d926dde99d8b0", "score": "0.48997682", "text": "def path_to_target(self):\n self.adjacency = AdjacencyList(self.current_uni.free_positions())\n try:\n return self.adjacency.a_star(self.current_pos,\n self.tracking_target.current_pos)\n except NoPathException:\n return None", "title": "" }, { "docid": "bcd1048a5efaf7a684fd81325ca50d86", "score": "0.48983908", "text": "def self_link_with_id(self) -> 
pulumi.Output[str]:\n return pulumi.get(self, \"self_link_with_id\")", "title": "" }, { "docid": "822cb454cc5458b916de0cea58f05698", "score": "0.48951566", "text": "def getLink(self, *args):\n return _bullet.btMultiBody_getLink(self, *args)", "title": "" }, { "docid": "540fecfbd5c07712ad214b0d17ad1f58", "score": "0.4894681", "text": "def link_affected_by_interface(self, interface=None):\n if not interface:\n return None\n for link in self:\n if interface in (link.endpoint_a, link.endpoint_b):\n return link\n return None", "title": "" }, { "docid": "677dd49a0315c0961cff5f63531fa00e", "score": "0.48913842", "text": "def actor(self):\n actor = Actor.user(self.actor_id)\n return actor.get_link()", "title": "" }, { "docid": "7f2260338fd66659f96e2b2c787961d8", "score": "0.48708916", "text": "def target(self) -> pulumi.Output['outputs.TargetConfigurationResponse']:\n return pulumi.get(self, \"target\")", "title": "" }, { "docid": "b5bfa7fe71c91bab22f120bb2268a566", "score": "0.4860255", "text": "def getLinkbyuid(self):\n try:\n return self.linkbyuid\n except AttributeError:\n return 1", "title": "" }, { "docid": "994b84095f35cc659c26037daba148d9", "score": "0.4855159", "text": "def linkgetter(data):\r\n return (data['actions'][0]['link'])", "title": "" }, { "docid": "622f2e459156dd046bc0392b5363cd41", "score": "0.485161", "text": "def get_next_link(self, verbose=False):\n raise NotImplementedError( \"Should have implemented this\" )", "title": "" }, { "docid": "77712d2458d936beaefec8beca3b209e", "score": "0.4851136", "text": "def incoming_link(self):\n return self._incoming_link", "title": "" }, { "docid": "d46435d3c13c701464ce9e7b8df09540", "score": "0.48393214", "text": "def find_target(self, ident):\n assert '//' in ident, \"Expected absolute identifier: %r\" % ident\n alias, package_and_target = ident.split('//')\n\n if alias:\n project = self.find_project(alias)\n assert project, \"No project with alias %r\" % alias\n return project.find_target('//' + package_and_target)\n\n colons_in_remainder = package_and_target.count(':')\n if colons_in_remainder == 0:\n # Target name not specified\n pkg_path = package_and_target\n target_name = os.path.basename(pkg_path)\n elif colons_in_remainder == 1:\n # Explicit target name\n pkg_path, target_name = package_and_target.split(':')\n else:\n raise Exception('Too many colons in identifier: %r' % ident)\n\n assert pkg_path in self.packages, \\\n \"Reference to unknown package: %r\" % ident\n assert target_name in self.packages[pkg_path].targets, \\\n \"Target %s not found in package %s\" % \\\n (target_name, self.alias + '//' + pkg_path)\n return self.packages[pkg_path].targets[target_name]", "title": "" }, { "docid": "92bdffeba2543fdd4e6e1daf93a922c0", "score": "0.4838237", "text": "def get_destination(self):\n return self.dest;", "title": "" }, { "docid": "2ab1c5b92c758593c9653cbf4e2cdab9", "score": "0.48351625", "text": "def getDestination(self):\r\n return self.destination", "title": "" }, { "docid": "55b9d792847616e7b15f3666819088a6", "score": "0.48317835", "text": "def target_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target_id\")", "title": "" }, { "docid": "55b9d792847616e7b15f3666819088a6", "score": "0.48317835", "text": "def target_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target_id\")", "title": "" }, { "docid": "55b9d792847616e7b15f3666819088a6", "score": "0.48317835", "text": "def target_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"target_id\")", "title": "" }, { 
"docid": "84dabe5acc038bd2d498ea10c0cd0ddd", "score": "0.4831201", "text": "def updateLink(self):\n\n if self.path[0] == '.':\n self.link = self.follow(self)\n elif not self.root:\n self.link = self.follow(self.rootAncestor)\n else:\n # link points to an external resource. Check in externalFiles section:\n from fudge import suites as suitesModule\n externalFiles = self.findAttributeInAncestry(suitesModule.ExternalFiles.moniker)\n if externalFiles is None:\n print(\"WARNING: unable to locate external resource '%s#%s': no externalFiles section found!\" %\n (self.root, self.path))\n return\n\n externalFile = externalFiles[self.root[1:]] # skip '$' character from start of self.root\n if externalFile.instance is None:\n print(\"WARNING: link '%s#%s' points to an external file that has not been loaded yet.\" %\n (self.root, self.path))\n return\n self.link = self.follow(externalFile.instance)", "title": "" }, { "docid": "33ce5bfa47ee1482443e4150cfe51e35", "score": "0.4824023", "text": "def get_download_link(exposure_object):", "title": "" } ]
d642fa0d04315f8d72b138360099416d
Asigna un int como el identificado del factor sobre el que se obtiene la lectura.
[ { "docid": "c8ab05fe650e115bb39a9924cfeb4937", "score": "0.59797025", "text": "def set_id_factor(self, value):\n self.__idFactor = value", "title": "" } ]
[ { "docid": "6da0bbb2aad6b8aec5ab23be9e14d65b", "score": "0.60904473", "text": "def __int__(self):\n return int(self.zaehler / self.nenner)", "title": "" }, { "docid": "a83745d51bda29d42e1dd44a1cd65756", "score": "0.60733753", "text": "def factor_carga(self):\n return self.elementos / self.n", "title": "" }, { "docid": "521d11b9668e671fb74f48cdd66f09fc", "score": "0.5934092", "text": "def __int__(self):\n return self.number", "title": "" }, { "docid": "64ac37a42fee38b4b1dc939625c07ba2", "score": "0.58863974", "text": "def GetEmissiveFactor(self):\n ...", "title": "" }, { "docid": "4eadd952019255741fd73bbf29a5a281", "score": "0.5870683", "text": "def __int__(self):\n return int(self.__float__())", "title": "" }, { "docid": "f770401667dc5c8e00ef51bb2a11c942", "score": "0.5811316", "text": "def UnitFactor(self) -> float:", "title": "" }, { "docid": "751360d1cb7c3ca36247c29f75f9153c", "score": "0.5764063", "text": "def Dimlfac(self) -> float:", "title": "" }, { "docid": "751360d1cb7c3ca36247c29f75f9153c", "score": "0.5764063", "text": "def Dimlfac(self) -> float:", "title": "" }, { "docid": "751360d1cb7c3ca36247c29f75f9153c", "score": "0.5764063", "text": "def Dimlfac(self) -> float:", "title": "" }, { "docid": "3b0657bc83a155b55fc70655a3fdbb8e", "score": "0.5752862", "text": "def call_put_factor( self ):\n return -1", "title": "" }, { "docid": "f7ce7c8e34ccf7c2d0cba79a50d2bb0c", "score": "0.57426655", "text": "def factor(self, myint) -> Color:\n\n return self.factor_tuple((myint, myint, myint))", "title": "" }, { "docid": "c3a80cc04a63f23eb81723d1f83bac07", "score": "0.5711943", "text": "def __repr__(self):\n return \"Factor Count of {} = {}\".format(self.n, self.factor_count)", "title": "" }, { "docid": "7dd20c676b062bbd7685d9e5dd491c18", "score": "0.5665692", "text": "def get_factorial_for(self, n): # Ejercicio\n return 0", "title": "" }, { "docid": "5e9a887e09d11404e4c5c33ffb694a30", "score": "0.5659134", "text": "def get10Factor(cls, num):\n\t\tp = 0\n\t\tfor i in range(-20, 20):\n\t\t\tif num == num % 10**i:\n\t\t\t\tp = -(i - 1)\n\t\t\t\tbreak\n\n\t\treturn p", "title": "" }, { "docid": "aa3f18af345f5e9f09ce34044161740c", "score": "0.5655149", "text": "def GetInt(self):", "title": "" }, { "docid": "5343041cd663cdff4ccf24c3a77169fd", "score": "0.5646864", "text": "def call_put_factor( self ):\n return 1", "title": "" }, { "docid": "50fb3cdecf7c459b549028d7d9dceba4", "score": "0.5644174", "text": "def getIncrementalFactor(self, decLevel: int) -> int:\n self.checkDecimationLevel(decLevel)\n if decLevel == 0:\n return int(self.decFactors[decLevel])\n else:\n return int(self.decFactors[decLevel] / self.decFactors[decLevel - 1])", "title": "" }, { "docid": "f0b09345e7a3bc4f92d8095cfdbdc48e", "score": "0.56301856", "text": "def numdiv(self):\r\n num=self.noms[str(self.ui.cb_div.currentText())]\r\n return num", "title": "" }, { "docid": "5a592b091d80244f42c67c2ddf8e10e7", "score": "0.56108475", "text": "def _gather_factor(self, factor):\n if factor in self._factor_count:\n self._factor_count[factor] += 1\n else:\n self._factor_count[factor] = 1", "title": "" }, { "docid": "e4089e47c464a0b19082c9412b3f87cb", "score": "0.5601728", "text": "def Number(self) -> int:", "title": "" }, { "docid": "e4089e47c464a0b19082c9412b3f87cb", "score": "0.5601728", "text": "def Number(self) -> int:", "title": "" }, { "docid": "850fc9a0f10d1dc78adede4411e73e93", "score": "0.5570095", "text": "def Surfv(self) -> int:", "title": "" }, { "docid": "35e0d6259c324d9cd74cc04e337453dc", "score": "0.55691725", "text": 
"def init_factors(self):\n self.__fp_electrique_pointe = self.admin.admin_fp_electrique_pointe# value expressed in %\n self.__fq_electrique_pointe = self.admin.admin_fq_electrique_pointe # value expressed in % Facteur de Reactance == FR \n self.__fp_autre_pointe = self.admin.admin_fp_autre_pointe # value expressed in %\n self.__fq_autre_pointe = self.admin.admin_fq_autre_pointe # value expressed in % Facteur de Reactance == FR\n\n self.__fq_recovery_electrique_pointe = self.admin.admin_fq_recovery_electrique_pointe # value expressed in % Facteur de reactance == FR\n self.__fp_recovery_electrique_pointe = self.admin.admin_fp_recovery_electrique_pointe # value expressed in %\n self.__fq_recovery_autre_pointe = self.admin.admin_fq_recovery_autre_pointe # value expressed in % Facteur de reactance == FR \n self.__fp_recovery_autre_pointe = self.admin.admin_fp_recovery_autre_pointe # value expressed in %", "title": "" }, { "docid": "0c3ec85cdb502c17b087338ee3d6c3c4", "score": "0.5565231", "text": "def Numerator(self) -> float:", "title": "" }, { "docid": "709e9962514db64753ceff3c1f1cf1ff", "score": "0.5563321", "text": "def get_num(self):\n pass # implemented in Ada", "title": "" }, { "docid": "a673b024fcd42c154ae5a05ce40b19ef", "score": "0.55529225", "text": "def int(self):\n return self.antiderivative()", "title": "" }, { "docid": "65f55c5459f5764611833b8536c3217f", "score": "0.5549907", "text": "def num(self) -> int:\n return ( (1<<(v.s+v.m+v.f)) )", "title": "" }, { "docid": "35f11e410c0917c7b85a0fa2fd6cd172", "score": "0.5547611", "text": "def Lunits(self) -> int:", "title": "" }, { "docid": "f386779836af0f016a5e1dc2468911b5", "score": "0.55471855", "text": "def Dimtdec(self) -> int:", "title": "" }, { "docid": "f386779836af0f016a5e1dc2468911b5", "score": "0.55471855", "text": "def Dimtdec(self) -> int:", "title": "" }, { "docid": "f386779836af0f016a5e1dc2468911b5", "score": "0.55471855", "text": "def Dimtdec(self) -> int:", "title": "" }, { "docid": "0d8d9e30554dff1b9713db310a4dddd9", "score": "0.5537369", "text": "def __init__(self, divisor):\r\n self.divisor = divisor", "title": "" }, { "docid": "e1e62f9e626fc9cc91f4ecf97aea6abd", "score": "0.5536991", "text": "def __int__(self):\r\n return int(self.get_decimal_value())", "title": "" }, { "docid": "d5fa897e496154ca90a27d6ca701c5d2", "score": "0.5531309", "text": "def Dimadec(self) -> int:", "title": "" }, { "docid": "d5fa897e496154ca90a27d6ca701c5d2", "score": "0.5531309", "text": "def Dimadec(self) -> int:", "title": "" }, { "docid": "d5fa897e496154ca90a27d6ca701c5d2", "score": "0.5531309", "text": "def Dimadec(self) -> int:", "title": "" }, { "docid": "23f9129a75a8c9793410eaaaa6a42314", "score": "0.55199724", "text": "def Degree(self) -> int:", "title": "" }, { "docid": "23f9129a75a8c9793410eaaaa6a42314", "score": "0.55199724", "text": "def Degree(self) -> int:", "title": "" }, { "docid": "23f9129a75a8c9793410eaaaa6a42314", "score": "0.55198014", "text": "def Degree(self) -> int:", "title": "" }, { "docid": "b4e0586d218cc5393305d52163ee2d17", "score": "0.55165476", "text": "def ShadeTintValue(self) -> float:", "title": "" }, { "docid": "7b67abd077a98c06d222c32ecea3f4fd", "score": "0.55081284", "text": "def Dimaltd(self) -> int:", "title": "" }, { "docid": "7b67abd077a98c06d222c32ecea3f4fd", "score": "0.55081284", "text": "def Dimaltd(self) -> int:", "title": "" }, { "docid": "7b67abd077a98c06d222c32ecea3f4fd", "score": "0.55081284", "text": "def Dimaltd(self) -> int:", "title": "" }, { "docid": "845416dcf2edf7c6fa28034c75f304a4", 
"score": "0.54795146", "text": "def Multiply(self, factor):", "title": "" }, { "docid": "ea1e917eb6f1850c76e258236ff28b49", "score": "0.54751474", "text": "def circuloperim():\n radio = float(input(\"Introduzca el radio el circulo: \"))\n pi = float(3.14159)\n if radio >= 0:\n long = float(2 * pi * radio)\n print(\"La longitud del perimetro de este circulo es: \", long)\n else:\n print(\"NO puede ser menor ni igual a cero!\")\n \"\"\"decidi utilizar float por razones muy obvias, es decir, en caso de que se utilicen decimales.\"\"\"", "title": "" }, { "docid": "f66c35c0aea8f585d9538fed40a4e283", "score": "0.5444551", "text": "def TrackingFactor(self) -> float:", "title": "" }, { "docid": "f66c35c0aea8f585d9538fed40a4e283", "score": "0.5444551", "text": "def TrackingFactor(self) -> float:", "title": "" }, { "docid": "0f78b64f7621549656d3fd781e36da48", "score": "0.54396516", "text": "def Useri3(self) -> int:", "title": "" }, { "docid": "2416d40b2cd9b7d8b8399d03cfa384e4", "score": "0.5438971", "text": "def value(self) -> int:", "title": "" }, { "docid": "c623614425e35e2cdeef9362dce47e34", "score": "0.5436683", "text": "def number_literature(self) -> int:\n return self._number_literature", "title": "" }, { "docid": "b7835b6068c165b9328c3dcacc8b1e5c", "score": "0.5433747", "text": "def Dimaunit(self) -> int:", "title": "" }, { "docid": "b7835b6068c165b9328c3dcacc8b1e5c", "score": "0.5433747", "text": "def Dimaunit(self) -> int:", "title": "" }, { "docid": "b7835b6068c165b9328c3dcacc8b1e5c", "score": "0.5433747", "text": "def Dimaunit(self) -> int:", "title": "" }, { "docid": "798a701169b5f2064ec6fe08620eb2fc", "score": "0.5428859", "text": "def get_number(self):\n\t\tself.result = int( math.ceil(- ( self.maximum - self.minimum ) * self.epoch / self.total + self.maximum) )\n\t\tself.epoch += 1\n\t\treturn self.result", "title": "" }, { "docid": "1fbe6db4afda68c635d2da476dec8fc4", "score": "0.54028744", "text": "def _matrix_factor(self, index):\n pass", "title": "" }, { "docid": "e7362c1b15f288bb22b1543fca19795f", "score": "0.54016256", "text": "def GetDigits(self):", "title": "" }, { "docid": "d040dc5329573b6d89878e43b6b78c6a", "score": "0.54009354", "text": "def funcion(numero):\r\n return (numero)**2 + 15*(numero)+8", "title": "" }, { "docid": "0d7b470f1c5b134ce0aedcf10b577c15", "score": "0.53992224", "text": "def unit_factor(self):\n\n return 5.", "title": "" }, { "docid": "c89b799aa4cc16604315e3763de0acbf", "score": "0.5394661", "text": "def factor(self, n: int) -> int:\n if n > self.maxElem:\n raise ValueError('''cannot factor element larger than \n largest prime in FactorizationDict!''')\n elif n < 1:\n raise ValueError('cannot factor zero, or negative numbers')\n \n if n in self:\n return self[n]\n for prime in self.primes:\n if n % prime == 0:\n factorization = self.factor(prime) + self.factor(n//prime)\n self._factors[n] = factorization\n return factorization", "title": "" }, { "docid": "bb3c38efd89b2e0d2f4c4b2e5297b48e", "score": "0.53937894", "text": "def Dimlunit(self) -> int:", "title": "" }, { "docid": "bb3c38efd89b2e0d2f4c4b2e5297b48e", "score": "0.53937894", "text": "def Dimlunit(self) -> int:", "title": "" }, { "docid": "bb3c38efd89b2e0d2f4c4b2e5297b48e", "score": "0.53937894", "text": "def Dimlunit(self) -> int:", "title": "" }, { "docid": "9808e3eb15c82e0eee8725d94267f5be", "score": "0.53848827", "text": "def getFaceValue(self): # Returns value just generated\n return self.num", "title": "" }, { "docid": "282514bc423befcbc38f204ab320e2ca", "score": "0.53810835", "text": "def 
Value(self) -> int:", "title": "" }, { "docid": "f678ee448348190b8c8f9916d35c3ddd", "score": "0.5368032", "text": "def factor(i, j, k, l):\n if i == j and k == l and i == k:\n return 1.0\n elif i == j and k == l:\n return 2.0\n elif (\n (i == k and j == l)\n or (i == j and i == k)\n or (j == k and j == l)\n or (i == j or k == l)\n ):\n return 4.0\n else:\n return 8.0", "title": "" }, { "docid": "44a066124ce0ac7ba16e9531c2427b10", "score": "0.53650975", "text": "def number(self):\n return self._num", "title": "" }, { "docid": "efc59106b9c41bb5aadaa299e3fef4de", "score": "0.53614795", "text": "def factor(self, n: int) -> int:\n if isinstance(n, int):\n if n < 0:\n raise ValueError('Value must be positive')\n elif n == 1:\n return 1\n else:\n return n * self.factor(n-1)\n else:\n raise TypeError('Value must be type int')", "title": "" }, { "docid": "c4e2d3309e2f03afb32e7558f1e72e24", "score": "0.535967", "text": "def DegreeInU(self) -> int:", "title": "" }, { "docid": "fe0d488200790138d3e2359f96ae6bd8", "score": "0.5351201", "text": "def luminantiefactor(self):\n return self._luminantiefactor.get_waarde()", "title": "" }, { "docid": "eca6c32a2f94d9981228547fd6bbadd9", "score": "0.5344871", "text": "def Dimaltu(self) -> int:", "title": "" }, { "docid": "eca6c32a2f94d9981228547fd6bbadd9", "score": "0.5344871", "text": "def Dimaltu(self) -> int:", "title": "" }, { "docid": "eca6c32a2f94d9981228547fd6bbadd9", "score": "0.5344871", "text": "def Dimaltu(self) -> int:", "title": "" }, { "docid": "401d24261fa8968367029422eb3078e2", "score": "0.5336371", "text": "def Useri4(self) -> int:", "title": "" }, { "docid": "a4e5c69dfcb324f8c6b123ca7a7e8aab", "score": "0.5318101", "text": "def factor_min(self):\n return self.min / self.scale", "title": "" }, { "docid": "e173e4dcd9e378a398411ef0ba2398e5", "score": "0.53143406", "text": "def accept(self, visitor):\n visitor.visit_factor(self)", "title": "" }, { "docid": "64d00a993ad12cb6ae1c6d0466f1c33f", "score": "0.53135484", "text": "def Surftab1(self) -> int:", "title": "" }, { "docid": "78a62ec787a921e81e2fea4635d4dd4d", "score": "0.53036374", "text": "def multiplier(self):\n return self._multiplier", "title": "" }, { "docid": "65da2022c58723bc9cfbd0e57085b2b4", "score": "0.52964187", "text": "def level(self):\n return math.floor(sum(abs(x) for x in self.acumen.values()) / 100)", "title": "" }, { "docid": "3ecda190559cf10a0aa51da6dd9a07db", "score": "0.52908987", "text": "def factorSA(parser, node, children):\n if parser.debug:\n print(\"Factor {}\".format(children))\n if len(children) == 1:\n return children[0]\n sign = -1 if children[0] == '-' else 1\n return sign * children[-1]", "title": "" }, { "docid": "e2ecd058c35ddca0fdb4a185213c9906", "score": "0.5289141", "text": "def value(self):\n return self._factor * self._scale", "title": "" }, { "docid": "570c8dc952004468e09f03b2aebb93c7", "score": "0.5282603", "text": "def _getUnit(self):\n return 1", "title": "" }, { "docid": "a58dff319b9bf44e10a66f4849617dab", "score": "0.5274341", "text": "def mario_number(level):\n if level == 1:\n return 1\n elif level % 10 == 0:\n return 0\n else:\n return mario_number(level // 10) + mario_number((level // 10) // 10)", "title": "" }, { "docid": "a3116382044cadf3396dfa82b6b29247", "score": "0.52730167", "text": "def set_number(self):\n self.number = int(input('Введите желаемое число'))\n return self.number", "title": "" }, { "docid": "47a6e723fa082b69fda392bf2a1da6b6", "score": "0.527264", "text": "def number(self) -> int:\n return pulumi.get(self, \"number\")", 
"title": "" }, { "docid": "7d56f0549972d0182e6f224014a5dc7a", "score": "0.5251619", "text": "def __str__(self):\n return \"%d / %d\" % (self.numer, self.denom)", "title": "" }, { "docid": "d1a4274485f7bde91663e86601f88274", "score": "0.52456445", "text": "def get_55():\n return 55", "title": "" }, { "docid": "c6dea2f7466c2567ea4027a3e9d2106d", "score": "0.5244783", "text": "def get_dial(self):\n return self.count*self.stepsize", "title": "" }, { "docid": "c6dea2f7466c2567ea4027a3e9d2106d", "score": "0.5244783", "text": "def get_dial(self):\n return self.count*self.stepsize", "title": "" }, { "docid": "8b2af5fd4c0e80ebe2a9aae361d2e0b3", "score": "0.523935", "text": "def Chamferc(self) -> float:", "title": "" }, { "docid": "d0f35ceeb98f9f3ae62014267c287cdd", "score": "0.52383304", "text": "def Precision(self) -> int:", "title": "" }, { "docid": "efd0c422801346136d6d3e93649cbce7", "score": "0.523259", "text": "def getInteger(self) -> int:", "title": "" }, { "docid": "f7314fe8c24504b95d8c54c427cd9625", "score": "0.5231577", "text": "def __int__(self) -> int:\n return self._value", "title": "" }, { "docid": "53334b3e07ff09754b7b6f3360d2a8af", "score": "0.52259654", "text": "def __int__(self):\n\n return self.value", "title": "" }, { "docid": "8f02aa3f3a9acbaa48d4bdd7a5b1e374", "score": "0.521999", "text": "def Useri5(self) -> int:", "title": "" }, { "docid": "39e344a2e4df3b04b56ba8a71c742666", "score": "0.520706", "text": "def divisor(self) -> int:\n return self.cluster.get(\"divisor\") or 1", "title": "" }, { "docid": "820664c8bbc5cc9433e0129814d67120", "score": "0.52044225", "text": "def Filletrad(self) -> float:", "title": "" }, { "docid": "9af242046e4c15236e090a120ac82b24", "score": "0.5199715", "text": "def get_factorial_while(self, n): # Ejercicio\n return 0", "title": "" }, { "docid": "26b24466e452fa212cb0dbe87b665b6f", "score": "0.5198649", "text": "def Lookup_Vect_or_Tim_Mod(self, eint):\n if eint == 0:\n return \"Sample by sample\"\n elif eint == 1:\n return \"Pixel by pixel\"\n else:\n return str(eint) + \", \" + self.TXT_UNKN_ENUM", "title": "" }, { "docid": "175823ab88ba518677a73426fec358a4", "score": "0.5198206", "text": "def Dimdec(self) -> int:", "title": "" }, { "docid": "175823ab88ba518677a73426fec358a4", "score": "0.5198206", "text": "def Dimdec(self) -> int:", "title": "" } ]
85a30106f5afbb277c96bbd4beff562b
Generate a map of known licenses based on `nixpkgs`.
[ { "docid": "3ae921fecdfa86d5f5c8bdf8b5f02ba7", "score": "0.7784949", "text": "def get_nix_licenses():\n global _nix_licenses\n\n if _nix_licenses is None:\n nix_licenses_json = check_output([\n 'nix-instantiate', '--eval', '--expr',\n 'with import <nixpkgs> { }; builtins.toJSON lib.licenses'])\n nix_licenses_json = nix_licenses_json.decode('utf-8')\n\n # Dictionary which contains the contents of nixpkgs.lib.licenses.\n _nix_licenses = json.loads(json.loads(nix_licenses_json))\n\n # Convert all values to lowercase.\n for entry in _nix_licenses.values():\n for key, value in entry.items():\n try:\n entry[key] = value.lower()\n except AttributeError:\n # Skip values which don't have a lower() function.\n pass\n\n return _nix_licenses", "title": "" } ]
[ { "docid": "73fc30d86229d5f0c76a60b8fdfe497d", "score": "0.6401995", "text": "def get_licenses_from_pkginfo(self):\n licenses = set()\n data = \"\"\n try:\n try:\n data = self.pip_req.get_dist().get_metadata('PKG-INFO')\n except (FileNotFoundError, IOError):\n data = self.pip_req.get_dist().get_metadata('METADATA')\n except (FileNotFoundError, AttributeError):\n for dist in pkg_resources.find_on_path(None, self.pip_req.source_dir):\n try:\n data = dist.get_metadata('PKG-INFO')\n except (FileNotFoundError, IOError):\n data = dist.get_metadata('METADATA')\n break\n\n for line in data.split('\\n'):\n\n # License string from setup() function.\n if line.startswith('License: '):\n lic = line.split('License: ')[-1]\n licenses.add(lic.strip())\n\n # License strings from classifiers.\n elif line.startswith('Classifier: License ::'):\n lic = line.split('::')[-1]\n licenses.add(lic.strip())\n\n return filter_licenses(licenses)", "title": "" }, { "docid": "1202dd7576f5085f8f39d068b2e1fe75", "score": "0.63305", "text": "def load_license_list(file_name):\n licenses_map = {}\n with codecs.open(file_name, 'rb', encoding='utf-8') as lics:\n licenses = json.load(lics)\n version = licenses['licenseListVersion'].split('.')\n for lic in licenses['licenses']:\n if lic.get('isDeprecatedLicenseId'):\n continue\n name = lic['name']\n identifier = lic['licenseId']\n licenses_map[name] = identifier\n licenses_map[identifier] = name\n return version, licenses_map", "title": "" }, { "docid": "fab9602804ee267786a3ffcdf1d1c23f", "score": "0.6049798", "text": "def licenses_mapper(license, licenses, package): # NOQA\n declared_license = get_declared_licenses(license) or []\n declared_license.extend(get_declared_licenses(licenses) or [])\n package.declared_license = declared_license\n return package", "title": "" }, { "docid": "62fcf2ecf78d023460cc9dc9dd5e3515", "score": "0.59321064", "text": "def parse_licenses_xml(xml) -> Dict[str, Optional[str]]:\n license_map = {}\n for dependency in xml.find(\"dependencies\"):\n name = \"@\".join(\n [dependency.find(\"artifactId\").text, dependency.find(\"version\").text]\n )\n licenses = dependency.find(\"licenses\")\n license_name = None\n for license in licenses:\n if license_name is None:\n license_name = license.find(\"name\").text\n else:\n license_name = \"({})\".format(\n \" AND \".join([license_name, license.find(\"name\").text])\n )\n license_map[name] = license_name\n\n return license_map", "title": "" }, { "docid": "94debc65e71cc03d334131f8c21d30b0", "score": "0.59273654", "text": "def test_get_license_license_info_list(self):\n pass", "title": "" }, { "docid": "1b4475f79a9f1c8431bb1eb68e1fbf56", "score": "0.5924733", "text": "def _parse_licenses(output):\n licenses = {}\n # We are starting from 2, since the first line is the\n # list of fields and the second one is a separator.\n # We can't use csv to parse this, unfortunately.\n for line in output.strip().splitlines()[2:]:\n product, _, status = line.rpartition(\" \")\n product = product.strip()\n licenses[product] = status\n return licenses", "title": "" }, { "docid": "4abd28a2416d1d44f78d47d2774d00ba", "score": "0.58525896", "text": "def build_dist_index(pkgs):\n return dict((p.key, DistPackage(p)) for p in pkgs)", "title": "" }, { "docid": "45e483b2fb833fe66e84f263f2c4acc1", "score": "0.58409184", "text": "def getLicenses(vars_scope):\n # Need to provide some file value (does not have to exist). The task will automatically skip over if the file is not found. 
Otherwise, will throw an error if no file is specified.\n vars_scope[\"splunk\"][\"license_uri\"] = os.environ.get(\"SPLUNK_LICENSE_URI\", vars_scope[\"splunk\"].get(\"license_uri\") or \"splunk.lic\")\n vars_scope[\"splunk\"][\"wildcard_license\"] = False\n if vars_scope[\"splunk\"][\"license_uri\"] and '*' in vars_scope[\"splunk\"][\"license_uri\"]:\n vars_scope[\"splunk\"][\"wildcard_license\"] = True\n vars_scope[\"splunk\"][\"ignore_license\"] = False\n if os.environ.get(\"SPLUNK_IGNORE_LICENSE\", \"\").lower() == \"true\":\n vars_scope[\"splunk\"][\"ignore_license\"] = True\n vars_scope[\"splunk\"][\"license_download_dest\"] = os.environ.get(\"SPLUNK_LICENSE_INSTALL_PATH\", vars_scope[\"splunk\"].get(\"license_download_dest\") or \"/tmp/splunk.lic\")", "title": "" }, { "docid": "7bb2210038ee5891b374cb2a246e8576", "score": "0.57738626", "text": "def license():\n mit = map(lambda line: \"// \" + line, open(\"LICENSE\").read().split(\"\\n\"))\n return \"\\n\".join(mit)", "title": "" }, { "docid": "43c806501593ad055462d4d1d950d20c", "score": "0.57619333", "text": "def licenses(self):\n return self._get_list(\"license\")", "title": "" }, { "docid": "dde34063c3c9512d9a0b088d386155f1", "score": "0.5675397", "text": "def _get_licenses(self):\n return self.__licenses", "title": "" }, { "docid": "35cc6da29d968f6c954d0a0ae03771d2", "score": "0.5653719", "text": "def get_licenses(self):\n ip_list = self.fcr_fab_wide_ip()\n for ip in ip_list:\n connect_tel_noparse(ip,'root','password')\n sw_info = SwitchInfo()\n sw_name = sw_info.switch_name()\n f = \"%s%s%s\"%(\"logs/Switch_Licenses/License_File_\", sw_name ,\".txt\")\n ff = liabhar.FileStuff(f,'a+b') ###open new file or clobber old\n header = \"%s%s%s%s\" % (\"\\nLICENSE FILE \\n\", ip+\"\\n\" , sw_name, \"\\n==============================\\n\\n\")\n cons_out = fos_cmd(\"licenseshow\")\n ff.write(header)\n ff.write(cons_out+\"\\n\")\n ff.close()", "title": "" }, { "docid": "77a751330a1872cfc14eb0b252452197", "score": "0.56227815", "text": "def load_licenses_info(info_path):\n with codecs.open(info_path, encoding=\"utf-8\") as licenses_file:\n return json.loads(licenses_file.read())", "title": "" }, { "docid": "474375888905e4e2434c9b2e87d68f0e", "score": "0.5622755", "text": "def _lookup_by_mapping():\n like = distro.like().lower()\n distribution_id = distro.id().lower()\n version = distro.major_version()\n if 'arch' in (distribution_id, like):\n version = 'any'\n init_sys = constants.DIST_TO_INITSYS.get(\n distribution_id, constants.DIST_TO_INITSYS.get(like))\n if init_sys:\n system = init_sys.get(version)\n return [system] if system else []", "title": "" }, { "docid": "70c54a6c61db977bde3e3c4899dd830a", "score": "0.56181604", "text": "def get_declared_licenses(license_object):\n if not license_object:\n return []\n\n if isinstance(license_object, str):\n # current, up to date form\n return [license_object]\n\n declared_licenses = []\n if isinstance(license_object, dict):\n # old, deprecated forms\n \"\"\"\n \"license\": {\n \"type\": \"MIT\",\n \"url\": \"http://github.com/kriskowal/q/raw/master/LICENSE\"\n }\n \"\"\"\n declared_licenses.append(license_object)\n\n elif isinstance(license_object, list):\n # old, deprecated forms\n \"\"\"\n \"licenses\": [{\"type\": \"Apache License, Version 2.0\",\n \"url\": \"http://www.apache.org/licenses/LICENSE-2.0\" } ]\n or\n \"licenses\": [\"MIT\"],\n \"\"\"\n declared_licenses.extend(license_object)\n return declared_licenses", "title": "" }, { "docid": "81ee0cd1566d06f9facee171cdb0285a", "score": 
"0.56097704", "text": "def get_license_information(self):\r\n licenses = []\r\n rs = self.client.executeQuery(hana_queries.DATABASE_LICENSE)\r\n\r\n while rs.next():\r\n license_osh = ObjectStateHolder('db_license')\r\n license_osh.setContainer(self.hana_osh)\r\n license_osh.setStringAttribute('name', rs.getString('HARDWARE_KEY'))\r\n license_osh.setStringAttribute('product_name', rs.getString('PRODUCT_NAME'))\r\n license_osh.setIntegerAttribute('limit', rs.getString('PRODUCT_LIMIT'))\r\n license_osh.setDateAttribute('start_date', rs.getDate('START_DATE'))\r\n \r\n exp_date = rs.getDate('EXPIRATION_DATE')\r\n if exp_date:\r\n license_osh.setDateAttribute('expiration_date', exp_date)\r\n \r\n license_osh.setBoolAttribute('enforced', rs.getString('ENFORCED'))\r\n license_osh.setStringAttribute('install_no', rs.getString('INSTALL_NO'))\r\n license_osh.setIntegerAttribute('system_no', rs.getString('SYSTEM_NO'))\r\n license_osh.setIntegerAttribute('usage', rs.getString('PRODUCT_USAGE'))\r\n \r\n licenses.append(license_osh)\r\n rs.close()\r\n\r\n return licenses", "title": "" }, { "docid": "5de2a536fa453850e15183dc7ab38737", "score": "0.5543786", "text": "def _get_packages():\n packages = ['rsyslog']\n packages_per_service_dict = {}\n # Adding premium components on all, even if we're on community, because\n # yum will return 0 (success) if any packages install successfully even if\n # some of the specified packages don't exist.\n _, rh_version = _get_os_distro()\n if service_is_in_config(MANAGER_SERVICE):\n manager_packages = sources.manager\n # Premium components\n manager_packages += sources.manager_cluster + sources.manager_premium\n packages += manager_packages\n packages_per_service_dict[MANAGER_SERVICE] = manager_packages\n\n if service_is_in_config(DATABASE_SERVICE):\n db_packages = sources.db\n # Premium components\n db_packages += sources.db_cluster\n packages += db_packages\n packages_per_service_dict[DATABASE_SERVICE] = db_packages\n\n if service_is_in_config(QUEUE_SERVICE):\n queue_packages = sources.queue\n if rh_version == \"8\" and machine() == \"x86_64\":\n queue_packages += sources.queue_rh8_x86\n else:\n queue_packages += sources.queue_other\n # Premium components\n queue_packages += sources.queue_cluster\n packages += queue_packages\n packages_per_service_dict[QUEUE_SERVICE] = queue_packages\n\n if service_is_in_config(MONITORING_SERVICE):\n monitoring_packages = sources.prometheus\n # Premium components\n monitoring_packages += sources.prometheus_cluster\n packages += monitoring_packages\n for main_service in packages_per_service_dict:\n packages_per_service_dict[main_service] += monitoring_packages\n\n if service_is_in_config(ENTROPY_SERVICE):\n packages += sources.haveged\n for main_service in packages_per_service_dict:\n packages_per_service_dict[main_service] += sources.haveged\n\n return packages, packages_per_service_dict", "title": "" }, { "docid": "26e9f5f28b64247cce1da54a39d63f19", "score": "0.55005795", "text": "def parse_requirements_file(req_file):\n packages = {}\n\n for line in req_file:\n line = line.strip()\n for prefix in IGNORED_PREFIXES:\n if not line or line.startswith(prefix):\n line = None\n break\n if line:\n line = line.strip()\n # lets strip any trailing comments\n if \"#\" in line:\n line = line[:line.index(\"#\")].strip()\n\n if \";\" in line:\n line = line[:line.index(\";\")].strip()\n\n use_separator = None\n for separator in SEPRATORS:\n if separator in line:\n use_separator = separator\n break\n if use_separator:\n package_name, version = 
line.split(use_separator)\n #lets strip extras from the package name\n if \"[\" in package_name:\n package_name = package_name[:package_name.index(\"[\")].strip()\n\n packages[package_name] = version\n else:\n print(TERMINAL.yellow('{} not pinned to a version, skipping'.format(line)))\n\n return packages", "title": "" }, { "docid": "b3177896617a3a8efea3f0f3a18c5f8d", "score": "0.54635626", "text": "def get_packages():", "title": "" }, { "docid": "2a5ad2553e87db039ecdce59e55a8b50", "score": "0.5397612", "text": "def rpmpackagelist(rts):\n return [{'name':header[rpm.RPMTAG_NAME],\n 'epoch':header[rpm.RPMTAG_EPOCH],\n 'version':header[rpm.RPMTAG_VERSION],\n 'release':header[rpm.RPMTAG_RELEASE],\n 'arch':header[rpm.RPMTAG_ARCH],\n 'gpgkeyid':header.sprintf(\"%|SIGGPG?{%{SIGGPG:pgpsig}}:{None}|\").split()[-1]}\n for header in rts.dbMatch()]", "title": "" }, { "docid": "3e38071460eda679b19634eadb3c0c3d", "score": "0.5394904", "text": "def get_available_packages():\n available = {}\n\n for modulename in os.listdir(procyon_settings.REPO_PATH):\n if modulename != '__init__.py' and modulename.endswith('.py'):\n package_name, package_data = get_package_data(modulename)\n\n if package_name and package_data:\n available.setdefault(package_name, package_data)\n\n return available", "title": "" }, { "docid": "7c4dbde684c7e533ce489bd3bbe9828d", "score": "0.53529435", "text": "def decode_rpm_packages_list(ftp_url, platform, pkg_dict):\n \n status, packages_rsp = commands.getstatusoutput(\"curl -l \" + ftp_url + \"/*\")\n \n packages_rsp = packages_rsp.split('\\n')\n for packagename in packages_rsp:\n packagename = packagename.strip()\n if packagename.endswith('.src.rpm'):\n pos = packagename.rfind('-')\n packagename = packagename[:pos]\n pos = packagename.rfind('-')\n basename = packagename[:pos]\n version = packagename[pos+1:]\n\n packagename = basename + \"\\t\" + version\n if pkg_dict.has_key(packagename):\n pkg_dict[packagename] = pkg_dict[packagename] + \",\" + platform\n else :\n pkg_dict[packagename] = platform\n return", "title": "" }, { "docid": "b77e68b2399b428816857a6c6e68628f", "score": "0.5349504", "text": "def get_package_data():\n package_data = {}\n package_data['cdsdashboards'] = ['alembic.ini', 'cdsalembic/*', 'cdsalembic/versions/*']\n return package_data", "title": "" }, { "docid": "d80bae3c4ee083326a60a14b1ffb11a6", "score": "0.53273153", "text": "def test_get_license_license_info_by_moid(self):\n pass", "title": "" }, { "docid": "9972973db10db0da0bef5492d53f62ef", "score": "0.53192836", "text": "def _get_packs(self):\n ver = self.client.numeric_ver\n packs = [\"dist-packages\", \"dist-local-packages\"]\n if ver < 11:\n packs += [\"extra-addons\"]\n return packs", "title": "" }, { "docid": "b5a82d50f33fde8649db6805263dc573", "score": "0.5309691", "text": "def __fetch_license(self, obj):\n sub_obj = obj.get_sub_data('span', {'data-test-id': 'UnitHeader-licenses'})\n lic_list = obj.get_value_from_list('a', None, None, None, None, sub_obj)\n final_lic_list = []\n for lic in lic_list or []:\n lic = lic.strip()\n if lic:\n if ', ' in lic:\n lics = lic.split(', ')\n final_lic_list.extend(lics)\n elif ',' in lic:\n lics = lic.split(',')\n final_lic_list.extend(lics)\n else:\n final_lic_list.append(lic)\n return final_lic_list", "title": "" }, { "docid": "1cced3943e74db90f96c2a5889ad9dd4", "score": "0.5298632", "text": "def get_text(self, distributor: str, license_name: str) -> str:\n\n ret = []\n ret += dedent('''\n # Copyright {} {}\n # Distributed under the terms of the {} license\n\n 
''').format(\n strftime(\"%Y\", gmtime()), distributor,\n license_name)\n\n ret += '{ lib, buildRosPackage, fetchurl, ' + \\\n ', '.join(sorted(set(map(self._to_nix_parameter,\n self.build_inputs |\n self.propagated_build_inputs |\n self.check_inputs |\n self.native_build_inputs |\n self.propagated_native_build_inputs)))\n ) + ' }:'\n\n ret += dedent('''\n buildRosPackage {{\n pname = \"ros-{distro_name}-{name}\";\n version = \"{version}\";\n\n src = fetchurl {{\n url = \"{src_url}\";\n name = \"{src_name}\";\n sha256 = \"{src_sha256}\";\n }};\n\n buildType = \"{build_type}\";\n ''').format(\n distro_name=self.distro_name,\n name=self.name,\n version=self.version,\n src_url=self.src_url,\n src_name=self.src_name,\n src_sha256=self.src_sha256,\n build_type=self.build_type)\n\n if self.build_inputs:\n ret += \" buildInputs = {};\\n\" \\\n .format(self._to_nix_list(sorted(self.build_inputs)))\n\n if self.check_inputs:\n ret += \" checkInputs = {};\\n\" \\\n .format(self._to_nix_list(sorted(self.check_inputs)))\n\n if self.propagated_build_inputs:\n ret += \" propagatedBuildInputs = {};\\n\" \\\n .format(self._to_nix_list(sorted(\n self.propagated_build_inputs)))\n\n if self.native_build_inputs:\n ret += \" nativeBuildInputs = {};\\n\" \\\n .format(self._to_nix_list(sorted(self.native_build_inputs)))\n\n if self.propagated_native_build_inputs:\n ret += \" propagatedNativeBuildInputs = {};\\n\".format(\n self._to_nix_list(sorted(self.propagated_native_build_inputs)))\n\n ret += dedent('''\n meta = {{\n description = ''{}'';\n license = with lib.licenses; {};\n }};\n }}\n ''').format(self.description,\n self._to_nix_list(map(attrgetter('nix_code'),\n self.licenses)))\n\n return ''.join(ret)", "title": "" }, { "docid": "85881fc803e7d9c1c67663495a2ba008", "score": "0.5290285", "text": "def plexxi_api_get_licenses(self, **kwargs):\n kwargs['uri'] = '/licenses/'\n return self.get(**kwargs)", "title": "" }, { "docid": "22dfdd6ba99abab18f38197831bbc47b", "score": "0.52323425", "text": "def parse_licenses_xml(data_root) -> Dict:\n dep_data: Dict[Tuple[str, str], List[str]] = {}\n\n for dependency in data_root.find(\"dependencies\"):\n dep_id = (\n dependency.find(\"artifactId\").text, dependency.find(\"version\").text\n )\n licenses = dependency.find(\"licenses\")\n dep_data[dep_id] = []\n for license_data in licenses:\n license_url_text = getattr(license_data.find(\"url\"), \"text\", \"\")\n if license_url_text.startswith('http'):\n dep_data[dep_id].append(license_url_text)\n return dep_data", "title": "" }, { "docid": "d74a0f602f344e2fb9329f5c2c48d6a8", "score": "0.5213065", "text": "def licenseinfos(self):\n return self._licenseinfos", "title": "" }, { "docid": "6d28c567ea7d843a10065edb26ac5192", "score": "0.51969945", "text": "def lsb_release():\n d = {}\n with open('/etc/os-release', 'r') as lsb:\n for l in lsb:\n s = l.split('=')\n if len(s) != 2:\n continue\n d[s[0].strip()] = s[1].strip()\n return d", "title": "" }, { "docid": "bb72fdffdb18c941a1963d2a652c94a3", "score": "0.51769626", "text": "def getLicense(self):\n capture_cmd = fos_cmd(\"licenseshow\")\n ras = re.compile('([\\w\\d]{16,37})(?=:\\\\r\\\\n)')\n ras = ras.findall(capture_cmd)\n return(ras)", "title": "" }, { "docid": "6922afa2c031c2fe26d50556266e0ba6", "score": "0.5156155", "text": "def buildPkgRefDict(pkgs, casematch=True):\n pkgdict = {}\n for pkg in pkgs:\n (n, a, e, v, r) = pkg.pkgtup\n if not casematch:\n n = n.lower()\n a = a.lower()\n e = e.lower()\n v = v.lower()\n r = r.lower()\n name = n\n nameArch = '%s.%s' % (n, a)\n 
nameVerRelArch = '%s-%s-%s.%s' % (n, v, r, a)\n nameVer = '%s-%s' % (n, v)\n nameVerRel = '%s-%s-%s' % (n, v, r)\n envra = '%s:%s-%s-%s.%s' % (e, n, v, r, a)\n nevra = '%s-%s:%s-%s.%s' % (n, e, v, r, a)\n for item in [name, nameArch, nameVerRelArch, nameVer, nameVerRel, envra, nevra]:\n if item not in pkgdict:\n pkgdict[item] = []\n pkgdict[item].append(pkg)\n \n return pkgdict", "title": "" }, { "docid": "156ff8aaf57dabd2cc6d2ef74cf2b017", "score": "0.514558", "text": "def license_information(self, **kwargs):\n api_method = self._category + \"Licence Information\"\n target = self._prefix + \"license\"\n return self._request(\"GET\", target, api_method, kwargs)", "title": "" }, { "docid": "d93955756fb40d7d52325e4084c3676e", "score": "0.51441586", "text": "def generate(abouts, template_string=None, vartext_dict=None):\n syntax_error = check_template(template_string)\n if syntax_error:\n return 'Template validation error at line: %r: %r' % (syntax_error)\n template = jinja2.Template(template_string)\n\n try:\n captured_license = []\n license_key_and_context = {}\n sorted_license_key_and_context = {}\n license_file_name_and_key = {}\n license_key_to_license_name = {}\n license_name_to_license_key = {}\n # FIXME: This need to be simplified\n for about in abouts:\n # about.license_file.value is a OrderDict with license_text_name as\n # the key and the license text as the value\n if about.license_file:\n # We want to create a dictionary which have the license short name as\n # the key and license text as the value\n for license_text_name in about.license_file.value:\n if not license_text_name in captured_license:\n captured_license.append(license_text_name)\n if license_text_name.endswith('.LICENSE'):\n license_key = license_text_name.strip('.LICENSE')\n else:\n license_key = license_text_name\n license_key_and_context[license_key] = about.license_file.value[license_text_name]\n sorted_license_key_and_context = collections.OrderedDict(sorted(license_key_and_context.items()))\n license_file_name_and_key[license_text_name] = license_key\n\n # Convert/map the key in license expression to license name\n if about.license_expression.value and about.license_name.value:\n special_char_in_expression, lic_list = parse_license_expression(about.license_expression.value)\n lic_name_list = about.license_name.value\n lic_name_expression_list = []\n\n # The order of the license_name and key should be the same\n # The length for both list should be the same\n assert len(lic_name_list) == len(lic_list)\n\n # Map the license key to license name\n index_for_license_name_list = 0\n for key in lic_list:\n license_key_to_license_name[key] = lic_name_list[index_for_license_name_list]\n license_name_to_license_key[lic_name_list[index_for_license_name_list]] = key\n index_for_license_name_list = index_for_license_name_list + 1\n\n # Create a license expression with license name instead of key\n for segment in about.license_expression.value.split():\n if segment in license_key_to_license_name:\n lic_name_expression_list.append(license_key_to_license_name[segment])\n else:\n lic_name_expression_list.append(segment)\n\n # Join the license name expression into a single string\n lic_name_expression = ' '.join(lic_name_expression_list)\n\n # Add the license name expression string into the about object\n about.license_name_expression = lic_name_expression\n\n # Get the current UTC time\n utcnow = datetime.datetime.utcnow()\n rendered = template.render(abouts=abouts, common_licenses=COMMON_LICENSES,\n 
license_key_and_context=sorted_license_key_and_context,\n license_file_name_and_key=license_file_name_and_key,\n license_key_to_license_name=license_key_to_license_name,\n license_name_to_license_key=license_name_to_license_key,\n utcnow=utcnow, vartext_dict=vartext_dict)\n except Exception as e:\n line = getattr(e, 'lineno', None)\n ln_msg = ' at line: %r' % line if line else ''\n err = getattr(e, 'message', '')\n return 'Template processing error%(ln_msg)s: %(err)r' % locals()\n return rendered", "title": "" }, { "docid": "aed5c37e92bb9b49cd558f5bc3964175", "score": "0.5136518", "text": "def get_all_licenses(self, club):\n\n self.__dbg('Suds', 'ws_Lisenser starting...')\n response = self.__get_all_club_licenses(club)\n self.__dbg('Suds', 'ws_Lisenser ended')\n\n key_list = []\n nak_list = []\n license_list = []\n expiry_list = []\n\n # Oboj, lets pivot that response and build\n # a dictionary\n for data in response:\n\n key = data[0]\n # Make list of the bloody keys\n key_list.append(key)\n\n # Need to check for item exists and evt break!\n try:\n for item in data[1].item: # 0=> string, #1=> array use .item\n\n if key == 'aNummer':\n nak_list.append(int(item))\n elif key == 'aRettighet':\n license_list.append(str(item))\n elif key == 'aExpires':\n expiry_list.append(datetime.datetime.strptime(item, '%Y-%m-%d'))\n else:\n key = ''\n pass\n except:\n pass\n\n tmp_license = 0\n d = {}\n tmp_licenses = []\n curr_nak = 0\n\n # Now lets build the final dictionary {key(aNummer): {'licenses': {[]}, 'expiry': isodate}\n # Keys are the melwin id/membership number\n for key, value in enumerate(nak_list):\n # print(key + 'processing ' + value)\n # Build list of licenses!\n\n # If same as before, drop him!\n if (curr_nak == value):\n continue\n\n else: # If not set as new...\n curr_nak = value\n\n for license_key, license in enumerate(license_list):\n\n if (nak_list[license_key] == curr_nak):\n tmp_licenses.append(license)\n if len(nak_list) == key + 1:\n break\n if nak_list[key + 1] != value:\n break\n\n # set converts list to unique values but we need it to be list, expiry date is datetime.datetime.date object!\n d[int(curr_nak)] = {'rights': list(set(tmp_licenses)),\n 'expiry': datetime.datetime.strptime(expiry_list[key], '%Y-%m-%d')}\n # \"%sT00:00:00CET\" % expiry_list[key]} #  expiry_list[key].isoformat()}\n tmp_licenses = []\n mylist = []\n\n return d", "title": "" }, { "docid": "836100da1461b68d388b7651789b6fa2", "score": "0.5122083", "text": "def get_rpi_list_installed_packages(self):\n packages = self.run_command_get_output('dpkg --get-selections')\n return packages", "title": "" }, { "docid": "717587e68cf3b470cde285e263b9f0d8", "score": "0.51171416", "text": "def packages() -> Iterator[str]:\n return iter(ontology_installer.get_installed_packages())", "title": "" }, { "docid": "0322fe60f887027b5e615fe3d483cd08", "score": "0.51016134", "text": "def all_licenses(request):\n\n # Get the list of license codes and languages that occur among the licenses\n # to let the template iterate over them as it likes.\n legalcode_objects = (\n LegalCode.objects.valid()\n .select_related(\"license\")\n .order_by(\n \"-license__version\",\n \"license__jurisdiction_code\",\n \"language_code\",\n \"license__license_code\",\n )\n )\n legalcodes = [\n dict(\n version=lc.license.version,\n jurisdiction=JURISDICTION_NAMES.get(\n lc.license.jurisdiction_code, lc.license.jurisdiction_code\n ),\n license_code=lc.license.license_code,\n language_code=lc.language_code,\n deed_url=lc.deed_url,\n 
license_url=lc.license_url,\n )\n for lc in legalcode_objects\n ]\n return render(\n request,\n \"all_licenses.html\",\n {\"legalcodes\": legalcodes, \"license_codes\": sorted(BY_LICENSE_CODES)},\n )", "title": "" }, { "docid": "e843484da2bb950278429bdd77f60f82", "score": "0.510115", "text": "def licenses_configured(name, licenses=None):\n ret = {\"name\": name, \"changes\": {}, \"result\": None, \"comment\": \"Default\"}\n if not licenses:\n raise salt.exceptions.ArgumentValueError(\"No licenses provided\")\n cluster_name, datacenter_name = (\n __salt__[\"esxcluster.get_details\"]()[\"cluster\"],\n __salt__[\"esxcluster.get_details\"]()[\"datacenter\"],\n )\n display_name = f\"{datacenter_name}/{cluster_name}\"\n log.info(\"Running licenses configured for '%s'\", display_name)\n log.trace(\"licenses = %s\", licenses)\n entity = {\"type\": \"cluster\", \"datacenter\": datacenter_name, \"cluster\": cluster_name}\n log.trace(\"entity = %s\", entity)\n\n comments = []\n changes = {}\n has_errors = False\n needs_changes = False\n try:\n # Validate licenses\n log.trace(\"Validating licenses\")\n schema = LicenseSchema.serialize()\n try:\n jsonschema.validate({\"licenses\": licenses}, schema)\n except jsonschema.exceptions.ValidationError as exc:\n raise salt.exceptions.InvalidLicenseError(exc)\n\n si = __salt__[\"vsphere.get_service_instance_via_proxy\"]()\n # Retrieve licenses\n existing_licenses = __salt__[\"vsphere.list_licenses\"](service_instance=si)\n # Cycle through licenses\n for license_name, license in licenses.items():\n # Check if license already exists\n filtered_licenses = [l for l in existing_licenses if l[\"key\"] == license]\n # TODO Update license description - not of interest right now\n if not filtered_licenses:\n # License doesn't exist - add and assign to cluster\n needs_changes = True\n if __opts__[\"test\"]:\n # If it doesn't exist it clearly needs to be assigned as\n # well so we can stop the check here\n comments.append(\n \"State {} will add license '{}', and assign it to cluster '{}'.\".format(\n name, license_name, display_name\n )\n )\n log.info(comments[-1])\n continue\n else:\n try:\n existing_license = __salt__[\"vsphere.add_license\"](\n key=license, description=license_name, service_instance=si\n )\n except salt.exceptions.VMwareApiError as ex:\n comments.append(ex.err_msg)\n log.error(comments[-1])\n has_errors = True\n continue\n comments.append(f\"Added license '{license_name}'.\")\n log.info(comments[-1])\n else:\n # License exists let's check if it's assigned to the cluster\n comments.append(\n \"License '{}' already exists. Nothing to be done.\".format(\n license_name\n )\n )\n log.info(comments[-1])\n existing_license = filtered_licenses[0]\n\n log.trace(\"Checking licensed entities...\")\n assigned_licenses = __salt__[\"vsphere.list_assigned_licenses\"](\n entity=entity, entity_display_name=display_name, service_instance=si\n )\n\n # Checking if any of the licenses already assigned have the same\n # name as the new license; the already assigned license would be\n # replaced by the new license\n #\n # Licenses with different names but matching features would be\n # replaced as well, but searching for those would be very complex\n #\n # the name check if good enough for now\n already_assigned_license = (\n assigned_licenses[0] if assigned_licenses else None\n )\n\n if already_assigned_license and already_assigned_license[\"key\"] == license:\n\n # License is already assigned to entity\n comments.append(\n \"License '{}' already assigned to cluster '{}'. 
Nothing to be done.\".format(\n license_name, display_name\n )\n )\n log.info(comments[-1])\n continue\n\n needs_changes = True\n # License needs to be assigned to entity\n\n if existing_license[\"capacity\"] <= existing_license[\"used\"]:\n # License is already fully used\n comments.append(\n \"Cannot assign license '{}' to cluster '{}'. No free capacity\"\n \" available.\".format(license_name, display_name)\n )\n log.error(comments[-1])\n has_errors = True\n continue\n\n # Assign license\n if __opts__[\"test\"]:\n comments.append(\n \"State {} will assign license '{}' to cluster '{}'.\".format(\n name, license_name, display_name\n )\n )\n log.info(comments[-1])\n else:\n try:\n __salt__[\"vsphere.assign_license\"](\n license_key=license,\n license_name=license_name,\n entity=entity,\n entity_display_name=display_name,\n service_instance=si,\n )\n except salt.exceptions.VMwareApiError as ex:\n comments.append(ex.err_msg)\n log.error(comments[-1])\n has_errors = True\n continue\n comments.append(\n \"Assigned license '{}' to cluster '{}'.\".format(\n license_name, display_name\n )\n )\n log.info(comments[-1])\n # Note: Because the already_assigned_license was retrieved\n # from the assignment license manager it doesn't have a used\n # value - that's a limitation from VMware. The license would\n # need to be retrieved again from the license manager to get\n # the value\n\n # Hide license keys\n assigned_license = __salt__[\"vsphere.list_assigned_licenses\"](\n entity=entity, entity_display_name=display_name, service_instance=si\n )[0]\n assigned_license[\"key\"] = \"<hidden>\"\n if already_assigned_license:\n already_assigned_license[\"key\"] = \"<hidden>\"\n if (\n already_assigned_license\n and already_assigned_license[\"capacity\"] == sys.maxsize\n ):\n\n already_assigned_license[\"capacity\"] = \"Unlimited\"\n\n changes[license_name] = {\n \"new\": assigned_license,\n \"old\": already_assigned_license,\n }\n continue\n __salt__[\"vsphere.disconnect\"](si)\n\n ret.update(\n {\n \"result\": True\n if (not needs_changes)\n else None\n if __opts__[\"test\"]\n else False\n if has_errors\n else True,\n \"comment\": \"\\n\".join(comments),\n \"changes\": changes if not __opts__[\"test\"] else {},\n }\n )\n\n return ret\n except salt.exceptions.CommandExecutionError as exc:\n log.error(\"Error: %s\", exc, exc_info=True)\n if si:\n __salt__[\"vsphere.disconnect\"](si)\n ret.update({\"result\": False, \"comment\": exc.strerror})\n return ret", "title": "" }, { "docid": "b9441cb61f80abc385b88eab30d1761f", "score": "0.50896204", "text": "def get_package_versions(lines):\n versions = {}\n for line in lines:\n line = line.strip()\n\n if len(line) == 0 or line.startswith('#') or line.startswith('-r '):\n continue\n\n if line.startswith('https://'):\n continue\n\n name, version_plus = line.split('==', 1)\n versions[name.lower()] = version_plus.split(' ', 1)[0]\n\n return versions", "title": "" }, { "docid": "4ea02313769caf150c6cf93a8ce879ba", "score": "0.506622", "text": "def opkg_query(cmd_output):\n verregex = re.compile(' \\([=<>]* [^ )]*\\)')\n output = dict()\n pkg = \"\"\n arch = \"\"\n ver = \"\"\n filename = \"\"\n dep = []\n pkgarch = \"\"\n for line in cmd_output.splitlines():\n line = line.rstrip()\n if ':' in line:\n if line.startswith(\"Package: \"):\n pkg = line.split(\": \")[1]\n elif line.startswith(\"Architecture: \"):\n arch = line.split(\": \")[1]\n elif line.startswith(\"Version: \"):\n ver = line.split(\": \")[1]\n elif line.startswith(\"File: \") or 
line.startswith(\"Filename:\"):\n filename = line.split(\": \")[1]\n if \"/\" in filename:\n filename = os.path.basename(filename)\n elif line.startswith(\"Depends: \"):\n depends = verregex.sub('', line.split(\": \")[1])\n for depend in depends.split(\", \"):\n dep.append(depend)\n elif line.startswith(\"Recommends: \"):\n recommends = verregex.sub('', line.split(\": \")[1])\n for recommend in recommends.split(\", \"):\n dep.append(\"%s [REC]\" % recommend)\n elif line.startswith(\"PackageArch: \"):\n pkgarch = line.split(\": \")[1]\n\n # When there is a blank line save the package information\n elif not line:\n # IPK doesn't include the filename\n if not filename:\n filename = \"%s_%s_%s.ipk\" % (pkg, ver, arch)\n if pkg:\n output[pkg] = {\"arch\":arch, \"ver\":ver,\n \"filename\":filename, \"deps\": dep, \"pkgarch\":pkgarch }\n pkg = \"\"\n arch = \"\"\n ver = \"\"\n filename = \"\"\n dep = []\n pkgarch = \"\"\n\n if pkg:\n if not filename:\n filename = \"%s_%s_%s.ipk\" % (pkg, ver, arch)\n output[pkg] = {\"arch\":arch, \"ver\":ver,\n \"filename\":filename, \"deps\": dep }\n\n return output", "title": "" }, { "docid": "1a345c94afbe1ca64ba3181ce0099f1c", "score": "0.50559354", "text": "def get_platforms():\n\n global _platforms\n if _platforms is not None:\n return _platforms\n\n # Start with the most generic - the OS\n system = platform.system().lower()\n\n # Initialize list with the system.\n _platforms = [system]\n\n # OS-specific stuff\n if system == \"linux\":\n import distro\n\n dist_id = distro.id()\n _platforms.insert(0, dist_id)\n\n if dist_id == \"ubuntu\":\n # Ubuntu \"minor\" versions are distinct, eg., Ubuntu 16.10\n # is potentially quite different from Ubuntu 16.04. So we\n # want to use the combined version.\n dist_ver = distro.version()\n else:\n # Other supported distros use rolling releases, so eg.\n # Centos 6.5 shouldn't differ importantly from Centos 6.9.\n # Use only the major version number.\n dist_ver = distro.major_version()\n\n _platforms.insert(0, f\"{dist_id}{dist_ver}\")\n _platforms.insert(0, f\"{dist_id}-{dist_ver}\")\n\n if dist_id == \"sles\" or dist_id.startswith(\"opensuse\"):\n # Cbdeps 1.0, at least, refers to all SUSE as \"suse\", so offer\n # those as platform names too\n dist_id = \"suse\"\n _platforms.insert(0, dist_id)\n _platforms.insert(0, f\"{dist_id}{dist_ver}\")\n _platforms.insert(0, f\"{dist_id}-{dist_ver}\")\n\n elif system == \"darwin\":\n _platforms.insert(0, \"macosx\")\n _platforms.insert(0, \"macos\")\n _platforms.insert(0, \"mac\")\n _platforms.insert(0, \"osx\")\n\n elif system == \"windows\":\n # QQQ Somehow introspect MSVC version?\n _platforms.insert(0, \"windows_msvc2015\")\n _platforms.insert(0, \"windows_msvc2017\")\n _platforms.insert(0, \"win\")\n\n return _platforms", "title": "" }, { "docid": "18a1d44f716c9f788a4e6f64004a243a", "score": "0.50526273", "text": "def getavailableyearslist(versionId):\n return {'01': ['0.9x1.25'],\n '28': ['2-20', '50-74', '75-99'], # '2-10',\n '36': ['2-20', '21-40', '60-60', '75-99'], # '2-10', \n 'ga7.66': ['2-20', '20-39', '55-74'],\n '100': ['2-5', '2-7', '2-10', '2-20', '2-21', '10-29'],\n '113': ['0.9x1.25', '2-21'],\n '114': ['2-3', '2-11', '2-21'],\n '116': ['2-3'],\n '118': ['2-11', '2-21'],\n '119': ['2-21', '21-40', '30-49', '75-99'], # '2-9', \n '125': ['2-21', '11-30', '21-40', '70-89', '80-99', # '2-9', \n '100-119'], # '100-109', \n '161': ['1850-1869', '1920-1939', '1980-1999'],\n '194': ['14-33', '15-29', '50-69', '100-119'],\n '195': ['15-29', '50-69', '80-99', '100-119', 
'122-141'],\n }[versionId]", "title": "" }, { "docid": "ff1f79052ba776ff014241772018b2b4", "score": "0.5052253", "text": "def test_public_repos_with_license(self):", "title": "" }, { "docid": "12f1d4a056db91029d88fc3016a44f19", "score": "0.5040462", "text": "def cli():\n\n splat(load_graph('http://creativecommons.org/licenses/index.rdf'))", "title": "" }, { "docid": "7c7b6c012b40cfbc95e3974b87ff5926", "score": "0.5037541", "text": "def generate_rights(metadata):\n rights = []\n\n # Maps license id codes to licenses taxonomy term paths\n licmap = {\n 'CC-BY-4.0': 'cc/4-0/4-by',\n 'CC0-1.0': 'cc/zero/1-0'\n }\n\n license = metadata.pop('license')\n if license:\n rights.append(taxonomy_reference('licenses', licmap[license['id']]))\n\n return rights", "title": "" }, { "docid": "2bf9380019f4ffc7f0f82c2376804982", "score": "0.5037437", "text": "def test_pkg_list_holds():\n\n # Test openSUSE 15.3\n list_locks_mock = {\n \"bar\": {\"type\": \"package\", \"match_type\": \"glob\", \"case_sensitive\": \"on\"},\n \"minimal_base\": {\n \"type\": \"pattern\",\n \"match_type\": \"glob\",\n \"case_sensitive\": \"on\",\n },\n \"baz\": {\"type\": \"package\", \"match_type\": \"glob\", \"case_sensitive\": \"on\"},\n }\n installed_pkgs = {\n \"foo\": [{\"edition\": \"1.2.3-1.1\"}],\n \"bar\": [{\"edition\": \"2.3.4-2.1\", \"epoch\": \"2\"}],\n }\n\n def zypper_search_mock(name, *_args, **_kwargs):\n if name in installed_pkgs:\n return {name: installed_pkgs.get(name)}\n\n with patch.object(\n zypper, \"list_locks\", MagicMock(return_value=list_locks_mock)\n ), patch.object(\n zypper, \"search\", MagicMock(side_effect=zypper_search_mock)\n ), patch.object(\n zypper, \"info_installed\", MagicMock(side_effect=zypper_search_mock)\n ):\n ret = zypper.list_holds()\n assert len(ret) == 1\n assert \"bar-2:2.3.4-2.1.*\" in ret", "title": "" }, { "docid": "2bf9380019f4ffc7f0f82c2376804982", "score": "0.5037437", "text": "def test_pkg_list_holds():\n\n # Test openSUSE 15.3\n list_locks_mock = {\n \"bar\": {\"type\": \"package\", \"match_type\": \"glob\", \"case_sensitive\": \"on\"},\n \"minimal_base\": {\n \"type\": \"pattern\",\n \"match_type\": \"glob\",\n \"case_sensitive\": \"on\",\n },\n \"baz\": {\"type\": \"package\", \"match_type\": \"glob\", \"case_sensitive\": \"on\"},\n }\n installed_pkgs = {\n \"foo\": [{\"edition\": \"1.2.3-1.1\"}],\n \"bar\": [{\"edition\": \"2.3.4-2.1\", \"epoch\": \"2\"}],\n }\n\n def zypper_search_mock(name, *_args, **_kwargs):\n if name in installed_pkgs:\n return {name: installed_pkgs.get(name)}\n\n with patch.object(\n zypper, \"list_locks\", MagicMock(return_value=list_locks_mock)\n ), patch.object(\n zypper, \"search\", MagicMock(side_effect=zypper_search_mock)\n ), patch.object(\n zypper, \"info_installed\", MagicMock(side_effect=zypper_search_mock)\n ):\n ret = zypper.list_holds()\n assert len(ret) == 1\n assert \"bar-2:2.3.4-2.1.*\" in ret", "title": "" }, { "docid": "778d54a8cff3f0c409075aecf63df1c1", "score": "0.5034289", "text": "def construct_tree(index):\n return dict((p, [ReqPackage(r, index.get(r.key))\n for r in p.requires()])\n for p in index.values())", "title": "" }, { "docid": "5e237cc7020b3aa9d266f9fa0d7b17cf", "score": "0.5032669", "text": "def linuxServerGetGlobalLicense(self, linuxServerIp):\n staticUrl = 'https://{linuxServerIp}/api/v1/sessions/9999/ixnetworkglobals/license'.format(linuxServerIp=linuxServerIp)\n self.logInfo('linuxServerGetGlobalLicense: %s ' % linuxServerIp)\n response = self.get(staticUrl, silentMode=False)\n licenseServerIp = 
response.json()['servers'][0]\n licenseServerMode = response.json()['mode']\n licenseServerTier = response.json()['tier']\n self.logInfo('linuxServerGetGlobalLicenses:')\n self.logInfo('\\t%s' % licenseServerIp)\n self.logInfo('\\t%s' % licenseServerMode)\n self.logInfo('\\t%s' % licenseServerTier)\n return licenseServerIp,licenseServerMode,licenseServerTier", "title": "" }, { "docid": "69c69b2382c1d6677707390abb1d8213", "score": "0.50242245", "text": "def lsb_release():\n d = {}\n with open('/etc/lsb-release', 'r') as lsb:\n for el in lsb:\n k, v = el.split('=')\n d[k.strip()] = v.strip()\n return d", "title": "" }, { "docid": "f26770a19ca14a16de4433a9b5874d43", "score": "0.5023234", "text": "def info():\n print(\"Architectures:\")\n for arch in supported_architectures():\n print(\"-\", arch)\n print(\"Distributions:\")\n for dist in supported_distributions(with_aliases=False):\n print(\"-\", dist)\n print(\"DEB providers:\")\n for prov in supported_deb_providers():\n print(\"-\", prov)", "title": "" }, { "docid": "73dd682b9cad2d6343ffa8cde5487f03", "score": "0.5022963", "text": "def test_enumerate_licenses_ok(self, mock_get):\n\n expected = [\n {\n \"key\": \"XXXXXXXXXXXXXXXXXXXXXXXXA\",\n \"registration\": 0,\n \"type\": \"Standard license for NSS, High Availability, HotZone, SafeCache, \\\nService Enabled Disk, Zero-Impact Backup Enabler\"\n },\n {\n \"key\": \"XXXXXXXXXXXXXXXXXXXXXXXXB\",\n \"registration\": 0,\n \"type\": \"Standard license for NSS, Base (8 iSCSI ports, Unlimited Client \\\nConnections, 256 Snapshot, Email Alerts, Mirroring, Replication, iSCSI Boot, Multi-pathing), \\\n8 FC Ports, Unlimited TimeMarks and Snapshot Copy\"\n },\n {\n \"key\": \"XXXXXXXXXXXXXXXXXXXXXXXXC\",\n \"registration\": 0,\n \"type\": \"Standard license for NSS, 5 TB storage capacity\"\n },\n {\n \"key\": \"XXXXXXXXXXXXXXXXXXXXXXXXD\",\n \"registration\": 0,\n \"type\": \"Standard license for NSS, 5 TB storage capacity\"\n },\n {\n \"key\": \"XXXXXXXXXXXXXXXXXXXXXXXXE\",\n \"registration\": 0,\n \"type\": \"Standard license for NSS, 5 TB storage capacity\"\n }\n ]\n\n mock_get.return_value = load_json('tests/licenses.json')\n licenses = self.cdp.enumerate_licenses()\n\n self.assertListEqual(expected, licenses)", "title": "" }, { "docid": "01f60de44ebff3d2907d59eeb9689dca", "score": "0.50172246", "text": "def builtin_corpora_info(with_paths: bool = False) -> Union[List[str], Dict[str, str]]:\n\n corpora = {}\n\n for fpath in glob(os.path.join(DATAPATH, '**/*.zip')):\n pathcomp = path_split(fpath)\n basename, _ = os.path.splitext(pathcomp[-1])\n\n corpora[pathcomp[-2] + '-' + basename] = os.path.abspath(fpath)\n\n if with_paths:\n return corpora\n else:\n return sorted(corpora.keys())", "title": "" }, { "docid": "ac647bfae03b600b98d0803e4b497717", "score": "0.5016071", "text": "def _load_opkg_from_req(self, *files):\n opkgs = []\n # Loop through the passed in files to support multiple requirements files\n for file in files:\n with open(file, \"r\") as f:\n for row in f.readlines():\n # Ignore commented lines and empty lines\n stripped = row.strip()\n if stripped and not stripped.startswith(\"#\"):\n # Add the package to the list of packages (and remove leading and trailing whitespace)\n opkgs.append(stripped)\n return opkgs", "title": "" }, { "docid": "5bf2d2ce8b5ff40171ea8391e2a81242", "score": "0.50155103", "text": "def _extractProjectDistributionPackagesRequirements(self, artefact, node_name):\n\t\t# collect vertices and edges\n\t\tvertices = {}\n\t\tedges = {}\n\t\tvertices[node_name] = 
{}\n\t\tedges[node_name] = {}\n\n\t\tfor prefix_unit in artefact[\"data\"]:\n\t\t\t# vertices\n\t\t\tvertices[node_name][\"devel\"] = []\n\t\t\tfor package in prefix_unit[\"packages\"]:\n\t\t\t\tvertices[node_name][\"devel\"].append(package)\n\n\t\t\t# edges\n\t\t\tedges[node_name][\"devel\"] = []\n\t\t\tfor dependencies in prefix_unit[\"dependencies\"]:\n\t\t\t\tedges[node_name][\"devel\"] = edges[node_name][\"devel\"] + map(lambda l: (dependencies[\"package\"], l[\"name\"]), dependencies[\"dependencies\"])\n\n\t\t\t# main packages\n\t\t\tvertices[node_name][\"main\"] = []\n\t\t\tedges[node_name][\"main\"] = []\n\t\t\tfor main in prefix_unit[\"main\"]:\n\t\t\t\t# dirname from filename says in which package the dependencies are required/imported\n\t\t\t\tpkg = os.path.dirname(main[\"filename\"])\n\t\t\t\tvertices[node_name][\"main\"].append(pkg)\n\t\t\t\tedges[node_name][\"main\"] = edges[node_name][\"main\"] + map(lambda l: (pkg, l), main[\"dependencies\"])\n\t\t\t# one directory can have multiple filename import the same package\n\t\t\tedges[node_name][\"main\"] = list(set(edges[node_name][\"main\"]))\n\n\t\t\t# unit-tests\n\t\t\tvertices[node_name][\"tests\"] = []\n\t\t\tedges[node_name][\"tests\"] = []\n\t\t\tfor test in prefix_unit[\"tests\"]:\n\t\t\t\tvertices[node_name][\"tests\"].append(test[\"test\"])\n\t\t\t\tedges[node_name][\"tests\"] = edges[node_name][\"tests\"] + map(lambda l: (test[\"test\"], l), test[\"dependencies\"])\n\n\t\treturn (vertices, edges)", "title": "" }, { "docid": "833fba0c105bb6ecae21181d1350b8b2", "score": "0.4999383", "text": "def add_license(filename, license_info):\n\n fork = ResourceFork.from_file(filename)\n\n default_lang = license_info.get('default-language', 'en_US')\n default_lang_id = region_codes.get(default_lang, 0)\n\n lpic = []\n ndx = 1\n for language,license_data in license_info['licenses'].items():\n if language not in region_codes:\n raise Exception(\"Unknown language '\" + language + \"'. 
Valid languages are: \" +\n \", \".join(sorted(region_codes.keys())))\n encoding_name = get_encoder_name(language)\n lang_id = region_codes[language]\n\n is_two_byte = lang_id in (14, 51, 52, 53) # Japanese, Korean, SimpChinese, TradChinese\n\n if os.path.isfile(license_data):\n with open(license_data) as f:\n license_data = f.read()\n\n if type(license_data) == bytes and license_data.startswith(b'{\\\\rtf1'):\n fork.add(Resource(b'RTF ', 5000 + ndx, (language + ' SLA').encode(),\n license_data))\n else:\n fork.add(TextResource(5000 + ndx, (language + ' SLA').encode(),\n maybe_encode(license_data, encoding_name)))\n fork.add(StyleResource(5000 + ndx, (language + ' SLA').encode(),\n [Style(0, 12, 9, Style.Helvetica,\n 0, 0, (0, 0, 0))]))\n\n buttons = license_info.get('buttons', {}).get(language, None)\n if buttons is None:\n buttons = default_buttons.get(lang_id, None)\n if buttons is None:\n buttons = default_buttons[0]\n\n buttons = [maybe_encode(b, encoding_name) for b in buttons]\n\n fork.add(StringListResource(5000 + ndx, (language + ' Buttons').encode(), buttons))\n\n lpic.append((lang_id, ndx, is_two_byte))\n\n ndx += 1\n\n fork.add(LPicResource(5000, None, default_lang_id, lpic))\n\n fork.write_to_file(filename)", "title": "" }, { "docid": "93198960dbff1a891af641e06c24f7fb", "score": "0.49972197", "text": "def test_create_license_license_info(self):\n pass", "title": "" }, { "docid": "44adbae37150946ff053f3b3307da018", "score": "0.49684665", "text": "def get_software_file_paths():", "title": "" }, { "docid": "5c7162a3d255999fa6f54e8c01d5aed1", "score": "0.49588218", "text": "def extract_keys():\n keys = {}\n\n # Extract keys\n extract_item('ProduKey', silent=True)\n for hive in find_software_hives():\n cmd = [\n global_vars['Tools']['ProduKey'],\n '/IEKeys', '0',\n '/WindowsKeys', '1',\n '/OfficeKeys', '1',\n '/ExtractEdition', '1',\n '/nosavereg',\n '/regfile', hive,\n '/scomma', '']\n try:\n out = run_program(cmd)\n except subprocess.CalledProcessError:\n # Ignore and return empty dict\n pass\n else:\n for line in out.stdout.decode().splitlines():\n # Add key to keys under product only if unique\n tmp = line.split(',')\n product = tmp[0]\n key = tmp[2]\n if product not in keys:\n keys[product] = []\n if key not in keys[product]:\n keys[product].append(key)\n\n # Done\n return keys", "title": "" }, { "docid": "56a0e7ee8e6bfce580abe65902d2068d", "score": "0.4950504", "text": "def get_installed_packages():\n installed = {}\n\n for entry in Package.select():\n installed.setdefault(entry.name, {\n 'formula_name': entry.formula_name,\n 'version': entry.version,\n 'updated_at': entry.updated_at,\n })\n\n return installed", "title": "" }, { "docid": "a8fdbcfef8d8bcbea76fb84816d19161", "score": "0.49491614", "text": "def _cross_provider_maps(lmap, rmap):\n # TODO: this is pretty darned nasty, and inefficient, but there\n # TODO: are not that many vdeps in most specs.\n result = {}\n for lspec, rspec in itertools.product(lmap, rmap):\n try:\n constrained = lspec.constrained(rspec)\n except spack.error.UnsatisfiableSpecError:\n continue\n\n # lp and rp are left and right provider specs.\n for lp_spec, rp_spec in itertools.product(lmap[lspec], rmap[rspec]):\n if lp_spec.name == rp_spec.name:\n try:\n const = lp_spec.constrained(rp_spec, deps=False)\n result.setdefault(constrained, set()).add(const)\n except spack.error.UnsatisfiableSpecError:\n continue\n return result", "title": "" }, { "docid": "4be2632e71674b68bca5e6b6a56c3614", "score": "0.49488005", "text": "def dependencies():\n\n def 
gen():\n logger = logging.getLogger(\"npm.utils.package_info\")\n for package in _pkginfo_iterator():\n logger.info(\"Processing %s\", package['key'])\n \"\"\" possible sources of release date:\n - ['doc']['time'][<ver>] - best source, sometimes missing\n - ['doc']['versions'][<ver>]['ctime|mtime'] # e.g. Graph\n - ['doc']['time']['modified|created'] # e.g. stack-component\n - ['doc']['ctime|mtime'] # e.g. Lingo\n - empty # JSLint-commonJS\n \"\"\"\n for version, release in package['doc'].get('versions', {}).items():\n deps = release.get('dependencies') or {}\n deps = {dep.decode(\"utf8\"): ver\n for dep, ver in deps.items()}\n time = json_path(package, 'doc', 'time', version) or \\\n json_path(release, 'ctime') or \\\n json_path(release, 'mtime') or \\\n json_path(package, 'doc', 'time', 'created') or \\\n json_path(package, 'doc', 'time', 'modified') or \\\n None\n\n yield {\n 'name': package['key'],\n 'version': version,\n 'date': time,\n 'deps': \",\".join(deps.keys()),\n 'raw_dependencies': json.dumps(deps)\n }\n\n return pd.DataFrame(gen()).sort_values(\n ['name', 'date']).set_index('name', drop=True)", "title": "" }, { "docid": "2e1e2cfe778eb330c5b84186ddffbe4b", "score": "0.49430865", "text": "def pkg():", "title": "" }, { "docid": "1320792ff4759abdb54f660b84dba7ff", "score": "0.49377304", "text": "def packages_info():\n # type: () -> pd.DataFrame\n\n def gen():\n logger = logging.getLogger(\"npm.utils.package_info\")\n for package in _pkginfo_iterator():\n logger.info(\"Processing %s\", package['key'])\n repo = _get_field(package['doc'].get('repository'), 'url') or \\\n _get_field(package['doc'].get('homepage'), 'url') or \\\n _get_field(package['doc'].get('bugs'), 'url') or str(package)\n\n m = repo and scraper.URL_PATTERN.search(repo)\n\n yield {\n 'name': package['key'],\n 'url': m and m.group(0),\n 'author': _get_field(package['doc'].get('author', {}), 'email'),\n 'license': json_path(package, 'doc', 'license')\n }\n return pd.DataFrame(gen()).set_index('name', drop=True)", "title": "" }, { "docid": "cef41d62d556ba2eae88260da975f2b0", "score": "0.4930375", "text": "def mender_dist_packages_versions(request):\n\n return {\n \"mender-client\": request.config.getoption(\"--mender-client-deb-version\"),\n \"mender-connect\": request.config.getoption(\"--mender-connect-deb-version\"),\n \"mender-configure\": request.config.getoption(\"--mender-configure-deb-version\"),\n }", "title": "" }, { "docid": "f4491ca04c5ada21c088b531504c800d", "score": "0.49294597", "text": "def pkg_management(self):\n return []", "title": "" }, { "docid": "3aad4b474e6499c70868469e3efb7768", "score": "0.49270192", "text": "def test_patch_license_license_info(self):\n pass", "title": "" }, { "docid": "eb91904f82042ec686363e3dd87dd632", "score": "0.4910295", "text": "def get_packages(config):\n requirements = []\n for section in config.sections():\n packages = []\n for key, val in config.items(section):\n packages += list(filter(None, (x.strip().lower() for x in val.splitlines())))\n packages.sort()\n requirements.append(packages)\n\n return tuple(requirements)", "title": "" }, { "docid": "8d73cbb63b2e5284309a5478c68081b7", "score": "0.49028406", "text": "def test_get_supported_cloud_identifiers_with_prereqs(prereqs):\n config_status = {\n \"providers\": [\n {\n \"key\": \"WatchmakerStatus\",\n \"required\": False,\n \"provider_type\": \"aws\",\n },\n {\n \"key\": \"WatchmakerStatus\",\n \"required\": False,\n \"provider_type\": \"azure\",\n },\n {\n \"key\": \"WatchmakerStatus\",\n \"required\": False,\n 
\"provider_type\": \"gcp\",\n },\n ]\n }\n\n ids = get_sup_cloud_ids_w_prereqs(config_status)\n\n assert ids is not None\n assert \"aws\" in ids\n assert \"azure\" in ids\n assert \"gcp\" not in ids\n assert \"none\" not in ids", "title": "" }, { "docid": "61e7f0b57a701e3c1e1b5cd1fe7d81ee", "score": "0.48957515", "text": "def generateLicensees():\n post_data = request.get_json(force=True)\n api_key=request.headers.get('X-API-KEY')\n num_licensees = post_data.get('NumLicensees', None)\n \n if api_key is None or api_key != API_KEY:\n return jsonify(\n status=False,\n message='Invalid API Key!'\n ), 403\n\n num_licensees = registry.generate_licensees(num_licensees)\n \n return jsonify(\n status=True,\n message=str(num_licensees) + ' generated!'\n ), 201", "title": "" }, { "docid": "df3ac0ce8c1764a07090016a12b6e015", "score": "0.48955226", "text": "def licenses(self):\n\n return [n for n in self.store.objects(subject=self.subject,\n predicate=cc.license)\n ]", "title": "" }, { "docid": "869b7e0611bae2c0f262573d060d2e45", "score": "0.48857072", "text": "def _get_gpg_pubkey_packages(rpms):\n return rpms.packages.get(\"gpg-pubkey\", [])", "title": "" }, { "docid": "741ec322430736a162cbb6da633bffe9", "score": "0.48833567", "text": "def packages(self):\n return list(self._packages_by_name.values())", "title": "" }, { "docid": "6d18ddf0bc8065f199d6d00f949c5448", "score": "0.48726177", "text": "def write_package_info_content(outfile):\n text = ['Included Python Packages\\n', 24 * '=' + '\\n', '\\n']\n\n KEY_MAP = {\n \"Name\": 'name',\n \"Version\": 'version',\n \"Home-page\": 'homepage',\n \"License\": 'license',\n }\n empty_info = {}\n for key, name in KEY_MAP.items():\n empty_info[name] = \"\"\n\n for pkg_name in sorted(SITE_PACKAGES):\n pkg = pkg_resources.get_distribution(pkg_name)\n\n info = copy.deepcopy(empty_info)\n try:\n lines = pkg.get_metadata_lines('METADATA')\n except (KeyError, IOError):\n lines = pkg.get_metadata_lines('PKG-INFO')\n for line in lines:\n try:\n key, value = line.split(': ', 1)\n if key in KEY_MAP and info['license'] == '':\n info[KEY_MAP[key]] = value\n except ValueError:\n pass\n text.append(\"{0}-{1}\\nLicense: {2}\\nURL: {3}\\n\\n\".format(\n info['name'],\n info['version'],\n info['license'],\n info['homepage']))\n\n with open(outfile, \"w\") as f:\n f.writelines(text)", "title": "" }, { "docid": "dceb53855cbf42fb7c3bb7c99901f5d4", "score": "0.48653457", "text": "def conda_packages_for_create(self):\n if self._lock_set is not None and self._lock_set.enabled and self._lock_set.supports_current_platform:\n return self._lock_set.package_specs_for_current_platform\n else:\n return self.conda_packages", "title": "" }, { "docid": "2f1d65c33622a586a51918a735b7f073", "score": "0.48601252", "text": "def install_requires():\n return reqs('default.txt')", "title": "" }, { "docid": "f13a64ce53def46b50fbd93972b3eaca", "score": "0.48544222", "text": "def get_builtlist(self) -> list:\n builtlist = []\n c = self.conn.cursor()\n result = c.execute('SELECT `package`, `version` FROM `builtpkg`;').fetchall()\n for i in result:\n builtlist.append(dict(package=i[0], version=i[1]))\n return builtlist", "title": "" }, { "docid": "3fd35ecb785574ac8b8446cdc5444775", "score": "0.48470312", "text": "def parse_packages(lines, repo_url):\n\n pkgs = defaultdict(list)\n current_pkg = {}\n\n for l in lines:\n # Empty lines delimit package stanzas.\n if not l:\n assert current_pkg\n assert 'Package' in current_pkg\n current_pkg['_RepoUrl'] = repo_url\n 
pkgs[current_pkg['Package']].append(current_pkg)\n current_pkg = {}\n continue\n\n try:\n key, value = l.split(\": \", 1)\n except:\n # TODO(nico): support line continuations?\n continue\n\n # Store the current key/value pair\n if key in ('Package', 'Version', 'Filename', 'SHA256'):\n current_pkg[key] = value\n\n return pkgs", "title": "" }, { "docid": "6de19f126fb64588077b53f4dfb9bf4c", "score": "0.48421174", "text": "def _getIpprefix2RpmArtefact(self, build, product, info_artefact, packages_artefacts):\n\t\tartefacts = []\n\t\tfor rpm in packages_artefacts:\n\t\t\t# Filter out all non-devel rpms\n\t\t\tname = Rpm(build, rpm).name()\n\t\t\tif name.endswith(\"unit-test-devel\") or name.endswith(\"unit-test\"):\n\t\t\t\tcontinue\n\n\t\t\tfor prefix in packages_artefacts[rpm][\"data\"]:\n\t\t\t\t artefacts.append({\n\t\t\t\t\t\"artefact\": ARTEFACT_GOLANG_IPPREFIX_TO_RPM,\n\t\t\t\t\t\"ipprefix\": prefix[\"ipprefix\"],\n\t\t\t\t\t\"commit\": info_artefact[\"commit\"],\n\t\t\t\t\t\"rpm\": rpm,\n\t\t\t\t\t\"product\": product,\n\t\t\t\t\t\"distribution\": info_artefact[\"distribution\"],\n\t\t\t\t\t\"build\": build\n\t\t\t\t})\n\n\t\treturn artefacts", "title": "" }, { "docid": "accd440f0813e1f7aee2f7aaa03f077b", "score": "0.48374054", "text": "def packages_by_name(self):\n return collections.OrderedDict(self._packages_by_name)", "title": "" }, { "docid": "28fd518f8bd93fade90dd5b2c4bb84bf", "score": "0.4834796", "text": "def get_map(cls, raw_data_path: str):\n data = {}\n with gzip.open(raw_data_path) as f_handle:\n pkg_data = f_handle.readline()\n while pkg_data:\n pkg_data = pkg_data.decode('UTF-8')\n if pkg_data.find('/') != -1:\n pkg_data = pkg_data.split()\n if len(pkg_data) > 1:\n if len(pkg_data) > 2:\n pkg_data = [\" \".join(pkg_data[:-1]), pkg_data[-1]]\n for package in pkg_data[1].split(','):\n if package in data:\n data[package].append(pkg_data[0])\n else:\n data[package] = []\n data[package].append(pkg_data[0])\n else:\n print(f'Outlier: {pkg_data}')\n pkg_data = f_handle.readline()\n return data", "title": "" }, { "docid": "45c4d3ea9904d845d750f7fc19290e6a", "score": "0.4832836", "text": "def display_packages(self):\n for pkg_name,pkg_version in self.dict_robot_pkgs.items():\n print(\"Package name:\" + pkg_name + \" \" + pkg_version)\n for pkg_name in self.tree_of_includes_os:\n print(\"OS package name:\" + pkg_name)", "title": "" }, { "docid": "07c3c3dcdf5719faf54a2c707aa87703", "score": "0.48250607", "text": "def test_pkg_get(self):\n debian_list = \"\"\"\ng++\ng++-4.9\ng++-5\ngawk\ngcc\ngcc-4.9\ngcc-4.9-base:amd64\ngcc-4.9-base:i386\ngcc-5\ngcc-5-base:amd64\ngcc-5-base:i386\ngcc-6-base:amd64\ngcc-6-base:i386\n\"\"\"\n inspector = Inspector(cachedir=\"/test\", piddir=\"/test\", pidfilename=\"bar.pid\")\n inspector.grains_core = MagicMock()\n inspector.grains_core.os_data = MagicMock()\n inspector.grains_core.os_data.get = MagicMock(return_value=\"Debian\")\n with patch.object(\n inspector, \"_Inspector__get_cfg_pkgs_dpkg\", MagicMock(return_value=\"dpkg\")\n ):\n with patch.object(\n inspector, \"_Inspector__get_cfg_pkgs_rpm\", MagicMock(return_value=\"rpm\")\n ):\n inspector.grains_core = MagicMock()\n inspector.grains_core.os_data = MagicMock()\n inspector.grains_core.os_data().get = MagicMock(return_value=\"Debian\")\n self.assertEqual(inspector._get_cfg_pkgs(), \"dpkg\")\n inspector.grains_core.os_data().get = MagicMock(return_value=\"Suse\")\n self.assertEqual(inspector._get_cfg_pkgs(), \"rpm\")\n inspector.grains_core.os_data().get = MagicMock(return_value=\"redhat\")\n 
self.assertEqual(inspector._get_cfg_pkgs(), \"rpm\")", "title": "" }, { "docid": "81a34317ca931eb0dca4166ae26c4431", "score": "0.4824793", "text": "def OTIS() -> list:\n return ['CATS0201','CATS2008','CATS2008_load','TPXO9.1',\n 'TPXO7.2','TPXO7.2_load','AODTM-5','AOTIM-5',\n 'AOTIM-5-2018','Arc2kmTM','Gr1kmTM','Gr1km-v2']", "title": "" }, { "docid": "53f00a220516891556286f8ebf966e32", "score": "0.48184577", "text": "def get_package_list():\n cabal_args = ['cabal', 'list', '--installed', '--simple-output']\n cabal = subprocess.check_output(cabal_args, universal_newlines=True)\n\n package_list = [pkg.split(' ') for pkg in cabal.strip('\\n').split('\\n')]\n package_map = collections.defaultdict(list)\n for name, version in package_list:\n package_map[name].append(version)\n package_list = [(name, versions) for name, versions in package_map.items()]\n package_list.sort(key=lambda x: x[0])\n return package_list", "title": "" }, { "docid": "9f50e8dc5aa6b891232599f4a81a3ab7", "score": "0.4806096", "text": "def available(self):\n available = {}\n # What is available is what I require plus what I provide.\n # 'compiler' is the only key that may be overridden.\n available.update(self.requires)\n available.update(self.provides)\n return available", "title": "" }, { "docid": "2cd93243d6ab7860ad4e06962a67f183", "score": "0.48031905", "text": "def get_pkg_list(self, path):\n if os.path.isfile(path):\n with open(path, 'r') as f:\n repodata = f.read()\n else:\n return\n\n repodata = json.loads(repodata)\n pkgs = repodata['packages'].keys()\n return pkgs", "title": "" }, { "docid": "1e418666a6e14f9a44b13c7e80c82d14", "score": "0.4802355", "text": "def provides(self):\n provides = {}\n\n # Treat the 'compiler' case in a special way, as compilers are not\n # virtual dependencies in spack\n\n # If it is in the list of supported compilers family -> compiler\n if self.spec.name in spack.compilers.supported_compilers():\n provides[\"compiler\"] = spack.spec.CompilerSpec(str(self.spec))\n # Special case for llvm\n if self.spec.name == \"llvm\":\n provides[\"compiler\"] = spack.spec.CompilerSpec(str(self.spec))\n provides[\"compiler\"].name = \"clang\"\n # Special case for llvm-amdgpu\n if self.spec.name == \"llvm-amdgpu\":\n provides[\"compiler\"] = spack.spec.CompilerSpec(str(self.spec))\n provides[\"compiler\"].name = \"rocmcc\"\n # Special case for oneapi\n if self.spec.name == \"intel-oneapi-compilers\":\n provides[\"compiler\"] = spack.spec.CompilerSpec(str(self.spec))\n provides[\"compiler\"].name = \"oneapi\"\n # Special case for oneapi classic\n if self.spec.name == \"intel-oneapi-compilers-classic\":\n provides[\"compiler\"] = spack.spec.CompilerSpec(str(self.spec))\n provides[\"compiler\"].name = \"intel\"\n\n # All the other tokens in the hierarchy must be virtual dependencies\n for x in self.hierarchy_tokens:\n if self.spec.package.provides(x):\n provides[x] = self.spec[x]\n return provides", "title": "" }, { "docid": "ae48d92e249d7484cff70575c83853ac", "score": "0.48015103", "text": "def get_package_info(self, pkg):\n depends = []\n pkg_name = pkg[0]\n pkg_ver, depends = self.get_BioC_info(pkg)\n if pkg_ver == 'not found':\n pkg_ver, depends = self.get_CRAN_info(pkg)\n if pkg_ver == 'not found':\n return 'not found', [], []\n else:\n pkg[2]['R_source'] = 'ext_options'\n else:\n pkg[2]['R_source'] = 'bioconductor_options'\n return 'ok', (pkg_name, pkg_ver), depends", "title": "" }, { "docid": "d89dc61e5c0aa83a487128414537929c", "score": "0.47986478", "text": "def pkg_info():\n try:\n doc = 
__doc__.decode(\"UTF-8\")\n except (AttributeError, UnicodeError):\n doc = __doc__ # Python3, or some strangeness\n\n return dict(\n )", "title": "" }, { "docid": "92d675a86a12bc3bfd5a09e16e2bea74", "score": "0.47879678", "text": "def user_licenses(self):\n return self._user_licenses", "title": "" }, { "docid": "be7905cd324feb3c150382a6e11843a7", "score": "0.47841573", "text": "def get_uname(self):\n key_list = ['sysname', 'nodename', 'release',' version', 'machine']\n val_list = map(lambda x:x, os.uname())\n return dict(zip(key_list, val_list))", "title": "" }, { "docid": "ef195ebf7b00b0198bb0a5a9cc2cd825", "score": "0.4781001", "text": "def extract_software_reqs(\n process: cwl.Process,\n) -> Iterator[cwl.SoftwareRequirement]:\n if process.requirements:\n for req in process.requirements:\n if isinstance(req, cwl.SoftwareRequirementTypes):\n yield req\n if process.hints:\n for req in process.hints:\n if isinstance(req, cwl.SoftwareRequirementTypes):\n yield req", "title": "" }, { "docid": "66a00df7fc8c1647441925813668d9d7", "score": "0.47798595", "text": "def find_installed(self):\n # We get all of packages and check the names here since the rpm\n # command on this system could be old and not support wildcards.\n self.installed = {}\n output = rpm('--query', '--all',\n '--queryformat', '%{NAME} %{VERSION} %{RELEASE} %{ARCH}\\\\n')\n for line in output:\n name,version,release,arch = line.split()\n if name.startswith('kmod-openafs') or name.startswith('openafs'):\n self.installed[name] = {\n 'version': version,\n 'release': release,\n 'arch': arch}\n return self.installed", "title": "" }, { "docid": "1d7fedbd9ab472980821ab73e473b38a", "score": "0.4777971", "text": "def pkg_as_json(pkg):\n result = {\n 'name': pkg.name,\n 'ensure': pkg.evr,\n 'platform': pkg.arch}\n return result", "title": "" }, { "docid": "296583691694faef8497bd31e38a3a19", "score": "0.4765466", "text": "def get_kernel_rpms():\n rpms = next(api.consume(InstalledRedHatSignedRPM), InstalledRedHatSignedRPM())\n return sorted([pkg for pkg in rpms.items if pkg.name == 'kernel'], key=get_kernel_rpm_release)", "title": "" }, { "docid": "8955808cbe42dddc9eb4c373a67154b4", "score": "0.47603106", "text": "def _loadProvidesByKey(self, pkgkey, package):\n allprovides = []\n cursorSub = self.mDBConnection.cursor()\n sqprov = \"select name, flags, epoch, version, release from provides where pkgkey=?\"\n resprov = cursorSub.execute(sqprov, [ pkgkey ])\n for (name, flags, epoch, version, release) in resprov:\n prov = Provides(name, version, release, epoch, flags, package)\n allprovides.append(prov)\n cursorSub.close()\n return allprovides", "title": "" }, { "docid": "aa4765df52ed2c28f743576ae131afbd", "score": "0.47585893", "text": "def createLinuxSpecifics():\n global distDir, sourceDir\n\n dataSourceDir = os.path.join(eric7SourceDir, \"data\", \"linux\")\n\n if distDir:\n dst = os.path.normpath(os.path.join(distDir, \"usr/share/icons\"))\n if not os.path.exists(dst):\n os.makedirs(dst)\n shutilCopy(\n os.path.join(eric7SourceDir, \"pixmaps\", \"eric_icon.png\"),\n os.path.join(dst, \"eric.png\"),\n )\n shutilCopy(\n os.path.join(eric7SourceDir, \"pixmaps\", \"ericWeb48_icon.png\"),\n os.path.join(dst, \"ericWeb.png\"),\n )\n\n dst = os.path.normpath(\n os.path.join(distDir, \"usr/share/icons/hicolor/48x48/apps\")\n )\n if not os.path.exists(dst):\n os.makedirs(dst)\n shutilCopy(\n os.path.join(eric7SourceDir, \"pixmaps\", \"eric48_icon.png\"),\n os.path.join(dst, \"eric.png\"),\n )\n shutilCopy(\n os.path.join(eric7SourceDir, 
\"pixmaps\", \"ericWeb48_icon.png\"),\n os.path.join(dst, \"ericWeb.png\"),\n )\n\n dst = os.path.normpath(os.path.join(distDir, \"usr/share/applications\"))\n if not os.path.exists(dst):\n os.makedirs(dst)\n copyDesktopFile(\n os.path.join(dataSourceDir, \"eric7.desktop.in\"),\n os.path.join(dst, \"eric7.desktop\"),\n )\n copyDesktopFile(\n os.path.join(dataSourceDir, \"eric7_browser.desktop.in\"),\n os.path.join(dst, \"eric7_browser.desktop\"),\n )\n\n dst = os.path.normpath(os.path.join(distDir, \"usr/share/metainfo\"))\n if not os.path.exists(dst):\n os.makedirs(dst)\n copyAppStreamFile(\n os.path.join(dataSourceDir, \"eric7.appdata.xml.in\"),\n os.path.join(dst, \"eric7.appdata.xml\"),\n )\n elif os.getuid() == 0:\n shutilCopy(\n os.path.join(eric7SourceDir, \"pixmaps\", \"eric_icon.png\"),\n \"/usr/share/icons/eric.png\",\n )\n shutilCopy(\n os.path.join(eric7SourceDir, \"pixmaps\", \"eric48_icon.png\"),\n \"/usr/share/icons/hicolor/48x48/apps/eric.png\",\n )\n copyDesktopFile(\n os.path.join(dataSourceDir, \"eric7.desktop.in\"),\n \"/usr/share/applications/eric7.desktop\",\n )\n if os.path.exists(\"/usr/share/metainfo\"):\n copyAppStreamFile(\n os.path.join(dataSourceDir, \"eric7.appdata.xml.in\"),\n \"/usr/share/metainfo/eric7.appdata.xml\",\n )\n elif os.path.exists(\"/usr/share/appdata\"):\n copyAppStreamFile(\n os.path.join(dataSourceDir, \"eric7.appdata.xml.in\"),\n \"/usr/share/appdata/eric7.appdata.xml\",\n )\n shutilCopy(\n os.path.join(eric7SourceDir, \"pixmaps\", \"ericWeb48_icon.png\"),\n \"/usr/share/icons/ericWeb.png\",\n )\n shutilCopy(\n os.path.join(eric7SourceDir, \"pixmaps\", \"ericWeb48_icon.png\"),\n \"/usr/share/icons/hicolor/48x48/apps/ericWeb.png\",\n )\n copyDesktopFile(\n os.path.join(dataSourceDir, \"eric7_browser.desktop.in\"),\n \"/usr/share/applications/eric7_browser.desktop\",\n )\n elif os.getuid() >= 1000:\n # it is assumed, that user ids start at 1000\n localPath = os.path.join(os.path.expanduser(\"~\"), \".local\", \"share\")\n # create directories first\n for directory in [\n os.path.join(localPath, name)\n for name in (\n \"icons\",\n \"icons/hicolor/48x48/apps\",\n \"applications\",\n \"metainfo\",\n \"appdata\",\n )\n ]:\n if not os.path.isdir(directory):\n os.makedirs(directory)\n # now copy the files\n shutilCopy(\n os.path.join(eric7SourceDir, \"pixmaps\", \"eric_icon.png\"),\n os.path.join(localPath, \"icons\", \"eric.png\"),\n )\n shutilCopy(\n os.path.join(eric7SourceDir, \"pixmaps\", \"eric48_icon.png\"),\n os.path.join(localPath, \"icons/hicolor/48x48/apps\", \"eric.png\"),\n )\n copyDesktopFile(\n os.path.join(dataSourceDir, \"eric7.desktop.in\"),\n os.path.join(localPath, \"applications\", \"eric7.desktop\"),\n )\n copyAppStreamFile(\n os.path.join(dataSourceDir, \"eric7.appdata.xml.in\"),\n os.path.join(localPath, \"metainfo\", \"eric7.appdata.xml\"),\n )\n copyAppStreamFile(\n os.path.join(dataSourceDir, \"eric7.appdata.xml.in\"),\n os.path.join(localPath, \"appdata\", \"eric7.appdata.xml\"),\n )\n shutilCopy(\n os.path.join(eric7SourceDir, \"pixmaps\", \"ericWeb48_icon.png\"),\n os.path.join(localPath, \"icons\", \"ericWeb.png\"),\n )\n shutilCopy(\n os.path.join(eric7SourceDir, \"pixmaps\", \"ericWeb48_icon.png\"),\n os.path.join(localPath, \"icons/hicolor/48x48/apps\", \"ericWeb.png\"),\n )\n copyDesktopFile(\n os.path.join(dataSourceDir, \"eric7_browser.desktop.in\"),\n os.path.join(localPath, \"applications\", \"eric7_browser.desktop\"),\n )", "title": "" } ]
73821787ab984ccb56d5e67dd6fc439e
The configuration settings of the token storage if a file system is used.
[ { "docid": "d551f4f3b9e27b3856657dd11566cc54", "score": "0.5914945", "text": "def file_system(self) -> Optional[pulumi.Input['FileSystemTokenStoreArgs']]:\n return pulumi.get(self, \"file_system\")", "title": "" } ]
[ { "docid": "37b3738304a27691132542a36ac34039", "score": "0.6504488", "text": "def configuration(self):\n return os.path.expanduser(self.settings.get('configuration'))", "title": "" }, { "docid": "6ff621098f65684905dd57e654a377fb", "score": "0.6473083", "text": "def _getConfig(self):\n ru = getUtility(IRegistry)\n return ru.forInterface(ITokenLoginSettings)", "title": "" }, { "docid": "6ff621098f65684905dd57e654a377fb", "score": "0.6473083", "text": "def _getConfig(self):\n ru = getUtility(IRegistry)\n return ru.forInterface(ITokenLoginSettings)", "title": "" }, { "docid": "40bb1e63526b5c149d570066dc6bb2e5", "score": "0.623006", "text": "def get_settings(self, conf):\n results = self.get_config(conf, local=True)\n results[self.password_store] = self.get_password()\n return results", "title": "" }, { "docid": "758e187f5d7a911dfbdd87782aa94f5c", "score": "0.6228006", "text": "def store_settings(self) -> Optional[Any]:\n return pulumi.get(self, \"store_settings\")", "title": "" }, { "docid": "758e187f5d7a911dfbdd87782aa94f5c", "score": "0.6228006", "text": "def store_settings(self) -> Optional[Any]:\n return pulumi.get(self, \"store_settings\")", "title": "" }, { "docid": "60c0635889b2d895b1ed7478b8700ebb", "score": "0.6190062", "text": "def setting_definitions(self):\n return self.config_service.settings", "title": "" }, { "docid": "0b2d54b1326133d2a1404b1e55db5463", "score": "0.6169809", "text": "def storage_configuration(self) -> 'outputs.Gen2StorageConfigurationOutputResponse':\n return pulumi.get(self, \"storage_configuration\")", "title": "" }, { "docid": "9f514e96839509d4b0b4f8298368ae73", "score": "0.61008465", "text": "def conf(self):\t\t\n\t\timport os.path\n\t\t\n\t\tself.setting = self.configManager.get()[\"DEFAULT\"]\n\t\tdefault = {\n\t\t\t\"savepath\": \"download\",\n\t\t\t\"runafterdownload\": \"\",\n\t\t\t\"libraryautocheck\": \"true\"\n\t\t}\n\t\textend(self.setting, default)\n\t\t\n\t\tself.setting[\"savepath\"] = os.path.normpath(self.setting[\"savepath\"])", "title": "" }, { "docid": "b58faed0ac21ba852907548e8cd7bf0a", "score": "0.60366005", "text": "def SaveConf(self):\n return {}", "title": "" }, { "docid": "cadb056f2bd41aa8fcf018fc9a6b5947", "score": "0.60337836", "text": "def default_config():\n return {'secret_key':\n '931b8-i-f44330b4a5-am-3b9b733f-not-secure-043e96882',\n 'persistence': 'ndb',\n 'cleanupqueue': 'default',\n 'cleanupdelay': 7600,\n 'defaultqueue': 'default',\n 'task_system': 'appengine_taskqueue',\n 'csrf_check': 'furious.csrf_check'}", "title": "" }, { "docid": "4548f9f397f9ca7cff679e938c42c229", "score": "0.59746224", "text": "def get_app_settings(self):\n return {\n \"template_path\": os.getcwd(),\n \"cookie_secret\": config.secret_key,\n \"log_function\": self.log_request,\n }", "title": "" }, { "docid": "cb2694d21f5a6de1188a0c41176e7c3f", "score": "0.5949339", "text": "def config():\n try:\n return config._config\n except AttributeError:\n config_filename = os.path.splitext(os.path.basename(__file__))[0] + \".config\"\n parser = ConfigParser()\n parser.read(config_filename)\n api_token = parser.get(\"auth\", \"api_token\")\n timezone = parser.get(\"settings\", \"timezone\")\n config._config = {\n \"api_token\": api_token,\n \"timezone\": pytz.timezone(timezone)\n }\n return config._config", "title": "" }, { "docid": "02e36fc7bb7d0c606a07b0c5934c9ecb", "score": "0.59379435", "text": "def get_module_config(self):\n fileOpr = FileOperation()\n config = fileOpr.readFile(\"./utils/system_config\", \"r\")\n #print(config)\n config = eval(config)\n 
return config", "title": "" }, { "docid": "985c30a4e24a1c72ce29b3443565ab8d", "score": "0.5916737", "text": "def _get_settings(self):\n data=None\n if self.filename is not None:\n if File.exists(self.filename):\n data=YAMLFile.load(self.filename)\n else:\n data=None\n\n if data is not None:\n self.settings = Storage(data)\n else:\n self.settings = Storage()", "title": "" }, { "docid": "3a2b4860147ae533e384b0099f7e0c97", "score": "0.5903261", "text": "def get_config(self):\n self.config = {}\n self.config_file = self.config_file and self.config_file or self.DEFAULT_CONFIG\n\n cfg = None\n if os.path.exists(self.config_file):\n cfg = open(self.config_file).readlines()\n else:\n raise Exception(\"Cannot open configuration file: \" + self.config_file + \"\\n\" + \"Use sudo, perhaps?\")\n\n for line in cfg:\n try:\n key, value = line.replace(\" \", \"\").strip().split(\"=\", 1)\n if key.startswith(\"db_\"):\n # Handling odd change in Spacewalk 1.7 where \"db_name\" could be URI\n self.config[key] = ((key == \"db_name\") and value.startswith(\"//\")) and value.split(\"/\")[-1] or value\n except:\n pass", "title": "" }, { "docid": "86a4f24c76a04d18e3e272e38e94495e", "score": "0.5878775", "text": "def get_storage_config(self, app_name):\n config = self.get_cdapp_config(app_name).objectStore\n if not config:\n raise ValueError(f\"no object storage config present for app '{app_name}'\")\n return config", "title": "" }, { "docid": "7765ce77cbc3cb8ebc9989694035a21c", "score": "0.58781886", "text": "def save_config(self):\n\n return {}", "title": "" }, { "docid": "8e38c4e9f14308c4b1953d11d9d7a927", "score": "0.5836538", "text": "def settings(self):\n raise NotImplementedError", "title": "" }, { "docid": "67530f88525bde0078aeedc0438600a6", "score": "0.5829423", "text": "def get_config(self):\n return {}", "title": "" }, { "docid": "bde095d51c8659a4b34b8bcdbec516b8", "score": "0.58140856", "text": "def get_config(self):\n return self.__conf_v", "title": "" }, { "docid": "712455df613c1c699ba5d77f553c3e60", "score": "0.58118033", "text": "def load_settings():\n # get the telegram bot token\n token = json.loads(open(\"Data/bottoken.json\").read())[\"token\"]\n return token", "title": "" }, { "docid": "165b801142d6bbb95c0cf430728613c5", "score": "0.5788463", "text": "def get_conf(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "abb17c30a56648edd03076b4b2d0c95e", "score": "0.5768831", "text": "def get_config_settings():\n conf = {}\n # Pipfiles are expected to have all the requirements of a project for development, production, testing, etc all\n # listed in a single file, unlike requirements.txt convention where production and development requirements are\n # often split into different files. Thus, it is necessary to have the ability to configure which sections of the\n # file should be considered for management by dependencies.io. The default will be to include both of the standard\n # sections of the Pipfile. 
This setting can be configured to eliminate a section or to possibly add a custom\n # section name.\n #\n # pipfile_sections:\n # - packages\n # - dev-packages\n # pipfilelock_sections:\n # - default\n # - develop\n SETTING_PIPFILE_SECTIONS = os.getenv(\n \"DEPS_SETTING_PIPFILE_SECTIONS\", '[\"packages\", \"dev-packages\"]'\n )\n conf[\"pipfile_sections\"] = json.loads(SETTING_PIPFILE_SECTIONS)\n\n SETTING_PIPFILELOCK_SECTIONS = os.getenv(\n \"DEPS_SETTING_PIPFILELOCK_SECTIONS\", '[\"default\", \"develop\"]'\n )\n conf[\"pipfilelock_sections\"] = json.loads(SETTING_PIPFILELOCK_SECTIONS)\n\n return conf", "title": "" }, { "docid": "3d571cbbf2954c03c8fd250215e4e5e2", "score": "0.57621694", "text": "def configuration():\n \n driver = config.DRIVER\n server = config.SERVER\n db = config.DB\n uid = config.UID\n pwd= config.PWD\n return driver, server, db, uid, pwd", "title": "" }, { "docid": "098c7289d4cfb0922d29179e5a726bc0", "score": "0.5756239", "text": "def config():", "title": "" }, { "docid": "0cf64b01a3ca213c2e7351e536e50a3f", "score": "0.5741006", "text": "def config():\n with open('/mnt/lustre02/work/um0878/users/tlang/work/dyamond/processing/config.json') as handle:\n config = json.loads(handle.read())\n \n return config", "title": "" }, { "docid": "8f0f4be344148ef0728ada8f99263a7e", "score": "0.57377875", "text": "def CONF(self):\n raise NotImplementedError", "title": "" }, { "docid": "b4e9cfe87c96e065e11240170b61f18d", "score": "0.57336724", "text": "def pipeline_settings():\n settings_files = [\n \"otu_assignment\",\n \"tax_assignment\",\n \"otu_processing\",\n \"network_inference\",\n \"datatypes\",\n ]\n settings = {}\n for file in settings_files:\n fname = SETTINGS_DIR / f\"{file}.toml\"\n with open(fname) as fid:\n data = toml.load(fid)\n settings[file] = data\n settings[\"config_folder\"] = SETTINGS_DIR\n return settings", "title": "" }, { "docid": "b4e9cfe87c96e065e11240170b61f18d", "score": "0.57336724", "text": "def pipeline_settings():\n settings_files = [\n \"otu_assignment\",\n \"tax_assignment\",\n \"otu_processing\",\n \"network_inference\",\n \"datatypes\",\n ]\n settings = {}\n for file in settings_files:\n fname = SETTINGS_DIR / f\"{file}.toml\"\n with open(fname) as fid:\n data = toml.load(fid)\n settings[file] = data\n settings[\"config_folder\"] = SETTINGS_DIR\n return settings", "title": "" }, { "docid": "3e8ef364ee3dc8b347194df359f23573", "score": "0.57286954", "text": "def getSettings():\n global settings\n from collections import defaultdict\n settings = defaultdict(str)\n try:\n path = os.path.dirname(os.path.realpath(__file__))\n with open(path + '/settings.xml') as f:\n xml = f.read()\n import xml.etree.cElementTree as et\n et = et.fromstring(xml)[0]\n for e in et:\n # change builtin type name to the named type\n try:\n conv = convert[e.attrib['type']]\n settings[e.attrib['id']] = conv(e.text)\n except:\n pass\n except:\n print \"No settings file. Continuing with a minimal configuration. 
\"\n settings['fetcher'] = Batch\n # use batch formatting\n settings[\"type\"] = 'batch'\n settings[\"html\"] = False\n settings[\"count\"] = 0", "title": "" }, { "docid": "7fd27e1513ec34da61b982356b8046cb", "score": "0.57282144", "text": "def getConfiguration():\n\n fileName = g.os_path_join(g.app.loadDir,\"../\",\"plugins\",\"word_export.ini\")\n config = ConfigParser.ConfigParser()\n config.read(fileName)\n return config", "title": "" }, { "docid": "ea3980039eb7bfa5e36b53bbe4a5b53e", "score": "0.57239175", "text": "def get_auth_settings():\n from mezzanine.conf import settings\n settings.use_editable()\n auth_settings = {'client_id': settings.BUFFER_CLIENT_ID,\n 'client_secret': settings.BUFFER_CLIENT_SECRET,\n 'access_token': settings.BUFFER_ACCESS_TOKEN,\n }\n return auth_settings if all(auth_settings.itervalues()) else None", "title": "" }, { "docid": "aba5f93d9db8080e3ed634af1eba225e", "score": "0.5723643", "text": "def config(self):\n return self.config_manager.data", "title": "" }, { "docid": "6a74cb1150296eb7aabb3b00bbb0175d", "score": "0.57216114", "text": "def settings(self):\n if not hasattr(self, '_settings'):\n self._settings = {}\n for name, value in self.application.settings.items():\n self._settings[name] = value\n # self._settings = copy.deepcopy(self.application.settings)\n self._settings['template_path'] = os.path.join(os.path.dirname(__file__), '../templates/')\n self._settings['static_path'] = os.path.join(os.path.dirname(__file__), '../static/')\n self._settings['static_url_prefix'] = '/admin_static/'\n return self._settings", "title": "" }, { "docid": "3d059680a1fe0a804c04b1e64d3f8468", "score": "0.57180476", "text": "def get_config():\r\n global config\r\n\r\n if config:\r\n return config\r\n\r\n with open(os.path.join(os.path.dirname(__file__), 'login.json')) as file:\r\n config = json.load(file)\r\n\r\n return config", "title": "" }, { "docid": "093133af7503cbdd197ad20e709329cb", "score": "0.57110775", "text": "def sovpn_config_file(self):\n return self.settings['server']['sovpn_config_file']", "title": "" }, { "docid": "d1448f25f0a3592024162160b011b841", "score": "0.5705211", "text": "def settings():\n logger.info(\"### Entered Settings ###\")\n\n try:\n # add the settings to the structure of the file, and lets write it out...\n config.set('Personalization', 'DefaultWordList', DefaultWordList)\n config.set('Personalization', 'PracticeListSize', str(PracticeListSize))\n logger.debug(\"DefaultWordList: \" + DefaultWordList)\n logger.debug(\"PracticeListSize: \" + str(PracticeListSize))\n\n logger.info(\"### Save Settings ###\")\n # Writing our configuration file to 'example.ini'\n with open('diktat.ini', 'w') as configfile:\n config.write(configfile)\n except configparser.NoSectionError:\n logger.error(\"Settings could not be saved\")\n return \"false\"\n except Exception as e:\n logger.error(\"Save settings: \" + str(e))\n raise", "title": "" }, { "docid": "3f23d51135e008df8bcbe1c48851137b", "score": "0.57026994", "text": "def config(self):\n pass", "title": "" }, { "docid": "3f23d51135e008df8bcbe1c48851137b", "score": "0.57026994", "text": "def config(self):\n pass", "title": "" }, { "docid": "09f897b020de0e55fd18858e5a8f5940", "score": "0.56962997", "text": "def config(self):\n config = configparser.ConfigParser()\n config.read(self.config_file)\n return config", "title": "" }, { "docid": "8cf2b29da97bf698c0bcc8b52f17df90", "score": "0.56959957", "text": "def global_config(self) -> Config:\n return Config(Path.home() / \".pdm\" / \"config.toml\", 
is_global=True)", "title": "" }, { "docid": "85da39ffffab913acf7edbdbb1ddc248", "score": "0.5691812", "text": "def config(self, filepath):\n pass", "title": "" }, { "docid": "f7af464e9beb81ea8b6b17fe4972ca6e", "score": "0.56849945", "text": "def auth_settings(self):\n return {\n\n }", "title": "" }, { "docid": "7f38130cc8096c3646ec66301c80f8ce", "score": "0.5672263", "text": "def conf(self):\n return None", "title": "" }, { "docid": "7e4a784fcc0cf2c1cdbd3eb8c6494843", "score": "0.5664972", "text": "def settings_system(self):\n response = self._get(url.settings_system)\n self._check_response(response, 200)\n return self._create_response(response)", "title": "" }, { "docid": "4ac33e78430488b631e2bb43fc17e254", "score": "0.56569237", "text": "def settings() -> list:\n with open(os.path.expanduser(\"~/.config/note-me/settings.json\"), \"r\") as f:\n content = f.read()\n content = json.loads(content)\n editor = content[\"editor\"]\n git = content[\"git\"]\n base = content[\"base\"]\n return [editor, git, base]", "title": "" }, { "docid": "5c29535f45062a0bd2d439213d0cdeb7", "score": "0.56515175", "text": "def getSettings(self):\n return ['Sessions']", "title": "" }, { "docid": "b6e4edfb7ee54f1474e945673f54383c", "score": "0.56425524", "text": "def config(self) -> FilebaseTemplateServiceConfig:\n return self._config", "title": "" }, { "docid": "4836d60ae76716d13c4f5fa5e9c625b0", "score": "0.56234556", "text": "def saveConfigurationFile(self):\n pass", "title": "" }, { "docid": "a38d790f814c336c1642108b23215ebd", "score": "0.56112605", "text": "def Configuration(self) -> str:", "title": "" }, { "docid": "32eb804b7d44adebd17b63cfeaea4c95", "score": "0.5609957", "text": "def get_config(self):", "title": "" }, { "docid": "c9a7f0be2f48843bb798c3953358c84c", "score": "0.55917823", "text": "def load_configuration(self, filename=None):\n\n # If a new filename has been passed as argument, then store it\n if filename is not None:\n self.configuration_file = filename\n\n if self.configuration_file[0] == \"~\" and self.configuration_file[1] == \"/\":\n self.configuration_file = os.path.expanduser(self.configuration_file)\n\n try:\n # Check it the configuration file exist\n if os.path.isfile(self.configuration_file):\n # Yes then, load it\n with open(self.configuration_file, 'r') as working_file:\n self.configuration = yaml.load(working_file)\n\n # Now we may have to expand a few paths...\n # First check if the configurationis really defined\n if self.configuration is not None and Key.CONFIGURATION.value in self.configuration:\n # Yes then we now have to check one by one th different path to expand\n # First let's process working_dir\n if Key.WORKING_DIR.value in self.configuration[Key.CONFIGURATION.value]:\n # Check if path starts with ~ and need expension\n if self.configuration[Key.CONFIGURATION.value][Key.WORKING_DIR.value][0] == \"~\" \\\n and self.configuration[Key.CONFIGURATION.value][Key.WORKING_DIR.value][1] == \"/\":\n self.configuration[Key.CONFIGURATION.value][Key.WORKING_DIR.value] = \\\n os.path.expanduser(self.configuration[Key.CONFIGURATION.value]\\\n [Key.WORKING_DIR.value])\n # Then let's do dft_base\n if Key.DFT_BASE.value in self.configuration[Key.CONFIGURATION.value]:\n # Check if path starts with ~ and need expension\n if self.configuration[Key.CONFIGURATION.value][Key.DFT_BASE.value][0] == \"~\" \\\n and self.configuration[Key.CONFIGURATION.value][Key.DFT_BASE.value][1] == \"/\":\n self.configuration[Key.CONFIGURATION.value][Key.DFT_BASE.value] = \\\n 
os.path.expanduser(self.configuration[Key.CONFIGURATION.value]\\\n [Key.DFT_BASE.value])\n # And finally the list of additionnal roles\n if Key.ADDITIONAL_ROLES.value in self.configuration[Key.CONFIGURATION.value]:\n # Check if path starts with ~ and need expension\n for i in range(0, len(self.configuration[Key.CONFIGURATION.value]\\\n [Key.ADDITIONAL_ROLES.value])):\n if self.configuration[Key.CONFIGURATION.value]\\\n [Key.ADDITIONAL_ROLES.value][i][0] == \"~\" and \\\n self.configuration[Key.CONFIGURATION.value]\\\n [Key.ADDITIONAL_ROLES.value][i][1] == \"/\":\n self.configuration[Key.CONFIGURATION.value][Key.ADDITIONAL_ROLES.value][i] = \\\n os.path.expanduser(self.configuration[Key.CONFIGURATION.value]\\\n [Key.ADDITIONAL_ROLES.value][i])\n\n except OSError as exception:\n # Call clean up to umount /proc and /dev\n self.logging.critical(\"Error: \" + exception.filename + \"- \" + exception.strerror)\n exit(1)", "title": "" }, { "docid": "48085c88e28ae0cc8a5da1e8640c3f4b", "score": "0.5586443", "text": "def get_system_configuration(request):\n config = cache.get('system-config')\n if not config:\n # Lookup or create the configuration\n system_config = Configuration.objects.first()\n if system_config is None:\n system_config = Configuration(\n shop_name=settings.OSCAR_SHOP_NAME,\n shop_tagline=settings.OSCAR_SHOP_TAGLINE,\n homepage_url=settings.OSCAR_HOMEPAGE,\n # Fallback to old settings name for backwards compatibility\n use_less=(\n getattr(settings, 'OSCAR_USE_LESS', None) or\n getattr(settings, 'USE_LESS', False)\n ),\n google_analytics_id=(\n getattr(settings, 'OSCAR_GOOGLE_ANALYTICS_ID', None) or\n getattr(settings, 'GOOGLE_ANALYTICS_ID', None)\n ) \n )\n system_config.save()\n config = system_config.as_context()\n cache.set('system-config', config, 360)\n return config", "title": "" }, { "docid": "36702534beb602a235cd135b75d569d3", "score": "0.5583648", "text": "def get_settings():\n return config.Settings()", "title": "" }, { "docid": "54aeac9c1f4cf846d56b4eac41e45c63", "score": "0.5583625", "text": "def get_config(self):\n pass", "title": "" }, { "docid": "921f87d247dfce3869535b874a4f8234", "score": "0.558297", "text": "def conf(self):\n\n return self._conf", "title": "" }, { "docid": "387989f5a6c9163e7953b8a82e8b0912", "score": "0.55769855", "text": "def configuration_manager(self):\n return None", "title": "" }, { "docid": "1c436cabcaadf3ab668a2691ff78d53a", "score": "0.55747277", "text": "def _token_file(self):\n return os.path.join(os.path.expanduser(cf.options.rootdir), \"JWT.json\")", "title": "" }, { "docid": "ecb1f9ec8bf452ac1925a212cfcde94d", "score": "0.55741596", "text": "def read_config_data(self):\n return self.config", "title": "" }, { "docid": "d72354f7d1020afc5a5048930f7ce04a", "score": "0.5572907", "text": "def settings():", "title": "" }, { "docid": "ed24dfcc9742feeebb73991dc174acfb", "score": "0.5570348", "text": "def get_config_local(self):\r\n\r\n return self.__config_local", "title": "" }, { "docid": "ed24dfcc9742feeebb73991dc174acfb", "score": "0.5570348", "text": "def get_config_local(self):\r\n\r\n return self.__config_local", "title": "" }, { "docid": "8a20591f49890c4ed30cd15e1c23a68d", "score": "0.5568209", "text": "def get_configfile(self):\r\n return self._configfile", "title": "" }, { "docid": "acccd54f9b010acb31de0d53c926c547", "score": "0.556647", "text": "def get_configuration(self):\n FLAGS = tf.flags.FLAGS\n with open(os.path.join(CONFIG_DIR, config_json)) as data_file:\n configuration = json.load(data_file)\n 
tf.flags.DEFINE_integer(\"hidden_size\", int(configuration[\"hidden_size\"]), \"Size of LSTM hidden layer.\")\n tf.flags.DEFINE_integer(\"memory_size\", int(configuration[\"memory_size\"]), \"The number of memory slots.\")\n tf.flags.DEFINE_integer(\"word_size\", int(configuration[\"word_size\"]), \"The width of each memory slot.\")\n tf.flags.DEFINE_integer(\"num_write_heads\", int(configuration[\"num_write_heads\"]), \"Number of memory write heads.\")\n tf.flags.DEFINE_integer(\"num_read_heads\", int(configuration[\"num_read_heads\"]), \"Number of memory read heads.\")\n tf.flags.DEFINE_integer(\"clip_value\", int(configuration[\"clip_value\"]),\n \"Maximum absolute value of controller and dnc outputs.\")\n tf.flags.DEFINE_float(\"max_grad_norm\", float(configuration[\"max_grad_norm\"]), \"Gradient clipping norm limit.\")\n tf.flags.DEFINE_float(\"learning_rate\", float(configuration[\"learning_rate\"]), \"Optimizer learning rate.\")\n tf.flags.DEFINE_float(\"final_learning_rate\", float(configuration[\"final_learning_rate\"]), \"Optimizer final learning rate.\")\n tf.flags.DEFINE_float(\"optimizer_epsilon\", float(configuration[\"optimizer_epsilon\"]),\n \"Epsilon used for RMSProp optimizer.\")\n tf.flags.DEFINE_string(\"dataset_neg\",configuration[\"dataset_neg\"],\"Dataset of negative tweets.\")\n tf.flags.DEFINE_string(\"dataset_pos\",configuration[\"dataset_pos\"],\"Dataset of positive tweets.\")\n tf.flags.DEFINE_string(\"dataset_test\",configuration[\"dataset_test\"],\"Dataset of testing tweets.\")\n tf.flags.DEFINE_string(\"glove_dir\", configuration[\"glove_dir\"], \"The location of GloVe pretrained model.\")\n tf.flags.DEFINE_boolean(\"random\", configuration[\"random\"], \"True if you want to randomized the rewiew to choose\")\n tf.flags.DEFINE_boolean(\"seed\", int(configuration[\"seed\"]), \"The seed that you want to set\")\n tf.flags.DEFINE_float(\"ratio\", float(configuration[\"ratio\"]), \"The ratio of training/testing splitting\")\n tf.flags.DEFINE_integer(\"batch_size\", int(configuration[\"batch_size\"]), \"Batch size for training.\")\n tf.flags.DEFINE_integer(\"max_lenght\", int(configuration[\"max_lenght\"]), \"Max number of word of the review.\")\n tf.flags.DEFINE_integer(\"word_dimension\", int(configuration[\"word_dimension\"]), \"The number of dimension of W2V\")\n tf.flags.DEFINE_integer(\"num_classes\", int(configuration[\"num_classes\"]),\n \"Number of classes\")\n tf.flags.DEFINE_integer(\"num_testing_iterations\",int( configuration[\"num_testing_iterations\"]),\n \"Number of iterations to train for.\")\n tf.flags.DEFINE_integer(\"num_epochs\", int(configuration[\"num_epochs\"]),\n \"Number of epoch.\")\n tf.flags.DEFINE_integer(\"report_interval\", int(configuration[\"report_interval\"]),\n \"Iterations between reports (samples, valid loss).\")\n tf.flags.DEFINE_string(\"checkpoint_dir\", configuration[\"checkpoint_dir\"],\n \"Checkpointing directory.\")\n tf.flags.DEFINE_integer(\"checkpoint_interval\", int(configuration[\"checkpoint_interval\"]),\n \"Checkpointing step interval.\")\n tf.flags.DEFINE_integer(\"total_samples\", int(configuration[\"total_samples\"]),\n \"Number of samples.\")\n return FLAGS", "title": "" }, { "docid": "f73a322c8a157fcdb6e123c3a458fb66", "score": "0.55659276", "text": "def settings(self):\n return get_settings(self)", "title": "" }, { "docid": "0c1ceca24bd666cca4fb4d75a1c9c791", "score": "0.5563544", "text": "def get_settings(self):\n store = dict()\n for x in dir(config):\n if x.isupper():\n store[x] = getattr(config, x)\n 
return store", "title": "" }, { "docid": "74dd3e3d92f1edd0b807de17063112dc", "score": "0.55542207", "text": "def get_token_config():\n cc = ConfigParser('/etc/nova/api-paste.ini')\n username = cc.get('filter:authtoken', 'admin_user')\n password = cc.get('filter:authtoken', 'admin_password')\n tenant_name = cc.get('filter:authtoken', 'admin_tenant_name')\n auth_uri = cc.get('filter:authtoken', 'auth_uri', 'http://0.0.0.0:5000')\n\n if -1 == auth_uri.rfind('/v2.0'):\n auth_uri = '%s/v2.0' % auth_uri\n\n ret = (username, password, tenant_name, auth_uri)\n assert all(ret)\n return ret", "title": "" }, { "docid": "bc4bb9bc37e2dc09199dfd9a2ab3535d", "score": "0.55524737", "text": "def _global_conf(self) -> dict:\n return {\n \"algorithm\": {\n \"name\": \"BruteForce\",\n \"parameters\": {\n \"transfer_strategy\": \"Recursive\",\n }\n },\n \"output_dir\": \"../output\",\n \"write_JSON\": {\n \"compressed\": False,\n \"suffix\": \"json\",\n \"communications\": True,\n \"offline_LB_compatible\": False\n }\n }", "title": "" }, { "docid": "dbea6086afbb59abda42a7c2c9798f34", "score": "0.5548611", "text": "def get_configuration():\n configuration = {\n \"get_object_function\": None,\n \"hcard_path\": \"/hcard/users/\",\n \"nodeinfo2_function\": None,\n \"process_payload_function\": None,\n \"search_path\": None,\n \"tags_path\": None,\n # TODO remove or default to True once AP support is more ready\n \"activitypub\": False,\n }\n try:\n configuration.update(settings.FEDERATION)\n except ImproperlyConfigured:\n # Django is not properly configured, return defaults\n return configuration\n if not all([\n \"get_private_key_function\" in configuration,\n \"get_profile_function\" in configuration,\n \"base_url\" in configuration,\n ]):\n raise ImproperlyConfigured(\"Missing required FEDERATION settings, please check documentation.\")\n return configuration", "title": "" }, { "docid": "322bacae8bfa0f0a47a4b7011bf90506", "score": "0.5544414", "text": "def make_token_store(fpath=None):\n if fpath is None:\n fpath = DEFAULT_TOKEN_FILE\n return auth_file.Storage(fpath)", "title": "" }, { "docid": "226988b7bf9c0e1a6892afaac06a52a9", "score": "0.5538398", "text": "def config(self):\n return self.__config", "title": "" }, { "docid": "87589a988184d414bff7d457f0fb5d90", "score": "0.5528916", "text": "def config(self):\r\n\r\n config_final = dict(self.config_project)\r\n config_final.update(self.config_local)\r\n return config_final", "title": "" }, { "docid": "87589a988184d414bff7d457f0fb5d90", "score": "0.5528916", "text": "def config(self):\r\n\r\n config_final = dict(self.config_project)\r\n config_final.update(self.config_local)\r\n return config_final", "title": "" }, { "docid": "fa450eb9febb1cb5c47d0f8decc9f6d0", "score": "0.5528309", "text": "def settings(self) -> Optional[Any]:\n return pulumi.get(self, \"settings\")", "title": "" }, { "docid": "3fdb9adc3b30573816afd43d8b56a768", "score": "0.5524478", "text": "def get_settings_var():\n \n settings_var = {}\n # Main\n settings_var['DEBUG'] = DEBUG\n settings_var['MEDIA_ROOT'] = MEDIA_ROOT\n settings_var['MEDIA_URL'] = MEDIA_URL\n settings_var['DIRECTORY'] = DIRECTORY\n # FileBrowser\n settings_var['URL_FILEBROWSER_MEDIA'] = URL_FILEBROWSER_MEDIA\n settings_var['PATH_FILEBROWSER_MEDIA'] = PATH_FILEBROWSER_MEDIA\n # TinyMCE\n settings_var['URL_TINYMCE'] = URL_TINYMCE\n settings_var['PATH_TINYMCE'] = PATH_TINYMCE\n # Extensions/Formats (for FileBrowseField)\n settings_var['EXTENSIONS'] = EXTENSIONS\n settings_var['SELECT_FORMATS'] = SELECT_FORMATS\n # Versions\n 
settings_var['VERSIONS_BASEDIR'] = VERSIONS_BASEDIR\n settings_var['VERSIONS'] = VERSIONS\n settings_var['ADMIN_VERSIONS'] = ADMIN_VERSIONS\n settings_var['ADMIN_THUMBNAIL'] = ADMIN_THUMBNAIL\n settings_var['PREVIEW_VERSION'] = PREVIEW_VERSION\n # FileBrowser Options\n settings_var['MAX_UPLOAD_SIZE'] = MAX_UPLOAD_SIZE\n # Convert Filenames\n settings_var['CONVERT_FILENAME'] = CONVERT_FILENAME\n return settings_var", "title": "" }, { "docid": "e4b3e7785b0b19a1bd5b4e40af653fa4", "score": "0.55226815", "text": "def oidc_token_file_path(self) -> Optional[str]:\n return __config__.get('oidcTokenFilePath')", "title": "" }, { "docid": "c153f6ba142481e018ff29a107077cca", "score": "0.5522391", "text": "def configs(self):\n return []", "title": "" }, { "docid": "49c61617c185713663bb496fa37cdeb1", "score": "0.55034286", "text": "def meltano_yml_config(self):\n return self.config_service.current_config", "title": "" }, { "docid": "a8caf9c97a44df059e636a991717df7f", "score": "0.55019546", "text": "def show_configuration():\n cfg = {}\n path = Path(app.instance_path)\n files = list(filter(lambda x: x.is_file(), path.iterdir()))\n config = ConfigParser(allow_no_value=True)\n for file in files:\n nom = str(file)\n print(\"File name: %s\" % nom)\n try:\n with open(nom) as fd:\n text = fd.read()\n except UnicodeDecodeError as err:\n error(\"%s - not a text file: %s\" % (nom, err))\n continue\n try:\n config.read_string(text, source=nom)\n except Exception as err:\n error(\"%s - not a configuration file: %s\" % (nom, err))\n continue\n cfg[file.name] = text\n return cfg", "title": "" }, { "docid": "df1c16649300515968b78d204b9f64ac", "score": "0.5501604", "text": "def get_settings_provider_path(self):\n return os.path.join(self.workspace, \"settings.py\")", "title": "" }, { "docid": "465cc021d292ecef07a39c9b2869a620", "score": "0.5501405", "text": "def get_config_files(self):\n return [\"/tools/jetty/etc/jetty.xml\"]", "title": "" }, { "docid": "366285461062e8c41d0894a4897919a3", "score": "0.54968864", "text": "def _get_file_auth_config():\n\n config = {\n \"filetype\": \"text\",\n \"hashtype\": \"plaintext\",\n \"field_separator\": \":\",\n \"username_field\": 1,\n \"password_field\": 2,\n }\n\n for opt in __opts__[\"external_auth\"][__virtualname__]:\n if opt.startswith(\"^\"):\n config[opt[1:]] = __opts__[\"external_auth\"][__virtualname__][opt]\n\n if \"filename\" not in config:\n log.error(\n \"salt.auth.file: An authentication file must be specified \"\n \"via external_auth:file:^filename\"\n )\n return False\n\n if not os.path.exists(config[\"filename\"]):\n log.error(\n \"salt.auth.file: The configured external_auth:file:^filename (%s)\"\n \"does not exist on the filesystem\",\n config[\"filename\"],\n )\n return False\n\n config[\"username_field\"] = int(config[\"username_field\"])\n config[\"password_field\"] = int(config[\"password_field\"])\n\n return config", "title": "" }, { "docid": "91e2707b3bf9f4fdf83f74db01681834", "score": "0.54963565", "text": "def base_config(self):\n # NOTE: use ordered keys\n permissions = OrderedDict()\n\n session = self.config_models.session()\n\n permissions['users'] = self.users(session)\n permissions['groups'] = self.groups(session)\n permissions['roles'] = self.roles(session)\n\n session.close()\n\n return permissions", "title": "" }, { "docid": "b7498cfc169515fc340992c567b50f6d", "score": "0.5493636", "text": "def get_configuration():\r\n class Conf:\r\n def __init__(self):\r\n self.directories = [\"../\", \"../calculations\"]\r\n self.outputdir = 
[\"../../testoutput\"]\r\n\r\n def is_directory_configured(self, directory):\r\n return directory in self.directories\r\n\r\n return Conf()", "title": "" }, { "docid": "2d818a43eb2b8de3c8c780ac9d85f58f", "score": "0.549294", "text": "def system_configuration():\n config = configuration.Configuration(find_system_configuration())\n config.defaults = _read_only_copy_default_configuration()\n\n return config", "title": "" }, { "docid": "938984e23f3f51a61761506c46525b3c", "score": "0.5491607", "text": "def Settings(self):\n global_defaults = self.GlobalDefaults\n local_settings = self.local_config\n\n for n in local_settings:\n if local_settings[n] is None:\n try:\n local_settings[n] = global_defaults[n]\n except Exception:\n \"\"\"keys for config ps.plone.jssor\"\"\"\n\n return local_settings", "title": "" }, { "docid": "356be6cd40fd097922954b064e7999fe", "score": "0.54834574", "text": "def _get_credential_storage(self):\n\n storage = self._credential_storage\n if storage is not None:\n return storage\n\n debug(\"Loading storage\")\n\n storagefile = self._get_config_file('credentials')\n\n if not os.path.exists(storagefile):\n open(storagefile, 'a+b').close()\n\n from oauth2client.file import Storage\n storage = Storage(storagefile)\n self._credential_storage = storage\n\n return storage", "title": "" }, { "docid": "cf4694078ce10dcbde1d527ccb4aa36b", "score": "0.5478986", "text": "def config(self):\n return self._config", "title": "" }, { "docid": "cf4694078ce10dcbde1d527ccb4aa36b", "score": "0.5478986", "text": "def config(self):\n return self._config", "title": "" }, { "docid": "cf4694078ce10dcbde1d527ccb4aa36b", "score": "0.5478986", "text": "def config(self):\n return self._config", "title": "" }, { "docid": "cf4694078ce10dcbde1d527ccb4aa36b", "score": "0.5478986", "text": "def config(self):\n return self._config", "title": "" }, { "docid": "cf4694078ce10dcbde1d527ccb4aa36b", "score": "0.5478986", "text": "def config(self):\n return self._config", "title": "" }, { "docid": "cf4694078ce10dcbde1d527ccb4aa36b", "score": "0.5478986", "text": "def config(self):\n return self._config", "title": "" }, { "docid": "eaed6909c00a1e71bd7dc43837db0b4b", "score": "0.54742295", "text": "def read_conf(self):\n with open(self.cf, 'rU') as get:\n os = ''\n for i in get:\n # remove comment lines\n if i[0] == '#':\n continue\n else:\n os += i.replace(' ', '')\n\n return os", "title": "" }, { "docid": "487957fca96e89ac93491e40c337158d", "score": "0.5470553", "text": "def config():\n cfg = current_app.config.get_namespace('ST_')\n for k, v in cfg.items():\n print(f'{k} = {v}')", "title": "" }, { "docid": "6e815bd2e184d9a2c34eb75c4d8d43fb", "score": "0.5467955", "text": "def _read_engine_settings(self):\n cp = configparser.ConfigParser()\n cp.read(self.metadata.default_local_ini)\n self._cp_engine_settings = cp\n return cp", "title": "" }, { "docid": "b03d21351adf07d6e6a38825cd95fa73", "score": "0.5464292", "text": "def setup_token_store(self):\n # see if the <pin> and <so_pin> exist?\n pin, so_pin = read_pins_from_store()\n if pin is not None:\n # return as the token store is already set up\n return\n # see if the token directory exists - if so, delete it.\n if os.path.exists(TOKEN_STORE):\n if os.path.isdir(TOKEN_STORE):\n shutil.rmtree(TOKEN_STORE)\n else:\n os.remove(TOKEN_STORE)\n os.makedirs(TOKEN_STORE)\n # We need the token store to be 1777 so that whoever creates a token\n # can also gain access to it - the token will be created by the\n # barbican user.\n os.chmod(TOKEN_STORE, 0o1777)\n # now create 
the token store\n pin = ch_core_host.pwgen(PIN_LENGTH)\n so_pin = ch_core_host.pwgen(PIN_LENGTH)\n write_pins_to_store(pin, so_pin)\n cmd = [\n 'sudo', '-u', 'barbican',\n SOFTHSM2_UTIL_CMD,\n '--init-token', '--free',\n '--label', BARBICAN_TOKEN_LABEL,\n '--pin', pin,\n '--so-pin', so_pin]\n subprocess.check_call(cmd)\n hookenv.log(\"Initialised token store.\")", "title": "" }, { "docid": "98f93b367cb7b7295b3486a74f9b1cff", "score": "0.54641646", "text": "def _get_lsp_config_persistenct(self):\n return self.__lsp_config_persistenct", "title": "" }, { "docid": "b54dfc751d5d94ef32a4205a97bb30ec", "score": "0.5462244", "text": "def configuration(self):\n\n if self._config is None:\n self._config = self._get_item('config')\n\n return _config_list_to_dict(self._config)", "title": "" } ]
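For illustration, a minimal sketch of how a file-system token store setting like the one in the positive passage above might be modeled and read. The class names FileSystemTokenStoreArgs and TokenStoreArgs, the directory field, and the example path are assumptions made for this sketch, not an actual SDK API.

from typing import Optional


class FileSystemTokenStoreArgs:
    # Hypothetical settings object describing where tokens are persisted on disk.
    def __init__(self, directory: str) -> None:
        self.directory = directory


class TokenStoreArgs:
    # Container mirroring the shape hinted at by the positive passage:
    # file_system is present only when a file system is used for token storage.
    def __init__(self, file_system: Optional[FileSystemTokenStoreArgs] = None) -> None:
        self._file_system = file_system

    @property
    def file_system(self) -> Optional[FileSystemTokenStoreArgs]:
        return self._file_system


store = TokenStoreArgs(file_system=FileSystemTokenStoreArgs(directory="/var/lib/app/tokens"))
assert store.file_system is not None and store.file_system.directory.endswith("tokens")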
990f4eac6cf19f38c7d60b80c2a4c885
Get Oxalis Minecraft server status
[ { "docid": "510f636dfab6d2497112d3425b4c00af", "score": "0.6496605", "text": "async def status(ctx: commands.Context):\n await ctx.send(str(subprocess.run(\"papermc status | grep Status\", stdout=subprocess.PIPE, shell=True).stdout, \"utf-8\"))", "title": "" } ]
[ { "docid": "4a82e7890222cbf56e7e2e23bcf81c3c", "score": "0.8241819", "text": "def server_status(self):\n\t\treq_id = self._send(b'SHOW_STATUS')\n\t\treturn jsonapi.loads(self._recv(req_id).content[1])", "title": "" }, { "docid": "7857be212d49cd8e7daa0f2971e9f170", "score": "0.78942055", "text": "def api_short_server_status():\n status = api.v2.api_world_status(minecraft.World())\n return {\n 'list': status['list'],\n 'on': status['running'],\n 'version': status['version']\n }", "title": "" }, { "docid": "fb0ef80fcf81787066fbc4b980934970", "score": "0.7837038", "text": "def serverStatus(self) -> TGServerStatus:", "title": "" }, { "docid": "1c77eaad5c1e013ee7839883b0ad1197", "score": "0.77162886", "text": "async def serverstatus(self, ctx):\n await ctx.send(self.pugInfo.gameServer.format_game_server_status)", "title": "" }, { "docid": "ae21ac7cd9493e6172ea3c002de37fe5", "score": "0.75282776", "text": "def get_server_status(self) -> str:\n response: Dict[str, Any] = get(\n url=self.status_url,\n headers=self.headers,\n )\n\n return response[\"content\"]", "title": "" }, { "docid": "2a12f840d2e3d8be4628173b96a69978", "score": "0.74453336", "text": "def server_status(self):\n req_id = self._send(b'SHOW_CONFIG', b'SHOW_CONFIG')\n return jsonapi.loads(self._recv(req_id).content[1])", "title": "" }, { "docid": "ae6089d0223a0907b0265e7d5f901399", "score": "0.738453", "text": "def get(self):\n ss = ServerStatus()\n return marshal(ss, SystemStatus.statusobj), 200", "title": "" }, { "docid": "586dba8392feec6413c93457e6267eff", "score": "0.7347337", "text": "def getStatus(self, returnOldState = False):\r\n assert type(self.objectId) == types.IntType, \\\r\n \"Granite server's object id is not known!\"\r\n \r\n debug.out('Checking Granite server status...')\r\n statusRequest = GRNStatusReq(recv_obj = self.objectId)\r\n\r\n try:\r\n response = GRNStatusResp(isi_message = \\\r\n self.transferMsg(statusRequest))\r\n state = inc.lookup('GRN_SERVER_STATE_',\r\n response.state, \\\r\n default = ['GRN_SERVER_STATE_unknown']) \\\r\n [0].split('STATE_')[1].lower()\r\n\r\n # FIXME: Remove this when GRN_SERVER_STATE_INFO and GRN_SERVER_STATE_REPORTING no longer\r\n # collide in granite_p_isi.h\r\n if state == 'info':\r\n state = 'reporting'\r\n \r\n debug.out('Status: %s' % state)\r\n\r\n self.server_version = {\"zzz\":response.version_zzz,\r\n \"yyy\":response.version_yyy,\r\n \"xxx\":response.version_xxx}\r\n\r\n # start display update ntf listener if server is new enough\r\n if self.getName() == \"Main\" and self.server_version['yyy'] >= 5:\r\n self.startNtfListener()\r\n\r\n if getattr(response, 'err_cause', None) != None:\r\n # prompt server reported error if necessary\r\n if response.err_cause != inc.GRN_CAUSE_NONE:\r\n debug.err('Granite server reported an error!')\r\n debug.err('Transaction ID of the last received test or ' \\\r\n 'result request: %s' % response.last_tid)\r\n debug.err('Cause of error: %s' % response.err_cause)\r\n debug.err('State of the server when error occurred: %s' % \\\r\n response.err_state)\r\n debug.err('Transaction ID of the last received test or ' \\\r\n 'result request when the error occurred: %s' % \\\r\n response.err_tid)\r\n\r\n # return also old_state if wanted\r\n if returnOldState and getattr(response, 'old_state', None) != None:\r\n try:\r\n oldState = inc.lookup('GRN_SERVER_STATE_',\r\n response.old_state)[0].split('STATE_')[1].lower()\r\n \r\n # FIXME: Remove this when GRN_SERVER_STATE_INFO and GRN_SERVER_STATE_REPORTING no longer\r\n # collide in granite_p_isi.h 
\r\n if oldState == 'info':\r\n oldState = 'reporting' \r\n \r\n except IndexError:\r\n oldState = None\r\n\r\n if not oldState:\r\n oldState = 'unknown (%s)' % str(response.old_state)\r\n\r\n debug.vrb('Old state: %s' % oldState)\r\n return (state, oldState)\r\n else:\r\n return state\r\n except ISIMessageException, err:\r\n debug.out(\"Invalid message received: %s\" % str(err))\r\n except TestException, err:\r\n debug.out(\"Getting Granite server's status failed: %s\" % str(err))", "title": "" }, { "docid": "72af28aec6f7b5617cbffc394772cbff", "score": "0.71871054", "text": "def status(server: utils.HikVisionServer):\n return utils.getXML(server, \"Streaming/status\")", "title": "" }, { "docid": "be8c6e366a7a1d580125fe72699a3fd2", "score": "0.7150405", "text": "def status(self):\n\n res = requests.get(Song.STATUS.format(self.room.ip_address))\n return res.text", "title": "" }, { "docid": "be8c6e366a7a1d580125fe72699a3fd2", "score": "0.7150405", "text": "def status(self):\n\n res = requests.get(Song.STATUS.format(self.room.ip_address))\n return res.text", "title": "" }, { "docid": "c31a447ed54fe81d392891a8e76d01f7", "score": "0.71436995", "text": "def get_status():", "title": "" }, { "docid": "c31a447ed54fe81d392891a8e76d01f7", "score": "0.71436995", "text": "def get_status():", "title": "" }, { "docid": "c31a447ed54fe81d392891a8e76d01f7", "score": "0.71436995", "text": "def get_status():", "title": "" }, { "docid": "c31a447ed54fe81d392891a8e76d01f7", "score": "0.71436995", "text": "def get_status():", "title": "" }, { "docid": "f785e4cfb5dde947bdf98c2c806d14fd", "score": "0.71140915", "text": "def getstatus(self): ##{{{\n\t\tstatus, data = self.connection.command(\"getstatus\")\n\t\tif status == \"statusResponse\":\n\t\t\tself.parse_getstatus(data) ##}}}", "title": "" }, { "docid": "244ea03083d637245bd6c37953be2124", "score": "0.7058122", "text": "def getServerStatus(IP):\n global gcontext\n\n call = (\"http://\" + IP + \"/\")\n logger.info('URL request is {}'.format(call))\n # Send command to fw and see if it times out or we get a response\n count = 0\n max_count = 15\n while True:\n if count < max_count:\n try:\n count = count + 1\n r = send_request(call)\n except DeployRequestException as e:\n logger.debug(\"Got Invalid response\".format(e))\n else:\n logger.info('Jenkins Server responded with HTTP 200 code')\n return 'server_up'\n else:\n break\n return 'server_down'", "title": "" }, { "docid": "5c69baa6bb448a9593086aef3af65413", "score": "0.70222616", "text": "def get_server_status(datasource='tranquility'):\n\n response = esi.get_status(datasource)\n server_name = datasource.capitalize()\n\n if response == \"offline\" or response == \"indeterminate\":\n\n attachment = discord.Embed(\n title=\"{}: {}\".format(server_name, response.capitalize()),\n color=COLOR.RED,\n ).add_field(\n name=\"Server time\",\n value=datetime.strftime(datetime.utcnow(), \"%Y-%m-%d %H:%M:%S\")\n )\n\n else:\n vip = response.get(\"vip\")\n started = datetime.strptime(response[\"start_time\"], \"%Y-%m-%dT%H:%M:%SZ\")\n\n attachment = discord.Embed(\n title=\"{}: Online\".format(server_name),\n color=COLOR.ORANGE if vip else COLOR.GREEN,\n ).add_field(\n name=\"Server time\",\n value=datetime.strftime(datetime.utcnow(), \"%Y-%m-%d %H:%M:%S\"),\n inline=False\n ).add_field(\n name=\"Players online\",\n value=\"{:,}\".format(response[\"players\"]),\n inline=False\n ).add_field(\n name=\"Started at\",\n value=datetime.strftime(started, \"%Y-%m-%d %H:%M:%S\"),\n inline=False\n ).add_field(\n name=\"Running for\",\n 
value=_running_for(started),\n inline=False\n )\n\n if vip:\n attachment.title = \"{}: In VIP mode\".format(server_name)\n\n return attachment", "title": "" }, { "docid": "6e42c77315b91144d30ee0dc5223d7e4", "score": "0.7013646", "text": "def status(self):\n url = self.node_url + 'system_stats'\n try:\n r = requests.get(url, timeout=0.1)\n json_content = r.json.im_self.content\n data = True # OKAY\n except:\n data = False # ERROR Condition\n return data\n\n return data", "title": "" }, { "docid": "3085fc40d29d50afcbc1108ea141bde7", "score": "0.69724876", "text": "def getStatus(self):\n\t\tself.querier.setMsgHandler(StatusMsgHandler(\"status\"))\n\t\treturn self.querier.queryext(0x2e, 0x0, [0,0,0])", "title": "" }, { "docid": "8842a8987132bdb0c8f95d985b514e89", "score": "0.6948358", "text": "def status(self):\n steam_appid = self.gsconfig['steamcmd']['appid']\n s = Screen(steam_appid)\n is_server_running = s.exists\n return is_server_running", "title": "" }, { "docid": "287b39513bc64064e13b919ca2ad1c3f", "score": "0.6908334", "text": "def get_status():\n return _send_request('/status')", "title": "" }, { "docid": "8f5bf024a9ae791e02bff934a04efa3f", "score": "0.69042337", "text": "def get_status(self):\n\t\tself.spi.xfer(self._commands[\"GET_STATUS\"])\n\t\tvalue=self.spi.xfer(0)*256+self.spi.xfer(0)\n\t\treturn self._parse_status(value)", "title": "" }, { "docid": "6c9af0b937bf36aa266cabd793df3ae1", "score": "0.6901487", "text": "def status(name):\n statuses = { -1: \"Error\",\n\t\t 0: \"Stopped\",\n\t\t 1: \"Running\",\n\t }\n #return \"status(\" + value + \"): \" + str(serverAPI.status(value))\n return statuses[serverAPI.status(name)]", "title": "" }, { "docid": "6f113351787344d5012b33865ee22812", "score": "0.6847689", "text": "def getServerState(opcServer):\n # type: (String) -> Optional[String]\n print(opcServer)\n return \"CONNECTED\"", "title": "" }, { "docid": "da34f690a8d3fea6ac130b71acec0d78", "score": "0.6841245", "text": "def status(self):\n self.board_alive()\n resource = urlopen(self._url + \"relays.cgi\")\n line = ''\n while True:\n line = resource.readline()\n if line.startswith(\"Status\"):\n break\n status = line.split()\n status.pop(0)\n status = [int(x) for x in status]\n return status", "title": "" }, { "docid": "bf7829a2318712def7b1ecef3d7fd5e0", "score": "0.68064785", "text": "def get_status():\n return NORDVPN.status()", "title": "" }, { "docid": "22c1597372779e598951a6461c2204c3", "score": "0.68028444", "text": "def status(self):\r\n return self.send_cmd('status','',StatusResponse)", "title": "" }, { "docid": "e2ccc81d957e5483abbcd4bc55f8c6ef", "score": "0.6783357", "text": "def get_remote_status(self):\r\n raise NotImplementedError", "title": "" }, { "docid": "08ab830225e293c158b47a57cbd36166", "score": "0.6762233", "text": "def server_status(self):\n try:\n mon_addr = current_app.config['monitor_addr']\n try:\n req = requests.get(mon_addr)\n data = req.json()\n except Exception:\n logger.warn(\"Couldn't connect to internal monitor at {}\".format(mon_addr),\n exc_info=True)\n output = {'stratum_clients': 0, 'agent_clients': 0}\n else:\n output = {'stratum_clients': data['stratum_clients'],\n 'agent_clients': data['agent_clients']}\n blob = Blob(key='server', data={k: str(v) for k, v in output.iteritems()})\n db.session.merge(blob)\n db.session.commit()\n except Exception:\n logger.error(\"Unhandled exception in server_status\", exc_info=True)\n db.session.rollback()", "title": "" }, { "docid": "e415acaa76ee921953f7a6f526409967", "score": "0.6762078", "text": "def 
status():\n return raw_status().text", "title": "" }, { "docid": "e62524bd433c3f6e22b9feb56737f1af", "score": "0.6753492", "text": "def status(self):\n self._status = self._send_command(\"l?\")\n return self._status", "title": "" }, { "docid": "4dc6d40d6f078cbcff0696a1ff4f1a44", "score": "0.67505366", "text": "def status(self):\n return self._get(\"/status\")", "title": "" }, { "docid": "9fd95f16b74c4190578a86755adc751b", "score": "0.6747007", "text": "def online(self):\n return ONLINE[self._status_request()['syncmodule']['status']]", "title": "" }, { "docid": "4196c369c73521ae898dc45463606e7d", "score": "0.67454183", "text": "def status(self):\n return self._mon.get_status()", "title": "" }, { "docid": "68fd916d83032aeec6447d3d5d14413a", "score": "0.67451566", "text": "def get_status(self):\n try:\n self.run_command('ping')\n except ServerNotRunningException:\n return ServerStatus.STOPPED\n\n return ServerStatus.RUNNING", "title": "" }, { "docid": "9f0836d7e735e300c57c90db7cc786a2", "score": "0.6724153", "text": "def get_server_status():\n LOG.info('status request received')\n response_data = {'server_status': 'OK'}\n status = 200 if response_data is not None else 403\n js = json.dumps(response_data, indent=2)\n return flask.Response(js, status=status, mimetype='application/json')", "title": "" }, { "docid": "785cb717d93c215d79a0589c5c4bd2bf", "score": "0.67200035", "text": "def get(self):\n\n return {\n 'is_online': True,\n 'message': 'The v2 API server is alive.'\n }, 200", "title": "" }, { "docid": "29ce4335e24e88582d2669fac65fb5d0", "score": "0.6713951", "text": "def server_status(self):\n try:\n self.receiver.setsockopt(zmq.RCVTIMEO, self.timeout)\n self._send(b'SHOW_CONFIG')\n return jsonapi.loads(self._recv().content[1])\n except zmq.error.Again as _e:\n t_e = TimeoutError(\n 'no response from the server (with \"timeout\"=%d ms), '\n 'is the server on-line? is network broken? are \"port\" and \"port_out\" correct?' 
% self.timeout)\n if _py2:\n raise t_e\n else:\n raise t_e from _e\n finally:\n self.receiver.setsockopt(zmq.RCVTIMEO, -1)", "title": "" }, { "docid": "559f459059b9818eabe8188a785fe459", "score": "0.6693978", "text": "def Status(self):\n running = self._service.IsRunning()\n if running:\n up = self._core.IsUp()\n print('UP' if up else 'STARTING')\n else:\n print('DOWN')", "title": "" }, { "docid": "fdd385418a93a1de34f57a4a4cdd3cd7", "score": "0.6687889", "text": "def get_status(self) -> str:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as connection:\n try:\n self.__send_string(connection, \"Status\")\n status: str = self.__receive_string(connection)\n info: str = self.__receive_string(connection)\n if status == \"MESSAGE\":\n return info\n if status == \"ERROR\":\n raise RuntimeError(info)\n raise RuntimeError(\"Unknown response format\")\n finally:\n connection.close()", "title": "" }, { "docid": "3bdfa6d1d2f7fc813166e2828981fa4d", "score": "0.66702306", "text": "def show_status():\r\n logging.info('START')\r\n\r\n # Getting Redis Server health based on the parameter captured\r\n redis_status = get_health_status(create_redis_connection(DEFAULT_DB),INFO,request.args.get('section',default=False,type=str))\r\n\r\n logging.info('END')\r\n\r\n return jsonify(redis_status),200", "title": "" }, { "docid": "c1c6ffc2e2f5f648e105b30677077b16", "score": "0.66513294", "text": "def status():\n with settings(hide('running', 'stdout', 'warnings'), warn_only=True):\n res = run_as_root('shorewall status')\n return re.search(r'\\nShorewall is (\\w+)', res).group(1)", "title": "" }, { "docid": "89c68268c3517f2acfa845cd6fb6d7dc", "score": "0.66134775", "text": "def view_monitoring_alive(request):\n return JsonResponse(get_json_success(\"server up\"))", "title": "" }, { "docid": "57f721dcc7cce27aed8c228b0766ba16", "score": "0.6591331", "text": "def dev_status(self):\n self.debug_stream(\"In dev_status()\")\n argout = \"\"\n #----- PROTECTED REGION ID(ManageServer.Status) ENABLED START -----#\n \n #----- PROTECTED REGION END -----#\t//\tManageServer.Status\n self.set_status(self.argout)\n self.__status = PyTango.Device_4Impl.dev_status(self)\n return self.__status", "title": "" }, { "docid": "6959e8fc6156c288e1111cbd5fb6e222", "score": "0.6579829", "text": "def getSystemStatus(self):\n path = \"/api/system/status\"\n res = self.request_get(path)\n return res.json()", "title": "" }, { "docid": "6ddb68e84760a9754f64d67cb7d7227f", "score": "0.6575213", "text": "def status(self):\n resp = self._get()\n rebooting = \"Please wait while Jenkins is getting ready to work\"\n if resp.status_code==200:\n return self.STATUS_READY\n elif rebooting in resp.content:\n return self.STATUS_REBOOT\n else:\n return self.STATUS_UNKNOWN", "title": "" }, { "docid": "f9b495d22056f45acc59f1e149922afe", "score": "0.65663695", "text": "def get_status(self):\n pass", "title": "" }, { "docid": "0fe36c641578e83510effbadc55cb4af", "score": "0.6564475", "text": "def status(live):\n commands.status(live)", "title": "" }, { "docid": "5a81c079834665141e867046ae4f5c03", "score": "0.6556734", "text": "def get_status(self):\n url = \"http://0.0.0.0:%s/status\" % self.port\n try:\n response = urllib.urlopen(url)\n if response.getcode() == 200:\n return json.loads(response.read().strip())\n else:\n msg = (\"Error while trying to get status backup system URL %s\"\n \" (Response code %s)\" % (url, response.getcode()))\n raise BackupSystemError(msg)\n\n except IOError, ioe:\n return {\n \"status\": BACKUP_SYSTEM_STATUS_STOPPED\n }", 
"title": "" }, { "docid": "757391319fa17a792357c4be2e191e50", "score": "0.65563226", "text": "async def status(ctx):\n\n s = self.taskMgr.getSystemStatus()\n\n if s:\n notices = s.get('notices')\n status = BotGlobals.GLOB_CODE_TO_STATUS.get(int(s.get('status')), 'Unknown')\n outages = s.get('outages')\n\n if notices:\n tmp = \"\"\n for i in notices.keys():\n notice = notices[i]\n msg = notice.get('text')\n flag = BotGlobals.SRV_CODE_TO_STATUS.get(int(notice.get('flag')))\n\n tmp += \"\\n**%s** | %s\\n**Message:** *%s*\\n\" % (\n flag, i, msg)\n elif s.get('status', 0) == 3:\n tmp = \"\\nThe Legend of Pirates Online is currently closed for an update. Check https://status.tlopo.com for more information!\\n\"\n else:\n tmp = \"\\nNo known notices.\"\n\n output = BotLocalizer.SYSTEM_STATUS_INFO % (status, tmp, outages)\n else:\n output = \"System status is unknown.\"\n\n await ctx.send(output)", "title": "" }, { "docid": "2cd9c0db74bb97aacc1b0b3face2bbc9", "score": "0.6539282", "text": "def status():\n res = sh.command(APD_QUERY.format('status'))\n return res.decode('utf-8')", "title": "" }, { "docid": "babb8e1a645f644518c32509d56b6545", "score": "0.6537413", "text": "def get_status(self):\n\n return self.async_request(\"EST\")", "title": "" }, { "docid": "4a0e592f84205d7cfbe1a1c78b9306a3", "score": "0.6520999", "text": "def getstatus(self):\n return self._request(\n url=self._build_url(\"getstatus\"), data=self.data, check_success=False\n )", "title": "" }, { "docid": "7c742dafefb8a848e142d7a63e4b5817", "score": "0.65054524", "text": "def online():\r\n return 'token server'", "title": "" }, { "docid": "e5537ab1babda57f274a481b80db5676", "score": "0.64957595", "text": "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "e5537ab1babda57f274a481b80db5676", "score": "0.64957595", "text": "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "e5537ab1babda57f274a481b80db5676", "score": "0.64957595", "text": "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "e5537ab1babda57f274a481b80db5676", "score": "0.64957595", "text": "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "e5537ab1babda57f274a481b80db5676", "score": "0.64957595", "text": "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "e5537ab1babda57f274a481b80db5676", "score": "0.64957595", "text": "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "e5537ab1babda57f274a481b80db5676", "score": "0.64957595", "text": "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "e75a1c6edaa42c7cf62e3020bfd01d86", "score": "0.64934844", "text": "def tv_status():\n return os.system(f\"ping -c 1 -t 1 {tv_ip} >/dev/null\") # pings TV IP and returns 0 if host is reachable", "title": "" }, { "docid": "16ff2cf701d7a48d2b6fb004ac2800d5", "score": "0.648641", "text": "def _check_status(self):\n self.system_status_lock.acquire()\n info = self.system_status_proxy._getvalue()\n self.system_status_lock.release()\n return info", "title": "" }, { "docid": "4a58aaa58f7ae006682b9fe116f72b98", "score": "0.6468967", "text": "def status(url=\"http://127.0.0.1/status\"):\r\n resp = urllib2.urlopen(url)\r\n status_data = resp.read()\r\n resp.close()\r\n\r\n lines = status_data.splitlines()\r\n if 
not len(lines) == 4:\r\n return\r\n # \"Active connections: 1 \"\r\n active_connections = lines[0].split()[2]\r\n # \"server accepts handled requests\"\r\n # \" 12 12 9 \"\r\n accepted, handled, requests = lines[2].split()\r\n # \"Reading: 0 Writing: 1 Waiting: 0 \"\r\n _, reading, _, writing, _, waiting = lines[3].split()\r\n return {\r\n 'active connections': int(active_connections),\r\n 'accepted': int(accepted),\r\n 'handled': int(handled),\r\n 'requests': int(requests),\r\n 'reading': int(reading),\r\n 'writing': int(writing),\r\n 'waiting': int(waiting),\r\n }", "title": "" }, { "docid": "7da381b6d4d020ae39840cbce74b8c81", "score": "0.6457647", "text": "def status():\n return ''", "title": "" }, { "docid": "4d61af542eb859f17e2f21d94f5a99df", "score": "0.64524096", "text": "def get_status():\n return {\"status\": \"Olymp is Up 💚\"}", "title": "" }, { "docid": "d58de18ff7be5d0f9b8978c632aa0ce2", "score": "0.6447514", "text": "def friendly_status():\n status = raw_status().json()[\"get_maintenance_status\"][\"status\"]\n ret = \"\"\n for machine in status.get(\"draining_machines\", []):\n ret += \"{} ({}): Draining\\n\".format(\n machine[\"id\"][\"hostname\"], machine[\"id\"][\"ip\"]\n )\n for machine in status.get(\"down_machines\", []):\n ret += \"{} ({}): Down\\n\".format(machine[\"hostname\"], machine[\"ip\"])\n return ret", "title": "" }, { "docid": "0ae500503c45ef465a56af34c4219245", "score": "0.6437676", "text": "def view_monitoring_alive(request):\n return JsonResponse(dict(status=\"ok\",\n message=\"TwoRavens python server up\"))", "title": "" }, { "docid": "bdb56e45d91b59900bef7227cbfd98db", "score": "0.6431565", "text": "def status(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "ecefbb5584acc36ce68134f4ea855ae4", "score": "0.6427176", "text": "def list_servers_status(self):\n if self.get_servers():\n x = PrettyTable()\n x.field_names = [\"Name\", \"UUID\", \"Current State\"]\n x.align[\"Name\"] = \"l\"\n x.align[\"Current State\"] = \"l\"\n for server in self.servers:\n if server.state == \"running\":\n x.add_row([server.name, server.uuid, prGreen(server.state)])\n elif server.state == \"stopped\":\n x.add_row([server.name, server.uuid, prRed(server.state)])\n else:\n x.add_row([server.name, server.uuid, server.state])\n print(x)\n return True", "title": "" }, { "docid": "ad1adff868a8b3cea650848714d8eabe", "score": "0.6421061", "text": "def get_server_hardware_status(status=None):\n if status:\n return {'status': 'OK'}\n else:\n return {'status': 'Warning', 'modified': '2014-08-07T11:00:11.467Z'}", "title": "" }, { "docid": "df75332221e5e5c30d1cb4a9d4f4a912", "score": "0.6414233", "text": "def get_status(self):\n return GetStatus(*self.ipcon.send_request(self, GPS.FUNCTION_GET_STATUS, (), '', 'B B B'))", "title": "" }, { "docid": "68cfda4babf09487b04eaded4ce16255", "score": "0.6411734", "text": "def getStatus(self):\n return self._getStatus(self._getPort())", "title": "" }, { "docid": "fbaab10963e6ff09e4f8a096ee78c37c", "score": "0.6409854", "text": "def status():\n cm = get_webcore().cloud\n coin = get_webcore().coin\n\n return jsonify({\n \"bandwidth\": {\n \"total\": {\n \"incoming\": cm.total_incoming(),\n \"outgoing\": cm.total_outgoing()\n },\n \"current\": {\n \"incoming\": cm.current_incoming(),\n \"outgoing\": cm.current_outgoing()\n },\n \"limits\": {\n \"incoming\": settings.TRANSFER_MAX_INCOMING,\n \"outgoing\": settings.TRANSFER_MAX_OUTGOING\n }\n },\n\n \"storage\": {\n \"capacity\": cm.capacity(),\n \"used\": 
cm.used_space(),\n \"max_file_size\": settings.STORAGE_FILE\n },\n\n \"sync\": {\n \"cloud_queue\": cm.upload_queue_info(),\n \"blockchain_queue\": cm.blockchain_queue_info()\n },\n\n \"metachains\": {\n \"coin\": 'florincoin',\n \"address\": coin.address(\"incoming\"),\n \"block\": cm.last_known_block()\n }\n })", "title": "" }, { "docid": "c7d2448e74a897b5ea06a60635ee1896", "score": "0.64032406", "text": "def get_status(self):\n return self._ti_api.get_status()", "title": "" }, { "docid": "d35ec8312c390ceced2c22f03c4dbbe4", "score": "0.6402787", "text": "def status(self):\n\t\treturn self._parse_status(self[\"status\"])", "title": "" }, { "docid": "81fea4af9959de9491741c1c5f8101fd", "score": "0.64025635", "text": "def connection_status(self):\n return rpc.status() # XXX: ``rpc`` not defined", "title": "" }, { "docid": "ca3d70d081dbfd08f7d9fbff57c13a13", "score": "0.640222", "text": "def online(self):\n try:\n return ONLINE[self.status]\n except KeyError:\n _LOGGER.error(\"Unknown sync module status %s\", self.status)\n self.available = False\n return False", "title": "" }, { "docid": "45322cf1b6b6c7e1031a5f02510a338d", "score": "0.6395143", "text": "def get_status(self):\r\n _wait(self.lock_server_running_filename)\r\n try:\r\n fileobj = open(self.status_filename, \"r\")\r\n status = int(fileobj.read())\r\n fileobj.close()\r\n return status\r\n except Exception:\r\n return None", "title": "" }, { "docid": "665963a48608405833e1bebb500c409b", "score": "0.63861537", "text": "def get_status(self):\n return self.__get_data(self.url.status_url())", "title": "" }, { "docid": "6fbacacf202e4315455fdb616618afe5", "score": "0.63853014", "text": "def getStatus(self):\n\n cmd = \"/usr/sbin/adsl-status\"\n i, output = self.capture(cmd)\n\n return output", "title": "" }, { "docid": "692e55552011e148a15a6b1fa23a6465", "score": "0.63829386", "text": "def _get_status(self):\n command = self._command.STATUS_CMD\n wdata = bytearray([command.value, 1, 0xFF, 0xFF, 0xFF])\n rdata = self.spi_rack.read_data(self.module, 0, SAMD51_MODE, SAMD51_SPEED, wdata)\n \n values = {0:'running', 1:'idle', 2:'waiting', 3:'booted', 4:'readout', 5:'cancelled', 6:'done'}\n return values[rdata[-1]]", "title": "" }, { "docid": "1eda46e78f43282c3e6e98fd9a52b21e", "score": "0.63796806", "text": "def get_status(self):\n return simplejson.loads(self._make_get_request(\"/cloud/instance_state_json\"))", "title": "" }, { "docid": "29652c536584f1060d99c02b37c1df9b", "score": "0.6366396", "text": "def status(self):\n return self.info()['status']['state']", "title": "" }, { "docid": "a0453bda1cb753056b4321c6949c88ea", "score": "0.6359224", "text": "def status(self, raw=False):\n data = self.__send_cmd(SHT30.STATUS_CMD, 3, read_delay_ms=50)\n\n status_register = data[0] << 8 | data[1]\n status = {\n 'Checksum of last write transfer was correct': not (status_register & 1),\n 'Last command executed successfully': not (status_register & 2),\n 'Reset detected': bool(status_register & 16),\n 'Temperature tracking alert': bool(status_register & 1024),\n 'Humidity tracking alert': bool(status_register & 2048),\n 'Heater is ON': bool(status_register & 8192),\n 'At least one pending alert': bool(status_register & 32768)\n }\n return status_register if raw else status", "title": "" }, { "docid": "c56168f510de32da8612ac1c1dd719a3", "score": "0.6351636", "text": "def status():\n args = arguments.from_request()\n last_timestamp = args.get('last_timestamp', 0)\n force = args.get('force', False)\n\n if not environ.remote_connection.active:\n results = 
ui_statuses.get_status(last_timestamp, force)\n return flask.jsonify(results)\n\n # When connected remotely, get the status from the remote kernel and\n # then merge it with local information that may not have been synced\n # to the remote kernel yet.\n lost_errors = (\n ConnectionError,\n requests_exceptions.ConnectionError,\n requests_exceptions.ConnectTimeout,\n url_exceptions.MaxRetryError,\n )\n\n try:\n remote_status = requests.post(\n '{}/ui-status'.format(environ.remote_connection.url),\n json=args\n ).json()\n except lost_errors as error:\n ui_configs.status_failures += 1\n return (\n environ.Response().fail(\n code='LOST_REMOTE_CONNECTION',\n message='Unable to communicate with the remote kernel.',\n error=error\n )\n .console_if(\n display_condition=ui_configs.status_failures < 2,\n whitespace=1\n )\n .response\n .flask_serialize()\n )\n\n ui_configs.status_failures = 0\n return flask.jsonify(ui_statuses.merge_local_state(remote_status, force))", "title": "" }, { "docid": "a5d5529bd05d0b5f6421ec8d5b606a10", "score": "0.63447845", "text": "def status():\n global STATUS\n return STATUS", "title": "" }, { "docid": "9ccd59251dcbcdf75eb47875b2fb2380", "score": "0.63440174", "text": "def status(self) -> 'outputs.GoogleRpcStatusResponse':\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "17cc31e122f17d0cf2429507c3a60e28", "score": "0.63418496", "text": "def status(self) -> str:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "17cc31e122f17d0cf2429507c3a60e28", "score": "0.63418496", "text": "def status(self) -> str:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "17cc31e122f17d0cf2429507c3a60e28", "score": "0.63418496", "text": "def status(self) -> str:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "17cc31e122f17d0cf2429507c3a60e28", "score": "0.63418496", "text": "def status(self) -> str:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "17cc31e122f17d0cf2429507c3a60e28", "score": "0.63418496", "text": "def status(self) -> str:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "17cc31e122f17d0cf2429507c3a60e28", "score": "0.63418496", "text": "def status(self) -> str:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "17cc31e122f17d0cf2429507c3a60e28", "score": "0.63418496", "text": "def status(self) -> str:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "17cc31e122f17d0cf2429507c3a60e28", "score": "0.63418496", "text": "def status(self) -> str:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "17cc31e122f17d0cf2429507c3a60e28", "score": "0.63418496", "text": "def status(self) -> str:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "17cc31e122f17d0cf2429507c3a60e28", "score": "0.63418496", "text": "def status(self) -> str:\n return pulumi.get(self, \"status\")", "title": "" }, { "docid": "17cc31e122f17d0cf2429507c3a60e28", "score": "0.63418496", "text": "def status(self) -> str:\n return pulumi.get(self, \"status\")", "title": "" } ]
fcd071579d4fd2aeabf01752e31d76e3
Run jobs from a sql table.
[ { "docid": "931353192b9fdd9a479504f52d6fc813", "score": "0.5596132", "text": "def runner_sql(options, dbdescr, exproot):\n if options.modules:\n modules = options.modules.split(',')\n else:\n modules = []\n for module in modules:\n __import__(module, fromlist=[])\n\n db = open_db(dbdescr, serial=True)\n n = options.n if options.n else -1\n nrun = 0\n try:\n while n != 0:\n if options.workdir:\n workdir = options.workdir\n else:\n if options.workdir_dir and not os.path.exists(options.workdir_dir):\n os.mkdir(options.workdir_dir)\n workdir = tempfile.mkdtemp(dir=options.workdir_dir)\n print \"The working directory is:\", os.path.join(os.getcwd(), workdir)\n\n channel = DBRSyncChannel(db,\n workdir,\n exproot,\n redirect_stdout=True,\n redirect_stderr=True,\n finish_up_after=options.finish_up_after or None,\n save_interval=options.save_every or None\n )\n channel.run()\n\n # Useful for manual tests; leave this there, just commented.\n # cachesync_runner.manualtest_before_delete()\n with cachesync_lock(None, workdir):\n # Useful for manual tests; leave this there, just\n #commented. cachesync_runner.manualtest_will_delete()\n\n shutil.rmtree(workdir, ignore_errors=True)\n\n n -= 1\n nrun += 1\n except JobError, e:\n if e.args[0] == JobError.NOJOB:\n print 'No more jobs to run (run %i jobs)' % nrun", "title": "" } ]
[ { "docid": "f6c1fab4be007de17dfb374158154517", "score": "0.67062944", "text": "def runSQL(self, sql, label):\n startTime = time.time()\n verbose(label + '...')\n if type(sql) == type(''):\n results = db.sql(sql.split(self.SQLSEPARATOR), 'auto')\n else:\n results = db.sql(sql, 'auto')\n verbose( \"SQL time: %8.3f seconds\\n\" % (time.time()-startTime) )\n return results", "title": "" }, { "docid": "d3831337487d3463624aaa07435726b8", "score": "0.6236351", "text": "def batch(self, sqls):\n with self.connection() as cur:\n for sql in sqls:\n cur.execute(query)", "title": "" }, { "docid": "41e316754d128e5d87d8b726f4c2559f", "score": "0.6232698", "text": "def execute(self, jobs, conn = None, transaction = False):\n\n if len(jobs) == 0:\n return\n\n statusTime = int(time.time())\n binds = []\n for job in jobs:\n binds.append({'jobid': job['jobid'], 'gridid': job.get('gridid', None), 'bulkid': job.get('bulkid', None),\n 'status': job.get('status', None), 'retry_count': job['retry_count'], 'userdn': job['userdn'],\n 'usergroup': job['usergroup'], 'userrole': job['userrole'],\n 'location': job.get('siteName'), 'status_time': statusTime})\n\n result = self.dbi.processData(self.sql, binds, conn = conn,\n transaction = transaction)\n\n return", "title": "" }, { "docid": "b24b9efaa01a46ece1acffafee215c88", "score": "0.6178296", "text": "def execute_sql(self,sql):\n self.query(sql)", "title": "" }, { "docid": "37262b0de97e97176d1a2d66320288c5", "score": "0.61121655", "text": "def run(self, name):\n self.executescript(sql_command(name))", "title": "" }, { "docid": "4ccf5d666b114c3c0b93cc7f95a4f407", "score": "0.6081723", "text": "def execute(self, df):\n\n (pre,sql) = self.queryGenerator.generate(df)\n for pq in pre:\n # print(pq)\n self._execute(pq).close()\n # print(sql)\n return self._execute(sql)", "title": "" }, { "docid": "3082a8ef9b135221e0de86c72dc79b34", "score": "0.5962918", "text": "def run_sql(sql_list):\n for query in sql_list:\n meta.Session.execute(query)", "title": "" }, { "docid": "b088124ea13987519aacea4d89adc2bb", "score": "0.58919364", "text": "def run_job():", "title": "" }, { "docid": "e2d3b2d426462277c6abc517b8d58bbc", "score": "0.58714205", "text": "def runsql(self, sql):\n self.connect()\n cursor = self.db.execute(sql)\n self.db.commit()\n return cursor", "title": "" }, { "docid": "3b75b1122c3e8a0147b261691fb06368", "score": "0.5867697", "text": "def run_query(workflow, log, sql):\n\n db_path = workflow.stored_data(DB_KEY)\n if not db_path:\n db_path = find_agenda_db(log)\n workflow.store_data(DB_KEY, db_path)\n else:\n log.debug(db_path)\n\n conn = sqlite3.connect(db_path)\n cursor = conn.cursor()\n log.debug(sql)\n cursor.execute(sql)\n results = cursor.fetchall()\n log.debug(\"Found {0} results\".format(len(results)))\n cursor.close()\n return results", "title": "" }, { "docid": "195df65fb760e5a77f43b37b365e2648", "score": "0.5857775", "text": "def __init__(self, job_id, table_name, sql, context):\n super(QueryJob, self).__init__(job_id, context)\n self._sql = sql\n self._table = _query_results_table.QueryResultsTable(table_name, context, self,\n is_temporary=True)\n self._bytes_processed = None\n self._cache_hit = None\n self._total_rows = None", "title": "" }, { "docid": "33fbc18736157f59deefa412206b1595", "score": "0.5856329", "text": "def runner_sqlreload(options, dbdescr, table_dir, *ids):\n if table_dir[-1] == os.path.sep:\n table_dir = table_dir[:-1]\n\n db = open_db(dbdescr, serial=True)\n\n assert os.path.split(table_dir)[-1] == db.tablename\n assert 
os.path.split(os.path.split(table_dir)[0])[-1] == db.dbname\n expdir = os.path.split(os.path.split(table_dir)[0])[0]\n\n if options.all:\n assert len(ids) == 0\n ids = []\n for p in os.listdir(table_dir):\n try:\n ids += [int(p)]\n except ValueError:\n print 'Skipping entry %s, as it is not a jobman id.' % p\n else:\n # Ensure that ids are all integers.\n ids = [int(d) for d in ids]\n\n try:\n session = db.session()\n for id in ids:\n # Get state dict from the file\n file_name = '%s/%i/current.conf' % (table_dir, id)\n file_state = parse.filemerge(file_name)\n\n # Get state dict from the DB\n db_state = db.get(id)\n if db_state is None:\n # No such dict exist, we have to insert it, with the right id\n file_state['jobman.id'] = id\n db.insert(file_state, session=session)\n else:\n db_state.update_in_session(file_state, session=session)\n pass\n finally:\n session.close()", "title": "" }, { "docid": "fe08fff92d373e2fb7cccbc28aff0caf", "score": "0.58003277", "text": "def run(job):", "title": "" }, { "docid": "d3fbcc4151fdea4c343a6b243a8ab15d", "score": "0.5742108", "text": "def stage_data(self,df,table):\r\n # Load data to staging table.\r\n\r\n # Add batch load timestamp.\r\n df['loadDatetime']=datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S.%f\")[:-3]\r\n\r\n if not df.empty:\r\n try:\r\n df.to_sql(table, self.engine, schema=self.database_schema, if_exists='append', index=False, chunksize=1000)\r\n except pyodbc.Error as exception:\r\n error_number, error_message = exception.args[0], exception.args[1]\r\n logging.error(\"Unable to connect to database: [{0}] {1} Exiting program.\".format(error_number, error_message))\r\n exit(1)\r\n except:\r\n logging.error(\"Unable to connect to database. Exiting program.\")\r\n exit(1)", "title": "" }, { "docid": "3108d7043f2138614349d0d8d442c63a", "score": "0.5732906", "text": "def _import_tables(self: typing.Any) -> None:\n logger.info(\"*** Importing tables...\")\n for table, _, _ in tables:\n logger.info('*** Importing table \"%s\"...', table)\n command = (\n f\"gcloud sql import csv {self.sql_instance_name} \"\n f\"{self._cloud_storage_path_to_imported_table(table)} \"\n f\"--database={self.temporary_database_name} \"\n f\"--project {self.tenant_project_name} \"\n f\"--table={table} -q --async\"\n )\n operation_id = EnvironmentUtils.execute_command_in_a_pod(\n self.worker_pod_namespace,\n self.worker_pod_name,\n self.worker_container_name,\n command,\n )\n logger.info('*** Waiting for table \"%s\" to be imported...', table)\n command = (\n f\"gcloud sql operations wait {operation_id} --timeout=3600 \"\n f\"--project {self.tenant_project_name}\"\n )\n EnvironmentUtils.execute_command_in_a_pod(\n self.worker_pod_namespace,\n self.worker_pod_name,\n self.worker_container_name,\n command,\n )", "title": "" }, { "docid": "e083d278461b0db82269268bdb061867", "score": "0.5708928", "text": "def execute_sql(self, sql):\n cursor = connection.cursor()\n cursor.execute(sql)", "title": "" }, { "docid": "0be58f9736e94372c0a09977476f6ea3", "score": "0.5674252", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n print(query)\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "87c5d9962c7b55993691afcb9ad2fc3f", "score": "0.5669891", "text": "def apply_job(self,query_data,tuple_data):\n self.cur.execute(query_data,tuple_data)\n self.conn.commit()", "title": "" }, { "docid": "8037d02faf6887fad264c204545cf53c", "score": "0.5650961", "text": "def dbQuery(query, table):\n\t#print 'Doing dbQuery: %s' % query\n\tconn = 
MySQLdb.connect(host = ms_dbhost,\n user = ms_dbuser,\n passwd = ms_password,\n db = ms_dbname)\n\tcursor = conn.cursor ()\n\tcursor.execute (query)\n\trow = cursor.fetchone ()\n\tcursor.close ()\n\tconn.close()\n\tif row == None:\n\t\treturn None\n\tres = None\n\tif table == 'queue':\n\t\tres = Job(row)\n\telif table == 'score':\n\t\tres = Score(row)\n\telse:\n\t\traise Exception\n\t#print 'Query done!'\n\treturn res", "title": "" }, { "docid": "3e54356aba7ff4ca2e6012b991fd8665", "score": "0.5650918", "text": "def execute_many(conn, df, table):\r\n # Create a list of tupples from the dataframe values\r\n tuples = [tuple(x) for x in song_df.to_numpy()]\r\n # Comma-separated dataframe columns\r\n cols = ','.join(list(df.columns))\r\n # SQL quert to execute\r\n query = \"INSERT INTO %s(%s) VALUES(%%s,%%s,%%s)\" % (table, cols)\r\n cursor = conn.cursor()\r\n try:\r\n cursor.executemany(query, tuples)\r\n conn.commit()\r\n except (Exception, psycopg2.DatabaseError) as error:\r\n print(\"Error: %s\" % error)\r\n conn.rollback()\r\n cursor.close()\r\n return 1\r\n print(\"execute_many() done\")\r\n cursor.close()", "title": "" }, { "docid": "f7d95555189e3199a11562ab8f61fc50", "score": "0.5648072", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n print(\"Copy statement started to execute.. Please wait..\")\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "355a64296089e120c9c732f0687c65d4", "score": "0.56406385", "text": "def exec_sql(sql, db='zipcode2.sqlite'):\n if sql.endswith(';'): sql = sql[:-1]\n con = lite.connect(db, isolation_level=None)\n con.row_factory = lite.Row\n cur = con.cursor()\n for statement in sql.split(';'):\n cur.execute(sql)\n return cur.fetchall()", "title": "" }, { "docid": "565a7b3e15a2b8c47fd24511a3a8eab5", "score": "0.5610197", "text": "def execute_many(conn, df, table):\n # Create a list of tupples from the dataframe values\n tuples = [tuple(x) for x in df.to_numpy()]\n # print(tuples)\n # Comma-separated dataframe columns\n cols = \",\".join(list(df.columns))\n # SQL quert to execute\n query = 'INSERT INTO \"Stocks\"(%s) VALUES(%%s,%%s,%%s,%%s,%%s,%%s,%%s)' % (cols,)\n cursor = conn.cursor()\n try:\n cursor.executemany(query, tuples)\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Error: %s\" % error)\n conn.rollback()\n cursor.close()\n return 1\n print(\"execute_many() done\")\n cursor.close()", "title": "" }, { "docid": "c3753c00232bda9f97bc4e813dffd7d8", "score": "0.55987775", "text": "def test_one_table(self):\n job = base_job.Job(os.path.join(os.getcwd(), 'my_sql_one.json'))\n job.connect_to_database()\n self.assertEqual(['states'], mysql_worker.get_tables(job))", "title": "" }, { "docid": "c04d6cc2504fc5823f82206afa4405b3", "score": "0.55671847", "text": "def run_sql_file(filename):\n db, c = connect_database()\n file = open(filename, 'r')\n sql = \" \".join(file.readlines())\n c.execute(sql)\n db.commit()\n close_database(db)", "title": "" }, { "docid": "3ded3eb7753aaff10ee100fef9ccf486", "score": "0.5550236", "text": "def execute_sql(self, sql):\n self.cursor.execute(sql)\n return self.cursor.fetchall()", "title": "" }, { "docid": "9d7956c1672443a05137150e1ceb1b06", "score": "0.55454326", "text": "def execute_sql(filename):\n cursor = get_cursor()\n with open(filename, 'rb') as f:\n queries = str(f.read(), 'utf-8').split(';')[:-1]\n for query in queries:\n cursor.execute(query)", "title": "" }, { "docid": "aaaf19243aa5a1f29f5d4852d293b485", "score": "0.5535624", "text": "def 
simpleQuery(self, sql, args):\n d = self.db.runQuery(sql, args)\n d.addErrback(self.sqlerror)", "title": "" }, { "docid": "e2fc33296e931f59538854e68759cbbc", "score": "0.5512534", "text": "def start_jobs(shot_id):\n for job in Jobs.select().where(Jobs.shot_id == shot_id,\n Jobs.status == 'ready'):\n print(start_job(job.id))", "title": "" }, { "docid": "b8933aa76f7b0e7dab252d029500cd02", "score": "0.5511551", "text": "def on_batch_sql_button_clicked(self, widget, crudel):\n if crudel.get_param(\"sqls\", None) :\n # Exécution de plusieurs ordres\n values = self.crud.get_table_values(elements=self.crud.get_view_elements())\n for sql_brut in crudel.get_param(\"sqls\"):\n sql = self.crud.replace_from_dict(sql_brut, values)\n self.crud.exec_sql(self.crud.get_basename(), sql, {})\n else :\n sql = crudel.get_param_replace(\"sql\")\n self.crud.exec_sql(self.crud.get_basename(), sql, {})\n self.emit(\"refresh_data_view\", \"\", \"\")", "title": "" }, { "docid": "264ab8b1c487cfebebd0cd46bb6b97f5", "score": "0.5508543", "text": "def run(batch_type):\r\n\r\n # getting station id's to get the data for each station\r\n print('\\nGetting Station IDs...\\n')\r\n stations_id_list = ml.get_station_id(weather_stations)\r\n\r\n # Getting connection\r\n conn = db.connect()\r\n\r\n # Creating tables if not created\r\n print(\"Table Creating if not exists...\\n\")\r\n status = db.execute_query(conn, qu.create_table_query)\r\n if status == 0:\r\n print(\"Error in Table Creation...\\n\")\r\n return 0\r\n status.close()\r\n\r\n\r\n # Calling either daily or historical function depends upon batch_type\r\n status = batch_type_dict[batch_type](batch_type, stations_id_list, conn)\r\n if status != 0:\r\n print('Script completed successfully...\\n')\r\n\r\n db.disconnect(conn)", "title": "" }, { "docid": "c7d7236d878851fe28208a2acfe31417", "score": "0.55044645", "text": "def submit_static_job_df(df: pd.Series):\n\n execute_line = df['execute_lines']\n parser_kwargs = df['parser_kwargs'] if type(df['parser_kwargs']) == dict else {}\n for k, v in parser_kwargs.items():\n if isinstance(v, list):\n execute_line += ' --{} {}'.format(k, ' '.join(map(str, v)))\n else:\n execute_line += f' --{k}={v}'\n pbs_kwargs = df['pbs_kwargs'] if type(df['pbs_kwargs']) == dict else {}\n run_me = pbs_file(execute_line, **pbs_kwargs)\n print('running: {}'.format(run_me))\n process = sp.Popen([f'qsub {run_me}'], stdout=sp.PIPE, stderr=sp.PIPE, shell=True)\n out, err = process.communicate()\n return out.decode('utf-8').strip()", "title": "" }, { "docid": "7260f71b0d68d2a7688283cccbbcd88a", "score": "0.54988664", "text": "def execute_sql_from_file(self, path):\n self.execute_sql(self.get_sql_from_file(path))", "title": "" }, { "docid": "09d96632eedca446a453378245ea03e5", "score": "0.5494264", "text": "def load_staging_tables(cur, conn):\n\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "4585b816605d25988097a8312c94f753", "score": "0.5488514", "text": "def to_submit(self):\n self.conn.row_factory = sq.Row\n cur = self.conn.cursor()\n cur.execute(\"SELECT * FROM idx where status = 'NEW'\")\n rows = cur.fetchall()\n \n for row in rows:\n yield Job(row)", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.54804355", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.54804355", "text": "def load_staging_tables(cur, conn):\n for query in 
copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.54804355", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.54804355", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "cf87639424267de38ca492c69e69a1f1", "score": "0.54804355", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "1c8972396049ae693b6e5d9f8660ac22", "score": "0.5472166", "text": "def run_table(args):\n alias_list = args.step_parameters.split(\",,\")\n if args.step_parameters == \"\":\n raise ValueError(\"ERROR: 'source,alias' must be specified with --step_parameters (-p)\")\n\n ns_parameters = []\n step_job = ju.Job(\"tabler\", args)\n\n for pair in alias_list:\n src, alias = pair.split(\",\")\n\n alias_path = os.path.join(src, alias)\n local_chunk_dir = os.path.join(args.working_dir, args.data_path, alias_path, \"chunks\")\n if not os.path.exists(local_chunk_dir):\n raise IOError('ERROR: \"source,alias\" specified with --step_parameters '\n '(-p) option, ' + pair + ' does not have chunk directory:'\n + local_chunk_dir)\n\n chunk_ctr = 0\n for chunk_name in sorted(os.listdir(local_chunk_dir)):\n if \"raw_line\" not in chunk_name or \"unique\" in chunk_name:\n continue\n output_files = chunk_name.replace('.raw_line.', '.*.')\n chunk_ctr += 1\n print(\"\\t\".join([str(chunk_ctr), chunk_name]))\n\n jobname = \"-\".join([\"table\", chunk_name])\n jobname = jobname.replace(\".\", \"-\")\n jobname = jobname.replace(\".txt\", \"\")\n jobdict = generic_dict(args, None)\n jobdict.update({'TMPJOB': jobname,\n 'TMPALIASPATH': alias_path,\n 'TMPCHUNK': os.path.join(\"chunks\", chunk_name),\n 'TMPFILES': os.path.join(\"chunks\", output_files)\n })\n step_job = ju.run_job_step(args, \"tabler\", jobdict)\n\n ns_parameters.extend([chunk_name.replace('.raw_line.', '.table.')])\n\n if not args.setup and not args.one_step and args.chronos not in SPECIAL_MODES:\n ns_jobname = \"-\".join([jobname, \"next_step\"])\n ns_dict = generic_dict(args, step_job.jobname)\n ns_dict.update({'TMPJOB': ns_jobname,\n 'TMPNEXTSTEP': \"MAP\",\n 'TMPSTART': chunk_name.replace('.raw_line.', '.table.'),\n 'TMPOPTS': \" \".join([args.config_opts, args.workflow_opts,\n '-d', ns_jobname])\n })\n ju.run_job_step(args, \"next_step_caller\", ns_dict)\n\n if not args.setup and not args.one_step and args.chronos in SPECIAL_MODES and \\\n ns_parameters:\n ns_dict = generic_dict(args, step_job.jobname)\n ns_dict.update({'TMPJOB': \"-\".join([\"table\", \"next_step\"]),\n 'TMPNEXTSTEP': \"MAP\",\n 'TMPSTART': \",,\".join(ns_parameters),\n 'TMPOPTS': \" \".join([args.config_opts, args.workflow_opts,\n '-d', \"-\".join([\"table\", \"next_step\"])])\n })\n tmpargs = args\n tmpargs.chronos = \"LOCAL\"\n ju.run_job_step(tmpargs, \"next_step_caller\", ns_dict)\n\n return 0", "title": "" }, { "docid": "d1c70690336d7d49e872284d9bf8e362", "score": "0.5471556", "text": "def insert_data_into_database(table_name):\n conn = connect()\n cur = connect_to_schema(conn, \"academics\")\n sql_list = get_query_list(\"Queries.sql\")\n\n create_table(table_name)\n\n for query in sql_list:\n planning_time = float(get_planning_time(query))\n execution_time = 
float(get_execution_time(query))\n client_time = float(get_client_time(query))\n cold_run = float(get_execution_time(query))\n hot_run = 0\n for i in range(3):\n hot_run += float(get_execution_time(query))\n hot_run = hot_run/3\n try:\n\n cur.execute(\"INSERT INTO \" + table_name +\n \"(planning_time, execution_time, run_time, query, client_time, cold_run, hot_run) \"\n \"VALUES(%s, %s, %s, %s, %s, %s, %s)\",\n (planning_time, execution_time, execution_time, query, client_time, cold_run, hot_run))\n print \"Successfully inserting data.\"\n conn.commit()\n except psycopg2.Error as e:\n print e", "title": "" }, { "docid": "63fdb33ee3985ca4cf3582f2f76a76ab", "score": "0.54544014", "text": "def _one_statement_table__list(self, sql):\n def _wrapped_method(self, list_kwargs):\n \"\"\"\n Exec the statement and return a list with the queries\n\n @param list_kwargs: the keyword arguments of the query\n @type list_kwargs: dict\n\n @return: the queried tables\n @rtype: list of queries (tables, list of tuples...) or None\n \"\"\"\n result = []\n\n with self.tx_manager as conn:\n cursor = conn.cursor()\n\n for kwargs in list_kwargs:\n cursor.execute(sql, kwargs)\n\n result.append(cursor.fetchall())\n\n return result\n\n return _wrapped_method", "title": "" }, { "docid": "0dcd2eed40b9cf479a4f9d5f6c8e0cf7", "score": "0.5452117", "text": "def runner_sqlschedule(options, dbdescr, experiment, *strings):\n db = open_db(dbdescr, serial=True)\n\n parser = getattr(parse, options.parser, None) or resolve(options.parser)\n\n state = parser(*strings)\n # we try to load the function associated to the experiment\n resolve(experiment)\n state['jobman.experiment'] = experiment\n sql.add_experiments_to_db([state], db, verbose=1, force_dup=options.force)", "title": "" }, { "docid": "78ec77ebb40452a95988276a10a1b721", "score": "0.5449223", "text": "def sql_do(self, sql, *params):\r\n\t\tself.c.execute(sql, params)\r\n\t\tself.commit()", "title": "" }, { "docid": "a1a606d19bc2197ad8edd3e5ec019d48", "score": "0.5447683", "text": "def runner_sqlstatus(options, dbdescr, *ids):\n # we don't want to remove all output when we change the db.\n if options.set_status and options.ret_nb_jobs:\n raise UsageError(\n \"The option --set_status and --ret_nb_jobs are mutually exclusive.\")\n\n db = open_db(dbdescr, serial=True)\n\n if options.set_status:\n try:\n new_status = to_status_number(options.set_status)\n except ValueError:\n raise UsageError(\n \"The option --set_status accept only the value START, RUNNING, DONE, ERR_START, ERR_SYNC, ERR_RUN, CANCELED or their equivalent int number\")\n else:\n new_status = None\n\n have_running_jobs = False\n verbose = not options.quiet\n if options.ret_nb_jobs:\n verbose = 0\n else:\n verbose += 1\n ids = list(ids)\n try:\n session = db.session()\n\n if options.print_keys:\n q = db.query(session)\n job = q.first()\n print \"Keys in the state of the first jobs\",\n for k in job.keys():\n print k,\n print\n del q, job, k\n\n if options.status:\n q = db.query(session)\n jobs = []\n for stat in options.status:\n jobs += q.filter_eq('jobman.status',\n to_status_number(stat)).all()\n\n ids.extend([j.id for j in jobs])\n del jobs, q\n\n if options.select:\n q = db.query(session)\n j = q.first()\n for param in options.select:\n k, v = param.split('=')\n if k == 'jobman.status':\n q = q.filter_eq(k, to_status_number(v))\n elif isinstance(j[k], (str, unicode)):\n q = q.filter_eq(k, v)\n elif isinstance(j[k], float):\n q = q.filter_eq(k, float(v))\n elif isinstance(j[k], int):\n q = q.filter_eq(k, 
int(v))\n else:\n q = q.filter_eq(k, repr(v))\n jobs = q.all()\n ids.extend([j.id for j in jobs])\n del j, jobs, q\n\n if options.fselect:\n q = db.query(session)\n jobs = q.all()\n for param in options.fselect:\n k, v = param.split('=', 1)\n f = eval(v)\n for job in jobs:\n if k in job:\n if f(job[k]):\n ids.append(job.id)\n else:\n print \"job\", job.id, \"don't have the attribute\", k\n\n del job, jobs, q\n\n if options.all:\n q = db.query(session)\n jobs = q.all()\n ids.extend([j.id for j in jobs])\n del q, jobs\n\n # Remove all dictionaries from the session\n session.expunge_all()\n\n ids = [int(id) for id in ids]\n ids = list(set(ids))\n ids.sort()\n nb_jobs = len(ids)\n\n for id in ids:\n job = db.get(id)\n if job is None:\n if verbose > 0:\n print \"Job id %s don't exit in the db\" % (id)\n nb_jobs -= 1\n continue\n try:\n prio = job['jobman.sql.priority']\n except Exception:\n prio = 'BrokenDB_priority_DontExist'\n try:\n status = job['jobman.status']\n except KeyError:\n status = 'BrokenDB_Status_DontExist'\n\n if verbose > 1:\n print \"Job id %s, status=%d jobman.sql.priority=%s\" % (id, status, str(prio)),\n\n for p in options.prints:\n try:\n print '%s=%s' % (p, job[p]),\n except KeyError:\n print '%s=KeyDontExist' % (p),\n print\n\n if status == RUNNING:\n have_running_jobs = True\n if options.set_status:\n job.__setitem__('jobman.status', new_status, session)\n job.update_in_session({}, session)\n if options.reset_prio:\n job.__setitem__('jobman.sql.priority', 1.0, session)\n job.update_in_session({}, session)\n\n if options.set_status:\n session.commit()\n print \"Changed the status to %d for %d jobs\" % (new_status, len(ids))\n if options.reset_prio:\n print \"Reseted the priority to the default value\"\n if new_status == CANCELED and have_running_jobs:\n print \"WARNING: Canceled jobs only change the status in the db. Jobs that are already running, will continue to run. If the job finish with status COMPLETE, it will change the status to DONE. 
Otherwise the status won't be changed\"\n\n finally:\n session.close()\n\n if options.ret_nb_jobs:\n print nb_jobs", "title": "" }, { "docid": "9236865181709ac4aacf07b7f9bd7ae6", "score": "0.54474646", "text": "def merge_data():\n # get table name, and put in queue.\n tables_queue = Queue.Queue()\n with pyodbc.connect(connect_information, database=source_database) as con:\n with con.cursor() as cursor:\n rows = cursor.execute(\"SELECT TABLE_NAME FROM INFORMATION_SCHEMA.Tables\")\n for r, in rows:\n tables_queue.put(r)\n for i in range(5):\n t = UpdateTableThread(tables_queue)\n t.setDaemon(True)\n t.start()\n tables_queue.join()", "title": "" }, { "docid": "1b7a7dd0ee76ca66c9b893deb060c985", "score": "0.5437278", "text": "def run_script(filename):\n global cur\n try:\n schema_query = open(filename, 'r').read()\n cur.executescript(schema_query)\n commit()\n except FileNotFoundError:\n print(\"Unable to find SQL file\", filename)", "title": "" }, { "docid": "77b470ea5a8d35ccfb57b715d509128c", "score": "0.54213256", "text": "def __executeSql(self, sql):\n connection = sqlite.connect(self.__dbName)\n cursor = connection.cursor()\n cursor.execute(sql)\n connection.commit()\n connection.close()", "title": "" }, { "docid": "60eacb442c2d6b780bd84f3e3d69fea5", "score": "0.5406003", "text": "def submit(self, sql, create, dml=None):\n dml = False if dml is None else dml\n table_name = self.get_tablename()\n client = bq.Client.from_service_account_json(self.private_key)\n #\n # Let's make sure the out dataset exists\n datasets = list(client.list_datasets())\n found = np.sum(\n [1 for dataset in datasets if dataset.dataset_id == self.odataset])\n if not found:\n dataset = bq.Dataset(client.dataset(self.odataset))\n client.create_dataset(dataset)\n\n # create the output table\n if create:\n LOGGER.info(f\"creating new table:\\t{self.tablename}\")\n bq_utils.create_standard_table(self.tablename,\n self.tablename,\n drop_existing=True,\n dataset_id=self.odataset)\n write_disposition = bq_consts.WRITE_EMPTY\n else:\n write_disposition = bq_consts.WRITE_APPEND\n LOGGER.info(f\"appending results to table:\\t{self.tablename}\")\n\n job = bq.QueryJobConfig()\n job.priority = self.priority\n job.dry_run = True\n\n dml_job = None\n if not dml:\n job.destination = client.dataset(self.odataset).table(\n self.tablename)\n job.use_query_cache = True\n job.allow_large_results = True\n job.write_disposition = write_disposition\n if self.partition:\n job._properties['timePartitioning'] = {'type': 'DAY'}\n job._properties['clustering'] = {'field': 'person_id'}\n else:\n # create a copy of the job config to use if the dry-run passes\n dml_job = copy(job)\n\n LOGGER.info(\n f\"submitting a dry-run for:\\t{self.get_tablename()}\\t\\tpriority:\\t%s\\t\\tpartition:\\t%s\",\n self.priority, self.partition)\n\n logpath = os.path.join(self.logpath, self.idataset)\n try:\n os.makedirs(logpath)\n except OSError:\n # log path already exists and we don't care\n pass\n\n try:\n response = client.query(sql, location='US', job_config=job)\n except Exception:\n LOGGER.exception(\n f\"dry run query failed for:\\t{self.get_tablename()}\\n\"\n f\"\\t\\tSQL:\\t{sql}\\n\"\n f\"\\t\\tjob config:\\t{job}\")\n else:\n\n if response.state == 'DONE':\n if dml_job:\n job = dml_job\n\n job.dry_run = False\n\n LOGGER.info('dry-run passed. 
submitting query for execution.')\n\n response = client.query(sql, location='US', job_config=job)\n LOGGER.info(\n f\"submitted a bigquery job for table:\\t{table_name}\\t\\t\"\n f\"status:\\t'pending'\\t\\tvalue:\\t{response.job_id}\")\n self.wait(client, response.job_id)", "title": "" }, { "docid": "9552e16b6a236fbc326eccd7cc7c3083", "score": "0.53922707", "text": "def run_queries(cur, conn, query_list):\n for query in query_list:\n print(f\"\\nQuery being executed: {query}\")\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "40571e76c51d76d700a3f9b1c9d70fc5", "score": "0.53863275", "text": "def main():\n create_table()\n data = (\"2021-01-16 20:56:05\", 71.1, 32.4, 55.4, 11.2, 9.3, 33)\n insert_air_quality_reading(data)\n df = select_all_table_data()\n print(df)\n close_db_connection()", "title": "" }, { "docid": "7164ffb44b8ec98754c1d35ec2a19c5e", "score": "0.5382386", "text": "def execute_sql(self, sql, params=None):\n return self.database.execute_sql(sql, params=params, commit=False)", "title": "" }, { "docid": "520dd9e21868d0a697454334124cfc75", "score": "0.5381108", "text": "def run(self):\n\t\tself.insert_mritable()\n\t\tif self.motionscoretablecsv:\n\t\t\tself.insert_motionscoretable()\n\t\tif self.strokescoretablecsv:\n\t\t\tself.insert_strokescoretable()\n\t\tif self.grouptables:\n\t\t\tself.insert_grouptables()\n\t\tself.session.commit()", "title": "" }, { "docid": "8ac378432f4c840061845ae54d9f2e09", "score": "0.5372695", "text": "def execute(self, sql):\n return True", "title": "" }, { "docid": "26fee8f89621ca7c914973c7f1196734", "score": "0.5370051", "text": "def run(self):\n try:\n # Connection to SQLite database\n self._dbconn = sqlite3.connect(self.database_path)\n except sqlite3.Error, e:\n raise LagartoException(e.args[0])\n\n while self._run_db:\n # Gets the SQL query from the queue\n query = self._queue.get()\n self._db_response = None\n self._db_error = None \n \n try:\n c = self._dbconn.cursor()\n # Create table\n c.execute(query)\n \n if (query.startswith(\"SELECT\")):\n # Get response\n self._db_response = c.fetchall()\n else:\n # Commit changes\n self._dbconn.commit()\n except sqlite3.Error, e:\n self._db_error = e.args[0]\n finally:\n # Job done\n self._queue.task_done()", "title": "" }, { "docid": "799dbd86dedcc6e9d212a8d526d62277", "score": "0.53680116", "text": "def sql(self, sql_statement):\n return db.run_sql(sql_statement)", "title": "" }, { "docid": "78be07a06ab193d399fd305d59395900", "score": "0.5367245", "text": "def _sql(self, sql_statement):\n return db.run_sql(sql_statement)", "title": "" }, { "docid": "c3928ed7046d63792dc3f97c93f3cc41", "score": "0.5365423", "text": "def get_all_jobs(self,query_data):\n self.cur.execute(query_data)\n return self.cur.fetchall()", "title": "" }, { "docid": "31e4aa41dd36dc7ca8dc0b7a5175108e", "score": "0.5360457", "text": "def main():\n tableFolder = sys.argv[1]\n sqlJsonFile = sys.argv[2]\n outputFile = sys.argv[3]\n\n # parse sql json file\n with open(sqlJsonFile) as f:\n queryJSON = json.load(f)\n\n # run query\n q = Query(queryJSON, tableFolder)\n result = q.run()\n\n # write result to output pile\n writeToFile(result, outputFile)", "title": "" }, { "docid": "77f394f960d9d1bf7a82d6ee7d38ab10", "score": "0.5354882", "text": "def execute_sql(self, sql):\r\n try:\r\n grid_data = None\r\n if sql.lower().startswith(\"select\"):\r\n # SELECT statement: populate grid with rows\r\n grid_data = self.db.execute_select(sql)\r\n self.grid_sql.SetTable(grid_data)\r\n self.button_export_sql.Enabled = True\r\n 
else:\r\n # Assume action query\r\n affected_rows = self.db.execute_action(sql)\r\n self.grid_sql.SetTable(None)\r\n self.grid_sql.CreateGrid(1, 1)\r\n self.grid_sql.SetColLabelValue(0, \"Affected rows\")\r\n self.grid_sql.SetCellValue(0, 0, str(affected_rows))\r\n self.button_export_sql.Enabled = False\r\n main.logstatus(\"Executed SQL \\\"%s\\\".\", sql)\r\n size = self.grid_sql.Size\r\n self.grid_sql.Fit()\r\n # Jiggle size by 1 pixel to refresh scrollbars\r\n self.grid_sql.Size = size[0], size[1]-1\r\n self.grid_sql.Size = size[0], size[1]\r\n self.last_sql = sql\r\n self.grid_sql.SetColMinimalAcceptableWidth(100)\r\n if grid_data:\r\n [self.grid_sql.AutoSizeColLabelSize(i) \\\r\n for i in range(grid_data.GetNumberCols())\r\n ]\r\n except Exception, e:\r\n wx.MessageBox(\r\n unicode(e).capitalize(), conf.Title, wx.OK | wx.ICON_WARNING\r\n )", "title": "" }, { "docid": "82cfac6c7bb66d74e4ee68a3440d8404", "score": "0.53430647", "text": "def execute_job(self):\n raise NotImplementedError", "title": "" }, { "docid": "03de3d729e16705b78c96c42431c0b57", "score": "0.53311956", "text": "def jobs_run_all():\n jobs_count = 1\n for job_key, job in mdb.jobs.items():\n job.submit(consistencyChecking=OFF)\n print('Job number ', str(jobs_count), ' of: ', str(len(mdb.jobs)))\n job.waitForCompletion()\n jobs_count += 1\n return", "title": "" }, { "docid": "5ebc52d216a9d76b71b5d49828ec22d1", "score": "0.5327111", "text": "def run(cls):\n hive_job = cls(args=_READ_ARGS_FROM_SYS_ARGV)\n hive_job.execute()", "title": "" }, { "docid": "84140fc9531c92116bb9453dd25b8310", "score": "0.5313786", "text": "def data_ingest(conn, cur):\n for query in data_ingestion_queries:\n cur.execute(query)\n conn.commit()", "title": "" }, { "docid": "22da7d6dad5ab1e3f4a25a835e8836af", "score": "0.53084284", "text": "def run(self, sql: str) -> hive.Cursor:\n if not self._cursor:\n self.get_conn()\n if not self._dry_run:\n self._cursor.execute(sql) # type: ignore\n else:\n self.log.warn(sql)\n return self._cursor", "title": "" }, { "docid": "6a877cd49e439a001ea359281707574e", "score": "0.53066915", "text": "def test_one_table(self):\n job = base_job.Job(os.path.join(os.getcwd(), 'one_table_oracle.json'))\n job.connect_to_database()\n base_table = ['RUNWAYL']\n self.assertEqual(base_table, oracle_worker.get_tables(job))", "title": "" }, { "docid": "da4f0b745793f43749c24072d79f8445", "score": "0.5298659", "text": "def edit_job(self,query_data):\n self.cur.execute(query_data)\n self.conn.commit()", "title": "" }, { "docid": "e4f35909ebdd681732bc68a212d5b9d2", "score": "0.5298168", "text": "def add_job(self,query_data,tuple_data):\n self.cur.execute(query_data,tuple_data)\n self.conn.commit()", "title": "" }, { "docid": "9e6ee4db1edebbf630c218e01aafc7a9", "score": "0.52882755", "text": "def run_query_on_sqlite_db(input_query, input_filename):\n conn_ = sqlite3.connect(input_filename)\n df_ = pd.read_sql_query(input_query, conn_)\n conn_.close()\n return df_", "title": "" }, { "docid": "1732b619874af61f686a56b50c4aebac", "score": "0.5279739", "text": "def _run_sql_file(self, sql_source_file):\n logging.getLogger(__name__).debug(\"Running SQL file %s\", sql_source_file)\n try:\n with open(sql_source_file, 'r') as f:\n sql_source = f.read()\n sql_statements = re.split(r';\\s*$', sql_source, flags=re.MULTILINE)\n sql_statements = [cmd.strip('\\n') for cmd in sql_statements]\n except IOError:\n logging.getLogger(__name__).exception(\"Could not read file %s\", sql_source_file)\n raise\n\n return self._run_sql(sql_statements)", "title": "" 
}, { "docid": "f6d6aa1f26b6c96bc92ed7fe374c38d1", "score": "0.52778035", "text": "def run_query(table, startRow, limit):\n query = \"\"\"SELECT os, ip, device, \\\n channel, app, \\\n YEAR(click_time) AS year, \\\n MONTH(click_time) AS month, \\\n DAY(click_time) AS date, \\\n DAYOFWEEK(click_time) AS day, \\\n HOUR(click_time) AS hour, \\\n MINUTE(click_time) as minute\"\"\"\n\n if 'train' in table:\n query = query + \"\"\", is_attributed\"\"\"\n else:\n query = query + \"\"\", click_id\"\"\"\n\n query = query + \"\"\" FROM \"\"\" + table + \\\n \"\"\" LIMIT \"\"\" + str(limit) + \\\n \"\"\" OFFSET \"\"\" + str(startRow) + \"\"\";\"\"\"\n db.query(query)\n dbResult = db.store_result()\n dbFetched = dbResult.fetch_row(maxrows = 0, how = 2)\n queryDF = pd.DataFrame.from_records(dbFetched)\n return queryDF", "title": "" }, { "docid": "b2cb9fafe6f714716e687b56aafc2972", "score": "0.52760965", "text": "def execute_mogrify(self, conn, df, table):\n self.connection = conn\n # Create a list of tupples from the dataframe values\n tuples = [tuple(x) for x in df.to_numpy()]\n\n # Comma-separated dataframe columns\n cols = ','.join(list(df.columns))\n\n # SQL query to execute\n cursor = conn.cursor()\n values = [cursor.mogrify(\"(%s,%s,%s,%s)\", tup).decode('utf8')\n for tup in tuples]\n # if not publishedAt, delete record\n query = \"INSERT INTO %s(%s) VALUES\" % (table, cols) + \",\".join(values)\n\n try:\n cursor.execute(query, tuples)\n conn.commit()\n except (Exception, psycopg2.DatabaseError) as error:\n logging.error(\"Error: %s\" % error)\n print(\"Error: %s\" % error)\n conn.rollback()\n cursor.close()\n conn.close()\n return 1\n logging.info(\"execute_mogrify() done\")\n cursor.close()\n conn.close()", "title": "" }, { "docid": "90b2323052ba251c450725732a12e3f4", "score": "0.52686", "text": "def import_df(sql_table):\n\n db = connect()\n sql = \"SELECT * FROM %s\" % sql_table\n df = psql.read_sql(sql, db)\n db.close()\n\n return df", "title": "" }, { "docid": "9055b68396e5b8157be0edefce0c351f", "score": "0.52644503", "text": "def test_one_table(self):\n job = base_job.Job(os.path.join(os.getcwd(), 'sql_server_one.json'))\n job.connect_to_database()\n self.assertEqual(['STATES'], sql_worker.get_tables(job))", "title": "" }, { "docid": "7e0c1086956cfdf8e1e05fe09817b160", "score": "0.52595305", "text": "def run_query_onDB(query):\n db = psycopg2.connect('dbname=' + DBNAME)\n c = db.cursor()\n c.execute(query)\n db.commit()\n rows = c.fetchall()\n db.close()\n return rows", "title": "" }, { "docid": "199ebf9dc1700ed087f852c8a7e447fa", "score": "0.5245775", "text": "def sqlExec(query):\n db = pymysql.connect(user=\"root\", host=\"localhost\", passwd=\"\", db=\"flight_data\", cursorclass=pymysql.cursors.DictCursor)\n with db:\n cur = db.cursor()\n cur.execute(query)\n tables = cur.fetchall()\n return tables", "title": "" }, { "docid": "7a973ee1ea5e3b85e1901cc18799d311", "score": "0.52426285", "text": "def run_query(conn, query, args=[]):\n cursor = conn.cursor()\n if query.lower().startswith(\"select\"):\n cursor.execute(query, args)\n return cursor.fetchall()\n else:\n cursor.execute(query, args)\n try:\n conn.commit()\n except Exception as e:\n print(\"ERROR OCCURED WHILE DB COMMIT\", e)", "title": "" }, { "docid": "017d3f5bd7d911d7432a3cb7d3aa1287", "score": "0.5241868", "text": "def run_query(query):\n db_connection = connect_to_db()\n cursor = db_connection.cursor()\n cursor.execute(query)\n results = cursor.fetchall()\n cursor.close()\n db_connection.close()\n return results", "title": "" 
}, { "docid": "db7d50edc993211057f76e57a0924495", "score": "0.52396995", "text": "def execute(self, sql, *params):\n self.execute_in_trans((sql, params))", "title": "" }, { "docid": "4ff35e62179766e99696f3c991961037", "score": "0.52380466", "text": "def run_query(query, db_name=DBNAME):\n try:\n db = psycopg2.connect(database=db_name)\n cursor = db.cursor()\n cursor.execute(query)\n results = cursor.fetchall()\n db.close()\n return results\n except Exception as e:\n print(e)\n exit(1)", "title": "" }, { "docid": "ef0c2a7c025872531f6daec5676a4a34", "score": "0.52294874", "text": "def load_staging_tables(cur, conn):\n for query in copy_table_queries:\n LOGGER.info(\"-------------------------------\")\n LOGGER.info(f\"Copying from S3:\\n{query.split()[1]}\")\n try:\n cur.execute(query)\n conn.commit()\n except Exception:\n LOGGER.exception(\"Exception occurred while copying from S3\")\n LOGGER.info(\"Finished copying.\\n\\n\")", "title": "" }, { "docid": "1096c99f3873fc3e3614b16a2923b17f", "score": "0.5214852", "text": "def _execute_sql(self, conn, sql, values=()):\n attempt = 0\n email_sent = False\n while True:\n attempt += 1\n try:\n return conn.execute(sql, values)\n except sqlite3.OperationalError:\n email_sent = self.report_sql_error(attempt, email_sent,\n sql, values)", "title": "" }, { "docid": "4652b6074de244616223c6c59cea4fcc", "score": "0.52135223", "text": "def sql(sql):\n # db = db_connect('pito')\n with db:\n try:\n cursor = db.cursor()\n cursor.execute(sql)\n except sqlite3.ProgrammingError as e:\n print(e)\n except sqlite3.OperationalError as e:\n print(e)\n else:\n print('Sql action completed')", "title": "" }, { "docid": "c97dac1a2cf9fa57316224571416bf60", "score": "0.5208402", "text": "def job_runs_table(runs_list):\n # We expect to receive a list,\n # if not we create one from the single item.\n if not isinstance(runs_list, (list,)):\n runs_list = [runs_list]\n\n fields = OrderedDict([\n ('task id', lambda s: s['id']),\n ('job id', lambda s: s['jobId']),\n ('started at', lambda s: s['createdAt']),\n ])\n tb = table(fields, runs_list)\n tb.align['JOB ID'] = 'l'\n tb.align['STARTED AT'] = 'l'\n\n return tb", "title": "" }, { "docid": "b6fe876c42d4845a5ba7b98539302d38", "score": "0.5204393", "text": "def execute(self, sql, params=()):\n\n if not self.conn:\n self.create_connection()\n self.cur = self.conn.cursor()\n if type(sql) is CopyCommand:\n #self.update_progress('Execute copy:' + sql.source_path)\n sql.run(self.cur)\n else:\n #self.update_progress('Execute sql:' + sql)\n self.cur.execute(sql, params)\n self.conn.commit()\n self.cur.close()\n self.cur = None", "title": "" }, { "docid": "fd4f8448aea9925514ad2344aedd530c", "score": "0.5193809", "text": "def run(options):\n SQSPoller(options).run()", "title": "" }, { "docid": "fd4f8448aea9925514ad2344aedd530c", "score": "0.5193809", "text": "def run(options):\n SQSPoller(options).run()", "title": "" }, { "docid": "dd2878f7e2040bb7e197c4b7359d8d0a", "score": "0.51930404", "text": "def _execute_sql_file(self, filepath):\n with open(filepath) as f:\n schema = f.read()\n # Since called outside request, force commit.\n with self.client.connect(force_commit=True) as conn:\n conn.execute(schema)", "title": "" }, { "docid": "bfc54dd5aaa1f6787dea969fdf6cc11f", "score": "0.51919156", "text": "def run_sql_updates(df, client=None):\n\n if df is None or not isinstance(df, pd.DataFrame):\n raise AttributeError(\"A valid --df `pd.DataFrame` must be provided.\")\n\n errors = []\n success = True\n\n client = client or 
build_client(client_type=\"bigquery\")\n\n success, run_errors = maybe_create_tables(df, client=client)\n\n if success:\n\n _LOG.info(\"Table validation succeeded.\")\n\n success, run_errors = update_nozzle_ranking(df, \n NOZZLE_QUERY,\n client=client) \n if success:\n _LOG.info(\"Data load succeeded.\")\n else:\n _LOG.error(\"run_notebook failed updating tables.\")\n errors.extend(run_errors)\n\n else:\n _LOG.error(\"run_notebook failed creating tables.\")\n errors.extend(run_errors)\n\n \n return success, errors", "title": "" }, { "docid": "7625f6929cf98a020bdd74b029e5bbb6", "score": "0.5184165", "text": "def load_staging_tables():\n\n with psycopg2.connect(config.SPARKIFYDB_DSN) as conn:\n conn.set_session(autocommit=True)\n with conn.cursor() as cur:\n\n print('Importing data. This may take awhile, please be patient.')\n\n # Copies the events from S3 to the 'staging_events' table.\n print('Copying events into the staging table \\'staging_events\\'')\n cur.execute(sql_queries.staging_events_copy)\n\n # Copies the songs from S3 to the 'staging_songs' table.\n print('Copying songs into the staging table \\'staging_songs\\'')\n counter = 1\n for query in sql_queries.staging_songs_copies():\n print(' --> Batch {}'.format(counter))\n cur.execute(query)\n counter += 1\n\n # Inserts the users using the staging tables as source.\n print('Populating the table \\'users\\'')\n cur.execute(sql_queries.users_table_insert)\n\n # Inserts the songs using the staging tables as source.\n print('Populating the table \\'songs\\'')\n cur.execute(sql_queries.songs_table_insert)\n\n # Inserts the artists using the staging tables as source.\n print('Populating the table \\'artists\\'')\n cur.execute(sql_queries.artists_table_insert)\n\n # Inserts the timestamps using the staging tables as source.\n print('Populating the table \\'time\\'')\n cur.execute(sql_queries.time_table_insert)\n\n # Inserts the songplays using the staging tables as source.\n print('Populating the table \\'songplays\\'')\n cur.execute(sql_queries.songplays_table_insert)", "title": "" }, { "docid": "d307c3d6ff9ccb56af6054b537800827", "score": "0.5182153", "text": "def runQuery(self, statement, args=None):\n return self._runWithConn('runQuery', statement, args)", "title": "" }, { "docid": "7094b970458b22495a02a05e0343ae29", "score": "0.51811355", "text": "def execute_query(self, tablename, arraysize, values=(),\n post_process_func=None):\n if tablename is None:\n return\n\n filename = self.get_data_filename(tablename)\n if filename is None:\n return\n\n self._logger.info(\"Querying file: {f}\".format(f=filename))\n return FileQueryResults(\n filename,\n self.delimiter,\n self.quotechar,\n self.samplerows,\n post_process_func,)", "title": "" }, { "docid": "9b9e4637fa21ab9a88fbfe961d514c0e", "score": "0.5175781", "text": "def pull_all_from_table_or_query(\n config: RepoConfig,\n data_source: DataSource,\n join_key_columns: List[str],\n feature_name_columns: List[str],\n timestamp_field: str,\n start_date: datetime,\n end_date: datetime,\n ) -> RetrievalJob:\n pass", "title": "" }, { "docid": "33b2713e3916da72f15344e8e87e04cd", "score": "0.51751715", "text": "def _run_report(conn, sql, on_success, failure_message):\n # Execute SQL\n result = conn.execute(sql)\n # If there are results\n if result.rowcount:\n for row in result:\n # Format and print the results\n print(' {}'.format(on_success(**row)))\n else:\n # Print failure message\n print(' {}'.format(failure_message))", "title": "" }, { "docid": "72c35174cca54d7b2ff5c1592145249b", "score": 
"0.51720023", "text": "def process_sql_query(self, db_name, sql):\n # TODO: Run the SQL query against the database and return the result.\n db = sqlite3.connect(\"example.db\")\n cursor = db.cursor()\n cursor.execute(sql)\n return cursor.fetchall()", "title": "" }, { "docid": "b4793930f0fa205873c467657e9db38c", "score": "0.5171082", "text": "def execute(self, sql, params=None):\n params = params or []\n logger.debug(\"[SQL] %s (%s)\", sql, params)\n cursor = connection.cursor()\n cursor.execute(sql, params)\n try:\n return cursor.fetchall()\n except Exception:\n return []", "title": "" }, { "docid": "f35ef3e9a56ac8027f23322961f4c9f3", "score": "0.5164871", "text": "def execute_sql(query, city):\n query = query.format(city=city)\n try:\n run(\n \" \".join(psql_cmd),\n input=query,\n check=True,\n shell=True,\n stdout=DEVNULL,\n stderr=PIPE,\n encoding=\"UTF-8\",\n )\n except CalledProcessError as exc:\n print(\"ERROR on\", city, exc.stderr)\n raise exc", "title": "" }, { "docid": "22d04f5e8576eb28ee197d84be733484", "score": "0.5159785", "text": "def execute(self, batch, parameters=None):\n with self.connection as conn:\n with conn.cursor() as curs:\n curs.execute(batch, parameters)", "title": "" }, { "docid": "6489707cb49e414860a64afeecb8a308", "score": "0.5153441", "text": "def schedule(job):", "title": "" } ]
5c29a638617b95da4741672f55fd84e8
Sets the item of this Inventory.
[ { "docid": "cb1e7ade7cef844cadf4e257577eba8e", "score": "0.7469803", "text": "def item(self, item):\n\n self._item = item", "title": "" } ]
[ { "docid": "0e77ca0fb4bafc81575720198fee9e0f", "score": "0.81508094", "text": "def set_item(self, item):\n self._item = item", "title": "" }, { "docid": "23f601c64be51554bb0bc647f09f9282", "score": "0.7558917", "text": "def set_item(self, new_item):\n self.item = new_item", "title": "" }, { "docid": "985e62fe6d885cb3c56130e785fd3ab9", "score": "0.6862644", "text": "def set_item (self, item, visible):\r\n self.item = item\r\n item.visible = visible", "title": "" }, { "docid": "1b2187ef94e93264da39a44e7066f07f", "score": "0.6841267", "text": "def item_set(self, parent, name, item):\n persistent_storage = self._get_persistent_storage_for_object(parent)\n persistent_storage.set_item(parent, name, item)", "title": "" }, { "docid": "2e58f00f3d7a3fc132720d03016b500d", "score": "0.6686532", "text": "def set(self, id, item):\n pass", "title": "" }, { "docid": "d9b109684008e2af8b96e9aea04e23e7", "score": "0.6644365", "text": "def set(self, item, value):\n self.data[item] = value\n self.dump()", "title": "" }, { "docid": "492564c9fea4e5c636cf9763eceb5417", "score": "0.6572483", "text": "def set_item(self, name, value):\n item = self.__items[name]\n old_value = item.value\n item.value = value\n self.__update_modified(datetime.datetime.utcnow())\n if value:\n value.persistent_object_parent = PersistentObjectParent(self, item_name=name)\n # the persistent_object_parent and item need to be established before\n # calling item_changed.\n if self.persistent_object_context:\n self.persistent_object_context.item_set(self, name, value) # this will also update item's persistent_object_context\n if item.item_changed:\n item.item_changed(name, old_value, value)", "title": "" }, { "docid": "e022f8d77a43ed045ee4155cb9687e80", "score": "0.656361", "text": "def itemInventorySelected(self, invIsoItem):\n item = invIsoItem.getModel()\n self.__player.setSelectedItem(item, True)", "title": "" }, { "docid": "2f1f3175442989b8cccf851b2e6bfdc6", "score": "0.65557176", "text": "def item_id(self, item_id):\n\n self._item_id = item_id", "title": "" }, { "docid": "842f986f7146bc5cc93604f06456ab8f", "score": "0.65125287", "text": "def __set_item(self, name, value):\n item = self.__items[name]\n old_value = item.value\n item.value = value\n if value:\n value.persistent_object_parent = PersistentObjectParent(self, item_name=name)\n value.persistent_object_context = self.persistent_object_context\n if item.item_changed:\n item.item_changed(name, old_value, value)", "title": "" }, { "docid": "3aa737c652e62147070cf007c5bba466", "score": "0.65052146", "text": "def buy(self, item):\n self._inventory.append(item)", "title": "" }, { "docid": "c40b940d7580e2e9e188150b29680925", "score": "0.64688337", "text": "def set(self, item, value):\n return self.__setitem__(item, value)", "title": "" }, { "docid": "57d581714ad7bedcf75155615b20608f", "score": "0.6458627", "text": "def set(self, item, value):\n self[item] = value", "title": "" }, { "docid": "a724854440dc99d322241b9081aa8d2a", "score": "0.64401907", "text": "def setitem(self: 'AbstractMatrix', row: int, col: int, item):\n pass", "title": "" }, { "docid": "ea1d1e6268fd805fe69402c12ed1e8cc", "score": "0.63368404", "text": "def __setitem__(self, item, value):\n self._set_item(item, value)", "title": "" }, { "docid": "971cc7de82d02d8b09c34b597afa00a5", "score": "0.6284342", "text": "def set_item(self, item_name):\n try:\n self.item_uri = self.item_map[item_name]\n except KeyError as e:\n raise Exception('Key [{}] not found. 
{}'.format(item_name, e))\n except Exception as e:\n raise e", "title": "" }, { "docid": "94e9222bde9076e1dc3e07c69b99c26d", "score": "0.62100077", "text": "def set_menu_item(self, pos, new_item):\n\t\tself.mitems[pos] = new_item", "title": "" }, { "docid": "f55624628f891fb2a544307104fe38a8", "score": "0.6184227", "text": "def replace(self, item):\n if self._lastItemPos is None:\n raise AttributeError, \"No established item to set\"\n self._lastItemPos.data = item", "title": "" }, { "docid": "4b21deea23ffe4614b0b76c685890b50", "score": "0.61723405", "text": "async def use(self):\r\n\t\tif self.inventory is None or self.inventory.client is None:\r\n\t\t\traise TypeError(\"InventoryItem doesn't have the inventory variable or Inventory doesn't have the client variable.\")\r\n\t\tawait self.inventory.client.main.send(Packet.new(31, 3).write16(self.id))", "title": "" }, { "docid": "d43f924bf1560ebe731d7ed4fce5ba5b", "score": "0.61696154", "text": "def vector_set_item(self, v1, index, value) :\n err = \"Item assignment not supported in space of type '%s'\"\n raise NotImplementedError(err % type(self))", "title": "" }, { "docid": "d7f5f6a8728ba73a7354257ee5de4bb2", "score": "0.6166907", "text": "def obtain_item(self, item):\n\t\tself.inventory.add_item(item) #TODO: checks for stuff like full inventory? (might take place before here)", "title": "" }, { "docid": "cbbfe69dd06520336bc114a02f34b2ab", "score": "0.61103", "text": "def set_item(self, key, block):\n self.blocks[key] = block", "title": "" }, { "docid": "c13ba39752d4368cc0043a2301253f29", "score": "0.61070436", "text": "def item_type(self, item_type):\n\n self._item_type = item_type", "title": "" }, { "docid": "09ecc7f27c860f5e63559833e6a1ebf1", "score": "0.6099951", "text": "def __setitem__(self, index, item):\r\n self._items[index] = item", "title": "" }, { "docid": "8ed27da82467e53ae59a846760890934", "score": "0.6093937", "text": "async def cmd_edit_item(self, ctx, item: str = None):\n # TODO: get the item\n await ctx.send(\"not implemented currently\", item)", "title": "" }, { "docid": "fa7eacd13d7952c496de9b762b3f6054", "score": "0.60914314", "text": "def update_item(self, item):\n self._modify_item(self.boto_client.update_item, \"update\", item)", "title": "" }, { "docid": "441ef5166cf0c4a354aa0e5475358ef3", "score": "0.60421103", "text": "def update_item(self):\n name = self.ui.item_type.text()\n\n # TODO: i guess eventually we're gonna need like.. 
some sort of generic\n # generate item function\n try:\n item = self.assets.items().get_item(name)\n if item[1].endswith(\"generatedgun\"):\n options = self.assets.items().generate_gun(item)\n name = options[\"itemName\"]\n elif item[1].endswith(\"generatedsword\"):\n options = self.assets.items().generate_sword(item)\n name = options[\"itemName\"]\n elif item[1].endswith(\"generatedshield\"):\n options = self.assets.items().generate_shield(item)\n name = options[\"itemName\"]\n elif item[1].endswith(\"sapling\"):\n options = self.assets.items().generate_sapling(item)\n elif name == \"filledcapturepod\":\n options = self.assets.items().generate_filledcapturepod(item,\n self.player.get_uuid())\n else:\n options = item[0]\n except TypeError:\n self.item = empty_slot().item\n self.ui.desc.setText(\"<html><body><strong>Empty Slot</strong></body></html>\")\n self.ui.icon.setPixmap(QPixmap())\n self.clear_item_options()\n return\n\n self.ui.item_type.setText(name)\n\n self.item = saves.new_item(name, 1, options)\n self.ui.count.setValue(1)\n self.update_item_info(name, options)\n self.populate_options()", "title": "" }, { "docid": "7a4362c6290caf9c9281de14af2b66ea", "score": "0.6035463", "text": "def setitem(self: 'Matrix', row: int, col: int, item) -> None:\n self._content[(row, col)] = item", "title": "" }, { "docid": "31654c69e05baecdeb0fca861822231c", "score": "0.60304755", "text": "def sell(self, item):\n self._inventory.remove(item)", "title": "" }, { "docid": "7d2581a3af977a4bd13a4d509d5425a2", "score": "0.59880114", "text": "def itemToInventory(self):\n if self.__compruebaNumeroItemsInventario():\n if self.__selectedItem:\n self.__player.tryToInventory(self.__selectedItem)", "title": "" }, { "docid": "ac2b6db8a1dc3eb5de117135c6059e8f", "score": "0.59468997", "text": "def addItem(self, item_to_add):\r\n self.player_inventory[item_to_add.itemName()] = item_to_add", "title": "" }, { "docid": "d9f1efcb90e651243f893ec9f1c54010", "score": "0.59368074", "text": "def update_item(self, **kwargs):\n raise NotImplementedError('\"update_item\" is not yet implemented')", "title": "" }, { "docid": "71ae5538661161a173ab3b4a8466edeb", "score": "0.5934478", "text": "def update_item(self, item):", "title": "" }, { "docid": "dbc04d55d50b0a3ce844f52cd7e88783", "score": "0.59024614", "text": "def addInventory(self, item, slot, quantity = 1):\n self.__inventoryXML += '''<InventoryItem slot=\"{}\" type=\"{}\" quantity=\"{}\"/>'''.format(slot.value, item.value, quantity)", "title": "" }, { "docid": "8ab8a7c7546ddda2e42e6b56ab33a982", "score": "0.5888789", "text": "def addItemToInventory(self, event): \n item = event.getParams()[\"item\"]\n posOrigin = event.getParams()[\"position\"]\n itemName = event.getParams()[\"itemName\"]\n posX = len(self.__isoviewInventory)%GG.utils.INV_ITEM_COUNT[0]\n posY = len(self.__isoviewInventory)/GG.utils.INV_ITEM_COUNT[1]\n pos = [INV_OR[0] + (posX * INV_ITEM_SZ[0]), INV_OR[1] + (posY * INV_ITEM_SZ[1])]\n ivItem = self.findIVItem(item)\n invItem = isoview_inventoryitem.IsoViewInventoryItem(item, self.getScreen(), self, itemName)\n if ivItem:\n positionAnim = animation.ScreenPositionAnimation(ANIM_INVENTORY_TIME, ivItem, GG.utils.p3dToP2d(posOrigin, ivItem.anchor), pos, True)\n positionAnim.setOnStop(self.room.removeItem, item)\n positionAnim.setOnStop(self.removeSprite, ivItem.getImg())\n else:\n ivItem = item.defaultView(self.getScreen(), self.__isoviewRoom, self)\n self.__isoviewRoom.addIsoViewItem(ivItem) \n positionAnim = 
animation.ScreenPositionAnimation(ANIM_INVENTORY_TIME, ivItem, GG.utils.p3dToP2d(posOrigin, [0, 0]), pos, True)\n positionAnim.setOnStop(self.removeSprite, ivItem.getImg())\n positionAnim.setOnStop(self.__isoviewInventory.append, invItem)\n positionAnim.setOnStop(self.paintItemsInventory, None)\n self.__isoviewRoom.itemUnselected(item)\n ivItem.setAnimation(positionAnim)", "title": "" }, { "docid": "61a7eb173b09d2a897cc5ff19cecc69c", "score": "0.58756346", "text": "def setitem(self, row, col, value):\n self._grid[(row, col)] = value", "title": "" }, { "docid": "04875ad791aef48d094e837b289da577", "score": "0.5868414", "text": "def onItem(self, row):\n self.item = row\n self.updateUI()", "title": "" }, { "docid": "c625e46f6d5dd2201e12e7fba04f083e", "score": "0.5866827", "text": "def put_item(self, table_name, item):\n self.log.info(\"put_item(): putting item {}\".format(item))\n response = self.ddb_client.put_item(\n TableName=table_name,\n Item=item\n )\n return response", "title": "" }, { "docid": "c99a524289f8eab271df7e8442bc5171", "score": "0.5856682", "text": "def setitem(self: 'ListMatrix', row: int, col: int, item) -> None:\n self._content[row][col] = item", "title": "" }, { "docid": "d5a38f2779bca7aad3878d9046e1bd80", "score": "0.57939386", "text": "def item_read(self, item):\n self.update_item(item)", "title": "" }, { "docid": "2fc6da43c2f6ca585eb0fa3cbd15af03", "score": "0.57786536", "text": "def set_item(self, key, value):\r\n self[key] = value", "title": "" }, { "docid": "647ed5dea208a93f9abbb03268b9d0c5", "score": "0.57772034", "text": "def set_selected(self, item=None):\n if item is None:\n item = self.selected_item\n else:\n self.selected_item = item\n if item:\n index = self.ui.listItemList.model().index_of(item)\n if index:\n self.ui.listItemList.setCurrentIndex(index)", "title": "" }, { "docid": "d1227ccbbf80cbac05db3b80697e2ad0", "score": "0.5770319", "text": "def give_item(conn: TConn, entity_id: int, item_id: int, quantity: int):\n\n # does the entity already have this item?\n existing_quantity = conn.execute(\"\"\"\n SELECT quantity\n FROM inventory\n WHERE entity_id = :entity_id\n AND item_id = :item_id\n \"\"\", {\n 'entity_id': entity_id,\n 'item_id': item_id,\n }).fetchone()\n\n if not existing_quantity:\n # add the item\n conn.execute(\"\"\"\n INSERT INTO inventory\n (entity_id, item_id, quantity)\n VALUES\n (:entity_id, :item_id, :quantity)\n \"\"\", {\n 'entity_id': entity_id,\n 'item_id': item_id,\n 'quantity': quantity\n })\n\n else:\n # update the item\n conn.execute(\"\"\"\n UPDATE inventory\n SET quantity = quantity + :quantity_sold\n WHERE entity_id = :entity_id\n AND item_id = :item_id\n \"\"\", {\n 'entity_id': entity_id,\n 'item_id': item_id,\n 'quantity_sold': quantity\n })", "title": "" }, { "docid": "1162b3a117f9dcffdda9212cbd3ee39f", "score": "0.5735816", "text": "def update_item(self, product_id, quantity):\n self.items.filter(product_id=product_id).update(quantity=quantity)", "title": "" }, { "docid": "bc912e5ddc19fd50b783047131f89631", "score": "0.5730987", "text": "def update_item(self):\n name = self.ui.item_type.text()\n\n try:\n item = assets.Items().get_item(name)\n except TypeError:\n self.item = empty_slot().item\n self.ui.desc.setText(\"<html><body><strong>Empty Slot</strong></body></html>\")\n self.ui.icon.setPixmap(QPixmap())\n self.ui.variant.clear()\n self.ui.variant.setHorizontalHeaderLabels([\"Options\"])\n return\n\n self.item = save_file.new_item(name, 1, item[0])\n self.ui.count.setValue(1)\n self.update_item_info(name, item[0])\n 
self.populate_options()", "title": "" }, { "docid": "5757e799dbf06f14dbbf98b61b79381d", "score": "0.57206553", "text": "def setItem(self, name, value):\n items = self.items\n if isinstance(value, str):\n db = self.getParentDatabase()\n translation_service = getToolByName(db, 'translation_service')\n value = translation_service.asunicodetype(value)\n items[name] = value\n self.items = items\n self.plomino_modification_time = DateTime().toZone(TIMEZONE)", "title": "" }, { "docid": "2091e6b9a3a1575913856606666a721f", "score": "0.5712522", "text": "def place_item(self, inventory):\n logger.info(\"Place %s from the store in the inventory\", self.moving_item)\n data = None\n\n if self.moving_item is not None:\n data = self.get_item(self.moving_item)\n inventory.place_item(data)\n self.moving_item_slot.item = None\n self.moving_item.is_moving = False\n self.moving_item = None\n self.moving_item_slot = None", "title": "" }, { "docid": "718776aa14cccbadc618e41b026ca20f", "score": "0.5676062", "text": "def put(self, item):\n if self.closed:\n print(\"I'm closed!\")\n else:\n Backpack.put(self, item)", "title": "" }, { "docid": "7a86b6805473b8c01c20436b84214a1c", "score": "0.56529814", "text": "def item_id(self, item_id):\n if item_id is None:\n raise ValueError(\"Invalid value for `item_id`, must not be `None`\") # noqa: E501\n\n self._item_id = item_id", "title": "" }, { "docid": "3765c5e4724a65e35d65fdf38b795e22", "score": "0.56468385", "text": "def set_item_doc(self, item_doc: ItemDoc) -> None:\n self._validate_item_doc(item_doc)\n self._validate_book()\n self._set_item_doc(item_doc)\n self._validate_book()", "title": "" }, { "docid": "133c3e9abcc2157a082086174a924c10", "score": "0.5646208", "text": "def set_active_item(self, item):\n for i,m in self._items:\n if i is item:\n m.activate()\n return\n self._set_none()", "title": "" }, { "docid": "4e5f1c872571511489054d94d0a7fa86", "score": "0.5644647", "text": "def set_item_model(self, item_model):\n # Save the model wrapper. 
PySide's object cache is sometimes unreliable\n # in various versions.\n self.model_wrapper = AbstractItemModelWrapper(item_model)\n self.widget.setModel(self.model_wrapper)", "title": "" }, { "docid": "4dfde7a0a5627a4869bb30b0dd020622", "score": "0.5630843", "text": "def _equip_item(self, item: Item, add_message: bool) -> None:\n if item.equippable is None:\n # Item must have an equippable component.\n raise Impossible(\"You cannot equip this item.\")\n slot = item.equippable.slot\n currently_equipped_item = self._equipped_items[slot]\n if currently_equipped_item is not None:\n # Unequip any currently worn item before equipping a new item.\n self._unequip_item_from_slot(slot, add_message)\n self._equipped_items[slot] = item\n if add_message:\n self._equip_message(item.name)", "title": "" }, { "docid": "22eb1c16b9ff642721ed442227fb6e29", "score": "0.56270206", "text": "def set_new_item_uuid(self, item):\n item = copy.deepcopy(item)\n self._verify_item_type(item)\n item[\"item_uid\"] = self.new_item_uid()\n return item", "title": "" }, { "docid": "53137bc027fdedad03dd77719f2ef212", "score": "0.560791", "text": "def set_in(self, target, item):\n target = self[:-1].get_in(target)\n try:\n target[self[-1]] = item\n except KeyError:\n target[str(self[-1])] = item", "title": "" }, { "docid": "ed75fa92aa13d613faf268cbb5fed2c4", "score": "0.5607463", "text": "def updateItem(self, new_item_dict, quantity):\n self.tile_dict[\"params\"][\"item\"] = new_item_dict\n self.tile_dict[\"params\"][\"quantity\"] = quantity", "title": "" }, { "docid": "ff8bb17111ccf6abf162210403788f95", "score": "0.5570898", "text": "def moneyToInventory(self): \n if self.__selectedItem:\n ivItem = self.findIVItem(self.__selectedItem)\n ivItem.updateZOrder(40000)\n positionAnim = animation.ScreenPositionAnimation(ANIM_INVENTORY_TIME, ivItem, ivItem.getScreenPosition(), [565, 90+568], True)\n positionAnim.setOnStop(self.__isoviewRoom.getModel().removeItem, self.__selectedItem)\n if self.__player.tryToPocket(self.__selectedItem):\n ivItem.setAnimation(positionAnim)", "title": "" }, { "docid": "f4036c311c293548716a854989f2e634", "score": "0.55481803", "text": "def set_InvoiceItemID(self, value):\n super(UpdateInvoiceItemInputSet, self)._set_input('InvoiceItemID', value)", "title": "" }, { "docid": "4a51ddda0440e3813ca44573bfc115e4", "score": "0.5522824", "text": "def set_item_values(self, ):\n row = self.points_list.get_selected_row()\n if row == None:\n return\n self.money.set_value(row[5])\n self.instrument.set_value(row[6])\n self.step.set_value(row[4])\n self.point.set_value(row[3])", "title": "" }, { "docid": "445fa89f4ff926d312867ab478234a4f", "score": "0.55003667", "text": "def add_item_to_inventory(self, item):\n if self.size < self.max_size:\n self.inventory_dict[\"Content\"].append(item)\n else:\n print(\"!!!! 
Inventory is full !!!!\")", "title": "" }, { "docid": "1971839dc23ccc00c89f753ff667cddf", "score": "0.54858506", "text": "def decrement_item(self, item):\n\t\tself.inventory.decrement_item(item)", "title": "" }, { "docid": "343f53861eb2f8935e1132237909145a", "score": "0.5476301", "text": "def add_item(self, item: Item):\n self.items.append(item)", "title": "" }, { "docid": "a4357ea001188180e8f348ffb30865cb", "score": "0.54670244", "text": "def update(self, item):\n raise TypeError(\"FermiWord object does not support assignment\")", "title": "" }, { "docid": "52bd0cf94e9fa0475894eb565dbcbcd7", "score": "0.5466726", "text": "def add_item(self, item, count):\n self.inventory.add_item(item, count)", "title": "" }, { "docid": "74d1c79b6ac6a9fc1df3922647c647d9", "score": "0.5464281", "text": "def mark(self, item):\n self.send(\"%s|d\" % item)", "title": "" }, { "docid": "bd7ead082a544d200bf02a8bd1a4be4a", "score": "0.54433304", "text": "def update_inventory(self, order):\n order_quantity = order.product_details[\"quantity\"]\n inventory = self._inventory\n item = list(filter(lambda product: product.product_id == order.product_id, inventory))\n\n # if the item doesn't exist in inventory, try get it from factory\n if len(item) != 1:\n try:\n item = self.get_item_from_factory(order)\n self._inventory[item] = 100\n except InvalidDataError as e:\n order.is_valid = False\n order.set_error_msg(e.__str__())\n return\n else:\n item = item[0]\n\n # if the quantity of the item is less than the order\n if self._inventory[item] < order_quantity:\n self._inventory[item] += 100\n # subtract the inventory quantity by order quantity\n self._inventory[item] -= order_quantity", "title": "" }, { "docid": "b8fbde3715d829f7e2cd7f40205f9eeb", "score": "0.5424236", "text": "def __setitem__(self, key, value):\n if not isinstance(value, RegistryItem):\n raise ValueError(\"Registry item must have RegistryItem type\")\n\n self._items[key] = value", "title": "" }, { "docid": "4e640372e86cb39ccc4465eabe51738d", "score": "0.54192936", "text": "def item_shared(self, item):\n self.update_item(item)", "title": "" }, { "docid": "e57c16eb27098fda5a65953d35cdab94", "score": "0.5415449", "text": "def set_record(self, item, flags=0, value=None):\n self._set_item(item, value, flags)", "title": "" }, { "docid": "98d8a563e82911e3bebc2369d35a6192", "score": "0.5408604", "text": "def order_item_property(self, order_item_property):\n\n self._order_item_property = order_item_property", "title": "" }, { "docid": "e20954d9b750d3ad93502b07e1f33be9", "score": "0.54044497", "text": "def modify_item(self, *args):\n\n # resetar campos\n self.modification_dialog.show()", "title": "" }, { "docid": "7c6c75c625d2c2e92df664f13c6189d8", "score": "0.5398982", "text": "def setQuantity(self, newQuantity):\n self.myQuantity = newQuantity", "title": "" }, { "docid": "47700a0fbf8ab27e68b3b836780da67e", "score": "0.5390327", "text": "async def replace_item(self, item, *, item_uid):\n async with self._lock:\n return await self._replace_item(item, item_uid=item_uid)", "title": "" }, { "docid": "aeefc66cc109f8688370d9937b67ce4d", "score": "0.5382278", "text": "def _set_item(self, item, value, flags=None):\n item = item.lstrip('/')\n response = self._adapter.get(self._build_uri([item]))\n index = 0\n\n try:\n if isinstance(value, (str, unicode)):\n value = value.encode('utf-8')\n except NameError: #in Python3 str is unicode by default\n if isinstance(value, str):\n value = value.encode('utf-8')\n\n if response.status_code == 200:\n index = 
response.body.get('ModifyIndex')\n if response.body.get('Value') == value:\n return True\n query_params = {'index': index}\n if flags is not None:\n query_params['flags'] = flags\n response = self._adapter.put(self._build_uri([item], query_params),\n value)\n if not response.status_code == 200 or not response.body:\n raise KeyError(\n 'Error setting \"{0}\" ({1})'.format(item, response.status_code))", "title": "" }, { "docid": "4092a2eac5b817069739cd082c574de0", "score": "0.5376446", "text": "def setName(self, name):\n name = str(name)\n if self.__name != name:\n self.__name = name\n self.sigItemChanged.emit(items.ItemChangedType.NAME)", "title": "" }, { "docid": "7970b0b70818477796f2f1c33be4e053", "score": "0.53686327", "text": "def handle_order_item(self, orderitem):\r\n orderitem.name = unicode(self)\r\n orderitem.sku = getattr(self, 'sku', u'')", "title": "" }, { "docid": "a6357443d736317bab5a7389593c7b25", "score": "0.5365811", "text": "async def register_item(self, item: Item) -> None:\n self.items[item.identifier] = item\n\n await self.init_item(item)\n\n self.core.event_bus.broadcast(EVENT_ITEM_CREATED, item=item)\n LOGGER.debug(\"Item registered: %s\", item.unique_identifier)\n if item.status != ItemStatus.ONLINE:\n LOGGER.warning(\n \"Item could not be initialised: %s [%s]\",\n item.unique_identifier, item.type)\n self.core.event_bus.broadcast(EVENT_ITEM_NOT_WORKING, item=item)", "title": "" }, { "docid": "5630ebdedd51f91efad7577cf1240b85", "score": "0.53626955", "text": "def itemToClone(self):\n if self.__compruebaNumeroItemsInventario():\n clone = self.__selectedItem.getClone()\n self.__player.addInventory(clone)\n self.itemUnselected()", "title": "" }, { "docid": "e54a1797e82b273db689fb479a8fce07", "score": "0.5361187", "text": "def setArmor(self, arg):\n self._armor += arg", "title": "" }, { "docid": "ea7685495a32b9299b03929899048512", "score": "0.5361101", "text": "def unequip_item(author: User, item: str):\n found_item = Item.find_by_name_or_nick(item)\n if not found_item:\n return f'Error: {item} does not exist.'\n\n item_name = found_item.name\n equipment = author.all_armour\n\n if found_item not in author.equipment_slots:\n return f'You do not have {item_name} equipped.'\n\n slot = found_item.slot - 1\n slot_name = author.equipment_slot_strs[slot]\n curr_equip = author.equipment_slots[slot]\n\n # Set the equipment slot\n setattr(author, slot_name, None)\n author.update_inventory(curr_equip)\n author.save()\n return f'{found_item.name} unequipped from {SLOTS[str(slot+1)]}!'", "title": "" }, { "docid": "e370900b4ac24aec9e708ccf8ce00c6a", "score": "0.5354912", "text": "def put(self, item):\n self.q.append(item)\n self.persist()", "title": "" }, { "docid": "26d6dc1a808681534c28de0a476bd348", "score": "0.5349362", "text": "def equip_item(self, ite=None, slot=None):\n equipment = self.db.equipment\n if equipment[slot] is not None:\n self.msg(\"You must unequip %s before you may equip %s.\" % (equipment[slot].name, ite.name))\n return\n \n if ite is None:\n wep_equipped = 0\n armor_equipped = 0\n lring_equipped = 0\n rring_equipped = 0\n back_equipped = 0\n trinket_equipped = 0\n shield_equipped = 0\n\n for item in self.contents:\n if item.db.item_type is not None:\n if 'weapon' in item.db.slot and wep_equipped == 0:\n equipment['weapon'] = item\n wep_equipped = 1\n item.on_equip()\n elif 'armor' in item.db.slot and armor_equipped == 0:\n equipment['armor'] = item\n armor_equipped = 1\n item.on_equip()\n elif 'left finger' in item.db.slot and lring_equipped == 0:\n 
equipment['left finger'] = item\n lring_equipped = 1\n item.on_equip()\n elif 'right finger' in item.db.slot and rring_equipped == 0:\n equipment['right finger'] = item\n rring_equipped = 1\n item.on_equip()\n elif 'back' in item.db.slot and back_equipped == 0:\n equipment['back'] = item\n back_equipped = 1\n item.on_equip()\n elif 'trinket' in item.db.slot and trinket_equipped == 0:\n equipment['trinket'] = item\n trinket_equipped = 1\n item.on_equip()\n elif 'shield' in item.db.slot and shield_equipped == 0:\n equipment['shield'] = item\n shield_equipped = 1\n item.on_equip()\n\n if wep_equipped != 1:\n self.msg(\"You had no weapons to equip.\")\n else:\n self.db.equipment = equipment\n self.msg(\"You now wield %s in your main hand.\" % self.db.equipment['weapon'])\n\n if armor_equipped != 1:\n self.msg(\"You had no armor to equip\")\n else:\n self.db.equipment = equipment\n self.msg(\"You are now wearing %s for armor.\" % self.db.equipment['armor'])\n return\n \n if 'main_hand_weapon' in slot:\n equipment[slot] = ite\n self.db.equipment = equipment\n self.msg(\"You now wield %s in your main hand.\" % self.db.equipment['main_hand_weapon'])\n elif 'armor' in slot:\n equipment['armor'] = ite\n self.db.equipment = equipment\n self.msg(\"You are now wearing %s for armor.\" % self.db.equipment['armor'])\n elif 'left finger' in slot:\n equipment['left finger'] = ite\n self.db.equipment = equipment\n self.msg(\"You are now wearing %s on your left finger.\" % ite.name)\n elif 'right finger' in slot:\n equipment['right finger'] = ite\n self.db.equipment = equipment\n self.msg(\"You are now wearing %s on your right finger.\" % ite.name)\n elif 'back' in slot:\n equipment['back'] = ite\n self.db.euqipment = equipment\n self.msg(\"You are now wearing %s on your back.\" % ite.name)\n elif 'shield' in slot:\n equipment['shield'] = ite\n self.db.equipment = equipment\n self.msg(\"You are now using %s as a shield\" % ite.name)\n elif 'trinket' in slot:\n equipment['trinket'] = ite\n self.db.equipment = equipment\n self.msg(\"You are now using %s as your trinket.\" % ite.name)\n else:\n self.msg(\"{r%s is not equippable in any slot!{n\" % ite)", "title": "" }, { "docid": "60f519cca7429b4cfe65ba9da118b673", "score": "0.53489584", "text": "def quantity(self, quantity):\n\n self._quantity = quantity", "title": "" }, { "docid": "60f519cca7429b4cfe65ba9da118b673", "score": "0.53489584", "text": "def quantity(self, quantity):\n\n self._quantity = quantity", "title": "" }, { "docid": "60f519cca7429b4cfe65ba9da118b673", "score": "0.53489584", "text": "def quantity(self, quantity):\n\n self._quantity = quantity", "title": "" }, { "docid": "60f519cca7429b4cfe65ba9da118b673", "score": "0.53489584", "text": "def quantity(self, quantity):\n\n self._quantity = quantity", "title": "" }, { "docid": "60f519cca7429b4cfe65ba9da118b673", "score": "0.53489584", "text": "def quantity(self, quantity):\n\n self._quantity = quantity", "title": "" }, { "docid": "02308efc66713f03e56b1cd42fe8207d", "score": "0.53276545", "text": "def addItem(self, item):\n self.items.append(item)", "title": "" }, { "docid": "eee9a3ad12a95eecbf582803f4109edd", "score": "0.53140914", "text": "def setitem(self, key, value):\n with self.lock:\n self.tbl[key] = value", "title": "" }, { "docid": "51a85cad422050fb2e00e4d015d9b02c", "score": "0.53083277", "text": "def trackItem(self, name, getFunc=None, setFunc=None):\n if name in self._itemDict:\n raise RuntimeError(\"An item named %r is already being tracked\" % (name,))\n\n self._itemDict[name] = 
_ItemState(\n getFunc = getFunc,\n setFunc = setFunc,\n )", "title": "" }, { "docid": "4ad0d5731764774d26f64ae8857b4d66", "score": "0.5298485", "text": "def add_item(self, item):\n\n self.current.add_item(item)", "title": "" }, { "docid": "abbf8b40c2391a003bdc2a74fd1d9003", "score": "0.528462", "text": "def qty(self, qty):\n\n self._qty = qty", "title": "" }, { "docid": "abbf8b40c2391a003bdc2a74fd1d9003", "score": "0.528462", "text": "def qty(self, qty):\n\n self._qty = qty", "title": "" }, { "docid": "abbf8b40c2391a003bdc2a74fd1d9003", "score": "0.528462", "text": "def qty(self, qty):\n\n self._qty = qty", "title": "" }, { "docid": "36256830bbfa970f01cb70f4964318c9", "score": "0.5284429", "text": "def equip_item(author: User, item: str):\n found_item = Item.find_by_name_or_nick(item)\n if found_item is None:\n return f'Error: {item} does not exist.'\n\n item_level = found_item.level\n user_cb_level = author.combat_level\n\n # Error checking/verification\n if user_cb_level < item_level:\n return f'Error: Insufficient level to equip item ({found_item.level}). \\\n Your current combat level is {user_cb_level}.'\n\n if not author.has_item_by_item(found_item):\n return f'Error: {found_item.name} not in inventory.'\n\n if not found_item.is_equippable:\n return f'Error: {item} cannot be equipped.'\n\n if found_item.is_max_only and not author.is_maxed:\n return f\"You cannot equip this item since you do not have {author.max_possible_level} skill total.\"\n\n slot = found_item.slot - 1 # I blame coiz for starting this at slot 1 :ANGERY:\n curr_equip = author.equipment_slots[slot]\n\n # if found_item == curr_equip:\n # return f\"You already have {found_item.name} equipped!\"\n\n item_name = found_item.name\n slot_name = author.equipment_slot_strs[slot]\n\n # Set the equipment slot\n setattr(author, slot_name, found_item)\n\n # Update the inventories\n author.update_inventory(curr_equip)\n author.update_inventory(found_item, remove=True)\n\n author.save()\n return f'{item_name} equipped to {SLOTS[str(slot+1)]}!'", "title": "" }, { "docid": "e618e897434c000c609d92a09c71487d", "score": "0.52832824", "text": "def add_item(self, item_name):\r\n self.items.append(QuestItem(item_name))", "title": "" }, { "docid": "f9b12c8aa99dd23ac12c27e322cb2326", "score": "0.52719384", "text": "def setEquipArmor(self, arg):\n if arg == \"helmet\":\n self._head = True\n elif arg == \"chestplate\":\n self._chest = True", "title": "" }, { "docid": "57d5cbc409ad962e8c8cd639c78f32ce", "score": "0.52688307", "text": "def update(self, request, pk=None):\n item = self.get_object(pk)\n serializer = InventorySerializer(item, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "4aa913835fc71b8f14f3c28ca005d7dd", "score": "0.52670723", "text": "def set_item(self, request):\n storage = self._get_storage(request)\n if storage.use_quota:\n left = self._check_quota(request)\n else:\n left = 0.\n\n user_id = request.user['userid']\n collection_name = request.sync_info['collection']\n item_id = request.sync_info['item']\n\n if self._was_modified(request, user_id, collection_name):\n raise HTTPPreconditionFailed(collection_name)\n\n try:\n data = json.loads(request.body)\n except ValueError:\n raise HTTPJsonBadRequest(WEAVE_MALFORMED_JSON)\n\n try:\n wbo = WBO(data)\n except ValueError:\n raise HTTPJsonBadRequest(WEAVE_INVALID_WBO)\n\n consistent, msg = wbo.validate()\n if not consistent:\n 
raise HTTPJsonBadRequest(WEAVE_INVALID_WBO)\n\n if self._has_modifiers(wbo):\n wbo['modified'] = request.server_time\n\n try:\n res = storage.set_item(user_id, collection_name, item_id,\n storage_time=request.server_time, **wbo)\n except StorageConflictError:\n raise HTTPJsonBadRequest(WEAVE_INVALID_WRITE)\n response = json_response(res)\n if storage.use_quota and left <= _ONE_MEG:\n response.headers['X-Weave-Quota-Remaining'] = str(left)\n return response", "title": "" } ]
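For reference, a self-contained sketch (class and attribute names are assumptions, not taken from any one passage above) combining the ideas in this row: a property setter that simply records the selected item, as in the positive passage, plus a size-capped add_item like the "inventory is full" snippets.

class Inventory:
    def __init__(self, max_size=10):
        self.max_size = max_size
        self.items = []        # everything the inventory holds
        self._item = None      # the currently selected item

    @property
    def item(self):
        return self._item

    @item.setter
    def item(self, item):
        # Mirrors the positive passage: setting the item just stores it.
        self._item = item

    def add_item(self, item):
        # Refuse to grow past max_size, mirroring the capacity checks above.
        if len(self.items) >= self.max_size:
            raise ValueError("inventory is full")
        self.items.append(item)

# Hypothetical usage:
#   inv = Inventory()
#   inv.add_item("sword")
#   inv.item = "sword"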
2b2c2dadef91f92a188bf766ad37a1cd
Constructor @ In, kwargs, dict, arguments @ Out, None
[ { "docid": "1872d722f5cdcfd96c697a5e0dfc3b98", "score": "0.0", "text": "def __init__(self, **kwargs):\n Interaction.__init__(self, **kwargs)\n self._demands = None # the resource demanded by this interaction\n self._penalty = None # how to penalize for not meeting demand NOT IMPLEMENTED", "title": "" } ]
[ { "docid": "6c0e35b9d4d7795af375ec34db478ceb", "score": "0.828617", "text": "def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs", "title": "" }, { "docid": "cce888fc35e8c8577470cab46fb38476", "score": "0.81837237", "text": "def __init__(self, *args, **kwargs):\n self._args = args\n self._kwargs = kwargs", "title": "" }, { "docid": "51d59c505c7afeec4717db7fcfe666a0", "score": "0.80491203", "text": "def __init__(self, *args, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "78f84362879746937d22ba2094764ecc", "score": "0.80053276", "text": "def __init__(self, *args, **kwargs) -> None:\n pass", "title": "" }, { "docid": "78f84362879746937d22ba2094764ecc", "score": "0.80053276", "text": "def __init__(self, *args, **kwargs) -> None:\n pass", "title": "" }, { "docid": "772a4b7a301451ce74043d69d8161e29", "score": "0.79982793", "text": "def __init__(self, **kwargs):\n raise NotImplementedError", "title": "" }, { "docid": "72e9682e82a5434c494485c8e5ff9954", "score": "0.7953157", "text": "def __init__(self, **kwargs: Any):", "title": "" }, { "docid": "6616a9ea0c920d6f57e7fb3af6c1c603", "score": "0.7861825", "text": "def __init__(self, *args, **kwargs):", "title": "" }, { "docid": "6616a9ea0c920d6f57e7fb3af6c1c603", "score": "0.7861825", "text": "def __init__(self, *args, **kwargs):", "title": "" }, { "docid": "6616a9ea0c920d6f57e7fb3af6c1c603", "score": "0.7861825", "text": "def __init__(self, *args, **kwargs):", "title": "" }, { "docid": "6616a9ea0c920d6f57e7fb3af6c1c603", "score": "0.7861825", "text": "def __init__(self, *args, **kwargs):", "title": "" }, { "docid": "f35eaaf5233469ad3bbcb225c0e8394e", "score": "0.7818728", "text": "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "f35eaaf5233469ad3bbcb225c0e8394e", "score": "0.7818728", "text": "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "f35eaaf5233469ad3bbcb225c0e8394e", "score": "0.7818728", "text": "def __init__(self, *args, **kwargs): # real signature unknown\n pass", "title": "" }, { "docid": "9a91c854034ce117248e93a1b69c6b5a", "score": "0.7816201", "text": "def __init__(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "9a91c854034ce117248e93a1b69c6b5a", "score": "0.7816201", "text": "def __init__(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "9a91c854034ce117248e93a1b69c6b5a", "score": "0.7816201", "text": "def __init__(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "9a91c854034ce117248e93a1b69c6b5a", "score": "0.7816201", "text": "def __init__(self, *args, **kwargs):\n pass", "title": "" }, { "docid": "3236e12a21c98d02fca4d39466ec4450", "score": "0.78023845", "text": "def __init__(self, **kwargs):\n pass", "title": "" }, { "docid": "3236e12a21c98d02fca4d39466ec4450", "score": "0.78023845", "text": "def __init__(self, **kwargs):\n pass", "title": "" }, { "docid": "3236e12a21c98d02fca4d39466ec4450", "score": "0.78023845", "text": "def __init__(self, **kwargs):\n pass", "title": "" }, { "docid": "0e1f29c3cbff1f6c037880e23d92365e", "score": "0.778563", "text": "def __init__(self, *args):\n\n self.args = args", "title": "" }, { "docid": "0e1f29c3cbff1f6c037880e23d92365e", "score": "0.778563", "text": "def __init__(self, *args):\n\n self.args = args", "title": "" }, { "docid": "f9b3a454e3f548ce51d3ef7ca10b01d4", "score": "0.7753674", "text": "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__args = args\n self.__kwargs = kwargs", 
"title": "" }, { "docid": "82d96a5413652391673c287d3fc7c0bd", "score": "0.7743761", "text": "def __init__ (self, *args, **kw):\n self.__args = args\n self.__kw = kw", "title": "" }, { "docid": "53a223614a2500c4abfa622b57d4b304", "score": "0.7732608", "text": "def __init__(self, **kwds):\n raise NotImplementedError", "title": "" }, { "docid": "a131d3fdbcd0766862d2df95d0eb5ad2", "score": "0.77253556", "text": "def __init__(self, *args, **kwargs):\n raise NotImplementedError()", "title": "" }, { "docid": "1bfe1a18a861ccf7851fdd0d8d5233c6", "score": "0.7723418", "text": "def __init__(self, **kwargs):\n self.__kwargs = kwargs", "title": "" }, { "docid": "ee9552bfd3782f09f834e4e52af7df2a", "score": "0.7708933", "text": "def __init__(self, args, kwargs):\n self._args_dec = list(args)\n self._kwargs_dec = dict(kwargs)", "title": "" }, { "docid": "fe659b36b726eec4ff251e824c3d738b", "score": "0.76775396", "text": "def __init__(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "51f8041fd7ca8813354a0665b497ebb6", "score": "0.7650803", "text": "def __init__(self, **kwargs):\n pass", "title": "" }, { "docid": "e4d53ac54113ca0b55f6bdc3a63a5939", "score": "0.7575041", "text": "def __init__(self, args):\n self.args = args", "title": "" }, { "docid": "b1ecc1b67bdf805f136980011c8b3be0", "score": "0.75078857", "text": "def initialize(self, *args, **kwargs):", "title": "" }, { "docid": "35e2003cfb1b9cb0bc3518a50b295473", "score": "0.7504747", "text": "def __init__(self, args=False):\n self.args = args", "title": "" }, { "docid": "b5d6bb297003ab4e1bdadae4408ab941", "score": "0.7472234", "text": "def __init__(self, **parameters):\n self.parameters = parameters", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n 
pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "bac010da6ac62a4075447ab4dface9d0", "score": "0.7434349", "text": "def __init__(self, *args):\r\n pass", "title": "" }, { "docid": "af97b47ae486a851df9081f4a3f46556", "score": "0.7428293", "text": "def init(self, *args, **kwds):\n pass", "title": "" }, { "docid": "a38de29914d3bfee95899aae6bb0e044", "score": "0.74268794", "text": "def __init__(self, *args, **kwargs):\n ...", "title": "" }, { "docid": "a38de29914d3bfee95899aae6bb0e044", "score": "0.74268794", "text": "def __init__(self, *args, **kwargs):\n ...", "title": "" }, { "docid": "a38de29914d3bfee95899aae6bb0e044", "score": "0.74268794", "text": "def __init__(self, *args, **kwargs):\n ...", "title": "" }, { "docid": "e0f153ca2878fdbc1faf3c17dac3eb81", "score": "0.7417475", "text": "def __init__(self, *args):\n pass", "title": "" }, { "docid": "2f62fcb729df3cfe4ca82943d8751d02", "score": "0.7400888", "text": "def __init__(self,*args):\n pass", "title": "" }, { "docid": "f0ee1e2872b545b6aee8bd5ff0df4fbe", "score": "0.7364972", "text": "def __init__(self, function, **kwargs):\n self.function = function\n self.kwargs = kwargs", "title": "" }, { "docid": "cb13f5027d3b7cda39a28810ae71eafd", "score": "0.7360314", "text": "def initialize(self, **kwargs):", "title": "" }, { "docid": "78f531ebc30e8a1e04c7b1e6d0b68e16", "score": "0.73366845", "text": "def __init__(**params):", "title": "" }, { "docid": "75f9564a4ce149d04723f0d1e5fd8987", "score": "0.7335178", "text": "def __init__(self):\n\n self.arg = None\n self.output = None", "title": "" }, { "docid": "731e3b9de2eccc526dd0d272b67febbb", "score": "0.73084694", "text": "def __init__(self, *args, **kwargs):\n\n self.logger = util.get_logger()\n self.args = args\n self.kwargs = kwargs\n for key, value in kwargs.items():\n setattr(self, key, value)", "title": "" }, { "docid": "4c5a57dc3f3a235c460a51048d831b50", "score": "0.7293932", "text": "def __init__(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "4c5a57dc3f3a235c460a51048d831b50", "score": "0.7293932", "text": "def __init__(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "4c5a57dc3f3a235c460a51048d831b50", "score": "0.7293932", "text": "def __init__(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "4c5a57dc3f3a235c460a51048d831b50", "score": "0.7293932", "text": "def __init__(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "4c5a57dc3f3a235c460a51048d831b50", "score": "0.7293932", "text": "def __init__(*args, **kwargs):\n \n pass", "title": "" }, { 
"docid": "4c5a57dc3f3a235c460a51048d831b50", "score": "0.7293932", "text": "def __init__(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "4c5a57dc3f3a235c460a51048d831b50", "score": "0.7293932", "text": "def __init__(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "4c5a57dc3f3a235c460a51048d831b50", "score": "0.7293932", "text": "def __init__(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "4c5a57dc3f3a235c460a51048d831b50", "score": "0.7293932", "text": "def __init__(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "4c5a57dc3f3a235c460a51048d831b50", "score": "0.7293932", "text": "def __init__(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "4c5a57dc3f3a235c460a51048d831b50", "score": "0.7293932", "text": "def __init__(*args, **kwargs):\n \n pass", "title": "" }, { "docid": "cd2728ff7e049e94bd83693a0d4e6bbf", "score": "0.7282075", "text": "def __init__(self, args):\n super().__init__()\n self.args = args", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def __init__(self,*args):\r\n pass", "title": "" }, { "docid": "48a79abf12bbbae8375d9c714bfda5ca", "score": "0.72550803", "text": "def 
__init__(self,*args):\r\n pass", "title": "" } ]
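A short sketch (the class name and the attribute-exposing loop are assumptions) of what most of the __init__ variants in this row reduce to: keep the positional and keyword arguments around for later use.

class Configurable:
    def __init__(self, *args, **kwargs):
        # Record the raw arguments, as several passages do.
        self.args = args
        self.kwargs = kwargs
        # Optionally surface keyword arguments as attributes,
        # following the passage that calls setattr in a loop.
        for key, value in kwargs.items():
            setattr(self, key, value)

# Hypothetical usage:
#   cfg = Configurable(1, 2, mode="fast")
#   cfg.mode  ->  "fast"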
af8d11fe0f568e83bd7293056c820f72
Function computes a 2D distance field. Distance at member of entity_queue is zero. Shortest paths avoid obstacles and use distance_type distances.
[ { "docid": "04b882c9bfb1c8ba1a73ead8f86fe83c", "score": "0.6837322", "text": "def compute_distance_field(self, entity_type):\n visited = [[EMPTY for dummy_col in range(self._grid_width)]\n for dummy_row in range(self._grid_height)]\n distance_field = [[self._grid_width * self._grid_height for dummy_col in range(self._grid_width)]\n for dummy_row in range(self._grid_height)]\n boundary = poc_queue.Queue()\n if entity_type == ZOMBIE:\n for zombie in self._zombie_list:\n boundary.enqueue(zombie)\n visited[zombie[0]][zombie[1]] = FULL\n distance_field[zombie[0]][zombie[1]] = 0\n else:\n for human in self._human_list:\n boundary.enqueue(human)\n visited[human[0]][human[1]] = FULL\n distance_field[human[0]][human[1]] = 0\n\n while len(boundary) > 0:\n current_cell = boundary.dequeue()\n neighbours = self.four_neighbors(current_cell[0], current_cell[1])\n for neighbour in neighbours:\n if visited[neighbour[0]][neighbour[1]] is EMPTY and self.is_empty(neighbour[0], neighbour[1]):\n visited[neighbour[0]][neighbour[1]] = FULL\n boundary.enqueue(neighbour)\n distance_field[neighbour[0]][neighbour[1]] = distance_field[current_cell[0]][current_cell[1]] + 1\n\n return distance_field", "title": "" } ]
[ { "docid": "15e8aa11b62ca2778ac69d66394d1324", "score": "0.72263473", "text": "def compute_distance_field(self, entity_type):\n grid_height = self.get_grid_height()\n grid_width = self.get_grid_width()\n #Create a 2D list distance_field of the same size as the grid and initialize each of its entries to be the product of the height times the width of the grid\n distance_field = [[grid_height*grid_width \\\n for dummy_col in range(grid_width)] \\\n for dummy_row in range(grid_height)]\n #obstacle grid, initialize its cells to be empty\n visited = poc_grid.Grid(grid_height, grid_width)\n boundary = poc_queue.Queue() \n if entity_type == ZOMBIE:\n for zom in self.zombies():\n boundary.enqueue(zom)\n elif entity_type == HUMAN:\n for hum in self.humans():\n boundary.enqueue(hum)\n for grid in boundary:\n visited.set_full(grid[0], grid[1]) # initialize visited to be FULL \n distance_field[grid[0]][grid[1]] = 0 # initialize distance_field to be zero\n while len(boundary) > 0:\n #while boundary.__len__()>0:\n cell = boundary.dequeue()\n neighbors = visited.four_neighbors(cell[0], cell[1])\n for neighbor in neighbors:\n if (visited.is_empty(neighbor[0], neighbor[1]) )and (self.is_empty(neighbor[0], neighbor[1])):\n visited.set_full(neighbor[0], neighbor[1])\n boundary.enqueue(neighbor)\n distance_field[neighbor[0]][neighbor[1]] = min(distance_field[neighbor[0]][neighbor[1]], distance_field[cell[0]][cell[1]]+1)\n return distance_field", "title": "" }, { "docid": "34b76c82560ea15079c3bc27bce4a7cc", "score": "0.6988219", "text": "def compute_distance_field(self, entity_type):\n # Create a new grid visited of the same size as the original grid \n # and initialize its cells to be empty.\n visited = poc_grid.Grid(self._grid_height, self._grid_width)\n for obstacle in self._obstacle_list:\n visited.set_full(obstacle[0], obstacle[1])\n \n # Create a 2D list of the same size as the original grid \n # and initialize each of its entries to be the product of the height times the width of the grid.\n distance_field = [[self._grid_height * self._grid_width for dummy_c in range(self._grid_width)]\n for dummy_r in range(self._grid_height)]\n \n # Create a queue boundary that is a copy of either the zombie list or the human list. 
\n boundary = poc_queue.Queue()\n if entity_type == ZOMBIE:\n list_type = self._zombie_list\n if entity_type == HUMAN:\n list_type = self._human_list\n \n # For cells in the queue, initialize visited to be FULL and distance_field to be zero.\n for item in list_type:\n boundary.enqueue(item)\n visited.set_full(item[0], item[1])\n distance_field[item[0]][item[1]] = 0\n \n # Implement a modified version of the BFS search\n while boundary:\n cell = boundary.dequeue()\n neighbors = visited.four_neighbors(cell[0], cell[1])\n for resident in neighbors:\n if visited.is_empty(resident[0], resident[1]):\n distance_field[resident[0]][resident[1]] = min(distance_field[resident[0]][resident[1]],\n distance_field[cell[0]][cell[1]] + 1)\n visited.set_full(resident[0], resident[1])\n boundary.enqueue(resident) \n\n return distance_field", "title": "" }, { "docid": "711ae458d6170ced8b9349a0d8eb3166", "score": "0.69454354", "text": "def compute_distance_field(self, entity_type):\n visited = poc_grid.Grid(self._grid_height, self._grid_width)\n distance_field = [[self._grid_height * self._grid_width \\\n for dummy_col in range(self._grid_width)]\\\n for dummy_row in range(self._grid_height)]\n\n boundary = poc_queue.Queue()\n\n\n cast = self._zombie_list if entity_type == ZOMBIE else self._human_list\n\n for member in cast:\n boundary.enqueue(member)\n visited.set_full(member[0],member[1])\n distance_field[member[0]][member[1]] = 0\n\n # if entity_type == ZOMBIE:\n # for zombie in self._zombie_list:\n # boundary.enqueue(zombie)\n\n # elif entity_type == HUMAN:\n # for human in self._human_list:\n # boundary.enqueue(human)\n\n # for side in boundary.__iter__():\n # visited.set_full(side[0], side[1])\n # distance_field[side[0]][side[1]] = 0\n\n while bool(boundary):\n current_cell = boundary.dequeue()\n\n if entity_type == ZOMBIE:\n neighbors = visited.four_neighbors(current_cell[0], current_cell[1])\n\n else:\n neighbors = visited.eight_neighbors(current_cell[0], current_cell[1])\n\n for neighbor in neighbors:\n\n if self.is_empty(neighbor[0],neighbor[1]) and visited.is_empty(neighbor[0], neighbor[1]):\n distance_field[neighbor[0]][neighbor[1]] = distance_field[current_cell[0]][current_cell[1]] + 1\n \n visited.set_full(neighbor[0], neighbor[1])\n boundary.enqueue(neighbor)\n\n return distance_field", "title": "" }, { "docid": "ef5699b99595037d807993564db9450e", "score": "0.690007", "text": "def compute_distance_field(self, entity_type):\r\n four_neighbors = poc_grid.Grid.four_neighbors\r\n if entity_type == HUMAN:\r\n entity_type = self._human_list\r\n if entity_type == ZOMBIE:\r\n entity_type = self._zombie_list\r\n \r\n visited = [[EMPTY for row in range(self.grid_width)] \\\r\n for col in range(self.grid_height)]\r\n \r\n distance_field = [[self.grid_width * self.grid_height \\\r\n for row in range(self.grid_width)] \\\r\n for col in range(self.grid_height)]\r\n \r\n if self.obstacle_list != None:\r\n for row,col in self.obstacle_list:\r\n visited[row][col] = FULL\r\n \r\n \r\n boundary = poc_queue.Queue()\r\n for entity in entity_type:\r\n boundary.enqueue(entity)\r\n row,col = entity\r\n visited[row][col] = FULL\r\n distance_field[row][col] = 0\r\n \r\n while len(boundary) != 0:\r\n current_cell = boundary.dequeue()\r\n for row,col in four_neighbors(self,*current_cell):\r\n if visited[row][col] == EMPTY:\r\n distance_field[row][col] = distance_field[current_cell[0]][current_cell[1]] + 1\r\n visited[row][col] = FULL\r\n boundary.enqueue((row,col))\r\n \r\n return distance_field", "title": "" }, { "docid": 
"56d6f4fdb0bdcf7d0b7167dca28bf9a4", "score": "0.6853294", "text": "def compute_distance_field(self, entity_type):\n grid_height = self.get_grid_height()\n grid_width = self.get_grid_width()\n\n self._visited = poc_grid.Grid(grid_height, grid_width)\n self._distance_field = [[grid_height * grid_width for dummy_col in range(grid_width)]\n for dummy_row in range(grid_height)]\n self._boundary = poc_queue.Queue()\n\n if entity_type is ZOMBIE:\n for zombie in self.zombies():\n self._boundary.enqueue(zombie)\n if entity_type is HUMAN:\n for human in self.humans():\n self._boundary.enqueue(human)\n\n for entity in self._boundary:\n self._visited.set_full(entity[0], entity[1])\n self._distance_field[entity[0]][entity[1]] = 0\n\n while len(self._boundary) > 0:\n cell = self._boundary.dequeue()\n distance = self._distance_field[cell[0]][cell[1]]\n neighbours = self._visited.four_neighbors(cell[0], cell[1])\n\n for neighbour in neighbours:\n row, col = neighbour[0], neighbour[1]\n if self._visited.is_empty(row, col) and self.is_empty(row, col):\n self._distance_field[row][col] = min(self._distance_field[row][col], distance + 1)\n self._visited.set_full(row, col)\n self._boundary.enqueue(neighbour)\n return self._distance_field", "title": "" }, { "docid": "e06f1d72da04893254bdb6b8c5570269", "score": "0.67656726", "text": "def compute_distance_field(self, entity_type):\n # visited is used to check whether the grid has been searched as a neighbor\n visited = [[EMPTY for dummy_col in range(self._grid_width)]\n for dummy_row in range(self._grid_height)]\n distance_field = [[self._grid_height * self._grid_width for dummy_col in range(self._grid_width)]\n for dummy_row in range(self._grid_height)]\n \n # boundry is a waiting list for grids to be checked\n # create a copy of either the zombie or the human list \n boundary = Queue() \n if entity_type == ZOMBIE: \n for item in self._zombie_list:\n boundary.enqueue(item)\n else:\n for item in self._human_list:\n boundary.enqueue(item)\n \n # set visited to be FULL and distance_field to be zero\n for item in boundary:\n visited[item[0]][item[1]] = FULL\n distance_field[item[0]][item[1]] = 0\n \n # BFS search\n while len(boundary) != 0:\n current_cell = boundary.dequeue()\n neighbors = self.four_neighbors(current_cell[0], current_cell[1])\n for neighbor_cell in neighbors:\n # if this grid has not been searched and is not a obstacle\n if visited[neighbor_cell[0]][neighbor_cell[1]] == EMPTY and self.is_empty(neighbor_cell[0], neighbor_cell[1]):\n visited[neighbor_cell[0]][neighbor_cell[1]] = FULL\n boundary.enqueue(neighbor_cell)\n distance_field[neighbor_cell[0]][neighbor_cell[1]] = distance_field[current_cell[0]][current_cell[1]] + 1\n \n return distance_field", "title": "" }, { "docid": "33c9f6ba4589bbdcbd9e395520ead64d", "score": "0.67325926", "text": "def compute_distance_field(self, entity_type):\r\n visited = grid.Grid(self.get_grid_height(), self.get_grid_width())\r\n distance_field = [[(self.get_grid_height() * self.get_grid_width()) \r\n for dummy_col in range(self.get_grid_width())] \r\n for dummy_row in range(self.get_grid_height())]\r\n \r\n boundary = queue.Queue()\r\n \r\n if entity_type == HUMAN:\r\n for human in self.humans():\r\n boundary.enqueue(human)\r\n else:\r\n for zombie in self.zombies():\r\n boundary.enqueue(zombie)\r\n \r\n for entity in boundary:\r\n visited.set_full(entity[0], entity[1])\r\n distance_field[entity[0]][entity[1]] = 0 \r\n \r\n # Breadth First Search:\r\n while boundary:\r\n curr_cell = boundary.dequeue() # tuple of the 
form (row, col)\r\n neighbors = self.four_neighbors(curr_cell[0], curr_cell[1])\r\n for nbr in neighbors:\r\n if visited.is_empty(nbr[0], nbr[1]) and self.is_empty(nbr[0], nbr[1]):\r\n visited.set_full(nbr[0], nbr[1])\r\n boundary.enqueue(nbr)\r\n curr_dist = distance_field[nbr[0]][nbr[1]]\r\n distance_field[nbr[0]][nbr[1]] = min(curr_dist, \\\r\n distance_field[curr_cell[0]][curr_cell[1]] + 1)\r\n return distance_field", "title": "" }, { "docid": "673d060ce379339b00c0022d87761f1f", "score": "0.6528374", "text": "def build_distance_field(self, target, blockers=[], expansion=0):\n\n bin_size = 20\n\n obstacles = {} # (i,j) -> bool \n\n # paint no-obstacles over the map\n for i in range(self.width/bin_size):\n for j in range(self.height/bin_size):\n obstacles[(i,j)] = False\n\n # rasterize collision space of each object\n for obj in blockers:\n i_lo = int((obj.position[0] - obj.radius)/bin_size - 1)\n i_hi = int((obj.position[0] + obj.radius)/bin_size + 1)\n j_lo = int((obj.position[1] - obj.radius)/bin_size - 1)\n j_hi = int((obj.position[1] + obj.radius)/bin_size + 1)\n for i in range(i_lo, i_hi+1):\n for j in range(j_lo, j_hi+1):\n x, y = i*bin_size, j*bin_size\n dx = obj.position[0]-x\n dy = obj.position[1]-y\n dist = math.sqrt(dx*dx+dy*dy)\n if dist < obj.radius + expansion:\n obstacles[(i,j)] = True\n\n # dijkstra's algorithm to build distance map\n dist = {}\n start = (int(target[0]/bin_size), int(target[1]/bin_size))\n dist[start] = 0\n queue = [(0,start)]\n while queue:\n d, c = heapq.heappop(queue)\n for di, dj in [(-1,0),(1,0),(0,-1),(0,1)]:\n next_c = (c[0] + di, c[1] + dj)\n if next_c in obstacles:\n\n if not obstacles[next_c]:\n cost = 1\n else:\n cost = 1e6\n next_d = d + cost\n if next_c not in dist or next_d < dist[next_c]:\n dist[next_c] = d\n heapq.heappush(queue, (next_d, next_c))\n\n def lookup(position): # bilinear interpolation\n x,y = position\n alpha = float(x % bin_size)/bin_size\n beta = float(y % bin_size)/bin_size\n i, j = int(x / bin_size), int(y / bin_size)\n dx = x - self.width/2\n dy = y - self.height/2\n default = 2*math.sqrt(dx*dx+dy*dy)\n a = dist.get((i,j),default)\n b = dist.get((i+1,j),default)\n c = dist.get((i,j+1),default)\n d = dist.get((i+1,j+1),default)\n ab = (1-alpha)*a + alpha*b\n cd = (1-alpha)*c + alpha*d\n abcd = (1-beta)*ab + beta*cd\n return abcd\n\n return lookup", "title": "" }, { "docid": "13eb034fe7c89fc1c55dd491e2107106", "score": "0.578191", "text": "def distance(self, threads=2):\r\n input = self.copy(depth=8)\r\n\r\n # Temporary storage for G lattice\r\n g = self.__class__(\r\n self.width, self.height,\r\n channels=1, depth=32\r\n )\r\n ibounds = [int(t/float(threads)*self.width) for t in range(threads)]\r\n ibounds = zip(ibounds, ibounds[1:] + [self.width])\r\n\r\n args1 = [(i[0], i[1], self.width, self.height, input.pixels, g.pixels)\r\n for i in ibounds]\r\n\r\n multithread(libfab.distance_transform1, args1)\r\n\r\n del input\r\n\r\n output = self.copy(depth='f')\r\n\r\n jbounds = [int(t/float(threads)*self.height) for t in range(threads)]\r\n jbounds = zip(jbounds, jbounds[1:] + [self.height])\r\n\r\n args2 = [(j[0], j[1], self.width, self.pixels_per_mm,\r\n g.pixels, output.pixels) for j in jbounds]\r\n\r\n multithread(libfab.distance_transform2, args2)\r\n\r\n output.zmin = output.zmax = None\r\n\r\n return output", "title": "" }, { "docid": "8246cb76655793f19802c2c19934a19c", "score": "0.5745888", "text": "def get_drive_distance(row):\n \n apiKey = self.apiKey\n \n from_geopoint = row[\"From_Latitude\"] + ',' + 
row[\"From_Longitude\"]\n \n to_geopoint = row[\"To_Latitude\"] + ',' + row[\"To_Longitude\"]\n \n origin = self.filtered_data[(self.filtered_data[self.lat_lon_column] == from_geopoint)\n ].index.tolist()\n \n try:\n origin = origin[0]\n except:\n print(f'exception at origin point {from_lat} , {from_lon}')\n print(origin)\n return 0\n \n destination = self.filtered_data[(self.filtered_data[self.lat_lon_column]==to_geopoint)\n ].index.tolist()\n \n try:\n destination = destination[0]\n except:\n print(f'exception at destination point {from_lat} , {from_lon}')\n print(destination)\n return 0\n \n \n \n if origin == destination:\n return 0\n \n \n import requests\n url = ('https://maps.googleapis.com/maps/api/distancematrix/json?units=imperial&origins={}&destinations={}&key={}'\n .format(origin,\n destination,\n apiKey\n )\n )\n try:\n response = requests.get(url)\n resp_json_payload = response.json()\n drive_distance = resp_json_payload['rows'][0]['elements'][0]['distance']['value']*0.000621371\n except:\n print('ERROR: {}, {}'.format(origin, destination))\n drive_distance = 99999999999\n \n# print(f'{origin} to {destination} : {drive_distance}')\n return drive_distance", "title": "" }, { "docid": "ef0aad7914206f6b6f52c6bc87fe131f", "score": "0.57031024", "text": "def distance_heuristic(self) -> Integral:\n current_board = self.board\n total_distance = 0\n\n for row in range(int(self.size_y)):\n for col in range(int(self.size_x)):\n current_num = current_board[col][row]\n\n correct_x = (current_num - 1) % self.size_x if current_num != 0 else self.size_x - 1\n correct_y = (current_num - 1) // self.size_x if current_num != 0 else self.size_y - 1\n\n total_distance += abs(col - correct_x) + abs(row - correct_y)\n\n return total_distance", "title": "" }, { "docid": "4afd18271186ccf0d784af4db97b5049", "score": "0.5699638", "text": "def update_distances(self, node, board, half_move, depth):\n if depth < 0:\n legal_moves = board.legal_moves.count()\n return (0, np.full((DROP_PARAM_STEPS_NUMBER), legal_moves, dtype=int))\n\n\n legal_moves = np.full((DROP_PARAM_STEPS_NUMBER), 0, dtype=int)\n v_legal_moves = {}\n if depth == 0:\n # For legal moves we choose one random and update all according to it to save time (it's estimation)\n if len(node.variations) > 0:\n index = np.random.randint(0,len(node.variations))\n board.push(node.variations[index].move)\n _,rand_legal_moves= self.update_distances(node.variations[index], board, half_move + 1, depth - 1)\n board.pop()\n\n min_dist = 10000.0\n for v in node.variations:\n if depth != 0:\n board.push(v.move)\n v_distance, v_legal_moves[v] = self.update_distances(v, board, half_move + 1, depth - 1)\n board.pop()\n else:\n v_distance=0\n v_legal_moves[v]=rand_legal_moves\n\n # take care of the distance\n v_distance += self.moves_data[half_move].get_distance(self.get_move_san(v, board))\n v.comment = f\"{v_distance : .2f}\"\n min_dist = min(min_dist, v_distance)\n\n for v in node.variations:\n # take care of legal moves\n index = get_param_index_for_threshold(float(v.comment) - min_dist, depth)\n legal_moves[0:index - 1] = np.add(legal_moves[0:index - 1], v_legal_moves[v][0:index - 1])\n\n # remove all those not passing initial maximal threshold\n node.variations = [v for v in node.variations\n if float(v.comment) < min_dist + get_drop_threshold(DROP_PARAM_START, depth)]\n\n node.variations.sort(key=lambda v: float(v.comment))\n return (min_dist, legal_moves)", "title": "" }, { "docid": "925265ebb2207e5b0247a5a562da98bb", "score": "0.5680914", "text": "def 
distance(grid_sq_x, grid_sq_y, x, y, client, search_radius, grid_centre, distance_metric):\n if type(grid_sq_y) is not tuple:\n if x > y: # Only want to fill lower diagonal between demand points\n return -1\n elif not grid_sq_x.on_land[\"square\"] or not grid_sq_y.on_land[\"square\"]: # Box not on land\n return -1\n elif x == y: # Same boxes\n return 0\n else:\n if not grid_sq_x.on_land[\"square\"]:\n return -1\n try: # If centre not on land, use a corner that is on land\n coordinates, radii = coordinates_and_radii(grid_sq_x, grid_sq_y, grid_centre, search_radius)\n # Sometimes you get same corners for different (adjacent) boxes (happens if centre is not on land).\n # These are adjacent edge boxes, whose centre is not on land and for both a corner is used when\n # calculating distances. Moreover, for at least one of them (at least) one corner is also not on land.\n # In these cases both boxes end up using the same coordinate (i.e. corner) to calculate distances and\n # the distance between the two is zero (actually openrouteservice returns a \"distance\" error) and in\n # addition, the distance between the boxes and all other boxes is the same for both boxes. This essentially\n # implies that as far as the algorithm is concerned, the two boxes are merged in one. However, since one of\n # the boxes has both each centre and at least a corner off-land, probably implies that most of its (little)\n # land is very close to the other box.\n if (round(coordinates[0][0], 6), round(coordinates[0][1], 6)) \\\n == (round(coordinates[1][0], 6), round(coordinates[1][1], 6)):\n return 0\n routes = directions(client, coordinates, radiuses=radii, geometry=False)\n # Choose either duration or distance\n return float(routes[\"routes\"][0][\"summary\"][distance_metric])\n except Exception as e: # Failed to get distance\n print(f\"Failed to retrieve distance from ORS server between coordinates: {coordinates}.\")\n print(\"Waiting 5 seconds.\")\n sleep(5)\n try:\n routes = directions(client, coordinates, radiuses=radii, geometry=False)\n print(\"Success\")\n return float(routes[\"routes\"][0][\"summary\"][distance_metric])\n except:\n try:\n print(f\"Failed to retrieve distance from ORS server between coordinates: {coordinates}.\")\n print(\"Waiting 10 seconds.\")\n sleep(10)\n routes = directions(client, coordinates, radiuses=radii, geometry=False)\n print(\"Success\")\n return float(routes[\"routes\"][0][\"summary\"][distance_metric])\n except:\n print(f\"Failed to retrieve distance from ORS server between coordinates: {coordinates}. Can't create distance matrix. 
Aborting.Are all needed maps loaded?\")\n raise", "title": "" }, { "docid": "ebfb20a0a8dded47fe26a26fa4c040d2", "score": "0.5632835", "text": "def adj_distance(cell_1, cell_2):\n graph_dist = np.sum(np.array(cell_1.get_matrix()) != np.array(cell_2.get_matrix()))\n ops_dist = np.sum(np.array(cell_1.get_ops()) != np.array(cell_2.get_ops()))\n return graph_dist + ops_dist", "title": "" }, { "docid": "4f467ce24862481d3dfb85dfcb0d4283", "score": "0.5522272", "text": "def distance(self):\n costs = self.calculate_costs()\n print(cols[:self.N])\n #M1 = nx.to_numpy_matrix(self.g1)\n #M2 = nx.to_numpy_matrix(self.g2)\n \n self.Mindices = cols[:self.N]\n return np.sum(costs)", "title": "" }, { "docid": "c5fc9fc03577fe796e1797e4577a4e43", "score": "0.5521026", "text": "def get_dist_travelled (box, goal):\n dist = 0\n curr_box = box\n \n if goal == \"dest\":\n \n while forward_prev_box[curr_box]:\n prev_box = forward_prev_box[curr_box]\n dist += euclidian (detail_points[curr_box], detail_points[prev_box])\n curr_box = prev_box\n \n else:\n while backward_prev_box[curr_box]:\n prev_box = backward_prev_box [curr_box]\n dist += euclidian (detail_points[curr_box], detail_points[prev_box])\n curr_box = prev_box\n\n return dist", "title": "" }, { "docid": "c2523162f5c971385448447dbeaf960a", "score": "0.5485505", "text": "def distance(self):", "title": "" }, { "docid": "c2523162f5c971385448447dbeaf960a", "score": "0.5485505", "text": "def distance(self):", "title": "" }, { "docid": "e79d461faca0586d312ca0fe6fdd3ad1", "score": "0.544825", "text": "def get_object_distance(self):\n return self.ultrasonic_queue.get()", "title": "" }, { "docid": "b6cb3f7f13a57d3e760616dd12e9ff9b", "score": "0.5399637", "text": "def ddpg_distance_metric(actions1, actions2):\n diff = actions1-actions2\n mean_diff = np.mean(np.square(diff), axis=0)\n dist = sqrt(np.mean(mean_diff))\n return dist", "title": "" }, { "docid": "afb84cf7327a2c05c6f1cea666835281", "score": "0.53882235", "text": "def distance(self, node_1, node_2):\n x1 = self.map.intersections[node_1][0]\n y1 = self.map.intersections[node_1][1]\n x2 = self.map.intersections[node_2][0]\n y2 = self.map.intersections[node_2][1]\n dist = math.sqrt( (x2-x1)**2 + (y2-y1)**2 )\n return dist", "title": "" }, { "docid": "bfe316963da4f0500f3580adb5d0db37", "score": "0.53863776", "text": "def find_shortest_path(start_position, goal_position, grid_dim, black_fields):\n # init some grids of costs\n d_cost = [[-10 for j in range(grid_dim[1])] for i in range(grid_dim[0])] # distance to start cost\n h_cost = [[-10 for j in range(grid_dim[1])] for i in range(grid_dim[0])] # distance to goal cost\n f_cost = [[-10 for j in range(grid_dim[1])] for i in range(grid_dim[0])] # combined added cost\n # init a grid to store the used fields\n USED = [[False for j in range(grid_dim[1])] for i in range(grid_dim[0])]\n # init grids to declare parent and parent status\n parent = [[[-10, -10, 100000] for j in range(grid_dim[1])] for i in range(grid_dim[0])]\n set_parent = [[False for j in range(grid_dim[1])] for i in range(grid_dim[0])]\n\n # set black/unreachable fields as used\n for i in range(grid_dim[0]):\n for j in range(grid_dim[1]):\n for black in range(len(black_fields)):\n if i == black_fields[black][0] and j == black_fields[black][1]:\n USED[i][j] = True\n # init temporary position and cost\n tmp_pos = start_position\n\n found = False # tell when a path is found\n path = [] # store the path\n while not found:\n # calc and store all positions around the actual position\n around_position = 
[[tmp_pos[0] - 1, tmp_pos[1] - 1], [tmp_pos[0], tmp_pos[1] - 1],\n [tmp_pos[0] + 1, tmp_pos[1] - 1], [tmp_pos[0] - 1, tmp_pos[1]],\n [tmp_pos[0] + 1, tmp_pos[1]], [tmp_pos[0] - 1, tmp_pos[1] + 1],\n [tmp_pos[0], tmp_pos[1] + 1], [tmp_pos[0] + 1, tmp_pos[1] + 1]]\n for i in range(7):\n # see if position is inside the grid\n if 0 <= around_position[i][0] < grid_dim[0] and 0 <= around_position[i][1] < grid_dim[1]:\n if set_parent[around_position[i][0]][around_position[i][1]] is False:\n # set the parent status of the around position\n parent[around_position[i][0]][around_position[i][1]] = [tmp_pos[0], tmp_pos[1]]\n set_parent[around_position[i][0]][around_position[i][1]] = True\n if around_position[i][0] == goal_position[0] and around_position[i][1] == goal_position[1]:\n # on of the around positions has reached the endpoint/goal\n found = True\n pos_ln = [goal_position[0], goal_position[1]]\n found_path = False\n while found_path is not True:\n # recalculate the path from endpoint/goal to startpoint\n px = pos_ln[0]\n py = pos_ln[1]\n pos_ln = [parent[px][py][0], parent[px][py][1]]\n if pos_ln[0] == start_position[0] and pos_ln[1] == start_position[1]:\n found_path = True\n path.append(pos_ln)\n path.append(goal_position)\n break\n if 0 <= around_position[i][0] < grid_dim[0] and 0 <= around_position[i][1] < grid_dim[1]:\n # calculate the costs from the fields around a centerpoint\n\n # distance to startpoint times ten\n d_cost[around_position[i][0]][around_position[i][1]] = int(math.sqrt(\n math.pow(around_position[i][0] - start_position[0], 2) + math.pow(\n around_position[i][1] - start_position[1], 2)) * 10)\n # distance to endpoint/goal times 10\n h_cost[around_position[i][0]][around_position[i][1]] = int(math.sqrt(\n math.pow(around_position[i][0] - goal_position[0], 2) + math.pow(\n around_position[i][1] - goal_position[1], 2)) * 10)\n # sum of the two costs\n f_cost[around_position[i][0]][around_position[i][1]] = int(h_cost[around_position[i][0]][\n around_position[i][1]] + \\\n d_cost[around_position[i][0]][\n around_position[i][1]])\n lowest_cost = 100000000 # init with a high value which can be easy underscored by a lower cost\n for i in range(grid_dim[0]):\n for j in range(grid_dim[1]):\n if f_cost[i][j] != -10 and (USED[i][j] is False) and (i != start_position[0] or j != start_position[1]):\n if f_cost[i][j] < lowest_cost:\n # searching for the lowest cost to find the shortest path\n # (can also be changed to highest cost to find the longest path)\n lowest_cost = f_cost[i][j]\n tmp_pos = [i, j]\n USED[tmp_pos[0]][tmp_pos[1]] = True # mark the used point to dont go there again\n # give back the shortest path\n return path", "title": "" }, { "docid": "6eeb5b5ae338c78d49c08f61d1912005", "score": "0.5378143", "text": "def CalculateDistanceGoal(n1,n2):\n return np.sqrt(((n1[0]-n2[0])**2)+((n1[1]-n2[1])**2))", "title": "" }, { "docid": "d312cef4ead8ccfbad74716061105a5e", "score": "0.5374073", "text": "def cophenetic(self,distance):\n dm = numpy.zeros((len(self)+1,len(self)+1))\n for i in range(-len(self),0):\n dec1 = self.decendants(self[i].left)\n dec2 = self.decendants(self[i].right)\n for j in dec1:\n if j > 0:\n for k in dec2:\n if k > 0:\n if distance == 'dist':\n dm[j,k] = dm[k,j] = self[i].distance\n elif distance == 'rank':\n dm[j,k] = dm[k,j] = numpy.abs(i)\n else:\n raise ValueError('Unrecognized distance option.')\n return dm", "title": "" }, { "docid": "d85eb04425314ecf22aac44738f775cc", "score": "0.53673136", "text": "def get_object_distance(ns1, ns2):\n global counter\n 
global environment_info\n global server_list\n if counter >=len(environment_info) :\n counter =0\n output_info = (environment_info[counter]).request_func(command=\"distance\", robot1=ns1, robot2=ns2)\n counter=counter+1\n distance = output_info[0]\n if distance == -1:\n print(\"wrong model name\")\n return distance", "title": "" }, { "docid": "4280c915454a937b68ff7cf3b6c23ee3", "score": "0.5350873", "text": "def get_distance(self, origin, destination, optional_args=None, handle_error=False): \n \n # Checking input values\n shape = [1,1]\n if isinstance(origin, list):\n orign = '|'.join(origin)\n shape[0] = len(origin)\n elif isinstance(origin, str):\n orign = origin\n else:\n raise TypeError(\"origin must be a string or list/tuple of strings, got %s\" % type(origin))\n if isinstance(destination, list):\n dest = '|'.join(destination)\n shape[1] = len(destination)\n elif isinstance(destination, str):\n dest = destination\n else:\n raise TypeError(\"destination must be a string or list/tuple of strings, got %s\" % type(destination)) \n \n # Define api url \n DIST_MATRIX_URL = \"https://maps.googleapis.com/maps/api/distancematrix/json?\"\n \n # Define query params\n encode_params = {'key': self.key, 'origins': orign, 'destinations': dest} # required\n if optional_args:\n encode_params.update(optional_args) # add optional args\n \n # Request and read results\n encodedParams = urllib.urlencode(encode_params)\n request = urllib2.Request(DIST_MATRIX_URL+encodedParams)\n response = urllib2.urlopen(request)\n self.dist_result = json.loads(response.read()) # make accessible from main\n \n # Process api results\n results = {}\n if self.dist_result['status'] == 'OK':\n for i in xrange(shape[0]):\n for j in xrange(shape[1]):\n results['origin-'+str(i)+'_destination-'+str(j)] = {'duration': self.dist_result['rows'][i]['elements'][j]['duration']['value'] / 60., # time in minuts\n 'distance': self.dist_result['rows'][i]['elements'][j]['distance']['value'] * 0.000621371, # distance in miles\n 'origin': self.dist_result['origin_addresses'][i],\n 'destination': self.dist_result['destination_addresses'][j]}\n else:\n error_str = self.dist_result['status']\n if handle_error == True:\n print 'Requested failed with error code: ' + error_str\n else:\n self.__response_error(error_str)\n time.sleep(self.time_delay) # delay to not get locked out of api\n return results", "title": "" }, { "docid": "4052ec881cbfb2b0fed534102ba695cd", "score": "0.5346564", "text": "def find_path(src, dest, mesh):\n\n def get_cost(box, goal):\n \"\"\"\n return the A* cost (total distance and estimation) from goal point to current point\n \"\"\"\n curr_box = box\n\n if goal == 'dest':\n distance = distance_from_src[curr_box]\n heuristic = get_heuristic (detail_points[curr_box], dest)\n else:\n distance = distance_from_dest[curr_box]\n heuristic = get_heuristic (detail_points[curr_box], src)\n\n return distance + heuristic\n\n def get_dist_travelled (box, goal):\n \"\"\"\n return the total distance from goal point to current point\n \"\"\"\n dist = 0\n curr_box = box\n \n if goal == \"dest\":\n \n while forward_prev_box[curr_box]:\n prev_box = forward_prev_box[curr_box]\n dist += euclidian (detail_points[curr_box], detail_points[prev_box])\n curr_box = prev_box\n \n else:\n while backward_prev_box[curr_box]:\n prev_box = backward_prev_box [curr_box]\n dist += euclidian (detail_points[curr_box], detail_points[prev_box])\n curr_box = prev_box\n\n return dist\n\n def get_heuristic (node, dest):\n return euclidian (node, dest)\n \n \"\"\"\n 
********* Here is where the function starts **********\n \"\"\"\n x_src, y_src = src\n x_dest, y_dest = dest\n\n src_box = None\n dest_box = None\n\n for x1, x2, y1, y2 in mesh['boxes']:\n if x_src >= x1 and x_src < x2 and y_src >= y1 and y_src < y2:\n src_box = (x1, x2, y1, y2)\n \n if x_dest >= x1 and x_dest < x2 and y_dest >= y1 and y_dest < y2:\n dest_box = (x1, x2, y1, y2)\n\n if src_box and dest_box:\n break\n\n #early check point\n if not src_box or not dest_box:\n print \"Source or destination is not available.\"\n return ([],[])\n\n elif src_box == dest_box:\n print \"They are in the same box.\"\n return ([(src, dest)], [(src_box)])\n\n \n visited_boxes = []\n visited_boxes.append(src_box)\n visited_boxes.append(dest_box)\n\n # store the nearest point(x, y) of a box (x1, x2, y1, y2) from given point (a, b) \n detail_points = {} # key: a box (x1, x2, y1, y2), value: a detail point (x, y)\n detail_points[src_box] = src\n detail_points[dest_box] = dest\n\n forward_prev_box = {} # key: a box (x1, x2, y1, y2), value: the parent box\n backward_prev_box = {}\n forward_prev_box[src_box] = None\n backward_prev_box[dest_box] = None\n\n distance_from_src = {} # key: a box (x1, x2, y1, y2), value: distant from target point\n distance_from_dest = {}\n distance_from_src[src_box] = 0\n distance_from_dest[dest_box] = 0\n\n priorityQueue = Q.PriorityQueue()\n priorityQueue.put ( ( get_cost(src_box, \"dest\"), src_box, \"dest\") )\n priorityQueue.put ( ( get_cost(dest_box, \"src\"), dest_box, \"src\") )\n\n found_path = False\n connection_box = None\n\n while priorityQueue :\n cost, box, goal = priorityQueue.get()\n \n if box == dest_box and goal == 'dest' :\n found_path = True\n connection_box = dest_box\n break\n\n if box == src_box and goal == 'src':\n found_path =True\n connection_box = src_box\n break\n\n if goal == 'dest' and box in backward_prev_box.keys():\n found_path = True\n connection_box = box\n break\n\n if goal == 'src' and box in forward_prev_box.keys():\n found_path = True\n connection_box = box\n break\n \n this_prev = {}\n\n if goal == 'dest':\n this_prev = forward_prev_box\n else:\n this_prev = backward_prev_box\n\n #list of boxes from adj\n adj_boxes = mesh['adj'].get(box, [])\n\n for adj_box in adj_boxes:\n\n if adj_box not in this_prev.keys():\n\n detail_points[adj_box] = get_detail_points (detail_points[box], adj_box)\n \n if goal == \"dest\":\n forward_prev_box[adj_box] = box\n distance_from_src[adj_box] = get_dist_travelled(adj_box, goal)\n else:\n backward_prev_box[adj_box] = box\n distance_from_dest[adj_box] = get_dist_travelled(adj_box, goal)\n \n visited_boxes.append((adj_box))\n priorityQueue.put( (get_cost(adj_box, goal), adj_box, goal) )\n\n if not found_path:\n print \"No Path!\"\n return ([],[])\n\n else: # build line segments\n #return ([],[]);\n path = []\n\n if connection_box == src_box: \n curr_box = connection_box\n\n while backward_prev_box[curr_box]:\n prev_box = backward_prev_box[curr_box]\n path.append( ( detail_points[curr_box], detail_points[prev_box] )) \n curr_box = prev_box\n\n path.append ((src, detail_points[connection_box]))\n\n elif connection_box == dest_box:\n curr_box = connection_box\n while forward_prev_box[curr_box]:\n prev_box = forward_prev_box[curr_box]\n path.append( ( detail_points[curr_box], detail_points[prev_box] )) \n curr_box = prev_box\n\n path.append ((dest, detail_points[connection_box]))\n\n else : # they meet at somewhere in the middle\n \n box_from_src = forward_prev_box[connection_box]\n box_from_dest = 
backward_prev_box[connection_box]\n point_close_to_src = detail_points[ box_from_src ]\n point_close_to_dest = detail_points [ box_from_dest]\n\n # calculating the two nearests points of the connection box from their parents' detail point\n c_detail_forward = get_detail_points( point_close_to_src, connection_box)\n c_detail_backward = get_detail_points( point_close_to_dest, connection_box)\n\n # making line segments \n path.append((c_detail_forward, c_detail_backward))\n path.append((c_detail_forward, detail_points[forward_prev_box[connection_box]]))\n path.append((c_detail_backward, detail_points[backward_prev_box[connection_box]]))\n\n box = forward_prev_box[connection_box]\n while forward_prev_box[box] :\n path.append( ( detail_points[box], detail_points[forward_prev_box[box]] )) \n box = forward_prev_box[box]\n\n box = backward_prev_box[connection_box]\n while backward_prev_box[box] :\n path.append( ( detail_points[box], detail_points[backward_prev_box[box]] )) \n box = backward_prev_box[box]\n\n \n return ( path, visited_boxes )", "title": "" }, { "docid": "3b6deff78e39b1b37c7b1c90062368e9", "score": "0.5343856", "text": "def build_distances(bw_hist):\n # assuming that edge_list has changed after TrafficGen\n route_matrix = bw_hist.route_matrix\n edge_dict = bw_hist.edge_dict\n dim = len(route_matrix)\n dist = [[0 for x in range(0,dim)] for y in range(0,dim)]\n for i in range(0,dim):\n for j in range(0,dim):\n route = route_matrix[i][j].route\n route_sum = 0\n for k in range(0,len(route)-1):\n (v1,v2) = (route[k],route[k+1])\n if edge_dict.has_key((v1,v2)):\n route_sum += edge_dict[(v1,v2)].avgbw\n else:\n route_sum += edge_dict[(v2,v1)].avgbw\n dist[i][j] = route_sum\n return dist", "title": "" }, { "docid": "87392ed7f5223ce923db496d6b0cb4eb", "score": "0.5330779", "text": "def calc_neighbor_dist(self):\n nb = (self.elem.dot(t) for t in SLNode._T)\n return (SLNode.cache_dist(c, self.cache) for c in nb)", "title": "" }, { "docid": "07a66026489f0c46556b46262f8035e2", "score": "0.53198177", "text": "def calculate_map_dist(self): # CALCUL MATRICE DIST\n nnodes = self.codebook.nnodes\n\n distance_matrix = np.zeros((nnodes, nnodes))\n for i in range(nnodes):\n #distance_matrix[i] = self.codebook.grid_dist(i).reshape(1, nnodes)\n distance_matrix[i] = self.codebook.grid_dist(i).T.reshape(1, nnodes) #attention c'et la distance au carré\n return distance_matrix", "title": "" }, { "docid": "1c3568b455ff5cc58a978e4a653330d2", "score": "0.53115445", "text": "def distance_to_goal(self, q):\n \"\"\"TODO: Decide which function should be goal function\"\"\" \n dist_to_goal = nrm(q - self.goal)**2 \n #dist_to_goal = nrm(q - self.goal)**4\n #dist_to_goal = nrm(q - self.goal)**(2 * self.k) \n return dist_to_goal", "title": "" }, { "docid": "3f6cf5457f963a5e22d5901f13ffed3a", "score": "0.5307156", "text": "def cal_dist_node_to_nearest_pois(gdf_poi, distance, network, *args, filterattr=True):\n gdf_poi[\"x\"] = gdf_poi[\"geometry\"].apply(lambda x: x.x)\n gdf_poi[\"y\"] = gdf_poi[\"geometry\"].apply(lambda x: x.y)\n # if True, process the give open space layer\n if filterattr is True:\n appended_data = []\n # iterate over each destination category\n for x in args:\n # initialize the destination point-of-interest category\n # the positions are specified by the x and y columns (which are Pandas Series)\n # at a max search distance for up to the first nearest points-of-interest\n network.set_pois(\n x[0],\n distance,\n 1,\n gdf_poi[gdf_poi[\"dest_name_full\"] == x[0]][\"x\"],\n 
gdf_poi[gdf_poi[\"dest_name_full\"] == x[0]][\"y\"],\n )\n # return the distance to the first nearest destination category\n # if zero destination is within the max search distance, then coded as -999\n dist = network.nearest_pois(distance, x[0], 1, -999)\n\n # important to convert columns index type to str\n dist.columns = dist.columns.astype(str)\n # change the index name corresponding to each destination name\n columnName = x[1]\n dist.rename(columns={\"1\": columnName}, inplace=True)\n appended_data.append(dist)\n # return a GeoDataFrame with distance to the nearest destination from each source node\n gdf_poi_dist = pd.concat(appended_data, axis=1)\n return gdf_poi_dist\n # if False, process the public open space layer\n else:\n for x in args:\n network.set_pois(x[0], distance, 1, gdf_poi[\"x\"], gdf_poi[\"y\"])\n dist = network.nearest_pois(distance, x[0], 1, -999)\n dist.columns = dist.columns.astype(str)\n columnName = x[1]\n dist.rename(columns={\"1\": columnName}, inplace=True)\n return dist", "title": "" }, { "docid": "eea87a64e3ec64f8bb710855518ea5c2", "score": "0.5286204", "text": "def a_star_euclidean(self):\n visited = set()\n parent = {}\n path = []\n\n _queue = queue.PriorityQueue()\n current_cost = {}\n\n _queue.put((0, self.start_point))\n visited.add(self.start_point)\n current_cost[self.start_point] = 0\n\n while not _queue.empty():\n current_node = _queue.get()\n coordinate_current_node = current_node[1]\n if coordinate_current_node == self.end_point:\n path = self.buildpath(parent)\n self.maze.solution = dict(find_path_or_not=\"YES\",\n number_of_nodes_visited=len(visited),\n visited_nodes=visited,\n path_length=len(path),\n path=path)\n return\n for child_node in self.maze.neighbor(coordinate_current_node):\n next_cost = current_cost[coordinate_current_node] + 1\n if child_node not in visited:\n current_cost[child_node] = next_cost\n parent[child_node] = coordinate_current_node\n visited.add(child_node)\n # cost so far + h\n h = math.sqrt(\n (child_node[0] - self.end_point[0]) ** 2 +\n (child_node[1] - self.end_point[1]) ** 2\n )\n\n total_cost = h + next_cost\n _queue.put((total_cost, child_node))\n\n self.maze.solution = dict(find_path_or_not=\"NO\",\n number_of_nodes_visited=len(visited),\n visited_nodes=visited,\n path_length=len(path),\n path=path)\n return self.maze.solution", "title": "" }, { "docid": "6b238d154564f5e92792952e0fb07fd2", "score": "0.52841514", "text": "def calculate_distance(self, A, B, distance='euclidean distance'):\n if (\n hasattr(A, '__iter__') and\n hasattr(B, '__iter__') and\n len(A) == 2 and\n len(B) == 2\n ):\n size_x, size_y = self['size']\n Ax, Ay, Bx, By = A[0] % size_x, A[\n 1] % size_y, B[0] % size_x, B[1] % size_y\n dif_x = min(abs(Bx - Ax), size_x - abs(Bx - Ax))\n dif_y = min(abs(By - Ay), size_y - abs(By - Ay))\n if distance in {'square', 'chess', 'chess distance'}:\n return max(dif_x, dif_y)\n elif distance in {'circle', 'euclidean', 'euclidean distance'}:\n return sqrt(dif_x**2 + dif_y**2)\n elif distance in {\n 'tilted square',\n 'taxicab',\n 'taxist',\n 'taxist distance',\n 'taxicab distance'\n }:\n return dif_x + dif_y\n else:\n return None", "title": "" }, { "docid": "db16ed6add66cd48fa688916fb16e9b6", "score": "0.5271826", "text": "def find_path (source_point, destination_point, mesh):\r\n\r\n path = []\r\n boxes = {}\r\n detailed_points = {}\r\n \r\n #locates boxes containing boxes with dest and source. 
y1, x1 is origin\r\n a = locate_Box(source_point, mesh)\r\n b = locate_Box(destination_point, mesh)\r\n\r\n \r\n #Fillingn dictionaries and lists n shit\r\n detailed_points[a] = source_point\r\n f_detailed_points\r\n b_detailed_points\r\n \r\n #A* Stuff\r\n start_heuristic = calc_distance(source_point, destination_point)\r\n end_heuristic = calc_distance(destination_point, destination_point) #yeah, I know it's just 0\r\n queue = [(start_heuristic, a, 'destination')]\r\n heappush(queue, (end_heuristic, b, 'initial'))\r\n\r\n \r\n distances = {}\r\n distances[a] = 0\r\n f_distances = {} # f = forward\r\n b_distances = {} # b = backward\r\n\r\n f_backpointers = {} # f = forward\r\n b_backpointers = {} # b = backward\r\n backpointers = {}\r\n backpointers[a] = None\r\n\r\n while queue:\r\n current_dist, current_box, curr_goal = heappop(queue)\r\n\r\n if current_box in f_backpointers and current_box in b_backpointers:\r\n f_current_back_box = f_backpointers[current_box]\r\n b_current_back_box = b_backpointers[current_box]\r\n\r\n while f_current_back_box is not None:\r\n if f_backpointers[f_current_back_box] != None:\r\n path.append((f_detailed_points[f_backpointers[f_current_back_box]],f_detailed_points[f_current_back_box]))\r\n f_current_back_box = f_backpointers[f_current_back_box]\r\n\r\n while b_current_back_box is not None:\r\n if b_backpointers[b_current_back_box] != None:\r\n path.append((b_detailed_points[b_backpointers[b_current_back_box]],b_detailed_points[b_current_back_box]))\r\n b_current_back_box = b_backpointers[b_current_back_box]\r\n \r\n return path, boxes.keys()\r\n \r\n for adj_box in mesh['adj'][current_box]:\r\n closest_point = get_closest_point(detailed_points[current_box], adj_box)\r\n path_cost = current_dist + calc_distance(detailed_points[current_box], closest_point)\r\n boxes[adj_box] = closest_point\r\n \r\n if adj_box not in distances or path_cost < distances[adj_box]:\r\n #########################################################\r\n if curr_goal == 'destination': \r\n distances[adj_box] = path_cost\r\n f_distances[adj_box] = path_cost\r\n detailed_points[adj_box] = closest_point\r\n f_detailed_points[adj_box] = closest_point\r\n new_heuristic = calc_distance(closest_point, destination_point)\r\n heappush(queue,((distances[adj_box] + new_heuristic), adj_box, 'destination'))\r\n backpointers[adj_box] = current_box\r\n f_backpointers[adj_box] = current_box\r\n elif curr_goal == 'initial':\r\n distances[adj_box] = path_cost\r\n b_distances[adj_box] = path_cost\r\n detailed_points[adj_box] = closest_point\r\n b_detailed_points[adj_box] = closest_point\r\n new_heuristic = calc_distance(closest_point, destination_point)\r\n heappush(queue, ((distances[adj_box] + new_heuristic), adj_box, 'initial'))\r\n backpointers[adj_box] = current_box\r\n b_backpointers[adj_box] = current_box\r\n #########################################################\r\n \r\n path.append((source_point,destination_point))\r\n return path, boxes.keys()", "title": "" }, { "docid": "f7f494b46c8a4c63996a257ad4616ef3", "score": "0.5267627", "text": "def find_path_between_two_coordinates(robot, env, start, destination):\n\tdistance = np.full((80,50), 10000, dtype=float)\n\theap = []\n\tpaths = {}\n\n\tx, y = int(start[0]) // 10, int(start[1]) // 10\n\n\t# Set unreachable zones\n\tfor obstacle in env.obstacles:\n\t\tl = round((obstacle.l - robot.width / 2) / 10)\n\t\tr = round((obstacle.r + robot.width / 2) / 10)\n\t\tb = round((obstacle.b - robot.length / 2) / 10)\n\t\tt = round((obstacle.t + 
robot.length / 2) / 10)\n\t\tdistance[max(l,0):r + 1,max(b,0):t + 1] = np.inf\n\n\tdistance[:round(robot.width / 2) // 10,:] = np.inf\n\tdistance[-round(robot.width / 2) // 10:,:] = np.inf\n\tdistance[:,:round(robot.length / 2) // 10] = np.inf\n\tdistance[:,-round(robot.length / 2) // 10:] = np.inf\n\n\tpaths[(x, y)] = [[x, y]]\n\tdirections = [[0,1],[1,0],[0,-1],[-1,0]]\n\n\tdef update_heap(start_x, start_y):\n\t\tnonlocal heap\n\t\tfor direction in directions:\n\t\t\tnext_x, next_y = start_x + direction[0], start_y + direction[1]\n\t\t\tif distance[next_x, next_y] == 10000:\n\t\t\t\theapq.heappush(heap, (distance[start_x, start_y], [start_x, start_y, next_x, next_y]))\n\n\tdef update_paths(start_x, start_y, next_x, next_y):\n\t\tnonlocal distance, destination, paths, heap\n\n\t\tpaths[(next_x, next_y)] = paths[(start_x, start_y)] + [[next_x, next_y]]\n\t\tif distance[next_x, next_y] <= distance[start_x,start_y] + 1:\n\t\t\treturn\n\n\t\tdistance[next_x,next_y] = distance[start_x,start_y] + 1\n\n\t\tif next_x == destination[0] // 10 and next_y == destination[1] // 10:\n\t\t\treturn paths[(next_x, next_y)]\n\n\t\tupdate_heap(next_x, next_y)\n\n\tdistance[x,y] = 1\n\tupdate_heap(x, y)\n\tcount = 0\n\twhile heap: \n\t\tstart_x, start_y, next_x, next_y = heapq.heappop(heap)[1]\n\n\t\tret = update_paths(start_x, start_y, next_x, next_y)\n\t\tif ret:\n\t\t\treturn np.array(paths[(next_x, next_y)])*10\n\tprint(destination, \"Path not found\")", "title": "" }, { "docid": "241a191dce322fbaf861b41c63828d2c", "score": "0.5247554", "text": "def path_distance(cell_1, cell_2, cutoff=None):\n if cutoff:\n return np.sum(np.array(cell_1.encode('trunc_path', cutoff=cutoff) != np.array(cell_2.encode('trunc_path', cutoff=cutoff))))\n else:\n return np.sum(np.array(cell_1.encode('path') != np.array(cell_2.encode('path'))))", "title": "" }, { "docid": "78d4929cb441d3c3c593b50b4a266fe5", "score": "0.52329385", "text": "def dist2(lines):\n global MAX_D\n global Distances\n prop = {}\n brow = []\n spanning = True\n\n line_item = lines[0]\n\n d2 = {k: v for k, v in filter(lambda t: line_item in t[0], Distances.items())}\n\n min_node = min(d2, key=d2.get)\n\n brow.append(min_node[0])\n brow.append(min_node[1])\n prop[min_node] = Distances.pop(min_node)\n\n while spanning:\n\n v_min = MAX_D\n k_min = ()\n\n for _ in brow:\n\n for item in Distances.items():\n k, v = item\n if (k[0] in brow and k[1] not in brow) or (k[1] in brow and k[0] not in brow):\n if v < v_min:\n k_min = k\n v_min = v\n if k_min != ():\n if k_min[0] not in brow:\n brow.append(k_min[0])\n if k_min[1] not in brow:\n brow.append(k_min[1])\n prop[k_min] = Distances.pop(k_min)\n\n if len(prop) == len(lines) - 1:\n spanning = False\n\n for i in prop:\n line_item = i[0]\n node_min = i[1]\n add_line(_COLOR_PRIMARY, 1, line_item[0], line_item[1], node_min[0] - line_item[0], node_min[1] - line_item[1])\n\n return prop", "title": "" }, { "docid": "9f654be3442bb1525be25ef2bc06733c", "score": "0.5228424", "text": "def computeDistanceClosing(id1, p1, v1, ang1, id2, p2, v2, ang2):\n edges = {15: [(-0.25,0.05),(-0.25,-0.05),(0.25,-0.05),(0.25,0.05)], 8: [(0.0,0.0)]}\n segments = {15: [(edges[15][0], edges[15][1]),(edges[15][1],edges[15][2]),(edges[15][2],edges[15][3]),(edges[15][3],edges[15][0])], 8:[]} \n if id1 == 8:\n ref = p1[:2]\n# ref[2] = p2[2]\n vel = (v1-v2)[:2]\n ang = ang2\n center = p2[:2]\n segId = id2\n elif id1 == 15:\n ref = p2[:2]\n# ref[2] = p1[2]\n vel = (v2-v1)[:2]\n ang = ang1\n center = p1[:2]\n segId = id1\n else:\n raise 
NotImplementedError(\"Currently only distance between gripper and object is implemented. (id1: {})\".format(int(id1)))\n dpList = [dist(center, e1, e2, ang, ref) for e1,e2 in segments[segId]]\n sortedL = sorted(dpList, key = itemgetter(0))\n# print \"sorted list: \", sortedL\n normal = (ref-sortedL[0][1])\n norm = np.linalg.norm(normal)\n if norm > 0.0:\n normal /= np.linalg.norm(normal)\n return np.round(max(sortedL[0][0]-0.025,0.0), config.NUMDEC), np.round(npdot(normal, vel), config.NUMDEC)", "title": "" }, { "docid": "5ec24db870c2c10b08566fcbd1f47306", "score": "0.5224024", "text": "def distance(dims, p1, p2):\n hdx = dims[0]/2\n hdy = dims[1]/2\n xd = abs(p1[0] - p2[0])#%hdx\n yd = abs(p1[1] - p2[1])#%hdy\n #wrapping around produces cyclical behavior. Ideally, some method should be\n #developped to avoid this while still having the snake be smart enough to\n #go through walls when it needs to, but for now just don't wrap distance\n # if xd > hdx:\n # xd = xd - hdx\n # if yd > hdy:\n # yd = yd - hdy\n return xd + yd", "title": "" }, { "docid": "bc9bb8bded9561e088e96e320a4567c2", "score": "0.5220723", "text": "def _calculateDistance(self):\n if self.params[\"noDistanceCalculationAndPrediction\"]:\n print(\"noDistanceCalculationAndPrediction was enabled. Skipping Distance Calculations.\")\n return \n global entriesInChunks\n \n \n print(\"\\nStarting Distance Calculation ...\")\n t1 = time.time()\n \n chunks = self.signalChunks[self.currentAnalysisName]\n #return\n entrieChunkPath = os.path.join(self.params[\"pathToComb\"], \"entriesInChunk.pkl\")\n if not self.params[\"recalculateDistance\"] and all(os.path.exists(x.replace(\".pkl\",\".npy\")) for x in chunks) and os.path.exists(entrieChunkPath):\n print(\"All chunks found for distance calculation.\")\n if not self.entriesInChunkLoaded:\n with open(os.path.join(self.params[\"pathToComb\"], \"entriesInChunk.pkl\"),\"rb\") as f:\n entriesInChunks = pickle.load(f)\n self.entriesInChunkLoaded = True\n\n else:\n\n chunkItems = Parallel(n_jobs=self.params[\"n_jobs\"], verbose=10)(delayed(calculateDistanceP)(c) for c in chunks)\n entriesInChunks[self.currentAnalysisName] = {}\n for k,v in chunkItems:\n for E1E2 in v:\n entriesInChunks[self.currentAnalysisName][E1E2] = k \n \n with open(os.path.join(self.params[\"pathToComb\"], \"entriesInChunk.pkl\"),\"wb\") as f:\n pickle.dump(entriesInChunks,f)\n \n \n \n\n print(\"Distance computing/checking: {} secs\\n\".format(round(time.time()-t1)))", "title": "" }, { "docid": "d80cccd31bf53bf4fe76c0d9d943784f", "score": "0.52203083", "text": "def __distance( self, (x1, y1), (x2, y2) ):\n return np.sqrt((x2-x1)**2 + (y2-y1)**2)", "title": "" }, { "docid": "d80cccd31bf53bf4fe76c0d9d943784f", "score": "0.52203083", "text": "def __distance( self, (x1, y1), (x2, y2) ):\n return np.sqrt((x2-x1)**2 + (y2-y1)**2)", "title": "" }, { "docid": "7d25494054f71eac05d40e76739fa2d0", "score": "0.521789", "text": "def _update_distance(self):\n \n # compute the distance from the traffic light if its state is red and a bounding box exists\n if self.true_state == STOP and self.curr_bb != None:\n \n image_h, image_w = self.curr_depth_img.shape\n # compute the bounding box coordinates\n xmin = int(self.curr_bb.xmin * image_w)\n ymin = int(self.curr_bb.ymin * image_h)\n xmax = int(self.curr_bb.xmax * image_w)\n ymax = int(self.curr_bb.ymax * image_h)\n\n # get the pixel coordinates corresponding to the traffic light position in the image\n traffic_light_pixels = self._get_traffic_light_slice_from_semantic_segmentation(xmin, 
xmax, ymin, ymax)\n\n # check for false positive bounding box\n if len(traffic_light_pixels) == 0:\n self.distance = None\n return\n\n # get the traffic light pixels from the depth image\n x_distance = 0\n self.vehicle_frame_list = []\n for pixel in traffic_light_pixels:\n \n # convert depth image value in meters\n depth = 1000 * self.curr_depth_img[pixel[0]][pixel[1]]\n\n ###\n # Compute the pixel position in vehicle frame\n ###\n\n pixel = [pixel[1] , pixel[0], 1]\n pixel = np.reshape(pixel, (3,1))\n # Projection \"Pixel to Image Frame\"\n image_frame_vect = np.dot(self.inv_intrinsic_matrix, pixel) * depth\n # Creation of the extended vector\n image_frame_vect_extended = np.zeros((4,1))\n image_frame_vect_extended[:3] = image_frame_vect \n image_frame_vect_extended[-1] = 1\n\n # Projection \"Image to Camera Frame\"\n camera_frame = self.image_to_camera_frame(image_frame_vect_extended)\n camera_frame = camera_frame[:3]\n camera_frame = np.asarray(np.reshape(camera_frame, (1,3)))\n\n camera_frame_extended = np.zeros((4,1))\n camera_frame_extended[:3] = camera_frame.T \n camera_frame_extended[-1] = 1\n\n # Projection \"Camera to Vehicle Frame\"\n camera_to_vehicle_frame = np.zeros((4,4))\n camera_to_vehicle_frame[:3,:3] = main.to_rot([self.cam_pitch, self.cam_yaw, self.cam_roll])\n camera_to_vehicle_frame[:,-1] = [self.cam_x_pos, -self.cam_y_pos, self.cam_height, 1]\n \n vehicle_frame = np.dot(camera_to_vehicle_frame,camera_frame_extended )\n vehicle_frame = vehicle_frame[:3]\n vehicle_frame = np.asarray(np.reshape(vehicle_frame, (1,3)))\n \n # Add the computed pixel position with respect to vehicle frame to the pixel positions list\n self.vehicle_frame_list.append([vehicle_frame[0][0], -vehicle_frame[0][1]])\n \n # Update the distance between the traffic light and the vehicle by takin into account\n # only the x axis contribute. 
\n x_distance += vehicle_frame[0][0]\n \n # compute the avarage distance\n self.distance = x_distance/len(traffic_light_pixels)\n # correct the distance by taking into account the ego vehicle extension on the long side.\n self.distance = self._correct_perpendicular_distance(self.distance)\n \n # if the traffic light state is not red or its bounding box doesn't exists set the distance to None\n else:\n self.distance = None", "title": "" }, { "docid": "2cae53199400d2b3c1b3176381c8b842", "score": "0.52175903", "text": "def compute_surface_distances(self, mask_gt, mask_pred):\r\n\r\n # compute the area for all 256 possible surface elements\r\n # (given a 2x2x2 neighbourhood) according to the spacing_mm\r\n neighbour_code_to_surface_area = np.zeros([256])\r\n for code in range(256):\r\n normals = np.array(neighbour_code_to_normals[code])\r\n sum_area = 0\r\n for normal_idx in range(normals.shape[0]):\r\n # normal vector\r\n n = np.zeros([3])\r\n n[0] = normals[normal_idx, 0] * self.spacing_mm[1] * self.spacing_mm[2]\r\n n[1] = normals[normal_idx, 1] * self.spacing_mm[0] * self.spacing_mm[2]\r\n n[2] = normals[normal_idx, 2] * self.spacing_mm[0] * self.spacing_mm[1]\r\n area = np.linalg.norm(n)\r\n sum_area += area\r\n neighbour_code_to_surface_area[code] = sum_area\r\n\r\n # compute the bounding box of the masks to trim\r\n # the volume to the smallest possible processing subvolume\r\n mask_gt = np.asarray(mask_gt, np.bool)\r\n mask_pred = np.asarray(mask_pred, np.bool)\r\n mask_all = mask_gt | mask_pred\r\n bbox_min = np.zeros(3, np.int64)\r\n bbox_max = np.zeros(3, np.int64)\r\n\r\n # max projection to the x0-axis\r\n proj_0 = np.max(np.max(mask_all, axis=2), axis=1)\r\n idx_nonzero_0 = np.nonzero(proj_0)[0]\r\n if len(idx_nonzero_0) == 0: # pylint: disable=g-explicit-length-test\r\n return {\"distances_gt_to_pred\": np.array([]),\r\n \"distances_pred_to_gt\": np.array([]),\r\n \"surfel_areas_gt\": np.array([]),\r\n \"surfel_areas_pred\": np.array([])}\r\n\r\n bbox_min[0] = np.min(idx_nonzero_0)\r\n bbox_max[0] = np.max(idx_nonzero_0)\r\n\r\n # max projection to the x1-axis\r\n proj_1 = np.max(np.max(mask_all, axis=2), axis=0)\r\n idx_nonzero_1 = np.nonzero(proj_1)[0]\r\n bbox_min[1] = np.min(idx_nonzero_1)\r\n bbox_max[1] = np.max(idx_nonzero_1)\r\n\r\n # max projection to the x2-axis\r\n proj_2 = np.max(np.max(mask_all, axis=1), axis=0)\r\n idx_nonzero_2 = np.nonzero(proj_2)[0]\r\n bbox_min[2] = np.min(idx_nonzero_2)\r\n bbox_max[2] = np.max(idx_nonzero_2)\r\n\r\n # crop the processing subvolume.\r\n # we need to zeropad the cropped region with 1 voxel at the lower,\r\n # the right and the back side. This is required to obtain the \"full\"\r\n # convolution result with the 2x2x2 kernel\r\n cropmask_gt = np.zeros((bbox_max - bbox_min) + 2, np.uint8)\r\n cropmask_pred = np.zeros((bbox_max - bbox_min) + 2, np.uint8)\r\n\r\n cropmask_gt[0:-1, 0:-1, 0:-1] = mask_gt[bbox_min[0]:bbox_max[0] + 1,\r\n bbox_min[1]:bbox_max[1] + 1,\r\n bbox_min[2]:bbox_max[2] + 1]\r\n\r\n cropmask_pred[0:-1, 0:-1, 0:-1] = mask_pred[bbox_min[0]:bbox_max[0] + 1,\r\n bbox_min[1]:bbox_max[1] + 1,\r\n bbox_min[2]:bbox_max[2] + 1]\r\n\r\n # compute the neighbour code (local binary pattern) for each voxel\r\n # the resultsing arrays are spacially shifted by minus half a voxel in each\r\n # axis.\r\n # i.e. 
the points are located at the corners of the original voxels\r\n kernel = np.array([[[128, 64],\r\n [32, 16]],\r\n [[8, 4],\r\n [2, 1]]])\r\n neighbour_code_map_gt = ndimage.filters.correlate(\r\n cropmask_gt.astype(np.uint8), kernel, mode=\"constant\", cval=0)\r\n neighbour_code_map_pred = ndimage.filters.correlate(\r\n cropmask_pred.astype(np.uint8), kernel, mode=\"constant\", cval=0)\r\n\r\n # create masks with the surface voxels\r\n borders_gt = ((neighbour_code_map_gt != 0) & (neighbour_code_map_gt != 255))\r\n borders_pred = ((neighbour_code_map_pred != 0) &\r\n (neighbour_code_map_pred != 255))\r\n\r\n # compute the distance transform (closest distance of each voxel to the\r\n # surface voxels)\r\n if borders_gt.any():\r\n distmap_gt = ndimage.morphology.distance_transform_edt(\r\n ~borders_gt, sampling=self.spacing_mm)\r\n else:\r\n distmap_gt = np.Inf * np.ones(borders_gt.shape)\r\n\r\n if borders_pred.any():\r\n distmap_pred = ndimage.morphology.distance_transform_edt(\r\n ~borders_pred, sampling=self.spacing_mm)\r\n else:\r\n distmap_pred = np.Inf * np.ones(borders_pred.shape)\r\n\r\n # compute the area of each surface element\r\n surface_area_map_gt = neighbour_code_to_surface_area[neighbour_code_map_gt]\r\n surface_area_map_pred = neighbour_code_to_surface_area[\r\n neighbour_code_map_pred]\r\n\r\n # create a list of all surface elements with distance and area\r\n distances_gt_to_pred = distmap_pred[borders_gt]\r\n distances_pred_to_gt = distmap_gt[borders_pred]\r\n surfel_areas_gt = surface_area_map_gt[borders_gt]\r\n surfel_areas_pred = surface_area_map_pred[borders_pred]\r\n\r\n # sort them by distance\r\n if distances_gt_to_pred.shape != (0,):\r\n sorted_surfels_gt = np.array(\r\n sorted(zip(distances_gt_to_pred, surfel_areas_gt)))\r\n distances_gt_to_pred = sorted_surfels_gt[:, 0]\r\n surfel_areas_gt = sorted_surfels_gt[:, 1]\r\n\r\n if distances_pred_to_gt.shape != (0,):\r\n sorted_surfels_pred = np.array(\r\n sorted(zip(distances_pred_to_gt, surfel_areas_pred)))\r\n distances_pred_to_gt = sorted_surfels_pred[:, 0]\r\n surfel_areas_pred = sorted_surfels_pred[:, 1]\r\n\r\n return {\"distances_gt_to_pred\": distances_gt_to_pred,\r\n \"distances_pred_to_gt\": distances_pred_to_gt,\r\n \"surfel_areas_gt\": surfel_areas_gt,\r\n \"surfel_areas_pred\": surfel_areas_pred}", "title": "" }, { "docid": "4d2079160dc5af00b696c21816bff921", "score": "0.5216728", "text": "def _distance(self, a, b):\n _sum = 0\n if (not a) or (not b):\n return 1\n for d in xrange(self._dim):\n difference_sq = (a.flist[d].iweight - b.flist[d].iweight) ** 2\n _sum += difference_sq\n return sqrt(_sum)", "title": "" }, { "docid": "8267cd662955d0bfa10af3564b282139", "score": "0.5201762", "text": "def test_distance(self):\n # should work in 2D\n p0 = (0, 0)\n p1 = (0, 1.)\n self.assertEqual(distance(p0, p1), 1.)\n # should work in 3D\n p0 = (0, 0, 0)\n p1 = (0, 0, 1)\n self.assertEqual(distance(p0, p1), 1.)", "title": "" }, { "docid": "3a619803728fc7aad1ece3ba3cf39066", "score": "0.5197045", "text": "def distance(x,y):\n return rigid.dist.get((x,y)) or rigid.dist.get((y,x))", "title": "" }, { "docid": "5c07bb2951128485db62473f77bead28", "score": "0.5193128", "text": "def heuristic(frm, to):\n \n from_cell = frm.cell_id\n to_cell = to.cell_id\n\n # from cell IDs extract 1st digit as x coordinate\n # and 2nd digit as y coordinate\n x_frm = (from_cell // 10)\n y_frm = (from_cell % 10) \n\n x_to = (to_cell // 10)\n y_to = (to_cell % 10) \n\n dist = ((x_to - x_frm)**2 + (y_to - y_frm)**2)**0.5\n \n # as in same 
cell can be up to 14.1 away\n if from_cell == to_cell:\n return 14.1\n\n # if adjacent - distance is 0 \n if ((x_to - x_frm == 1 and y_to - y_frm == 0)\n or (x_to - x_frm == 0 and y_to - y_frm == 1)):\n return 0\n \n # other cases\n else:\n return dist", "title": "" }, { "docid": "dd71509e0d5e6c608a12463d8bbd1019", "score": "0.5191502", "text": "def _get_distance(self, obs, goal):\n (i1, j1) = self._discretize_state(obs.copy())\n (i2, j2) = self._discretize_state(goal.copy())\n return self._apsp[i1, j1, i2, j2]", "title": "" }, { "docid": "469ce0646bacd27c536e7b69ac8b6965", "score": "0.5185238", "text": "def __surface_distances(result, reference, voxelspacing=None, connectivity=1):\n result = np.atleast_1d(result.astype(np.bool))\n reference = np.atleast_1d(reference.astype(np.bool))\n if voxelspacing is not None:\n voxelspacing = _ni_support._normalize_sequence(voxelspacing, result.ndim)\n voxelspacing = np.asarray(voxelspacing, dtype=np.float64)\n if not voxelspacing.flags.contiguous:\n voxelspacing = voxelspacing.copy()\n \n # binary structure\n footprint = generate_binary_structure(result.ndim, connectivity)\n \n # test for emptiness\n if 0 == np.count_nonzero(result): \n raise RuntimeError('The first supplied array does not contain any binary object.')\n if 0 == np.count_nonzero(reference): \n raise RuntimeError('The second supplied array does not contain any binary object.') \n \n # extract only 1-pixel border line of objects\n result_border = np.logical_xor(result, binary_erosion(result, structure=footprint, iterations=1))\n reference_border = np.logical_xor(reference, binary_erosion(reference, structure=footprint, iterations=1))\n \n # compute average surface distance \n # Note: scipys distance transform is calculated only inside the borders of the\n # foreground objects, therefore the input has to be reversed\n dt = distance_transform_edt(~reference_border, sampling=voxelspacing)\n sds = dt[result_border]\n \n return sds", "title": "" }, { "docid": "ada238d75454abc3d5bb0fa0a1cba5b4", "score": "0.5177379", "text": "def _calcDistance(self, loc1, loc2):\r\n\t\tfrom math import sqrt\r\n\t\treturn sqrt((loc2[0] - loc1[0]) ** 2 + (loc2[1] - loc1[1]) ** 2)", "title": "" }, { "docid": "aae727eff6a66b6f9a750ccf6f6ae0e6", "score": "0.5165812", "text": "def _get_distance(self):\n return self.__distance", "title": "" }, { "docid": "11f763ae1a8e44a8ca291d36d653cc81", "score": "0.5162742", "text": "def _get_distance(self, obs, goal):\n (i1, j1) = self._discretize_state(obs)\n (i2, j2) = self._discretize_state(goal)\n return self._apsp[i1, j1, i2, j2]", "title": "" }, { "docid": "8e4f3beb689df9264236d26139f28b82", "score": "0.5162519", "text": "def route_distance(self):\n path_distance = 0\n\n for ii in range(len(self.route)):\n from_city = self.route[ii]\n to_city = None\n if ii + 1 < len(self.route):\n to_city = self.route[ii + 1]\n else:\n to_city = self.route[0]\n path_distance += self.distanceMatrix[from_city, to_city]\n\n self.distance = path_distance\n\n return self.distance", "title": "" }, { "docid": "5bfee928fa12519784c801a500030edc", "score": "0.5160071", "text": "def maxDistanceHeuristic(state, problem):\n targets_left = state[1]\n drivingCost = 2 / 3\n return drivingCost * max(problem.shortest[state[0]][problem.start_index], max([problem.shortest[state[0]][h] for h in targets_left]) if targets_left else 0)", "title": "" }, { "docid": "9ed1938750f0555438436f400c3e3f4a", "score": "0.51551753", "text": "def pixel_dist_2d(nx, ny):\n \n [xg, yg] = np.meshgrid(np.arange(nx), 
np.arange(ny))\n grid_vals = np.hstack((xg.reshape(-1, 1), yg.reshape(-1, 1)))\n \n return ot.dist(grid_vals, metric='sqeuclidean')", "title": "" }, { "docid": "00e2d025ee69fbc18f009975fc1397ea", "score": "0.5154372", "text": "def __init__(self, min_dist, max_dist, fov, dist_per_que, goal, threshold = 0.2):\r\n self.goal = goal #format [angle, distance]\r\n num_queue = int(max_dist/dist_per_que)*2 + 1\r\n #Going to initialize bat in the middle of the grid \r\n self.num_queue = num_queue\r\n self.mindist = min_dist\r\n self.maxdist = max_dist\r\n self.dist_pq = dist_per_que\r\n self.obst_queue = pylab.matrix(np.zeros((num_queue, num_queue)))\r\n self.path = []\r\n self.fov = fov\r\n self.goal_index = None\r\n self.real_goal = False \r\n self.threshold = threshold \r\n self.bat_tile = [(num_queue - 1)/2, (num_queue - 1)/2]\r\n self.bat_pos = [0, 0]\r\n \"\"\"\r\n note: intially the goal might not be included in grid beacuse it is too \r\n far away, in that case, leave real_goal parameter as false and set goal \r\n and goal_index as a tile/point in the general direction towards the real \r\n goal. This will be generated in the initialize queue function\r\n Keep in mind that self.goal has to be updated everytime the queue is to be \r\n reinitiated \r\n \"\"\"", "title": "" }, { "docid": "6440a1af7ecba56cd2268517424b79cb", "score": "0.5148581", "text": "def sq_dist2(self, sq1, sq2):\n rct1 = [self.g.nodes[sq1]['x'], self.g.nodes[sq1]['y'], self.g.nodes[sq1]['width'], self.g.nodes[sq1]['height']]\n rct2 = [self.g.nodes[sq2]['x'], self.g.nodes[sq2]['y'], self.g.nodes[sq2]['width'], self.g.nodes[sq2]['height']]\n\n # rct2 = [1,5,2,3]\n # rct1 = [4,5,1,1]\n # rct1 = [704.85, 334.94, 155, 197]\n # rct2 = [605, 229, 68, 167]\n\n\n\n # get ranges of rectangles\n rng1x = ((rct1[0]-rct1[2]/2), (rct1[0]+rct1[2]/2))\n rng2x = ((rct2[0]-rct2[2]/2), (rct2[0]+rct2[2]/2))\n\n rng1y = ((rct1[1]-rct1[3]/2), (rct1[1]+rct1[3]/2))\n rng2y = ((rct2[1]-rct2[3]/2), (rct2[1]+rct2[3]/2))\n\n ovlp_x = self.dim_ovlp(rng1x, rng2x)\n ovlp_y = self.dim_ovlp(rng1y, rng2y)\n\n if ovlp_x == set() and ovlp_y == set():\n # print('no overlap whatsoever')\n\n rct_pts1 = self.rect_points(rct1)\n rct_pts2 = self.rect_points(rct2)\n dist_ar = euclidean_distances(rct_pts1, rct_pts2)\n min_dist = np.min(dist_ar)\n min_loc = np.where(dist_ar == min_dist)\n\n pt1 = rct_pts1[min_loc[0]]\n pt2 = rct_pts2[min_loc[1]]\n\n dx = pt1[0][0] - pt2[0][0]\n dy = pt1[0][1] - pt2[0][1]\n\n elif ovlp_x == set() and len(ovlp_y) > 0:\n # print('overlap in y, not x')\n rct1_pts = np.array(rng1x)\n rct2_pts = np.array(rng2x)\n\n dist_ar = np.array([\n [abs(rct1_pts[0] - rct2_pts[0]), abs(rct1_pts[0] - rct2_pts[1])],\n [abs(rct1_pts[1] - rct2_pts[0]), abs(rct1_pts[1] - rct2_pts[1])]])\n min_dist = np.min(dist_ar)\n min_loc = np.where(dist_ar == min_dist)\n\n pt1 = rct1_pts[min_loc[0][0]]\n pt2 = rct2_pts[min_loc[1][0]]\n\n dx = pt1 - pt2\n dy = 0\n\n elif ovlp_y == set() and len(ovlp_x) > 0:\n # print('overlap in x, not y')\n # overlap in y dimension but not x\n rct1_pts = np.array(rng1y)\n rct2_pts = np.array(rng2y)\n\n dist_ar = np.array([\n [abs(rct1_pts[0] - rct2_pts[0]), abs(rct1_pts[0] - rct2_pts[1])],\n [abs(rct1_pts[1] - rct2_pts[0]), abs(rct1_pts[1] - rct2_pts[1])]])\n\n min_dist = np.min(dist_ar)\n min_loc = np.where(dist_ar == min_dist)\n\n # print(rct2_pts)\n # print(rct1_pts)\n\n # print(dist_ar)\n # print(min_dist)\n # print(min_loc)\n \n pt1 = rct1_pts[min_loc[0][0]]\n pt2 = rct2_pts[min_loc[1][0]]\n\n dy = pt1 - pt2\n dx = 0\n\n else:\n 
print('overlap in both')\n dx = (rct1[0] - rct2[0]) * 0.2\n dy = (rct1[1] - rct2[1]) * 0.2\n\n\n return dx, dy", "title": "" }, { "docid": "4e09d3d7171f5f847a8fb073ad2caee5", "score": "0.514827", "text": "def calc_dis(point_list1, point_list2):\r\n y = []\r\n for i in range(point_list1.__len__()):\r\n y.append(point_list2)\r\n y = np.array(y)\r\n x=[]\r\n for i in range(point_list2.__len__()):\r\n x.append(point_list1)\r\n x = np.array(x)\r\n x = x.transpose(1, 0, 2)\r\n distance_matrix = np.linalg.norm(np.array(x) - np.array(y), axis=2)\r\n return distance_matrix", "title": "" }, { "docid": "3f246cd395c2d530ba7679a07c1b9132", "score": "0.5146216", "text": "def calculate_distance_and_similarity_label(self, features, features_, labels, labels_, pair_type):\n\n def get_squared_features(features):\n \"\"\"\n elementwised operation.\n \"\"\"\n return tf.expand(tf.reduce_sum(tf.square(features), axis=1), axis=1)\n\n\n print(\"\\n\\t\\t***********************\")\n if pair_type is None or pair_type == 'matrix':\n print(\"\\t\\tthe pair_type is matrix\")\n\n print(\"\\t\\t***********************\\n\")\n # reshape label for convenience\n labels = tf.reshape(labels, [-1, 1])\n labels_ = tf.reshape(labels_, [-1, 1])\n\n\n # calcualte pairwise distance\n squared_features = get_squared_features(features)\n squared_features_ = get_squared_features(features_)\n\n correlation_term = tf.matmul(features, tf.transpose(features_, perm=[1, 0]))\n\n pairwise_distances = tf.sqrt(tf.subtract(squared_features + squred_features_, \\\n tf.multiply(2., squared_features_)))\n\n # calcualte pairwise similarity labels\n num_labels = tf.shape(labels)[0]\n num_labels_ = tf.shape(labels_)[0]\n tiled_labels = tf.tile(labels, [1, num_labels_])\n tiled_labels_ = tf.tile(labels_, [num_labels, 1])\n\n\n pairwise_similarity_labels = tf.cast(tf.equal(tf.reshape(tiled_labels, [-1]), \\\n tf.reshape(tiled_labels_, [-1])), tf.float32)\n pairwise_similarity_labels = tf.reshape(pairwise_similarity_labels, [num_labels, num_labels_])\n\n\n return pairwise_distances, pairwise_similarity_labels\n\n elif pair_type == 'vector':\n\n print(\"\\t\\tthe pair_type is vector\")\n\n print(\"\\t\\t***********************\\n\")\n pairwise_distances = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(features, features_)), axis=1))\n pairwise_similarity_labels = tf.cast(tf.equal(labels, labels_), tf.float32)\n\n return pairwise_distances, pairwise_similarity_labels", "title": "" }, { "docid": "af6e1b24435a2a987b8fe9b202b7f89a", "score": "0.5138084", "text": "def get_distance_dask(__tan1, __tan2, __cos3):\r\n\r\n temp = __tan1 * __tan1 + __tan2 * __tan2 - 2.0 * __tan1 * __tan2 * __cos3\r\n\r\n temp = da.where(temp < 0, 0, temp)\r\n\r\n # w = np.where(temp < 0)[0]\r\n # temp[w] = 0.0\r\n\r\n # TODO\r\n # self.__temp = temp # used by other functions ??\r\n\r\n return da.sqrt(temp)", "title": "" }, { "docid": "34a6f16d64add96004fd651c569f8446", "score": "0.51338315", "text": "def get_dist(self,atom):\n d = self.r-self.r[:,atom].reshape(3,1) # subtract the location col from the matrix for each atom\n dist = np.sqrt((d*d).sum(axis=0))\n dist_copy = dist.copy()\n dist_copy[dist_copy >=self.sim_params['nbCutoff']] = 0\n dist_copy[self.bonded_atoms[atom]] = dist[self.bonded_atoms[atom]]\n mask = dist_copy>0\n mask[0] = False # take care of the zero position\n return dist, mask", "title": "" }, { "docid": "4591bce2f27b2dabea26b89cf875530a", "score": "0.5132941", "text": "def get_obstacle_distances(car: Car, layout_, max_distance_, resolution_=100):\n return (\n 
Utils.get_distance_from_obstacle_along_direction(car, layout_, 90, max_distance_, resolution_),\n Utils.get_distance_from_obstacle_along_direction(car, layout_, 45, max_distance_, resolution_),\n Utils.get_distance_from_obstacle_along_direction(car, layout_, 0, max_distance_, resolution_),\n Utils.get_distance_from_obstacle_along_direction(car, layout_, -45, max_distance_, resolution_),\n Utils.get_distance_from_obstacle_along_direction(car, layout_, -90, max_distance_, resolution_),\n )", "title": "" }, { "docid": "6c43e68299b20bdbb22c58ce9418129a", "score": "0.51317847", "text": "def test_composite_dist_compute(self):\n\n ## Check that d(x, x) = 0\n d = tc.distances.compute_composite_distance(self.dist, self.x, self.x)\n self.assertAlmostEqual(d, 0.0)\n\n d = tc.distances.compute_composite_distance(self.dist, self.y, self.y)\n self.assertAlmostEqual(d, 0.0)\n\n ## Check the distance between two data points against the hard-coded\n # answer.\n d = tc.distances.compute_composite_distance(self.dist, self.x, self.y)\n self.assertAlmostEqual(d, 30.29165739, places=5)\n\n ## Check the distance against the nearest neighbors toolkit\n sf = tc.SFrame([self.x, self.y]).unpack(\"X1\", column_name_prefix=\"\")\n m = tc.nearest_neighbors.create(sf, distance=self.dist, verbose=False)\n knn = m.query(sf[:1], k=2, verbose=False)\n self.assertAlmostEqual(d, knn[\"distance\"][1], places=5)", "title": "" }, { "docid": "4ce3f0e3458e426a49a05fef7cdf6fa3", "score": "0.51245344", "text": "def b2Distance(*args):\r\n return _Box2D2.b2Distance(*args)", "title": "" }, { "docid": "11d5976a19bc4d253c1b41f353acf9f3", "score": "0.5117361", "text": "def distance_tf(tf_buffer, pose1, pose2):\n #convert to the same coordinate frame...\n pose1 = pose_transform(tf_buffer, pose1, pose2.header.frame_id)\n return distance(pose1, pose2)", "title": "" }, { "docid": "4d05a57c096bcda6907b66e574db1904", "score": "0.5112318", "text": "def compute_obstacle_neighbors(self, agent, rangeSq):\n self.query_obstacle_tree_recursive(agent, rangeSq, self.obstacleTree_)", "title": "" }, { "docid": "03b8627e3cf4a84ffc081ab02d8f9b90", "score": "0.51115006", "text": "def calculate_heuristics(expand_paths, map, destination_id, type_preference=0):\n\n for path in expand_paths:\n if type_preference==0:\n if path.route[-1] != destination_id:\n path.update_h(1)\n else:\n path.update_h(0)\n continue\n if type_preference==1:\n currentCoord = [map.stations[path.route[-1]]['x'], map.stations[path.route[-1]]['y']]\n destCoord = [map.stations[destination_id]['x'], map.stations[destination_id]['y']]\n\n x = destCoord[0] - currentCoord[0]\n y = destCoord[1] - currentCoord[1]\n dist = math.sqrt(x * x + y * y)\n maxSpeed = max(map.velocity.values())\n path.update_h(dist/maxSpeed)\n continue\n if type_preference==2:\n currentCoord = [map.stations[path.route[-1]]['x'], map.stations[path.route[-1]]['y']]\n destCoord = [map.stations[destination_id]['x'], map.stations[destination_id]['y']]\n\n x = destCoord[0] - currentCoord[0]\n y = destCoord[1] - currentCoord[1]\n dist = math.sqrt(x*x + y*y)\n path.update_h(dist)\n\n # if map.stations[path.route[-1]][\"line\"] != map.stations[path.route[-2]][\"line\"]:\n # path.update_g(-1) # TODO ask Ali why I have to do this\n\n continue\n\n if type_preference==3:\n if map.stations[path.route[-1]][\"line\"] != map.stations[path.route[-2]][\"line\"]:\n path.update_h(1)\n else:\n path.update_h(0)\n\n return expand_paths", "title": "" }, { "docid": "53f766a8c6e676f0d8819670a570ff45", "score": "0.5109168", "text": "def 
distance(self):\n pass", "title": "" }, { "docid": "53f766a8c6e676f0d8819670a570ff45", "score": "0.5109168", "text": "def distance(self):\n pass", "title": "" }, { "docid": "53f766a8c6e676f0d8819670a570ff45", "score": "0.5109168", "text": "def distance(self):\n pass", "title": "" }, { "docid": "97629ba9a7e03d5fdbbe0ddc0a8749f5", "score": "0.5107634", "text": "def _calculate_distances_land_grid(base_point_vector_path, base_raster_path,\n target_dist_raster_path, work_dir):\n LOGGER.info('Starting _calculate_distances_land_grid.')\n temp_dir = tempfile.mkdtemp(dir=work_dir, prefix='calc-dist-land')\n\n # Open the point shapefile and get the layer\n base_point_vector = gdal.OpenEx(base_point_vector_path, gdal.OF_VECTOR)\n base_point_layer = base_point_vector.GetLayer()\n # A list to hold the land to grid distances in order for each point\n # features 'L2G' field\n l2g_dist = []\n # A list to hold the individual distance transform path's in order\n land_point_dist_raster_path_list = []\n\n # Get the original layer definition which holds needed attribute values\n base_layer_defn = base_point_layer.GetLayerDefn()\n file_ext, driver_name = _get_file_ext_and_driver_name(\n base_point_vector_path)\n output_driver = ogr.GetDriverByName(driver_name)\n single_feature_vector_path = os.path.join(\n temp_dir, 'single_feature' + file_ext)\n target_vector = output_driver.CreateDataSource(single_feature_vector_path)\n\n # Create the new layer for target_vector using same name and\n # geometry type from base_vector as well as spatial reference\n target_layer = target_vector.CreateLayer(base_layer_defn.GetName(),\n base_point_layer.GetSpatialRef(),\n base_layer_defn.GetGeomType())\n\n # Get the number of fields in original_layer\n base_field_count = base_layer_defn.GetFieldCount()\n\n # For every field, create a duplicate field and add it to the new\n # shapefiles layer\n for fld_index in range(base_field_count):\n base_field = base_layer_defn.GetFieldDefn(fld_index)\n target_field = ogr.FieldDefn(base_field.GetName(),\n base_field.GetType())\n # NOT setting the WIDTH or PRECISION because that seems to be\n # unneeded and causes interesting OGR conflicts\n target_layer.CreateField(target_field)\n\n # Create a new shapefile with only one feature to burn onto a raster\n # in order to get the distance transform based on that one feature\n for feature_index, point_feature in enumerate(base_point_layer):\n # Get the point features land to grid value and add it to the list\n field_index = point_feature.GetFieldIndex('L2G')\n l2g_dist.append(float(point_feature.GetField(field_index)))\n\n # Copy original_datasource's feature and set as new shapes feature\n output_feature = ogr.Feature(feature_def=target_layer.GetLayerDefn())\n\n # Since the original feature is of interest add its fields and\n # Values to the new feature from the intersecting geometries\n # The False in SetFrom() signifies that the fields must match\n # exactly\n output_feature.SetFrom(point_feature, False)\n target_layer.CreateFeature(output_feature)\n target_vector.SyncToDisk()\n target_layer.DeleteFeature(point_feature.GetFID())\n\n dist_raster_path = os.path.join(temp_dir,\n 'dist_%s.tif' % feature_index)\n _create_distance_raster(base_raster_path, single_feature_vector_path,\n dist_raster_path, work_dir)\n # Add each features distance transform result to list\n land_point_dist_raster_path_list.append(dist_raster_path)\n\n target_layer = None\n target_vector = None\n base_point_layer = None\n base_point_vector = None\n l2g_dist_array = 
numpy.array(l2g_dist)\n\n def _min_land_ocean_dist(*grid_distances):\n \"\"\"Aggregate each features distance transform output and create one\n distance output that has the shortest distances combined with each\n features land to grid distance\n\n Args:\n *grid_distances (numpy.ndarray): a variable number of numpy.ndarray\n\n Returns:\n a numpy.ndarray of the shortest distances\n\n \"\"\"\n # Get the shape of the incoming numpy arrays\n # Initialize with land to grid distances from the first array\n min_distances = numpy.min(grid_distances, axis=0)\n min_land_grid_dist = l2g_dist_array[numpy.argmin(grid_distances, axis=0)]\n return min_distances + min_land_grid_dist\n\n pygeoprocessing.raster_calculator(\n [(path, 1)\n for path in land_point_dist_raster_path_list], _min_land_ocean_dist,\n target_dist_raster_path, _TARGET_DATA_TYPE, _TARGET_NODATA)\n\n shutil.rmtree(temp_dir, ignore_errors=True)\n\n LOGGER.info('Finished _calculate_distances_land_grid.')", "title": "" }, { "docid": "8df708167481ce946e84a751052ec6fa", "score": "0.5106133", "text": "def distance(self,n1,n2):\n lat1 = self.data.nodes[n1][0]\n lon1 = self.data.nodes[n1][1]\n lat2 = self.data.nodes[n2][0]\n lon2 = self.data.nodes[n2][1]\n return self.coordDist(lat1,lon1,lat2,lon2)", "title": "" }, { "docid": "e95e216f39adff0db26a425bec6a4694", "score": "0.51004857", "text": "def __calc_distance(self, start_idx, end_idx):\n\n def dl(a, b):\n return math.sqrt((a.x - b.x)**2 + (a.y - b.y)**2 + (a.z - b.z)**2)\n\n total_dist = 0\n num_wps = (end_idx - start_idx) % len(self.waypoints.waypoints)\n idx1 = start_idx\n for i in range(num_wps):\n idx0 = idx1\n idx1 = (idx0 + 1) % len(self.waypoints.waypoints)\n\n total_dist += dl(self.waypoints.waypoints[idx0].pose.pose.position,\n self.waypoints.waypoints[idx1].pose.pose.position)\n return total_dist", "title": "" }, { "docid": "8ecc42af369a180c72f3d2beebe0cbc1", "score": "0.5098893", "text": "def calculateDistances(self):\n \n # shift by one second left flies, seconds, (x,y)\n fs = np.roll(self.fly_one_minute_buffer, -1, axis=1) \n \n x = self.fly_one_minute_buffer[:,:,:1]\n y = self.fly_one_minute_buffer[:,:,1:]\n \n x1 = fs[:,:,:1]\n y1 = fs[:,:,1:]\n \n d = self.__distance((x,y),(x1,y1))\n #we sum everything BUT the last bit of information otherwise we have data duplication\n values = d[:,:-1,:].sum(axis=1).reshape(-1)\n \n activity = '\\t'.join( ['%s' % int(v) for v in values] )\n return activity", "title": "" }, { "docid": "12369fa77511a7a41d34d6acb5fb3036", "score": "0.5092588", "text": "def dijkstra(g, src):\r\n s = g.__contains__(src)\r\n # diccionario que contiene cada vertice y la distancia para llegar hasta el\r\n distance = {}\r\n # resultado del algoritmo\r\n result = {}\r\n # lista de Python que funcionara como una cola con prioridad\r\n pqueue = []\r\n\r\n # se inicializa la distancia desde el vertice origen hasta el resto de los vertices del grafo a infinito y hacia el mismo a cero\r\n for v in g.vertices():\r\n if v is s:\r\n distance[v] = 0\r\n else:\r\n distance[v] = float('inf')\r\n # se insertan en la cola con prioridad cada nodo y la distancia hasta el desde el origen, este parametro servira como clave\r\n # para ordenarlos de menor a mayor en la cola con prioridad\r\n pqueue.append((distance[v], v))\r\n\r\n # mientas la cola no este vacia\r\n while len(pqueue) > 0:\r\n # se extrae la tupla cuya clave sea el minimo de la cola\r\n u = pqueue.pop(pqueue.index(my_min(pqueue)))\r\n # se guarda en el resultado el nodo que se extrajo y su distancia\r\n result[u[1]] = 
distance[u[1]]\r\n # para cada arista de los vertices adyacentes al extraido\r\n for edge in g.incident_edges(u[1].element):\r\n # se busca el vertice opuesto en la arista que se esta examinando\r\n v = edge.opposite(u[1])\r\n # si este vertice no esta en el resultado\r\n if v not in result:\r\n weight = edge.weight\r\n # si la distancia al nodo extraido de la cola mas el peso de la arista que conduce al nodo adyacente\r\n # es menor que el peso que tenia v en el diccionario de las distancias\r\n if distance[u[1]] + weight < distance[v]:\r\n # se toma la posicion del vertice v dentro de la cola\r\n i = pqueue.index((distance[v], v))\r\n # se actualiza la distancia de v en el diccionario de las distancias\r\n distance[v] = distance[u[1]] + weight\r\n # se actualiza la distancia de v en la cola\r\n pqueue[i] = (distance[v], v)\r\n return result", "title": "" }, { "docid": "2b2385020b71065706d930a2f67293c4", "score": "0.5089493", "text": "def get_neighbor_distances(self, subtract_radius: bool = False) -> np.ndarray:\n # handle simple cases\n if len(self) == 0:\n return np.zeros((0,))\n elif len(self) == 1:\n return np.full(1, np.nan)\n\n try:\n from scipy.spatial import cKDTree as KDTree\n except ImportError:\n from scipy.spatial import KDTree\n\n # build tree to query the nearest neighbors\n assert self.data is not None\n positions = self.data[\"position\"]\n\n # we could support periodic boundary conditions using `freud.locality.AABBQuery`\n tree = KDTree(positions)\n dist, index = tree.query(positions, 2)\n\n if subtract_radius:\n return dist[:, 1] - self.data[\"radius\"][index].sum(axis=1) # type: ignore\n else:\n return dist[:, 1] # type: ignore", "title": "" }, { "docid": "e15f664a494f18dd4420fdb071112f0d", "score": "0.5089447", "text": "def calculate_map_dist(self):\n nnodes = self.codebook.nnodes\n\n distance_matrix = np.zeros((nnodes, nnodes))\n for i in range(nnodes):\n distance_matrix[i] = self.codebook.grid_dist(i).reshape(1, nnodes)\n return distance_matrix", "title": "" }, { "docid": "f98c84eaf4c934646b9857be1f1940d2", "score": "0.50853336", "text": "def distance_from_nearest_object(self):\n pos_drone = self.c.getMultirotorState().kinematics_estimated.position\n x, y = pos_drone.x_val, pos_drone.y_val\n return min([self._distance_from_object(x, y, obj_id) for obj_id in self.obstacles])", "title": "" }, { "docid": "679284994fe708eb859d7cf86e2aaa68", "score": "0.50832176", "text": "def compute_distances(self, start, costs=None):\n # Get starting points\n x, y = start\n\n # Init cost array\n if costs is None:\n costs = [[float('Inf') for _ in range(self.width)] for _ in range(self.height)]\n\n # Add start point to queue\n q = queue.Queue()\n costs[y][x] = 0\n q.put(start)\n\n while not q.empty():\n x, y = q.get()\n\n # Add neighbours to queue\n neighbours = [pos for d, pos in self.get_neighbours((x, y))]\n for nx, ny in neighbours:\n # Only add if we can update the cost\n if costs[ny][nx] > costs[y][x] + 1:\n costs[ny][nx] = costs[y][x] + 1\n q.put((nx, ny))\n\n return costs", "title": "" }, { "docid": "7f126fbc43e9a09645ec06c6737cb80c", "score": "0.5080902", "text": "def route_distance(self):\n\n if self.distance == 0:\n path_distance = 0\n for i in range(0, len(self.route)):\n from_city = self.route[i]\n to_city = None\n if i + 1 < len(self.route):\n to_city = self.route[i + 1]\n else:\n to_city = self.route[0]\n path_distance += from_city.distance(to_city)\n self.distance = path_distance\n\n return self.distance", "title": "" }, { "docid": "28792d27a0e573813a2326c7e2e41a20", 
"score": "0.5079316", "text": "def distance_2d(p,q):\n return math.sqrt((p[0]-q[0])**2 + (p[1]-q[1])**2)", "title": "" }, { "docid": "3f9851937958ca34d4ccbd147f5be6ca", "score": "0.5075163", "text": "def calculate_cost(expand_paths, map, type_preference=0):\n\n for path in expand_paths:\n if type_preference==0:\n path.update_g(1)\n continue\n if type_preference==1:\n path.update_g(map.connections[path.route[-1]][path.route[-2]])\n continue\n if type_preference==2:\n if map.stations[path.route[-1]][\"line\"] == map.stations[path.route[-2]][\"line\"]:\n time = map.connections[path.route[-2]][path.route[-1]]\n speed = map.stations[path.route[-2]][\"velocity\"]\n distance = time * speed\n path.update_g(distance)\n else:\n path.update_g(0)\n continue\n\n if type_preference==3:\n if map.stations[path.route[-1]][\"line\"] != map.stations[path.route[-2]][\"line\"]:\n path.update_g(1)\n\n\n return expand_paths", "title": "" }, { "docid": "6c901350dd4b5c779cb8743ef2d0ec08", "score": "0.50538343", "text": "def _find_distance(self, point1, point2):\r\n if self.distance in [\"euclidean\", \"l2\"]:\r\n if len(point2.shape) > 1:\r\n distance = np.sqrt(np.sum((point1 - point2) ** 2, axis = 1))\r\n else:\r\n distance = np.sqrt(np.sum((point1 - point2) ** 2))\r\n \r\n elif self.distance in [\"manhattan\", \"l1\"]:\r\n if len(point2.shape) > 1:\r\n distance = np.abs(point1 - point2).sum(axis = 1)\r\n else:\r\n distance = np.abs(point1 - point2).sum()\r\n \r\n elif self.distance in [\"cosine\", \"cosine_similarity\"]:\r\n if len(point2.shape) > 1:\r\n #Had to use np.maximum here because this would sometimes return an extremely small negative value instead of zero\r\n distance = np.maximum(0,1 - (np.sum(point1 * point2, axis = 1) / (np.linalg.norm(point1) * np.linalg.norm(point2, axis = 1))))\r\n else:\r\n distance = np.maximum(0,1 - (np.sum(point1 * point2) / (np.linalg.norm(point1) * np.linalg.norm(point2))))\r\n \r\n else:\r\n raise ValueError(\"Unknown distance measure selected\")\r\n \r\n return distance", "title": "" }, { "docid": "ba5a6003f343f3ca30b0fffd32317e18", "score": "0.5049844", "text": "def _calculate_land_to_grid_distance(\n base_land_vector_path, base_grid_vector_path, dist_field_name,\n target_land_vector_path):\n LOGGER.info('Starting _calculate_land_to_grid_distance.')\n\n # Copy the point vector\n _, driver_name = _get_file_ext_and_driver_name(\n target_land_vector_path)\n base_land_vector = ogr.Open(base_land_vector_path, gdal.OF_VECTOR)\n driver = ogr.GetDriverByName(driver_name)\n driver.CopyDataSource(base_land_vector, target_land_vector_path)\n base_land_vector = None\n\n target_land_vector = gdal.OpenEx(\n target_land_vector_path, gdal.OF_VECTOR | gdal.GA_Update)\n base_grid_vector = gdal.OpenEx(\n base_grid_vector_path, gdal.OF_VECTOR | gdal.GA_ReadOnly)\n\n base_grid_layer = base_grid_vector.GetLayer()\n # List to store the grid point geometries as shapely objects\n grid_point_list = []\n\n LOGGER.info('Loading the polygons into Shapely')\n for grid_point_feat in base_grid_layer:\n # Get the geometry of the grid point in WKT format\n grid_point_wkt = grid_point_feat.GetGeometryRef().ExportToWkt()\n # Load the geometry into shapely making it a shapely object\n shapely_grid_point = shapely.wkt.loads(grid_point_wkt)\n # Add the shapely point geometry to a list\n grid_point_list.append(shapely_grid_point)\n\n # Take the union over the list of points to get one point collection object\n LOGGER.info('Get the collection of polygon geometries by taking the union')\n grid_point_collection 
= shapely.ops.unary_union(grid_point_list)\n\n target_land_layer = target_land_vector.GetLayer()\n # Create a new distance field based on the name given\n dist_field_defn = ogr.FieldDefn(dist_field_name, ogr.OFTReal)\n target_land_layer.CreateField(dist_field_defn)\n\n LOGGER.info('Loading the points into shapely')\n for land_point_feat in target_land_layer:\n # Get the geometry of the point in WKT format\n land_point_wkt = land_point_feat.GetGeometryRef().ExportToWkt()\n # Load the geometry into shapely making it a shapely object\n shapely_land_point = shapely.wkt.loads(land_point_wkt)\n # Get the distance in meters and convert to km\n land_to_grid_dist = shapely_land_point.distance(\n grid_point_collection) / 1000\n # Add the distance value to the new field and set to the feature\n land_point_feat.SetField(dist_field_name, land_to_grid_dist)\n target_land_layer.SetFeature(land_point_feat)\n\n target_land_layer = None\n target_land_vector = None\n base_grid_layer = None\n base_grid_vector = None\n\n LOGGER.info('Finished _calculate_land_to_grid_distance.')", "title": "" }, { "docid": "c527171be66fb083db6499383a90aac4", "score": "0.50486946", "text": "def edist(A, B):\n B_transpose = tf.transpose(B) # (DxM)\n A_norm = tf.reduce_sum(A ** 2, axis=1, keepdims=True) # (Nx1)\n B_norm = tf.reduce_sum(B_transpose ** 2, axis=0, keepdims=True) # (1xM)\n A_dot_B = tf.matmul(A, B_transpose) # (NxM)\n dist = tf.sqrt(A_norm + B_norm - 2 * A_dot_B) # (NxM)\n return dist", "title": "" }, { "docid": "9e17c945d93b1089d52a522ca05c2a5e", "score": "0.5035521", "text": "def euclidean_distance(self):\n\t#print (self.starting_odom.pose.pose.position.x)\n\t#print (self.starting_odom.pose.pose.position.y)\n\t#print (self.odom.pose.pose.position.x)\n\t#print (self.odom.pose.pose.position.y)\n\tdis = np.sqrt(math.pow((self.starting_odom.pose.pose.position.x - self.odom.pose.pose.position.x), 2) +\n\t math.pow((self.starting_odom.pose.pose.position.y - self.odom.pose.pose.position.y), 2))\n\t# print dis\n\treturn dis", "title": "" }, { "docid": "29ea46cfcccc560000eb76e948192930", "score": "0.5033438", "text": "def get_distance(input_lattice, mc_lattice_size, lattice_constant):\n distances_travelled_matrix = np.zeros([mc_lattice_size[0], mc_lattice_size[1], mc_lattice_size[2]]\n ) # array the same size as the mc_lattice, to be filled with the starting coordinates of each atom, in the location it currently occupies\n # list of the net distances travelled, loses starting point info\n distances_travelled_list = []\n # sum of deltat i and delta j coordinates of all atoms\n d_j_distance_ij = np.array([0., 0.])\n for i, j, k in itertools.product(range(mc_lattice_size[0]), range(mc_lattice_size[1]), range(mc_lattice_size[2])):\n atom_starting_point_indices = input_lattice[i][j][k]\n # is there an atom in the site? 
for vaccancies the starting point index will be [-1,0,0]\n if atom_starting_point_indices[0] >= 0:\n # adding to deal with atoms that have looped around the periodic boundaries\n i_index = i + \\\n mc_lattice_size[0] * \\\n round(float(atom_starting_point_indices[2]) / 100)\n j_index = j + mc_lattice_size[1] * (atom_starting_point_indices[2] - round(\n float(atom_starting_point_indices[2]) / 100) * 100)\n distances_travelled_matrix[i][j][k] = np.sqrt(np.abs((atom_starting_point_indices[0] - i_index)**2 + (atom_starting_point_indices[1] - j_index)**2 + (\n atom_starting_point_indices[0] - i_index) * (atom_starting_point_indices[1] - j_index))) # hexagonal distance between atom start point and where it is now, matrix form\n # distance travelled per atom, in list form, no order to it though\n distances_travelled_list = np.append(\n distances_travelled_list, distances_travelled_matrix[i][j][k])\n d_j_distance_ij += [(atom_starting_point_indices[0] - i_index),\n (atom_starting_point_indices[1] - j_index)]\n d_j_distance = np.sqrt(np.abs(d_j_distance_ij[0]**2 + (d_j_distance_ij[1]) **\n 2 + d_j_distance_ij[0] * d_j_distance_ij[1])) # vector sum of all distances\n return distances_travelled_matrix * lattice_constant, distances_travelled_list * lattice_constant, d_j_distance * lattice_constant", "title": "" }, { "docid": "c728a3a87aa252af1c48dc5fd724e535", "score": "0.5032287", "text": "def minimum_distances(fr,**kwargs):\n\tglobal pts_ions,pts_lipids,vecs,midplanes\n\tdistance_metric = kwargs.pop('distance_metric','r')\n\tif kwargs: raise Exception('unprocessed kwargs')\n\tvec = vecs[fr]\n\tpts_fore_unstuffed = pts_ions[fr]\n\tpts_fore = boxstuff(pts_fore_unstuffed,vec)\n\tif distance_metric=='r':\n\t\tpts_back_unstuffed = pts_lipids[fr]\n\t\tpts_back = boxstuff(pts_back_unstuffed,vec)\n\t\t#---! why does vec need to be twice as long? (tested that the limits work though)\n\t\ttry: tree = scipy.spatial.ckdtree.cKDTree(pts_back,boxsize=np.concatenate((vec,vec)))\n\t\t#---KDTree failures are blanked\n\t\texcept: return np.array([])\n\t\tclose,nns = tree.query(pts_fore,k=1)\n\telif distance_metric=='z':\n\t\t#---no PBCs implemented here and we really just take the distance to the average-z\n\t\tclose = np.abs(pts_fore[:,2]-midplanes[fr])\n\telse: raise Exception('unclear distance metric: %s'%distance_metric)\n\treturn close", "title": "" }, { "docid": "8150533ef89ca40a9083171dc7eaa8a4", "score": "0.50316894", "text": "def respects_time_distance_constraints(self):\n return (((self.totalDist + float(self.mDistances[self.adding_another_client][self.j]) + float(\n self.mDistances[len(self.demandes)][self.j])) <= self.max_dist)\n and ((self.totalTime + self.mTimes[self.adding_another_client][self.j] + self.delivery_time()) <= self.endTime))", "title": "" }, { "docid": "31e24544f45374d79efd04cc95a2d88b", "score": "0.50267303", "text": "def _calculate_distance_with_dtw(data, norm: int = 1):\n\n # Initialize empty distance matrix.\n dist_matrix = np.zeros((data.shape[0], data.shape[0]), dtype=float)\n\n # Note regarding multithreading: Splitting up by rows leads to imbalance amongst thread workloads.\n # Instead, we split up all possible pairings to ensure even workloads and collect the results (and assemble\n # the distance matrix) after the threads finished their calculations.\n # Generate all pairings.\n segment_pairings = [(i, j) for i in range(0, data.shape[0]) for j in range(0, data.shape[0]) if j > i]\n\n # Set up multithreading. 
Run as many threads as logical cores are available on this machine - 1.\n num_threads = psutil.cpu_count(logical=True)\n threads = []\n for i in range(0, num_threads):\n # Calculate distance with fastDTW between each pairing of segments. Distances between elements to themselves\n # are ignored and hence retain their intial value of 0.\n thread = DTWThread(thread_id=i,\n num_threads=num_threads,\n segment_pairings=segment_pairings,\n distance_matrix=dist_matrix,\n data_to_process=data,\n norm=norm)\n threads.append(thread)\n thread.start()\n\n # Wait for threads to finish.\n for thread in threads:\n thread.join()\n\n return dist_matrix", "title": "" }, { "docid": "a6ebadf6830874c01c5bc560f4fa834e", "score": "0.50263155", "text": "def greedy_eval(self, node):\n return self.tiles_distance(node)", "title": "" }, { "docid": "29e30349aa15d4ea4e7a420126afdcfc", "score": "0.50240105", "text": "def calculate_workspace_free(robot,obstacles,end_effector,point_local):\n global lower_corner,upper_corner\n resolution = (20,20,20)\n cellsize = vectorops.div(vectorops.sub(upper_corner,lower_corner),resolution)\n invcellsize = vectorops.div(resolution,vectorops.sub(upper_corner,lower_corner))\n \n reachable = np.zeros(resolution)\n #TODO: your code here\n feasible = collision_free(robot,obstacles)\n if feasible:\n wp = end_effector.getWorldPosition(point_local)\n index = [int(math.floor(v)) for v in vectorops.mul(vectorops.sub(wp,lower_corner),invcellsize)]\n if all(i>=0 and i<r for (i,r) in zip(index,resolution)):\n reachable[tuple(index)] = 1.0\n # print(index)\n num_samples = 100000\n rand_positions = np.random.rand(num_samples, 3)\n rand_positions[:,0] = rand_positions[:,0] * vectorops.sub(upper_corner, lower_corner)[0] + lower_corner[0]\n rand_positions[:,1] = rand_positions[:,1] * vectorops.sub(upper_corner, lower_corner)[1] + lower_corner[1]\n rand_positions[:,2] = rand_positions[:,2] * upper_corner[2]\n\n for i in range(num_samples):\n index = [int(math.floor(v)) for v in vectorops.mul(vectorops.sub(rand_positions[i],lower_corner),invcellsize)]\n for obstacle in obstacles:\n if obstacle.geometry().distance_point(rand_positions[i]).d <= 0:\n reachable[tuple(index)] = 0.0\n else:\n reachable[tuple(index)] = 1.0\n\n return reachable", "title": "" }, { "docid": "df74f9424365038e4a89603bbd903462", "score": "0.5023582", "text": "def optimal_transport_dist(txt_emb: Tensor, img_emb: Tensor, txt_pad: Tensor, img_pad: Tensor, beta: float=0.5, iteration: int=50, k: int=1) ->Tensor:\n cost = cost_matrix_cosine(txt_emb, img_emb)\n joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2)\n cost.masked_fill_(joint_pad, 0)\n txt_len = txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)\n img_len = img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)\n T = ipot(cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, beta, iteration, k)\n distance = trace(cost.matmul(T.detach()))\n return distance", "title": "" }, { "docid": "192d3717988e87dbe6d19f366669b810", "score": "0.5019322", "text": "def distance():\n return point() + point() ^ star(Distance)", "title": "" }, { "docid": "51647629659c238f5f8fa940ad06110e", "score": "0.50166315", "text": "def get_clique(self, asi, nodesi, disti, asj, nodesj, distj):\n g1_elem = [asi[i][4] for i in nodesi]\n g2_elem = [asj[i][4] for i in nodesj]\n nodes = mcqd.correspondence(g1_elem, g2_elem)\n if not nodes:\n return [], []\n\n g1_coords = [asi[i][1:4] for i in nodesi]\n g2_coords = [asj[i][1:4] for i in nodesj]\n #f1 = open('debug_n1.xyz', 'w')\n #f2 = 
open('debug_n2.xyz', 'w')\n\n #f1.writelines(\"%i\\nfirst\\n\"%(len(g1_coords)))\n #for a,c in zip(g1_elem, g1_coords):\n # f1.writelines(\"%s %6.3f %6.3f %6.3f\\n\"%(a, c[0], c[1], c[2]))\n #f2.writelines(\"%i\\nsecond\\n\"%(len(g2_coords)))\n #for a,c in zip(g2_elem, g2_coords):\n # f2.writelines(\"%s %6.3f %6.3f %6.3f\\n\"%(a, c[0], c[1], c[2]))\n\n g1_dist = disti[nodesi,:][:,nodesi]\n g2_dist = distj[nodesj,:][:,nodesj]\n adj_matrix = mcqd.correspondence_edges(nodes,\n g1_dist, \n g2_dist,\n self.tol)\n size = len(nodes)\n clique = mcqd.maxclique(adj_matrix, size)\n #f1.writelines(\"%i\\nfirst\\n\"%(len(clique)))\n #for k in [nodes[i][0] for i in clique]:\n # a = g1_elem[k]\n # c = g1_coords[k]\n # f1.writelines(\"%s %6.3f %6.3f %6.3f\\n\"%(a, c[0], c[1], c[2]))\n #f2.writelines(\"%i\\nsecond\\n\"%(len((clique))))\n #for k in [nodes[i][1] for i in clique]:\n # a = g2_elem[k]\n # c = g2_coords[k]\n # f2.writelines(\"%s %6.3f %6.3f %6.3f\\n\"%(a, c[0], c[1], c[2]))\n #f1.close()\n #f2.close()\n #sys.exit()\n if not clique:\n return [],[]\n else:\n return [nodes[i][0] for i in clique], [nodes[i][1] for i in clique]\n # catch-all?\n return [], []", "title": "" }, { "docid": "4ed7b1c19c265aa4bdd4196edbf5a81b", "score": "0.5016147", "text": "def distance(self) -> float:\r\n cur = self.get_local_pos()\r\n y = self.tgt[0] - cur[0]\r\n x = self.tgt[1] - cur[1]\r\n # print(self.tgt, cur)\r\n distance = math.sqrt((x * x) + (y * y))\r\n return distance", "title": "" }, { "docid": "aee1c89b450ed07f0873d828d3198a9d", "score": "0.50158954", "text": "def _get_distance2(from_pos: typing.Tuple[int, int], to_pos: typing.Tuple[int, int]) -> float:\n x_from, y_from = from_pos\n x_to, y_to = to_pos\n return math.sqrt((x_to - x_from)**2 + (y_to - y_from)**2)", "title": "" }, { "docid": "c6f5b2532fa816c08b01124bbb98c880", "score": "0.50086695", "text": "def _dense_distance_dual(lock, list1, list2, global_idx, shared_arr, dist_function):\n list_len = len(list1)\n # PID = os.getpid()\n # print(\"PID {} takes index {}\".format(PID, index_i))\n while global_idx.value < list_len:\n with lock:\n if not global_idx.value < list_len: return\n idx = global_idx.value\n global_idx.value += 1\n # if idx % 100 == 0: progressbar(idx, list_len)\n elem_1 = list1[idx]\n for idx_j in range(len(list2)):\n shared_arr[idx, idx_j] = dist_function(elem_1, list2[idx_j])", "title": "" } ]
995892ac6f0346f57b21b0bd45e216a1
When getting the URL of an ImageCacheFile, the storage shouldn't be checked.
[ { "docid": "112b8f8898a4d1badcf03acc13621b82", "score": "0.75797176", "text": "def test_no_io_on_url():\n file = get_image_cache_file()\n file.url\n assert not file.storage.exists.called\n assert not file.storage.open.called", "title": "" } ]
[ { "docid": "cf151055813356df2adfaec8dce1fc64", "score": "0.6698855", "text": "def get_url(url: str) -> Optional[str]:\n try:\n parsed = urlparse(url)\n except ValueError:\n return None\n\n if parsed.scheme in (\"file\", \"\"):\n return unquote(parsed.path)\n elif parsed.scheme in (\"http\", \"https\"):\n if url.startswith(\"https://open.spotify.com/image/\"):\n url = \"https://i.scdn.co/image/\" + url[len(\"https://open.spotify.com/image/\") :]\n\n name = hashlib.sha1(url.encode(\"utf-8\")).hexdigest()\n path = os.path.join(CACHE_PATH, name) + Path(parsed.path).suffix\n\n if os.path.isfile(path):\n info(f\"Already downloaded at {path}\")\n return path\n\n # Download the file to our cache. We should probably do this asynchronously,\n # but rely on the fact that the remote server is _probably_ fast enough.\n warning(f\"Downloading {url} -> {path}\")\n try:\n os.makedirs(CACHE_PATH, exist_ok=True)\n with urlopen(url) as read:\n with open(path, \"wb\") as write:\n while chunk := read.read(2048):\n write.write(chunk)\n\n return path\n except Exception as e:\n critical(\"Error getting image \" + str(e))\n\n try:\n os.remove(path)\n except:\n pass\n\n return None\n else:\n return None", "title": "" }, { "docid": "72f4f90af52d2b7444b01000f96f3083", "score": "0.65864706", "text": "def _getFile(url, cachedFile=True, return_filename=False):\n assert url, \"WHY are you trying to load an empty string url?!?! Nothing good will come of this! In fact, I will assure that! %s\" % (url)\n md5 = hashlib.md5(url).hexdigest()\n filename = os.path.join(config.WEB_CACHE_DIR, md5)\n if os.path.exists(filename) and cachedFile:\n ret = open(filename, 'r').read()\n else:\n opener = urllib.FancyURLopener()\n ret = opener.open(url).read()\n o = open(filename, 'wb') # had to open in binary mode so PIL's Image.Open() function would work\n o.write(ret)\n o.close()\n if return_filename:\n return filename\n else:\n return ret", "title": "" }, { "docid": "f9557fce3d4ba04fc09c0bad47bcbba3", "score": "0.65715086", "text": "def get_image_url():", "title": "" }, { "docid": "360846a536138a14b9e5054d18aed508", "score": "0.64725953", "text": "def store_image(self, http_client, link_hash, src, config):\r\n # check for a cache hit already on disk\r\n image = self.read_localfile(link_hash, src, config)\r\n if image:\r\n return image\r\n\r\n # no cache found download the image\r\n data = self.fetch(http_client, src)\r\n if data:\r\n image = self.write_localfile(data, link_hash, src, config)\r\n if image:\r\n return image\r\n\r\n return None", "title": "" }, { "docid": "ea9ff7f676e06f67db9649193e3dc126", "score": "0.6286346", "text": "def get_from_cache(cls, target_filename):\n is_cached = cls.is_remote_cached(target_filename)\n if is_cached:\n cache = cls.CACHE_BACKEND()\n cache.download(is_cached, target_filename)\n logger.debug('File %r was downloaded from %r', target_filename, cls.CACHE_BACKEND)\n else:\n target_filename = None\n return target_filename", "title": "" }, { "docid": "b7493a2b6bf38fbc0af240ce52d5d06a", "score": "0.62478286", "text": "def cached_path(url_or_filename, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE\n if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in ('http', 'https', 's3'):\n # URL, so get it from the cache (downloading if necessary)\n return 
get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == '':\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))", "title": "" }, { "docid": "129ea1691424a4fa071ef9e803b4c6e4", "score": "0.62376696", "text": "def get_cache_path(self):", "title": "" }, { "docid": "129ea1691424a4fa071ef9e803b4c6e4", "score": "0.62376696", "text": "def get_cache_path(self):", "title": "" }, { "docid": "61132a76e7bcbb2015ab25c848d12c9e", "score": "0.6218982", "text": "def thumbnail_url(self):\n return None", "title": "" }, { "docid": "a7760a4eda1c42f332344829f94b3e62", "score": "0.62101203", "text": "def cache_path(self):", "title": "" }, { "docid": "a7760a4eda1c42f332344829f94b3e62", "score": "0.62101203", "text": "def cache_path(self):", "title": "" }, { "docid": "7a50b38e6ecdb521a8e99d6324b30df5", "score": "0.61887294", "text": "def cache(self):\n\n if self.url and not self.photo:\n result = urllib.urlretrieve(self.url)\n self.photo.save(\n os.path.basename(self.url),\n File(open(result[0]))\n )\n self.save()", "title": "" }, { "docid": "35aa9cd8e2729fde685863e9ddc9e2fe", "score": "0.6184811", "text": "def cache_image(self):\n img_temp = NamedTemporaryFile()\n # Header required for HTTPS connections\n request = Request(self.url, headers={'User-Agent': ''})\n response = urlopen(request)\n type_file = dict(response.info()._headers)['Content-Type']\n if 'image' not in type_file:\n raise ValidationError(\"The URL does not contains any image. (Content-Type: {0}) (URL: {1})\".format(type, self.url))\n # Store the filename with extension\n url_image = urlparse(self.url)\n filename, file_ext = splitext(basename(url_image.path))\n # If the file doesn't have a extension, find it out from the header\n if file_ext == '':\n file_ext = type_file.replace('image/', '')\n self.filename = \"{0}.{1}\".format(filename, file_ext)\n source_data = response.read()\n # Compress the image\n source_data = optimize(source_data)\n img_temp.write(source_data)\n img_temp.flush()\n # Save the image in the server\n self.image .save(self.url, File(img_temp))", "title": "" }, { "docid": "03fc708a87a0973d838e33d0933dff65", "score": "0.615248", "text": "def get_image_url(self, size=None):\n return images.get_serving_url(self.image_blob_key, size=size)", "title": "" }, { "docid": "4ea3c6491b89602baa9a61f29a24525f", "score": "0.6141199", "text": "def _cache_image(self, instance):\n\n image_name = '%s.tar.gz' % instance['image_id']\n full_image_path = '%s/%s' % (FLAGS.ovz_image_template_dir, image_name)\n\n if not os.path.exists(full_image_path):\n # These objects are required to retrieve images from the object store.\n # This is known only to work with glance so far but as I understand it\n # glance's interface matches that of the other object stores.\n user = manager.AuthManager().get_user(instance['user_id'])\n project = manager.AuthManager().get_project(instance['project_id'])\n\n # Grab image and place it in the image cache\n images.fetch(instance['image_id'], full_image_path, user, project)\n return True\n else:\n return False", "title": "" }, { "docid": "fca35fbd5df7d68ae64d33e28208f59a", "score": "0.60866725", "text": "def thumbnail_url_if_set(self):\n return self.thumbnail.url if self.thumbnail else self.file.url", "title": "" }, { "docid": 
"e339c23ae4d948cc42cb3135869e5ec8", "score": "0.6050783", "text": "def image_url(self):\n return self.photo_url or GENERIC_IMAGE", "title": "" }, { "docid": "a860b891acbd0a12adeead856858c9a3", "score": "0.604557", "text": "def get_image_from_uri(cache, url_fetcher, options, url, forced_mime_type=None,\n context=None, orientation='from-image'):\n if url in cache:\n return cache[url]\n\n try:\n with fetch(url_fetcher, url) as result:\n parsed_url = urlparse(result.get('redirected_url'))\n if parsed_url.scheme == 'file':\n filename = url2pathname(parsed_url.path)\n else:\n filename = None\n if 'string' in result:\n string = result['string']\n else:\n string = result['file_obj'].read()\n mime_type = forced_mime_type or result['mime_type']\n\n image = None\n svg_exceptions = []\n # Try to rely on given mimetype for SVG\n if mime_type == 'image/svg+xml':\n try:\n tree = ElementTree.fromstring(string)\n image = SVGImage(tree, url, url_fetcher, context)\n except Exception as svg_exception:\n svg_exceptions.append(svg_exception)\n # Try pillow for raster images, or for failing SVG\n if image is None:\n try:\n pillow_image = Image.open(BytesIO(string))\n except Exception as raster_exception:\n if mime_type == 'image/svg+xml':\n # Tried SVGImage then Pillow for a SVG, abort\n raise ImageLoadingError.from_exception(svg_exceptions[0])\n try:\n # Last chance, try SVG\n tree = ElementTree.fromstring(string)\n image = SVGImage(tree, url, url_fetcher, context)\n except Exception:\n # Tried Pillow then SVGImage for a raster, abort\n raise ImageLoadingError.from_exception(raster_exception)\n else:\n # Store image id to enable cache in Stream.add_image\n image_id = md5(url.encode()).hexdigest()\n image = RasterImage(\n pillow_image, image_id, string, filename, cache,\n orientation, options)\n\n except (URLFetchingError, ImageLoadingError) as exception:\n LOGGER.error('Failed to load image at %r: %s', url, exception)\n image = None\n\n cache[url] = image\n return image", "title": "" }, { "docid": "9f75c530ccf52e39c8285cbd7f5ff825", "score": "0.60300225", "text": "def get_url_image(self, obj):\n return settings.IMAGE_HOST + obj.image.url", "title": "" }, { "docid": "9f75c530ccf52e39c8285cbd7f5ff825", "score": "0.60300225", "text": "def get_url_image(self, obj):\n return settings.IMAGE_HOST + obj.image.url", "title": "" }, { "docid": "9f75c530ccf52e39c8285cbd7f5ff825", "score": "0.60300225", "text": "def get_url_image(self, obj):\n return settings.IMAGE_HOST + obj.image.url", "title": "" }, { "docid": "762e556dc9b2b02b51d2523e91e56cf1", "score": "0.60176134", "text": "def test_raw_file_url_error(self):\n repository = self.remote_repository\n\n self.spy_on(repository._get_file_uncached,\n op=kgb.SpyOpReturn(b'first'))\n\n self.assertEqual(repository.get_file('PATH', 'd7e96b3'),\n b'first')\n\n # Ensure output of fake result matches.\n repository._get_file_uncached.unspy()\n self.spy_on(repository._get_file_uncached,\n op=kgb.SpyOpReturn(b'second'))\n\n # Grab from cache when no changes and change fake result to confirm\n # it is not called.\n self.assertEqual(repository.get_file('PATH', 'd7e96b3'),\n b'first')\n\n # When raw_file_url changed, do not grab from cache and ensure output\n # equals second fake value.\n repository.raw_file_url = \\\n 'http://github.com/api/v2/yaml/blob/show/reviewboard/<revision>'\n\n self.assertEqual(repository.get_file('PATH', 'd7e96b3'),\n b'second')", "title": "" }, { "docid": "d63e9ac3875bf220f8f0ccd8b2ac9fac", "score": "0.6004812", "text": "def get_cache_file_path(self) -> str:\n 
return self.cache_file_path", "title": "" }, { "docid": "277559c973684bc5ef316751eeed4694", "score": "0.6003073", "text": "def get_url_image(self, obj):\n return settings.SERVER_HOST + obj.image.url", "title": "" }, { "docid": "099e631d32bbefc203a326134b52a278", "score": "0.59900737", "text": "def get_image_url(self, image_url):\n if image_url:\n return '{0}?source={1}'.format(self.config['links']['imageProxy'], image_url)\n else:\n return None", "title": "" }, { "docid": "d53f8a3e7c5a7a10ba1fac93916a377b", "score": "0.5987534", "text": "def url(self, name):\n raise NotImplementedError(\"subclasses of Storage must provide a url() method\")", "title": "" }, { "docid": "cbf48bf053012e72024b85bb0577d5d6", "score": "0.5981657", "text": "def getOrDownloadImageObject(self, url):\n \n if \"//\" in url:\n return self.downloadImage(url)\n else:\n return self.getPILFromPath(url)", "title": "" }, { "docid": "63888a9f9dbffee202ba36f691298f02", "score": "0.5955442", "text": "def _get_cached_filepath(prefix, url):\n filename = '{prefix}_{hash_string}.cache'.format(\n prefix=prefix,\n hash_string=_hash_value(url),\n )\n logger.debug('Cached filepath: ' + os.path.join(CACHE_DIRECTORY, filename))\n return os.path.join(CACHE_DIRECTORY, filename)", "title": "" }, { "docid": "5df5c0188e8e81a747acf18e34097d30", "score": "0.59553915", "text": "def image_url(self) -> str:\n return self._image_url", "title": "" }, { "docid": "2d74e7e89edaf361e55270287321829a", "score": "0.593597", "text": "def validate_and_cache(self):\n if not self.title:\n raise ValidationError('The image has no title, which is required. (URL: {0})'.format(self.url))\n if not self.url:\n raise ValidationError('The URL field is empty (Image title: {0})'.format(self.title))\n\n # Check that the URL is valid\n val = URLValidator()\n try:\n val(self.url)\n except ValidationError as e:\n raise ValidationError('The URL is not correctly formatted. 
Enter a valid URL (URL: {0})'.format(self.url))\n\n # Store the image in the server\n self.cache_image()", "title": "" }, { "docid": "7cf149dfe0f709353e528295bdacf8f9", "score": "0.5902302", "text": "def get_datafile_url(self):\n try:\n return self.datafile.url\n except ValueError:\n if core.utils.is_absolute_url(self.source):\n if self.source.startswith('s3://'):\n return None # file is in the UPLOAD_BUCKET\n return self.source\n logger.error(\"File not found at '%s'\", self.datafile.name)\n return None", "title": "" }, { "docid": "57a9d9e615daa74b8837d20918b7a5d6", "score": "0.5894909", "text": "def cache(self):\n\t\tprint self.url\n\t\tif self.url and not self.streetimage:\n\t\t\tresult = urllib.urlretrieve(self.url)\n\t\t\tfname = os.path.basename(self.url).split('&')[-1]+\".jpg\"\n\t\t\tprint 'fname = ', fname, 'result = ', result\n\t\t\tself.streetimage.save(fname, File(open(result[0])))\n\t\t\tself.save()", "title": "" }, { "docid": "6e382109b7137352fb9ebdd83e101859", "score": "0.58891803", "text": "def cache_and_save(self):\n if self.url and not self.photo:\n result = urllib.urlretrieve(self.url)\n self.photo.save(\n os.path.basename(self.url),\n File(open(result[0], 'rb')),\n )\n self.save()", "title": "" }, { "docid": "8ba3147a58f3a9622dd3120811484dc6", "score": "0.58803225", "text": "def get_local_image(self, src):\r\n local_image = ImageUtils.store_image(None,\r\n self.link_hash, src, self.config)\r\n return local_image", "title": "" }, { "docid": "27bfaa923389d591b6cb987611bc6a3a", "score": "0.58715415", "text": "def get_image(self, processor, source, cache=None):\n cache = cache or self._get_cache_filename(processor, source)\n cache_path = os.path.join(self.cache_dir, cache)\n if os.path.exists(cache_path):\n return CachedImage(cache_path)", "title": "" }, { "docid": "d9a7cc94bb9381ad6c48f923211e1866", "score": "0.58592206", "text": "def use_cached_files(self, cache_key):\r\n pass", "title": "" }, { "docid": "891a1ad3b32b6ed2780b7d34d5b7be36", "score": "0.58568215", "text": "def get_image(self):\n if not hasattr(self, '_BasePublication__image_cache'):\n images = self.get_images()\n self.__image_cache = images[0].picture if images else None\n return self.__image_cache", "title": "" }, { "docid": "f16cd73e16b8bb6a66e799b8d1de4ef5", "score": "0.5852952", "text": "def cached_path(url_or_filename, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in (\"http\", \"https\", \"s3\"):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == \"\":\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))", "title": "" }, { "docid": "324d7d46c685a10829b5925f968c2f9c", "score": "0.5839466", "text": "def get_url(self):\n raise NotImplementedError(\"This asset does not have a URL\")", "title": "" }, { "docid": "509a07a82980e43bad021d05d168019c", "score": "0.58318967", "text": "def get_image_from_cache(cache, file_path):\n if file_path in cache:\n return 
cache[file_path]\n image = read_image(file_path, GRAY_NUMBER)\n cache[file_path] = image\n return image", "title": "" }, { "docid": "9f80868219f4a027890c09d60731d23b", "score": "0.58224815", "text": "def get_image_file(self, processor, source, cache=None):\n cache = cache or self._get_cache_filename(processor, source)\n cache_path = os.path.join(self.cache_dir, cache)\n if not os.path.exists(cache_path) or source_changed(source, cache):\n processor.process(source).save(cache_path)\n return cache_path", "title": "" }, { "docid": "62efb66dcf6e7ef74f9b2221b7bbeabe", "score": "0.5819388", "text": "def find_image_url(lat_value, long_value):\n global custom_memory, custom_hit, custom_miss, total_custom_memory\n image_tuple = (lat_value, long_value)\n \n #When Latitude Longitude in Cache and HIT\n if image_tuple in custom_memory:\n custom_hit+=1\n custom_memory[image_tuple][1] = datetime.now()\n return custom_memory[image_tuple][0],\"hit\"\n \n #When Latitude Longitude NOT in Cache and MISS\n if len(custom_memory) < total_custom_memory:\n custom_miss+=1\n custom_memory[image_tuple] = [GetImageURL(*image_tuple), datetime.now()]\n return custom_memory[image_tuple][0], \"miss_when_not_full\"\n else:\n custom_memory = sorted([(key, list_vals) for key, list_vals in custom_memory.items()], key=lambda i:i[1][1], reverse=False)\n del custom_memory[0]\n custom_memory = dict(custom_memory)\n custom_miss+=1\n custom_memory[image_tuple] = [GetImageURL(*image_tuple), datetime.now()]\n return custom_memory[image_tuple][0], \"miss_when_after_full\"", "title": "" }, { "docid": "6f70a4d0245f57c3bd7129ffaa872644", "score": "0.5802188", "text": "def get_cache_file_path(self, URL):\n\n filename = hashlib.md5(URL.encode('utf-8')).hexdigest() + '.wbc'\n path = pathlib.Path(config.WEATHER_PROVIDERS['App']['Cache_path'])\n cache_file_path = path.joinpath(filename)\n\n return cache_file_path", "title": "" }, { "docid": "3c8fc215f02abe24587a269f6a6675e3", "score": "0.58008486", "text": "def _get_image_url(self, image_filepath):\n return self.IMAGE_URL + image_filepath", "title": "" }, { "docid": "864fcd35ae4b4ccf8a19864fbd27e6e7", "score": "0.57877064", "text": "def url(self):\n if not self.fid:\n raise exceptions.NotCreatedError(object=self)\n\n return self._file_url(self.fid)", "title": "" }, { "docid": "8402b97108d2d6c44df73ef0999f6911", "score": "0.5785012", "text": "def test_get_file_exists_caching_with_raw_url(self):\n repository = self.remote_repository\n\n self.spy_on(repository._get_file_exists_uncached,\n op=kgb.SpyOpReturn(True))\n\n # Use spy to put key into cache\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Remove spy to ensure key is still in cache without needing spy\n repository._get_file_exists_uncached.unspy()\n self.assertTrue(repository.get_file_exists('PATH', 'd7e96b3'))\n\n # Does not exist when raw_file_url changed because it is not cached.\n repository.raw_file_url = \\\n 'http://github.com/api/v2/yaml/blob/show/reviewboard/<revision>'\n\n self.assertFalse(repository.get_file_exists('PATH', 'd7e96b3'))", "title": "" }, { "docid": "d78a3720c36a2b82f4f5ecbf0a872a6c", "score": "0.5768686", "text": "def _lookup_url(self, endpoint, values):\r\n try:\r\n cont = self.get_container(values['container'])\r\n if cont.cdn_enabled:\r\n return \"%s/%s\" % (cont.cdn_uri, values['filename'])\r\n else:\r\n return None\r\n except: # pragma: no cover\r\n return None", "title": "" }, { "docid": "d42addcd78739a55548169c9eb595126", "score": "0.5761914", "text": "def get_thumbnail_url():", 
"title": "" }, { "docid": "65e81fa5aee813cd4c1f46c6a079f50a", "score": "0.5757347", "text": "def getOriginalFile(url):\n # does url exist?\n if url is None or url is \"\":\n return", "title": "" }, { "docid": "bbebd6c03e7319083f008fb34b478176", "score": "0.5726853", "text": "def media_image_url(self):\n return self._imageUrl", "title": "" }, { "docid": "1037b994c8b16dc4f37053ed92c7517a", "score": "0.57236093", "text": "def get_image(self, address):\r\n # Do a bit of caching\r\n if self.last_image and self.last_image.contains(address):\r\n return self.last_image\r\n \r\n # if it was not cached, traverse all of the loaded images\r\n for image in self.loaded_images:\r\n if image.contains(address):\r\n self.last_image = image\r\n return image\r\n \r\n return None", "title": "" }, { "docid": "f7c5f23b793fa6026f86fe6dfe5f9bb8", "score": "0.5723437", "text": "def resolve_image(self, info):\n if self.image:\n self.image = info.context.build_absolute_uri(self.image.url)\n return self.image", "title": "" }, { "docid": "82b3ceee189b3922eb925e42d3bca6c1", "score": "0.57201684", "text": "def getReferenceImageUrl(self, name):\n bucket = self.productSearch.bucket\n blobName = self._getReferenceImageBlobName(name)\n return bucket.blob(blobName).public_url", "title": "" }, { "docid": "74c3979aeac460f150fecde911b9b8d3", "score": "0.57170355", "text": "def get_url(self):\n try:\n return self._file.url\n except AttributeError:\n raise NotImplementedError(\"Underlying file does not have a URL.\")", "title": "" }, { "docid": "e10ddb5164aaee64b255bba389dcd76f", "score": "0.5712572", "text": "def get_url(self, image_id):\n key = image_id if image_id else self.default_image\n if key:\n return u'{bucket_url}{key}'.format(\n bucket_url=self.connection.bucket_url,\n key=self.id_to_key(key))\n else:\n return None", "title": "" }, { "docid": "4bbaa9d409d25195171fb85a1468bc0b", "score": "0.5703366", "text": "def still_image_url(self) -> str:\n\t\treturn 'grab.jpg?oid={0}'.format(self._oid)", "title": "" }, { "docid": "1c6fc1fb58919f847379df1ae8a085a7", "score": "0.5696939", "text": "def media_image_url(self):\n return self._media_image_url", "title": "" }, { "docid": "1c6fc1fb58919f847379df1ae8a085a7", "score": "0.5696939", "text": "def media_image_url(self):\n return self._media_image_url", "title": "" }, { "docid": "340ab0cf3c3095146d9fad9b727f12d9", "score": "0.5694348", "text": "def get_url(self):\n return staticfiles_storage.url(self._name)", "title": "" }, { "docid": "530f1fa329dbe7f75a49513bc260bfee", "score": "0.56889766", "text": "def url(self):\n return self.storage.url(self.name)", "title": "" }, { "docid": "9b45df474dbfd8a696525d337b08d344", "score": "0.56876", "text": "def get_cached_path(self):\n if util.IS_CACHE_ENABLED and not self.physical_key.is_local():\n return ObjectPathCache.get(str(self.physical_key))\n return None", "title": "" }, { "docid": "da51497e05c11331433d9654ec3c2940", "score": "0.5681513", "text": "def image_link(self):\r\n\r\n if not self._image_link:\r\n warnings.warn(\"Seems like you are trying to pull out the image link while not having it.\", Warning, stacklevel=2)\r\n\r\n return self._image_link", "title": "" }, { "docid": "fca06b98517b2904cb70c33a449afdb7", "score": "0.5672595", "text": "def cache_file(url, prefix):\n cache_filepath = _get_cached_filepath(\n prefix=prefix,\n url=url,\n )\n # If the file exists, return path.\n if os.path.isfile(cache_filepath):\n logger.info('Returning cached file for {}.'.format(url))\n return cache_filepath\n # If the file does not exist, 
download and return path.\n else:\n r = requests.get(url, verify=False)\n\n with open(cache_filepath, 'wb') as f:\n f.write(r.content)\n\n logger.info('Caching file for {}.'.format(url))\n return cache_filepath", "title": "" }, { "docid": "bffeb7b050dfaf2d0519788e2207dd61", "score": "0.5669351", "text": "def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None):\n\tif cache_dir is None:\n\t\tcache_dir = PYTORCH_TRANSFORMERS_CACHE\n\tif sys.version_info[0] == 3 and isinstance (url_or_filename, Path):\n\t\turl_or_filename = str (url_or_filename)\n\tif sys.version_info[0] == 3 and isinstance (cache_dir, Path):\n\t\tcache_dir = str (cache_dir)\n\n\tparsed = urlparse (url_or_filename)\n\n\tif parsed.scheme in ('http', 'https', 's3'):\n\t\t# URL, so get it from the cache (downloading if necessary)\n\t\treturn get_from_cache (url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies)\n\telif os.path.exists (url_or_filename):\n\t\t# File, and it exists.\n\t\treturn url_or_filename\n\telif parsed.scheme == '':\n\t\t# File, but it doesn't exist.\n\t\traise EnvironmentError (\"file {} not found\".format (url_or_filename))\n\telse:\n\t\t# Something unknown\n\t\traise ValueError (\"unable to parse {} as a URL or as a local path\".format (url_or_filename))", "title": "" }, { "docid": "1f6a12c1c87800c4ff2d67ed99c28cd8", "score": "0.56690085", "text": "def get_image_url(self, image_id):\n if image_id in self.image_id_to_url:\n return self.image_id_to_url[image_id]\n return None", "title": "" }, { "docid": "a6701645a48ac661895436d6df097cea", "score": "0.56666386", "text": "def get_picture(self):\n\t\tno_picture = settings.STATIC_URL + 'img/img_avatar.png'\n\t\ttry:\n\t\t\treturn self.picture.url\n\t\texcept:\n\t\t\treturn no_picture", "title": "" }, { "docid": "3cd26b5e928122c98d590b716098dbfb", "score": "0.5647679", "text": "def get_profile_picture_url(cls, filename):\n if filename is None:\n return None\n profile_picture = bucket.blob('images/users/'+filename)\n if profile_picture.exists():\n profile_picture.make_public()\n return profile_picture.public_url\n return None", "title": "" }, { "docid": "c62be71979a39ec17a1538255180b7fa", "score": "0.56406003", "text": "def test_no_io_on_bool():\n file = get_image_cache_file()\n bool(file)\n assert not file.storage.exists.called\n assert not file.storage.open.called", "title": "" }, { "docid": "639828d1cbb6361f545642adf5cf8778", "score": "0.56374377", "text": "def get_cached_item(self, path):\n item_path = '%s/%s' % (\n self.cache_folder,\n path.strip('/')\n )\n\n try:\n cf_object = self.container.get_object(item_path)\n except NoSuchObject:\n return False\n\n f = tempfile.NamedTemporaryFile()\n f.write(cf_object.fetch())\n f.seek(0)\n image = Image.open(f.name)\n f.close()\n\n return image", "title": "" }, { "docid": "b1604feafdd7d8507361aa4a3be217be", "score": "0.5623213", "text": "def check_cached_item(self, path):\n item_path = '%s/%s' % (\n self.cache_folder,\n path.strip('/')\n )\n\n try:\n self.container.get_object(item_path)\n return '%s/%s' % (self.container.cdn_ssl_uri, item_path)\n except NoSuchObject:\n return False", "title": "" }, { "docid": "5c8a1dc3696e8a75de7b628ce729142b", "score": "0.5610468", "text": "def __download_image_file(self):\n if not file_utils.file_exists(self.image_file_path):\n logger.info('Downloading Image from - ' + self.image_url)\n return file_utils.download(self.image_url, self.download_path)", "title": "" }, { "docid": "8e3104efbfb9ac61cca9cd498e013dfc", "score": 
"0.558419", "text": "def get_url(self):\n return self.get_file(uri_type=URI_URL, no_copy=True)", "title": "" }, { "docid": "8214bd829b48a321cef5b0087594a0fb", "score": "0.5582438", "text": "def media_image_url(self):\n if (media_status := self._media_status()[0]) is None:\n return None\n\n images = media_status.images\n\n return images[0].url if images and images[0].url else None", "title": "" }, { "docid": "c530df7cb95ce5eb7222937c29ddbe3f", "score": "0.5556376", "text": "def image_url(self) -> str:\n return pulumi.get(self, \"image_url\")", "title": "" }, { "docid": "30e0b894b4d3a2ecab4fc955ca39a81b", "score": "0.5550424", "text": "def GET(self, url):\n try:\n f = open(url, 'r')\n image = f.read()\n f.close()\n except:\n\n db_module.resave_img(url[5:])\n\n f = open(url, 'r')\n image = f.read()\n f.close()\n\n return image", "title": "" }, { "docid": "0d2c2c3df784b4717ed009cd61ef88d7", "score": "0.55337256", "text": "def get_public_url(self, get_image_public_url):\n\n # get that url\n return get_image_public_url(self.short_hash)", "title": "" }, { "docid": "2134c2ca03782a7135ea9e357e869fb3", "score": "0.55135864", "text": "def get_image_path(self) -> Optional[str]:\n if not self.image or not self.image.file_path:\n return None\n return self.image.file_path", "title": "" }, { "docid": "27717aaab8000dd85949c2255e23e9f7", "score": "0.55051106", "text": "def load_file_from_url(self, url: str) -> bytes:\n cached_content = self.cache_get(url)\n if cached_content is not None:\n return cached_content\n try:\n req = requests.get(url, timeout=self.requests_timeout)\n req.raise_for_status()\n content = req.content\n self.cache_set(url, content)\n except requests.RequestException as err:\n self.log_error(err)\n repl_content = self.get_replacement_file(url)\n if repl_content is None:\n raise ImageNotFound(err)\n content = repl_content\n return content", "title": "" }, { "docid": "11bf81c4dad7121a88a768dfe1a822d6", "score": "0.5502", "text": "def filename_to_url(filename, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n cache_path = os.path.join(cache_dir, filename)\n if not os.path.exists(cache_path):\n raise EnvironmentError(\"file {} not found\".format(cache_path))\n\n meta_path = cache_path + '.json'\n if not os.path.exists(meta_path):\n raise EnvironmentError(\"file {} not found\".format(meta_path))\n\n with open(meta_path, encoding=\"utf-8\") as meta_file:\n metadata = json.load(meta_file)\n url = metadata['url']\n etag = metadata['etag']\n\n return url, etag", "title": "" }, { "docid": "684836c82212312dc53fb1bc7fab1453", "score": "0.5480511", "text": "def image_url(self):\n return \"{}/mjpeg_read.php\".format(self.base_url)", "title": "" }, { "docid": "096dcc180c77a6fa8b47da5b2d367e13", "score": "0.5476265", "text": "def _getImagePath(self, link):\n return settings.WIKI_UPLOAD_URL + urlquote(link)", "title": "" }, { "docid": "9ef574e5d5e9bc2749c32ef322cc57e9", "score": "0.5466016", "text": "def is_remote_cached(cls, target_filename):\n is_cached = None\n cache = cls.CACHE_BACKEND()\n for file_name, file_id in cache.search():\n if file_name == os.path.basename(target_filename):\n is_cached = file_id\n logger.debug('File %r already cached at %r', target_filename, cls.CACHE_BACKEND)\n break\n return is_cached", "title": "" }, { "docid": "143675c585ea776bc420f3f9fb4a94ca", "score": "0.5450554", "text": "def get_url(self, asset, force_save=None):\n if force_save is 
None:\n force_save = not settings.DEBUG\n asset = AdaptiveAsset(asset)\n if not force_save:\n try:\n return asset.get_url()\n except NotImplementedError:\n pass\n return self._storage.url(self.get_name(asset))", "title": "" }, { "docid": "b4955c1e8035334aad27d4d81430933a", "score": "0.5419665", "text": "def get_entry_image(uri):\n entry = get_entry(uri)\n try:\n return entry['images'][0]['files'][1]['uri'].replace('https','http')\n except KeyError:\n # No image in the entry\n return ''", "title": "" }, { "docid": "d57ce2f4cf19d586421aafef6b60b48b", "score": "0.5411988", "text": "def testFetchNonGs(self):\n def _Fetch(*args, **_kwargs):\n # Probably shouldn't assume this ordering, but best way for now.\n cmd = args[0]\n local_path = cmd[-1]\n osutils.Touch(local_path)\n self.PatchObject(retry_util, 'RunCurl', side_effect=_Fetch)\n\n schemes = ('ftp', 'http', 'https')\n for scheme in schemes:\n key = (scheme, 'foo')\n url = '%s://some.site.localdomain/file_go_boom' % scheme\n with self.cache.Lookup(key) as ref:\n self.assertFalse(ref.Exists())\n ref.Assign(url)\n self.assertTrue(ref.Exists())", "title": "" }, { "docid": "7f9c90434f2818f3c0294273c9bd0944", "score": "0.5401404", "text": "def image_url(self, url):\n return self.is_regex_url(url, self.is_image_regex)", "title": "" }, { "docid": "c406a6ceaeb553115ecbd520b4bda131", "score": "0.54001504", "text": "def test_rackspace_uploader_lookup_url_none(self, mock1):\r\n filename = 'test.jpg'\r\n with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:\r\n cdn_enabled_mock = PropertyMock(return_value=False)\r\n type(fake_container).cdn_enabled = cdn_enabled_mock\r\n mycf.get_container.return_value = fake_container\r\n\r\n u = RackspaceUploader()\r\n u.init_app(self.flask_app)\r\n res = u._lookup_url('rackspace', {'filename': filename,\r\n 'container': 'user_3'})\r\n err_msg = \"We should get the None\"\r\n assert res is None, err_msg", "title": "" }, { "docid": "a3152f2b762a936e966c9cc738b8bbd5", "score": "0.5397179", "text": "def maybe_download_from_cloud(url, filename, subfolder=None, cache_dir=None, refresh_cache=False) -> str:\n if cache_dir is None:\n cache_location = Path.joinpath(Path.home(), \".cache/torch/mridc\")\n else:\n cache_location = cache_dir\n if subfolder is not None:\n destination = Path.joinpath(cache_location, subfolder)\n else:\n destination = cache_location\n\n if not os.path.exists(destination):\n os.makedirs(destination, exist_ok=True)\n\n destination_file = Path.joinpath(destination, filename)\n\n if os.path.exists(destination_file):\n logging.info(f\"Found existing object {destination_file}.\")\n if refresh_cache:\n logging.info(\"Asked to refresh the cache.\")\n logging.info(f\"Deleting file: {destination_file}\")\n os.remove(destination_file)\n else:\n logging.info(f\"Re-using file from: {destination_file}\")\n return str(destination_file)\n # download file\n wget_uri = url + filename\n logging.info(f\"Downloading from: {wget_uri} to {str(destination_file)}\")\n # NGC links do not work everytime so we try and wait\n i = 0\n max_attempts = 3\n while i < max_attempts:\n i += 1\n try:\n wget.download(wget_uri, str(destination_file))\n if os.path.exists(destination_file):\n return str(destination_file)\n return \"\"\n except Exception as e:\n logging.info(f\"Download from cloud failed. 
Attempt {i} of {max_attempts}\")\n logging.info(f\"Error: {e}\")\n sleep(0.05)\n continue\n raise ValueError(\"Not able to download url right now, please try again.\")", "title": "" }, { "docid": "63ffae63f9cfaf180165eec3aaf84e80", "score": "0.53893477", "text": "def img_url_big(self):\n url = '%s=s%s' % (self.img_url, self.BIG_SIZE_PX)\n if self.img_rot in Plaque.ALLOWED_ROTATIONS:\n url = \"%s-r%s\" % (url, self.img_rot)\n return url", "title": "" }, { "docid": "91f3a21ab0239c9666a90328e27e6ec5", "score": "0.53887075", "text": "def save_this_url(self, event, *args, **kwargs):\n\n # Get the url object\n url = event.url\n # If not image, save always\n if not url.is_image():\n return True\n else:\n # If image, check for content-length > 4K\n size = url.clength\n return (size>self.size_threshold)", "title": "" }, { "docid": "1e3dd606b4563ff5817e50ce68236beb", "score": "0.53859854", "text": "def get_thumb_url(self):\n return self.thumb_url", "title": "" }, { "docid": "fdd36af4854088c0ed81c5453d3e251e", "score": "0.5382295", "text": "def ImageUrlToFile(image_url):\n BAD_RETURN = False, ''\n prefix = request.url_root + \"static/%s/\" % app.config[Constants.KEY_UPLOAD_DIR]\n if not image_url.startswith(prefix):\n return BAD_RETURN\n a = image_url.split('/')\n if not a or (image_url != prefix + a[-1]):\n return BAD_RETURN\n return True, a[-1]", "title": "" }, { "docid": "1b5478b5f96f10b4c251dd05f7093985", "score": "0.53754765", "text": "def image_url(self, name):\r\n s3_key = self._generate_s3_key(name)\r\n return s3_key.generate_url(self.IMAGE_LINK_DURATION)", "title": "" }, { "docid": "68194d2a9ea479fda22ec9b62928e763", "score": "0.5362435", "text": "def MEDIA_URL(path=None):\n\n if not path:\n return random_media_url()\n\n # Remove leading slashes\n while path.startswith('/'):\n path = path[1:]\n\n # Remove existing query string\n path = path.split('?')[0]\n\n # Find the real file modification time, but first try to get it\n # from MEDIA_URL_MTIMES\n filepath = os.path.join(settings.STATIC_ROOT, path)\n if path not in MEDIA_URL_MTIMES:\n try:\n mtime = os.path.getmtime(filepath)\n except (IOError, OSError):\n mtime = None\n MEDIA_URL_MTIMES[path] = mtime\n else:\n mtime = MEDIA_URL_MTIMES[path]\n\n cache_key = 'media_url.%s' % md5('%s:%s' % (path, mtime)).hexdigest()\n\n # Do not check the cache if debug is set\n # if settings.DEBUG or not settings.STATIC_URLS:\n # url = None\n # else:\n # url = cache.get(cache_key)\n # if url:\n # return url\n url = '%s%s?v=%s' % (random_media_url(), path, mtime or 1)\n cache.set(cache_key, url)\n return url", "title": "" }, { "docid": "a1c06e824c756673e313849ab9b1d07c", "score": "0.5346705", "text": "def get_image_url(self, image_name: str, thumbnail: bool = False) -> str:\n pass", "title": "" }, { "docid": "35d699856c542a26abde6f8aed5b6265", "score": "0.5339744", "text": "def __get_image_file(self):\n if file_utils.file_exists(self.image_file_path):\n return open(self.image_file_path, 'r')\n else:\n if not os.path.exists(self.download_path):\n os.makedirs(self.download_path)\n logger.info('Found existing image file')\n return self.__download_image_file()", "title": "" }, { "docid": "a8f20030e6deecaae685e3bc5c9fcff6", "score": "0.5338195", "text": "def get_url(self, name):\n if self.folder.type != 's3':\n return super(NereidStaticFile, self).get_url(name)\n\n cloudfront = config.get('nereid_s3', 'cloudfront')\n if cloudfront:\n return '/'.join([cloudfront, self.s3_key])\n\n return \"https://s3.amazonaws.com/%s/%s\" % (\n config.get('nereid_s3', 'bucket'), 
self.s3_key\n )", "title": "" }, { "docid": "6f58f3dcb62fccb43bef055903d9e73c", "score": "0.53367305", "text": "def cachebust_url_for(endpoint, **kwargs):\n if endpoint == \"static\":\n endpoint = \"static_cachebust\"\n path = os.path.join(app.static_folder, kwargs.get(\"filename\"))\n kwargs[\"timestamp\"] = int(os.stat(path).st_mtime)\n return url_for(endpoint, **kwargs)", "title": "" }, { "docid": "2389d4a6d47fe5949669c8278694fe8d", "score": "0.53353524", "text": "def fetch_and_cache(data_url, file, data_dir=\"data\", force=False):\n data_dir = Path(data_dir)\n data_dir.mkdir(exist_ok = True)\n file_path = data_dir / Path(file)\n if force and file_path.exists():\n file_path.unlink()\n if force or not file_path.exists():\n print('Downloading...', end=' ')\n resp = requests.get(data_url)\n with file_path.open('wb') as f:\n f.write(resp.content)\n print('Done!')\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n else:\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n print(\"Using cached version that was downloaded (UTC):\", last_modified_time)\n return file_path", "title": "" }, { "docid": "32d074ec0ce739d3b2987a202ab064dd", "score": "0.533497", "text": "def get_thumbnail_url(self):\n return self.thumbnail_url", "title": "" } ]
9a8974939db3cf419a2799d18e500be1
The ID of an Azure Active Directory server application of type \\"Web app/API\\". This application represents the managed cluster's apiserver (Server application) (string)
[ { "docid": "53d534dd54140f88c1bbaf2d0e60f8e4", "score": "0.73392534", "text": "def add_server_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"add_server_app_id\")", "title": "" } ]
[ { "docid": "134caedbe6b529fe9ce366e221179202", "score": "0.79774123", "text": "def server_app_id(self) -> Optional[str]:\n return pulumi.get(self, \"server_app_id\")", "title": "" }, { "docid": "aac51cc07f1e0d6be0a043f58d9e1552", "score": "0.76388377", "text": "def app_id(self) -> str:\n return pulumi.get(self, \"app_id\")", "title": "" }, { "docid": "52521d8012c9edb642f88d665b35f467", "score": "0.7490504", "text": "def application_id(self) -> str:\n return pulumi.get(self, \"application_id\")", "title": "" }, { "docid": "52521d8012c9edb642f88d665b35f467", "score": "0.7490504", "text": "def application_id(self) -> str:\n return pulumi.get(self, \"application_id\")", "title": "" }, { "docid": "52521d8012c9edb642f88d665b35f467", "score": "0.7490504", "text": "def application_id(self) -> str:\n return pulumi.get(self, \"application_id\")", "title": "" }, { "docid": "b88e21759c9a8d9df2eada9c1fbf305a", "score": "0.7446178", "text": "def app_id(self):\n service = self.application.settings.get('service')\n version = self.application.settings.get('version')\n return '{}/{}'.format(service, version)", "title": "" }, { "docid": "424bd4ddc0fe95b387588de68503bac1", "score": "0.7381459", "text": "def app_id(self):\n return self._app.app_id", "title": "" }, { "docid": "eee9130362d7b4cb5886e0d550cdde95", "score": "0.7352182", "text": "def app_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"app_id\")", "title": "" }, { "docid": "0ebf723861f8fa76e8d89dd8a1bc86c7", "score": "0.7282523", "text": "def client_app_id(self) -> Optional[str]:\n return pulumi.get(self, \"client_app_id\")", "title": "" }, { "docid": "cf07eff768ba59a593ff9f44ec8fc344", "score": "0.7207375", "text": "def app_id(self):\n return self.properties.get(\"appId\", None)", "title": "" }, { "docid": "b787d8b3eb47d80549407b8053a96a66", "score": "0.71948344", "text": "def application_id(self) -> Optional[str]:\n return pulumi.get(self, \"application_id\")", "title": "" }, { "docid": "bace956f820f1c492ae608a44240b09a", "score": "0.7099986", "text": "def get_vsts_app_id(self):\r\n return '499b84ac-1321-427f-aa17-267ca6975798'", "title": "" }, { "docid": "4680268d9f3566d3a2632c387a3a113c", "score": "0.7072293", "text": "def app_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"app_id\")", "title": "" }, { "docid": "4680268d9f3566d3a2632c387a3a113c", "score": "0.7072293", "text": "def app_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"app_id\")", "title": "" }, { "docid": "f326de85848dddf22b7639fda44f0baa", "score": "0.7050724", "text": "def app_id(self):\n return self._app_id # pragma: no cover", "title": "" }, { "docid": "7f2b709c8fd126dd512765c0b725628b", "score": "0.7047921", "text": "def app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_id\")", "title": "" }, { "docid": "7f2b709c8fd126dd512765c0b725628b", "score": "0.7047921", "text": "def app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_id\")", "title": "" }, { "docid": "26f0fb1169273b7c0308d3f1c6ecfc10", "score": "0.69900674", "text": "def id_app(self):\n return self._id_app", "title": "" }, { "docid": "aed69201f7c899ac4f04458370de3e39", "score": "0.6986363", "text": "def app_id(self):\n return self.__app_id", "title": "" }, { "docid": "f9419ff343a72ffe360390eb41d44cf5", "score": "0.69789577", "text": "def app_id(self):\n return self._app_id", "title": "" }, { "docid": "ca0a5fdcc23f3d8c89b65d020fe0b3a6", "score": "0.693904", "text": "def id(self) -> str:\n return self.data[const.MANIFEST_APP_ID]", 
"title": "" }, { "docid": "a5e79aa8b6a3c0504c2d98aebc4cec52", "score": "0.69121045", "text": "def application_identifier(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_identifier\")", "title": "" }, { "docid": "c3aa01427add088c0ab27c79fa8c6c83", "score": "0.6883003", "text": "def get_app_id():\n if profile is None:\n raise EikonError('401','AppID is missing. Did you forget to call set_app_id()?')\n\n return profile.get_application_id()", "title": "" }, { "docid": "1a9f42dbc77881e6f3d3ac2c98cb4ae0", "score": "0.68096256", "text": "def application_id(self):\n return self._application_id", "title": "" }, { "docid": "ed34ba378f4c409f463d3ec37029caf9", "score": "0.67860776", "text": "def application_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_id\")", "title": "" }, { "docid": "ed34ba378f4c409f463d3ec37029caf9", "score": "0.67860776", "text": "def application_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_id\")", "title": "" }, { "docid": "f0a1f1c1df3a59a4b87af8d6d988a8fc", "score": "0.67743856", "text": "def app_id(self):\n # Validate what was already stored\n self._validate(key=\"app_id\", value=self._app_id)\n return self._app_id", "title": "" }, { "docid": "7d1e692837ed856b30504b4989995a9d", "score": "0.675186", "text": "def authorized_app_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"authorized_app_id\")", "title": "" }, { "docid": "a07f607d76dc13ee038dbdc5973f7b31", "score": "0.6725331", "text": "def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")", "title": "" }, { "docid": "a07f607d76dc13ee038dbdc5973f7b31", "score": "0.6725331", "text": "def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")", "title": "" }, { "docid": "a07f607d76dc13ee038dbdc5973f7b31", "score": "0.6725331", "text": "def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")", "title": "" }, { "docid": "a07f607d76dc13ee038dbdc5973f7b31", "score": "0.6725331", "text": "def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")", "title": "" }, { "docid": "a07f607d76dc13ee038dbdc5973f7b31", "score": "0.6725331", "text": "def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")", "title": "" }, { "docid": "a07f607d76dc13ee038dbdc5973f7b31", "score": "0.6725331", "text": "def application_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_id\")", "title": "" }, { "docid": "ac80fc643d3e50e5eb9af4928a472d3a", "score": "0.6719398", "text": "def get_internal_app(application_id):\n return '{internal_api_path}/{application_id}'.format(internal_api_path=INTERNAL_API_COMMON_PATH,\n application_id=application_id)", "title": "" }, { "docid": "a2695319cc1d42a35cb62d82740f8d9c", "score": "0.671433", "text": "def get_application_id(self):\n if not self.application_id:\n\n raise EikonError('401','AppID was not set (set_app_id was not called)')\n return self.application_id", "title": "" }, { "docid": "a34e2da7d4ce16c970848e02762d04e4", "score": "0.67114675", "text": "def app_instance_id(self):\n return self.properties.get(\"AppInstanceId\", None)", "title": "" }, { "docid": "e41c758a193934f8e0c9aa64cab2d99e", "score": "0.6690899", "text": "def get_app_name(app_id):\n return app_id.lstrip('/').replace('/', '-')", "title": "" }, { "docid": "7b26bbd98f8916ff9405f9a23a414876", "score": "0.6645433", 
"text": "def config_app_id(self):\n return self._config_app_id", "title": "" }, { "docid": "3dfd63d677349a8ada20dc7f211662b0", "score": "0.6616706", "text": "def application_object_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_object_id\")", "title": "" }, { "docid": "42360257690e51bd2a6bd814ec45fa21", "score": "0.6604446", "text": "def app_id(self):\n if self._app_id_present:\n return self._app_id_value\n else:\n raise AttributeError(\"missing required field 'app_id'\")", "title": "" }, { "docid": "42360257690e51bd2a6bd814ec45fa21", "score": "0.6604446", "text": "def app_id(self):\n if self._app_id_present:\n return self._app_id_value\n else:\n raise AttributeError(\"missing required field 'app_id'\")", "title": "" }, { "docid": "2182ad50afdb8703e24f234722b4f59e", "score": "0.6587346", "text": "def get_app_id(self):\n self.blueprint['app_id'] = self.coating.app_id", "title": "" }, { "docid": "19771d2f1b9ad6cfc2e77679c5fc7867", "score": "0.6545031", "text": "def web_app_arm_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"web_app_arm_id\")", "title": "" }, { "docid": "19771d2f1b9ad6cfc2e77679c5fc7867", "score": "0.6545031", "text": "def web_app_arm_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"web_app_arm_id\")", "title": "" }, { "docid": "c9742866e41a72fdacccd64b7d191e2c", "score": "0.65380985", "text": "def application_identifier(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_identifier\")", "title": "" }, { "docid": "c69b81cef7bd9d45b187e2df5d872ad2", "score": "0.65274626", "text": "def api_id(self) -> str:\n return self[\"requestContext\"][\"apiId\"]", "title": "" }, { "docid": "c69b81cef7bd9d45b187e2df5d872ad2", "score": "0.65274626", "text": "def api_id(self) -> str:\n return self[\"requestContext\"][\"apiId\"]", "title": "" }, { "docid": "7bee9c8a89e29a262021f8c750d17327", "score": "0.65250736", "text": "def authorized_app_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"authorized_app_id\")", "title": "" }, { "docid": "b54ae128b14bc11098d701d786b965b0", "score": "0.6520984", "text": "def app_name(self) -> str:\n return self.principal_unit_name.split(\"/\")[0]", "title": "" }, { "docid": "a0b6287556011768ef261ea21abeea9d", "score": "0.6504411", "text": "def authorized_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"authorized_app_id\")", "title": "" }, { "docid": "5aa8be0d7d7295613caa899b2ea966c8", "score": "0.6501335", "text": "def add_client_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"add_client_app_id\")", "title": "" }, { "docid": "c8402fb9dd1fbb9b7b5513ceb1948a9e", "score": "0.6491209", "text": "def get_unique_app_id(self, instance):\n return format_app_id(instance.id)", "title": "" }, { "docid": "c8402fb9dd1fbb9b7b5513ceb1948a9e", "score": "0.6491209", "text": "def get_unique_app_id(self, instance):\n return format_app_id(instance.id)", "title": "" }, { "docid": "c8402fb9dd1fbb9b7b5513ceb1948a9e", "score": "0.6491209", "text": "def get_unique_app_id(self, instance):\n return format_app_id(instance.id)", "title": "" }, { "docid": "847ad1862b3236ca53a4f00aabd2dc3f", "score": "0.6477444", "text": "def app_service_environment_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_service_environment_id\")", "title": "" }, { "docid": "548622f3a0058af0a37c1656f3d9ad77", "score": "0.6473006", "text": "def web_server_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"web_server_id\")", "title": "" 
}, { "docid": "548622f3a0058af0a37c1656f3d9ad77", "score": "0.6473006", "text": "def web_server_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"web_server_id\")", "title": "" }, { "docid": "f771ad6c9c33c7c028b8968844941ba3", "score": "0.6465087", "text": "def server_id(self) -> str:\n return pulumi.get(self, \"server_id\")", "title": "" }, { "docid": "647a9a4ad633cd839b1e2d0013bd7889", "score": "0.6461957", "text": "def application_identifier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_identifier\")", "title": "" }, { "docid": "c8934d9aa93a448afc0883897fbf5643", "score": "0.64232516", "text": "def get_appid():\n key = os.environ.get(\"API_KEY\")\n return f\"APPID={key}\"", "title": "" }, { "docid": "c8ba4ec7a95e1ec12b375e09b6978855", "score": "0.6380201", "text": "def app_service_environment_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_service_environment_id\")", "title": "" }, { "docid": "c8ba4ec7a95e1ec12b375e09b6978855", "score": "0.6380201", "text": "def app_service_environment_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_service_environment_id\")", "title": "" }, { "docid": "41a6f70cc9138341af0817439f64b750", "score": "0.63725096", "text": "def app_name() -> str:\n return get_tokens()[1]", "title": "" }, { "docid": "cc4b40a1f6760c3dd009ad71427c1d1d", "score": "0.63619304", "text": "def get_app_id(self):\n app_conf = get_merged_conf(self.build_dir, \"app.conf\")\n try:\n return app_conf[\"package\"][\"id\"]\n except KeyError as e:\n raise AppVarMagicException(e)", "title": "" }, { "docid": "a033072469d3761d249fc42449081fa4", "score": "0.63433504", "text": "def logic_app_resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"logic_app_resource_id\")", "title": "" }, { "docid": "a033072469d3761d249fc42449081fa4", "score": "0.63433504", "text": "def logic_app_resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"logic_app_resource_id\")", "title": "" }, { "docid": "0f36e37714e6b2c8dca60020b408e2f0", "score": "0.6323708", "text": "def app_name(id):\n return \"dlgr-\" + id[0:8]", "title": "" }, { "docid": "0f36e37714e6b2c8dca60020b408e2f0", "score": "0.6323708", "text": "def app_name(id):\n return \"dlgr-\" + id[0:8]", "title": "" }, { "docid": "0b1683fd5965b9acadd5e96982c42170", "score": "0.6321523", "text": "def application_object_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_object_id\")", "title": "" }, { "docid": "57079e1b4be0eb13e6b2a60ea6cf58b4", "score": "0.6312563", "text": "def get_app_id_from_app_config(cls, app_dir):\n app_config_file = cls.get_config_file_from_dir(app_dir)\n if cls.FILE_IS_YAML.search(app_config_file):\n yaml_contents = yaml.safe_load(cls.read_file(app_config_file))\n if 'application' in yaml_contents and yaml_contents['application'] != '':\n return yaml_contents['application']\n else:\n raise AppEngineConfigException(\"No application id set in your app.yaml\")\n else:\n xml_contents = cls.read_file(app_config_file)\n app_id_matchdata = cls.JAVA_APP_ID_REGEX.search(xml_contents)\n if app_id_matchdata:\n return app_id_matchdata.group(1)\n else:\n raise AppEngineConfigException(\"No application id set in your \" + \\\n \"appengine-web.xml\")", "title": "" }, { "docid": "0ee0c981cdf909ace84d3584f35a0970", "score": "0.63101834", "text": "def loan_application_id(self) -> str:\n return self._loan_application_id", "title": "" }, { "docid": "3f7adbbb0b1ed8d261861503287eabf6", "score": "0.63029295", "text": 
"def app_id(self, value: str):\n self._app_id = value", "title": "" }, { "docid": "d65f6918d2c269796f47e1fd3692a416", "score": "0.6300164", "text": "def application_object_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_object_id\")", "title": "" }, { "docid": "a0a8f92ed98a545b04d1229b4677d027", "score": "0.62516546", "text": "def application_name(self):\n return ch_core.hookenv.application_name()", "title": "" }, { "docid": "cec3af582bdde463e9756c6c663d66b6", "score": "0.6239586", "text": "def getAppId(self):\n if not self.__appid:\n self.fatal(\"Error: getAppId: Application ID was not set. Aborting.\")\n return self.__appid", "title": "" }, { "docid": "deeb5ab1b3bff215756b1c781f6a32df", "score": "0.6237081", "text": "def app_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"app_name\")", "title": "" }, { "docid": "c36970c82c5ba83238cf902221bbdddc", "score": "0.6219525", "text": "def rest_api_id(self) -> Optional[str]:\n return pulumi.get(self, \"rest_api_id\")", "title": "" }, { "docid": "9df6981e60ce4331f0af85551a80a69d", "score": "0.6209644", "text": "def application_name(self) -> str:\n return pulumi.get(self, \"application_name\")", "title": "" }, { "docid": "2eac9da9b9ba98b9ee18ecd13a68cf46", "score": "0.61978734", "text": "def get_server_id(self):\n datadir = self.starter.all_instances[0].basedir / \"data\"\n server_file_content = json.load(open(datadir / \"SERVER\", encoding=\"utf-8\"))\n server_id = server_file_content[\"serverId\"]\n return server_id", "title": "" }, { "docid": "698fe8fee82b707c6db3d2282a308b55", "score": "0.61974394", "text": "def get_unique_app_id(self, instance):\n return (\n f\"{settings.MITOL_HUBSPOT_API_ID_PREFIX}-{B2B_ORDER_PREFIX}-{instance.id}\"\n )", "title": "" }, { "docid": "698fe8fee82b707c6db3d2282a308b55", "score": "0.61974394", "text": "def get_unique_app_id(self, instance):\n return (\n f\"{settings.MITOL_HUBSPOT_API_ID_PREFIX}-{B2B_ORDER_PREFIX}-{instance.id}\"\n )", "title": "" }, { "docid": "c6354e1f09d7f4f498e9bfe0d582c0c8", "score": "0.619514", "text": "def api_id(self):\n\t\t\n\t\treturn self.get_api_id()", "title": "" }, { "docid": "f5ad8720a9e79916e440b54b61fe5f92", "score": "0.61816895", "text": "def get_running_app_name(self):\n return self.get_status()['running-app-name']", "title": "" }, { "docid": "d810e468a1ddc58830d78d980dddecf8", "score": "0.6181058", "text": "def device_app_instance_id(self):\n return self.properties.get(\"DeviceAppInstanceId\", None)", "title": "" }, { "docid": "0c0b2c65481ac04b1c5a2c46d7d27cea", "score": "0.6179471", "text": "def aad_server_app_secret(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"aad_server_app_secret\")", "title": "" }, { "docid": "7330831938a4a656bab6f226c9ec2466", "score": "0.6177942", "text": "def gtk_application_id(self):\n return self.get_text_property(\"_GTK_APPLICATION_ID\")", "title": "" }, { "docid": "d8102213d7a74bb7f7a7230b56d43251", "score": "0.6171372", "text": "def applicationID(self) -> Optional[int]:\n return getattr(self, ApplicationID, None)", "title": "" }, { "docid": "ba366746eef5fb8b0fe4b16e8f07f177", "score": "0.6166271", "text": "def app(self):\n return self._proto.http_application", "title": "" }, { "docid": "17e1d4966601e19d9babcce9bc8f1e9c", "score": "0.6152635", "text": "def application(self):\n return self._get_field(\"application\")", "title": "" }, { "docid": "c05cbc1820ec8f1357b327bcfbbe9a36", "score": "0.61405677", "text": "def arc_application_client_id(self) -> Optional[str]:\n return 
pulumi.get(self, \"arc_application_client_id\")", "title": "" }, { "docid": "e559408c88ba74259917521c5f21f48b", "score": "0.6106719", "text": "def get_app_name(self):\n if \"app\" in self.properties:\n return self.properties[\"app\"].display_name\n return None", "title": "" }, { "docid": "76ef6b10b2f9837c03d0a66886095b9b", "score": "0.6106359", "text": "def get_application(self, app_id):\n return self._make_request('get', '/users/%s/applications/%s' % (self.user_id, app_id))[0]", "title": "" }, { "docid": "02d730f3a8eb701a949f2647176b5518", "score": "0.6104514", "text": "def app_key(self) -> str:\n return self._api.app_key", "title": "" }, { "docid": "68b3c47843dc84f6ac74270e3aa2e5d5", "score": "0.60886276", "text": "def client_application(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_application\")", "title": "" }, { "docid": "51a837fb6eb91bc4c6b93228d8de7145", "score": "0.60865045", "text": "def app_service_plan_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"app_service_plan_id\")", "title": "" }, { "docid": "e7114052ef36de6a8252dd48b907a260", "score": "0.608621", "text": "def get_edamam_id():\n return os.environ.get('EDAMAM_APP_ID')", "title": "" }, { "docid": "b76b8b9c5ab8ac7572ae42202f0c9395", "score": "0.6080834", "text": "def demo_app_link_id(name):\n return 'app-link-id-{}'.format(name.replace(\"_\", \"-\"))", "title": "" }, { "docid": "a73de360c5e88974435671d6226b1bd8", "score": "0.6065663", "text": "def application(self):\n\n return self._get_field(\"application\")", "title": "" }, { "docid": "6b184a66da615ed4875824f56c2f7d6c", "score": "0.60558224", "text": "def rest_api_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"rest_api_id\")", "title": "" } ]
bfc90003e943be34c028aa78b08084bc
Evaluate a model on a given CV split
[ { "docid": "59ccda8a4a49b6a84deb090dab951621", "score": "0.6733805", "text": "def compute_evaluation(model, cv_split_filename, params=None,\r\n train_size=1.0, mmap_mode='r',\r\n scoring=None, dump_model=False,\r\n dump_predictions=False, dump_folder='.'):\r\n # All module imports should be executed in the worker namespace to make\r\n # possible to run an an engine node.\r\n from time import time\r\n from sklearn.externals import joblib\r\n\r\n X_train, y_train, X_test, y_test = joblib.load(\r\n cv_split_filename, mmap_mode=mmap_mode)\r\n\r\n # Slice a subset of the training set for plotting learning curves\r\n if train_size <= 1.0:\r\n # Assume that train_size is an relative fraction of the number of\r\n # samples\r\n n_samples_train = int(train_size * X_train.shape[0])\r\n else:\r\n # Assume that train_size is an absolute number of samples\r\n n_samples_train = int(train_size)\r\n X_train = X_train[:n_samples_train]\r\n y_train = y_train[:n_samples_train]\r\n\r\n # Configure the model\r\n if model is not None:\r\n model.set_params(**params)\r\n\r\n # Fit model and measure training time\r\n tick = time()\r\n model.fit(X_train, y_train)\r\n train_time = time() - tick\r\n\r\n # Compute score on training set\r\n train_score = model.score(X_train, y_train)\r\n\r\n # Compute score on test set\r\n test_score = model.score(X_test, y_test)\r\n\r\n # Wrap evaluation results in a simple tuple datastructure\r\n return (test_score, train_score, train_time,\r\n train_size, params)", "title": "" } ]
[ { "docid": "35ea1019c0b7a7f5741d8cba7b8458c8", "score": "0.68407726", "text": "def evaluate_model(model_name, model, x, y):\n print('%s:' % model_name)\n model.fit(x, y.values.ravel())\n print('CV f1_micro (not reusing data): %s' % np.mean(cross_val_score(model,\n x, y.values.ravel(), cv=5, scoring='f1_micro')))", "title": "" }, { "docid": "6e3b00ea569d3b4775dd801f447a796b", "score": "0.6775238", "text": "def evaluate_model(dataset, split):\n data, ne_list, model = load_data_and_initialize_model(dataset, split_list=[split])\n \n saver = tf.train.Saver()\n saver.restore(model.sess, \"./tmp.model\")\n ner_hat_list = predict_dataset(model, data[split][\"tree_pyramid_list\"], ne_list)\n score = evaluate_prediction(data[split][\"ner_list\"], ner_hat_list)\n print \"[%s]\" % split + \" precision=%.1f%% recall=%.1f%% f1=%.3f%%\" % score\n return", "title": "" }, { "docid": "958b869ac03bccb5df39cede8da933d2", "score": "0.66875386", "text": "def fit(self, X, y=None, cv=5, eval_split=0.8):\n for model in self.models:\n kfold = KFoldCV(model, k=cv, eval_split=eval_split)\n hparams = {} # TODO: nerds/issues/24 (passing hyperparams).\n # Right now it will fall back to the default hyperparameters.\n score = kfold.cross_validate(X, hparams)\n self.confidence_scores.append(score)\n return super().fit(X, y)", "title": "" }, { "docid": "0fab813d78a7f929649809af9c9a0bfa", "score": "0.66832787", "text": "def doCV(modelInit, training_size, validation_size, maxK=100, saveModel = False):\n if training_size + validation_size > 100:\n print('validation_size + training_size should be smaller than 101')\n return\n if training_size > 100:\n print('TRAINING_SIZE should be less than 100') \n return\n if training_size < 1:\n print('VALIDATION_SIZE should be bigger than 1') \n return \n print (\"Run Cross Validation on a training set of size \"+str(training_size)+\" with a validation set size of \"+str(validation_size))\n \n \"\"\" --- RUN CROSS VALIDATION --- \"\"\"\n total_size = training_size + validation_size\n valset_start_id = 1\n all_training_ids = list(range(1, total_size+1))\n score = 0\n run = 1\n while valset_start_id < total_size and run <= maxK:\n print(\"-----------------------------------------------\")\n print(\" run: \"+str(run))\n print(\"-----------------------------------------------\")\n model = modelInit(run)\n #define validation and training set for each run\n valset_end_id = valset_start_id+validation_size\n val_set = range(valset_start_id, valset_end_id)\n training_set = []\n if valset_start_id > 1:\n training_set += all_training_ids[:valset_start_id-1]\n if valset_end_id < total_size:\n training_set += all_training_ids[valset_end_id:]\n #run\n train_classifier(model, training_set, saveModel) # (model isn't stored in tmp; only locally)\n new_score = validate(model, val_set, valset_start_id==1) # calculates F1 Score of the valset using the trained model\n print(\"score of run \"+str(run)+\": \"+str(new_score))\n #average\n score = score + ( new_score - score) / run\n run += 1\n valset_start_id = valset_end_id\n \n return score", "title": "" }, { "docid": "f5ceba4df38bdf51f82b30840e1036c8", "score": "0.6546712", "text": "def evaluate(self, splits=[\"train\", \"val\"], constant_preprocessor=None):\n if self.model:\n for split in splits:\n self.__evaluate_and_log_to_mlflow(split=split, balance_method=None, constant_preprocessor=constant_preprocessor)\n if split == \"val\":\n self.__evaluate_and_log_to_mlflow(split=split, balance_method=\"translate\", constant_preprocessor=constant_preprocessor)\n 
elif split == \"train\" and self.data.balance_method:\n self.__evaluate_and_log_to_mlflow(split=split, balance_method=self.data.balance_method, constant_preprocessor=constant_preprocessor)\n else:\n logger.info(f\"No model available! Please run `train()` first.\")", "title": "" }, { "docid": "f7e36fea9dbaf61d295d936c3a86c152", "score": "0.6545675", "text": "def compute_evaluation(config, cv_split_filename,\n train_fraction=1.0, mmap_mode='r'):\n # All module imports should be executed in the worker namespace to make\n # possible to run an an engine node.\n from time import time, sleep\n from sklearn.externals import joblib\n\n time_start = time()\n\n pre_processing = config['pre_processing']\n classifier = config['classifier']\n\n X_train, y_train, X_test, y_test = joblib.load(\n cv_split_filename, mmap_mode=mmap_mode)\n\n # Slice a subset of the training set for plotting learning curves\n n_samples_train = int(train_fraction * X_train.shape[0])\n X_train = X_train[:n_samples_train]\n y_train = y_train[:n_samples_train]\n\n pre_processing.fit(X_train)\n X_train_pp = pre_processing.transform(X_train)\n\n # Fit model and measure training time\n tick = time()\n classifier.fit(X_train_pp, y_train)\n train_time = time() - tick\n\n # Compute score on training set\n train_score = classifier.score(X_train_pp, y_train)\n\n # Compute score on test set\n X_test_pp = pre_processing.transform(X_test)\n test_score = classifier.score(X_test_pp, y_test)\n\n sleep(3)\n\n time_end = time()\n\n # Wrap evaluation results in a simple tuple datastructure\n return {\n 'loss': 1 - test_score,\n 'loss_': {\n 'duration': time_end - time_start,\n 'erate': 1 - test_score, #XXX should be validation error\n },\n 'test_score': test_score,\n 'train_score': train_score,\n 'train_time': train_time,\n 'train_fraction': train_fraction,\n }", "title": "" }, { "docid": "a54377fd5853c98058b72cddef34f435", "score": "0.6501031", "text": "def evaluate(self,fit = True,split = True,cross_validation = False,**kwargs):\n\n # Evaluate models using train test split\n if split:\n\n # If already prepared a split, we will be using it\n if hasattr(self.dataset,\"splits\"):\n X_train,X_test,y_train,y_test = self.dataset.splits\n\n # Otherwise prepare the split using function arguments\n else:\n X_train,X_test,y_train,y_test = self.dataset.train_test_split(inplace = False,**kwargs)\n \n # Fit the models\n if fit:\n self.fit(X = X_train,y = y_train)\n\n # Predict the probability\n pred = self.predict(X = X_test)\n pred[\"true\"] = y_test.values\n pred.insert(0,\"index\",y_test.index)\n\n # Compute metrics\n metrics,confusion_matrices = self.compute_metrics(pred)\n\n return pred,metrics,confusion_matrices\n\n else:\n raise ProblemsError(\"Cross validation is not implemented right now\")", "title": "" }, { "docid": "13f4cba2dbef2956ec9fb4cc225949e5", "score": "0.6428906", "text": "def evaluate_cv(self, clf, embedding, n_splits):\n embedding = embedding[self.label_ind, :]\n results = defaultdict(list)\n for i in range(10):\n rskf = StratifiedKFold(n_splits=n_splits, shuffle=True)\n for train_idx, test_idx in rskf.split(embedding, self.labels):\n x_train, x_test, y_train, y_test, w_train = self._get_split(embedding, test_idx, train_idx)\n pred, probs = self.get_predictions(\n clf,\n x_train,\n x_test,\n y_train,\n y_test,\n sample_weights=w_train\n )\n self._assemble_results(y_test, i, pred, probs[:, 1], results)\n return results", "title": "" }, { "docid": "2c7e1758546e6b7e217be2aed71d04a1", "score": "0.6404894", "text": "def evaluate_model(cv, 
X_test, y_test, category_names):\n y_pred = cv.predict(X_test)\n for i in range(35):\n print(category_names[i])\n print(classification_report(y_test[:,i], y_pred[:,i]))\n print('\\nBest Parameters:', cv.best_params_)", "title": "" }, { "docid": "8bed545c4cac25bc21967d632302bb31", "score": "0.63968045", "text": "def evalrank(model_path, data_path=None, split='dev', fold5=False):\n # load model and options\n checkpoint = torch.load(model_path)\n opt = checkpoint['opt']\n\n if data_path is not None:\n opt.data_path = data_path\n opt.vocab_path = \"./vocab/\"\n # load vocabulary\t \n vocab = pickle.load(open(os.path.join(\n opt.vocab_path, 'vocab.pkl'), 'rb'))\n \n opt.vocab_size = len(vocab)\n\n # construct model\n model = VSE(opt)\n \n # load model state\n model.load_state_dict(checkpoint['model'])\n print(opt)\t\n\t\n ####### input video files\n path= os.path.join(opt.data_path, opt.data_name)+\"/Caption/charades_\"+ str(split) + \".csv\"\n df=pandas.read_csv(open(path,'rb'))\n #columns=df.columns\n inds=df['video']\n desc=df['description']\n\n print('Loading dataset')\n data_loader = get_test_loader(split, opt.data_name, vocab, opt.crop_size,\n opt.batch_size, opt.workers, opt)\n\n print('Computing results...')\n img_embs, cap_embs, attn_index, lengths_img = encode_data(model, data_loader)\n\n print(img_embs.shape)\n print(cap_embs.shape)\n print('Images: %d, Captions: %d' %\n (img_embs.shape[0], cap_embs.shape[0]))\n\n\t# retrieve moments\n r13, r15, r17 = t2i(img_embs, cap_embs, df, attn_index, lengths_img, measure=opt.measure, return_ranks=True)", "title": "" }, { "docid": "b64d67b52c3c655815c9bb4aec01a76f", "score": "0.63791084", "text": "def test_model(model, X_test, y_test, n_splits):\n n_splits_x_test = n_splits\n number = 1\n # val es un trozo que nunca se repite\n for (_, one_split) in KFold(n_splits=n_splits_x_test).split(X_test):\n y_pred = model.predict(X_test[one_split])\n print(\"-----------------------------\")\n print(\"Mean squared error test\", number, \": \", mean_squared_error(y_test[one_split], y_pred))\n number += 1", "title": "" }, { "docid": "467a2f1347d8a29bc0dddaba6749e583", "score": "0.62942666", "text": "def eval_models_CV(X, y, models, label_col_name='label', prediction_col_name='prediction', value_col_name='value',\n n_splits=None, verbose=True, window_size_for_metrics=3):\n\n if X is None:\n raise TypeError\n if y is None:\n raise TypeError\n\n if n_splits is None:\n n_splits = int(len(X.index.levels[0]) / 2)\n print(\"Running {} splits\".format(n_splits))\n\n if n_splits > len(X.index.levels[0]):\n n_splits = int(len(X.index.levels[0]) / 2)\n print(\"Warning: n_splits cannot be larger than the number of dates. 
Reducing n_splits to {}\".format(n_splits))\n\n per_model_res = {}\n\n datetimeindex = X.index.levels[0]\n categories = X.index.levels[1]\n for model in models:\n counter = 0\n metrics = _initialize_metrics(categories)\n\n tscv = TimeSeriesSplit(n_splits=n_splits)\n if verbose:\n print(\"Model: {}\".format(str(model)))\n all_iterations_res = []\n for train, test in tscv.split(datetimeindex):\n if verbose:\n print(\"Iteration: %s, Train size: %s, Test size: %s, Data size: %s \" % (\n counter, len(train), len(test), len(datetimeindex)))\n\n train_samples = _prep_set(X, datetimeindex[train])\n train_labels = _prep_set(y, datetimeindex[train])\n\n test_samples = _prep_set(X, datetimeindex[test])\n test_labels = _prep_set(y, datetimeindex[test])\n\n if (test_labels is not None) and (len(test_labels) > 0):\n # run the model\n model.fit(X=train_samples, y=train_labels)\n prediction = model.predict(X=test_samples)\n\n # evaluate results, aggregate metrics\n metrics = get_evaluation_metrics(test_samples, prediction, test_labels, metrics,\n value_col_name=value_col_name, label_col_name=label_col_name,\n prediction_col_name=prediction_col_name,\n window_size_for_metrics=window_size_for_metrics)\n\n counter += 1\n\n final_metrics = get_final_metrics(metrics)\n if verbose:\n print(final_metrics)\n per_model_res[str(model.__name__)] = final_metrics\n return per_model_res", "title": "" }, { "docid": "55ed21c0681e1d3532dcd65d9c427556", "score": "0.6263902", "text": "def cv_deep_learning_pred( X, y, nb_classes = 3, mode = \"CNNC_Name\", \n\t\t\t\t\t\t\t l = None, param_d = None, graph = True, n_splits = 5):\n\t\n\tKF = model_selection.KFold(n_splits, shuffle=True)\n\tkf = KF.split(X)\n\n\tdcnn_score_l = []\n\tc_wb_l = []\n\ty_cv = np.copy( y)\n\tfor tr, te in kf:\n\t\tX_train, y_train = X[tr,:], y[tr]\n\t\tX_test, y_test = X[te,:], y[te]\n\t\t\n\t\tif mode == \"CNNC_Name\":\n\t\t\tmodel = kkeras.CNNC_Name( param_d[\"n_cv_flt\"], param_d[\"n_cv_ln\"], param_d[\"cv_activation\"],\n\t\t\t\t\tl=l)\n\t\t\tn_flt = param_d[\"n_cv_flt\"]\n\t\telif mode == \"CNNC_Name_Border\":\n\t\t\tmodel = kkeras.CNNC_Name_Border( param_d[\"n_cv_flt\"], param_d[\"n_cv_ln\"], param_d[\"cv_activation\"],\n\t\t\t\t\tl=l, border_mode = 'valid')\n\t\t\tn_flt = param_d[\"n_cv_flt\"] \n\t\telif mode == \"MLPC_Name\":\n\t\t\tmodel = kkeras.MLPC_Name(l=l)\n\t\t\tn_flt = l[1]\n\t\telse:\n\t\t\traise ValueError(\"The given mode is not supported: mode={}\".format(mode))\n\t\t\t\n\t\tmodel.fit( X_train, y_train, X_test, y_test, nb_classes, batch_size=6, nb_epoch = 5)\n\t\ttr_score = model.score( X_train, y_train)\n\t\t#dcnn_score_l.append(dcnn_score)\n\t\tprint( \"Training Accuracy:\", tr_score)\n\n\t\tdcnn_score = model.score( X_test, y_test)\n\t\tdcnn_score_l.append(dcnn_score)\n\t\tprint( \"Testing Accuracy:\", dcnn_score)\n\t\t\n\t\tc_w, c_b = model.get_c_wb()\n\t\tprint( \"c_w.shape=\", c_w.shape)\n\t\tc_w = c_w.reshape(-1, n_flt)\n\t\tc_wb_l.append( (c_w, c_b))\n\n\t\ty_cv[ te] = model.predict( X_test)\n\n\tprint( dcnn_score_l)\n\tprint( \"Mean:{0}, Std:{1}\".format( np.mean( dcnn_score_l), np.std( dcnn_score_l)))\n\n\tif graph:\n\t\t# One of weight vectors are drawn.\n\t\tc_w = c_wb_l[0][0] # 0 = c_w, 1 = c_b\n\t\tfor ll in range(n_flt):\n\t\t\t#plt.figure()\n\t\t\tplt.plot( c_w[:,ll], label=\"Filter #{}\".format(ll))\n\t\tplt.legend() \n\t\n\treturn dcnn_score_l, c_wb_l, y_cv", "title": "" }, { "docid": "2dc2febfba7f9fe66e990068a9da48f5", "score": "0.6254168", "text": "def compute_score_crossvalidation(self, model, n_splits):\n\n if 
n_splits is not None and isinstance(n_splits, int):\n # we create a copy because we don't want to \"modify\" an already trained model\n copy_model = cp.deepcopy(model)\n copy_model.estimator = cp.deepcopy(self.base_estimator)\n # copy_model.estimator.reset()\n score = 0\n kf = KFold(n_splits=self.n_splits, shuffle=True, random_state=0)\n for train_idx, test_idx in kf.split(X=self.X_chunk, y=self.y_chunk):\n X_train, y_train = self.X_chunk[train_idx], self.y_chunk[train_idx]\n X_test, y_test = self.X_chunk[test_idx], self.y_chunk[test_idx]\n copy_model.estimator = self.train_model(\n model=copy_model.estimator,\n X=X_train,\n y=y_train,\n classes=copy_model.seen_labels,\n sample_weight=None)\n score += self.compute_score(model=copy_model, X=X_test, y=y_test) / self.n_splits\n else:\n # compute the score on the entire data chunk\n score = self.compute_score(X=self.X_chunk, y=self.y_chunk, model=model)\n\n return score", "title": "" }, { "docid": "6474e07db18db453049714b335960899", "score": "0.6225613", "text": "def CV_learning(paras,n_splits,scoring='roc_auc'):\n kf = StratifiedKFold(n_splits=n_splits, shuffle=True)\n scores = []\n pred_proba_group = []\n try:\n scorer = check_scoring(paras.model, scoring=scoring)\n except:pass\n for train, test in kf.split(paras.X,paras.y):\n X_train = paras.X.iloc[train,:]\n y_train = paras.y.iloc[train]\n X_test = paras.X.iloc[test,:]\n y_test = paras.y.iloc[test]\n paras.model.fit(X_train,y_train)\n pred_proba_tmp = DataFrame(cal_pred_prob(paras.model,X_test),index = X_test.index)\n pred_proba_group.append(pred_proba_tmp)\n try:\n scores.append(scorer(paras.model,X_test,y_test))\n except:\n if scoring == 'aic':\n scores.append(Criterion(y_test,pred_proba_tmp.iloc[:,1],len(X_test.columns)).AIC())\n elif scoring == 'bic':\n scores.append(Criterion(y_test,pred_proba_tmp.iloc[:,1],len(X_test.columns)).BIC())\n else:\n raise ValueError('%r is not a valid scoring value.' \n 'Use sorted(sklearn.metrics.SCORERS.keys()' \n 'to get valid options.' 
%(scoring))\n pred_proba_all = concat(pred_proba_group).sort_index()\n return(scores,pred_proba_group,pred_proba_all)", "title": "" }, { "docid": "d00c26c2ff72430e2d36022d70aa39fd", "score": "0.62111855", "text": "def results_classification(\n model_class,\n model_name,\n run_id,\n ensemble_folds,\n test_set,\n data_params,\n robust,\n device,\n eval_type=\"checkpoint\",\n):\n\n print(\n \"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\"\n \"------------Evaluate model on Test Set------------\\n\"\n \"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\"\n )\n\n test_generator = DataLoader(test_set, **data_params)\n\n y_pre_logits = []\n y_logits = []\n if robust:\n y_pre_ale = []\n\n acc = np.zeros((ensemble_folds))\n roc_auc = np.zeros((ensemble_folds))\n precision = np.zeros((ensemble_folds))\n recall = np.zeros((ensemble_folds))\n fscore = np.zeros((ensemble_folds))\n\n for j in range(ensemble_folds):\n\n if ensemble_folds == 1:\n resume = f\"models/{model_name}/{eval_type}-r{run_id}.pth.tar\"\n print(\"Evaluating Model\")\n else:\n resume = f\"models/{model_name}/{eval_type}-r{j}.pth.tar\"\n print(\"Evaluating Model {}/{}\".format(j + 1, ensemble_folds))\n\n assert os.path.isfile(resume), f\"no checkpoint found at '{resume}'\"\n checkpoint = torch.load(resume, map_location=device)\n assert (\n checkpoint[\"model_params\"][\"robust\"] == robust\n ), f\"robustness of checkpoint '{resume}' is not {robust}\"\n\n model = model_class(**checkpoint[\"model_params\"], device=device,)\n model.to(device)\n model.load_state_dict(checkpoint[\"state_dict\"])\n\n with torch.no_grad():\n idx, comp, y_test, output = model.predict(generator=test_generator)\n\n if model.robust:\n mean, log_std = output.chunk(2, dim=1)\n logits = sampled_softmax(mean, log_std, samples=10).data.cpu().numpy()\n pre_logits = mean.data.cpu().numpy()\n pre_logits_std = torch.exp(log_std).data.cpu().numpy()\n y_pre_ale.append(pre_logits_std)\n else:\n pre_logits = output.data.cpu().numpy()\n\n logits = softmax(pre_logits, axis=1)\n\n y_pre_logits.append(pre_logits)\n y_logits.append(logits)\n\n y_test_ohe = np.zeros_like(pre_logits)\n y_test_ohe[np.arange(y_test.size), y_test] = 1\n\n acc[j] = accuracy_score(y_test, np.argmax(logits, axis=1))\n roc_auc[j] = roc_auc_score(y_test_ohe, logits)\n precision[j], recall[j], fscore[j] = precision_recall_fscore_support(\n y_test, np.argmax(logits, axis=1), average=\"weighted\"\n )[:3]\n\n if ensemble_folds == 1:\n print(\"\\nModel Performance Metrics:\")\n print(\"Accuracy : {:.4f} \".format(acc[0]))\n print(\"ROC-AUC : {:.4f}\".format(roc_auc[0]))\n print(\"Weighted Precision : {:.4f}\".format(precision[0]))\n print(\"Weighted Recall : {:.4f}\".format(recall[0]))\n print(\"Weighted F-score : {:.4f}\".format(fscore[0]))\n else:\n acc_avg = np.mean(acc)\n acc_std = np.std(acc)\n\n roc_auc_avg = np.mean(roc_auc)\n roc_auc_std = np.std(roc_auc)\n\n precision_avg = np.mean(precision)\n precision_std = np.std(precision)\n\n recall_avg = np.mean(recall)\n recall_std = np.std(recall)\n\n fscore_avg = np.mean(fscore)\n fscore_std = np.std(fscore)\n\n print(\"\\nModel Performance Metrics:\")\n print(f\"Accuracy : {acc_avg:.4f} +/- {acc_std:.4f}\")\n print(f\"ROC-AUC : {roc_auc_avg:.4f} +/- {roc_auc_std:.4f}\")\n print(f\"Weighted Precision : {precision_avg:.4f} +/- {precision_std:.4f}\")\n print(f\"Weighted Recall : {recall_avg:.4f} +/- {recall_std:.4f}\")\n print(f\"Weighted F-score : {fscore_avg:.4f} +/- {fscore_std:.4f}\")\n\n # calculate metrics and errors with associated errors for 
ensembles\n ens_logits = np.mean(y_logits, axis=0)\n\n y_test_ohe = np.zeros_like(ens_logits)\n y_test_ohe[np.arange(y_test.size), y_test] = 1\n\n ens_acc = accuracy_score(y_test, np.argmax(ens_logits, axis=1))\n ens_roc_auc = roc_auc_score(y_test_ohe, ens_logits)\n ens_precision, ens_recall, ens_fscore = precision_recall_fscore_support(\n y_test, np.argmax(ens_logits, axis=1), average=\"weighted\"\n )[:3]\n\n print(\"\\nEnsemble Performance Metrics:\")\n print(f\"Accuracy : {ens_acc:.4f} \")\n print(f\"ROC-AUC : {ens_roc_auc:.4f}\")\n print(f\"Weighted Precision : {ens_precision:.4f}\")\n print(f\"Weighted Recall : {ens_recall:.4f}\")\n print(f\"Weighted F-score : {ens_fscore:.4f}\")\n\n # NOTE we save pre_logits rather than logits due to fact that with the\n # hetroskedastic setup we want to be able to sample from the gaussian\n # distributed pre_logits we parameterise.\n core = {\"id\": idx, \"composition\": comp, \"target\": y_test}\n\n results = {}\n for n_ens, y_pre_logit in enumerate(y_pre_logits):\n pred_dict = {\n f\"class-{lab}-pred_{n_ens}\": val for lab, val in enumerate(y_pre_logit.T)\n }\n results.update(pred_dict)\n if model.robust:\n ale_dict = {\n f\"class-{lab}-ale_{n_ens}\": val\n for lab, val in enumerate(y_pre_ale[n_ens].T)\n }\n results.update(ale_dict)\n\n df = pd.DataFrame({**core, **results})\n\n if ensemble_folds == 1:\n df.to_csv(\n index=False,\n path_or_buf=(f\"results/test_results_{model_name}_r-{run_id}.csv\"),\n )\n else:\n df.to_csv(\n index=False, path_or_buf=(f\"results/ensemble_results_{model_name}.csv\")\n )", "title": "" }, { "docid": "e63c8a0cfd8d7af83bbe0f0bda598ad6", "score": "0.619019", "text": "def test_train_split(clf, split):\n\n X_train, X_test, y_train, y_test, train_idx, test_idx = split\n\n clf.fit(X_train, y_train)\n\n probs = clf.predict_proba(X_test)\n train_probs = clf.predict_proba(X_train)\n\n score = fmax(probs, y_test)\n train_score = fmax(train_probs, y_train)\n\n return score, train_score, clf", "title": "" }, { "docid": "689f9115883883d0a943417edb4a2c53", "score": "0.6190071", "text": "def compute_CV(self,X_train,Y_train,CVfolds=None,scoring='neg_mean_squared_error'):\n ##- Select the number of folds for leave-one-out CV\n if CVfolds is None:\n CVfolds = X_train.shape[0]\n\n ##- Perform CV for both regressor performance evaluation and hyperparameter tuning\n estimator = self.pipe\n param_grid = get_params_dict(self.model)\n model_grid = GridSearchCV(estimator,param_grid,scoring=scoring,cv=CVfolds,return_train_score=False)\n model_grid.fit(X_train,Y_train)\n\n return model_grid.best_score_, model_grid.best_estimator_, model_grid.best_params_", "title": "" }, { "docid": "104e0b1a30de1e5e8b35d33f0f554554", "score": "0.618767", "text": "def cross_validate_submodels(self, X, Y, cv=None, n_jobs=1):\n d_submodel_results = defaultdict(OrderedDict)\n for submodel in self.submodels:\n # Get node data\n attributes = self.get_submodel_field(submodel=submodel, field=\"attributes\")\n clf = clone(self.get_submodel_field(submodel=submodel, field=\"clf\"))\n\n if cv is None:\n assert self.get_submodel_field(submodel=submodel, field=\"cv\") is not None, \"`cv` must be provided in `add_submodel` if `cv is None` in this method\"\n _cv = self.get_submodel_field(submodel=submodel, field=\"cv\")\n else:\n _cv = cv\n index = self.get_submodel_field(submodel=submodel, field=\"index\")\n self._check_cv(_cv, index)\n\n # If a custom cv is provided then there must be an index provided as well\n condition_1 = index is not None\n condition_2 = _cv is not 
None\n if all([condition_1, condition_2]):\n y_query = Y[submodel][index]\n X_query = X.loc[index,attributes]\n # If a integer cv is provided\n else:\n y_query = Y[submodel].dropna()\n X_query = X.loc[y_query.index,attributes]\n\n # Stats\n scores = model_selection.cross_val_score(clf, X=X_query.values, y=y_query.values, scoring=\"accuracy\", cv=_cv, n_jobs=n_jobs)\n accuracy = np.mean(scores)\n sem = stats.sem(scores)\n # Update\n self.set_submodel_field(submodel=submodel, field=\"accuracy\", obj=accuracy)\n self.set_submodel_field(submodel=submodel, field=\"sem\", obj=sem)\n self.submodel_performance[submodel] = accuracy\n d_submodel_results[submodel][\"accuracy\"] = accuracy\n d_submodel_results[submodel][\"sem\"] = sem\n d_submodel_results[submodel][\"scores\"] = scores\n\n # Scores\n df_performance = pd.DataFrame(d_submodel_results).T.loc[:,[\"accuracy\", \"sem\", \"scores\"]]\n df_performance.index.name = \"submodel\"\n self.add_weights_from_accuracy(df_performance[\"accuracy\"])\n self._cross_validation_complete = True\n return df_performance", "title": "" }, { "docid": "07fce7cb4d70249ff28204648e3ee2f3", "score": "0.6184491", "text": "def evaluate_models_with_kfold_validation(seed, models, X_train, Y_train, scoring, num_splits):\n results = list()\n names = list()\n mean = int()\n for name, model in models:\n # rskf = model_selection.RepeatedStratifiedKFold(n_splits=num_kfold_splits, n_repeats=3, random_state=seed)\n skf = KFold(n_splits=num_splits, random_state=seed)\n cv_results = cross_val_score(model, X_train, Y_train, cv=skf, scoring=scoring)\n results.append(cv_results)\n names.append(name)\n print(\"Algorithm {0} - Mean: {1:.3f}, Std.deviation: {2:.3f}\".format(name, cv_results.mean(), cv_results.std()))\n if cv_results.mean() >= mean:\n mean = cv_results.mean()\n best_algorithm = name\n return best_algorithm, results, names", "title": "" }, { "docid": "cf38a99d3b412fedbd6e0600ba402cd7", "score": "0.61775506", "text": "def evaluate_model(model, X_test, Y_test, category_names):", "title": "" }, { "docid": "29fb188edf5bc71501a4e907cbf4311f", "score": "0.61558455", "text": "def train_model(X_s, Xb_s, X, Xb, model_type, hyper1,\n hyper2, hyper3, n_cv=3, recall_thresh=0.8):\n # iterate over hypers, cv\n scores = []\n hyp1_best = 0\n hyp2_best = 0\n hyp3_best = 0\n score_best = 0\n recall_best = 0\n bckgnd_best = 0\n hypers = list(itertools.product(*[hyper1, hyper2, hyper3]))\n for h1, h2, h3 in hypers:\n sc, rc, bg = [], [], []\n for i in range(n_cv):\n clf = initialize_model(model_type, h1, h2, h3)\n X_train, X_test = random_train_test_split(X_s)\n Xb_train, Xb_test = random_train_test_split(Xb_s)\n if model_type == 'BiasedSVM':\n # Pseudo-Outliers, see Baldeck et al. 
(2015)\n PO, _ = random_train_test_split(np.concatenate((X_train,\n Xb_train)))\n X_ = np.concatenate((X_train, PO))\n y_ = np.concatenate((np.ones(len(X_train)),\n -1 * np.ones(len(PO))))\n clf.fit(X_, y_)\n else:\n clf.fit(X_train)\n y_pred_test = clf.predict(X_test)\n y_pred_bkgnd = clf.predict(Xb_train)\n sc_, rc_, bg_ = focal_score(y_pred_test, y_pred_bkgnd, h1, h2, h3)\n sc.append(sc_)\n rc.append(rc_)\n bg.append(bg_)\n meansc = np.mean(sc)\n meanrc = np.mean(rc)\n meanbg = np.mean(bg)\n\n if (meansc > score_best) and (meanrc > recall_thresh):\n hyp1_best = h1\n hyp2_best = h2\n hyp3_best = h3\n recall_best = meanrc\n bckgnd_best = meanbg\n score_best = meansc\n scores.append([h1, h2, h3, meansc, meanrc, meanbg])\n\n # train best model\n clf_best = initialize_model(model_type, hyp1_best, hyp2_best, hyp3_best)\n if model_type == 'BiasedSVM':\n PO, _ = random_train_test_split(np.concatenate((X_s, Xb_s)))\n X_ = np.concatenate((X_s, PO))\n y_ = np.concatenate((np.ones(len(X_s)), -1 * np.ones(len(PO))))\n clf_best.fit(X_, y_)\n else:\n clf_best.fit(X_s)\n\n # write positive/negative classes to file\n y_X = clf_best.predict(X_s)\n y_Xb = clf_best.predict(Xb_s)\n X_pos = np.concatenate((X[y_X == 1], Xb[y_Xb == 1])) # unscaled\n X_neg = np.concatenate((X[y_X == -1], Xb[y_Xb == -1])) # unscaled\n np.save('models/X_pos_unscaled_%s.npy' % model_type, X_pos)\n np.save('models/X_neg_unscaled_%s.npy' % model_type, X_neg)\n\n # write/save stuff\n clf_name = 'models/%s.pkl' % model_type\n joblib.dump(clf_best, clf_name)\n best = [clf_best, hyp1_best, hyp2_best, hyp3_best, score_best]\n print(('best: hyp1=%f, hyp2=%f, hyp3=%f, recall=%f, bckgnd=%f, score=%f')\n % (hyp1_best, hyp2_best, hyp3_best, recall_best, bckgnd_best, score_best))\n return scores, best", "title": "" }, { "docid": "b84b31c3822f200d08282adfcd6bd7c3", "score": "0.61540526", "text": "def evaluate(self, model):\n model.eval()\n cuda = torch.cuda.is_available()\n Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n ids = []\n data_dict = []\n dataiterator = iter(self.dataloader)\n while True: # all the data in val2017\n try:\n img, _, info_img, id_ = next(dataiterator) # load a batch\n except StopIteration:\n break\n info_img = [float(info) for info in info_img]\n id_ = int(id_)\n ids.append(id_)\n with torch.no_grad():\n img = Variable(img.type(Tensor))\n _, outputs = model(img)\n outputs = outputs.unsqueeze(0)\n outputs = postprocess(outputs, 80, self.confthre, self.nmsthre)\n if outputs[0] is None:\n continue\n outputs = outputs[0].cpu().data\n\n for output in outputs:\n x1 = float(output[0])\n y1 = float(output[1])\n x2 = float(output[2])\n y2 = float(output[3])\n label = self.dataset.class_ids[int(output[6])]\n box = yolobox2label((y1, x1, y2, x2), info_img)\n bbox = [box[1], box[0], box[3] - box[1], box[2] - box[0]]\n score = float(\n output[4].data.item() * output[5].data.item()\n ) # object score * class score\n A = {\n \"image_id\": id_,\n \"category_id\": label,\n \"bbox\": bbox,\n \"score\": score,\n \"segmentation\": [],\n } # COCO json format\n data_dict.append(A)\n\n annType = [\"segm\", \"bbox\", \"keypoints\"]\n\n # Evaluate the Dt (detection) json comparing with the ground truth\n if len(data_dict) > 0:\n cocoGt = self.dataset.coco\n # workaround: temporarily write data to json file because pycocotools can't process dict in py36.\n _, tmp = tempfile.mkstemp()\n json.dump(data_dict, open(tmp, \"w\"))\n cocoDt = cocoGt.loadRes(tmp)\n cocoEval = COCOeval(self.dataset.coco, cocoDt, annType[1])\n 
cocoEval.params.imgIds = ids\n cocoEval.evaluate()\n cocoEval.accumulate()\n cocoEval.summarize()\n return cocoEval.stats[0], cocoEval.stats[1]\n else:\n return 0, 0", "title": "" }, { "docid": "368bd9be167c97eca8ec1c4b7ae3b4a4", "score": "0.61147386", "text": "def evaluate_model():\n model = get_model()\n X_train, X_test, y_train, y_test = get_data()\n model.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n print(\"Accuracy test set: %0.4f\" % score)", "title": "" }, { "docid": "bde19c2cef44b6c6841d4b78c4fd068c", "score": "0.6097888", "text": "def kfold_cv(model, X, y, catg_names, n_splits=3):\n\n\n # create splits\n kf = KFold(n_splits=n_splits, shuffle=True, random_state=11)\n\n kfold = 0\n for train_idx, test_idx in kf.split(X):\n print('\\n')\n print('-'*75)\n print('\\nEvaluating Fold:', kfold)\n\n # define train and test subsets\n X_train, y_train = X.loc[train_idx], y.loc[train_idx]\n X_test, y_test = X.loc[test_idx], y.loc[test_idx]\n # train and evaluate\n model.fit(X_train.ravel(), y_train)\n evaluate_model(model, X_test.ravel(), y_test.values, catg_names)\n\n del X_train, X_test, y_train, y_test\n gc.collect()\n\n kfold += 1\n\n print('-'*75)", "title": "" }, { "docid": "da3a53247d276083da48393e2d49af94", "score": "0.6085852", "text": "def get_cv_and_modells(data, features, model, CV, scaled, folder, model_type): \n\n\t# [1] Check Inputs:\n\t# [1-1] Data contains \"Respond\", \"sid\" & all names passed in 'features'\n\tfor _feat in features:\n\t\tif _feat not in data.columns.values:\n\t\t\traise ValueError(_feat + \"is not a columname in data\")\n\t\t\t\n\tif \"Response\" not in data.columns.values:\n\t\traise ValueError(\"data needs a 'Repsonse' Column\") \n\t\t\n\tif \"sid\" not in data.columns.values:\n\t\traise ValueError(\"data needs a 'sid' Column\") \n\t\n\t# [1-2] Check whether the model has the methods needed:\n\tif not hasattr(model, 'fit'): \n\t\traise ValueError(\"model has no 'fit'-method\")\n\t\t\n\tif not hasattr(model, 'predict'): \n\t\traise ValueError(\"model has no 'predict'-method\")\n\t\t\n\tif not hasattr(model, 'get_params'): \n\t\traise ValueError(\"model has no 'get_params'-method\")\n\t\n\t# [1-3] CV should be of length two at least\n\tif len(CV) < 2: raise ValueError(\"CV has length < 2\")\n\t\n\t# [1-4] Check whether the folder to save results is existent\n\t# if it exists we count number of summaries [.csv-files] and assign \n\t# the number\n\tif not os.path.isdir(\"models/Multivariate Approach/\" + str(folder)):\n\t\traise ValueError(str(folder) + \"is not existent in 'models/Multivariate Approach/'\")\n\telse:\n\t\tnumb = 0\n\t\tfor _files_ in os.listdir(\"models/Multivariate Approach/\" + str(folder)):\n\t\t\tif \".csv\" in _files_:\n\t\t\t\tnumb = numb + 1\t\n\t\n\t# [2] Start doing the CV:\n\t# [2-1] lists to save the results of the k-fold CV for all performance measures:\n\tF1 = []\n\tAccuracy = []\n\tPrecision = []\n\tConf_Matrix = []\t\n\t\n\t# [2-2] Loop over the different Test/Train Splits\n\tprint(\"Start CV: \\n\")\n\t\n\t# Initalize a DF to save the predictions needed for stacking!\n\tres = pd.DataFrame()\n\tfor i in range(len(CV)):\n\t\n\t\t# Print the current Process:\n\t\tprint(\"CV number: \" + str(i + 1) + \" / \" + str(len(CV)))\n\t\t\n\t\t# Extract Test_Set based on the current CV:\n\t\tcurrent_test = data.loc[data[\"sid\"].isin(CV[i]), features]\n\t\tcurrent_test_response = data.loc[data[\"sid\"].isin(CV[i]), \"Response\"] \n\t\tcurrent_test_index = data.loc[data[\"sid\"].isin(CV[i]), \"sid\"]\n\t\t\n\t\t# Extract 
SIDs we use for training & select correponding training points!\n\t\ttrain_sids = []\n\t\tfor j in range(len(CV)):\n\t\t\tif j != i:\n\t\t\t\ttrain_sids = train_sids + CV[j]\n\t\t\t\t\n\t\tcurrent_train = data.loc[data[\"sid\"].isin(train_sids), features]\n\t\tcurrent_train_response = data.loc[data[\"sid\"].isin(train_sids), \"Response\"] \n\t\t\n\t\t# Feature Scaling (only if \"scaled\" = True)\n\t\tif scaled:\n\t\t\tscaler = StandardScaler()\n\t\t\t\n\t\t\tscaler.fit(current_test)\n\t\t\tcurrent_test = scaler.transform(current_test)\n\t\t\t\n\t\t\tscaler.fit(current_train)\n\t\t\tcurrent_train = scaler.transform(current_train)\n\t\t\t\n\t\t# Fit the Model and get predcitions of the testset\n\t\tmodel.fit(current_train, current_train_response)\n\t\tpredictions = model.predict(current_test)\n\t\t\n\t\t# Add Scores to the corresponding lists:\n\t\tF1.append(sklearn.metrics.f1_score(current_test_response, \n\t\t\t\t\t\t\t\t\t predictions,\n\t\t\t\t\t\t\t\t\t\t average=\"weighted\"))\n\t\t\n\t\tAccuracy.append(sklearn.metrics.accuracy_score(current_test_response, \n\t\t\t\t\t\t\t\t\t predictions))\n\t\t\n\t\tPrecision.append(sklearn.metrics.recall_score(current_test_response, \n\t\t\t\t\t\t\t predictions,\n\t\t\t\t\t\t\t\t\t average = \"weighted\"))\n\t\t\n\t\tConf_Matrix.append(sklearn.metrics.confusion_matrix(current_test_response, \n\t\t\t\t\t\t\t\t predictions))\n\t\t\n\t\t\n\t\t# If predicting probabilities works, we add them as metafeatures to\n\t\t# our metafile, else we will use only the predicted class & save it!\n\t\t# Add also the fold of the current TestSIDs\n\t\tif hasattr(model, 'predict_proba'):\n\t\t\tpredictions_prob = model.predict_proba(current_test)\n\t\t\t\n\t\t\t# join the predicitons w/ the current fold number\n\t\t\tcolstoadd = np.append(predictions_prob,\n\t\t\t\t\t\t np.repeat((i+1), len(predictions_prob)).reshape(len(predictions_prob), 1), 1)\n\t\t\t \n\t\t\tcolnames = [\"M_prob_\" + str(_i) for _i in range(12)]\n\t\t\tcolnames.append(\"fold\")\n\t\t\t\n\t\t\tres = pd.concat([res, pd.DataFrame(colstoadd,\n\t\t\t\t\t\t\t\t\t columns = colnames, \n\t\t\t\t\t\t\t\t\t index = current_test_index)])\n\t\n\t\telse:\n\t\t\tcolnames = [\"M_1\", \"fold\"]\n\t\t\t\n\t\t\t# join the predicitons w/ the current fold number\n\t\t\tcolstoadd = np.column_stack((predictions, \n\t\t\t\t\t\t np.repeat((i+1), len(predictions)).reshape(len(predictions), 1)))\n\t\t\t\n\t\t\tres = pd.concat([res, pd.DataFrame(colstoadd,\n\t\t\t\t\t\t\t\t\t columns = colnames,\n\t\t\t\t\t\t\t\t\t index = current_test_index)])\n\t\t\n\tres.to_csv(\"models/Multivariate Approach/\" + str(folder) + \"/CV_Predicitons\" + str(numb) + \".csv\")\n\t\t\n\t\n\tprint(\"CV done\\n\")\n\t# [3] Save the Results:\n\t# [3-1] Extract ParameterSettings\n\t_params = json.dumps(model.get_params())\n\t\n\t# [3-2] Create BasicShape for the Result .csv\n\tdict_to_pd = {'model_type' : model_type, 'parameters' : _params,\n\t\t\t 'features' : \" - \".join(features), \"Number\": numb}\n\t\n\t# [3-3] Add CV-Scores to the Dict:\n\tfor index in range(len(F1)):\n\t\tdict_to_pd[\"F1_\" + str(index + 1)] = F1[index]\n\t\tdict_to_pd[\"Acc_\" + str(index + 1)] = Accuracy[index]\n\t\tdict_to_pd[\"Precision_\" + str(index + 1)] = Precision[index]\n\t\tdict_to_pd[\"Conf_\" + str(index + 1)] = Conf_Matrix[index]\n\t\t\n\t# [3-4] Add mean of the scores [except for Confmatrix]\n\tdict_to_pd[\"F1_mean\"] = np.mean(F1)\n\tdict_to_pd[\"Acc_mean\"] = np.mean(Accuracy)\n\tdict_to_pd[\"Prec_mean\"] = np.mean(Precision)\n\t\n\t# [3-5] Transform it to 
pandas, order the columns and save it: \n\tpd_res = pd.DataFrame([dict_to_pd])\n\t\t\t\n\tpd_res.to_csv(\"models/Multivariate Approach/\" + str(folder) + \"/Summary\" + str(numb) + \".csv\")\n\t\n\t\"\"\"\n\tCommented out, as it takes a lot of memory which leads to errors in the VM....\n\t# [4] Train the final model [on all train data] and save it:\n\tprint(\"train the final model\")\n\tmodel.fit(data.loc[:, features], data[\"Response\"])\n\t\n\tfinal_model_name = \"models/Multivariate Approach/\"+ folder + \"/Final_Mod\" + str(numb) + \".pickle\"\n\tpickle.dump(model, open(final_model_name, 'wb'))\n\t\"\"\"", "title": "" }, { "docid": "5395a1726f3406f39679858232fbdd80", "score": "0.6078891", "text": "def evaluate(self, splits=['train', 'valid', 'test'], best=True, final=True):\n if not self.args.save:\n logging.info('No model saved! Cannot give final status.')\n return\n\n # Evaluate final model (at end of training)\n if final:\n logging.info('Getting predictions for model in last checkpoint.')\n\n # Load checkpoint model to make predictions\n checkpoint = torch.load(self.args.checkfile)\n self.model.load_state_dict(checkpoint['model_state'])\n\n # Loop over splits, predict, and output/log predictions\n for split in splits:\n predict, targets = self.predict(split)\n self.log_predict(predict, targets, split, description='Final')\n\n # Evaluate best model as determined by validation error\n if best:\n logging.info('Getting predictions for best model.')\n\n # Load best model to make predictions\n checkpoint = torch.load(self.args.bestfile)\n self.model.load_state_dict(checkpoint['model_state'])\n\n # Loop over splits, predict, and output/log predictions\n for split in splits:\n predict, targets = self.predict(split)\n self.log_predict(predict, targets, split, description='Best')\n\n logging.info('Inference phase complete!')", "title": "" }, { "docid": "5b9e2edcb04c4761b6e6eff5d553f913", "score": "0.6069931", "text": "def _eval_model(model: Model, X, y) -> None:\n logger.info(f\"Step - Evaluating {model.name} model BEGIN\")\n cv = RepeatedStratifiedKFold(n_splits=5, n_repeats=3, random_state=RANDOM_STATE)\n cross_val_score(model.model, X, ravel_y(y), scoring='f1_macro', cv=cv, verbose=10, n_jobs=-1)\n logger.info(f\"Step - Evaluating {model.name} model {timer.time_stage(f'{model.name} Evaluation')} END\")", "title": "" }, { "docid": "b4e46a6d131e30826c65948659e558ef", "score": "0.60392827", "text": "def eval_model(self, model):\n # FIXME ugly condition branches\n if self.model_class == \"PoincareNIPS\":\n alphas_to_validate = [1000, 100, 30, 10, 3, 1, 0.3, 0.1, 0]\n elif self.model_class == \"HypCones\":\n # FIXME Invalid use of variable: K is passed instead of alpha.\n alphas_to_validate = [model.K]\n else:\n # FIXME Meaningless value is passed if alpha is not needed.\n alphas_to_validate = [-1e12]\n \n input_data = self.input()[\"data\"]\n\n # Validation\n eval_result_classif, best_alpha, _, best_test_f1, best_valid_f1 = evaluations.eval_classification(\n logger=self.logger,\n task=self.task_type,\n valid_pos_path=input_data[\"valid_pos\"].path,\n valid_neg_path=input_data[\"valid_neg\"].path,\n test_pos_path=input_data[\"test_pos\"].path,\n test_neg_path=input_data[\"test_neg\"].path,\n vocab=model.kv.vocab,\n score_fn=model.kv.is_a_scores_from_indices,\n alphas_to_validate=alphas_to_validate, # 0 means only distance\n )\n\n print(eval_result_classif)\n result_str = evaluations.pretty_print_eval_map(eval_result_classif)\n self.logger.info('BEST classification ALPHA = %.3f' % best_alpha)\n 
self.logger.info(result_str)\n return float(best_test_f1), float(best_valid_f1), result_str", "title": "" }, { "docid": "7075338bf20af8a39e3b8b40769f9fc7", "score": "0.6034869", "text": "def model_train_evaluation(train_test_2c, train_test_3c, classifier_method):\r\n if classifier_method == 'knn':\r\n # Selects and fits the k-nearest neighbors classifier\r\n from sklearn.neighbors import KNeighborsClassifier\r\n classifer_2c = KNeighborsClassifier(n_neighbors=5)\r\n classifer_2c.fit(train_test_2c[0], train_test_2c[2])\r\n\r\n classifer_3c = KNeighborsClassifier(n_neighbors=5)\r\n classifer_3c.fit(train_test_3c[0], train_test_3c[2])\r\n\r\n elif classifier_method == 'svm':\r\n # Selects and fits the SVM neighbors classifier (must be feature scaled)\r\n from sklearn.preprocessing import StandardScaler\r\n sc_2c = StandardScaler()\r\n train_test_2c[0] = sc_2c.fit_transform(train_test_2c[0]) # 2c train\r\n train_test_2c[1] = sc_2c.transform(train_test_2c[1]) # 2c test\r\n\r\n sc_3c = StandardScaler()\r\n train_test_3c[0] = sc_3c.fit_transform(train_test_3c[0]) # 3c train\r\n train_test_3c[1] = sc_3c.transform(train_test_3c[1]) # 3c test\r\n\r\n from sklearn.svm import SVC\r\n classifer_2c = SVC(random_state=42)\r\n classifer_2c.fit(train_test_2c[0], train_test_2c[2])\r\n\r\n classifer_3c = SVC(random_state=42)\r\n classifer_3c.fit(train_test_3c[0], train_test_3c[2])\r\n\r\n elif classifier_method == 'nb':\r\n # Selects and fits the naive Bayes neighbors classifier\r\n from sklearn.naive_bayes import GaussianNB\r\n classifer_2c = GaussianNB()\r\n classifer_2c.fit(train_test_2c[0], train_test_2c[2])\r\n\r\n classifer_3c = GaussianNB()\r\n classifer_3c.fit(train_test_3c[0], train_test_3c[2])\r\n\r\n\r\n elif classifier_method == 'dt':\r\n # Selects and fits the decesion tree classifier\r\n from sklearn. 
tree import DecisionTreeClassifier\r\n classifer_2c = DecisionTreeClassifier(criterion='entropy', random_state=42)\r\n classifer_2c.fit(train_test_2c[0], train_test_2c[2])\r\n\r\n classifer_3c = DecisionTreeClassifier(criterion='entropy', random_state=42)\r\n classifer_3c.fit(train_test_3c[0], train_test_3c[2]) \r\n\r\n elif classifier_method == 'rf':\r\n # Selects and fits the random forest classifier\r\n from sklearn.ensemble import RandomForestClassifier\r\n classifer_2c = RandomForestClassifier(criterion='entropy', random_state=42)\r\n classifer_2c.fit(train_test_2c[0], train_test_2c[2])\r\n\r\n classifer_3c = RandomForestClassifier(criterion='entropy', random_state=42)\r\n classifer_3c.fit(train_test_3c[0], train_test_3c[2])\r\n\r\n # Predict test data\r\n y_pred_2c = classifer_2c.predict(train_test_2c[1])\r\n y_pred_3c = classifer_3c.predict(train_test_3c[1])\r\n\r\n # Calculate confusion matrix and normalized accuracy\r\n from sklearn import metrics\r\n cm_2c = metrics.confusion_matrix(train_test_2c[3], y_pred_2c)\r\n acc_2c = metrics.accuracy_score(train_test_2c[3], y_pred_2c, normalize=True)\r\n\r\n cm_3c = metrics.confusion_matrix(train_test_3c[3], y_pred_3c)\r\n acc_3c = metrics.accuracy_score(train_test_3c[3], y_pred_3c, normalize=True)\r\n\r\n # Perform k-fold cross validation for improved accuracy score\r\n from sklearn.model_selection import cross_val_score\r\n accuracies_2c = cross_val_score(classifer_2c, train_test_2c[0], train_test_2c[2], cv=10, n_jobs=-1)\r\n kfold_acc_2c = accuracies_2c.mean()\r\n kfold_std_2c = accuracies_2c.std()\r\n\r\n accuracies_3c = cross_val_score(classifer_3c, train_test_3c[0], train_test_3c[2], cv=10, n_jobs=-1)\r\n kfold_acc_3c = accuracies_3c.mean()\r\n kfold_std_3c = accuracies_3c.std()\r\n\r\n evaluation_results = [cm_2c, acc_2c, cm_3c, acc_3c, kfold_acc_2c, kfold_std_2c, kfold_acc_3c, kfold_std_3c]\r\n\r\n return evaluation_results", "title": "" }, { "docid": "b4c006e2f47be46f18ead3ff0f89fdf5", "score": "0.6032966", "text": "def my_cross_validate(X, Y,\n mltype,\n model_name='lgb_reg',\n cv=5, groups=None,\n lr_curve_ticks=5, data_sizes_frac=None,\n args=None, fit_params=None, init_params=None,\n n_jobs=1, random_state=None, logger=None, outdir='.'):\n X = pd.DataFrame(X).values\n Y = pd.DataFrame(Y).values\n\n # TODO: didn't test!\n if isinstance(cv, int) and groups is None:\n cv_folds = cv\n cv = KFold(n_splits=cv_folds, shuffle=False, random_state=random_state)\n if isinstance(cv, int) and groups is not None:\n cv_folds = cv\n cv = GroupKFold(n_splits=cv_folds)\n else:\n cv_folds = cv.get_n_splits()\n\n if is_string_dtype(groups):\n group_encoder = LabelEncoder()\n groups = group_encoder.fit_transform(groups)\n \n # ... 
Now start a nested loop of train size and cv folds ...\n tr_scores_all = [] # list dicts\n vl_scores_all = [] # list dicts\n\n if mltype == 'cls':\n if Y.ndim > 1 and Y.shape[1] > 1:\n splitter = cv.split(X, np.argmax(Y, axis=1), groups=groups)\n else:\n splitter = cv.split(X, Y, groups=groups)\n elif mltype == 'reg':\n splitter = cv.split(X, Y, groups=groups)\n\n # Placeholder to save the best model\n best_model = None\n best_score = -np.Inf\n\n # Start CV iters\n for fold_id, (tr_idx, vl_idx) in enumerate(splitter):\n if logger is not None:\n logger.info(f'Fold {fold_id+1}/{cv_folds}')\n\n # Samples from this dataset are sampled for training\n xtr = X[tr_idx, :]\n ytr = np.squeeze(Y[tr_idx, :])\n\n # A fixed set of validation samples for the current CV split\n xvl = X[vl_idx, :]\n yvl = np.squeeze(Y[vl_idx, :]) \n\n # # Confirm that group splits are correct ...\n # tr_grps_unq = set(groups[tr_idx])\n # vl_grps_unq = set(groups[vl_idx])\n # print('Total group (e.g., cell) intersections btw tr and vl: ', len(tr_grps_unq.intersection(vl_grps_unq)))\n # print('A few intersections : ', list(tr_grps_unq.intersection(vl_grps_unq))[:3])\n\n # Get the estimator\n estimator = ml_models.get_model(model_name=model_name, init_params=init_params)\n\n if 'nn' in model_name:\n from keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau, EarlyStopping, TensorBoard\n\n # Create output dir\n out_nn_model = outdir / ('cv'+str(fold_id+1))\n os.makedirs(out_nn_model, exist_ok=False)\n \n # Callbacks (custom)\n clr_triangular = CyclicLR(base_lr=0.0001, max_lr=0.001, mode='triangular')\n \n # Keras callbacks\n checkpointer = ModelCheckpoint(str(out_nn_model / 'autosave.model.h5'), verbose=0, save_weights_only=False, save_best_only=True)\n csv_logger = CSVLogger(out_nn_model / 'training.log')\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.75, patience=20, verbose=1, mode='auto',\n min_delta=0.0001, cooldown=3, min_lr=0.000000001)\n early_stop = EarlyStopping(monitor='val_loss', patience=60, verbose=1, mode='auto')\n \n # Callbacks list\n callback_list = [checkpointer, csv_logger, early_stop, reduce_lr, # keras callbacks\n clr_triangular] # custom callbacks\n\n # Fit params\n fit_params['validation_data'] = (xvl, yvl)\n fit_params['callbacks'] = callback_list\n\n # Train model\n history = estimator.model.fit(xtr, ytr, **fit_params)\n\n # Calc preds and scores TODO: dump preds\n # ... training set\n y_preds, y_true = utils.calc_preds(estimator=estimator.model, xdata=xtr, ydata=ytr, mltype=mltype)\n tr_scores = utils.calc_scores(y_true=y_true, y_preds=y_preds, mltype=mltype)\n # ... 
val set\n y_preds, y_true = utils.calc_preds(estimator=estimator.model, xdata=xvl, ydata=yvl, mltype=mltype)\n vl_scores = utils.calc_scores(y_true=y_true, y_preds=y_preds, mltype=mltype)\n\n # Save the best model\n if mltype == 'cls':\n vl_scores['f1_score'] > best_score\n best_score = vl_scores['f1_score']\n best_model = estimator\n elif mltype == 'reg':\n vl_scores['r2'] > best_score\n best_score = vl_scores['r2']\n best_model = estimator\n\n # Plot training curves\n if 'nn' in model_name:\n ml_models.plot_prfrm_metrics(history=history, title=f'cv fold: {fold_id+1}',\n skip_epochs=1, add_lr=True, outdir=out_nn_model)\n\n # Add info\n tr_scores['tr_set'] = True\n vl_scores['tr_set'] = False\n tr_scores['fold'] = 'f'+str(fold_id)\n vl_scores['fold'] = 'f'+str(fold_id)\n\n # Aggregate scores\n tr_scores_all.append(tr_scores)\n vl_scores_all.append(vl_scores)\n\n # Delete the estimator/model\n del estimator, history\n\n # Comet log fold scores\n # https://medium.com/comet-ml/building-reliable-machine-learning-models-with-cross-validation-20b2c3e32f3e\n# if (args is not None) and ('comet' in args):\n# experiment = args['comet']\n# experiment.log_metric('Fold {}'.format(fold_id), vl_scores['r2'])\n \n tr_df = scores_to_df(tr_scores_all)\n vl_df = scores_to_df(vl_scores_all)\n scores_all_df = pd.concat([tr_df, vl_df], axis=0)\n\n\n # Comet log fold scores\n# if (args is not None) and ('comet' in args):\n# experiment = args['comet']\n# experiment.log_metric('Best score', best_score)\n\n return scores_all_df, best_model", "title": "" }, { "docid": "4c47d4bc252584e236e92931d25145b9", "score": "0.6029732", "text": "def run_models(self, X, y, param_ridge):\n\n ##################################\n ## OLS CV\n ##################################\n #ols = linear_model.LinearRegression(fit_intercept=True,\n # normalize=False,\n # copy_X=True)\n #ols_cv_score = cross_validation.cross_val_score(\n # ols, X, y,\n # cv=self.cv, scoring=self.scoring,\n # n_jobs=self.n_jobs)\n \"\"\"\n self.ols_cv_score.shape = (cv,)\n \"\"\"\n\n ##################################\n ## PLS CV\n ##################################\n tuned_parameters = [{'n_components': range(1, 5)}]\n pls = PLSRegression()\n pls_cv = GridSearchCV(pls, tuned_parameters,\n cv=self.cv, scoring=self.scoring,\n n_jobs=self.n_jobs,\n refit=self.refit, iid=self.iid)\n pls_cv.fit(X, y)\n\n\n ##################################\n ## Ridge CV\n ##################################\n tuned_parameters = [{'alpha': param_ridge}]\n ridge = linear_model.Ridge(alpha = 1)\n ridge_cv = GridSearchCV(ridge, tuned_parameters,\n cv=self.cv, scoring=self.scoring,\n n_jobs=self.n_jobs,\n refit=self.refit, iid=self.iid)\n ridge_cv.fit(X, y)\n\n return (pls_cv, ridge_cv)", "title": "" }, { "docid": "d13d98ccb51bdef868d14237dc00a5e7", "score": "0.6027703", "text": "def run_cv_pred(X, y, clf, n_folds, name, results):\n # Construct a kfolds object\n kf = KFold(n_splits=n_folds)\n splits = kf.split(X, y)\n y_pred = y.copy()\n\n # Iterate through folds\n for idx, (train_index, test_index) in enumerate(splits):\n X_train, X_test = X[train_index], X[test_index]\n y_train = y[train_index]\n # Initialize a classifier with key word arguments\n clf.fit(X_train, y_train)\n try: # Gradient boosted trees do not accept sparse matrices in the predict function currently\n probs = clf.predict_proba(X_test)\n except TypeError:\n probs = clf.predict_proba(X_test.todense())\n macro, micro = mle.evaluate(probs, y[test_index])\n print 'macro F1, micro F1', macro, micro\n results[0].loc[name, 
idx] = macro\n results[1].loc[name, idx] = micro\n # y_pred[test_index] = preds\n\n return results", "title": "" }, { "docid": "04360f748e0a67f6aeddf26c0aeb4c56", "score": "0.5967814", "text": "def perform_CV(self, CV = 5, query ='*'):\n if not self.trained_models.keys():\n raise AttributeError(\"Models not trained. First call trading_train()\")\n\n for i, (coin, _ ) in enumerate(self.coins_ratios):\n sql_features = \"\"\" SELECT {}\n from crypto_main.features_1h_5m\n where symbol = '{}'\n and entry_time between '{}' and '{}'\n order by entry_time asc;\n \"\"\".format(query, self.coin_dict[coin][0], self.timeperiod_2, self.date_test)\n test_data = pd.read_sql(sql=sql_features, con=con_2)\n test_features = (test_data.drop(labels=['entry_time', 'symbol'], axis=1)).fillna(0)\n min_time = test_data['entry_time'].min() - datetime.timedelta(hours=2)\n max_time = test_data['entry_time'].max() + datetime.timedelta(hours=2)\n\n sql_price = \"\"\" SELECT reporting_date, price_eur, price_usd, price_btc\n FROM crypto_main.currency_hist_per_5_minute\n where currency_id = '{}'\n and reporting_date between '{}' and '{}'\n order by reporting_date asc;\n \"\"\".format(self.coin_dict[coin][1], min_time, max_time)\n price_df = pd.read_sql(sql=sql_price, con=con_2)\n time_df = test_data['entry_time'] + self.lag\n price_df_temp = price_df.iloc[price_df['reporting_date'].searchsorted(time_df)]\n price_df_temp['change'] = price_df['price_eur'].diff().fillna(1)\n price_df_temp['change_bool'] = np.sign(price_df_temp['change'])\n\n cv_df = pd.DataFrame(index=range(CV * len(self.models)))\n entries = []\n for model in self.models:\n model_name = model\n accuracies = cross_val_score(self.models[model], test_features, price_df_temp['change_bool'], scoring='accuracy', cv=CV)\n for fold_idx, accuracy in enumerate(accuracies):\n entries.append((model_name, fold_idx, accuracy))\n cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])\n\n fig_1, ax_1 = plt.subplots(figsize=(10,10))\n sns.boxplot(x='model_name', y='accuracy', data=cv_df)\n sns.stripplot(x='model_name', y='accuracy', data=cv_df,\n size=8, jitter=True, edgecolor=\"gray\", linewidth=2)\n plt.title('Boxplot of CV scores of {} with a time delay of {} min'.format(coin, int((self.lag.seconds)/60)))\n return fig_1\n #plt.show()", "title": "" }, { "docid": "e9299571f2a15f83b95fe35120094822", "score": "0.59625655", "text": "def launch_for_splits(self, model, parameter_grid, cv_split_filenames,\r\n pre_warm=True, collect_files_on_reset=False):\r\n\r\n # Abort any existing processing and erase previous state\r\n self.reset()\r\n self.parameter_grid = parameter_grid\r\n\r\n # Mark the files for garbage collection\r\n if collect_files_on_reset:\r\n self._temp_files.extend(cv_split_filenames)\r\n\r\n # Warm the OS disk cache on each host with sequential reads instead\r\n # of having concurrent evaluation tasks compete for the the same host\r\n # disk resources later.\r\n if pre_warm:\r\n warm_mmap_on_cv_splits(self.lb_view.client, cv_split_filenames)\r\n\r\n # Randomize the grid order\r\n random_state = check_random_state(self.random_state)\r\n self.all_parameters = list(ParameterGrid(parameter_grid))\r\n random_state.shuffle(self.all_parameters)\r\n\r\n for params in self.all_parameters:\r\n task_group = []\r\n\r\n for cv_split_filename in cv_split_filenames:\r\n task = self.lb_view.apply(\r\n compute_evaluation,\r\n model, cv_split_filename, params=params)\r\n task_group.append(task)\r\n\r\n self.task_groups.append(task_group)\r\n\r\n # Make it 
possible to chain method calls\r\n return self", "title": "" }, { "docid": "5dca8a5fffc7f1297e8d80fdd576366c", "score": "0.5959495", "text": "def cv(params, dtrain, num_boost_round=10, nfold=3, metrics=(),\n obj=None, feval=None, fpreproc=None, show_stdv=True, seed=0):\n results = []\n cvfolds = mknfold(dtrain, nfold, params, seed, metrics, fpreproc)\n for i in range(num_boost_round):\n for f in cvfolds:\n f.update(i, obj)\n res = aggcv([f.eval(i, feval) for f in cvfolds], show_stdv)\n sys.stderr.write(res + '\\n')\n results.append(res)\n return results", "title": "" }, { "docid": "beec1f3f4df9b9c90821338c2be18f2e", "score": "0.59589815", "text": "def evaluate_nested_cv(self, clf: BaseEstimator, embedding, n_splits):\n roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True, needs_proba=True)\n grid_search = GridSearchCV(\n clf,\n NESTED_CV_PARAMETERS,\n scoring=roc_auc_scorer,\n cv=n_splits,\n n_jobs=-1\n )\n embedding = embedding[self.label_ind, :]\n best_params = Counter()\n results = defaultdict(list)\n for i in range(10):\n rskf = StratifiedKFold(n_splits=n_splits, shuffle=True)\n for train_idx, test_idx in rskf.split(embedding, self.labels):\n x_train, x_test, y_train, y_test, w_train = self._get_split(embedding, test_idx, train_idx)\n grid_search.fit(x_train, y_train, sample_weight=w_train)\n clf.C = grid_search.best_params_['C']\n clf.class_weight = grid_search.best_params_['class_weight']\n\n cw_idx = NESTED_CV_PARAMETERS['class_weight'].index(clf.class_weight)\n best_params[('C', clf.C)] += 1\n best_params[('cw', cw_idx)] += 1\n best_params[(clf.C, cw_idx)] += 1\n\n pred, probs = self.get_predictions(\n clf,\n x_train,\n x_test,\n y_train,\n y_test,\n sample_weights=w_train\n )\n self._assemble_results(y_test, i, pred, probs[:, 1], results)\n logger.debug('Best parameters for nested cross validation are:')\n logger.debug(best_params)\n return results", "title": "" }, { "docid": "95ac64c7f5e50492affc43ea46e2e257", "score": "0.5952746", "text": "def nested_cv(estimator, X, y, groups=None, param_distributions=None, n_iter=20, internal_n_folds=5,\n internal_total_folds=None, external_n_folds=5, external_total_folds=None, n_jobs=-1, scoring='f1'):\n # convert groups and labels\n if isinstance(y, pd.Series):\n y = y.values\n if isinstance(groups, pd.Series):\n groups = groups.values\n\n # select defaults\n if groups is None:\n groups = np.arange(y.shape[0])\n if internal_total_folds is None:\n internal_total_folds = internal_n_folds\n if external_total_folds is None:\n external_total_folds = external_n_folds\n\n # get the external splits\n splits = generate_grouped_splits(X, y, groups, total_folds=external_total_folds,\n n_folds=external_n_folds)\n\n # list in which to store all cv results\n all_cv_results = []\n\n # get the scorer class for the metrics\n scorer = get_scorer(scoring)\n scores = np.zeros(external_n_folds, dtype='float32')\n\n for run_nb, split in zip(range(external_n_folds), splits):\n logger.info('Model selection on fold number {}...'.format(run_nb))\n\n # split the dataset\n if isinstance(X, pd.DataFrame):\n X_train, X_test = X.iloc[split[0]], X.iloc[split[1]]\n else:\n X_train, X_test = X[split[0]], X[split[1]]\n y_train, y_test = y[split[0]], y[split[1]]\n groups_train, groups_test = groups[split[0]], groups[split[1]]\n\n # do the internal loop, pass the corresponding groups\n best_params, cv_results = search_params(estimator, X_train, y_train, groups=groups_train,\n param_distributions=param_distributions,\n n_iter=n_iter, n_folds=internal_n_folds,\n 
total_folds=internal_total_folds, n_jobs=n_jobs,\n scoring=scoring)\n\n # refit the the best estimator with all the data\n logger.info('Refitting estimator with best params...')\n best_est = estimator\n best_est.set_params(**best_params) # set as kwargs!\n best_est.fit(X_train, y_train)\n\n # add the score to the list of all scores\n scores[run_nb] = scorer(best_est, X_test, y_test)\n\n # log the result\n logger.info('SCORE FOR BEST ESTIMATOR ON FOLD NUMBER {} = {}'.format(run_nb, scores[run_nb]))\n\n # add the cross validation dataframe to the list\n cv_results['run_nb'] = run_nb\n all_cv_results.append(cv_results)\n\n return scores, pd.concat(all_cv_results, ignore_index=True)", "title": "" }, { "docid": "0cbeec6b9d3725a5bac21d864d64c1b8", "score": "0.5947677", "text": "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n max_features = ['auto', 'sqrt']\n max_depth = [int(x) for x in np.linspace(10, 100, num = 4)] \n max_depth.append(None)\n min_samples_split = [2, 5, 10]\n min_samples_leaf = [1, 2, 4]\n bootstrap = [True, False]\n parameters = { 'clf__estimator__max_features': max_features,\n 'clf__estimator__max_depth': max_depth,\n 'clf__estimator__min_samples_split': min_samples_split,\n 'clf__estimator__min_samples_leaf': min_samples_leaf,\n 'clf__estimator__bootstrap': bootstrap}\n\n\n cv = GridSearchCV(pipeline, param_grid=parameters ,cv = 3)\n return cv", "title": "" }, { "docid": "5a9a938c37d6dd18d43396529f90ad93", "score": "0.5946431", "text": "def launch_cv(self):\n self.launch_op(op='cv')", "title": "" }, { "docid": "9d523bb90410c3c00a7354708adfedde", "score": "0.5938454", "text": "def evaluate(args, model, new_model_dir, features, epoch, global_step, best_score):\n y_true, y_pred, eval_loss = _eval(args, model, features)\n args.eval_tool.eval_mem(y_true, y_pred)\n # # # debug\n # args.logger.debug(args.eval_tool.get_counts())\n # args.logger.debug(args.eval_tool.get_performance())\n eval_metrix = args.eval_tool.get_performance()\n score_lvl, score_method, _ = args.model_selection_scoring.split(\"-\")\n cur_score = eval_metrix['overall'][score_lvl][score_method]\n args.eval_tool.reset()\n\n # select model based on best score\n # if best_score < cur_score:\n if cur_score - best_score > 1e-5:\n args.logger.info('''\n Global step: {}; \n Epoch: {}; \n previous best score: {:.4f}; \n new best score: {:.4f}; \n full evaluation metrix: {}\n '''.format(global_step, epoch + 1, best_score, cur_score, eval_metrix))\n best_score = cur_score\n save_model(args, model, new_model_dir, global_step, latest=args.max_num_checkpoints)\n\n # save model transformer core\n if args.save_model_core:\n save_only_transformer_core(args, model)\n\n return best_score, eval_loss", "title": "" }, { "docid": "f4686505d1b757670369c740514004ad", "score": "0.59308714", "text": "def results_regression(\n model_class,\n model_name,\n run_id,\n ensemble_folds,\n test_set,\n data_params,\n robust,\n device,\n eval_type=\"checkpoint\",\n):\n\n print(\n \"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\"\n \"------------Evaluate model on Test Set------------\\n\"\n \"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\"\n )\n\n test_generator = DataLoader(test_set, **data_params)\n\n y_ensemble = np.zeros((ensemble_folds, len(test_set)))\n if robust:\n y_ale = np.zeros((ensemble_folds, len(test_set)))\n\n for j in range(ensemble_folds):\n\n if ensemble_folds == 1:\n resume 
= f\"models/{model_name}/{eval_type}-r{run_id}.pth.tar\"\n print(\"Evaluating Model\")\n else:\n resume = f\"models/{model_name}/{eval_type}-r{j}.pth.tar\"\n print(\"Evaluating Model {}/{}\".format(j + 1, ensemble_folds))\n\n assert os.path.isfile(resume), f\"no checkpoint found at '{resume}'\"\n checkpoint = torch.load(resume, map_location=device)\n checkpoint[\"model_params\"][\"robust\"]\n assert (\n checkpoint[\"model_params\"][\"robust\"] == robust\n ), f\"robustness of checkpoint '{resume}' is not {robust}\"\n\n model = model_class(**checkpoint[\"model_params\"], device=device,)\n model.to(device)\n model.load_state_dict(checkpoint[\"state_dict\"])\n\n normalizer = Normalizer()\n normalizer.load_state_dict(checkpoint[\"normalizer\"])\n\n with torch.no_grad():\n idx, comp, y_test, output = model.predict(generator=test_generator,)\n\n if robust:\n mean, log_std = output.chunk(2, dim=1)\n pred = normalizer.denorm(mean.data.cpu())\n ale_std = torch.exp(log_std).data.cpu() * normalizer.std\n y_ale[j, :] = ale_std.view(-1).numpy()\n else:\n pred = normalizer.denorm(output.data)\n\n y_ensemble[j, :] = pred.view(-1).numpy()\n\n res = y_ensemble - y_test\n mae = np.mean(np.abs(res), axis=1)\n mse = np.mean(np.square(res), axis=1)\n rmse = np.sqrt(mse)\n r2 = r2_score(\n np.repeat(y_test[:, np.newaxis], ensemble_folds, axis=1),\n y_ensemble.T,\n multioutput=\"raw_values\",\n )\n\n if ensemble_folds == 1:\n print(\"\\nModel Performance Metrics:\")\n print(\"R2 Score: {:.4f} \".format(r2[0]))\n print(\"MAE: {:.4f}\".format(mae[0]))\n print(\"RMSE: {:.4f}\".format(rmse[0]))\n else:\n r2_avg = np.mean(r2)\n r2_std = np.std(r2)\n\n mae_avg = np.mean(mae)\n mae_std = np.std(mae)\n\n rmse_avg = np.mean(rmse)\n rmse_std = np.std(rmse)\n\n print(\"\\nModel Performance Metrics:\")\n print(f\"R2 Score: {r2_avg:.4f} +/- {r2_std:.4f}\")\n print(f\"MAE: {mae_avg:.4f} +/- {mae_std:.4f}\")\n print(f\"RMSE: {rmse_avg:.4f} +/- {rmse_std:.4f}\")\n\n # calculate metrics and errors with associated errors for ensembles\n y_ens = np.mean(y_ensemble, axis=0)\n\n mae_ens = np.abs(y_test - y_ens).mean()\n mse_ens = np.square(y_test - y_ens).mean()\n rmse_ens = np.sqrt(mse_ens)\n\n r2_ens = r2_score(y_test, y_ens)\n\n print(\"\\nEnsemble Performance Metrics:\")\n print(f\"R2 Score : {r2_ens:.4f} \")\n print(f\"MAE : {mae_ens:.4f}\")\n print(f\"RMSE : {rmse_ens:.4f}\")\n\n core = {\"id\": idx, \"composition\": comp, \"target\": y_test}\n results = {f\"pred_{n_ens}\": val for (n_ens, val) in enumerate(y_ensemble)}\n if model.robust:\n ale = {f\"ale_{n_ens}\": val for (n_ens, val) in enumerate(y_ale)}\n results.update(ale)\n\n df = pd.DataFrame({**core, **results})\n\n if ensemble_folds == 1:\n df.to_csv(\n index=False,\n path_or_buf=(f\"results/test_results_{model_name}_r-{run_id}.csv\"),\n )\n else:\n df.to_csv(\n index=False, path_or_buf=(f\"results/ensemble_results_{model_name}.csv\")\n )", "title": "" }, { "docid": "bc33c5ad8e54b6f17048747ff1d66767", "score": "0.59296167", "text": "def _inner_loop(self, input_x, input_y):\n inner = KFold(n_splits=self.n_splits_inner)\n params = ParameterGrid(self.hparams)\n auc_grid = np.zeros([self.n_splits_inner, len(params)]) # holds all the performance metrics for each k and each fold\n for split_idx, (t, v) in enumerate(inner.split(input_x)):\n print(f\"Inner fold {split_idx+1} of {self.n_splits_inner}\")\n for param_idx, param in enumerate(params):\n this_model = self.model_class(param)\n x_train, y_train = input_x.iloc[t], input_y.iloc[t]\n x_valid, y_valid = input_x.iloc[v], 
input_y.iloc[v]\n print(f\"Fitting model with params: {param}\")\n this_model.fit(x_train, y_train)\n valid_prob = this_model.predict_proba(x_valid)\n valid_auc = roc_auc_score(y_true=y_valid, y_score=valid_prob[:,1])\n auc_grid[split_idx, param_idx] = valid_auc\n # which hparam combination has best average performance across all splits?\n # select these params, train on all the data and return a trained model\n mean_auc = np.mean(auc_grid, axis=0)\n best_idx = np.argmax(mean_auc)\n best_auc = mean_auc[best_idx]\n best_params = params[best_idx]\n print(f\"Best params: {best_params}, training final model\")\n this_model = self.model_class(best_params)\n this_model.fit(input_x, input_y)\n return this_model, best_params", "title": "" }, { "docid": "3d13daf70b3cd2957a7987f2207694fc", "score": "0.5925097", "text": "def eval_child_model(session, model, dataset, mode):\n if mode == 'val':\n loader = dataset.val_loader\n elif mode == 'test':\n loader = dataset.test_loader\n else:\n raise ValueError('Not valid eval mode')\n tf.logging.info('model.batch_size is {}'.format(model.batch_size))\n\n correct = 0\n count = 0\n cost_epoch = []\n for i, batch in enumerate(loader):\n images, labels = batch\n images = images.numpy()\n labels = labels.numpy()\n labels = np.eye(dataset.num_classes)[labels]\n preds, loss = session.run(\n [model.predictions, model.cost],\n feed_dict={\n model.images: images,\n model.labels: labels,\n })\n cost_epoch.append(loss)\n correct += np.sum(\n np.equal(np.argmax(labels, 1), np.argmax(preds, 1)))\n count += len(preds)\n return correct / count, np.mean(np.array(cost_epoch))", "title": "" }, { "docid": "590368d43dfcdda8bfbab8ced2298c4e", "score": "0.5922307", "text": "def model_data(data, test=False):\n features = data.drop(['balance'], axis=1)\n labels = data['balance'].to_frame()\n X_work, X_test, y_work, y_test = train_test_split(features, labels)\n # we don't touch the test set until the end.\n\n def grid_search(estimator, grid, X, y):\n gs = GridSearchCV(estimator, cv=5, n_jobs=-1, param_grid=grid)\n gs.fit(X, y)\n print(gs.best_params_)\n return gs.best_estimator_\n\n def support_vector(X, y):\n svc = SVC()\n grid = {\n 'kernel': ['linear', 'poly', 'rbf', 'sigmoid']\n }\n return grid_search(svc, grid, X, y)\n\n def random_forest(X, y):\n rfc = RandomForestClassifier(n_jobs=-1)\n grid = {\n 'n_estimators': np.arange(5, 15),\n 'criterion': ['gini', 'entropy']\n }\n return grid_search(rfc, grid, X, y)\n\n def knn(X, y):\n knc = KNeighborsClassifier()\n grid = {\n 'n_neighbors': np.arange(1, 10)\n }\n return grid_search(knc, grid, X, y)\n\n def perceptron(X, y):\n mlp = MLPClassifier()\n grid = {\n 'activation': ['identity', 'logistic', 'tanh', 'relu']\n }\n return grid_search(mlp, grid, X, y)\n\n def vote(X, y):\n estimators = [\n ('random_forest', random_forest(X, y)),\n ('knn', knn(X, y)),\n ('perceptron', perceptron(X, y))\n ]\n vc = VotingClassifier(estimators, n_jobs=-1)\n vc.fit(X, y)\n return vc\n\n if not test:\n avg_train, avg_validate = 0, 0\n skf = StratifiedKFold(n_splits=5)\n for train_idx, test_idx in skf.split(X_work, y_work):\n X_train, X_test, y_train, y_test = \\\n X_work.iloc[train_idx], X_work.iloc[test_idx], y_work.iloc[train_idx], y_work.iloc[test_idx]\n model = vote(X_train, y_train)\n avg_train += accuracy_score(y_train, model.predict(X_train))\n avg_validate += accuracy_score(y_test, model.predict(X_test))\n print('train: {}'.format(avg_train/5))\n print('validate: {}'.format(avg_validate/5))\n else:\n def precision_recall(y, scores):\n \"\"\"\n 
Plots precision recall curve. Not used in article.\n\n :param y: true y values\n :param scores: decision boundary scores\n :return:\n \"\"\"\n for i in range(0, 3):\n y_class = y['balance'].apply(lambda x: 1 if x == i else 0) # binarized for class\n scores_class = scores[:, i].flatten()\n precision, recall, _ = precision_recall_curve(y_class, scores_class)\n average_precision = average_precision_score(y_class, scores_class)\n print('average precision: {}'.format(average_precision))\n\n plt.step(recall, precision, color='b', alpha=0.2, where='post')\n plt.fill_between(recall, precision, step='post', alpha=0.2, color='b')\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.ylim([0.0, 1.05])\n plt.xlim([0.0, 1.0])\n plt.title('2-class Precision-Recall curve: AP={0:0.2f}'.format(\n average_precision))\n plt.show()\n\n model = vote(X_work, y_work['balance'])\n # train_scores = model.decision_function(X_work)\n # precision_recall(y_work, train_scores)\n\n print('train: {}'.format(model.score(X_work, y_work)))\n print('test: {}'.format(model.score(X_test, y_test)))", "title": "" }, { "docid": "c4ce24ba381ad8109d1d0a3821cfb184", "score": "0.5898583", "text": "def run_model_evaluation(self, X_train, y_train, X_test, y_test):\n self.y_test = y_test\n logger.info(\"Stage - Model Analysis BEGIN\")\n for classifier in (self.knn_model, self.rf_model):\n classifier.fit(X_train, y_train)\n self._eval_model(classifier, X=X_test, y=y_test)\n classifier.predict(X_test=X_test)\n logger.info(\"Stage - Model Analysis END\")", "title": "" }, { "docid": "38cf177174e4bbd61964bc2b4b8660b1", "score": "0.5897472", "text": "def evaluate_trained_classifier(model=None, reader=sick_dev_reader):\n mod, vectorizer, feature_selector, features = model\n feats, labels = featurizer(reader=reader, features_funcs = features)\n feat_matrix = vectorizer.transform(feats)\n if feature_selector:\n feat_matrix = feature_selector.transform(feat_matrix)\n predictions = mod.predict(feat_matrix)\n print metrics.classification_report(labels, predictions)", "title": "" }, { "docid": "be3e2eb4a254832585279c7829c953d3", "score": "0.587122", "text": "def eval_model(model, val_dataset, name='validation', dataloader=custom_dataloader, verbose=True):\n if verbose:\n print(f\"Evaluation on {name}\")\n model.eval()\n val_loader = dataloader(dataset=val_dataset, batch_size=10, shuffle=False, num_workers=4)\n predictions, labels = [], []\n for _, sample_batched in enumerate(val_loader):\n val_features, val_labels = sample_batched\n labels.append(val_labels)\n if baseCONFIG.cuda:\n val_features = val_features.to(baseCONFIG.device)\n val_pred = model.predict(val_features).detach()\n predictions.append(val_pred)\n predictions, labels = torch.cat(predictions).cpu().numpy(), torch.cat(labels).cpu().numpy()\n auc_score = roc_auc_score(y_true=labels, y_score=predictions)\n if verbose:\n print(f'{name} AUC: {auc_score}')\n return auc_score", "title": "" }, { "docid": "24f9fab5a7326de41eb9e3a8138ebdd5", "score": "0.5861194", "text": "def evaluate_model(self, X_test, y_test):\n return cross_val_score(self.pipeline,\n X_test, y_test,\n scoring=\"accuracy\",\n n_jobs=-1) \\\n .mean()", "title": "" }, { "docid": "ea80c5e9273b091eadeecae2ee8d1e07", "score": "0.58587676", "text": "def rf_cv(X_train, y_train, cv=5, verbose=False):\n rf = RandomForestClassifier()\n params = [{\n 'rf__n_estimators': range(50, 450, 50),\n 'rf__max_depth': [5, 10], \n 'rf__min_samples_split': [2, 10, 20],\n 'rf__max_features': ['sqrt', 8, 10],\n 'rf__criterion': ['gini'], \n }]\n 
pipe = imbPipeline(steps=[('sample', SMOTE()), ('rf', rf)])\n model = GridSearchCV(pipe, params, cv=cv, n_jobs=-1, scoring=f2scorer, verbose=verbose)\n model.fit(X_train, y_train)\n return model", "title": "" }, { "docid": "916097f34adc8577797a6c2d14940541", "score": "0.58488816", "text": "def run(self, model, verbose=False):\n print(\"Run!\")\n\n f1_I = []\n f1_H = []\n f1_M = []\n\n i = 0\n\n kf = KFold(n_splits=10, shuffle=False)\n for train_index, test_index in kf.split(self.X_train):\n i += 1\n print(\"Progress: \" + str(i) + \" out of \" + str(10))\n\n X_train = self.X_train[train_index]\n y_I_train = self.y_I[train_index]\n y_H_train = self.y_H[train_index]\n y_M_train = self.y_M[train_index]\n\n X_cv = self.X_train[test_index]\n y_I_cv = self.y_I[test_index]\n y_H_cv = self.y_H[test_index]\n y_M_cv = self.y_M[test_index]\n\n if verbose:\n print(\"Printing all shapes:\", X_train.shape, y_I_train.shape, y_H_train.shape, y_M_train.shape)\n print(\"CV\", X_cv.shape, y_I_cv.shape, y_H_cv.shape, y_M_cv.shape )\n\n assert(test_index.shape[0] > 0)\n\n # Go through each array!\n # I first!\n model.fit(X_train, y_I_train)\n y_pred = model.predict(X_cv)\n assert(y_pred.shape[0] == test_index.shape[0])\n f1_value = f1_score(y_pred, y_I_cv)\n f1_I.append(f1_value)\n\n # H second!\n model.fit(X_train, y_H_train)\n y_pred = model.predict(X_cv)\n assert(y_pred.shape[0] == test_index.shape[0])\n f1_value = f1_score(y_pred, y_H_cv)\n f1_H.append(f1_value)\n\n # M third!\n model.fit(X_train, y_M_train)\n y_pred = model.predict(X_cv)\n assert(y_pred.shape[0] == test_index.shape[0])\n f1_value = f1_score(y_pred, y_M_cv)\n f1_M.append(f1_value)\n\n if verbose:\n print(\"Predicted average f1 scores are: \")\n print(\"F1 score for I: \", np.mean(f1_I))\n print(\"F1 score for M: \", np.mean(f1_M))\n print(\"F1 score for H: \", np.mean(f1_H))\n\n return np.mean(f1_H), np.mean(f1_I), np.mean(f1_M)\n\n # # classifier with ensemble\n # # ATTENTION: unbalanced data\n # # adaboost for each of the problem\n # # reweighting (later)\n # # can play with K\n # clf.fit(X_M_train, y_M_train)\n # score = f1(X_M_validation, y_M_validation)\n # print(\"Score \" + score)", "title": "" }, { "docid": "3676a284de371aeb4ca48002933e2f60", "score": "0.5847166", "text": "def fit(self, X, y):\r\n\r\n print(f\"\\nDataset '{self.dataset_name}'\")\r\n print('{} <-- Running this model now'.format(type(self.model).__name__))\r\n\r\n self.X = X\r\n self.y = y\r\n self.y_type = type_of_target(y)\r\n\r\n if isinstance(self.outer_cv, numbers.Number) and isinstance(self.inner_cv, numbers.Number):\r\n outer_cv = StratifiedKFold(n_splits=self.outer_cv, shuffle=True, random_state=self.random_state)\r\n inner_cv = StratifiedKFold(n_splits=self.inner_cv, shuffle=True, random_state=self.random_state)\r\n else:\r\n outer_cv = self.outer_cv\r\n inner_cv = self.inner_cv\r\n\r\n outer_scores = []\r\n best_inner_params_list = []\r\n\r\n # Split X and y into K-partitions to Outer CV\r\n for (i, (train_index, test_index)) in enumerate(outer_cv.split(X, y)):\r\n print('\\n{}/{} <-- Current outer fold'.format(i+1, self.outer_cv))\r\n X_train_outer, X_test_outer = X.iloc[train_index], X.iloc[test_index]\r\n y_train_outer, y_test_outer = y.iloc[train_index], y.iloc[test_index]\r\n\r\n if self.y_type in 'binary':\r\n auc_scorer = self.scorer if self.scorer is not None else make_scorer(roc_auc_score)\r\n else:\r\n auc_scorer = self.scorer if self.scorer is not None else make_scorer(roc_auc_score,\r\n average=self.multiclass_average,\r\n 
needs_proba=self.predict_proba,\r\n multi_class=self.multiclass_method)\r\n\r\n if self.randomized_search:\r\n randomized_search_inner_model = clone(self.model)\r\n search_model = randomized_search_inner_model\r\n\r\n if self.wrap_model_using_ovr:\r\n one_vs_rest_randomized_inner_classifier = OneVsRestClassifier(randomized_search_inner_model)\r\n search_model = one_vs_rest_randomized_inner_classifier\r\n\r\n randomized_search_cv = RandomizedSearchCV(search_model,\r\n param_distributions=self.params_grid,\r\n scoring=auc_scorer,\r\n cv=inner_cv,\r\n n_iter=self.search_iter,\r\n verbose=self.verbose,\r\n n_jobs=self.n_jobs,\r\n random_state=self.random_state)\r\n randomized_search_cv.fit(X_train_outer, y_train_outer)\r\n\r\n best_inner_params = randomized_search_cv.best_params_\r\n else:\r\n def objective(params, cv=inner_cv, X_inner=X_train_outer, y_inner=y_train_outer, scorer=auc_scorer):\r\n bayes_search_inner_model = clone(self.model)\r\n bayes_search_inner_model.set_params(**params)\r\n bayes_search_inner_model = clone(bayes_search_inner_model)\r\n search_model = bayes_search_inner_model\r\n\r\n if self.wrap_model_using_ovr and self.y_type not in 'binary':\r\n search_model = OneVsRestClassifier(bayes_search_inner_model)\r\n\r\n cv_search = cross_validate(search_model, X_inner, y_inner, scoring=scorer, cv=cv,\r\n n_jobs=self.n_jobs, verbose=self.verbose)\r\n cv_test_score = cv_search['test_score'].mean()\r\n\r\n # should minimize\r\n loss = 1 - cv_test_score\r\n\r\n if self.verbose > 1:\r\n print(\"Bayesian search iteration params: {}\".format(params))\r\n print(\"Bayesian search iteration loss: {}\".format(loss))\r\n print()\r\n\r\n return loss\r\n\r\n trials = Trials()\r\n best_inner_params = fmin(fn=objective, space=self.params_grid, max_evals=self.search_iter,\r\n rstate=np.random.RandomState(self.random_state),\r\n algo=tpe.suggest, trials=trials, verbose=self.verbose)\r\n\r\n if not self.randomized_search:\r\n best_inner_params = space_eval(self.params_grid, best_inner_params)\r\n\r\n best_inner_params_list.append(best_inner_params)\r\n\r\n # Fit the best hyper-parameters from one of the self.search_iter inner loops\r\n cloned_model = clone(self.model)\r\n outer_model = cloned_model\r\n\r\n if self.y_type not in 'binary':\r\n if not self.randomized_search:\r\n cloned_model.set_params(**best_inner_params)\r\n cloned_model = clone(cloned_model)\r\n one_vs_rest_outer_classifier = OneVsRestClassifier(cloned_model)\r\n else:\r\n one_vs_rest_outer_classifier = OneVsRestClassifier(cloned_model)\r\n one_vs_rest_outer_classifier.set_params(**best_inner_params)\r\n\r\n outer_model = one_vs_rest_outer_classifier\r\n else:\r\n outer_model.set_params(**best_inner_params)\r\n outer_model = clone(outer_model)\r\n\r\n train_start = time.time()\r\n outer_model.fit(X_train_outer, y_train_outer)\r\n train_stop = time.time()\r\n\r\n training_time_sec = train_stop - train_start\r\n\r\n inference_time_sec = measure_sec_of_n_sample_predictions(outer_model, X_train_outer)\r\n\r\n # Get score and prediction\r\n accuracy, tpr, fpr, precision, recall, roc_auc, pr_auc = \\\r\n self._predict_and_score_outer(X_test_outer, y_test_outer, outer_model)\r\n\r\n assert tpr == recall\r\n\r\n outer_scores.append([accuracy, tpr, fpr, precision, roc_auc, pr_auc, training_time_sec, inference_time_sec])\r\n\r\n print('Inner fold best parameters: {}'.format(best_inner_params_list[i]))\r\n print('Results for outer fold:')\r\n print('Accuracy {}, TPR {}, FPR {}, Precision {}, AUC {}, PR-Curve {}, Training Time {}, Inference 
Time {}'.format(*outer_scores[i]))\r\n\r\n print()\r\n print(\"-\" * 100)\r\n\r\n self.outer_scores = outer_scores\r\n self.best_inner_params_list = best_inner_params_list", "title": "" }, { "docid": "d91139be39e1664ed0f2072be1cf4f1e", "score": "0.5845858", "text": "def cv_deep_learning_pred_loss( X, y, nb_classes = 3, mode = \"CNNC_Name\", \n\t\t\t\t\t\t\t l = None, param_d = None, mp = 1,\n\t\t\t\t\t\t\t graph = True, n_splits = 5):\n\t\n\tKF = model_selection.KFold(n_splits, shuffle=True)\n\tkf = KF.split(X)\n\n\tdcnn_score_l, dcnn_loss_l = [], []\n\tc_wb_l = []\n\ty_cv = np.copy( y)\n\tfor tr, te in kf:\n\t\tX_train, y_train = X[tr,:], y[tr]\n\t\tX_test, y_test = X[te,:], y[te]\n\t\t\n\t\tif mode == \"CNNC_Name\":\n\t\t\tmodel = kkeras.CNNC_Name( param_d[\"n_cv_flt\"], param_d[\"n_cv_ln\"],\n\t\t\t\tparam_d[\"cv_activation\"], l=l, mp=mp)\n\t\t\tn_flt = param_d[\"n_cv_flt\"]\n\t\telif mode == \"CNNC_Name_Border\":\n\t\t\tmodel = kkeras.CNNC_Name_Border( param_d[\"n_cv_flt\"], param_d[\"n_cv_ln\"], \n\t\t\t\tparam_d[\"cv_activation\"], l=l, border_mode = 'valid')\n\t\t\tn_flt = param_d[\"n_cv_flt\"] \n\t\telif mode == \"MLPC_Name\":\n\t\t\tmodel = kkeras.MLPC_Name(l=l)\n\t\t\tn_flt = l[1]\n\t\telse:\n\t\t\traise ValueError(\"The given mode is not supported: mode={}\".format(mode))\n\t\t\t\n\t\tmodel.fit( X_train, y_train, X_test, y_test, nb_classes, batch_size=6, nb_epoch = 5)\n\t\ttr_score_lossacc = model.evaluate( X_train, y_train)\n\t\ttr_score = tr_score_lossacc[1]\n\t\ttr_loss = tr_score_lossacc[0]\n\t\tprint( \"Training Accuracy:\", tr_score)\n\t\tprint( \"Training Loss:\", tr_loss)\n\n\t\tdcnn_score_lossacc = model.evaluate( X_test, y_test)\n\t\tdcnn_score_l.append(dcnn_score_lossacc[1])\n\t\tdcnn_loss_l.append(dcnn_score_lossacc[0])\t\n\t\tprint( \"Testing Accuracy:\", dcnn_score_lossacc[1])\n\t\tprint( \"Testing Loss:\", dcnn_score_lossacc[0])\n\t\t\n\t\tc_w, c_b = model.get_c_wb()\n\t\tprint( \"c_w.shape=\", c_w.shape)\n\t\tc_w = c_w.reshape(-1, n_flt)\n\t\tc_wb_l.append( (c_w, c_b))\n\n\t\ty_cv[ te] = model.predict( X_test)\n\n\tprint( dcnn_score_l)\n\tprint( \"[Accuracy] Mean:{0}, Std:{1}\".format( np.mean( dcnn_score_l), np.std( dcnn_score_l)))\n\tprint( dcnn_loss_l)\n\tprint( \"[Loss] Mean:{0}, Std:{1}\".format( np.mean( dcnn_loss_l), np.std( dcnn_loss_l)))\n\n\tif graph:\n\t\t# One of weight vectors are drawn.\n\t\tc_w = c_wb_l[0][0] # 0 = c_w, 1 = c_b\n\t\tfor ll in range(n_flt):\n\t\t\t#plt.figure()\n\t\t\tplt.plot( c_w[:,ll], label=\"Filter #{}\".format(ll))\n\t\tplt.legend() \n\t\n\treturn dcnn_score_l, c_wb_l, y_cv, dcnn_loss_l", "title": "" }, { "docid": "c09439864b27dac64ebe0482da4299ca", "score": "0.584459", "text": "def build_model(): \n #define ML pipeline\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n\n parameters = {\n 'clf__estimator__n_estimators': [50, 100, 150],\n 'clf__estimator__criterion': [\"gini\", \"entropy\"]\n }\n\n cv = GridSearchCV(pipeline, param_grid=parameters)\n return cv", "title": "" }, { "docid": "94268faaa24bba527c9457725071e781", "score": "0.5844449", "text": "def run_model_selection_for_ml_model():\n\n # Load data\n tweets = load_tweets(sample=True, frac=1)\n tweets = apply_preprocessing(tweets)\n\n train_tweets = tweets[['tweet']]\n labels = tweets[['polarity']]\n\n models = ['lg', 'svm', 'nb', 'rf']\n scores = list()\n for model in models:\n scores.append(run_tfidf_ml_model(tweets=train_tweets, labels=labels, 
model=model))\n\n print(pd.DataFrame(scores))", "title": "" }, { "docid": "49d4e94f620f7009cf8d4f9622c1f683", "score": "0.58399606", "text": "def evaluate4svm(labels,\n feats,\n params={'c': 1, 'kernal': 'gauss'},\n Nsplit=2):\n c = params.get('c')\n if params.get('kernal' == 'gauss'):\n kernal = GaussianKernel()\n kernal.set_width(80)\n elif params.get('kernal' == 'sigmoid'):\n kernal = SigmoidKernel()\n else:\n kernal = LinearKernel()\n\n split = CrossValidationSplitting(labels, Nsplit)\n split.build_subsets()\n\n accuracy = np.zeros(Nsplit)\n time_test = np.zeros(accuracy.shape)\n for i in range(Nsplit):\n idx_train = split.generate_subset_inverse(i)\n idx_test = split.generate_subset_indices(i)\n\n feats.add_subset(idx_train)\n labels.add_subset(idx_train)\n svm = GMNPSVM(c, kernal, labels)\n _ = svm.train(feats)\n out = svm.apply(feats_test)\n evaluator = MulticlassAccuracy()\n accuracy[i] = evaluator.evaluate(out, labels_test)\n\n feats.remove_subset()\n labels.remove_subset()\n feats.add_subset(idx_test)\n labels.add_subset(idx_test)\n\n t_start = time.clock()\n time_test[i] = (time.clock() - t_start) / labels.get_num_labels()\n feats.remove_subset()\n labels.remove_subset()\n return accuracy", "title": "" }, { "docid": "cdc8b6a64cdc3b3ff3f950597b14fae4", "score": "0.5835834", "text": "def evaluate(self):\n if not self.kfold:\n classes = {\"A\":\"Walking\",\"B\":\"Jogging\",\"C\":\"Stairs\",\"D\":\"Sitting\",\n \"E\":\"Standing\",\"F\":\"Typing\",\"G\":\"Brushing teeth\",\"H\":\"Eating soup\",\n \"I\":\"Eating chips\",\"J\":\"Eating pasta\",\"K\":\"Drinking from a cup\",\n \"L\":\"Eating sandwich\",\"M\":\"Kicking (soccer ball)\",\"O\":\"Playing catch (with tennis ball)\",\n \"P\":\"Dribbling (basketball)\", \"Q\":\"Writing\",\"R\":\"Clapping\",\"S\":\"Folding clothes\"}\n print(\"Model prediction accuracies based on phone acceleration data:\")\n print(\"=\"*61)\n for i in self.encoder.classes_: # iterate through classes (string letters)\n j = self.encoder.transform([i])[0] # use indexer because .transform returns a list instead of an integer ex: [1] instead of 1\n idx = self.Y_test[j] # create reference array to only choose samples from the i'th (aka j'th) class\n X1 = self.X_test[idx==1]\n Y1 = self.Y_test[idx==1]\n print(classes[i], \"test score:\",\"-\"*(40-len(classes[i])),str(self.estimator.score(X1, Y1)))\n else:\n print(\"Model evaluation is not supported for kfold-trained data. 
Try using .train(kfold=False) first.\")", "title": "" }, { "docid": "8dcce0a0fa3e00ca7fbefb58abcf08b7", "score": "0.5829663", "text": "def build_model():\n \n pipeline = Pipeline([(\"vect\", CountVectorizer(tokenizer = tokenize)),\n (\"tfidf\", TfidfTransformer()),\n (\"clf\", RandomForestClassifier())\n ])\n \n parameters = {'clf__min_samples_leaf': [1,2,5],\n 'clf__n_estimators': [50, 100, 200]\n }\n\n cv = GridSearchCV(pipeline, param_grid = parameters, n_jobs = -1)\n \n return cv", "title": "" }, { "docid": "e59ed6aff860d09a48a1e7f7e289288b", "score": "0.5822049", "text": "def model_evaluation_main(\n hyperparameters,\n data_dict,\n global_settings,\n objective='auc',\n weight='totalWeight'\n):\n model = create_model(\n hyperparameters, data_dict, global_settings['nthread'], objective,\n weight\n )\n score, train, test = evaluate_model(\n data_dict, global_settings, model)\n return score, train, test", "title": "" }, { "docid": "4394577214fe1fe699977c58c974625a", "score": "0.5821532", "text": "def build_model():\n\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('mcfl', MultiOutputClassifier(estimator=SVC()))\n ])\n\n parameters = {\n 'mcfl__estimator__kernel' : ['linear', 'rbf'],\n }\n\n cv = GridSearchCV(pipeline, param_grid=parameters, verbose=10, cv=2, \\\n n_jobs=-1, scoring='f1_micro')\n\n return cv", "title": "" }, { "docid": "ffd0eb78b8d978c36f82fe35ff6c1728", "score": "0.58190554", "text": "def evaluate(backbone, ckpt_path, test_folder):\n\n test_set = ImageFolder(test_folder, transform=TEST_TRANSFORM)\n\n test_loader = DataLoader(test_set,\n batch_size=32,\n shuffle=False,\n num_workers=12,\n pin_memory=True)\n\n model = XRayClassification.load_from_checkpoint(\n ckpt_path, map_location=lambda storage, loc: storage, model=backbone)\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model.to(device)\n\n trainer = pl.Trainer(gpus=1, checkpoint_callback=False, logger=False)\n\n result_dict = trainer.test(model, test_loader, verbose=False)[0]\n\n for k, v in result_dict.items():\n print(k)\n print(v)", "title": "" }, { "docid": "729ff2338e793403ee4f4f264f74ee72", "score": "0.5816539", "text": "def _eval_classification(session, c, m, sps_op, summary_writer, global_step):\n name = m.name\n batch_size = c.batch_size_eval\n split_size = c.split_sizes[name]\n assert split_size % batch_size == 0\n num_batches = int(split_size / batch_size)\n print('\\nEvaluating model...\\n')\n\n preds = np.zeros(shape=[split_size])\n labels = np.zeros(shape=[split_size])\n if sps_op is None:\n sps = 0.\n else:\n sps = session.run(sps_op)\n for step in tqdm(range(num_batches), desc='evaluation', ncols=100):\n logits, lbl = session.run([m.dec_logits, m.batch_ops[1]])\n labels[step * batch_size: (step + 1) * batch_size] = np.squeeze(lbl)\n preds[step * batch_size: (step + 1) * batch_size] = np.argmax(np.squeeze(logits), axis=1)\n\n # noinspection PyUnresolvedReferences\n acc = np.mean((preds == labels).astype(np.float32)) * 100.\n err = (100. 
- acc)\n print('>>> {} Accuracy (%): {:.4f}'.format(name, acc))\n print('>>> {} Error (%): {:.4f}\\n'.format(name, err))\n value_summary({'{}/accuracy'.format(name): acc,\n '{}/error'.format(name): err}, summary_writer, global_step)\n # Write scores\n if global_step:\n score_dir = pjoin(os.path.dirname(c.log_path), 'run_{:02d}___infer_{}'.format(c.run, name))\n if not os.path.exists(score_dir):\n os.makedirs(score_dir)\n with open(pjoin(score_dir, 'sparsity_values.csv'), 'a') as f:\n f.write('Global step,Total sparsity\\r\\n')\n with open(pjoin(score_dir, 'metric_scores.csv'), 'a') as f:\n f.write('{:d},{:.4f},{:.4f}\\r\\n'.format(global_step, acc, err))\n with open(pjoin(score_dir, 'sparsity_values.csv'), 'a') as f:\n f.write('{:d},{:.7f}\\r\\n'.format(global_step, sps))\n return acc", "title": "" }, { "docid": "f043348b531e6811086bb1042931fb70", "score": "0.5815661", "text": "def run_crnet_eccv(model, cv_split=1, epoch=30):\n criterion = CRLoss()\n\n optimizer_ft = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-4)\n\n exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=20, gamma=0.1)\n\n print('start loading ECCV HotOrNot dataset...')\n train_dataset = HotOrNotDataset(cv_split=cv_split, train=True, transform=transforms.Compose([\n transforms.Resize(227),\n transforms.RandomCrop(224),\n transforms.RandomRotation(30),\n transforms.ColorJitter(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5],\n std=[1, 1, 1])\n ]))\n\n test_dataset = HotOrNotDataset(cv_split=cv_split, train=False, transform=transforms.Compose([\n transforms.Resize(227),\n transforms.RandomCrop(224),\n transforms.RandomRotation(30),\n transforms.ColorJitter(),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5],\n std=[1, 1, 1])\n ]))\n\n train_dataloader = DataLoader(train_dataset, batch_size=cfg['batch_size'],\n shuffle=True, num_workers=50, drop_last=True)\n test_dataloader = DataLoader(test_dataset, batch_size=cfg['batch_size'],\n shuffle=False, num_workers=50, drop_last=False)\n\n print('finish loading ECCV HotOrNot dataset...')\n\n train_model(model=model, train_dataloader=train_dataloader, test_dataloader=test_dataloader,\n criterion=criterion, optimizer=optimizer_ft, scheduler=exp_lr_scheduler, num_epochs=epoch,\n inference=False)", "title": "" }, { "docid": "298ad825cb83c1132e1b29aca84321e2", "score": "0.580828", "text": "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(estimator=LinearSVC(max_iter=10000)))\n ])\n parameters = {\n #'clf__estimator__n_estimators': [50, 100],\n #'clf__estimator__min_samples_split': [2, 5]\n 'clf__estimator__C': [1, 10],\n 'clf__estimator__max_iter': [1000, 100000]\n }\n\n cv = GridSearchCV(pipeline, param_grid=parameters)\n return cv", "title": "" }, { "docid": "b61f39a099d1d052c261104817126e97", "score": "0.57949626", "text": "def evaluate(model_path, images_dir, output_dir):\n\n device = torch.device('cpu')\n # build mobilenetv3 ssd and cast to cpu\n cfg.merge_from_file('configs/mobilenet_v3_ssd320_voc0712.yaml') # load config file for mobilenetv3 SSD\n cfg.freeze()\n model = build_detection_model(cfg)\n model = model.to(device)\n\n # load model weights\n weights = torch.load(model_path, map_location=device)['model']\n model.load_state_dict(weights)\n\n # prepare model for inference\n model.eval()\n # inference preprocessing\n # resize, subtract mean, make channel first dim and make Tensor from np 
array\n data_preprocess = build_transforms(cfg, is_train=False)\n image_paths = glob.glob(images_dir + '/*.jpg')\n\n # predict\n for i, image_path in enumerate(image_paths):\n # load and preprocess image\n image = np.array(Image.open(image_path).convert(\"RGB\"))\n image_tensor = data_preprocess(image)[0] # use only the returned image (not boxes/labels)\n image_tensor = image_tensor.unsqueeze(0) # add batch dim\n image_name = os.path.basename(image_path)\n height, width = image.shape[:2]\n\n # predict\n result = model(image_tensor)[0]\n result = result.resize((width, height)).numpy()\n boxes, labels, scores = result['boxes'], result['labels'], result['scores'] # unpack results\n\n # filter results\n indices = scores > 0.5\n boxes = boxes[indices]\n labels = labels[indices]\n scores = scores[indices]\n\n # draw and save boxes\n # VOC dataset was used for training and hence its class names are used\n drawn_image = draw_boxes(image, boxes, labels, scores, VOCDataset.class_names).astype(np.uint8)\n Image.fromarray(drawn_image).save(os.path.join(output_dir, image_name))", "title": "" }, { "docid": "d724c2fc7981461b626f93bb3db010d0", "score": "0.5789101", "text": "def model_evaluate(df: pd.DataFrame, features: list, target: str, metrics: dict, model, cv_rounds: int = 5):\n df = df.copy()\n scores = cross_validate(estimator=model.model,\n X=df[features],\n y=df[target],\n scoring=metrics,\n cv=cv_rounds,\n return_train_score=True)\n\n preds_proba = cross_val_predict(estimator=model.model,\n X=df[features],\n y=df[target],\n cv=cv_rounds,\n method=\"predict_proba\")[:, 1]\n preds = cross_val_predict(estimator=model.model,\n X=df[features],\n y=df[target],\n cv=cv_rounds,\n method=\"predict\")\n return scores, preds_proba, preds", "title": "" }, { "docid": "43dc07fafd41acd71fbab9980f625fcb", "score": "0.5788027", "text": "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n parameters = {'vect__min_df': [1, 5],\n 'tfidf__use_idf':[True, False],\n 'clf__estimator__n_estimators':[10, 20]}\n\n cv = GridSearchCV(pipeline, param_grid = parameters,verbose = 3)\n return cv", "title": "" }, { "docid": "800c2a370aa08e3962c8e53cc6ae4f60", "score": "0.5787464", "text": "def evaluate_model(model, testx, testy):\n\n score = model.evaluate(testx, testy, batch_size=32)\n print('Test Accuracy: ', score[1])\n print_baseline(testy)", "title": "" }, { "docid": "ef73d5ac9c563f57ce066e243d9fc21f", "score": "0.57821023", "text": "def evaluate_model(\n estimator,\n fold: Optional[Union[int, Any]] = None,\n fit_kwargs: Optional[dict] = None,\n groups: Optional[Union[str, Any]] = None,\n use_train_data: bool = False,\n):\n\n return pycaret.internal.tabular.evaluate_model(\n estimator=estimator,\n fold=fold,\n fit_kwargs=fit_kwargs,\n groups=groups,\n use_train_data=use_train_data,\n )", "title": "" }, { "docid": "2534c76c5583e5d297d7536364e3bcb9", "score": "0.5775548", "text": "def evaluate_split(self, **kwargs):", "title": "" }, { "docid": "e61ed93c2a880981f52590faeca878d6", "score": "0.57704806", "text": "def _eval(args, model, features):\n data_loader = ner_data_loader(features, batch_size=args.eval_batch_size, task='test', auto=True)\n eval_size = len(data_loader)\n args.logger.info(\"***** Running evaluation on {} number of test data *****\".format(eval_size))\n args.logger.info(\" Instantaneous batch size per GPU = {}\".format(args.eval_batch_size))\n 
args.logger.info(\"******************************\")\n\n # not data for evaluation\n if eval_size < 1:\n return [], [], .0\n\n # prepare processing results for each batch\n y_trues, y_preds = [], []\n y_pred, y_true = [], []\n prev_gd = 0\n\n # prediction\n model.eval()\n eval_loss = .0\n for batch in tqdm(data_loader, desc='evaluation', disable=False if args.progress_bar else True):\n original_tkid = batch[0].numpy()\n original_mask = batch[1].numpy()\n original_labels = batch[3].numpy()\n guards = batch[4].numpy()\n\n batch = tuple(b.to(args.device) for b in batch)\n eval_inputs = batch_to_model_inputs(batch, args.model_type)\n\n with torch.no_grad():\n raw_logits, _, loss = model(**eval_inputs)\n # get softmax output of the raw logits (keep dimensions)\n if not args.use_crf:\n raw_logits = torch.argmax(F.log_softmax(raw_logits, dim=-1), dim=-1)\n raw_logits = raw_logits.detach().cpu().numpy()\n # update evaluate loss\n eval_loss += loss.item()\n\n assert guards.shape == original_tkid.shape == original_mask.shape == original_labels.shape == raw_logits.shape, \\\n \"\"\"\n expect same dimension for all the inputs and outputs but get\n input_tokens: {}\n mask: {}\n label: {}\n logits: {}\n \"\"\".format(original_tkid.shape, original_mask.shape, original_labels.shape, raw_logits.shape)\n\n # tk=token, mk=mask, lb=label, lgt=logits\n for mks, lbs, lgts, gds in zip(original_mask, original_labels, raw_logits, guards):\n connect_sent_flag = False\n for mk, lb, lgt, gd in zip(mks, lbs, lgts, gds):\n if mk == 0:\n # after hit first mask, we can stop for the current sentence since all rest will be pad\n if args.model_type == \"xlnet\":\n continue\n else:\n break\n if gd == 0 or prev_gd == gd:\n continue\n if gd == NEXT_GUARD:\n connect_sent_flag = True\n break\n if prev_gd != gd:\n y_true.append(args.idx2label[lb])\n y_pred.append(args.idx2label[lgt])\n prev_gd = gd\n if connect_sent_flag:\n continue\n y_trues.append(y_true)\n y_preds.append(y_pred)\n y_pred, y_true = [], []\n prev_gd = 0\n\n return y_trues, y_preds, round(eval_loss / eval_size, 4)", "title": "" }, { "docid": "de472e3e174a36516c5873b4f464030b", "score": "0.57676053", "text": "def evaluate(data_loader, model, paradigm, task, sents):\n device = torch.device(f'cuda:{args.n_gpu}')\n model.model.to(device)\n \n model.model.eval()\n outputs, targets = [], []\n for batch in tqdm(data_loader):\n # need to push the data to device\n outs = model.model.generate(input_ids=batch['source_ids'].to(device), \n attention_mask=batch['source_mask'].to(device), \n max_length=128)\n\n dec = [tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]\n target = [tokenizer.decode(ids, skip_special_tokens=True) for ids in batch[\"target_ids\"]]\n\n outputs.extend(dec)\n targets.extend(target)\n\n raw_scores, fixed_scores, all_labels, all_preds, all_preds_fixed = compute_scores(outputs, targets, sents, paradigm, task)\n results = {'raw_scores': raw_scores, 'fixed_scores': fixed_scores, 'labels': all_labels,\n 'preds': all_preds, 'preds_fixed': all_preds_fixed}\n # pickle.dump(results, open(f\"{args.output_dir}/results-{args.task}-{args.dataset}-{args.paradigm}.pickle\", 'wb'))\n\n return raw_scores, fixed_scores", "title": "" }, { "docid": "931411a900e92b97ac5868871a577ce6", "score": "0.5763745", "text": "def build_model():\n pipeline = Pipeline([\n \n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n\n ])\n \n \n parameters = {\n 'vect__max_features':(None, 
5000),\n 'clf__estimator__n_estimators': [5, 10, 20]\n }\n\n cv = GridSearchCV(estimator=pipeline, param_grid=parameters, n_jobs=-1,verbose=2)\n \n return cv", "title": "" }, { "docid": "f15cc68b812280bb6e7c9f4f8ad09913", "score": "0.57635653", "text": "def run_model(params, mode):\n assert mode in ['train', 'eval', 'train_and_eval'], mode\n dataset_fn_fn = fast_imagenet_input.make_dataset\n\n if mode in ['train', 'train_and_eval']:\n dataset_mode = 'train' if params['use_held_out_test_set'] else 'l2l_train'\n dataset_fn, _ = dataset_fn_fn(\n params['dataset_dir'],\n dataset_mode,\n training=True,\n use_bfloat16=params['use_bfloat16'])\n estimator = _make_estimator(params, is_training=True)\n estimator.train(dataset_fn, max_steps=params['max_global_step'])\n if mode in ['eval', 'train_and_eval']:\n dataset_mode = 'test' if params['use_held_out_test_set'] else 'l2l_valid'\n dataset_fn, dataset_size = dataset_fn_fn(\n params['dataset_dir'],\n dataset_mode,\n training=False,\n use_bfloat16=params['use_bfloat16'],\n final_batch_mode=fast_imagenet_input.FinalBatchMode.PAD)\n\n estimator = _make_estimator(params, is_training=False)\n for checkpoint_path in tf.train.checkpoints_iterator(\n params['checkpoint_dir']):\n eval_metrics = estimator.evaluate(\n dataset_fn,\n steps=dataset_size // params['eval_batch_size'],\n checkpoint_path=checkpoint_path)\n tf.logging.info('eval metrics = %s', eval_metrics)\n if eval_metrics['global_step'] >= params['max_global_step']:\n return eval_metrics", "title": "" }, { "docid": "078d2cb66a455b4f6c832ba1720ab237", "score": "0.57605547", "text": "def optimise(self, params):\n\n self.Model_sklearn.initialize_params(self.y, params)\n\n # logger.info(self.Model_sklearn.p)\n\n start = time.perf_counter()\n\n if self.x_val is None:\n # cross-validation\n fold_id = np.ones((len(self.y),)) * -1\n oof_val = np.zeros((self.y.shape[0], self.y.shape[1]))\n if \"time_series\" not in self.Model_sklearn.objective:\n # Cross-validation split in self.nfolds but train only on self.nfolds_train chosen randomly\n rd.seed(self.Model_sklearn.seed)\n fold_to_train = rd.sample([i for i in range(self.nfolds)], k=max(min(self.nfolds_train, self.nfolds), 1))\n\n if self.cv_strategy == \"StratifiedKFold\":\n skf = StratifiedKFold(n_splits=self.nfolds, random_state=self.Model_sklearn.seed, shuffle=True)\n folds = skf.split(self.y, self.y)\n else:\n kf = KFold(n_splits=self.nfolds, random_state=self.Model_sklearn.seed, shuffle=True)\n folds = kf.split(self.y)\n else:\n folds = [\n ([i for i in range(self.Model_sklearn.size_train)], [i for i in range(self.Model_sklearn.size_train, fold_id.shape[0])])]\n fold_to_train = [0]\n else:\n # validation\n fold_id = np.ones((len(self.y_val),)) * -1\n oof_val = np.zeros((self.y_val.shape[0], self.y_val.shape[1]))\n folds = [('all', [i for i in range(oof_val.shape[0])])]\n fold_to_train = [0]\n\n for n, (tr, te) in enumerate(folds):\n if n not in fold_to_train:\n continue\n\n if tr == 'all':\n # validation\n if isinstance(self.x, pd.DataFrame):\n x_tr, x_val = self.x.values, self.x_val.values\n else:\n x_tr, x_val = self.x, self.x_val\n if isinstance(self.y, pd.DataFrame):\n y_tr, y_val = self.y.values, self.y_val.values\n else:\n y_tr, y_val = self.y, self.y_val\n else:\n # cross-validation\n if isinstance(self.x, pd.DataFrame):\n x_tr, x_val = self.x.values[tr], self.x.values[te]\n else:\n x_tr, x_val = self.x[tr], self.x[te]\n if isinstance(self.y, pd.DataFrame):\n y_tr, y_val = self.y.values[tr], self.y.values[te]\n else:\n y_tr, y_val = self.y[tr], 
self.y[te]\n\n model = self.Model_sklearn.model()\n model.fit(x_tr, y_tr)\n\n if self.Model_sklearn.shape_y == 1:\n oof_val[te, :] = model.predict(x_val).reshape(-1, 1)\n else:\n oof_val[te, :] = model.predict(x_val)\n fold_id[te] = n\n del model\n\n metrics = []\n for i in range(self.Model_sklearn.shape_y):\n if self.x_val is None:\n # cross_validation\n if isinstance(self.y, pd.DataFrame):\n y_true = self.y.iloc[:, [i]].copy()\n else:\n y_true = self.y[:, i]\n else:\n # validation\n if isinstance(self.y, pd.DataFrame):\n y_true = self.y_val.iloc[:, i].copy()\n else:\n y_true = self.y_val[:, i]\n # subset, only use data where fold_id >= 0 :\n if isinstance(y_true, pd.DataFrame):\n y_true_sample = y_true.values[np.where(fold_id >= 0)[0]]\n else:\n y_true_sample = y_true[np.where(fold_id >= 0)[0]]\n prediction_oof_val = oof_val[:, i][np.where(fold_id >= 0)[0]]\n if 'regression' in self.Model_sklearn.objective:\n if 'explained_variance' == self.scoring:\n metrics.append(-explained_variance_score(y_true_sample, prediction_oof_val))\n elif 'r2' == self.scoring:\n metrics.append(-r2_score(y_true_sample, prediction_oof_val))\n else:\n metrics.append(mean_squared_error(y_true_sample, prediction_oof_val))\n else:\n if 'f1' in self.scoring:\n if 'binary' in self.Model_sklearn.objective:\n metrics.append(-f1_score(y_true_sample, prediction_oof_val))\n else:\n metrics.append(\n -f1_score(y_true_sample, prediction_oof_val, average=self.Model_sklearn.average_scoring))\n elif 'recall' in self.scoring:\n if 'binary' in self.Model_sklearn.objective:\n metrics.append(-recall_score(y_true_sample, prediction_oof_val))\n else:\n metrics.append(-recall_score(y_true_sample, prediction_oof_val,\n average=self.Model_sklearn.average_scoring))\n elif 'precision' in self.scoring:\n if 'binary' in self.Model_sklearn.objective:\n metrics.append(-precision_score(y_true_sample, prediction_oof_val))\n else:\n metrics.append(-precision_score(y_true_sample, prediction_oof_val,\n average=self.Model_sklearn.average_scoring))\n elif 'roc' in self.scoring or 'auc' in self.scoring:\n if 'binary' in self.Model_sklearn.objective:\n metrics.append(-roc_auc_score(y_true_sample, prediction_oof_val))\n else:\n metrics.append(-roc_auc_score(y_true_sample, prediction_oof_val,\n average=self.Model_sklearn.average_scoring))\n else:\n metrics.append(-accuracy_score(y_true_sample, prediction_oof_val))\n\n score = -np.mean(metrics)\n logger.info('oof_val score {} Metric {}'.format(self.scoring, score))\n\n # store hyperparameters optimization in a Dataframe self.df_all_results:\n self.df_all_results['mean_fit_time'].append(time.perf_counter() - start)\n self.df_all_results['params'].append(params)\n self.df_all_results['mean_test_score'].append(score)\n self.df_all_results['std_test_score'].append(0) # just 0\n\n return np.mean(metrics)", "title": "" }, { "docid": "6a9d06b232bf7f34923d426255fa4d69", "score": "0.57535625", "text": "def evaluate_model(self, model, test_X, test_y):\n acc = model.evaluate(test_X, test_y, verbose=0)[1]\n print('Accuracy on testing data: ', acc)", "title": "" }, { "docid": "0c95d3b3e1f4a22f0069419ead271cc7", "score": "0.5748173", "text": "def run_cross_validation(als, train):\n logger.info('Training ALS model with k-fold cross-validation..........')\n\n param_grid = (ParamGridBuilder()\n .addGrid(als.rank, [5, 10])\n .addGrid(als.maxIter, [10, 25, 50])\n .addGrid(als.regParam, [0.1, 0.01])\n .build())\n\n cv = CrossValidator(estimator=als,\n estimatorParamMaps=param_grid,\n 
evaluator=RegressionEvaluator(labelCol='count'),\n numFolds=5,\n parallelism=10,\n seed=42)\n\n # NOTE: Cut dataframe lineage before cross-validation to avoid local StackOverflow error\n spark.sparkContext.setCheckpointDir(\"/tmp\")\n\n cv_model = cv.fit(train)\n best_model = cv_model.bestModel\n logger.info('Average cross-validation RMSE by submodel: {}'.format(cv_model.avgMetrics))\n logger.info('Lowest cross-validation submodel RMSE: {}'.format(round(min(cv_model.avgMetrics), 3)))\n return best_model", "title": "" }, { "docid": "aa6abc0bdaea847b0a32c6d30986ea84", "score": "0.57477003", "text": "def _fit_cv(self, examples, labels, groups=None, selection_settings=None):\n selection_settings = selection_settings or self.config.param_selection\n cv_iterator = self._get_cv_iterator(selection_settings)\n\n if selection_settings is None:\n return self._fit(examples, labels, self.config.params), self.config.params\n\n cv_type = selection_settings[\"type\"]\n num_splits = cv_iterator.get_n_splits(examples, labels, groups)\n logger.info(\n \"Selecting hyperparameters using %s cross-validation with %s split%s\",\n cv_type,\n num_splits,\n \"\" if num_splits == 1 else \"s\",\n )\n\n scoring = self._get_cv_scorer(selection_settings)\n n_jobs = selection_settings.get(\"n_jobs\", -1)\n\n param_grid = self._convert_params(selection_settings[\"grid\"], labels)\n model_class = self._get_model_constructor()\n estimator, param_grid = self._get_cv_estimator_and_params(\n model_class, param_grid\n )\n # set GridSearchCV's return_train_score attribute to False improves cross-validation\n # runtime perf as it doesn't have to compute training scores and which we don't consume\n grid_cv = GridSearchCV(\n estimator=estimator,\n scoring=scoring,\n param_grid=param_grid,\n cv=cv_iterator,\n n_jobs=n_jobs,\n return_train_score=False,\n )\n model = grid_cv.fit(examples, labels, groups)\n\n for idx, params in enumerate(model.cv_results_[\"params\"]):\n logger.debug(\"Candidate parameters: %s\", params)\n std_err = (\n 2.0\n * model.cv_results_[\"std_test_score\"][idx]\n / math.sqrt(model.n_splits_)\n )\n if scoring == Model.LIKELIHOOD_SCORING:\n msg = \"Candidate average log likelihood: {:.4} ± {:.4}\"\n else:\n msg = \"Candidate average accuracy: {:.2%} ± {:.2%}\"\n # pylint: disable=logging-format-interpolation\n logger.debug(msg.format(model.cv_results_[\"mean_test_score\"][idx], std_err))\n\n if scoring == Model.LIKELIHOOD_SCORING:\n msg = \"Best log likelihood: {:.4}, params: {}\"\n self.cv_loss_ = -model.best_score_\n else:\n msg = \"Best accuracy: {:.2%}, params: {}\"\n self.cv_loss_ = 1 - model.best_score_\n\n best_params = self._process_cv_best_params(model.best_params_)\n # pylint: disable=logging-format-interpolation\n logger.info(msg.format(model.best_score_, best_params))\n\n return model.best_estimator_, model.best_params_", "title": "" }, { "docid": "326ea1e6bd1e503a6e87bd994356f83e", "score": "0.5743013", "text": "def build_model():\n \n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(KNeighborsClassifier()))\n ])\n\n \n parameters = {\n #'vect__max_df': (0.75, 1.0),\n 'tfidf__use_idf': (True, False),\n 'clf__estimator__n_neighbors': [3, 5]\n }\n \n\n cv = GridSearchCV(pipeline, parameters, cv = 3, n_jobs = -1)\n\n \n return cv", "title": "" }, { "docid": "9d4108d52cd5d39bda6b7f71b9a9e8ff", "score": "0.573901", "text": "def knn_cv(X_train, y_train, cv=5, verbose=False):\n pipe = imbPipeline(steps=[\n ('sample', 
SMOTE()), \n ('scaler', MinMaxScaler()), \n ('knn', KNeighborsClassifier())\n ])\n params = [{'knn__n_neighbors': range(2,25), 'knn__p': [1, 2]}]\n\n model = GridSearchCV(pipe, params, cv=cv, n_jobs=-1, scoring=f2scorer, verbose=verbose)\n model.fit(X_train, y_train)\n return model", "title": "" }, { "docid": "214e606b3c498b195dff125c2a6effa4", "score": "0.57385755", "text": "def test(self):\r\n # self.clf = LogisticRegression(penalty='l2', solver='newton-cg', multi_class='multinomial', C=10, max_iter=300, fit_intercept=True)\r\n self.clf = LogisticRegression()\r\n self.clf.set_params(**self.best_parameter)\r\n print(\"*** Test Result for Logistic Regression ***\")\r\n ModelEvaluation.evaluateModelWithCV(self.clf, self.dataset_x, self.dataset_y, cv=10)", "title": "" }, { "docid": "9602e4a850176d88fb8ae8e81e4c6677", "score": "0.5738553", "text": "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(RandomForestClassifier()))\n ])\n \n params = {'clf__estimator__n_estimators': [50, 100],\n 'clf__estimator__min_samples_split': [2, 4],\n 'clf__estimator__criterion': ['entropy', 'gini']\n }\n \n cv = GridSearchCV(pipeline, param_grid=params)\n \n return cv", "title": "" }, { "docid": "a27404cfc73845866ddce5de3b2d7e1c", "score": "0.57378215", "text": "def evaluate_classifier(self):\n pass", "title": "" }, { "docid": "e13d078adef53c2346e47c5148f0b685", "score": "0.57359105", "text": "def use_trained_model(project_dir, pickled_model, evaluate_performance, input_features = None, \\\n internal_features_to_drop = None, input_data = None, splits = None, \\\n add_new_splits = False, perf_by_sample = False):\n # first make sure features were supplied.\n if (input_features is None) and (internal_features_to_drop is None):\n raise ValueError('Features must be supplied, either through input_features argument \\\n (recommended) or by dropping from list of internal features.')\n # see if path to input data is supplied. if not, load from relative directory. 
\n if input_data is None:\n print('Using default path to engineered input data.')\n df = load_filter_engineered_df(project_dir)\n else:\n df = load_filter_engineered_df(project_dir, input_data)\n # use new splits created in cross-validation fold?\n # if so, use the data_splits in project_dir to define the splits\n print(f'input df has {df.shape[0]} variants.')\n if add_new_splits:\n df = load_and_merge_new_splits(engineered_df = df, fold_dir = project_dir)\n \n classifier = pickle.load(open(pickled_model, 'rb'))\n\n if input_features is not None: \n # check if any features are missing from df.\n for input_feature in input_features:\n if input_feature not in df.columns:\n raise ValueError(f'input feature {input_feature} is missing from dataframe.')\n features = input_features \n else:\n # drop from list of all internal features\n features = get_internal_features( use_purecn_purity = ('purity' in df.columns), internal_features_to_drop = internal_features_to_drop)\n features = list(features)\n print('finished selecting features.')\n print(f'df.columns: {df.columns}')\n print(f'features: {features}')\n print(f'Number of input variants to classify (before eval): {df.shape[0]}')\n if evaluate_performance:\n performance_df = evaluate_classifier(df, classifier, features, splits, perf_by_sample)\n performance_df.to_csv(make_filename(project_dir, pickled_model, \\\n '_full_performance_across_groups.csv'))\n single_samples = performance_df[performance_df['is_single_sample']]\n if single_samples.shape[0] > 0: # if there are some single samples\n print('Performance averaged across single samples.')\n # It wouldn't make sense to average over these two categorical columns\n mean_by_sample = single_samples.drop(columns = ['subset','is_single_sample']).mean()\n print(mean_by_sample)\n mean_by_sample.to_csv(make_filename(project_dir, pickled_model, \\\n '_average_performance_stats_by_sample.csv'))\n \n X_full = df[features]\n #print(f'df.shape: {df.shape}')\n X_full_values = X_full.fillna(0).values\n #print(f'X_full_values.shape: {X_full_values.shape}')\n\n # make binary predictions and probabilities for saving.\n y_pred = classifier.predict(X_full_values)\n y_proba = classifier.predict_proba(X_full_values)\n #print(f'y_pred.shape: {y_pred.shape}')\n # df already has features plus truth values; add y_pred prediction and class probabilities.\n df['tabnet_pred'] = y_pred\n df['tabnet_proba_0'] = y_proba[:,0]\n df['tabnet_proba_1'] = y_proba[:,1]\n\n # uncomment if you need to generate the full matrix including unused features.\n output_predictions_path = make_filename(project_dir, pickled_model, '_predictions_all_features.csv')\n print(output_predictions_path)\n df.to_csv(output_predictions_path)\n # drop superfluous unused features from dataframe.\n #if input_features is not None:\n # no good way to do this. 
skipping!\n # df_dropped_unused_features = df[input_features]\n #else:\n # intersect_features_to_drop = set(df.columns).intersection(set(internal_features_to_drop))\n # df_dropped_unused_features = df.drop(columns = list(intersect_features_to_drop))\n #df_dropped_unused_features.reset_index(drop = True).to_csv(make_filename(project_dir, pickled_model, '_predictions.csv'))", "title": "" }, { "docid": "3636ad9abfa742b023fb9641061f8a97", "score": "0.57272565", "text": "def build_model():\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(estimator=RandomForestClassifier()))\n ])\n print(pipeline.get_params())\n parameters = {\n 'clf__estimator__n_estimators': [10, 15, 20],\n 'vect__min_df': [1, 2]\n }\n cv = GridSearchCV(pipeline, param_grid=parameters)\n return cv", "title": "" }, { "docid": "8875475424f1ceffb6c8b644fc650226", "score": "0.57260394", "text": "def evaluate_classifier(clf,features, labels, num_iter, test_size, params):\n\n features_train, features_test, labels_train, labels_test = train_test_split(features, labels, test_size=test_size, random_state=42)\n\n\n precision_values = []\n recall_values = []\n accuracy_values = []\n\n for i in xrange(0, num_iter):\n #print params\n clf = GridSearchCV(clf, params)\n clf.fit(features_train, labels_train)\n\n clf = clf.best_estimator_\n #test_classifier(clf, dataset, feature_list)\n pred = clf.predict(features_test)\n precision_values.append(precision_score(labels_test, pred))\n recall_values.append(recall_score(labels_test, pred))\n accuracy_values.append(accuracy_score(labels_test, pred))\n \n print '_______________________________________________________________'\n print clf\n \n print 'Recall score: ', mean(recall_values)\n print 'Precision score: ', mean(precision_values)\n print 'Accuracy score: ' , mean(accuracy_values)", "title": "" }, { "docid": "3f8fa6b107c18826fe40f6f211ccdf67", "score": "0.57215756", "text": "def timeseries_cv(model, data, n_splits=5):\n X_train, X_test, y_train, y_test = ts_train_test_split(data)\n \n tscv = TimeSeriesSplit(n_splits=n_splits)\n scores = np.sqrt(-cross_val_score(estimator=model, X=X_train.values, y=y_train.values, cv=tscv, scoring='neg_mean_squared_error', n_jobs=-1))\n \n return scores", "title": "" }, { "docid": "65fa2da24a25f001cc1820d7ae0213df", "score": "0.5721298", "text": "def testClassifier():\n # load problem\n print(\"Loading testing data...\")\n y, x = svm_read_problem(\"./libsvm_test_problem\")\n problem = svm_problem(y, x)\n\n # load pretrained model\n model = svm_load_model(\"./trained-model-rbf\")\n\n # test model on training data\n print(\"Testing model...\")\n pLabel, pAcc, pVal = svm_predict(y, x, model)", "title": "" }, { "docid": "c1625a65b8b15e73501a605e3f726d5b", "score": "0.57195264", "text": "def cv_score(clf,\n X,\n y,\n sample_weight=None,\n scoring='neg_log_loss',\n n_splits=3,\n t1=None,\n cv_gen=None,\n pct_embargo=0.,\n purging=True,\n return_combs=False,\n ret=None,\n num_threads=1,\n **kwargs):\n if cv_gen is None:\n if t1 is not None:\n cv_gen = PurgedKFold(\n n_splits=n_splits,\n t1=t1,\n pct_embargo=pct_embargo,\n purging=purging,\n num_threads=num_threads)\n else:\n cv_gen = KFold(n_splits=n_splits)\n elif cv_gen == 'cp':\n cv_gen = CPKFold(\n n_splits=n_splits,\n t1=t1,\n pct_embargo=pct_embargo,\n purging=purging,\n num_threads=num_threads)\n scores = []\n for train, test in cv_gen.split(X=X):\n train_params = dict()\n test_params = dict()\n # Sample weight is an 
optional parameter\n if sample_weight is not None:\n train_params['sample_weight'] = sample_weight.iloc[train].values\n test_params['sample_weight'] = sample_weight.iloc[test].values\n test_params.update(kwargs)\n clf_fit = clf.fit(\n X=X.iloc[train, :].values, y=y.iloc[train].values, **train_params)\n if hasattr(clf_fit, 'classes_'):\n test_params['labels'] = clf_fit.classes_\n if ret is not None:\n test_params['ret'] = ret.iloc[test]\n # Scoring\n score_ = evaluate(clf_fit, X.iloc[test, :].values, y.iloc[test].values,\n scoring, **test_params)\n scores.append(score_)\n if scoring not in ['roc', 'precision_recall']:\n scores = np.array(scores)\n if return_combs:\n return scores, cv_gen.get_test_combs()\n else:\n return scores", "title": "" }, { "docid": "a6c22cdbaba02f0f2138bd38141ca2cb", "score": "0.5714878", "text": "def testOverall():\n # load problem\n print(\"Loading testing data...\")\n y, x = svm_read_problem(\"./libsvm_all_problem\")\n problem = svm_problem(y, x)\n\n # load pretrained model\n model = svm_load_model(\"./trained-model-rbf\")\n\n # test model on training data\n print(\"Testing model...\")\n pLabel, pAcc, pVal = svm_predict(y, x, model)", "title": "" }, { "docid": "8977ad810ac31c19ff17fb7777b5396f", "score": "0.5714392", "text": "def evaluate_model(self, model):\n\n biases, stereotypes = self.gender_neutrality_test(model)\n preds = self.gender_identification_test(model)\n ppl = self.PTB_test(model)\n\n self._log_neutrality(biases, stereotypes)\n self._log_identification(preds)\n self._log_PTB(ppl)\n\n return self.score_data", "title": "" }, { "docid": "0a6c3d5dd720194c83cc2af0bf025cec", "score": "0.5713862", "text": "def score_model(model, X_train, y_train, X_test, y_test):\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n\n if is_classifier(model):\n score = f1_score(y_test, y_pred)\n else:\n score = r2_score(y_test, y_pred)\n return model, score", "title": "" }, { "docid": "a227d4b9a32c42d496574d81eb31b11f", "score": "0.57075506", "text": "def evaluate_model(model, meta_data, test_data_folder):\n print(\"[Info] Cargando test dataset\")\n X, Y = load_test_dataset(meta_data, test_data_folder)\n print(f\"[Info] Imagenes cargadas {len(X)}\")\n\n loss, acc = model.evaluate(X, Y)\n print(f\"[Info] loss: {loss}\\taccuracy: {acc}\")", "title": "" }, { "docid": "b1d50040a146ead260b28347e3513874", "score": "0.57074225", "text": "def test_trained_model(X_test,y_test,y_train_labels,y_test_labels,lamda_list,model_num):", "title": "" }, { "docid": "f91393142be69e6580726bacc560ec15", "score": "0.5698971", "text": "def run_xval_evaluation(task, arch='segnet_proper', pretrained=None,\n hyperparams={}, test_keys=None, fit=True):\n # import os\n # os.environ['GLOG_minloglevel'] = '3'\n\n from clab import harness\n # from clab import models\n\n xval_base = abspath(task.exptdir('xval'))\n\n xval_results = []\n for idx, xval_split in enumerate(task.xval_splits(test_keys=test_keys)):\n print(ub.color_text('XVAL iteration {}'.format(idx), 'blue'))\n\n (train, test) = xval_split\n xval_dpath = ub.ensuredir((xval_base, 'split_{:0=2}'.format(idx)))\n\n harn = harness.Harness(workdir=xval_dpath, arch=arch, task=task)\n harn.task = task\n harn.set_inputs(train, test)\n\n harn.init_pretrained_fpath = pretrained\n harn.init_pretrained_fpath = None\n harn.params.update(hyperparams)\n\n harn.test.prepare_images()\n harn.train.prepare_images()\n\n harn.gpu_num = gpu_util.find_unused_gpu(min_memory=6000)\n print('harn.gpu_num = {!r}'.format(harn.gpu_num))\n if harn.gpu_num is not 
None:\n avail_mb = gpu_util.gpu_info()[harn.gpu_num]['mem_avail']\n # Estimate how much we can fit in memory\n # TODO: estimate this from the model arch instead.\n # (90 is a mgic num corresponding to segnet_proper)\n harn.train_batch_size = int(\n (avail_mb * 90) // np.prod(task.input_shape))\n harn.train_batch_size = int(harn.train_batch_size)\n if harn.train_batch_size == 0:\n raise MemoryError(\n 'not enough GPU memory to train the model')\n else:\n # not sure what the best to do on CPU is. Probably nothing.\n harn.train_batch_size = 4\n\n harn.prepare_solver()\n\n # Check if we can resume a previous training state\n print(ub.color_text('Checking for previous snapshot states', 'blue'))\n previous_states = harn.snapshot_states()\n print('Found {} previous_states'.format(len(previous_states)))\n\n if fit:\n if len(previous_states) == 0:\n print(ub.color_text('Starting a fresh training session', 'blue'))\n harn.fit()\n else:\n from clab.backend import iface_caffe as iface\n solver_info = iface.parse_solver_info(harn.solver_fpath)\n prev_state = previous_states[-1]\n prev_iter = iface.snapshot_iterno(prev_state)\n if prev_iter < solver_info['max_iter']:\n # continue from the previous iteration\n print(ub.color_text(\n 'Resume training from iter {}'.format(prev_iter), 'blue'))\n harn.fit(prevstate_fpath=prev_state)\n else:\n print(ub.color_text(\n 'Already finished training this model', 'blue'))\n\n for _ in harn.deploy_trained_for_testing():\n # hack to evaulate while deploying\n harn.evaulate_all()\n xval_results.append(list(harn._test_results_fpaths()))\n return xval_results", "title": "" }, { "docid": "61203862b24fdd9277d5c8fbdf8652ca", "score": "0.56965387", "text": "def sl_model_evaluation(classifier, eval_df):\n\n untampered_df = eval_df[eval_df['tamper'] == 1]\n attacks_df = eval_df[eval_df['tamper'] == -1]\n\n y_pred_test = classifier.predict(np.asarray(untampered_df[FEATURES]))\n y_pred_outliers = classifier.predict(np.asarray(attacks_df[FEATURES]))\n\n # Format the output of the classifier\n y_pred_test[y_pred_test == 0] = -1\n y_pred_outliers[y_pred_outliers == 0] = -1\n\n n_accurate_test = y_pred_test[y_pred_test == 1].size\n n_accurate_outliers = y_pred_outliers[y_pred_outliers == -1].size\n\n\n f_beta = fbeta_score(np.concatenate([np.ones(y_pred_test.shape[0]),\n -1 * np.ones(y_pred_outliers.shape[0])]),\n np.concatenate([y_pred_test, y_pred_outliers]),\n beta=20,\n pos_label=1)\n\n tnr = n_accurate_outliers / attacks_df.shape[0]\n tpr_test = n_accurate_test / untampered_df.shape[0]\n\n return f_beta, tnr, tpr_test", "title": "" }, { "docid": "bcae29934c124893a8bc0ac7eab03b32", "score": "0.56955665", "text": "def evaluation(x, y, cut_size, w, h, fimg, model):\n fimg = cv2.imread(fimg.filename)\n image = fimg[y:y+cut_size, x:x+cut_size]\n # cv2.imshow(\"cropped\", image)\n\n results = model.get_predictions(image=image, plot=False)\n\n if len(results) == 0:\n return None\n\n c = results.cpu().numpy()\n\n if(x != 0 and y != 0 and x+cut_size != w and y+cut_size != h):\n i = 0\n while i < c.shape[0]:\n if(c[i, 0] < settings.BOARDCACHE or c[i, 1] < settings.BOARDCACHE or\n c[i, 2] < settings.BOARDCACHE or c[i, 3] < settings.BOARDCACHE):\n c = np.delete(c, i, axis=0)\n i -= 1\n i += 1\n i = 0\n\n while i < c.shape[0]:\n c[i, 0] += x\n c[i, 1] += y\n c[i, 2] += x\n c[i, 3] += y\n i += 1\n\n return c", "title": "" }, { "docid": "dfa0dab4fe08c8bb9d6e128d28a311d1", "score": "0.568342", "text": "def score_model(\n self, target=\"estimated_demand\", true_demand=\"estimated_demand\", 
cv=5\n ):\n\n # Get the product history and drop the date and any possible demand columns\n records = self.history\n X = records.drop(\n columns=[\"date\", \"estimated_demand\", \"measured_demand\", \"demand\"],\n errors=\"ignore\",\n )\n\n # Get the specified column as the target data for training\n y = records[[target]]\n\n # Get the specified column for evaluation of production predictions\n demand = records[[true_demand]]\n\n # set up lists for recording values for each fold of the cross validation\n score = {\n \"val_loss\": [],\n \"days\": [],\n \"demand\": [],\n \"produced\": [],\n \"revenue\": [],\n \"cost\": [],\n \"sales\": [],\n \"profit\": [],\n \"waste\": [],\n }\n\n # split is done in groups, but could be switched to random samples\n # splitter = ShuffleSplit(n_splits=cv, test_size=1/cv)\n splitter = KFold(n_splits=cv, shuffle=False)\n for train_indices, test_indices in splitter.split(X):\n X_train, X_test = X.iloc[train_indices], X.iloc[test_indices]\n y_train, y_test = y.iloc[train_indices], y.iloc[test_indices]\n y_true = demand.iloc[test_indices].to_numpy()\n score[\"demand\"].append(y_true)\n\n # only look for fit_history if epochs is present, i.e. if model is a deep learning model.\n if hasattr(self.model, \"epochs\"):\n self.model.fit(\n X_train, y_train, validation_data=(X_test, y_test), reset_model=True\n )\n score[\"val_loss\"].append(self.model.fit_history[\"val_loss\"])\n else:\n self.model.fit(X_train, y_train)\n\n # Get productions on test set.\n production = self.model.predict(X_test)\n\n # Get total produced over all days.\n produced = np.sum(production)\n score[\"produced\"].append(produced)\n\n # Find actual sales\n sales = np.sum(np.min(np.hstack([y_true, production]), axis=1))\n score[\"sales\"].append(sales)\n revenue = sales * self.unit_sale_price\n score[\"revenue\"].append(revenue)\n cost = np.sum(production) / self.batch_size * self.batch_cost\n score[\"cost\"].append(cost)\n score[\"days\"].append(X_test.shape[0])\n profit = revenue - cost\n score[\"profit\"].append(profit)\n waste = produced - sales\n score[\"waste\"].append(waste)\n\n total_days = np.sum(score[\"days\"])\n score[\"revenue_avg\"] = np.sum(score[\"revenue\"]) / total_days\n score[\"profit_avg\"] = np.sum(score[\"profit\"]) / total_days\n score[\"profit_margin\"] = (\n np.sum(score[\"profit\"]) * 100.0 / np.sum(score[\"revenue\"])\n )\n score[\"waste_pct\"] = np.sum(score[\"waste\"]) * 100 / np.sum(score[\"produced\"])\n score[\"waste_avg\"] = np.sum(score[\"waste\"]) / total_days\n return score", "title": "" }, { "docid": "808eec7baabc23944bec6c2b73fe8fe4", "score": "0.567953", "text": "def cv_train(modelname,config,kfold,lstep,n_samples,feature_weight,\n\t\t\t pkl_filename=\"data/adni.pkl\",json_filename=\"conf/dataConfig.json\"):\n\n\t\"\"\"load data and data config\n\tThe data file *pkl contains three parts: {'demo','dync','max_len'}\n\t\t'demo' : a list of dataframes storing patients' demographics information\n\t\t'dync' : a list of dataframes storing patients' dynamic information, including continous features and diganosis\n\t\t'max_len' : int, the maximum lengths of patient sequence\n\t\"\"\"\n\tadni = _pickle.load(open(pkl_filename,\"rb\"))\n\tdataConfig = json.load(open(json_filename))\n\tmax_len = adni[\"max_len\"]\n\t\n\t\"\"\"\n\trecord val_loss change / trends;\n\tthe smallest loss value for each fold is given by the best model\n\t\"\"\"\n\tloss_curve = {}\n\n\t# build model graph\n\tif modelname == \"rnn\":\n\t\tmodel = 
rnn(len(dataConfig[\"demo_vars\"]),len(dataConfig[\"input_x_vars\"]),len(dataConfig[\"input_y_vars\"]),\n\t\t\t\t\t\t\t max_len,config[\"batch_size\"],config[\"n_h\"],config[\"n_z\"],lstep,feature_weight)\n\telif modelname == \"stocast\":\n\t\tmodel = stocast(len(dataConfig[\"demo_vars\"]),len(dataConfig[\"input_x_vars\"]),len(dataConfig[\"input_y_vars\"]),\n\t\t\t\t\t\tmax_len,config[\"batch_size\"],config[\"n_h\"],config[\"n_z\"],lstep,feature_weight)\n\telif modelname == \"storn\":\n\t\tmodel = storn(len(dataConfig[\"demo_vars\"]),len(dataConfig[\"input_x_vars\"]),len(dataConfig[\"input_y_vars\"]),\n\t\t\t\t\t\tmax_len,config[\"batch_size\"],config[\"n_h\"],config[\"n_z\"],lstep,feature_weight)\n\telif modelname == \"retain\":\n\t\tmodel = retain(len(dataConfig[\"demo_vars\"]),len(dataConfig[\"input_x_vars\"]),len(dataConfig[\"input_y_vars\"]),\n\t\t\t\t\t\tmax_len,config[\"batch_size\"],config[\"n_h\"],config[\"n_z\"],lstep,feature_weight)\n\telif modelname == \"tlstm\":\n\t\tmodel = tlstm(len(dataConfig[\"demo_vars\"]),len(dataConfig[\"input_x_vars\"]),len(dataConfig[\"input_y_vars\"]),\n\t\t\t\t\t\tmax_len,config[\"batch_size\"],config[\"n_h\"],config[\"n_z\"],lstep,feature_weight)\n\n\n\n\t# saving ...\n\tdirname = \"save/{} {}\".format(modelname,time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time.time())))\n\tif not os.path.exists(dirname):\n\t\tos.makedirs(dirname)\n\n\twith tf.Session() as sess:\n\t\tsummary_writer = tf.summary.FileWriter('logs/' + datetime.now().isoformat().replace(':', '-'), sess.graph)\n\t\tmerged = tf.summary.merge_all()\n\t\tsaver = tf.train.Saver(tf.global_variables(),max_to_keep=1)\n\n\t\tstart = time.time()\n\t\ttotaltime = 0\n\t\tfor k in range(kfold):\n\t\t\t\n\t\t\t\"\"\" if k = 0, random initialization params \"\"\"\n\t\t\t\"\"\" else, inherit previous best model's params \"\"\"\n\t\t\tif k == 0:\n\t\t\t\ttf.global_variables_initializer().run()\n\t\t\telse:\n\t\t\t\tckpt = tf.train.get_checkpoint_state(dirname)\n\t\t\t\tsaver.restore(sess, ckpt.model_checkpoint_path)\n\n\t\t\t# split into trainining, validation, testing data\n\t\t\ttrain_ds,valid_ds = split_data(k,kfold,adni,dataConfig,max_len)\n\n\t\t\t# train\n\t\t\tminVlloss = 1e10\n\t\t\tloss_curve[k] = []\n\t\t\tn_batchs = int(train_ds.num_examples / config[\"batch_size\"])\n\n\t\t\t# each epoch\n\t\t\tno_improvement = 0 # number of no improvements\n\t\t\te = 0\n\t\t\twhile e < config[\"num_epochs\"]:\n\t\t\t\tsess.run(tf.assign(model.lr, config[\"learning_rate\"] * (config[\"decay_rate\"] ** e)))\n\n\t\t\t\tfor b in range(n_batchs):\n\t\t\t\t\n\t\t\t\t\twrap = train_ds.next_batch(config[\"batch_size\"])\n\t\t\t\t\t\n\t\t\t\t\tfeed = {model.input_demo: wrap['input_demo'],\n\t\t\t\t\t\t\tmodel.input_x: wrap['input_x'],\n\t\t\t\t\t\t\tmodel.input_y: wrap['input_y'],\n\t\t\t\t\t\t\tmodel.input_dt: wrap['input_dt'],\n\t\t\t\t\t\t\tmodel.seqlens: wrap['seqlens'],\n\t\t\t\t\t\t\tmodel.mask: wrap['mask']}\n\t\t\t\t\n\t\t\t\t\t_,loss,summary = sess.run([model.train_op, model.loss, merged],feed)\n\t\t\t\t\tsummary_writer.add_summary(summary, e * n_batchs + b)\n\n\t\t\t\t# validation\n\t\t\t\tvloss = val_loss(sess,model,valid_ds,config[\"batch_size\"])\n\t\t\t\tloss_curve[k].append(vloss)\n\t\t\t\t\n\t\t\t\tprint(\" |- FOLD:%d, EPOCH:%d, VLOSS:%.4f\" % (k,e,vloss))\n\n\t\t\t\tif minVlloss > vloss:\n\t\t\t\t\tminVlloss = vloss\n\t\t\t\t\tcheckpoint_path = os.path.join(dirname, \"best_model_k={}_e={}.ckpt\".format(k,e))\n\t\t\t\t\tsaver.save(sess, checkpoint_path, global_step=e * n_batchs + k * 
config[\"num_epochs\"] * n_batchs)\n\t\t\t\t\tprint(\" |- Best model saved to {}\".format(checkpoint_path))\n\t\t\t\t\tno_improvement = 0\n\t\t\t\telse:\n\t\t\t\t\tno_improvement += 1\n\t\t\t\t\n\t\t\t\t# if the number of improvement reaches 10, stop running\n\t\t\t\tif no_improvement < 10:\n\t\t\t\t\te += 1\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\n\t\t\tend = time.time()\n\t\t\tprint(\"|- %2d fold costs %.4f seconds.\\n\" % (k,end-start))\n\t\t\ttotaltime += end-start\n\t\t\tstart = time.time()\n\t\tprint(\"Total train time is %.4f seconds.\" % totaltime)\n\n\t\t# testing\n\t\tprint(\"Starting testing\")\n\t\tckpt = tf.train.get_checkpoint_state(dirname)\n\t\tif ckpt:\n\t\t\tsaver.restore(sess, ckpt.model_checkpoint_path)\n\t\t\tprint(\"Loading model: \",ckpt.model_checkpoint_path)\n\t\t\n\t\ttest_ds = DataSet(dataConfig,adni[\"demo\"],adni[\"dync\"],max_len)\n\t\ttest_res = test(sess,model,modelname,test_ds,\n\t\t\tconfig[\"batch_size\"],max_len,dataConfig[\"input_y_vars\"],lstep,n_samples=n_samples)\n\t\t\n\t\tprint(\"Saving test results...\")\n\t\t\"\"\"The results are saved in the following format.\n\t\tres = pickle.load(open(filename,'rb'))\n\t\t\tres : a list of dicts, with each dict() stores the prediction results corresponding to a specific patient\n\t\t\tres[i] : the dict for patient i, {'curr_labels','target_labels','pred_pi','target_features','pred_mu'}\n\t\t\t\t'curr_labels' : a list of labels\n\t\t\t\t'target_labels' : a list of target labels\n\t\t\t\t'pred_pi' : a list of predictions, the length is timesteps\n\t\t\t\t\t- pred_pi[t] is a 1d array for deterministic methods, or a 2d array for stocast with size (n_samples, 3)\n\t\t\t\t'target_features' : list, the length is timesteps\n\t\t\t\t\t- target_features[t] : a 1d array\n\t\t\t\t'pred_mu' : list, the length is timesteps\n\t\t\t\t\t- pred_mu[t] : a 1d array for deterministic methods, or a 2d array for stocast\n\t\t\"\"\"\n\n\t\tdirname = \"result_fw={}/{}\".format(feature_weight,modelname)\n\t\tif not os.path.exists(dirname):\n\t\t\tos.makedirs(dirname)\n\t\t_pickle.dump(test_res,open(os.path.join(dirname,\"lstep{}_nsamples{}_result.pkl\".format(lstep,n_samples)),\"wb\"))\n\t\t_pickle.dump(loss_curve,open(os.path.join(dirname,\"lstep{}_nsamples{}_losses.pkl\".format(lstep,n_samples)),\"wb\"))", "title": "" }, { "docid": "b09cd531a8c96a4c83a59cd50bcd6a32", "score": "0.56758386", "text": "def test(self):\n self.model.eval()\n self.predictions = []\n self.targets = []\n for cluster in self.clustering_machine.clusters:\n prediction, target = self.do_prediction(cluster)\n self.predictions.append(prediction.cpu().detach().numpy())\n self.targets.append(target.cpu().detach().numpy())\n self.targets = np.concatenate(self.targets)\n self.predictions = np.concatenate(self.predictions).argmax(1)\n score = f1_score(self.targets, self.predictions, average=\"micro\")\n print(\"\\nF-1 score: {:.4f}\".format(score))", "title": "" } ]
1015f0d475a10360dd0e41ccacad1fc6
Create a site configuration from a Volt config file.
[ { "docid": "2cca93d684692315c46caed80f111696", "score": "0.0", "text": "def from_file_name(\n cls,\n invoc_dir: Path,\n project_dir: Path,\n config_file_name: str,\n **kwargs: Any,\n ) -> Self:\n config_path = project_dir / config_file_name\n with config_path.open() as src:\n user_conf = cast(Dict[str, Any], tomlkit.load(src))\n\n return cls(\n invoc_dir=invoc_dir,\n project_dir=project_dir,\n user_conf=user_conf,\n config_path=config_path,\n **kwargs,\n )", "title": "" } ]
[ { "docid": "8c9a1a385e5301c630e9e1751cbf39e6", "score": "0.713112", "text": "def load_from_file(self, config_file):\n with open(config_file, 'r') as f:\n parser = ConfigParser()\n parser.readfp(f)\n if parser.has_option('site', 'outdir'):\n self.outdir = os.path.expanduser(parser.get('site', 'outdir'))", "title": "" }, { "docid": "420991adb29a7dbd7c29181d763ab85b", "score": "0.6845928", "text": "def load_site_config(site_name = \"cmc\"):\n (the_site_name, the_site_host, the_site_port) = \\\n get_site_specific_info(DB_PATH, site_name)\n yml = yaml.load(open(os.path.join(EXC_PATH, \"config\", \"site.yaml\")))\n\n #name is first field in table\n the_site = yml[ the_site_name ]\n the_site['host'] = the_site_host\n the_site['port'] = the_site_port\n return the_site", "title": "" }, { "docid": "fa3d5fb6f03c76e141ee9d2efeedf78c", "score": "0.6822562", "text": "def load_from_file(self, config_file):\n with open(config_file, 'r') as f:\n self.parser.readfp(f)\n if self.parser.has_option('site', 'domain'):\n self._domain = self.parser.get('site', 'domain')\n\n if self.parser.has_option('site', 'outdir'):\n outdir = os.path.expanduser(self.parser.get('site', 'outdir'))\n if not os.path.isabs(outdir):\n path = os.path.dirname(config_file)\n outdir = os.path.abspath(os.sep.join([path, outdir]))\n self.outdir = outdir\n\n if self.parser.has_section('site'):\n self._find_extensions(self.parser)", "title": "" }, { "docid": "7989c2ff1abb8120c9697cb1095d6013", "score": "0.62964994", "text": "def parseConfig(file):\n \n sites = []\n # read all lines\n for line in open(file, 'r'):\n line = line.strip()\n if not re.match(r'^[^#]', line):\n continue\n \n args = [arg for arg in line.split(';')]\n sites.append(Site(args[0], args[1], args[2:]))\n\n return sites", "title": "" }, { "docid": "f69f397605260786404e366bb5832e5e", "score": "0.6267531", "text": "def up_site_config():\n\n apache_root = '/etc/apache2'\n apache_available = os.path.join(apache_root, 'sites-available')\n apache_enabled = os.path.join(apache_root, 'sites-enabled')\n app_conf = os.path.join(apache_available, '%s.conf' % env.server_name)\n\n fabtools.files.upload_template(\n 'apache_host.conf.tpl',\n app_conf,\n context=env,\n template_dir=os.path.join(env.lib_path, 'templates'),\n use_jinja=True,\n use_sudo=True,\n user='root',\n chown=True,\n mode='644',\n )\n\n fabric.api.execute(apache_enable_site, env.server_name)", "title": "" }, { "docid": "2a2a160f05eca66dc262a86949c16a6d", "score": "0.6153214", "text": "def configure(name):\n\n CONFIG = {}\n module = __import__('cfgs.%s' % name)\n CONFIG.update(getattr(module, name).CONFIG)\n\n from string import Template\n\n print '\\tCreate src/settings.py'\n with open('./tpls/settings.template', 'r') as src:\n with open('./src/settings.py', 'w') as dst:\n template = Template(src.read())\n dst.write(template.substitute(CONFIG))\n dst.close()\n src.close()\n\n print '\\tCreate fabfile/hosts.py'\n with open('./tpls/hosts.template', 'r') as src:\n with open('./fabfile/hosts.py', 'w') as dst:\n CONFIG.update({\n 'DB_USER': CONFIG['DATABASES']['default']['USER'],\n 'DB_PASSWORD': CONFIG['DATABASES']['default']['PASSWORD'],\n })\n template = Template(src.read())\n dst.write(template.substitute(CONFIG))\n dst.close()\n src.close()", "title": "" }, { "docid": "702e324a431ed7f34107a381f6f1bf0f", "score": "0.61366117", "text": "def init_config(config_file):\r\n import ConfigParser\r\n config = ConfigParser.ConfigParser()\r\n config.read(config_file)\r\n return config", "title": "" }, { "docid": 
"e63d3abfa889399a6fce42be4d5c205a", "score": "0.59605837", "text": "def genconf(quiet: bool, domain: str) -> None:\n explain_step(f\"HTTPS configuration file for website {domain}:\", quiet=quiet)\n with path(package=\"webserver\", resource=\"config\") as folder:\n click.echo(f\"#=== Generated with webserver v{__version__} ===#\")\n run_command(\n f'cat {folder}/443-template.conf {folder}/examples/* | sed \"s/{{{{SERVER_NAME}}}}/{domain}/g\"'\n )\n click.echo(\"}\")", "title": "" }, { "docid": "b365dfb20fe291b19fb9676f4f244d5d", "score": "0.5919497", "text": "def new_from_template(filename):\n import opus\n opus_dir = opus.__path__[0]\n configfile = os.path.join(opus_dir, \"lib\", \"conf\", \"default_settings.json\")\n config = OpusConfig(configfile)\n config._filename = filename\n config[\"SECRET_KEY\"] = ''.join([random.choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for _ in range(50)])\n return config", "title": "" }, { "docid": "ff13c4e6975a74a54a68007bfb88cdbe", "score": "0.58986014", "text": "def from_file(self, config_file):\n with open(str(config_file), 'r') as f:\n self.config = yaml.load(f)\n self.set_env_vars()", "title": "" }, { "docid": "b073a2b4404fd3963b85fffc9c818d20", "score": "0.58558446", "text": "def createConfig(self):\n config = configparser.ConfigParser()\n config.add_section(\"DEVICE\")\n config.set(\"DEVICE\", \"port\", \"com7\")\n config.set(\"DEVICE\", \"baudrate\", '9600')\n config.set(\"DEVICE\", \"timeout\", '1')\n \n config.add_section(\"DB\")\n config.set(\"DB\", \"db\", \"checkStaff.db\")\n\n config.add_section(\"MACHINE\")\n config.set(\"MACHINE\", \"stanok\", '77')\n\n config.add_section(\"CONNECT_1C\")\n config.set(\"CONNECT_1C\", \"url_http_service\" , 'http://1c-www-server/erp/hs/staffstatis/import/')\n config.set(\"CONNECT_1C\", \"user\" , '1cv8')\n config.set(\"CONNECT_1C\", \"password\" , '')\n\n\n with open(self.path, \"w\") as config_file:\n config.write(config_file)", "title": "" }, { "docid": "c0ed624af3378322aa6c646abc4341ce", "score": "0.58347595", "text": "def up_site_config():\n nginx_root = '/etc/nginx'\n nginx_available = os.path.join(nginx_root, 'sites-available')\n nginx_enabled = os.path.join(nginx_root, 'sites-enabled')\n app_conf = os.path.join(nginx_available, '%s.conf' % env.server_name)\n\n fabtools.files.upload_template(\n 'nginx.conf.tpl',\n app_conf,\n context=env,\n template_dir=os.path.join(env.lib_path, 'templates'),\n use_jinja=True,\n use_sudo=True,\n user='root',\n chown=True,\n mode='644',\n )\n\n if not fabtools.files.is_link('%s/%s.conf' % (nginx_enabled, env.server_name)):\n with fabric.api.cd(nginx_enabled):\n fabric.api.sudo('ln -s %s .' 
% app_conf)", "title": "" }, { "docid": "e81a1fb0236cc3af2cd32ac881254e3a", "score": "0.58207387", "text": "def build_config(self):\n self.tempest_configure()\n # find template\n template_path = os.path.join(os.path.dirname(\n os.path.abspath(__file__)), os.pardir,\n \"data/files/tempest_neutron.conf\")\n\n # open template and add values\n with open(template_path) as f:\n template = Template(f.read()).substitute(\n self.tempest_config)\n\n # save config\n with open(self.path, 'w') as w:\n logger.info(\"Writing tempest config: {0}\".format(self.path))\n logger.debug(template)\n w.write(template)", "title": "" }, { "docid": "3860d55c92b946340c3895ef345e4e3b", "score": "0.5811174", "text": "def conf(request):\n path = request.config.getoption(\"vars\")\n return BaseConfig(path)", "title": "" }, { "docid": "a7d6b55acff773a6decd34b81351158f", "score": "0.57835644", "text": "def create_conf():\n with open('config.py', 'w') as fd:\n fd.write(\"BUFF_SIZE = 4096\\n\")\n fd.write(\"PATH_FILES = '/tmp/'\\n\")\n fd.write(\"SRV_ADDR = '127.0.0.1'\\n\")\n fd.write(\"SRV_PORT = 5100\\n\")\n fd.write(\"LOG_FILE = '/tmp/logfile.log'\\n\")\n fd.write(\"PID_FILE = '/tmp/atc_daemon.pid'\\n\")", "title": "" }, { "docid": "1074ef701e6f0b30e29681267104623d", "score": "0.57806355", "text": "def conf():\n text = \"\"\"\\\nThis is a simple prolog.\n\n[TEST]\n# This is a test section.\n\nvalue1 = 53\nvalue2 = 54\n\"\"\"\n conf = seamm_installer.Configuration()\n conf.from_string(text)\n return conf", "title": "" }, { "docid": "2c6cfd40680b15ce2f07974d535aafe4", "score": "0.57622224", "text": "def load_backblaze_config(config):\n fd = open(\"%s/.backblaze.conf\" % os.environ['HOME'])\n for line in fd.readlines():\n words = line.split()\n config[words[0]] = words[1]\n fd.close()\n return config", "title": "" }, { "docid": "794c497dd1553638b1098eaf809108e9", "score": "0.57605445", "text": "def setup():\n try:\n config = ConfigParser.ConfigParser()\n config.readfp(open(CONFIG_FILE))\n lib.create.setApiVersion(config.get('general', 'api_version'))\n except:\n lib.logger.critical('Could not open config file \"%s\"' % (CONFIG_FILE,))\n sys.exit(-1)", "title": "" }, { "docid": "4a229dfe18233d6683e8c0d4977a8ada", "score": "0.57544714", "text": "def parse_config_file(config_file):", "title": "" }, { "docid": "376da956e4207567b4cf765bce627e14", "score": "0.57271695", "text": "def write_config(self, config_file: Union[str, Path] = None):\n config_file = config_file or self.config_file\n config_file = Path(config_file)\n parser: ConfigParser = ConfigParser(allow_no_value=True)\n\n # populate the config file with the template\n for section in self.template.keys():\n parser[section] = self.template[section] # type: ignore\n\n try:\n if not config_file.parent.exists():\n config_file.parent.mkdir(parents=True)\n with config_file.open(\"w\") as configfile:\n parser.write(configfile)\n except PermissionError as error:\n raise error from SystemError()\n\n print(f\"Configuration file created at {config_file}\")", "title": "" }, { "docid": "5d4df81c0759e74c5bdaffc54e34d365", "score": "0.5713698", "text": "def createConfigFile(self):\n config = ConfigParser.RawConfigParser()\n config.add_section('db')\n config.set('db', 'wanteddb', 'postgresql')\n config.add_section('postgresql')\n config.set('postgresql', 'database', 'sshkey')\n config.set('postgresql', 'user', os.getenv('USER'))\n config.set('postgresql', 'password', 'EDITTHIS')\n config.set('postgresql', 'host', 'localhost')\n config.set('postgresql', 'port', '5432')\n 
config.add_section('client')\n config.set('client', 'database', 'sshkey')\n config.set('client', 'user', os.getenv('USER'))\n config.set('client', 'password', 'EDITTHIS')\n config.set('client', 'host', 'localhost')\n try:\n configfile = open(os.path.expanduser(pathToConfig), 'wb')\n config.write(configfile)\n return 0\n except:\n print(\"Cannot write the defaultconfig\")\n return 1", "title": "" }, { "docid": "e387dea2087a6d11a613652fd0d57fc7", "score": "0.5701189", "text": "def load(file):\n\n config_raw = ruamel.yaml.safe_load(file)\n\n config = dacite.from_dict(data=config_raw, data_class=VideoPipelineConfig)\n return config", "title": "" }, { "docid": "6c5ab178a5f4a9f053705f7fcda306ab", "score": "0.5695858", "text": "def create_config(path):\n cfgfile = open(path, 'w')\n config = configparser.ConfigParser()\n config.add_section(\"Settings\")\n config.set(\"Settings\", \"parser\", \"ReutersParser\")\n config.set(\"Settings\", \"client\", \"PostgresClient\")\n config.set(\"Settings\", \"schema\", \"\")\n config.write(cfgfile)\n cfgfile.close()", "title": "" }, { "docid": "d1117f8e2dcb801fba14ca7992e8eef3", "score": "0.56880933", "text": "def load_config(configfile='~/labpi.conf'):\n # Create a default configuration\n config = configparser.ConfigParser()\n config.read_dict({\n 'server': {\n 'username': '',\n 'password': '',\n 'base_url': 'http://example.com/container/{id}'\n },\n 'pi': {\n 'lcd_panel': 'dummy',\n }\n })\n # Load the configuration from disk\n config.read(os.path.expanduser(configfile))\n return config", "title": "" }, { "docid": "37b0948425daae3e517485ea76f29389", "score": "0.56838214", "text": "def load_configuration(self):\r\n s = os.path.sep\r\n \r\n default = dict(\r\n python = 'python',\r\n web2py = os.path.join(s.join(__file__.split(s)[:-3]), 'web2py.py'),\r\n http_enabled = True,\r\n http_ip = '0.0.0.0',\r\n http_port = 8000,\r\n https_enabled = True,\r\n https_ip = '0.0.0.0',\r\n https_port = 8001,\r\n https_key = '',\r\n https_cert = '',\r\n password = '<recycle>',\r\n )\r\n \r\n config = default\r\n if self.config_file:\r\n try:\r\n f = open(self.config_file, 'r')\r\n lines = f.readlines()\r\n f.close()\r\n \r\n for line in lines:\r\n fields = line.split('=', 1)\r\n if len(fields) == 2:\r\n key, value = fields\r\n key = key.strip()\r\n value = value.strip()\r\n config[key] = value\r\n except:\r\n pass\r\n \r\n web2py_path = os.path.dirname(config['web2py'])\r\n os.chdir(web2py_path)\r\n \r\n args = [config['python'], config['web2py']]\r\n interfaces = []\r\n ports = []\r\n\r\n if config['http_enabled']:\r\n ip = config['http_ip']\r\n port = config['http_port']\r\n interfaces.append('%s:%s' % (ip, port))\r\n ports.append(port)\r\n if config['https_enabled']:\r\n ip = config['https_ip']\r\n port = config['https_port']\r\n key = config['https_key']\r\n cert = config['https_cert']\r\n if key != '' and cert != '':\r\n interfaces.append('%s:%s:%s:%s' % (ip, port, cert, key))\r\n ports.append(ports)\r\n if len(interfaces) == 0:\r\n sys.exit('Configuration error. 
Must have settings for http and/or https')\r\n \r\n password = config['password']\r\n if not password == '<recycle>':\r\n from gluon import main\r\n for port in ports:\r\n main.save_password(password, port)\r\n \r\n password = '<recycle>'\r\n \r\n args.append('-a \"%s\"' % password)\r\n \r\n interfaces = ';'.join(interfaces)\r\n args.append('--interfaces=%s' % interfaces)\r\n \r\n if 'log_filename' in config.key():\r\n log_filename = config['log_filename']\r\n args.append('--log_filename=%s' % log_filename)\r\n \r\n return (args, config)", "title": "" }, { "docid": "6ba5738033345c6a0e3a189a9ea81a15", "score": "0.56726974", "text": "def load_configuration(file_name='config'):\n config = Configuration()\n parser = configparser.ConfigParser()\n parser.read(file_name)\n default_section = parser['DEFAULT']\n config.token = environ.get('BOT_TOKEN', default_section['token'])\n return config", "title": "" }, { "docid": "80f6a41a88c581e6fb69bdcda1c69329", "score": "0.5672508", "text": "def loadConfig(self, path):\n\n parsedConfig = None\n\n try:\n # Try to open a the project config file\n tomlConfig = open(path+\"/config.toml\", \"r\")\n parsedConfig = toml.loads(tomlConfig.read())\n\n except Exception:\n raise Exception(\"Could not load config file\")\n\n print(\"TOML '\" + parsedConfig[\"BlogGen\"][\"WebsiteTitle\"] + \"' project config loaded\")\n\n return parsedConfig", "title": "" }, { "docid": "787f90dcd2768b2686466c8e61ee77ef", "score": "0.5670884", "text": "def make_config():\n # Remove the current config from SONG_TEMP_DIR\n config_path = os.path.join(DEFAULTS.CONFIG_PATH, 'config')\n\n # Check if the ytmdl folder is present in config\n if not os.path.isdir(DEFAULTS.CONFIG_PATH):\n # Make the ytmdl folder\n os.makedirs(DEFAULTS.CONFIG_PATH)\n elif os.path.isfile(config_path):\n os.remove(config_path)\n\n # Now write the config test to config file\n with open(config_path, 'w') as write_config:\n write_config.write(config_text)", "title": "" }, { "docid": "eab098fd42ce2d5e84be1a3c2e406f6a", "score": "0.5654302", "text": "def create_config_file(self):\n self.config_fail = True\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n try:\n if os.path.exists(self.settings_path):\n os.remove(self.settings_path)\n except OSError:\n pass\n with open((self.settings_path), 'w') as config_file:\n config_file.write(\"\")\n config = ConfigParser.RawConfigParser()\n config.add_section('Settings')\n config.set('Settings', 'default master address', '192.168.1.1')\n config.set('Settings', 'default device number', '0')\n config.set('Settings', 'default connection type', 'TCP')\n config.set('Settings', 'default enable dhcp', True)\n config.set('Settings', 'number of threads', 20)\n config.set('Settings', 'telnet client executable', ('putty.exe'))\n config.set('Settings', 'telnet timeout in seconds', '4')\n config.set('Settings',\n 'display notification of successful connections', True)\n config.set('Settings', 'DHCP sniffing enabled', True)\n config.set('Settings', 'filter incoming DHCP for AMX only', False)\n config.set('Settings', 'subnet filter enabled', False)\n config.set('Settings', 'subnet filter', '')\n config.set('Settings', 'play sounds', True)\n config.set('Settings', 'check for updates', True)\n config.add_section('Config')\n config.set('Config', 'debug', False)\n config.set('Config', 'columns_config', self.columns_default)\n config.set('Config', 'DXLink TX Models', self.dxtx_models_default)\n config.set('Config', 'DXLink RX Models', self.dxrx_models_default)\n config.set(\n 'Config', 
'DXLink Fibre TX Models', self.dxftx_models_default)\n config.set(\n 'Config', 'DXLink Fibre RX Models', self.dxfrx_models_default)\n with open((self.settings_path), 'w') as configfile:\n config.write(configfile)", "title": "" }, { "docid": "839144b0a29d189d6064767591da60b0", "score": "0.5634978", "text": "def build_config(config_file, args):\n config = Configuration()\n if os.path.exists(config_file):\n config.load_from_file(config_file)\n\n config.load_from_arguments(args)\n return config", "title": "" }, { "docid": "839144b0a29d189d6064767591da60b0", "score": "0.5634978", "text": "def build_config(config_file, args):\n config = Configuration()\n if os.path.exists(config_file):\n config.load_from_file(config_file)\n\n config.load_from_arguments(args)\n return config", "title": "" }, { "docid": "d660f0960ebc9f48bcbd3ac05668d7d7", "score": "0.56208354", "text": "def create_config_from_file(filepath: str) -> Config:\n assert os.path.exists(filepath), \"No config file found at {}\".format(filepath)\n _logger.debug(\"Loading config file from {}\".format(filepath))\n with open(filepath) as file_:\n raw_config = yaml.safe_load(file_)\n return _create_config(raw_config, is_template=False)", "title": "" }, { "docid": "3d8ce46d1e9cd02c15d5f1d2eee3d8d2", "score": "0.5609841", "text": "def make_settings():\n Config.make()", "title": "" }, { "docid": "2e8e6c5cba9496f29e796976a4cac642", "score": "0.56074923", "text": "def scout_config(request, config_file):\n print(\"\")\n in_handle = get_file_handle(config_file)\n data = yaml.safe_load(in_handle)\n return data", "title": "" }, { "docid": "ea3632f9ce8c39e4f031078bccc2912b", "score": "0.55826586", "text": "def crud_config(path):\n if not os.path.exists(path):\n create_config(path)\n\n config = configparser.ConfigParser()\n config.read(path)\n\n # Read from config file\n parser = config.get(\"Settings\", \"parser\")\n client = config.get(\"Settings\", \"client\")\n schema = config.get(\"Settings\", \"schema\")\n\n Setting = namedtuple('Settings', ['parser', 'client', 'schema'])\n\n return Setting(parser, client, schema)", "title": "" }, { "docid": "8babcdf72ef46f8a586ada2b30bcc25b", "score": "0.55735433", "text": "def load_settings():\r\n config_parser = configparser.RawConfigParser()\r\n config_file_path = 'final/settings.txt'\r\n config_parser.read(config_file_path)\r\n\r\n browser = config_parser.get('your-config', 'BROWSER')\r\n browser_path = config_parser.get('your-config', 'BROWSER_PATH')\r\n name = config_parser.get('your-config', 'NAME')\r\n page = config_parser.get('your-config', 'PAGE')\r\n output = config_parser.get('your-config', 'Output')\r\n\r\n settings = {\r\n 'browser': browser,\r\n 'browser_path': browser_path,\r\n 'name': name,\r\n 'page': page,\r\n 'output' : output\r\n }\r\n return settings", "title": "" }, { "docid": "1209092a435836f0cff60bc141b34733", "score": "0.5567676", "text": "def make_config(self, fileo):\n config = ConfigParser.SafeConfigParser() #new class instance\n for data_type, configuration in self.data_types.iteritems():\n config.add_section(data_type)\n for option, value in configuration.iteritems():\n config.set(data_type, option, value)\n config.write(fileo)", "title": "" }, { "docid": "1525cf8b57ae0a6d98b8fc7aca71ebb8", "score": "0.5549005", "text": "def parseConfigFile(cfg):", "title": "" }, { "docid": "1525cf8b57ae0a6d98b8fc7aca71ebb8", "score": "0.5549005", "text": "def parseConfigFile(cfg):", "title": "" }, { "docid": "1525cf8b57ae0a6d98b8fc7aca71ebb8", "score": "0.5549005", "text": "def 
parseConfigFile(cfg):", "title": "" }, { "docid": "ee6af1b742b912d76dec588c5a3ce21b", "score": "0.5546118", "text": "def make_config():\n tmpdir = tempfile.mkdtemp()\n global db_dir\n db_dir = tmpdir\n cwd = os.getcwd()\n os.chdir(tmpdir)\n with open('haas.cfg', 'w') as f:\n config = '\\n'.join([\n '[general]',\n '[devel]',\n 'dry_run=True',\n '[auth]',\n 'require_authentication = True',\n\n '[headnode]',\n 'base_imgs = base-headnode, img1, img2, img3, img4',\n '[database]',\n 'uri = sqlite:///%s/haas.db' % tmpdir,\n '[extensions]',\n 'haas.ext.auth.database =',\n 'haas.ext.switches.mock =',\n 'haas.ext.switches.nexus =',\n 'haas.ext.switches.dell =',\n 'haas.ext.switches.brocade =',\n 'haas.ext.obm.mock =',\n 'haas.ext.obm.ipmi =',\n 'haas.ext.network_allocators.vlan_pool =',\n '[haas.ext.network_allocators.vlan_pool]',\n 'vlans = 1001-1040',\n\n ])\n f.write(config)\n return (tmpdir, cwd)", "title": "" }, { "docid": "793705206172008876610eb222772894", "score": "0.5538213", "text": "def create_config_file_from_upstream(self) -> None:\n\n PyFunceble.helpers.File(self.path_to_default_config).copy(self.path_to_config)", "title": "" }, { "docid": "8618279c876f3b3b5dcdd1d95450dbf0", "score": "0.55377173", "text": "def test_new_site(sitename):\n print(test_new_site.__doc__ + '...', end=' ')\n mld = rhfn.new_conf(sitename)\n assert_equal(mld, '')\n mld, sett = rhfn.read_conf(sitename)\n assert_equal(mld, '')\n assert_equal(sett, {})\n mld = rhfn.new_conf('test')\n assert_equal(mld, 'site_name_taken')\n sett = {x: y for x, y in rhfn.DFLT_CONF.items()}\n mld = rhfn.save_conf(sitename, rhfn.conf2text(sett))\n mld, sett = rhfn.read_conf(sitename)\n\n sett['hig'] = 'Too high'\n mld = rhfn.save_conf(sitename, rhfn.conf2text(sett))\n assert_equal(mld, 'Config: invalid value for hig')\n sett['hig'] = 32\n sett['lang'] = 'xx'\n mld = rhfn.save_conf(sitename, rhfn.conf2text(sett))\n assert_equal(mld, 'Config: invalid value for lang')\n sett['lang'] = 'en'\n sett['url'] = 'http://www.example.com'\n mld = rhfn.save_conf(sitename, rhfn.conf2text(sett))\n assert_equal(mld, '')\n rhfn.init_css(sitename)\n mld, conf = rhfn.read_conf(sitename)\n for item in BASIC_CSS:\n test = WEBROOT / sitename / item\n assert 'url + ' + item in conf['css']\n assert test.exists()\n print('ok')", "title": "" }, { "docid": "1bede075b08609bb99dd6e933a34e9ab", "score": "0.5533709", "text": "def config():\n conf = ConfigParser()\n # Set up defaults\n conf.add_section('io')\n conf.set('io', 'compression', 'zlib')\n\n conf.read(os.path.expanduser('~/.deepdish.conf'))\n return conf", "title": "" }, { "docid": "593265363c82e6879182611816cc7b0f", "score": "0.55328953", "text": "def create_config_file(file_path):\n dir_path = file_path.parent\n if not dir_path.is_dir(): # pragma: no cover\n print(f'Creating application etc/ directory \"{str(dir_path)}\".')\n dir_path.mkdir(mode=0o775, parents=True)\n if not file_path.exists():\n with file_path.open(mode=\"w\") as config_fh:\n print(f'Creating instance config file at \"{str(file_path)}\".')\n print(CONFIG_FILE_HEADER, file=config_fh)\n #\n # Set SECRET_KEY to a random string.\n #\n write_kv_to_config_file(\n file_path, \"SECRET_KEY\", generate_random_password(), str, \"\"\n )", "title": "" }, { "docid": "77f35293734d264849a4bd8064d05519", "score": "0.55297446", "text": "def _init_cfg_file(force=False):\n from shutil import rmtree, copyfile\n home = Path.home()\n dir_ = home / PACKAGE_CONF_DIR\n cfg_file = dir_ / 'conf.yml'\n\n dataset_dir = dir_ / 'dataset'\n cached_dir = dir_ / 
'cached'\n\n if force:\n rmtree(dir_)\n\n if not dir_.is_dir():\n # create config root dir\n dir_.mkdir()\n\n # create other dirs\n dataset_dir.mkdir()\n cached_dir.mkdir()\n\n if not cfg_file.exists() or force:\n # copy default conf.yml to ~/.XenonPy\n copyfile(str(Path(__file__).parent / 'conf.yml'), cfg_file)", "title": "" }, { "docid": "579b7e4197bfa33717de9efbe81278d6", "score": "0.5527012", "text": "def set_configs(self):\n for config_item in ['tag_creds', 'encrypt_tag' ]:\n define(config_item)\n # Get configuration values from configuration filescript_name = path.basename(__file__)\n script_path = path.abspath(path.realpath(__file__))\n script_dir = path.split(script_path)[0]\n config_path = path.abspath(path.join(script_dir, 'model/EmsgEncrypt.conf'))\n options.parse_config_file(config_path)", "title": "" }, { "docid": "23526978594edf608881b5425f5e7cae", "score": "0.55216306", "text": "def config_test():\n cfg = ConfigParser.ConfigParser()\n cfg.read('TestConfig.cfg')\n return cfg", "title": "" }, { "docid": "cf39c72c9e5e05037e0f6220ee6f8bf7", "score": "0.551976", "text": "def load_config(): # type: ignore", "title": "" }, { "docid": "30faa66a01a398f9048673e6fda03a01", "score": "0.55030286", "text": "def load_config():\n parser = argparse.ArgumentParser(description=\"pyWegaRadio\")\n parser.add_argument(\"-c\", \"--config\", help=\"filename of the application config (YAML)\", required=True)\n args = parser.parse_args()\n with open(args.config) as application_config:\n cfg = yaml.load(application_config)\n\n # setting defaults\n cfg['mpd_host'] = cfg['mpd_client'] if 'mpd_host' in cfg else 'localhost'\n cfg['mpd_port'] = cfg['mpd_port'] if 'mpd_port' in cfg else 6600\n\n return cfg", "title": "" }, { "docid": "00822e430d6ee5cdcff3c1a9098317b1", "score": "0.5496693", "text": "def cli(config_file):\n if config_file is not None:\n config.update_from_file(config_file)", "title": "" }, { "docid": "f10a61fe63d5616d5cc284d2c5156940", "score": "0.5495602", "text": "def set_config(self):\n\n self.parser['CONNECTION'] = strings.get_config_connection()\n self.parser['DATABASE'] = strings.get_config_db()\n self.parser['METRICS'] = strings.get_config_metrics()\n\n with open(strings.get_config_path(), 'w') as file:\n self.parser.write(file)", "title": "" }, { "docid": "165d7adacf17eef282f7078e7d340d93", "score": "0.5495298", "text": "def load_from_file(cls, config_path):\n # type: (str) -> Config\n if not os.path.isfile(config_path):\n LOGGER.error(\"Runway config file was not found (looking for \"\n \"%s)\",\n config_path)\n sys.exit(1)\n with open(config_path) as data_file:\n config_file = yaml.safe_load(data_file)\n result = Config(config_file.pop('deployments'),\n config_file.pop('tests', []),\n config_file.pop('ignore_git_branch',\n config_file.pop(\n 'ignore-git-branch',\n False)),\n config_file.pop('variables', {}))\n\n if config_file:\n LOGGER.warning(\n 'Invalid keys found in runway file have been ignored: %s',\n ', '.join(config_file.keys())\n )\n return result", "title": "" }, { "docid": "bf970f814bd9190d3a94f92067f7dc5a", "score": "0.54845643", "text": "def load_file(pth):\n fil = open(pth, 'r')\n return Configuration(fil.read())", "title": "" }, { "docid": "38625a74b57d297ae8ab7e8926a10f58", "score": "0.54842806", "text": "def from_configfile(cls, configfile):\n import yaml\n with open(configfile) as fh:\n config = yaml.safe_load(fh)\n\n return cls.from_config(config)", "title": "" }, { "docid": "9ad70cf40a191b6a24d34756707bc09c", "score": "0.5480038", "text": "def config(self, 
settings):\n for k, v in settings.iteritems():\n self.sites[k] = v[0]\n self.sites_config[v[0]] = {'site_name': k}\n for _k, _v in v[1].iteritems():\n self.sites_config[v[0]][_k.upper()] = _v\n \n self._configed = True", "title": "" }, { "docid": "0033170d05a3bfe1a686e5d78299af53", "score": "0.547585", "text": "def parse_config_file(config_file):\n config = configparser.ConfigParser()\n config.read(config_file)\n\n settings = {}\n settings[\"world\"] = config.get(\"RuneScape\", \"world\", fallback=\"www\")\n settings[\"ram\"] = config.get(\"Java\", \"ram\", fallback=\"-Xmx1024m\")\n settings[\"stacksize\"] = config.get(\"Java\", \"stacksize\", fallback=\"-Xss1m\")\n return settings", "title": "" }, { "docid": "05210fe3fce6ea8f512bd9cb10396f0f", "score": "0.54738665", "text": "def _make_cfg_file(self, **kwargs):\n cfg_file = self._cfg_dir / \"config.yaml\"\n self.parameters.update(**kwargs)\n with cfg_file.open(mode=\"w\") as file:\n yaml.dump({k: v for k, v in self.parameters}, file)\n\n return cfg_file", "title": "" }, { "docid": "dcd10b6a6012a3305d175ec3f2dd26e4", "score": "0.54719794", "text": "def generate_config(options):\n # template name -> file mode\n config_file_templates = {}\n\n config = get_config(options.host)\n target_dir = path('target')\n\n for parent_files_dir in filter(lambda p: p.exists(),\n [target_dir.joinpath(p, 'files') for p in config['parents']]):\n for config_file in parent_files_dir.walkfiles():\n template_name = parent_files_dir.relpathto(config_file)\n if template_name not in config_file_templates:\n config_file_templates[template_name] = config_file.lstat()\n\n # Load Jinja Environment\n template_env = Environment(loader=ConfigdichLoader(target_dir))\n\n build_dir = path('config').joinpath(options.host)\n # Clean out the build dir\n build_dir.rmtree()\n build_dir.makedirs_p()\n\n for config_file_name, template_file_stat in config_file_templates.items():\n rendered_config_file_path = build_dir.joinpath(config_file_name)\n # Create the directory that the config file will be rendered to in if needed\n rendered_config_file_path.dirname().makedirs_p()\n # Render the template to the file\n t = template_env.get_template(options.host + \"/\" + str(config_file_name))\n rendered_config_file_path.write_text(t.render(config))\n rendered_config_file_path.chmod(template_file_stat.st_mode)", "title": "" }, { "docid": "88d1a4f2d28cfc378269bff766331c1d", "score": "0.546764", "text": "def generic_config_parser(config_path, local):\n config = ConfigParser.ConfigParser()\n if local:\n filename = config_path + \"/local_datastore_operator.conf\"\n else:\n filename = config_path + \"/collector_operator.conf\"\n config.read(filename)\n return config", "title": "" }, { "docid": "4dd78c123605aef6b83f97ed0eb623cb", "score": "0.54675925", "text": "def new_config_file(self):\n config_source = os.path.join(os.path.dirname(configs.__file__), 'clitest.yaml')\n out_config = os.path.join(self.test_dir, 'config.yaml')\n\n with open(config_source, 'r') as f_in:\n with open(out_config, 'w') as f_out:\n s = f_in.read()\n f_out.write(s.format(\n root=self.test_dir,\n source=os.path.dirname(bundles.__file__)\n ))\n return out_config", "title": "" }, { "docid": "4d98b746daa3accf08cffdc55619f55d", "score": "0.545733", "text": "def create_blank_conf(self):\n\n self.config['data_file'] = \"\"\n self.config.comments['data_file'] = wrap(\n \"File containing the input data. 
The columns are 'id' (name of the\"\n \" object), 'redshift' (if 0 the distance is assumed to be 10 pc), \"\n \"the filter names for the fluxes, and the filter names with the \"\n \"'_err' suffix for the uncertainties. The fluxes and the \"\n \"uncertainties must be in mJy. This file is optional to generate \"\n \"the configuration file, in particular for the savefluxes module.\")\n self.spec['data_file'] = \"string\"\n\n self.config['parameters_file'] = \"\"\n self.config.comments['parameters_file'] = [\"\"] + wrap(\n \"Optional file containing the list of physical parameters. Each \"\n \"column must be in the form module_name.parameter_name, with each \"\n \"line behind a different model. The columns must be in the order \"\n \"the modules will be called. The redshift column must be the last \"\n \"one. Finally, if this parameters is not left empty, cigale will \"\n \"not interpret the configuration parameters given in pcigale.ini. \"\n \"They will be given only for information.\")\n self.spec['parameters_file'] = \"string()\"\n\n self.config['sed_modules'] = []\n self.config.comments['sed_modules'] = ([\"\"] +\n [\"Order of the modules use for SED creation. Available modules:\"] +\n [\"SFH: sfh2exp, sfhdelayed, sfhfromfile, sfhperiodic\"] +\n [\"SSP: bc03, m2005\"] +\n [\"Nebular emission: nebular\"] +\n [\"Dust attenuation: dustatt_calzleit, dustatt_powerlaw, \"\n \"dustatt_2powerlaws\"] +\n [\"Dust emission: casey2012, dale2014, dl2007, dl2014\"] +\n [\"AGN: dale2014, fritz2006\"] +\n [\"Radio: radio\"] +\n [\"Redshift: redshifting (mandatory!)\"])\n self.spec['sed_modules'] = \"cigale_string_list()\"\n\n self.config['analysis_method'] = \"\"\n self.config.comments['analysis_method'] = [\"\"] + wrap(\n \"Method used for statistical analysis. Available methods: \"\n \"pdf_analysis, savefluxes.\")\n self.spec['analysis_method'] = \"string()\"\n\n self.config['cores'] = \"\"\n self.config.comments['cores'] = [\"\"] + wrap(\n \"Number of CPU cores available. 
This computer has {} cores.\"\n .format(mp.cpu_count()))\n self.spec['cores'] = \"integer(min=1)\"\n\n self.config.write()\n self.spec.write()", "title": "" }, { "docid": "5721b6cc0302e7d6a826212398bc72d1", "score": "0.54488325", "text": "def update_configfile(cls):\n # New database path\n cls.database_path = os.path.join(cls.tmpdir, 'test.db')\n cls.database_url = 'sqlite:///%s' % cls.database_path\n\n # Update config with database url\n config['common']['DATABASE_URL'] = cls.database_url\n\n # New config file\n cls.config_path = os.path.join(cls.tmpdir, 'test.conf')\n with open(cls.config_path, 'w') as configfile:\n config.write(configfile)", "title": "" }, { "docid": "99c370f0fa2a07a8de8cbf9c9dad67a6", "score": "0.54463136", "text": "def setconfig(jamla_path, secret_key, template_folder, static_folder, \\\n uploaded_images_dest, db_full_path, success_redirect_url, \\\n thankyou_url, mail_server, mail_port, \\\n mail_default_sender, mail_username, mail_password, mail_use_tls,\\\n email_login_from, gocardless_client_id, \\\n gocardless_client_secret, template_base_dir):\n newConfig = ''\n with open('./instance/config.py', 'r') as fh:\n for line in fh:\n frame = inspect.currentframe()\n options = inspect.getargvalues(frame).args\n for option in options:\n if option.swapcase() in line and frame.f_locals[option] is not None :\n newValue = ''.join([option.swapcase(), '=\"', str(frame.f_locals[option]), '\"'])\n expr = r\"^\" + option.swapcase() + \".*\"\n line = re.sub(expr, newValue, line)\n logging.info(\"Writing: %s\", newValue)\n newConfig = ''.join([newConfig, line])\n # Writeout new config file\n with open('./instance/config.py', 'w') as fh:\n fh.write(newConfig)", "title": "" }, { "docid": "ac0ba2d625b3994fe049062a44d035c3", "score": "0.54410285", "text": "def loadConfig(self):\n with open(self.configfile) as f:\n config = yaml.load(f)\n self.binddn = config['binddn']\n self.bindPW = config['bindPW']\n self.serverURL = config['serverURL']\n self.baseDN = config['baseDN']\n self.usersBase = config['usersBase']\n self.employeeFilter = config['employeeFilter']\n f.close", "title": "" }, { "docid": "7cd7d863bba7fa1b24f40eb2ba21c4a1", "score": "0.5431443", "text": "def create_default_config(path, conf):\n if platform.system() == 'Windows':\n base_path = os.path.join(os.path.expanduser('~'), 'seprcph')\n else:\n base_path = os.path.join(os.path.expanduser('~'), '.config', 'seprcph')\n\n try:\n os.makedirs(os.path.dirname(base_path))\n except OSError as err:\n if err.errno != errno.EEXIST:\n raise\n\n conf.add_section('general')\n conf.set('general', 'screen_height', '480')\n conf.set('general', 'screen_width', '640')\n conf.set('general', 'fullscreen', 'false')\n conf.set('general', 'data_dir', os.path.join(base_path, 'data'))\n conf.set('general', 'image_dir', os.path.join(base_path, 'assets', 'images'))\n conf.set('general', 'sound_dir', os.path.join(base_path, 'assets', 'sounds'))\n\n conf.add_section('logging')\n conf.set('logging', 'format', '%(asctime)s - %(levelname)s - %(funcName)s '\n '- %(message)s')\n conf.set('logging', 'date_format', '%d/%m/%Y %I:%M:%S %p')\n conf.set('logging', 'level', 'DEBUG')\n conf.set('logging', 'file', os.path.join(base_path, 'log.txt'))\n\n conf.add_section('graphics')\n conf.set('graphics', 'fps', '60')\n conf.set('graphics', 'draw_fps', 'false')\n\n conf.add_section('gameplay')\n conf.set('gameplay', 'turn_limit', '30')\n\n with open(path, 'w') as conf_file:\n conf.write(conf_file)", "title": "" }, { "docid": "6d294e401d799371e32810413008056d", 
"score": "0.54309314", "text": "def create_config(self):\n # Generate install-config from template\n _templating = Templating()\n ocp_install_template = (\n f\"install-config-{self.deployment_platform}-\"\n f\"{self.deployment_type}.yaml.j2\"\n )\n ocp_install_template_path = os.path.join(\n \"ocp-deployment\", ocp_install_template\n )\n install_config_str = _templating.render_template(\n ocp_install_template_path, config.ENV_DATA\n )\n install_config_obj = yaml.safe_load(install_config_str)\n install_config_obj[\"pullSecret\"] = self.get_pull_secret()\n install_config_obj[\"sshKey\"] = self.get_ssh_key()\n install_config_obj[\"platform\"][\"vsphere\"][\"apiVIP\"] = config.ENV_DATA[\n \"vips\"\n ][0]\n install_config_obj[\"platform\"][\"vsphere\"][\"ingressVIP\"] = config.ENV_DATA[\n \"vips\"\n ][1]\n install_config_obj[\"metadata\"][\"name\"] = config.ENV_DATA.get(\"cluster_name\")\n install_config_obj[\"baseDomain\"] = config.ENV_DATA.get(\"base_domain\")\n install_config_str = yaml.safe_dump(install_config_obj)\n install_config = os.path.join(self.cluster_path, \"install-config.yaml\")\n\n with open(install_config, \"w\") as f:\n f.write(install_config_str)", "title": "" }, { "docid": "c450143b54e5a138152443e1ed40ebc5", "score": "0.54287505", "text": "def read_cfg():\n\timport copy\n\tglobal global_config\n\tconfig_parse = ConfigParser.ConfigParser()\n\tconfig_parse.optionxform = str\n\tconfig_parse.read(config.file_cfg)\n\tglobal_config = copy.deepcopy(default_config)\n\tfor section in config_parse.sections():\n\t\tfor (name, value) in config_parse.items(section):\n\t\t\tglobal_config[section][name] = value", "title": "" }, { "docid": "1ddc0ec266171954c8fe4a84d2b2d1b4", "score": "0.5426491", "text": "def load_settings():\n config_parser = configparser.RawConfigParser()\n config_file_path = 'settings.txt'\n config_parser.read(config_file_path)\n\n browser = config_parser.get('config', 'BROWSER')\n browser_path = config_parser.get('config', 'BROWSER_PATH')\n name = config_parser.get('config', 'NAME')\n page = config_parser.get('config', 'PAGE')\n csv_location = config_parser.get('config', OUTPUT_FILE_LOCATION)\n\n settings = {\n 'browser': browser,\n 'browser_path': browser_path,\n 'name': name,\n 'page': page,\n 'csv_location': csv_location\n }\n return settings", "title": "" }, { "docid": "001fb7ee6bc27d12a98acc8c4b33096f", "score": "0.54166555", "text": "def __create_default_config_file(self) -> None:\n with open(self.__config_filename, 'w') as configfile:\n configfile.write(open(cemu.const.TEMPLATE_CONFIG, \"r\").read())\n return", "title": "" }, { "docid": "553fa8e1f68f634cdd3a863f2e6d1cf1", "score": "0.5408216", "text": "def readSiteConfig(self, siteId, configFilePath):\n d = self.readConfig(configFilePath)\n if siteId in d:\n return d[siteId]\n else:\n return {}", "title": "" }, { "docid": "dd6e9a760ddcdd192a7168242ccf26a3", "score": "0.5402126", "text": "def __init__(self, file: str) -> None:\r\n self.load_config_from_file(file)", "title": "" }, { "docid": "ae07b5963093d1693d846a18ece8dea4", "score": "0.53898674", "text": "def get_clusto_config(filename=None):\r\n\r\n filesearchpath = ['/etc/clusto/clusto.conf']\r\n\r\n \r\n filename = filename or os.environ.get('CLUSTOCONFIG')\r\n\r\n if not filename:\r\n filename = filesearchpath[0]\r\n\r\n if filename:\r\n if not os.path.exists(os.path.realpath(filename)):\r\n raise CmdLineError(\"Config file %s doesn't exist.\" % filename)\r\n \r\n config = SafeConfigParser()\r\n config.read([filename])\r\n\r\n if not 
config.has_section('clusto'):\r\n config.add_section('clusto')\r\n\r\n if 'CLUSTODSN' in os.environ:\r\n config.set('clusto', 'dsn', os.environ['CLUSTODSN'])\r\n\r\n if not config.has_option('clusto', 'dsn'):\r\n raise CmdLineError(\"No database given for clusto data.\")\r\n\r\n return config", "title": "" }, { "docid": "07044e5c9c37b198525f23dbe6087509", "score": "0.5381551", "text": "def generate_config(device):\n hostname = device[\"hostname\"]\n config_data = yaml.safe_load(open(f\"host_vars/{hostname}.yaml\"))\n env = Environment(\n loader=FileSystemLoader(\"./templates\"), trim_blocks=True, lstrip_blocks=True\n )\n template = env.get_template(\"ospf.j2\")\n configuration = template.render(config_data)\n return configuration", "title": "" }, { "docid": "3dfbad3961df131242c601342467008c", "score": "0.5376425", "text": "def create_config(self):\n # Generate install-config from template\n _templating = Templating()\n ocp_install_template = (\n f\"install-config-{self.deployment_platform}-\"\n f\"{self.deployment_type}.yaml.j2\"\n )\n ocp_install_template_path = os.path.join(\n \"ocp-deployment\", ocp_install_template\n )\n install_config_str = _templating.render_template(\n ocp_install_template_path, config.ENV_DATA\n )\n\n # Parse the rendered YAML so that we can manipulate the object directly\n install_config_obj = yaml.safe_load(install_config_str)\n if version.get_semantic_ocp_version_from_config() >= version.VERSION_4_10:\n install_config_obj[\"platform\"][\"vsphere\"][\"network\"] = config.ENV_DATA[\n \"vm_network\"\n ]\n install_config_obj[\"pullSecret\"] = self.get_pull_secret()\n install_config_obj[\"sshKey\"] = self.get_ssh_key()\n install_config_str = yaml.safe_dump(install_config_obj)\n install_config = os.path.join(self.cluster_path, \"install-config.yaml\")\n with open(install_config, \"w\") as f:\n f.write(install_config_str)", "title": "" }, { "docid": "34d1bafcf14d487d6167a077cbe5754e", "score": "0.5368864", "text": "def setup(args):\n cfg = get_cfg()\n add_custom_config(cfg)\n cfg.merge_from_file(args.config_file)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg", "title": "" }, { "docid": "3533fe0269fed1b36b7b28d3a13fe2c1", "score": "0.5353794", "text": "def generate_supervisor_conf():\n\n tmp_file = \"/tmp/supervisor.conf\"\n target_file = os.path.join(env.config.ApiServer.document_root, \"local\", \"conf\")\n\n run(\"mkdir -p %s\" % target_file)\n render_to_file(\"supervisor.conf\", tmp_file)\n\n put(tmp_file, target_file)\n print \"supervisor config file generated at %s\" % target_file", "title": "" }, { "docid": "5714d34e47639c754d788a129d45f06f", "score": "0.53531885", "text": "def read_config_file(self):\n parser = SafeConfigParser()\n try:\n parser.read(self.filename)\n except IOError as error:\n logging.info(\"Cannot read file: {}.\".format(error))\n sys.exit(0)\n\n options_name = [\n 'host',\n 'user',\n 'database',\n 'password'\n ]\n\n for section_name in parser:\n self.configuration[section_name] = {}\n if section_name != 'DEFAULT':\n for option in options_name:\n try:\n value = parser.get(section_name, option)\n self.configuration[section_name][option] = value\n except ConfigParser.NoOptionError, err:\n logging.info(str(err))\n sys.exit(0)", "title": "" }, { "docid": "c42dc0a539aae8a4a3f3e9101892a948", "score": "0.53474313", "text": "def get_user_configuration(config_file, default_config_file=None,\n bld_dir=os.getcwd(), v=1):\n # Get the configuration\n verbose = abs(v) > 0\n\n logging.info('Looking for user configuration')\n parser = 
ConfigParser(allow_no_value=True)\n parser.optionxform = lambda option: option # retain case sensitivity\n\n default_config_file = get_default_configuration_file(bld_dir,\n default_config_file=default_config_file)\n if verbose:\n logging.info('Loading default configuration file: '\n '{green:%s}' % default_config_file)\n parser.read(default_config_file)\n\n if not config_file:\n if os.getenv('TRILINOS_CFG') is not None:\n config_file = os.environ['TRILINOS_CFG']\n\n if config_file:\n if not os.path.isfile(config_file):\n # Look for config file in known locations\n for d in (bld_dir, ETC):\n f = os.path.join(d, config_file)\n if os.path.isfile(f):\n config_file = f\n break\n else:\n # No config file is explicitly given, give precedence to ./Trilinos.cfg\n if os.path.isfile('Trilinos.cfg'):\n config_file = 'Trilinos.cfg'\n\n if config_file:\n if not os.path.isfile(config_file):\n raise Exception('Configuration file {0!r} does '\n 'not exist'.format(config_file))\n if verbose:\n s1 = config_file\n logging.info('Loading user configuration file: {green:%s}' % config_file)\n parser.read(config_file)\n\n if not os.path.isfile(os.path.join(bld_dir, 'Default.cfg')):\n with open(os.path.join(bld_dir, 'Default.cfg'), 'w') as fh:\n fh.write(open(default_config_file).read())\n\n return parser", "title": "" }, { "docid": "078a41fa37f83bc8607ba8bdd0a1ff7c", "score": "0.53471076", "text": "def read_config(cls, file_like):\n\n with open(file_like) as f:\n config = yaml.safe_load(f) \n return cls(**config)", "title": "" }, { "docid": "91534fc50088bd95f9aa1b9400c1b5e1", "score": "0.53469664", "text": "def setup(ctx):\n __, template_config = configuration.load_config(load_template=True)\n __, config = configuration.load_config()\n if config is None:\n config = template_config\n\n click.echo(ui.style_info(\"Setting configuration fields:\"))\n for category_name, category in config.items():\n for field, value in category.items():\n new_value = click.prompt(ui.style_prompt(field), default=value)\n if category_name == \"path\":\n new_value = os.path.abspath(new_value)\n click.echo(ui.style_info_path(\"Absolute path is\", new_value))\n config[category_name][field] = new_value\n\n click.echo(ui.style_info(\"Checking configuration:\"))\n for category_name, category in template_config.items():\n if category_name not in config:\n click.echo(\n ui.style_error(f\"Category {category_name} not in config, creating\")\n )\n config[category_name] = {}\n for field, value in category.items():\n if field not in config[category_name]:\n click.echo(ui.style_error(f\"Field {field} not in config, creating\"))\n new_value = click.prompt(ui.style_prompt(field), default=value)\n if category_name == \"path\":\n new_value = os.path.abspath(new_value)\n click.echo(ui.style_info_path(\"Absolute path is\", new_value))\n config[category_name][field] = new_value\n\n click.echo(\n ui.style_info_path(\"Saved configuration at\", configuration.write_config(config))\n )\n click.echo(\n ui.style_info_path(\n \"Copied review template to\",\n configuration.copy_template_review(\n os.path.abspath(config[\"path\"][\"reviews_directory\"])\n ),\n )\n )\n click.echo(\n ui.style_info_path(\n \"Copied HTML/CSS export templates to\",\n configuration.copy_template_html(config[\"path\"][\"export_directory\"]),\n )\n )\n return config", "title": "" }, { "docid": "7429b17fd8520aca87366541858b90d4", "score": "0.53251046", "text": "def open_config(self):\r\n config = configparser.ConfigParser()\r\n\r\n cf = os.path.join(\r\n 
os.path.dirname(os.path.realpath(__file__)),\r\n \"api.cfg\"\r\n )\r\n if not config.read([cf]):\r\n print(\"No login configuration file found!\")\r\n\r\n self.email = config.get(self.account, 'email')\r\n self.password = config.get(self.account, 'password')", "title": "" }, { "docid": "7cfb96c2fd95286d5dac2499570ee504", "score": "0.532114", "text": "def readconfig():\n config = ConfigParser.ConfigParser()\n config.read(configfile())\n configdict = {}\n\n configdict['server_port'] = config.get('server', 'server_port', 8212)\n configdict['verbose'] = config.getint('server', 'verbose')\n db_url = config.get('server', 'db_url')\n if db_url == 'tests/test.db':\n configdict['db_url'] = 'sqlite:///%s/tests/test.db' % \\\n os.environ['QB_ROOT']\n else:\n configdict['db_url'] = db_url\n# configdict['db_alias'] = config.get('server', 'db_alias')\n configdict['map_file'] = os.path.join(os.environ['QB_ROOT'], \\\n config.get('server', 'map_file', ''))\n configdict['alias_mapfile'] = os.path.join(os.environ['QB_ROOT'], \\\n config.get('server', 'alias_mapfile', ''))\n configdict['split_file'] = os.path.join(os.environ['QB_ROOT'], \\\n config.get('server', 'split_file', ''))\n configdict['logconfig'] = os.path.join(os.environ['QB_ROOT'], \\\n config.get('server', 'logconfig', ''))\n configdict['doc_dir'] = os.path.join(os.environ['QB_ROOT'], \\\n config.get('server', 'doc_dir', ''))\n configdict['left_join'] = os.path.join(os.environ['QB_ROOT'], \\\n config.get('server', 'left_join', ''))\n\n configdict['environment'] = config.get('server', 'environment', 'production')\n configdict['profiler'] = int(config.get('server', 'profiler', 0))\n configdict['threadpool'] = int(config.get('server', 'thread_pool', 30))\n configdict['socket_queue_size'] = int(config.get('server', 'socket_queue_size', 15))\n configdict['expires'] = int(config.get('server', 'expires', 300))\n configdict['log_screen'] = bool(config.get('server', 'log_screen', True))\n configdict['access_log_file'] = config.get('server', 'access_log_file', \\\n '/tmp/access_log.log')\n configdict['error_log_file'] = config.get('server', 'error_log_file', \\\n '/tmp/error_log.log')\n configdict['access_log_level'] = int(config.get('server', 'access_log_level', 0))\n configdict['error_log_level'] = int(config.get('server', 'error_log_level', 0))\n configdict['algo'] = config.get('server', 'algo', 'MIS')\n\n return configdict", "title": "" }, { "docid": "8c385c7e40f4f5477e0155db3d443ec5", "score": "0.53203917", "text": "def gen_supervisor_conf(conf_file='local/supervisord.conf.tmp'):\n # Back up first\n _backup_file(conf_file)\n\n env.run('python manage.py supervisor getconfig > %s' % conf_file)\n print green(\"Wrote supervisor conf file to %s\" % conf_file)", "title": "" }, { "docid": "dcababd44abb64d5ff2669218c654d5f", "score": "0.5319802", "text": "def get_config(self):\n\n config = ConfigParser.ConfigParser()\n if os.path.isfile(self.args.config):\n myconf = self.args.config\n else: myconf = 'config/db.conf'\n config.read(myconf)\n\n self.db['host'] = config.get('siemstress', 'server')\n self.db['user'] = config.get('siemstress', 'user')\n self.db['password'] = config.get('siemstress', 'password')\n self.db['database'] = config.get('siemstress', 'database')\n sectionfile = config.get('siemstress', 'sectionfile')\n\n if not sectionfile.startswith('/'):\n sectionfile = '/'.join(os.path.abspath(myconf).split('/')[:-1]) + \\\n '/' + sectionfile\n\n config.read(sectionfile)\n \n self.table = config.get(self.args.section, 'table')\n self.helpers = 
config.get(self.args.section, 'helpers')\n try:\n self.parsername = config.get(self.args.section, 'parser')\n except Exception:\n # To Do: narrow down exception\n self.parsername = 'syslogbsd'\n\n\n if self.parsername == 'syslogbsd':\n self.parser = logdissect.parsers.syslogbsd.ParseModule()\n elif self.parsername == 'syslogiso':\n self.parser = logdissect.parsers.syslogiso.ParseModule()\n elif self.parsername == 'nohost':\n self.parser = logdissect.parsers.nohost.ParseModule()\n elif self.parsername == 'tcpdump':\n self.parser = logdissect.parsers.tcpdump.ParseModule()", "title": "" }, { "docid": "3c9d03322b3e96d8419c8cd02ad61dc2", "score": "0.5319087", "text": "def parse_config(self):\n file_name = '{}/dyson_pure_link.yaml'.format(os.path.dirname(os.path.abspath(__file__)))\n\n if os.path.isfile(file_name):\n self.config = yaml.safe_load(open(file_name))\n\n return self.config", "title": "" }, { "docid": "57edd0435b833444b5fa7780273976fe", "score": "0.5318298", "text": "def load_config(config_file: str) -> dict:\n return toml.loads(open(config_file).read())", "title": "" }, { "docid": "dfc48fbd682a18e8dd29aed57d2a5388", "score": "0.53126377", "text": "def __init_config(cfg):\n config_path = os.getenv(\"BB_CONFIG_FILE\", None)\n if not config_path:\n config_path = find_config()\n\n if config_path:\n cfg.load(config_path)\n cfg[\"config_file\"] = os.path.abspath(config_path)\n logging.debug(\"Configuration loaded from {0}\".format(os.path.abspath(\n config_path)))\n cfg.init_from_env()", "title": "" }, { "docid": "d97aacbb7303a87c5c1de0636f6cc027", "score": "0.5306472", "text": "def setup_config(command, filename, section, vars):\r\n confuri = \"config:\" + filename\r\n if \":\" in section:\r\n confuri += \"#\" + section.rsplit(\":\", 1)[-1]\r\n conf = appconfig(confuri)\r\n\r\n load_environment(conf.global_conf, conf.local_conf)", "title": "" }, { "docid": "04d6d6db430486bd019522c3373026ca", "score": "0.53013945", "text": "def config(self, sscloud_config='/etc/sscloud/config.yml'):\n\n\t\t# Get settings from sscloud configuration\n\t\twith open(sscloud_config, 'r') as f:\n\t\t\tsscloud = yaml.load(f)\n\n\t\tsettings = dict()\n\t\t# Set master\n\t\tif not self.master:\n\t\t\tself.master = sscloud['openstack']['controller']\n\n\t\tif self.host == self.master:\n\t\t\tsettings['config'] = sscloud_config\n\t\t\tlogger.debug('Generate master configuration for saltstack')\n\t\t\tself.genconfig('/etc/salt/master', '/opt/sscloud/templates/salt/master', settings)\n\t\telse:\n\t\t\tlogger.debug('This host(%s) is not master(%s)', self.host, self.master)\n\n\t\t# Set minion\n\t\t# Get keystone settings\n\t\ttry:\n\t\t\tsettings['token'] = sscloud['keystone']['token']\n\t\texcept:\n\t\t\tsettings['token'] = sscloud['openstack']['admin']['token']\n\n\t\ttry:\n\t\t\tsettings['endpoint'] = sscloud['keystone']['endpoint']['admin']\n\t\texcept:\n\t\t\tsettings['endpoint'] = 'http://%s:35357/v2.0' % sscloud['openstack']['controller']\n\n\t\tsettings['master'] = self.master\n\t\tsettings['minion'] = self.host\n\t\tlogger.debug('Generate minion configuration for saltstack')\n\t\tself.genconfig('/etc/salt/minion', '/opt/sscloud/templates/salt/minion', settings)\n\t\tlogger.debug('Generate keystone configuration for saltstack minion')\n\t\tself.genconfig('/etc/salt/minion.d/keystone.conf', '/opt/sscloud/templates/salt/minion.d/keystone.conf', settings)\n\n\t\treturn True", "title": "" }, { "docid": "bc6942ac12dab4a58a96e0320096f959", "score": "0.5301158", "text": "def parse_config(self, config_file):\n config = 
configparser.ConfigParser()\n config.read(config_file)\n\n return config", "title": "" }, { "docid": "638fb05a33d33136dd507c224e4d593c", "score": "0.52998525", "text": "def build_config(args):\n args.subject = str(args.subject)\n args.exclude_words = [\"sp\", \"{lg}\", \"{ns}\"]\n create_directory_paths(args)\n\n write_config(vars(args))\n\n return args", "title": "" }, { "docid": "90d53a7eca52b662995310e634af8094", "score": "0.5298649", "text": "def get_config(self) -> None:\n self.config = {}\n self.config_file = self.config_file if self.config_file else self.DEFAULT_CONFIG\n\n if os.path.exists(self.config_file):\n cfg = open(self.config_file).readlines()\n else:\n raise Exception(\"Cannot open configuration file: \" + self.config_file + \"\\n\" + \"Use sudo, perhaps?\")\n\n for line in cfg:\n try:\n key, value = line.replace(\" \", \"\").strip().split(\"=\", 1)\n if key.startswith(\"db_\"):\n # Handling odd change in Spacewalk 1.7 where \"db_name\" could be URI\n self.config[key] = value.split(\"/\")[-1] if (key == \"db_name\") and value.startswith(\"//\") else value\n except Exception:\n # Suppress error as config not loaded or broken, so is defaulted\n pass", "title": "" }, { "docid": "a035446fa072636a660653158ad17e11", "score": "0.529783", "text": "def cfg() -> dict:\n return load_config_data(\"./l5kit/tests/artefacts/config.yaml\")", "title": "" }, { "docid": "1681609687408f97ce3c104439c010cd", "score": "0.5296773", "text": "def from_toml(cls, toml_file: str):\n assert os.path.exists(toml_file), f\"TOML file '{toml_file}' doesn't exist!\"\n config = attempt_parse_toml(toml_file)\n base_dir = pathlib.Path(toml_file).parent\n\n # load in global config\n if \"global_config\" in config:\n gconfig_filepath = join_if_not_abs(base_dir, config['global_config'])\n config[\"global_config\"] = str(pathlib.Path(gconfig_filepath).resolve())\n assert os.path.exists(gconfig_filepath), f\"TOML file '{gconfig_filepath}' doesn't exist!\"\n global_config = attempt_parse_toml(gconfig_filepath)\n config = {**global_config, **config} # favor the local config\n\n # check that all config keys are valid\n for k in config.keys():\n assert k in SystemConfig.get_valid_fields(), f\"config field '{k}' not a valid config field!\"\n\n # make directories\n dpath = join_if_not_abs(base_dir, config['codec_dir'])\n config[\"codec_dir\"] = str(pathlib.Path(dpath).resolve())\n outcome = mkdir_if_not_exist(dpath)\n if outcome:\n csaf_logger.info(f\"created codec directory {dpath} because it did not exist\")\n dpath = join_if_not_abs(base_dir, config['output_dir'])\n config[\"output_dir\"] = str(pathlib.Path(dpath).resolve())\n outcome = mkdir_if_not_exist(dpath)\n\n # setup logging\n log_filepath = join_if_not_abs(config[\"output_dir\"], config[\"log_file\"], exist=False)\n config[\"log_file\"] = str(pathlib.Path(log_filepath).resolve())\n\n # log is permissive -- will use filepath specified\n if os.path.exists(log_filepath):\n print(f\"WARNING! 
log will output to file that already exists: '{log_filepath}'\")\n open(log_filepath, 'w').close()\n if \"log_level\" in config:\n level = config[\"log_level\"]\n llevel = {\"critical\" : logging.CRITICAL, \"error\" : logging.ERROR,\n \"warning\" : logging.WARNING, \"info\" : logging.INFO,\n \"debug\" : logging.DEBUG, \"notset\" : logging.NOTSET}\n if level in llevel:\n log_level = llevel[level]\n else:\n AssertionError(f\"log level '{level}' found in config file {toml_file} is not valid!\")\n else:\n log_level = logging.INFO\n formatter = logging.Formatter('%(asctime)s: (%(levelname)s) %(message)s', datefmt='%I:%M:%S %p')\n # reflect user specified log level as logger level\n csaf_logger.setLevel(log_level)\n # setup file logging -- accept log level\n fh = logging.FileHandler(log_filepath)\n fh.setFormatter(formatter)\n fh.setLevel(log_level)\n # setup console logging -- set to info log level\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n sh.setLevel(logging.INFO)\n # add the two handlers to csaf logging\n csaf_logger.addHandler(fh)\n csaf_logger.addHandler(sh)\n # print config paths\n csaf_logger.info(f\"setting up CSAF System from TOML file '{toml_file}'\")\n if outcome:\n csaf_logger.info(f\"created output directory {dpath} because it did not exist\")\n csaf_logger.info(f\"Output Dir: {config['output_dir']}\")\n csaf_logger.info(f\"Codec Dir: {config['codec_dir']}\")\n csaf_logger.info(f\"Log Level: {config['log_level']}\")\n\n # load component level config file into this config dict\n for dname, dconfig in config[\"components\"].items():\n # make process absolute path\n process_path = pathlib.Path(join_if_not_abs(base_dir, dconfig[\"process\"], project_dir=\"components\"))\n assert os.path.exists(process_path), f\"process path '{process_path}' for component '{dname}' must exist!\"\n config[\"components\"][dname][\"process\"] = str(process_path.resolve())\n\n # load in config file per component\n if 'config' in dconfig:\n dcconfig_path = join_if_not_abs(base_dir, dconfig['config'])\n else:\n dcconfig_path = pathlib.Path(join_if_not_abs(base_dir, dconfig['process'])).with_suffix('.toml')\n assert os.path.exists(dcconfig_path), f\"config file '{dcconfig_path}' for component '{dname}' must exist\"\n dcconfig = attempt_parse_toml(dcconfig_path)\n config[\"components\"][dname]['config'] = dcconfig\n dbase_dir = pathlib.Path(dcconfig_path).parent.resolve()\n\n # load special field inputs\n if 'inputs' in dcconfig:\n assert 'msgs' in dcconfig['inputs'], f\"inputs field in {dcconfig_path} needs to have field msgs\"\n msg_paths = [join_if_not_abs(dbase_dir, m, project_dir=\"msg\") for m in dcconfig['inputs']['msgs']]\n config[\"components\"][dname][\"config\"][\"inputs\"]['msgs'] = [CsafMsg.from_msg_file(msg_path) for msg_path in msg_paths]\n\n # make all path to msg files absolute\n if 'topics' in dcconfig:\n for tname, tconf in dcconfig['topics'].items():\n if 'msg' in tconf:\n msg_path = join_if_not_abs(dbase_dir, tconf[\"msg\"], project_dir=\"msg\")\n assert os.path.exists(msg_path), f\"message file '{msg_path}' in topic '{tname}' for \" \\\n f\"component '{dname}' must exist!\"\n config[\"components\"][dname][\"config\"][\"topics\"][tname]['msg'] = CsafMsg.from_msg_file(msg_path)\n config[\"components\"][dname][\"config\"][\"topics\"][tname]['serializer'] = generate_serializer(msg_path, config[\"codec_dir\"])\n\n return cls(config)", "title": "" }, { "docid": "2c8e830b00474ce078cfa48aeb157cc1", "score": "0.52914757", "text": "def apply_config(self, *args):\n 
self.generate_configfile(self.CONFIG)", "title": "" }, { "docid": "616b6dd94afe6cf5a3afea2b5a21a906", "score": "0.5288262", "text": "def __recreate_configuration__(self,config_file='/etc/haproxy/haproxy.cfg', template_name='/etc/haproxy/haproxy.cfg.template',config_dir='/etc/haproxy'):\n print 'recreating ' + config_file+' from ' + template_name + ' in ' + config_dir\n #first back up the file\n fu = focaplo.files.file.FileUtils()\n fu.backup(config_file)\n \n #search the ACL files in the configuration directory\n acl=fu.concat(config_dir, '*.acl')\n print acl\n aclbackend=fu.concat(config_dir, '*.aclbackend')\n print acl\n backend=fu.concat(config_dir, '*.backend')\n print backend\n #read template\n configtemplate=Template(open(template_name).read())\n \n #write new to file\n conf = open(config_file, 'w')\n conf.write(configtemplate.substitute(aclplaceholder=acl,aclbackendplaceholder=aclbackend,backendplaceholder=backend))\n conf.close()\n print 'done'", "title": "" }, { "docid": "bdd260dc55783f0a42fa06dbc48d60db", "score": "0.5287948", "text": "def create_config(self, config_file):\n default = {\n 'Hare_birth': 0.08,\n 'Hare_predation': 0.04,\n 'Hare_diffusion': 0.2,\n 'Puma_birth': 0.02,\n 'Puma_mortality': 0.06,\n 'Puma_diffusion': 0.2,\n 'Time_step': 0.4,\n 'Steps': 100,\n 'Output_interval': 8\n }\n\n try:\n with open(config_file, 'w') as outfile:\n json.dump(default, outfile, sort_keys=True, indent=4*' ')\n print(\"New config file created:\\n%s\\n\" % config_file)\n except:\n print(\"Something went wrong...\")\n raise", "title": "" }, { "docid": "ca5a5c69e8bc4c1b9e021f26dbdbc6f7", "score": "0.52835", "text": "def _setup_framework_configuration_file(self):\n\n # Check configuration file and sections\n sections = [CFS_GENERAL, CFS_DYNAMIC_PARAMS]\n if common.CONF_FILE_LOCATION:\n Validation.file_exist(common.CONF_FILE_LOCATION)\n common.CONF_FILE = \\\n utils.ConfigurationFile(sections, common.CONF_FILE_LOCATION)\n\n # Load dynamic parameters within common\n for common_attrib in common.CONF_FILE.get_variable_list(CFS_DYNAMIC_PARAMS):\n setattr(\n common, common_attrib.upper(),\n common.CONF_FILE.get_variable(\n CFS_DYNAMIC_PARAMS, common_attrib))", "title": "" }, { "docid": "54798e75080debbf7675728973dbbb9e", "score": "0.52803916", "text": "def set_config(config_file, config_yaml):\n global creds\n with open(config_file, 'w') as f:\n try:\n creds = config_yaml['config'][os.environ['SNAP_NAME']]\n except (KeyError, TypeError):\n default = {'config': {\n os.environ['SNAP_NAME']: creds } }\n print(\"You need to give a yaml file like: {}\".format(default))\n save_shell_source(f, creds)", "title": "" }, { "docid": "73633f5e88cc8c5b524d81d2642b4c06", "score": "0.5280276", "text": "def _config_from_file(configfile):\n\n conf = {}\n\n # set from config if possible\n if configfile:\n with open(configfile, 'r') as fp:\n config_yaml = yaml.load(fp)\n\n conf = config_yaml\n\n # in the config yaml, 'years' is a map of years to styles; in the config\n # dict used in this module, 'year_styles' is that map and 'years' is\n # simply a list of the years to graph\n conf['year_styles'] = conf.pop('years', {})\n conf['years'] = list(conf['year_styles'].keys())\n\n return conf", "title": "" } ]
ac797dc1950168e935fd4e6788e034a4
Check that a TableDumpV2 JSON document from mabo is correctly abstracted.
[ { "docid": "30c2a70372eb4dab434bc2014251637c", "score": "0.5781272", "text": "def test_TableDumpV2(self):\n\n # Get the abstract BGP message\n abstracted = MaboTableDumpV2Document(\"collector\", json_TableDumpV2)\n\n # Check each element\n elements = abstracted.elements()\n assert len(elements) == 5\n\n element1 = elements.pop(0)\n assert element1 == TableDumpV2Element(asn=5678, as_path=\"1234 5678\", peer_as=1234, peer_ip=\"1.2.3.4\")\n\n element2 = elements.pop(0)\n assert element2 == TableDumpV2Element(asn=910112, as_path=\"5678 910112\", peer_as=5678, peer_ip=\"5.6.7.8\")\n\n element3 = elements.pop(0)\n assert element3 == TableDumpV2Element(asn=13141516, as_path=\"89101112 13141516\", peer_as=89101112, peer_ip=\"8.9.10.11\")\n\n element4 = elements.pop(0)\n assert element4 == TableDumpV2Element(asn=123, as_path=\"89101112 {123,456}\", peer_as=89101112, peer_ip=\"8.9.10.11\")\n\n element5 = elements.pop(0)\n assert element5 == TableDumpV2Element(asn=456, as_path=\"89101112 {123,456}\", peer_as=89101112, peer_ip=\"8.9.10.11\")\n\n # There is no withdraws\n assert abstracted.withdraws() == []\n\n # Check announces\n assert list(abstracted.announces()) == [ InternalMessage(timestamp=2807, collector=\"collector\",\n peer_as=1234, peer_ip=\"1.2.3.4\",\n prefix=\"10.11.12.0/24\", asn=5678, as_path=\"1234 5678\"),\n InternalMessage(timestamp=2807, collector=\"collector\",\n peer_as=5678, peer_ip=\"5.6.7.8\",\n prefix=\"10.11.12.0/24\", asn=910112, as_path=\"5678 910112\"),\n InternalMessage(timestamp=2807, collector=\"collector\",\n peer_as=89101112, peer_ip=\"8.9.10.11\",\n prefix=\"10.11.12.0/24\", asn=13141516, as_path=\"89101112 13141516\"),\n InternalMessage(timestamp=2807, collector=\"collector\",\n peer_as=89101112, peer_ip=\"8.9.10.11\",\n prefix=\"10.11.12.0/24\", asn=123, as_path=\"89101112 {123,456}\"),\n InternalMessage(timestamp=2807, collector=\"collector\",\n peer_as=89101112, peer_ip=\"8.9.10.11\",\n prefix=\"10.11.12.0/24\", asn=456, as_path=\"89101112 {123,456}\") ]", "title": "" } ]
[ { "docid": "c8ac963d2472571f1d37e326483a25e1", "score": "0.62551606", "text": "def test_parse_biom_table(self):\r\n # This is a TSV as a list of lines\r\n t = parse_biom_table(self.classic_otu_table1_no_tax)\r\n\r\n # Test TSV as a list of lines\r\n t_tsv_str = t.to_tsv()\r\n t_tsv_lines = t_tsv_str.splitlines()\r\n t_tsv = parse_biom_table(t_tsv_lines)\r\n self.assertEqual(t, t_tsv)\r\n # Test TSV as a file-like object\r\n t_tsv_stringio = StringIO(t_tsv_str)\r\n t_tsv = parse_biom_table(t_tsv_stringio)\r\n self.assertEqual(t, t_tsv)\r\n\r\n # Test JSON as a list of lines\r\n t_json_str = t.to_json('asd')\r\n t_json_lines = t_json_str.splitlines()\r\n t_json = parse_biom_table(t_json_lines)\r\n self.assertEqual(t, t_json)\r\n # Test JSON as a file-like object\r\n t_json_str = t.to_json('asd')\r\n t_json_stringio = StringIO(t_json_str)\r\n t_json = parse_biom_table(t_json_stringio)\r\n self.assertEqual(t, t_json)", "title": "" }, { "docid": "e9b4da21e1953372fb1da67c7ff99f27", "score": "0.6164362", "text": "def test_parse_biom_json(self):\r\n # light test. this code is used thoroughly within the other\r\n # parse_biom_table methods\r\n tab1_fh = json.load(StringIO(self.biom_minimal_sparse))\r\n tab = Table.from_json(tab1_fh)\r\n npt.assert_equal(tab.ids(), ('Sample1', 'Sample2', 'Sample3',\r\n 'Sample4', 'Sample5', 'Sample6'))\r\n npt.assert_equal(tab.ids(axis='observation'),\r\n ('GG_OTU_1', 'GG_OTU_2', 'GG_OTU_3',\r\n 'GG_OTU_4', 'GG_OTU_5'))\r\n self.assertEqual(tab.metadata(), None)\r\n self.assertEqual(tab.metadata(axis='observation'), None)\r\n\r\n tab = parse_biom_table(StringIO(self.biom_minimal_sparse))\r\n npt.assert_equal(tab.ids(), ('Sample1', 'Sample2', 'Sample3',\r\n 'Sample4', 'Sample5', 'Sample6'))\r\n npt.assert_equal(tab.ids(axis='observation'),\r\n ('GG_OTU_1', 'GG_OTU_2', 'GG_OTU_3',\r\n 'GG_OTU_4', 'GG_OTU_5'))\r\n self.assertEqual(tab.metadata(), None)\r\n self.assertEqual(tab.metadata(axis='observation'), None)\r\n\r\n tablestring = u'''{\r\n \"id\":null,\r\n \"format\": \"Biological Observation Matrix 0.9.1-dev\",\r\n \"format_url\": \"http://biom-format.org\",\r\n \"type\": \"OTU table\",\r\n \"generated_by\": \"QIIME revision XYZ\",\r\n \"date\": \"2011-12-19T19:00:00\",\r\n \"rows\":[\r\n {\"id\":\"GG_OTU_1\", \"metadata\":null},\r\n {\"id\":\"GG_OTU_2\", \"metadata\":null},\r\n {\"id\":\"GG_OTU_3\", \"metadata\":null},\r\n {\"id\":\"GG_OTU_4\", \"metadata\":null},\r\n {\"id\":\"GG_OTU_5\", \"metadata\":null}\r\n ],\r\n \"columns\": [\r\n {\"id\":\"Sample1\", \"metadata\":null},\r\n {\"id\":\"Sample2\", \"metadata\":null},\r\n {\"id\":\"Sample3\", \"metadata\":null},\r\n {\"id\":\"Sample4\", \"metadata\":null},\r\n {\"id\":\"Sample5\", \"metadata\":null},\r\n {\"id\":\"Sample6\", \"metadata\":null}\r\n ],\r\n \"matrix_type\": \"dense\",\r\n \"matrix_element_type\": \"int\",\r\n \"shape\": [5,6],\r\n \"data\": [[0,0,1,0,0,0],\r\n [5,1,0,2,3,1],\r\n [0,0,1,4,2,0],\r\n [2,1,1,0,0,1],\r\n [0,1,1,0,0,0]]\r\n }'''\r\n tbs_fh = json.load(StringIO(tablestring))\r\n tab1 = Table.from_json(tbs_fh)\r\n tab2 = parse_biom_table(tablestring)\r\n self.assertEqual(tab1, tab2)", "title": "" }, { "docid": "92f1cb036c17c6d02896f699d090e14c", "score": "0.5828839", "text": "def test_schema_fail(self):\n options = {\n 'schema': {\n 'unit_key_01': 'integer',\n 'unit_key_02': 'string'\n }\n }\n\n data = {'hey': 1}\n record_data = json.dumps(data)\n\n # get parsed data\n parser = JSONParser(options)\n assert_equal(parser.parse(record_data), False)\n\n expected_result = [\n data\n ]\n 
assert_equal(parser.invalid_parses, expected_result)", "title": "" }, { "docid": "497bbea88f665654ebec5d75b63b7058", "score": "0.5712711", "text": "def test_valid_json_file(self, file_url):\n obj = BMICalc(file_url)\n try :\n obj.define_logic_map()\n obj.read_inputfile()\n except Exception as exp:\n assert False\n else :\n assert True", "title": "" }, { "docid": "e32d065ca3b20aa2a2b82183cccca7d1", "score": "0.56740147", "text": "def handle_tables(self,subjson):\n noerr = True\n for descr in subjson:\n t = Table.parse_json(descr)\n if t is None: \n noerr = False\n continue\n if t.database not in self.databases: \n log.warning(\"Database info for table % is not known.Please specify before\")\n noerr = False\n continue\n t.database = self.databases[t.database]\n self.tables[t.name] = t\n\n return noerr", "title": "" }, { "docid": "48bc6396b70986b9ed84c201b7e31bff", "score": "0.55641615", "text": "def checkjson(self):\n raise Exception(\"This method needs to be overridden\")", "title": "" }, { "docid": "598b53d02b82b35280cdd20a08bba6c3", "score": "0.5550788", "text": "def test_parse_biom_table_with_hdf5(self):\r\n # We will round-trip the HDF5 file to several different formats, and\r\n # make sure we can recover the same table using parse_biom_table\r\n cwd = os.getcwd()\r\n if '/' in __file__[1:]:\r\n os.chdir(__file__.rsplit('/', 1)[0])\r\n\r\n t = parse_biom_table(h5py.File('test_data/test.biom'))\r\n\r\n # These things are not round-trippable using the general-purpose\r\n # parse_biom_table function\r\n t._sample_metadata = None\r\n t._observation_metadata = None\r\n t.type = None\r\n\r\n # Test TSV as a list of lines\r\n t_tsv_str = t.to_tsv()\r\n t_tsv_lines = t_tsv_str.splitlines()\r\n t_tsv = parse_biom_table(t_tsv_lines)\r\n self.assertEqual(t, t_tsv)\r\n # Test TSV as a file-like object\r\n t_tsv_stringio = StringIO(t_tsv_str)\r\n t_tsv = parse_biom_table(t_tsv_stringio)\r\n self.assertEqual(t, t_tsv)\r\n\r\n # Test JSON as a list of lines\r\n t_json_str = t.to_json('asd')\r\n t_json_lines = t_json_str.splitlines()\r\n t_json = parse_biom_table(t_json_lines)\r\n self.assertEqual(t, t_json)\r\n # Test JSON as a file-like object\r\n t_json_str = t.to_json('asd')\r\n t_json_stringio = StringIO(t_json_str)\r\n t_json = parse_biom_table(t_json_stringio)\r\n self.assertEqual(t, t_json)", "title": "" }, { "docid": "30967c66a1c4bba2d798772096c0ca29", "score": "0.55146545", "text": "def validateJSON( jsonData ):", "title": "" }, { "docid": "a5c5a3f1ed95ee923036c6b696f97a22", "score": "0.5512445", "text": "def test_parse_record_not_dict_mismatch(self):\n options = {\n 'schema': {\n 'key': 'string'\n },\n 'parser': 'json'\n }\n record_data = \"[{\\\"key\\\": \\\"value\\\"}]\"\n\n parser = JSONParser(options)\n assert_equal(parser.parse(record_data), False)", "title": "" }, { "docid": "60832dab25d6b0758616bc3712f74d03", "score": "0.5506996", "text": "def is_valid_json(self):\n try:\n json_object = json.loads(self._file)\n except (ValueError, TypeError) as e:\n raise", "title": "" }, { "docid": "86877acc3019242c33689bb5046f21a8", "score": "0.55048716", "text": "def test_nontable_file(self):\n self.setNonTableData()\n self.assertError()", "title": "" }, { "docid": "2e8e92887e1a7f747c9c510c67f1b718", "score": "0.5503046", "text": "def test_from_json_obj_wrong_type():\n for t, encoding in WRONG_TYPE_ENCODINGS:\n try:\n from_json_obj(encoding, t)\n assert False, \"Should not be decoding %s as %s.\"%(str(encoding), str(t))\n except TypeError:\n assert True", "title": "" }, { "docid": 
"f459534417ebff548387ecef220b20e8", "score": "0.54715943", "text": "def validate_schema(bdb, table, mml_json):\n bad_cols = []\n for col, typ in mml_json['columns'].items():\n # If the column is already ignored there's nothing to check\n if typ['stattype'] == 'IGNORE':\n continue\n one_col_json = copy.deepcopy(mml_json)\n one_col_json['columns'] = {col: typ}\n # Create a temp generator\n gen_name = uuid.uuid4().hex\n try:\n bdb.execute(to_mml(one_col_json, table, gen_name))\n bdb.execute('INITIALIZE 1 MODEL FOR %s'\n % (bql_quote_name(gen_name),))\n bdb.execute('ANALYZE %s FOR 1 ITERATION WAIT'\n % (bql_quote_name(gen_name),))\n except AssertionError:\n bad_cols.append(col)\n finally:\n # Drop our temp generator\n bdb.execute('DROP GENERATOR %s' % bql_quote_name(gen_name))\n modified_schema = copy.deepcopy(mml_json)\n # TODO(asilvers): Should we also return a summary of the modifications?\n for col in bad_cols:\n modified_schema['columns'][col]['guessed'] = (\n modified_schema['columns'][col]['stattype'])\n modified_schema['columns'][col]['stattype'] = 'IGNORE'\n modified_schema['columns'][col]['reason'] = 'Caused ANALYZE to error'\n jsonschema.validate(modified_schema, MML_SCHEMA)\n return modified_schema", "title": "" }, { "docid": "2c361a5298b91e447fddf78f500ce073", "score": "0.54509556", "text": "def copy_t2_schema_data() -> tuple[JSON, BytesInstance]:\n\n test_2_schema = {\n \"title\": \"SOME-RECORD-2\",\n \"$anchor\": \"SOME-RECORD-2\",\n \"cobol\": \"01 SOME-RECORD-2\",\n \"type\": \"object\",\n \"properties\": {\n \"FILLER-1\": {\n \"title\": \"FILLER\",\n \"$anchor\": \"FILLER-1\",\n \"cobol\": \"05 FILLER PIC X(5)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n \"REPEAT\": {\n \"title\": \"REPEAT\",\n \"$anchor\": \"REPEAT\",\n \"cobol\": \"05 REPEAT OCCURS 4 PIC XXX\",\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"REPEAT\": {\n \"title\": \"REPEAT\",\n \"$anchor\": \"REPEAT\",\n \"cobol\": \"05 REPEAT OCCURS 4 PIC XXX\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n }\n },\n \"maxItems\": 4\n }\n }\n }\n\n assert SchemaValidator.check_schema(test_2_schema) is None\n test_2_data = BytesInstance('12345ABCDEFGHIJKL'.encode(\"CP037\"))\n return test_2_schema, test_2_data", "title": "" }, { "docid": "a2c62613813eb665838e3f06503dbbae", "score": "0.5434971", "text": "def setUp(self):\n self.biom_text = \"\"\"{\n \"id\":null,\n \"format\": \"Biological Observation Matrix 0.9.1-dev\",\n \"format_url\": \"http://biom-format.org/documentation/format_versions/biom-1.0.html\",\n \"type\": \"OTU table\",\n \"generated_by\": \"QIIME revision 1.4.0-dev\",\n \"date\": \"2011-12-19T19:00:00\",\n \"rows\":[\n {\"id\":\"GG_OTU_1\", \"metadata\":{\"taxonomy\":[\"k__Bacteria\", \"p__Proteobacteria\", \"c__Gammaproteobacteria\", \"o__Enterobacteriales\", \"f__Enterobacteriaceae\", \"g__Escherichia\", \"s__\"]}},\n {\"id\":\"GG_OTU_2\", \"metadata\":{\"taxonomy\":[\"k__Bacteria\", \"p__Cyanobacteria\", \"c__Nostocophycideae\", \"o__Nostocales\", \"f__Nostocaceae\", \"g__Dolichospermum\", \"s__\"]}},\n {\"id\":\"GG_OTU_3\", \"metadata\":{\"taxonomy\":[\"k__Archaea\", \"p__Euryarchaeota\", \"c__Methanomicrobia\", \"o__Methanosarcinales\", \"f__Methanosarcinaceae\", \"g__Methanosarcina\", \"s__\"]}},\n {\"id\":\"GG_OTU_4\", \"metadata\":{\"taxonomy\":[\"k__Bacteria\", \"p__Firmicutes\", \"c__Clostridia\", \"o__Halanaerobiales\", \"f__Halanaerobiaceae\", \"g__Halanaerobium\", \"s__Halanaerobiumsaccharolyticum\"]}},\n 
{\"id\":\"GG_OTU_5\", \"metadata\":{\"taxonomy\":[\"k__Bacteria\", \"p__Proteobacteria\", \"c__Gammaproteobacteria\", \"o__Enterobacteriales\", \"f__Enterobacteriaceae\", \"g__Escherichia\", \"s__\"]}}\n ],\n \"columns\":[\n {\"id\":\"Sample1\", \"metadata\":{\n \"BarcodeSequence\":\"CGCTTATCGAGA\",\n \"LinkerPrimerSequence\":\"CATGCTGCCTCCCGTAGGAGT\",\n \"BODY_SITE\":\"gut\",\n \"Description\":\"human gut\"}},\n {\"id\":\"Sample2\", \"metadata\":{\n \"BarcodeSequence\":\"CATACCAGTAGC\",\n \"LinkerPrimerSequence\":\"CATGCTGCCTCCCGTAGGAGT\",\n \"BODY_SITE\":\"gut\",\n \"Description\":\"human gut\"}},\n {\"id\":\"Sample3\", \"metadata\":{\n \"BarcodeSequence\":\"CTCTCTACCTGT\",\n \"LinkerPrimerSequence\":\"CATGCTGCCTCCCGTAGGAGT\",\n \"BODY_SITE\":\"gut\",\n \"Description\":\"human gut\"}},\n {\"id\":\"Sample4\", \"metadata\":{\n \"BarcodeSequence\":\"CTCTCGGCCTGT\",\n \"LinkerPrimerSequence\":\"CATGCTGCCTCCCGTAGGAGT\",\n \"BODY_SITE\":\"skin\",\n \"Description\":\"human skin\"}},\n {\"id\":\"Sample5\", \"metadata\":{\n \"BarcodeSequence\":\"CTCTCTACCAAT\",\n \"LinkerPrimerSequence\":\"CATGCTGCCTCCCGTAGGAGT\",\n \"BODY_SITE\":\"skin\",\n \"Description\":\"human skin\"}},\n {\"id\":\"Sample6\", \"metadata\":{\n \"BarcodeSequence\":\"CTAACTACCAAT\",\n \"LinkerPrimerSequence\":\"CATGCTGCCTCCCGTAGGAGT\",\n \"BODY_SITE\":\"skin\",\n \"Description\":\"human skin\"}}\n ],\n \"matrix_type\": \"sparse\",\n \"matrix_element_type\": \"int\",\n \"shape\": [5, 6],\n \"data\":[[0,2,1],\n [1,0,5],\n [1,1,1],\n [1,3,2],\n [1,4,3],\n [1,5,1],\n [2,2,1],\n [2,3,4],\n [2,5,2],\n [3,0,2],\n [3,1,1],\n [3,2,1],\n [3,5,1],\n [4,1,1],\n [4,2,1]\n ]\n }\"\"\"\n\n self.coreOTUdata = '''# Core OTUs across 75 % of samples.\n064EK048\t[u'k__Bacteria', u'p__Firmicutes', u'c__Bacilli', u'o__Lactobacillales', u'f__Streptococcaceae', u'g__Streptococcus', u's__HOT.064']\n200CZ006\t[u'k__Bacteria', u'p__Fusobacteria', u'c__Fusobacteria', u'o__Fusobacteriales', u'f__Fusobacteriaceae', u'g__Fusobacterium', u's__nucleatum']\n161Vparv\t[u'k__Bacteria', u'p__Firmicutes', u'c__Clostridia', u'o__Clostridiales', u'f__Veillonellaceae', u'g__Veillonella', u's__parvula']\n111_3523\t[u'k__Bacteria', u'p__Firmicutes', u'c__Clostridia', u'o__Clostridiales', u'f__Peptostreptococcaceae_[XIII]', u'g__Parvimonas', u's__micra']\n644_4671\t[u'k__Bacteria', u'p__Firmicutes', u'c__Bacilli', u'o__Lactobacillales', u'f__Streptococcaceae', u'g__Streptococcus', u's__intermedius']\n758_3928\t[u'k__Bacteria', u'p__Firmicutes', u'c__Bacilli', u'o__Lactobacillales', u'f__Streptococcaceae', u'g__Streptococcus', u's__sanguinis']\n524Vatyp\t[u'k__Bacteria', u'p__Firmicutes', u'c__Clostridia', u'o__Clostridiales', u'f__Veillonellaceae', u'g__Veillonella', u's__atypica']\n151_K168\t[u'k__Bacteria', u'p__Firmicutes', u'c__Clostridia', u'o__Clostridiales', u'f__Veillonellaceae', u'g__Selenomonas', u's__sputigena']\n530_5256\t[u'k__Bacteria', u'p__Actinobacteria', u'c__Actinobacteria', u'o__Actinomycetales', u'f__Propionibacteriaceae', u'g__Propionibacterium', u's__acnes']\n612_6662\t[u'k__Bacteria', u'p__Proteobacteria', u'c__Gammaproteobacteria', u'o__Pseudomonadales', u'f__Pseudomonadaceae', u'g__Pseudomonas', u's__fluorescens']\n539_6962\t[u'k__Bacteria', u'p__Firmicutes', u'c__Clostridia', u'o__Clostridiales', u'f__Peptostreptococcaceae_[XI]', u'g__Filifactor', u's__alocis']\n073-T1E5\t[u'k__Bacteria', u'p__Firmicutes', u'c__Bacilli', u'o__Lactobacillales', u'f__Streptococcaceae', u'g__Streptococcus', u's__australis']\n398_7051\t[u'k__Bacteria', 
u'p__Firmicutes', u'c__Bacilli', u'o__Lactobacillales', u'f__Streptococcaceae', u'g__Streptococcus', u's__mitis']\n622_3931\t[u'k__Bacteria', u'p__Firmicutes', u'c__Bacilli', u'o__Lactobacillales', u'f__Streptococcaceae', u'g__Streptococcus', u's__gordonii']\n826_8612\t[u'k__Bacteria', u'p__Proteobacteria', u'c__Gammaproteobacteria', u'o__Pasteurellales', u'f__Pasteurellaceae', u'g__Terrahaemophilus', u's__aromaticivorans']\n623_4320\t[u'k__Bacteria', u'p__Proteobacteria', u'c__Epsilonproteobacteria', u'o__Campylobacterales', u'f__Campylobacteraceae', u'g__Campylobacter', u's__gracilis']\n205FL002\t[u'k__Bacteria', u'p__Fusobacteria', u'c__Fusobacteria', u'o__Fusobacteriales', u'f__Fusobacteriaceae', u'g__Fusobacterium', u's__HOT.205']'''\n\n self.biom = json.loads(self.biom_text)\n self.row = self.biom['rows']", "title": "" }, { "docid": "f0ee69fc9871c44fc84de18f3abcf636", "score": "0.5420853", "text": "def test_valid_json(self):\n self.assertTrue(json.loads(self.ratings.get_json()))", "title": "" }, { "docid": "f0ee69fc9871c44fc84de18f3abcf636", "score": "0.5420853", "text": "def test_valid_json(self):\n self.assertTrue(json.loads(self.ratings.get_json()))", "title": "" }, { "docid": "3999dcecbe6aec5d912dc15cb0917c48", "score": "0.538307", "text": "def check_valid_report(context):\n response = context.response.json()\n if context.history:\n assert isinstance(response['objects'], list)\n else:\n assert isinstance(response, dict)", "title": "" }, { "docid": "c41c96316dce2d78a9eb824c8ac0f1ce", "score": "0.53647953", "text": "def test_instance_resize_prep_end_valid_json(self):\n expected_obj = self.resize_prep_end_obj\n\n actual_json = json.dumps(self.instance_resize_prep_end_dict)\n actual_obj = InstanceResizePrepEnd.deserialize(actual_json, 'json')\n\n self.assertEqual(expected_obj, actual_obj)\n self.assertFalse(actual_obj.is_empty())", "title": "" }, { "docid": "942f27d43d60da04f9e6f5e885bad519", "score": "0.53493994", "text": "def test_json(self, tmpdir):\n _ = self.__class__ # just to get rid of codacy warning, I know, it's stupid\n config_from_yaml = Menu.safe_read_yaml(yaml_path())\n p = tmpdir.mkdir(\"pytabbytest\").join(\"temp.json\")\n p.write(json.dumps(config_from_yaml))\n config_from_json = Menu.read_json(str(p))\n if not config_from_yaml == config_from_json:\n raise AssertionError", "title": "" }, { "docid": "d979a568404fd4b97971851b61c888a4", "score": "0.53352386", "text": "def test_check_json_health(self):\n # check health json\n with open(self.test_file_name, 'w') as fp:\n json.dump({'tasks': {}, 'users': {}}, fp)\n\n with open(self.test_file_name) as fp:\n self.assertEqual(\n interactive.check_json_health(fp),\n {'tasks': {}, 'users': {}}\n )\n\n # check corrupted json\n with open(self.test_file_name, 'w') as fp:\n json.dump({'tasks': {}}, fp)\n\n with open(self.test_file_name) as fp:\n self.assertIsNone(interactive.check_json_health(fp))", "title": "" }, { "docid": "4bae938a6b2a9be5595c7461afbbc7f7", "score": "0.53255975", "text": "def test_exports(self):\n # Get rid of weird formatting included in class attribute\n self.maxDiff = 5_000\n self.assertEquals(\n self._meta.to_json().replace('\\n','').replace(' ','') ,\n self._META_MODEL_JSON.replace('\\n','').replace(' ','') \n )\n # Test Risk Table\n param_dict = json.loads(self._META_MODEL_JSON)\n del param_dict['creation_date']\n del param_dict['model_uuid']\n del param_dict['name']\n del param_dict['type']\n self.assertEquals(\n self._meta.export_params(),\n param_dict\n )", "title": "" }, { "docid": 
"c31f08393039889c3412f02550034d72", "score": "0.5313659", "text": "def _jsonYayForTabs(self, content):\n\n\t\tfor line in content.split('\\n'):\n\t\t\tpattern = re.compile(r'\\t*(?P<spaces> *)')\n\t\t\tmatches = pattern.match(line)\n\t\t\tif matches.groupdict()['spaces']:\n\t\t\t\tself.fail('Spaces are the devil!')", "title": "" }, { "docid": "d03be5d7152ea3dbbacb164a2b03f912", "score": "0.53131914", "text": "def test8(self):\n # arrange\n model = IrisModel()\n\n # act\n json_schema = model.output_schema.json_schema(\"https://example.com/my-schema.json\")\n\n # assert\n print(json_schema)\n self.assertTrue(type(json_schema) is dict)", "title": "" }, { "docid": "4623c594c47984ba45bff2a5c85ecf75", "score": "0.5312796", "text": "def test_WbTabularData_error_on_wrong_page_type(self):\n non_data_page = Page(self.commons, 'File:Foo.jpg')\n non_map_page = Page(self.commons, 'Data:Lyngby Hovedgade.map')\n regex = (r\"^Page must be in 'Data:' namespace and end in '\\.tab' \"\n r'for tabular-data\\.$')\n with self.assertRaisesRegex(ValueError, regex):\n pywikibot.WbTabularData(non_data_page, self.get_repo())\n with self.assertRaisesRegex(ValueError, regex):\n pywikibot.WbTabularData(non_map_page, self.get_repo())", "title": "" }, { "docid": "3bb2e80bf2b0d19d05d8692003d482d1", "score": "0.5296964", "text": "def test_read_json(self):\n self.assertEqual(len(cleanUtils._read_json('abbreviated_cantons.json')), 47)", "title": "" }, { "docid": "72c6a575077f279d21857b0d07e055bd", "score": "0.52787226", "text": "def test_to_dict(self):\n my_model_dict = self.bm_obj.to_dict()\n actual = 1\n try:\n serialized = json.dumps(my_model_dict)\n except:\n actual = 0\n self.assertTrue(1 == actual)", "title": "" }, { "docid": "2b64075114bf834af6426bb023eb597f", "score": "0.52676225", "text": "def test_schema_valid(self) -> None:\n instance = TNSObjectSearchResult.from_dict(FAKE_TNS_OBJECT_DATA)\n assert isinstance(instance, TNSObjectSearchResult)\n assert instance._data == FAKE_TNS_OBJECT_DATA\n assert instance.object_type_name == FAKE_TNS_TYPE_NAME\n assert instance.redshift == FAKE_TNS_REDSHIFT", "title": "" }, { "docid": "c57325136b5ae13ff61c2fcc45f4ca8e", "score": "0.5267283", "text": "def test_character_from_tibiadata_unrelated_json(self):\n with self.assertRaises(InvalidContent):\n Character.from_tibiadata(self._load_resource(tests.tests_guild.FILE_GUILD_TIBIADATA))", "title": "" }, { "docid": "84a52c5dbdcdf5c288fb98ff443336c9", "score": "0.5260765", "text": "def test_character_from_tibiadata_invalid_json(self):\n with self.assertRaises(InvalidContent):\n Character.from_tibiadata(\"<html><b>Not a json string</b></html>\")", "title": "" }, { "docid": "1dc3b5ab52aec0434f2a986a51fea297", "score": "0.52593917", "text": "def test_WbTabularData_error_on_non_page(self):\n regex = r'^Page .+? 
must be a pywikibot\\.Page object not a'\n with self.assertRaisesRegex(ValueError, regex):\n pywikibot.WbTabularData('A string', self.get_repo())", "title": "" }, { "docid": "f5269412829a13c8e2ce20c7037eaae3", "score": "0.5254655", "text": "def copy_t1_schema_data() -> tuple[JSON, BytesInstance]:\n test_1_schema = {\n \"title\": \"SOME-RECORD-1\",\n \"$anchor\": \"SOME-RECORD-1\",\n \"cobol\": \"01 SOME-RECORD-1\",\n \"type\": \"object\",\n \"properties\": {\n \"SOME-COLUMN\": {\n \"title\": \"SOME-COLUMN\",\n \"$anchor\": \"SOME-COLUMN\",\n \"cobol\": \"05 SOME-COLUMN PIC X(5)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n }\n }\n assert SchemaValidator.check_schema(test_1_schema) is None\n test_1_data = BytesInstance('12345'.encode(\"CP037\"))\n return test_1_schema, test_1_data", "title": "" }, { "docid": "a92c767374fad04fe043f1882159f28c", "score": "0.5254229", "text": "def contains_valid_json(obj):\n if obj is None:\n return False\n try:\n try:\n # json_object = json.loads(obj.read())\n json.load(obj)\n except AttributeError:\n # obj is not a file\n json.loads(obj)\n except ValueError:\n return False\n return True", "title": "" }, { "docid": "57476200bca642877417c888734d0b1f", "score": "0.52422327", "text": "def test_create_from_json_fails():\n global invalid_json\n i = se.instrument.Instrument(invalid_json)\n assert type(i) != se.instrument.Instrument", "title": "" }, { "docid": "ffcd39acd6cd65a935697f0b87ca811e", "score": "0.5238401", "text": "def copy_t3_schema_data() -> tuple[JSON, BytesInstance]:\n test_3_schema = {\n \"title\": \"SOME-RECORD-3\",\n \"$anchor\": \"SOME-RECORD-3\",\n \"cobol\": \"01 SOME-RECORD-3\",\n \"type\": \"object\",\n \"properties\": {\n \"FILLER-1\": {\n \"title\": \"FILLER\",\n \"$anchor\": \"FILLER-1\",\n \"cobol\": \"05 FILLER PIC X(5)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n \"REPEAT\": {\n \"title\": \"REPEAT\",\n \"$anchor\": \"REPEAT\",\n \"cobol\": \"05 REPEAT OCCURS 5\",\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"ITEM\": {\n \"title\": \"ITEM\",\n \"$anchor\": \"ITEM\",\n \"cobol\": \"10 ITEM OCCURS 4 PIC XX\",\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"ITEM\": {\n \"title\": \"ITEM\",\n \"$anchor\": \"ITEM\",\n \"cobol\": \"10 ITEM OCCURS 4 PIC XX\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n }\n },\n \"maxItems\": 4\n }\n }\n },\n \"maxItems\": 5\n }\n }\n }\n\n assert SchemaValidator.check_schema(test_3_schema) is None\n test_3_data = BytesInstance('12345ABCDEFGHabcdefghIJKLMNOPijklmnopQRSTUVWZ'.encode(\"CP037\"))\n return test_3_schema, test_3_data", "title": "" }, { "docid": "18fac9829092d64cb98f5964cc0d1f5f", "score": "0.52317256", "text": "def test_metacritic_pages_parsed(self, m):\n set_mock_data(m)\n data = json.loads(get_json_data())\n # print('total:', len(data))\n # print(data)\n self.assertTrue(type(data) == list)\n self.assertEqual(len(data), 2037)\n for entry in data:\n self.assertTrue('title' in entry)\n self.assertTrue(type(entry['title']) == str)\n self.assertTrue('score' in entry)\n self.assertTrue(type(entry['score']) == str)", "title": "" }, { "docid": "5e8e3f656dfb20f00b379a017b61e5ac", "score": "0.52272767", "text": "def test_output_type_data(bamfile,bedfile):\n\n transposonmapper(bamfile)\n\n chrom_names_dict, chrom_start_line_dict, chrom_end_line_dict = chromosome_name_bedfile(bed_file=bedfile)\n\n assert isinstance(chrom_names_dict, dict), \"Expected dict type\"\n assert 
isinstance(chrom_start_line_dict, dict), \"Expected dict type\"\n assert isinstance(chrom_end_line_dict, dict), \"Expected dict type\"", "title": "" }, { "docid": "8d84942d26ae68b5bd89b614fcd24c49", "score": "0.52089804", "text": "def test_WCAG_PDF_06(self):\n\n if len(self.awamHandler.tableStructDict) == 0:\n self.logger.info('No tables found in Document')\n # No tables ? test not applicable\n return 2\n\n results = self.initResult()\n self.logger.debug('No of tables =>',len(self.awamHandler.tableStructDict))\n\n # Loop through each and see if it is marked invalid\n #if any([x.invalid for x in self.awamHandler.tableStructDict.values()]):\n # # Failed\n # print 'Invalid table structure found in Document' \n # return 0\n for tbl in self.awamHandler.tableStructDict.values():\n pg = tbl.getPage()\n if tbl.invalid:\n self.updateResult(results[0], pg, tbl)\n else:\n self.updateResult(results[1], pg, tbl)\n \n self.logger.info('wcag.pdf.06 - Test passed')\n return results", "title": "" }, { "docid": "6f0b0fadda47cc4d98e953292c644aa5", "score": "0.5208406", "text": "def testReadJsonFile(self):\n try:\n rObj = self.__ioU.deserialize(self.__pathJsonTestFile, fmt=\"json\")\n logger.debug(\"Object length %d\", len(rObj))\n self.assertGreaterEqual(len(rObj), 1)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "title": "" }, { "docid": "dbc6c01ab5319b7fd48be8d110245774", "score": "0.5198041", "text": "def test_from_json_obj_basetypes():\n for val, t, encoding in BASETYPES_ENCODINGS:\n assert from_json_obj(encoding, t) == val", "title": "" }, { "docid": "aa482574c348dad4e816be593de27bd1", "score": "0.51910233", "text": "def test_web_json(self):\n data = jsonreader.get_web_elements()\n assert type(data) == list\n assert len(data) > 0\n for entry in data:\n assert type(entry) == dict\n assert entry.get('phase') is not None\n assert type(entry.get('phase')) == str\n assert type(entry.get('route')) == str\n assert type(entry.get('html')) == list\n assert type(entry.get('xpath')) == str", "title": "" }, { "docid": "bb29b33fe7d9b3a7ec27cd18a8802106", "score": "0.51827407", "text": "def test_plant_serializer_with_valida_data(new_plant):\n result = plant_schema.load(new_plant)\n assert result[\"name\"] == \"Monstera\"\n assert result[\"latin\"] == \"Monstera Adans.\"\n assert result[\"difficulty\"] == 5", "title": "" }, { "docid": "fd05aa4fd90fa8da075c40b6f368dc01", "score": "0.5177884", "text": "def _verify_json_content(self):\n self.assertTrue(all(\n json_item['auth'] == 'Bearer fake token'\n for json_item in OnTaskSharedState.json_outbox))", "title": "" }, { "docid": "8933534c4ede0291330d9ce38076bd02", "score": "0.51730776", "text": "def test_from_json_obj_typeerrors():\n class NonTypechekableT:\n name: str\n val: int\n def __init__(self, name, val):\n self.name = name\n self.val = val\n x = NonTypechekableT(\"x\", 0)\n try:\n from_json_obj(x, NonTypechekableT)\n assert False\n except TypeError:\n assert True", "title": "" }, { "docid": "c7d60fa7fdd21c675d502350a760bca4", "score": "0.516112", "text": "def validate_table(self, model):\n expected = bloop.tables.expected_description(model)\n status = \"BLOOP_NOT_ACTIVE\"\n while status == \"BLOOP_NOT_ACTIVE\":\n description = self.describe_table(model)\n status = bloop.tables.simple_status(description)\n try:\n actual = bloop.tables.sanitized_description(description)\n except KeyError:\n raise bloop.exceptions.TableMismatch(model, expected, description)\n if bloop.util.ordered(actual) != bloop.util.ordered(expected):\n 
raise bloop.exceptions.TableMismatch(model, expected, actual)", "title": "" }, { "docid": "7d2e4cfe8e0a91bc5af8f6350a1cae52", "score": "0.5155278", "text": "def test_parse_record_copy(self):\n options = {\n 'schema': {\n 'key': 'string'\n }\n }\n record_data = {\n 'key': 'value'\n }\n\n parser = JSONParser(options)\n assert_equal(parser.parse(record_data), True)\n assert_equal(id(parser.parsed_records[0]) == id(record_data), False)", "title": "" }, { "docid": "c60f67023a0ac6b5729a7a041388c6e5", "score": "0.51532674", "text": "def test_ingest_json_file(self):\n ingested_json = py_challenge.ingest_json_file('data/split_ac.fastq.data.json')\n self.assertNotEqual(ingested_json, None)", "title": "" }, { "docid": "8f3023bcdc70aba8807fee85ca67a229", "score": "0.5147551", "text": "def is_json_serializable(document: dict):\n assert isinstance(document, dict), \"Check only 1 item!\"\n test_json = user_data_dir(\"test_vectorai\", \"test.json\")\n\n with open(test_json, \"w\"):\n json.dump(test_json)\n\n with open(test_json) as f:\n check_json = json.load(f)\n os.remove(test_json)\n \n assert check_json == document, (\n \"This will not upload correctly. Please ensure all items \"\n + \"in the dictionary are lists/floats/ints/strings.\"\n )\n print(\"Checked! Feel free to upload!\")", "title": "" }, { "docid": "8741e9d5d25bd6c5caaef26533663986", "score": "0.5140689", "text": "def copy_11_schema_data() -> tuple[JSON, BytesInstance]:\n copy_11_schema = {\n \"title\": \"MAIN-AREA\",\n \"$anchor\": \"MAIN-AREA\",\n \"cobol\": \"01 MAIN-AREA\",\n \"type\": \"object\",\n \"properties\": {\n \"REC-1\": {\n \"title\": \"REC-1\",\n \"$anchor\": \"REC-1\",\n \"cobol\": \"03 REC-1\",\n \"type\": \"object\",\n \"properties\": {\n \"FIELD-1\": {\n \"title\": \"FIELD-1\",\n \"$anchor\": \"FIELD-1\",\n \"cobol\": \"05 FIELD-1 PIC 9\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\",\n \"conversion\": \"decimal\"\n },\n \"FIELD-3\": {\n \"title\": \"FIELD-3\",\n \"$anchor\": \"FIELD-3\",\n \"cobol\": \"05 FIELD-3 PIC 9\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\",\n \"conversion\": \"decimal\"\n },\n \"FIELD-2\": {\n \"title\": \"FIELD-2\",\n \"$anchor\": \"FIELD-2\",\n \"cobol\": \"05 FIELD-2 OCCURS 1 TO 5 TIMES\\n DEPENDING ON FIELD-1 PIC X(05)\",\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"FIELD-2\": {\n \"cobol\": \"05 FIELD-2 OCCURS 1 TO 5 TIMES\\n DEPENDING ON FIELD-1 PIC X(05)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n }\n },\n \"maxItemsDependsOn\": {\n \"$ref\": \"#FIELD-1\"\n }\n }\n }\n },\n \"REC-2\": {\n \"title\": \"REC-2\",\n \"$anchor\": \"REC-2\",\n \"cobol\": \"03 REC-2\",\n \"type\": \"object\",\n \"properties\": {\n \"FIELD-4\": {\n \"title\": \"FIELD-4\",\n \"$anchor\": \"FIELD-4\",\n \"cobol\": \"05 FIELD-4 OCCURS 1 TO 5 TIMES\\n DEPENDING ON FIELD-3 PIC X(05)\",\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"FIELD-4\": {\n \"cobol\": \"05 FIELD-4 OCCURS 1 TO 5 TIMES\\n DEPENDING ON FIELD-3 PIC X(05)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n }\n },\n \"maxItemsDependsOn\": {\n \"$ref\": \"#FIELD-3\"\n }\n }\n }\n }\n }\n }\n assert SchemaValidator.check_schema(copy_11_schema) is None\n\n copy_11_data = BytesInstance(\"321111122222333334444455555\".encode(\"CP037\"))\n return copy_11_schema, copy_11_data", "title": "" }, { "docid": "866948a1ded7ca6789fe57e56be47cd1", "score": "0.51384306", "text": "def basic_json_check(datadir, filename, json_data, 
samehost=False):\n header = json.loads(str(json_data, 'UTF-8'))\n if not isinstance(header, dict):\n raise BoardLibError(\"%s contains JSON which does not generate a dictionary.\"\n % (filename))\n required_keys = {'schema', 'hostname', 'absdatadir', 'sequence_number',\n 'most_recent', 'most_recent_hash', 'last_process_most_recent',\n 'last_process_most_recent_hash', 'payload'}\n for header_key in list(header):\n if header_key not in required_keys:\n raise BoardLibError(\"%s is an unexpected key in the JSON data from %s.\"\n % (header_key, filename))\n for required_key in list(required_keys):\n if not required_key in header:\n raise BoardLibError(\"%s is not in JSON data in file %s\"\n % (required_key, filename))\n if header['schema'] not in schema_dictionary:\n raise BoardLibError(\"Schema %s from file %s does not appear in the \"\n \"schema dictionary.\" % (header['schema'], filename))\n if not isinstance(header['sequence_number'], int):\n raise BoardLibError(\"Filename %s holds a sequence number which is not \"\n \"an integer.\" % (filename))\n if header['sequence_number'] < 0:\n raise BoardLibError(\"Sequence_number %d from file %s is negative\"\n % (header['sequence_number'], filename))\n # <now>.<hostname>.<inode>\n unique_file_match = re.match('^\\d{10}\\.(\\S+)\\.\\d+$', filename)\n if unique_file_match:\n file_hostname = unique_file_match.group(1)\n if file_hostname != header['hostname']:\n raise BoardLibError(\"Filename %s does not match hostname in JSON: %s\"\n % (filename, header['hostname']))\n else:\n raise BoardLibError(\"Filename %s has the wrong structure.\" % (filename))\n if header['most_recent'] == None and header['sequence_number'] != 0:\n raise BoardLibError(\"Filename %s has no most_recent predecessor but \"\n \"sequence number is non-zero: %d\"\n % (filename, header['sequence_number']))\n if header['last_process_most_recent'] and header['most_recent']:\n raise BoardLibError(\"Filename %s has last_process_most_recent (%s) \"\n \"and most_recent (%s) defined.\"\n % (filename, header['last_process_most_recent'], header['most_recent']))\n if samehost:\n hostname = socket.gethostname()\n if header['hostname'] != hostname:\n raise BoardLibError(\"Hostname %s from file %s is not the same \"\n \"as our host: %s\"\n % (header['hostname'], filename, hostname))\n if datadir != header['absdatadir']:\n raise BoardLibError(\"Absdatadir %s from file %s is not the same \"\n \"as the datadir we were given: %s\"\n % (header['absdatadir'], filename, datadir))\n schema_check_func = schema_dictionary[header['schema']]\n if not schema_check_func(header['payload']):\n raise BoardLibError(\"Filename %s failed its schema check.\" % (filename))\n if (header['most_recent'] and not header['most_recent_hash']) or \\\n (not header['most_recent'] and header['most_recent_hash']):\n raise BoardLibError(\"In file %s most_recent and most_recent_hash are \"\n \"not both defined.\" % (filename))\n if (header['last_process_most_recent'] and not header['last_process_most_recent_hash']) \\\n or (not header['last_process_most_recent'] and header['last_process_most_recent_hash']):\n raise BoardLibError(\"In file %s last_process_most_recent and \"\n \"last_process_most_recent_hash are not both defined.\"\n % (filename))\n if header['most_recent_hash'] and \\\n not re.match('^[0-9a-f]{64}$', header['most_recent_hash']):\n raise BoardLibError(\"In file %s the most_recent_hash does not look like \"\n \"a valid hash: %s\" % (filename, header['most_recent_hash']))\n return None", "title": "" }, { "docid": 
"d66a637d2d313e23ef53b867a036b066", "score": "0.5135247", "text": "def test_read_json(self):\n # Ensure metamodel fails\n self.assertRaises(\n FairException, \n FairMetaModel.read_json, \n self._MODEL_JSON\n )", "title": "" }, { "docid": "8a0013128f44bfb6d9f2a86b78c0b072", "score": "0.51279455", "text": "def test_storm_objects_to_cells(self):\n\n this_storm_object_table = storms_to_winds._storm_objects_to_cells(\n STORM_OBJECT_TABLE_NO_CELL_INFO)\n self.assertTrue(this_storm_object_table.equals(\n STORM_OBJECT_TABLE_WITH_CELL_INFO))", "title": "" }, { "docid": "ad536e8c84b877b75ebaf8cd7dfbf8cc", "score": "0.51265985", "text": "def check_schema(self, data):\n module = \"check_schema\"\n self.mu_log.log(self.mu_log.DEBUG, \"expected meta_version is >\" + self.metaschema_version + \"<\", module)\n self.json_data = data\n\n try:\n self.meta_type = data[\"meta\"]\n self.meta_version = data[\"meta_version\"]\n self.mu_log.log(self.mu_log.DEBUG\n , \"schema is >\" + self.meta_type + \"<, version >\"\n + self.meta_version + \"<\", module)\n except KeyError as e:\n self.mu_log.log(self.mu_log.DEBUG,\n \"Key error. meta and meta_version must be in JSON file. That is not the case with \"\n + self.json_file, module)\n return messages.message[\"meta_error\"]\n except jsonschema.exceptions.SchemaError as e:\n self.mu_log.log(self.mu_log.FATAL, \"Schema error: \" + e.message, module)\n return messages.message[\"json_schema_error\"]\n except jsonschema.exceptions.ValidationError as e:\n self.mu_log.log(self.mu_log.FATAL, \"Validation error: \" + e.message, module)\n return messages.message[\"json_validation_error\"]\n except json.decoder.JSONDecodeError as e:\n self.mu_log.log(self.mu_log.FATAL, \"Error parsing JSON:\" + e.msg, module)\n return messages.message[\"json_parse_error\"]\n\n if self.meta_version == generic_settings.GenericSettings().meta_version:\n self.mu_log.log(self.mu_log.INFO, \"file meta version matches expected schema version\", module)\n schema_directory = generic_settings.GenericSettings().schema_directory\n self.schema_file = schema_directory + self.meta_type + \".json\"\n with open(self.schema_file) as f:\n schema = json.load(f)\n try:\n jsonschema.validate(data, schema)\n self.mu_log.log(self.mu_log.INFO, \"JSON file validated successfully against schema\", module)\n except jsonschema.exceptions.SchemaError as e:\n self.mu_log.log(self.mu_log.FATAL, \"A schema error occurred during validation\", module)\n return messages.message[\"jsonschema_validation_error\"]\n except jsonschema.exceptions.ValidationError as e:\n self.mu_log.log(self.mu_log.ERROR, \"A validation error occurred\", module)\n return messages.message[\"jsonschema_validation_error\"]\n else:\n self.mu_log.log(self.mu_log.DEBUG, \"File meta version does not match expected schema version\", module)\n return messages.message[\"incorrect_meta_version\"]\n\n return messages.message[\"ok\"]", "title": "" }, { "docid": "b7f29ceec2661014054d4c79ce05701d", "score": "0.51249635", "text": "def copy_9_schema_data() -> tuple[JSON, BytesInstance]:\n copy_9_schema_1 = {\n \"title\": \"DETAIL-LINE\",\n \"$anchor\": \"DETAIL-LINE\",\n \"cobol\": \"01 DETAIL-LINE\",\n \"type\": \"object\",\n \"properties\": {\n \"QUESTION\": {\n \"title\": \"QUESTION\",\n \"$anchor\": \"QUESTION\",\n \"cobol\": \"05 QUESTION PIC ZZ\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n \"PRINT-YES\": {\n \"title\": \"PRINT-YES\",\n \"$anchor\": \"PRINT-YES\",\n \"cobol\": \"05 PRINT-YES PIC ZZ\",\n \"type\": \"string\",\n 
\"contentEncoding\": \"cp037\"\n },\n \"PRINT-NO\": {\n \"title\": \"PRINT-NO\",\n \"$anchor\": \"PRINT-NO\",\n \"cobol\": \"05 PRINT-NO PIC ZZ\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n \"NOT-SURE\": {\n \"title\": \"NOT-SURE\",\n \"$anchor\": \"NOT-SURE\",\n \"cobol\": \"05 NOT-SURE PIC ZZ\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n }\n }\n\n copy_9_schema_2 = {\n \"title\": \"SUMMARY-LINE\",\n \"$anchor\": \"SUMMARY-LINE\",\n \"cobol\": \"01 SUMMARY-LINE REDEFINES DETAIL-LINE\",\n \"type\": \"object\",\n \"properties\": {\n \"COUNT\": {\n \"title\": \"COUNT\",\n \"$anchor\": \"COUNT\",\n \"cobol\": \"05 COUNT PIC ZZ\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n \"FILLER-1\": {\n \"title\": \"FILLER\",\n \"$anchor\": \"FILLER-1\",\n \"cobol\": \"05 FILLER PIC XX\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n \"FILLER-2\": {\n \"title\": \"FILLER\",\n \"$anchor\": \"FILLER-2\",\n \"cobol\": \"05 FILLER PIC XX\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n \"FILLER-3\": {\n \"title\": \"FILLER\",\n \"$anchor\": \"FILLER-3\",\n \"cobol\": \"05 FILLER PIC XX\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n }\n }\n\n assert SchemaValidator.check_schema(copy_9_schema_1) is None\n assert SchemaValidator.check_schema(copy_9_schema_2) is None\n\n copy_9_data = BytesInstance(\"01020304\".encode(\"CP037\"))\n return [copy_9_schema_1, copy_9_schema_2], copy_9_data", "title": "" }, { "docid": "ee5511d6983bad620cf86e4d156165a7", "score": "0.5094555", "text": "def test_AttType(self):\n bModel_dict = self.bModel.to_dict()\n self.assertEqual(self.bModel.__class__.__name__, \"BaseModel\")\n self.assertIsInstance(bModel_dict['created_at'], str)\n self.assertIsInstance(bModel_dict['updated_at'], str)", "title": "" }, { "docid": "b138a83698378d9b10a30abfae4640f0", "score": "0.5094188", "text": "def test_get_json(json_bytes_data: TypingJsonBytesData):\n assert json_bytes_data[\"json_bytes\"].get_json()", "title": "" }, { "docid": "eb973c24d0d105f3173e581edef7481c", "score": "0.50940955", "text": "def test_schema_maker_5(capsys):\n schema_json_5 = {\n \"title\": \"REDEFINES-RECORD\",\n \"$anchor\": \"REDEFINES-RECORD\",\n \"cobol\": \"01 REDEFINES-RECORD\",\n \"type\": \"object\",\n \"properties\": {\n \"REDEFINES-A\": {\n \"oneOf\": [\n {\n \"title\": \"A\",\n \"$anchor\": \"REDEFINES-A.A\",\n \"type\": \"object\",\n \"properties\": {\n \"A\": {\n \"title\": \"A\",\n \"$anchor\": \"A\",\n \"cobol\": \"05 A PICTURE X(6)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n }\n },\n {\n \"title\": \"B\",\n \"$anchor\": \"REDEFINES-A.B\",\n \"type\": \"object\",\n \"properties\": {\n \"B\": {\n \"title\": \"B\",\n \"$anchor\": \"B\",\n \"cobol\": \"05 B REDEFINES A\",\n \"type\": \"object\",\n \"properties\": {\n \"B-1\": {\n \"title\": \"B-1\",\n \"$anchor\": \"B-1\",\n \"cobol\": \"10 B-1 PICTURE X(2)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n \"B-2\": {\n \"title\": \"B-2\",\n \"$anchor\": \"B-2\",\n \"cobol\": \"10 B-2 PICTURE 9(4)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n }\n }\n }\n }\n ],\n \"$anchor\": \"REDEFINES-A\"\n },\n \"A\": {\n \"title\": \"A\",\n \"$anchor\": \"A\",\n \"cobol\": \"05 A PICTURE X(6)\",\n \"$ref\": \"#REDEFINES-A.A\"\n },\n \"B\": {\n \"title\": \"B\",\n \"$anchor\": \"B\",\n \"cobol\": \"05 B REDEFINES A\",\n \"$ref\": \"#REDEFINES-A.B\"\n },\n \"C\": {\n \"title\": \"C\",\n \"$anchor\": \"C\",\n 
\"cobol\": \"05 C PICTURE 99V99\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\",\n \"conversion\": \"decimal\"\n }\n }\n }\n SchemaValidator.check_schema(schema_json_5)\n s = SchemaMaker.from_json(schema_json_5)\n s.print()\n out, err = capsys.readouterr()\n assert out.splitlines() == [\n \"ObjectSchema({'title': 'REDEFINES-RECORD', '$anchor': 'REDEFINES-RECORD', 'cobol': '01 REDEFINES-RECORD', 'type': 'object'})\",\n \" OneOfSchema({'$anchor': 'REDEFINES-A'})\",\n \" ObjectSchema({'title': 'A', '$anchor': 'REDEFINES-A.A', 'type': 'object'})\",\n \" AtomicSchema({'title': 'A', '$anchor': 'A', 'cobol': '05 A PICTURE X(6)', 'type': 'string', 'contentEncoding': 'cp037'})\",\n \" ObjectSchema({'title': 'B', '$anchor': 'REDEFINES-A.B', 'type': 'object'})\",\n \" ObjectSchema({'title': 'B', '$anchor': 'B', 'cobol': '05 B REDEFINES A', 'type': 'object'})\",\n \" AtomicSchema({'title': 'B-1', '$anchor': 'B-1', 'cobol': '10 B-1 PICTURE X(2)', 'type': 'string', 'contentEncoding': 'cp037'})\",\n \" AtomicSchema({'title': 'B-2', '$anchor': 'B-2', 'cobol': '10 B-2 PICTURE 9(4)', 'type': 'string', 'contentEncoding': 'cp037'})\",\n \" RefToSchema({'title': 'A', '$anchor': 'A', 'cobol': '05 A PICTURE X(6)', '$ref': '#REDEFINES-A.A'})\",\n \" RefToSchema({'title': 'B', '$anchor': 'B', 'cobol': '05 B REDEFINES A', '$ref': '#REDEFINES-A.B'})\",\n \" AtomicSchema({'title': 'C', '$anchor': 'C', 'cobol': '05 C PICTURE 99V99', 'type': 'string', 'contentEncoding': 'cp037', 'conversion': 'decimal'})\",\n ]", "title": "" }, { "docid": "8f422cc8f50a8bfc2d85be5cc54ecd66", "score": "0.50846183", "text": "def test_good_load(self):\n simple_db_path = Path(__file__).parent / \"test_dbs\" / \"simple_db.json\"\n try:\n db = type(self).load_db(simple_db_path)\n except jsonschema.exceptions.ValidationError:\n pytest.fail(\"Unexpected exception jsonschema.exceptions.ValidationError\")\n\n assert config.NAME_KIT_CAT_NVGS in db\n assert \"CUP_NVG_PVS14\" in db[config.NAME_KIT_CAT_NVGS]\n assert \"description\" in db[config.NAME_KIT_CAT_NVGS][\"CUP_NVG_PVS14\"]", "title": "" }, { "docid": "efa13f8d8711daa8c79e7ba454825de7", "score": "0.5083122", "text": "def test_unparseable(self) -> None:\n self.assertThat(\n matches_json(Always()).match(\"not json\"),\n Not(Is(None)),\n )", "title": "" }, { "docid": "151b0b1dafc42f4eb0dfb75e45ced4ef", "score": "0.5079233", "text": "def test_instance_resize_prep_start_valid_json(self):\n expected_obj = self.resize_prep_start_obj\n\n actual_json = json.dumps(self.base_resize_prep_dict)\n actual_obj = InstanceResizePrepStart.deserialize(actual_json, 'json')\n\n self.assertEqual(expected_obj, actual_obj)\n self.assertFalse(actual_obj.is_empty())", "title": "" }, { "docid": "6a09d724cfab442fe5750fcf68c02919", "score": "0.5068002", "text": "def test_desc_serializer_with_valida_data(new_desc):\n result = desc_schema.load(new_desc)\n assert result[\"content\"] == \"Description content\"\n assert result[\"source\"] == \"wikipedia\"", "title": "" }, { "docid": "eb8f6b0829f4cc09a8fa7c2c98e271fe", "score": "0.5064395", "text": "def test_structures_endpoint_data(self):\n # mpf_23 contains no relationships, which shouldn't break anything\n assert \"data\" in self.json_response\n assert len(self.json_response[\"data\"]) == 2\n assert \"included\" in self.json_response\n assert len(self.json_response[\"included\"]) == 1", "title": "" }, { "docid": "a000a3b8985acc8f4324cd7d6b8cc4ee", "score": "0.5061462", "text": "def validate(self, json_data):\n pass", "title": "" }, { "docid": 
"341d2535a5d05f8bfb55457ceae1064d", "score": "0.50600654", "text": "def _verify(self):\r\n if not self._verified:\r\n raise AssertionError(\r\n 'jsonpickle requires at least one of the following:\\n'\r\n ' cjson, json (new in python2.6), simplejson, demjson'\r\n )", "title": "" }, { "docid": "d4a5743355063c75f99bfe03be69236b", "score": "0.5053852", "text": "def test_unserializable(self):\n try:\n _json_handler(1)\n success = False\n except TypeError:\n # An int can not be serialized, so, this error is the\n # success.\n success = True\n self.assertTrue(success)", "title": "" }, { "docid": "54f99c34d01f028776265de88907d477", "score": "0.505098", "text": "def copy_6_schema_data() -> tuple[JSON, BytesInstance]:\n copy_6_schema = {\n \"title\": \"REDEFINES-RECORD\",\n \"$anchor\": \"REDEFINES-RECORD\",\n \"cobol\": \"01 REDEFINES-RECORD\",\n \"type\": \"object\",\n \"properties\": {\n \"REDEFINES-NAME-2\": {\n \"oneOf\": [\n {\n \"title\": \"NAME-2\",\n \"$anchor\": \"NAME-2\",\n \"cobol\": \"05 NAME-2\",\n \"type\": \"object\",\n \"properties\": {\n \"SALARY\": {\n \"title\": \"SALARY\",\n \"$anchor\": \"SALARY\",\n \"cobol\": \"10 SALARY PICTURE XXX\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n \"SO-SEC-NO\": {\n \"title\": \"SO-SEC-NO\",\n \"$anchor\": \"SO-SEC-NO\",\n \"cobol\": \"10 SO-SEC-NO PICTURE X(9)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n \"MONTH\": {\n \"title\": \"MONTH\",\n \"$anchor\": \"MONTH\",\n \"cobol\": \"10 MONTH PICTURE XX\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n },\n \"anchor\": \"REDEFINES-NAME-2.NAME-2\"\n },\n {\n \"title\": \"NAME-1\",\n \"$anchor\": \"NAME-1\",\n \"cobol\": \"05 NAME-1 REDEFINES NAME-2\",\n \"type\": \"object\",\n \"properties\": {\n \"WAGE\": {\n \"title\": \"WAGE\",\n \"$anchor\": \"WAGE\",\n \"cobol\": \"10 WAGE PICTURE 999V999\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\",\n \"conversion\": \"decimal\"\n },\n \"EMP-NO\": {\n \"title\": \"EMP-NO\",\n \"$anchor\": \"EMP-NO\",\n \"cobol\": \"10 EMP-NO PICTURE X(6)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n \"YEAR\": {\n \"title\": \"YEAR\",\n \"$anchor\": \"YEAR\",\n \"cobol\": \"10 YEAR PICTURE XX\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n },\n \"anchor\": \"REDEFINES-NAME-2.NAME-1\"\n }\n ],\n \"$anchor\": \"REDEFINES-NAME-2\"\n },\n \"NAME-2\": {\n \"title\": \"NAME-2\",\n \"cobol\": \"05 NAME-2\",\n \"$ref\": \"#NAME-2\"\n },\n \"NAME-1\": {\n \"title\": \"NAME-1\",\n \"cobol\": \"05 NAME-1 REDEFINES NAME-2\",\n \"$ref\": \"#NAME-1\"\n }\n }\n }\n\n assert SchemaValidator.check_schema(copy_6_schema) is None\n\n copy_6_data = BytesInstance(\"ABC123456789DE\".encode(\"CP037\"))\n return copy_6_schema, copy_6_data", "title": "" }, { "docid": "6a3d63b33a98d4e1331386600b1392b6", "score": "0.5046336", "text": "def assertJSONEqual(self, a, b):\n self.assertDictEqual(json.loads(a),json.loads(b))", "title": "" }, { "docid": "b93760539e3f788f4b1ea1503adeb92e", "score": "0.5043583", "text": "def copy_13_schema_data() -> tuple[JSON, BytesInstance]:\n copy_13_schema_1 = {\n \"title\": \"GENERIC-RECORD\",\n \"$anchor\": \"GENERIC-RECORD\",\n \"cobol\": \"01 GENERIC-RECORD\",\n \"type\": \"object\",\n \"properties\": {\n \"HEADER\": {\n \"title\": \"HEADER\",\n \"$anchor\": \"HEADER\",\n \"cobol\": \"05 HEADER PIC X(3)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n \"GENERIC-FIELD\": {\n \"title\": \"GENERIC-FIELD\",\n \"$anchor\": \"GENERIC-FIELD\",\n 
\"cobol\": \"05 GENERIC-FIELD PIC X(17)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n }\n }\n\n copy_13_schema_2 = {\n \"title\": \"ABC-SPECIFIC-RECORD\",\n \"$anchor\": \"ABC-SPECIFIC-RECORD\",\n \"cobol\": \"01 ABC-SPECIFIC-RECORD\",\n \"type\": \"object\",\n \"properties\": {\n \"ITEM-1\": {\n \"title\": \"ITEM-1\",\n \"$anchor\": \"ITEM-1\",\n \"cobol\": \"05 ITEM-1 PIC X(10)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n \"ITEM-2\": {\n \"title\": \"ITEM-2\",\n \"$anchor\": \"ITEM-2\",\n \"cobol\": \"05 ITEM-2 PIC X(7)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n }\n }\n\n copy_13_schema_3 = {\n \"title\": \"DEF-ANOTHER-RECORD\",\n \"$anchor\": \"DEF-ANOTHER-RECORD\",\n \"cobol\": \"01 DEF-ANOTHER-RECORD\",\n \"type\": \"object\",\n \"properties\": {\n \"ITEM-3\": {\n \"title\": \"ITEM-3\",\n \"$anchor\": \"ITEM-3\",\n \"cobol\": \"05 ITEM-3 PIC X(7)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n \"ITEM-4\": {\n \"title\": \"ITEM-4\",\n \"$anchor\": \"ITEM-4\",\n \"cobol\": \"05 ITEM-4 PIC X(10)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n }\n }\n\n assert SchemaValidator.check_schema(copy_13_schema_1) is None\n assert SchemaValidator.check_schema(copy_13_schema_2) is None\n assert SchemaValidator.check_schema(copy_13_schema_3) is None\n\n copy_13_data_1 = BytesInstance(\"ABC0123456789TUVWXYZDEFG\".encode(\"CP037\"))\n copy_13_data_2 = BytesInstance(\"DEF0123456QRSTUVWXYZ\".encode(\"CP037\"))\n return [copy_13_schema_1, copy_13_schema_2, copy_13_schema_3], [copy_13_data_1, copy_13_data_2]", "title": "" }, { "docid": "32e3d24bef4244246cae522215fdf97a", "score": "0.50357985", "text": "def validate_against_schema(self):\n try:\n jsonschema.validate(self.json_data, self.schema)\n return True\n except jsonschema.ValidationError as err:\n self.error_log = \"JSON does not comply with schema: %s\" % err\n return False", "title": "" }, { "docid": "39789fcb3866e1b2f7771bff1466b7ee", "score": "0.50239426", "text": "def is_valid_json(self):\n content = (\n self.body\n if self.patches is None\n else \"\".join(str(patch) for patch in self.patches)\n )\n try:\n json.loads(content)\n return True\n except ValueError:\n return False", "title": "" }, { "docid": "8e000357c7668d2b9e13743d19053214", "score": "0.50196415", "text": "def validate_json(file):\n max_file_size = current_app.config.get(\n 'PREVIEWER_MAX_FILE_SIZE_BYTES', 1 * 1024 * 1024)\n if file.size > max_file_size:\n return False\n\n with file.open() as fp:\n try:\n json.loads(fp.read().decode('utf-8'))\n return True\n except Exception:\n return False", "title": "" }, { "docid": "165215b6020a256eb3d14bc76eca4ef7", "score": "0.50186133", "text": "def test_biom_to_classic2(self):\r\n self.cmd(table=self.biom_table1,\r\n output_filepath=self.output_filepath, to_tsv=True,\r\n header_key='taxonomy', output_metadata_id='foo')\r\n obs = load_table(self.output_filepath)\r\n self.assertTrue('foo' in obs.metadata(axis='observation')[0])", "title": "" }, { "docid": "fa21080d60ce1dacd27e9ee02ca7c85a", "score": "0.50157845", "text": "def test_WbTabularData_WbRepresentation_methods(self):\n q = pywikibot.WbTabularData(self.page)\n self._test_hashable(q)", "title": "" }, { "docid": "594585df14cf4d7eeb4c793a49135183", "score": "0.500506", "text": "def copy_5_schema_data() -> tuple[JSON, BytesInstance]:\n copy_5_schema = {\n \"title\": \"REDEFINES-RECORD\",\n \"$anchor\": \"REDEFINES-RECORD\",\n \"cobol\": \"01 REDEFINES-RECORD\",\n \"type\": \"object\",\n 
\"properties\": {\n \"REDEFINES-A\": {\n \"oneOf\": [\n {\n \"title\": \"A\",\n \"$anchor\": \"A\",\n \"cobol\": \"05 A PICTURE X(6)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n {\n \"title\": \"B\",\n \"$anchor\": \"B\",\n \"cobol\": \"05 B REDEFINES A\",\n \"type\": \"object\",\n \"properties\": {\n \"B-1\": {\n \"title\": \"B-1\",\n \"$anchor\": \"B-1\",\n \"cobol\": \"10 B-1 PICTURE X(2)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n },\n \"B-2\": {\n \"title\": \"B-2\",\n \"$anchor\": \"B-2\",\n \"cobol\": \"10 B-2 PICTURE 9(4)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n }\n }\n ],\n \"$anchor\": \"REDEFINES-A\"\n },\n \"A\": {\n \"title\": \"A\",\n \"cobol\": \"05 A PICTURE X(6)\",\n \"$ref\": \"#A\"\n },\n \"B\": {\n \"title\": \"B\",\n \"cobol\": \"05 B REDEFINES A\",\n \"$ref\": \"#B\"\n },\n \"C\": {\n \"title\": \"C\",\n \"$anchor\": \"C\",\n \"cobol\": \"05 C PICTURE 99V99\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\",\n \"conversion\": \"decimal\"\n }\n }\n }\n\n assert SchemaValidator.check_schema(copy_5_schema) is None\n\n copy_5_data = BytesInstance(\"AB12345678\".encode(\"CP037\"))\n return copy_5_schema, copy_5_data", "title": "" }, { "docid": "78e0cf27b6eed67aeccd8e593a907f45", "score": "0.5003732", "text": "def assertJsonSuccess(data):\n obj = json.loads(data)\n if 'status' in obj and obj['status'] == \"error\":\n print(\"Error: JSON object returns an error. \" + str(obj))\n return False\n else:\n return True", "title": "" }, { "docid": "a5cb9ff24ab9edf030dd1075b5274913", "score": "0.49955502", "text": "def test_deserialise_python_builtins(self):\n data = dict(a=123, b=\"text\")\n jdata = json.dumps(data)\n got = deserialise_object(jdata)\n self.assertEqual(got, data)", "title": "" }, { "docid": "080671c8645dd31b3cec0ce799180193", "score": "0.4991183", "text": "def assert_serialization_works(self, snapshot):\n roundtrip = replication.proto_to_auth_db_snapshot(\n replication.auth_db_snapshot_to_proto(snapshot))\n self.assertEqual(snapshot_to_dict(snapshot), snapshot_to_dict(roundtrip))", "title": "" }, { "docid": "eb8f8813928bae6aaa874237a3dcf84e", "score": "0.49896422", "text": "def test_export_game(self):\n connect_str = \"sqlite:///:memory:\"\n engine = create_engine(connect_str)\n self.load_fixture(engine, \"tv-themes-v4.sql\")\n DatabaseConnection.bind(self.options.database, create_tables=False,\n engine=engine, debug=False)\n with models.db.session_scope() as dbs:\n for song in models.Song.all(dbs):\n song.check_for_uuid()\n result = models.export_game_to_object(\"20-04-24-2\", dbs)\n result = utils.flatten(result)\n validate_json(JsonSchema.GAME_TRACKS_V4, result)\n if self.EXPECTED_OUTPUT is not None:\n destination = self.EXPECTED_OUTPUT / 'exported-game-v4.json'\n with open(destination, 'wt', encoding='utf-8') as dst:\n json.dump(result, dst, indent=2, sort_keys=True)\n json_filename = fixture_filename(\"exported-game-v4.json\")\n with json_filename.open('rt', encoding='utf-8') as src:\n expected = json.load(src)\n for table in expected.keys():\n # print(f'Check {table}')\n self.assertModelListEqual(result[table], expected[table], table)", "title": "" }, { "docid": "3b53185849857f551bf98d7b8e56647f", "score": "0.49890664", "text": "def test_basic_json(self):\n options = {\n 'schema': {\n 'name': 'string',\n 'age': 'integer'\n }\n }\n\n data = {\n 'name': 'john',\n 'age': 30\n }\n\n record_data = json.dumps(data)\n\n # get parsed data\n parser = JSONParser(options)\n 
assert_equal(parser.parse(record_data), True)\n\n expected_result = [\n data\n ]\n\n assert_equal(parser.parsed_records, expected_result)", "title": "" }, { "docid": "e4a409a14caa243e9599662c2b99aa89", "score": "0.49819562", "text": "def test_WbTabularData_error_on_non_exitant_page(self):\n page = Page(self.commons, 'Non-existant page... really')\n regex = r'^Page \\[\\[.+?\\]\\] must exist\\.$'\n with self.assertRaisesRegex(ValueError, regex):\n pywikibot.WbTabularData(page, self.get_repo())", "title": "" }, { "docid": "9525818e6b1615a0c3de9ddf67e11a45", "score": "0.49761963", "text": "def CheckRequiredFormat(self, json_dict):\n operation_name = json_dict.get('operationName') or None\n properties = json_dict.get('properties') or None\n timestamp = json_dict.get('timeStamp') or None\n\n if (None in (operation_name, properties, timestamp) or\n operation_name != 'ApplicationGatewayAccess'):\n return False\n\n try:\n date_time = dfdatetime_time_elements.TimeElementsInMicroseconds()\n date_time.CopyFromStringISO8601(timestamp)\n except ValueError:\n return False\n\n return True", "title": "" }, { "docid": "92bedd89eed7f664c31911a40e43b624", "score": "0.4964713", "text": "def test_json_teams_format(self):\n miner.copy_miner_file()\n json_team = miner.get_json_teams(1)[0]\n self.assertEqual(len(json_team), 6)\n pokemon_dict = json_team[0]\n self.assertEqual(set(['ability', 'evs', 'item', 'ivs', 'level', 'moves', 'name', 'shiny',\n 'species']),\n set(pokemon_dict.keys()))\n self.assertTrue(set(['atk', 'def', 'hp', 'spa', 'spd', 'spe']) ==\n set(pokemon_dict['evs'].keys()) ==\n set(pokemon_dict['ivs'].keys()))\n\n self.assertEqual(len(pokemon_dict['moves']), 1 if pokemon_dict['name'] == 'Ditto' else 4)", "title": "" }, { "docid": "0e4cb6df699dc193a385e91f94e4945c", "score": "0.4964026", "text": "def test_json(self):\n processor = ProcessToRenderInfo(\n HtmlDocumentManager('test_json'),\n ProcessedEntityManager())\n processor.max_uncollapsable_json_lines = 20\n processor.max_uncollapsable_entity_rows = 20\n\n # Numeric literals wont be treated as json.\n for n in [-1, 0, 1, 3.14]:\n info = processor.process_json_html_if_possible(n)\n self.assertEquals('{0}'.format(n), info.detail_html)\n self.assertEquals(None, info.summary_html)\n\n # None of these strings are well-defined JSON documents\n # so should just be strings.\n for s in ['test', 'a phrase', 'True']:\n info = processor.process_json_html_if_possible(s)\n self.assertEquals('\"{0}\"'.format(s), info.detail_html)\n self.assertEquals(None, info.summary_html)\n\n # Boolean values wont be considered JSON.\n for b in [True, False]:\n info = processor.process_json_html_if_possible(b)\n self.assertEquals('{0}'.format(str(b)), info.detail_html)\n self.assertEquals(None, info.summary_html)\n\n # Dictionaries and JSON dictionary strings normalize to JSON.\n for d in [{'A': 'a', 'B': True}, '{\"A\":\"a\", \"B\":true}']:\n info = processor.process_json_html_if_possible(d)\n self.assertEquals('<pre>{{\\n \"A\": \"a\",\\n'\n ' \"B\": true\\n'\n '}}</pre>'.format(), info.detail_html)\n self.assertEquals(None, info.summary_html)\n self.assertEquals(None, info.summary_html)\n\n # Lists and JSON lists strings normalize to JSON.\n for l in [[123, 'abc', True, {'A': 'a', 'B': 'b'}],\n '[123, \"abc\", true, {\"A\":\"a\", \"B\":\"b\"}]']:\n info = processor.process_json_html_if_possible(l)\n self.assertEquals('<pre>[\\n 123,\\n \"abc\",\\n true,\\n'\n ' {{\\n'\n ' \"A\": \"a\",\\n'\n ' \"B\": \"b\"\\n'\n ' }}\\n'\n ']</pre>'.format(), info.detail_html)\n 
self.assertEquals(None, info.summary_html)", "title": "" }, { "docid": "2fafd82345d9bbcd2a55dd4ea7b3b48b", "score": "0.49624068", "text": "def test___bytes__(json_bytes_data: TypingJsonBytesData):\n assert bytes(json_bytes_data[\"json_bytes\"]) == json_bytes_data[\"bytes\"]", "title": "" }, { "docid": "1e31b0227bb6a91a545725159ce2031b", "score": "0.49622917", "text": "def test_Place_is_dict(self):\n\n st = FileStorage()\n st_dict = st.all()\n self.assertEqual(dict, type(st_dict))", "title": "" }, { "docid": "9957b5a603c6abe649505980c3dd938f", "score": "0.4946569", "text": "def test_dump_all_formats(format_key, expected):\n data = [\n {\n \"color\":\"#ffffff\", \"name\": \"white\"\n },\n {\n \"color\":\"#000000\", \"name\": \"black\"\n },\n ]\n output = build_dump(format_key, data)\n\n assert output[\"content\"] == expected", "title": "" }, { "docid": "e0348ec7650d86fb15112724c1624867", "score": "0.4946224", "text": "def testReadWriteJsonFile(self):\n try:\n rObj = self.__ioU.deserialize(self.__pathJsonTestFile, fmt=\"json\")\n logger.debug(\"Object length %d\", len(rObj))\n self.assertGreaterEqual(len(rObj), 1)\n ok = self.__ioU.serialize(self.__pathSaveJsonTestFile, rObj, fmt=\"json\")\n self.assertTrue(ok)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "title": "" }, { "docid": "304497723bd10badc7eddfed67864cce", "score": "0.4939063", "text": "def testfromjson(self):\n dicty = {\"id\": 5, \"class\": \"string\", \"list\": [], \"set\": {}}\n self.assertEqual([dicty], Base.from_json_string(json.dumps([dicty])))", "title": "" }, { "docid": "d30994a523aefdc93cd4d6c73be16cb2", "score": "0.49368024", "text": "def export_test(self, schema_version):\n connect_str = \"sqlite:///:memory:\"\n engine = create_engine(connect_str) # , echo=True)\n self.load_fixture(engine, f\"tv-themes-v{schema_version}.sql\")\n\n DatabaseConnection.bind(self.options.database, create_tables=False,\n engine=engine, debug=False)\n output = io.StringIO()\n # pylint: disable=no-value-for-parameter\n with models.db.session_scope() as dbs:\n models.export_database_to_file(output, self.options, Progress(), dbs, None)\n output.seek(0)\n actual_json = json.load(output)\n if self.EXPECTED_OUTPUT is not None:\n destination = self.EXPECTED_OUTPUT / f\"exported-tv-themes-v{schema_version}.json\"\n with open(destination, 'wt', encoding='utf-8') as dbg:\n dbg.write(output.getvalue())\n validate_json(JsonSchema.DATABASE, actual_json)\n json_filename = fixture_filename(f\"exported-tv-themes-v{schema_version}.json\")\n with json_filename.open('rt', encoding='utf-8') as src:\n expected_json = json.load(src)\n self.assertEqual(expected_json['Users'][0]['username'], 'admin')\n expected_json['Users'][0]['last_login'] = None\n # self.maxDiff = None\n for table in expected_json.keys():\n # print(f'Check {table}')\n if isinstance(actual_json[table], dict):\n self.assertDictEqual(actual_json[table], expected_json[table], table)\n else:\n self.assertModelListEqual(actual_json[table], expected_json[table], table)", "title": "" }, { "docid": "353d25f2abc322e940c9e521e926ef78", "score": "0.4936759", "text": "def testDumpData(self):\r\n self.assertCommandSucceeds(\"dumpdata\")", "title": "" }, { "docid": "8d898c45ae770a76f03a979835ca91e7", "score": "0.49342582", "text": "def checkSchema(self):\n\n\t\traise NotImplementedError()", "title": "" }, { "docid": "d32acb33bcb6cf97eeb846abc3a97704", "score": "0.49338663", "text": "def testfromjson(self):\n dicty = {\"id\": 5, \"class\": \"string\", \"list\": [], 
\"set\": {}}\n self.assertEqual([dicty, dicty],\n Base.from_json_string(json.dumps([dicty, dicty])))", "title": "" }, { "docid": "6b723a3847dac70b847b465041ec67c0", "score": "0.4933373", "text": "def is_tsv(to_test):\n if isinstance(to_test, bytes):\n as_str = to_test.decode('utf-8')\n as_list = as_str.splitlines()\n else:\n as_str = str(to_test)\n as_list = as_str.split('\\n')\n if len(as_list) > 1:\n if len(as_str.split('\\t')) > 1:\n return True\n return False", "title": "" }, { "docid": "4ab063f47e00dc7e2801dd8db146b7f4", "score": "0.49322158", "text": "def copy_10_schema_data() -> tuple[JSON, BytesInstance]:\n copy_10_schema = {\n \"title\": \"MAIN-AREA\",\n \"$anchor\": \"MAIN-AREA\",\n \"cobol\": \"01 MAIN-AREA\",\n \"type\": \"object\",\n \"properties\": {\n \"REC-1\": {\n \"title\": \"REC-1\",\n \"$anchor\": \"REC-1\",\n \"cobol\": \"03 REC-1\",\n \"type\": \"object\",\n \"properties\": {\n \"FIELD-1\": {\n \"title\": \"FIELD-1\",\n \"$anchor\": \"FIELD-1\",\n \"cobol\": \"05 FIELD-1 PIC 9\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\",\n \"conversion\": \"decimal\"\n },\n \"FIELD-2\": {\n \"title\": \"FIELD-2\",\n \"$anchor\": \"FIELD-2\",\n \"cobol\": \"05 FIELD-2 OCCURS 1 TO 5 TIMES\\n DEPENDING ON FIELD-1 PIC X(05)\",\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"FIELD-2\": {\n \"cobol\": \"05 FIELD-2 OCCURS 1 TO 5 TIMES\\n DEPENDING ON FIELD-1 PIC X(05)\",\n \"type\": \"string\",\n \"contentEncoding\": \"cp037\"\n }\n }\n },\n \"maxItemsDependsOn\": {\n \"$ref\": \"#FIELD-1\"\n }\n }\n }\n }\n }\n }\n\n assert SchemaValidator.check_schema(copy_10_schema) is None\n\n copy_10_data = BytesInstance(\"3111112222233333\".encode(\"CP037\"))\n return copy_10_schema, copy_10_data", "title": "" }, { "docid": "e000f97f33621b58909e0dfb19bd6281", "score": "0.4932113", "text": "def test_dont_mix_schema_and_object_signature(self):\n url = \"pg://localhost:5432/tuttle_test_db/test_schema\"\n res = PostgreSQLResource(url)\n assert not res.exists(), \"{} should not exist because no table, view nor any object with that name \" \\\n \"exists\".format(url)", "title": "" }, { "docid": "4d838408b6780ed3bead70cd3212caf8", "score": "0.4931609", "text": "def test_table_fits_io_astropy(table):\n # Check Table -> BinTableHDU\n hdu = fits.BinTableHDU(table)\n assert hdu.header[\"TTYPE2\"] == \"b\"\n assert hdu.header[\"TFORM2\"] == \"K\"\n assert hdu.header[\"TUNIT2\"] == \"m\"\n\n # Check BinTableHDU -> Table\n table2 = Table.read(hdu)\n assert isinstance(table2.meta, dict)\n assert table2.meta == {\"VERSION\": 42}\n assert table2[\"b\"].unit == \"m\"\n # Note: description doesn't come back in older versions of Astropy\n # that we still support, so we're not asserting on that here for now.", "title": "" }, { "docid": "e317c705efb14fd26dca1d8394a5e2db", "score": "0.49231386", "text": "def test_metadata(list_json_output):\n\n listing = list_json_output('valid-json', metadata=True)\n for c in listing:\n if 'meta' in c:\n assert c['meta'] == True\n assert c['title'] == 'NPC Listing'\n assert 'created' in c", "title": "" }, { "docid": "6144d395c5711ac22a4c29dcd2685984", "score": "0.4922923", "text": "def assert_valid_json(data: str, schema_id: str) -> None: # noqa: E501 (line too long)\n assert_valid(json.loads(data), schema_id)", "title": "" }, { "docid": "5608356d400fe8445df78527c6d36ea7", "score": "0.4917935", "text": "def test_invalid_json_path(self):\n options = {\n 'schema': {\n 'name': 'string',\n 'result': 'string'\n },\n 'configuration': {\n 
'json_path': 'Records[*]'\n }\n }\n\n record_data = {'name': 'test', 'result': 'test'}\n\n # get parsed data\n parser = JSONParser(options)\n assert_equal(parser.parse(record_data), False)\n\n expected_result = [\n record_data\n ]\n assert_equal(parser.invalid_parses, expected_result)", "title": "" }, { "docid": "1c69f818cb6b1e5c3514060050b6d385", "score": "0.49155313", "text": "def schema_check(table1, table2):\n\n # compares the first row of each table to ensure the titles and number of columns match\n if table1[0] != table2[0]:\n raise MismatchedAttributesException", "title": "" } ]
890b2bfc4b102965155938e2ca874bb1
Sets the residence of this Criminal.
[ { "docid": "ef8fd305f836631f8748820b1c0b4118", "score": "0.8034765", "text": "def residence(self, residence):\n\n self._residence = residence", "title": "" } ]
[ { "docid": "43fd1c6070ff4412d49071a8e6ed45bb", "score": "0.6250316", "text": "def residence(self):\n return self._residence", "title": "" }, { "docid": "44706b55c15938a2eec0f688e775491e", "score": "0.57946104", "text": "def set_evidence(self, evidence):\n self.evidence = evidence\n\n # we do not have to sample evidence nodes\n self.nodes_minus_e = [\n node for node in self.nodes if node not in evidence\n ]\n\n # parents of the evidence nodes\n self.evidence_parents = []\n for e in self.evidence:\n for node in self.graph[e]:\n self.evidence_parents.append(node)\n\n if self.rep == \"CPT\":\n self._set_icpt()\n elif self.rep == \"Noisy-OR\":\n self._set_causality_strength()\n else:\n raise ValueError(\"Unknown option.\")", "title": "" }, { "docid": "0a9d8ee1b30bbf42b69efac30fcfd66f", "score": "0.55060416", "text": "def discipline(self, discipline):\n\n self._discipline = discipline", "title": "" }, { "docid": "0a9d8ee1b30bbf42b69efac30fcfd66f", "score": "0.55060416", "text": "def discipline(self, discipline):\n\n self._discipline = discipline", "title": "" }, { "docid": "df0a9dc452cee5c857362d2781f3d6d6", "score": "0.5466117", "text": "def SetEvidence(self, evidence):\n self._evidence = evidence", "title": "" }, { "docid": "817fbcd65ddd359ee08081e848bc7ba5", "score": "0.5239293", "text": "def r_ide(self, r_ide):\n\n self._r_ide = r_ide", "title": "" }, { "docid": "5f6cb7e436229e774bcfd5e47dc99bf1", "score": "0.50677645", "text": "def set_corrector(self, x=-2):\n self.corrector = x", "title": "" }, { "docid": "01f3cda2060a06e5109c599b4f997d5f", "score": "0.5040256", "text": "def SetConstraintRHS(self, r):\n return _constraints.ConstrainedSolver_SetConstraintRHS(self, r)", "title": "" }, { "docid": "b836ff4738c8ebb176f855a109d8b586", "score": "0.49993312", "text": "def resiliency_details(self, resiliency_details):\n\n self._resiliency_details = resiliency_details", "title": "" }, { "docid": "018f1920b2a9ffc3039b94f5a0dd031e", "score": "0.49891087", "text": "def Re(self, Re):\n raise ValueError('Cannot directly set Reynolds number as an '\n 'attribute')", "title": "" }, { "docid": "b487e89701ef192b2f5f75d6e10b5d2e", "score": "0.49070406", "text": "def set_rc(self):\n self.set_gene_is_rc_from_descr()\n if self.descr[-1] == '-':\n self.rc = True\n elif self.descr[-1] == '+':\n self.rc = False\n else:\n raise ValueError(\"State could not be detected.\")", "title": "" }, { "docid": "84ef5a325b09ff7f97a8ad4241c65ddf", "score": "0.48808113", "text": "def crr(self, crr):\n\n self._crr = crr", "title": "" }, { "docid": "56a3f5237f05746d6e4f508bde465b8e", "score": "0.48648283", "text": "def referent(self, referent):\n\n self._referent = referent", "title": "" }, { "docid": "c6d14f152dcdbeb31bf9724c6af04a10", "score": "0.48481292", "text": "def set_resistivity(cell, resistivity, group=\"all\"):\n # type: (Cell, str, str) -> None\n cell.biophysical_properties.intracellular_properties.resistivities = [Resistivity(value=resistivity, segment_groups=group)]", "title": "" }, { "docid": "35e77bcb9c306b7a95f99ec12bc95ee7", "score": "0.48346528", "text": "def setDisc(self, novoDisc):\r\n self.__disc = novoDisc", "title": "" }, { "docid": "6bb4b2ec94eb9165d7e88c2a20238e43", "score": "0.48121318", "text": "def restated_question(self, restated_question):\n\n self._restated_question = restated_question", "title": "" }, { "docid": "48432a819ca216189d62ab1b4e77be85", "score": "0.47964615", "text": "def setRes(self, res):\n self.sRes = (int(res[0]),int(res[1]))", "title": "" }, { "docid": 
"2237219a9340321b562a486396ca8cf0", "score": "0.47943404", "text": "def car_rental(self, car_rental):\n\n self._car_rental = car_rental", "title": "" }, { "docid": "37e65a2ccbac6f7c4ef57c59ecfedca9", "score": "0.4760247", "text": "def nvr(self, nvr):\n\n self._nvr = nvr", "title": "" }, { "docid": "b3ca68c4da0b65365c598e2cc54bd116", "score": "0.47552612", "text": "def setActualRAWRC(self, actualRAWRC: int):\n\t\tself._actualRAWRC = actualRAWRC", "title": "" }, { "docid": "fbdbf7500c526743da36dac2aaf2b540", "score": "0.47549906", "text": "def set_resistance(self, resistance):\n # Validate the desired value\n if ~(0 <= resistance <= self.max_resistance):\n return\n # Round to the nearest wipe (multiple of 100 ohms)\n wipe_equivalent = int(round(resistance, -2)) / 100\n # Set the digipot\n self.set_wipe(wipe_equivalent)", "title": "" }, { "docid": "fdc1503a21e4fd0bb3754a96bf37f5c3", "score": "0.4748327", "text": "def _set_is_querier(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"is-querier\", rest_name=\"is-querier\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint8', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"is_querier must be of a type compatible with uint8\"\"\",\n 'defined-type': \"uint8\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name=\"is-querier\", rest_name=\"is-querier\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='uint8', is_config=False)\"\"\",\n })\n\n self.__is_querier = t\n if hasattr(self, '_set'):\n self._set()", "title": "" }, { "docid": "e3371754d785b0cd33705d6f9fedb035", "score": "0.4726097", "text": "def kind_of_criminal(self, kind_of_criminal):\n\n self._kind_of_criminal = kind_of_criminal", "title": "" }, { "docid": "f8a42738215a4233e7db1622eed91c98", "score": "0.47248423", "text": "def tax_reciever_id(self, tax_reciever_id):\n\n self._tax_reciever_id = tax_reciever_id", "title": "" }, { "docid": "205863e72e9e7c113164f24616234b99", "score": "0.47195417", "text": "def set_ricci_scalar(self):\n\n self.ricci_scalar = self.compute_ricci_scalar()\n if(self.suppress_printing == False):\n print(\"\")\n print(\"\")\n print(\"Ricci curvature scalar\")\n print(\"======================\")\n self.print_ricci_scalar()", "title": "" }, { "docid": "d8a9a357d9a42ec44a85214c73f6ec69", "score": "0.47082615", "text": "def set_constraint(self, constraint):\r\n self.constraint = constraint", "title": "" }, { "docid": "b8ccb876058dbacae02047a2ce1d007c", "score": "0.47032556", "text": "def init_res(self, rail):\n err = 0\n if rail['rsense'][0] != rail['rsense'][1]:\n gpio = next((item for item in self.board_mapping_gpio_i2c if item['name'] == rail['rsense_ctl']), None)\n init_value = rail['rsense'][2]\n self.setgpio(gpio, init_value * 0xFF)\n while (self.pca6416_get_output(gpio) / gpio['pca6416'][2]) != init_value:\n err += 1\n self.setgpio(gpio, init_value * 0xFF)\n if err == 5:\n print('Failed to init resistance switch ' + 
gpio['name'] + ' to high level.')\n break\n CURR_RSENSE[rail['name']] = rail['rsense'][0]", "title": "" }, { "docid": "c950259e3243ff18699407db03ac7df2", "score": "0.46935955", "text": "def rid_id(self, rid_id):\n\n self._rid_id = rid_id", "title": "" }, { "docid": "24feface2a8d4e66967028b12dc80a2e", "score": "0.46875778", "text": "def set_corrector(self, x=-2):\n self.table.corrector = x", "title": "" }, { "docid": "24feface2a8d4e66967028b12dc80a2e", "score": "0.46875778", "text": "def set_corrector(self, x=-2):\n self.table.corrector = x", "title": "" }, { "docid": "eb00cd8530ee7bfafef5014ac22306ab", "score": "0.46737856", "text": "def set_was_influenced_by(self, resource):\n self.add(PROV.wasInfluencedBy, resource)\n\n if using_inverse_properties():\n resource.add(PROV.influenced, self)", "title": "" }, { "docid": "69104fde41608c5287b32c7a5686fb06", "score": "0.46691978", "text": "def crv(self, crv: \"str\"):\n self._attrs[\"crv\"] = crv", "title": "" }, { "docid": "6567d781e43bdbf71017797ff42b9bb3", "score": "0.46456328", "text": "def set_dominance(self, dominance):\n self.left_dominant = bool(dominance)\n\n self.dom_center_line = self.Ldom_center_line if self.left_dominant else self.Rdom_center_line\n self.sub_center_line = self.Lsub_center_line if self.left_dominant else self.Rsub_center_line\n self.dom_inner_bound = self.Ldom_inner_bound if self.left_dominant else self.Rdom_inner_bound\n self.sub_inner_bound = self.Lsub_inner_bound if self.left_dominant else self.Rsub_inner_bound\n\n self.min_angle_line = LineString([self.dom_center_line.coords[0], self.get_pos_intersection(self.sub_inner_bound, self.visual_space.boundary).coords[0]])\n self.min_angle = math.degrees(self.get_angle_between(\n self.min_angle_line,\n self.dom_center_line\n ))\n self.sub_min_angle = math.degrees(self.get_angle_between(\n self.sub_inner_bound,\n self.sub_center_line\n ))", "title": "" }, { "docid": "ade5fb6eabe0acceb053434e08ea63a8", "score": "0.4634538", "text": "def set_was_influenced_by(self, resource):\n resource = Resource.ensure_type(resource)\n self.add(PROV.wasInfluencedBy, resource)\n\n if using_inverse_properties():\n resource.add(PROV.influenced, self)", "title": "" }, { "docid": "6142a514c75f6760b3d0db95fe3c9d44", "score": "0.46173596", "text": "def declareDiscretizationSegmentConVariables(self, pyM):\n setattr(pyM, 'discretizationSegmentCon_' + self.abbrvName,\n pyomo.Var(getattr(pyM, 'discretizationSegmentVarSet_' + self.abbrvName), pyM.timeSet, domain=pyomo.NonNegativeReals))", "title": "" }, { "docid": "6663b7c2e6dde92b5eaa5812cfb99326", "score": "0.45879376", "text": "def SetDetectionProperties(self, det_roi):\n pass", "title": "" }, { "docid": "92b12d4b9fadbacc381e783aba5b9968", "score": "0.4580052", "text": "def set_rho(self, rho):\n rho = max(0.0, float(rho))\n self.rho = rho", "title": "" }, { "docid": "7142857b7c1d3b4ce606de7810139400", "score": "0.45655277", "text": "def reign(self, reign):\n\n self._reign = reign", "title": "" }, { "docid": "8fdac93e8ed5ee6137672d36f73867d0", "score": "0.45590976", "text": "def set_relax(self, relaxamounts, timeind, dt):\n rp = self.relax_parameters\n if rp is None:\n return\n relaxamount_s, relaxamount_v = relaxamounts\n hs.relaxation.relax_helper_vhd(rp, relaxamount_s, relaxamount_v, self, timeind, dt)", "title": "" }, { "docid": "80ae77a4c5fde38f7dd04b3ec92efc86", "score": "0.455399", "text": "def set_cvv(self, cvv):\n self.cvv = cvv", "title": "" }, { "docid": "7bca202ca0f5f99975825a465965d48c", "score": "0.4533174", "text": "def 
_setResource(self, r_type, x, y, amount):\n cell = self.get_cell(x, y)\n cell.resource = Cell.Resource(r_type, amount)", "title": "" }, { "docid": "2ac9a4be728632b81e30a5f46e5a18ac", "score": "0.4527307", "text": "def set_constraint(self, prop):\n query = \"CREATE \" + cypher.constraint_query(\n 'n', self._node_label, prop)\n result = self.execute(query)\n return result", "title": "" }, { "docid": "33d4897eb34a094f6989ce0fa891abaf", "score": "0.45270556", "text": "def SetDetectionProperties(self, det_roi):\n self.send_SetDetectionProperties(det_roi)\n self.recv_SetDetectionProperties()", "title": "" }, { "docid": "bb6bcc037779d3ac0e8102e9751731c5", "score": "0.451965", "text": "def inclination(self, inclination):\n\n self._inclination = inclination", "title": "" }, { "docid": "d588c6b91ae50c31bec2f27d1dece31b", "score": "0.45049813", "text": "def set_race(self, race_id):\n api.set_race(self.profile['id'], race_id)\n self.active_race = race_id", "title": "" }, { "docid": "d83c4637fe198e6fc2d8690b346547fd", "score": "0.4496941", "text": "def conta_corrente(self, conta_corrente):\n self._conta_corrente = conta_corrente", "title": "" }, { "docid": "e8e47936c0bee9f7ed69234c0f7d8c8b", "score": "0.44900003", "text": "def setConstraint(constraint):", "title": "" }, { "docid": "fa9107c4d734237a93235c58b38a5ef4", "score": "0.4484793", "text": "def influenced(self, influenced):\n\n self._influenced = influenced", "title": "" }, { "docid": "b7fd81a362e0df9454d3b4d4935f7ee7", "score": "0.44749215", "text": "def remarks(self, remarks):\n\n self._remarks = remarks", "title": "" }, { "docid": "2142d9e3bef9edd45ce9268517ef4f7e", "score": "0.44628522", "text": "def setRebalance(self,rebalance):\n self.rebalance = rebalance", "title": "" }, { "docid": "f5a937735161698b0ab263eac92b382b", "score": "0.445133", "text": "def circumcised(self, circumcised):\n\n self._circumcised = circumcised", "title": "" }, { "docid": "d22fa3211de2b13f99328034d3acc4b4", "score": "0.44486108", "text": "def set_was_influenced_by(self, resource):\n self.add(PROV.wasInfluencedBy, resource)", "title": "" }, { "docid": "d22fa3211de2b13f99328034d3acc4b4", "score": "0.44486108", "text": "def set_was_influenced_by(self, resource):\n self.add(PROV.wasInfluencedBy, resource)", "title": "" }, { "docid": "ff976c4663193371e8046529c6b6000f", "score": "0.44467577", "text": "def set_reifier (self, reifier):\n if reifier is None:\n reified = None\n else:\n if self.get_topic_map() != reifier.topic_map:\n raise ModelConstraintException(\n self, 'The reifier is not from the same topic map')\n reified = reifier.get_reified()\n if reified is None:\n self.reifier = reifier\n self.save()\n elif reified == self:\n pass\n else:\n raise ModelConstraintException(\n self, 'The reifier already reifies another construct')", "title": "" }, { "docid": "ad076556a30107f03ec3777425001a6a", "score": "0.4414624", "text": "def Set(self, *args):\n return _TDataXtd.TDataXtd_Constraint_Set(self, *args)", "title": "" }, { "docid": "b54ec26ed02b21df9d4d80602cd20cfd", "score": "0.43996385", "text": "def risk(self) -> int:\n return self.__risk", "title": "" }, { "docid": "ab7d1a0f1c067be8822918cc13a9e6dd", "score": "0.4393859", "text": "def cvv(self, cvv):\n\n self._cvv = cvv", "title": "" }, { "docid": "f7307787ebea6799bbda6d95e65221c3", "score": "0.4374353", "text": "def set_resolution(self, c, serial_number, resolution):\n ps = self._get_interface(serial_number)\n ps.setResolution(resolution)", "title": "" }, { "docid": "b575372d7809da6bbe60a62303548d58", "score": 
"0.43694827", "text": "def SetPred(self, pred):\n self.pred = pred", "title": "" }, { "docid": "ca6270cf9dbc184213b72628fe06c6fe", "score": "0.43674427", "text": "def confidence(self, confidence):\n\n self._confidence = confidence", "title": "" }, { "docid": "ef372a4560dc88c010020bd74ce3aaad", "score": "0.43640766", "text": "def resident_id_number(self):\n return self.__resident_id_number", "title": "" }, { "docid": "a5fdd62a233b4f88c275d7d81eac4588", "score": "0.43603277", "text": "def sexual_orientation(self, sexual_orientation):\n\n self._sexual_orientation = sexual_orientation", "title": "" }, { "docid": "bbf4f9ab6602808c7f24ec7eb7e2657b", "score": "0.43597004", "text": "def resource_subtype(self, resource_subtype):\n\n self._resource_subtype = resource_subtype", "title": "" }, { "docid": "322b08903cc6d4fabfe5e8dc56d094a5", "score": "0.43554628", "text": "def setNoeud(self, A):\n self.racine = A", "title": "" }, { "docid": "97f8aa196c692aa2a7c012464093e792", "score": "0.43508938", "text": "def set_roidb(self, roidb):\n self._roidb = roidb\n self._shuffle_roidb_inds()", "title": "" }, { "docid": "fc8894decee577335a95a0f80a01817a", "score": "0.43449247", "text": "def rset(self):\n return self._shortcmd('RSET')", "title": "" }, { "docid": "45f6f6c6d8450b8c61f9be7b25ff2d2c", "score": "0.434268", "text": "def set(self, val, scope=None):\r\n self._expreval.set(self._transform(val), scope, force=True)", "title": "" }, { "docid": "fec4ed640373cd8599f5abbdc901856f", "score": "0.43419963", "text": "def _set_icpt(self):\n # setting icpt for parents of evidence nodes\n # to uniform distribution (according to original paper on AIS-BN)\n for e in self.evidence:\n\n if self.proposal.is_root_node(e):\n continue\n\n for parent in self.graph[e]:\n if parent is not None:\n if isinstance(self.proposal.cpt[parent], list):\n # prior node\n n = len(self.proposal.cpt[parent])\n self.proposal.cpt[parent] = [1.0 / n] * n\n else:\n for p in self.proposal.cpt[parent]:\n n = len(self.proposal.cpt[parent][p])\n self.proposal.cpt[parent][p] = [1.0 / n] * n", "title": "" }, { "docid": "4f35c40d1077009578a895bcf9eb7de5", "score": "0.43412566", "text": "def set_evidences(self, request=None):\n\t\tif bbn.set_evidences(request) == True:\n\t\t\treturn self.GOOD;\n\t\telse:\n\t\t\treturn self.FAILED\n\t\t#DONE", "title": "" }, { "docid": "6afa61c2b7f962d7aed38d8b10cbbb90", "score": "0.4330183", "text": "def sport_discipline(self, sport_discipline):\n\n self._sport_discipline = sport_discipline", "title": "" }, { "docid": "6117272461c4ade0786ad9c5b4727eac", "score": "0.4329829", "text": "def set_cvid(self, cvid):\n\n\t\tif cvid is not None and not isinstance(cvid, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: cvid EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__cvid = cvid\n\t\tself.__key_modified['cvid'] = 1", "title": "" }, { "docid": "cb9519513086ccf3f5e746aa9a9c486a", "score": "0.43294677", "text": "def data_residency(self) -> pulumi.Output[Optional['outputs.DataResidencyResponse']]:\n return pulumi.get(self, \"data_residency\")", "title": "" }, { "docid": "4e46e47c8999bf8d469c0b47da916071", "score": "0.43231082", "text": "def citizen_id(self, citizen_id):\n\n self._citizen_id = citizen_id", "title": "" }, { "docid": "470b2ae63d3b465d90fba21212f0f49f", "score": "0.43186593", "text": "def set_rflag(self, rflag):\n with self.lock:\n self.rflag = rflag\n if self.closed:\n self.rflag.set()", "title": "" }, { "docid": "7aa0d362a4aa00aaa9ee3f98cd1c4b2e", "score": "0.43171245", "text": "def 
insurance_area(self, insurance_area):\n\n self._insurance_area = insurance_area", "title": "" }, { "docid": "537a186344c8a599ff8a26f14d9d07e1", "score": "0.4311873", "text": "def __init__(self,discipline):\n self.__discipline=discipline", "title": "" }, { "docid": "434b556cc187c887c339d736a24bd4ca", "score": "0.43091756", "text": "def ordinal(self, ordinal):\n\n self._ordinal = ordinal", "title": "" }, { "docid": "b9c15f7443261c29bfd8afd65aba3287", "score": "0.43015742", "text": "def valor_renda(self, valor_renda):\n self._valor_renda = valor_renda", "title": "" }, { "docid": "aa30e873984f310d570dceafed266c8f", "score": "0.42999122", "text": "def set(self, var1, var2):\n self.cap.set(var1, var2)", "title": "" }, { "docid": "954c7711b75128de38850988ab5eef15", "score": "0.42975128", "text": "def affair(self, affair):\n\n self._affair = affair", "title": "" }, { "docid": "0fada127a726ca62de71400dbaf14aef", "score": "0.42973864", "text": "def changeIris(self, *args):\n self.cvIrisLoc = self.guideName+\"_IrisLoc\"\n cmds.setAttr(self.moduleGrp+\".iris\", cmds.checkBox(self.irisCB, query=True, value=True))\n cmds.setAttr(self.cvIrisLoc+\".visibility\", cmds.checkBox(self.irisCB, query=True, value=True))", "title": "" }, { "docid": "c806c6477ef0b511b11984e9ba760e55", "score": "0.42914957", "text": "def set_old_resnum(self, old_resnum, icode):\n self.set_extra_info('old_resnum', old_resnum)\n self.set_extra_info('old_icode', icode)", "title": "" }, { "docid": "0c4498d4e058f8ceb546435af1f3f8a8", "score": "0.42868125", "text": "def orcid_id(self, orcid_id):\n\n self._orcid_id = orcid_id", "title": "" }, { "docid": "4d0c7d72ab3a603d47d21b1351278cfc", "score": "0.4282853", "text": "def set_referencia(self,referencia):\n self.referencia = referencia", "title": "" }, { "docid": "300e17488eb924110e200f547683d213", "score": "0.42686978", "text": "def propensity_carn(self, value):\n self._propensity_carn = value", "title": "" }, { "docid": "c206bfa059b55c58716e65aaadb16b3b", "score": "0.42506772", "text": "def assessment_id(self, assessment_id):\n\n self._assessment_id = assessment_id", "title": "" }, { "docid": "c36d2f4dfaf1bbe497d9bf48b08030ac", "score": "0.42431724", "text": "def SetIRCut(self, is_ir_on):\n self.send_SetIRCut(is_ir_on)\n self.recv_SetIRCut()", "title": "" }, { "docid": "3dc10e3cf137fe1cc2217145044919dc", "score": "0.4242249", "text": "def rub(self, rub):\n if rub is None:\n raise ValueError(\"Invalid value for `rub`, must not be `None`\") # noqa: E501\n\n self._rub = rub", "title": "" }, { "docid": "57fbb07fc09e7e8eb4ab45e6d72adfc3", "score": "0.42418826", "text": "def risk_limit_base(self, risk_limit_base):\n\n self._risk_limit_base = risk_limit_base", "title": "" }, { "docid": "db3bdb3b6d420522cb8640d1bd8dc50d", "score": "0.42300165", "text": "def set_score_land(self, objective, constraints):\n self.set_err_obj_distances(objective, constraints)\n\n score = self.obj_distance/(parameters.RATIONAL_EPSILON+self.err_distance)\n self.score = score\n\n self.set_ancestor_pass()", "title": "" }, { "docid": "76627690347c4be880189a86de8d2d8f", "score": "0.42248377", "text": "def rs(self, rs):\n self._rs = rs", "title": "" }, { "docid": "17ec03029cb391d17f2704790e4e73ab", "score": "0.42221814", "text": "def set_roi(self):\n # create the MAV_CMD_DO_SET_ROI command\n location = self.drone.location.global_relative_frame\n msg = self.drone.message_factory.command_long_encode(\n 0, 0, # target system, target component\n mavutil.mavlink.MAV_CMD_DO_SET_ROI, #command\n 0, #confirmation\n 0, 0, 0, 0, 
#params 1-4\n location.lat,\n location.lon,\n location.alt\n )\n # send command to vehicle\n self.drone.send_mavlink(msg)", "title": "" }, { "docid": "0ec55320612085aeb2c7430fc401cd95", "score": "0.4219721", "text": "def insurance_type(self, insurance_type):\n\n self._insurance_type = insurance_type", "title": "" }, { "docid": "887ef7da9152c129c05b7b9f6d3c2c58", "score": "0.42166197", "text": "def set_retorno(self, retorno):\n\n self.retornos = retorno", "title": "" }, { "docid": "2fa581359e84560ec93a122c41376606", "score": "0.42163342", "text": "def rstudio(self, rstudio):\n\n self._rstudio = rstudio", "title": "" }, { "docid": "475bd3d9c3112418693f61f10518479b", "score": "0.4211241", "text": "def encounter_id(self, encounter_id):\n\n self._encounter_id = encounter_id", "title": "" }, { "docid": "6373a0d905a722c666910f6db2624b32", "score": "0.4208462", "text": "def recurrence(self, recurrence):\n if recurrence is None:\n raise ValueError(\"Invalid value for `recurrence`, must not be `None`\") # noqa: E501\n\n self._recurrence = recurrence", "title": "" }, { "docid": "8049e861f49bc1d3f64d551245be517d", "score": "0.42081845", "text": "def resolution(self, resolution: ExtendedDisputeallOf1propertiesResolution):\n\n self._resolution = resolution", "title": "" }, { "docid": "f9a65d04d7bebaa934f8269480a02952", "score": "0.42042467", "text": "def data_residency(self) -> Optional[pulumi.Input['DataResidencyArgs']]:\n return pulumi.get(self, \"data_residency\")", "title": "" }, { "docid": "ed058986eae13e1ff71bc03902e83399", "score": "0.4194669", "text": "def set_voltage_rise_rate(self, rise_rate):\n self.voltage_rise_rate.put(rise_rate)", "title": "" } ]
4be294a3daaf1f9dd3c0be1dd7890a7c
Test the result preserves input dimension order when the coordinate to integrate is not the first dimension (eg there's a leading realization coordinate)
[ { "docid": "98dbb8c89a9297a91af2488f6b5d6a1d", "score": "0.5471508", "text": "def test_dimension_preservation(self):\n cube = set_up_variable_cube(280 * np.ones((3, 3, 3), dtype=np.float32))\n cube = add_coordinate(\n cube, np.array([5.0, 10.0, 20.0]), \"height\", coord_units=\"m\"\n )\n cube.transpose([1, 0, 2, 3])\n expected_coord_order = [coord.name() for coord in cube.coords(dim_coords=True)]\n result = self.plugin.process(cube)\n self.assertEqual(result.coord_dims(\"height\"), (1,))\n result_coord_order = [coord.name() for coord in result.coords(dim_coords=True)]\n self.assertListEqual(result_coord_order, expected_coord_order)", "title": "" } ]
[ { "docid": "f41ba7a1922d8fd415d35d28ac8c8643", "score": "0.5874654", "text": "def test_integrate_column_coordinates(self):\n x = np.linspace(0, 1, 5)\n t = math.integrate_column(np.arange(5), x)\n\n assert(np.isclose(t, 2))", "title": "" }, { "docid": "2b120bfe091aa6100e43fe2a71643861", "score": "0.58647597", "text": "def test_integrate_column(self):\n t = math.integrate_column(np.arange(5))\n\n assert(np.isclose(t, 8))", "title": "" }, { "docid": "b2403f079c00970dfe91f9d97fee6694", "score": "0.58519876", "text": "def test_integration_element(two_element_geometries):\n for geom in two_element_geometries:\n np.testing.assert_almost_equal(geom.integration_element, 1)", "title": "" }, { "docid": "9fa448548de0c599fc017651fe04c643", "score": "0.56866986", "text": "def ol_integrate_local(\n arr: np.ndarray,\n ) -> Callable[[np.ndarray], NumberOrArray]:\n if arr.ndim == num_axes:\n # `arr` is a scalar field\n grid_shape = self.shape\n\n def impl(arr: np.ndarray) -> Number:\n \"\"\"integrate a scalar field\"\"\"\n assert arr.shape == grid_shape\n total = 0\n for i in range(arr.size):\n total += get_cell_volume(i) * arr.flat[i]\n return total\n\n else:\n # `arr` is a tensorial field with rank >= 1\n tensor_shape = (self.dim,) * (arr.ndim - num_axes)\n data_shape = tensor_shape + self.shape\n\n def impl(arr: np.ndarray) -> np.ndarray: # type: ignore\n \"\"\"integrate a tensorial field\"\"\"\n assert arr.shape == data_shape\n total = np.zeros(tensor_shape)\n for idx in np.ndindex(*tensor_shape):\n arr_comp = arr[idx]\n for i in range(arr_comp.size):\n total[idx] += get_cell_volume(i) * arr_comp.flat[i]\n return total\n\n return impl", "title": "" }, { "docid": "dda6697dad35f662b41ce1b9205d87d4", "score": "0.5632582", "text": "def test_basic(self):\n coord_name = \"height\"\n result = str(Integration(coord_name))\n msg = (\n \"<Integration: coord_name_to_integrate: height, \"\n \"start_point: None, end_point: None, \"\n \"positive_integration: False>\"\n )\n self.assertEqual(result, msg)", "title": "" }, { "docid": "a25a133c3619f7bbf3d9826435cbeffc", "score": "0.55956846", "text": "def test_partial_trace_sys_int_dim_int_2():\n test_input_mat = np.arange(1, 17).reshape(4, 4)\n\n expected_res = 34\n\n res = partial_trace(test_input_mat, [1], [1, 4])\n\n bool_mat = np.isclose(expected_res, res)\n np.testing.assert_equal(np.all(bool_mat), True)", "title": "" }, { "docid": "cef3f46e8d656e419521c30f4ab90ba2", "score": "0.5576368", "text": "def test_partial_trace_sys_int_dim_int():\n test_input_mat = np.arange(1, 17).reshape(4, 4)\n\n expected_res = np.array([[7, 11], [23, 27]])\n\n res = partial_trace(test_input_mat, [1])\n\n bool_mat = np.isclose(expected_res, res)\n np.testing.assert_equal(np.all(bool_mat), True)", "title": "" }, { "docid": "ed7aaac0d41d38f29f9be73a63ca8dce", "score": "0.5549346", "text": "def _check_exact_dimension(*args):\n\n # Retrieving the object and the input from arguments\n obj, x = args[0], args[1]\n\n # Tries to squeeze the last dimension of `x` as it might be an array of (dim, 1)\n try:\n x = np.squeeze(x, axis=1)\n\n except ValueError:\n pass\n\n # If the function's number of dimensions is equal to `-1` (n-dimensional)\n if obj.dims == -1:\n if x.shape[0] == 0:\n raise e.SizeError(f'{obj.name} input should be n-dimensional')\n\n return f(obj, x)\n\n # If the input dimensions is different from function's allowed dimensions\n if x.shape[0] != obj.dims:\n raise e.SizeError(f'{obj.name} input should be {obj.dims}-dimensional')\n\n return f(obj, x)", "title": "" }, { "docid": 
"6e2f200e63ac4e2d44e7bad28ac50cb5", "score": "0.5507682", "text": "def integrate_global(arr: np.ndarray) -> NumberOrArray:\n integral = integrate_local(arr)\n return mpi_allreduce(integral) # type: ignore", "title": "" }, { "docid": "a9117b82545ed0f06fdb6110e6016a02", "score": "0.54777163", "text": "def _check_dimensions(x, y, solved_shapes):\n # print(f\"{x}x{y}\")\n if (x, y) in solved_shapes.keys():\n return solved_shapes[(x, y)]\n elif (y, x) in solved_shapes.keys():\n return solved_shapes[(y, x)]\n elif x == 1:\n solved_shapes[(x, y)] = y + 1\n return y + 1\n elif y == 1:\n solved_shapes[(x, y)] = x + 1\n return x + 1\n else:\n solved_shapes[(x, y)] = _check_dimensions(x - 1, y, solved_shapes) + _check_dimensions(x, y - 1, solved_shapes)\n return solved_shapes[(x, y)]", "title": "" }, { "docid": "0a9bab8e9760f4b44481de0c7f2c202b", "score": "0.54430807", "text": "def test_first_integration(fits_input, fits_output):\n\n assert np.all(fits_input['SCI'].data[0] == fits_output['SCI'].data[0])", "title": "" }, { "docid": "e91b778437f292faf37a4da4bdc40f11", "score": "0.5376729", "text": "def integrate(self,x1,x2):\n return 1", "title": "" }, { "docid": "8681ede88cc398aa5ec1703293f0ebdf", "score": "0.5371367", "text": "def unit(x_hat):\n temp = scipy.squeeze(x_hat)\n if temp.shape[0] != 3:\n raise ValueError('input is not in a coordinate system, first dimension must be 3')\n return temp", "title": "" }, { "docid": "d15d37b135e4fa3524e96b7140e78255", "score": "0.534553", "text": "def check_exact_dimension(f):\n\n def _check_exact_dimension(*args):\n \"\"\"Wraps the dimension checking in order to provide additional logic.\n\n Returns:\n The wrapped function output.\n\n \"\"\"\n\n # Retrieving the object and the input from arguments\n obj, x = args[0], args[1]\n\n # Tries to squeeze the last dimension of `x` as it might be an array of (dim, 1)\n try:\n x = np.squeeze(x, axis=1)\n\n except ValueError:\n pass\n\n # If the function's number of dimensions is equal to `-1` (n-dimensional)\n if obj.dims == -1:\n if x.shape[0] == 0:\n raise e.SizeError(f'{obj.name} input should be n-dimensional')\n\n return f(obj, x)\n\n # If the input dimensions is different from function's allowed dimensions\n if x.shape[0] != obj.dims:\n raise e.SizeError(f'{obj.name} input should be {obj.dims}-dimensional')\n\n return f(obj, x)\n\n return _check_exact_dimension", "title": "" }, { "docid": "38285fa940de30a24c454ad1c29dcdab", "score": "0.53400886", "text": "def IS_integrate(f, g, h, sampler, n):\n raise NotImplementedError(\"Problem 2 Incomplete\")", "title": "" }, { "docid": "35ff273b0c290200176e87db6457d7a5", "score": "0.5323855", "text": "def _check_x_dimension(x):\n x = np.atleast_1d(x)\n if len(x.shape) > 2 or (len(x.shape) == 2 and x.shape[1] != 1):\n raise ValueError('Wrong dimension in x.')\n return x.reshape((-1, ))", "title": "" }, { "docid": "b7a2a16c92652ca135f3cb11db89cd92", "score": "0.5306183", "text": "def integrate_local(arr: np.ndarray) -> NumberOrArray:\n amounts = arr * self.cell_volumes\n return amounts.sum(axis=tuple(range(-num_axes, 0, 1))) # type: ignore", "title": "" }, { "docid": "ba66271f35da3b9791cade5a6da6cc03", "score": "0.5305297", "text": "def _check_less_equal_dimension(*args):\n\n # Retrieving the object and the input from arguments\n obj, x = args[0], args[1]\n\n # Tries to squeeze the last dimension of `x` as it might be an array of (dim, 1)\n try:\n x = np.squeeze(x, axis=1)\n\n except ValueError:\n pass\n\n # If the input dimensions is different from function's allowed 
dimensions\n if x.shape[0] > obj.dims:\n raise e.SizeError(f'{obj.name} input should be less or equal to {obj.dims}-dimensional')\n\n return f(obj, x)", "title": "" }, { "docid": "6172d2fccfdc339980975a36fda0e29f", "score": "0.5293063", "text": "def _check_x_dimension(x):\n x = np.atleast_1d(x)\n if len(x.shape) > 2 or (len(x.shape) == 2 and x.shape[1] != 1):\n raise ValueError('Wrong dimension in x.')\n return x.reshape((-1,))", "title": "" }, { "docid": "a4ae356fd1f6cf6d7dc439d62b3b45b1", "score": "0.5279457", "text": "def requires_2d_input(self):", "title": "" }, { "docid": "1853098cd20e6c1fd093914daa97c81e", "score": "0.5267114", "text": "def test_integraldomain_is_in_domain():\n dom_params = Namespace(min_max=[(0, 5), (-3.2, 1)])\n pts_01 = np.array([[0, 1], [0.0, 1.0], [3, -3], [4, -2]])\n pts_02 = np.array(\n [[-1, 0], [6, 0], [2, -4], [2, 2], [2.2, 0], [2, 0.3], [2.2, 0.3]]\n )\n domain = IntegralDomain(params=dom_params)\n in_dom_list_01 = [domain.is_in_domain(pt) for pt in pts_01]\n in_dom_list_02 = [domain.is_in_domain(pt) for pt in pts_02]\n assert all(b is True for b in in_dom_list_01)\n assert all(b is False for b in in_dom_list_02)", "title": "" }, { "docid": "f34e631f41c84b7961081b1e0c587872", "score": "0.52346665", "text": "def test_partial_trace_sys_int():\n test_input_mat = np.arange(1, 17).reshape(4, 4)\n\n expected_res = np.array([[12, 14], [20, 22]])\n\n res = partial_trace(test_input_mat, 0)\n\n bool_mat = np.isclose(expected_res, res)\n np.testing.assert_equal(np.all(bool_mat), True)", "title": "" }, { "docid": "04f910e715999a68bbdd30eaeadef36a", "score": "0.52221394", "text": "def test_reordering(self):\n result = self.plugin._slice_over_coordinate(self.cube, \"realization\")\n self.assertEqual(len(result), 3)\n for i in range(3):\n dim_coord_names = [\n coord.name() for coord in result[i].coords(dim_coords=True)]\n self.assertEqual(dim_coord_names[0], \"realization\")", "title": "" }, { "docid": "b14329e0b5cbf164863c9cfb52f49cf9", "score": "0.51861185", "text": "def is_in_bound(trial, param):\n return param.value in self.new_dimension", "title": "" }, { "docid": "7d5cfc3d95510ba8212c4a5787cf2711", "score": "0.51828134", "text": "def _check_x_dimension(x, d=None):\n x = np.array(x)\n if len(x.shape) != 2:\n raise ValueError('Wrong dimension in x.')\n if (d is not None) and (x.shape[1] != d):\n raise ValueError('Wrong dimension in x.')\n return x", "title": "" }, { "docid": "8cd2742a208a212bc5190a416d284ced", "score": "0.5182152", "text": "def is_1d(self):\n if self.shape is None:\n return False\n return all([dim == 1 for dim in self.shape[1:]])", "title": "" }, { "docid": "e870edc231c4f5cb4fad1b7f50cd4276", "score": "0.51817715", "text": "def check_dimension(self, init_x):\n if self.f == extended_rosenbrock_function and init_x.size % 2 != 0:\n raise ValueError(\"Invalid input dimension.\")\n elif self.f == extended_powell_singular_function and init_x.size % 4 != 0:\n raise ValueError(\"Invalide input dimension\")", "title": "" }, { "docid": "e870edc231c4f5cb4fad1b7f50cd4276", "score": "0.51817715", "text": "def check_dimension(self, init_x):\n if self.f == extended_rosenbrock_function and init_x.size % 2 != 0:\n raise ValueError(\"Invalid input dimension.\")\n elif self.f == extended_powell_singular_function and init_x.size % 4 != 0:\n raise ValueError(\"Invalide input dimension\")", "title": "" }, { "docid": "892edcb4a0236c94c4616e9f9348c173", "score": "0.51809335", "text": "def check_x_dimension(x):\n array = np.atleast_1d(x)\n if len(array.shape) > 2 or 
(len(array.shape) == 2 and array.shape[1] != 1):\n raise ValueError(\"Wrong dimension in x.\")\n return array.reshape((-1,))", "title": "" }, { "docid": "b895e965073c3393eef1f9844efb7c16", "score": "0.5174387", "text": "def single_elem_2_array_p(one_arg):\r\n elem, funct = one_arg\r\n intermed = integrate_elem(elem, funct)\r\n return intermed", "title": "" }, { "docid": "eb88e689bdf7606e8041ee04894648c7", "score": "0.5173186", "text": "def make_integrator(self) -> Callable[[np.ndarray], NumberOrArray]:\n num_axes = self.num_axes\n # cell volume varies with position\n get_cell_volume = self.make_cell_volume_compiled(flat_index=True)\n\n def integrate_local(arr: np.ndarray) -> NumberOrArray:\n \"\"\"integrates data over a grid using numpy\"\"\"\n amounts = arr * self.cell_volumes\n return amounts.sum(axis=tuple(range(-num_axes, 0, 1))) # type: ignore\n\n @overload(integrate_local)\n def ol_integrate_local(\n arr: np.ndarray,\n ) -> Callable[[np.ndarray], NumberOrArray]:\n \"\"\"integrates data over a grid using numba\"\"\"\n if arr.ndim == num_axes:\n # `arr` is a scalar field\n grid_shape = self.shape\n\n def impl(arr: np.ndarray) -> Number:\n \"\"\"integrate a scalar field\"\"\"\n assert arr.shape == grid_shape\n total = 0\n for i in range(arr.size):\n total += get_cell_volume(i) * arr.flat[i]\n return total\n\n else:\n # `arr` is a tensorial field with rank >= 1\n tensor_shape = (self.dim,) * (arr.ndim - num_axes)\n data_shape = tensor_shape + self.shape\n\n def impl(arr: np.ndarray) -> np.ndarray: # type: ignore\n \"\"\"integrate a tensorial field\"\"\"\n assert arr.shape == data_shape\n total = np.zeros(tensor_shape)\n for idx in np.ndindex(*tensor_shape):\n arr_comp = arr[idx]\n for i in range(arr_comp.size):\n total[idx] += get_cell_volume(i) * arr_comp.flat[i]\n return total\n\n return impl\n\n # deal with MPI multiprocessing\n if self._mesh is None or len(self._mesh) == 1:\n # standard case of a single integral\n @jit\n def integrate_global(arr: np.ndarray) -> NumberOrArray:\n \"\"\"integrate data\n\n Args:\n arr (:class:`~numpy.ndarray`): discretized data on grid\n \"\"\"\n return integrate_local(arr)\n\n else:\n # we are in a parallel run, so we need to gather the sub-integrals from all\n # subgrids in the grid mesh\n from ..tools.mpi import mpi_allreduce\n\n @jit\n def integrate_global(arr: np.ndarray) -> NumberOrArray:\n \"\"\"integrate data over MPI parallelized grid\n\n Args:\n arr (:class:`~numpy.ndarray`): discretized data on grid\n \"\"\"\n integral = integrate_local(arr)\n return mpi_allreduce(integral) # type: ignore\n\n return integrate_global # type: ignore", "title": "" }, { "docid": "4117968108c12c6eed95cae185aea070", "score": "0.516667", "text": "def integrate(self):\n raise MethodImplementationError(self, 'integrate')", "title": "" }, { "docid": "35a79cfed83cf9095e9642d723d10238", "score": "0.51280475", "text": "def test_separate_coords_error(arr, expected):\n with expected:\n assert EnsembleKalmanFilter.separate_coords(arr)", "title": "" }, { "docid": "9346e0d8e4b6079a708ff5051a7be3f2", "score": "0.51262695", "text": "def check_less_equal_dimension(f):\n\n def _check_less_equal_dimension(*args):\n \"\"\"Wraps the dimension checking in order to provide additional logic.\n\n Returns:\n The wrapped function output.\n\n \"\"\"\n\n # Retrieving the object and the input from arguments\n obj, x = args[0], args[1]\n\n # Tries to squeeze the last dimension of `x` as it might be an array of (dim, 1)\n try:\n x = np.squeeze(x, axis=1)\n\n except ValueError:\n pass\n\n # If 
the input dimensions is different from function's allowed dimensions\n if x.shape[0] > obj.dims:\n raise e.SizeError(f'{obj.name} input should be less or equal to {obj.dims}-dimensional')\n\n return f(obj, x)\n\n return _check_less_equal_dimension", "title": "" }, { "docid": "0d62813e5bcc1f28a7488f1809afdfe9", "score": "0.51135397", "text": "def isIntegral():", "title": "" }, { "docid": "0d62813e5bcc1f28a7488f1809afdfe9", "score": "0.51135397", "text": "def isIntegral():", "title": "" }, { "docid": "c49d47f6cd107736ff7dfda6e69e4c05", "score": "0.51011837", "text": "def integrate(\n self: T,\n *,\n domain: Optional[DomainRange] = None,\n ) -> NDArrayFloat:\n pass", "title": "" }, { "docid": "17ff7d7cfd0ad4650d15eb51306fae52", "score": "0.50880814", "text": "def integrate_over_grid(X, Y, Z):\n int_x = np.trapz(Z, X, axis=1)\n return np.trapz(int_x, Y[:,0])", "title": "" }, { "docid": "e33403327e00884f2f90154ddac99ded", "score": "0.5079505", "text": "def test_integrated_variance_fails_with_out_of_domain_test_points(model):\n space = ParameterSpace([ContinuousParameter('x1', 0, 1), ContinuousParameter('x2', 0, 1)])\n\n x_monte_carlo = np.array([[0.5, 20.], [0.2, 0.3]])\n\n with pytest.raises(ValueError):\n IntegratedVarianceReduction(model, space, x_monte_carlo)", "title": "" }, { "docid": "e96be989be73e25c708518b43472c341", "score": "0.5076574", "text": "def test_dim_coords(dat, dat_inst, variable, dat_name, failed):\n for dim in dat.dims:\n if (dim not in dat_inst.dims) or (dim == \"Time\"):\n continue\n c = dat[dim].values\n cr = dat_inst[dim].values\n if (len(c) != len(cr)) or (c != cr).any():\n print(\n \"Coordinates for dimension {} in data {} of variable {}\"\n \" differs between postprocessed output and WRF output:\"\n \"\\n {} vs. {}\".format(dim, dat_name, variable, c, cr)\n )\n f = \"FAIL\"\n else:\n f = \"pass\"\n f0 = failed.loc[variable, \"dim_coords\"]\n if (f0 == \"\") or (f0 == \"pass\"):\n failed.loc[variable, \"dim_coords\"] = f", "title": "" }, { "docid": "d7046a09a8002349f403a18e53f522c6", "score": "0.5072907", "text": "def nd_shape_checking(x, y, mvaxis, traxis):\n assert x.ndim == y.ndim\n dims = np.delete(np.arange(x.ndim), -2)\n assert all([x.shape[k] == y.shape[k] for k in dims])", "title": "" }, { "docid": "9c3c587fc151c58b2e4df2c2d597cba9", "score": "0.5067622", "text": "def test_loc_deph_lindblad_op(dims, site_j, expected):\n\n assert np.all(lind.loc_deph_lindblad_op(dims, site_j) == expected)", "title": "" }, { "docid": "87849ac836869c0e4217f7eb9dc6e7ba", "score": "0.5053569", "text": "def test_partial_trace_sys():\n test_input_mat = np.arange(1, 17).reshape(4, 4)\n\n expected_res = np.array([[12, 14], [20, 22]])\n\n res = partial_trace(test_input_mat, [0])\n\n bool_mat = np.isclose(expected_res, res)\n np.testing.assert_equal(np.all(bool_mat), True)", "title": "" }, { "docid": "ccd2731d76a5c21abfb2f386dc9c92aa", "score": "0.5021951", "text": "def clearance(self, x):\n x = scipy.asarray(x)\n if (x < 0).any():\n return False\n if any(i >= j for i, j in zip(x, self.space.shape)):\n return False\n return self.space[tuple(x)]", "title": "" }, { "docid": "eafae0acc3d1459d26cc87fa69ff8b8b", "score": "0.5013441", "text": "def test_GridDataInterpolator_1DGrid():\n from pyirf.interpolation import GridDataInterpolator\n\n grid_points = np.array([[0], [1]])\n target_point = np.array([[0.5]])\n\n dummy_data1 = np.array([[[0, 1], [1, 1]], [[0, 2], [2, 3]], [[0, 3], [3, 5]]])\n dummy_data2 = np.array([[[0, 2], [2, 2]], [[0, 4], [4, 6]], [[0, 6], [6, 10]]])\n\n dummy_data = 
np.array([dummy_data1, dummy_data2])\n\n interpolator = GridDataInterpolator(\n grid_points=grid_points, params=dummy_data, method=\"linear\"\n )\n interpolant = interpolator(target_point)\n\n dummy_data_target = 1.5 * dummy_data1\n\n assert np.allclose(interpolant, dummy_data_target)\n assert interpolant.shape == (1, *dummy_data.shape[1:])", "title": "" }, { "docid": "b6b5fdb3c56f8368080e493e3c06f7e9", "score": "0.4996801", "text": "def test_multidimensional(self):\r\n # Get some higher-dimensional test data\r\n c1 = istk.realistic_4d()\r\n # Chop down to small size, and mask some data\r\n c1 = c1[:3, :4, :16, :12]\r\n c1.data[:, 2, :, :] = np.ma.masked\r\n c1.data[1, 1, 3:9, 4:7] = np.ma.masked\r\n # Give it a slightly more challenging indexing order: tzyx --> xzty\r\n c1.transpose((3, 1, 0, 2))\r\n\r\n # Construct a (coarser) target grid of about the same extent\r\n c1_cs = c1.coord(axis='x').coord_system\r\n xlims = _minmax(c1.coord(axis='x').contiguous_bounds())\r\n ylims = _minmax(c1.coord(axis='y').contiguous_bounds())\r\n # Reduce the dimensions slightly to avoid NaNs in regridded orography\r\n delta = 0.05\r\n # || NOTE: this is *not* a small amount. Think there is a bug.\r\n # || NOTE: See https://github.com/SciTools/iris/issues/458\r\n xlims = np.interp([delta, 1.0 - delta], [0, 1], xlims)\r\n ylims = np.interp([delta, 1.0 - delta], [0, 1], ylims)\r\n pole_latlon = (c1_cs.grid_north_pole_latitude,\r\n c1_cs.grid_north_pole_longitude)\r\n c2 = _make_test_cube((7, 8), xlims, ylims, pole_latlon=pole_latlon)\r\n\r\n # regrid onto new grid\r\n c1_to_c2 = regrid_conservative_via_esmpy(c1, c2)\r\n\r\n # check that all the original coords exist in the new cube\r\n # NOTE: this also effectively confirms we haven't lost the orography\r\n def list_coord_names(cube):\r\n return sorted([coord.name() for coord in cube.coords()])\r\n\r\n self.assertEqual(list_coord_names(c1_to_c2), list_coord_names(c1))\r\n\r\n # check that each xy 'slice' has same values as if done on its own.\r\n for i_p, i_t in np.ndindex(c1.shape[1:3]):\r\n c1_slice = c1[:, i_p, i_t]\r\n c2_slice = regrid_conservative_via_esmpy(c1_slice, c2)\r\n subcube = c1_to_c2[:, i_p, i_t]\r\n self.assertEqual(subcube, c2_slice)\r\n\r\n # check all other metadata\r\n self.assertEqual(c1_to_c2.metadata, c1.metadata)", "title": "" }, { "docid": "d7d15166959b545e79f8ca37f0f4784a", "score": "0.4994784", "text": "def test_shape_of_inflate_array(shape: IntTuple, grid: IntTuple) -> None:\n grid = grid[: len(shape)]\n\n inflated_array = nf.inflate_array(np.empty(shape), grid)\n\n assert inflated_array.ndim == len(shape)\n for i in range(len(grid)):\n assert inflated_array.shape[i] == grid[i] * shape[i]\n for i in range(len(grid), inflated_array.ndim):\n assert inflated_array.shape[i] == shape[i]", "title": "" }, { "docid": "b6ccfe2464dfd151dc7447d6ad022ba9", "score": "0.4989598", "text": "def isneginf(x=None, y=None):\n return ndarray()", "title": "" }, { "docid": "ed399df240d492086b495d7462e728bd", "score": "0.49836963", "text": "def integration_1d():\n I = integration_1d_sym(x, x0, x1)\n\n def P0(f):\n p = I[0](f(x))\n return sy.lambdify((x0,), p)\n\n def P1(f):\n p = I[1](f(x))\n return sy.lambdify((x0, x1), p)\n\n return P0, P1", "title": "" }, { "docid": "a7b1a82005f746cbfddf171b05a37fe0", "score": "0.49753493", "text": "def test_x_wrong_ndims():\n z = np.zeros((0, 2))\n x = [[np.array([0., 0.]), np.array([[1., 0.]]), np.array([[0., 1.]]), np.array([[1., 1.]])],\n [z, z, z, z], [z], []]\n assert_failure(x=x)", "title": "" }, { "docid": 
"ff4a335aad407847b4038eede45c3c6a", "score": "0.49692056", "text": "def first_tensor_invariant(A): \n return A[0,0] + A[1,1] + A[2,2]", "title": "" }, { "docid": "d7354adf5aac0eb2b3794b8c34791a1e", "score": "0.4966854", "text": "def _check_dimension(params):\n input_col = params['input_col']\n output_col = params['output_col']\n\n interaction_only = params['interaction_only']\n degree = params['degree']\n\n from sklearn.preprocessing import PolynomialFeatures\n poly = PolynomialFeatures(degree=degree, interaction_only=interaction_only)\n tmp = [[0 for _ in range(len(input_col))]]\n dim = poly.fit_transform(tmp).shape[1]\n\n params['output_col'] = ['col{}{}'.format(i, output_col) for i in range(dim)]\n return params", "title": "" }, { "docid": "7f6841e153d617aa50bcb5d13c4cb8f4", "score": "0.49655724", "text": "def test_check_data_array_dimensions_match_unknown_dimension():\n time_size, depth_size = 4, 3\n lon_size, lat_size = 10, 15\n\n dataset = xarray.Dataset(\n data_vars={\n 'botz': ([\"lat\", \"lon\"], 50 + 25 * numpy.random.random_sample((lat_size, lon_size))),\n 'eta': (\n [\"time\", \"lat\", \"lon\"],\n numpy.random.random_sample((time_size, lat_size, lon_size)),\n ),\n },\n coords={\n 'time': ([\"time\"], pandas.date_range(\"2021-12-21\", periods=time_size)),\n 'lon': ([\"lon\"], numpy.arange(lon_size) + 0.5),\n 'lat': ([\"lat\"], numpy.arange(lat_size) + 0.5),\n }\n )\n\n # Slicing the temperature to get a subset of the lat/lon grid will cause\n # the dimensions to differ\n temp = xarray.DataArray(\n data=15 + 3 * numpy.random.random_sample((time_size, depth_size, lat_size, lon_size)),\n dims=['time', 'depth', 'lat', 'lon'],\n )\n\n with pytest.raises(ValueError):\n utils.check_data_array_dimensions_match(dataset, temp)\n\n # If you remove the extra dimension, this should be fine\n surface_temp = temp.isel({'depth': 0}, drop=True)\n utils.check_data_array_dimensions_match(dataset, surface_temp)", "title": "" }, { "docid": "2bed6c0451a6a726bb30ad1aa92fe0c4", "score": "0.49650878", "text": "def is_single_unit(data):\n try:\n if isinstance(data[0][0], list) or isinstance(data[0][0], np.ndarray):\n warnings.warn(\"spike times input has too many layers!\")\n if max(np.array(data).shape[:-1]) > 1:\n # singletons = True\n return False\n data = np.squeeze(data)\n except (IndexError, TypeError):\n pass\n try:\n if isinstance(data[1], list) or isinstance(data[1], np.ndarray):\n return False\n except (IndexError, TypeError):\n pass\n return True", "title": "" }, { "docid": "32b78db03be1c39ffe970d2456c9497c", "score": "0.4958135", "text": "def _check_exact_dimension_and_auxiliary_matrix(*args):\n\n # Retrieving the object and the input from arguments\n obj, x = args[0], args[1]\n\n # Tries to squeeze the last dimension of `x` as it might be an array of (dim, 1)\n try:\n x = np.squeeze(x, axis=1)\n\n except ValueError:\n pass\n\n # If the input dimensions differs from function's allowed dimensions\n if x.shape[0] not in [2, 10, 30, 50]:\n raise e.SizeError(f'{obj.name} input should be 2-, 10-, 30- or 50-dimensional')\n\n if x.shape[0] == 2:\n setattr(obj, 'M', obj.M2)\n\n elif x.shape[0] == 10:\n setattr(obj, 'M', obj.M10)\n\n elif x.shape[0] == 30:\n setattr(obj, 'M', obj.M30)\n\n elif x.shape[0] == 50:\n setattr(obj, 'M', obj.M50)\n\n return f(obj, x)", "title": "" }, { "docid": "52ea77bb84f211310b7a6e2db87d88ec", "score": "0.49564505", "text": "def test_partial_trace():\n test_input_mat = np.arange(1, 17).reshape(4, 4)\n\n expected_res = np.array([[7, 11], [23, 27]])\n\n res = 
partial_trace(test_input_mat)\n\n bool_mat = np.isclose(expected_res, res)\n np.testing.assert_equal(np.all(bool_mat), True)", "title": "" }, { "docid": "59eebe69be0d9507ef3c05dae0b81843", "score": "0.49482214", "text": "def test_do_interpolate_func(my_data):\n loss = do_interpolate(my_data, scipy=False, func=func)\n assert 0 < loss < np.inf", "title": "" }, { "docid": "44ad4527f255f97e0c4c7681ce973830", "score": "0.4942044", "text": "def test_addition_centered_grid(self):\n shape = [32, 27]\n for boundary in [CLOSED, PERIODIC, OPEN]:\n domain = Domain(shape, boundaries=(boundary, boundary))\n centered_grid = CenteredGrid.sample(1, domain)\n result_array = (centered_grid + centered_grid).data\n np.testing.assert_array_equal(result_array, 2)", "title": "" }, { "docid": "d49a5818feafbc1ae551a9cea90fecdf", "score": "0.49379927", "text": "def integrate(func, x_init, x_final, slices=100, mode=\"simpson\"):\n\n h = (x_final - x_init)/float(slices)\n\n integral = 0.0\n if mode == \"midpoint\":\n for i in range(slices):\n x0 = x_init + i*h\n x1 = x0 + i*h\n xmid = (x0 + x1)/2.0\n integral += func(xmid)*h\n\n if mode == \"trapezoid\":\n for i in range(slices):\n x0 = x_init + i*h\n x1 = x0 + h\n y0 = func(x0)\n y1 = func(x1)\n integral += (y0 + y1)/2.0 * h\n if mode == \"simpson\":\n #enforce even number of slices out of laziness\n for i in range(0, slices, 2): \n x0 = x_init + i*h\n x1 = x0 + h\n x2 = x1 + h\n y0 = func(x0)\n y1 = func(x1)\n y2 = func(x2)\n integral += (y0 + 4*y1 + y2) * (h/3)\n\n return integral", "title": "" }, { "docid": "a4d15c3a4042e57b988f47d04303f463", "score": "0.49265644", "text": "def test_dimensionality_output(self):\n\n detrending = data_prep.Detrending(alpha = 0.001,\n variance = 0.001,\n noise = 0.001)\n\n matrix = detrending.cov_matrix_SE(self.test_x, self.test_x)\n prior_trace = detrending.generate_prior_SE_trace(self.test_x, 1)\n\n self.assertEqual(np.shape(matrix), tuple((len(self.test_x), len(self.test_x))))\n self.assertEqual(np.shape(prior_trace), tuple((len(self.test_x), 2))) # (number_of_observations, number_of_traces + 1)\n # self.assertEqual(prior_trace[:,0], self.test_x)\n\n optim = detrending.optimise_SE_trace(0.001, 0.001, 0.001, self.test_x, self.test_y)\n\n self.assertTrue(len(optim) == 3) # three parameters optimised\n\n posterior_trace = detrending.fit_SE(self.observed_x, self.observed_y, self.test_x, 1, True)\n\n self.assertEqual(np.shape(posterior_trace), tuple((len(self.test_x), 3))) # (number_of_observations, number_of_traces + 2)\n\n detrended_data, test_timepoints, fits = detrending.detrend_data(self.observed_x, self.data, True)\n\n self.assertTrue(np.shape(detrended_data) == np.shape(self.data))\n self.assertTrue(np.shape(fits) == tuple((len(test_timepoints), np.shape(detrended_data)[1])))", "title": "" }, { "docid": "399860a694d3be8648ddbe8390319c17", "score": "0.4923434", "text": "def isposinf(x=None, y=None):\n return ndarray()", "title": "" }, { "docid": "c9abc08cd4dda955e9fcd10aa58bff1d", "score": "0.49133965", "text": "def is_continuous_tailing_axis(self, shape, reduce_axis):\n for i in self._origin_op:\n if i[\"op\"] == 'broadcast_for_tensor':\n return False\n\n if len(reduce_axis) > 1:\n index_list = [index for index, _ in enumerate(shape)]\n\n if index_list[-len(reduce_axis):] == reduce_axis:\n return True\n\n return False", "title": "" }, { "docid": "28e5c77a28401d92ac740692b85dda8c", "score": "0.4896915", "text": "def is_dimensionless(self):\r\n return (self.category == _CATEGORY_UDUNIT and\r\n 
bool(_ut_is_dimensionless(self.ut_unit)))", "title": "" }, { "docid": "0f0d738d0879325a2eedec941047ebd4", "score": "0.48961633", "text": "def check_coordinates(data_array: xarray.DataArray):\n missing_dimensions = [d for d in horizontal_spatial_dimensions if d not in data_array.dims]\n if missing_dimensions:\n message = f\"Structural similarity index expects data with dimensions {horizontal_spatial_dimensions}.\\n\" \\\n f\"Dimension/s {missing_dimensions} missing.\"\n raise EnstoolsError(message)", "title": "" }, { "docid": "27442fb24554a40e7b7addc23f608b30", "score": "0.48928264", "text": "def test_multidim_moving_projection():\n\n x_ = np.random.randn(2000)\n\n x = np.empty((5, 10, 2000))\n x[:] = x_\n y = moving_projection(x, 200, 5/200.)\n\n err = y - y[0, 0]\n assert np.sum(err ** 2) < 1e-8", "title": "" }, { "docid": "87999cf2f95e74ef2b32064f96212ee9", "score": "0.48843023", "text": "def check_conformity(self, x:np.ndarray) -> np.ndarray:\n ...", "title": "" }, { "docid": "8814c55ab137be6f42bb6ec04e48350c", "score": "0.4884279", "text": "def test_infinity_handling(self):\n a = np.array([[1, 2], [1, np.inf]])\n b = np.array([3, 4])\n c = np.array([[1, 2], [3, 4]])\n ref = np.array([[[4, 6], [9, 12]],\n [[4, 6], [np.inf, np.inf]]])\n res = gulinalg.update_rank1(a, b, c)\n assert_allclose(res, ref)", "title": "" }, { "docid": "6a1f63711ea06f85ccd8a723f4957e9e", "score": "0.48806694", "text": "def implicit_constant(x):\n nobs = x.shape[0]\n rank = np.linalg.matrix_rank(np.hstack((np.ones((nobs, 1)), x)))\n return rank == x.shape[1]", "title": "" }, { "docid": "53bc6a2959655332cd00ded4d4538fb3", "score": "0.4879", "text": "def isreal(self, x):\n return ndarray()", "title": "" }, { "docid": "07cc43428533fcfc16f59765314edd5b", "score": "0.4868493", "text": "def test_partial_trace_non_square_matrix_dim_2():\n with np.testing.assert_raises(ValueError):\n rho = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])\n partial_trace(rho, [1], [2])", "title": "" }, { "docid": "9242fe04e4d70c7e783b4a3c718cad16", "score": "0.48668632", "text": "def integrate(self, t1, t2) -> float:\n ...", "title": "" }, { "docid": "cec740e22520d3afad20cb7362d58130", "score": "0.48601013", "text": "def isreal(x):\n return ndarray()", "title": "" }, { "docid": "baa17947f07f54a84091e09524365022", "score": "0.48562244", "text": "def integrate_over_grid(grid_step, F, axis=None):\n if axis is None:\n V = np.prod(grid_step)\n else:\n V = grid_step\n \n return V * np.sum(F, axis=axis)", "title": "" }, { "docid": "a80b25a8427d577786b74f624725a012", "score": "0.48501012", "text": "def test_interp_inplace():\n x = np.arange(10)\n y_actual = 2 + 5 * x\n\n y_calc = np.empty_like(y_actual)\n y_calc[0] = y_actual[0]\n y_calc[-1] = y_actual[-1]\n\n output = utils._interp_inplace(x, y_calc, y_calc[0], y_calc[-1])\n\n # output should be the same object as the input y array\n assert output is y_calc\n\n assert_allclose(y_calc, y_actual, 1e-12)", "title": "" }, { "docid": "a4f20bdf26e45c92fa051742c6abe47f", "score": "0.48459294", "text": "def is_monotonic(self):\r\n\r\n if self.ndim != 1:\r\n raise iris.exceptions.CoordinateMultiDimError(self)\r\n\r\n if self.shape == (1,):\r\n return True\r\n\r\n if self.points is not None:\r\n if not iris.util.monotonic(self.points, strict=True):\r\n return False\r\n\r\n if self.bounds is not None:\r\n for b_index in xrange(self.nbounds):\r\n if not iris.util.monotonic(self.bounds[..., b_index],\r\n strict=True):\r\n return False\r\n\r\n return True", "title": "" }, { "docid": 
"dc87149efa2aa73d40da4d5ce32ff726", "score": "0.484416", "text": "def test_integraldomain_defaults_is_in_domain():\n pts_01 = [2, 2.0, 4, 4.0, np.array(2), np.array(2.0), np.array(4.0)]\n pts_02 = [2.1, 4.1, np.array(2.1)]\n domain = IntegralDomain()\n in_dom_list_01 = [domain.is_in_domain(pt) for pt in pts_01]\n in_dom_list_02 = [domain.is_in_domain(pt) for pt in pts_02]\n assert all(b is True for b in in_dom_list_01)\n assert all(b is False for b in in_dom_list_02)", "title": "" }, { "docid": "9d57c5ac711b360b6f382f18288f819a", "score": "0.48433647", "text": "def valid_shape(x):\n return ((isinstance(x, numbers.Integral) and (x > 0)) or\n (x is None) or\n (isinstance(x, (tuple, list)) and\n (len(x) > 0) and\n all([((isinstance(n, numbers.Integral) and (n > 0)) or\n (n is None)) for n in x])))", "title": "" }, { "docid": "e8261c3acfc8c444e14ed380bd05af2c", "score": "0.48402226", "text": "def test_two_coords(self):\n expected_data = self.cube.collapsed(\n [\"realization\", \"latitude\"], iris.analysis.MEAN\n ).data\n result = collapsed(self.cube, [\"realization\", \"latitude\"], iris.analysis.MEAN)\n self.assertTrue((result.data == expected_data).all())\n self.assertEqual(\n result.coord(\"latitude\").dtype, self.cube.coord(\"latitude\").dtype\n )", "title": "" }, { "docid": "04397426ff10c6fdad78e98a47f1ff16", "score": "0.48389104", "text": "def valid(self):\n\t\txsq = self.x*self.x\n\t\tysq = self.y*self.y\n\t\treturn (JUBJUB_A * xsq) + ysq == (1 + JUBJUB_D * xsq * ysq)", "title": "" }, { "docid": "d874e823076e12a1bcf13ccb84c39124", "score": "0.4837981", "text": "def integrate(\n self, data: NumberOrArray, axes: Union[int, Sequence[int], None] = None\n ) -> NumberOrArray:\n # determine the volumes of the individual cells\n if axes is None:\n volume_list = self.cell_volume_data\n else:\n # use stored value for the default case of integrating over all axes\n if isinstance(axes, int):\n axes = (axes,)\n else:\n axes = tuple(axes) # required for numpy.sum\n volume_list = [\n cell_vol if ax in axes else 1\n for ax, cell_vol in enumerate(self.cell_volume_data)\n ]\n cell_volumes = functools.reduce(np.outer, volume_list)\n\n # determine the axes over which we will integrate\n if not isinstance(data, np.ndarray) or data.ndim < self.num_axes:\n # deal with the case where data is not supplied for each support\n # point, e.g., when a single scalar is integrated over the grid\n data = np.broadcast_to(data, self.shape)\n\n elif data.ndim > self.num_axes:\n # deal with the case where more than a single value is provided per\n # support point, e.g., when a tensorial field is integrated\n offset = data.ndim - self.num_axes\n if axes is None:\n # integrate over all axes of the grid\n axes = tuple(range(offset, data.ndim))\n else:\n # shift the indices to account for the data shape\n axes = tuple(offset + i for i in axes)\n\n # calculate integral using a weighted sum along the chosen axes\n integral = (data * cell_volumes).sum(axis=axes)\n\n if self._mesh is None or len(self._mesh) == 1:\n # standard case of a single integral\n return integral # type: ignore\n\n else:\n # we are in a parallel run, so we need to gather the sub-integrals from all\n from mpi4py.MPI import COMM_WORLD\n\n integral_full = np.empty_like(integral)\n COMM_WORLD.Allreduce(integral, integral_full)\n return integral_full # type: ignore", "title": "" }, { "docid": "fd3392f477d8dc6c6bf3cd9018b2ed64", "score": "0.48363113", "text": "def isIntegral(self):\n return self.precision==0", "title": "" }, { "docid": 
"4f3609a89a2c6d282b44f49c3d9a66c9", "score": "0.48355156", "text": "def _validate_normalize_axes(axes, axis, keepdims, input_coredimss, output_coredimss):\n nin = len(input_coredimss)\n nout = 1 if not isinstance(output_coredimss, list) else len(output_coredimss)\n\n if axes is not None and axis is not None:\n raise ValueError(\n \"Only one of `axis` or `axes` keyword arguments should be given\"\n )\n if axes and not isinstance(axes, list):\n raise ValueError(\"`axes` has to be of type list\")\n\n output_coredimss = output_coredimss if nout > 1 else [output_coredimss]\n filtered_core_dims = list(filter(len, input_coredimss))\n nr_outputs_with_coredims = len([True for x in output_coredimss if len(x) > 0])\n\n if keepdims:\n if nr_outputs_with_coredims > 0:\n raise ValueError(\"`keepdims` can only be used for scalar outputs\")\n output_coredimss = len(output_coredimss) * [filtered_core_dims[0]]\n\n core_dims = input_coredimss + output_coredimss\n if axis is not None:\n if not isinstance(axis, int):\n raise ValueError(\"`axis` argument has to be an integer value\")\n if filtered_core_dims:\n cd0 = filtered_core_dims[0]\n if len(cd0) != 1:\n raise ValueError(\n \"`axis` can be used only, if one core dimension is present\"\n )\n for cd in filtered_core_dims:\n if cd0 != cd:\n raise ValueError(\n \"To use `axis`, all core dimensions have to be equal\"\n )\n\n # Expand dafaults or axis\n if axes is None:\n if axis is not None:\n axes = [(axis,) if cd else tuple() for cd in core_dims]\n else:\n axes = [tuple(range(-len(icd), 0)) for icd in core_dims]\n elif not isinstance(axes, list):\n raise ValueError(\"`axes` argument has to be a list\")\n axes = [(a,) if isinstance(a, int) else a for a in axes]\n\n if (\n (nr_outputs_with_coredims == 0)\n and (nin != len(axes))\n and (nin + nout != len(axes))\n ) or ((nr_outputs_with_coredims > 0) and (nin + nout != len(axes))):\n raise ValueError(\n \"The number of `axes` entries is not equal the number of input and output arguments\"\n )\n\n # Treat outputs\n output_axes = axes[nin:]\n output_axes = (\n output_axes\n if output_axes\n else [tuple(range(-len(ocd), 0)) for ocd in output_coredimss]\n )\n input_axes = axes[:nin]\n\n # Assert we have as many axes as output core dimensions\n for idx, (iax, icd) in enumerate(zip(input_axes, input_coredimss)):\n if len(iax) != len(icd):\n raise ValueError(\n \"The number of `axes` entries for argument #{} is not equal \"\n \"the number of respective input core dimensions in signature\".format(\n idx\n )\n )\n if not keepdims:\n for idx, (oax, ocd) in enumerate(zip(output_axes, output_coredimss)):\n if len(oax) != len(ocd):\n raise ValueError(\n \"The number of `axes` entries for argument #{} is not equal \"\n \"the number of respective output core dimensions in signature\".format(\n idx\n )\n )\n else:\n if input_coredimss:\n icd0 = input_coredimss[0]\n for icd in input_coredimss:\n if icd0 != icd:\n raise ValueError(\n \"To use `keepdims`, all core dimensions have to be equal\"\n )\n iax0 = input_axes[0]\n output_axes = [iax0 for _ in output_coredimss]\n\n return input_axes, output_axes", "title": "" }, { "docid": "c98f08f5e0f21de7cdec50c75a58ca90", "score": "0.48316625", "text": "def _get_out_shape(in_shape, dim = 1, direction = +1, real_transform = False):\n if real_transform:\n if direction == +1:\n return in_shape[:-1] + (in_shape[-1]//2,)\n else:\n return in_shape[:-1] + (in_shape[-1] * 2,)\n else:\n return in_shape", "title": "" }, { "docid": "514c2b5647c2e278a5a0c30afbe05646", "score": "0.48250335", "text": 
"def check_exact_dimension_and_auxiliary_matrix(f):\n\n def _check_exact_dimension_and_auxiliary_matrix(*args):\n \"\"\"Wraps the dimension checking in order to provide additional logic.\n\n Returns:\n The wrapped function output.\n\n \"\"\"\n\n # Retrieving the object and the input from arguments\n obj, x = args[0], args[1]\n\n # Tries to squeeze the last dimension of `x` as it might be an array of (dim, 1)\n try:\n x = np.squeeze(x, axis=1)\n\n except ValueError:\n pass\n\n # If the input dimensions differs from function's allowed dimensions\n if x.shape[0] not in [2, 10, 30, 50]:\n raise e.SizeError(f'{obj.name} input should be 2-, 10-, 30- or 50-dimensional')\n\n if x.shape[0] == 2:\n setattr(obj, 'M', obj.M2)\n\n elif x.shape[0] == 10:\n setattr(obj, 'M', obj.M10)\n\n elif x.shape[0] == 30:\n setattr(obj, 'M', obj.M30)\n\n elif x.shape[0] == 50:\n setattr(obj, 'M', obj.M50)\n\n return f(obj, x)\n\n return _check_exact_dimension_and_auxiliary_matrix", "title": "" }, { "docid": "7850606dbcc5f037fb62d6d85fb5c3aa", "score": "0.48239166", "text": "def test_s_enhance_errors():\n\n arr = np.arange(28800).reshape((2, 20, 20, 12, 3))\n with pytest.raises(ValueError):\n spatial_coarsening(arr, s_enhance=3)\n with pytest.raises(ValueError):\n spatial_coarsening(arr, s_enhance=7)\n with pytest.raises(ValueError):\n spatial_coarsening(arr, s_enhance=40)\n\n arr = np.ones(10)\n with pytest.raises(ValueError):\n spatial_coarsening(arr, s_enhance=5)\n\n arr = np.ones((4, 4))\n with pytest.raises(ValueError):\n spatial_coarsening(arr, s_enhance=2)", "title": "" }, { "docid": "410883d15336f1f3a76c520679e728a7", "score": "0.48204818", "text": "def symmetric_2d(self, x: np.array) -> bool:\n return np.allclose(x, x.T, atol=self.tol)", "title": "" }, { "docid": "b1c0c76c648a3134df886607041ad8df", "score": "0.4816043", "text": "def test_do_interpolate(my_data, method, order):\n loss = do_interpolate(my_data, method=method, order=order)\n assert 0 < loss < np.inf", "title": "" }, { "docid": "f36b2b0491679907dfbb186f7b48abc0", "score": "0.48080644", "text": "def test_flatten13():\n x = np.ones(shape=[2, 3, 4, 5])\n start_axis = -2\n stop_axis = -1\n res = np.ones(shape=[2, 3, 20])\n obj.run(res=res, x=x, start_axis=start_axis, stop_axis=stop_axis)", "title": "" }, { "docid": "473afe1b65722da72d29c7a97b5a907a", "score": "0.4792067", "text": "def test_nz_qnorm_irizarry_code(self):\n irizarry_res_sxf = self.nz_qnorm(self.irizarry_ex_sxf)\n res_shape = irizarry_res_sxf.shape\n true_shape = self.irizarry_code_res_sxf.shape\n self.assertEqual(res_shape, true_shape,\n 'Shape mismatch: {} vs {}'.format(res_shape, true_shape))\n nptest.assert_allclose(irizarry_res_sxf, self.irizarry_code_res_sxf, rtol=0.001)\n return", "title": "" }, { "docid": "1dc5556ca32119d2b907494c1da850b9", "score": "0.47906113", "text": "def test_GridDataInterpolator_2DGrid():\n from pyirf.interpolation import GridDataInterpolator\n\n grid_points = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])\n target_point = np.array([[0.5, 0.5]])\n\n dummy_data1 = np.array([[[0, 1], [1, 1]], [[0, 2], [2, 3]], [[0, 3], [3, 5]]])\n dummy_data2 = np.array([[[0, 2], [1, 2]], [[0, 4], [2, 6]], [[0, 6], [3, 10]]])\n dummy_data3 = np.array([[[0, 1], [2, 1]], [[0, 2], [4, 3]], [[0, 3], [6, 5]]])\n dummy_data4 = np.array([[[0, 2], [2, 2]], [[0, 4], [4, 6]], [[0, 6], [6, 10]]])\n\n dummy_data = np.array([dummy_data1, dummy_data2, dummy_data3, dummy_data4])\n\n interpolator = GridDataInterpolator(\n grid_points=grid_points, params=dummy_data, method=\"linear\"\n )\n 
interpolant = interpolator(target_point)\n\n dummy_data_target = 1.5 * dummy_data1\n\n assert np.allclose(interpolant, dummy_data_target)\n assert interpolant.shape == (1, *dummy_data.shape[1:])", "title": "" }, { "docid": "78763b9a86e3d52771e5984f3b1f6a76", "score": "0.4786542", "text": "def xr_int_along_axis(xa, DZ, axis):\n assert type(axis)==np.dtype(int) or axis in xa.dims\n assert np.shape(xa)==np.shape(DZ)\n assert axis<=len(np.shape(xa))\n \n integral = np.sum(xa*DZ, axis=axis)\n \n return integral", "title": "" }, { "docid": "9e036ebd6367c582f06a4fd396f3e6cb", "score": "0.47839087", "text": "def integrate(self):\n return self._compute_integral_mean_and_variance()", "title": "" }, { "docid": "c52ca29dd99d25e9977cca19d25fb241", "score": "0.47810954", "text": "def test_3rd_order_gauss_quadrature(self):\n def fun(y, x):\n \"\"\"\n Integration test function\n \"\"\"\n return x**3 + y**3\n\n element_integral = 0\n for pt, w in zip(Q8.integration_points_3, Q8.integration_weights_3):\n element_integral += w * fun(*pt)\n\n reference_integral = integrate.dblquad(fun, -1, 1, lambda x: -1, lambda x: 1)\n\n self.assertTrue(np.isclose(element_integral, reference_integral[0]))", "title": "" }, { "docid": "5f2eaab26a29bb1d6849f43babdcb2fe", "score": "0.47807398", "text": "def remove_dimension(trial, param):\n if param == self.param:\n adapted_trial = copy.deepcopy(trial)\n # pylint: disable=protected-access\n del adapted_trial._params[adapted_trial._params.index(self.param)]\n adapted_trials.append(adapted_trial)\n return True\n\n return False", "title": "" }, { "docid": "7766f7f6cb8244f00d52541b1789065b", "score": "0.4778484", "text": "def _validate(self):\r\n if len(set(self._interp_dims)) != len(self._src_coords):\r\n raise ValueError('Coordinates repeat a data dimension - the '\r\n 'interpolation would be over-specified.')\r\n\r\n for coord in self._src_coords:\r\n if coord.ndim != 1:\r\n raise ValueError('Interpolation coords must be 1-d for '\r\n 'rectilinear interpolation.')\r\n\r\n if not isinstance(coord, DimCoord):\r\n # Check monotonic.\r\n if not iris.util.monotonic(coord.points, strict=True):\r\n msg = 'Cannot interpolate over the non-' \\\r\n 'monotonic coordinate {}.'\r\n raise ValueError(msg.format(coord.name()))", "title": "" }, { "docid": "8df5cea4c2b821411c60ad667f0aaa64", "score": "0.47729105", "text": "def numerical_integrate_1D(func, domain, *params):\n delta_x = domain[1:] - domain[:-1]\n return sum((func(domain, *params)[:-1]+ func(domain, *params)[1:])/2 * delta_x)", "title": "" }, { "docid": "efa679cc1c8cd8e7af78dc9613713529", "score": "0.47696334", "text": "def integrationOverXi(self):\n return _libBornAgainCore.InterferenceFunctionFinite2DLattice_integrationOverXi(self)", "title": "" }, { "docid": "4070db5edb7c808f31d23436f7f9c744", "score": "0.4767247", "text": "def integrate(self, Region = None):\n if ('Uniform' in self.mesh.properties and 'Cartesian' in self.mesh.properties):\n if Region:\n out = self[Region.pindex].sum() \n else: \n out = self.sum()\n \n out *= self.mesh.spacing \n \n return out.astype(self.dtype)", "title": "" }, { "docid": "74cbea9432cf0ecba9cda945e955d9d0", "score": "0.47660792", "text": "def test_separate_coords(arr, expected1, expected2):\n assert EnsembleKalmanFilter.separate_coords(arr) == (expected1, expected2)", "title": "" }, { "docid": "a74d084a2b4abf3446ac5c289fd09032", "score": "0.4765284", "text": "def test_get_arr_1d():\n # 1d\n a = np.arange(3)\n arr_1d, i, bc_idx = _get_arr_1d(a, [1], 0)\n assert i == 1\n assert bc_idx == (...,)\n 
np.testing.assert_equal(arr_1d, a)\n\n # 2d\n a = np.arange(4).reshape(2, 2)\n arr_1d, i, bc_idx = _get_arr_1d(a, [0, 0], 0)\n assert i == 0\n assert bc_idx == (..., 0)\n np.testing.assert_equal(arr_1d, a[:, 0])\n\n arr_1d, i, bc_idx = _get_arr_1d(a, [1, 1], 1)\n assert i == 1\n assert bc_idx == (..., 1)\n np.testing.assert_equal(arr_1d, a[1, :])\n\n # 3d\n a = np.arange(8).reshape(2, 2, 2)\n arr_1d, i, bc_idx = _get_arr_1d(a, [0, 0, 0], 0)\n assert i == 0\n assert bc_idx == (..., 0, 0)\n np.testing.assert_equal(arr_1d, a[:, 0, 0])\n\n arr_1d, i, bc_idx = _get_arr_1d(a, [1, 1, 0], 1)\n assert i == 1\n assert bc_idx == (..., 1, 0)\n np.testing.assert_equal(arr_1d, a[1, :, 0])\n\n arr_1d, i, bc_idx = _get_arr_1d(a, [1, 1, 0], 2)\n assert i == 0\n assert bc_idx == (..., 1, 1)\n np.testing.assert_equal(arr_1d, a[1, 1, :])", "title": "" } ]
2fe086c3e5a427da90d6431edd2ef5c7
Browse the file system and update the control instance accordingly. If a valid directory has already been entered, the dialogue will automatically point to this folder; otherwise the current working directory is used.
[ { "docid": "897bc328a97b5b014ceb9ef88458331d", "score": "0.779447", "text": "def onBrowseClicked(control_instance):\n # Get the current directory\n current_control_value = os.path.join(os.getcwd(), os.pardir)\n if DirectoryControlWidget.is_valid(control_instance):\n current_control_value = six.text_type(control_instance.path.text())\n\n # Create a dialog to select a directory\n folder = qt_backend.getExistingDirectory(\n get_ref(control_instance), \"Open directory\", current_control_value,\n QtGui.QFileDialog.ShowDirsOnly\n | QtGui.QFileDialog.DontUseNativeDialog)\n\n # Set the selected directory to the path sub control\n control_instance.path.setText(six.text_type(folder))", "title": "" } ]
[ { "docid": "5a030732cf12ef84300d8a20743a3891", "score": "0.8512092", "text": "def _browse(self):\n\n initial_dir = self._variable.get()\n initial_dir = os.path.abspath(os.path.expanduser(initial_dir))\n\n self.options['initialdir'] = initial_dir\n dlg = filedialog.Directory(self.master, **self.options)\n new_dir = dlg.show()\n\n if new_dir:\n new_dir = shorten(new_dir)\n self.value = new_dir", "title": "" }, { "docid": "28ebe030b8c28676ce86a1dddb4994d8", "score": "0.7814108", "text": "def browseDirectory(self):\n \n path = tk.filedialog.askdirectory(\n title=\"Choose a directory\",\n initialdir=self.controller.destinationDir).replace(\"/\", \"\\\\\")\n\n if path:\n self.saveButton.config(\n background=\"yellow2\",\n activebackground=\"yellow2\")\n\n self.entryText.set(path)", "title": "" }, { "docid": "d956bc3b4ddcc5b85448fe3f10ee62ab", "score": "0.7581965", "text": "def _browse(self):\n\n initial_file = self._variable.get()\n\n if initial_file:\n initial_dir = os.path.split(initial_file)[0]\n self.options['initialdir'] = initial_dir\n\n if self.mode == 'open':\n dlg = filedialog.Open(self.master, **self.options)\n else:\n dlg = filedialog.SaveAs(self.master, **self.options)\n\n new_file = dlg.show()\n\n if new_file:\n new_file = shorten(new_file)\n self.value = new_file", "title": "" }, { "docid": "4efb6511d238530adfe05bb1a51fc7ae", "score": "0.74403816", "text": "def open_dir(self, event):\n dlg = wx.DirDialog(self, \"Choose default directory\", style=wx.DD_DEFAULT_STYLE)\n if dlg.ShowModal() == wx.ID_OK:\n self.settings['fp'] = dlg.GetPath()\n self.dir_txt.SetValue(self.settings['fp'])\n dlg.Destroy()\n pub.sendMessage('fp', msg=self.settings['fp'])", "title": "" }, { "docid": "ea2d2a509a0bbae5f6e64ede66353eb4", "score": "0.7431", "text": "def browseFolder(self):\n # popup folder selection dialog\n dir_path = getAFolder()\n if dir_path != \"\":\n self.filePath = str(dir_path)\n self.selectImageButton.setHidden(True)\n self.selectFolder.setHidden(True)\n self.imageCanvas.setHidden(False)\n self.ignoreFolds = set()\n self.currentFileNumber = 0\n self.onImageChanged()\n self.processFolder()", "title": "" }, { "docid": "0a99dab5d273da38824fddab5293a56a", "score": "0.74279505", "text": "def input_path_browse_button_clicked():\n directory = QFileDialog.getExistingDirectory(self, \"Find Files\", QDir.currentPath())\n self.input_path_line_edit.setText(directory)", "title": "" }, { "docid": "a1a671f72834a0692b5a8f65955bb502", "score": "0.742454", "text": "def browse_directory(self, directory_input):\n\n # Open the file dialogue\n directory_name = filedialog.askdirectory(initialdir=self.cwd)\n\n # Don't replace the directory input value if user cancels\n if directory_name is not None and directory_name != '':\n directory_input.delete(0, END) # Clear current value in entry widget\n directory_input.insert(0, directory_name) # Add user-selected value to entry widget", "title": "" }, { "docid": "df465049795f7ad25fc5d7458e0b9b96", "score": "0.7378925", "text": "def browse(self):\r\n\r\n self.filepath.set(fd.askopenfilename(initialdir=self._initaldir,\r\n filetypes=self._filetypes))", "title": "" }, { "docid": "091eed2108efb32916ee6b5b6dbe1af8", "score": "0.73339975", "text": "def onDir(self, event):\n dlg = wx.DirDialog(self, \"Choose a directory:\",\n style=wx.DD_DEFAULT_STYLE\n #| wx.DD_DIR_MUST_EXIST\n #| wx.DD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n self.folder=dlg.GetPath()\n dlg.Destroy() \n self.foldadd()", "title": "" }, { "docid": "5f391f64b08f55a6987495c92c3fa145", "score": 
"0.71891147", "text": "def dir_browse(self):\n\n if not self.is_running():\n # dirpath will be None or a string\n msg, pth = RegistryHelpers.GetDirFromUserQT(self, RegistryKey='klusterintel',\n Title='Select directory to monitor', AppName='klusterintel')\n if pth is not None:\n self.fil_text.setText(pth)\n else:\n print('You have to stop monitoring before you can change the path')", "title": "" }, { "docid": "07fe581e76af5a38fd30ed911d4df968", "score": "0.7138757", "text": "def onDir(self, event):\n dlg = wx.DirDialog(self, \"Choose a directory:\",\n style=wx.DD_DEFAULT_STYLE\n #| wx.DD_DIR_MUST_EXIST\n #| wx.DD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n self.folder=dlg.GetPath()\n self.folderadd()\n dlg.Destroy()", "title": "" }, { "docid": "1e60f451f79b10050d6fa641e5f774c6", "score": "0.7078267", "text": "def open_f(self):\n\n self.dirname = QFileDialog.getExistingDirectory()\n if self.dirname and len(self.dirname) > 0:\n self.text_file.setText(self.dirname)", "title": "" }, { "docid": "d44b9ba513741611784f668f5bac1dcb", "score": "0.7068113", "text": "def onDiradd(self):\n self.top=1 \n dlg = wx.DirDialog(self, \"Choose a directory:\",\n style=wx.DD_DEFAULT_STYLE\n #| wx.DD_DIR_MUST_EXIST\n #| wx.DD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n self.folder=dlg.GetPath()\n self.foldradd()\n dlg.Destroy()", "title": "" }, { "docid": "194e2cdd6e91278ecabdd2b2d70f4f03", "score": "0.7047211", "text": "def ask_directory(self):\n self.directory = tkFileDialog.askdirectory(**self.dir_opt)", "title": "" }, { "docid": "2669963175a658f3d01a8ba7ff199e22", "score": "0.7013578", "text": "def browseFiles(self):\n\n temp_path = filedialog.askopenfilename(title = \"Select a File\", filetypes = ((\"Text Files\", \"*.txt\"), (\"All Files\", \"*\"))) \n if temp_path:\n self.file_path = temp_path\n self.label_file_explorer.configure(text = \"File: \" + self.file_path)\n self.analyze()", "title": "" }, { "docid": "c1f722d0fee5a12e68235869b0856912", "score": "0.6981006", "text": "def output_path_browse_button_clicked():\n directory = QFileDialog.getExistingDirectory(self, \"Find Files\", QDir.currentPath())\n self.output_path_line_edit.setText(directory)", "title": "" }, { "docid": "a2769fc43798d33f0894ee8024152d6c", "score": "0.6971456", "text": "def onOpenFile(self, event):\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.OPEN | wx.MULTIPLE | wx.CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n x = dlg.GetPaths()\n self.paths=x[0]\n self.fileadd() \n dlg.Destroy()", "title": "" }, { "docid": "03053fb6ee60399f4a80d7752d3c6d07", "score": "0.69596636", "text": "def do_browse_data_directory(self):\n # get the data directory from text line or from default\n if str(self.ui.lineEdit_iptsDir.text()).strip() != '':\n # from IPTS Directory\n default_dir = str(self.ui.lineEdit_iptsDir.text()).strip()\n else:\n # from default\n default_dir = self._homeDir\n\n # user-specified data directory\n data_dir = str(QFileDialog.getExistingDirectory(self, 'Get Directory',\n default_dir))\n self.ui.lineEdit_iptsDir.setText(data_dir)\n self._iptsDir = data_dir\n self._iptsDirFromDir = data_dir\n self._iptsNumber = None\n\n # Enable next step\n self.ui.groupBox_scanIptsInfo.setEnabled(True)\n\n return", "title": "" }, { "docid": "140947aeb5f7e1d9424ad24bda874d82", "score": "0.6942863", "text": "def ask_path(self):\n\n default_dir = os.path.dirname(self.path)\n\n _dir = QFileDialog.getExistingDirectory(self, dir=default_dir)\n if _dir:\n self.path = _dir", 
"title": "" }, { "docid": "a4dc3897f08db9e961886ce0af8a8ec6", "score": "0.6942444", "text": "def selectPath(self):\n dirname = QtWidgets.QFileDialog.getExistingDirectory(self)\n if len(dirname) == 0:\n return\n self.setText(dirname)", "title": "" }, { "docid": "6da3bd7acd4461bd44a51b1885046ed6", "score": "0.6936279", "text": "def __SelectFolder(self):\n op={}\n op[\"initialdir\"]=os.getcwd()\n f = filedialog.askdirectory(**op)\n if f==\"\":\n return\n filepath = os.path.abspath(f)\n if os.path.isdir(filepath):\n self.__CleanView()\n self.root.title(filepath)\n self.__workdir= os.path.abspath(filepath)\n self.lf.ShowWithoutAnalys(filepath)\n self.__countofLefttree = self.lf.GetCount()\n self.__SetStateofSearchMenu()", "title": "" }, { "docid": "44f29bd750498cee4a9b7d73d8ef4275", "score": "0.69354516", "text": "def directoryPathChooserClicked(self, textfield):\n path = textfield.text()\n path = QFileDialog.getExistingDirectory(self, \"Select Folder\", path)\n textfield.setText(path)", "title": "" }, { "docid": "90441004285dbc840199a58133807566", "score": "0.68388385", "text": "def button_browse_callback(self):\n options = {}\n options['initialdir'] = '.'\n options['title'] = 'Select the log directory'\n options['mustexist'] = False\n self.fileName = FileDiag.askdirectory(**options)\n info(\"***%s\" % self.fileName)\n if self.fileName == \"\":\n return None\n else:\n return self.fileName", "title": "" }, { "docid": "ce59e1feaa64f9fc196ae6e9488a760a", "score": "0.6827011", "text": "def open_directory_dialogue():\n global data_directory\n data_directory = filedialog.askdirectory()", "title": "" }, { "docid": "9b33babacb5ef6cc5147f62b237ae427", "score": "0.6822368", "text": "def askdirectory(self):\n \n des = tkFileDialog.askdirectory(title = \"The Destination folder is \")\n \n if des:\n self.submit_name(des)", "title": "" }, { "docid": "d939a34f3e990880023cf68db06f6234", "score": "0.67647696", "text": "def onSetDir(self, event):\n # dialog = wx.TextEntryDialog(None, \"Point to new directory. 
Currently set to %s\" % self.saveDir,\n # \"Set Save Directory\", \"%s\" % self.saveDir, wx.OK | wx.CANCEL)\n dialog = wx.DirDialog(self, \"Choose a save directory\", defaultPath=\"/home/mro/data\")\n answer = dialog.ShowModal()\n dialog.Destroy()\n\n if answer == wx.ID_OK:\n setTo = str(dialog.GetPath()) + \"/\"\n self.saveDir = setTo\n self.parent.saveDirectoryText.SetLabel(u\"Saving \\u2192 %s\" % self.saveDir)\n self.parent.Layout() # This recenters the static text\n logger.debug(\"Directory: \" + self.saveDir)", "title": "" }, { "docid": "543f4b7dc9b00d829537db1013016bfe", "score": "0.67494184", "text": "def _create_file_dialog ( self ):\r\n dlg = wx.DirDialog( self.control, message = 'Select a Directory' )\r\n dlg.SetPath( self._file_name.GetValue() )\r\n return dlg", "title": "" }, { "docid": "74aa93866ba310b886088f071bdc6191", "score": "0.67004776", "text": "def browse(self):\n startdir = self.dest.text() or os.getcwd()\n path = qtw.QFileDialog.getSaveFileName(self, 'Kies een bestand', startdir)\n if path[0]:\n self.dest.setText(path[0])", "title": "" }, { "docid": "e66af338f87cde8c552204146c68d6b9", "score": "0.667561", "text": "def askdirectory(self):\n return filedialog.askdirectory(title=\"Select A Folder\", mustexist=1)", "title": "" }, { "docid": "8fa4ecfb3ecb2ff02b31f4e644e92d1c", "score": "0.66730225", "text": "def _showChangePathDialog(self):\n path = self.path()\n\n if not path:\n path = os.path.expanduser(\"~\")\n\n path = QtWidgets.QFileDialog.getExistingDirectory(\n None,\n \"Choose a root folder location\",\n path\n )\n\n return studiolibrary.normPath(path)", "title": "" }, { "docid": "30bcd7e2d09c426b3c598169f2b11d3e", "score": "0.66609615", "text": "def _open_folder():\n tk.Tk().withdraw()\n return filedialog.askdirectory()", "title": "" }, { "docid": "73540a5dc0c93c289c87cbfe3df43d3f", "score": "0.66517067", "text": "def get_dir (self, event=None):\n msg = \"Please choose a file in the chosen directory. \"\\\n \"The directory from which the file was taken \"\\\n \"will be chosen as the default directory. 
\"\\\n \"If you enter an empty entry, an intelligent \"\\\n \"choice of directory will be made automatically at run-time.\"\n tkMessageBox.showinfo (\"Choose directory\", msg)\n tk_fopen = tkFileDialog.askopenfilename\n f_name = tk_fopen (title=\"Choose file to select directory\",\n initialdir=self.cfg.initial_dir,\n filetypes=[(\"All files\", \"*\")])\n if f_name:\n direc = os.path.dirname (f_name)\n self.cfg.initial_dir = direc\n self.initial_dir.set (direc)", "title": "" }, { "docid": "649b45185e27ccf9f07b1356a37beff2", "score": "0.66298395", "text": "def output_dir_change_action(self):\n fileDialog = QFileDialog()\n directory = fileDialog.getExistingDirectory()\n self.dynamic.output_directory.setText(directory)", "title": "" }, { "docid": "1766d7974f8fae48d55b2ad045380181", "score": "0.6627282", "text": "def onOpen(self, event):\r\n dlg = wx.DirDialog( self, \"Select Repository Root\", '') # parentWindow, Test, defaultPath ( save the last to make it open to the same one every time )\r\n if dlg.ShowModal() == wx.ID_OK:\r\n self._initTreeView( dlg.GetPath() )", "title": "" }, { "docid": "a4a9888d315746315458c0618a927133", "score": "0.66179836", "text": "def button_open_downloads_file_chooser_clicked(self, widget, data=None):\n dialog = gtk.FileChooserDialog(_(\"Choose Folder...\"),\n None,\n gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,\n (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,\n gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n dialog.set_current_folder(os.path.expanduser(\"~\"))\n dialog.set_default_response(gtk.RESPONSE_OK)\n response = dialog.run()\n if response == gtk.RESPONSE_OK:\n self.downloads_directory = dialog.get_current_folder()\n self.downloads_text_entry.set_text(self.downloads_directory)\n self.db_session.variable_set('downloads_directory', self.downloads_directory)\n dialog.destroy()", "title": "" }, { "docid": "a5fbf1277e66ad98321829ad49b6aaf5", "score": "0.65866774", "text": "def browser_btn():\n browser_box = Tk()\n browser_box.withdraw()\n browser_box.filename = filedialog.askdirectory(initialdir=os.environ['USERPROFILE'] + \"\\\\Downloads\",\n title=\"Select Download Directory\")\n\n # This is the code to only replace the path if user have select a folder\n if browser_box.filename != \"\":\n path.configure(state=NORMAL)\n path.unbind('<Button-1>')\n path.delete(0, END)\n path.insert(0, str(browser_box.filename))\n\n browser_box.destroy()", "title": "" }, { "docid": "877dce526d4721ad1c6b308b114b1153", "score": "0.6584735", "text": "def browse_folder(self):\n # Get the file name/path from the user input in the file manager\n filename = tkinter.filedialog.asksaveasfilename(initialdir=\".\", title=\"Save JSON file\",\n filetypes=((\"json files\", \"*.json\"), (\"txt files\", \"*.txt\"),\n (\"All files\", \"*.*\")))\n\n # Update the file name/path to the file name entry\n self.filename_entry.delete(0, tkinter.END)\n self.filename_entry.insert(0, filename)", "title": "" }, { "docid": "f60a6c861a80dd09e941e65640f82582", "score": "0.65813696", "text": "def onDirIn(self, event):\n dlg = wx.DirDialog(self, \"Choose a directory:\",\n style=wx.DD_DEFAULT_STYLE\n # | wx.DD_DIR_MUST_EXIST\n # | wx.DD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n self.inputDirectory = dlg.GetPath()\n self.inputText.SetLabel(self.inputDirectory)\n self.inputText.SetForegroundColour(\"black\") # set text color\n\n mylist = [f for f in glob.glob(os.path.join(self.inputDirectory, \"*.jpg\"))]\n print(not mylist)\n if not mylist:\n self.genText.SetLabel(\"* Valid images not found\")\n 
self.genText.SetForegroundColour(\"red\")\n self.inputDirectory = \"\"\n\n else:\n self.files = mylist\n myNames = [os.path.split(f)[1] for f in mylist]\n\n self.num_images = len(mylist)\n self.mainIterator = get_dataset(mylist, np.zeros(self.num_images), (299,299), 1,\n tf.keras.applications.inception_v3.preprocess_input)\n\n self.genText.SetLabel(\"* Found {} validated images\".format(self.num_images))\n self.genText.SetForegroundColour(\"black\")\n\n if not self.num_images * self.sliderValue // 100:\n self.outGenText.SetLabel(\"* No images will be saved. Percentage too low.\")\n self.outGenText.SetForegroundColour(\"red\")\n else:\n self.outGenText.SetLabel(\n \"* Saving {} best images\".format(self.num_images * self.sliderValue // 100))\n self.outGenText.SetForegroundColour(\"black\")\n dlg.Destroy()", "title": "" }, { "docid": "933f2d89283ca17cb4480b50edfe659a", "score": "0.6543978", "text": "def askdir(self, var, label):\n \n f = tkFileDialog.askdirectory(title='Select Directory', initialdir='R:/soundcast/inputs/lodes')\n \n label.config(text=f)\n update_input_config(var, f)", "title": "" }, { "docid": "d052929cec2d6df3b052a03ae76d4b12", "score": "0.6535897", "text": "def _add(self):\n filenames = list()\n dlg = sppasFileDialog(self)\n if os.path.exists(self.__current_dir):\n dlg.SetDirectory(self.__current_dir)\n if dlg.ShowModal() == wx.ID_OK:\n filenames = dlg.GetPaths()\n dlg.Destroy()\n\n if len(filenames) > 0:\n added = self.FindWindow(\"filestree\").AddFiles(filenames)\n if added:\n self.__current_dir = os.path.dirname(filenames[0])\n self.notify()", "title": "" }, { "docid": "2fd430ad8a9db44e6a521eae62210dfa", "score": "0.6532352", "text": "def open_ingestion_directory_selector(self, textbox_widget=None, team=\"root\"):\n directory = QFileDialog.getExistingDirectory(None, 'Select a folder:', 'C:\\\\', QFileDialog.ShowDirsOnly)\n if textbox_widget is None:\n print(directory)\n else:\n self.controller.update_folder_path(team, directory)\n textbox_widget.setPlainText(str(directory))", "title": "" }, { "docid": "62fec91a835a5470bed773c1e329aa3d", "score": "0.65273994", "text": "def displayFileDialog(self):\n return filedialog.askdirectory()", "title": "" }, { "docid": "f82e5fccac60cb735a14da42d201b6b5", "score": "0.6525494", "text": "def BrowseForFolder(self):\n pass", "title": "" }, { "docid": "066df63b4c94f56fc2118774fb3e4712", "score": "0.65220475", "text": "def _select_dir(self):\n with RememberDir(self) as rd:\n self.work_dir, load_ok = rd.get_dir()\n\n if load_ok:\n self._load2gui()", "title": "" }, { "docid": "e5908d8cec6f2a68ecac8f58cc293cd3", "score": "0.6507518", "text": "def onDirOut(self, event):\n dlg = wx.DirDialog(self, \"Choose a directory:\",\n style=wx.DD_DEFAULT_STYLE\n # | wx.DD_DIR_MUST_EXIST\n # | wx.DD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n self.outputDirectory = dlg.GetPath()\n self.outputText.SetLabel(self.outputDirectory)\n self.outputText.SetForegroundColour(\"black\") # set text color\n dlg.Destroy()", "title": "" }, { "docid": "f8682029c0addf7b83c2da98b7d8f164", "score": "0.6504469", "text": "def dir_find(self):\n directory = QtWidgets.QFileDialog.getExistingDirectory(self, \"Выберите каталог\")\n self.line_edit_with_buttons.set_text(directory)", "title": "" }, { "docid": "7e579437a3fdca8fb0e763badcb257c8", "score": "0.6493883", "text": "def update_object ( self, event ):\r\n if self.control is not None:\r\n path = self.control.GetPath()\r\n if isdir( path ):\r\n self.value = path", "title": "" }, { "docid": 
"a0bdfe0e0cc9941e0985a99f6ed473fc", "score": "0.64692336", "text": "def on_scan_btn_clicked(self):\n path = QFileDialog.getExistingDirectory(self.base,\n _(\"Select a directory with books or \"\n \"your eReader's drive\"),\n self.base.last_dir,\n QFileDialog.ShowDirsOnly)\n if path:\n self.base.last_dir = path\n self.base.high_list.clear()\n self.base.reload_highlights = True\n self.base.loading_thread(Scanner, path, self.base.kor_text, clear=False)", "title": "" }, { "docid": "e4ec113b1714ab2a156912943fa5d5c6", "score": "0.645265", "text": "def on_browse_userpath(self, evt):\n try:\n open_file.open_file(pathfinder.user_path())\n except IOError: # file not found\n module_logger.error(\"User folder {0} not found.\".format(pathfinder.user_path()))\n err_msg = \"Unable to find folder '{0}'.\\nPlease ensure the folder exists.\".format(pathfinder.user_path())\n err_dlg = wx.MessageDialog(self.view, message=err_msg,\n caption=\"Unable To Open Folder\", style=wx.ICON_ERROR)\n err_dlg.ShowModal()\n err_dlg.Destroy()\n except OSError as err: # other OS error\n module_logger.error(\"Unidentified OS error {0}\".format(err))\n err_msg = \"Unable to browse to data folder, error reported was:\\n{0}\".format(err)\n err_dlg = wx.MessageDialog(self.view, message=err_msg,\n caption=\"Unable To Open Folder\", style=wx.ICON_ERROR)\n err_dlg.ShowModal()\n err_dlg.Destroy()", "title": "" }, { "docid": "0a2f4b7c37d5a47728df9b5960dba045", "score": "0.641189", "text": "def open_folder(self):\n self.current_photo = 0\n self.folder = askdirectory()\n if (len(listdir(self.folder)) - 1) != 0:\n self.photos = [\n p\n for p in listdir(self.folder)\n if p.endswith(\".png\") or p.endswith(\".jpg\") or p.endswith(\".gif\")\n ]\n self.total_photos = len(self.photos) - 1\n self.get_photo()\n self.progress_bar()\n else:\n lbl_img.configure(\n text=f\"'{self.folder}' is empty or contains no photos. Choose another folder.\"\n )\n lbl_img.image = f\"'{self.folder}' is empty or contains no photos. Choose another folder.\"", "title": "" }, { "docid": "c1dda33492147b9a06a00c0206d75676", "score": "0.6392816", "text": "def open_dialog(self):\n # Dialog\n self.file_path = fd.askopenfilename(title=\"Select ASC file\",\n initialdir=self.home,\n filetypes=self.file_types)\n\n # Get the file name -- iterate backwards until /, then reverse\n fname = \"\".join(reversed(self.file_path[\n -1:self.file_path.rfind(\"/\"):-1]))\n \n # Set the tkinter var storing the file name\n self.file_name_var.set(fname)\n\n return None", "title": "" }, { "docid": "481c25b25e29b34ca591cd8a13b9eabd", "score": "0.63926655", "text": "def chooseOntologyPath(self):\n path = self.path.text()\n path = QFileDialog.getExistingDirectory(self, 'Choose Directory', path)\n self.path.setText(path)", "title": "" }, { "docid": "4abbf1654340d646117d7cb81fef2cba", "score": "0.637571", "text": "def BrowseForFile(self):\n pass", "title": "" }, { "docid": "92b756921a75e8d14c624f53c4ae582f", "score": "0.63712925", "text": "def cd(self, directory, bot):\n\t\t# If the back_label, such as \"Back\" is clicked,\n\t\t# we're expected to change directory to ..\n\t\tpath = os.path.join(self.data['path'], directory)\n\t\t# Directories sorted using {*} at the end, gets\n\t\t# their {*} removed. Glob checks if a dir that\n\t\t# ends with {*} exists, and if so, replaces path.\n\t\tif glob(path+' {*}/'):\n\t\t\tpath = glob(path+' {*}')[0]\n\t\tif (directory == args['back_label'] and \n\t\t\tself.data['path'] != args['root']):\n\t\t\t# The first element of os.path.split is the .. 
directory\n\t\t\tself.data['path'] = os.path.split(self.data['path'])[0]\n\t\t# If file is a directoy, open it\n\t\telif os.path.exists(path):\n\t\t\tself.data['path'] = path\n\t\tself.ls(bot)\n\t\tself.save_data()", "title": "" }, { "docid": "c426932a49491db3290bdfdcc12818cc", "score": "0.636276", "text": "def select_folder(self, entry_name):\r\n folder_selected = filedialog.askdirectory()\r\n if folder_selected != '':\r\n self.inputs[entry_name].variable.set(folder_selected)\r\n self.inputs['FolderStructure'].update_tree(folder_selected,\r\n sfilter=['.git', '.settings', 'libspecs', '__pycache__',\r\n '.png'])\r\n\r\n self.callbacks['btn_projLocation'](folder_selected)", "title": "" }, { "docid": "f6ccbeba663da9ff17e4620e67e4d91b", "score": "0.63622737", "text": "def __init__(self, title_txt=\"Seleccione carpeta\", button_txt=\"Abrir\", cls=\"dialog\", path=None):\n\n cls1 = \"filedialog\"\n if not path: self.curdir = os.getcwd()\n else: self.curdir = path\n self.dir_img = gui.basic.Image(\n gui.pguglobals.app.theme.get(cls1+\".folder\", \"\", 'image'))\n self.title = gui.basic.Label(title_txt, cls=cls+\".title.label\")\n self.body = gui.Container(width=700, height=300)\n\n self.body.add(gui.basic.Label(\"Carpeta\"), 10, 10)\n\n self.input_dir = gui.input.Input(size=38)\n self.body.add(self.input_dir, 125, 10)\n\n self.button_ok = gui.button.Button(button_txt)\n self.body.add(self.button_ok, 640, 12)\n self.button_ok.connect(gui.const.CLICK, self._button_okay_clicked_, None)\n\n self.list = gui.area.List(width=690, height=235)\n self.body.add(self.list, 10, 50)\n self._list_dir_()\n self.list.connect(gui.const.CHANGE, self._item_select_changed_, None)\n\n self.value = None\n gui.Dialog.__init__(self, self.title, self.body)", "title": "" }, { "docid": "cc2e18bf5666d62db76df45c5bc448e3", "score": "0.6353819", "text": "def browse_file(self, file_input, filetypes):\n\n # Open the file dialog\n file_name = filedialog.askopenfilename(initialdir=self.cwd, filetypes=filetypes)\n\n # Don't replace the file input value if user cancels\n if file_name is not None and file_name != '':\n file_input.delete(0, END) # Clear current value in entry widget\n file_input.insert(0, file_name) # Add user-selected value to entry widget", "title": "" }, { "docid": "e0d5d44fc9407f323a31186eb1d18887", "score": "0.63331634", "text": "def button_openfile(self, event):\n event.Skip()\n actualDir = (os.getcwd() if self.m_comboBox_files.GetValue() else\n os.path.dirname(os.path.abspath(self.m_comboBox_files.GetValue())))\n dlg = wx.FileDialog(self, message=\"Select BOM(s)\", defaultDir=actualDir,\n defaultFile=\"\", wildcard=WILDCARD_BOM,\n style=wx.FD_OPEN | wx.FD_MULTIPLE | wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n self.addFile(dlg.GetPaths())\n self.updateOutputFilename() # Update the output file name on GUI text.\n dlg.Destroy()", "title": "" }, { "docid": "8ca30bc35afaa245b3f0d9e716ce6de9", "score": "0.6323572", "text": "def set_userpath(self):\n try:\n current_user_path = pathfinder.user_path()\n path_dlg = wx.DirDialog(parent=self.view.parent, message=\"Please specify a data folder\",\n defaultPath=current_user_path)\n if path_dlg.ShowModal() == wx.ID_OK:\n new_user_path = path_dlg.GetPath()\n self.model.migrate_user_path(new_user_path)\n self.refresh_data()\n finally:\n path_dlg.Destroy()", "title": "" }, { "docid": "be8d284a3e9ab3cb9455ed3b042f349c", "score": "0.63098764", "text": "def _open_folder_helper(_None):\n dialog = Gtk.FileChooserDialog(\n action=Gtk.FileChooserAction.SELECT_FOLDER)\n 
_quick_buttons(dialog)\n dialog.add_button(Gtk.STOCK_OPEN, Gtk.ResponseType.OK)\n dialog.run()\n path = dialog.get_filename()\n dialog.destroy()\n Gtk.main_quit()\n result.append(path)", "title": "" }, { "docid": "4c6946c5df0a97628e582e32587712af", "score": "0.63047534", "text": "def path_dialog() -> input_dialog:\n return input_dialog(\n title=\"File or Directory Path\", text=\"Input file or directory path: \",\n ).run()", "title": "" }, { "docid": "41359262ae572b7a67bb3af6debbc7c7", "score": "0.6301636", "text": "def selectDir( self ):\n\n #self.showdialog()\n project = QgsProject.instance()\n if QgsExpressionContextUtils.projectScope(project).variable('project_importpath') == None: \n self.vectorPath = QFileDialog.getExistingDirectory(self.dockWidget, 'Selectvector data path', '/home', QFileDialog.ShowDirsOnly)\n QgsExpressionContextUtils.setProjectVariable(project, 'project_importpath', self.vectorPath)\n self.dockWidget.lineEdit.setText(self.vectorPath)\n #C:\\\\Users\\\\timc7\\\\Dropbox (Personal)\\\\Tim\\\\Employment\\\\WestWind Energy\\\\Work\\\\Projects\\\\GPWF\\\\GIS\\\\VectorData\n #P:\\\\P158_GPWF_GoldenPlainsWindFarm\\\\GIS_Maps\\\\VectorData\n\n else: \n originalVectorPath = QgsExpressionContextUtils.projectScope(project).variable('project_importpath')\n self.vectorPath=originalVectorPath\n self.vectorPath = QFileDialog.getExistingDirectory(self.dockWidget, 'Selectvector data path', self.vectorPath , QFileDialog.ShowDirsOnly)\n \n if (self.vectorPath and self.vectorPath is not None):\n self.dockWidget.lineEdit.setText(self.vectorPath)\n QgsExpressionContextUtils.setProjectVariable(project, 'project_importpath', self.vectorPath)\n else:\n self.dockWidget.lineEdit.setText(originalVectorPath)", "title": "" }, { "docid": "69aeb225990a6b9f3e14db629ce4c074", "score": "0.62987274", "text": "def handler_ok_changed ( self, info ):\r\n dir = info.object.file_name\r\n if not isdir( dir ):\r\n dir = dirname( dir )\r\n\r\n path = join( dir, self.dir_name )\r\n try:\r\n # Try to create the requested directory:\r\n mkdir( path )\r\n\r\n # Force the file tree view to be refreshed:\r\n info.object.reload = True\r\n\r\n # set the new directory as the currently selected file name:\r\n info.object.file_name = path\r\n\r\n # Close this view:\r\n info.ui.dispose( True )\r\n except:\r\n self.message = \"Could not create the '%s' directory\" % self.dir_name", "title": "" }, { "docid": "35419f9aa50d61cc1ded35b3719fb91e", "score": "0.6294492", "text": "def folder_dialog(title):\n\n root = tkinter.Tk()\n root.withdraw()\n return filedialog.askdirectory(title=title)", "title": "" }, { "docid": "cb95684489b7461a0f1c9c904afe3a11", "score": "0.6282837", "text": "def handlerSelectWorkingDir(self):\n workingDir = unicode(QtGui.QFileDialog.getExistingDirectory(self, 'Select Working Directory'))\n \n if workingDir:\n self.lineEditWorkingDir.setText(workingDir)\n \n logging.getLogger(type(self).__name__).info('select working directory: %s', workingDir)", "title": "" }, { "docid": "32a5b4f2906e96d28d20ea4ae1946441", "score": "0.62821496", "text": "def browseDir(self, event):\n\n fname = str('machinelabels-iter'+str(self.iterationindex)+'.h5')\n self.statusbar.SetStatusText(\"Looking for a folder to start refining...\")\n cwd = os.path.join(os.getcwd(),'labeled-data')\n# dlg = wx.FileDialog(self, \"Choose the machinelabels file for current iteration.\",cwd, \"\",wildcard=fname,style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)\n print(platform.system())\n if platform.system()=='Darwin': \n dlg = wx.FileDialog(self, 
\"Choose the machinelabels file for current iteration.\",cwd, fname ,wildcard=\"(*.h5)|*.h5\",style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) \n else:\n dlg = wx.FileDialog(self, \"Choose the machinelabels file for current iteration.\",cwd, \"\",wildcard=fname,style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)\n\n if dlg.ShowModal() == wx.ID_OK:\n self.data_file = dlg.GetPath()\n self.dir = str(Path(self.data_file).parents[0])\n self.fileName = str(Path(self.data_file).stem)\n self.load.Enable(False)\n self.next.Enable(True)\n self.save.Enable(True)\n self.zoom.Enable(True)\n self.pan.Enable(True)\n self.home.Enable(True)\n self.quit.Enable(True)\n else:\n dlg.Destroy()\n self.Destroy()\n dlg.Destroy()\n\n try:\n self.dataname = str(self.data_file)\n\n except:\n print(\"No machinelabels file found!\")\n self.Destroy()\n self.statusbar.SetStatusText('Working on folder: {}'.format(os.path.split(str(self.dir))[-1]))\n self.preview = True\n self.iter = 0\n\n if os.path.isfile(self.dataname):\n self.Dataframe = pd.read_hdf(self.dataname,'df_with_missing')\n self.Dataframe.sort_index(inplace =True)\n self.scorer = self.Dataframe.columns.get_level_values(0)[0]\n\n # bodyParts = self.Dataframe.columns.get_level_values(1)\n # _, idx = np.unique(bodyParts, return_index=True)\n # self.num_joints = len(self.bodyparts)\n # self.bodyparts = bodyParts[np.sort(idx)]\n self.index = list(self.Dataframe.iloc[:,0].index)\n# Reading images\n\n self.img = os.path.join(self.project_path,self.index[self.iter])\n img_name = Path(self.img).name\n self.norm,self.colorIndex = self.image_panel.getColorIndices(self.img,self.bodyparts)\n# Adding Slider and Checkbox\n\n self.choiceBox,self.slider,self.checkBox = self.choice_panel.addCheckBoxSlider(self.bodyparts,self.file,self.markerSize)\n self.slider.Bind(wx.EVT_SLIDER, self.OnSliderScroll)\n self.checkBox.Bind(wx.EVT_CHECKBOX,self.activateSlider)\n self.slider.Enable(False)\n# Show image\n# Setting axis title:dont want to show the threshold as it is not selected yet.\n self.figure,self.axes,self.canvas,self.toolbar = self.image_panel.drawplot(self.img,img_name,self.iter,self.index,self.threshold,self.bodyparts,self.colormap,self.preview)\n\n instruction = wx.MessageBox('1. Enter the likelihood threshold. \\n\\n2. Each prediction will be shown with a unique color. \\n All the data points above the threshold will be marked as circle filled with a unique color. All the data points below the threshold will be marked with a hollow circle. \\n\\n3. Enable the checkbox to adjust the marker size. \\n\\n4. Hover your mouse over data points to see the labels and their likelihood. \\n\\n5. Left click and drag to move the data points. \\n\\n6. Right click on any data point to remove it. Be careful, you cannot undo this step. \\n Click once on the zoom button to zoom-in the image.The cursor will become cross, click and drag over a point to zoom in. \\n Click on the zoom button again to disable the zooming function and recover the cursor. \\n Use pan button to pan across the image while zoomed in. Use home button to go back to the full;default view. \\n\\n7. When finished click \\'Save\\' to save all the changes. \\n\\n8. 
Click OK to continue', 'User instructions', wx.OK | wx.ICON_INFORMATION)\n\n if instruction == 4 :\n \"\"\"\n If ok is selected then the image is updated with the thresholded value of the likelihood\n \"\"\"\n textBox = wx.TextEntryDialog(self, \"Select the likelihood threshold\",caption = \"Enter the threshold\",value=\"0.1\")\n textBox.ShowModal()\n self.threshold = float(textBox.GetValue())\n textBox.Destroy()\n self.img = os.path.join(self.project_path,self.index[self.iter])\n img_name = Path(self.img).name\n self.axes.clear()\n self.preview = False\n self.figure,self.axes,self.canvas,self.toolbar = self.image_panel.drawplot(self.img,img_name,self.iter,self.index,self.threshold,self.bodyparts,self.colormap,self.preview)\n MainFrame.plot(self,self.img)\n MainFrame.saveEachImage(self)\n else:\n self.figure,self.axes,self.canvas,self.toolbar = self.image_panel.drawplot(self.img,img_name,self.iter,self.index,self.threshold,self.bodyparts,self.colormap,self.preview)\n MainFrame.plot(self,self.img)\n MainFrame.saveEachImage(self)\n\n else:\n msg = wx.MessageBox('No Machinelabels file found! Want to retry?', 'Error!', wx.YES_NO | wx.ICON_WARNING)\n if msg == 2:\n self.load.Enable(True)\n self.next.Enable(False)\n self.save.Enable(False)", "title": "" }, { "docid": "f24d5f3bbf5b82c02cdbc3fc18af74ca", "score": "0.6241064", "text": "def change_folder_path(self):\n\n if self.set_file_input_le.text() == \"\" or os.path.isdir(self.set_file_input_le.text()) == False:\n current_dir = os.getcwd()\n else:\n current_dir = self.root_path_le.text()\n new_folder_path = str(QtGui.QFileDialog.getExistingDirectory(self, \"Select Directory\", current_dir))\n if str(new_folder_path) != \"\" and str(new_folder_path)[:13] != \"/Applications\" and str(new_folder_path)[\n :10] != \"/Developer\" and str(\n new_folder_path)[:7] != \"/System\" and str(new_folder_path)[:8] != \"/Library\" and str(\n new_folder_path) != \"/\":\n self.set_file_input_le.setText(new_folder_path)\n self.process_setup.set_setup_input_data(new_folder_path, 1)\n self.save_setup()\n elif str(new_folder_path) == \"\":\n return", "title": "" }, { "docid": "f0df307fba580cc5cb70eb03633beaaa", "score": "0.62395525", "text": "def select_dir():\n root = Tkinter.Tk()\n root.withdraw()\n dirname = tkFileDialog.askdirectory(parent=root, initialdir=\"/\", \n title='Please select a directory')\n mediapath = dirname + '/__MEDIA'\n docpath = dirname + '/__DOCUMENTS'\n miscpath = dirname + '/__MISC'\n \n return dirname", "title": "" }, { "docid": "cf9a22489c4c7eaed1765bed7b002bcb", "score": "0.6239309", "text": "def browseFile(self):\n cur_attr = self.attr_combo.currentText()\n attr_list = [FileAttr.In, FileAttr.InOut, FileAttr.Out]\n if cur_attr not in [FileAttr.value2str(attr) for attr in attr_list]:\n msg = translate(\"UnitPanel\", \"Please select a value \"\n \"for the `Mode` property above \"\n \"before browsing for the file path.\")\n MessageBox.warning(self.astergui().mainWindow(),\n \"AsterStudy\", msg)\n return\n out_attr = FileAttr.value2str(FileAttr.Out)\n mode = 0 if cur_attr == out_attr else 1\n self._browseFileTemplate(mode)", "title": "" }, { "docid": "f4884e3865b864ecea5e2fa6c5b0f60c", "score": "0.6236643", "text": "def chooseOntologyPath(self):\n path = self.ontologyPath.text()\n path = QFileDialog.getExistingDirectory(self, 'Choose Directory', path)\n self.ontologyPath.setText(path)", "title": "" }, { "docid": "0fd5d372afd234c79811711a7ad0d145", "score": "0.6235732", "text": "def update_object ( self, event ):\r\n if self.control is not 
None:\r\n path = self.control.GetPath()\r\n if self.factory.allow_dir or isfile( path ):\r\n if self.factory.truncate_ext:\r\n path = splitext( path )[0]\r\n\r\n self.value = path", "title": "" }, { "docid": "41f267f1841420c1d439e92adae05613", "score": "0.6229492", "text": "def browseFile(self):\n file_name = getAFile()\n if file_name != \"\":\n self.onNewFileSelected(str(file_name))\n self.centralWidget.setMinimumSize(700, 500)", "title": "" }, { "docid": "8633ed824c28eccc6d85d6a345578b6a", "score": "0.6229477", "text": "def on_add_folder(self, evt):\r\n dir_dlg = wx.DirDialog(self.view, \"Please select a folder.\")\r\n if dir_dlg.ShowModal() == wx.ID_OK:\r\n wx.BeginBusyCursor()\r\n self.view.dirtree.add_folder(dir_dlg.GetPath())\r\n wx.EndBusyCursor()", "title": "" }, { "docid": "aca39cf81d1fceef081ca789347e54ea", "score": "0.6212621", "text": "def openDirectory(self): \n directory = QFileDialog.getExistingDirectory(self, \"Choose Directory\", \"\", \n QFileDialog.Option.ShowDirsOnly) # Specify the file mode to only select directories\n\n if directory:\n self.movie.setFileName(directory)\n # Check if image data is valid before playing\n if self.movie.isValid(): \n # Use setMovie() to set the label's contents as the selected GIF\n self.media_label.setMovie(self.movie)\n self.startMovie() # Call method to begin playing", "title": "" }, { "docid": "535adbf2f2533975e9454f45d5ed1e90", "score": "0.6200158", "text": "def choose_dir(initialdir=os.path.dirname(__file__)):\n root = Tk.Tk()\n dirpath = tkFileDialog.askdirectory(\n parent=root,\n initialdir=initialdir\n )\n root.destroy()\n return dirpath", "title": "" }, { "docid": "1134e4d5278278cd8c52f742091cf03a", "score": "0.619444", "text": "def zoekdir(self):\r\n oupad = self.dirnaam.get()\r\n if oupad == \"\":\r\n pad = tkFileDialog.askdirectory()\r\n else:\r\n pad = tkFileDialog.askdirectory(initialdir=oupad)\r\n if pad != \"\":\r\n self.dirnaam.set(pad)", "title": "" }, { "docid": "308afc049e86ce9abf51f99f71d331d8", "score": "0.61675054", "text": "def file_pick(self):\n old_path = self.Path_Box.text()\n # logger.debug('old path: \"{}\"'.format(old_path))\n # noinspection PyTypeChecker,PyCallByClass\n save_path = QtGui.QFileDialog.getExistingDirectory(self, 'Save Location')\n # logger.debug('selected path: \"{}\"'.format(save_path))\n if save_path == '':\n self.Path_Box.setText(old_path)\n return old_path\n else:\n self.Path_Box.setText(save_path)\n return save_path", "title": "" }, { "docid": "f7d4169f5315baee27156ed198123a94", "score": "0.61662483", "text": "def onChooseDownloadPath(self, event):\n\n self.logger.info(\"Choose download path event triggered. Opening a dialog for user input now.\")\n\n dirDialog = wx.DirDialog(self, \"Please choose a folder in which to download the file into.\")\n if dirDialog.ShowModal() != wx.ID_CANCEL:\n self.downloadPath = dirDialog.GetPath()\n self.tempDownloadPath = self.downloadPath\n self.radioDownload.SetValue(False)\n self.radioIT.SetValue(False)\n self.logger.info(\"Download path changed. 
New download path is: {} and new temp download path is: {}\".format(\n self.downloadPath, self.tempDownloadPath))\n self.updateDownloadLabel()\n else:\n self.logger.info(\"Choose download path event canceled.\")", "title": "" }, { "docid": "a5f27cd6638b661666aa8e9f2eef642f", "score": "0.6166166", "text": "def _create_file_dialog ( self ):\r\n dlg = QtGui.QFileDialog(self.control)\r\n dlg.selectFile(self._file_name.text())\r\n dlg.setFileMode(QtGui.QFileDialog.Directory)\r\n dlg.setOptions(QtGui.QFileDialog.ShowDirsOnly)\r\n\r\n return dlg", "title": "" }, { "docid": "a7158ea2d94cc13138b6c73705abe2ed", "score": "0.6158772", "text": "def open_file_dialog(self):\n file = QFileDialog.getOpenFileName(self, \"Open file\", getcwd(), \"*.pdf\")\n file_location = file[0].strip(getcwd().replace(\"\\\\\", \"/\"))\n self.entries[\"location\"].setText(file_location)", "title": "" }, { "docid": "9b78599994a2bea00bce0278f7b6a6ad", "score": "0.61562693", "text": "def show_file_dialog_window(self, line_edit):\n file_path = QtWidgets.QFileDialog.getExistingDirectory(\n self,\n \"Select an install directory\",\n os.path.normpath(os.environ['MAYA_APP_DIR']))\n if file_path:\n line_edit.setText(file_path)", "title": "" }, { "docid": "1be116c13474070a3ccfc831ec9a4d4b", "score": "0.6155732", "text": "def _open_file_screen(self):\n self._filepath = askdirectory()\n tkinter.Tk(\"Filesize Visualizer\").quit()", "title": "" }, { "docid": "8cd2143047c84e94cdfb6dfc7203f6f4", "score": "0.613926", "text": "def get_path(self, isfile: bool) -> None:\n if isfile:\n path = QFileDialog.getOpenFileName(self,\n 'Select File',\n os.path.dirname(os.path.abspath(__file__)))\n if path[0]:\n self.scan_path.setText(os.path.abspath(path[0]))\n self.status.setText('✔')\n else:\n path = QFileDialog.getExistingDirectory(self,\n 'Select Directory',\n os.path.dirname(os.path.abspath(__file__)))\n if path:\n self.scan_path.setText(os.path.abspath(path))\n self.status.setText('✔')", "title": "" }, { "docid": "a9c1d6093a9e610624c83ab4332641c4", "score": "0.6130713", "text": "def choose_directory(self, folder, override=False):\n\n # -f-i-x-m-e-:this doesn't seem to actually show the current folder if there\n # is one...maybe that's a Qt bug, though. Or maybe it's because of the\n # hidden folder in the path?\n\n # update: ok...so the 'default' dialog was crap and didn't work\n # right. For some reason, adding an option (in this case\n # 'DontResolveSymlinks') caused a different dialog to be used\n # (one that looked more familiar to me) that worked MUCH better\n # and started in the correct directory.\n # Wondering if this was perhaps the 'non-native' dialog and the\n # native one was just bad on my system, I changed the options to\n # include 'UseNonNativeDialog'--but this showed a *different*\n # dialog than the other two, which seemed to be between the\n # others as far as functionality went. 
Presumably the \"good\"\n # dialog was the native one, which is reassuring.\n # Anyway, I still don't really know what's going on, but it\n # seems to work ok for now...\n\n ovrdict = self._override_mapping\n # ovrdict = self._override_paths[self._selected_profile.name]\n\n\n start = self._override_mapping[folder].path if override else self.paths[folder]\n\n # noinspection PyTypeChecker\n chosen = QFileDialog.getExistingDirectory(self,\n \"Select directory\",\n directory=start or \"\",\n options=QFileDialog.DontResolveSymlinks)\n\n if check_path(chosen):\n\n if override:\n self.override_boxes[folder].setText(chosen)\n ovrdict[folder] = ovrdict[folder]._replace(path=chosen)\n else:\n self.path_boxes[folder].setText(chosen)\n if folder in self.indicator_labels:\n self.indicator_labels[folder].setVisible(False)\n # enable apply button if needed\n self._mark_changed()", "title": "" }, { "docid": "7179d1b7b05993b7471fde194b3ac7b1", "score": "0.6125794", "text": "def onBrowse(self, event):\n dlg = wx.FileDialog(self, \"Choose an image:\",\n style=wx.DD_DEFAULT_STYLE\n # | wx.DD_DIR_MUST_EXIST\n # | wx.DD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n self.inputImg = dlg.GetPath()\n self.inputText.SetLabel(self.inputImg)\n self.inputText.SetForegroundColour(\"black\") # set text color\n self.onView()\n dlg.Destroy()", "title": "" }, { "docid": "c72970213624644967a7922528629a60", "score": "0.61231685", "text": "def on_miSetRootPath(self):\n if self.filesRootPath is None:\n self.filesRootPath = self.wgAttrFiles.defaultRootPath\n self.fdRootPath = pQt.fileDialog(fdRoot=self.filesRootPath, fdFileMode='DirectoryOnly', fdCmd=self.on_set)\n self.fdRootPath.exec_()", "title": "" }, { "docid": "91f8b22b0125ddfed772942d341b62e6", "score": "0.6121621", "text": "def ask_user_directory(self):\n return str(\n QtWidgets.QFileDialog.getExistingDirectory(\n self.parent, \"Please select a capa rules directory\", self.rule_path\n )\n )", "title": "" }, { "docid": "76270e305747f0a99fc34f8d3bba1660", "score": "0.6119205", "text": "def openFileDialog(self):\n self.pathEdit.setText(QFileDialog.getOpenFileName(self, 'Open File', './',\n filter=\"All Files(*.*);;Text Files(*.txt)\")[0])", "title": "" }, { "docid": "813db67a49fe5445d0fc564be9c49940", "score": "0.61155754", "text": "def _input_file_dialog(self):\r\n\r\n\t\t# Open file dialog\r\n\t\tinputFile = askopenfilename(filetypes=self.inputTypes)\r\n\t\t# If file selected, check if txtInput already has an entry and clear,\r\n\t\t# then insert path of selected file\r\n\t\tif inputFile:\r\n\t\t\tcurTxt = self.txtInput.get()\r\n\t\t\tif curTxt:\r\n\t\t\t\tcurTxtEnd = len(curTxt)\r\n\t\t\t\tself.txtInput.delete(0, curTxtEnd)\r\n\t\t\tself.txtInput.insert(0, inputFile)", "title": "" }, { "docid": "980b4ecb52fec2f698afd85689e85e24", "score": "0.60958797", "text": "def select_file(_dir=False):\n Tk().withdraw()\n if not _dir:\n return askopenfilename()\n else:\n return askdirectory()", "title": "" }, { "docid": "cf16173be2046f5944ed438809f790a1", "score": "0.6092447", "text": "def directory_clicked(self, dirname):\n self.chdir(directory=dirname)", "title": "" }, { "docid": "144e6ad0bc6e4b92b1a98b5865c3148d", "score": "0.6056002", "text": "def _open_file(self):\n file_path, _ext = QFileDialog.getOpenFileName(self, dir=os.environ['HOME'], filter='Folder(.groom)')\n self.ui.groom_package_txf.setText(str(file_path))", "title": "" }, { "docid": "5e7edaee6a56092970308213c3a08d42", "score": "0.604943", "text": "def set_folder(self):\n\n self.par_folder = 
filedialog.askdirectory()\n self.entry_folder.delete(0, END)\n self.entry_folder.insert(0, self.par_folder)\n LOGGER.debug('Folder: %s', self.par_folder)\n self.folder_created_by_dialog = True", "title": "" }, { "docid": "227d33323a5f9a10564aea61c3dff835", "score": "0.6044199", "text": "def on_set(self):\n selPath = self.fdRootPath.selectedFiles()\n if selPath:\n self.filesRootPath = os.path.normpath(str(selPath[0]))\n self.wgAttrFiles.rf_rootPath()\n self.wgVtxFiles.rf_rootPath()", "title": "" }, { "docid": "c8c1491453232d0d955152cc161d9fd7", "score": "0.6030077", "text": "def save_folder_select(self):\n\n # Remove annoying empty window\n root = Tk()\n root.withdraw()\n\n # Select image from file\n self.folder = filedialog.askdirectory()\n\n if not self.folder:\n print(\"You choosed nothing\")\n else:\n# self.file_path = self.f\n# print(\"Choosed path: \\n\", self.folder, \"\\n\")\n self.edit_dir_save.setText(self.folder)", "title": "" }, { "docid": "9de59e1983cefbda96dca218a17b5265", "score": "0.6027771", "text": "def new_thread_1(self):\n dir_name = filedialog.askdirectory(parent=self, initialdir=\"/\",\n title='Please select a directory')\n\n if dir_name != \"\":\n self.disable_menu()\n self.path_and_bag.check_if_refresh(dir_name)\n self.config(cursor=\"wait\")\n self.update()\n self.clean_queue()\n GUI.static_queue.put(\"Finding files in chosen folder:\\n\\n\")\n num_files = len([val for sub_list in\n [[os.path.join(i[0], j)for j in i[2]]\n for i in os.walk(dir_name)]\n for val in sub_list])\n rott = tk.Tk()\n app = App(rott, GUI.static_queue, num_files)\n rott.protocol(\"WM_DELETE_WINDOW\", app.on_closing)\n thread = threading.Thread(target=self.open_menu, args=(dir_name,))\n thread.setDaemon(True)\n thread.start()\n app.mainloop()\n else:\n print(\"Action aborted\")", "title": "" }, { "docid": "cc134be69b68707e6688cf64176df5fc", "score": "0.6023432", "text": "def runOpenDirectoryDialog(self, title, startdir):\n\n dirName = tkFileDialog.askdirectory(\n title=title, initialdir=startdir, mustexist=\"true\")\n\n return dirName", "title": "" }, { "docid": "8c989646d91a20dd143851256579fa63", "score": "0.60129404", "text": "def update_curdir(self, path):\n self.olddir = self.curdir\n self.curdir = path\n self.prompt = '%s%s> ' % (self.state, path)", "title": "" }, { "docid": "212360e669cc90c29b702772a126800c", "score": "0.60106725", "text": "def select_folder(textfield, title=\"Select folder\", folder=\".\", master=None):\n if not os.path.isdir(folder):\n print(\"The folder '{}' specified for select_folder does not exist.\".format(folder))\n folder = \".\"\n \n textfield.delete(0, END)\n textfield.insert(0, filedialog.askdirectory(title=title, initialdir=folder, parent=None if master is None else master.tk))", "title": "" }, { "docid": "a946d8fb50201c0025c17fb893e67836", "score": "0.59847885", "text": "def change_download_directory(self):\n directory = tk_filedialog.askdirectory()\n if directory:\n self.download_directory_entry.config(state=tk.NORMAL)\n self.download_directory_entry.insert(0, directory)\n self.download_directory_entry.config(state=tk.DISABLED)", "title": "" } ]
8d872fb7864e42a7495f4b21e3bbb084
Parses a media_file path into its component segments.
[ { "docid": "a8a29dc123502596e6c9c7d23ad6beec", "score": "0.65137494", "text": "def parse_media_file_path(path: str) -> Dict[str, str]:\n m = re.match(\n r\"^customers/(?P<customer_id>.+?)/mediaFiles/(?P<media_file_id>.+?)$\",\n path,\n )\n return m.groupdict() if m else {}", "title": "" } ]
[ { "docid": "9f7445ca524d3ea5bd135aa47a25c27f", "score": "0.5895364", "text": "def split_path(full_path):\n parsed_list = full_path.split('/')[-2:] # directtory, file_name\n \n return parsed_list", "title": "" }, { "docid": "3405777c166d80d631ed05388b7710ab", "score": "0.58513534", "text": "def SplitPath(self, path):", "title": "" }, { "docid": "daaf27ae5c80ffa70bafe497b98d5f16", "score": "0.5751438", "text": "def get_video_parts(video_path):\n parts = video_path.split('/')\n filename = parts[-1]\n class_label = filename[0:5]\n seq = filename[5:10]\n dir = '/'.join(parts[0:-1]) + '/'\n return filename, class_label, seq, dir", "title": "" }, { "docid": "09e5c857c612cbee538811e9b105f2af", "score": "0.57160467", "text": "def _parse_segments_str(\n segments_string: str,\n media_start_time: Optional[int] = 0,\n media_stop_time: Optional[int] = None\n ) -> Optional[List[Tuple[int, int]]]:\n n_early = 0\n n_late = 0\n try:\n segments = []\n start_stops = segments_string.split(\",\")\n for ss in start_stops:\n start, stop = ss.strip().split(\"-\")\n start = int(start)\n stop = int(stop)\n\n # Limit to media start and stop times. If entirely\n # outside the limits, drop the segment\n if media_stop_time is not None and start > media_stop_time:\n n_late += 1\n continue\n if stop < media_start_time:\n n_early += 1\n continue\n start = max(start, media_start_time)\n if media_stop_time is not None:\n stop = min(stop, media_stop_time)\n\n # Offset by media_start_time so that segments are\n # relative to the transcoded waveform\n start -= media_start_time\n stop -= media_start_time\n\n segments.append((start, stop))\n except Exception as e:\n raise mpf.DetectionException(\n 'Exception raised while parsing voiced segments '\n f'\"{segments_string}\": {e}',\n mpf.DetectionError.INVALID_PROPERTY\n )\n\n # If all the voiced segments are outside the time range, signal that\n # we should halt and return an empty list.\n if not segments:\n raise NoInBoundsSpeechSegments(n_early, n_late)\n\n return sorted(segments)", "title": "" }, { "docid": "1f3ee22316bb584da3eebc6e56a55129", "score": "0.5584283", "text": "def get_video_parts(video_path):\n parts = video_path.split('/')\n filename = parts[1]\n filename_no_ext = filename.split('.')[0]\n classname = parts[0]\n\n return classname, filename_no_ext, filename", "title": "" }, { "docid": "ed9d1478a52685e258124109a7283713", "score": "0.5551553", "text": "def _parse_path_agnostic(filename):\n filename = filename.replace(\"\\\\\", os.sep).replace(\"/\", os.sep)\n return os.path.split(filename)", "title": "" }, { "docid": "3e9b205b13bfbbb8feae0e88ec96fe9d", "score": "0.5472871", "text": "def get_video_parts(video_path):\n parts = video_path.split('/')\n filename = parts[-1]\n filename_no_ext = filename\n classname = parts[-1]\n\n return classname, filename_no_ext, filename", "title": "" }, { "docid": "f6ba2878fc531fc4d9c6fe8cc09d56e1", "score": "0.5443869", "text": "def parse_path(path):\n\n result = []\n split_path = path.split('/')\n for element in split_path:\n if element != '':\n result.append(element)\n return result", "title": "" }, { "docid": "067b228b30fbe42e5c4b67590e1c7086", "score": "0.53923386", "text": "def parse(self, filepath):", "title": "" }, { "docid": "afef0dd26b9999a307b14c7857661765", "score": "0.5352113", "text": "def parse(cls, path: str) -> List:\n pass", "title": "" }, { "docid": "6e0d302732ce54557586ba5d04c4bb5b", "score": "0.53390574", "text": "def divide_video(self):\r\n if self.path and self.split_path:\r\n segment_size = self.get_parts()\r\n 
print\r\n print ('ffmpeg -i %s -c copy -map 0 -segment_time %s -f segment %s' % (self.path, segment_size, self.split_path) + '%03d.mp4')\r\n print\r\n print\r\n print\r\n #divide video to parts of the calculated length, NOTE: not every part will be exactly at the same length\r\n p = SUB.call('ffmpeg -i %s -c copy -map 0 -segment_time %s -f segment %s' % (self.path, segment_size, self.split_path) + '%03d.mp4')\r\n\r\n n = 0\r\n\r\n files = listdir(self.split_path)\r\n #gets max part\r\n for i in reversed(files):\r\n if len(i) >= 3 and i[:3].isdigit():\r\n max_part = int(i[:3])\r\n break\r\n else:\r\n max_part = -1\r\n\r\n while n <= max_part:\r\n #convert to audio parts\r\n p = SUB.call('ffmpeg -i %s.mp4 %s.wav' % (self.split_path+self.get_part_video_num(n), self.split_path+self.get_part_video_num(n)))\r\n n += 1\r\n\r\n n = 0\r\n while n <= max_part:\r\n #converts to video parts\r\n p = SUB.call('ffmpeg -i %s.mp4 -target ntsc-vcd -vcodec mpeg1video -an %s.mpg' % (self.split_path+self.get_part_video_num(n), self.split_path+self.get_part_video_num(n)))\r\n n += 1\r\n\r\n for f in files:\r\n try:\r\n #deletes the unnecessary original video parts\r\n remove(self.split_path + f)\r\n except:\r\n pass\r\n\r\n print 'nehenaknu im gui'", "title": "" }, { "docid": "667439f8e50f0ac824bb024e56058572", "score": "0.5306527", "text": "def _split_multipath(pathstr):\n # m is absolute path, M is relative path (or vice versa?)\n if not pathstr[0] in [\"m\",\"M\"]:\n raise ValueError(\"Bad path format: %s\" % pathstr)\n import re\n subpaths = [sp for sp in re.split('[Mm]',pathstr) if len(sp)>0]\n headers = re.findall('[Mm]',pathstr)\n for subpath,header in zip(subpaths,headers):\n # Need further parsing of multi-path strings? perhaps no.\n yield (header + subpath).strip()", "title": "" }, { "docid": "ae75d0cb47ddad1579f9a3a62236a31c", "score": "0.52950317", "text": "def get_media_url(path):\n if ck_settings.UPLOAD_PREFIX:\n url = ck_settings.UPLOAD_PREFIX + path.replace(ck_settings.UPLOAD_PATH, '')\n else:\n url = os.path.join(settings.MEDIA_URL, path.replace(ck_settings.MEDIA_ROOT, ''))\n\n # Remove multiple forward-slashes from the path portion of the url.\n # Break url into a list.\n url_parts = list(urllib.parse.urlparse(url))\n # Replace two or more slashes with a single slash.\n url_parts[2] = re.sub('\\/+', '/', url_parts[2])\n # Reconstruct the url.\n return urllib.parse.urlunparse(url_parts)", "title": "" }, { "docid": "f406d7394633ae25472c0e3490177110", "score": "0.52904373", "text": "def _parse_part(self, part, mimetypes):\n\n self._parse_section_three(part, mimetypes)", "title": "" }, { "docid": "747df1745e9302728538aac32172ca22", "score": "0.5220871", "text": "def parse_full_path(self, full_path):\n parsed_full_path = {'filename': os.path.basename(full_path)}\n return parsed_full_path", "title": "" }, { "docid": "009746872aeb480afbb1a6854596300d", "score": "0.51812714", "text": "def make_media_file_list(media):\n \n items = []\n\n for item in media:\n if isinstance(item, list):\n #print item[0]\n fname = os.path.basename(item[0])\n #print fname\n items.append(fname)\n else:\n fname = os.path.basename(item)\n #print fname\n items.append(fname)\n \n return items", "title": "" }, { "docid": "3b78823f0ec6fd346e157c235ab074a5", "score": "0.5176828", "text": "def get_file_parts(path) :\n path,fn = os.path.split(path)\n basename,ext = os.path.splitext(fn)\n return path,fn,basename,ext", "title": "" }, { "docid": "b323a64607a890dd681761eef07762c4", "score": "0.51637036", "text": "def parse(cls, path):\n 
pass", "title": "" }, { "docid": "31706697c83c53a0d91fe2050cd1168f", "score": "0.51542807", "text": "def SplitPath(self, path, qualified=False):\n # type: (str, bool) -> list\n parsed_path = self.hdfs.parse_uri(path)\n # split the path with the path separator and remove empty path segments.\n path_segments = list(filter(None, parsed_path.path.split(self.PATH_SEPARATOR)))\n # if qualified then the first segment is HDFS URI\n if qualified:\n path_segments.insert(0, self.hdfs.make_uri())\n return path_segments", "title": "" }, { "docid": "ba74023ff99d5b0ae8aa590126caf4ff", "score": "0.51386625", "text": "def __parse(self, dirpath):\n if not dirpath or not isinstance(dirpath, basestring):\n raise ValueError(\n 'path is not defined or not a string. path: {}'.format(\n dirpath))\n if not os.path.exists(dirpath):\n raise ValueError('path does not exist! path: {}'.format(dirpath))\n\n allparts = []\n # Split 4 parts for build, config, cid, size\n # TODO: error checks\n tmp_path = dirpath\n for i in range(0, 4):\n parts = os.path.split(tmp_path)\n assert len(\n parts) == 2, \\\n \"Not enough parts to the path! parts={}, dirpath={}\".format(\n parts, dirpath)\n tmp_path = parts[0]\n allparts.insert(0, parts[1])\n\n self.__build = allparts[0]\n self.config = allparts[1]\n self.cid = str(allparts[2])\n self.size = allparts[3]", "title": "" }, { "docid": "1ed1033701a7f407a26a8af0f1b72ed7", "score": "0.51082796", "text": "def parse_url(self, url):\n url = url.replace(self._url, '')\n url = url.strip('/').lower()\n url = url.split('/')\n id = '-'.join(url[:-1])\n filename = url[-1]\n return id, filename", "title": "" }, { "docid": "1ed1033701a7f407a26a8af0f1b72ed7", "score": "0.51082796", "text": "def parse_url(self, url):\n url = url.replace(self._url, '')\n url = url.strip('/').lower()\n url = url.split('/')\n id = '-'.join(url[:-1])\n filename = url[-1]\n return id, filename", "title": "" }, { "docid": "bd6a960f9e58f26f1b41f00e90d6d800", "score": "0.510161", "text": "def _parse_dest_path(self, path = ''):\n sep = '/'\n if path and path[0] == sep:\n # remove dealing '/'\n path = path[1:]\n if not path: return None\n v = path.split(sep)\n folderId = \"root\"\n mimeType = MIME_TYPE_FOLDER\n i = 0\n # iterates through of the path (array v)\n while i < len(v) and mimeType == MIME_TYPE_FOLDER:\n filename = v[i]\n subfolders = self.list_folders(name = filename, parentId = folderId)\n if subfolders:\n folderId = subfolders[0]['id']\n else:\n if i == len(v) - 1:\n # e.g. 
'path/to/folder/foo.txt', then folderId = ID_OF('path/to/folder/'), \n # and filename = 'foo.txt'\n return (folderId, filename)\n else:\n # path not found\n return None\n i += 1\n # this is the case 'path/to/folder/', no filename given\n return (folderId, '')", "title": "" }, { "docid": "7ccd69a11e0bb5709ef8373f66383a60", "score": "0.50961405", "text": "def parse_path(path):\n\n path_key = path.split(\"/\")[0]\n\n if path_key == \"posts\":\n return PostImporter(path)\n elif path_key == \"pages\":\n return PageImporter(path)", "title": "" }, { "docid": "425f90103303ec8462b2751ca9ce8355", "score": "0.50941086", "text": "def parse_audience_path(path: str) -> Dict[str, str]:\n m = re.match(\n r\"^customers/(?P<customer_id>.+?)/audiences/(?P<audience_id>.+?)$\",\n path,\n )\n return m.groupdict() if m else {}", "title": "" }, { "docid": "871a5a4072527452f72447e0d9b66420", "score": "0.5088982", "text": "def explode_path(self, path):\n parts = []\n head = path\n while head:\n # /foo/bar/baz => head=/foo/bar, tail=baz\n head, tail = os.path.split(head)\n parts.append(tail)\n return reversed(parts)", "title": "" }, { "docid": "31eac2a741f55d733144cca3d85fc2c4", "score": "0.5079133", "text": "def _split(self, file):\r\n\r\n ext_index = str(file).rindex('.')\r\n file_name = file[:ext_index] # get the file name from the full path\r\n extension = file[ext_index + 1:] # get the extension from the full path\r\n\r\n # extract time of all matched points\r\n time_series = [int(row[1] / 1000) for row in self.stop_points]\r\n time_series = [0] + time_series + [int(self.length / 1000)]\r\n print('time_series:', time_series)\r\n\r\n # split the source audio\r\n for i in range(1, len(time_series)):\r\n print('from', time_series[i - 1], 'to', time_series[i])\r\n\r\n cmd = 'ffmpeg -i {} -acodec copy -ss {} -to {} {}_{}.{}'.format(file, time_series[i - 1], time_series[i],\r\n file_name, i, extension)\r\n\r\n print(cmd)\r\n\r\n proc = subprocess.Popen(cmd, shell=True)\r\n proc.communicate()\r\n print('convert is done...')", "title": "" }, { "docid": "4491d85987211fea83c8878cfd796f85", "score": "0.50746155", "text": "def _get_path_parts(self):\n dn_str = self._remove_format_from_path(self.path, self.api_format)\n return dn_str[1:].split(\"/\")", "title": "" }, { "docid": "df7b50905f406e89cb27216db03f4317", "score": "0.50738245", "text": "def parse_media(self, tokens):\n mediaquery_tokens = [f for f in tokens if f.type == 'IDENT']\n return mediaquery_tokens", "title": "" }, { "docid": "38cd8303bef843639beeb682b1ee3b45", "score": "0.5000593", "text": "def parse_asset_path(path: str) -> Dict[str, str]:\n m = re.match(\n r\"^customers/(?P<customer_id>.+?)/assets/(?P<asset_id>.+?)$\", path\n )\n return m.groupdict() if m else {}", "title": "" }, { "docid": "cec92281a239ad766f3bbf4f3f057665", "score": "0.49627233", "text": "def resolv_path(self, path):\n itemx = self.struct_list\n for i in path:\n itemx = itemx[i]\n return itemx", "title": "" }, { "docid": "3b75873003a8627e0e27f163733b45d2", "score": "0.492974", "text": "def parse_file(self, path):\n with open(path) as file:\n return [line.strip() for line in file]", "title": "" }, { "docid": "74f2d4d55190c544d663fe744615d924", "score": "0.4928018", "text": "def readSegments( fileName ):\n f = open( fileName, 'r' )\n count = int( f.readline() )\n print \"Files has %d segments\" % ( count )\n segments = []\n for line in f.xreadlines():\n line = line.strip()\n if ( line ):\n print line.strip()\n x1, y1, x2, y2 = map( lambda x: float(x), line.strip().split() )\n 
segments.append( Segment( Vector3( x1, 0, y1 ), Vector3( x2, 0, y2 ) ) )\n f.close()\n return segments", "title": "" }, { "docid": "7292f66d8a595805fbf73bbca1f47d95", "score": "0.49179754", "text": "def params_in_path(self):\n # type: () -> List\n return [part[1:] for part in self.path.split('/') if part.startswith(':')]", "title": "" }, { "docid": "eee09a20ff89e03615e976deee670c28", "score": "0.49050176", "text": "def parse_ad_group_asset_path(path: str) -> Dict[str, str]:\n m = re.match(\n r\"^customers/(?P<customer_id>.+?)/adGroupAssets/(?P<ad_group_id>.+?)~(?P<asset_id>.+?)~(?P<field_type>.+?)$\",\n path,\n )\n return m.groupdict() if m else {}", "title": "" }, { "docid": "912763bdef029431fc34f379afdf15bb", "score": "0.4899388", "text": "def split_path(path):\n\n def split(src):\n \"\"\"Core function to split path.\n \"\"\"\n if \"/\" in src:\n return src.rsplit(\"/\", maxsplit=1)\n else:\n # Interpret as path to file in current directory.\n return \"./\", src\n\n if isinstance(path, sb.pretrained.fetching.FetchSource):\n fetch_from, fetch_path = path\n source, filename = split(fetch_path)\n return sb.pretrained.fetching.FetchSource(fetch_from, source), filename\n else:\n return split(path)", "title": "" }, { "docid": "c137998688eefb1903a6f3170a46fead", "score": "0.48934528", "text": "def parse_asset_group_path(path: str) -> Dict[str, str]:\n m = re.match(\n r\"^customers/(?P<customer_id>.+?)/assetGroups/(?P<asset_group_id>.+?)$\",\n path,\n )\n return m.groupdict() if m else {}", "title": "" }, { "docid": "9757a0db7c7890567adc644ff96a1eb3", "score": "0.48931915", "text": "def parse_file(self, file_path):\n pass # pragma: nocover", "title": "" }, { "docid": "d300a1f083315523010aff40a75834e3", "score": "0.48873222", "text": "def _parse_file(self, file_path):\n pass # pragma: nocover", "title": "" }, { "docid": "13288de9f91d0f1eea4434bceafd5063", "score": "0.48766533", "text": "def parse_file(self, path):\n self.logger.debug(\"Parsing SVD file %s\", path)\n\n with open(path, \"r\") as svd_file:\n svd = svd_file.read()\n return self.parse(svd)", "title": "" }, { "docid": "72fe351adcfa72fa58adec81e6683524", "score": "0.48709342", "text": "def _split(self):\n if self.is_abspath:\n return self._value[1:].split(self.fs.sep)\n return self._value.split(self.fs.sep)", "title": "" }, { "docid": "424b5ceacffe982f1f0290f9611d6c7c", "score": "0.48600677", "text": "def __file_parts__(self,file):\n parts = {\n 'path':'',\n 'name':'',\n 'ext':'',\n 'file':'',\n 'full_name':'',\n 'full_path':''\n }\n if not file == \"\" and not file == None:\n parts['path'],parts['full_name'] = os.path.split(file)\n parts['name'],parts['ext'] = parts['full_name'].split('.')\n if not parts['path']:\n parts['path'] = './'\n parts['full_path'] = parts['path']+'/'+parts['name']+'.'+parts['ext']\n else:\n parts['path'] = ''\n parts['name'] = ''\n parts['ext'] = ''\n parts['file'] = ''\n parts['full_path'] = ''\n\n return parts", "title": "" }, { "docid": "f309d66f5daa88e8142525a0f77f5484", "score": "0.4854577", "text": "def part_parse(self):\n parts = []\n for part in re.split('\\*\\*\\* ([A-Z- ]+) \\*\\*\\*', self.hand_file): # return [ 'part1', 'splitter1', 'part2',..\n parts.append(part)\n\n for i in range(0, len(parts)):\n if i == 0:\n self.part_dict['HEADER'] = parts[i].split('\\n')\n if i % 2 != 0: # number is odd\n # print(type(parts[i]))\n self.part_dict[parts[i]] = parts[i+1].split('\\n')", "title": "" }, { "docid": "60f684ab8a01895a19cc4deb1fb1e82a", "score": "0.48466882", "text": "def 
parse_asset_group_asset_path(path: str) -> Dict[str, str]:\n m = re.match(\n r\"^customers/(?P<customer_id>.+?)/assetGroupAssets/(?P<asset_group_id>.+?)~(?P<asset_id>.+?)~(?P<field_type>.+?)$\",\n path,\n )\n return m.groupdict() if m else {}", "title": "" }, { "docid": "48c1ed092dae14b6dc19400a6fb30d9e", "score": "0.48438734", "text": "def splitpath(path):\n # type: (bytes) -> List[bytes]\n return path.split(pycompat.ossep)", "title": "" }, { "docid": "c2c4bf3c35e4c059498f17ee9a8273ac", "score": "0.4841555", "text": "def _split_path(self, path: str) -> Tuple[str, str]:\n parts = path.split(\".\")\n coll = parts.pop(0)\n rest = \".\".join(parts)\n return coll, rest", "title": "" }, { "docid": "d2a4dcdee73b1e93a4005c200ba45cd9", "score": "0.4835787", "text": "def test_parse_path():\n\n expected_hosts = ['127.0.0.1']\n expected_services = ['45589']\n expected_notes = ['screenshot_web']\n\n pidb = ParserModule.parse_path('tests/server/data/parser-screenshot_web-job.zip')\n\n assert [x.address for x in pidb.hosts] == expected_hosts\n assert [x.port for x in pidb.services] == expected_services\n assert [x.xtype for x in pidb.notes] == expected_notes", "title": "" }, { "docid": "a1dc3aef3880e746a9478211f9ba9c62", "score": "0.48354855", "text": "def divide_pathstring_parts(pathstring):\n substring = pathstring.strip()\n paths = []\n while 'M' in substring[1:]:\n m_index = substring.find('M', 1)\n if m_index > -1:\n subpath = substring[0:m_index].strip()\n paths.append(subpath)\n substring = substring[m_index:].strip()\n\n paths.append(substring)\n return paths", "title": "" }, { "docid": "26c72d352f9c9219dfb769dce356fd03", "score": "0.4830553", "text": "def get_media_url(mid):\n return '%s%s' % (config.get('media_files_root'),mid)", "title": "" }, { "docid": "ac117c2a230a2b3e77872f847fc99089", "score": "0.4827614", "text": "def parse_uri(_, uri):\n uri = str(uri)\n if uri.startswith('file://'):\n uri = uri[len('file://'):]\n return Path(uri).resolve()", "title": "" }, { "docid": "ffb0ba7790fffefdaba8caf9da40c020", "score": "0.4827574", "text": "def get_mp4_file_path_and_drive_name(h264_filepath):\n # Splits by '.' 
to get path before extension\n h264_filepath_split_no_ext = h264_filepath.split('.')\n mp4_filepath = h264_filepath_split_no_ext[0] + '.mp4'\n\n # Splits by '/' to get filename at end of path for storage in Drive\n h264_filepath_split_no_slashes = h264_filepath_split_no_ext[0].split('/')\n drive_file_name = h264_filepath_split_no_slashes[len(h264_filepath_split_no_slashes) - 1] + '.mp4'\n\n return mp4_filepath, drive_file_name", "title": "" }, { "docid": "55b2b3c758d564f22bad655b4fb64330", "score": "0.48193255", "text": "def get_missing_media(self):\n self.make_segment_dir()\n media = self.retrieve_chunklist_manifest()\n for m in media:\n renamed = self.rename_media(m)\n if os.path.isfile(self.segmentDir + '/' + renamed) == False:\n print('INFO: ' + self.segmentDir + '/' + renamed + ' file does not exist')\n status = self.retrieve_media_segment(m, renamed)\n if status:\n self.segments += m # Add to segment list\n self.renamedSegments += renamed # Add to other segment list\n if self.verbose:\n print('INFO: Got new media segment \\t\\t' + m)\n else:\n if self.verbose:\n print('WARN: Failed to get media segment \\t' + m)\n else:\n if self.verbose:\n print('INFO: Segment file ' + m + ' already downloaded, skipping...')\n if self.verbose:\n sdSize = sum(d.stat().st_size for d in os.scandir(self.segmentDir + '/') if d.is_file())\n print('INFO: Segment directory is now at ' + str(sdSize) + ' bytes (' + str(round(sdSize/1000000, 2)) + ' megabytes)')", "title": "" }, { "docid": "6d17f08ae805e98e8aa00a6dd0c19b8d", "score": "0.48173803", "text": "def format_segment(record, indir='', force_input_dir='', ):\n regex = re.compile(indir)\n file_loc = regex.sub(force_input_dir or indir,\n record['FILE_LOC'])\n logger.debug('Search for file location: %s', file_loc)\n filenames = glob.glob(file_loc)\n if filenames:\n return {k: (v if k != 'FILE_LOC' else filenames[0])\n for k, v in record.items()}\n return None", "title": "" }, { "docid": "954dc0c15e9788913822df5d8ed57732", "score": "0.48143667", "text": "def read_file(path_segments):\n file_path = os.path.join(here, *path_segments)\n with open(file_path) as f:\n return f.read()", "title": "" }, { "docid": "012d693c2472b1425ceba5c831fe8757", "score": "0.48121879", "text": "def convert_media_path(html):\n lst = list()\n mcss = re.findall('href=\"(\\S+?\\.css)\"', html)\n lst.extend(list(set(mcss)))\n mjs = re.findall('src=\"([\\w\\./]\\S+?\\.js)\"', html)\n lst.extend(list(set(mjs)))\n msrc = re.findall('<img.*?src=\"([\\w\\./]\\S+?)\".*?>', html)\n lst.extend(list(set(msrc)))\n # print lst\n newlist = ['_' + each.split('/')[-1] for each in lst]\n # print newlist\n for each in zip(lst, newlist):\n html = html.replace(each[0], each[1])\n return unicode(html)", "title": "" }, { "docid": "2bb1357b96aeaf1f6f4f4ef4c8837309", "score": "0.4811636", "text": "def split_video(self, path):\r\n vidObj = cv2.VideoCapture(path)\r\n images = []\r\n\r\n # Used as counter variable\r\n count = 0\r\n\r\n # checks whether frames were extracted\r\n success = 1\r\n\r\n while success:\r\n # vidObj object calls read\r\n # function extract frames\r\n success, image = vidObj.read()\r\n\r\n # Saves the frames with frame-count\r\n # cv2.imwrite(\"frame%d.jpg\" % count, image)\r\n images.append(image)\r\n count += 1\r\n return images", "title": "" }, { "docid": "9367d762d8b2859ddc9cfb5f2edaa71d", "score": "0.4803748", "text": "def test_parse_filepath():\n # Given\n path = '/path/to/file.nii.gz'\n # When\n dirname, basename, ext = parse_filepath(path)\n # Then\n assert [dirname, basename, 
ext] == ['/path/to', 'file', 'nii.gz']", "title": "" }, { "docid": "7b0b14e1e5314eb5318c65fc9802e9fb", "score": "0.47905982", "text": "def get_video_parts(video_path):\n parts = video_path.split(os.path.sep)\n filename = parts[2]\n filename_no_ext = filename.split('.')[0]\n classname = parts[1]\n train_or_test = parts[0]\n\n return train_or_test, classname, filename_no_ext, filename", "title": "" }, { "docid": "5b2354c46a47cb9233f6ed9d3636b8b1", "score": "0.4785013", "text": "def parse_module_path(module_path):\n aslist = module_path.split(':', 1)\n if len(aslist) == 1:\n return (aslist[0], [])\n else:\n return (aslist[0], aslist[1].split(','))", "title": "" }, { "docid": "85d1547bb401b417f11e9eda018a3d22", "score": "0.47697225", "text": "def getSegments(self) -> List[ghidra.app.util.bin.format.omf.OmfSegmentHeader]:\n ...", "title": "" }, { "docid": "abf1a464f041c79bd964dced1c8dc039", "score": "0.47663137", "text": "def parse(filepath: str):\n paths, _, svg_attributes_dict = svg.svg2paths2(filepath)\n setsofsubpaths = [p.continuous_subpaths() for p in paths]\n paths = itertools.chain.from_iterable(setsofsubpaths)\n\n svg_attributes = SvgAttributes(svg_attributes_dict)\n if svg_attributes.is_portrait:\n return [SVGPath(path, svg_attributes.scale_factor) for path in paths]\n else:\n return [SVGPathRotatedBy90Degrees(path, svg_attributes.scale_factor, svg_attributes.height) for path in paths]", "title": "" }, { "docid": "520b966c488691321f3197c16bb8788a", "score": "0.47521687", "text": "def parse(cls, path: str) -> List[QuoteModel]:\n ext = path.split(\".\")[-1]\n # First try-except block to catch any exceptions w.r.t.\n # invalid extension.\n try:\n cls.validate_extension(ext)\n except InvalidExtensionError as inv_ext_err:\n print(inv_ext_err)\n else:\n # Second try-except block to catch any exceptions\n # while parsing the file.\n try:\n for ingestor in cls.ingestors:\n if ingestor.can_ingest(path):\n return ingestor.parse(path)\n except Exception:\n print(f\"ParsingError: Error occured while parsing {path}.\")", "title": "" }, { "docid": "8563d693422f86c87b323daf8436f459", "score": "0.47483987", "text": "def get_segments(self, boundaries, audio_file, before_margin=0.1, after_margin=0.1):\n sample_rate, sig_len = self._get_audio_info(audio_file)\n if sample_rate != self.sample_rate:\n raise ValueError('The detected sample rate is different from that set in the hparam file')\n segments = []\n for i in range(boundaries.shape[0]):\n beg_sample = boundaries[i, 0] * sample_rate\n end_sample = boundaries[i, 1] * sample_rate\n beg_sample = int(max(0, beg_sample - before_margin * sample_rate))\n end_sample = int(min(sig_len, end_sample + after_margin * sample_rate))\n len_seg = end_sample - beg_sample\n vad_segment, fs = torchaudio.load(audio_file, frame_offset=beg_sample, num_frames=len_seg)\n segments.append(vad_segment)\n return segments", "title": "" }, { "docid": "98b29d6ae12ca3b9326ffc6bfa92b315", "score": "0.47450432", "text": "def _ParseSCCAFile(self, parser_mediator, scca_file):\n format_version = scca_file.format_version\n executable_filename = scca_file.executable_filename\n prefetch_hash = scca_file.prefetch_hash\n run_count = scca_file.run_count\n number_of_volumes = scca_file.number_of_volumes\n\n volume_serial_numbers = []\n volume_device_paths = []\n path_hints = []\n\n for volume_information in iter(scca_file.volumes):\n volume_serial_number = volume_information.serial_number\n volume_device_path = volume_information.device_path\n\n 
volume_serial_numbers.append(volume_serial_number)\n volume_device_paths.append(volume_device_path)\n\n timestamp = volume_information.get_creation_time_as_integer()\n if timestamp:\n event_data = windows_events.WindowsVolumeEventData()\n event_data.creation_time = dfdatetime_filetime.Filetime(\n timestamp=timestamp)\n event_data.device_path = volume_device_path\n event_data.origin = parser_mediator.GetFilename()\n event_data.serial_number = volume_serial_number\n\n parser_mediator.ProduceEventData(event_data)\n\n for filename in iter(scca_file.filenames):\n if not filename:\n continue\n\n if (filename.startswith(volume_device_path) and\n filename.endswith(executable_filename)):\n _, _, path = filename.partition(volume_device_path)\n path_hints.append(path)\n\n mapped_files = []\n for entry_index, file_metrics in enumerate(scca_file.file_metrics_entries):\n mapped_file_string = file_metrics.filename\n if not mapped_file_string:\n parser_mediator.ProduceExtractionWarning(\n 'missing filename for file metrics entry: {0:d}'.format(\n entry_index))\n continue\n\n file_reference = file_metrics.file_reference\n if file_reference:\n mapped_file_string = (\n '{0:s} [{1:d}-{2:d}]').format(\n mapped_file_string, file_reference & 0xffffffffffff,\n file_reference >> 48)\n\n mapped_files.append(mapped_file_string)\n\n event_data = WinPrefetchExecutionEventData()\n event_data.executable = executable_filename\n event_data.mapped_files = mapped_files\n event_data.number_of_volumes = number_of_volumes\n event_data.path_hints = path_hints\n event_data.prefetch_hash = prefetch_hash\n event_data.run_count = run_count\n event_data.version = format_version\n event_data.volume_device_paths = volume_device_paths\n event_data.volume_serial_numbers = volume_serial_numbers\n\n timestamp = scca_file.get_last_run_time_as_integer(0)\n if timestamp:\n event_data.last_run_time = dfdatetime_filetime.Filetime(\n timestamp=timestamp)\n\n # Check for the 7 older last run time values available since\n # format version 26.\n if format_version >= 26:\n previous_run_times = []\n for last_run_time_index in range(1, 8):\n timestamp = scca_file.get_last_run_time_as_integer(last_run_time_index)\n if timestamp:\n date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)\n previous_run_times.append(date_time)\n\n if previous_run_times:\n event_data.previous_run_times = previous_run_times\n\n parser_mediator.ProduceEventData(event_data)", "title": "" }, { "docid": "078b0965961de34af3dcbf38bb091cb5", "score": "0.47448736", "text": "def parse(line):\n try:\n url_start = line.index(\" URL:\") + 5\n url_end = line.index(\" [\", url_start)\n url = line[url_start:url_end]\n filepath_start = line.index('-> \"') + 4\n filepath_end = line.index('\" ', filepath_start)\n filepath = line[filepath_start:filepath_end]\n return (url, filepath)\n except ValueError:\n return None", "title": "" }, { "docid": "31209e0164f73d988d1a293455509995", "score": "0.47420913", "text": "def parse_path(path):\n \n base_name = os.path.splitext( os.path.basename(path) )[0] \n splits = base_name.split(\"_\")\n \n signature = splits[0]\n count = splits[2] \n good = splits[1] == \"g\"\n \n return signature, count, good", "title": "" }, { "docid": "f40c47a119791e6683dd7a578e11ae9d", "score": "0.47409058", "text": "def __parse_files(self):\n parsed_files = []\n fractures = []\n total_length = 0\n for file in self.files:\n file_length = file[b'length']\n file_path = file[b'path']\n if b'md5sum' in file:\n file_md5sum = file[b'md5sum']\n parsed_files.append({b'length': 
file_length, b'path': file_path, b'md5sum': file_md5sum})\n else:\n parsed_files.append({b'length': file_length, b'path': file_path})\n\n total_length += file_length\n fractures.append(total_length)\n # print(total_length)\n\n return parsed_files, total_length, fractures", "title": "" }, { "docid": "c6e917068495833d48625feffa531faa", "score": "0.47385797", "text": "def split(src):\n if \"/\" in src:\n return src.rsplit(\"/\", maxsplit=1)\n else:\n # Interpret as path to file in current directory.\n return \"./\", src", "title": "" }, { "docid": "fcf2590b941bc9d86a343d83971c01f0", "score": "0.47325078", "text": "def splitPath(self, path):\n split = path.split(',')\n # Strip the individual items of white space and\n # values given after '=' sign.\n for item in split:\n item_index = split.index(item)\n item = item.strip()\n eq_index = item.find('=')\n if eq_index != -1:\n item = item[:eq_index+1]\n split[item_index] = item\n return split", "title": "" }, { "docid": "ea229d20d97d78560f9a25f0349a777f", "score": "0.47318658", "text": "def split(input_file):\n frames = []\n wave = thinkdsp.read_wave(input_file)\n\n # Some values that we might need\n totframes = len(wave.ts)\n framerate = wave.framerate\n length = totframes / framerate\n framelength = samples_per_frame / framerate\n numframes = int(length / framelength)\n\n # print('Framelength:', framelength, 'seconds')\n # print('Frames to calcuate:', numframes)\n\n for index in range(numframes):\n # print(index)\n currentstart = index * framelength\n frames.append(wave.segment(start=currentstart, duration=framelength))\n\n return frames, framelength", "title": "" }, { "docid": "55ca521e4f80aa5faed7e56931188d6c", "score": "0.47306952", "text": "def parse_uri_path(self, path):\n options = {}\n db, *_ = path[1:].split(\"/\")\n if db:\n options[\"db\"] = db\n return options", "title": "" }, { "docid": "b705e847f1bdeef28d0ba758c0f4d025", "score": "0.47251388", "text": "def parse_simout_filepath(filepath):\n split = filepath.split(\"/\")\n run = split[-2]\n opt = split[-3]\n benchmark = split[-4]\n cache_size = split[-5]\n fname = split[-1]\n slice_no = re.search(\"(?<=slice.)[0-9]+\", fname).group(0)\n return (benchmark, opt, run, slice_no, cache_size)", "title": "" }, { "docid": "da2089bd28f8c025c409b0ef36117c04", "score": "0.47226387", "text": "def parse_campaign_asset_path(path: str) -> Dict[str, str]:\n m = re.match(\n r\"^customers/(?P<customer_id>.+?)/campaignAssets/(?P<campaign_id>.+?)~(?P<asset_id>.+?)~(?P<field_type>.+?)$\",\n path,\n )\n return m.groupdict() if m else {}", "title": "" }, { "docid": "b688fc24a48f0fbdc32a5162897c6626", "score": "0.47190553", "text": "def split(self, path: AnyStr) -> Tuple[AnyStr, AnyStr]:\n return self.filesystem.splitpath(path)", "title": "" }, { "docid": "493c01dad3982c90d1913097bf7ef24f", "score": "0.47188506", "text": "def parse(self, message):\n messages = message.split(' ')\n counter = 0\n segment = ''\n segments = []\n while counter < len(messages):\n counter += 1\n l_s = len(segment)\n chunk = messages[counter - 1]\n segment = chunk if l_s == 0 else '{} {}'.format(segment, chunk)\n if (len(messages[counter]) + l_s) >= lcd_columns:\n # Save string segment and reset for next loop\n segments.append(segment + self.ext(lcd_columns - l_s))\n segment = ''\n # Append final segment:\n segments.append(segment + self.ext(lcd_columns - l_s))\n return segments", "title": "" }, { "docid": "b0dd2ae293dde22a78f83c4855a64804", "score": "0.471839", "text": "def split_chapters(paths):\n filename = glob.glob(paths[0] + 
'/*')[0]\n completed_process = subprocess.run(f'ffprobe -i \"{filename}\" -print_format json -show_chapters -loglevel error', stdout=subprocess.PIPE, shell=True)\n chapter_info = json.loads(completed_process.stdout)['chapters']\n if not chapter_info:\n print('Chapters Not Found')\n rmdir(paths[1])\n else:\n _split_file(filename, [(float(entry['start_time']), float(entry['end_time']), join(paths[1], entry['tags']['title'] + '.mp3')) for entry in chapter_info])", "title": "" }, { "docid": "5352ba0d952787236e8338cf830520c2", "score": "0.47076356", "text": "def _parse(self, file_path):\n print('Reading input file...', end='\\r')\n extension = _get_file_extension(file_path)\n parser = self.get_parser(extension)\n parsed_data = _cast(parser(file_path))\n return parsed_data, extension", "title": "" }, { "docid": "2aca0d285703ee00fe8a8892ab5b564d", "score": "0.470492", "text": "def _split_path(path, seps=PATH_SEPS):\n if not path:\n return []\n\n for sep in seps:\n if sep in path:\n if path == sep: # Special case, '/' or '.' only.\n return ['']\n return [x for x in path.split(sep) if x]\n\n return [path]", "title": "" }, { "docid": "64b203fc65ae18966baedf2eb2cc0b17", "score": "0.47011355", "text": "def split_path(path):\n base, has_ext = split(path)\n name, ext = splitext(has_ext)\n return base, name, ext", "title": "" }, { "docid": "1f68199bab8d8d295ff1515f3389bd50", "score": "0.47005865", "text": "def parse_segs(fid, numseg):\n # temporary storage array\n D = []\n\n # answer array\n R = []\n\n for idx in range(numseg+1):\n aline = fid.readline()\n D.append(parse_segs_data(aline))\n if D[idx][0] == -3:\n R.append((1, D[idx][3], pathlength(D, D[idx][2], D[idx][1])))\n if D[idx][0] == -4:\n R.append((0, D[idx][3], pathlength(D, D[idx][2], D[idx][1])))\n \n return R", "title": "" }, { "docid": "936c078b4657b508328262235237a289", "score": "0.4697543", "text": "def parse_parts(self, parts):\n _, _, parsed = super().parse_parts(parts)\n if len(parsed) > 0 and parsed[0] == \"/\":\n parsed = parsed[1:]\n return \"\", \"\", parsed", "title": "" }, { "docid": "6411dbf75709116cfaa5c595edbc4702", "score": "0.46675193", "text": "def parse(self, filename, encoding=None, href=None, media=None):\r\n if not encoding:\r\n encoding = 'css'\r\n return self.parseString(codecs.open(filename, 'r', encoding).read(),\r\n href=href, media=media)", "title": "" }, { "docid": "097fc681c898727f8e726839dee3e7e9", "score": "0.46597257", "text": "def parse_metadata_from_path(path):\n\n re_name_split = r'.*/newspapers/newspapers/fin/(.{4})/(.{7,9})/.{7,9}\\_.{4}\\-(..)\\-(..)'\n\n split = re.search(re_name_split, path)\n\n year, issn, month, day = split.groups()\n return year, month, day, issn", "title": "" }, { "docid": "d76c58f193d47b4fd8c15682555bdcb4", "score": "0.4659066", "text": "def get_audio_seg_filenames(audio_dir, segment_dir, p=None):\n\ttemp_filenames = [i for i in sorted(os.listdir(audio_dir)) if \\\n\t\t\t_is_audio_file(i)]\n\taudio_filenames = [os.path.join(audio_dir, i) for i in temp_filenames]\n\ttemp_filenames = [i[:-4] + '.txt' for i in temp_filenames]\n\tseg_filenames = [os.path.join(segment_dir, i) for i in temp_filenames]\n\treturn audio_filenames, seg_filenames", "title": "" }, { "docid": "d28f247b7cae0d52d8b88d2772c77fa3", "score": "0.46307933", "text": "def parse_files(self):\n pass", "title": "" }, { "docid": "7c30eb1438efb2b40c0763677d765c08", "score": "0.4624152", "text": "def importable_paths(path):\n chunks = [chunk\n for chunk in path.split(\"/\")\n if chunk and (chunk[0].isalpha() or chunk[0] == 
\"_\")]\n if chunks:\n chunks[-1] = chunks[-1].rsplit('.', 1)[0]\n return chunks", "title": "" }, { "docid": "eb0affbf620567de141e06935021695a", "score": "0.46221396", "text": "def fetch_media(song):\n return song.get('media')", "title": "" }, { "docid": "3f7fb2c2005ab053c18810bda2ea114a", "score": "0.46147782", "text": "def _parse_segments(self):\n reader = csv.reader(open(self._segment_file, 'rU'),\n delimiter='\\t')\n for row in reader:\n if reader.line_num == 1: #skip header\n continue\n sql = '''INSERT INTO segments\n (id, multiplicon, genome, list, first, last, ord)\n VALUES (?,?,?,?,?,?,?)'''\n self._dbconn.execute(sql, row)\n self._dbconn.commit()", "title": "" }, { "docid": "ae061a546f328c2c24b983a1c488e390", "score": "0.46123645", "text": "def resolve_media_files(document, resource):\n for field in resource_media_fields(document, resource):\n if isinstance(document[field], list):\n resolved_list = []\n for file_id in document[field]:\n resolved_list.append(resolve_one_media(file_id, resource))\n document[field] = resolved_list\n else:\n document[field] = resolve_one_media(document[field], resource)", "title": "" }, { "docid": "fd94ced41c59f23ab6f175adcf835441", "score": "0.46119153", "text": "def parse_path(path):\n\n parts = path.split('/')\n\n if parts[1] != 'api' or len(parts) != 5:\n return None\n\n return {'version': parts[2],\n 'resource': parts[3],\n 'action': parts[4]}", "title": "" }, { "docid": "6fcdb8b13e6185138229229b47d7c831", "score": "0.46079665", "text": "def retrieve_segments(track):\n\n track_npz = seg_npz / (track.stem + '.npz')\n if not track_npz.exists():\n track_segs = utility.load_audio_segments(track)\n track_npz.touch()\n np.savez(track_npz, *track_segs)\n\n return np.load(track_npz)", "title": "" }, { "docid": "76847379b64302b82214b587cdc98232", "score": "0.46075633", "text": "def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False):\r\n if not maxsegs:\r\n maxsegs = minsegs\r\n if minsegs > maxsegs:\r\n raise ValueError('minsegs > maxsegs: %d > %d' % (minsegs, maxsegs))\r\n if rest_with_last:\r\n segs = path.split('/', maxsegs)\r\n minsegs += 1\r\n maxsegs += 1\r\n count = len(segs)\r\n if (segs[0] or count < minsegs or count > maxsegs or\r\n '' in segs[1:minsegs]):\r\n raise ValueError('Invalid path: %s' % quote(path))\r\n else:\r\n minsegs += 1\r\n maxsegs += 1\r\n segs = path.split('/', maxsegs)\r\n count = len(segs)\r\n if (segs[0] or count < minsegs or count > maxsegs + 1 or\r\n '' in segs[1:minsegs] or\r\n (count == maxsegs + 1 and segs[maxsegs])):\r\n raise ValueError('Invalid path: %s' % quote(path))\r\n segs = segs[1:maxsegs]\r\n segs.extend([None] * (maxsegs - 1 - len(segs)))\r\n return segs", "title": "" }, { "docid": "714d61a238a5e3522668bb4302453d11", "score": "0.46074203", "text": "def _im_and_segm(self, im_file, segm_dir):\n #pdb.set_trace()\n im = sm.Image(im_file)\n segm_list = []\n file_name = os.path.basename(im_file)\n print file_name\n im_segm = sm.Image(os.path.join(segm_dir, file_name))\n self.segm_post_process(im_segm)\n segm_list.append(im_segm)\n return im, segm_list, file_name", "title": "" }, { "docid": "e086e640eac867545cbc9b16b03484c1", "score": "0.46065676", "text": "def splitpath(path):\n\n head, tail = os.path.split(path)\n if tail == '':\n return head,\n elif head == '':\n return tail,\n else:\n return splitpath(head) + (tail,)", "title": "" }, { "docid": "092cdaa90c350ff8db9f6ea712fac00a", "score": "0.46034354", "text": "def SplitPath(self, path):\n # Split the path with the path separator and remove empty 
path segments.\n return list(filter(None, path.split(os.path.sep)))", "title": "" }, { "docid": "815f8936feeeb6e26864822ce6432fd5", "score": "0.46002877", "text": "def parse_directory(self):\n files = []\n for x in pathlib.Path(self.path).glob('*.m*'):\n if x.is_file():\n x = str(x) # tinytag needs str, not PosixPath\n files.append(x)\n return files", "title": "" }, { "docid": "8f52fd8764ef8913a15b96f9bb8c8fd5", "score": "0.4599362", "text": "def parse_ad_group_path(path: str) -> Dict[str, str]:\n m = re.match(\n r\"^customers/(?P<customer_id>.+?)/adGroups/(?P<ad_group_id>.+?)$\",\n path,\n )\n return m.groupdict() if m else {}", "title": "" }, { "docid": "04ffbe9bd356afe1f77f53a1d0544cc0", "score": "0.45965385", "text": "def split_path(path):\n if path in '/':\n return '', ''\n path = relpath(path)\n if '/' not in path:\n return path, ''\n return path.split('/', 1)", "title": "" }, { "docid": "58e4bec1d4010ffc3355dac1a719ccb8", "score": "0.45908687", "text": "def parse_mime_type(mime_type):\r\n parts = mime_type.split(\";\")\r\n params = dict([tuple([s.strip() for s in param.split(\"=\")])\\\r\n for param in parts[1:] ])\r\n (type, subtype) = parts[0].split(\"/\")\r\n return (type.strip(), subtype.strip(), params)", "title": "" }, { "docid": "f0c5d082d420cf3b8458f59cd533469a", "score": "0.45839995", "text": "def get_plugin_media_path(instance, filename):\n return instance.get_media_path(filename)", "title": "" } ]
7cdb93f03454b8ff65070a1611850f65
Distance from a proposed point to its nearest neighbour (proposed point)
[ { "docid": "2367061b483d48ffb9ef0488832fb63c", "score": "0.6574492", "text": "def ppn2_distance_to_nearest_neighbour(self):\n # FIXME vectorize loop\n distances = []\n for point in self.im_proposals:\n distances.append(np.partition(np.sum(np.power(point - self.im_proposals, 2), axis=1), 2)[1])\n bins = np.linspace(0, 100, 100)\n self.make_plot(\n distances,\n bins,\n xlabel=\"distance to nearest neighbour\",\n ylabel=\"#proposals\",\n filename='distance_to_nearest_neighbour.png'\n )\n return distances", "title": "" } ]
[ { "docid": "f3a2e04fa10b7f690c79eb3d0e02849a", "score": "0.80314505", "text": "def distance_to_point(self, point):", "title": "" }, { "docid": "2ee7ca52fefe409ffc2a9594fb14a392", "score": "0.7305967", "text": "def distance(self, p):\n return ((self.x - p.x) ** 2 + (self.y - p.y) ** 2) ** 0.5", "title": "" }, { "docid": "7fd0f91e38f1db51346d47df00e6504a", "score": "0.71589077", "text": "def PointDistance(*args):\r\n return _pynewton.CollisionGeometry_PointDistance(*args)", "title": "" }, { "docid": "ecca843a9f94e0a410f79053b96dc9c0", "score": "0.71313375", "text": "def distance(self, point):\n x_diff = self.x - point.x\n y_diff = self.y - point.y\n\n return sqrt(x_diff * x_diff + y_diff * y_diff)", "title": "" }, { "docid": "aba49ea8aafe3739fbcdf2f5d4d92bfb", "score": "0.7131221", "text": "def point_point_distance(x, y):\n\tassert len(x) == len(y) == 2\n\treturn ((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2) ** 0.5", "title": "" }, { "docid": "6a7ecce87cedcf29385beec2febe94d6", "score": "0.71158856", "text": "def get_nearest_neighbour(self, pointli):\n # Assumes that only valid (projectable, within shell etc) points\n # are in pointli\n mindist = float(sys.maxsize)\n minp = Point()\n for p in pointli:\n if p is not self:\n d = self.dist(p)\n if d < mindist:\n mindist = d\n minp = p\n if not mindist < float(sys.maxsize):\n return None\n else:\n self.nearest_neighbour_dist = mindist\n self.nearest_neighbour_point = minp\n return self.nearest_neighbour_dist", "title": "" }, { "docid": "dd47ac19cdb021392bff12c471dbb044", "score": "0.707952", "text": "def point_to_point_distance(p1, p2):\n dx = p1.x - p2.x\n dy = p1.y - p2.y\n return math.sqrt(dx**2 + dy**2)", "title": "" }, { "docid": "3f4957fbc4cd2e6f98918464b98c108f", "score": "0.7027314", "text": "def pointDistance( x1, y1, x2, y2 ):\n\tdist = ( ( float(x2) - float(x1) ) ** 2 + ( float(y2) - float(y1) ) **2 ) ** 0.5\n\treturn dist", "title": "" }, { "docid": "e95df59f5e3c28fcd0e596fa43fcc7b5", "score": "0.70109475", "text": "def estimated_distance_to_destination(self):\n if self.is_at_destination:\n return 0\n\n position = self.location()\n delta_x = position.x_position - self.destination.x_position\n delta_y = position.y_position - self.destination.y_position\n return sqrt(delta_x**2 + delta_y**2)", "title": "" }, { "docid": "9dbfcdb954669a40431dd1ec525bf1ba", "score": "0.6999854", "text": "def calculate_distance(self, other_point):\n\n return math.sqrt((self.x - other_point.x) ** 2 + (self.y - other_point.y) ** 2)", "title": "" }, { "docid": "59a0b38f0a54fd6a03a409cf1e584b52", "score": "0.69813", "text": "def distance(self):\n\n\t\treturn np.sqrt((self.x_kp1 - self.x)**2 + \n\t\t\t\t\t (self.y_kp1 - self.y)**2 )", "title": "" }, { "docid": "4219c86935eaa9d2ae703289ebbd24cb", "score": "0.6975208", "text": "def dist(p1: Point, p2: Point) -> float:\n return math.sqrt((p2.x - p1.x) ** 2 + (p2.y - p1.y) ** 2)", "title": "" }, { "docid": "4dc518a571a91c5013c88c0a7d94a425", "score": "0.69596314", "text": "def get_distance(self, p):\n assert len(p) == len(self.x)\n return (sum([(i-j)**2 for i, j in zip(p, self.x)]))**0.5", "title": "" }, { "docid": "7fd582e8853979cc00aa3134c1f00dc6", "score": "0.69469345", "text": "def direct2dDistance(self, point):\n if not isinstance(point, MapPoint): return 0.0\n return ((self.x-point.x)**2 + (self.y-point.y)**2)**(0.5) # simple distance formula", "title": "" }, { "docid": "2e3b551131d8edac39e226846f4065be", "score": "0.6929922", "text": "def distance_from_point(self, other):\n # p1, *_, p2 = self.points\n p1 = 
self.pixels[0]\n x1, y1 = p1.col, p1.row\n p2 = self.pixels[-1]\n x2, y2 = p2.col, p2.row\n\n x0, y0 = other.col, other.row\n\n top = abs((y2-y1)*x0 - (x2-x1)*y0 + x2*y1-y2*x1)\n bottom = sqrt((y2-y1)**2+(x2-x1)**2)\n\n return top/bottom", "title": "" }, { "docid": "e3e9a8098228a9e5ff4f378c14de95c8", "score": "0.6915623", "text": "def distanceFrom(self, point = (-1, -1)):\n if (point[0] == -1 or point[1] == -1):\n point = np.array(self.image.size()) / 2\n return spsd.euclidean(point, [self.x, self.y])", "title": "" }, { "docid": "7f8c41b5fe3d269ff16ad7b52f0dcea0", "score": "0.69106966", "text": "def distance(self, other: \"Point\") -> float:\n #math.hypot(self.x - other.x, self.y-other.y)\n return math.sqrt((self.x - other.x)**2 + (self.y -other.y)**2)", "title": "" }, { "docid": "1d9415c70b5122bd14ec8020eacd7630", "score": "0.690273", "text": "def distance(self,pt):\n return np.dot(self.normal,pt)-np.dot(self.normal,self.centroid)", "title": "" }, { "docid": "5f47b5cb103ac1ba8d9619910c451659", "score": "0.6897161", "text": "def calculate_distance(self, other: \"Point\") -> float:\n return math.hypot(self.x - other.x, self.y - other.y)", "title": "" }, { "docid": "6c378dea5a699a72607407efa6956f5c", "score": "0.68824846", "text": "def distance(p1: Point, p2: Point) -> float:\n return math.sqrt((p1.x - p2.x)**2 + (p1.y - p2.y)**2)", "title": "" }, { "docid": "9e2e2f33686af5361501ba4c1233892b", "score": "0.68818855", "text": "def distToClosestPoint(self, pixel: tuple):\r\n if len(pixel) == 3:\r\n distance = self._width * self._heightZ\r\n for point in self._points:\r\n dX = pixel[0] - point[0]\r\n dY = pixel[1] - point[1]\r\n dZ = pixel[2] - point[2]\r\n distance = min(distance, sqrt(dX ** 2 + dZ ** 2 + dY ** 2)) \r\n else:\r\n distance = self._width * self._heightZ\r\n for point in self._points:\r\n dX = pixel[0] - point[0]\r\n dZ = pixel[1] - point[2]\r\n distance = min(distance, sqrt(dX ** 2 + dZ ** 2))\r\n return int(distance)", "title": "" }, { "docid": "b4f919e2283866436f21b968bb31d8ee", "score": "0.68610597", "text": "def distanceFrom(self, point = (-1, -1)):\n if (point[0] == -1 or point[1] == -1 and len(self)):\n point = self[0].image.size()\n \n return spsd.cdist(self.coordinates(), [point])[:,0]", "title": "" }, { "docid": "0328cc3ed67c6796df9a9347c1ff6c63", "score": "0.68252766", "text": "def distanceFromPoint(self,p):\n p = asarray(p).reshape((3))\n d = self-p\n return sqrt(sum(d*d,-1))", "title": "" }, { "docid": "054551f7f09463007ba32b14941783e9", "score": "0.6816136", "text": "def pointDistance(p1,p2):\n return math.sqrt(pow(p1[0]-p2[0],2)+pow(p1[1]-p2[1],2))", "title": "" }, { "docid": "031c31a6c5d540627606e757e23425bb", "score": "0.68137395", "text": "def get_distance(self, other):\n return math.sqrt((self.x - other.x) ** 2 + (self.y - other.y) ** 2)", "title": "" }, { "docid": "74dc6e9092c027c930f20c06c5365ecb", "score": "0.68137014", "text": "def euclidean_distance(self, point: List[int]) -> float:\n return sqrt(point[0] ** 2 + point[1] ** 2)", "title": "" }, { "docid": "6b82b29ffeabdfa27d719a3586572c9e", "score": "0.6758727", "text": "def distance(self, another_point):\r\n\t\tif(type(another_point) is Point):\r\n\t\t\tdistance = math.sqrt((another_point.x - self.x)**2 + (another_point.y - self.y)**2)\r\n\t\telse:\r\n\t\t\traise TypeError(\"Arguement 2 must be Point, not \" + type(another_point) + \"!\")\r\n\r\n\t\treturn distance", "title": "" }, { "docid": "12683a9492c248ef487eaa1ef2c6cfa0", "score": "0.6758534", "text": "def DistanceFromPoint(nodes,pt):\n val = 
Fn.distanceFromPoint(pt)", "title": "" }, { "docid": "d6fcc91e56e6fdea828ebd507d50893f", "score": "0.6755509", "text": "def point_dist(a, b):\n return ((a[0]-b[0]) ** 2 + (a[1]-b[1]) ** 2) ** 0.5", "title": "" }, { "docid": "2036e0faef438091b6c1bef89d01673c", "score": "0.6754175", "text": "def _distance(self, point1, point2):\n return abs(point1[0] - point2[0]) + abs(point1[1] - point2[1])", "title": "" }, { "docid": "e75db45287b4792e650f296ba7687a5c", "score": "0.6753394", "text": "def _distance(line, point):\n proj_pos = line.project(point, normalized=True)\n proj_point = line.interpolate(proj_pos, normalized=True)\n dist = proj_point.distance(point)\n return dist", "title": "" }, { "docid": "fb5459b0e30b326754d2c6064c79e808", "score": "0.67533743", "text": "def get_distance(point_1, point_2):\n dist = np.sqrt((point_1.pos_x - point_2.pos_x) ** 2 + (\n point_1.pos_y - point_2.pos_y) ** 2)\n return dist", "title": "" }, { "docid": "4b74c2f28625dd09552f367148f1bec6", "score": "0.6749296", "text": "def getDistance(x, y):\r\n return sqrt((x ** 2) + (y ** 2))", "title": "" }, { "docid": "0d3248b9c651a8239992bb90735609f3", "score": "0.67445344", "text": "def getDistance(point1, point2):\n return vincenty(point1, point2).miles", "title": "" }, { "docid": "936ae9f09c190e6eed697a3a512e5d1f", "score": "0.673572", "text": "def distance_to(self, other):\n dx = other.x - self.x\n dy = other.y - self.y\n return math.sqrt(dx ** 2 + dy ** 2)", "title": "" }, { "docid": "caa51e8b69a03fd7cfd882bfd38ea0a1", "score": "0.673538", "text": "def distance(self, point):\n assert len(point) == a6dataset.Dataset.getDimension(self._dataset)\n sum = 0\n for x in range(a6dataset.Dataset.getDimension(self._dataset)):\n diff = point[x] - self._centroid[x]\n diff_squared = math.pow(diff,2)\n sum += diff_squared\n euclidean = math.sqrt(sum)\n return euclidean", "title": "" }, { "docid": "523741f9b61042a10d2ead3b7ea0d37a", "score": "0.6717078", "text": "def get_dist(self):\r\n return sqrt(self.x ** 2 + self.y ** 2)", "title": "" }, { "docid": "283fc018c643d23e681c66d45cdf7b91", "score": "0.67122364", "text": "def distance_to(self, other):\n dx = other.x - self.x\n dy = other.y - self.y\n return math.sqrt(dx**2 + dy**2)", "title": "" }, { "docid": "3f739227d27bbecfc8fe50ae5be8d000", "score": "0.6706389", "text": "def distance(self, destination):\n run = destination.x - self.x\n rise = destination.y - self.y\n return math.sqrt(run**2 + rise**2)", "title": "" }, { "docid": "7ffa1de668f630ed46c217327db34254", "score": "0.6694731", "text": "def distance(p1, p2):\n x = p1.x - p2.x\n y = p1.y - p2.y \n return (x * x + y * y)", "title": "" }, { "docid": "a13dbc39fa986a1e339a3006a143a2ce", "score": "0.66871226", "text": "def _get_closest_point_distance_brute(self, points):\n point_count = len(points)\n dist = sys.float_info.max\n for i in range(1, point_count):\n for j in range(i):\n dist = min(dist, self._distance_between(points[i],\n points[j]))\n return dist", "title": "" }, { "docid": "ed4b8ce7334ab9d043a851d5af7d6225", "score": "0.667693", "text": "def distance_to(self, other_station):\n xd = self.x - other_station.x\n yd = self.y - other_station.y\n return sqrt((xd * xd) + (yd * yd))\n pass", "title": "" }, { "docid": "23569462ecead2040f4ec2a5bac6b60a", "score": "0.6675885", "text": "def dist(self,p):\n if len(self.points)==0:\n return 1e6\n dists=[self.euclidDist(p,point) for point in self.points]\n return min(dists)", "title": "" }, { "docid": "e24b0a1dd9f92dc8dddb37b6e912c775", "score": "0.6672284", "text": "def 
get_distance_between_two_points(self, one, two):\n dx = one.x - two.x\n dy = one.y - two.y\n return math.sqrt(dx * dx + dy * dy)", "title": "" }, { "docid": "6bd942442b002c3c5193959e6f2e9b3e", "score": "0.66488224", "text": "def calcDistance(point1, point2):\r\n x1, y1 = point1.getX(), point1.getY()\r\n x2, y2 = point2.getX(), point2.getY()\r\n return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)", "title": "" }, { "docid": "87a2ff55137466b4612a28d1f3f4acb1", "score": "0.6646756", "text": "def _get_distance(self):\n return self.dist_", "title": "" }, { "docid": "c0a7395de9c6a74ce79f4c5596032766", "score": "0.66427374", "text": "def dist(self, x, y):\n pass", "title": "" }, { "docid": "fa3225b958229aba486229148b727c5d", "score": "0.6638534", "text": "def get_distance_from_point(self, pstart, p_end):\n a = numpy.array((pstart.x, pstart.y, pstart.z))\n b = numpy.array((p_end.x, p_end.y, p_end.z))\n \n distance = numpy.linalg.norm(a - b)\n \n return distance", "title": "" }, { "docid": "ad1ad93637e302928473516fcd2a6a38", "score": "0.66374236", "text": "def get_distance(point_1, point_2):\n return math.sqrt(\n math.pow(point_2[0] - point_1[0], 2)\n +\n math.pow(point_2[1] - point_1[1], 2)\n )", "title": "" }, { "docid": "df7c4971728edfca878a78c8a1d77117", "score": "0.6633415", "text": "def get_distance(self, pt1, pt2):\r\n return np.sqrt(np.power(pt1[0] - pt2[0], 2) + np.power(pt1[1] - pt2[1], 2))", "title": "" }, { "docid": "6ad6da72a322dd69073223ffd50b6a49", "score": "0.66330475", "text": "def distance_formula(self, point):\n distance = (((point.x - self.end.x)**2) + (point.y - self.end.y)**2)**.5\n\n return distance", "title": "" }, { "docid": "37ac359556e7c9dd03d9d681cacb4af0", "score": "0.66327953", "text": "def point_dist(point1, point2):\n diff = (point1[0] - point2[0], point1[1] - point2[1])\n return diff[0] * diff[0] + diff[1] * diff[1]", "title": "" }, { "docid": "e0658f138922c44872525ff82f37894c", "score": "0.6620431", "text": "def dist(point_1: Tuple[int, int], point_2: Tuple[int, int]) -> float:\n return np.sqrt((point_1[0] - point_2[0]) ** 2 + (point_1[1] - point_2[1]) ** 2)", "title": "" }, { "docid": "8634045cf25dbb07d440720c4119bc64", "score": "0.66097385", "text": "def compute_distance(point, center):\n import numpy as np\n if isinstance(point, SparseVector) | isinstance(center, SparseVector):\n p_d = point.toArray()\n c_d = center.toArray()\n return float(np.linalg.norm(p_d - c_d, ord=2))\n else:\n return float(np.linalg.norm(point - center, ord=2))", "title": "" }, { "docid": "7c2cfc11ff8539b398899222ff61d901", "score": "0.659397", "text": "def getDistance(point1: Tuple[int, int], point2: Tuple[int, int]) -> float:\n try:\n q = point1[0] - point2[0]\n w = point1[1] - point2[1]\n r = q ** 2 + w ** 2\n return Sqrt(r)\n except Exception as e:\n KDS.Logging.AutoError(e)\n return 0", "title": "" }, { "docid": "42ebe45ada088de58023696409674df5", "score": "0.6582625", "text": "def distance_from_origin(self):\n return (self.x ** 2 + self.y ** 2) ** 0.5", "title": "" }, { "docid": "21ba97948820e9a659c604f525fce1f0", "score": "0.65823054", "text": "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "title": "" }, { "docid": "21ba97948820e9a659c604f525fce1f0", "score": "0.65823054", "text": "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "title": "" }, { "docid": "d8ac31daa85e247d9208dd4e1443e8c2", "score": "0.65769446", "text": "def getDistanceToNeighbor(self, fromNode, neighbor):\n\t\tedge = 
fromNode.getEdgeTo(neighbor)\n\t\tassert(fromNode in self.pathDistance.keys())\n\t\treturn self.pathDistance[fromNode] + edge.getCost()", "title": "" }, { "docid": "f4514ca3ca24055618e6dc02efb71426", "score": "0.6571196", "text": "def pointdistance(pointa, pointb):\n return distance(pointa[0], pointa[1], pointb[0], pointb[1])", "title": "" }, { "docid": "7153de4fe19ca4d487a7952ff19e66cb", "score": "0.65652424", "text": "def distance(self,q):\n return round(sqrt((self.x-q.x)**2+(self.y-q.y)**2))", "title": "" }, { "docid": "463313830b6161b27f5c765d0d4b69b9", "score": "0.6559456", "text": "def getDistanceToViewPoint(self, *args):\n return _osg.NodeVisitor_getDistanceToViewPoint(self, *args)", "title": "" }, { "docid": "a9d221b2333a6967f36ebc4c8cf1b973", "score": "0.6546633", "text": "def _getClosestWaypoint(self, point):\n return min(self.graph, key = lambda k: self._dist2(point, self.graph[k].location))", "title": "" }, { "docid": "32b37e9dc46d94d990c944746bf162db", "score": "0.6540587", "text": "def distance( point1 = (0,0) , point2= (0,0)):\n return math.sqrt((point2[0] - point1[0]) ** 2 + (point2[1] - point1[1])**2 )", "title": "" }, { "docid": "31d748ac94e052536e315aa2afe39fd5", "score": "0.6539546", "text": "def _calc_dist_2d(self, px, py, wp_idx):\n return math.sqrt(\n (px - self.waypoints.waypoints[wp_idx].pose.pose.position.x) ** 2 +\n (py - self.waypoints.waypoints[wp_idx].pose.pose.position.y) ** 2)", "title": "" }, { "docid": "87830ab7ad1d2514cc5bf7d32a4a307e", "score": "0.6536555", "text": "def distance(self):\n return (\n (self.M_PER_LON * (self.dest[0] - self.orig[0])) ** 2\n + (self.M_PER_LAT * (self.dest[1] - self.orig[1])) ** 2\n ) ** 0.5", "title": "" }, { "docid": "87652501dbdac9071f7fcdc000748854", "score": "0.6535702", "text": "def distance(self, pos):\n return math.sqrt((pos.x - self.x) ** 2 + (pos.y - self.y) ** 2)", "title": "" }, { "docid": "6fefef5f6b76bbaa6cac5c771689b1e5", "score": "0.65287113", "text": "def get_nearest_lateral_neighbour(self, pointli):\n # Assumes that only valid (projectable, within shell etc) points\n # are in pointli\n mindist = float(sys.maxsize)\n minp = Point()\n for p in pointli:\n if p is not self:\n d = self.lateral_dist_to_point(p, self.profile.path)\n if d < mindist:\n mindist = d\n minp = p\n if not mindist < float(sys.maxsize):\n return None\n else:\n self.nearest_lateral_neighbour_dist = mindist\n self.nearest_lateral_neighbour_point = minp\n return self.nearest_lateral_neighbour_dist", "title": "" }, { "docid": "264e881c4fa27d09e2d85dad18932a58", "score": "0.6526864", "text": "def distance(pt1, pt2):\n return ((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2) ** 0.5", "title": "" }, { "docid": "01207ce2a7ffa27d91e5f1bc56078247", "score": "0.6519585", "text": "def calc_distance(x1: float, y1: float, x2: float, y2: float) -> float:\n return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5", "title": "" }, { "docid": "e56be8e15aa3f7c2c96949207e625add", "score": "0.651665", "text": "def distance(self, x, y):\n return math.sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "title": "" }, { "docid": "4072ce3a758fc64a250098b45f422d06", "score": "0.65109724", "text": "def _get_distance(self):\n raise NotImplementedError", "title": "" }, { "docid": "5c1fa13bec2791f2365e112e4f95e39a", "score": "0.65055317", "text": "def GetDistance(self):\n return np.sqrt((self.pose.x - self.target_x)**2 + \\\n (self.pose.y - self.target_y)**2)", "title": "" }, { "docid": "1b0e01ba5dde7865bc35527dd547a84c", "score": "0.6489804", "text": "def computeDistance(pt1,pt2):\n 
return math.sqrt((pt2[1]-pt1[1])**2 + (pt2[0]-pt1[0])**2)", "title": "" }, { "docid": "74768be8b59b9542a94878ef9cf3dfc9", "score": "0.64868224", "text": "def _get_distance(self, centroid_idx: int, point_coord=None, point_idx=None) -> float:\n centroid_coord = self._temp_centroids[centroid_idx]\n if point_idx is not None and point_coord is None:\n point_coord = self.training_data[point_idx]\n\n distance = 0\n for idx in range(len(point_coord)):\n distance += pow(point_coord[idx] - centroid_coord[idx], 2)\n return round(pow(distance, 0.5), self.ROUND_AFTER_COMA)", "title": "" }, { "docid": "8b54e8ce8c47b172ec04a85872bc2c1d", "score": "0.64862436", "text": "def closest_point(self, point, maxdist=None):\n return self.xyz", "title": "" }, { "docid": "9bfbb34c6f112439eb0ee19663c39fc2", "score": "0.64813703", "text": "def distance(point_1=(0, 0), point_2=(0, 0)):\n return math.sqrt((point_1[0] - point_2[0]) ** 2 + (point_1[1] - point_2[1]) ** 2)", "title": "" }, { "docid": "57e9b534b566a3e0d2565987f9b21fd2", "score": "0.6481139", "text": "def dist(self, person):\n distance = math.sqrt(\n (self.position[0] - person.position[0])**2 + (self.position[1] - person.position[1])**2)\n return distance", "title": "" }, { "docid": "9dceb8b839557075b439e82c2be877fe", "score": "0.6480697", "text": "def _distance(self, x, y):\r\n if self.algorithm == 'gaussian':\r\n return self._euclidean_distance(x, y)\r\n else:\r\n return self._widx_distance(x, y)", "title": "" }, { "docid": "89f4299e461e84b2c311f8491b32db88", "score": "0.64795756", "text": "def distance(self, ray):\r\n r = self.center - ray.p\r\n top = np.dot(self.normal, r)\r\n bottom = np.dot(ray.k, self.normal)\r\n if np.abs(top) < eps or np.abs(bottom) < eps:\r\n return float('inf')\r\n\r\n d = top / bottom\r\n p = ray.p + d * ray.k\r\n d = d if self.isIntersect(p) else float('inf')\r\n if self.passes:\r\n self.passes -= 1\r\n return float('inf')\r\n else:\r\n return d", "title": "" }, { "docid": "1cab8b335481b143ff69242ef8050d36", "score": "0.64735156", "text": "def _calculate_distance(self,proposed):\n # simulate the system\n rates = parameterise_rates(self.rate_funcs,proposed)\n stop_time = self.obs[-1][0]\n init_state = self.obs[0][1:]\n sample_trace = gillespie(rates,stop_time,init_state,self.updates)\n # get the distance according to the error metric specified\n return self.dist(sample_trace,self.obs)", "title": "" }, { "docid": "48828d2671e4c56e57871cc25449561c", "score": "0.64733195", "text": "def distance(point1, point2):\n return np.sqrt(np.sum((point1 - point2)**2))", "title": "" }, { "docid": "48828d2671e4c56e57871cc25449561c", "score": "0.64733195", "text": "def distance(point1, point2):\n return np.sqrt(np.sum((point1 - point2)**2))", "title": "" }, { "docid": "241acb64f4257cb0507d012616b5d942", "score": "0.646772", "text": "def get_distance(point_1, point_2):\n delta = point_1 - point_2\n distance = math.sqrt(numpy.dot(delta.transpose(), delta).item())\n\n return distance", "title": "" }, { "docid": "11a06eb9af23d39ba98ec589adfc276f", "score": "0.6465166", "text": "def distance(self):\n return float()", "title": "" }, { "docid": "36a0e577f1e465eeda199c1a96f3dade", "score": "0.646208", "text": "def distance(x1, y1, x2, y2):\n return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5", "title": "" }, { "docid": "f07495b5bc5cd9def9664b011cb9409e", "score": "0.6461339", "text": "def dist(self,x,y):\n xx=self.pZn(x)\n yy=self.pZn(y)\n if (yy<xx): xx,yy=yy,xx\n d=yy-xx # ecart direct\n dd=xx+(self.n-yy) # ecart par les extremites\n if (dd<d): return dd 
\n else: return d", "title": "" }, { "docid": "69bd18656d07427e115bd12e62ffdee6", "score": "0.6460986", "text": "def nearest(self, point):\n dist = np.linalg.norm(self.vertices - point, axis=1)\n best = np.argmin(dist)\n return {\"node\": self.vertices[best], \"distance\": dist[best]}", "title": "" }, { "docid": "69bd18656d07427e115bd12e62ffdee6", "score": "0.6460986", "text": "def nearest(self, point):\n dist = np.linalg.norm(self.vertices - point, axis=1)\n best = np.argmin(dist)\n return {\"node\": self.vertices[best], \"distance\": dist[best]}", "title": "" }, { "docid": "31bb2fd0e2d5352f46c35d687c2d2b50", "score": "0.6459042", "text": "def calculateDistance(origin, destination):\n\n return float((great_circle(origin, destination).km)*1000)", "title": "" }, { "docid": "855a88f062d59e26c274ef653be31507", "score": "0.64561427", "text": "def points_distance(self, points, return_nearest=False):\n\n dists = cdist(points, self.points[:-1])\n\n if return_nearest:\n mini = np.argmin(dists, axis=1)\n dists = np.take_along_axis(dists, mini[:, None], axis=1)[:, 0]\n minp = self.points[mini]\n del mini\n else:\n dists = np.min(dists, axis=1)\n\n for pi in range(len(self.points) - 1):\n pA = self.points[pi]\n pB = self.points[pi + 1]\n n = pB - pA\n d = np.linalg.norm(n)\n\n if d > 0:\n n /= d\n q = points - pA[None, :]\n x = np.einsum(\"pd,d->p\", q, n)\n\n sel = (x > 0) & (x < d)\n if np.any(sel):\n x = x[sel]\n y2 = np.maximum(np.linalg.norm(q[sel], axis=1) ** 2 - x**2, 0.0)\n\n dsel = dists[sel]\n dists[sel] = np.minimum(dsel, np.sqrt(y2))\n\n if return_nearest:\n mini = np.argwhere(np.sqrt(y2) < dsel)\n hminp = minp[sel]\n hminp[mini] = pA[None, :] + x[mini, None] * n[None, :]\n minp[sel] = hminp\n del mini, hminp\n\n del y2, dsel\n\n del x, sel\n\n if return_nearest:\n return dists, minp\n else:\n return dists", "title": "" }, { "docid": "75108290afd24b9939feae9156cf2087", "score": "0.6455762", "text": "def distance(self, x, y):\n dx = x - self.x\n dy = y - self.y\n return math.sqrt(dx**2 + dy**2)", "title": "" }, { "docid": "2753aaefdc3c9f2f2e13acae3ab3f2ea", "score": "0.64529335", "text": "def distance(x0,y0,x1,y1):\n return sqrt((x1-x0)**2 + (y1-y0)**2)", "title": "" }, { "docid": "337c8ffbf1b98ee56ef6b64d628e77e0", "score": "0.6442676", "text": "def closest_face_dist_memoized(self, point):\n mn_dist = 1.e9\n obb_faces_3d = self.faces_3d_memoized()\n for face_id in range(obb_faces_3d.shape[2]):\n face = np.array(obb_faces_3d[:, :, face_id].squeeze())\n dist, _ = pointTriangleDistance(face, np.asarray(point))\n if dist < mn_dist:\n mn_dist = dist\n return mn_dist", "title": "" }, { "docid": "9b6f591c99418637611aa13a4dc5efd7", "score": "0.64423573", "text": "def distance(self):\n detected, point = self.last_read[:2]\n return np.linalg.norm(point) if detected else 1", "title": "" }, { "docid": "eca197aea180f545691656270d332a58", "score": "0.6441566", "text": "def dist(self):\n dx = list(sorted([abs(x) for x in self.pos]))\n return dx[0] + dx[1]", "title": "" }, { "docid": "236a10aace3ad212d4580e182a3e18d6", "score": "0.6441241", "text": "def getDistance(point1, point2x, point2y):\n distance = np.sqrt((point2x - point1[0])**2 + (point2y - point1[1])**2)\n return distance", "title": "" }, { "docid": "a13a649abee4006f3147133c8a6da692", "score": "0.6433959", "text": "def distance(current_x, current_y, goal_x, goal_y):\r\n x_dif = abs(goal_x - current_x) * 10.29\r\n y_dif = abs(goal_y - current_y) * 7.55\r\n return math.sqrt(math.pow(x_dif, 2) +\r\n math.pow(y_dif, 2))", "title": "" }, { "docid": 
"dcdff36ea13071bffe671cc42adb99d1", "score": "0.6427592", "text": "def distance(p1, p2):\n return ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** 0.5", "title": "" }, { "docid": "64f23b5bd43f1a545f0be94c123acf78", "score": "0.6426846", "text": "def _distance((x1, y1), (x2, y2)):\n\n\treturn math.sqrt((x2-x1)**2 + (y2-y1)**2)", "title": "" } ]
099e314d87cb3a8f39b796e97ab050a8
fixed value comparator test
[ { "docid": "6e63a52c489faa303fcf223b6647ddee", "score": "0.6537535", "text": "def test_fixed_value_comparator(self, num_state_qubits, value, geq):\n # initialize weighted sum operator factory\n comp = Comparator(num_state_qubits, value, geq)\n\n # initialize circuit\n q = QuantumRegister(num_state_qubits+1)\n if comp.required_ancillas() > 0:\n q_a = QuantumRegister(comp.required_ancillas())\n qc = QuantumCircuit(q, q_a)\n else:\n q_a = None\n qc = QuantumCircuit(q)\n\n # set equal superposition state\n qc.h(q[:num_state_qubits])\n\n # build circuit\n comp.build(qc, q, q_a)\n\n # run simulation\n job = execute(qc, BasicAer.get_backend('statevector_simulator'), shots=1)\n\n for i, s_a in enumerate(job.result().get_statevector()):\n\n prob = np.abs(s_a)**2\n if prob > 1e-6:\n # equal superposition\n self.assertEqual(True, np.isclose(1.0, prob * 2.0**num_state_qubits))\n b_value = '{0:b}'.format(i).rjust(qc.width(), '0')\n x = int(b_value[(-num_state_qubits):], 2)\n comp_result = int(b_value[-num_state_qubits-1], 2)\n if geq:\n self.assertEqual(x >= value, comp_result == 1)\n else:\n self.assertEqual(x < value, comp_result == 1)", "title": "" } ]
[ { "docid": "a5bff1832536e4806c5a098b17b040b7", "score": "0.65169156", "text": "def comparator_converter(self, val):\r\n return val", "title": "" }, { "docid": "b5bc04f0cc6c2a68b4b7b8a19a45f85c", "score": "0.6449908", "text": "def __cmp__(self, p):\n val = self[0].__cmp__(p[0])\n if val != 0:\n return val\n return self[1].__cmp__(p[1])", "title": "" }, { "docid": "c6f284fdfff89dcfc0a6e5a345c98c62", "score": "0.63881654", "text": "def _CompareValue(self, event_value, filter_value):\n return event_value >= filter_value", "title": "" }, { "docid": "99fee6931967a56a0e3024ff6c7e9168", "score": "0.6368352", "text": "def _CompareValue(self, event_value, filter_value):\n return event_value > filter_value", "title": "" }, { "docid": "78752964c86c349ff7e2523f896ccc8a", "score": "0.6346719", "text": "def _CompareValue(self, event_value, filter_value):\n return event_value <= filter_value", "title": "" }, { "docid": "7c4cf6ccf6648377fee84c4def0c120b", "score": "0.63366616", "text": "def _CompareValue(self, event_value, filter_value):\n return event_value < filter_value", "title": "" }, { "docid": "d3462b5ab0af0ae7bb915e8c6d2f4211", "score": "0.62081164", "text": "def cmp_func(x, y):\n if x + y > y + x:\n return 1\n elif x == y:\n return 0\n else:\n return -1", "title": "" }, { "docid": "3f1ca994f1b7c37b219649f2ae5486c8", "score": "0.60899645", "text": "def _comparator(func):\r\n\tdef comparator_wrapper(self, other):\r\n\t\ttry:\r\n\t\t\tassert self.enumtype == other.enumtype\r\n\t\t\tresult = func(self.index, other.index)\r\n\t\texcept (AssertionError, AttributeError):\r\n\t\t\tresult = NotImplemented\r\n\r\n\t\treturn result\r\n\tcomparator_wrapper.__name__ = func.__name__\r\n\tcomparator_wrapper.__doc__ = getattr(float, func.__name__).__doc__\r\n\treturn comparator_wrapper", "title": "" }, { "docid": "fcf5a3b98e3089de5624e7967330b32d", "score": "0.6084137", "text": "def default_comparator(left, right):\n if left < right:\n return -1\n elif left == right:\n return 0\n else:\n return 1", "title": "" }, { "docid": "8136edea717be14975a0b43b1dce245b", "score": "0.60528076", "text": "def na_cmp():\n return lambda x, y: int(x) == int(y) == 0", "title": "" }, { "docid": "08a63ffbf6f885847e96c195eede3aef", "score": "0.60342187", "text": "def cmp(a, b):\n return bool(a > b) - bool(a < b)", "title": "" }, { "docid": "a1dd689bfcd1d8a86ffaa17264aba4aa", "score": "0.60299563", "text": "def __le__(self, value):\r\n return self._comparison('<=', value)", "title": "" }, { "docid": "c80706325ffb9558cbd92afd32ccda3a", "score": "0.5999284", "text": "def __ge__(self, value):\r\n return self._comparison('>=', value)", "title": "" }, { "docid": "f029964f95177d4dedc4ab91d6fa47e2", "score": "0.5996046", "text": "def test_sort_list_with_neg_values(rand_neg):\n key = sorted(rand_neg)\n result = radix_sort(rand_neg)\n assert key == result", "title": "" }, { "docid": "f6ce54c028a5514b9da0af60fb4907aa", "score": "0.59779125", "text": "def compare_helper(a, b):\n if a > b:\n return 1\n elif a < b:\n return -1\n else:\n return 0", "title": "" }, { "docid": "13857895971d3fe51aaa84560097c7ea", "score": "0.595721", "text": "def __gt__(self, value):\r\n return self._comparison('>', value)", "title": "" }, { "docid": "a8cba6073f20d55a488ef45b2df2a813", "score": "0.5950804", "text": "def __contains__(self, value: Real) -> bool:\n return self.lo < value < self.hi", "title": "" }, { "docid": "996cbec666e237ff3553135027069b3e", "score": "0.59325933", "text": "def three_way_cmp(a: Any, b: Any) -> int:\n return (a > b) - (a < b)", "title": 
"" }, { "docid": "e508a0b5e6d7765868d956ec5efca79c", "score": "0.5932303", "text": "def cmp(a, b):\n return (b < a) - (a < b)", "title": "" }, { "docid": "db67f62d799c166427bb8a78c2d94306", "score": "0.58524966", "text": "def cmp(x, y):\n return (x > y) - (x < y)", "title": "" }, { "docid": "2bcda8c463c95ca4c19a05427d41b577", "score": "0.58466846", "text": "def cmp(x, y):\n\n return (x > y) - (x < y)", "title": "" }, { "docid": "2bcda8c463c95ca4c19a05427d41b577", "score": "0.58466846", "text": "def cmp(x, y):\n\n return (x > y) - (x < y)", "title": "" }, { "docid": "06d463f0c3644489c85c5221e65bd5f1", "score": "0.58094364", "text": "def __gt__(other):", "title": "" }, { "docid": "73ec93137d67af8a0201d330a1a25808", "score": "0.5765614", "text": "def test_binary_combinator_lt(f, ff):\n assert str(comb.LT(f, ff)) == \"{0} < {1}\".format(str(f), str(ff))", "title": "" }, { "docid": "742df08e8424e2e899d7675df6de7620", "score": "0.57641673", "text": "def better(x, y):\r\n return x > y", "title": "" }, { "docid": "c5b1472a7726c47b0de73f1c2b940f78", "score": "0.57505983", "text": "def better(x, y):\r\n return x < y", "title": "" }, { "docid": "3e130a9086d575aef4f8bbdc9c24f0a7", "score": "0.5742902", "text": "def lessEqual(compare_item):\n\n def callback(item):\n return item <= compare_item\n\n return callback", "title": "" }, { "docid": "dfdd626e90fa1994283b676914e24f29", "score": "0.57421297", "text": "def compare(a, b):\n if a == b:\n return 0\n elif a > b:\n return 1\n else:\n return -1", "title": "" }, { "docid": "d46b469680fa0f65e19fc7546cde9bab", "score": "0.57325137", "text": "def _CompareValue(self, event_value, filter_value):\n return event_value != filter_value", "title": "" }, { "docid": "2b3c20d16bc536bd8d84a3b3ff6cd79e", "score": "0.57194525", "text": "def binarize_function(val, val_low =0, val_up = 100):\n if val_low<= val < val_up :\n return 1 \n else :\n return 0", "title": "" }, { "docid": "96dcad0d9f32dfe81c10d1e1f35b39ae", "score": "0.571572", "text": "def test_it_sorts_numbers_based_on_their_binary_representation(self):\n self.assertEqual(sort_bits([0, 1, 2, 3, 4, 5, 6, 7, 8]),\n [0, 1, 2, 4, 8, 3, 5, 6, 7])", "title": "" }, { "docid": "9e03e98458043c41bdb6669bdb707a43", "score": "0.57104504", "text": "def __le__(self, other):\n return self.val <= other.val", "title": "" }, { "docid": "2cb952685517fbb44d6dbcd1df02254c", "score": "0.57051355", "text": "def _CompareValue(self, event_value, filter_value):", "title": "" }, { "docid": "394ceb205dcafb6af417814966255f8a", "score": "0.57043767", "text": "def test_binary_combinator_gt(f, ff):\n assert str(comb.GT(f, ff)) == \"{0} > {1}\".format(str(f), str(ff))", "title": "" }, { "docid": "2bda7189d7fae2c927c0a8bfe69429d9", "score": "0.5703504", "text": "def compare(a, b):\n if a > b:\n return 1\n elif a < b:\n return -1\n else:\n return 0", "title": "" }, { "docid": "0381b880320ea1398e96203788987a48", "score": "0.5683629", "text": "def get_cmp(table, col_name, value):\r\n col = getattr(table.c, col_name)\r\n original_value = value\r\n\r\n neg = False\r\n cmp_in = False\r\n regex = False\r\n max_ilvl = False\r\n\r\n is_string = isinstance(value, basestring)\r\n\r\n if is_string and value.startswith(\"!\"):\r\n value = value[1:]\r\n neg = True\r\n\r\n if is_string and value.endswith(\"^\"):\r\n value = value[:-1]\r\n assert str(table) == \"item_template\"\r\n # TODO: support other tables where a use case exists for restricting\r\n # results to the MAX/MIN of a specific column\r\n max_ilvl = True\r\n\r\n if is_string and 
value.startswith(\"/\") and value.endswith(\"/\"):\r\n value = value[1:-1]\r\n regex = True\r\n\r\n elif is_string and value.startswith(\"in \"):\r\n value = value[3:]\r\n cmp_in = True\r\n\r\n flags = get_flags(col_name, value)\r\n if flags is not None:\r\n value = col.in_(flags) if cmp_in else (col == flags)\r\n elif regex:\r\n value = col.op(\"rlike\")(value)\r\n elif is_string and \"%\" in value:\r\n value = col.like(value)\r\n if neg:\r\n value = not_(value)\r\n elif neg:\r\n value = not_(value)\r\n elif flags is None:\r\n value = col == value\r\n\r\n # TODO: actually handle this monkeypatch in the Model object, and stop using\r\n # the AWFUL subquery hack below.\r\n value._max = None\r\n if max_ilvl:\r\n value._max = func.max(table.c.ItemLevel)\r\n subquery = select([\"*\"]).select_from(\r\n select([func.max(table.c.ItemLevel)]).where(value).alias(\"tmp\")\r\n )\r\n value = and_(value, table.c.ItemLevel == subquery)\r\n\r\n return (value, original_value)", "title": "" }, { "docid": "a22797be5947f8c4fa682804bb9a4b34", "score": "0.56774604", "text": "def test_binary_combinator_lte(f, ff):\n assert str(comb.LTE(f, ff)) == \"{0} <= {1}\".format(str(f), str(ff))", "title": "" }, { "docid": "b803d6f5f3095adde12ddfc40ee22231", "score": "0.56651545", "text": "def _compare(new_value, old_value):\r\n if isinstance(new_value, (int, float)) and type(new_value) == type(old_value):\r\n # if numeric, compare within a threshold\r\n # TODO\r\n return abs(new_value - old_value) > 0.00001\r\n\r\n elif type(new_value) != type(old_value):\r\n return True\r\n\r\n elif isinstance(new_value, (pd.DataFrame, pd.Series, np.ndarray)) or isinstance(\r\n old_value, (pd.DataFrame, pd.Series, np.ndarray)\r\n ):\r\n return (abs(new_value - old_value) > 0.00001).any()\r\n\r\n return new_value != old_value", "title": "" }, { "docid": "f9162ca6fc614a19da48ecc8feb91f32", "score": "0.56622916", "text": "def _CompareValue(self, event_value, filter_value):\n return event_value == filter_value", "title": "" }, { "docid": "896aaef9b329c5ced5c0bc7f0b7b953c", "score": "0.5658262", "text": "def Compare_default(t, x):\n exps = [x.left] + x.comparators\n bools = []\n for i in range(len(x.ops)):\n bools.append(JSBinOp(exps[i], x.ops[i], exps[i + 1]))\n return reduce(lambda x, y: JSBinOp(x, JSOpAnd(), y), bools)", "title": "" }, { "docid": "213dc5165d719ed916636a2fcfc615e6", "score": "0.56468433", "text": "def is_better_than(self, val1, val2):\n return val1 > val2", "title": "" }, { "docid": "0da955568d85399a99518a95ad6ed93c", "score": "0.56459945", "text": "def test_pvals(self, pvals):\n x = 0\n for bnd in self.bnds:\n if bnd[0] < pvals[x] < bnd[1]:\n print '%x in bnds' %x\n else:\n print '%x not in bnds' %x\n x += 1\n return pvals", "title": "" }, { "docid": "7be17b2123d6d4d1241ce2125b3a4b74", "score": "0.56432354", "text": "def compare(self, compare_val):\r\n if compare_val == self._value:\r\n return True\r\n else:\r\n return False", "title": "" }, { "docid": "de0760b4a1cc622311160eec42f18790", "score": "0.5639935", "text": "def gt(value, arg):\n return long(value) > long(arg)", "title": "" }, { "docid": "e866c6c7615539d0935a952d5165eac6", "score": "0.5636675", "text": "def gt(value1, value2):\n return value1 > value2", "title": "" }, { "docid": "c392b6f6537290b00453644b24bb970d", "score": "0.5634278", "text": "def gt(y, x):\n return x > y", "title": "" }, { "docid": "153111ee576d158d501126618fe86e17", "score": "0.56247395", "text": "def compare(a, b):\n gt = False\n lt = False\n for j, k in zip(a, b):\n gt |= j > k\n lt |= j < 
k\n if gt and lt:\n break\n return int(gt) - int(lt)", "title": "" }, { "docid": "db673f84f18005e08c8d5471754c3215", "score": "0.56242937", "text": "def __cmp__(self, other):\n if isinstance(other, (int, long)):\n if other in self:\n return 0\n return cmp(self[0], other)\n return super(LumiblockRange, self).__cmp__(other)", "title": "" }, { "docid": "f927d30d007befe882110e5387821e19", "score": "0.56230444", "text": "def comparator(operator: Any) -> Any: # -> (self: Unknown, other: Unknown) -> Unknown:\n ...", "title": "" }, { "docid": "b3d1a3e9189771f6a9adefe75726972f", "score": "0.5618732", "text": "def _is_lower_than(self, item1, item2):", "title": "" }, { "docid": "2453650e6c13a428e9cdd40c10195372", "score": "0.5617463", "text": "def compare_to(self, obj):\n result = super().compare_to(obj)\n if result == 0:\n return compare_to_boolean(self.__value, obj.get_value())\n return result", "title": "" }, { "docid": "c29653a6354355b2366428d9fb59a9bd", "score": "0.56155455", "text": "def __gt__(self,other): #>\r\n return float(self)>float(other)", "title": "" }, { "docid": "158e5b5a3f829cf7e0dc73f8a9102a7e", "score": "0.5609993", "text": "def prefere(a1, a2):\n if a1 < a2:\n return True\n else:\n return False", "title": "" }, { "docid": "69e930e539a9979791ec391ec08e1e78", "score": "0.5609589", "text": "def compare(val_1, val_2):\n return (val_1 & 0xffff) == (val_2 & 0xffff)", "title": "" }, { "docid": "849ce300660edb7c6a851eb2395b4e54", "score": "0.5607712", "text": "def compare_pv(arr,idx):\n return True if idx > 0 and arr[idx] > arr[idx - 1] else False # Compares values and checks if idx is at beginning", "title": "" }, { "docid": "8d283a253b518173ea567400526b6dae", "score": "0.56054753", "text": "def na_cmp():\n return operator.is_", "title": "" }, { "docid": "614626ade817e52619dc28947e8cb947", "score": "0.56032354", "text": "def test_cmp_sorted(self, obj, result, **kwargs):\n self.assertEqual(metalk8s.cmp_sorted(obj, **kwargs), result)", "title": "" }, { "docid": "ee52f66e628302e4f097e35f48afec73", "score": "0.5597271", "text": "def test_fixed_list():\n\n fixed = selection_sort([one_list()])\n\n assert all(fixed[i] <= fixed[i+1] for i in range(len(fixed)-1))", "title": "" }, { "docid": "b6695a04b6fa9911651c103d420b456a", "score": "0.55863136", "text": "def _GetCmpFunc(col):\n if col.startswith('~'):\n col = col[1:]\n descending = True\n else:\n descending = False\n\n field_getter = property_selector.PropertyGetter(col)\n\n def Cmp(r1, r2):\n \"\"\"A comparison function.\"\"\"\n v1 = field_getter.Get(r1)\n v2 = field_getter.Get(r2)\n\n retval = cmp(v1, v2)\n\n # We want None to have the highest ordering, so if either value is\n # None, we have to negate the result of cmp().\n if v1 is None or v2 is None:\n retval *= -1\n\n if descending:\n return retval * -1\n else:\n return retval\n\n return Cmp", "title": "" }, { "docid": "e4a74dda9e46e813a997ccc6f5f739b7", "score": "0.5575672", "text": "def __ge__(self,other): #>=\r\n return self==other or float(self)>float(other)", "title": "" }, { "docid": "aac22429549a12f1a8e8f5c9e5bc5d52", "score": "0.5561724", "text": "def __cmp__(self, right):\n if not self.__b:\n # self is oo, which is bigger than everything but oo.\n if not right.__b:\n return 0\n else:\n return 1\n elif not right.__b:\n if not self.__b:\n return 0\n else:\n return -1\n return cmp(self._rational_(), right._rational_())", "title": "" }, { "docid": "f8ae2da2725002d1e2a57765a14f92b2", "score": "0.55459964", "text": "def _compare(self, first, second):\n pass", "title": "" }, { 
"docid": "a65edf11d782a7c79bcac99a0e865923", "score": "0.5542708", "text": "def test_numbers_can_be_in_any_order(self):\n self.assertEqual(sort_bits([8, 0, 1, 3, 5, 4, 2, 6, 7]),\n [0, 1, 2, 4, 8, 3, 5, 6, 7])", "title": "" }, { "docid": "37d1de2c79844ff90e5bc8bd064ca1f0", "score": "0.5534672", "text": "def __gt__( self, other ) :\n\n return( self.compare( other, 5 ) > 0 )", "title": "" }, { "docid": "cb5353c9ab8e354f13ee4f3294706892", "score": "0.552876", "text": "def adjustedCompareValue(self, value):\n try:\n return gennumber.GenNumber(value).num\n except ValueError:\n return 0", "title": "" }, { "docid": "9f2881df0b1fe4d2e6a7604d3fc16556", "score": "0.55265766", "text": "def _equal_to_ (f,v) :\n return opers.__equal_to__(f,v)", "title": "" }, { "docid": "189d37ed1f6656e041f4dc0028de1de4", "score": "0.55171615", "text": "def greaterEqual(compare_item):\n\n def callback(item):\n return item >= compare_item\n\n return callback", "title": "" }, { "docid": "42a70258809c97981e6e01f33f72a923", "score": "0.5515798", "text": "def _comp(self, a_node_index, b_node_index):\n if self.min:\n return self._lte(a_node_index, b_node_index)\n else:\n return self._lte(b_node_index, a_node_index)", "title": "" }, { "docid": "9ad9542268df257b4240b1f9cc029b3e", "score": "0.55149037", "text": "def __cmp__(self, other):\n return 0", "title": "" }, { "docid": "9559de76f6a658f29bc52a83e2483d18", "score": "0.5513158", "text": "def __ge__(self, other):\n raise NotImplementedError(\"Cannot compare CIs with >=\")", "title": "" }, { "docid": "46c7c9b9e106b846c58e057d783e3925", "score": "0.5490949", "text": "def test_binary_combinator_gte(f, ff):\n assert str(comb.GTE(f, ff)) == \"{0} >= {1}\".format(str(f), str(ff))", "title": "" }, { "docid": "81fd759b710dc8d2c090eec79a94e60b", "score": "0.5489729", "text": "def cmp_versions(v1, v2):\n def normalize(v):\n return [int(x) for x in re.sub(r'(\\.0+)*$', '', v).split(\".\")]\n\n n1 = normalize(v1)\n n2 = normalize(v2)\n return (n1 > n2) - (n1 < n2)", "title": "" }, { "docid": "567919d05aed7f80284a87778160982f", "score": "0.54817504", "text": "def compare (v1, v2):\r\n v1_norm = normalize(v1)\r\n v2_norm = normalize(v2)\r\n if v1_norm < v2_norm:\r\n return -1\r\n if v1_norm > v2_norm:\r\n return 1\r\n return 0", "title": "" }, { "docid": "8c2ae8dc1c1a13197b5984fadfa14ff4", "score": "0.54794455", "text": "def __gt__(self, other):", "title": "" }, { "docid": "d34190deb3c39bcbf8aeca53366d6c90", "score": "0.54788876", "text": "def __le__( self, other ) :\n\n return( self.compare( other, 5 ) <= 0 )", "title": "" }, { "docid": "285d24e8fe406eeecfe0b291ed65c41a", "score": "0.54756546", "text": "def twos_comp(val):\n if(val>=0):\n return val\n else:\n return 2**16+val #not(-val-1)", "title": "" }, { "docid": "84f739ea5cf4160460c8449074b0bd52", "score": "0.5471509", "text": "def test_range_sorting_in_fh():\n standard_range = ForecastingHorizon(values=range(5))\n assert (standard_range == ForecastingHorizon(values=[0, 3, 4, 1, 2])).all()", "title": "" }, { "docid": "7c3e36ead0bc8c8eb557b65b37ff1c24", "score": "0.5447383", "text": "def __gt__(self, other):\n return int(self) > int(other)", "title": "" }, { "docid": "9b97e8d904f20c1ace9d77828b6ccb3a", "score": "0.54462487", "text": "def __ge__( self, other ) :\n\n return( self.compare( other, 5 ) >= 0 )", "title": "" }, { "docid": "af32e7b1d1027bd088aa089d2bc54c41", "score": "0.544412", "text": "def cmp_f(x, y):\n epsilon = 0.00000001\n # Check absolute precision.\n if -epsilon <= x - y <= epsilon:\n return True\n\n # Is x or y too 
close to zero?\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\n return False\n\n # Check relative precision.\n return (-epsilon <= (x - y) / x <= epsilon\n or -epsilon <= (x - y) / y <= epsilon)", "title": "" }, { "docid": "af8f89850de49e93039132140252e230", "score": "0.54404444", "text": "def __gt__(self, other):\n return self.priority > other.priority or (self.priority == other.priority and self.key > other.key)", "title": "" }, { "docid": "4594671ad0e1e0e5db221bc506eaa0e0", "score": "0.5434453", "text": "def compare(x, y):\n\tif x > y:\n\t\treturn 1\n\tif x == y:\n\t\treturn 0\n\tif x < y:\n\t\treturn -1", "title": "" }, { "docid": "1d515df6acce5f99006868906f7e06aa", "score": "0.54308546", "text": "def __gt__(self, other):\n return float(self) > float(other)", "title": "" }, { "docid": "c48fc7134a89a0b65cf8c97b943c776b", "score": "0.5416734", "text": "def _rvs(cls, U, p):\n return U < p", "title": "" }, { "docid": "1f59e6f10c46aefe21960f7fef429edf", "score": "0.5416148", "text": "def __gt__(self, other):\n return self.getData() > other", "title": "" }, { "docid": "43d572294c0df536a6e8a1011cda05eb", "score": "0.5414917", "text": "def compare(self, left, right):\n return cmp(left, right)", "title": "" }, { "docid": "b86d4f0555f42c15f49e6fec3851a769", "score": "0.54073787", "text": "def comp(a, b):\n for x in range(len(a)):\n if a[x] > b[x]:\n return 1\n elif a[x] < b[x]:\n return 2\n return 0", "title": "" }, { "docid": "7c544bdf9feda5e49991275cead04ac4", "score": "0.5407196", "text": "def my_compare(x, y):\n if x % 2 == y % 2:\n return compare(x, y)\n if x % 2 == 0:\n return 1\n else:\n return -1", "title": "" }, { "docid": "72ef154778fc7035000ed5b01472113e", "score": "0.540564", "text": "def precision_at_position_1(sort_data):\n\n if sort_data[0][1] == 1:\n return 1\n else:\n return 0", "title": "" }, { "docid": "a57abcb663bb33eb4fdd0dbbbc7d939c", "score": "0.5405529", "text": "def __le__(self,other): #<=\r\n return self==other or float(self)<float(other)", "title": "" }, { "docid": "4ef99704374202f212ec75077667eccc", "score": "0.53977454", "text": "def __ge__(self, other):\r\n return self == other or self > other", "title": "" }, { "docid": "4ef99704374202f212ec75077667eccc", "score": "0.53977454", "text": "def __ge__(self, other):\r\n return self == other or self > other", "title": "" }, { "docid": "b7359c948ecbbe12d6fae6bb531a21e2", "score": "0.5397527", "text": "def lte(value, arg):\n return long(value) <= long(arg)", "title": "" }, { "docid": "99456fcc32f4a1bbdd72f18da34cb5aa", "score": "0.5394073", "text": "def cmp(a, b): # pragma: no cover\n if a is None and b is None:\n return 0\n elif a is None:\n return -1\n elif b is None:\n return 1\n return (a > b) - (a < b)", "title": "" }, { "docid": "7f8dadd3c1bc06b8263560d204fc3c8a", "score": "0.53925204", "text": "def __le__(self, other):\n return float(self) <= float(other)", "title": "" }, { "docid": "d1db31b77dfc05a913feabf6cda5ae0b", "score": "0.53807455", "text": "def test_natural_sort(self):\n l = ['100_b','10_bb','100_a','20_aa','500_c', '9_c']\n exp =['9_c','10_bb','20_aa','100_a','100_b','500_c']\n obs = natural_sort(l)\n\n self.assertEqual(obs, exp)", "title": "" }, { "docid": "5aa0db7296e6294052e86816a649e3bf", "score": "0.53804195", "text": "def _compare(self, other, method):\r\n return method(self.value, other)", "title": "" }, { "docid": "9ee8d2920a6ce72361dc2f6768a13dcb", "score": "0.53720444", "text": "def test_compare_custom_object(self):\n\n class CustomClass:\n def __init__(self, cmp_result=None):\n 
self.cmp_result = cmp_result\n\n def generic_result(self):\n if self.cmp_result is None:\n return NotImplemented\n else:\n return self.cmp_result\n\n def __eq__(self, other):\n return self.generic_result()\n\n def __gt__(self, other):\n return self.generic_result()\n\n t = Timedelta(\"1s\")\n\n assert not (t == \"string\")\n assert not (t == 1)\n assert not (t == CustomClass())\n assert not (t == CustomClass(cmp_result=False))\n\n assert t < CustomClass(cmp_result=True)\n assert not (t < CustomClass(cmp_result=False))\n\n assert t == CustomClass(cmp_result=True)", "title": "" }, { "docid": "dc463d83ad508ae00ea36d7b80ad9d36", "score": "0.5369304", "text": "def compare_to_100(value):\n if value < 100:\n return 'less than 100'\n elif value == 100:\n return 'equal to 100'\n else:\n return 'more than 100'", "title": "" }, { "docid": "1caae99650df61b15245bf72d4735245", "score": "0.53589207", "text": "def __ge__(self, other):\n return self > other and self == other", "title": "" } ]
c1ef3fb3fca50659a61d3135daea5bf1
Subscribe to the model update request stream.
[ { "docid": "f64491422975b5dd6dc00f57e0bff34d", "score": "0.7139607", "text": "def __listen_to_model_update_stream(self):\n r = alliance.ClientAvailableMessage()\n\n whoami(r.sender, self)\n\n for request in self.orchestrator.ModelUpdateStream(r):\n # A client sent a model update to be handled by the combiner\n if request.client.name != \"reducer\":\n print(\"COMBINER: received model from client! {}\".format(request.client), flush=True)\n self.receive_model_candidate(request.model_update_id)\n print(\"COMBINER: Received model update.\", flush=True)", "title": "" } ]
[ { "docid": "8c39a358475085d8a1c88a34ea6e2c7c", "score": "0.66264987", "text": "def ModelUpdateRequestStream(self, response, context):\n\n client = response.sender\n metadata = context.invocation_metadata()\n if metadata:\n print(\"\\n\\n\\nGOT METADATA: {}\\n\\n\\n\".format(metadata), flush=True)\n\n status = fedn.Status(status=\"Client {} connecting to ModelUpdateRequestStream.\".format(client.name))\n status.log_level = fedn.Status.INFO\n\n self.__whoami(status.sender, self)\n\n self._subscribe_client_to_queue(client, fedn.Channel.MODEL_UPDATE_REQUESTS)\n q = self.__get_queue(client, fedn.Channel.MODEL_UPDATE_REQUESTS)\n\n self._send_status(status)\n\n while True:\n yield q.get()", "title": "" }, { "docid": "b1ee5098ddbc00ef8e0721b7f72bf141", "score": "0.6481928", "text": "def ModelUpdateRequestStream(self, response, context):\n\n client = response.sender\n metadata = context.invocation_metadata()\n if metadata:\n print(\"\\n\\n\\nGOT METADATA: {}\\n\\n\\n\".format(metadata), flush=True)\n\n status = alliance.Status(status=\"Client {} connecting to ModelUpdateRequestStream.\".format(client.name))\n status.log_level = alliance.Status.INFO\n\n whoami(status.sender, self)\n # print(\"Client {} connecting to ModelUpdateRequestStream.\".format(client))\n\n self._subscribe_client_to_queue(client, alliance.Channel.MODEL_UPDATE_REQUESTS)\n q = self.__get_queue(client, alliance.Channel.MODEL_UPDATE_REQUESTS)\n\n self._send_status(status)\n\n while True:\n yield q.get()", "title": "" }, { "docid": "3fcd60305be3df302a08b2acf57299c6", "score": "0.6266932", "text": "def notify(self):\n UpdateManager.input_updated(self)", "title": "" }, { "docid": "c6d0cf79b85e47a814e071053b960c59", "score": "0.6261632", "text": "def UpdateStream(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "title": "" }, { "docid": "a819b03475e798da2feda1f282224f72", "score": "0.61266166", "text": "def signal_update():\n signal.update_parameters(model=self.model)", "title": "" }, { "docid": "ae0a1d8eddc952d4ef6333e49c223d44", "score": "0.60659707", "text": "def update(self) -> None:\n self._filter.handle_internal_messages()", "title": "" }, { "docid": "89abbdcb7ca7f63cf52c43f95b49c370", "score": "0.60451084", "text": "def updateStream(self):\n response.headers['content-type'] = 'text/event-stream'\n response.status_int = 200\n #updater logic needs to go here...\n context = zmq.Context()\n receiver = context.socket(zmq.SUB)\n receiver.connect(config[\"selectivehearing.updater_url\"])\n receiver.setsockopt(zmq.SUBSCRIBE, \"\")\n def go():\n while True:\n update = receiver.recv()\n #update = '{\"foo\": [[\"bar\", \"broken! 
%s\", %s]]}' % (time.time(), random.randint(0,4)) #this should actually be an update...\n msg = \"data: %s\\n\\n\" % (update)\n yield msg\n return go()", "title": "" }, { "docid": "2d5a87b4e4b9e2feb0381441347a624a", "score": "0.6008182", "text": "def model_updated(self,subject=None):\n\n \n raise NotImplementedError", "title": "" }, { "docid": "a2057322549acfbab0877b94a4410d0a", "score": "0.59964186", "text": "def SendModelUpdate(self, request, context):\n self.combiner.receive_model_candidate(request.model_update_id)\n print(\"ORCHESTRATOR: Received model update\", flush=True)\n\n response = fedn.Response()\n response.response = \"RECEIVED ModelUpdate {} from client {}\".format(response, response.sender.name)\n return response # TODO Fill later", "title": "" }, { "docid": "5c6890c9e95b0c66c9dcf45f5414c456", "score": "0.59116393", "text": "def SendModelUpdateRequest(self, request, context):\n self._send_request(request, fedn.Channel.MODEL_UPDATE_REQUESTS)\n\n response = fedn.Response()\n response.response = \"CONTROLLER RECEIVED ModelUpdateRequest from client {}\".format(request.sender.name)\n return response # TODO Fill later", "title": "" }, { "docid": "f38afcf24de36764abdb6cdfb88aaa86", "score": "0.5898151", "text": "def post_update_event(self, request, event, data): # pragma: no cover\n pass", "title": "" }, { "docid": "971a1b52a413b23dedccaddecb3e3e24", "score": "0.5887242", "text": "def subscribe(self):\n pass", "title": "" }, { "docid": "8f2ac5ba3a7aebc662ff79c1694d74cd", "score": "0.5797382", "text": "async def subscribe(self, request: WSRequest):\n await self.send(request)", "title": "" }, { "docid": "f3978f4f326323a7569d54d452ab7c7a", "score": "0.57791865", "text": "def update(self, *args, **kwargs):\n ...", "title": "" }, { "docid": "0146116eb239bc5e60b01401dd74743f", "score": "0.57052034", "text": "def _update(self, model):\n raise NotImplementedError", "title": "" }, { "docid": "f8689ad05bbfb3d3df73be3f52a6e68b", "score": "0.56919247", "text": "def update(self, *args, **kwargs) -> None:\n pass", "title": "" }, { "docid": "d82ef6bb821bc5ed0419d82946235053", "score": "0.56815875", "text": "def model_updated(self):\n raise NotImplementedError", "title": "" }, { "docid": "086bdd4ddc6de3d59c93fd3a4c32a051", "score": "0.5653346", "text": "def process_update(\n self, stream_name: str, updates: List[DataUpdate],\n resync: bool,\n ):\n raise NotImplementedError()", "title": "" }, { "docid": "ce28fc34bb0ab3cd39d452c133b43288", "score": "0.5617974", "text": "def SendModelUpdateRequest(self, request, context):\n self._send_request(request, alliance.Channel.MODEL_UPDATE_REQUESTS)\n\n response = alliance.Response()\n response.response = \"CONTROLLER RECEIVED ModelUpdateRequest from client {}\".format(request.sender.name)\n return response # TODO Fill later", "title": "" }, { "docid": "f0faaac9881848cda5244666d2a3e9a9", "score": "0.5613765", "text": "def callback_subscribe(self, callback):\n self.update_callbacks.append(callback)", "title": "" }, { "docid": "11a075441410e0b9a6567a686b5ba01b", "score": "0.5602831", "text": "def publish_update(self, payload):\n self._send_message(mtype=RDFStanzaType.UPDATE, payload=payload)", "title": "" }, { "docid": "70d37f68c9e55eeeac6b48abe7d2a588", "score": "0.55858904", "text": "def partial_update(self, request, *args, **kwargs):\n pass", "title": "" }, { "docid": "a505a0b20ca9413d4da358a144f31412", "score": "0.5576473", "text": "def update(self):\n ffc.flexflow_model_update(self.handle)", "title": "" }, { "docid": "e479f25eed8e8dff86e01c13fcb6b642", 
"score": "0.55674285", "text": "async def handle_subscribe_updates(hass, connection, msg):\n\n @callback\n async def handle_event_wiser_update(hub: str):\n \"\"\"pass data to frontend when backend changes\"\"\"\n connection.send_message(\n {\n \"id\": msg[\"id\"],\n \"type\": \"event\",\n \"event\": {\n \"event\": \"wiser_updated\",\n \"hub\": hub,\n }, # data to pass with event\n }\n )\n\n remove_listener = async_dispatcher_connect(\n hass, \"wiser_update_received\", handle_event_wiser_update\n )\n\n def unsubscribe_listeners():\n \"\"\"unsubscribe listeners when frontend connection closes\"\"\"\n remove_listener()\n\n connection.subscriptions[msg[\"id\"]] = unsubscribe_listeners\n connection.send_result(msg[\"id\"])", "title": "" }, { "docid": "53a6e48d74fc071ae16756b70be054f2", "score": "0.5560723", "text": "def update(self, *args, **kwargs) -> Any:\n pass", "title": "" }, { "docid": "23b1bb92da64f1d5b69536a195ad7ed0", "score": "0.55462414", "text": "def update(self, update_info):\n pass", "title": "" }, { "docid": "d06ab1b9055b73198d635bc1be7449d4", "score": "0.554461", "text": "def _update_callback(self, msg):\n self.async_update()", "title": "" }, { "docid": "9c275163e70748a88f9ea8c061aea4f3", "score": "0.55235994", "text": "def UpdateStream(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "719b2096194cf41f6de107e755aa37f9", "score": "0.55231196", "text": "def process_update(cls, team, date):\n update = model.Update.get_or_insert(team, date=date)\n f = functools.partial(cls.process_subscriber_update,\n update.date)\n map(f, model.Subscriber.subscribed(team))", "title": "" }, { "docid": "ef4bf6200c46a5c385fd4a7b479d585b", "score": "0.5516106", "text": "def __updateSubscription(self):\n self.__currentSubscription.updateNow()", "title": "" }, { "docid": "d0981cd55022664421cc3cc1575fa4e7", "score": "0.55160517", "text": "def SendModelUpdate(self, request, context):\n # self._send_request(request,alliance.Channel.MODEL_UPDATES)\n self.orchestrator.receive_model_candidate(request.model_update_id)\n print(\"ORCHESTRATOR: Received model update\", flush=True)\n\n response = alliance.Response()\n response.response = \"RECEIVED ModelUpdate {} from client {}\".format(response, response.sender.name)\n return response # TODO Fill later", "title": "" }, { "docid": "fb66db71e920d4afd423efb440898c8a", "score": "0.54947203", "text": "def subscribe(self):\n self.mediator.Subscribers.append(self)", "title": "" }, { "docid": "96d7a20d3b407dceef239c8b87d1084a", "score": "0.54853505", "text": "def sendUpdate(self, info=None):\n for observer in self.observers:\n observer.receiveUpdate(info)", "title": "" }, { "docid": "0bf5a47f3ae67c059102aa18b13abbeb", "score": "0.54840416", "text": "def modelChanged(self, payload):\n pass", "title": "" }, { "docid": "b372dd9eed21245c7a5cbb7e34bf9328", "score": "0.5482174", "text": "def update_handler(self, fd, events):\n ...", "title": "" }, { "docid": "3bae08085df7f752a87e49764afecb8e", "score": "0.54543567", "text": "def __listen_to_model_validation_stream(self):\n r = alliance.ClientAvailableMessage()\n whoami(r.sender, self)\n for validation in self.orchestrator.ModelValidationStream(r):\n # A client sent a model update to be handled by the combiner\n self.receive_validation(validation)\n print(\"COMBINER: Received model validation.\", flush=True)", "title": "" }, { "docid": "790ba6cd8c31c5e065fbd7d8fb7638bc", 
"score": "0.5453752", "text": "def handle_subscription_update_event(self, event):\n \n subscription = event.data.object\n price_id = subscription['items']['data'][0].plan.id\n stripe_customer = subscription.customer\n user = MyAccount.objects.get(stripe_customer_id=stripe_customer)\n package = Package.objects.get(stripe_price_id=price_id)\n\n return HttpResponse(content=f'Webhook received: {event[\"type\"]}', status=200)", "title": "" }, { "docid": "9e29b4cdf659a68b989d1d5a0fd8ebf8", "score": "0.5436362", "text": "def receiveUpdate(self, id, attr, value):\n\n if (self.barrierUp):\n self.communication.objStore.objects[id].update(attr,value)\n else:\n self.incomingUpdates.append((id, attr, value))", "title": "" }, { "docid": "ecfa9422f4ca951941b1296b9f6e5dc3", "score": "0.53950834", "text": "async def update(self, obj: Model_co):\n pass", "title": "" }, { "docid": "ef49b257afe5f78b5bc67c05f338cda1", "score": "0.5394589", "text": "def SubscribeCurrentItemChanged(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "title": "" }, { "docid": "a66cbc06b2133cf3f1437719dd00c52e", "score": "0.5393619", "text": "def do_update(self, _):\n self.fam.handle_events_in_interval(0.1)", "title": "" }, { "docid": "c13c2a488d8528f3e38687b64f24a5ab", "score": "0.5392663", "text": "def update(self, request, *args, **kwargs):\n response = super().update(request, *args, **kwargs)\n self.get_object().notify_jira_of_change()\n return response", "title": "" }, { "docid": "c418e76256485dc5edb761f732e57d98", "score": "0.538968", "text": "def update_signal(self, event):\n\n raise NotImplementedError(\"Should implement update_signal()\")", "title": "" }, { "docid": "dcbac044e88c61c5d7b380f910385588", "score": "0.5389385", "text": "def _listen_source_updates(self) -> None:\n\n @callback\n def _handle_signal(url: str) -> None:\n if self.stream:\n _LOGGER.debug(\"Update stream URL for camera %s\", self.camera_data.name)\n self.stream.update_source(url)\n\n assert self.platform.config_entry\n self.async_on_remove(\n async_dispatcher_connect(\n self.hass,\n f\"{SIGNAL_CAMERA_SOURCE_CHANGED}_{self.platform.config_entry.entry_id}_{self.camera_data.id}\",\n _handle_signal,\n )\n )", "title": "" }, { "docid": "c805c587b9e254fe70a720c696ee39c1", "score": "0.53880614", "text": "def update(object):\n pass", "title": "" }, { "docid": "a84db9444391ea813c9463f13968a7ce", "score": "0.53875816", "text": "def on_update(self, state: Dict[str, Any], **kwargs):\n ...", "title": "" }, { "docid": "c670629b654a5fb4b0a6386469454da4", "score": "0.53875285", "text": "def update(self):\n _LOGGER.debug(\"Super Updating\")\n self._api.update()", "title": "" }, { "docid": "441df0db1a25e99dfa63d2c14569588f", "score": "0.5373929", "text": "async def async_update(self) -> None:\n return", "title": "" }, { "docid": "6b93ed71d8143e8cdbd521ae00f363ae", "score": "0.5372482", "text": "async def async_update(self) -> None:", "title": "" }, { "docid": "bb68a6d0319cd00e8120325083389701", "score": "0.53702474", "text": "async def async_update(self):", "title": "" }, { "docid": "9b939a3a03a8891e5f3393f5f0a09543", "score": "0.5347592", "text": "def update(self, *args, **kwargs): # real signature unknown\r\n pass", "title": "" }, { "docid": "2457c88bcc5d52bec4848a39f4115dcf", "score": "0.53390104", "text": "def update(self, data):", "title": "" }, { "docid": "2457c88bcc5d52bec4848a39f4115dcf", "score": "0.53390104", "text": "def 
update(self, data):", "title": "" }, { "docid": "2457c88bcc5d52bec4848a39f4115dcf", "score": "0.53390104", "text": "def update(self, data):", "title": "" }, { "docid": "cc1d8b3e147d7148764b0b0b7dc49f6f", "score": "0.5332301", "text": "def process_subscriber_update(cls, date, subscriber):\n subscriber_update = model.SubscriberUpdate.get_or_insert(\n name=subscriber.name,\n mail=subscriber.mail,\n team=subscriber.team,\n date=date)\n sender = cls.get_reply_address(subscriber_update.key.urlsafe())\n message = cls.get_update_message(\n subscriber.team, subscriber.mail, sender, date)\n message.send()\n subscriber_update.sent = True\n subscriber_update.put()", "title": "" }, { "docid": "a92a5ef783618862f56fbe10476d95e4", "score": "0.53293395", "text": "def update(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "title": "" }, { "docid": "b00adbf65b7f25e53847273e3c2a3191", "score": "0.5326588", "text": "def test_update_message_subscriber(self):\n pass", "title": "" }, { "docid": "1d1beb77daafd104a8b828584a3582d8", "score": "0.5305633", "text": "def _update(self):\n pass", "title": "" }, { "docid": "1d1beb77daafd104a8b828584a3582d8", "score": "0.5305633", "text": "def _update(self):\n pass", "title": "" }, { "docid": "f0333f2e7f72ae748f2aebe4cd5cbea0", "score": "0.53019804", "text": "def watch_model(self, model):\n if self.should_use_remote_logger:\n wandb.watch(models=model, log=\"all\")", "title": "" }, { "docid": "0586bbf5739ffbf6cb003f3d42d7b805", "score": "0.52762574", "text": "def subscribe():\n\n pub.subscribe(onNode, \"meshtastic.node\")", "title": "" }, { "docid": "42545c97b36854cd8a15633eaaf1a587", "score": "0.5275858", "text": "def Update(self, data):", "title": "" }, { "docid": "4311e0ed658a8bb2fcc955b2b272c3db", "score": "0.5274366", "text": "def process_update(self, payload):\n update_id = payload.get('update_id')\n if update_id and update_id > self._update_offset:\n self._update_offset = update_id\n if 'message' in payload:\n self.supervisor.spawn(self.safe_exec, self.on_message,\n payload['message'])\n elif 'callback_query' in payload:\n self.supervisor.spawn(self.safe_exec, self.on_query,\n payload['callback_query'])", "title": "" }, { "docid": "6ffc8345afebddc83c6b279663d75d93", "score": "0.5268459", "text": "def update(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance, data=request.data, partial=True)\n if serializer.is_valid():\n errors_messages = []\n self.perform_update(serializer)\n return Response(data={\"response_code\": 0, \"response_message\": \"Frequency entry updated successfully.\",\n \"obj\": serializer.data, \"errors\": errors_messages},\n status=status.HTTP_201_CREATED)\n else:\n default_errors = serializer.errors # default errors dict\n errors_messages = []\n for field_name, field_errors in default_errors.items():\n for field_error in field_errors:\n error_message = '%s: %s' % (field_name, field_error)\n errors_messages.append(error_message)\n\n return Response(data={\"response_code\": 1, \"response_message\": \"Frequency entry not updated\",\n \"obj\": serializer.data, \"errors\": errors_messages},\n status=status.HTTP_400_BAD_REQUEST)", "title": "" }, { "docid": "0c65c332e0da0ad2b6a364f4c274b4a4", "score": "0.52646416", "text": "def observe(self):\n pass", "title": "" }, { "docid": "ffb1344d2225ff8722984cad8116bcfd", "score": "0.52493244", "text": "def notify(self):\n\n print(\"Subject: Notifying observers...\")\n for observer in self._observers:\n observer.update(self)", 
"title": "" }, { "docid": "245af5a129c7a1bc33b96832654473ca", "score": "0.5246309", "text": "async def update(self):\r\n await self._client.update()", "title": "" }, { "docid": "54e1f72a280cdb05c24e1d8de1814021", "score": "0.5229", "text": "def update(self, data, *args, **kwargs):\n self.__getattr__(\"update\", _location=\"remote\")(data, *args, **kwargs)", "title": "" }, { "docid": "9aeda54897245a9d6414115d338ac40c", "score": "0.52277863", "text": "async def handle_event_wiser_update(hub: str):\n connection.send_message(\n {\n \"id\": msg[\"id\"],\n \"type\": \"event\",\n \"event\": {\n \"event\": \"wiser_updated\",\n \"hub\": hub,\n }, # data to pass with event\n }\n )", "title": "" }, { "docid": "8b8b7c376a484f6cae6493aec8d66487", "score": "0.5226176", "text": "async def on_connect(self, _):\n self.last_update_success = True", "title": "" }, { "docid": "1ea7548605548b28169fad3bfd1aae7e", "score": "0.5221183", "text": "def update(self, request, *args, **kwargs):\n partial = kwargs.pop('partial', False)\n instance = self.get_object()\n serializer = UpdateEventModelSerializer(\n instance,\n data=request.data,\n partial=partial\n )\n serializer.is_valid(raise_exception=True)\n event = serializer.save()\n\n data = self.get_serializer(event).data\n return Response(data, status=status.HTTP_200_OK)", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": 
"63705f1ca943ee5589ca4acc99e87352", "score": "0.52187914", "text": "def update(self):\n pass", "title": "" }, { "docid": "4eb93c971e507cd03871d2b7511995b5", "score": "0.5204943", "text": "def update(self):\n\n raise NotImplementedError()", "title": "" }, { "docid": "53da9d749cd193a850587ecfd424041a", "score": "0.52047443", "text": "def _update_model(self, new_state, data):", "title": "" }, { "docid": "f231c97b18562fecc587a028385f33a2", "score": "0.51988226", "text": "def partial_update(self, request, pk=None):\n raise errors.NotImplemented('HTTP Verb Not Supported')", "title": "" }, { "docid": "52abb06960a074d1975bdcd67e34966d", "score": "0.5195938", "text": "def update(self, data):\n self._sender.send(data)", "title": "" }, { "docid": "c96eb87e87a10b42d2d4c3a09b3ef6cb", "score": "0.51864207", "text": "def update_view(self):\n return [\n r'^(?P<pk>.+)/change/$',\n self.get_update_view(),\n '{model_name}_change',\n ]", "title": "" }, { "docid": "d5dde718a6acb79a1e7e43f012586076", "score": "0.5185283", "text": "def update(self):\n\n self.process_pool()\n self.process_reactor()", "title": "" }, { "docid": "d5dde718a6acb79a1e7e43f012586076", "score": "0.5185283", "text": "def update(self):\n\n self.process_pool()\n self.process_reactor()", "title": "" }, { "docid": "d24adb6c1a03b10a335576c7599289e8", "score": "0.51786363", "text": "def update(self) -> None:\n pass", "title": "" }, { "docid": "0083b9ee8a10535774c32c619a2d7010", "score": "0.51731795", "text": "def update(self):\n raise NotImplementedError", "title": "" }, { "docid": "0083b9ee8a10535774c32c619a2d7010", "score": "0.51731795", "text": "def update(self):\n raise NotImplementedError", "title": "" } ]
e936c006f2b60cb1447f0caf61b07198
r""" Return the orientation of the crossings of the link diagram of ``self``.
[ { "docid": "eb391244b5bc7f6d2cc3e43b0e0fc07c", "score": "0.6693827", "text": "def orientation(self):\n directions = self._directions_of_edges()[0]\n orientation = []\n for C in self.pd_code():\n if C[0] == C[1] or C[2] == C[3]:\n orientation.append(-1)\n elif C[1] == C[2] or C[0] == C[3]:\n orientation.append(1)\n elif directions[C[1]] == C:\n orientation.append(-1)\n else:\n orientation.append(1)\n return orientation", "title": "" } ]
[ { "docid": "f1ce0f0417423532c5526f5a36e613ce", "score": "0.6612555", "text": "def get_orientation(self, visited):\n #print(visited)\n if visited:\n rot = mathutils.Quaternion(self.helical_axisParam, self.get_angle(2))\n a = self.positions[2]\n b = self.positions[3]\n else:\n rot = mathutils.Quaternion(self.helical_axisParam, self.get_angle(0))\n a = self.positions[0]\n b = self.positions[1]\n n = mathutils.Vector(self.east)\n n.rotate(rot)\n return a, b, n", "title": "" }, { "docid": "7d032df59101c5531e7038bcff103b56", "score": "0.62240785", "text": "def orientation(self):\n\n a, b, b, c = self.covariance.flat\n if a < 0 or c < 0: # negative variance\n return np.nan * u.rad # pragma: no cover\n return 0.5 * np.arctan2(2. * b, (a - c))", "title": "" }, { "docid": "3804bde52098c268274098da0ca6420c", "score": "0.6199202", "text": "def orientation(self):\n o_tmp = self._orientation + 180\n o_tmp %= 360\n return o_tmp - 180", "title": "" }, { "docid": "dceed3de285eb7e32bdd733bc1aea5cd", "score": "0.60408294", "text": "def interior_angle(self):\n return (self.edges - 2)*(100/self.edges)", "title": "" }, { "docid": "51564e6580bb05eeca02be5bec95fa8e", "score": "0.5991328", "text": "def visual_rotation(self):\n if getattr(self, '_body', None) is None:\n return self.rotation\n\n return -np.rad2deg(self._body.angle)", "title": "" }, { "docid": "22b662239cc7e1ca2715e694e4e3022a", "score": "0.5961181", "text": "def cone_orientation(self):\n if self.is3d:\n orientation = lib.D3DVECTOR()\n _check(\n self._native_buffer3d.GetConeOrientation(ctypes.byref(orientation))\n )\n return orientation.x, orientation.y, orientation.z\n else:\n return 0, 0, 0", "title": "" }, { "docid": "1cfa68b1b4ca59c83845cf36d6f5fc0d", "score": "0.5934017", "text": "def chiral(self):\n return self.orientation_from_basis(self.basis_from_domains(self.domains))", "title": "" }, { "docid": "d889bd8be8a0754c4092b53db9f850bb", "score": "0.59151345", "text": "def getOrientation(self):\n return self.orientation", "title": "" }, { "docid": "4b7b3d83e7726ac46530f05bc07cb9ae", "score": "0.59147805", "text": "def incidence_angle(self):\n return self.get_incidence_angle()", "title": "" }, { "docid": "00a61bb1b7b5fd2a0a9a2f509fd7be6d", "score": "0.58855635", "text": "def _orientation(side):\r\n a, b = side\r\n if a < b:\r\n return 1\r\n elif a > b:\r\n return -1\r\n else:\r\n raise ValueError('side connects a vertex to itself: ' + str(side))", "title": "" }, { "docid": "9dffd6e8401a42ddf7d3154f8f34c4e3", "score": "0.5871174", "text": "def angle(self):\n return", "title": "" }, { "docid": "9dffd6e8401a42ddf7d3154f8f34c4e3", "score": "0.5871174", "text": "def angle(self):\n return", "title": "" }, { "docid": "5b7dfe204d04525a15a36c9e7944705d", "score": "0.5828715", "text": "def getOrientation(self):\n if self._belt_connection_state != BeltConnectionState.CONNECTED:\n return None\n if self._belt_firm_version<34:\n print(\"BeltController: Unable to know the belt orientation. 
\" +\n \"Orientation notifications are available only from \"+\n \"firmware version 34.\")\n return\n if self._belt_heading is None or self._belt_heading_offset is None:\n return None\n orientation = (self._belt_heading, self._belt_heading_offset)\n return orientation", "title": "" }, { "docid": "d075472cb4de66e6b0a74cfcd0e228a0", "score": "0.5820682", "text": "def orientated_lines(self) -> List[str]:\n return self.get_orientated_lines(self.lines, self.flipped, self.rotated)", "title": "" }, { "docid": "b542f277095793890e56ac1e4b6e7931", "score": "0.57949173", "text": "def getOrientation(self):\n return self._orientation", "title": "" }, { "docid": "7588595274d5a093468ef9b29a381206", "score": "0.57776535", "text": "def getRotationDirection(self):\n return self.rotationDirection", "title": "" }, { "docid": "f7af541adf1e6d318ac133fe19ac0e86", "score": "0.57652414", "text": "def _get_end_effector_orientation(self):\n orient_quat = p.getLinkState(\n self.robot,\n linkIndex=8,\n computeForwardKinematics=True,\n physicsClientId=self.id)[1]\n # orient_euler = p.getEulerFromQuaternion(orient_quat)\n return np.array(orient_quat)", "title": "" }, { "docid": "9b0133cc8eb6a2ec28272c319c7c2437", "score": "0.5738593", "text": "def get_orientation_vector(self):\n return rotate_vector([1, 0, 0][:self.get_dim()], self.orientation)", "title": "" }, { "docid": "8911de800f0563996af7608bc225a09b", "score": "0.57322216", "text": "def get_orientation(self):\n return tuple(self.x[7:10]) + (self.x[6],)", "title": "" }, { "docid": "9f1643187bd3a2ef3ebe4013533d220f", "score": "0.57239", "text": "def getAngle(self):\n return _almathswig.Pose2D_getAngle(self)", "title": "" }, { "docid": "16fcaab976971d18dffb06ac30e3c340", "score": "0.5717684", "text": "def interior_angle(self):\n\t\tif not hasattr(self, '_int_angle'):\n\t\t\tself._int_angle = (self.vertices - 2) * 180 / self.vertices\n\t\treturn self._int_angle", "title": "" }, { "docid": "7c9dbc0446b32dce93f02676c53537ea", "score": "0.57156324", "text": "def getAngle(self):\n return self.joint_pos", "title": "" }, { "docid": "b4966a3ee04a981b11df3272a3eaf06e", "score": "0.57014596", "text": "def adjoint(self):\n rotmat = self.rot.as_matrix()\n return np.vstack(\n [np.hstack([rotmat,\n self.RotationType.wedge(self.trans).dot(rotmat)]),\n np.hstack([np.zeros((3, 3)), rotmat])]\n )", "title": "" }, { "docid": "9d45ef89ffee63c47de030b925816160", "score": "0.5670013", "text": "def __repr__(self):\n return _almathswig.Rotation3D___repr__(self)", "title": "" }, { "docid": "7523e3f83d08a5086f147458a7be83db", "score": "0.56660104", "text": "def draw_vectors(self):\n arm_v = self.state.heading.cross(Y).unit\n if arm_v == O:\n arm_v = X\n head_v = self.state.heading.rotate(self.state.elevation, about=arm_v)\n return arm_v, head_v", "title": "" }, { "docid": "325b7ed93cc67368794d6d9569677105", "score": "0.56347597", "text": "def OrientationVector():", "title": "" }, { "docid": "961cac352ffa7cb15f44fe0d19ebe7fc", "score": "0.56207556", "text": "def __repr__(self):\n return _almathswig.Rotation___repr__(self)", "title": "" }, { "docid": "3eae7508e063079543ad744292681153", "score": "0.56200635", "text": "def find_rotation(self):\n\n return math.degrees(-abs(STEVE_ROLL - self.face['roll']))", "title": "" }, { "docid": "9f488018c19279799db9855cf4189cb4", "score": "0.56190187", "text": "def orientation(self, v=None):\n if v is None:\n v = self._DEFAULT_ORIENTATION\n if isinstance(v, str):\n v = v.lower()\n v = {'horizontal': 'h', 0: 'h', 'left-to-right': 'h',\n 'vertical': 'v', 1: 
'v', 'top-to-bottom': 'v',\n 'right-to-left': 'hr', 'bottom-to-top': 'vr'}.get(v, v)\n if v not in ('h', 'v', 'hr', 'vr'):\n raise ValueError('%s.orientation got unknown value %r' % (self.id, v))\n return v", "title": "" }, { "docid": "83992eabfff6941718d8e63707819df8", "score": "0.5613136", "text": "def horizontal_angle(self):\n hypotenuse = math.sqrt(\n (self.topLeft['x'] - self.topRight['x'])^2 + \n (self.topLeft['y'] - self.topRight['y'])^2)\n opposite = self.topRight['x'] - self.topLeft['x']\n\n angle = math.asin(opposite / hypotenuse)\n return math.degrees(angle)", "title": "" }, { "docid": "e98ba986bc9f2ce7c46dd419dcf89c42", "score": "0.5608064", "text": "def orientation(self) -> bool:\n return self.begin.x != self.end.x", "title": "" }, { "docid": "e2377f4ce40c0aff70a34cf387b56689", "score": "0.5578167", "text": "def _compute_orientation(self):\n cyl_mat = self.sim.data.body_xmat[self.cyl_body_id]\n cyl_mat.shape = (3, 3)\n cyl_pos = self.sim.data.body_xpos[self.cyl_body_id]\n\n hole_pos = self.sim.data.body_xpos[self.hole_body_id]\n hole_mat = self.sim.data.body_xmat[self.hole_body_id]\n hole_mat.shape = (3, 3)\n\n v = cyl_mat @ np.array([0, 0, 1])\n v = v / np.linalg.norm(v)\n center = hole_pos + hole_mat @ np.array([0.1, 0, 0])\n\n t = (center - cyl_pos) @ v / (np.linalg.norm(v) ** 2)\n d = np.linalg.norm(np.cross(v, cyl_pos - center)) / np.linalg.norm(v)\n\n hole_normal = hole_mat @ np.array([0, 0, 1])\n return (\n t,\n d,\n abs(\n np.dot(hole_normal, v) / np.linalg.norm(hole_normal) / np.linalg.norm(v)\n ),\n )", "title": "" }, { "docid": "a03cb0da6f48bfeca079bb18665f4cb2", "score": "0.5549978", "text": "def angle(self):\n return self._keypoint.angle", "title": "" }, { "docid": "23e036b235d20ac4ff4b411baab34aa4", "score": "0.55357623", "text": "def toAngleAxis(self):\n\n nself = self.normalize()\n \n # Clamp nself.w (since the quat has to be normalized it should\n # be between -1 and 1 anyway, but it might be slightly off due\n # to numerical inaccuracies)\n w = max(min(nself.w,1.0),-1.0)\n \n w = math.acos(w)\n s = math.sin(w)\n if s<1E-12:\n return (0.0, _vec3(0.0,0.0,0.0))\n return (2.0*w, _vec3(nself.x/s, nself.y/s, nself.z/s))", "title": "" }, { "docid": "dd86470fe234e2ba507e023b8ebb08fc", "score": "0.5532004", "text": "def actuator_to_joint_order(self):\n return self._inverse_order", "title": "" }, { "docid": "ad06b00cac072dcf53693f894cb13a5b", "score": "0.5528827", "text": "def arrows(self):\n Q = self._quiver\n return tuple(self.element_class(self, e[0],e[1], [i]) for i,e in enumerate(self._sorted_edges))", "title": "" }, { "docid": "226051add0982468e58659b5196d6574", "score": "0.5528287", "text": "def getRotationAngles(self):\n return self.rots", "title": "" }, { "docid": "4a40fd576f01d63c08ea8e6aa239aa06", "score": "0.5522791", "text": "def get_navigation_angles(self):\n center = len(self.nav_angles[(-0.1 <= self.nav_angles) & (self.nav_angles <= 0.1)])\n left = len(self.nav_angles[self.nav_angles > 0.1])\n right = len(self.nav_angles[self.nav_angles < -0.1])\n\n return left, center, right", "title": "" }, { "docid": "15ed520fd2dff50c2a4b08d6c630e507", "score": "0.5520763", "text": "def direction(self):\r\n\r\n return self.Direction.ABOVE", "title": "" }, { "docid": "ade181ebdf891e104a38606ab03c92e1", "score": "0.5508316", "text": "def GetAxisDirection(self):\n ...", "title": "" }, { "docid": "1b153dd300c3d58c433d58311ea5ea1a", "score": "0.55073184", "text": "def oriented_gauss_code(self):\n if self._oriented_gauss_code is not None:\n return 
self._oriented_gauss_code\n\n pd = self.pd_code()\n orient = self.orientation()\n crossing_info = {}\n for i, j in enumerate(pd):\n if orient[i] == -1:\n crossing_info[(j[0], -1, i + 1)] = j[2]\n crossing_info[(j[3], 1, i + 1)] = j[1]\n elif orient[i] == 1:\n crossing_info[(j[0], -1, i + 1)] = j[2]\n crossing_info[(j[1], 1, i + 1)] = j[3]\n edges = {}\n cross_number = {}\n for i, j in crossing_info.items():\n edges[i[0]] = [j]\n if i[1] == 1:\n cross_number[i[0]] = i[2]\n elif i[1] == -1:\n cross_number[i[0]] = -i[2]\n edges_graph = DiGraph(edges)\n d = edges_graph.all_simple_cycles()\n code = []\n for i in d:\n l = []\n for j in i:\n l.append(cross_number[j])\n del l[-1]\n code.append(l)\n oriented_code = [code, orient]\n self._oriented_gauss_code = oriented_code\n return self._oriented_gauss_code", "title": "" }, { "docid": "f6797aa8752fd466fb3b9d2fe1f4540c", "score": "0.5484768", "text": "def rotation (self):\n return self.eman.component_for_entity (self.e, Movement).angle", "title": "" }, { "docid": "734bb9200c70b3b87150063a2ee1c8da", "score": "0.54796785", "text": "def get_rotation(self):\n return self._properties['axis_rotation']", "title": "" }, { "docid": "ede9516cf0852ed210d68a267f56478a", "score": "0.5478329", "text": "def get_current_orientation(self):\n res, orientation = vrep.simxGetObjectOrientation(self.clientID, self.robot_handle, -1, vrep.simx_opmode_streaming)\n while(res != vrep.simx_return_ok):\n res, orientation = vrep.simxGetObjectOrientation(self.clientID, self.robot_handle, -1, vrep.simx_opmode_streaming)\n\n return orientation", "title": "" }, { "docid": "f8b7a4323fb538b294a3bb506e720d6e", "score": "0.5473131", "text": "def angle(self):\n\t\treturn self._angle", "title": "" }, { "docid": "5cf94794e3e17a5d6f446c0f8f375fc8", "score": "0.5466589", "text": "def angles(self):\n return self.cellpar()[3:].copy()", "title": "" }, { "docid": "854cc397cc6d892bc036b2977f302c34", "score": "0.5446664", "text": "def direction(self):\n return _vec2(math.cos(self.angle), math.sin(self.angle))", "title": "" }, { "docid": "d73efceabe911795f41f6102c1698a2f", "score": "0.5446603", "text": "def rotation(self):\n return self.eman.component_for_entity(self.e, Movement).angle", "title": "" }, { "docid": "caaae32d59ec157a2d149be54122932d", "score": "0.54398906", "text": "def physical_rotation(self):\n if getattr(self, '_body', None) is not None:\n return self._body.angle\n\n return -np.deg2rad(self.rotation)", "title": "" }, { "docid": "b4d6b17ab1a041dd4102cfbd94f159f5", "score": "0.54374886", "text": "def orientation(self, version=PHOTOS_VERSION_CURRENT):\n imagedata = self._request_image_data(version=version)\n return imagedata.orientation", "title": "" }, { "docid": "51a6bda2532c19e081bffc0ab4f309b0", "score": "0.5425652", "text": "def orientation(self):\n return self.http.get('orientation').value", "title": "" }, { "docid": "39d11370a639b5b8f3106018f59354ab", "score": "0.54195535", "text": "def angle(self):\n return self._angle", "title": "" }, { "docid": "33e137e273cc85a7e4c7e78980bc81f3", "score": "0.5389049", "text": "def getNavigationDirection(self) -> ghidra.app.util.datatype.NavigationDirection:\n ...", "title": "" }, { "docid": "947903ae5462907839df117eee023626", "score": "0.5378571", "text": "def rotation(self) -> So2:\n return self._rotation", "title": "" }, { "docid": "fad93792bbd7878586f1187b17553653", "score": "0.5378019", "text": "def orientation(self,p,q,r):\n\t\tval = (q[1] - p[1]) * (r[0] - q[0]) - (q[0] - p[0])*(r[1] - q[1])\n\t\tif val == 0:\n\t\t\treturn 0\n\t\tif 
val > 0:\n\t\t\treturn 1\n\t\tif val < 0:\n\t\t\treturn 2", "title": "" }, { "docid": "0b7f9ff4bf4cd532029e5cd9b69cb94e", "score": "0.5376779", "text": "def return_angle(self) -> 'number':\n return arctan2(self.y, self.x)", "title": "" }, { "docid": "10be09e6202314e8d7447eb07c557f56", "score": "0.53754926", "text": "def get_orientation_from_lines(self, v0, v1):\n # get into an np array\n\t#v1 = invert_vector(v1)\n\t#v0 = invert_vector(v0)\n\tprint \"vectors\"\n v0, v1 = np.array(v0), np.array(v1)\n\t#print \"DOT PRODUCT\"\n\tprint v0\n\tprint v1\n\t#print np.dot(v0,v1)\n # normalize\n v0 = v0 / np.linalg.norm(v0) \n v1 = v1 / np.linalg.norm(v1) \n\tprint v0\n\tprint v1\n # find normal\n n = np.cross(v0, v1)\n\t#print \"NORMAL\"\n\t#print n\n\t#n = np.cross(v1, v0)\n # stack into a matrix\n #rotMat = np.vstack((n, v0, v1)).T\n\trotMat = np.vstack((v0, v1, n)).T\n\t#print \"ROTMAT\"\n\t#print rotMat\n\tmatrix = rotMat\n\tmatrix = linalg.orth(rotMat) #FIXME uncomment\n\t#print \"matrices\"\n\t#print rotMat\n\t#print orthogonalized\n # find the quaternion xyzw\n tbRot = tfx.tb_angles(matrix)\n quat = tbRot.quaternion\n #code.interact(local=locals())\n return list(quat)", "title": "" }, { "docid": "cfc6c801440af8ba96fd83843bb5cfa6", "score": "0.5372919", "text": "def get_direction(self):\n if self.direction_x == 1:\n return RIGHT\n elif self.direction_x == -1:\n return LEFT\n elif self.direction_y == -1:\n return UP\n elif self.direction_y == 1:\n return DOWN", "title": "" }, { "docid": "dd497a0e86b2e8ef853fcaa254608ee2", "score": "0.53591913", "text": "def _get_direction(self):\n if self.secondaryjoin is not None:\n return sync.MANYTOMANY\n elif self._is_self_referential():\n # for a self referential mapper, if the \"foreignkey\" is a single or composite primary key,\n # then we are \"many to one\", since the remote site of the relationship identifies a singular entity.\n # otherwise we are \"one to many\".\n if self.remote_side is not None and len(self.remote_side):\n for f in self.foreignkey:\n if f in self.remote_side:\n return sync.ONETOMANY\n else:\n return sync.MANYTOONE\n else:\n for f in self.foreignkey:\n if not f.primary_key:\n return sync.ONETOMANY\n else:\n return sync.MANYTOONE\n else:\n onetomany = len([c for c in self.foreignkey if self.mapper.unjoined_table.corresponding_column(c, False) is not None])\n manytoone = len([c for c in self.foreignkey if self.parent.unjoined_table.corresponding_column(c, False) is not None])\n if not onetomany and not manytoone:\n raise exceptions.ArgumentError(\"Cant determine relation direction for '%s' on mapper '%s' with primary join '%s' - foreign key columns are not present in neither the parent nor the child's mapped tables\" %(self.key, str(self.parent), str(self.primaryjoin)) + str(self.foreignkey))\n elif onetomany and manytoone:\n raise exceptions.ArgumentError(\"Cant determine relation direction for '%s' on mapper '%s' with primary join '%s' - foreign key columns are present in both the parent and the child's mapped tables. 
Specify 'foreignkey' argument.\" %(self.key, str(self.parent), str(self.primaryjoin)))\n elif onetomany:\n return sync.ONETOMANY\n elif manytoone:\n return sync.MANYTOONE", "title": "" }, { "docid": "6f200472a4e45fdd2fff2d59be74e33a", "score": "0.53297985", "text": "def angle(self):\n a = self.p1() - self.p0()\n b = self.p1() - self.p2()\n d = np.cross(a, b)[0]\n pi = math.pi\n return pi - np.sign(-d) * math.acos(a.dot(b) / (norm(a) * norm(b)))", "title": "" }, { "docid": "e35fee336f225d304b866f4347491d5a", "score": "0.5319846", "text": "def rotation(self):\n return self._rotation * 180. / np.pi", "title": "" }, { "docid": "edb431d2b2d417488df53941353f18f5", "score": "0.5307419", "text": "def angle(self) -> float:\n try:\n return self.cic.hedraAngle[self.ndx]\n except AttributeError:\n return 0.0", "title": "" }, { "docid": "cca5776dfc905da66abfbc620788b856", "score": "0.53044343", "text": "def getAngle(self):\n\n return math.atan2(self.y, self.x)", "title": "" }, { "docid": "5a42afb926c17971119d3f92c1b7a531", "score": "0.53030115", "text": "def slip_angle(self):\n if np.count_nonzero(self.mol_a.geom.perp_ax) == 0:\n self.mol_a.calc_axes()\n if np.count_nonzero(self.mol_b.geom.perp_ax) == 0:\n self.mol_b.calc_axes()\n # vector gonig from A to B\n cen_cen = self.mol_b.centroid() - self.mol_a.centroid()\n # test angle with positive cen_cen and negative cen_cen. keep smallest\n slip_angle_a_pos = ao.vec_angle(self.mol_a.geom.perp_ax, cen_cen)\n slip_angle_a_neg = 180 - slip_angle_a_pos\n slip_angle_b_pos = ao.vec_angle(self.mol_b.geom.perp_ax, cen_cen)\n slip_angle_b_neg = 180 - slip_angle_b_pos\n slip_angle_a = min((slip_angle_a_pos,slip_angle_a_neg))\n slip_angle_b = min((slip_angle_b_pos,slip_angle_b_neg))\n\n final_slip = min(slip_angle_a, slip_angle_b)\n return final_slip", "title": "" }, { "docid": "00dcf189db084ce20dbd4f1922536603", "score": "0.5300512", "text": "def arrowEdge(self):\n return self._state.arrow_edge", "title": "" }, { "docid": "3dac802efa442b9a6711de9b55087884", "score": "0.5299564", "text": "def _get_orientation(self, origin, p1, p2):\n difference = (\n ((p2.x - origin.x) * (p1.y - origin.y))\n - ((p1.x - origin.x) * (p2.y - origin.y))\n )\n\n return difference", "title": "" }, { "docid": "708b9f64959982a867fbae5e6da1a0cf", "score": "0.529511", "text": "def _orientation(parents_slot, parents_orientation):\n orientation = int(parents_orientation)\n if orientation == 0:\n if parents_slot == 0:\n return 2\n elif parents_slot == 1:\n return 0\n elif parents_slot == 2:\n return 3\n elif parents_slot == 3:\n return 1\n elif orientation == 1:\n if parents_slot == 0:\n return 3\n elif parents_slot == 1:\n return 1\n elif parents_slot == 2:\n return 0\n elif parents_slot == 3:\n return 2\n elif orientation == 2:\n if parents_slot == 0:\n return 0\n elif parents_slot == 1:\n return 2\n elif parents_slot == 2:\n return 1\n elif parents_slot == 3:\n return 3\n elif orientation == 3:\n if parents_slot == 0:\n return 1\n elif parents_slot == 1:\n return 3\n elif parents_slot == 2:\n return 2\n elif parents_slot == 3:\n return 0\n else:\n err(\"Unsupported parents rotation angle provided.\")", "title": "" }, { "docid": "a9f684835fed799de03657f3eb6f63c7", "score": "0.5287697", "text": "def angleAdir(self, joints):\r\n\t\tj = joints.reshape((16,2), order = 'F')\r\n\t\tlinks = [(0,1),(1,2),(2,6),(3,6),(4,3),(5,4),(10,11),(11,12),(12,8),(13,8),(14,13),(15,14)]\r\n\t\tangles_l = [(0,1),(1,2),(2,3),(3,4),(4,5),(6,7),(7,8),(8,9),(9,10),(10,11)]\r\n\t\tvects = []\r\n\t\tangles = 
[]\r\n\t\tfor i in range(len(links)):\r\n\t\t\tvects.append(self.joint2Vect( j[links[i][0]], j[links[i][1]]))\r\n\t\tfor i in range(len(angles_l)):\r\n\t\t\tangles.append(self.vect2angle(vects[angles_l[i][0]], vects[angles_l[i][1]]))\r\n\t\treturn vects, np.degrees(angles)", "title": "" }, { "docid": "170affc4424693c029a61d0d6b4050c5", "score": "0.5282547", "text": "def angle(self, other):\n if isinstance(other, Line3D):\n return self.direction.angle(other.direction)\n elif isinstance(other, Plane3D):\n return self.direction.angle(other.normal)\n\n raise NotImplementedError(\n \"Angle between a Line3D and a %s is not defined\" % other\n )", "title": "" }, { "docid": "26f5d256011319465e9a661b72461a38", "score": "0.5281203", "text": "def adjacency(self):\n return self.halfedge", "title": "" }, { "docid": "b85f084fb7e8292e4d128f59ae12ac00", "score": "0.5280726", "text": "def angle(self):\n data_dict = {}\n for name, vectors in self._angles.items():\n data_dict[name]=self.get_angle(self[vectors['left']], self[vectors['right']])\n return pd.DataFrame(data_dict).set_index(self.data.index)", "title": "" }, { "docid": "539d8d96d2a39a5682cc9bd508a5cb68", "score": "0.52799267", "text": "def orientation(p, q, r):\n val = (float(q.y - p.y) * (r.x - q.x)) - (float(q.x - p.x) * (r.y - q.y))\n if val > 0:\n # Clockwise orientation\n return 1\n elif val < 0:\n # Counterclockwise orientation\n return 2\n else:\n # Colinear orientation\n return 0", "title": "" }, { "docid": "7fd5ffd4d8d9d2f7074ae8c228619244", "score": "0.52719665", "text": "def get_diagonal(self):\n diagonal1 = (sqrt(pow(self.width, 2) + pow(self.length, 2) -\n 2 * self.width * self.length * cos(radians(self.angle))))\n diagonal2 = (sqrt(self.width ** 2 + self.length ** 2 -\n 2 * self.width * self.length * cos(radians(180 - self.angle))))\n return max(diagonal1, diagonal2)", "title": "" }, { "docid": "69be6631e46a9e7b90918b09e3d54b02", "score": "0.52628636", "text": "def transpose(self):\n return _almathswig.Rotation_transpose(self)", "title": "" }, { "docid": "170740438b53307402ff6a08982cab9a", "score": "0.52611506", "text": "def dowker_notation(self):\n pd = self.pd_code()\n orient = self.orientation()\n dn = [(i[0], i[3]) if orient[j] == -1 else (i[0], i[1])\n for j, i in enumerate(pd)]\n return dn", "title": "" }, { "docid": "0fa82c79f5b39cf643cdea5653e6ac7f", "score": "0.5247597", "text": "def determinant(self):\n return _almathswig.Rotation_determinant(self)", "title": "" }, { "docid": "fe599d0edc7d410a35e26f887d1cb4e5", "score": "0.5235638", "text": "def getangle(self):\n return self._ang", "title": "" }, { "docid": "428945ca30175820c559f80195c1eea4", "score": "0.52339476", "text": "def getAngle(self) -> float:\n ...", "title": "" }, { "docid": "19abbd26a30f7809dbeb5a9ccfbc6b3c", "score": "0.5231853", "text": "def get_aligner_edge(self):\n\n return self._aligner_edge", "title": "" }, { "docid": "2470de906bb465934028b3e5db5248c6", "score": "0.5219311", "text": "def getRotationAngle(self):\n angle = self.getAttribute('rotation_angle')\n return max(0.0, min(angle, 360.0))", "title": "" }, { "docid": "0e46f23123ec0f1c2606cebe5ca164a0", "score": "0.5212095", "text": "def horizon(self) -> StratigraphicObject:\n return self.hor", "title": "" }, { "docid": "0e46f23123ec0f1c2606cebe5ca164a0", "score": "0.5212095", "text": "def horizon(self) -> StratigraphicObject:\n return self.hor", "title": "" }, { "docid": "949760041ce33d91a71b000aeaecb82a", "score": "0.5208128", "text": "def getRotation(self):\n\t\treturn self._rotation", "title": "" }, { 
"docid": "0477b20487a0be85b0e41e165efba6d4", "score": "0.5206345", "text": "def angle(self, other):\n if isinstance(other, Line3D):\n return self.normal.angle(other.direction)\n elif isinstance(other, Plane3D):\n return self.normal.angle(other.normal)\n\n raise NotImplementedError(\n \"Angle between a Line3D and a %s is not defined\" % other\n )", "title": "" }, { "docid": "18175f225d581f2c60cb2c3281c79621", "score": "0.52045244", "text": "def rotated_intersections(self):\n slices = self.find_active_intersections()\n rotation_angle = slices[0]\n slices = slices - rotation_angle\n\n return rotation_angle, slices + (slices < 0)*2.*np.pi", "title": "" }, { "docid": "67e6be9727dc8fecbb51c2b9161fd7f1", "score": "0.5204403", "text": "def getAngle(self):\n angle = self.getAttribute('angle')\n return max(0.0, min(angle, 90.0))", "title": "" }, { "docid": "251c76d08235f363308b51f9c5ade9a7", "score": "0.52041566", "text": "def angleBetween(self, Qt_ScreenOrientation, Qt_ScreenOrientation_1): # real signature unknown; restored from __doc__\n return 0", "title": "" }, { "docid": "33598d80d8d4b1f15bb2dcd9f48a0c47", "score": "0.52019775", "text": "def get_angle(self):\n return self._header[b'ItalicAngle']", "title": "" }, { "docid": "855f210d4c0d713feb877e8fe9caa980", "score": "0.5196006", "text": "def convertir_direction_angle(self):\n \n return atan2( self.robot.direction[0], self.robot.direction[1] )", "title": "" }, { "docid": "d0d4df130c603bbfa49f285a775d25db", "score": "0.5189529", "text": "def angle(self) -> float:\n return math.degrees(math.atan2(self.y, self.x))", "title": "" }, { "docid": "2282585c960cca9c3e58cad6865821a7", "score": "0.5188738", "text": "def orientation(self) -> int:\n return self._prefs.get(PREF_ORIENTATION, 1)", "title": "" }, { "docid": "e5c90c3892512bd9700182e0682ca2c1", "score": "0.51867753", "text": "def angles(self):\n if np.count_nonzero(self.mol_a.geom.perp_ax) == 0:\n self.mol_a.calc_axes()\n if np.count_nonzero(self.mol_b.geom.perp_ax) == 0:\n self.mol_b.calc_axes()\n out_lis = [ao.vec_angle(self.mol_a.geom.prin_ax,self.mol_b.geom.prin_ax),\n ao.vec_angle(self.mol_a.geom.sec_ax,self.mol_b.geom.sec_ax),\n ao.vec_angle(self.mol_a.geom.perp_ax,self.mol_b.geom.perp_ax)]\n out_arr = np.array(out_lis)\n\n return out_arr", "title": "" }, { "docid": "865ea4583a62436499221854b7073762", "score": "0.5182891", "text": "def __str__(self):\n return _almathswig.Rotation___str__(self)", "title": "" }, { "docid": "1e6e2b375ba3d774f2a8cdb93a229020", "score": "0.517307", "text": "def transpose(self) -> 'TextDiagramDrawer':\n out = TextDiagramDrawer()\n out.entries = {(y, x): v for (x, y), v in self.entries.items()}\n out.vertical_lines = [_VerticalLine(*e)\n for e in self.horizontal_lines]\n out.horizontal_lines = [_HorizontalLine(*e)\n for e in self.vertical_lines]\n return out", "title": "" }, { "docid": "f4805008fa5f16e5072917c24009e48a", "score": "0.5163102", "text": "def getVerticalAlignment(self):\n return (getLayout()).getVerticalAlignment()", "title": "" }, { "docid": "9ae2f47882714711c2a7ff99afa63a01", "score": "0.5155457", "text": "def opposite(self):\n\t\treturn Traversal(self.segment, not self.orientation)", "title": "" }, { "docid": "1f479c6079fa88d9df98bfb8fd9a81dd", "score": "0.5149696", "text": "def r(self) -> So2:\n return self._rotation", "title": "" }, { "docid": "33ea2de217047fbba1daa6ae1067f460", "score": "0.5146656", "text": "def exterior_angle(self,k):\n return arccos(self.tangent_vector(k+1).inner_product(self.tangent_vector(k)))", "title": "" }, { "docid": 
"4a43da5dda9dc17659bb340f092cfc95", "score": "0.51417583", "text": "def getDrawOrder(self):\n\n raise NotImplementedError", "title": "" }, { "docid": "414413852193dc6d416bb3891e168dc4", "score": "0.5140761", "text": "def getVerticalAlignment(self):\n return verticalAlignment", "title": "" } ]
05d347d75016454d195ce03d8236430f
Handles input file browsing and selection.
[ { "docid": "49171b4d5fc7d7816a1b2a758f052185", "score": "0.0", "text": "def get_input_file_name(self):\r\n\r\n\t\tentry = self.input_file_E\r\n\t\tentry.delete(0, \"end\")\r\n\t\t\r\n\t\troot.update()\r\n\t\tself.master_input = filedialog.askopenfilename(multiple = True)\r\n\t\troot.update()\r\n\t\t\r\n\t\tif self.master_input == \"\":\r\n\t\t\tself.master_input = (\"\",)\t\r\n\t\t\treturn \r\n\r\n\t\tif len(self.master_input) == 1:\r\n\t\t\tentry.insert(0, self.master_input[0])\r\n\t\t\t\r\n\t\t\ttemp_re = re.search((\".*\\.\"), os.path.basename(os.path.normpath(self.master_input[0])))\r\n\t\t\tif temp_re:\r\n\t\t\t\tself.input_root = temp_re.group(0)[:-1]\r\n\t\t\telse:\r\n\t\t\t\tself.input_root = os.path.basename(os.path.normpath(self.master_input[0]))\r\n\t\t\t\t\t\r\n\t\t\t# Update default names\r\n\t\t\treplace_entry(self.plot_file_E, (self.input_root + \"plot.html\"))\t\t\r\n\t\t\tget_novel_name(self.plot_file_E, (self.input_root + \"plot(NUM).html\"))\r\n\t\t\treplace_entry(self.stats_file_E, (self.input_root + \"Stats.csv\"))\r\n\t\t\tget_novel_name(self.stats_file_E, (self.input_root + \"Stats(NUM).csv\"))\r\n\r\n\t\telse:\r\n\t\t\tentry.insert(0, self.master_input)\r\n\t\t\treplace_entry(self.plot_file_E, \"------------------\")\r\n\t\t\treplace_entry(self.stats_file_E, \"------------------\")", "title": "" } ]
[ { "docid": "32ea7dad953fd98a57afaf65664098bb", "score": "0.68089163", "text": "def input_pick(self):\n file_name = QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', self.vid_file)\n if file_name[0]:\n self.input_load(file_name[0])", "title": "" }, { "docid": "326edf721d5ec1cd4508a2ec8ddd6168", "score": "0.67879474", "text": "def OnSelectFile(self, evt):\n evid = evt.GetId()\n if evid == self.EVT_SELECT_TRAINING_FILE:\n target = self.fileinput_training\n elif evid == self.EVT_SELECT_PROCESS_FILE:\n target = self.fileinput_analysis\n else:\n return\n fd = wx.FileDialog(self, 'Open file', 'a', 'b',\n 'Excel file (*.xlsx,*.xls)|*.xlsx;*.xls|CSV file (*.csv,*.txt)|*.txt;*.csv', wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)\n if self.__datadirectory is None:\n self.__datadirectory = os.path.curdir\n fd.SetDirectory(self.__datadirectory)\n if fd.ShowModal() == wx.ID_OK:#CANCEL:\n self.__datadirectory = os.path.dirname(fd.GetPath())\n target.SetValue(fd.GetPath())\n fd.Destroy()", "title": "" }, { "docid": "dfc9855433998af82d6c194569c2158f", "score": "0.671288", "text": "def _input_select(self):\n options = QFileDialog.Options()\n file_select, _ = QFileDialog.getOpenFileNames(\n self._view,\n 'Select Input File...',\n '',\n 'RNH Files (*.rnh);;RND Files (*.rnd);;CSV Files (*.csv);;Excel Files (*.xls*);;All Files (*)',\n options=options\n )\n if file_select:\n self._config[\"input\"] = file_select", "title": "" }, { "docid": "6ac19ad5f6676706633999912e840670", "score": "0.6612921", "text": "def onBrowse(self, event):\n \n fileBrowser = wx.FileDialog(self.GetParent(),\n style=wx.OPEN|wx.MULTIPLE|wx.FILE_MUST_EXIST)\n\n if fileBrowser.ShowModal() == wx.ID_OK:\n\n for filename in fileBrowser.GetPaths():\n # generate an ItemSelected event for each file\n zope.component.handle(\n p6.storage.events.ItemSelected(\n p6.storage.items.FileItem(filename)\n )\n )", "title": "" }, { "docid": "128867df7e6598346cedc4a90ee5cd4c", "score": "0.6427338", "text": "def browse_file(self):\n filename, _ = QFileDialog.getOpenFileName(self, 'Open file', '.', '*.txt')\n self.inp.setText(filename)\n if os.path.isfile(filename):\n self.write_log(self.logOutput, \"\\nText file: \" + filename)\n self.txt_file = filename\n else:\n self.write_log(self.logOutput, \"\\nError: Text file is not a valid file.\")", "title": "" }, { "docid": "1c82753b53086c7bc09d86dd9d1ea5df", "score": "0.64145774", "text": "def __open_file(self, event=None) -> None:\n\n filepath = askopenfilename(\n filetypes=[(\"Goethe Files\", \"*.goethe\"), (\"All Files\", \"*.*\")])\n\n if not filepath:\n # User did not select a file\n return\n\n with open(filepath, \"r\") as file:\n self.filepath = filepath\n self.filename = os.path.basename(self.filepath)\n self.editor.delete(1.0, 'end')\n self.editor.insert('end', file.read())\n self.interpreter.set_code(self.__get_text())\n self.__update_widgets()", "title": "" }, { "docid": "437515f5ee52612370c5b24e00358e2e", "score": "0.63791525", "text": "def browse(self):\n self.load_filename = filedialog.askopenfilename()\n self.pathlabel.config(text=self.load_filename)\n self.reset()\n self.read_raw_data(self.load_filename)", "title": "" }, { "docid": "c219c33a0eaa50082fc4a2ae2278979f", "score": "0.63583136", "text": "def input_button_clicked(self):\n\t\tfilename = filedialog.askopenfilename(title='Open an input file', initialdir='~/', filetypes=[('Comma-Separated File', '*.csv')])\n\t\tif filename:\n\t\t\tself.input_entry['state'] = 
\"normal\"\n\t\t\tself.input_entry.delete(0,tk.END)\n\t\t\tself.input_entry.insert(0,filename)\n\t\t\tself.check_input()", "title": "" }, { "docid": "1d7521fa57ddfc2dd57bd24fc5677db0", "score": "0.63055557", "text": "def selectfile(self):\n self.singleselectfile()\n value = self.component('filename').get()\n self.setfilename(value)\n if value:\n self.okbutton()", "title": "" }, { "docid": "77a9734522cf037b1a787fd1f67beea2", "score": "0.62773037", "text": "def _process_input(self):\n\n while not self.exit_event.is_set():\n try:\n keypress = self.screen.getkey()\n if keypress == 'q' or keypress == 'Q': # quit program\n self.exit_event.set(0)\n elif keypress == '1': # switch to mode 1\n self.exit_event.set(1)\n elif keypress == '2': # switch to mode 2\n self.exit_event.set(2)\n elif keypress == 'k': # move selected line up\n if self.selected_line != 0:\n self.selected_line -= 1\n self.change_event.set()\n elif keypress == 'j': # move selected line down\n if self.selected_line != len(self.all_plugins) - 1:\n self.selected_line += 1\n self.change_event.set()\n elif keypress == 'c': # toggle selected plugin\n with self.status_lock:\n self.plugin_statuses[self.selected_line] = not self.plugin_statuses[self.selected_line]\n self.change_event.set()\n elif keypress == 's': # send analysis request for currently selected plugins\n self._get_startup()\n elif keypress == 'i':\n stream = self._get_stream_input(keypress)\n elif keypress == 'p':\n while True:\n stream_input = self._get_stream_input(keypress)\n if stream_input.lower() == 'cancel':\n break\n elif os.path.isfile(stream_input):\n self.vimrc_path = stream_input\n break\n logging.debug(keypress)\n except curses.error:\n pass", "title": "" }, { "docid": "12df600c97a6e0a7ca94d4c201f4f07b", "score": "0.617765", "text": "def unify_tk_controller(self):\n\n self.browse_file_directory()\n self.open_and_read_in_csv_file()\n self.unify_pathway_decider()\n # clearing the parameters so the button can be used again with a new file\n self.filename = ''\n self.csv_file_in_list_format = []", "title": "" }, { "docid": "1d70e73d34b6ed6583ee0f22a31c1153", "score": "0.6151189", "text": "def fileSelected_callback(self):\n print('=== myTreeWidget.fileSelected_callback()')\n modifiers = QtWidgets.QApplication.keyboardModifiers()\n isShift = modifiers == QtCore.Qt.ShiftModifier\n isControl = modifiers == QtCore.Qt.ControlModifier\n\n print(' isControl:', isControl)\n\n theItems = self.selectedItems()\n if len(theItems) > 0:\n theItem = theItems[0]\n #selectedRow = self.fileList.currentRow() # self.fileList is a QTreeWidget\n filename = theItem.text(self.myColumns['File'])\n #print(' fileSelected_callback()', filename)\n if isControl:\n self.myCanvasWidget.getGraphicsView().zoomSelectedItem(filename)\n # visually select image in canvas with yellow square\n self.myCanvasWidget.getGraphicsView().setSelectedItem(filename)\n else:\n print(' no selected items')", "title": "" }, { "docid": "7f3bc675c44006e6c4690833c3e7c505", "score": "0.6136701", "text": "def fileButtonClicked(self):\n\t\tfilename = QtGui.QFileDialog.getOpenFileName(self, SELECT_FILE_MESSAGE)\n\t\tif filename:\n\t\t\t# User selected a file\n\t\t\tself.current_file.setText(os.path.basename(str(filename)))\n\t\t\terr = self.source_window.loadSource(str(filename))\n\t\t\tself.assembly_widget.clear()\n\t\t\tself.stack_and_frame_widget.clear()\n\t\t\tself.gdb_process.gdbReset()\n\n\t\t\tif (err > 0):\n\t\t\t\t# User selected a valid C source file\n\t\t\t\tself.compileSource(str(filename))", "title": "" }, { 
"docid": "b770c6866c5d6e146e75c313b7dd86c6", "score": "0.6100576", "text": "def open_file(self):\n self.file = askopenfile(mode='r+', filetypes=[('FASTA Files', '*.fasta'), ('FASTA files', '*.fa')])\n # check user has selected a file\n try:\n self.content = SeqIO.read(self.file, 'fasta')\n # reset text_out\n self.text_out.set('')\n # tell user no file was opened if they press cancel\n except AttributeError:\n self.text_out.set('You pressed cancel, no new file was opened')\n # note if a file has already been opened this will stay open", "title": "" }, { "docid": "73a903874df06ccc4cd0ebcdff82854d", "score": "0.6097784", "text": "def askopenfilename(self, *args, **kw):\n\n self.tk.tk_setPalette('#888888')\n save_update_step = self.update_step\n self.update_step = 0\n filename = tkFileDialog.askopenfilename(parent=self.tk, **file_opt)\n if filename:\n self.readwtf(filename)\n self.wtf = filename\n self.redraw_letters()\n self.update_step = save_update_step\n self.tk.tk_setPalette('#000000')", "title": "" }, { "docid": "1069a751d64ae183721ee6aa889b2e3b", "score": "0.60699177", "text": "def on_pushButton_DatabaseFileInputSelect_clicked(self):\n ArxmlInputFile, supported_fileKinds = QFileDialog.getOpenFileName(\n self, u'open the file', u'./')\n self.lineEdit_DatabaseFileInput.setText(ArxmlInputFile)\n filePathForWinOS = ArxmlInputFile.replace('/', '\\\\')\n if str(ArxmlInputFile)[-6:] == '.arxml':\n self.ArxmlInputFilePath = filePathForWinOS\n pass\n else:\n l_MessageBox = QMessageBox.information(\n self, u'Tips', 'The file opened is not an arxml file,pls open an arxml file.')\n pass", "title": "" }, { "docid": "e5eded315e3e275ef3516f86bec62a9d", "score": "0.6020328", "text": "def onOpenFile(self, event):\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentDirectory,\n defaultFile=\"\",\n wildcard=\"Images, PDF (*.pdf,*.jpg,*.png)|*.pdf;*.jpg;*.png|\" \\\n \"All files (*.*)|*.*\",\n style=wx.FD_OPEN | wx.FD_MULTIPLE | wx.FD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n paths = dlg.GetPaths()\n print(\"You chose the following file(s):\")\n self.parent.imagesPaths = []\n for path in paths:\n print(\"choose path: \"+path)\n self.pathsEditText.AppendText(path)\n self.parent.imagesPaths.append(path)\n dlg.Destroy()", "title": "" }, { "docid": "ed67d121ce34e64314ff8f11bc54ee77", "score": "0.5993419", "text": "def _upload_action(event=None):\n global filename\n filename = filedialog.askopenfilename()\n print('Selected:', filename)", "title": "" }, { "docid": "905e38e93dc44ad6ecd4147cdb58d426", "score": "0.5941498", "text": "def __openFileInEditor(self):\n itmList = self.getSelectedItems()\n for itm in itmList[:]:\n self.sourceFile.emit(itm.fileName())", "title": "" }, { "docid": "9806b6a347dadd39a12511e792f2b834", "score": "0.5905072", "text": "def on_open(self):\n text = _(\"Open file dialog.\")\n openFileDialog = wx.FileDialog(\n self,\n _(\"Open\"),\n wildcard=\"Circuit Definition files (*.txt;*.lcdf)|*.txt;*.lcdf\",\n style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)\n res = openFileDialog.ShowModal()\n if res == wx.ID_OK: # user selected a file\n self.current_file_path = openFileDialog.GetPath()\n self.clear_log()\n self.log_message(_(\"File opened: {}\")\n .format(self.current_file_path),\n no_new_line=True)\n self.load_file(self.current_file_path)\n self.canvas.render(_(\"Opened file\"))", "title": "" }, { "docid": "2520d775ad0387aa77a265ebb441270b", "score": "0.5901012", "text": "def _file_dialog_handler(self):\n dirname = 
QtWidgets.QFileDialog.getExistingDirectory(\n self, self.label, self.dirpath)\n if dirname:\n self.update_path(dirname)", "title": "" }, { "docid": "5ba4284f15a98d2bebba0579276bf76a", "score": "0.58996785", "text": "async def handle_file_select_action(*, event_data, action_data, logger, app):\n await _confirm_selection(\n event_data=event_data, action_data=action_data, logger=logger, app=app\n )\n\n selected_template = action_data[\"selected_option\"][\"value\"]\n repo = app[\"templatebot/repo\"].get_repo(\n gitref=app[\"root\"][\"templatebot/repoRef\"]\n )\n template = repo[selected_template]\n if len(template.config[\"dialog_fields\"]) == 0:\n await _respond_with_nonconfigurable_content(\n template=template, event_data=event_data, logger=logger, app=app\n )\n else:\n await open_template_dialog(\n template=template,\n callback_id_root=\"templatebot_file_dialog\",\n event_data=event_data,\n logger=logger,\n app=app,\n )", "title": "" }, { "docid": "5711180e7df415fb3a349649cdaabdbe", "score": "0.5883519", "text": "def _acceptSelection(self, *args, **kwargs):\n if len(args) == 0:\n return\n line = args[0]\n # get path and open it in a window\n path = line.split()[-1]\n # print(path)\n docsetManager.openSearchResult(self._ftype, path)", "title": "" }, { "docid": "eeec14d4745237123aa63e06568ad057", "score": "0.58706737", "text": "def on_select(self, e):\n style_dirdialog = wx.DD_DEFAULT_STYLE\n dlg = wx.DirDialog(self.pnl, \"Choose the output folder...\", self.src_dir, style=style_dirdialog)\n\n if dlg.ShowModal() == wx.ID_OK:\n self.out_dir = dlg.GetPath()\n\n file_prefix = self.txt_file_prefix.GetValue()\n file_suffix = self.txt_file_suffix.GetValue()\n\n if file_prefix and file_suffix:\n self.txt_outputprev.SetValue(f'{os.path.join(self.out_dir, file_prefix)}{file_suffix}.pdf [...]')\n elif file_prefix:\n self.txt_outputprev.SetValue(f'{os.path.join(self.out_dir, file_prefix)}001.pdf [...]')\n elif file_suffix:\n self.txt_outputprev.SetValue(f'{os.path.join(self.out_dir, self.src_file[:self.src_file.rindex(\".\")])}'\n f'{file_suffix}.pdf [...]')\n else:\n self.txt_outputprev.SetValue(f'{os.path.join(self.out_dir, self.src_file[:self.src_file.rindex(\".\")])}'\n '001.pdf')\n\n # update the statusbar based on the current conditions\n self._statusbar()\n # f.close()\n dlg.Destroy()", "title": "" }, { "docid": "c8bccba6909a493293f571b5ab625411", "score": "0.5861975", "text": "def on_open(self, e):\n dlg = wx.FileDialog(self.pnl, \"Choose the PDF to split and save to individual pages\", self.src_dir, \"\",\n \"PDF Files (*.pdf)|*.pdf\", wx.FD_OPEN)\n if dlg.ShowModal() == wx.ID_OK: # If the OK button is clicked on the 'Select PDF' dialog\n self.src_file = dlg.GetFilename()\n self.src_dir = dlg.GetDirectory()\n\n self.txt_filename_preview.SetValue(os.path.join(self.src_dir, self.src_file))\n # First, check if the prefix is set or not. 
If it's not, set it to the prefix of the selected file\n if self.txt_file_prefix.GetValue() == '':\n self.txt_file_prefix.SetValue(self.src_file[:self.src_file.rfind('.')])\n\n # Now, check if the suffix txt box is filled in or not, if not, set it to the standard suffix\n if self.txt_file_suffix.GetValue() == '':\n self.txt_file_suffix.SetValue('001')\n\n # Next, check if the output directory has been selected\n if self.out_dir != '':\n oval = os.path.join(self.out_dir, self.txt_file_prefix.GetValue())\n suffix = f\"{self.txt_file_suffix.GetValue()} [...]\"\n if self.txt_file_suffix.GetValue() != '':\n suffix = self.txt_file_suffix.GetValue()\n self.txt_outputprev.SetValue(f\"{oval}{suffix}.pdf [...]\")\n\n # Now, check if the output directory has not been selected\n elif self.txt_file_prefix.GetValue() and self.txt_file_suffix.GetValue() == '':\n # If not, use the base directory of the source file\n oval = f'{os.path.join(self.src_dir, self.txt_file_prefix.GetValue())}'\n self.txt_outputprev.SetValue(f\"{oval}001.pdf [...]\")\n self.out_dir = self.src_dir\n\n self._statusbar()\n\n dlg.Destroy()", "title": "" }, { "docid": "c1dd43a1603c52d3095a2971aa2269f6", "score": "0.5855089", "text": "def input_file(self):\n raise NotImplementedError()", "title": "" }, { "docid": "a30b28e216886d86b3a0cef69c21f64c", "score": "0.5834874", "text": "def select_file_clicked(ui):\n # open file ui\n file_ui = QtGui.QFileDialog()\n file_name = file_ui.getSaveFileName(None, \"Choose output file\", str(expanduser(\"~\")), SELECT_FILTER)\n ui.select_file.setText(file_name)", "title": "" }, { "docid": "a238588968bb9f994722b4f74c0461ae", "score": "0.580808", "text": "def OnFile(self, event):\r\n if event.Id == wx.ID_OPEN:\r\n self.OpenFile()\r\n elif event.Id == wx.ID_SAVE:\r\n self.SaveFile()\r\n else:\r\n super(FileEditor, self).OnFile(event)", "title": "" }, { "docid": "eef51faa54e56e08ad247e6623723298", "score": "0.57987547", "text": "def input_check(self, choice):\r\n\r\n if choice == 'U':\r\n print('\\n==========================================')\r\n print('You have chosen to upload a file!')\r\n print('==========================================')\r\n fileName = input('Please enter a filename: ')\r\n category = input('Please enter a category: ')\r\n keywords = input('Please enter keywords: ')\r\n\r\n response = self.user.upload(fileName, category, keywords)\r\n\r\n # results of upload\r\n if response == 1:\r\n print('Successful upload!\\n')\r\n\r\n else: print('A file with that name already exists.\\n')\r\n\r\n elif choice == 'R':\r\n print('\\n==========================================')\r\n print('You have chosen to retrieve a file!')\r\n print('==========================================')\r\n file = str(input('Please enter a filename: '))\r\n\r\n response = self.user.download(file)\r\n if response == 1:\r\n print(\"Success!! file retrieved...\\n\")\r\n else:\r\n print (\"Error!! cannot find file.. 
\")\r\n\r\n\r\n elif choice == 'S':\r\n print('\\n==========================================')\r\n print('You have chosen to search for a file!')\r\n print('==========================================')\r\n file = str(input('Please enter a keyword to search for: '))\r\n if self.user.search(file):\r\n print(\"\")\r\n\r\n\r\n elif choice == 'D':\r\n print('\\n==========================================')\r\n print('You have chosen to delete an existing file!')\r\n print('==========================================')\r\n file = str(input('Please enter a filename: '))\r\n\r\n self.user.delete(file)\r\n else:\r\n os._exit(0)", "title": "" }, { "docid": "41f267f1841420c1d439e92adae05613", "score": "0.5796372", "text": "def browseFile(self):\n file_name = getAFile()\n if file_name != \"\":\n self.onNewFileSelected(str(file_name))\n self.centralWidget.setMinimumSize(700, 500)", "title": "" }, { "docid": "8c11002e6b2e7416899e340e887ac87f", "score": "0.5775657", "text": "def push_Driver_Index_file(self):\n filepath = QFileDialog.getOpenFileName(self)\n self.plainTextEdit_3.setPlainText(filepath[0])\n self.filepath_Driver = filepath[0]", "title": "" }, { "docid": "389a05fcb3bf97b04a6ac375c9aa3e38", "score": "0.5752986", "text": "def handle_input(self, input):\n pass", "title": "" }, { "docid": "d4e4cc1bc5297fe75dee7cf09d1f6619", "score": "0.57359797", "text": "def fileBrowserDialog(*args, **kwargs):\n\n pass", "title": "" }, { "docid": "6401a26b5cae191dff94f0ad56319c23", "score": "0.5721455", "text": "def OnOpen(self, event):\n\t\tdlg = wx.FileDialog(self, \"\", self.aztexEditor.dirname, \"\", \"*.*\", wx.OPEN)\n\t\tif dlg.ShowModal() == wx.ID_OK: # if user clicks OK (if user wants to open a document)\n\t\t\tself.aztexEditor.filename = dlg.GetFilename()\n\t\t\tself.aztexEditor.dirname = dlg.GetDirectory()\n\t\t\tf = open(os.path.join(self.aztexEditor.dirname, self.aztexEditor.filename), 'r')\n\t\t\tself.aztexEditor.SetValue(f.read())\n\t\t\tf.close()\n\t\t\tself.SetTitle(self.aztexEditor.filename)\n\t\tdlg.Destroy()", "title": "" }, { "docid": "af8edc6ab1e42e8c1b13c549357083d1", "score": "0.56955665", "text": "def pick_file(self):\n fileName = QtGui.QFileDialog.getOpenFileName(self,\n \"Open Session\", __file__, \n \"Session Files ({x})\".format( \\\n x = self.file_extension))\n \n self.pathLE.setText(fileName[0])\n self.textChanged.emit()", "title": "" }, { "docid": "6b245b14402cd1cdbfb0072cfb80ce79", "score": "0.5685589", "text": "def file_dialog(self):\n\n self.filename = filedialog.askopenfilename(\n initialdir=\"./\", title=\"Select A File\",\n )\n self.label = ttk.Label(self.labelFrame, text=\"\")\n self.label.grid(column=1, row=2)\n self.label.configure(text=self.filename)\n if self.filename:\n self.get_wave_info(self.filename)", "title": "" }, { "docid": "33169b5160e13596b2c90cfb1e7f60df", "score": "0.566944", "text": "def prepare_input(self, _input, _file):\n return NotImplementedError", "title": "" }, { "docid": "f25569617da9589321d009c6013739f0", "score": "0.5661297", "text": "def browse_file():\n currdir = os.getcwd()\n Tk().withdraw()\n filepath = tkFileDialog.askopenfilename(parent = root, initialdir=currdir, title='Please select a file')\n if len(filepath) > 0:\n print \"You selected file : %s\" % filepath\n else:\n return browse_file()\n return filepath", "title": "" }, { "docid": "2097e7034e17e328cb54eeed28a3c99d", "score": "0.56575316", "text": "def open_files(self) :\n\n self.input_file = self._open_root( self.input_file_name )\n if self.input_file is None :\n return 
self.error_before_loop()\n\n self.input_tree = self._load_tree( self.input_tree_name )\n if self.input_tree is None :\n return self.error_before_loop()\n\n self.output_file = self._create_root( self.output_file_name )\n if self.output_file is None :\n return self.error_before_loop()\n\n return self.ok_before_loop()", "title": "" }, { "docid": "e0657a78066d8a2bc2e5a6f8bdf76acc", "score": "0.5649424", "text": "def singleselectfile(self):\n cs = self.component('filenamebox').curselection()\n if cs != ():\n value = self.component('filenamebox').get(cs)\n self.setfilename(value)", "title": "" }, { "docid": "30f5151d5d9091bead031835bea19500", "score": "0.56484985", "text": "def openFile(self):\r\n\t\t\r\n\t\tfList = [(\"Python Files\", \"*.py\"), (\"Text Files\", \"*.txt\")]\r\n\t\tfileName = tkinter.filedialog.askopenfilename(parent = self, filetypes = fList)\r\n\t\t\r\n\t\tif fileName != \"\":\r\n\t\t\tfile = open(fileName, 'r')\r\n\t\t\ttext = file.read()\r\n\t\t\tfile.close()\r\n\t\t\tself.outputArea.setText(text)\r\n\t\t\tself.setTitle(fileName)", "title": "" }, { "docid": "306c52717b5f72ba80aa3e0cbaa36620", "score": "0.5645772", "text": "def handle_click_open():\n filepath = askopenfilename(\n filetypes=[(\"Python Files\", \"*.py\"), (\"All Files\", \"*.*\")]\n )\n if not filepath:\n return\n textBox.delete(\"1.0\", tk.END)\n with open(filepath, \"r\") as input_file:\n text = input_file.read()\n textBox.insert(tk.END, text)\n window.title(f\"Python IDE - {filepath}\")", "title": "" }, { "docid": "5fc950c94972ad4b1d1998ece5ae5be8", "score": "0.564423", "text": "def browse_file_directory(self):\n\n self.filename = filedialog.askopenfilename(initialdir=self.files_to_unify_directory)", "title": "" }, { "docid": "46e39275a1a06853439d0db512944448", "score": "0.5626472", "text": "def _OnOpenFile( self, ev ):\n if ev is not None:\n ev.Skip()\n\n if self.state.dataModelMgr is not None:\n self._UpdateConfig()\n\n dialog = wx.FileDialog(\n\tself, 'Open File', '', '',\n 'HDF5 files (*.h5)|*.h5|VERAView session files (*.vview)|*.vview',\n\twx.FD_OPEN | wx.FD_FILE_MUST_EXIST | wx.FD_CHANGE_DIR\n )\n\n path = None\n if dialog.ShowModal() != wx.ID_CANCEL:\n path = dialog.GetPath()\n dialog.Destroy()\n\n if path:\n path, session = self._ResolveFile( path )\n self.OpenFile( path, session )", "title": "" }, { "docid": "8eaa6cea11398587f200850b0a09fa30", "score": "0.560331", "text": "def select_signal_handler(self,key):\n if self.file_walker.chdir_into(self.file_walker.join_to_cur(key.get_label())):\n selectable_list_box = self.frame.body.base_widget[0]\n selectable_list_box.body.clear()\n if(self.check_sel_cache()):\n selectable_list_box.body.extend(self.select_mode_walker_cache[self.file_walker.curdir])\n else:\n tracked_item = self.selectable_mode(self.file_walker.get_dir_list(hide_dir_changers=True))\n self.select_mode_walker_cache.update({self.file_walker.curdir:tracked_item })\n selectable_list_box.body.extend(tracked_item)", "title": "" }, { "docid": "4be470f2979fd1dcb08c53373da72a7d", "score": "0.55991966", "text": "def on_importSelector_activated(self, p0):\n self.importFiles(p0)", "title": "" }, { "docid": "14034a7596c5621f14d30f973e435517", "score": "0.559585", "text": "def event_db_file_search(self):\n db_dialog = QtWidgets.QFileDialog(self)\n if have_Qt5:\n file_name, _ = db_dialog.getOpenFileName()\n else:\n file_name = db_dialog.getOpenFileName()\n save = self.gui_row\n self.gui_row = 0\n if file_name != '':\n self.top(file_name)\n self.gui_row = save", "title": "" }, { "docid": 
"f0da0287e20edc06e1a236ed54beb558", "score": "0.55887145", "text": "def on_hex_file_selection_changed(self, *args):\n self.erase_list.unselect_all()\n\n hex_filename = self.gmap.hex_file_cbutton.get_filename()\n try:\n hex_file = HexFile(hex_filename)\n except (OSError, IOError), e:\n self.goserror(\"Unable to open hex file %(filename)s: %(strerror)s\", e)\n return\n\n micro = self.conf[\"type\"]\n block_ranges = micro_info[micro][\"block_range\"]\n try:\n blocks = hex_file.used_blocks(block_ranges)\n except HexError, e:\n self.ghexerror(\"Error processing hex file %(filename)s at \"\n \"lineno %(lineno)d: %(msg)s\", e)\n return\n self.erase_list.select_blocks(blocks)\n self.used_blocks = blocks", "title": "" }, { "docid": "7e34e3a482a22fc681a63cadbd23abbf", "score": "0.558441", "text": "def on_file_open(self, event):\n dialog = wx.FileDialog(self, message=\"Select the graph\",\n wildcard=\"DOT files (*.dot)|*.dot|\" \\\n \"All files (*.*)|*.*\",\n style=wx.OPEN | wx.CHANGE_DIR)\n\n if dialog.ShowModal() == wx.ID_OK:\n path = dialog.GetPath()\n self.open_graph = dot.read_graph(path) \n self.open_graph_path = path\n self.SetTitle(path)\n for i in range(self.toolbar.GetToolsCount()):\n self.toolbar.EnableTool(i, True)\n else:\n exit()\n\n dialog.Destroy()", "title": "" }, { "docid": "fb079809bce195e6da5cbd5007877d15", "score": "0.5579713", "text": "def open_file(self):\n\t\t\n\t\tself.new_file_name = QtGui.QFileDialog.getOpenFileName(self, 'Open file', self.current_file)\n\n\t\tf = open(self.new_file_name, 'r')\n\n\t\twith f: \n\t\t\tself.selected_file = f.read()\n\t\t\tself.current_file = self.new_file_name\n\t\t\tself.file_name.setText(self.current_file)\n\t\t\tself.view_file.setText(self.selected_file)", "title": "" }, { "docid": "757c69079209718970416a4497fe0901", "score": "0.5568364", "text": "def fileDialog(*args, **kwargs):\n\n pass", "title": "" }, { "docid": "ca45ae73c8fafdca7359592fdbe196fb", "score": "0.5567603", "text": "def OnFEButton(self, evt):\n \n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=os.getcwd(), \n defaultFile=\"\",\n wildcard=\"NASTRAN file (*.nas)|*.nas|\" \"All files (*.*)|*.*\",\n style=wx.OPEN | wx.CHANGE_DIR\n )\n \n if dlg.ShowModal() == wx.ID_OK:\n fileName = dlg.GetPath()\n \n self.fem.ReadFile(fileName)\n \n self.Syncronize()\n \n dlg.Destroy()", "title": "" }, { "docid": "f72c1a62aca40140465cd937e90ddc68", "score": "0.5567067", "text": "def OnSelectFile1(self, event):\n\n # The user can select both file1 and file2 from the file dialog box\n # by using the shift or control key to pick two files. 
The order in\n # which they are selected determines which is file1 and file2.\n dlg = wx.FileDialog(self,\n message=\"Select 1st Data File\",\n defaultDir=os.getcwd(),\n defaultFile=\"\",\n wildcard=(REFL_FILES+\"|\"+DATA_FILES+\"|\"+\n TEXT_FILES+\"|\"+ALL_FILES),\n style=wx.FD_OPEN|wx.FD_MULTIPLE|wx.FD_CHANGE_DIR)\n # Wait for user to close the dialog.\n sts = dlg.ShowModal()\n if sts == wx.ID_OK:\n paths = dlg.GetPaths()\n dlg.Destroy()\n if sts == wx.ID_CANCEL:\n return # Do nothing\n\n # The user could have selected 1 to n files.\n num_files = len(paths)\n if num_files > 2:\n popup_error_message(\n \"Too Many Files Selected\",\n \"You can select up to two data files, please try again.\")\n return\n elif num_files == 2:\n datafile_1 = paths[1] # files are returned in reverse order!\n datafile_2 = paths[0] # files are returned in reverse order!\n self.TCfile1.SetBackgroundColour(\"WHITE\")\n self.TCfile1.SetValue(datafile_1)\n self.TCfile1.SetInsertionPointEnd()\n self.TCfile2.SetBackgroundColour(\"WHITE\")\n self.TCfile2.SetValue(datafile_2)\n self.TCfile2.SetInsertionPointEnd()\n elif num_files == 1:\n datafile_1 = paths[0]\n self.TCfile1.SetBackgroundColour(\"WHITE\")\n self.TCfile1.SetValue(datafile_1)\n self.TCfile1.SetInsertionPointEnd()\n\n # Plot one or both files.\n self.plot_dataset(self.TCfile1.GetValue(), self.TCfile2.GetValue())", "title": "" }, { "docid": "c56ce40b79d962f3e8a7eb31f24530be", "score": "0.55613637", "text": "def main():\n file = input(\"Enter File Name: \")\n read_file(file)", "title": "" }, { "docid": "fa5a009218aa731b5e68e859e2e9f064", "score": "0.55529666", "text": "def chooseFile(self):\n\n global gcode_user_file_path\n options = {}\n\n options['title'] = 'Loading G Code for piece...'\n options['initialdir'] = 'C:\\\\'\n gcode_user_file_path = filedialog.askopenfilename(**options)\n\n self.master.destroy()", "title": "" }, { "docid": "b6bbbc62d1a20490224fd98058904fbf", "score": "0.55513495", "text": "def __init__(self):\n print \"welcome to file manager...\"\n print \"press to copy data file[1]: \"\n print \"press to delet the file[2]: \"\n print \"perss to change path[3]\"\n print \"press to show file data[4]: \"\n print \"press to longth of data[5]: \"\n print \"press quit to exit\"\n self.chooses()", "title": "" }, { "docid": "4ed4e869bc781fe9d5f3f8ce78b6f519", "score": "0.55430955", "text": "def open_file(self):\n options = {}\n options['defaultextension'] = '.txt'\n options['filetypes'] = [('all files', '.*'), ('text files', '.txt')]\n options['initialdir'] = '//seavvfile1/Market_SAIntMktg/_Offers/5. 
In Work'\n options['parent'] = self.root\n options['title'] = \"Open a file\"\n \n with tkFileDialog.askopenfile(mode='r', **options) as f_handle:\n print options\n #for line in f_handle:\n #print line", "title": "" }, { "docid": "214870844d831ec485bc98d421500215", "score": "0.5540342", "text": "def load_filechooser(self, *args):\n if len(self.objects) != 0:\n fileChooserClass(self)", "title": "" }, { "docid": "9322afd96bb08cafe728b5174bbb637b", "score": "0.5522619", "text": "def openFileDialog(self,event=None):\r\n global FILE_EXTENSION\r\n options = {}\r\n options['parent'] = self\r\n try:\r\n global CONFIG_NAME\r\n theFile = open(CONFIG_NAME,'rb') #open connection\r\n options['initialdir'] = pickle.load(theFile)[0]\r\n if options['initialdir'] == \"/examples\":\r\n options['initialdir'] = \"examples\"\r\n theFile.close()\r\n except:\r\n options['initialdir'] = \"examples\"\r\n\r\n\r\n\r\n options['defaultextension'] = FILE_EXTENSION\r\n options['filetypes'] = [(\"graph files\",FILE_EXTENSION)]\r\n self.filePath = tkFileDialog.askopenfilename(**options)\r\n self.openFile(self.filePath)", "title": "" }, { "docid": "373b5753924283af2dad996e9de9dadc", "score": "0.5519668", "text": "def openFile(self):\n\n dialog = gui.QFileDialog(self);\n dialog.setOptions(gui.QFileDialog.DontUseNativeDialog)\n dialog.setWindowTitle(\"Open data file\")\n\n if dialog.exec_():\n if dialog.selectedFiles and dialog.selectedFiles():\n\n # Get file information\n self.filename = str(dialog.selectedFiles()[0])\n \n # Update UI information\n self.updateInfo()\n self.sample = 0\n\n # We have all required information, get data start plotting\n self.updateUI()", "title": "" }, { "docid": "d6a21ad83eaae33bdd444d6356444a7d", "score": "0.5515773", "text": "def on_convert(self):\n items = self.treeview.get_children()\n if items:\n if self.option_var.get() == 1:\n # merge must include more than 1 file else return error\n if len(items) > 1:\n self.merge_files(items)\n else:\n message = \"Please select 2 or more files to perform a merge.\"\n messagebox.showerror(title='Missing files', message=message)\n else:\n # only 1 file shold be selected when splitting the file\n if len(items) > 1:\n message = \"Select 1 file only when splitting to multiple files.\"\n messagebox.showerror(title='Too many files', message=message)\n else:\n self.split_files(items)\n else:\n message = \"Please select file(s) to continue.\"\n messagebox.showerror(title='Missing files', message=message)", "title": "" }, { "docid": "947adf18498e1e29fef1321ebc083a90", "score": "0.55074733", "text": "def browseForInput(self):\n settings = QtCore.QSettings()\n startingDir = str(settings.value(\"OsTranslatorII/lastInputFolder\", os.path.expanduser(\"~\")))\n d = str( QFileDialog.getExistingDirectory(None, 'Browse For Input', startingDir) )\n if d != os.sep and d.lower() != 'c:\\\\' and d != '':\n settings.setValue(\"OsTranslatorII/lastInputFolder\", d)\n self.inputLineEdit.setText(d)", "title": "" }, { "docid": "a4487941f990d8ca3ba8595599350286", "score": "0.5502798", "text": "def clik_on_browse_query(self):\n try:\n self.qry_path = filedialog.askopenfilename()\n self.q_box_from_path.delete('1.0', END)\n self.q_box_from_path.insert(END, self.qry_path)\n except:\n pass", "title": "" }, { "docid": "09488cca1f696b2012fbe6e5fa482be4", "score": "0.54970175", "text": "def handlerSelectListOfEconomicSector(self):\n file = unicode(QtGui.QFileDialog.getOpenFileName(\n self, 'Select List of Economic Sector', QtCore.QDir.homePath(), 'List of Economic Sector 
(*{0})'.format(self.main.appSettings['selectCsvfileExt'])))\n \n if file:\n self.lineEditListOfEconomicSector.setText(file)\n \n logging.getLogger(type(self).__name__).info('select file: %s', file)", "title": "" }, { "docid": "e0963c5706cfda83c75512fc4d87da26", "score": "0.5495374", "text": "async def handle_ui(search_state):\n last_search = ''\n # Event Loop to process \"events\"\n while True:\n event, values = window.read(timeout=1)\n if event in (sg.WIN_CLOSED, 'Exit'):\n sys.exit()\n elif event == '-INPUT-':\n if last_search != values['-INPUT-']:\n window['-RESULT-'].update([])\n last_search = values['-INPUT-']\n if search_state['searching']:\n search_state['interrupting'] = True\n elif event == '-RESULT-':\n file_clicked = values['-RESULT-'][0]\n if search_state['searching']:\n search_state['interrupting'] = True\n os.startfile(file_clicked)\n elif event == '__TIMEOUT__':\n pass\n else:\n print(event, values)\n await asyncio.sleep(0)", "title": "" }, { "docid": "23fb2dde6a9a0d32fbccda3a32f308a2", "score": "0.5489173", "text": "def open_callback(self):\n\n # Forget welcome screen widgets\n self.parent.open_label.pack_forget() \n self.parent.manual_label.pack_forget()\n self.parent.email_label.pack_forget()\n\n # If a file is already open, close it.\n if Session.file_opened:\n self.close_callback()\n \n file_path = filedialog.askopenfilename(**self.OPEN_OPT)\n if file_path: \n Session.file_path = file_path\n status = FileHandler.open_file(file_path)\n if status:\n Session.file_opened = True\n Session.file_saved = True\n EventHandler.event_generate(OPEN_EVENT)", "title": "" }, { "docid": "cbf5912abbc14706cd84f826a8ccc222", "score": "0.5484031", "text": "def open_file_dialog(self):\n \n try:\n selected_filename = filedialog.askopenfilename(initialdir = \".\", title = \"Select file\", filetypes = ((\"obj files\",\"*.obj\"), (\"mtl files\",\"*.mtl\"), (\"all files\",\"*.*\")))\n \n if os.path.exists(selected_filename):\n self.upload_file_list.append(selected_filename)\n print(\"File added to upload list:\", selected_filename)\n \n ## Also add the '.mtl' file if not already added\n if selected_filename.endswith('.obj'): \n selected_filename_mtl = selected_filename[:-4]+'.mtl'\n if selected_filename_mtl not in self.upload_file_list:\n if os.path.exists(selected_filename_mtl):\n self.upload_file_list.append(selected_filename_mtl)\n print(\"File added to upload list:\", selected_filename_mtl)\n else:\n print(\"No existing file selected.\")\n pass\n \n except Exception as emsg:\n print(\"EXCEPTION: \"+str(emsg))\n return None\n \n return", "title": "" }, { "docid": "2145ff301c240cf738ecffaf6ccb401a", "score": "0.5475936", "text": "def _fname_selected(self, fname):\n self.fname_selected.emit(fname.text())", "title": "" }, { "docid": "b0beae70840f936b6c1692a814595718", "score": "0.5463578", "text": "def _handle_selection_change(self, selection):", "title": "" }, { "docid": "f6e0607e565d906d639d69331ce65932", "score": "0.5457833", "text": "def __doOpen(self):\n\n if self.__modified == True:\n self.__askForSave()\n\n openname = askopenfilename(initialdir=\"*\",\n title=self.__dicts.getWordFromDict(self.__Config.get_Element(\"Language\"), \"open\"),\n filetypes=(\n (self.__dicts.getWordFromDict(self.__Config.get_Element(\"Language\"), \"fileBoot\"),\n \"*.boo\"),\n (self.__dicts.getWordFromDict(self.__Config.get_Element(\"Language\"), \"fileTxT\"),\n \"*.txt\"),\n (self.__dicts.getWordFromDict(self.__Config.get_Element(\"Language\"), \"fileAll\"),\n \"*.*\"),\n ))\n\n self.__openFile(openname,True)", 
"title": "" }, { "docid": "0091410e94c46bc712ebae905d7afb34", "score": "0.54505515", "text": "def invoke (self, context, event):\n context.window_manager.fileselect_add (self)\n return {'RUNNING_MODAL'}", "title": "" }, { "docid": "0091410e94c46bc712ebae905d7afb34", "score": "0.54505515", "text": "def invoke (self, context, event):\n context.window_manager.fileselect_add (self)\n return {'RUNNING_MODAL'}", "title": "" }, { "docid": "8310268c066a0f320864bce4e5b72242", "score": "0.54462266", "text": "def _open_file(self):\n file = QFileDialog.getOpenFileName(self, 'Open file', \".\")[0]\n if file:\n file_name = str(file).split('/')[-1] # Need to create version for Windows\n with open(file, 'rt') as text:\n if self.tab.currentWidget().text(): # If current Tab is not empty\n self.tab.addTab(Editor(), file_name)\n self.tab.setCurrentIndex(self.tab.currentIndex() + 1)\n self.tab.currentWidget().setText(text.read())\n else:\n self.tab.currentWidget().setText(text.read())\n self.tab.setTabText(self.tab.currentIndex(), file_name)\n try:\n lexer = lexers.get_lexer_by_ext(file_name)\n self.tab.currentWidget().setLexer(lexer())\n except:\n pass", "title": "" }, { "docid": "99d88108b505f9aa38ead9a6171430ff", "score": "0.54447657", "text": "def get_user_input(self) -> bool:\r\n tag_list = ['1','2','3']\r\n #currnently only one file\r\n layout = [\r\n [sg.Text('Bat name:')], \r\n [sg.Input(key='-BAT NAME-')],\r\n [sg.Text('Tag')],\r\n [sg.Listbox(tag_list, size=(15, len(tag_list)), key='-TAG-')],\r\n # [sg.Listbox(values=tag_list, size=(20, 12), key='-LIST-', enable_events=False)],\r\n [sg.Text('Select files or a folder to analyze')],\r\n [sg.Text('Files', size=(8, 1)) ,sg.Input(), sg.FilesBrowse(file_types=((\"txt Files\", \"*.txt\"),)) if platform.system() == 'Windows' else sg.FilesBrowse()],\r\n [sg.Text('OR Folder', size=(8, 1)), sg.Input(), sg.FolderBrowse()],\r\n [sg.OK(size=(7,1)), sg.Cancel(size=(7,1))]\r\n ]\r\n\r\n window = sg.Window('Feeder analysis', layout)\r\n event, self.values = window.Read()\r\n window.Close()\r\n return True if event == 'OK' else False", "title": "" }, { "docid": "f1542f0e267c4edf544fe7207aacc7d8", "score": "0.5443162", "text": "def on_open(self):\n filenames = filedialog.askopenfilenames(parent=self.treeview, filetypes = [(\"TIFF file\", \"*.tif *.tiff\")])\n self.update_idletasks() # without this update the gui becomes temporarily unresponsive\n if filenames:\n for name in filenames:\n short_name = pathlib.Path(name).stem\n self.treeview.insert('', 'end', text=short_name, values=(str(name),))", "title": "" }, { "docid": "da91b24d994d4d033b00be17ba5b264d", "score": "0.5441948", "text": "def filerSelect(self, *args):\n logger.debug(\"dirchooser.filerSelect called\")\n# self.importImageCallBack(self.guiFiler.filer\").get_current_folder())\n self.guiFiler.filer.close()", "title": "" }, { "docid": "3e230cc776ecbcab099b159492d16004", "score": "0.5437073", "text": "def OnOpenTxt(self, evt):\n fileDlg = wx.FileDialog(self, message='Choose Pore Radius file...',\n wildcard=DATWILDCARD, style=wx.FD_OPEN)\n if fileDlg.ShowModal() != wx.ID_OK:\n fileDlg.Destroy()\n return\n self.datapath = fileDlg.GetPath()\n fileDlg.Destroy()\n self.open_txt(evt)", "title": "" }, { "docid": "928e11e0ff7a8d7efac0d23a48c7b8d6", "score": "0.54310805", "text": "def push_CAR_Index_file(self):\n filepath = QFileDialog.getOpenFileName(self)\n self.plainTextEdit_2.setPlainText(filepath[0])\n self.filepath_Car = filepath[0]", "title": "" }, { "docid": "4e611d75846c34c52ce5fd80dd6150d2", "score": 
"0.5416835", "text": "def Openfile5(self, event):\n dlg = wx.FileDialog(\n self, message=\"Choose a Specie\",\n defaultFile=\"\",\n wildcard=wildcard1,\n style=wx.FD_OPEN\n )\n if dlg.ShowModal() == wx.ID_OK:\n tmp=\"\"\n #paths = dlg.GetPaths()\n paths = dlg.GetPaths()\n #print \"You chose the following file(s):\"\n for path in paths:\n tmp=tmp+path\n #set the value of TextCtrl[filename]\n \n #set the value to the TextCtrl[contents]\n \n self.aa = self.readtable(tmp)\n self.contents1.SetValue(str(self.aa[0]))\n self.contents2.SetValue(str(self.aa[1]))\n self.contents3.SetValue(str(self.aa[2]))\n# self.contents4.SetValue(str(a[3]))\n \n dlg.Destroy()\n return self.aa", "title": "" }, { "docid": "5fd5729a6dc2a79ea0f09688e2c82801", "score": "0.54084235", "text": "def onFileOpen(self, path=None):\n if path:\n self.fileMenu.currFile = path\n if self.fileMenu.currFile and os.path.exists(self.fileMenu.currFile):\n self.cmdFile = self.fileMenu.currFile\n self.onExit(quit=False)", "title": "" }, { "docid": "0178abd40f7651d2422c68c7e75bb773", "score": "0.5407632", "text": "def on_button_click(self):\n prev = self.filename.get()\n initialdir = os.path.dirname(prev) if prev else _INITIALDIR\n\n filename = filedialog.askopenfilename(\n parent=self, initialdir=initialdir, title='Choose a file')\n if filename:\n self.filename.set(filename)", "title": "" }, { "docid": "0577bb5de24d2bdd21caff7766c1d7e3", "score": "0.54021037", "text": "def openFile(self):\n fList = [(\"Text files\", \"*.txt\")]\n fileName = tkinter.filedialog.askopenfilename(parent = self, filetypes = fList)\n\n if fileName != \"\":\n file = open(fileName, 'r')\n text = file.read()\n file.close()\n self.outputArea.setText(text)\n self.setTitle(fileName)", "title": "" }, { "docid": "14731fb3610163e16972482b21c053a7", "score": "0.53979987", "text": "def __init__(self):\n self.infile = open(input())", "title": "" }, { "docid": "f27550350b8137809cfe267442203050", "score": "0.5395495", "text": "def file_open(self):\n\n # Stop any running calculation\n if self.calculation_thread is not None:\n if self.calculation_thread.isRunning():\n # Cancel the running calculation\n self.interrupt_calculation()\n\n filename, _chosen_extension = QFileDialog.getOpenFileName(\n parent=self,\n caption=\"Open File\",\n filter=\"MagnetiCalc INI File (*.ini)\",\n options=QFileDialog.DontUseNativeDialog\n )\n\n if filename != \"\":\n\n self.model.invalidate()\n\n self.config.close()\n self.config.set_filename(filename)\n self.config.load()\n\n self.sidebar_left.wire_widget.reinitialize()\n self.sidebar_left.sampling_volume_widget.reinitialize()\n self.sidebar_right.field_widget.reinitialize()\n self.sidebar_right.metric_widget.reinitialize()\n # Parameters_Widget doesn't need reinitialization as it does not access the configuration\n # Perspective_Widget doesn't need reinitialization as it does not access the configuration\n self.sidebar_right.display_widget.reinitialize()\n\n self.menu.reinitialize()\n self.statusbar.reinitialize()\n\n self.vispy_canvas.load_perspective()\n\n if self.config.get_bool(\"auto_calculation\"):\n self.recalculate()", "title": "" }, { "docid": "813177ce00fbc8abf7c010ac4676ab23", "score": "0.5393413", "text": "def get_file_items(self, window, sel_items):\n if len(sel_items) != 1 or sel_items[0].is_directory() or sel_items[0].get_uri_scheme() != 'file':\n return\n uri_raw = sel_items[0].get_uri()\n if len(uri_raw) < 7: return\n source_path = urllib.unquote(uri_raw[7:])\n filetype = subprocess.Popen(\"file -i %s\" % re.escape(source_path), 
shell=True, stdout=subprocess.PIPE).communicate()[0]\n\tprint filetype\n\n if \"application\" or \"text\" in filetype:\n item = Nautilus.MenuItem(name='NautilusPython::preferences-nautilus-optirun',\n label=_('Run via Optirun'),\n tip=_('Run the selected file on discrete video card'),\n icon='preferences-nautilus-optirun')\n item.connect('activate', self.run, source_path)\n return item,", "title": "" }, { "docid": "e7ca99011908043a81dee284849bb031", "score": "0.5393086", "text": "def run(self) -> None:\n user_input = \"\"\n if not self._logo_displayed:\n self.cls()\n print(base64.b64decode(self.LOGO).decode(\"ascii\"))\n self._logo_displayed = True\n self.display_menu()\n while True:\n user_input = input(\"\\nYour selection: \")\n try:\n user_input = int(user_input)\n if user_input <= 0 or user_input > len(self.options):\n self.display_error()\n else:\n if callable(self.options[user_input][1]):\n self.selected_option = user_input\n self.options[user_input][1]()\n if self.options[user_input][2]:\n return\n else:\n self.display_menu()\n elif self.options[user_input][1]:\n self.selected_option = user_input\n self.options[user_input][1].run()\n self.display_menu(clear_screen=True)\n else:\n return\n except ValueError:\n self.display_error()", "title": "" }, { "docid": "a8307a6c17981445509b374d903d469c", "score": "0.5392842", "text": "def load(self, selection):\n self.file_path = str(selection[0])\n self.popup.dismiss()\n self.load_action()", "title": "" }, { "docid": "bd21cb2b3b0431f19d28c8b5bb89ba2d", "score": "0.5392435", "text": "def Openfile4(self, event):\n dlg = wx.FileDialog(\n self, message=\"Choose a Specie\",\n defaultFile=\"\",\n wildcard=wildcard1,\n style=wx.FD_OPEN\n )\n if dlg.ShowModal() == wx.ID_OK:\n tmp=\"\"\n #paths = dlg.GetPaths()\n paths = dlg.GetPaths()\n #print \"You chose the following file(s):\"\n for path in paths:\n tmp=tmp+path\n \n #set the value to the TextCtrl[contents]\n self.a4 = self.readtable(tmp)\n self.contents4.SetValue(str(self.a4))\n \n dlg.Destroy()\n return self.a4", "title": "" }, { "docid": "1bf673ac509152b4339e9f8fc384d5cc", "score": "0.5390938", "text": "def openFileDialog(self):\r\n filename, filter = QFileDialog.getOpenFileName(dir=os.path.dirname(self.filename))\r\n if not filename == '':\r\n self.filename = filename", "title": "" }, { "docid": "73314dd050fdee0f0d1f59f65153aec0", "score": "0.5389249", "text": "def browse(self):\n qfd = QFileDialog()\n file_filter = 'VHDL file(*.vhd);;All files(*)'\n self.input_file_path = QFileDialog.getOpenFileName(qfd, 'Open source file', 'C:/', file_filter)[0]\n self.input_file_name = self.input_file_path.split(\"/\")[-1]\n self.project_path = '/'.join(self.input_file_path.split(\"/\")[:-1]) + '/'\n self.xdc_ports = {}\n if self.input_file_path != '':\n ports, module_name, libs = gen_and_parse.get_stuff(self.input_file_path)\n\n for key, value in ports.items():\n self.xdc_ports[key] = value\n try:\n num1, num2 = re.search(r'(\\d+).*(\\d+)', value[1]).groups()\n del self.xdc_ports[key]\n\n num1 = int(num1)\n num2 = int(num2)\n if num1 > num2:\n for i in range(num2, num1 + 1):\n self.xdc_ports[f'{key}[{i}]'] = ''\n else:\n for i in range(num1, num2 + 1):\n self.xdc_ports[f'{key}[{i}]'] = ''\n except:\n pass\n\n self.tableWidget.setRowCount(len(self.xdc_ports.keys()))\n global ddr_chosen\n for i, port in enumerate(self.xdc_ports, start=0):\n temp_w = QTableWidgetItem(port)\n self.tableWidget.setItem(i, 0, temp_w)\n self.set_items_in_table(i)", "title": "" }, { "docid": "5312a6cf2c9f24787b7fd89c9c225805", 
"score": "0.5380366", "text": "def execute(self, context):\n\n filename, extension = os.path.splitext(self.filepath)\n\n print('Selected file:', self.filepath)\n print('File name:', filename)\n print('File extension:', extension)\n rigify(self.filepath)\n\n return {'FINISHED'}", "title": "" }, { "docid": "f73f202f712477728d73a6d33a733c5d", "score": "0.5370303", "text": "def fileHandler(value):\r\n if value.lower() == \"exit\":\r\n print(\"Exiting the program..\")\r\n sys.exit()\r\n \r\n if(value == \"1\"): #if the user enters 1\r\n filename = \"C:/Users/shruti/source/repos/Hw2/StemAndLeaf1.txt\" #file path for SteamAndLeaf1.txt file\r\n with open(filename) as f:\r\n fileData = f.read().splitlines() #reads the file data\r\n \r\n\r\n if(value == \"2\"):\r\n filename = \"C:/Users/shruti/source/repos/Hw2/StemAndLeaf2.txt\"\r\n with open(filename) as f:\r\n fileData = f.read().splitlines()\r\n \r\n if(value == \"3\"):\r\n filename = \"C:/Users/shruti/source/repos/Hw2/StemAndLeaf3.txt\"\r\n with open(filename) as f:\r\n fileData = f.read().splitlines()\r\n \r\n\r\n return fileData", "title": "" }, { "docid": "1e6fa296d6154d939919d64038a5ff9d", "score": "0.53696764", "text": "def read_input(self):", "title": "" }, { "docid": "66823154aeefc417924f6f0a3147c1d0", "score": "0.53628767", "text": "def browse_function(first_frame):\n global raw_filenames\n root.filename = \\\n filedialog.askopenfilenames(initialdir=\"/\", title=\"Select Image\")\n first_file = root.filename[0]\n if first_file.lower().endswith('zip') is True:\n zf = zipfile.ZipFile(root.filename[0], 'r')\n raw_filenames = zf.namelist()\n num_files = len(raw_filenames)\n else:\n raw_filenames = root.filename\n num_files = len(raw_filenames)\n file_label = ttk.Label(first_frame,\n text=\"{} file(s) uploaded\".format(num_files))\n file_label.grid(column=0, row=3)", "title": "" }, { "docid": "2a5f38d80d9e8be7db2458c88e48c7a9", "score": "0.53509563", "text": "def outFileBrowse(self):\n \n outFilename = Utilities.saveFileDialog(self) \n if not outFilename:\n return\n self.outputFilename.setText(outFilename)", "title": "" }, { "docid": "a629bde03fa71f19539ba455ff463050", "score": "0.53497124", "text": "def _output_select(self):\n options = QFileDialog.Options()\n file_select, _ = QFileDialog.getSaveFileName(\n self._view,\n 'Save Output...',\n '',\n # 'RNH Files (*.rnh);;RND Files (*.rnd);;CSV Files (*.csv);;Excel Files (*.xls*);;All Files (*)',\n options=options\n )\n if file_select:\n self._config[\"output\"] = [file_select]", "title": "" }, { "docid": "64cc3e7e6e0287f55526e5300f02127a", "score": "0.5345172", "text": "def _openItem(self):\n itmList = self.getSelectedItems(\n [BrowserFileItem, BrowserClassItem,\n BrowserMethodItem, BrowserClassAttributeItem,\n BrowserImportItem])\n \n if not self._activating:\n self._activating = True\n for itm in itmList:\n if isinstance(itm, BrowserFileItem):\n if itm.isPython2File():\n self.sourceFile[str].emit(itm.fileName())\n elif itm.isPython3File():\n self.sourceFile[str].emit(itm.fileName())\n elif itm.isRubyFile():\n self.sourceFile[str, int, str].emit(\n itm.fileName(), -1, \"Ruby\")\n elif itm.isDFile():\n self.sourceFile[str, int, str].emit(\n itm.fileName(), -1, \"D\")\n elif itm.isDesignerFile():\n self.designerFile.emit(itm.fileName())\n elif itm.isLinguistFile():\n if itm.fileExt() == '.ts':\n self.linguistFile.emit(itm.fileName())\n else:\n self.trpreview.emit([itm.fileName()])\n elif itm.isProjectFile():\n self.projectFile.emit(itm.fileName())\n elif itm.isMultiProjectFile():\n 
self.multiProjectFile.emit(itm.fileName())\n elif itm.isIdlFile():\n self.sourceFile[str].emit(itm.fileName())\n elif itm.isProtobufFile():\n self.sourceFile[str].emit(itm.fileName())\n elif itm.isResourcesFile():\n self.sourceFile[str].emit(itm.fileName())\n elif itm.isSvgFile():\n self.svgFile.emit(itm.fileName())\n elif itm.isPixmapFile():\n self.pixmapFile.emit(itm.fileName())\n else:\n if Utilities.MimeTypes.isTextFile(itm.fileName()):\n self.sourceFile[str].emit(itm.fileName())\n else:\n QDesktopServices.openUrl(QUrl(itm.fileName()))\n elif isinstance(itm, BrowserClassItem):\n self.sourceFile[str, int].emit(\n itm.fileName(), itm.classObject().lineno)\n elif isinstance(itm, BrowserMethodItem):\n self.sourceFile[str, int].emit(\n itm.fileName(), itm.functionObject().lineno)\n elif isinstance(itm, BrowserClassAttributeItem):\n self.sourceFile[str, int].emit(\n itm.fileName(), itm.attributeObject().lineno)\n elif isinstance(itm, BrowserImportItem):\n self.sourceFile[str, list].emit(\n itm.fileName(), itm.linenos())\n self._activating = False", "title": "" }, { "docid": "d773be7d07d2e4959c0f2c6980e91983", "score": "0.53407794", "text": "def select_files(self, directory):\n filenames_loc=fd.askopenfilenames(initialdir=directory, title='Select files', filetypes=(('text files', '*.txt'), ('all files', '*.*')))\n filenames_loc=list(filenames_loc)\n \n if len(filenames_loc)>0: #prevents error from ocurring if user doesn't actually select any files\n self.folder=os.path.dirname(filenames_loc[0]) #remembers last used directory\n \n return filenames_loc", "title": "" }, { "docid": "27e9f1a6a8879aad6793d44be555ad70", "score": "0.53387815", "text": "def file_inputs(self):\n for i in range(self.controller.inputs['num_variables_input']):\n if self.controller.inputs['is_included_in_file_input'][i]==0:\n self.controller.inputs['num_txt_files']*=self.controller.inputs['num_datapoints_input'][i] #finds the product of the length of all the unincluded dimensions\n \n #uploads files \n if self.controller.inputs['track_file_input']==1 and self.controller.inputs['unincluded_dimensions']!=[]: #checks for whether the user would like to and is able to track their files \n self.tracker_child_frame.destroy()\n self.tracker_child_frame=tk.Frame(self.tracker_frame)\n self.tracker_child_frame.config(highlightbackground='blue', highlightcolor='blue', highlightthickness=1, height=20, width=70)\n self.tracker_child_frame.pack() \n self.shared_func.tracker(frame=self.tracker_child_frame, \n dimensions=self.controller.inputs['unincluded_dimensions'], \n dim_name=self.controller.inputs['unincluded_dim_name'], \n row=0, \n column=0) \n elif self.controller.inputs['track_file_input']==1 and self.controller.inputs['unincluded_dimensions']==[]: #if the user would like to track their files but there is only one file, show error\n mb.showerror('Error', 'Cannot track only one file. 
Please uncheck tracking file option') \n else: #creates file list without tracker function \n while len(self.shared_func.file_list)<self.controller.inputs['num_txt_files']:\n add_file=self.shared_func.select_files(self.shared_func.folder)\n if add_file==[]: #if user cancels adding files, assumes the user would like to start everything over\n break\n add_file.append(add_file.pop(0)) #puts the first file selected back to the beginning because tkinter is weird like that (tkinter will put the first file selected at the end)\n self.shared_func.file_list+=add_file \n \n #Check to make sure the file list is the right size\n if len(self.shared_func.file_list)!=self.controller.inputs['num_txt_files']: \n mb.showerror(\"Error\", \"File list incompatible with amount of data specified, please reconfirm inputs and press reset\") \n \n #Widgets for loading text from the .txt files** \n self.shared_func.file_format_widgets(frame=self.file_format_child_frame, row=0, column=0)\n\n #Button that will compile all the information and .txt files into an n-dimensional matrix\n compile_button=ttk.Button(self.file_format_child_frame, \n text='Compile', \n command= lambda comments=self.shared_func.comments_entry, \n delimiter=self.shared_func.delimiter_entry,\n skiprows=self.shared_func.skiprows_entry, \n usecolumns=self.shared_func.usecols_entry: self.inputs_create_matrix(comments, \n delimiter, \n skiprows, \n usecolumns))\n compile_button.grid(row=0, column=8)", "title": "" }, { "docid": "47ed733ca6ac5391f4e3773b7937abe8", "score": "0.5332661", "text": "def _browse_vfk_files(self):\n \n title = u'Vyberte VFK soubor.'\n filters = u'.vfk (*.vfk)'\n \n filePath = self.dW.open_file_dialog(title, filters, True)\n \n if filePath:\n self.set_text_browseVfkLineEdit.emit(filePath)", "title": "" }, { "docid": "4b00f3d8854c7bf991c84fd1a917756a", "score": "0.5327411", "text": "def _handle(self, inputs, **options):\n pass", "title": "" } ]
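Several of the passages above share one pattern: open a native file-selection dialog, then read or record the chosen path. A minimal, self-contained sketch of that pattern using the standard-library tkinter filedialog — the function name and file-type filter here are illustrative, not taken from any single passage:

import tkinter as tk
from tkinter import filedialog

def pick_and_read_text_file():
    """Ask the user for a .txt file and return its contents, or None if cancelled."""
    root = tk.Tk()
    root.withdraw()  # hide the empty root window; only the dialog is shown
    path = filedialog.askopenfilename(
        title="Select a file",
        filetypes=[("Text files", "*.txt"), ("All files", "*.*")],
    )
    root.destroy()
    if not path:  # an empty result means the user cancelled
        return None
    with open(path, "r", encoding="utf-8") as fh:
        return fh.read()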
abc3539d6a4d16fc36b94d707e583008
Return triangle area given its vertices' coordinates.
[ { "docid": "0add7d83ad7bf33b1cf10765399b27f0", "score": "0.70234346", "text": "def get_triangle_area(point1: Point, point2: Point, point3: Point):\n\t\tprod1 = (point1[0] - point3[0]) * (point2[1] - point1[1])\n\t\tprod2 = (point1[0] - point2[0]) * (point3[1] - point1[1])\n\n\t\ttriangle_area = 1/2 * abs(prod1 - prod2)\n\n\t\treturn triangle_area", "title": "" } ]
[ { "docid": "ce87c52ea5d3ae5d6cadd5a7d6f1c8a6", "score": "0.8274563", "text": "def area_of_triangle(*args):\n if len(args) != 1:\n print('ERROR: one argument expected, got {}.'.format(len(args)))\n return None\n vertexes = args[0]\n if not isinstance(vertexes, list):\n print('ERROR: list of vertexes expected as an argument, got {}.'.format(type(vertexes)))\n return None\n if len(vertexes) != 6:\n print('ERROR: 6 coordinates expected, got {}.'.format(len(vertexes)))\n return None\n for coordinate in vertexes:\n if not (isinstance(coordinate, int) or isinstance(coordinate, float)):\n print('ERROR: type int or float expected, got {}.'.format(type(coordinate)))\n return None\n ver = [[vertexes[0], vertexes[1]], [vertexes[2], vertexes[3]], [vertexes[4], vertexes[5]]]\n a = distance(ver[0], ver[1])\n b = distance(ver[1], ver[2])\n c = distance(ver[2], ver[0])\n p = (a + b + c) / 2\n return (p * (p - a) * (p - b) * (p - c)) ** 0.5", "title": "" }, { "docid": "4a3537ea3fce9eaa695688c0a9bd4ba9", "score": "0.7953434", "text": "def get_area(vertices):\n area = 0.0\n for i in range(len(vertices)):\n j = (i + 1) % len(vertices)\n area += vertices[i][0] * vertices[j][1] - vertices[j][0] * vertices[i][1]\n return abs(area) / 2.0", "title": "" }, { "docid": "8e9e7e7fcb60d5d687bc91ed0ca60cf0", "score": "0.78157234", "text": "def polygon_area(vertices: Sequence[Sequence[float]]) -> float:\n if len(vertices) == 3:\n # Specialized, fast implementation for triangles, since this is heavily\n # used in `smallest_enclosing_circle`\n dx1 = vertices[1][0] - vertices[0][0]\n dy1 = vertices[1][1] - vertices[0][1]\n dx2 = vertices[2][0] - vertices[1][0]\n dy2 = vertices[2][1] - vertices[1][1]\n return (dx1 * dy2 - dy1 * dx2) / 2\n\n x, y = np.vstack((vertices[-1], vertices)).T\n return np.sum((x[1:] + x[:-1]) * (y[1:] - y[:-1])) / 2", "title": "" }, { "docid": "cdb866be487eb3b7cad6ce2f957b0d0c", "score": "0.757878", "text": "def area_of_triangle(x1, y1, x2, y2, x3, y3):\n area = abs((x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2.0)\n\n return area", "title": "" }, { "docid": "639d2fa5d4b924aa263a32215afbe1a8", "score": "0.74019337", "text": "def area_of_triangle_with(self, v):\n\t\treturn .5*self.area_of_parallelogram(v)", "title": "" }, { "docid": "c121e4114d613be50ab749deff4081b2", "score": "0.7401034", "text": "def triangleArea(a, b, c):\n\treturn (a[0]*b[1] - a[1]*b[0] + a[1]*c[0]\n\t\t\t- a[0]*c[1] + b[0]*c[1] - c[0]*b[1]) / 2.0", "title": "" }, { "docid": "9c1038134adbd7b858778ca4ab99e5a5", "score": "0.7334358", "text": "def area_of_triangle(base, height):\n return 0", "title": "" }, { "docid": "322df138966e862ab85aac6336a18cc3", "score": "0.7289267", "text": "def polygon_area(coords):\n Xs = coords[:,0]\n Ys = coords[:,1]\n\n # Ignore orientation\n return 0.5*abs(sum(Xs[:-1]*Ys[1:] - Xs[1:]*Ys[:-1]))", "title": "" }, { "docid": "fd3bd8e34edd08d3fd4530f77f79ecca", "score": "0.7232928", "text": "def area_for_poly(vertices, radius=0):\r\n vs = list(map(tuple, vertices))\r\n return cp.cpAreaForPoly(len(vertices), vs, radius)", "title": "" }, { "docid": "912743698ebe28c24a622abef68b6657", "score": "0.71538067", "text": "def triangle_area(base, height):\n # You have to code here\n # REMEMBER: Tests first!!!\n area = (base * height) / 2\n return area", "title": "" }, { "docid": "718d4216605cbec2c7ed2d2b1e0d013a", "score": "0.71321255", "text": "def triangle_area(a, h):\n return (a*h)/2", "title": "" }, { "docid": "eda08222a39e5f48bf5397c18209b2dd", "score": "0.7088057", "text": "def area_of_triangle(p1, p2, 
p3):\n return norm(cross((p2 - p1), (p3 - p1))) / 2.0", "title": "" }, { "docid": "8bcc1aaebdc0c7793e824d3218898414", "score": "0.69998634", "text": "def calculate_triangle_area(base, height):\n return 0.5 * base * height", "title": "" }, { "docid": "432bb08ac25c321de1d54a602f9524da", "score": "0.69147646", "text": "def square_of_triangle(vertexes):\n if not isinstance(vertexes, list):\n print('ERROR: list of vertexes expected as an argument, got {}'.format(type(vertexes)))\n return None\n if len(vertexes) != 6:\n print('ERROR: 6 coordinates expected, got {}'.format(len(vertexes)))\n return None\n for coordinate in vertexes:\n if not (isinstance(coordinate, int) or isinstance(coordinate, float)):\n print('ERROR: type int or float expected, got {}'.format(type(coordinate)))\n return None\n a = ((vertexes[0] - vertexes[2]) ** 2 + (vertexes[1] - vertexes[3]) ** 2) ** 0.5\n b = ((vertexes[2] - vertexes[4]) ** 2 + (vertexes[3] - vertexes[5]) ** 2) ** 0.5\n c = ((vertexes[0] - vertexes[4]) ** 2 + (vertexes[1] - vertexes[5]) ** 2) ** 0.5\n p = (a + b + c) / 2\n return (p * (p - a) * (p - b) * (p - c)) ** 0.5", "title": "" }, { "docid": "3fce82b2dcd8c55b00edd24be3dbc2f8", "score": "0.6842898", "text": "def areaTriangle(ptx1,pty1,ptx2,pty2,ptx3,pty3):\r\n # This uses Heron's Formula - Google it if you want to know more\r\n side1 = distance(ptx1,pty1,ptx2,pty2)\r\n side2 = distance(ptx2,pty2,ptx3,pty3)\r\n side3 = distance(ptx3,pty3,ptx1,pty1)\r\n p = (side1 + side2 + side3)/2\r\n t1 = p-side1\r\n t2 = p-side2\r\n t3 = p-side2\r\n if t1==0 or t2==0 or t3==0:\r\n print(\"Does not form a triangle\")\r\n return None\r\n area = math.sqrt( p*(p-side1)*(p-side2)*(p-side3) )\r\n return(area)", "title": "" }, { "docid": "53741ada61b2749f4f8c694b22353f84", "score": "0.6834161", "text": "def triangle(vert_a, vert_b, vert_c):\n vert_a_x, vert_a_y = vert_a\n vert_b_x, vert_b_y = vert_b\n vert_c_x, vert_c_y = vert_c\n a_tri = abs((vert_a_x * (vert_b_y - vert_c_y) +\n vert_b_x * (vert_c_y - vert_a_y) +\n vert_c_x * (vert_a_y - vert_b_y)) / 2.0)\n # area = (1/2)b*h\n # /\\A\n # / \\\n # / \\\n # B/______\\C\n\n return a_tri", "title": "" }, { "docid": "a10a13cdcbdcdae3ccb7370d09b886bc", "score": "0.6788807", "text": "def triangle_area(base=None, height=None, side_a=None, side_b=None, side_c=None):\n if (base is not None) & (height is not None):\n return 0.5 * base * height\n elif (side_a is not None) & (side_b is not None) & (side_c is not None):\n if (side_a + side_b) > side_c & (side_b + side_c) > side_a & (side_a + side_c) > side_b:\n s = (side_a + side_b + side_c) * 0.5\n return (s*(s-side_a)*(s-side_b)*(s-side_c)) ** 0.5\n else:\n return\"The sum of two sides must be greater than the third side of a triangle\"\n else:\n return \"no such triangle\"", "title": "" }, { "docid": "b888b0733977f59da0f0d0aed909e12d", "score": "0.67785037", "text": "def poly_area(verts):#Tartley - Jonathan Hartley, http://tartley.com\n\taccum = 0.0\n\tfor i in range(len(verts)):\n\t\tj = (i + 1) % len(verts)\n\t\taccum += verts[j][0] * verts[i][1] - verts[i][0] * verts[j][1]\n\treturn accum / 2", "title": "" }, { "docid": "726b1bd7ca7feb04765715b09876e7aa", "score": "0.6737634", "text": "def compute_tri_area(side):\n return side * side * math.sqrt(3)/4", "title": "" }, { "docid": "83e30e76b185b71cb7c0984721e3e715", "score": "0.67302614", "text": "def get_area_triangle(p1, p2, p3):\n det_jac = (p1[0] - p3[0]) * (p2[1] - p3[1]) - (p2[0] - p3[0]) * (p1[1] - p3[1])\n return 0.5 * abs(det_jac)", "title": "" }, { "docid": 
"196547415b5ca299c2f8e3725bde47e7", "score": "0.67248505", "text": "def calculate_area(coords):\r\n return max(0, coords[1] - coords[0]) * max(0, coords[3] - coords[2])", "title": "" }, { "docid": "058642431a02403e92e2ba67c9028891", "score": "0.672136", "text": "def get_area(self):\n\n # We use the \"shoelace\" algorithm to calculate the area, since the\n # polygon has no holes or self-intersections.\n s1 = s2 = 0\n for i in range(self.n):\n j = (i+1) % self.n\n s1 += self.vertices[i].x * self.vertices[j].y\n s2 += self.vertices[i].y * self.vertices[j].x\n return abs(s1 - s2) / 2", "title": "" }, { "docid": "82e34d37ead553aa7b651813d03a1437", "score": "0.6668018", "text": "def polygon_area(points):\n res = 0\n for i in range(len(points)):\n j = (i+1) % len(points)\n res = res + points[i].x * points[j].y\n res = res - points[j].x * points[i].y\n return abs(res) / 2", "title": "" }, { "docid": "d10c3e647d3b5eb263df75040d991580", "score": "0.666083", "text": "def triangle_area(height=None, base=None) -> Number:\n if dim_validate3(base) and dim_validate3(height):\n if dim_validate2(base) and dim_validate2(height):\n if dim_validate1(base) and dim_validate1(height):\n return base * height / 2\n else:\n raise ValueError(\"side must be positive: \")\n raise TypeError('value is a string')\n raise AttributeError(\"The dimension is not provided\")", "title": "" }, { "docid": "92cadaf8de68f99cd322dfbca611f6f5", "score": "0.66544366", "text": "def area(vs):\n a = 0\n x0, y0 = vs[0]\n for [x1, y1] in vs[1:]:\n dy = y1 - y0\n a += x0*dy\n x0 = x1\n y0 = y1\n return a", "title": "" }, { "docid": "cb71486155d37251644a40732f1a369b", "score": "0.6585298", "text": "def triangle_area(breadth, height: Number) -> Number:\n return 0.5*breadth*height", "title": "" }, { "docid": "35d25fb3d05464f4a863121e998417b4", "score": "0.6577138", "text": "def area_triangle(b, h: Number) -> Number:\n return 1/2*b*h", "title": "" }, { "docid": "c3708370821e0c77c95fd606c8af6e9f", "score": "0.6539446", "text": "def heron_triangle_area_sides(a, b, c):\n if triangle_exists(a, b, c):\n return 0.25*math.sqrt((a + (b + c))*(c - (a - b))*(c + (a - b))*(a + (b - c)))\n else:\n return 0", "title": "" }, { "docid": "5afeb340572a8e455cf98e66681a7c02", "score": "0.6485532", "text": "def calculate_area(pts):\n pts = perspective.order_points(pts)\n a = euclid_dist(pts[2], pts[3])\n b = euclid_dist(pts[3], pts[0])\n c = euclid_dist(pts[0], pts[1])\n d = euclid_dist(pts[1], pts[2])\n t = 0.5 * (a + b + c + d)\n angle1 = angle_betw_lines(\n np.array([pts[2], pts[3]]), np.array([pts[3], pts[0]]))\n angle2 = angle_betw_lines(\n np.array([pts[0], pts[1]]), np.array([pts[1], pts[2]]))\n area = math.sqrt(((t - a) * (t - b) * (t - c) * (t - d))\n - (a * b * c * d * ((math.cos((angle1 + angle2)/2)) ** 2)))\n return area", "title": "" }, { "docid": "b46a305074f292a2cc456664802e70bd", "score": "0.6464238", "text": "def line_vertex_area(X, L):\n a = line_edge_area(X, L)\n _, ind0 = np.unique(L[:,0], return_index=True)\n _, ind1 = np.unique(L[:,1], return_index=True)\n ind = np.column_stack((ind0,ind1))\n a = np.sum(a[ind], axis=1) / 2\n return a", "title": "" }, { "docid": "8e1d20bf728ff68ccae47672fdfc363b", "score": "0.6463007", "text": "def triangleArea(triangleSet):\n\n\ttriangleAreaSet = []\n\n\tfor i in range(len(triangleSet)):\n\t\tv1 = triangleSet[i][1] - triangleSet[i][0]\n\t\tv2 = triangleSet[i][2] - triangleSet[i][0]\n\t\tarea = np.linalg.norm(np.cross(v1, v2))/2\n\t\ttriangleAreaSet.append(area)\n\n\treturn triangleAreaSet", "title": "" }, { 
"docid": "fe04a806cf7cd09bb431b73441692284", "score": "0.6447586", "text": "def polygon_area(polygon):\n\n w=0\n for count in range(len(polygon)-1):\n y = polygon[count+1][1] + polygon[count][1]\n x = polygon[count+1][0] - polygon[count][0]\n z = y * x\n w += z\n return abs(w/2.0)", "title": "" }, { "docid": "7d07499360c23269b42c987a1d62f28f", "score": "0.6436004", "text": "def area(self) -> float:\n p = 0.5 * sum(self.sides)\n a, b, c = self.sides\n return math.sqrt(p * (p - a) * (p - b) * (p - c))", "title": "" }, { "docid": "34c03ef9f25e74907b28598356fd06c8", "score": "0.6430339", "text": "def area_triangle(x=None, y=None, z=None, h=None, theta=None) ->Number:\n if (x is not None) & (y is not None) & (z is not None):\n s = (x + y + z)/2\n area = (s*(s - x)*(s - y)*(s - z))**(1/2)\n return area\n elif (x is not None) & (y is not None) & (theta is not None):\n area = 1/2*x*y*sin(theta)\n return area\n elif (x is not None) & (h is not None):\n area = x*h/2\n return area\n else:\n return \"the argument given are inadequate to compute area\"", "title": "" }, { "docid": "61db44bdfd46b354c352239b44b10203", "score": "0.64260465", "text": "def polygon_center(vertices: Sequence[Sequence[float]],\n area: Optional[float] = None) -> Tuple[float, float]:\n if area is None:\n area = polygon_area(vertices)\n\n x, y = np.vstack((vertices[-1], vertices)).T\n fact = x[:-1] * y[1:] - x[1:] * y[:-1]\n x_c = np.sum((x[:-1] + x[1:]) * fact) / (6 * area)\n y_c = np.sum((y[:-1] + y[1:]) * fact) / (6 * area)\n\n return x_c, y_c", "title": "" }, { "docid": "8f36582619a174b292f21e15bb1af89c", "score": "0.6413507", "text": "def polygon_area(corners):\n n = len(corners) # of corners\n area = 0.0\n for i in range(n):\n j = (i + 1) % n\n area += corners[i][0] * corners[j][1]\n area -= corners[j][0] * corners[i][1]\n area = abs(area) / 2.0\n return area", "title": "" }, { "docid": "4e47b75ca6c90c9289deed7edb6e025d", "score": "0.64095837", "text": "def trapezoidal_area(xyz):\n d = scipy.spatial.Delaunay(xyz[:,:2])\n tri = xyz[d.vertices]\n\n a = tri[:,0,:2] - tri[:,1,:2]\n b = tri[:,0,:2] - tri[:,2,:2]\n proj_area = np.cross(a, b).sum(axis=-1)\n zavg = tri[:,:,2].sum(axis=1)\n vol = zavg * np.abs(proj_area) / 6.0\n return vol.sum()", "title": "" }, { "docid": "4e47b75ca6c90c9289deed7edb6e025d", "score": "0.64095837", "text": "def trapezoidal_area(xyz):\n d = scipy.spatial.Delaunay(xyz[:,:2])\n tri = xyz[d.vertices]\n\n a = tri[:,0,:2] - tri[:,1,:2]\n b = tri[:,0,:2] - tri[:,2,:2]\n proj_area = np.cross(a, b).sum(axis=-1)\n zavg = tri[:,:,2].sum(axis=1)\n vol = zavg * np.abs(proj_area) / 6.0\n return vol.sum()", "title": "" }, { "docid": "a0c499fb1dbc9aa22065e28f35df4cef", "score": "0.63906235", "text": "def triangleSignedAreaE2(A, B, C):\n return 0.5 * (A.x * (B.y - C.y) + A.y * (C.x - B.x) + (B.x * C.y - B.y * C.x))", "title": "" }, { "docid": "9130dc322cdfc40a30028ae6ab5b1361", "score": "0.6388903", "text": "def get_triangle_area(self, other):\n return self.get_parallelogram_area(other) / 2", "title": "" }, { "docid": "e7a263c5abdabb27b6b0b6554d6d8100", "score": "0.63794124", "text": "def get_area_polygon(polygon): \n area_outer = get_area_single_polygon(polygon[0])\n area_inner = 0\n for poly in polygon[1:]:\n area_inner += get_area_single_polygon(poly)\n return area_outer - area_inner", "title": "" }, { "docid": "c4a2b3f87aa3f70661d49bdcbaf94547", "score": "0.63707113", "text": "def EqTriArea(side):\n return ((3**0.5) / 4) * (side**2)", "title": "" }, { "docid": "564722d326ad4c414fcf7616910d4dc5", "score": 
"0.63513863", "text": "def area(x1, y1, x2, y2, x3, y3): \n return abs((x1 * (y2 - y3) + x2 * (y3 - y1) \n + x3 * (y1 - y2)) / 2.0)", "title": "" }, { "docid": "a08d601b818be6249b4f136671d3076a", "score": "0.6299496", "text": "def area(self):\n # Length x breadth\n if self.is_valid():\n return self.sides[0] * self.sides[1]", "title": "" }, { "docid": "d11ccf4ee16a69f294b378f04c788a00", "score": "0.6232965", "text": "def getArea(self):\n a, b, c = self.a, self.b, self.c\n return abs(a.x*(b.y-c.y)+b.x*(c.y-a.y)+c.x*(a.y-b.y))/2", "title": "" }, { "docid": "bb8757be74feed95fd61d974d19e0e6f", "score": "0.619349", "text": "def largestTriangleArea(self, # pylint: disable=invalid-name\n\t\t\t\t\t\t\tpoints: Points) -> float:\n\t\tconvex_hull = self.get_convex_hull(points)\n\n\t\tmax_triangle_area = 0\n\t\tconvex_hull_len = len(convex_hull)\n\n\t\tfor idx1 in range(convex_hull_len):\n\t\t\t# Combine idx1 with every index to its right to avoid repeats\n\t\t\tfor idx2 in range(idx1, convex_hull_len):\n\t\t\t\t# Combine idx2 with every index to its right to avoid repeats\n\t\t\t\tfor idx3 in range(idx2, convex_hull_len):\n\t\t\t\t\ttriple = (\n\t\t\t\t\t\tconvex_hull[idx1],\n\t\t\t\t\t\tconvex_hull[idx2],\n\t\t\t\t\t\tconvex_hull[idx3]\n\t\t\t\t\t)\n\t\t\t\t\ttriangle_area = self.get_triangle_area(*triple)\n\t\t\t\t\tmax_triangle_area = max(max_triangle_area, triangle_area)\n\n\t\treturn max_triangle_area", "title": "" }, { "docid": "ec86d9c791eac13f45e6c898853c20e1", "score": "0.6110272", "text": "def area(self):\n s = self.perimeter() / 2.0\n return sqrt(s * (s - self.a) * (s - self.b) * (s - self.c))", "title": "" }, { "docid": "ab897ec99d62d5ef82651f8379ef969f", "score": "0.6084797", "text": "def area_of_polygon_wrt_point(self, centroid):\n x, y = [i for i in centroid]\n\n area_of_polygon = 0.0\n\n for i in range(len(self._coords) - 1):\n x1, y1 = [i for i in self._coords[i]]\n x2, y2 = [i for i in self._coords[i + 1]]\n area_of_polygon += self.area_of_triangle(x, y, x1, y1, x2, y2)\n\n x1, y1 = [i for i in self._coords[-1]]\n x2, y2 = [i for i in self._coords[0]]\n\n area_of_polygon += self.area_of_triangle(x, y, x1, y1, x2, y2)\n\n return area_of_polygon", "title": "" }, { "docid": "d12ac51e8fd6eaa74c0bdae55789723d", "score": "0.6058393", "text": "def get_area_single_polygon(single_polygon):\n x,y = [c[0] for c in single_polygon], [c[1] for c in single_polygon]\n area = 0\n for i in range(len(single_polygon) - 1):\n area += 0.5 * (x[i] * y[i + 1] - x[i + 1] * y[i])\n return abs(area)", "title": "" }, { "docid": "434ed129e22036c18fab34a93b30950a", "score": "0.60412085", "text": "def getArea(self):\n return _libBornAgainCore.Polygon_getArea(self)", "title": "" }, { "docid": "ae37851a036528138d3cb2fb34d5d55f", "score": "0.60280424", "text": "def polyArea(self, x, y=None):\r\n if self.isnone(y) and not shape(x)[1]==2:\r\n raise ValueError('input must be 2d!')\r\n elif self.isnone(y):\r\n y = x[:,0]\r\n x = x[:,1]\r\n elif len(shape(x))>1 and not shape(x)[1]==1:\r\n raise ValueError('\\'x\\' must be a 1d vector of first coordinates!')\r\n elif not len(x)==len(y):\r\n raise ValueError('\\'x\\' and \\'y\\' must be the same length!')\r\n else:\r\n x = reshape(x, len(x))\r\n y = reshape(y, len(x))\r\n \r\n return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))", "title": "" }, { "docid": "9c3adb7a011c95dd55a6b2ccc5a39e28", "score": "0.6014797", "text": "def area_of_parallelogram_with(self, v):\n\t\treturn (self*v).magnitude", "title": "" }, { "docid": "9bf29a6c63f8c31b9f1dd6468dab98d1", 
"score": "0.59926504", "text": "def test_polygon_area():\n result = polygon_area(15, sides=3)\n assert math.isclose(result, 6.495190528383289, abs_tol=1e-5)", "title": "" }, { "docid": "24310e87683beb216ed9c44a1b6e13fc", "score": "0.5974369", "text": "def area(l, w):\n return l*w", "title": "" }, { "docid": "080757f8762c508732d76684ea03c576", "score": "0.5959136", "text": "def area(self):\n return self.b.distance(self.a) * self.b.distance(self.c)", "title": "" }, { "docid": "3b08f8003dde6bb4b0b652f1326a2bb3", "score": "0.59469944", "text": "def area(boxes):\n\n xmin, ymin, xmax, ymax = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]\n return (ymax - ymin) * (xmax - xmin)", "title": "" }, { "docid": "10e4731f2ba426bb6296be6da1eb3f6e", "score": "0.5918353", "text": "def _vertices_barycenters_areas(self) -> np.ndarray:\n Avf = self.vertex_face_adjacency()\n faces_areas = self.areas\n return (1 / 3) * Avf * faces_areas", "title": "" }, { "docid": "97b3b4868245e6fe35d5cc64e5675e14", "score": "0.5886943", "text": "def elem_area(xn):\n nnodes = len(xn[:,0]) # numero de nodos\n if nnodes == 3:\n # elemento triangular lineal\n x1, x2, x3 = xn[0, 0], xn[1, 0], xn[2, 0]\n y1, y2, y3 = xn[0, 1], xn[1, 1], xn[2, 1]\n A = 1/2*(x1*y2 - x1*y3 + x2*y3 - x2*y1 + x3*y1 - x3*y2)\n else:\n # elemento cuadrilateral bilineal\n x1, x2 = xn[0, 0], xn[1, 0]\n y1, y4 = xn[0, 1], xn[3, 1]\n a = 1/2 * (x2 - x1)\n b = 1/2 * (y4 - y1)\n A = 4*a*b\n \n return A", "title": "" }, { "docid": "3cbe3a58c2c1ea14592c1e622f3d4a71", "score": "0.5884567", "text": "def area_of_polygon(geom):\n geod = pyproj.Geod(ellps=\"WGS84\")\n\n poly_area, poly_perimeter = geod.geometry_area_perimeter(\n geom\n )\n\n return abs(poly_area)", "title": "" }, { "docid": "ea96d8031b42e389718a3d945ab96c05", "score": "0.5877812", "text": "def regular_polygon_area(perimeter, apothem):\n # You have to code here\n # REMEMBER: Tests first!!!\n area = (perimeter * apothem) / 2\n return area", "title": "" }, { "docid": "db3f30c7319d62ed8c97810ac33ad819", "score": "0.5867328", "text": "def get_area(self):\n self._area = vector.length(self._vec_cross) * 0.5\n return self._area", "title": "" }, { "docid": "b6f360327b485398eeff22f3cd230f6f", "score": "0.58475584", "text": "def area(self):\n self.area = ((3**0.5)/4)* self.__class__.edge_length**2\n return self.area", "title": "" }, { "docid": "b20b55b6ee61050fe6fedbc56057e6f7", "score": "0.58466566", "text": "def triArea(base,height):\n return 0.5*base*height", "title": "" }, { "docid": "efa452c36bfd5bee936705367af0f1e9", "score": "0.5829293", "text": "def area_of_square(side_length):\n return 0", "title": "" }, { "docid": "f9dd43a0d71604c0a9ed91665fc6dadb", "score": "0.58236367", "text": "def area_4vector(self, jet):\n return _fastjet.ClusterSequenceVoronoiArea_area_4vector(self, jet)", "title": "" }, { "docid": "b020f16239850044187cd26656b3eb2a", "score": "0.5821677", "text": "def get_area(self):\n return (self.length ** 2 * math.sqrt(3)) / 4", "title": "" }, { "docid": "a748deb31d5fa5be35495e549d53069d", "score": "0.5820992", "text": "def area(self):\n return Fraction(1, 2) \\\n * self.edges \\\n * self.edge_length \\\n * self.apothem", "title": "" }, { "docid": "998a906789149ba30c6d7eab5e832ef4", "score": "0.5802728", "text": "def get_area(self):\n coords = self.polygon.centroid.coords\n self.polygon.transform(EPSG_NY, clone=False)\n return self.polygon.area, coords", "title": "" }, { "docid": "85c58d67239c22b45e6b24e94cc7a9c4", "score": "0.5795838", "text": "def area(x,y):\n return x * y", "title": "" }, 
{ "docid": "1f66cfe6c7107d0f46adc11f8cc7f4dd", "score": "0.5773974", "text": "def get_area_from_points(\n mesh: meshio.Mesh, points: List[int], min_value: float = 1e-10\n) -> float:\n a: np.ndarray\n b: np.ndarray\n c: np.ndarray\n a, b, c = mesh.points[points[0]], mesh.points[points[1]], mesh.points[points[2]]\n return max(0.5 * np.linalg.norm((np.cross(a - c, b - c))), min_value)", "title": "" }, { "docid": "84fe01cc011adc230b6b00790a236e0b", "score": "0.5737316", "text": "def perimeter_triangle(a, b, c: Number) -> Number:\n return a + b + c", "title": "" }, { "docid": "592825a105125e940a7a78e8c1b8e4e2", "score": "0.57358825", "text": "def triangle(self):\n\t\t# noinspection PyTypeChecker\n\t\treturn distance.squareform(self.squareform().values)", "title": "" }, { "docid": "b50f521e0774d2bd1d0c83972cd711c7", "score": "0.5726993", "text": "def triangle_area(): \n print_title(\"OPTION 1-TRIANGLE AREA\")\n h = input_int(\"Triangle height: \")\n b = input_int(\"Triangle base: \")\n precision = input(\"Do you want Decimals?: [y/n]\")\n if precision == \"y\":\n r = (b * h) / 2\n else:\n r = (b * h) // 2\n print(f\"AREA: {r}\")", "title": "" }, { "docid": "e004eca4920b218f61c2e77ebfe9aa45", "score": "0.5710562", "text": "def calculate_area(a: float, b: float, c: float):\n p = (a + b + c) / 2.0\n area = math.sqrt(p * (p - a) * (p - b) * (p - c))\n return area", "title": "" }, { "docid": "8d37377186448db25cdc06998c1c6797", "score": "0.57068974", "text": "def trapezoid_area(base_minor, base_major, height):\n # You have to code here\n # REMEMBER: Tests first!!!\n area = height * ((base_minor + base_major)/ 2)\n return area", "title": "" }, { "docid": "577cd84b19f63ee0964d7b4329d649eb", "score": "0.56994534", "text": "def area_rect(l, w):\n a = l * w\n return a", "title": "" }, { "docid": "214fec990a0d7baa96a80c0f5d96efd0", "score": "0.56805736", "text": "def area_box(self):\n return (self.x_max - self.x_min) * (self.y_max - self.y_min)", "title": "" }, { "docid": "08eb9112a80e6c06f3aa68c807bbc5ba", "score": "0.56796795", "text": "def _area(bboxes):\n size = bboxes[:, 2:4] - bboxes[:, 0:2]\n area = size[:, 0] * size[:, 1]\n return area", "title": "" }, { "docid": "151b8dfbf83c2b216ab154ea5304fcc3", "score": "0.5647638", "text": "def area(self) -> float:\n return self.w * self.h", "title": "" }, { "docid": "a9c99d1dfaa0d8eb2fd86b2c17d692b3", "score": "0.56459033", "text": "def NetPolygonsArea(polys):\n area = 0.0\n for i in range(len(polys)):\n area += SignedPolygonArea(polys[i])\n return area", "title": "" }, { "docid": "68391f00de257e3966934c1481e8bed5", "score": "0.564241", "text": "def _get_triangleCount(self) -> \"int\" :\n return _fusion.PolygonMesh__get_triangleCount(self)", "title": "" }, { "docid": "785077d51f4491d96c6f94911ebb05ed", "score": "0.56256735", "text": "def area(self):\r\n return (self.ymax - self.ymin) * (self.xmax - self.xmin)", "title": "" }, { "docid": "c0de8bf31616b557df3cc13f8696d22c", "score": "0.5624591", "text": "def _ComputeArea(cutPoly):\n area = []\n for i in range(cutPoly.GetNumberOfCells()):\n tri=polydata.GetCell(i)\n points=tri.GetPoints()\n area.append(tri.ComputeArea())\n\n print('Area is {}'.format(sum(area)))\n print('Diameter is {}'.format(sqrt(4*sum(area)/pi**2)))", "title": "" }, { "docid": "9b0034de00bf9744946176ffb81e7ccb", "score": "0.56236637", "text": "def area(self):\n return len(self.coordinates)", "title": "" }, { "docid": "ccc1ad5a37fc0aa7bcac87e7b6b63818", "score": "0.56100273", "text": "def rotatedRectangularArea(self):\n return 
polygonArea(self.rotatedBox)", "title": "" }, { "docid": "6ed77555c1c0fa93a1e2d7d36137471e", "score": "0.5603313", "text": "def _faces_areas(self) -> np.ndarray:\n vertices = np.array(self.v)\n faces = np.array(self.f)\n faces_vertices = [\n (\n vertices[faces[f][0], :],\n vertices[faces[f][1], :],\n vertices[faces[f][2], :],\n )\n for f in range(faces.shape[0])]\n\n triangles = np.concatenate([\n np.expand_dims(np.array((\n np.sqrt(np.sum(np.power((tri[0] - tri[1]), 2))),\n np.sqrt(np.sum(np.power((tri[0] - tri[2]), 2))),\n np.sqrt(np.sum(np.power((tri[1] - tri[2]), 2))),\n )), 0) for tri in faces_vertices], 0)\n\n # Heron's formula\n s = np.sum(triangles, 1) / 2\n\n # area of each face\n areas = np.array([np.sqrt((s[t] * (s[t] - triangles[t, 0]) * (s[t] - triangles[t, 1]) * (s[t] - triangles[t, 2])))\n for t, tri in enumerate(triangles)])\n return areas", "title": "" }, { "docid": "384cdd28cb3a64c46de6d8dfdd481b3b", "score": "0.56016064", "text": "def area(self, jet):\n return _fastjet.ClusterSequenceVoronoiArea_area(self, jet)", "title": "" }, { "docid": "d47956cd22e845a7b683d17705b78307", "score": "0.5575096", "text": "def panels_area(tris):\n Ni = np.cross( tris[::,1 ] - tris[::,0] , tris[::,2 ] - tris[::,0] )\n Area = np.sqrt(Ni[:,0]**2 + Ni[:,1]**2 + Ni[:,2]**2) * 0.5\n \n return Area", "title": "" }, { "docid": "16f0e81cc1f4d7d27bcedb076fa93cff", "score": "0.55578417", "text": "def compute_square_area(side):\n return side ** 2", "title": "" }, { "docid": "92365ea6439c9c55f499e2bd23b08402", "score": "0.5556692", "text": "def _area(vset):\n # Initialize A as it could be calculated iteratively\n A = 0\n # Check multiple exteriors\n if type(vset[0][0]) == list:\n # Calc every exterior separately\n for i in range(len(vset)):\n A += pyclipper.scale_from_clipper(pyclipper.scale_from_clipper(pyclipper.Area(pyclipper.scale_to_clipper(vset[i]))))\n else:\n # Single exterior\n A = pyclipper.scale_from_clipper(pyclipper.scale_from_clipper(pyclipper.Area(pyclipper.scale_to_clipper(vset))))\n return A", "title": "" }, { "docid": "d388d7f51da6988d3fac85e55cd10151", "score": "0.5549856", "text": "def surface_area(self):\n l, w, h = self.all\n return 2 * l * w + 2 * w * h + 2 * l * h", "title": "" }, { "docid": "dae9bde064f6d9940845fd0779cbb040", "score": "0.554556", "text": "def _triangle_vertices(self, angle, center=0):\n\n angle = radians(self._hue_degrees)\n hx = floor(center + 0.5 + (cos(angle) * self._triangle_radius))\n hy = floor(center + 0.5 - (sin(angle) * self._triangle_radius))\n sx = floor(center + 0.5 + (cos(angle + (2.0 * pi / 3.0)) * self._triangle_radius))\n sy = floor(center + 0.5 - (sin(angle + (2.0 * pi / 3.0)) * self._triangle_radius))\n vx = floor(center + 0.5 + (cos(angle + (4.0 * pi / 3.0)) * self._triangle_radius))\n vy = floor(center + 0.5 - (sin(angle + (4.0 * pi / 3.0)) * self._triangle_radius))\n\n return hx, hy, sx, sy, vx, vy", "title": "" }, { "docid": "15b3baa328c9318a8e151c811bc69364", "score": "0.55320925", "text": "def area_rectangle(length, width) -> Number:\n return length*width", "title": "" }, { "docid": "07366dac9361fdbce0e8066efcf00d2b", "score": "0.55202895", "text": "def cuboid_surface_area(length, width, height):\n return 2*(length*width+length*height+width*height)", "title": "" }, { "docid": "03de21e137b65cf4abaab224cb89dfa1", "score": "0.5515626", "text": "def geometry_area_weights(cube, geometry, normalize=False):\r\n # Validate the input parameters\r\n if not cube.coords(axis='x') or not cube.coords(axis='y'):\r\n raise ValueError('The cube must 
contain x and y axes.')\r\n\r\n x_coords = cube.coords(axis='x')\r\n y_coords = cube.coords(axis='y')\r\n if len(x_coords) != 1 or len(y_coords) != 1:\r\n raise ValueError('The cube must contain one, and only one, coordinate for each of the x and y axes.')\r\n\r\n x_coord = x_coords[0]\r\n y_coord = y_coords[0]\r\n if not (x_coord.has_bounds() and y_coord.has_bounds()):\r\n raise ValueError('Both horizontal coordinates must have bounds.')\r\n\r\n if x_coord.ndim != 1:\r\n raise iris.exceptions.CoordinateMultiDimError(x_coord)\r\n if y_coord.ndim != 1:\r\n raise iris.exceptions.CoordinateMultiDimError(y_coord)\r\n\r\n # Figure out the shape of the horizontal dimensions\r\n shape = [1] * len(cube.shape)\r\n x_dim = cube.coord_dims(x_coord)[0]\r\n y_dim = cube.coord_dims(y_coord)[0]\r\n shape[x_dim] = x_coord.shape[0]\r\n shape[y_dim] = y_coord.shape[0]\r\n weights = np.empty(shape, np.float32)\r\n\r\n # Calculate the area weights\r\n x_bounds = x_coord.bounds\r\n y_bounds = y_coord.bounds\r\n for nd_index in np.ndindex(weights.shape):\r\n xi = nd_index[x_dim]\r\n yi = nd_index[y_dim]\r\n x0, x1 = x_bounds[xi]\r\n y0, y1 = y_bounds[yi]\r\n polygon = Polygon([(x0, y0), (x0, y1), (x1, y1), (x1, y0)])\r\n if normalize:\r\n weights[nd_index] = polygon.intersection(geometry).area / polygon.area\r\n else:\r\n weights[nd_index] = polygon.intersection(geometry).area\r\n\r\n # Fix for the limitation of iris.analysis.MEAN weights handling.\r\n # Broadcast the array to the full shape of the cube\r\n weights = np.broadcast_arrays(weights, cube.data)[0]\r\n\r\n return weights", "title": "" }, { "docid": "c5dd08439deb56fc5730ac6d0cd5c625", "score": "0.5493793", "text": "def area(self) -> np.float64:\n return 0.5 * self.normal().norm()", "title": "" }, { "docid": "91c28923172d53c582927b6915bfb53e", "score": "0.5489234", "text": "def area_trapezoid(base_1, base_2, height) -> Number:\n return 1/2*(base_1 + base_2)*height", "title": "" }, { "docid": "5d63a0cd5fc1091ee830998703a7cc72", "score": "0.5483533", "text": "def area_of(self, left_top, right_bottom):\n hw = np.clip(right_bottom - left_top, 0.0, None)\n return hw[..., 0] * hw[..., 1]", "title": "" }, { "docid": "286e67fe41b4c7e219d124457fb4d3c4", "score": "0.5470709", "text": "def SignedPolygonArea(poly):\n n = len(poly) # of poly\n area = 0.0\n for i in range(n):\n j = (i + 1) % n\n area += poly[i][0] * poly[j][1]\n area -= poly[j][0] * poly[i][1]\n area = area / 2.0\n\n\n if np.isfinite(area):\n if area > 0:\n cornercorrection = 0.5\n elif area == 0:\n cornercorrection = 0.\n elif area < 0:\n cornercorrection = -0.5\n return area + cornercorrection\n else: \n print(\"[polytools] WARNING nan area encountered\")\n return 0.", "title": "" }, { "docid": "1cfff9454f645495b5392315dd9512a3", "score": "0.5466223", "text": "def _e_area(a):\r\n x0, y1 = (a.T)[:, 1:]\r\n x1, y0 = (a.T)[:, :-1]\r\n e0 = np.einsum('...i,...i->...i', x0, y0)\r\n e1 = np.einsum('...i,...i->...i', x1, y1)\r\n return np.nansum((e0-e1)*0.5)", "title": "" } ]
4128657e337d1d76f50983275de1d03c
Method to convert assessor label into a dictionary
[ { "docid": "08d533d9dece6896af31f5254a0b5073", "score": "0.7269525", "text": "def get_assessor_dict(assessor_label):\n assessor_dict = dict()\n labels = assessor_label.split('-x-')\n if len(labels) == 1:\n print'ERROR: WRONG PROCESS LABEL: the assessor label can not be set (ERROR no \"-x-\" in the name)'\n print' -> Skipping the processor %s' % (assessor_label)\n else:\n assessor_dict['project_id'] = labels[0]\n assessor_dict['subject_label'] = labels[1]\n assessor_dict['session_label'] = labels[2]\n assessor_dict['label'] = assessor_label\n assessor_dict['proctype'] = labels[-1]\n if assessor_dict['proctype'] == 'FS' and XnatUtils.has_fs_datatypes:\n assessor_dict['xsiType'] = XnatUtils.DEFAULT_FS_DATATYPE\n else:\n assessor_dict['xsiType'] = XnatUtils.DEFAULT_DATATYPE\n return assessor_dict", "title": "" } ]
[ { "docid": "807e2a0238177386f84ae6d97f5c6d81", "score": "0.61845165", "text": "def get_labels(self):\n\t\tld = {}\n\t\tfor label in self.labels:\n\t\t\ttokens = label.split(\":\")\n\t\t\tld[tokens[0]] = tokens[1]\n\t\treturn ld", "title": "" }, { "docid": "c791b0778493c3bc4f7b391e1407d05c", "score": "0.6141668", "text": "def get_labels_dict(acquisitions, separator='_', detectPosition=True):\n labels_dict = {}\n value = 0\n for key in acquisitions.keys():\n key = key.split('_')[0]\n key = key.split(separator)\n if key[0] == \"Normal\" or detectPosition:\n label = key[0]\n else:\n label = key[0][2:]\n if not label in labels_dict:\n labels_dict[label] = value\n value += 1\n return labels_dict", "title": "" }, { "docid": "f6b7519e610a19fd27d48885f4a0a8d7", "score": "0.6018663", "text": "def __label_mapper(label):\n mapper = {'Normal': 0, 'PVC': 1#, 'SPB': 2\n }\n \n y = mapper[label]\n \n return y", "title": "" }, { "docid": "59fa727603cef8874026c2214b348b10", "score": "0.6017599", "text": "def _label_task(input_dataframe: DataFrame) -> dict:\n # Stupid but it works\n # True must be turned into 'true'\n json_value = input_dataframe.label.value_counts().to_json()\n return json.loads(json_value)", "title": "" }, { "docid": "e78a5d42f110eec87903a1d0ffa72bcd", "score": "0.5934875", "text": "def __label_mapper(label):\n mapper = {'N': 0, 'PVC': 1 # , 'SPB': 2\n }\n\n y = mapper[label]\n\n return y", "title": "" }, { "docid": "6a15de1af46eecf1fe5467f8495ef664", "score": "0.59233665", "text": "def __label_mapper(label):\n\n mapper = {'Normal': 0, 'PVC': 1}#'SPB': 1, 'PVC': 2\n y = mapper[label]\n\n return y", "title": "" }, { "docid": "5a3a0969bac8fc2a141a6bd66605301c", "score": "0.5861572", "text": "def __label_mapper(label):\n mapper = {'Normal': 0, 'SPB': 1, 'PVC': 2}\n\n y = mapper[label]\n\n return y", "title": "" }, { "docid": "5a3a0969bac8fc2a141a6bd66605301c", "score": "0.5861572", "text": "def __label_mapper(label):\n mapper = {'Normal': 0, 'SPB': 1, 'PVC': 2}\n\n y = mapper[label]\n\n return y", "title": "" }, { "docid": "3a274a971464dcea39d6f7e4557fdfc6", "score": "0.5835935", "text": "def parse_labels(response):\n return {\n label[\"Name\"]: round(label[\"Confidence\"], 2) for label in response[\"Labels\"]\n }", "title": "" }, { "docid": "85cd0b50d4f7e56e86fb4174627622a4", "score": "0.57522", "text": "def createDictLabels(labels):\n\n # Re-arange the Target vectors between [0..nClasses_train]\n labels = labels.numpy()\n unique_labels = np.unique(labels)\n dictLabels = {val: i for i, val in enumerate(unique_labels)}\n dictLabelsInverse = {i: val for i, val in enumerate(unique_labels)}\n return dictLabels,dictLabelsInverse", "title": "" }, { "docid": "01f672874e892f63aa6ea3d583e73676", "score": "0.5692806", "text": "def pred2dict(self, data_sample: KIEDataSample) -> Dict:\n result = {}\n pred = data_sample.pred_instances\n result['scores'] = pred.scores.cpu().numpy().tolist()\n result['edge_scores'] = pred.edge_scores.cpu().numpy().tolist()\n result['edge_labels'] = pred.edge_labels.cpu().numpy().tolist()\n result['labels'] = pred.labels.cpu().numpy().tolist()\n return result", "title": "" }, { "docid": "5e8597e07a428f06a2edb6628c46ea55", "score": "0.56730324", "text": "def security_label_map(self):\n return {\n 'tlp:white': 'marking-definition--613f2e26-407d-48c7-9eca-b8e91df99dc9',\n 'tlp:green': 'marking-definition--34098fce-860f-48ae-8e50-ebd3cc5e41da',\n 'tlp:amber': 'marking-definition--f88d31f6-486f-44da-b317-01333bde0b82',\n 'tlp:red': 
'marking-definition--5e57c739-391a-4eb3-b6be-7d15ca92d5ed',\n }", "title": "" }, { "docid": "4097ce2e488537020e67eeefb95a1c9f", "score": "0.5661316", "text": "def labels_(self):\n return {self.LABEL_LICIT: 0, \n self.LABEL_ILLICIT: 1}", "title": "" }, { "docid": "76f5ff631670f96d4c94921c3e71f283", "score": "0.5631069", "text": "def label_to_data_id(self):\n return {\"Altitude\": DataEntryIds.CALCULATED_ALTITUDE,\n \"MaxAltitude\": None,\n \"State\": DataEntryIds.STATE,\n \"Pressure\": DataEntryIds.PRESSURE,\n \"Acceleration\": [DataEntryIds.ACCELERATION_X, DataEntryIds.ACCELERATION_Y,\n DataEntryIds.ACCELERATION_Z]\n }", "title": "" }, { "docid": "5f52dd44d9e4a78fb4e3c24dab789e3f", "score": "0.56184167", "text": "def get_inference(self) -> dict:\n return {'output': self.label_pred.argmax(dim=1).cpu().numpy(), 'image_id': self.image_id,\n 'label_orig': self.label_original.cpu().numpy()}", "title": "" }, { "docid": "94a41357c324e035f8e83729d700bbaa", "score": "0.55440646", "text": "def _to_dict(self):\r\n _dict = {}\r\n if hasattr(self, 'label') and self.label is not None:\r\n _dict['label'] = self.label\r\n if hasattr(self, 'value') and self.value is not None:\r\n _dict['value'] = self.value._to_dict()\r\n return _dict", "title": "" }, { "docid": "9905b0d3ec8a3befe5a04470e86619a8", "score": "0.5542545", "text": "def map_labels(self, pgrm: List[Instr]) -> dict:\n label_addrs = {}\n for addr in range(len(pgrm)):\n ins = pgrm[addr]\n if ins.isLabel():\n # duplicate labels are not allowed\n if ins.label in label_addrs:\n raise DuplicateLabel(self, ins.label)\n label_addrs[ins.label] = addr + 1\n return label_addrs", "title": "" }, { "docid": "d2c8e34141f389ddf01a7c14f6bc89b3", "score": "0.5541204", "text": "def process_label(intents, w2v):\n class_dict = {}\n label_vec = []\n class_id = 0\n for line in intents:\n # check whether all the words in w2v dict\n if line == 'Troubleshooting_Technical_Issues':\n line = 'Trouble_shooting_Technical_Issues'\n if '_' in line:\n label = [w.lower().strip() for w in line.split('_')]\n else:\n label = [w.lower().strip() for w in line.split(' ')]\n for w in label:\n if not w2v.vocab.has_key(w.lower()):\n print(\"not in w2v dict\", w)\n # compute label vec\n label_sum = np.sum([w2v[w] for w in label], axis = 0)\n label_vec.append(label_sum)\n # store class names => index\n class_dict[' '.join(label)] = class_id\n class_id = class_id + 1\n return class_dict, np.asarray(label_vec)", "title": "" }, { "docid": "c1acb9afb5c2f34b77cc683d1452008c", "score": "0.55404854", "text": "def get_dict(self):\n return dict(label=self.__label, color=self.__color)", "title": "" }, { "docid": "25168796eef5e37b31fe841a8f68e388", "score": "0.55311346", "text": "def as_dict(self, use_count: int) -> Dict[Text, Any]:\n\n return {\"value\": self.name, \"id\": self.id, \"nlu_examples_count\": use_count}", "title": "" }, { "docid": "b42b52f557f135ded8d1b421c50a6594", "score": "0.55262387", "text": "def get_label(example):\n return example['label'].numpy()", "title": "" }, { "docid": "d91f8ce2c9ac71f3c7b8b9fec37150b0", "score": "0.5513511", "text": "def to_dict():", "title": "" }, { "docid": "9a02ec62382cafc3e89bdfb4b805a48d", "score": "0.54940736", "text": "def convert_to_dict(self) -> dict:", "title": "" }, { "docid": "da846cc22ec06c8d636d33a3ad7729c1", "score": "0.5470197", "text": "def get_k8s_labels_for_job(job: Job) -> Dict[str, str]:\n return {\n \"job_id\": job.id,\n \"job_type\": job.target_type,\n }", "title": "" }, { "docid": "bd642233c29bb7ac5a501f947aa94740", "score": 
"0.546146", "text": "def label(self):\n return self.sess_element.get('label')", "title": "" }, { "docid": "5358922f256e49e14047fa9b7f03fd7d", "score": "0.54249454", "text": "def parse_lexicon_entry(lexicon_entry: str) -> dict:\n functional_labels = {}\n lexicon_entry = lexicon_entry.replace(\"[\", \"\").replace(\"]\", \"\").replace(\";\", \"\")\n labels = lexicon_entry.split()\n for label in labels:\n parts = label.split('=')\n if len(parts) == 2:\n functional_labels[parts[0].strip()] = parts[1].strip()\n return functional_labels", "title": "" }, { "docid": "2b56c3f021e318b3d2bd4911f0c259b0", "score": "0.54211986", "text": "def dict_from_pbtxt_file(fname):\n lines = [line.rstrip('\\n').strip() for line in open(fname)]\n label_map = {}\n curr_label = ''\n curr_id = 0\n\n for l in lines:\n \n if l.startswith( 'display_name: '):\n curr_label = l.split(' ')[1]\n\n if l.startswith( 'id: '):\n curr_id = int(l.split(' ')[1])\n\n if l.startswith( '}'):\n # print(curr_id, curr_label)\n label_map[curr_id] = curr_label.replace(\"\\\"\", \"\")\n\n return label_map", "title": "" }, { "docid": "c591496302f43289dbcfe9f64290c4dc", "score": "0.54123926", "text": "def _get_labels(self, truth_dict: Dict[str, Any]) -> Dict[str, Any]:\n if \"pid\" in truth_dict.keys():\n abs_pid = abs(truth_dict[\"pid\"])\n sim_type = truth_dict[\"sim_type\"]\n\n labels_dict = {\n self._index_column: truth_dict[self._index_column],\n \"muon\": int(abs_pid == 13),\n \"muon_stopped\": int(truth_dict.get(\"stopped_muon\") == 1),\n \"noise\": int((abs_pid == 1) & (sim_type != \"data\")),\n \"neutrino\": int(\n (abs_pid != 13) & (abs_pid != 1)\n ), # @TODO: `abs_pid in [12,14,16]`?\n \"v_e\": int(abs_pid == 12),\n \"v_u\": int(abs_pid == 14),\n \"v_t\": int(abs_pid == 16),\n \"track\": int(\n (abs_pid == 14) & (truth_dict[\"interaction_type\"] == 1)\n ),\n \"dbang\": self._get_dbang_label(truth_dict),\n \"corsika\": int(abs_pid > 20),\n }\n else:\n labels_dict = {\n self._index_column: truth_dict[self._index_column],\n \"muon\": -1,\n \"muon_stopped\": -1,\n \"noise\": -1,\n \"neutrino\": -1,\n \"v_e\": -1,\n \"v_u\": -1,\n \"v_t\": -1,\n \"track\": -1,\n \"dbang\": -1,\n \"corsika\": -1,\n }\n return labels_dict", "title": "" }, { "docid": "128ab4da939efdf9dd0148cafa3655af", "score": "0.5407699", "text": "def set_labels(self,label:dict):\n self.label_dict = label\n print(\"[INFO] Label dictionary : \",label)", "title": "" }, { "docid": "539fab3c7267134f9aaf80dd4114975a", "score": "0.54033065", "text": "def create_ans2label(occurence, name, cache_root):\n label, label2ans, ans2label = 0, [], {}\n for answer in occurence:\n label2ans.append(answer)\n ans2label[answer] = label\n label += 1\n\n utils.create_dir(cache_root)\n\n cache_file = os.path.join(cache_root, name+'_ans2label.json')\n json.dump(ans2label, open(cache_file, 'w'))\n cache_file = os.path.join(cache_root, name+'_label2ans.json')\n json.dump(label2ans, open(cache_file, 'w'))\n return ans2label", "title": "" }, { "docid": "3eb6f8f2e01bea7c93bc65ef696e2e90", "score": "0.5385623", "text": "def ark2dict(arkfile):", "title": "" }, { "docid": "c66257bd0326b0b14c54f97588f3cdf7", "score": "0.5378722", "text": "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "title": "" }, { "docid": "c66257bd0326b0b14c54f97588f3cdf7", "score": "0.5378722", "text": "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "title": "" }, { "docid": "c66257bd0326b0b14c54f97588f3cdf7", "score": "0.5378722", "text": "def labels(self) -> 
Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "title": "" }, { "docid": "c66257bd0326b0b14c54f97588f3cdf7", "score": "0.5378722", "text": "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "title": "" }, { "docid": "c66257bd0326b0b14c54f97588f3cdf7", "score": "0.5378722", "text": "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "title": "" }, { "docid": "c66257bd0326b0b14c54f97588f3cdf7", "score": "0.5378722", "text": "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "title": "" }, { "docid": "c66257bd0326b0b14c54f97588f3cdf7", "score": "0.5378722", "text": "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "title": "" }, { "docid": "c66257bd0326b0b14c54f97588f3cdf7", "score": "0.5378722", "text": "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "title": "" }, { "docid": "c66257bd0326b0b14c54f97588f3cdf7", "score": "0.5378722", "text": "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "title": "" }, { "docid": "ed628b0e2b48af84e68ad32360623284", "score": "0.5369858", "text": "def get_label_yml(label):\n text = f' - name: \"{label[\"name\"]}\"\\n'\n text += f' color: \"{label[\"color\"]}\"\\n'\n text += f' description: \"{label[\"description\"]}\"\\n'\n return text", "title": "" }, { "docid": "17632f7e3ac20d81e99fa2040dab8ec7", "score": "0.5362394", "text": "def predict(self, exemple, label):", "title": "" }, { "docid": "17632f7e3ac20d81e99fa2040dab8ec7", "score": "0.5362394", "text": "def predict(self, exemple, label):", "title": "" }, { "docid": "682cd2798e228da6619b8bebc9773bac", "score": "0.536196", "text": "def get_labels(*args):\n return {name: value for name, value in LABELS.items() if name in args}", "title": "" }, { "docid": "54f67b8e2e2fab8e2bb8627eaa87a10a", "score": "0.53590834", "text": "def aspen_version_label_dict(self):\n # 先以 version name 找到對應的 version label\n l_list = []\n for version_name in self.__version_list:\n h_key = r.HKEY_CLASSES_ROOT(fr\"{version_name}\\shell\")\n for item in h_key.subkeys():\n if re.match(\"Open with Aspen Plus V\\d+.\", item.name): l_list.append(item.name)\n\n # 然後再將 name 與 label 變成字典\n self.__version_label_dict = dict(zip(self.__version_list, l_list))", "title": "" }, { "docid": "027bcc9fb054a27e1ac17e2efcdc9b6e", "score": "0.53537124", "text": "def labels(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"labels\")", "title": "" }, { "docid": "027bcc9fb054a27e1ac17e2efcdc9b6e", "score": "0.53537124", "text": "def labels(self) -> Mapping[str, Any]:\n return pulumi.get(self, \"labels\")", "title": "" }, { "docid": "ded56ba607f6d753abe84f7f517123c7", "score": "0.5336088", "text": "def get_labels(G: nx.classes.graph.Graph,\n ml_target: str) -> Dict:\n\n labels = {}\n for n in G.nodes(data=True):\n labels[n[1]['id']] = n[1][ml_target]\n return labels", "title": "" }, { "docid": "e352369c936c8f498d275251444e6afd", "score": "0.5334917", "text": "def my_fn(x):\n input_label = tf.strings.join([input_feature, ':'], separator='')\n inputs = tf.strings.join(\n [prefix, input_label, x[input_feature]], separator=' ')\n\n class_label = tf.gather(output_classes, x['label'])\n if drop_explanations:\n targets = class_label\n else:\n targets = _explanation_targets(class_label, x['evidences'])\n\n return {'inputs': inputs, 'targets': targets}", "title": "" }, { "docid": "1f0ced40754aa6036772658610bc2bf9", "score": "0.5326014", "text": "def labelToDescription(label) :\n 
descriptions = [\"top\", \"trouser\", \"pullover\",\n \"dress\", \"coat\", \"sandal\",\n \"shirt\", \"sneaker\", \"bag\",\n \"ankle boot\"]\n return descriptions[label]", "title": "" }, { "docid": "40cd4eca35432c103fcc9faebc180417", "score": "0.5324759", "text": "def as_dict(self) -> Dict[str, Any]:", "title": "" }, { "docid": "7eeb479fb6f86530b46754b1b755a6e8", "score": "0.53247446", "text": "def load_labels(\n location: PathLike = DEV_DATA,\n expr_type: ExperimentType = ExperimentType.ils,\n) -> Dict:\n location = pathlib.Path(location) / f\"task-{expr_type.value}\"\n # specify `latin-1` encoding due to an encoding error\n reader = CSVReader(location, DataType.labels, encoding=\"latin-1\")\n data = reader.read_all()\n # an encoding error results in a bad column\n del data[\"Unnamed: 0\"]\n # the conventions for Subject, Date, and Run do not match folder names\n # so we update them here to match the other data\n pilot = data.Subject.apply(lambda x: f\"sub-cp{int(x):03d}\")\n session = data.Date.apply(lambda x: f\"ses-{int(x)}\")\n run = data.Run.apply(lambda x: f\"run-{int(x):03d}\")\n del data[\"Subject\"]\n del data[\"Date\"]\n del data[\"Run\"]\n data[\"pilot\"] = pilot\n data[\"session\"] = session\n data[\"run\"] = run\n data = data.astype(\n {\n \"pilot\": \"category\",\n \"session\": \"category\",\n \"run\": \"category\",\n }\n )\n return {\"labels\": data}", "title": "" }, { "docid": "e3d79df5fea94e0393e2bcedf3986bb7", "score": "0.53228766", "text": "def getMetadataLabels(self):\n return { \"title\": self.label }", "title": "" }, { "docid": "600b0fc605315140ae662443197931a9", "score": "0.5321642", "text": "def to_dict(self) -> t.Dict[str, t.Any]:\n return {k: getattr(self, k) for k in self._get_annotations()}", "title": "" }, { "docid": "934aea56fc6b067c8c106134a50cdd38", "score": "0.5319331", "text": "def make_metadata(self, margin, best_label):\n return {\n 'confidence': float(f'{margin: 1.2f}'),\n 'job-name': self.job_name,\n 'class-name': self.label_names[self.get_label_index(best_label)],\n 'human-annotated': 'no',\n 'creation-date': datetime.utcnow().strftime('%Y-%m-%dT%H:%m:%S.%f'),\n 'type': JOB_TYPE\n }", "title": "" }, { "docid": "b5f23662056a15ac30ce32db31294c94", "score": "0.53111476", "text": "def labels(observation):\n raise NotImplementedError", "title": "" }, { "docid": "9472a095408517cb9521788c519c2d04", "score": "0.5311099", "text": "def asdict(self) -> Dict[str, Any]:", "title": "" }, { "docid": "0559ca65b68dc65a0ca91e319d6b497e", "score": "0.5298789", "text": "def individual_to_params(individual):\n return dict(zip(GENE_LABELS, individual))", "title": "" }, { "docid": "bfc19f087b25f791a49a04260b29e73f", "score": "0.52983963", "text": "def __init__(self, label):\n self.assessor_label = label\n self.is_session_assessor = False\n self.is_scan_assessor = False\n if len(re.findall('-x-', label)) == 3:\n labels = label.split('-x-')\n self.project_id = labels[0]\n self.subject_label = labels[1]\n self.session_label = labels[2]\n self.proctype = labels[3]\n self.scan_id = None\n self.is_session_assessor = True\n elif len(re.findall('-x-', label)) == 4:\n labels = label.split('-x-')\n self.project_id = labels[0]\n self.subject_label = labels[1]\n self.session_label = labels[2]\n self.scan_id = labels[3]\n self.proctype = labels[4]\n self.is_scan_assessor = True\n else:\n self.assessor_label = None", "title": "" }, { "docid": "a220b6a1e2e7c48fe9e2cb49e1260583", "score": "0.52759767", "text": "def get_label_text(label_number):\n mapping = {}\n with 
open(CLASS_MAPPING_FILE, 'r') as fp:\n for _, line in enumerate(fp):\n terms = line.split('\\t')\n # mapping", "title": "" }, { "docid": "1b0d9280e9290e6a138216bd7dfa277c", "score": "0.52754885", "text": "def assignVar2label(node2var):\n node2label = {}\n for node in node2var.keys():\n label = node2var[node].replace(\"x{0}?\".format(node),\"\")\n node2label[node] = label\n return node2label", "title": "" }, { "docid": "5ed5eff77bad17151c399a09e9fa59fc", "score": "0.52524656", "text": "def getLabelInfo(self, label) -> retval:\n ...", "title": "" }, { "docid": "052f415ba3539cbf324771c2a51e3ade", "score": "0.5244683", "text": "def predict_label(text: str):\n # Step 01: Predict the label\n prediction_result = Classifier().get_instance().predict(text)\n\n # Step 02: Parse the prediction result.\n predicted_label = str(prediction_result[0][0])\n predicted_label = predicted_label.replace(\"__label__\", \"\").strip()\n confidence = round(100 * prediction_result[1][0], 2)\n\n # Step 03: Return the result.\n return predicted_label, confidence", "title": "" }, { "docid": "bccd7e38987d2f844bcc6165b7134aa2", "score": "0.52344626", "text": "def _impact_labels():\n # Prevent reset upon auto reload in jupyter notebook\n if not '_impact_labels' in builtins.__dict__:\n builtins._impact_labels = dict()\n\n return builtins._impact_labels", "title": "" }, { "docid": "4ccfedb796d6d1e66eefa1e4f82c2bf3", "score": "0.5218931", "text": "def get_label(self):", "title": "" }, { "docid": "77f94c9e9cde815255bc44bbd40625af", "score": "0.5216753", "text": "def _collect_label(self, file_id, content):\n label_dressed = dict()\n label_dressed[file_id] = {cls:[] for cls in self.classes[1:]}\n for line in content:\n cls = line['field_name']\n if cls in self.classes:\n #identity = line.get('identity', 0) \n label_dressed[file_id][cls].append( {'key_id':[], 'value_id':[], 'key_text':'', 'value_text':''} )\n label_dressed[file_id][cls][-1]['key_id'] = line.get('key_id', [])\n label_dressed[file_id][cls][-1]['value_id'] = line['value_id'] # value_id\n label_dressed[file_id][cls][-1]['key_text'] = line.get('key_text', []) \n label_dressed[file_id][cls][-1]['value_text'] = line['value_text'] # value_text\n \n # handle corrupted data\n for cls in label_dressed[file_id]: \n for idx, label in enumerate(label_dressed[file_id][cls]):\n if len(label) == 0: # no relevant class in sample @file_id\n continue\n if (len(label['key_text'])>0 and len(label['key_id'])==0) or \\\n (len(label['value_text'])>0 and len(label['value_id'])==0):\n return None\n \n return label_dressed", "title": "" }, { "docid": "f65bbd161c3afec09e3b8f08ca88e803", "score": "0.51999146", "text": "def read_img_labels(img_label_path):\n with open(img_label_path,\"r\") as img_label_file:\n data_lines = img_label_file.readlines()\n x1,y1,x2,y2 = data_lines[1].split(\"\\t\")\n img_label_file.close()\n return {\"x1\":int(x1),\"y1\":int(y1),\"x2\":int(x2),\"y2\":int(y2)}", "title": "" }, { "docid": "4087ce8dd5025c28aaf1b2839e13fbc7", "score": "0.5198736", "text": "def info(self):\n sess_info = {}\n\n sess_info['ID'] = self.get('ID')\n sess_info['label'] = self.get('label')\n sess_info['note'] = self.get('xnat:note')\n sess_info['session_type'] = self.get('session_type')\n sess_info['project_id'] = self.project\n sess_info['original'] = self.get('original')\n sess_info['modality'] = self.get('modality')\n sess_info['UID'] = self.get('UID')\n sess_info['subject_id'] = self.get('xnat:subject_ID')\n sess_info['subject_label'] = self.subject\n sess_info['project_label'] = 
sess_info['project_id']\n sess_info['project'] = sess_info['project_id']\n sess_info['subject_ID'] = self.get('xnat:subject_ID')\n sess_info['URI'] = '/data/experiments/%s' % sess_info['ID']\n sess_info['session_label'] = sess_info['label']\n sess_info['last_updated'] = sess_info['original']\n sess_info['type'] = sess_info['modality']\n\n return sess_info", "title": "" }, { "docid": "a37a930b620268ef093cfc437aed7126", "score": "0.519552", "text": "def create_edge_labels(game_board):\n edge_labels = {}\n for edge in game_board:\n edge_labels[(edge[0][1],edge[1][1])] = edge[2]\n return edge_labels", "title": "" }, { "docid": "f814b51d76d48c8bb18edcfb568456a4", "score": "0.51852524", "text": "def prepare_labels(labels):\n d = {}\n count = 0\n setlabels = set(labels)\n for w in setlabels:\n d[w] = count\n count += 1\n idxlabels = np.array([d[w] for w in labels])\n return idxlabels", "title": "" }, { "docid": "e3517bee022723f6ccec853140d1105a", "score": "0.51809204", "text": "def load_labels():", "title": "" }, { "docid": "fbf76a8614dd0971da88b77905e2fc4e", "score": "0.5176966", "text": "def parse_possible_labels(word_and_labels):\n possible_labels = {}\n for i, word_label in enumerate(word_and_labels):\n word, label = word_label\n if not word in possible_labels:\n possible_labels[word] = set([label])\n else:\n possible_labels[word].add(label)\n return possible_labels", "title": "" }, { "docid": "a1602671a0b225c0112a10ad9d168985", "score": "0.516992", "text": "def get_event_label_dict(self, pattern=None):\n return self._parser_obj.get_event_label_dict(pattern=pattern)", "title": "" }, { "docid": "c106850bdecb0e3ed2f0672749e8c39a", "score": "0.51476675", "text": "def getAnnotationDict_old(self):\n html = unicode(self.toHtml())\n parser = etree.HTMLParser()\n tree = etree.fromstring(html, parser)\n aElements = tree.findall(\".//a[@name]\")\n ret = {}\n for aElement in aElements:\n idAnnotation = aElement.attrib[\"name\"]\n annotation = \"\"\n if idAnnotation == \"title\":\n for t in aElement.getparent().itertext():\n annotation = annotation + t\n else:\n for t in aElement.getparent().getparent().getnext().itertext():\n annotation = annotation + t\n annotation = annotation.strip()\n if annotation == self.strEmptyCharacter:\n annotation = \"\"\n ret[idAnnotation] = annotation\n #print \"%s: %s\" % (idAnnotation, annotation)\n return ret", "title": "" }, { "docid": "35c13318b1839e66f856baf62bdbc2a8", "score": "0.5146432", "text": "def _predict(self, X):\n y_pred = self._automl.predict(X)\n y_pred = self.dataset.inverse_transform_labels(y_pred)\n return {self.Keys.PREDICTED: y_pred}", "title": "" }, { "docid": "03d85f314da1c9918ef18cb4a44f6f2d", "score": "0.5137802", "text": "def _inputs_to_dict(**kwords):\n return kwords", "title": "" }, { "docid": "99e225bb141c6c4534b5ede3fdc1325f", "score": "0.5136677", "text": "def convert_label_to_idx(self, label):\n label = label.strip()\n return class2label[label]", "title": "" }, { "docid": "b0c1dc74b5197cd2a93f1f6ff98a39d3", "score": "0.5126444", "text": "def get_label_mapping(label_file):\n with open(label_file, 'r') as f:\n id2label = f.readlines()\n id2label = [l.strip() for l in id2label]\n label2id = {}\n count = 0\n for label in id2label:\n label2id[label] = count\n count += 1\n return id2label, label2id", "title": "" }, { "docid": "dc824fa2eaf11763eb5ac1c3e652f035", "score": "0.51264375", "text": "def state_dict(self) -> Dict:", "title": "" }, { "docid": "3d521c56ad86ea4755e8f57bb22249b2", "score": "0.5117194", "text": "def pipeline_dict(request) -> 
dict:\n pipeline_dict = {\n \"name\": \"transformers_tokenizer_test\",\n \"features\": {\n \"transformers\": {\"model_name\": \"sshleifer/tiny-distilroberta-base\"}\n },\n \"head\": {\n \"type\": \"TextClassification\",\n \"labels\": [\"a\", \"b\"],\n },\n }\n return pipeline_dict", "title": "" }, { "docid": "273cdf8275aee5da47809feb999008a6", "score": "0.5114229", "text": "def assignLabel2Var(node2label):\n node2var = {}\n for node in node2label.keys():\n var = \"x{0}?{1}\".format(node,node2label[node])\n node2var[node] = var\n return node2var", "title": "" }, { "docid": "fd120c04e7b42408e98947a8f1313525", "score": "0.5107142", "text": "def pred2dict(self, data_sample: InstanceData) -> Dict:\n result = {}\n if 'pred_instances_3d' in data_sample:\n pred_instances_3d = data_sample.pred_instances_3d.numpy()\n result = {\n 'bboxes_3d': pred_instances_3d.bboxes_3d.tensor.cpu().tolist(),\n 'labels_3d': pred_instances_3d.labels_3d.tolist(),\n 'scores_3d': pred_instances_3d.scores_3d.tolist()\n }\n\n if 'pred_pts_seg' in data_sample:\n pred_pts_seg = data_sample.pred_pts_seg.numpy()\n result['pts_semantic_mask'] = \\\n pred_pts_seg.pts_semantic_mask.tolist()\n\n return result", "title": "" }, { "docid": "bf2513229a6782c77f4434092e65b031", "score": "0.5099719", "text": "def prepare_labels(self, dots):\n # The expected decoding is just the input.\n labels = dots.copy()\n labels[\"decode\"] = self._small_eye\n # We don't really care about the encoded representation, so we can just set\n # the labels to what the output already is.\n labels[\"encode\"] = self.__encoded\n return labels", "title": "" }, { "docid": "dde4846796372364d21a33bce5154237", "score": "0.50925577", "text": "def _convert_labels_to_label_mapping(labels, requires_zero_mapping):\n if isinstance(labels, dict):\n return labels\n\n # if list\n start_index = 0 if requires_zero_mapping else 1\n return dict(zip(labels, list(\n range(start_index, start_index + len(labels)))))", "title": "" }, { "docid": "06ff4d84e6e133ea25962d8bbbd9e4f7", "score": "0.50896394", "text": "def __getitem__(self, index):\n\n\t\treturn {'domain_a': super(MNISTG4LANDDataset, self).__getitem__(self.domain_a[index]),\\\n\t\t\t\t 'domain_b': super(MNISTG4LANDDataset, self).__getitem__(self.domain_b[index]), \\\n\t\t\t\t 'y_label': self.to_one_of_k(int(self.y_label[index]), 2)}", "title": "" }, { "docid": "0200eed5a69c4be12aabfeacd1717b34", "score": "0.50820607", "text": "def asDict(self):\n return {\n \"predominant_emotion\": self.predominateEmotion.name.lower(),\n \"estimations\": {\n \"anger\": self.anger,\n \"disgust\": self.disgust,\n \"fear\": self.fear,\n \"happiness\": self.happiness,\n \"sadness\": self.sadness,\n \"surprise\": self.surprise,\n \"neutral\": self.neutral,\n },\n }", "title": "" }, { "docid": "c964b69b4ddebf65a91b4aad2ebff6b6", "score": "0.50816333", "text": "def get_labels(self, encoding='shift-jis'):\n label_dict = dict()\n label_dict[0] = 'NULL'\n data_length = len(self._data)\n label_base_offset = data_length + len(self._p1_list) * 4 + \\\n len(self._p2_list) * 8\n for pointer1 in self._p1_list:\n ptr = unpack('<I', self._data[pointer1:pointer1 + 0x4])[0]\n if ptr < data_length:\n continue\n offset = ptr - label_base_offset\n length = 0\n while self._labels[offset + length] != '\\0':\n length += 1\n if encoding == 'shift-jis':\n label_dict[ptr] = self._labels[offset:offset + length]\n else:\n label_dict[ptr] = self._labels[offset:offset + length]\\\n .decode('shift-jis').encode(encoding)\n return label_dict", "title": "" }, { "docid": 
"2d72f8114f2d918cfe22f97aaa30e6ef", "score": "0.5076148", "text": "def get_withLabel(self):\r\n \r\n return self.obj_dict['withLabel']", "title": "" }, { "docid": "01411627b12e7d7719c93e3676f31c15", "score": "0.50739354", "text": "def encode_label(self, label):\n if label not in self.labels_dict:\n self.labels_dict[label] = self.labels[0].tolist()\n self.labels = self.labels[1:]\n return self.labels_dict[label]", "title": "" }, { "docid": "ef6eba6101b0ac3a47779d426833d933", "score": "0.5068386", "text": "def read_labels(file_path):\n with open(file_path, \"r\", encoding=\"utf-8\") as label_file:\n lines = label_file.readlines()\n labels = {}\n for line in lines:\n pair = line.strip().split(maxsplit=1)\n labels[int(pair[0])] = pair[1].strip()\n return labels", "title": "" }, { "docid": "8d1c6b6ed925378a4b3a95ce633f4fb7", "score": "0.5066627", "text": "def to_dict(self):\n return {\n \"acc\": \"None\" if self.acc is None else \"%.2f%%\" % self.acc,\n \"beatmap\": map_str(self.beatmap) if self.beatmap else \"None\",\n \"mode\": \"Unknown\"\n if self.mode is None\n else consts.mode2str[self.mode], # noqa\n \"mods\": combine_mods(self.mods),\n \"player\": self.player.username if self.player else \"None\",\n \"guest mapper\": self.guest_mapper.username\n if self.guest_mapper\n else \"None\", # noqa\n }", "title": "" }, { "docid": "ff22da51da5c377222eef8857f62027f", "score": "0.5066222", "text": "def convert_to_dict(data):\n pass #TODO implement", "title": "" }, { "docid": "9506ff1bd08b0c355c97b2ac92baa8be", "score": "0.5057204", "text": "def copa_preformat_fn(ex: Dict[str, Any]) -> Dict[str, Any]:\n premise, question = ex['premise'].decode(), ex['question'].decode()\n input_prompt = f'{premise} What was the {question} of this?'\n choices = {\n ex['choice1'].decode(): -ex['label'] + 1,\n ex['choice2'].decode(): ex['label']\n }\n return {'input': input_prompt, 'target_scores': choices}", "title": "" }, { "docid": "91a70b02fe14b7c7385276179d9a27dd", "score": "0.5053743", "text": "def state(self) -> Dict[str, Any]:", "title": "" }, { "docid": "28eda5082edf2ef8912608cde15eb812", "score": "0.5053538", "text": "def labels_str_to_rule_format(labels_string: str, gc_api: RESTManagementAPI) -> Dict[str, List[Dict[str, List[str]]]]:\r\n structured_labels = {\"or_labels\": list()}\r\n # Normalize spaces\r\n labels_string = labels_string.replace(\", \", \",\").replace(\r\n \" ,\", \",\").replace(\"& \", \"&\").replace(\" &\", \"&\")\r\n labels_string = labels_string.replace(\": \", \":\").replace(\" :\", \":\").strip()\r\n\r\n for or_label in labels_string.split(','):\r\n and_labels = {\"and_labels\": list()}\r\n for and_label in or_label.split('&'):\r\n key, value = and_label.split(':')\r\n and_labels[\"and_labels\"].append(get_label_id(key, value, gc_api))\r\n structured_labels[\"or_labels\"].append(and_labels)\r\n return structured_labels", "title": "" }, { "docid": "2a277cd529f0f474490c6356f7b55901", "score": "0.5050749", "text": "def labels(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"labels\")", "title": "" }, { "docid": "2a277cd529f0f474490c6356f7b55901", "score": "0.5050749", "text": "def labels(self) -> pulumi.Output[Mapping[str, str]]:\n return pulumi.get(self, \"labels\")", "title": "" }, { "docid": "b45af93715635948522e972981dd6120", "score": "0.5049098", "text": "def state_to_dict(self):\n self.subsetDict = {}\n for butTyp, buttons in self.toggleStates.items():\n l = []\n for butName, state in buttons.items():\n if state is True:\n label = 
self.builder.get_object(butName).get_label()\n l.append(label)\n self.subsetDict[butTyp] = l\n\n self.subsetDict['MeanFit'] = \\\n self.builder.get_object('meanFitSpinButton').get_value()", "title": "" }, { "docid": "095634c58a7f68b63677c96733401452", "score": "0.5043671", "text": "def to_dict(self, target):\n\n\t\tret_dic = {}\n\n\t\tret_dic[\"name\"] = target.get_name()\n\n\t\treturn ret_dic", "title": "" }, { "docid": "1eb70d06191a636f50c62fbed2d84c5e", "score": "0.5039761", "text": "def label_to_name(self, label):\n return self.labels[label]", "title": "" } ]
7ce80f9f8797379e4a6aa6862d2a224d
Update the cell with new input x_t = the new input
[ { "docid": "181e455170603e85e323fcba7a64b0ba", "score": "0.78416604", "text": "def update(self, x_t):\n # the output of the previous update\n h_t_0 = self.h_t\n \n # the previous cell content \n C_t_0 = self.C_t\n\n # step 1: Decide what information to forget\n f_t = forget_gate(h_t_0, x_t, self.W_f, self.U_f, self.b_f)\n\n # step 2: Decide what information to put in the cell\n # 2.1 input gate decides which values to update\n # 2.2 tanh layer creates vector of new candidate values\n i_t, Ctilde_t = input_gate(h_t_0, x_t, self.W_i, self.U_i, self.b_i, self.W_c, self.U_c, self.b_c)\n\n # step 3: Update old cell staet Ct-1 into new cell state Ct\n # 3.1 multiply old state by ft to forget\n # 3.2 add it * C_t, new scaled candidates\n C_t = update_cell_state(f_t, C_t_0, i_t, Ctilde_t)\n\n # step 4: Decide what the output is\n # 4.1 Run a signmoid layer to decide what part to output\n # 4.2 Put a cell through tanh -> transform to range [-1, 1]\n h_t = output_gate(h_t_0, x_t, self.W_o, self.U_o, self.b_o, C_t)\n\n # Update cell values, output and the time\n self.h_t = h_t\n self.C_t = C_t\n self.t += 1", "title": "" } ]
[ { "docid": "9b44555f42e0d42190d1746033231b46", "score": "0.70777196", "text": "def Updatev(Cell,dt):", "title": "" }, { "docid": "c6a57999dc4ecee8ddd36a67f5fc584f", "score": "0.680216", "text": "def recurrent_cell(self, st, t):\n\n\t\tz = self.z[t-par['latency'],..., cp.newaxis]\n\t\tx = self.input_data[t,:,:,cp.newaxis]\n\n\t\t# Update the input traces based on presynaptic spikes\n\t\tcurr_beta = self.con_dict[par['spike_model']]['beta']\n\t\tst['ia'] = curr_beta * st['ia'] + (1-curr_beta) * self.eff_var['W_in'] * x\n\t\tst['ir'] = curr_beta * st['ir'] + (1-curr_beta) * self.eff_var['W_rnn'] * st['sx'] * st['su'] * z\n\n\t\tst['ja'] = curr_beta * st['ja'] + (1-curr_beta) * x\n\t\tst['jr'] = curr_beta * st['jr'] + (1-curr_beta) * st['sx'] * st['su'] * z\n\n\t\t#print( 'I', cp.mean(st['ia']), cp.mean(st['ir']))\n\n\t\t# Update the synaptic plasticity state (recurrent only; input is static)\n\t\tst['sx'], st['su'] = \\\n\t\t\tsynaptic_plasticity(st['sx'], st['su'], z, self.con_dict, par['use_stp'])\n\n\t\t# Sum the input currents into shape [batch x postsynaptic]\n\t\tI = cp.sum(st['ia'], axis=1, keepdims=True) + cp.sum(st['ir'], axis=1, keepdims=True)\n\n\t\t# Update the AdEx cell state with the input current\n\t\tst['v'], st['w'], self.z[t,...] = run_spike_model(st['v'], st['w'], I, par['spike_model'], self.con_dict[par['spike_model']])\n\n\t\t# Update output trace based on postsynaptic cell state (Eq. 12)\n\t\tself.y[t,...] = self.con_dict[par['spike_model']]['kappa'] * self.y[t-1,...] + self.z[t,...] @ self.eff_var['W_out'] + self.eff_var['b_out']\n\n\t\t# Calculate h, the pseudo-derivative (Eq. 5, ~24, 20/21)\n\t\t# Bellec et al., 2018b\n\t\tif par['spike_model'] == 'adex':\n\t\t\tT = self.con_dict['adex']['V_T'] + par['betagrad']\n\t\telif par['spike_model'] == 'izhi':\n\t\t\tT = self.con_dict['izhi']['c'] + par['betagrad']\n\t\telse:\n\t\t\traise Exception('Unimplemented pseudo-derivative.')\n\n\t\tself.h[t,...] 
= cp.squeeze(par['gamma_psd'] * cp.maximum(0., \\\n\t\t\t1 - cp.abs(st['v'] - T)/par['pseudo_th']))\n\n\n\t\t#h = par['gamma_psd'] * cp.maximum(0., 1 - cp.abs((st['v'] + 40e-3)/par['pseudo_th']))\n\t\t#h = par['gamma_psd'] * cp.ones_like(h)\n\t\treturn st, I", "title": "" }, { "docid": "dc0f3904f07f7bcde140e1aaaf940c80", "score": "0.6566709", "text": "def updateX(self):\n pass", "title": "" }, { "docid": "c1e2463db041448fceeabccb2a4a6b88", "score": "0.646216", "text": "def update_state(s_t, x_t1_col):\n x_t1 = resize(rgb2gray(x_t1_col), (84, 84))\n x_t1 = np.reshape(x_t1, (84, 84, 1))\n s_t1 = np.append(x_t1, s_t[:,:,:3], axis = 2)\n return s_t1", "title": "" }, { "docid": "78134ac058a0b38a33fce84ad5e1877e", "score": "0.64159375", "text": "def update(self, t=1):\n self.pos = np.add(self.pos, t * self.v + self.acc * (1/2 * t ** 2))\n self.v = np.add(self.v, t * self.acc)", "title": "" }, { "docid": "a3f40f73b1c51c8cff1c780e09077448", "score": "0.64041686", "text": "def update_x(self):\n self.x += self.v*self.dt\n return self.x", "title": "" }, { "docid": "9046340e02eafb4ee9a95513082431df", "score": "0.63605344", "text": "def update(self, x: numbers.Number):\n raise NotImplementedError", "title": "" }, { "docid": "aaaf263f71ab36ff80241bcb1aae0332", "score": "0.6114706", "text": "def update(self, x=None, y=None):\n if x:\n self.x = x\n elif y:\n self.y = y\n self.t += 1", "title": "" }, { "docid": "53229c5c445f360c702bd26bf38780a2", "score": "0.5980773", "text": "def update_x(x_current_l, v_current_l, a_current_l, dt_l):\n\n x_new = x_current_l + v_current_l * dt_l + 0.5 * a_current_l * (dt_l ** 2)\n\n return x_new", "title": "" }, { "docid": "358eb0c7aac3a64c2245a51548dee07b", "score": "0.5942913", "text": "def update(self, t, dt):\n\n festim.update_expressions(self.expressions, t)\n\n converged = False\n u_ = Function(self.u.function_space())\n u_.assign(self.u)\n while converged is False:\n self.u.assign(u_)\n nb_it, converged = self.solve_once()\n if dt.adaptive_stepsize is not None:\n dt.adapt(t, nb_it, converged)\n\n # Update previous solutions\n self.update_previous_solutions()\n\n # Solve extrinsic traps formulation\n self.traps.solve_extrinsic_traps()", "title": "" }, { "docid": "1043adf7a7ebbc786aaddcd414d062dc", "score": "0.59254026", "text": "def update(self, value, t):\n self._freshen(t)\n self.weight += value", "title": "" }, { "docid": "c63c597d065ebc59a11a45c8ccd2a338", "score": "0.59201956", "text": "def propogate_state(x_t_prev, u_t, d_t):\n \"\"\"STUDENT CODE START\"\"\"\n #x_bar_t = np.empty()\n \"\"\"STUDENT CODE END\"\"\"\n\n return #x_bar_t", "title": "" }, { "docid": "2be5a9de279fd42c61aef124a0fa10d7", "score": "0.58918077", "text": "def update(self, input_val, t_step):\n \n self.ode.set_f_params(input_val)\n self.ode.integrate(self.ode.t + t_step)\n return self.ode.successful()", "title": "" }, { "docid": "21f29b7ea1ef341c8abb8e33dab2025a", "score": "0.58527356", "text": "def update(self, x, y):", "title": "" }, { "docid": "21f29b7ea1ef341c8abb8e33dab2025a", "score": "0.58527356", "text": "def update(self, x, y):", "title": "" }, { "docid": "21f29b7ea1ef341c8abb8e33dab2025a", "score": "0.58527356", "text": "def update(self, x, y):", "title": "" }, { "docid": "8c2b004e391c01b02d326434c0cc5f7d", "score": "0.58463764", "text": "def UpdateU(Cell,dt):\n\tfor i in Cell:\n\t\tfac = dt/Cell[i][\"V\"]\n\t\t\n\t\tCell[i][\"Unew\"] = 
Cell[i][\"U\"]-fac*(\n\t\tCell[i][\"Side2\"][4]*Cell[i][\"Normalvector2\"][0]+\n\t\tCell[i][\"Side4\"][4]*Cell[i][\"Normalvector4\"][0]\n\t\t)\n\t\t\n\t\t\n\t\t#print(\"Side2 -> {}\".format(Cell[i][\"Side2\"][4]*Cell[i][\"Normalvector2\"][0]))\n\n\t\t#print(\"Side4 -> {}\".format(Cell[i][\"Side4\"][4]*Cell[i][\"Normalvector4\"][0]))\n\n\t\n\tfor i in Cell:\n\t\tCell[i][\"U\"] = Cell[i][\"Unew\"]", "title": "" }, { "docid": "66c7ab07460389fabcb05cd0664548a7", "score": "0.5781401", "text": "def set_cval(self, x, y, val):\n self.t.update_cell(y, x, val)", "title": "" }, { "docid": "a18f43ed60b5e10dbaf0526a5f080d39", "score": "0.57679224", "text": "def update(self, X, accepted, S, r):\n self.t += S + accepted*X # update time state", "title": "" }, { "docid": "a18f43ed60b5e10dbaf0526a5f080d39", "score": "0.57679224", "text": "def update(self, X, accepted, S, r):\n self.t += S + accepted*X # update time state", "title": "" }, { "docid": "02810d5df0bf0c908f57547b38bd13db", "score": "0.575226", "text": "def b_node_set_button(self, event):\n\n new_x = float(self.x_input.GetValue())\n new_y = float(self.y_input.GetValue())\n\n if self.i == self.node_layer_reference:\n xt = np.array(self.plotx)\n yt = np.array(self.ploty)\n\n if self.boundary_lock_list[self.i] == 0 and self.index_node is not None:\n if xt[self.index_node] == 0 and yt[self.index_node] != 0.001:\n xt[self.index_node] = 0 # REPLACE OLD X WITH NEW X\n yt[self.index_node] = new_y # REPLACE OLD Y WITH NEW Y\n elif xt[self.index_node] == self.x2 and yt[self.index_node] != 0.001:\n xt[self.index_node] = self.x2 # REPLACE OLD X WITH NEW X\n yt[self.index_node] = new_y # REPLACE OLD Y WITH NEW Y\n elif xt[self.index_node] == 0 and yt[self.index_node] == 0.001:\n xt[self.index_node] = 0 # REPLACE OLD X WITH NEW X\n yt[self.index_node] = 0.001 # REPLACE OLD Y WITH NEW Y\n elif xt[self.index_node] == self.x2 and yt[self.index_node] == 0.001:\n xt[self.index_node] = self.x2 # REPLACE OLD X WITH NEW X\n yt[self.index_node] = 0.001 # REPLACE OLD Y WITH NEW Y\n elif new_y <= 0:\n xt[self.index_node] = new_x # REPLACE OLD X WITH NEW X\n yt[self.index_node] = 0.001 # REPLACE OLD Y WITH NEW Y\n else:\n xt[self.index_node] = new_x # REPLACE OLD X WITH NEW X\n yt[self.index_node] = new_y # REPLACE OLD Y WITH NEW Y\n elif self.boundary_lock_list[self.i] == 1:\n if new_y <= 0:\n xt[self.index_node] = new_x # REPLACE OLD X WITH NEW X\n yt[self.index_node] = 0.001 # REPLACE OLD Y WITH NEW Y\n else:\n xt[self.index_node] = new_x # REPLACE OLD X WITH NEW X\n yt[self.index_node] = new_y # REPLACE OLD Y WITH NEW Y\n\n # DEAL WITH PINCHED NODE\n if self.pinch_switch != 0:\n for k in range(0, len(self.index_arg2_list)):\n if self.index_arg2_list[k] is not None:\n next_x_list = self.plotx_list[k]\n next_y_list = self.ploty_list[k] # GET THE NODE LIST OF THE NEXT LAYER\n next_x_list[self.index_arg2_list[k]] = new_x\n next_y_list[self.index_arg2_list[k]] = new_y # REPLACE THE PINCHED NODE WITH THE NEW NODE\n self.plotx_list[k] = next_x_list\n self.ploty_list[k] = next_y_list # OVERWRITE THE NODE LIST WITH UPDATED LIST\n\n self.plotx = xt\n self.ploty = yt\n self.polyline.set_data(self.plotx, self.ploty)\n\n # COLOR CURRENTLY SELECTED NODE RED\n self.current_node.set_offsets([new_x, new_y])\n else:\n pass\n\n # UPDATE LAYER DATA\n self.set_density(self)\n self.set_susceptibility(self)\n self.set_angle_a(self)\n self.set_angle_b(self)\n\n # UPDATE GMG\n self.update_layer_data()\n self.run_algorithms()", "title": "" }, { "docid": "f8e0915abc6df761f4b67f43fe5e2885", 
"score": "0.5723508", "text": "def _update_eta(self, t) -> None:\n # self.eta *= DTYPE((t + 1) / (t + 2))", "title": "" }, { "docid": "e4d2e0cd8669d4dcb3fefc565c9a8bb8", "score": "0.5714379", "text": "def OnCellEdit(self,event):\n row,col = (event.GetRow(),event.GetCol())\n cval = self.GetCellValue(row,col)\n if cval.startswith(\"=\"):\n try:\n cval = str(eval(cval[1:]))\n self.SetCellValue(row,col,cval)\n except:\n pass\n try:\n cval = float(cval)\n except ValueError:\n cval = np.nan\n self.SetCellValue(row,col,str(cval))", "title": "" }, { "docid": "ed9714fab24a235615465ff53a2f338b", "score": "0.56973976", "text": "def insert_number(self, x,y,value):\n M = self.actual_state.reshape((9,9))\n M[x][y] = value\n self.actual_state= M.reshape(-1)", "title": "" }, { "docid": "b4b0c92854b38870a16f8441e55bd9e3", "score": "0.5661427", "text": "def update_temperature(self):\n self.T = self.T - (self.T0 / self.iterations)\n\n # Exponential would look like this:\n # alpha = 0.99\n # self.T = self.T * alpha\n\n # where alpha can be any value below 1", "title": "" }, { "docid": "c6e87a26a2018dcdce811aa7e50dffda", "score": "0.5647182", "text": "def update(self, t, x):\n # For readability\n g = self.g\n m_p, m_c = self.m_p, self.m_c\n l = self.l\n s, c = np.sin(x[2]), np.cos(x[2])\n u = self.u(t, x)\n dx = np.zeros((4,))\n\n if not self.manipulator_eqns:\n den = (m_c + m_p*s**2)\n q_ddot = np.zeros((2,1))\n q_ddot[0] = (1/den)*(u + m_p*s*(l*x[3]**2 + g*c))\n q_ddot[1] = (1/(l*den))*(-u*c - m_p*l*x[3]**2*c*s - (m_c + m_p)*g*s)\n else:\n # Mass matrix\n M = np.array([[ m_c + m_p, m_p*l*c ],\n [ m_p*l*c, m_p*l**2]])\n # Coriolis terms\n C = np.array([[ 0, -m_p*l*x[3]*s],\n [ 0, 0 ]])\n # Torque\n tau_g = -g*np.array([[ 0 ],\n [ m_p*l*s]])\n # B\n B = np.array([[ 1],\n [ 0]])\n # Calculate angular acceleration\n q_dot = np.array([[x[1]],\n [x[3]]])\n Cv = np.dot(C, q_dot)\n q_ddot = np.dot(np.linalg.inv(M), (tau_g + np.dot(B, u) - Cv))\n\n dx[0] = x[1]\n dx[1] = q_ddot[0]\n dx[2] = x[3]\n dx[3] = q_ddot[1]\n return dx", "title": "" }, { "docid": "2edbb8665b5b87c014d43afdb8d4f83c", "score": "0.5644153", "text": "def xt_calculate(self):\r\n\r\n for i in range(0, len(self.obs.obt_g)):\r\n t = float(self.obs.obt[i]) / self.nu_t\r\n self.t.append(t)\r\n self.t_err.append(0.5 * (1.0 / 2.0) / self.nu_t) # 0.5 is bar\r\n x = float(self.obs.obx[i]) / self.nu_x\r\n self.x.append(x)\r\n print 'coordinate calculated'", "title": "" }, { "docid": "c7acaecb0f5998785d0f1ead39ebe967", "score": "0.5632091", "text": "def setCell(self, x, y, val):\r\n\t\tself.list[x][y].set(val)", "title": "" }, { "docid": "569abfee28be703d1ad95a8bad198937", "score": "0.56252295", "text": "def update_matrix(self):\r\n Xleft, Xright = self.X[1:-1,:-2], self.X[1:-1,2:]\r\n Xtop, Xbottom = self.X[:-2,1:-1], self.X[2:,1:-1]\r\n self.X[1:-1,1:-1] += self.dt*(Xleft+Xright+Xtop+Xbottom-4*self.X[1:-1,1:-1])/self.dx**2", "title": "" }, { "docid": "72041ce1f73d7550243e90e61300dc6b", "score": "0.562036", "text": "def update(self,x,win,t):\n # eta(t) = eta(0) / (1 + t/T)\n # keeps the learning rate nearly constant for the first T iterations and then adjusts it\n eta = self.learning_rate/(1+t/self.T)\n sig = self.sigma/(1+t/self.T) # sigma and learning rate decrease with the same rule\n g = self.neighborhood(win,sig)*eta # improves the performances\n it = nditer(g, flags=['multi_index'])\n while not it.finished:\n # eta * neighborhood_function * (x-w)\n self.weights[it.multi_index] += g[it.multi_index]*(x-self.weights[it.multi_index])\n # normalization\n 
#self.weights[it.multi_index] = self.weights[it.multi_index] / linalg.norm(self.weights[it.multi_index])\n it.iternext()", "title": "" }, { "docid": "ede566b0e2fd8a6f7ae19e7896196b71", "score": "0.5601493", "text": "def update(self):\n self.x += self.vx", "title": "" }, { "docid": "ede566b0e2fd8a6f7ae19e7896196b71", "score": "0.5601493", "text": "def update(self):\n self.x += self.vx", "title": "" }, { "docid": "4d8bb3ff7e80fbcb029346c58621d855", "score": "0.55964744", "text": "def update(self, x):\n if self.first_pass:\n self.means = np.mean(x, axis=0)\n self.vars = np.var(x, axis=0)\n self.m = x.shape[0]\n self.first_pass = False\n else:\n n = x.shape[0]\n new_data_var = np.var(x, axis=0)\n new_data_mean = np.mean(x, axis=0)\n new_data_mean_sq = np.square(new_data_mean)\n new_means = ((self.means * self.m) + (new_data_mean * n)) / (self.m + n)\n self.vars = (((self.m * (self.vars + np.square(self.means))) +\n (n * (new_data_var + new_data_mean_sq))) / (self.m + n) -\n np.square(new_means))\n self.vars = np.maximum(0.0, self.vars)\n self.means = new_means\n self.m += n", "title": "" }, { "docid": "b93147f44ff41df185549d0c27733cc5", "score": "0.55931485", "text": "def update_tau(self, r, s, val):\n self.tau_mat[r][s] = val", "title": "" }, { "docid": "b8f4733e6b2fac54e3b85a9f88c33070", "score": "0.55798584", "text": "def _update1D(self, x, output):\n msg = \"Plot updating ... \"\n wx.PostEvent(self.parent, StatusEvent(status=msg, type=\"update\"))", "title": "" }, { "docid": "8c0b4365f11b4a262e9ef84d0057a8ca", "score": "0.5555576", "text": "def update(self, t):\n x = np.zeros((3,))\n x_dot = np.zeros((3,))\n x_ddot = np.zeros((3,))\n x_dddot = np.zeros((3,))\n x_ddddot = np.zeros((3,))\n yaw = 0\n yaw_dot = 0\n\n # STUDENT CODE HERE\n\n flat_output = {'x': x, 'x_dot': x_dot, 'x_ddot': x_ddot, 'x_dddot': x_dddot, 'x_ddddot': x_ddddot,\n 'yaw': yaw, 'yaw_dot': yaw_dot}\n\n num_points = len(self.points)\n distances = np.zeros((num_points - 1, 3)) # matrix of distances between points\n for i in range(num_points - 1): # calculate distances between points\n distances[i] = self.points[i + 1] - self.points[i]\n\n times = np.linalg.norm(distances, axis=1) # calculate times btwn points based on distance\n times = np.cumsum(times)\n # times = times.reshape(-1, 1)\n times = np.insert(times, 0, 0, axis=0) # add t = 0 to first element of time vector\n times = times / 3.0\n\n for ind, j in enumerate(times):\n if t == times[0]: # QC at initial position at t = 0\n x = self.points[0]\n break\n elif t >= times[-1]: # if actual time is greater than time for final position, keep QC at final position\n x = self.points[-1]\n break\n elif ind == self.points.shape[0] - 1:\n break\n elif j < t < times[ind + 1]:\n x[0], x_dot[0], x_ddot[0] = self.posvel(ind, self.points[ind], times, t, flat_output, 'x')\n x[1], x_dot[1], x_ddot[1] = self.posvel(ind, self.points[ind], times, t, flat_output, 'y')\n x[2], x_dot[2], x_ddot[2] = self.posvel(ind, self.points[ind], times, t, flat_output, 'z')\n # x[0], x_dot[0] = self.posvel(ind, self.points[ind], times, t, flat_output, 'x')\n # x[1], x_dot[1] = self.posvel(ind, self.points[ind], times, t, flat_output, 'y')\n # x[2], x_dot[2] = self.posvel(ind, self.points[ind], times, t, flat_output, 'z')\n\n x_ddot = x_ddot.clip(max=3)\n x_ddot = x_ddot.clip(min=-3)\n x_dot = x_dot.clip(max=2)\n x_dot = x_dot.clip(min=-2)\n\n\n flat_output = {'x': x, 'x_dot': x_dot, 'x_ddot': x_ddot, 'x_dddot': x_dddot, 'x_ddddot': x_ddddot,\n 'yaw': yaw, 'yaw_dot': yaw_dot}\n return flat_output", 
"title": "" }, { "docid": "a03f5fc1aa9a5753b03d466b43b55ce6", "score": "0.55434877", "text": "def update(self):\n for i in range(len(self.previous)-1):\n xi = self.previous[i]\n xip1 = self.previous[i+1]\n for field in xi:\n field.assign(xip1(field.name()))\n for field in self.n:\n field.assign(self.np1(field.name()))", "title": "" }, { "docid": "ab6e6d4682da055accac164032d4f637", "score": "0.55409485", "text": "def sketch(self, val):\n row, col = self.selected\n self.cells[row][col].set_temp(val)", "title": "" }, { "docid": "10cb68592baf69606f5ada6df7624261", "score": "0.55309683", "text": "def set_x(self, new_x):\r\n check_value(new_x)\r\n self.x = new_x", "title": "" }, { "docid": "704829bdd6fd42e5a71e54f9314ead05", "score": "0.5528827", "text": "def update(self, x):\n if self.first_pass:\n self.means = np.mean(x, axis=0)\n self.vars = np.var(x, axis=0)\n self.m = x.shape[0]\n self.first_pass = False\n else:\n n = x.shape[0]\n new_data_var = np.var(x, axis=0)\n new_data_mean = np.mean(x, axis=0)\n new_data_mean_sq = np.square(new_data_mean)\n new_means = ((self.means * self.m) + (new_data_mean * n)) / (self.m + n)\n self.vars = (((self.m * (self.vars + np.square(self.means))) +\n (n * (new_data_var + new_data_mean_sq))) / (self.m + n) -\n np.square(new_means))\n self.vars = np.maximum(0.0, self.vars) # occasionally goes negative, clip\n self.means = new_means\n self.m += n", "title": "" }, { "docid": "979b53148475dbc04cce2d71376eb002", "score": "0.5515013", "text": "def update(self, x):\n diff = (x - self.W) # compute difference matrix\n bmu_idx = self.find_bmu(x, diff_matrix=diff)\n u = self.W[bmu_idx,:]\n\n for n in range(self.W.shape[0]):\n v = tf.expand_dims(self.W[n,:],axis=0)\n dist_uv = float( tf.norm(u - v, ord=\"euclidean\") )\n if dist_uv < (self.radius * self.radius):\n oh = tf.expand_dims(tf.one_hot(n, depth=self.W.shape[0]),axis=1)\n wght = self.calc_neighborhood_weights(u, v, sigma=self.radius)\n dW = tf.matmul(oh, (x - v) * wght * self.eta)\n self.W = self.W + dW\n self.s += 1", "title": "" }, { "docid": "18f7bfeca0c45840d82cb6e8cfb06333", "score": "0.5498006", "text": "def set_cell(self, x):\n if isinstance(x, str): \n apart = None # letter part of a1-style reference\n npart = None # numeric part of a1-style reference \n \n #SN_1 shorter version\n search_result = re.search(\"^(\\D*)(\\d*)$\", x)\n if search_result is not None:\n apart = search_result.group(1)\n npart = search_result.group(2)\n # ***\n \n if apart and npart:\n a1ref = apart.upper() + npart\n self.rowx, self.colx = xlsxwriter.utility.xl_cell_to_rowcol(a1ref) \n elif npart:\n rowx = int(npart) \n elif apart:\n _proxy_cell = apart.upper() + '1'\n self.colx = xlsxwriter.utility.xl_cell_to_rowcol(_proxy_cell)[1] \n elif isinstance(x,int):\n self.rowx = x - 1", "title": "" }, { "docid": "fe26268a8253b9ea378461263852bffe", "score": "0.54898095", "text": "def ForcePoint(self,x_next):\n \n # change position of interface and get resulting y-value\n self.mi.setX(x_next)\n if(self.acq_func[0] == 'testEI'):\n (x_new, y_new) = (x_next, self.acq_func[2].iloc[ind,-1])\n else:\n (x_new, y_new) = self.mi.getState()\n # add new entry to observed data\n self.X_obs = np.concatenate((self.X_obs,x_new),axis=0)\n self.Y_obs.append(y_new)\n \n # update the model (may want to add noise if using testEI)\n self.model.update(x_new, y_new)", "title": "" }, { "docid": "4a4daa4082d05f1271091a4966977ed4", "score": "0.5485281", "text": "def update(self, xnew, ynew):\n y = np.concatenate((self.y, ynew), axis=0)\n X = 
np.concatenate((self.X, xnew), axis=0)\n self.fit(X, y)", "title": "" }, { "docid": "4a4daa4082d05f1271091a4966977ed4", "score": "0.5485281", "text": "def update(self, xnew, ynew):\n y = np.concatenate((self.y, ynew), axis=0)\n X = np.concatenate((self.X, xnew), axis=0)\n self.fit(X, y)", "title": "" }, { "docid": "1c91b80ef69c13a91c9b8795ceb8ac16", "score": "0.54694533", "text": "def update_hyperparams(self):\n self.eta_t = self.eta_t * self.f\n if self.epoch < self.T:\n self.p_t = (1 - ((self.epoch + 1) / self.T)) * self.p_i + \\\n ((self.epoch + 1) / self.T) * self.p_f\n else:\n self.p_t = self.p_f", "title": "" }, { "docid": "f81cb9ba3bd0fd01e4f7ffce7e1cc973", "score": "0.546051", "text": "def update(self, x, win, t):\n eta = self._decay_function(self.learning_rate, t, self.T)\n sig = self._decay_function(self.sigma, t, self.T) # sigma and learning rate decrease with the same rule\n g = self.neighborhood(win, sig)*eta # improves the performances\n it = nditer(g, flags=['multi_index'])\n while not it.finished:\n # eta * neighborhood_function * (x-w)\n self.weights[it.multi_index] += g[it.multi_index]*(x-self.weights[it.multi_index])\n # normalization\n self.weights[it.multi_index] = self.weights[it.multi_index] / fast_norm(self.weights[it.multi_index])\n it.iternext()", "title": "" }, { "docid": "689988474378fdff280486b1f5479684", "score": "0.54528725", "text": "def setCell(self, x, y, val, updateGUI):\r\n\t\t# Set the grid value for the puzzle at x-y to val\r\n\t\tself.grid[x][y] = val\r\n\t\tif val != 0:\r\n\t\t\t# Only update the nodes expanded if we are not resetting the cell value back to 0\r\n\t\t\tself.nodesExpanded += 1\r\n\t\tif updateGUI and not self.commandLine:\r\n\t\t\t# If asked, update the gui with new cell values too\r\n\t\t\tself.gui.setCell(x,y,val)\r\n\t\t\tself.update_gui()", "title": "" }, { "docid": "75ee58d618b9dd67a78f3a9fb947287c", "score": "0.54461056", "text": "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._cells[row][col] = value", "title": "" }, { "docid": "94e880159ec2e10310cdc73222be0a89", "score": "0.54388535", "text": "def save_X(self, x):\n self.x = x", "title": "" }, { "docid": "c721f14d2b1912f0b3c13b9efa8e3138", "score": "0.5433696", "text": "def update_state(self, clock, new_data):\r\n # self.system_stat.iloc[clock, 1:9] = self.system_stat.iloc[clock, 1:9] #+ new_data\r\n self.system_stat.iloc[clock,\r\n ] = self.system_stat.iloc[clock, ] + new_data\r\n # self.system_stat.loc[clock, 'E'] = E\r", "title": "" }, { "docid": "a37e2653d996282d28256017fef8e0b5", "score": "0.54266226", "text": "def propagate_qttt_to_ttt(self):\n self.ttt.update_ttt_from_qttt(self.board)", "title": "" }, { "docid": "85bd27b2c1ed95b032f0173ff0607dcc", "score": "0.5407221", "text": "def update(self,t=1):\n force=Force.sum(self.forces)\n print(\"MyMaterialPoint:force:\",force)\n x,y=force\n acceleration=Vector(x,y)/self.mass\n self.motion.setAcceleration(acceleration)\n self.motion.update(t)", "title": "" }, { "docid": "efded9597a8faab8809539e764fbcb9d", "score": "0.5395012", "text": "def update(self):\n self.Tax = self.tw*self.w*self.L + self.tr*self.r*(self.A+self.D)\n self.G = self.gy*self.C\n self.Tr = (self.Tax+self.Beq-self.G)/self.Pt\n self.tb = self.zeta*(1-self.tw)*self.Pr/(self.L+self.zeta*self.Pr)\n self.b = self.zeta*(1-self.tw-self.tb)*self.w\n self.p = array([self.r, self.w, self.b, self.tr, self.tw, self.tb, \n self.Tr, self.qh, self.qr])", "title": "" }, { "docid": "ba910dde69e271e3d23f81df2685b86c", "score": "0.53934026", "text": 
"def put_x(self, x, y):\r\n self.field[y - 1][x - 1] = 1", "title": "" }, { "docid": "c3beedb2149ba231696661cbf88c7aa7", "score": "0.5391946", "text": "def move_poly(self, xt=1000, yt=1000):\n index = self.get_id_input()\n if index:\n self.df.loc[self.df.index == index, self.columns_x] = \\\n self.df.loc[self.df.index == index].apply(lambda x: x[self.columns_x]+xt, axis=1)\n self.df.loc[self.df.index == index, self.columns_y] = \\\n self.df.loc[self.df.index == index].apply(lambda x: x[self.columns_y]+yt, axis=1)\n return self.df.loc[self.df.index == index]\n return None", "title": "" }, { "docid": "9fcf259bbcdce1a1391428b80c7e9461", "score": "0.5386673", "text": "def __update_t(t_opt, window_size):\n global t_min\n t_min = max(t0_min, round(t_opt - window_size//2))\n global t_max\n t_max = round(t_opt + window_size//2)\n global n\n n = t_max*2", "title": "" }, { "docid": "c8d521b8098318c0e49273eac6fb04c6", "score": "0.538591", "text": "def eval_cell(self, value, x, ufc_cell):\n\t\tglobal area_fracs\n\t\tvalue[0] = area_fracs[ufc_cell.index]", "title": "" }, { "docid": "cc02479d90ad7f17585bc353ab12b2a6", "score": "0.5382476", "text": "def update(self, x, y, d):\n raise NotImplementedError", "title": "" }, { "docid": "e1936a0b8bc9e2279b587fc30ca1872a", "score": "0.53799874", "text": "def update_cell(self, row, col, val):\n self.update_acell( Worksheet.get_addr_int(row,col), val=unicode(val))", "title": "" }, { "docid": "e4a4bcdc8d885829ea34bc8c1ba9c31b", "score": "0.5379168", "text": "def update_orig(self, inp=None):\n \n if inp is None:\n inp = self.arrtemp\n inp[self.arrmask < MASK_LIM] = np.nan\n \n self.arrorig = inp.copy()\n self.ptsorig = copy.deepcopy(self.ptstemp)\n self.oriorig = copy.deepcopy(self.oritemp)\n self.parsorig = copy.deepcopy(self.parstemp)\n \n self.reset_temp()", "title": "" }, { "docid": "b11f63001f61f77d9ff28c208dae81be", "score": "0.5374063", "text": "def xt_accurate(self):\r\n\r\n self.t_acc.append(0.0)\r\n err = 0.0\r\n self.t_local_err.append(err)\r\n for i in range(1, len(self.obs.obt_g)):\r\n an = math.sqrt(float(self.obs.obt_g[i]) ** 2 + self.x[i]\r\n ** 2)\r\n self.t_acc.append(an)\r\n err = math.fabs(100.0 * (an - self.t[i]) / an)\r\n self.t_local_err.append(err)\r\n print 'accurate coordinate calculated'", "title": "" }, { "docid": "b072f35d4b1e79933de5a836c1b48a0a", "score": "0.5371576", "text": "def set_cells(self, x: int, y: int, kind: CellType, w = 1, h = 1):\n for col in range(x, x + w):\n for row in range(y, y + h):\n self.grid[row * self.rows + col] = CellType(kind.value)\n self.updates.append((col, row, kind))", "title": "" }, { "docid": "ed834a351e2fd8425b22e4f882e91ae4", "score": "0.5368172", "text": "def tick(self):\n self.setup_cells()\n\n updated = []\n for i in range(self.rows):\n this_row = []\n for j in range(self.cols):\n this_cell = self.get_cell(i, j)\n result_identity = None\n if not this_cell.static: # not only inactive cells surrounding\n for rule in RULES[this_cell.identity]:\n result_identity = rule(this_cell)\n if result_identity: # breaks on first identity change\n break\n result_identity = result_identity or this_cell.identity # prioritizes result identity\n this_cell.ticks += 1\n this_row.append(CELLS_INDEX[result_identity])\n updated.append(this_row)\n\n self.initialize(updated)\n self.ticks += 1", "title": "" }, { "docid": "33aae1e67dcd78a279912000cf14e3ed", "score": "0.5357971", "text": "def update_model(self):\n self.model = [[self.cells[i][j].value for j in range(self.cols)] for i in range(self.rows)]", "title": "" }, { 
"docid": "f9533f0a837753ef5c86a307e3ddd38d", "score": "0.53511006", "text": "def opcode_Td(renderer, t_x, t_y):\n renderer.ts.m = PdfMatrix(1, 0, 0, 1, t_x, t_y)*renderer.ts.lm\n renderer.ts.reset_lm()", "title": "" }, { "docid": "597edfaa72307449bc62102045c73ea9", "score": "0.5340474", "text": "def set_tile(self, row, col, value): \n self.cells[row][col] = value", "title": "" }, { "docid": "11f37521d970b24987d40927023ee202", "score": "0.5324432", "text": "def _tnet_update(self):\n with tf.variable_scope('qnet'): \n self.update_opt = [t.assign(q) for t, q in zip(self.tnet.params, self.params)]", "title": "" }, { "docid": "ce855b59ff59026ea5670f0bdb6cb850", "score": "0.53202856", "text": "def update_params(self, X):\n batchsize = X.shape[0]\n self.deltaC = self.p_t * self.deltaC - \\\n (1 - self.p_t) * (self.eta_t / batchsize) * self.dC\n self.deltaB = self.p_t * self.deltaB - \\\n (1 - self.p_t) * (self.eta_t / batchsize) * self.db\n self.deltaM = self.p_t * self.deltaM - \\\n (1 - self.p_t) * (self.eta_t / batchsize) * self.dM\n self.deltaJ = self.p_t * self.deltaJ - \\\n (1 - self.p_t) * (self.eta_t / batchsize) * self.dJ\n self.deltaBj = self.p_t * self.deltaBj - \\\n (1 - self.p_t) * (self.eta_t / batchsize) * self.dBj\n self.deltaWhf = self.p_t * self.deltaWhf - \\\n (1 - self.p_t) * (self.eta_t / batchsize) * self.dWhf\n self.deltaWfv = self.p_t * self.deltaWfv - \\\n (1 - self.p_t) * (self.eta_t / batchsize) * self.dWfv\n self.deltaWfx = self.p_t * self.deltaWfx - \\\n (1 - self.p_t) * (self.eta_t / batchsize) * self.dWfx\n\n self.C = self.C + self.deltaC\n self.bw = self.bw + self.deltaB\n self.M = self.M + self.deltaM\n self.J = self.J + self.deltaJ\n self.bj = self.bj + self.deltaBj\n self.Wfv = self.Wfv + self.deltaWfv\n self.Wfx = self.Wfx + self.deltaWfx\n self.Whf = self.Whf + self.deltaWhf", "title": "" }, { "docid": "69276eef642e3d487fdea0d0180d62b6", "score": "0.53150904", "text": "def set_cell(self, x: int, y: int, state: CellState):\n if 0 <= y < len(self.grid) and 0 <= x < len(self.grid[y]):\n self.grid[y][x] = state", "title": "" }, { "docid": "923375c33f8e50856675541e3bb10837", "score": "0.5313385", "text": "def _update_pert_row(\n self,\n rxn_table_id,\n rxn_no,\n rxn,\n k_i,\n ind_len_west,\n ind_len_gav,\n ind_len_ng,\n cell_size_west,\n cell_size_gav,\n cell_size_ng,\n sens_ind_len_west,\n sens_ind_len_gav,\n sens_ind_len_ng,\n sens_cell_size_west,\n sens_cell_size_gav,\n sens_cell_size_ng,\n ):\n with self.con as con:\n cur = con.cursor()\n cur.execute(\n \"\"\"\n UPDATE {:s} SET \n k_i = :k_i,\n ind_len_west = :ind_len_west,\n ind_len_gav = :ind_len_gav,\n ind_len_ng = :ind_len_ng,\n cell_size_west = :cell_size_west,\n cell_size_gav = :cell_size_gav,\n cell_size_ng = :cell_size_ng,\n sens_ind_len_west = :sens_ind_len_west,\n sens_ind_len_gav = :sens_ind_len_gav,\n sens_ind_len_ng = :sens_ind_len_ng,\n sens_cell_size_west = :sens_cell_size_west,\n sens_cell_size_gav = :sens_cell_size_gav,\n sens_cell_size_ng = :sens_cell_size_ng\n WHERE\n rxn_no = :rxn_no AND\n rxn = :rxn\n \"\"\".format(rxn_table_id),\n {\n 'rxn_no': rxn_no,\n 'rxn': rxn,\n 'k_i': k_i,\n 'ind_len_west': ind_len_west,\n 'ind_len_gav': ind_len_gav,\n 'ind_len_ng': ind_len_ng,\n 'cell_size_west': cell_size_west,\n 'cell_size_gav': cell_size_gav,\n 'cell_size_ng': cell_size_ng,\n 'sens_ind_len_west': sens_ind_len_west,\n 'sens_ind_len_gav': sens_ind_len_gav,\n 'sens_ind_len_ng': sens_ind_len_ng,\n 'sens_cell_size_west': sens_cell_size_west,\n 'sens_cell_size_gav': sens_cell_size_gav,\n 
'sens_cell_size_ng': sens_cell_size_ng,\n }\n )", "title": "" }, { "docid": "e5429677997cadd71eb2727325c66f16", "score": "0.5309254", "text": "def set_cell(frame, data):\n with data.cell_:\n data.cell_[:,0] = [L, 0., 0.]\n data.cell_[:,1] = [0., L, 0.]\n data.cell_[:,2] = [0., 0., L]\n #cell origin\n data.cell_[:,3] = [0, 0 , 0]\n #set periodic boundary conditions\n data.cell_.pbc = (True, True, True)", "title": "" }, { "docid": "96846cc85d43990e9709b6d2c3c47e53", "score": "0.53056103", "text": "def update(self):\n vals = np.zeros(len(self.table_model._TableModel__x))\n for ipt in self.onPtSrc:\n vals += [\n self.likelihoodModel.pointSources[ipt].spectralModel.photonFlux(\n bounds[0], bounds[1]\n )\n for bounds in zip(self.e_lo, self.e_hi)\n ]\n # integrated fluxes over same energy bins as for dataset, according to Sherpa TableModel specs, TBV\n self.table_model._TableModel__y = vals", "title": "" }, { "docid": "1dfed14778d7ed29e0e4c6fa39ebe9dd", "score": "0.5283027", "text": "def set_value(self, tuple, item):\r\n self.cells[tuple[0]][tuple[1]] = item", "title": "" }, { "docid": "2adaf6996a7708274ac2b20e3b5fb1cd", "score": "0.5274169", "text": "def update_state(state, kernel, learning_rate, x_i, y_i):\n # *** START CODE HERE ***\n # Append to the last column\n length = len(state[0])\n\n # add a new row\n state[0].append(x_i)\n state[1].append(y_i)\n state[2].append([0.0])\n state[3].append([])\n index = 0 \n while index < length + 1:\n kernEnt = kernel(state[0][index], x_i)\n oldMargin = state[2][length][index] \n state[2][length].append(oldMargin + learning_rate * (state[1][index] - sign(state[2][index][index])) * kernEnt)\n state[3][length].append(kernEnt)\n index = index + 1\n\n index1 = 0 \n while index1 < length:\n kernEnt = kernel(x_i, state[0][index1])\n oldMargin = state[2][index1][length] \n #state[2][index] = oldMargin + learning_rate * (y_i - sign(oldMargin)) * kernEnt\n state[2][index1].append(oldMargin + learning_rate * (y_i - sign(state[2][length][length])) * kernEnt)\n state[3][index1].append(kernEnt)\n index1 = index1 + 1\n # *** END CODE HERE ***", "title": "" }, { "docid": "e222f5ab4ad18cba94fed235fcc9199a", "score": "0.52676314", "text": "def update_grid(self):\n for var in self.model.variables():\n if var.name[0:1] == 'x':\n coor = int(var.name[1:])\n xCoor = (coor - 1) % self.cols\n yCoor = math.floor((coor - 1) / self.cols)\n if self.grid[yCoor][xCoor] == 0:\n self.grid[yCoor][xCoor] = \"0\"\n continue\n if var.value() == 1:\n self.grid[yCoor][xCoor] = \"x\"\n else:\n self.grid[yCoor][xCoor] = \"1\"", "title": "" }, { "docid": "688dcec9c00d0f5213022276113c9504", "score": "0.52493984", "text": "def modify_cell_with(self, sheet_name, column, row, op, val):\n pass\n # my_sheet_index = self.workbook.sheet_names().index(sheet_name)\n # cell = self.workbook.get_sheet(my_sheet_index).cell(int(row), int(column))\n # curval = cell.value\n # if cell.ctype is XL_CELL_NUMBER:\n # self.workbook.sheets()\n # if not self.copied_workbook:\n # self.copied_workbook = copy(self.workbook)\n # plain = easyxf('')\n # modexpr = str(curval) + op + val\n # self.copied_workbook.get_sheet(my_sheet_index).write(int(row), int(column), eval(modexpr), plain)", "title": "" }, { "docid": "96e950612e1513a9f8b73fadd5da46d9", "score": "0.5247235", "text": "def update(self, t, state, flat_output):\r\n\r\n \"First extract the current (actual) position of the robot and the desired position derived from the waypoints\"\r\n ri = state[\"x\"] #expressed in inertial frame\r\n rT = flat_output[\"x\"] #expressed 
in inertial frame\r\n \"actual velocity of robot and desired velocity from waypoints\"\r\n vi = state[\"v\"] #expressed in inertial frame\r\n vT = flat_output[\"x_dot\"] #expressed in inertial frame\r\n \"Create the position and velocity error vector between those two\"\r\n e_pos = ri - rT\r\n e_vel = vi - vT\r\n rdotdotT = flat_output[\"x_ddot\"] #acceleration as returned from waypoints\r\n psiT = flat_output[\"yaw\"]\r\n\r\n \"From quaternions to euler angles\"\r\n rot = Rotation.from_quat(state[\"q\"])\r\n eulerAngles = rot.as_euler('zyx')\r\n psi = eulerAngles[0]\r\n theta = eulerAngles[1]\r\n phi = eulerAngles[2]\r\n\r\n kd = 5 #tune\r\n kp = 5 #tune\r\n\r\n kdMatrix = np.diagflat(kd*np.ones((1,3)))\r\n kpMatrix = np.diagflat(kp*np.ones((1,3)))\r\n\r\n rdotdotdes = rdotdotT - kdMatrix@e_vel - kpMatrix@e_pos\r\n\r\n \"Now we can compute u1 (thrust), θDes and φDes\"\r\n u1 = self.mass*(self.g + rdotdotdes[2]) #thrust control\r\n phiDes = rdotdotdes[0]*np.sin(psiT) - rdotdotdes[1]*np.cos(psiT)\r\n thetaDes = (rdotdotdes[0] - phiDes*np.sin(psiT)) / self.g*np.cos(psiT)\r\n\r\n \"Now we can compute u2\"\r\n I = np.diagflat([self.Ixx, self.Iyy, self.Izz])\r\n u2 = [email protected]([[-kp*(phi - phiDes)-kd*(state[\"w\"][0])],\r\n [-kp*(theta - thetaDes)-kd*(state[\"w\"][1])],\r\n [-kp*(psi - psiT)-kd*(state[\"w\"][2])]])\r\n\r\n \"From u2 to forces\"\r\n gamma = self.k_drag / self.k_thrust\r\n A = np.array([[1, 1, 1, 1],\r\n [0, self.arm_length, 0, -self.arm_length],\r\n [-self.arm_length, 0, self.arm_length, 0],\r\n [gamma, -gamma, gamma, -gamma]])\r\n\r\n forces = np.linalg.inv(A)@np.insert(u2,0,u1)\r\n\r\n \"From forces to motor speeds\"\r\n cmd_motor_speeds = np.squeeze(np.sqrt(1/self.k_thrust * forces))\r\n cmd_thrust = np.sum(forces)\r\n\r\n \"From motor speeds to moments\"\r\n cmd_moment = u2\r\n cmd_q = np.zeros((4,))\r\n\r\n # STUDENT CODE HERE\r\n\r\n control_input = {'cmd_motor_speeds':cmd_motor_speeds,\r\n 'cmd_thrust':cmd_thrust,\r\n 'cmd_moment':cmd_moment,\r\n 'cmd_q':cmd_q}\r\n return control_input", "title": "" }, { "docid": "0b744236c1978264254e5b5720381ad3", "score": "0.52438533", "text": "def __call__(self, t, x):\n #TODO: might want to integrate/average over the input so it isn't only based on the last timestep\n\n # Only perform a state transition if enough time has passed since the last one\n if t - self.last_t >= self.time_interval:\n # Get the action id of the closest action vector to the noisy input vector\n action = self.find_closest_action(x)\n # Perform that action to update the current state\n self.make_action(action)\n\n self.last_t = t\n\n return self.index_to_state_vector[self.current_state]", "title": "" }, { "docid": "f24268841eaaa34790f133c0f3ab2704", "score": "0.5241271", "text": "def _update(self, vals, params, dt):\n pass", "title": "" }, { "docid": "0683e66d002a85076de659838e02764f", "score": "0.5240061", "text": "def update_problem(self, x0, Xm):\n self.update_x0(x0)\n self.update_Xm(Xm)\n return", "title": "" }, { "docid": "3bee19d67300ffe0791ed32f7ba82db2", "score": "0.52332383", "text": "def rearrange(self, cell):\r\n pass", "title": "" }, { "docid": "4c75b247b5b86cf34e78da36650fc8e7", "score": "0.5227867", "text": "def set_initial_value(self, y, t=0.0):\n self.t = t\n self.y = y\n return self", "title": "" }, { "docid": "f630973511a4bf858f555f38f023e3ef", "score": "0.5221748", "text": "def rebase_to_one(self):\n self.x_data = self.x_data.T\n list_of_first_elem_price = [i[0] for i in self.x_data]\n shape_row, shape_column = self.x_data.shape[0], 
self.x_data.shape[1]\n\n for i in range(shape_row):\n for j in range(shape_column):\n self.x_data[i][j] /= list_of_first_elem_price[i]\n self.x_data = self.x_data.T", "title": "" }, { "docid": "bf81e8bcc5fae4af7f6700581a39586a", "score": "0.52135056", "text": "def update_cell(self, i: int, j: int, new_value: Cell) -> None:\n try:\n self.__cells[j][i] = new_value\n except IndexError:\n print(\"Trying to update a cell with index out of bounds\")", "title": "" }, { "docid": "ef0d64b2c6688a85479f74dc436ddcb8", "score": "0.51921713", "text": "def update_dynamics_model(self, x, u):\n if x.ndim < 2:\n x = np.expand_dims(x, axis=1) # shape of x should be (N,1), not (N,)\n if u.ndim < 2:\n u = np.expand_dims(u, axis=1)\n # ===== Model parameters ===== #\n\n # num of state, action\n nx = self.nx\n nu = self.nu\n\n m = self.m # i30\n\n width = self.width\n length = self.length\n\n l_f = self.l_f\n l_r = self.l_r\n wheelbase = self.wheelbase\n turning_circle = self.turning_circle\n max_steer = self.max_steer\n\n Iz = self.Iz\n\n # Iw = 1.8 # wheel inertia\n # rw = 0.3 # wheel radius\n\n roh = self.roh # density of air [kg/m3]\n C_d = self.C_d # drag coefficient\n A_f = self.A_f # vehicle frontal area [m2]\n C_roll = self.C_roll # rolling resistance coefficient\n\n dt = self.dt # sampling time. [sec]\n\n \n \"\"\"\n Pacejka lateral tire model params\n \n \"\"\"\n Fz_f = 9.81 * (m * l_r/wheelbase) * 0.001 # vertical force at front axle. [kN]\n\n a_lat_f = [-22.1, 1011, 1078, 1.82, 0.208, 0.000, -0.354, 0.707] # for Fy\n C_lat_f = 1.30\n D_lat_f = a_lat_f[0]*Fz_f**2 + a_lat_f[1]*Fz_f\n BCD_lat_f = a_lat_f[2]*math.sin(a_lat_f[3]*math.atan(a_lat_f[4]*Fz_f)) # before, atan\n # B_lat_f = BCD_lat_f/(C_lat_f*D_lat_f)\n B_lat_f = BCD_lat_f/(C_lat_f*D_lat_f) * 180/np.pi # for radian sideslip angle\n E_lat_f = a_lat_f[5]*Fz_f**2 + a_lat_f[6]*Fz_f + a_lat_f[7]\n\n Fz_r = 9.81 * (m * l_f/wheelbase) * 0.001 # vertical force at rear axle. [kN]\n\n a_lat_r = [-22.1, 1011, 1078, 1.82, 0.208, 0.000, -0.354, 0.707] # for Fy\n C_lat_r = 1.30\n D_lat_r = a_lat_r[0]*Fz_r**2 + a_lat_r[1]*Fz_r\n BCD_lat_r = a_lat_r[2]*math.sin(a_lat_r[3]*math.atan(a_lat_r[4]*Fz_r)) # berore, atan\n # B_lat_r = BCD_lat_r/(C_lat_r*D_lat_r)\n B_lat_r = BCD_lat_r/(C_lat_r*D_lat_r) * 180/np.pi # for radian sideslip angle\n E_lat_r = a_lat_r[5]*Fz_r**2 + a_lat_r[6]*Fz_r + a_lat_r[7]\n\n \"\"\"\n ===== Discretize Linearized Dynamics model =====\n \"\"\"\n\n # normalize angle\n # x[2] = normalize_angle(x[2])\n\n # Avoiding zero denominator (for slip angle, expm in discretization procedure)\n # before 19.07.31, 0.5 m/s\n if x[3,0] >=0 and x[3,0] < 0.5:\n # x[3] = 0.0\n x[4,0] = 0. # v_y\n x[5,0] = 0. # yaw_rate\n u[0,0] = 0. 
# steer\n if x[3,0] < 0.3:\n x[3,0] = 0.3 # v_x\n print(\"Avoiding zero denominator\")\n\n if x[3,0] > -0.5 and x[3,0] < 0:\n # x[3] = 0.\n x[4,0] = 0.\n x[5,0] = 0.\n u[0,0] = 0.\n if x[3,0] > -0.3:\n x[3,0] = -0.3\n print(\"Avoiding zero denominator\")\n\n # States\n yaw = x[2,0] # [0] for scalar data\n v_x = x[3,0]\n v_y = x[4,0]\n yaw_rate = x[5,0]\n\n steer = u[0,0]\n accel_track = u[1,0]\n\n # Dynamics model\n # Slip angle [deg]\n # alpha_f = np.rad2deg(-math.atan2( l_f*yaw_rate + v_y,v_x) + steer)\n # alpha_r = np.rad2deg(-math.atan2(-l_r*yaw_rate + v_y,v_x))\n\n alpha_f = -math.atan2( l_f*yaw_rate + v_y,v_x) + steer\n alpha_r = -math.atan2(-l_r*yaw_rate + v_y,v_x)\n\n # Lateral force (front & rear)\n # Fy_f = D_lat_f * math.sin(C_lat_f * math.atan2(B_lat_f * alpha_f, 1)) # before was atan\n # Fy_r = D_lat_r * math.sin(C_lat_r * math.atan2(B_lat_r * alpha_r, 1)) # before was atan\n Fy_f = D_lat_f * math.sin(C_lat_f * math.atan(B_lat_f * alpha_f))\n Fy_r = D_lat_r * math.sin(C_lat_r * math.atan(B_lat_r * alpha_r))\n\n # Longitudinal force\n\n # for both forward and backward driving.\n R_roll = C_roll * m * 9.81 * np.sign(v_x) # rolling resistance. [N] f*(Fzf+Fzr) = f*(mg)\n F_aero = 0.5*roh*C_d*A_f*v_x**2 * np.sign(v_x) # aero dynamics drag. [N] 0.5*rho*cd*A.\n Fx_f = m*accel_track - F_aero - R_roll\n\n # Next state\n x_dot = np.array([[v_x*math.cos(yaw) - v_y*math.sin(yaw)],\n [v_y*math.cos(yaw) + v_x*math.sin(yaw)],\n [yaw_rate],\n [1./m*(Fx_f*math.cos(steer) - Fy_f*math.sin(steer) + m*v_y*yaw_rate)],\n [1./m*(Fx_f*math.sin(steer) + Fy_r + Fy_f*math.cos(steer) - m*v_x*yaw_rate)],\n [1./Iz*(Fx_f*l_f*math.sin(steer) + Fy_f*l_f*math.cos(steer)- Fy_r*l_r)]])\n\n x_next = x + x_dot * dt\n\n return x_next, alpha_f, alpha_r", "title": "" }, { "docid": "f22189c24f6422b20fa1f36876bb54b1", "score": "0.51817864", "text": "def x(self, x: float):\n\n self._x = x", "title": "" }, { "docid": "1540b88ab29d2321c269c45e2692cad7", "score": "0.51742435", "text": "def _update_times(self):\n self._times = self.tmin + (self.tstep * np.arange(self.shape[-1]))\n self._times.flags.writeable = False", "title": "" }, { "docid": "a7cdae8715195477e2ffb0c6edd76ea4", "score": "0.517007", "text": "def replace_cell(self, new, row, col):\n self.cells[row][col] = new", "title": "" }, { "docid": "19fe5bb797175119e8f90b55d380bfdb", "score": "0.5166559", "text": "def t_value(self, kx: float, ky: float) : # This is t_ij(k_tilde)\n t = self.t ; tp = self.tp; tpp = self.tpp\n t_val = np.zeros((4, 4), dtype=complex)\n ex = np.exp(-2.0j*kx) ; emx = np.conjugate(ex)\n ey = np.exp(-2.0j*ky) ; emy = np.conjugate(ey)\n tloc = np.array([[0.0, -t, -t, -tp],\n [-t, 0.0, 0.0, -t],\n [-t, 0.0, 0.0, -t],\n [-tp, -t, -t, 0.0]])\n\n\n t_val += tloc\n\n t_val[0, 0] += -tpp*(ex+emx+ey+emy); t_val[0, 1] += -t*ex; t_val[0, 3] += -tp*(ex + ey + ex*ey); t_val[0, 2] += -t*ey\n t_val[1, 0] += -t*emx; t_val[1, 1] += -tpp*(ex+emx+ey+emy); t_val[1, 3] += -t*ey; t_val[1, 2] += -tp*(emx + ey + emx*ey)\n t_val[3, 0] += -tp*(emx + emy + emx*emy); t_val[3, 1] += -t*emy; t_val[3, 3] += -tpp*(ex+emx+ey+emy); t_val[3, 2] += -t*emx\n t_val[2, 0] += -t*emy; t_val[2, 1] += -tp*(ex + emy + ex*emy); t_val[2, 3] += -t*ex; t_val[2, 2] += -tpp*(ex+emx+ey+emy)\n\n return (t_val)", "title": "" }, { "docid": "974b3ecf0a5291de32ca1bd4bd4b6fd0", "score": "0.51663643", "text": "def actuate(self, cell):\n pass", "title": "" }, { "docid": "24ba997272ffe57e1ed08a5d0c888ded", "score": "0.51662946", "text": "def updateValues(self, tickData):\n pass", "title": "" }, { 
"docid": "36b7e0ae60f6c23fe61dc8554e253380", "score": "0.5149533", "text": "def set(self, name, value, ex=None, px=None, nx=False, xx=False):", "title": "" }, { "docid": "8bb4ae34502eb110565d6ca7018d2431", "score": "0.5140047", "text": "def updateTable(self, item: KnapsackItem, x: int, y: int) -> None:\n ((self.table)[y][x]).extend(item)", "title": "" }, { "docid": "e39080875e3288f70ca8b812ba350c13", "score": "0.51353663", "text": "def post_integration(self):\n # Cache the values\n self.sim.derivs(self.t0, self.xold)\n self.__store_values()\n \n V, dV = self.sim.CVs.volumes(self.t0)\n Nexist = self.sim.CVs.Nexist\n if sorted(self.sim.stateVariables) == ['D','T']:\n self.sim.CVs.updateStates('T',self.xnew[0:Nexist],'D',self.xnew[Nexist:2*Nexist])\n elif sorted(self.sim.stateVariables) == ['M','T']:\n self.sim.CVs.updateStates('T',self.xnew[0:Nexist],'D',self.xnew[Nexist:2*Nexist]/V)\n else:\n raise NotImplementedError", "title": "" }, { "docid": "485da1b2d4554809aa38ca7fe336d418", "score": "0.5130694", "text": "def changespeed(self, x):\r\n self.change_x += x", "title": "" }, { "docid": "485da1b2d4554809aa38ca7fe336d418", "score": "0.5130694", "text": "def changespeed(self, x):\r\n self.change_x += x", "title": "" }, { "docid": "485da1b2d4554809aa38ca7fe336d418", "score": "0.5130694", "text": "def changespeed(self, x):\r\n self.change_x += x", "title": "" }, { "docid": "c177146ecff0c7731b66f65a03b57895", "score": "0.51288164", "text": "def setX(self, x):\n self.x = x", "title": "" }, { "docid": "c177146ecff0c7731b66f65a03b57895", "score": "0.51288164", "text": "def setX(self, x):\n self.x = x", "title": "" } ]
08221d73799e78906b30f1464d647dc0
Computes how the interaction hamiltonian acts over a given state
[ { "docid": "a69fab8c6a837f99f24123569f0e0dad", "score": "0.55249983", "text": "def Hint(state_ini):\n states = []\n coefs = []\n for k in range(len(state_ini)):\n for l in range(len(state_ini)):\n for p in range(len(state_ini)):\n for q in range(len(state_ini)):\n if not (I(k,l,p,q)==0):\n state1,coef1,stop1 = anihilation(p,state_ini)\n if not stop1: \n state2,coef2,stop2 = anihilation(q,state1)\n if not stop2: \n state3,coef3 = creation(l,state2)\n state4,coef4 = creation(k,state3)\n states.append(state4)\n coefs.append(I(k,l,p,q)*coef1*coef2*coef3*coef4)\n \n return states,coefs", "title": "" } ]
[ { "docid": "97f2aec5ed08627489e932504160126d", "score": "0.7395203", "text": "def actHam(state, N, J, U):\n t1, t2 = [], []\n # First term\n for i in range(len(state.vector)-1):\n t1.append(state.create(i+1, N).destroy(i))\n t1.append(state.create(i, N).destroy(i+1))\n # Second term\n for i in range(len(state.vector)):\n PREFACTOR = state.num(i) * (state.num(i) - 1)\n temp3 = deepcopy(state)\n temp3.prefactor *= (PREFACTOR*U/2)\n t2.append(temp3)\n\n for state in t1:\n state.prefactor *= (-1 * J)\n\n return np.r_[t1, t2]", "title": "" }, { "docid": "49e2bcfe0fdee658320bbefd516c24dd", "score": "0.6854056", "text": "def HMF(state, Delta, N):\n\n\n\t#kinetic term: sum_i(eps(i)*(n_i,up + n_i,down))\n\tkinetic_state = dict_list_sum(\n\t\t[dict_prod(eps(i, N), dict_sum(number_op(state, i, 0, N), number_op(state, i, 1, N))) for i in range(N)])\n\n\t#interaction term: sum_i( Delta c_iUP^dag c_iDOWN^dag + conj.(Delta) c_iDOWN c_iUP )\n\tinteraction_state = dict_list_sum(\n\t\t[dict_sum(dict_prod(Delta, cr(cr(state, i, 1, N), i, 0, N)), dict_prod(np.conj(Delta), an(an(state, i, 0, N), i, 1, N))) for i in range(N)])\n\n\treturn dict_sum(kinetic_state, interaction_state)", "title": "" }, { "docid": "6b219e360df4b439f5375946a9b7bea7", "score": "0.6688797", "text": "def make_Hamiltonian(p, state_table):\n # dim = len(state_table)\n # row = []\n # col = []\n # data = []\n\n # H = sparse.csr_matrix((data, (row, col)), shape=(dim, dim), dtype=complex)\n H = transverse_field_matrix(p, state_table)\n H += longitudinal_field_matrix(p, state_table)\n H += ising_interaction_matrix(p, state_table)\n\n return H", "title": "" }, { "docid": "6efcd98dcfc3dca2bcb8789e628b922d", "score": "0.65604424", "text": "def Hexact(state, N):\n\n\t#kinetic term: sum_i(eps(i)*(n_i,up + n_i,down))\n\tkinetic_state = dict_list_sum(\n\t\t[dict_prod(eps(i, N), dict_sum(number_op(state, i, 0, N), number_op(state, i, 1, N))) for i in range(N)])\n\n\t#interaction term: sum_ij(c_iUP^dag c_iDOWN^dag c_jDOWN c_jUP)\n\tinteraction_state = dict_list_sum([\n\t\t\t\t\t\t\tdict_list_sum([\n\t\t\t\t\t\t\t\tcr(cr(an(an(state, j, 1, N), j, 0, N), i, 0, N), i, 1, N) for i in range(N)]) for j in range(N)])\n\t\n\tg=1/N\n\tinteraction_state = dict_prod(-g, interaction_state)\n\treturn dict_sum(kinetic_state, interaction_state)", "title": "" }, { "docid": "0305491888b297f37caaeb325dcefbf6", "score": "0.6484014", "text": "def calc_h(self, a_state):\n # this trivial version returns 0, a trivial estimate, but consistent and admissible\n count = 0\n for i in range(len(a_state.new)):\n if a_state.new[i] != self._goal[i]:\n count+=1\n return count", "title": "" }, { "docid": "b9e281db8139379581670b477e5238c1", "score": "0.6339773", "text": "def getHamMatrix(N, M, J, U):\n basis = getBasisStates(N, M)\n ham_matrix = np.zeros((len(basis), len(basis)))\n for state in basis:\n # Act ham on each basis state\n acted = actHam(state, N, J, U)\n for x in acted:\n for b in basis:\n # Find the matrix location of the 'acted' state and enter into\n # Hamiltonian matrix.\n if(np.all(x.vector == b.vector)):\n ham_matrix[state.idx][b.idx] += x.prefactor\n # Return ham matrix and basis\n return ham_matrix, basis", "title": "" }, { "docid": "a8305c1a560f3afeb326b5427c2f1241", "score": "0.6338933", "text": "def Hsp_element(state,omega):\n res= 0.0\n for i in range(len(state)):\n e = 1+i*(1-omega)\n res=res+state[i]*e\n return res", "title": "" }, { "docid": "f65b36235f3bf9b3a02b24c0e43b90cb", "score": "0.6199688", "text": "def ising_interaction_matrix(p, state_table):\n if 
p['J'].shape[0] != p['N'] or p['J'].shape[1] != p['N']:\n warnings.warn('J does NOT have dimensions LxL!')\n if not np.array_equal(p['J'], p['J'].conj().T):\n warnings.warn('J is NOT hermitian!')\n\n dim = len(state_table)\n row = []\n col = []\n data = []\n\n for In in range(dim):\n state = int_to_state(p, state_table[In])\n\n # eigenvalue of |0> is 1 and |1> is -1 so transform state (binary code) to spin basis\n spin = 1 - np.multiply(2, state)\n\n # construct matrix which has all the ZiZj products as elements (i,j)\n ZiZj = np.tensordot(spin, spin, axes=0)\n\n # get matrix element\n matrixelement = -1.0 * np.sum(np.multiply(p['J'], ZiZj)) / 2\n\n # store matrix element (note hz is diagonal so Out = In)\n row.append(In)\n col.append(In)\n data.append(matrixelement)\n\n del matrixelement\n\n ising_interaction = sparse.csr_matrix((data, (row, col)),\n shape=(dim, dim), dtype=complex)\n return ising_interaction", "title": "" }, { "docid": "17ae9c4c8f13e19e763b1423af3ef7ee", "score": "0.6149006", "text": "def manhattan_heuristic(state):\n goal = 0\n mis = 0\n\n for y in state:\n for x in y:\n mis+= (abs(goal - x) //3) + (abs(abs(goal - x) % 3))\n goal += 1\n \n return(mis) # replace this", "title": "" }, { "docid": "24a4aeba011380cdc6b192337e00c463", "score": "0.6114106", "text": "def h(state: tuple):\n return 6 - state[0] - state[1]", "title": "" }, { "docid": "93419913bb220264de1c247a0db38e34", "score": "0.61005616", "text": "def simulate(self, input_state: ndarray) -> List[int]:\n phi_0 = self.__prepare_initial_state(input_state)\n evolved_state = dot(self.interferometer_matrix, phi_0)\n probabilities = self.__calculate_probabilities(evolved_state)\n return self.__calculate_approximation_of_boson_sampling_outcome(probabilities)", "title": "" }, { "docid": "3ca828b1d7a60c67bf048f70740ecb16", "score": "0.60028356", "text": "def make_trotter_Hamiltonian(p, state_table):\n H_list = []\n H_list.append(longitudinal_field_matrix(p, state_table) +\n ising_interaction_matrix(p, state_table))\n H_list.append(transverse_field_matrix(p, state_table))\n return H_list", "title": "" }, { "docid": "297ed502535f64ed50f9102b6fd39d2f", "score": "0.600123", "text": "def call(self, inputs, state):\n\n inputs_u, inputs_i = tf.split(value=inputs, num_or_size_splits=2, axis=1)\n gate_inputs = math_ops.matmul(\n array_ops.concat([inputs, state], 1), self._gate_kernel)\n gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias)\n\n value = math_ops.sigmoid(gate_inputs)\n r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)\n\n r_state = r * state\n\n candidate = math_ops.matmul(\n array_ops.concat([inputs, r_state], 1), self._candidate_kernel)\n candidate = nn_ops.bias_add(candidate, self._candidate_bias)\n\n c = self._activation(candidate)\n new_h = u * state + (1 - u) * c\n return new_h, new_h", "title": "" }, { "docid": "82f30d60bc9c67b7bedc9ccd4bd67323", "score": "0.59757584", "text": "def H(self, state):\n\t\tH = state.EX + state.EV\n\t\treturn H", "title": "" }, { "docid": "14f67be23c2a4e9414e54490713d35c5", "score": "0.59659684", "text": "def state_obs_mat(ns,g_cords,state_obs):\n state = sym.Matrix(np.zeros(2*len(g_cords)))\n\n for i in range(len(g_cords)):\n state[i] = sym.symbols(g_cords[i], real = True)\n state[i+len(g_cords)] = sym.symbols(g_cords[i] + '_dot', real = True)\n\n state_obs_s = sym.sympify(state_obs, locals = ns)\n\n Htilde_s = state_obs_s.jacobian(state)\n\n t = sym.symbols('t', real = True)\n state_time[0] = t\n for i in range(len(g_cords)):\n state_time[i + 1] = 
sym.symbols(g_cords[i], real = True)\n state_time[i + len(g_cords)+ 1] = sym.symbols(g_cords[i]+'_dot', real = True)\n Htilde = sym.lambdify(state_time,Htilde_s, modules='numpy')\n\n return Htilde", "title": "" }, { "docid": "a4bb356fd591a18e8d4600fcbdce38b1", "score": "0.59569496", "text": "def call(self, inputs, state):\n with vs.variable_scope(\"gates\"):\n h = state\n args = array_ops.concat([inputs, h], 1)\n concat = self._linear(args, 2)\n\n z, r = array_ops.split(value=concat, num_or_size_splits=2, axis=1)\n if self._layer_norm:\n z = self._norm(z, \"update\")\n r = self._norm(r, \"reset\")\n\n with vs.variable_scope(\"candidate\"):\n args = array_ops.concat([inputs, math_ops.sigmoid(r) * h], 1)\n new_c = self._linear(args, 1)\n if self._layer_norm:\n new_c = self._norm(new_c, \"state\")\n new_h = self._activation(new_c) * math_ops.sigmoid(z) + \\\n (1 - math_ops.sigmoid(z)) * h\n return new_h, new_h", "title": "" }, { "docid": "0ecabccba7e3b7662aa5d2de5dc93c41", "score": "0.59503686", "text": "def call(self, inputs, state):\n sigmoid = math_ops.sigmoid\n one = constant_op.constant(1, dtype=dtypes.int32)\n\n # Parameters of gates are concatenated into one multiply for efficiency.\n c, h, h_skip, h_cnt = state\n n_skip = self._n_skip\n if n_skip:\n skip_bool = h_cnt % self._n_skip == 0\n\n gate_inputs = math_ops.matmul(\n array_ops.concat([inputs, h], 1), self._kernel)\n gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)\n\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n i, j, f, o = array_ops.split(\n value=gate_inputs, num_or_size_splits=4, axis=one)\n forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype)\n\n # Note that using `add` and `multiply` instead of `+` and `*` gives a\n # performance improvement. 
So using those at the cost of readability.\n add = math_ops.add\n multiply = math_ops.multiply\n\n first = multiply(c, sigmoid(add(f, forget_bias_tensor)))\n new_c = add(multiply(c, sigmoid(add(f, forget_bias_tensor))),\n multiply(sigmoid(i), self._activation(j)))\n if n_skip:\n new_h = multiply(self._activation(new_c), sigmoid(o)) + skip_bool * 1 * h_skip\n h_skip = h_skip * (1-skip_bool) + new_h * skip_bool\n else:\n new_h = multiply(self._activation(new_c), sigmoid(o)) \n h_skip = new_h \n\n h_cnt += 1\n new_state = SCLSTMStateTuple(new_h, new_c, h_skip, h_cnt)\n return new_h, new_state", "title": "" }, { "docid": "87868cbe1affefbb21921f94e9d7c122", "score": "0.59469837", "text": "def forward(self, state):\n x = self.fc1(state)\n x = self.act(x)\n x = self.bn1(x)\n x1 = self.fc2(x)\n x1 = self.act(x1)\n x1 = self.bn2(x1)\n x1 = self.fc3(x1)\n x1 = self.act(x1)\n x1 = self.bn3(x1)\n x = torch.add(x1,self.act(self.fc1_skip(x)))\n x1 = self.fc4(x)\n x1 = self.act(x1)\n x1 = self.bn4(x1)\n x1 = self.fc5(x1)\n x1 = self.act(x1)\n x1 = self.bn5(x1)\n x = torch.add(x1,self.act(self.fc2_skip(x)))\n x = self.fc6(x)\n x = self.act(x)\n x = self.bn6(x)\n return torch.add(self.fc8_1(self.act(self.fc7_1(x))),self.fc8_2(self.act(self.fc7_2(x))) - torch.mean(self.fc8_2(self.act(self.fc7_2(x)))))", "title": "" }, { "docid": "ef75533465394cff98e4f38ee8b87da1", "score": "0.59223974", "text": "def act(self, state):\n action = np.zeros(self.action_size)\n if np.random.rand() <= self.epsilon:\n action[random.randrange(self.action_size)]=1\n else:\n state = state.reshape(1,self.state_size)\n act_values = self.brain.predict(state)\n action[np.argmax(act_values[0])] = 1\n return action", "title": "" }, { "docid": "359cc813b316231a152502e83dc6f585", "score": "0.58734393", "text": "def act(self, state):\n state = np.reshape(state, [-1, self.state_size])\n action = self.actor_local.model.predict(state)[0]\n actions = list(action + self.noise.sample()) # add some noise for exploration\n #actions = list(action) # no noise for exploration\n actions = np.maximum(actions, self.action_low)\n actions = np.minimum(actions, self.action_high)\n return actions", "title": "" }, { "docid": "9cefa3e8e7750d39eaf74143397f1406", "score": "0.5870848", "text": "def hamiltonian(self):\n return self.kineticmat() + self.potentialmat()", "title": "" }, { "docid": "b2684bd508eb3345dfcab6e3c76ab878", "score": "0.5867485", "text": "def process(state):\n \n ## normalized vector\n return np.array([state[0]/(max(env.state_space)[0]), state[1]/(max(env.state_space)[1]), state[2]]).reshape((3,1))\n \n ## one-hot (dim = 280,1)", "title": "" }, { "docid": "100d19d84cea3e371fb5d96e6839cf35", "score": "0.58647317", "text": "def calc_eq_of_state(pyom, n):\n density_args = (pyom, pyom.salt[..., n], pyom.temp[..., n], np.abs(pyom.zt))\n\n \"\"\"\n calculate new density\n \"\"\"\n pyom.rho[..., n] = density.get_rho(*density_args) * pyom.maskT\n\n if pyom.enable_conserve_energy:\n \"\"\"\n calculate new dynamic enthalpy and derivatives\n \"\"\"\n pyom.Hd[..., n] = density.get_dyn_enthalpy(*density_args) * pyom.maskT\n pyom.int_drhodT[..., n] = density.get_int_drhodT(*density_args)\n pyom.int_drhodS[..., n] = density.get_int_drhodS(*density_args)\n\n \"\"\"\n new stability frequency\n \"\"\"\n fxa = -pyom.grav / pyom.rho_0 / pyom.dzw[np.newaxis, np.newaxis, :-1] * pyom.maskW[:, :, :-1]\n pyom.Nsqr[:, :, :-1, n] = fxa * (density.get_rho(pyom, pyom.salt[:,:,1:,n], pyom.temp[:,:,1:,n], np.abs(pyom.zt[:-1])) - pyom.rho[:,:,:-1,n])\n pyom.Nsqr[:, :, -1, n] = 
pyom.Nsqr[:,:,-2,n]", "title": "" }, { "docid": "72d2c0e6fcd90d2f4171a96ac0741f60", "score": "0.57848847", "text": "def test_state_dependent_exploration_grad():\n n_states = 2\n state_dim = 3\n action_dim = 10\n sigma_hat = th.ones(state_dim, action_dim, requires_grad=True)\n # Reduce the number of parameters\n # sigma_ = th.ones(state_dim, action_dim) * sigma_\n # weights_dist = Normal(th.zeros_like(log_sigma), th.exp(log_sigma))\n th.manual_seed(2)\n weights_dist = Normal(th.zeros_like(sigma_hat), sigma_hat)\n weights = weights_dist.rsample()\n\n state = th.rand(n_states, state_dim)\n mu = th.ones(action_dim)\n noise = th.mm(state, weights)\n\n action = mu + noise\n\n variance = th.mm(state**2, sigma_hat**2)\n action_dist = Normal(mu, th.sqrt(variance))\n\n # Sum over the action dimension because we assume they are independent\n loss = action_dist.log_prob(action.detach()).sum(dim=-1).mean()\n loss.backward()\n\n # From Rueckstiess paper: check that the computed gradient\n # correspond to the analytical form\n grad = th.zeros_like(sigma_hat)\n for j in range(action_dim):\n # sigma_hat is the std of the gaussian distribution of the noise matrix weights\n # sigma_j = sum_j(state_i **2 * sigma_hat_ij ** 2)\n # sigma_j is the standard deviation of the policy gaussian distribution\n sigma_j = th.sqrt(variance[:, j])\n for i in range(state_dim):\n # Derivative of the log probability of the jth component of the action\n # w.r.t. the standard deviation sigma_j\n d_log_policy_j = (noise[:, j] ** 2 - sigma_j**2) / sigma_j**3\n # Derivative of sigma_j w.r.t. sigma_hat_ij\n d_log_sigma_j = (state[:, i] ** 2 * sigma_hat[i, j]) / sigma_j\n # Chain rule, average over the minibatch\n grad[i, j] = (d_log_policy_j * d_log_sigma_j).mean()\n\n # sigma.grad should be equal to grad\n assert sigma_hat.grad.allclose(grad)", "title": "" }, { "docid": "d1b4036704b90211b08319d6f2ff190e", "score": "0.5756763", "text": "def inv_chol(self, state):\n return self.lmult_inv_chol(state, np.eye(state.mom.shape[0]))", "title": "" }, { "docid": "9925893a3fb16894a61d097ea3da8225", "score": "0.5751316", "text": "def state_symmetry_exploit(state):\n player_id, board = np.split(state, [1])\n assert bool(player_id)\n return board*player_id", "title": "" }, { "docid": "e36e221f33fb516ebb392e5bc61ca1ab", "score": "0.5737062", "text": "def formulate_state(state):\n\n # Initalize all necessary infomration\n arena = state['arena'].copy()\n crates_arena = np.maximum(arena, 0)\n for (cx, cy) in state['coins']:\n crates_arena[cx, cy] = 2\n crates_arena = crates_arena.T\n\n x, y, _, bombs_left, _ = state['self']\n bombs = state['bombs']\n others = [(xo, yo) for (xo, yo, _, _, _) in state['others']]\n\n # First Information: Direction to nearest coin\n diglist = list()\n if len(state['coins']) == 0:\n diglist.append(0)\n else:\n closest_coin = sorted(state['coins'], key=lambda k: abs(k[0] - x) + abs(k[1] - y))[0]\n best_orientation = np.argmin([(closest_coin[0] - mx)**2 + (closest_coin[1] - my)**2 for (mx, my) in\n [(x, y-1), (x-1, y-1), (x-1, y), (x-1, y+1),\n (x, y+1), (x+1, y+1), (x+1, y), (x+1, y-1)]]) + 1\n diglist.append(best_orientation)\n\n # Second Information: Direction to more crates\n if np.sum(crates_arena) == 0:\n diglist.append(0)\n else:\n q1map = np.sum(crates_arena[1:6, 1:6])\n q2map = np.sum(crates_arena[1:6, 6:11])\n q3map = np.sum(crates_arena[1:6, 11:16])\n q4map = np.sum(crates_arena[6:11, 1:6])\n q5map = np.sum(crates_arena[6:11, 6:11])\n q6map = np.sum(crates_arena[6:11, 11:16])\n q7map = np.sum(crates_arena[11:16, 
1:6])\n q8map = np.sum(crates_arena[11:16, 6:11])\n q9map = np.sum(crates_arena[11:16, 11:16])\n diglist.append(np.argmax([q1map, q2map, q3map, q4map, q5map, q6map, q7map, q8map, q9map]) + 1)\n\n # Thrid Information: Direction to nearest opponent\n if len(state['others']) == 0:\n diglist.append(0)\n else:\n closest_p = sorted(state['others'], key=lambda k: abs(k[0] - x) + abs(k[1] - y))[0]\n closest_orientation = np.argmin([abs(closest_p[0] - mx) + abs(closest_p[1] - my) for (mx, my) in\n [(x, y - 1), (x - 1, y - 1), (x - 1, y), (x - 1, y + 1), (x, y + 1),\n (x + 1, y + 1), (x + 1, y), (x + 1, y - 1)]]) + 1\n diglist.append(closest_orientation)\n\n # Fourth Information: Number of bombs available\n diglist.append(bombs_left)\n\n # 5-45th Information: Information in the 4-step vision\n for (i, j) in [( 0, -4), (-1, -3), ( 0, -3), ( 1, -3),\n (-2, -2), (-1, -2), ( 0, -2), ( 1, -2), ( 2, -2),\n (-3, -1), (-2, -1), (-1, -1), ( 0, -1), ( 1, -1),\n ( 2, -1), ( 3, -1), (-4, 0), (-3, 0), (-2, 0),\n (-1, 0), (0 , 0), ( 1, 0), ( 2, 0), ( 3, 0),\n ( 4, 0), (-3, 1), (-2, 1), (-1, 1), ( 0, 1),\n ( 1, 1), ( 2, 1), ( 3, 1), (-2, 2), (-1, 2),\n ( 0, 2), ( 1, 2), ( 2, 2), (-1, 3), ( 0, 3),\n ( 1, 3), ( 0, 4)]:\n\n if (x + i) < 0 or (x + i) > 16 or (y + j) < 0 or (y + j) > 16:\n diglist.append(0)\n elif (x + i, y + j) in state['coins']:\n diglist.append(300)\n elif state['explosions'][x + i, y + j] == 1:\n diglist.append(10)\n elif state['explosions'][x + i, y + j] == 2:\n diglist.append(9)\n elif (x + i, y + j, 4) in bombs:\n if (x + i, y + j) in others:\n diglist.append(40)\n else:\n diglist.append(4)\n elif (x + i, y + j, 3) in bombs:\n if (x + i, y + j) in others:\n diglist.append(50)\n else:\n diglist.append(5)\n elif (x + i, y + j, 2) in bombs:\n if (x + i, y + j) in others:\n diglist.append(60)\n else:\n diglist.append(6)\n elif (x + i, y + j, 1) in bombs:\n if (x + i, y + j) in others:\n diglist.append(70)\n else:\n diglist.append(7)\n elif (x + i, y + j, 0) in bombs:\n if (x + i, y + j) in others:\n diglist.append(80)\n else:\n diglist.append(8)\n elif (x + i, y + j) in others:\n diglist.append(100)\n else:\n diglist.append(arena[x + i, y + j] + 1) # 0, 1, 2\n\n state = np.array(diglist)\n return state.reshape((1, 45))", "title": "" }, { "docid": "17ce22e2f65d211f2d3efb91ab50f002", "score": "0.57361174", "text": "def construct_H(state_bra,state_ket):\n states,coefs=Hint(state_ket)\n res = 0\n for i in range(len(states)):\n if (state_bra==states[i]):\n res = res+ coefs[i]\n return 0.5*res", "title": "" }, { "docid": "793ad6bfa6bc966e871e36ac947b51cc", "score": "0.5724917", "text": "def likelihood_and_decoding_hmm(*args,**kwargs):\n num_of_states = kwargs['num_of_states']\n time_grid = kwargs['time_grid']\n num_of_steps = len(time_grid) # |W| - 1\n log_alphas = np.zeros((num_of_steps,num_of_states),dtype=np.float)\n log_viterbi = np.zeros((num_of_steps,num_of_states),dtype=np.float)\n backpointers = np.zeros((num_of_steps),dtype=np.float)\n init_probs = kwargs['init_probs'] # distribution over initial states\n pi = kwargs['pi'] # state transition; \"parameters\" HMM (rows x cols ~ origin x destination)\n e = kwargs['e'] # list of functions size (num_of_states); likelihood of obs from each state\n O = kwargs['O'] # list of observations size (num_of_steps)\n\n # init forward pass\n time_next = time_grid[0] # the first point in the time_grid, |W|, is _not_ zero in general.\n # but it is zero currently.... 
not great.\n for state_idx in range(num_of_states):\n obs = [O[0,time_next],time_next]\n log_alphas[0,state_idx] = np.ma.log([init_probs.l(state_idx)]).filled(-np.infty) +\\\n np.ma.log([e(obs)[state_idx]]).filled(-np.infty)\n log_viterbi[0,state_idx] = np.ma.log([init_probs.l(state_idx)]).filled(-np.infty) +\\\n np.ma.log([e(obs)[state_idx]]).filled(-np.infty)\n \n \"\"\"\n The augmented state space is actually the issue; we don't need to iterate over delta_w_enum\n What we actually need is for the augmented state space to _include_ all the different\n enumerations of delta_w. E.g. NOT [STATE,GRID] rather [STATE,DELTA_W]. \n I do ~not~ think this should impact \"smjp_transition()\", but let's watch out.\n \"\"\"\n # run the forward\n # print(\"---\")\n index_range = range(1,len(time_grid))\n print(len(index_range),len(time_grid[1:-1]),len(time_grid[0:-1]),len(time_grid))\n for alpha_index,time in zip(index_range,time_grid[0:-1]): # {w_0,...,w_{|W|-1}}\n # print(alpha_index)\n time_n = time_grid[alpha_index] # update the next time.\n delta_w = time_n - time \n delta_w_l = [delta_w]\n # delta_w_enumeration = create_delta_w_enumeration(time_n,time_grid)\n for delta_w in delta_w_l: # todo: remove this loop\n obs = [O[time,time_n],time_n]\n for state_idx in range(num_of_states):\n for state_prime_idx in range(num_of_states):\n #print(pi(delta_w)[state_prime_idx,state_idx],e(obs)[state_idx])\n # HMM: pi(t,t_next) = pi; does not depend on time difference\n # sMJP: pi(t,t_next) =/= pi; depends on time difference\n log_alpha = log_alphas[alpha_index-1,state_prime_idx]\n log_transition = pi([time,time_n])[state_prime_idx,state_idx]\n log_obs = np.ma.log([e(obs)[state_idx]]).filled(-np.infty)[0]\n #print(log_alpha,log_transition,log_obs)\n alpha_new = np.exp(log_alpha + log_transition + log_obs)\n if alpha_index == index_range[-1] and state_prime_idx == 0 and state_idx in [0,1] and False:\n print(log_alpha,log_transition,log_obs)\n print(np.sum([log_alpha,log_transition,log_obs]))\n\n #print(alpha_new)\n if state_prime_idx == 0 and state_idx == 1 and alpha_index > 2 and False:\n print(\"[[state_prime_idx == 0 and state_idx == 1]]\")\n print(log_alpha,log_transition,log_obs)\n print(log_alphas[alpha_index-1,:])\n print(log_alphas[alpha_index-1,state_prime_idx])\n print('dw',delta_w,time,time_n)\n print(pi.state_space[state_prime_idx])\n print(pi.state_space[state_idx])\n print(\"==========================\")\n exit()\n if do_we_thin(pi.state_space,state_prime_idx,state_idx,delta_w) and False:\n print(pi.state_space)\n print(alpha_index-1)\n print(log_alphas[alpha_index-1,:])\n print(state_prime_idx,state_idx)\n print(log_alpha)\n print(\"THN\")\n v,l = pi.state_space[state_prime_idx]\n vp,lp = pi.state_space[state_idx]\n print(log_alphas[alpha_index-1,state_prime_idx])\n print(delta_w)\n print(pi.state_space[state_prime_idx])\n print(pi.state_space[state_idx])\n if np.any(np.isnan(alpha_new)):\n print(\"found a nan\")\n quit()\n #print('alpha_new',alpha_new,log_alpha,state_idx)\n log_alphas[alpha_index,state_idx] += alpha_new\n log_alphas[alpha_index,:] = np.ma.log([log_alphas[alpha_index,:]]).filled(-np.infty)\n # print(log_alphas)\n # exit()\n # print(alpha_index,time)\n # print(time,alphas[time,:])\n backpath = None # trace_backward_path(backpointers)\n \n # print(log_alphas)\n # print(len(index_range),len(time_grid[1:-1]),len(time_grid[0:-1]),len(time_grid))\n # print(time_grid)\n # exit()\n backpath = None\n output_prob = np.sum(np.exp(log_alphas[-1,:]))\n \n return 
log_alphas,output_prob,log_viterbi,backpath", "title": "" }, { "docid": "45e97c50e1486c399512fd98baac5ade", "score": "0.5722083", "text": "def produce_action_and_action_info(self, state):\n actor_output = self.actor_local(state)\n mean, log_std = actor_output[:, :self.action_size], actor_output[:, self.action_size:]\n std = log_std.exp()\n normal = Normal(mean, std)\n x_t = normal.rsample() #rsample means it is sampled using reparameterisation trick\n action = torch.tanh(x_t)\n log_prob = normal.log_prob(x_t)\n log_prob -= torch.log(1 - action.pow(2) + 1e-6)\n log_prob = log_prob.sum(1, keepdim=True)\n return action, log_prob, torch.tanh(mean)", "title": "" }, { "docid": "6516b51969a54d09829e27c15134f1ab", "score": "0.57211965", "text": "def hamiltonian(n):\n N = 2**n\n binaryFactor = np.array([1, 2, 4])\n row = []\n col = []\n value = []\n for j in range(n-1):\n for i in range(N):\n spin1 = (i % binaryFactor[1]) // binaryFactor[0]\n spin2 = (i % binaryFactor[2]) // binaryFactor[1]\n if spin1 != spin2:\n row.append(i)\n col.append(i)\n value.append(-1)\n \n row.append(i)\n value.append(2)\n if spin1 == 1:\n col.append((i + binaryFactor[0]) % N)\n else:\n col.append((i - binaryFactor[0]) % N)\n else:\n row.append(i)\n col.append(i)\n value.append(1)\n binaryFactor *= 2\n return scipy.sparse.coo_matrix((value, (row, col)), shape = (N, N), dtype = np.float64)", "title": "" }, { "docid": "d8bdcb0ad63e10f92e1349c816964aeb", "score": "0.5719273", "text": "def nextState(self):\n newstate = np.zeros(self.N, dtype=int)\n i = 0\n for adj in self.adjmat:\n input_ind = np.where(adj == 1)\n inputs = [self.state[ind] for ind in input_ind[0]]\n if np.random.rand() < self.LUT[tuple(inputs)]:\n newstate[i] = 0\n else:\n newstate[i] = 1\n i += 1\n return newstate", "title": "" }, { "docid": "f6f8eff0394c3192c08efb19daa74409", "score": "0.57086587", "text": "def call(self, state):\n z = tf.cast(state, tf.float64)\n h1 = tf.nn.leaky_relu(tf.matmul(z, self.w1) + self.b1)\n h2 = tf.nn.relu(tf.matmul(h1, self.w2) + self.b2)\n \n h2 = tf.nn.relu(tf.matmul(h2, self.w21) + self.b21)\n h2 = tf.nn.relu(tf.matmul(h2, self.w22) + self.b22)\n \n if self.deeper:\n h2 = tf.nn.relu(tf.matmul(h2, self.w23) + self.b23)\n h2 = tf.nn.relu(tf.matmul(h2, self.w24) + self.b24)\n \n out = (tf.matmul(h2, self.w3) + self.b3) #/10\n # out = - tf.math.log(1.01 + tf.exp(out))\n return out", "title": "" }, { "docid": "2c62d308827bc5056be49da43a36ce1e", "score": "0.5694486", "text": "def forward(self, state):\n x = state\n x = self.hidden_activation(self.fc0(x))\n state_value = self.hidden_activation(self.fc1_s(x))\n state_value = self.fc2_s(state_value)\n advantage_values = self.hidden_activation(self.fc1_a(x))\n advantage_values = self.fc2_a(advantage_values)\n q = state_value + advantage_values - advantage_values.mean(dim=1, keepdim=True)\n\n return q", "title": "" }, { "docid": "b2d9d36d9a411ebe08b75567bee5c300", "score": "0.5693643", "text": "def call(self, inputs, state):\n gate_inputs = tf.matmul(\n tf.concat([inputs, state], 1), self._gate_kernel)\n gate_inputs = tf.nn.bias_add(gate_inputs, self._gate_bias)\n\n if self._layer_norm:\n gate_inputs = common_layers.split_last_dim(gate_inputs, 3)\n mean = tf.reduce_mean(gate_inputs, axis=[-1], keepdims=True)\n variance = tf.reduce_mean(tf.square(gate_inputs - mean), axis=[-1], keepdims=True)\n norm_x = (gate_inputs - mean) * tf.rsqrt(variance + self._ln_epsilon)\n norm_x = common_layers.combine_last_two_dims(norm_x)\n gate_inputs = norm_x * self._ln_scale + self._ln_bias\n\n value = 
tf.sigmoid(gate_inputs)\n r, u, l = tf.split(value=value, num_or_size_splits=3, axis=1)\n\n r_state = r * state\n\n candidate = tf.matmul(\n tf.concat([inputs, r_state], 1), self._candidate_kernel)\n candidate = tf.nn.bias_add(candidate, self._candidate_bias)\n\n c = self._activation(candidate)\n c += l * tf.matmul(inputs, self._linear_kernel)\n if self._dropout_rate:\n c = tf.nn.dropout(c, keep_prob=1-self._dropout_rate)\n new_h = u * state + (1 - u) * c\n return new_h, new_h", "title": "" }, { "docid": "966c4f025e93f8e5a8db02bea7be46c8", "score": "0.5692773", "text": "def hamiltonian(self, state, time, value, grad_value):\n del time, value # unused\n control = self.optimal_control(state, grad_value)\n return grad_value @ self.dynamics(state, control)", "title": "" }, { "docid": "a61d7e95f8edaf1a53f2cbc094b743bc", "score": "0.5683809", "text": "def binary_mera_energy(hamiltonian, state, isometry, disentangler):\n backend = \"jax\"\n\n out = []\n for dirn in ('left', 'right'):\n iso_c = tensornetwork.Node(isometry, backend=backend)\n iso_r = tensornetwork.Node(isometry, backend=backend)\n\n iso_l_con = tensornetwork.linalg.node_linalg.conj(iso_l)\n #iso_c_con = tensornetwork.linalg.node_linalg.conj(iso_c)\n iso_r_con = tensornetwork.linalg.node_linalg.conj(iso_r)\n\n op = tensornetwork.Node(hamiltonian, backend=backend)\n rho = tensornetwork.Node(state, backend=backend)\n\n un_l = tensornetwork.Node(disentangler, backend=backend)\n un_l_con = tensornetwork.linalg.node_linalg.conj(un_l)\n\n un_r = tensornetwork.Node(disentangler, backend=backend)\n un_r_con = tensornetwork.linalg.node_linalg.conj(un_r)\n\n tensornetwork.connect(iso_l[2], rho[0])\n tensornetwork.connect(iso_c[2], rho[1])\n tensornetwork.connect(iso_r[2], rho[2])\n\n tensornetwork.connect(iso_l[0], iso_l_con[0])\n tensornetwork.connect(iso_l[1], un_l[2])\n tensornetwork.connect(iso_c[0], un_l[3])\n tensornetwork.connect(iso_c[1], un_r[2])\n tensornetwork.connect(iso_r[0], un_r[3])\n tensornetwork.connect(iso_r[1], iso_r_con[1])\n\n if dirn == 'right':\n tensornetwork.connect(un_l[0], un_l_con[0])\n tensornetwork.connect(un_l[1], op[3])\n tensornetwork.connect(un_r[0], op[4])\n tensornetwork.connect(un_r[1], op[5])\n tensornetwork.connect(op[0], un_l_con[1])\n tensornetwork.connect(op[1], un_r_con[0])\n tensornetwork.connect(op[2], un_r_con[1])\n elif dirn == 'left':\n tensornetwork.connect(un_l[0], op[3])\n tensornetwork.connect(un_l[1], op[4])\n tensornetwork.connect(un_r[0], op[5])\n tensornetwork.connect(un_r[1], un_r_con[1])\n tensornetwork.connect(op[0], un_l_con[0])\n tensornetwork.connect(op[1], un_l_con[1])\n tensornetwork.connect(op[2], un_r_con[0])\n\n tensornetwork.connect(un_l_con[2], iso_l_con[1])\n tensornetwork.connect(un_l_con[3], iso_c_con[0])\n tensornetwork.connect(un_r_con[2], iso_c_con[1])\n tensornetwork.connect(un_r_con[3], iso_r_con[0])\n\n #tensornetwork.connect(iso_l_con[2], rho[3])\n tensornetwork.connect(iso_c_con[2], rho[4])\n tensornetwork.connect(iso_r_con[2], rho[5])\n\n # FIXME: Check that this is giving us a good path!\n out.append(\n contractors.branch(tensornetwork.reachable(rho),\n nbranch=2).get_tensor())\n\n return 0.5 * sum(out)", "title": "" }, { "docid": "651ee5c4589e0381a6652cfc29f180ca", "score": "0.56658435", "text": "def forward(self, state):\n \n h_relu1 = F.relu(self.fc1(state))\n h_relu2 = F.relu(self.fc2(h_relu1))\n yhat = self.fc3(h_relu2)\n return yhat", "title": "" }, { "docid": "2dff52b429a3171a910ecd3c9c529c69", "score": "0.5662271", "text": "def random_action(state):\n 
cible = [int(random.uniform(0, w-1)), int(random.uniform(0, h-1))]\n dist = Env.dist(state,cible)\n if dist == 0 :\n dist = 1\n norm = min(100,dist)\n return [int(norm/dist*(cible[0]-state[0])),int(norm/dist*(cible[1]-state[1]))]", "title": "" }, { "docid": "6903789fa0af81b1bc4d1dfc8d497914", "score": "0.56595314", "text": "def get_action_features(phi_state, action, n_actions):\n if len(phi_state.shape) > 1:\n assert phi_state.shape[0] == action.shape[0]\n\n phi = np.ones((phi_state.shape[0], n_actions * phi_state[0].size))\n i = 0\n for s, a in zip(phi_state, action):\n start = s.size * int(a[0])\n stop = start + s.size\n\n phi_sa = np.zeros(n_actions * s.size)\n phi_sa[start:stop] = s\n\n phi[i] = phi_sa\n\n i += 1\n else:\n start = phi_state.size * action[0]\n stop = start + phi_state.size\n\n phi = np.zeros(n_actions * phi_state.size)\n phi[start:stop] = phi_state\n\n return phi", "title": "" }, { "docid": "32e40a7f569426fba704921e3ab3c635", "score": "0.5657275", "text": "def phi(self, state, action):\n state_ = self._domain.states[state]\n return np.array([self._feature_puddle(state_),\n self._feature_goal_distance(state_)])", "title": "" }, { "docid": "83218e52feb5a7fbbe502c234abea26c", "score": "0.56513274", "text": "def act(self, state):\n return self.brain.predict_ensemble(state)", "title": "" }, { "docid": "537b3d8dbada89e8c081ee8316d4ac87", "score": "0.5630806", "text": "def groundstate(H):\n \"\"\"find the gs of a Hamiltonian matrix\"\"\"\n Hinv = H.I\n E, wf = sparse.linalg.eigsh(H, k = 1)\n return 1/E, wf", "title": "" }, { "docid": "1edd0af8ac092bb0c7bb7fb9573ae0c0", "score": "0.5627552", "text": "def act_target(self, state):\n state = np.reshape(state, [-1, self.state_size])\n action = self.actor_target.model.predict(state)[0]\n #actions = list(action + self.noise.sample()) # add some noise for exploration\n actions = list(action) # no noise for exploration\n return actions", "title": "" }, { "docid": "893b7d3ead52d4d28f9514e003ec8c20", "score": "0.56271374", "text": "def act(self, state):\n if np.random.rand() <= self.epsilon:\n return np.random.choice(self.action_size)\n act_values = self.model.predict(state)\n\n return np.argmax(act_values[0])", "title": "" }, { "docid": "5d19c1e930f0816aa24b6d4e78ac6fb7", "score": "0.56209517", "text": "def call(self, inputs, state):\n # Using Attention Wrapper\n print(\"self.input_depth:\",self.input_depth)\n inputs, context = array_ops.split(value=inputs, num_or_size_splits=[int(self.input_depth), self._num_units], axis=-1)\n #_, _, state = array_ops.split(value=state, num_or_size_splits=[int(self.input_depth), self._num_units, self._num_units], axis=-1)\n \n # First round\n gate_inputs = math_ops.matmul(\n array_ops.concat([inputs, state], 1), self._gate_kernel_1)\n gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias_1)\n\n value = math_ops.sigmoid(gate_inputs)\n r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)\n\n r_state = r * state\n\n candidate = math_ops.matmul(\n array_ops.concat([inputs, r_state], 1), self._candidate_kernel_1)\n candidate = nn_ops.bias_add(candidate, self._candidate_bias_1)\n\n c = self._activation(candidate)\n new_h_prime = u * state + (1 - u) * c\n \n # Final round\n\n gate_inputs = math_ops.matmul(\n array_ops.concat([context, new_h_prime], 1), self._gate_kernel_2)\n gate_inputs = nn_ops.bias_add(gate_inputs, self._gate_bias_2)\n\n value = math_ops.sigmoid(gate_inputs)\n r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)\n\n r_state = r * new_h_prime\n\n candidate = 
math_ops.matmul(\n array_ops.concat([context, r_state], 1), self._candidate_kernel_2)\n candidate = nn_ops.bias_add(candidate, self._candidate_bias_2)\n\n c = self._activation(candidate)\n new_h = u * new_h_prime + (1 - u) * c\n\n return new_h, new_h", "title": "" }, { "docid": "38dfe1a26dc675e1c0401065753cb1c6", "score": "0.5617738", "text": "def call(self, inputs, state):\n sigmoid = math_ops.sigmoid\n add = math_ops.add\n multiply = math_ops.multiply\n\n transformed_input = math_ops.matmul(inputs, self._kernel)\n transformed_input = nn_ops.bias_add(transformed_input, self._bias)\n in_all = array_ops.split(value=transformed_input, num_or_size_splits=self._k_depth, axis=1)\n\n h = state[0]\n y = h\n for idx in range(self._k_depth):\n hy = math_ops.matmul(y, self._U_dict[idx])\n y = sigmoid(in_all[idx] + hy)\n\n new_h = self._activation(add(h, y))\n new_state = RRNStateTuple(new_h)\n\n return new_h, new_state", "title": "" }, { "docid": "54439a91aa40e49339a02dc35813b2fc", "score": "0.5617707", "text": "def ComputeInteractionState(self, p_int, p_int_1, p_int_2):\n ...", "title": "" }, { "docid": "54439a91aa40e49339a02dc35813b2fc", "score": "0.5617707", "text": "def ComputeInteractionState(self, p_int, p_int_1, p_int_2):\n ...", "title": "" }, { "docid": "e30cc5c9983124e06c8afce2afc2e92a", "score": "0.5616494", "text": "def Viterbi(all_possible_hidden_states,\r\n all_possible_observed_states,\r\n prior_distribution,\r\n transition_model,\r\n observation_model,\r\n observations):\r\n\r\n # TODO: Write your code here\r\n \r\n num_time_steps = len(observations)\r\n w = [None] * num_time_steps\r\n phi = [None] * (num_time_steps-1)\r\n estimated_hidden_states = [None] * num_time_steps\r\n \r\n # computing the transition matrix p(zn|zn-1)\r\n transition_matrix = rover.Distribution()\r\n \r\n # computing the observation matrix p(x,y | z)\r\n observation_matrix = rover.Distribution()\r\n \r\n for state in all_possible_hidden_states:\r\n transition_matrix[state] = transition_model(state)\r\n \r\n observation_matrix[state] = observation_model(state)\r\n\r\n # initial w(z1)\r\n initial_post = observation_matrix[ ( (observations[0]) + (\"stay\",) ) ]\r\n \r\n result = rover.Distribution()\r\n \r\n for state in initial_post:\r\n result[ (state) + (\"stay\",) ] = np.log(initial_post[state] * prior_distribution[state + (\"stay\",)])\r\n \r\n\r\n w[0] = result\r\n \r\n \r\n \r\n # loop\r\n for i in range(1, num_time_steps):\r\n \r\n w[i] = rover.Distribution()\r\n \r\n phi[i-1] = rover.Distribution()\r\n \r\n # loop through all states possible\r\n \r\n # i is zn and i-1 is zn-1\r\n \r\n # get the possible states of zn\r\n for zn in all_possible_hidden_states:\r\n \r\n # set to large negative number\r\n inner_term = -90000000\r\n \r\n # loop through zn-1 values\r\n for zn1 in w[i-1]:\r\n \r\n # probability p(z_n | z_n-1)\r\n probZZ = transition_matrix[zn1]\r\n \r\n # check if in the p(zn|zn-1)\r\n if zn not in probZZ:\r\n continue\r\n \r\n if w[i-1][zn1] == 0:\r\n continue\r\n\r\n \r\n # max term of forward message \r\n if (np.log(probZZ[zn]) + w[i-1][zn1] > inner_term):\r\n inner_term = np.log(probZZ[zn]) + w[i-1][zn1]\r\n phi[i-1][zn] = zn1\r\n \r\n \r\n # once gone through all values of z_n-1, multiply by p(x|z_n)\r\n post_xy = observation_model(zn)\r\n \r\n if (observations[i] == None):\r\n probxy = 1\r\n else:\r\n if observations[i] not in post_xy:\r\n continue\r\n probxy = post_xy[observations[i]]\r\n \r\n \r\n \r\n result = np.log(probxy) + inner_term\r\n \r\n if (inner_term != -90000000):\r\n 
w[i][zn] = result\r\n \r\n # find max z_n\r\n maximum = -90000\r\n arg_max = None\r\n \r\n estimated_hidden_states[-1] = rover.Distribution()\r\n \r\n for key in w[-1]:\r\n if w[-1][key] > maximum:\r\n arg_max = key\r\n maximum = w[-1][key]\r\n \r\n estimated_hidden_states[-1] = arg_max\r\n \r\n\r\n # backtrack\r\n for i in range(num_time_steps-2, -1, -1):\r\n estimated_hidden_states[i] = rover.Distribution()\r\n \r\n estimated_hidden_states[i] = phi[i][arg_max]\r\n \r\n # next term used to backtrack\r\n arg_max = phi[i][arg_max]\r\n \r\n \r\n return estimated_hidden_states", "title": "" }, { "docid": "b1527e8d7df30709aee57f1fe8bfaf88", "score": "0.5615736", "text": "def act(self, state: np.ndarray): \n state = tupleTensor(state)\n \n with torch.no_grad():\n outputs,self.dqn_hidden_state,self.dqn_cell_state =\\\n self.dqn(state,self.dqn_hidden_state,self.dqn_cell_state)\n action = torch.argmax(outputs)\n return action.item()", "title": "" }, { "docid": "12a564bc5a1453914d4e404d12321c31", "score": "0.5614487", "text": "def act(self, state):\n\n state = torch.from_numpy(state).float().unsqueeze(0).to(self.device)\n probs, embed = self.forward(state)\n probs = probs.cpu()\n embed = embed.cpu()\n m = Categorical(probs)\n action = m.sample()\n return action.item(), m.log_prob(action), embed", "title": "" }, { "docid": "02da9e841926f3df293097e7a5faf2d8", "score": "0.56090236", "text": "def transition(self, state):\n #Find the label for the state\n label = self.dict[state]\n #For when I redfine the dictionary in the other part of the lab\n if type(label) is int:\n# print(self.mat[label])\n# print(state)\n result = np.random.multinomial(1, self.mat[:,label])\n index = np.argmax(result)\n for l, i in self.dict.items():\n if index == i:\n return l\n \n #For when I don't need to redefine the dictionary\n for s in label:\n #Transition\n result = np.random.multinomial(1, self.mat[:,s])\n index = np.argmax(result)\n #Check where the transition took me\n for l, i in self.dict.items():\n if index in i:\n return l\n \n \n raise NotImplementedError(\"Problem 2 Incomplete\")", "title": "" }, { "docid": "40e77c78593475d34dbc7fd6b833fe62", "score": "0.559867", "text": "def _dense_ham_term(H):\n h1, (h2L, h2R) = H\n D = h1.shape[0]\n dtype = h1.dtype\n\n E = backend.eye(D, dtype=dtype)\n\n h = backend.ncon([h1, E], [(-1, -3), (-2, -4)])\n for (hl, hr) in zip(h2L, h2R):\n h += backend.ncon([hl, hr], [(-1, -3), (-2, -4)])\n\n return h", "title": "" }, { "docid": "43c9f4ea47f07462371de3763b127fc4", "score": "0.5591284", "text": "def act(self, state):\n if np.random.rand() < self.epsilon:\n return self.action_space.sample() #sample() is a function of spaces.Discrete()\n else:\n state_interval = self.make_interval(state)\n\n q_values = self.q_network.predict(state_interval)\n action = np.argmax(q_values[0])\n return action", "title": "" }, { "docid": "c7c29856a7dc483236b4127bae92ae11", "score": "0.5582921", "text": "def actions(self, state):\n sick = []\n healthy = []\n for i, line in enumerate(state):\n for j, (value, _) in enumerate(line):\n if value == 'S':\n sick.append((Q_action, (i, j)))\n elif value == 'H':\n healthy.append((V_action, (i, j)))\n\n sick_permutations = list(itertools.combinations(sick, min(self.police, len(sick))))\n healthy_permutations = list(itertools.combinations(healthy, min(self.medics, len(healthy))))\n actions = [sick_perm + healthy_perm\n for sick_perm in sick_permutations\n for healthy_perm in healthy_permutations]\n return actions", "title": "" }, { "docid": 
"ed11b945f14fb39bcc7a2e7888525efc", "score": "0.5581947", "text": "def __call__(self, inputs, state, scope=None):\n with tf.variable_scope(scope or type(self).__name__):\n c, h = state\n\n # change bias argument to False since LN will add bias via shift\n concat = tf.nn.rnn_cell._linear([inputs, h], 4 * self._num_units, False)\n\n i, j, f, o = tf.split(1, 4, concat)\n\n # add layer normalization to each gate\n i = ln(i, scope = 'i/')\n j = ln(j, scope = 'j/')\n f = ln(f, scope = 'f/')\n o = ln(o, scope = 'o/')\n\n new_c = (c * tf.nn.sigmoid(f + self._forget_bias) + tf.nn.sigmoid(i) *\n self._activation(j))\n\n # add layer_normalization in calculation of new hidden state\n new_h = self._activation(ln(new_c, scope = 'new_h/')) * tf.nn.sigmoid(o)\n new_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_h)\n\n return new_h, new_state", "title": "" }, { "docid": "e85ff62fff976096c3a0516060ae9662", "score": "0.5581648", "text": "def ham_ising():\n E = np.array([[1, 0], [0, 1]])\n X = np.array([[0, 1], [1, 0]])\n Z = np.array([[1, 0], [0, -1]])\n # hmat = np.kron(X, np.kron(Z, X))\n hmat -= 0.5 * (np.kron(np.kron(X, X), E) + np.kron(E, np.kron(X, X)))\n return np.reshape(hmat, [2] * 6)", "title": "" }, { "docid": "04116fb31389501f4d6b3b71b20f0eba", "score": "0.5579655", "text": "def lift_state(self):\n init_state = self.x0\n ex0 = np.array(self.x0)\n for mu in self.centers:\n init_state.append(logistic(np.linalg.norm(ex0 - mu)**2, self.alpha))\n return np.hstack([np.array([1]), init_state])", "title": "" }, { "docid": "3cbd4c21e63889886f3afd106d678c0a", "score": "0.55784917", "text": "def no_actions_first_and_second_clauses(states_encoding, t):\n\n if t != 1 and t != 2:\n print(\"timestamp does not match clauses generation method\")\n return\n\n formula = []\n formula += unique_state_clauses(states_encoding, t)\n H = states_encoding[\"H\"]\n S = states_encoding[\"S\"]\n U = states_encoding[\"U\"]\n\n X = H.shape[0]\n Y = H.shape[1]\n for x in range(X):\n for y in range(Y):\n\n # unpopulated[t] ==> unpopulated[t-1]\n # unpopulated[t-1] ==> unpopulated[t]\n formula.append([-U[x, y, t], U[x, y, t-1]])\n formula.append([-U[x, y, t-1], U[x, y, t]])\n\n # healthy[t] ==> healthy[t-1]\n formula.append([-H[x, y, t], H[x, y, t-1]])\n\n # sick[t-1] ==> sick[t]\n formula.append([-S[x, y, t-1], S[x, y, t]])\n # sick[t] ==> sick[t-1] | healthy[t-1]\n formula.append([-S[x, y, t], S[x, y, t-1], H[x, y, t-1]])\n\n adjacent_neighbors = neighbors((x, y), (X, Y))\n # construct gradually the clause of (was_sick(t-1) OR one_of_neighbor_was_sick(t-1))\n sick_neighbors = []\n\n for neighbor_x, neighbor_y in adjacent_neighbors:\n # healthy[t] ==> ~sick_neighbor[t-1]\n formula.append([-H[x, y, t], -S[neighbor_x, neighbor_y, t-1]])\n # sick_neighbor[t-1] & healthy[t-1] ==> S[t]\n formula.append([-S[neighbor_x, neighbor_y, t-1], -H[x, y, t-1], S[x, y, t]])\n sick_neighbors.append(S[neighbor_x, neighbor_y, t-1])\n\n formula.append([-S[x, y, t], S[x, y, t-1]] + sick_neighbors)\n # H[t-1] & ~S[x-1, y, t-1] & ~S[x+1, y, t-1] & ~ S[x, y+1, t-1] ... 
==> H[t]\n formula.append([-H[x, y, t-1], H[x, y, t]]+sick_neighbors)\n\n return formula", "title": "" }, { "docid": "80b55e4efe62148cf2b7ec59db507e7a", "score": "0.5577992", "text": "def get_dyn_theory(env, state):\n assert env.problem == OneMax\n assert env.action_description == \"lbd\"\n assert env.state_description == \"n, f(x)\"\n assert len(env.state_functions) == 2\n\n return np.asarray([np.sqrt(env.x.n / (env.x.n - state[1]))], dtype=np.float32)", "title": "" }, { "docid": "6d5725a613f769b99f01d4b4469638ed", "score": "0.55727875", "text": "def getActionProbabilities(self, state: int) -> np.ndarray:\n distribution = []\n denominator = sum([np.exp(self._sigma * self._theta[state][i]) for i in range(self.numActions)])\n for i in range(self.numActions):\n numerator = np.exp(self._sigma * self._theta[state][i])\n distribution.append(numerator / denominator)\n return np.array(distribution)", "title": "" }, { "docid": "9eba66fff22090b2f8011c4272d08818", "score": "0.5571411", "text": "def _heom_state_dictionaries(dims, excitations):\n nstates = 0\n state2idx = {}\n idx2state = {}\n\n for state in state_number_enumerate(dims, excitations):\n state2idx[state] = nstates\n idx2state[nstates] = state\n nstates += 1\n\n return nstates, state2idx, idx2state", "title": "" }, { "docid": "cef9f9492f2834e707b480176989d1be", "score": "0.55690783", "text": "def action_distribution(self, state):\n raise NotImplementedError()", "title": "" }, { "docid": "4183db0d99516a080c915486f81dfc74", "score": "0.5557254", "text": "def reward(self, state):\n test_pixels = self.get_underlying_pixels(state)\n sim = self.similarity(self.agent[self.indices], test_pixels) #TODO scale this? \n return sim", "title": "" }, { "docid": "160bcf209043a339d65ccfb731152936", "score": "0.5556321", "text": "def __call__(self, inputs, state, scope=None):\n with tf.variable_scope(scope or type(self).__name__):\n c, h = state\n\n # change bias argument to False since LN will add bias via shift\n concat = tf.nn.rnn_cell._linear(\n [inputs, h], 4 * self._num_units, False)\n # ipdb.set_trace()\n\n i, j, f, o = tf.split(1, 4, concat)\n\n # add layer normalization to each gate\n i = ln(i, scope='i/')\n j = ln(j, scope='j/')\n f = ln(f, scope='f/')\n o = ln(o, scope='o/')\n\n new_c = (c * tf.nn.sigmoid(f + self._forget_bias) +\n tf.nn.sigmoid(i) * self._activation(j))\n\n # add layer_normalization in calculation of new hidden state\n new_h = self._activation(\n ln(new_c, scope='new_h/')) * tf.nn.sigmoid(o)\n new_state = tf.nn.rnn_cell.LSTMStateTuple(new_c, new_h)\n return new_h, new_state", "title": "" }, { "docid": "439a23fc1be390bc1b8f84b153324a71", "score": "0.5550651", "text": "def act(self, state):\n if np.random.rand() < self.epsilon:\n return random.randrange(self.action_size)\n return np.argmax(self.predict(state)[0])", "title": "" }, { "docid": "5d2bc3d257b1d21c5e56b2b4979ea457", "score": "0.5548322", "text": "def run_baum_welch_algorithm(self, state_features, timeseries_observations, cycle_states):\n\n # create the initial transition matrix\n transition_matrix = self.create_transition_matrix()\n\n # set cumulative probability, this will be used as a breaking criteria for the EM algorithm\n cummulative_probability = np.inf\n\n for i in range(10):\n # create the observation probabilities given the initial features\n observation_probabilities = self.create_observation_probabilities(state_features, timeseries_observations, cycle_states)\n\n # run forward and backward pass through\n forward_probabilities, forward_trellis = 
self.run_forward_pass(transition_matrix, observation_probabilities)\n backward_probabilities, backward_trellis = self.run_backward_pass(transition_matrix,\n observation_probabilities)\n backward_trellis.reverse()\n backward_probabilities.reverse()\n\n # update lambda parameter (probability of state i, time j)\n numerator = np.multiply(np.array(forward_probabilities), np.array(backward_probabilities))\n\n denominator = sum(np.multiply(np.array(forward_probabilities), np.array(backward_probabilities)).T)\n _lambda = []\n for j in range(len(numerator)):\n _lambda.append((numerator[j, :].T / denominator[j]).T)\n\n # update epsilon parameter (probability of moving for state i to state j)\n numerator = np.multiply(forward_trellis[1:], backward_trellis[:-1])\n epsilon = []\n for g in range(len(numerator)):\n denominator = np.sum(numerator[g, :, :])\n epsilon.append((numerator[g, :, :].T / denominator).T)\n\n # Update the transition matrix and observation probabilities for the next iteration\n transition_matrix = ((sum(epsilon) / sum(_lambda))).T / sum((sum(epsilon) / sum(_lambda)))\n\n # Update the state space parameters\n observation_probabilities = _lambda\n state_ind = 0\n for state in state_features:\n param_set = state_features[state]\n state_weight = [0]*len(set(cycle_states))\n state_var = [0]*len(set(cycle_states))\n state_sum = [0]*len(set(cycle_states))\n\n for ind in range(len(timeseries_observations)):\n cycle = cycle_states[ind]\n state_weight[cycle] += _lambda[ind][state_ind]\n state_sum[cycle] += timeseries_observations[ind] * _lambda[ind][state_ind]\n state_var[cycle] += _lambda[ind][state_ind] * np.sqrt(\n (timeseries_observations[ind] - param_set.loc[param_set['CYCLE']==cycle, 'SEASONALITY_MU'].item()) ** 2)\n\n state_mu_set = np.divide(state_sum, state_weight).tolist()\n state_sigma_set = np.divide(state_var, state_weight).tolist()\n cycle_ind = list(set(cycle_states))\n cycle_ind.sort()\n param_set_new = pd.DataFrame(columns=['CYCLE', 'SEASONALITY_MU', 'SEASONALITY_SIGMA'], data=\n np.array([cycle_ind, state_mu_set, state_sigma_set]).T)\n\n state_features.update({state: param_set_new})\n\n state_ind += 1\n\n cummulative_probability_new = np.sum(_lambda)\n pcnt_change = (cummulative_probability_new-cummulative_probability)/cummulative_probability\n if pcnt_change < 0.01:\n break\n else:\n cummulative_probability = cummulative_probability_new\n\n print('Fitted transition matrix: ')\n print(transition_matrix)\n print('Fitted state features: ')\n print(state_features)\n\n # multiply the probabilities to get the overall probability. 
Convert to state using MAP\n observation_probabilities = _lambda\n\n return transition_matrix, state_features, observation_probabilities", "title": "" }, { "docid": "08d35e4730c3dcdb9f69f6eb2563a4da", "score": "0.55421877", "text": "def compute_state(self):\r\n\r\n self.predicted_state[:, [0]] = self.vehicle.state\r\n u = []\r\n for idx in range(1, self.horizon+1):\r\n dstate, control = self.propagate(self.predicted_state[:, [0]])\r\n\r\n self.predicted_state[:, [idx]] = np.matmul(self.vehicle.system_matrix(), self.predicted_state[:, [idx-1]])\\\r\n + dstate\r\n u.append(control)\r\n self.update_time()\r\n print(\"----------------------------------\")\r\n print(\"Current initial state and horizon: \\n\")\r\n print(self.predicted_state[self.xidx, :], \"\\n\", self.predicted_state[self.yidx, :])\r\n print(\"----------------------------------\")\r\n current_initial = self.predicted_state[:self.num_states, [1]]\r\n\r\n self.vehicle.update(current_initial, u[0])\r\n return self.predicted_state", "title": "" }, { "docid": "10207053dd706df12c49568975759a62", "score": "0.5530769", "text": "def no_actions_clauses(states_encoding, t):\n if t < 3:\n print(\"timestamp does not match clauses generation method\")\n return\n\n formula = []\n formula += unique_state_clauses(states_encoding, t)\n\n H = states_encoding[\"H\"]\n S = states_encoding[\"S\"]\n U = states_encoding[\"U\"]\n\n X = H.shape[0]\n Y = H.shape[1]\n\n for x in range(X):\n for y in range(Y):\n\n # if unpopulated[t] ==> unpopulated[t-1]\n formula.append([-U[x, y, t], U[x, y, t-1]])\n # if unpopulated[t-1] ==> unpopulated[t]\n formula.append([-U[x, y, t-1], U[x, y, t]])\n\n # S[t-1] & S[t-2] & S[t-3] ==> H[t]\n formula.append([-S[x, y, t-1], -S[x, y, t-2], -S[x, y, t-3], H[x, y, t]])\n\n # S[t-1] & ~S[t-2] ==> S[t]\n formula.append([-S[x, y, t-1], S[x, y, t-2], S[x, y, t]])\n\n adjacent_neighbors = neighbors((x, y), (X, Y))\n\n # H[x, y, t] ==> (S[t-1] & S[t-2] & S[t-3]) | (H[x, y, t-1] & ~S[x+1, y, t-1] & ~S[x-1, y, t-1]...)\n healthy_condition_prefixes = [[-H[x, y, t], S[x, y, t-1]], [-H[x, y, t], S[x, y, t-2]],\n [-H[x, y, t], S[x, y, t-3]]]\n healthy_condition_suffixed = [[H[x, y, t-1]]]\n sick_neighbors = []\n for neighbor_x, neighbor_y in adjacent_neighbors:\n healthy_condition_suffixed.append([-S[neighbor_x, neighbor_y, t-1]])\n\n # H[t-1] & sick_neighbor[t-1] ==> S[t]\n formula.append([-H[x, y, t-1], -S[neighbor_x, neighbor_y, t-1], S[x, y, t]])\n\n # build or clause between sick neighbors sick conditions\n sick_neighbors.append(S[neighbor_x, neighbor_y, t-1])\n\n formula += [prefix + suffix for prefix in healthy_condition_prefixes\n for suffix in healthy_condition_suffixed]\n\n # S[t] ==> (S[t-1] & (~S[t-2] | ~ S[t-3]) | (H[t-1] & (S[x+1, y, t-1] | S[x-1, y, t-1]...))\n sick_condition_prefixes = [[-S[x, y, t], S[x, y, t-1]], [-S[x, y, t], -S[x, y, t-2], -S[x, y, t-3]]]\n sick_condition_suffixes = [[H[x, y, t-1]], sick_neighbors]\n formula += [prefix + suffix for prefix in sick_condition_prefixes\n for suffix in sick_condition_suffixes]\n\n return formula", "title": "" }, { "docid": "f0f44953b32b476eb41e4acc072e8c7d", "score": "0.5526509", "text": "def call(self, inputs, state):\n\n gate_inputs = tf.matmul(state, self._gate_kernel)\n gate_inputs = tf.nn.bias_add(gate_inputs, self._gate_bias)\n\n if self._layer_norm:\n gate_inputs = common_layers.split_last_dim(gate_inputs, 2)\n mean = tf.reduce_mean(gate_inputs, axis=[-1], keepdims=True)\n variance = tf.reduce_mean(tf.square(gate_inputs - mean), axis=[-1], keepdims=True)\n norm_x = 
(gate_inputs - mean) * tf.rsqrt(variance + self._ln_epsilon)\n norm_x = common_layers.combine_last_two_dims(norm_x)\n gate_inputs = norm_x * self._ln_scale + self._ln_bias\n\n value = tf.sigmoid(gate_inputs)\n r, u = tf.split(value=value, num_or_size_splits=2, axis=1)\n\n r_state = r * state\n\n candidate = tf.matmul(r_state, self._candidate_kernel)\n candidate = tf.nn.bias_add(candidate, self._candidate_bias)\n\n c = self._activation(candidate)\n if self._dropout_rate:\n c = tf.nn.dropout(c, keep_prob=1-self._dropout_rate)\n new_h = u * state + (1 - u) * c\n return new_h, new_h", "title": "" }, { "docid": "54d666e7c4479fe5d688c66d2f7f8fe4", "score": "0.5525123", "text": "def actions(self, state):\n # find all s and then insert the new ss to 'seakness_index_map'\n\n # make S's index list\n comb_list_S = get_comb_list(self, 'S', state,2,self.zoc)\n comb_list_S1 = get_comb_list(self, 'S', state, 1, self.zoc)\n # make H's index list\n comb_list_H = get_comb_list(self, 'H', state,1,self.zoc)\n\n # combined S an H\n if comb_list_H==-1 or comb_list_S==-1:\n combHS2=[]\n else:\n combHS2 = list(product(comb_list_S, comb_list_H))\n if comb_list_H == -1 or comb_list_S1 == -1:\n combHS1 = []\n else:\n combHS1 = list(product(comb_list_S1, comb_list_H))\n if combHS1 != -1:\n combHS2.extend(combHS1)\n if comb_list_S != -1:\n combHS2.extend(comb_list_S)\n if comb_list_S != -1:\n combHS2.extend(comb_list_S)\n if comb_list_H!=-1:\n combHS2.extend(comb_list_H)\n return tuple(combHS2)", "title": "" }, { "docid": "d70d9aa325da8a50a773e69ebbf86b94", "score": "0.55226743", "text": "def IMU_3DOF(state_t0,state_t1,T):\n # num_state = len(state_t0)\n sigma_theta = 0.001\n sigma_x = 0.01\n sigma_y = 0.01\n dtheta = state_t1[5] + sigma_theta*np.random.randn(1,1)\n accel_x = (state_t1[3] - state_t0([3]))/T + sigma_x*np.random.randn(1,1)\n accel_y = (state_t1[5] - state_t0([5]))/T + sigma_y*np.random.randn(1,1)\n\n return (dtheta,accel_x,accel_y)", "title": "" }, { "docid": "6aef5f47114586753cfc1bdfd2a3077f", "score": "0.55225915", "text": "def GetAction(state):\n p = 1/actionSize\n if len(state.shape)==3:\n probs =np.full((1,actionSize),p)\n else:\n probs =np.full((state.shape[0],actionSize),p)\n actions = np.array([np.random.choice(probs.shape[1], p=prob / sum(prob)) for prob in probs])\n return actions", "title": "" }, { "docid": "4607d651d7e6712bc381744d33b876b0", "score": "0.55015206", "text": "def result(self, state, actions):\n state = state_to_list(state)\n\n for action, (i, j) in actions:\n if action == Q_action:\n state[i][j] = ('Q', 3)\n\n elif action == V_action:\n state[i][j] = ('I', 0)\n\n infect = []\n for i, row in enumerate(state):\n for j, value in enumerate(row):\n if value[0] == 'S':\n for k, l in self.get_neighbors(i, j):\n if state[k][l][0] == 'H':\n infect.append((k, l))\n\n for i, j in infect:\n state[i][j] = ('S', 4) # 4 since this is going to be demoted immediately\n\n for i, row in enumerate(state):\n for j, (value, days) in enumerate(row):\n if value in ['S', 'Q']:\n if days == 1:\n state[i][j] = ('H', 0)\n else:\n state[i][j] = (value, days - 1)\n\n return state_to_tuple(state)", "title": "" }, { "docid": "a2d2defb15a4080a1c22e131ad238d03", "score": "0.54844165", "text": "def calculate_gs(p):\n state_table = generate_state_table(p)\n H = make_Hamiltonian(p, state_table)\n w, v = scipy.sparse.linalg.eigsh(H, k=1, which='SA')\n\n return w[0], v[:, 0], state_table", "title": "" }, { "docid": "f90ab98973a5df59e1f8c07afdfbac95", "score": "0.5483582", "text": "def forward(observed_states):\n L = 
len(observed_states[0])\n matrix = [[] for i in range (0, L)]\n for i in range (0, L):\n for j in range(0, 4):\n if j == 0: state = \"coding\"\n if j == 1: state = \"regulatory\"\n if j == 2: state = \"hetero\"\n if j == 3: state = \"other\"\n emission = math.log10(E1[state][observed_states[0][i]]) + math.log10(E2[state][observed_states[1][i]])\n # emission = math.pow(10,emission)\n if i == 0:\n ## matrix[i][j] = P[state]\n # matrix[i].insert(j, emission * P[state])\n init = emission + math.log10(P[state])\n init = math.pow(10, init)\n matrix[i].insert(j, init)\n else:\n # print(\"first, \", E1[state][observed_states[0][i]], \"second, \", E2[state][observed_states[1][i]])\n summation = 0\n prevState = \"\"\n for k in range(0, 4):\n if k == 0: prevState = \"coding\"\n if k == 1: prevState = \"regulatory\"\n if k == 2: prevState = \"hetero\"\n if k == 3: prevState = \"other\"\n\n # summation += matrix[i - 1][k] * A[prevState][state]\n if matrix[i - 1][k] == 0 or A[prevState][state] == 0:\n summation += 0\n else:\n product = math.log10(matrix[i - 1][k]) + math.log10(A[prevState][state])\n product = math.pow(10, product)\n summation += product\n\n # matrix[i].insert(j, emission * summation)\n if summation == 0:\n entry = 0\n else:\n entry = emission + math.log10(summation)\n entry = math.pow(10, entry)\n matrix[i].insert(j, entry)\n\n likelihood = 0\n for i in range(0, 4):\n likelihood += matrix[L - 1][i]\n print(\"likelihood forward\", likelihood)\n\n return matrix", "title": "" }, { "docid": "301646394b9e378532d8b3ecb1e2853a", "score": "0.5477483", "text": "def creation(i,state_in):\n coef = np.sqrt(state_in[i]+1)\n state_out=state_in.copy()\n state_out[i] = state_out[i]+1\n return state_out,coef", "title": "" }, { "docid": "3580525e6888641ab93969a2a71b9259", "score": "0.5475777", "text": "def __call__(self, inputs, state, scope=None):\n with vs.variable_scope(scope or type(self).__name__): # \"BasicLSTMCell\"\n # Parameters of gates are concatenated into one multiply for efficiency.\n if self._state_is_tuple:\n # batch_size X num_cand * hidden_size, batch_size X num_cand * hidden_size\n c, h = state\n else:\n c, h = array_ops.split(1, 2, state)\n hidden_size = int(self._num_units / self._num_cand)\n # batch_size X num_cand X hidden_size\n h_flat = array_ops.reshape(h, [-1, self._num_cand, hidden_size])\n # batch_size X hidden_size\n h_sum = tf.reduce_mean(h_flat, reduction_indices=1)\n # batch_size * num_cand X hidden_size\n with vs.variable_scope('forget_gate'):\n W_f = tf.get_variable(\"W_f\", [hidden_size, hidden_size])\n U_f = tf.get_variable(\"U_f\", [hidden_size, hidden_size])\n b_f = tf.get_variable(\"b_f\", [hidden_size],\n initializer=tf.constant_initializer(1.0))\n f_x = tf.matmul(tf.reshape(inputs, [-1, hidden_size]), W_f)\n f_h = tf.reshape(tf.matmul(tf.reshape(h_flat,\n [-1, hidden_size]),\n U_f),\n [-1, 1, self._num_cand, hidden_size])\n f_x = tf.tile(tf.reshape(f_x,\n [-1, self._num_cand, 1, hidden_size]),\n [1, 1, self._num_cand, 1])\n # batch_size * num_cand * num_cand * hidden_size\n f_xh = sigmoid(f_x + f_h + b_f)\n # f_x = _linear(array_ops.reshape(inputs, [-1, hidden_size]),\n # hidden_size, True, 1.0, 'InputGate1')\n # batch_size * num_cand X hidden_size\n # f_h = array_ops.reshape(_linear(array_ops.reshape(h,\n # [-1, hidden_size]),\n # hidden_size, True, 1.0, 'InputGate2'),\n # [-1, self._num_cand, hidden_size])\n #\n # f_x = array_ops.reshape(array_ops.tile(f_x, [1, self._num_cand]),\n # [-1, self._num_cand, self._num_cand, hidden_size])\n # f_h = 
array_ops.reshape(array_ops.tile(f_h, [1, self._num_cand, 1]),\n # [-1, self._num_cand, self._num_cand, hidden_size])\n # f_xh = array_ops.transpose(f_x + f_h, [1, 0, 2, 3])\n\n with vs.variable_scope('update'):\n W_a = tf.get_variable(\"W_a\", [hidden_size, hidden_size * 3])\n U_a = tf.get_variable(\"U_a\", [hidden_size, hidden_size * 3])\n b_in = tf.get_variable(\"b_in\", [hidden_size])\n b_o = tf.get_variable(\"b_o\", [hidden_size])\n b_u = tf.get_variable(\"b_u\", [hidden_size])\n a_x = tf.matmul(tf.reshape(inputs, [-1, hidden_size]), W_a)\n a_h = tf.matmul(h_sum, U_a)\n i_x, o_x, u_x = tf.split(1, 3, a_x)\n i_h, o_h, u_h = tf.split(1, 3, a_h)\n # batch_size X num_cand X hidden_size\n i_xh = sigmoid(tf.reshape(i_x, [-1, self._num_cand, hidden_size]) \\\n + tf.reshape(i_h, [-1, 1, hidden_size]) + b_in)\n o_xh = sigmoid(tf.reshape(o_x, [-1, self._num_cand, hidden_size]) \\\n + tf.reshape(o_h, [-1, 1, hidden_size]) + b_o)\n u_xh = tanh(tf.reshape(u_x, [-1, self._num_cand, hidden_size]) \\\n + tf.reshape(u_h, [-1, 1, hidden_size]) + b_u)\n\n # o_x = _linear(array_ops.reshape(inputs, [-1, hidden_size]), 3 * hidden_size, True)\n # with vs.variable_scope('UpdateGates2'):\n # o_h = _linear(array_ops.reshape(h_sum, [-1, hidden_size]), 3 * hidden_size, True)\n # batch_size * num_cand * hidden_size\n # o = array_ops.reshape(o_x, [-1, self._num_cand, 3 * hidden_size]) \\\n # + array_ops.reshape(tf.tile(o_h, [1, self._num_cand]),\n # [-1, self._num_cand, 3 * hidden_size])\n # i_xh, o_xh, u_xh = array_ops.split(2, 3, o)\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n new_c =tf.reduce_sum(\n array_ops.reshape(c,[-1, 1, self._num_cand, hidden_size])\n * f_xh, reduction_indices=2) + i_xh * u_xh\n new_h = tanh(new_c) * o_xh\n new_c = array_ops.reshape(new_c, [-1, hidden_size * self._num_cand])\n new_h = array_ops.reshape(new_h, [-1, hidden_size * self._num_cand])\n if self._state_is_tuple:\n new_state = LSTMStateTuple(new_c, new_h)\n else:\n new_state = array_ops.concat(1, [new_c, new_h])\n return new_h, new_state", "title": "" }, { "docid": "f177517e3d49689d4bcbb9f45fa70695", "score": "0.54749775", "text": "def forward(self, state):\n x = np.dot(state, self.w)\n # softmax\n x = np.exp(x)/sum(np.exp(x))\n return x", "title": "" }, { "docid": "b0ab2c7256a25ab59f166aee41eb5d66", "score": "0.5473587", "text": "def evolve(p, state_table, state, kind=\"list\", trotterised=False, xinit=False, xmeas=False):\n if kind == \"ket\":\n psi0 = state\n elif kind == \"list\":\n # if we parsed a product state, construct ket by identifying the\n # corresponding number of the basis state and putting a 1 into the ket\n psi0 = np.zeros((len(state_table), 1), dtype=complex)\n psi0[state_table.index(state_to_int(p, state))] = 1.\n elif kind == \"int\":\n psi0 = np.zeros((len(state_table), 1), dtype=complex)\n psi0[state_table.index(state)] = 1.\n\n if xinit:\n H_matrix = local_H_matrix(p, state_table)\n psi0 = H_matrix.dot(psi0)\n\n time = np.linspace(p['t_initial'], p['t_final'],\n int(p['t_final'] / p['dt'] + 1))\n\n # make dictionary with measurement operators\n meas = {}\n for i in range(int(p['N'])):\n meas['Zi' + ' Site ' + str(i)\n ] = Zi_matrix(p, i, state_table)\n meas['Yi' + ' Site ' + str(i)\n ] = Yi_matrix(p, i, state_table)\n meas['Xi' + ' Site ' + str(i)\n ] = Xi_matrix(p, i, state_table)\n\n sim = {}\n sim['Time'] = time\n for key in meas.keys():\n sim[key] = np.zeros(np.shape(time))\n sim['Total Z'] = np.zeros(np.shape(time))\n sim['Total Y'] = np.zeros(np.shape(time))\n sim['Total X'] = 
np.zeros(np.shape(time))\n\n if trotterised:\n H_list = make_trotter_Hamiltonian(p, state_table)\n else:\n H_list = [make_Hamiltonian(p, state_table)]\n\n # construct time-evolution operators for a single time-step\n U_list = [LA.expm(-1.j * H.tocsc() * p['dt']) for H in H_list]\n\n # Time Evolution\n for i in range(len(time)):\n # define initial (t=0) state\n if i == 0:\n psi = psi0\n\n # measurements\n for operator in meas.keys():\n expct = expct_val(meas[operator], psi) # [0][0]\n\n if np.imag(expct) < 1e-12:\n sim[operator][i] = np.real(expct)\n else:\n print(\"Imaginary Measurement %s\" % (operator))\n\n # apply U to current state psi to get psi(t+dt) = U * psi(t)\n for U in U_list:\n psi = U.dot(psi)\n\n for key in sim.keys():\n if key[:2] == \"Zi\":\n sim['Total Z'] += sim[key]\n elif key[:2] == \"Yi\":\n sim['Total Y'] += sim[key]\n elif key[:2] == \"Xi\":\n sim['Total X'] += sim[key]\n\n return sim, state_table", "title": "" }, { "docid": "040bb8c1c8154722b89b7726987bed2b", "score": "0.5473407", "text": "def anihilation(i,state_in):\n if not (state_in[i] == 0):\n coef = np.sqrt(state_in[i])\n state_out=state_in.copy()\n state_out[i]=state_out[i]-1\n stop = False\n return state_out,coef,stop\n else:\n #print('This state cant be lowered at', i,'!', )\n stop = True \n state_out= []\n coef=0\n return state_out,coef,stop", "title": "" }, { "docid": "12badc73be8c3bd67d63bf51657ec725", "score": "0.5472945", "text": "def H(state: SparseSim,\n qubit: int) -> None:\n\n stabs = state.stabs\n\n # Change the sign appropriately\n\n # X and Z -> -1\n # ----------\n stabs.signs_minus ^= stabs.col_x[qubit] & stabs.col_z[qubit]\n\n # Update Paulis\n # -------------------------------------------------------------------\n for g in state.gens:\n\n # Swap X and Z for rows\n xonly = g.col_x[qubit] - g.col_z[qubit]\n\n zonly = g.col_z[qubit] - g.col_x[qubit]\n\n for i in xonly:\n g.row_x[i].discard(qubit)\n g.row_z[i].add(qubit)\n\n for i in zonly:\n g.row_z[i].discard(qubit)\n g.row_x[i].add(qubit)\n\n # Swap X and Z for cols\n g.col_x[qubit], g.col_z[qubit] = g.col_z[qubit], g.col_x[qubit]", "title": "" }, { "docid": "fcea6487e36d1cecda3c5f71129f43eb", "score": "0.5468132", "text": "def getManhattanDistanceComponents(self, state):\n\t\treshaped_state = np.reshape(state, self.outdim)\n\t\t# np_state = np.array([[j for j in i.split('\\t')] for i in state.splitlines()])\n\t\tavatar = 1\n\t\t## Example: to find what ID a box would have, you'd just do ...index(\"box\"). 
This is the\n\t\t## Schaul function for figuring the sprite IDs.\n\t\tgoal = 2**(1+sorted(self._obstypes.keys())[::-1].index(\"goal\"))\n\t\tavatar_loc = None\n\t\tgoal_loc = None\n\t\tnumRows, numCols = self.outdim\n\t\tfor i in range(numRows): \n\t\t\tfor j in range(numCols):\n\t\t\t\tif (reshaped_state[i,j] / goal) % 2 == 1:\n\t\t\t\t\tgoal_loc = (i,j)\n\n\t\t\t\tif (reshaped_state[i,j]/ avatar) % 2 == 1:\n\t\t\t\t\tavatar_loc = (i,j)\n\n\t\treturn avatar_loc[0]-goal_loc[0], avatar_loc[1] - goal_loc[1]", "title": "" }, { "docid": "9a44f0b3553645e8f860d46854bc194f", "score": "0.545734", "text": "def tclab_step(self, state, time, action, dist):\n heater_pwm = action\n sensor_temp, heater_temp = state\n\n dth = (self.surf_area * self.beta1 * (self.k_d * dist) ** self.beta2 * (\n self.amb_temp - heater_temp) + self.emissivity * self.boltzmann\n * self.surf_area * (self.amb_temp ** 4 - heater_temp ** 4)\n + self.alpha * heater_pwm) / (self.mass * self.cp)\n dtc = (-sensor_temp + heater_temp) / self.tau_hc\n\n new_state = np.zeros(2)\n new_state[0] = dtc\n new_state[1] = dth\n return new_state", "title": "" }, { "docid": "6e51e3c8410985c94b5d12315ff7fb4d", "score": "0.54566973", "text": "def act(self, state):\n state = np.reshape(state, [-1, self.state_size])\n action = self.actor_local.model.predict(state)[0]\n # add some noise for exploration\n return list(action + self.noise.sample())\n return action", "title": "" }, { "docid": "37ef0a1dda4a5f208c9530c289563c2b", "score": "0.54542744", "text": "def get_action(self, state:np.ndarray)->int:\n x = self.basis.encode(state) # Computes the basis function representation of the state features\n p = self.get_action_probabilities(x) # computes the probabilities of each action\n a = int(np.random.choice(range(p.shape[0]), p=p, size=1)) # samples the action from p\n\n return a", "title": "" }, { "docid": "27bb87e54eb058ab5f0a4061306398be", "score": "0.5453973", "text": "def state_transform(state):\n return state.mean(2)[None, ...] / 256.0", "title": "" }, { "docid": "7428bdc2ef95a73f70b247fbf9a89c44", "score": "0.544846", "text": "def local_H_matrix(p, state_table):\n single_H = np.array([[1., 1.], [1., -1.]]) / np.sqrt(2)\n H_mat = np.eye(1)\n for i in range(p['N']):\n H_mat = np.kron(single_H, H_mat)\n return H_mat", "title": "" }, { "docid": "7fa34013afda72da1e3fc9477101db97", "score": "0.54361", "text": "def h1(state: State):\n delta = [state.board.end_state[i] - state.board.tiles[i] for i in state.board.tiles]\n return sum(x != 0 for x in delta)", "title": "" }, { "docid": "b1a41231b13a3b6866f87753f1e287da", "score": "0.54301536", "text": "def work(self):\r\n return self.m_act * self.eff_mech * (self.inlet.h - self.outlet.h)", "title": "" }, { "docid": "b5002d9c33efb936bd11ed68682a96be", "score": "0.5418533", "text": "def act(self, state, sess):\n probs = sess.run(self.a_prob, {self.states_St: np.expand_dims(state.flatten(), axis=0)})\n a_t = 1 * (np.random.rand() < probs)\n return a_t.squeeze()", "title": "" }, { "docid": "e4d3a28d5d011def3ab71de89c1198fb", "score": "0.5418465", "text": "def get_action(self, state):\n tens = torch.tensor(state, dtype = torch.float32).permute(2,0,1)\n logits, _ = self.net(tens)\n probs = torch.softmax(logits, dim=-1)\n probs = probs.squeeze()\n return np.random.choice(4, p = probs.detach().numpy())", "title": "" }, { "docid": "ef3e281d233f2a0acae83413984ffc6c", "score": "0.54177237", "text": "def airportMDP():\n # States -------------------------------------------\n # order of states goes: G_1, G_2, ... 
T_1, T_2,...\n # gate ordered from concourse A/B/C/D satellite N/S \n # -------------------------------------------\n S = 6 + 77;\n \n # Actions -------------------------------------------\n # actions: going within each concourse/satellite, \n # going to neighbouring concourse/satellite\n # action order: first action is to stay in concourse\n # subsequent actions are to move to neighbouring state\n # -------------------------------------------\n A = np.zeros(S);\n # concourse A\n A[0] = 3;\n # concourse B\n A[1] = 4;\n # concourse C\n A[2] = 4;\n # concourse D\n A[3] = 3;\n # satellite N\n A[4] = 3;\n # satellite S\n A[5] = 3;\n # the gate states T_i\n A[6:] = 1;\n# print (A)\n actionSize = int( max(A));\n # corresponding terminal states\n Terminals = np.array([14,12,12, 11,14,14]);\n # neighbouring states \n Neighbours = [[1,5], # A is connected to (B, S)\n [0,2,5], # B is connected to (A, C, S)\n [1,3,4], # C is connected to (B, D, N),\n [2,4], # D is connected to (C, N)\n [2,3], # N is connected to (C, D)\n [0,1]]; # S is connected to (A, B)\n \n # probability kernel -------------------------------------------\n P = np.zeros((S,S,actionSize));\n for s in range(S):\n if s <= 5: # state is a concourse state\n # action 0: state in concourse state\n P[s,s,0] = 0.5;\n termStart = int(6 + sum(Terminals[0:s]));\n termEnd = termStart + Terminals[s];\n P[termStart:termEnd, s, 0] = 0.5/Terminals[s];\n# print (\"state \", s );\n# print (P[:,s,0])\n # other actions: going to neighbouring concourses\n neighbourAction = 1;\n neighbours = len(Neighbours[s]) - 1;\n for desti in Neighbours[s]:\n P[desti, s, neighbourAction] = 0.9;\n for otherNeighB in Neighbours[s]:\n if otherNeighB != desti:\n P[otherNeighB, s, neighbourAction] = 0.1/neighbours;\n neighbourAction += 1;\n else: # state is a gate state\n P[s,s,0] = 1;\n \n # cost -------------------------------------------\n # l(y) = Cy + D, C: S x A, D : S x A\n C = np.zeros((S,actionSize));\n D = np.zeros((S,actionSize));\n # cost of non-existent actions is infinity \n for s in range(S):\n action = 0;\n # cost of feasible action\n while action < A[s]:\n if action == 0:\n if s <= 5: # cost of staying in concourse s\n C[s, action]= 50;\n else: # cost of staying in gate s\n C[s, action] = 0;\n D[s, action] = 1;\n else:\n # going to neighbouring states depends on how \n # many other planes are traversing the road\n C[s, action] = 1; \n # gas money of going else where\n D[s, action] = 3;\n action += 1;\n # cost of non-existent actions is infinity\n someInf = 10000; # apparently np.inf doesn't work\n while action < actionSize: \n C[s, action] = 0;\n D[s, action] = someInf;\n action +=1; \n return P, C, D, S, actionSize", "title": "" }, { "docid": "d36c2cd1d7924bb1a36c987ca0628d94", "score": "0.5408316", "text": "def get_action(self, state):\n\t\tq_vals = self._get_q_vals(state)\n\t\tnum = np.random.uniform()\n\n\t\tif num < self._epsilon:\n\t\t\treturn np.random.choice(self.k)\n\t\telse:\n\t\t\treturn np.argmax(q_vals)", "title": "" }, { "docid": "567e3d4ae89622d60a370fdd7c747826", "score": "0.5407528", "text": "def solve(t, state):\n \n p= state[0]; q= state[1]; r= state[2]; vA= state[3]; vB= state[4]\n pd= state[5]; qd= state[6]; rd= state[7]; vAd= state[8]; vBd= state[9]\n wA= state[10]; wB= state[11] \n \n bA= cross(wA, IA.dot(wA))-mA*cross(g*z+cross(wA, cross(wA, p)), p)-mB*cross(g*z+cross(wA, cross(wA, q))+cross(wB, cross(wB, r)), q)\n bB= cross(wB, IB.dot(wB))-mB*cross(cross(wA, cross(wA, q))+cross(wB, cross(wB, r))+g*z, r)\n b= array([bA.dot(x), 
bA.dot(y), bA.dot(z), bB.dot(x), bB.dot(y), bB.dot(z)])\n \n px, py, pz= p; qx, qy, qz= q; rx, ry, rz= r\n A= zeros((6,6)) # A will be the coefficient matrix for [A] alphas = b\n A[0]= [-mA*(pz**2+py**2)-mB*qy**2-IA[0][0], mA*px*py+mB*qx*qy-IA[0][1], mA*px*pz+mB*qx*qz-IA[0][2], -mB*(rz*qz+ry*qy), mB*rx*qy, mB*rx*qz]\n A[1]= [mA*py*px+mB*qy*qx-IA[1][0], -mA*(px**2+pz**2)-mB*(qx**2+qz**2)-IA[1][1], mA*py*pz+mB*qy*qz-IA[1][2], mB*ry*qx, -mB*(rx*qx+rz*qz), mB*ry*qz]\n A[2]= [mA*pz*px+mB*qz*qx-IA[2][0], mA*pz*py+mB*qz*qy-IA[2][1], -mA*(py**2+px**2)-mB*(qy**2+qx**2)-IA[2][2], mB*rz*qx, mB*rz*qy, -mB*(ry*qy+rx*qx)]\n \n A[3]= [-mB*(qz*rz+qy*ry), mB*qx*ry, mB*qx*rz, -mB*(rz**2+ry**2)-IB[0][0], mB*rx*ry-IB[0][1], mB*rx*rz-IB[0][2]]\n A[4]= [mB*qy*rx, -mB*(qx*rx+qz*rz), mB*qy*rz, mB*ry*rx-IB[1][0], -mB*(rx**2+rz**2)-IB[1][1], mB*ry*rz-IB[1][2]]\n A[5]= [mB*qz*rx, mB*qz*ry, -mB*(qy*ry+qx*rx), mB*rz*rx-IB[2][0], mB*rz*ry-IB[2][1], -mB*(ry**2+rx**2)-IB[2][2]]\n \n alphas= linalg.solve(A, b)\n aA= alphas[:3] # alpha_A\n aB= alphas[3:]\n \n pdd= cross(aA, p) + cross(wA, cross(wA, p))\n qdd= cross(aA, q) + cross(wA, cross(wA, q))\n vAdd= cross(aA, vA) + cross(wA, cross(wA, vA))\n rdd= qdd + cross(aB, r) + cross(wB, cross(wB, r))\n vBdd= qdd + cross(aB, vB) + cross(wB, cross(wB, vB))\n \n return [pd, qd, rd, vAd, vBd, pdd, qdd, rdd, vAdd, vBdd, aA, aB]", "title": "" }, { "docid": "5750ab4c9a8c8bcb4ab0b7b21e9c4d5f", "score": "0.53902906", "text": "def inv(self, state):\n return self.lmult_inv(state, np.eye(state.mom.shape[0]))", "title": "" }, { "docid": "eb6c9ef2583b039f485472352eb1949b", "score": "0.5387148", "text": "def construct_HH_model(n: int, m: int, name: str = None):\n\n if n < 2 or m < 2:\n raise Exception()\n\n if name is None:\n name = f\"HH_{n}_{m}\"\n\n labels = []\n for i in range(n):\n for j in range(m):\n if i == 0:\n if j == 0:\n label = 'O'\n else:\n label = f\"C{j}\"\n elif j == 0:\n label = f\"I{i}\"\n else:\n label = f\"I{i}C{j}\"\n labels.append(label)\n\n mc = MarkovChain(name=name)\n\n for label in labels:\n if label == 'O':\n mc.add_state(label, open_state=True)\n else:\n mc.add_state(label)\n\n labels = np.array(labels, dtype=object).reshape((n, m))\n\n # Add inactivation transitions\n for i in range(n):\n for j in range(m):\n if i < n - 1:\n mc.add_both_transitions(labels[i, j], labels[i + 1, j], sp.sympify(f\"{n-i-1} * b_o\"),\n sp.sympify(f\"{i+1}*a_o\"))\n if j < m - 1:\n mc.add_both_transitions(labels[i, j], labels[i, j + 1], sp.sympify(f\"{m-j-1} * b_i\"),\n sp.sympify(f\"{j+1}*a_i\"))\n return mc", "title": "" }, { "docid": "27f0cc2d130916f5b4ecfaf02fb090c6", "score": "0.53861105", "text": "def get_action(self, state):\n # starting point of algorithm\n if (self.q[state, :] == 0).all():\n return np.random.choice(self.actions)\n # Epsilon-Greedy\n if np.random.random() < self.epsilon:\n return np.random.choice(self.actions)\n else:\n return np.argmax(self.q[state, :])", "title": "" }, { "docid": "92488b296c4f3842f0123702d96a0021", "score": "0.53851795", "text": "def random_HS_state(d):\n rho = np.random.normal(0, 1, size=(d, d)) + np.random.normal(0, 1, size=(d, d))*1j\n rho = rho @ rho.conj().T\n rho = rho / np.trace(rho)\n return rho", "title": "" } ]
ab02758ba0c11476d13c5cbb34b4f7ee
Update the mapping (nodal2global and elements2global)
[ { "docid": "63e31e5664150100ebe332549c1fd359", "score": "0.6652489", "text": "def update_mapping(self, fields, nodeids, connectivity, dofs_by_element, callbacks, callbackargs, **kwargs):\n self._set_standard_mapping(fields, nodeids, connectivity, dofs_by_element, callbacks, callbackargs, **kwargs)", "title": "" } ]
[ { "docid": "c51c89163158e8315eca9b877ad3d659", "score": "0.6116114", "text": "def _update_global_vars(self):\n\n pass", "title": "" }, { "docid": "88cec46d1e0d250965becc0faf411415", "score": "0.60464275", "text": "def _update_mappings(self):\n self.existing_cost_entry_map.update(self.processed_report.cost_entries)\n self.existing_product_map.update(self.processed_report.products)\n self.existing_pricing_map.update(self.processed_report.pricing)\n self.existing_reservation_map.update(self.processed_report.reservations)\n\n self.processed_report.remove_processed_rows()", "title": "" }, { "docid": "887980315c5f8b7230278e395e40877e", "score": "0.58705133", "text": "def _set_standard_mapping(self, fields, nodeids, connectivity, dofs_by_element, callbacks, callbackargs, **kwargs):\n raise NotImplementedError('The _set_standard_mapping method must be implemented in subclasses')", "title": "" }, { "docid": "2fb4c3b4b6babce1477387971704cf90", "score": "0.58472997", "text": "def setChampIdMap():\n\tglobal champIdMap\n\tchampIdMap = getChampMap()\n\tprint 'map updated'", "title": "" }, { "docid": "3a8db93bc1a0d7e458dc1f1eace9b2a9", "score": "0.5842898", "text": "def set_global_attributes(self):\n\n for key, value in sorted(self._attributes['global'].items()):\n self._nc.setncattr(key, value)", "title": "" }, { "docid": "846dea85c48ad26d54fcea24ee8b60de", "score": "0.57833976", "text": "def update_global(data):\n for key, value in data.items():\n setattr(g, key, value)", "title": "" }, { "docid": "f9c06b2ac88442709ad6f5f9227b1144", "score": "0.5764773", "text": "def global_update(self):\n pass", "title": "" }, { "docid": "43f25ee6e442b9bef343c0315349fc4e", "score": "0.5733424", "text": "def transform_mappings(self):\n self.entity2idx = {v: k for k, v in enumerate(self.all_entities)}\n self.idx2entity = {v: k for k, v in self.entity2idx.items()}\n self.relation2idx = {v: k for k, v in enumerate(self.all_relations)}\n self.idx2relation = {v: k for k, v in self.relation2idx.items()}", "title": "" }, { "docid": "78292a087bbb8b537e09cc2a7669695c", "score": "0.56884396", "text": "def update(self, mapping: dict):\n self.__d.update(mapping)", "title": "" }, { "docid": "2aba0deaf061ccb789440dde516ca413", "score": "0.5644098", "text": "def make_map(self):", "title": "" }, { "docid": "d3fc7375f9195c3e686d7864f61b8109", "score": "0.56298625", "text": "def update2(self, ir, can, nheav):\n self.ncan = len(self.cans)\n if can not in self.cans:\n #print '++', can #, '\\n\\n'\n self.maps.append( [ir, self.ncan, 0] )\n #self.iokgs.append( T )\n self.cans.append( can )\n self.nsheav.append( nheav )\n self.ncan += 1\n else:\n ican = self.cans.index( can )\n entry = [ir, ican, 0]\n if entry not in self.maps:\n self.maps.append( entry )\n #self.iokgs.append( T )\n #print(' -- maps = ', self.maps)", "title": "" }, { "docid": "c8159952b3a5e728d4b04e2337280cab", "score": "0.562656", "text": "def add_to_map(self):\n pass", "title": "" }, { "docid": "c8159952b3a5e728d4b04e2337280cab", "score": "0.562656", "text": "def add_to_map(self):\n pass", "title": "" }, { "docid": "3f27649842dce2a80494688cd03d0570", "score": "0.5603346", "text": "def update_discovery_map(self) -> int:\n raise NotImplementedError() # pragma no cover", "title": "" }, { "docid": "525f1b847e4f220709f7716eaacd4922", "score": "0.56023455", "text": "def _clearNodeMap(self):\r\n \r\n self._py_nodes = {}", "title": "" }, { "docid": "fa8c3505ebf715c0d25393cca664a280", "score": "0.5580438", "text": "def reload_mapping():\r\n mod = __import__(module_name)\r\n 
mapping = getattr(mod, mapping_name, None)\r\n if mapping:\r\n self.fvars = mod.__dict__\r\n self.mapping = mapping", "title": "" }, { "docid": "d36ee12d1838e6f9672c04a6e5c52c11", "score": "0.55775356", "text": "def update_map(self, map_form):\n pass", "title": "" }, { "docid": "6725b8ec2df9a1ed6551f7b4663809ab", "score": "0.5571848", "text": "def static_information_update(microgrid, info, logger):\n\tmicrogrid = deepcopy(microgrid)\n\tmicrogrid[\"AREA\"] = info.AREA\n\tmicrogrid[\"TIME_STAMP\"] = info.TIME_STAMP\n\t# Update the utility grid group\n\tfor i in updated_attributes_static_ac_generator:\n\t\tif i in microgrid[\"UG\"]: # Update the attribute value of given attributes list\n\t\t\ttemp = getattr(info.dg[default_sequence[\"UG\"]], i, 0)\n\t\t\tif type(temp) is float or type(temp) is int:\n\t\t\t\tmicrogrid[\"UG\"][i] = temp\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tmicrogrid[\"UG\"][i] = temp._values\n\t\t\t\texcept:\n\t\t\t\t\tlogger.warning(\"The protocol buffer model of UG has been changed!\")\n\n\tfor i in updated_attributes_static_ac_generator:\n\t\tif i in microgrid[\"DG\"]: # Update the attribute value of given attributes list\n\t\t\ttemp = getattr(info.dg[default_sequence[\"DG\"]], i, 0)\n\t\t\tif type(temp) is float or type(temp) is int:\n\t\t\t\tmicrogrid[\"DG\"][i] = temp\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tmicrogrid[\"DG\"][i] = temp._values\n\t\t\t\texcept:\n\t\t\t\t\tlogger.warning(\"The protocol buffer model of DG has been changed!\")\n\t# Update the energy storage system group\n\tfor i in updated_attributes_static_ess:\n\t\tif i in microgrid[\"ESS\"]: # Update the attribute value of given attributes list\n\t\t\ttemp = getattr(info.ess[0], i, 0)\n\t\t\tif type(temp) is float or type(temp) is int:\n\t\t\t\tmicrogrid[\"ESS\"][i] = temp\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tmicrogrid[\"ESS\"][i] = temp._values\n\t\t\t\texcept:\n\t\t\t\t\tlogger.warning(\"The protocol buffer model of ESS has been changed!\")\n\n\t# Update the photovoltaic generator grid group\n\tfor i in updated_attributes_static_res_generator:\n\t\tif i in microgrid[\"PV\"]: # Update the attribute value of given attributes list\n\t\t\ttemp = getattr(info.pv[0], i, 0)\n\t\t\tif type(temp) is float or type(temp) is int:\n\t\t\t\tmicrogrid[\"PV\"][i] = temp\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tmicrogrid[\"PV\"][i] = temp._values\n\t\t\t\texcept:\n\t\t\t\t\tlogger.warning(\"The protocol buffer model of PV has been changed!\")\n\n\tfor i in updated_attributes_static_res_generator:\n\t\tif i in microgrid[\"WP\"]: # Update the attribute value of given attributes list\n\t\t\ttemp = getattr(info.wp[0], i, 0)\n\t\t\tif type(temp) is float or type(temp) is int:\n\t\t\t\tmicrogrid[\"WP\"][i] = temp\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tmicrogrid[\"WP\"][i] = temp._values\n\t\t\t\texcept:\n\t\t\t\t\tlogger.warning(\"The protocol buffer model of WP has been changed!\")\n\n\t# Update the critical AC load group\n\tfor i in updated_attributes_static_ac_load:\n\t\tif i in microgrid[\"Load_ac\"]: # Update the attribute value of given attributes list\n\t\t\ttemp = getattr(info.load_ac[default_sequence[\"CRI\"]], i, 0)\n\t\t\tif type(temp) is float or type(temp) is int:\n\t\t\t\tmicrogrid[\"Load_ac\"][i] = temp\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tmicrogrid[\"Load_ac\"][i] = temp._values\n\t\t\t\texcept:\n\t\t\t\t\tlogger.warning(\"The protocol buffer model of Load_ac has been changed!\")\n\n\t# Update the non-critical AC load group\n\tfor i in updated_attributes_static_ac_load:\n\t\tif i in microgrid[\"Load_nac\"]: # 
Update the attribute value of given attributes list\n\t\t\ttemp = getattr(info.load_ac[default_sequence[\"NON_CRI\"]], i, 0)\n\t\t\tif type(temp) is float or type(temp) is int:\n\t\t\t\tmicrogrid[\"Load_nac\"][i] = temp\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tmicrogrid[\"Load_nac\"][i] = temp._values\n\t\t\t\texcept:\n\t\t\t\t\tlogger.warning(\"The protocol buffer model of Load_nac has been changed!\")\n\n\t# Update the critical DC load group\n\tfor i in updated_attributes_static_dc_load:\n\t\tif i in microgrid[\"Load_dc\"]: # Update the attribute value of given attributes list\n\t\t\ttemp = getattr(info.load_dc[default_sequence[\"CRI\"]], i, 0)\n\t\t\tif type(temp) is float or type(temp) is int:\n\t\t\t\tmicrogrid[\"Load_dc\"][i] = temp\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tmicrogrid[\"Load_dc\"][i] = temp._values\n\t\t\t\texcept:\n\t\t\t\t\tlogger.warning(\"The protocol buffer model of Load_dc has been changed!\")\n\n\t# Update the non-critical DC load group\n\tfor i in updated_attributes_static_dc_load:\n\t\tif i in microgrid[\"Load_nac\"]: # Update the attribute value of given attributes list\n\t\t\ttemp = getattr(info.load_dc[default_sequence[\"NON_CRI\"]], i, 0)\n\t\t\tif type(temp) is float or type(temp) is int:\n\t\t\t\tmicrogrid[\"Load_nac\"][i] = temp\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tmicrogrid[\"Load_nac\"][i] = temp._values\n\t\t\t\texcept:\n\t\t\t\t\tlogger.warning(\"The protocol buffer model of Load_nac has been changed!\")\n\n\t# Update the bi-directional convertor group\n\tfor i in updated_attributes_static_bic:\n\t\tif i in microgrid[\"BIC\"]: # Update the attribute value of given attributes list\n\t\t\ttemp = getattr(info.bic[0], i, 0)\n\t\t\tif type(temp) is float or type(temp) is int:\n\t\t\t\tmicrogrid[\"BIC\"][i] = temp\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\tmicrogrid[\"BIC\"][i] = temp._values\n\t\t\t\texcept:\n\t\t\t\t\tlogger.warning(\"The protocol buffer model of BIC has been changed!\")\n\t# Return result\n\treturn microgrid", "title": "" }, { "docid": "3771ab371f6ff2cbc260a23006f622fb", "score": "0.55485487", "text": "def global_update(nodes, rank, network, weights_list):\n\n for j, parameter in enumerate(network.get_parameters()):\n if rank != 0:\n dist.gather(tensor=network.get_parameters()[parameter].data, gather_list=[], dst=0, group=nodes)\n else:\n dist.gather(tensor=network.get_parameters()[parameter].data, gather_list=weights_list[j], dst=0, group=nodes)\n network.get_parameters()[parameter].data = torch.mean(torch.stack(weights_list[j][1:]), dim=0)\n dist.broadcast(network.get_parameters()[parameter], 0, group=nodes)", "title": "" }, { "docid": "dbb69a5a9de7b9737a8138e82ff47d96", "score": "0.55434436", "text": "def initialize_map(self):\n with open(self.mapping_file, 'r') as mapping_f:\n for line in mapping_f:\n freebase_type, figer_type = line.rstrip('\\n').split('\\t')\n freebase_type = freebase_type.replace('/', '', 1).replace('/', '.')\n figer_type = figer_type.replace('/', '', 1).replace('/', '.')\n self.freebase2figer_map[freebase_type] = figer_type", "title": "" }, { "docid": "2d35445a2d9758f01dea1f947037a5bc", "score": "0.55419", "text": "def _assign_variables(self) -> None:\n mdg = self.mdg\n for g, d in mdg.subdomains(return_data=True):\n d[pp.PRIMARY_VARIABLES] = {self.displacement_variable: {\"cells\": self.nd}}", "title": "" }, { "docid": "5d53dc62ae65b8deb84b568b1e5ded94", "score": "0.55303204", "text": "def _update_map(self, xi, yi, resid):\n # Do nothing if doesn't satisfy condition\n if abs(resid) > self.error_term:\n return\n \n # 
Decide whether to store in map\n store_in_map = False\n if (xi not in self.xi2yi) and (yi not in self.yi2xi):\n # no competition, store in map\n store_in_map = True\n elif (xi in self.xi2yi) and (yi not in self.yi2xi):\n if abs(resid) < abs(self.xi2resid[xi]):\n # This is a better fit than the stored one\n # Break the stored link\n self.yi2resid.pop(self.xi2yi[xi])\n self.yi2xi.pop(self.xi2yi[xi])\n store_in_map = True\n elif (xi not in self.xi2yi) and (yi in self.yi2xi):\n if abs(resid) < abs(self.yi2resid[yi]):\n # This is a better fit than the stored one\n # Break the stored link\n self.xi2resid.pop(self.yi2xi[yi])\n self.xi2yi.pop(self.yi2xi[yi])\n store_in_map = True\n elif self.xi2yi[xi] == yi and self.yi2xi[yi] == xi:\n # this exact link is already stored\n pass\n else:\n # Both are already in the map, and they are out of sync\n raise ValueError(\"map out of sync\")\n \n # Now actually update the dicts\n if store_in_map:\n self.xi2yi[xi] = yi\n self.xi2resid[xi] = resid \n self.yi2xi[yi] = xi\n self.yi2resid[yi] = resid", "title": "" }, { "docid": "661f6c4300039aa8eda144bd5e151fa8", "score": "0.55198646", "text": "def _assemble_global_K(self):\n elements = self._model.elements\n # Generate Node Freedom Al[location Table and total number of\n # active DOFs of the system\n self.__generate_node_freedom_allocation_dict__()\n # Generate Node Freedom Map dictionary\n self.__generate_node_freedom_map_dict__()\n # number of dof per node\n n_dof = self.n_active_dof\n # Initialize the global stiffness matrix\n K = np.zeros([n_dof, n_dof], dtype=np.float64)\n # Fill the global stiffness matrix\n for n_elem, element in elements.items():\n # Get nodes of the respective element\n nodes_elem = element._nodal_connectivity\n # Assemble element stiffness matrix\n element.assemble_Ke()\n # List to store the global system indices\n g_i = []\n # List of indices of used element DOFs\n g_e = []\n # For each node in the current element\n for n_node_e, node in nodes_elem.items():\n # Get Element Freedom Signature\n efs = element.efs[n_node_e]\n # Number of active DOF in the node of the element\n active_dof = np.sum((efs >= 1))\n if active_dof > 0:\n # Get value of th Node Freedom Assign Table for the\n # current node\n nfat_node = self.nfmt[node.number]\n # Get NFS of the node in the element\n enfs_node = element.enfmt[n_node_e]\n # for the total of used DOF in the node\n # FIXME!!\n index_base = element.get_node_active_dof(n_node_e)\n active_nodes = nfat_node + index_base\n # Extend the list\n g_i.extend(active_nodes)\n #\n index_base_e = element.get_element_active_dof(n_node_e)\n active_nodes_e = enfs_node + index_base_e\n g_e.extend(active_nodes_e)\n\n # Convert list to numpy array in order to broadcast more\n # easily to the global stiffness matrix\n g_i = np.array(g_i)\n g_e = np.array(g_e)\n # Add the contributions to the respective DOFs in global system\n K[g_i[:, None], g_i] += element._Ke[g_e[:, None], g_e]\n\n # Generate sparse matrix\n K_s = sparse.csr_matrix(K)\n\n return K_s", "title": "" }, { "docid": "fdcc207c0f1fd714e5789ca4b0911882", "score": "0.55162865", "text": "def after_map(self, map):", "title": "" }, { "docid": "8f3c39c304d71f3689fd04d5279f1067", "score": "0.5508919", "text": "def save_mappings(self):\n self.mappings0 = self.mappings.copy()", "title": "" }, { "docid": "d85815918c97312e8e00330959ed4817", "score": "0.549593", "text": "def _map_personalities(self):\n self.personality_map = {p: n for n in self.graph.nodes() for p in self.personalities[n]}", "title": "" }, { "docid": 
"90b6bfd0d29be0248cec16c04c6d857e", "score": "0.54929286", "text": "def merge_info(self):\n global global_path\n global global_known_p\n global_path = global_path + (list(set(self.path) - set(global_path))) # removes duplicates\n self.path = self.path + (list(set(global_path) - set(self.path))) # removes duplicates\n\n # haalt uit global_known_p eerst weg wat in self.known_p_removed zit\n l3 = [x for x in global_known_p if\n x not in self.known_p_removed]\n global_known_p = l3\n # merge dan alles\n global_known_p = global_known_p + list(set(self.known_p) - set(global_known_p))\n self.known_p = global_known_p", "title": "" }, { "docid": "322572407dc2b24401d5df388e98444f", "score": "0.5489253", "text": "def update(self, other):\n self._map.update(other._map)", "title": "" }, { "docid": "4572dbb10ca1ad112faaaea4b4287478", "score": "0.5486843", "text": "def put_map(self):\n self._check(pn_data_put_map(self._data))", "title": "" }, { "docid": "705b5c810ad096d8f1ede78987610aaf", "score": "0.54607624", "text": "def _set_netcdf_grid_mapping_variable(root, grid_mapping):\n name = grid_mapping.pop(\"name\")\n var = root.createVariable(name, \"S1\", dimensions=())\n for attr in grid_mapping.keys():\n setattr(var, attr, grid_mapping[attr])", "title": "" }, { "docid": "d7b711d9f1af1d606fedb2315d4e5084", "score": "0.54587203", "text": "def update(self, mapping):\r\n self.frames[0].update(mapping)", "title": "" }, { "docid": "d7b711d9f1af1d606fedb2315d4e5084", "score": "0.54587203", "text": "def update(self, mapping):\r\n self.frames[0].update(mapping)", "title": "" }, { "docid": "6bdc724b59cce3567d8b30e7fee1a1b6", "score": "0.54581684", "text": "def mapFromGlobal(self, obj, subdev=None):\n tr = self.inverseGlobalTransform(subdev)\n if tr is not None:\n return self._mapTransform(obj, tr)\n \n ## If our transformation is nonlinear, then the local mapping step must be done separately.\n subdev = self._subdevDict(subdev)\n parent = self.parentDevice()\n if parent is None:\n obj = parent.mapFromGlobal(obj, subdev)\n return self.mapFromParent(obj, subdev)", "title": "" }, { "docid": "0ed3d6a8f3da4fa828a696e4ec58f0a2", "score": "0.5453379", "text": "def update_discovery_map(self) -> int:\n ret = self.generate_discovery_map()\n if ret != 0:\n return ret\n if self.is_generated_map_different():\n ret = self.commit_discovery_map()\n return ret", "title": "" }, { "docid": "a25a11ca0c74964ec3c3ee9738ba5d0d", "score": "0.54403746", "text": "def new_feature_map(self, device):\n raise NotImplementedError()", "title": "" }, { "docid": "9eabc2c0e2a4b903d413c0145a101e6f", "score": "0.54342556", "text": "def _updateKnowledge(self):\n \n # updating known locations\n locations = set(self.locations)\n for loc in self.conn.keys():\n locations.add(loc)\n locations.union(self.conn[loc])\n self.locations = list(locations)\n \n \n # updating hawkweed info at locs\n for loc in self.locations:\n if loc not in self.hawkweed.keys():\n self.hawkweed[loc] = 0.0\n \n # updating _conn to reflect bi-directional paths\n temp = dict(self.conn)\n for loc in self.conn.keys():\n for node in self.conn[loc]:\n if node not in self.conn.keys():\n temp[node] = set()\n temp[node].add(loc)\n self.conn = dict(temp)", "title": "" }, { "docid": "9f5cbefcad8555ef27748045d9677b80", "score": "0.54269683", "text": "def _update_index_mapper(self):\n\n x = self.x\n x2 = self.x2\n y = self.y\n y2 = self.y2\n\n if \"left\" in self.origin:\n x_low = x\n x_high = x2\n else:\n x_low = x2\n x_high = x\n\n if \"bottom\" in self.origin:\n y_low = y\n y_high = y2\n 
else:\n y_low = y2\n y_high = y\n\n if self.index_mapper is not None:\n if self.orientation == 'h':\n self.index_mapper.screen_bounds = (x_low, x_high, y_low, y_high)\n else:\n self.index_mapper.screen_bounds = (y_low, y_high, x_low, x_high)\n self.index_mapper_changed = True\n self.invalidate_draw()", "title": "" }, { "docid": "08e87c978cd446bc374cf9c46632f1c7", "score": "0.54263204", "text": "def _update_proximity(self):\n pass", "title": "" }, { "docid": "ad5ba8913105520e210e98b12d64e6fd", "score": "0.5416478", "text": "def update_persistent_graph():\n from goldstone.cinder.utils import update_nodes as update_cinder_nodes\n from goldstone.glance.utils import update_nodes as update_glance_nodes\n from goldstone.keystone.utils import update_nodes as update_keystone_nodes\n from goldstone.nova.utils import update_nodes as update_nova_nodes\n\n update_cinder_nodes()\n update_glance_nodes()\n update_keystone_nodes()\n update_nova_nodes()", "title": "" }, { "docid": "199ae760ba6cc1f3ee02b038c85933b3", "score": "0.541002", "text": "def UpdateMapLabel(self):\n\n vector = self.instruction.FindInstructionByType('vector')\n if vector:\n vectorId = vector.id \n else:\n vectorId = None\n\n raster = self.instruction.FindInstructionByType('raster')\n if raster:\n rasterId = raster.id \n else:\n rasterId = None\n\n rasterName = 'None'\n if rasterId:\n rasterName = self.instruction[rasterId]['raster'].split('@')[0]\n \n self.itemLabels['map'] = self.itemLabels['map'][0:1]\n self.itemLabels['map'].append(\"raster: \" + rasterName)\n if vectorId: \n for map in self.instruction[vectorId]['list']:\n self.itemLabels['map'].append('vector: ' + map[0].split('@')[0])", "title": "" }, { "docid": "b2a0681e33180385a2cec2e16ed3d2f0", "score": "0.5409486", "text": "def global_update(self):\n grads = []\n for uid in range(self.client_num):\n w = self.weight[uid] / sum(self.weight)\n grads.append(self.client_update(uid) * w)\n torch.cuda.empty_cache()\n\n grads = sum(grads)\n self.server_model.update(grads)\n self.broad_cast()", "title": "" }, { "docid": "78a21b9e451b5095790bca1e2c82c065", "score": "0.5400456", "text": "def read_mappings(self):\n self.entity2idx = {v: k for k, v in enumerate(self.read_entities())} ##\n self.idx2entity = {v: k for k, v in self.entity2idx.items()}\n self.relation2idx = {v: k for k, v in enumerate(self.read_relations())} ##\n self.idx2relation = {v: k for k, v in self.relation2idx.items()}", "title": "" }, { "docid": "f04a54897a2283bad6bea1a4289bb02d", "score": "0.5397055", "text": "def _reset_map(self):\n # reset obstacle msg id\n self.last_id = -1\n # first reset map\n if self.manager is not None:\n # create clean slate\n self.manager.map = deepcopy(self.manager.original_map)\n self.manager.neighbourhood = deepcopy(self.manager.original_neighbourhood)\n self.manager.message_by_lanelet = deepcopy(self.manager.original_message_by_lanelet)", "title": "" }, { "docid": "9edd11197f00cf60ac1aa9654896454a", "score": "0.53909904", "text": "def update_map_info(self, map):\n possible_lanes = ray_localization(self.heading, self.spawn_place, self.engine, return_all_result=True)\n possible_lane_indexes = [lane_index for lane, lane_index, dist in possible_lanes]\n try:\n idx = possible_lane_indexes.index(self.config[\"spawn_lane_index\"])\n except ValueError:\n lane, new_l_index = possible_lanes[0][:-1]\n else:\n lane, new_l_index = possible_lanes[idx][:-1]\n dest = self.config[\"destination_node\"]\n self.navigation.update(\n map,\n current_lane_index=new_l_index,\n final_road_node=dest if dest is 
not None else None,\n random_seed=self.engine.global_random_seed\n )\n assert lane is not None, \"spawn place is not on road!\"\n self.navigation.update_localization(self)\n self.lane_index = new_l_index\n self.lane = lane", "title": "" }, { "docid": "e9205b466dcb1a23c7c5198375ed36fa", "score": "0.5385583", "text": "def redraw_map_cmd(self):\n FillMapWithNodes(self).redraw_map()", "title": "" }, { "docid": "0ecff58947fe052db9e4f711f4423fb7", "score": "0.5373781", "text": "def _update_geospatial_global_attributes(self):\n \n lat_var_name = self.sensor_def_exists('drv_interp_m_gps_lat')\n lon_var_name = self.sensor_def_exists('drv_interp_m_gps_lon')\n if not lat_var_name or not lon_var_name:\n self._logger.warning('Skipping set global geospatial_lat/lon attributes')\n return\n else:\n \n if lat_var_name in self._nc.variables and lon_var_name in self._nc.variables:\n min_lat = self._nc.variables[lat_var_name][:].min()\n max_lat = self._nc.variables[lat_var_name][:].max()\n min_lon = self._nc.variables[lon_var_name][:].min()\n max_lon = self._nc.variables[lon_var_name][:].max()\n \n # Create polygon WKT and set geospatial_bounds\n coords = ((max_lat, min_lon),\n (max_lat, max_lon),\n (min_lat, max_lon),\n (min_lat, min_lon),\n (max_lat, min_lon))\n polygon = Polygon(coords)\n polygon_wkt = polygon.wkt\n else:\n min_lat = np.nan\n max_lat = np.nan\n min_lon = np.nan\n max_lon = np.nan\n polygon_wkt = u'POLYGON EMPTY'\n \n # Set the global attributes\n self._nc.setncattr('geospatial_lat_min', min_lat)\n self._nc.setncattr('geospatial_lat_max', max_lat)\n self._nc.setncattr('geospatial_lon_min', min_lon)\n self._nc.setncattr('geospatial_lon_max', max_lon)\n self._nc.setncattr('geospatial_bounds', polygon_wkt)\n \n depth_var_name = self.sensor_def_exists('drv_depth')\n if not depth_var_name:\n self._logger.warning('Skipping set global geospatial_vertical attributes')\n else:\n if depth_var_name in self._nc.variables:\n min_depth = self._nc.variables[depth_var_name][:].min()\n max_depth = self._nc.variables[depth_var_name][:].max()\n else:\n min_depth = np.nan\n max_depth = np.nan\n \n self._nc.setncattr('geospatial_vertical_min', min_depth)\n self._nc.setncattr('geospatial_vertical_max', max_depth)", "title": "" }, { "docid": "4b3cf55810f540625233cf7cdb28493f", "score": "0.53670615", "text": "def recreate_map(self):\n self.create_map()\n for item in self.saved_positions.items():\n print(item[1][-1])\n self.update_position(item[1][-1])\n self.draw_historic_path(device_id=item[1][-1]['device_id'],last=20)\n m.draw_map()", "title": "" }, { "docid": "6caa37af69f31dd753f3a30e8d31947f", "score": "0.5366982", "text": "def before_map(self, map):", "title": "" }, { "docid": "6331a53da197340c1ef29e48dcbdc1a8", "score": "0.53653723", "text": "def register_for_changed_maps(self):\n pass", "title": "" }, { "docid": "db25b8c4877968567f636861d0781a55", "score": "0.5341022", "text": "def svf_piomap(self, mapping):", "title": "" }, { "docid": "2375f9a15a5b9effe719648bc7f1b938", "score": "0.5339042", "text": "def reset_idxes(G):\n mapping = {}\n for new_idx, old_idx in enumerate(G.nodes()):\n mapping[old_idx] = new_idx\n new_G = nx.relabel_nodes(G, mapping, copy=True)\n return new_G, mapping", "title": "" }, { "docid": "c3a2dd5c3327330e96610f0451f0f9f0", "score": "0.5338325", "text": "def update(self, other):\n return self.__map.update(other)", "title": "" }, { "docid": "74deb448e370242c1cd9caefc50a7b14", "score": "0.5315969", "text": "def _resort_mapper( self ):\n for s in self._symbol_mapper[ \"node\" ]:\n for k, 
l in self._symbol_mapper[ \"node\" ][ s ].iteritems( ):\n self._symbol_mapper[ \"node\" ][ s ][ k ] = sorted( set( l ) )", "title": "" }, { "docid": "1a21597549e88e25210badea5acdaa13", "score": "0.5310865", "text": "def set_map(self, M):\n self._reset(self)\n self.start = None\n self.goal = None\n # TODO: Set map to new value.\n self.map=M\n\n def set_start(self, start):\n \"\"\"Method used to set start attribute \"\"\"\n self._reset(self)\n # TODO: Set start value. Remember to remove goal, closedSet, openSet, cameFrom, gScore, fScore,\n # and path attributes' values.\n self.start=start\n\n\n def set_goal(self, goal):\n \"\"\"Method used to set goal attribute \"\"\"\n self._reset(self)\n # TODO: Set goal value.\n self.goal=goal\n\n def get_current_node(self):\n \"\"\" Returns the node in the open set with the lowest value of f(node).\"\"\"\n # TODO: Return the node in the open set with the lowest value of f(node).\n cur={}\n for i in self.openSet:\n cur[i]=self.fScore[i]\n Min=min(cur.items(), key = lambda x: x[1])\n return Min[0]\n\n\n def get_neighbors(self, node):\n \"\"\"Returns the neighbors of a node\"\"\"\n # TODO: Return the neighbors of a node\n return self.map.roads[node]\n\n def get_gScore(self, node):\n \"\"\"Returns the g Score of a node\"\"\"\n # TODO: Return the g Score of a node\n return self.gScore[node]\n\n\n def get_tenative_gScore(self, current, neighbor):\n \"\"\"Returns the tenative g Score of a node\"\"\"\n # TODO: Return the g Score of the current node\n # plus distance from the current node to it's neighbors\n return self.get_gScore(current) + self.distance(current,neighbor)\n\n def is_open_empty(self):\n \"\"\"returns True if the open set is empty. False otherwise. \"\"\"\n # TODO: Return True if the open set is empty. False otherwise.\n if not self.openSet:\n return True\n\n def distance(self, node_1, node_2):\n \"\"\" Computes the Euclidean L2 Distance\"\"\"\n # TODO: Compute and return the Euclidean L2 Distance\n n1_x=self.map.intersections[node_1][0]\n n1_y=self.map.intersections[node_1][1]\n n2_x=self.map.intersections[node_2][0]\n n2_y=self.map.intersections[node_1][1]\n return math.sqrt(pow(n1_x - n2_x, 2)+pow(n1_y - n2_y, 2))\n\n def heuristic_cost_estimate(self, node):\n \"\"\" Returns the heuristic cost estimate of a node \"\"\"\n # TODO: Return the heuristic cost estimate of a node\n end = self.goal\n return self.distance(end, node)\n\n def calculate_fscore(self, node):\n \"\"\"Calculate the f score of a node. 
\"\"\"\n # TODO: Calculate and returns the f score of a node.\n # REMEMBER F = G + H\n return self.get_gScore(node)+self.heuristic_cost_estimate(node)\n\n\n def record_best_path_to(self, current, neighbor):\n \"\"\"Record the best path to a node \"\"\"\n # TODO: Record the best path to a node, by updating cameFrom, gScore, and fScore\n self.cameFrom[neighbor]=current\n self.gScore[neighbor]=self.get_tenative_gScore(current,neighbor)\n self.fScore[neighbor]=self.calculate_fscore(neighbor)\n\n PathPlanner.create_closedSet = create_closedSet", "title": "" }, { "docid": "373194bc83c8ad88ff7682bffeccd00a", "score": "0.5299723", "text": "def _update_weighted_matrix(self) -> None:\n self.weighted_map = deepcopy(self.map)\n for connection in self.weighted_map:\n connections = self.weighted_map[connection]\n connections_count = sum(list(connections.values()))\n for key in self.weighted_map[connection]:\n self.weighted_map[connection][key] /= connections_count", "title": "" }, { "docid": "09d03ebab194d5e392b9b65be3050d3d", "score": "0.5297442", "text": "def redraw_map(self):\n for node_tag_item in tag_list:\n self.parent.canvas.delete(node_tag_item)\n ReadCfg().read_cfg()\n FillMapWithNodes.run(self)", "title": "" }, { "docid": "3d78b9612c1bb464cd9e766ee7e57617", "score": "0.5295578", "text": "def get_map_2d(self):\n return super().get_map_2d()", "title": "" }, { "docid": "4d60fd830db3b01ba47724008fcfb462", "score": "0.5293758", "text": "def update(self, prob_map, position):\r\n raise NotImplementedError()", "title": "" }, { "docid": "91ed8834258d013b7f5e107050bcd3ac", "score": "0.5287312", "text": "def infomap(self):\n cur = self.con.cursor()\n with open('/home/ubuntu/insight/network_update.pickle', 'rb') as f:\n network = pickle.load(f)\n edge_weight_query = \"\"\"SELECT \"Weights\" FROM network_update;\"\"\"\n cur.execute(edge_weight_query)\n weights = cur.fetchall()\n\n mod_weights = []\n for w in weights:\n mod_weights.append(w[0])\n #clear memory of weights\n weights = None\n\n info_map = network.community_infomap(edge_weights = mod_weights)\n\n with open('/home/ubuntu/insight/communities_updated.pickle', 'wb') as f:\n pickle.dump(info_map, f)", "title": "" }, { "docid": "df3f0ec6de0c3daeb3e28abf6ac39e62", "score": "0.52838075", "text": "def set_global2local(self, name, global2local=None):\n assert len(name) > 0, 'name cannot be empty.'\n\n if global2local is not None: # Create shared-tensor\n if isinstance(global2local, list):\n global2local = F.tensor(global2local)\n assert 'int64' == get_type_str(F.dtype(global2local)), 'global2local must be int64 type.'\n shared_data = empty_shared_mem(name+'-g2l-', True, global2local.shape, 'int64')\n dlpack = shared_data.to_dlpack()\n self._data_store[name+'-g2l-'] = F.zerocopy_from_dlpack(dlpack)\n self._data_store[name+'-g2l-'][:] = global2local[:]\n # write data information to temp file that can be read by other processes\n self._write_data_shape_type(name+'-g2l-shape-'+str(self._machine_id), global2local)\n self._open_file_list.append(name+'-g2l-shape-'+str(self._machine_id))\n else: # Read shared-tensor\n while True:\n if (os.path.exists(name+'-g2l-shape-'+str(self._machine_id))):\n time.sleep(2) # wait writing finish\n break\n else:\n time.sleep(2) # wait until the file been created\n data_shape, data_type = self._read_data_shape_type(name+'-g2l-shape-'+str(self._machine_id))\n assert data_type == 'int64'\n shared_data = empty_shared_mem(name+'-g2l-', False, data_shape, 'int64')\n dlpack = shared_data.to_dlpack()\n self._data_store[name+'-g2l-'] = 
F.zerocopy_from_dlpack(dlpack)\n\n self._has_data.add(name+'-g2l-')", "title": "" }, { "docid": "6d0c02cb97b9f765259d90fe059b73ae", "score": "0.5277513", "text": "def register_for_new_maps(self):\n pass", "title": "" }, { "docid": "8591a855c7377c26fedc622490936f2b", "score": "0.5275509", "text": "def __load_id_maps(self):\n\n self.id_map.update(dict(zip(self.par_ids, self.names)))\n self.id_map.update(dict(zip(self.names, self.par_ids)))\n\n rdx_names = []\n for keyname in self.names:\n name_ = ''.join(s if s.isalpha() else ' ' for s in keyname).casefold()\n rdx_names.append(name_)\n\n self.__norm_name_map = dict(zip(rdx_names, self.names))", "title": "" }, { "docid": "966aa86de86f8b8a1b41ab0d57ba356a", "score": "0.5272972", "text": "def _update(self, columns):\n for c in columns:\n self._map[c.key] = c\n self._map[c.id] = c\n if c.iskey:\n setattr(self.key, c.key, c)\n else:\n setattr(self.value, c.key, c)", "title": "" }, { "docid": "9b46ee5795f3f23805fbe7966857ed43", "score": "0.5271868", "text": "def mapToGlobal(self, obj, subdev=None):\n tr = self.globalTransform(subdev)\n if tr is not None:\n mapped = self._mapTransform(obj, tr)\n return mapped\n\n ## If our transformation is nonlinear, then the local mapping step must be done separately.\n subdev = self._subdevDict(subdev)\n o2 = self.mapToParentDevice(obj, subdev)\n parent = self.parentDevice()\n if parent is None:\n return o2\n else:\n return parent.mapToGlobal(o2, subdev)", "title": "" }, { "docid": "83072c0c410f0ef44e1ed2052e0d6899", "score": "0.5265351", "text": "def update_type_and_attribute_ids(self):\n\n self.get_type_name_map()\n\n if len(self.input_network.types)>0:\n # If the network has type\n self.input_network.types = [self.network_template_type]\n\n #map the name of the nodes, links and groups to its negative ID\n for n_j in self.input_network.nodes:\n self.name_maps['NODE'][n_j.name] = n_j.id\n self.update_type_and_attribute(n_j)\n\n for l_j in self.input_network.links:\n self.name_maps['LINK'][l_j.name] = l_j.id\n self.update_type_and_attribute(l_j)\n\n for g_j in self.input_network.resourcegroups:\n self.name_maps['GROUP'][g_j.name] = g_j.id\n self.update_type_and_attribute(g_j)", "title": "" }, { "docid": "3d4ed00f8918f1ca806b65d160ee092e", "score": "0.5257187", "text": "def update_func_world(mech, world):\n globals().update(world)", "title": "" }, { "docid": "b13d7ec9e51858045a707b88cf300de5", "score": "0.52528906", "text": "def remap(self,states):\n raise NotImplementedError()", "title": "" }, { "docid": "9e0270eaa18a07f6905370c8420fbec6", "score": "0.5237808", "text": "def update_feed_dict(self):\n gp = self.gaussian_process\n feed_dict = self.feed_dict\n\n gp.update_feed_dict(gp.get_feed_dict_keys(), feed_dict)\n feed_dict[self.hyperparameters[0]] = gp.get_free_state()", "title": "" }, { "docid": "e52d9d722798b948ec0c7bf52676717b", "score": "0.5237184", "text": "def map_graphs(self):\n\n for k, v in list(self.graphmap.items()):\n if k == v: continue\n self.ruleset[v] = self.ruleset[k]\n del self.ruleset[k]\n for g in self.ruleset:\n for r in self.ruleset[g]:\n r.grapheme = re.sub(k, v, r.grapheme)\n r.leftcontext = re.sub(k, v, r.leftcontext)\n r.rightcontext = re.sub(k, v, r.rightcontext)\n if self.gnulls:\n for gk, gv in list(self.gnulls.items()):\n if (k in gk) or (k in gv):\n del self.gnulls[gk]\n gk = re.sub(k, v, gk)\n gv = re.sub(k, v, gv)\n self.gnulls.update({gk: gv})", "title": "" }, { "docid": "957535cbc732876b71a88720ef3fbbd6", "score": "0.5214665", "text": "def update_labelid_geo(self):\n 
local_sql = MysqlConnecttion(\"local\")\n booth_sql = MysqlConnecttion(\"booth\")\n label_map = {}#{\"word\":labelid}\n query = r\"select Word, Labelid from labels WHERE Geogr = 1 and Vert = 0 and Mktseg = 0\"\n rows = local_sql.excute_with_result(query)\n for row in rows:\n label_map[row[0]] = row[1]\n query = r'''(SELECT mqid, docid, labelid, mq_title_vector_short FROM magic_quadrants where removed = 0)'''\n mq_vector_map = {}#{\"mqid\":\"word vector (short)\"}\n label_tmap = {}\n docid_map ={}\n rows = local_sql.excute_with_result(query)\n for row in rows:\n mq_vector_map[row[0]] = row[3]\n docid_map[row[0]] = row[1]\n label_map[row[0]] = row[2]\n query = r'''(SELECT title_short, docid FROM doc_deatail_vector)'''\n cool_map ={}\n rows = local_sql.excute_with_result(query)\n for row in rows:\n cool_map[row[1]] = row[0]\n for mq_id in mq_vector_map:\n json_word_set = mq_vector_map[mq_id]\n if json_word_set == None or json_word_set == \"\":\n json_word_set = \"{}\"\n word_map = json.loads(json_word_set)\n label_list = []\n for word in word_map:\n if word in label_map:\n label_list.append(str(label_map[word]))\n json_word_set = cool_map[docid_map[mq_id]]\n if json_word_set == None or json_word_set == \"\":\n json_word_set = \"{}\"\n word_map = json.loads(json_word_set)\n for word in word_map:\n if word in label_map:\n label_list.append(str(label_map[word]))\n label_list = list(set(label_list))\n length = len(label_list)\n labels = \";\".join(label_list)\n query = r\"update new_magic_quadrants set Geo_label = '%s' where MQID = '%s'\"%(labels, mq_id)\n local_sql.excute(query)\n booth_sql.excute(query)", "title": "" }, { "docid": "15bede240fabcfa504f6c6cf1ca89d59", "score": "0.5214411", "text": "def reload_mappings(self):\n with open(self.mappings_path, 'rb') as f:\n mappings = cPickle.load(f)\n self.id_to_word = mappings['id_to_word']\n self.id_to_char = mappings['id_to_char']\n self.id_to_tag = mappings['id_to_tag']", "title": "" }, { "docid": "1dc37a233ace5af412a0f91cc0b9398e", "score": "0.5208248", "text": "def update_global_view_status (self, status):\n log.debug(\"Update Global view (DoV) mapping status with: %s\" % status)\n NFFGToolBox.update_status_info(nffg=self.__dov.get_resource_info(),\n status=status, log=log)", "title": "" }, { "docid": "3416196f9db65217bcc5b02a4a66228a", "score": "0.5205673", "text": "def _lu_swap(self, r1: int, r2: int) -> Dict[int, str]:\n\n for i in range(self.M.nrows() + min(r1, r2)):\n self.M[r1, i], self.M[r2, i] = self.M[r2, i], self.M[r1, i]\n\n return {r1: fr'\\leftarrow r_{r2+1} \\setminus 0', r2: fr'\\leftarrow r_{r1+1} \\setminus 0'}", "title": "" }, { "docid": "55c75732aae55391b5c0c236db2c88e2", "score": "0.5204835", "text": "def _update_pointers(self):\n pass", "title": "" }, { "docid": "b51be8ac8c620c4dcb4b889951b7f1ad", "score": "0.51973325", "text": "def updateTransform(self):\n\n coordCalibrationsOrigin, coordCalibrationsDestiny, namesCalibration = self.getLandmarksByType(PointType.calibrated)\n\n total_calibration_points = len(coordCalibrationsOrigin)\n if (total_calibration_points < 2):\n self.logger.info(\"From \" + str(self.map_id) + \":Not enough points to update.\")\n return\n\n self.logger.info(\"From \"+str(self.map_id)+\":Updating transform with \" + str(total_calibration_points) + \" reference points\")\n origin = np.zeros((total_calibration_points, 2), dtype=np.float32)\n destiny = np.zeros((total_calibration_points, 2), dtype=np.float32)\n for i in range(total_calibration_points):\n origin[i, 0] = coordCalibrationsOrigin[i][0]\n 
origin[i, 1] = coordCalibrationsOrigin[i][1]\n destiny[i, 0] = coordCalibrationsDestiny[i][0]\n destiny[i, 1] = coordCalibrationsDestiny[i][1]\n\n\n self.CalibratedPtp.updateGlobal(origin,destiny,namesCalibration)\n\n coordT, _, namesTarget = self.getLandmarksByType(PointType.target)\n self.processLocalArea(coordT,namesTarget)\n\n coordNC, _, namesNonCal = self.getLandmarksByType(PointType.non_calibrated)\n self.processLocalArea(coordNC, namesNonCal)\n\n _, coordACQ, namesAcq = self.getLandmarksByType(PointType.acquired)\n self.processLocalArea(coordACQ, namesAcq)\n\n namesAll = self.getLandmarkIds()\n originAll = self.getCoordsFromLandmarks(namesAll,1)\n destinyAll = self.getCoordsFromLandmarks(namesAll,2)\n self.GlobalPtp.updateGlobal(originAll, destinyAll, namesAll)", "title": "" }, { "docid": "4c537836bb19de309e946cbcd4510e93", "score": "0.519472", "text": "def setAttrMap(self, attr):\n\t\t\n\t\tattrName = attr.getAttribute(\"name\")\n\t\tattrValue = attr.getAttribute(\"value\")\n\t\t\t\t\t\n\t\tif self.mods:\n\t\t\tself.pmSetAttr(self.objWithNS, attrName, float(attrValue), self.mult)\n\t\telse:\n\t\t\tcmds.setAttr(self.objWithNS + \".\" + attrName, float(attrValue))", "title": "" }, { "docid": "da8555175f15d5005b801230f05deb95", "score": "0.51908076", "text": "def update_objects(self):\n\t\tself.update_projectiles()", "title": "" }, { "docid": "f5d72fc4851081af7f890e41d4f6cced", "score": "0.5188522", "text": "def SetKey2(self, *args):\n return _TopTools.TopTools_IndexedMapNodeOfIndexedMapOfShape_SetKey2(self, *args)", "title": "" }, { "docid": "34ecf36406dea43ce27cd68365feb032", "score": "0.5180778", "text": "def request_rebuild_midi_map(self):\n pass", "title": "" }, { "docid": "8c4b1ae5ca27414da51a4be37a7d47d1", "score": "0.51782566", "text": "def adjustPCmap(self,transition):\r\n poly_map = {}\r\n\tpc_list = sort(self.pc_list)\r\n\tfor i in range(shape(transition)[1]):\r\n\t poly_map[pc_list[i]] = i\r\n\tfor key in self.pc_map.keys():\r\n\t self.pc_map[key] = poly_map[self.pc_map[key]]", "title": "" }, { "docid": "e2956e1113e300458bfaafaab409d074", "score": "0.5170163", "text": "def _reassign_ids(self, old_new: Dict[int, int]) -> None:\n # update id_fw\n new_id_fw = {}\n for fwid, fws in self.id_fw.items():\n new_id_fw[old_new.get(fwid, fwid)] = fws\n self.id_fw = new_id_fw\n\n # update the Links\n new_l = {}\n for parent, children in self.links.items():\n new_l[old_new.get(parent, parent)] = [old_new.get(child, child) for child in children]\n self.links = Workflow.Links(new_l)\n\n # update the states\n new_fw_states = {}\n for fwid, fw_state in self.fw_states.items():\n new_fw_states[old_new.get(fwid, fwid)] = fw_state\n self.fw_states = new_fw_states", "title": "" }, { "docid": "f3b0b375f86d20e87cb83a906297188e", "score": "0.5165266", "text": "def request_rebuild_midi_map(self):\r\n return", "title": "" }, { "docid": "e52f41542f0d6796c146e2d4720525ee", "score": "0.51637876", "text": "def update_map(self, grid_map, pose, scan):\n\n # Current yaw of the robot\n robot_yaw = self.get_yaw(pose.pose.orientation)\n # The origin of the map [m, m, rad]. 
This is the real-world pose of the\n # cell (0,0) in the map.\n origin = grid_map.get_origin()\n # The map resolution [m/cell]\n resolution = grid_map.get_resolution()\n\n\n \"\"\"\n Fill in your solution here\n \"\"\"\n map_x =[]\n map_y = []\n ind_x = []\n ind_y = [] \n robot_x = []\n robot_y = []\n px=pose.pose.position.x\n py=pose.pose.position.y\n \n \n #For occupied space\n i=0\n for j in range(len( (scan.ranges))):\n \n if scan.range_min >= (scan.ranges[j]) or (scan.ranges[j]) >= scan.range_max:\n continue\n\n else:\n b_angle = scan.angle_min + (j * scan.angle_increment)\n \n map_x.append(px + (scan.ranges[j]) * cos(robot_yaw + b_angle))\n map_y.append(py + (scan.ranges[j]) * sin(robot_yaw + b_angle))\n\n ind_x.append(int((map_x[i]- origin.position.x) / resolution))\n ind_y.append(int((map_y[i]- origin.position.y) / resolution))\n\n # grid index of the robot\n robot_x.append(int((px - origin.position.x) / resolution) )\n robot_y.append(int((py - origin.position.y) / resolution)) \n start =(robot_x[i],robot_y[i])\n end = (ind_x[i],ind_y[i])\n \n trace=self.raytrace(start,end)\n i=i+1\n\n #Filling the free space\n for l in trace:\n self.add_to_map(grid_map, l[0], l[1], self.free_space) \n\n #Filling the occupied space \n for k in range(len(ind_x)):\n self.add_to_map(grid_map, ind_x[k], ind_y[k], self.occupied_space)\n\n\n \"\"\"\n For C only!\n Fill in the update correctly below.\n \"\"\"\n # Only get the part that has been updated\n update = OccupancyGridUpdate()\n # The minimum x index in 'grid_map' that has been updated\n update.x = np.amin(ind_x)\n # The minimum y index in 'grid_map' that has been updated\n update.y = np.amin(ind_y)\n # Maximum x index - minimum x index + 1\n update.w = np.amax(ind_x) - np.amin(ind_x) + 1\n # Maximum y index - minimum y index + 1\n update.h = np.amax(ind_y) - np.amin(ind_y) + 1\n # The map data inside the rectangle, in row-major order.\n update.data = []\n for y_u in range(np.amin(ind_y), np.amax(ind_y) + 1):\n for x_u in range(np.amin(ind_x), np.amax(ind_x) + 1):\n updated_value = grid_map[x_u,y_u]\n update.data.append(updated_value)\n\n # Return the updated map together with only the\n # part of the map that has been updated\n return grid_map, update", "title": "" }, { "docid": "ad8e01e73a8667290a5bbb31fad6287b", "score": "0.51632774", "text": "def updateMapKey(initkey, key, map,value = 1):\n\tif initkey not in map:\n\t\tmap[initkey] = {}\n\tif key not in map[initkey]:\n\t\tmap[initkey][key] = 0\n\tmap[initkey][key] = map[initkey][key] +value", "title": "" }, { "docid": "a39cce74753d252e93ec0cdebbf41697", "score": "0.5159968", "text": "def perform_mapping(\n self, fs: typing.List[AnyFunction], map: PointType, inverse_map: PointType, tdim: int\n ) -> typing.List[AnyFunction]:\n raise mappings.MappingNotImplemented()", "title": "" }, { "docid": "a39cce74753d252e93ec0cdebbf41697", "score": "0.5159968", "text": "def perform_mapping(\n self, fs: typing.List[AnyFunction], map: PointType, inverse_map: PointType, tdim: int\n ) -> typing.List[AnyFunction]:\n raise mappings.MappingNotImplemented()", "title": "" }, { "docid": "0801832ae89c00440c55d6e04dc14e58", "score": "0.51568234", "text": "def _pad_static_map(self):\n global_costmap = np.array(self.static_map.data, dtype=np.int8).reshape(self.static_map.info.height, -1)\n\n # Get index of occupied cells\n occupied_index = np.where(global_costmap == 100)\n\n # Loop over occupied cells and pad them\n for coord in zip(occupied_index[0], occupied_index[1]):\n self._pad_point(global_costmap, coord)\n \n # 
Show messages for debugging\n if self.debug_mode == True:\n # Receive an image of the global_costmap\n cv2.imwrite('map_padded.jpg', global_costmap.astype(np.uint8))\n print(np.unique(global_costmap))\n\n self.static_map.data = global_costmap.ravel()", "title": "" }, { "docid": "60ff5edb4617790d1566bc05d5b5e006", "score": "0.51456535", "text": "def reinit(self):\n self.current_value_global = copy_dict(self.varGlobalInit)\n for mod in self.modules:\n mod.reinit()", "title": "" }, { "docid": "20cf72991c6f600c1c90b3d4c6386e50", "score": "0.5143412", "text": "def updateConstantsFromMapping(self, mapping):\n for key, val in mapping.iteritems():\n if isinstance(val, Constant):\n if key in self.constants:\n log.warn(\"Constant with name %s is already declared, overriding\", key)\n self.constants[key] = (val.value, val.type)", "title": "" }, { "docid": "c9002293ef606c513567d1176729c260", "score": "0.51360947", "text": "def assign_simple_node_features(ndata, g, ntypes, assign_id=False):\n for ntype in ntypes:\n for col in g.nodes[ntype].data.keys():\n if not assign_id and col == dgl.NID:\n continue\n induced_nodes = ndata[dgl.NID][ntype]\n ndata[col] = {ntype: g.nodes[ntype].data[col][induced_nodes]}", "title": "" }, { "docid": "a5a96233a922d24cbc576114b67200ad", "score": "0.51353544", "text": "def test_incremental_fcache_buildup_and_global_access(self):\n # map with empty\n self.upapa_class.purge_fasta_info(\"Test.fasta\")\n maps = self.upapa_class.map_peptide(peptide=\"KLEINER\", fasta_name=\"Test.fasta\")\n # print(maps)\n self.assertEqual(len(maps), 0)\n\n # parse one fasta via a unode ..\n self.upapa_class.build_lookup(fasta_name=\"Test.fasta\", fasta_stream=TEST_FASTA)\n # map with one parsed fasta\n maps = self.upapa_class.map_peptide(peptide=\"KLEINER\", fasta_name=\"Test.fasta\")\n self.assertEqual(len(maps), 1)\n # parse another fasta via a different unode ..\n self.upapa_class.build_lookup(\n fasta_name=\"Test.fasta\", fasta_stream=TEST_FASTA_TWO\n )\n # map with two parsed fastas\n\n maps = self.upapa_class.map_peptide(peptide=\"KLEINER\", fasta_name=\"Test.fasta\")\n print(maps)\n self.assertEqual(len(maps), 2)", "title": "" }, { "docid": "8a37e733648f3b986c55cf9db5553f95", "score": "0.5135315", "text": "def update_nni(nodes, links):\n for link in links:\n ports = link[\"ports\"]\n nni_a, nni_b = ports[0], ports[1]\n node_a = nni_a.split(\":\")[4]\n port_a = nni_a.split(\":\")[5]\n node_b = nni_b.split(\":\")[4]\n port_b = nni_b.split(\":\")[5]\n for node in nodes:\n if node[\"name\"] == node_a:\n for port in node[\"ports\"]:\n if port_a == port[\"id\"].split(\":\")[5]:\n port[\"nni\"] = nni_b\n elif node[\"name\"] == node_b:\n for port in node[\"ports\"]:\n if port_b == port[\"id\"].split(\":\")[5]:\n port[\"nni\"] = nni_a", "title": "" }, { "docid": "37e120373fa89087f76b7651b1315eb6", "score": "0.513131", "text": "def merge_info(self):\n global global_path\n global global_known_p\n global_path = global_path + (list(set(self.path) - set(global_path))) # removes duplicates\n self.path = self.path + (list(set(global_path) - set(self.path))) # removes duplicates\n\n # haalt uit global_known_p eerst weg wat in self.known_p_removed zit\n l3 = [x for x in global_known_p if x not in self.known_p_removed]\n global_known_p = l3\n # merge dan alles\n global_known_p = global_known_p + list(set(self.known_p) - set(global_known_p))\n self.known_p = global_known_p\n self.sort_known_patients()\n l3 = []", "title": "" }, { "docid": "3afdeee6f9b19b30e8ad8412d3772e2a", "score": "0.5125499", "text": 
"def update_idx_id_mapping(self):\n if self.dpd is not None and self.dpd.shape[0] > 0:\n # sort by id link ascending\n self.dpd = self.dpd[self.dpd[:, 1].argsort()]\n\n # update dpd_idx to make the index to ID pairing bidirectional again\n for idx, dpd_pair in enumerate(self.dpd):\n self.reps[dpd_pair[1]].dpd_idx = idx", "title": "" }, { "docid": "6ff897a6151f85183d9da06575ac678b", "score": "0.51248366", "text": "def map_variable(gdb_path, mukey_path, variable, dst):\n\n # Expand user path\n mukey_path = os.path.expanduser(mukey_path)\n gdb_path = os.path.expanduser(gdb_path)\n dst = os.path.expanduser(dst)\n\n # Get the Map Unit Aggregated Attribute Table\n mukey = xr.open_rasterio(mukey_path, chunks=(1, 5000, 5000))\n umukeys = np.unique(mukey[:]).astype(str)\n muaggatt = gpd.read_file(gdb_path, layer=\"muaggatt\")\n chorizon = gpd.read_file(gdb_path, layer=\"chorizon\")\n cogeomordesc = gpd.read_file(gdb_path, layer=\"cogeomordesc\")\n components = gpd.read_file(gdb_path, layer=\"component\")\n components = pd.merge(chorizon, components, on=\"cokey\")\n components = pd.merge(components, muaggatt, on=\"mukey\")\n\n # Put the keys in front\n keys = [c for c in components.columns if \"key\" in c]\n others = [c for c in components.columns if \"key\" not in c]\n new_order = keys + others\n components = components[new_order]\n\n # Get the Horizon Table\n munits = muaggatt[[\"mukey\", \"muname\"]]\n variable_df = pd.merge(variable_df, munits, on=\"mukey\")\n variable_df = components[[\"mukey\", \"chkey\", \"muname\", \"hzname\", \"geomdesc\",\n \"desgnmaster\", \"hzdept_r\", \"hzdepb_r\", \"hzthk_r\",\n \"sandtotal_r\", \"silttotal_r\", \"claytotal_r\", \n variable]]\n\n # Now, whats the best way to map these values\n val_dict = dict(zip(variable_df[\"mukey\"].astype(int),\n variable_df[variable]))\n mv = Map_Values(val_dict, err_val=-9999)\n mv.map_file(mukey_path, dst)", "title": "" }, { "docid": "965d1c87eb8723c7aa692e6b73d897bf", "score": "0.5122539", "text": "def map_update(self, m):\n dists = np.zeros((self.num_points,))\n for idx, point in enumerate(self.coords):\n dists[idx] = m.nearest_lane_dist(point)\n if np.percentile(dists, 5) > 15:\n # Don't bother incorporating weights if the 95%+ of particles are\n # more than 15m from a road. 
In that case, we can safely assume\n # that we're off the road.\n return\n factors = 1.0 / ((1 + dists ** 2) ** 1.1)\n self.weights *= factors", "title": "" }, { "docid": "3de2192a9ab603c96997a048cbd4c709", "score": "0.5116645", "text": "def __reset_node_ids (info, binding):\n log.debug(\"Reset NF paths...\")\n for attr in (getattr(info, e) for e in info._sorted_children):\n rewrite = []\n for element in attr:\n if hasattr(element, \"object\"):\n old_path = element.object.get_value()\n bb, nf = get_bb_nf_from_path(path=old_path)\n if bb not in binding:\n log.warning(\"Missing binding for node: %s\" % bb)\n continue\n new_bb = binding.get(bb)\n log.debug(\"Find BiSBiS node remapping: %s --> %s\" % (bb, new_bb))\n old_bb, new_bb = \"/node[id=%s]\" % bb, \"/node[id=%s]\" % new_bb\n new_path = str(old_path).replace(old_bb, new_bb)\n rewrite.append((element, new_path))\n # Tricky override because object is key in yang -> del and re-add\n for e, p in rewrite:\n attr.remove(e)\n e.object.set_value(p)\n attr.add(e)\n log.debug(\"Overrided new path for NF --> %s\" % e.object.get_value())\n log.log(VERBOSE, info.xml())\n return info", "title": "" }, { "docid": "97354a1a33c34f4428dda61ed1f460df", "score": "0.51147324", "text": "def updatekernel(self,kernel):\n self.GPkernel = kernel", "title": "" }, { "docid": "71a3d64111258b46fa8a2a867a71cd1e", "score": "0.51105374", "text": "def recalc_info_dict(self):\n self._info_dict = self.__create_info_dict()\n map(self.add_s2_info_dict, self.slist)", "title": "" }, { "docid": "aa1e5029ae5d3a96acd4f03baf72ffa1", "score": "0.51089907", "text": "def _post_setattr_formulas(self, old, new):\n self.database_entries = {key: 1.0 for key in new}", "title": "" } ]
4d39bb1519a7f62a7cf3caeafcd634a3
GET request to retrieve a specific product from the database.
[ { "docid": "4ad297d0b00a0f6bc43b64684bf5bab8", "score": "0.7202844", "text": "def get_specific_product(product_id):\n try:\n # Query database for all products\n selection = Products.query.order_by(Products.id).filter(\n Products.id == product_id).one_or_none()\n\n return jsonify({\n 'success': True,\n 'products': selection.info()\n })\n\n except Exception as e:\n # Print exception error as well as abort 404\n print(f'Exception \"{e}\" in get_specific_product()')\n abort(404, {'message':\n f'Product ID: {product_id} does not exist.'})", "title": "" } ]
[ { "docid": "0dec387335a7d688dff232ff44946424", "score": "0.77556497", "text": "def retrieve(self, request, pk=None):\n try:\n product = ProductModel.objects.get(pk=pk)\n serializer = ProductSerializer(product, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "title": "" }, { "docid": "f62e413f620cacede006454b0a085f3b", "score": "0.7739515", "text": "def get(self):\n\n identifier = self.get_query_argument(\"id\", None)\n if not identifier:\n self.generic_resp(404)\n return\n\n direct = self.get_query_argument(\"direct\", False)\n\n try:\n direct = bool(int(direct))\n except:\n direct = False\n try:\n\n res = self.get_product(identifier, direct)\n if not res:\n self.generic_resp(404)\n return\n\n resp = dict()\n resp[\"product\"] = res\n resp[\"status\"] = 200\n resp[\"mesasage\"] = \"OK\"\n\n self.write(json.dumps(resp))\n self.set_status(200)\n return\n except Exception as e:\n self.generic_resp(500, str(e))\n self.finish()", "title": "" }, { "docid": "0322cefc47631d684b43847f320b8966", "score": "0.7682345", "text": "def retrieve(self, request, pk=None):\n try:\n product = Product.objects.get(pk=pk)\n serializer = ProductsSerializer(\n product, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "title": "" }, { "docid": "2426a33dd1b9aafdf872c20dd4ab99c5", "score": "0.7679826", "text": "def get_product(self, productId):\n\n method = \"GET\"\n endpoint = \"products/{}\".format(productId)\n\n return self.request(method, endpoint)", "title": "" }, { "docid": "99b547f0d2dcc5bcaecf0a3fcc89f125", "score": "0.7581064", "text": "def get(self, product_id):\n app.logger.info('Request for product with id: %s', product_id)\n product = Product.find(product_id)\n if not product:\n api.abort(status.HTTP_404_NOT_FOUND,\n \"Product with id '{}' was not found.\".format(product_id))\n return product.serialize(), status.HTTP_200_OK", "title": "" }, { "docid": "23f2e401da392bf4e22abf6999862c32", "score": "0.74065155", "text": "def fetch(cls, product_id):\n return cls().requests.get(f\"product/{product_id}\")", "title": "" }, { "docid": "784fd520ee573308a251ee7fbcfc47dc", "score": "0.7405221", "text": "def get(self, id):\n product = ProductsModel()\n product_to_get = product.get_item('products', product_id=id)\n message = \"Product with id {} does not exist\".format(id)\n if product_to_get:\n return product_to_get\n return {\"message\": message}, 404", "title": "" }, { "docid": "d0f3090a37ffba66fcfcb0a65fabfd20", "score": "0.7219862", "text": "def get(self, p_id):\n for i in range(len(products)):\n if products[i]['id'] == int(p_id):\n product = products[i]\n return make_response(jsonify(product)), 200\n else:\n response = {\n 'status': 'Failed',\n 'message': 'Product Not Found'\n }\n return make_response(jsonify(response)), 404", "title": "" }, { "docid": "760a97e4491587f09585195ca3ae075a", "score": "0.7085526", "text": "def get(self):\n\n\t\tparams = self.parseParams()\n\t\tself.doProductSearch(params)", "title": "" }, { "docid": "673d0e8331196d40be8a8c2056f6993e", "score": "0.7048351", "text": "def product(db, id):\n session.get_or_create_session(db)\n\n product = model.product_get(db, id)\n\n if product:\n #this product exists, to get its info.\n info = {\n 'product': model.product_get(db, id)\n }\n return template('product', info)\n else:\n #no product exists, return 404\n return HTTPError(404, \"No such product.\")", "title": "" }, { "docid": 
"cbe4fab1337913dbd17ce2a516d022ae", "score": "0.6942659", "text": "def get_products(self, **kwargs):\n\n method = \"GET\"\n endpoint = self._create_endpoint(\"products\", kwargs)\n\n return self.request(method, endpoint)", "title": "" }, { "docid": "d6c80405be2161d99baa3de01cf076fe", "score": "0.6934231", "text": "def get(self, request, slug):\n product = get_object_or_404(Product, slug=slug)\n return render(request, \"niunius/product.html\", {\"product\": product})", "title": "" }, { "docid": "b02cd35abaff4812804577ab349f600f", "score": "0.68860555", "text": "def retrieve(self, request, pk=None):\n try:\n category = Product_Category.objects.get(pk=pk)\n serializer = Product_Category_Serializer(category, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "title": "" }, { "docid": "00ef0aeeabd0e901cbb9668073a7809b", "score": "0.6879762", "text": "def get_product(product_id):\n\n response = _get('products/' + product_id)\n if response.status_code != 200:\n error_msg = response.json()['data']\n logger.error('get_product() failed with \"%s\"' % error_msg)\n raise Exception('Product not found')\n product_data = response.json()[u'data']\n return api.Product(product_data)", "title": "" }, { "docid": "aabf3b82af7f2f14cf25c3b05c2284d2", "score": "0.6842081", "text": "def api_product_detail(request, product_id):\n try:\n product = Product.objects.get(pk=product_id)\n except Product.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = ProductSerializer(product)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = ProductSerializer(product, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n product.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "title": "" }, { "docid": "7dff2cfa8ecd20f574949d680574f015", "score": "0.68408215", "text": "def retrieve(self, request, pk=None): \n return Response({\"http_method\":\"GET\"})", "title": "" }, { "docid": "6ce5d12791a21f4ade0dc69e340c54c7", "score": "0.6810289", "text": "def get(self, request, id):\n \n reviews = Review.objects.filter(user__id=id)\n\n if request.GET.get('product'):\n reviews.filter(product__slug=request.GET.get('product'))\n\n if not reviews:\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n serializer = ReviewSerializer(reviews, many=True)\n return Response(serializer.data)", "title": "" }, { "docid": "5119df8009633f439a7db35afbfff853", "score": "0.67971087", "text": "def get(self):\n app.logger.info('Request for product list')\n products = []\n category = request.args.get('category')\n name = request.args.get('name')\n price = request.args.get('price')\n if category:\n products = Product.find_by_category(category)\n elif name:\n products = Product.find_by_name(name)\n elif price and int(price) > 0 and int(price) < 4: # query price by range\n if int(price) == 1:\n products = Product.find_by_price(0, 25)\n elif int(price) == 2:\n products = Product.find_by_price(25, 50)\n else:\n products = Product.find_by_price(50, 75)\n else:\n products = Product.all()\n results = [product.serialize() for product in products]\n return results, status.HTTP_200_OK", "title": "" }, { "docid": "3068d4bdc78abbda5b162006ba10bffe", "score": "0.67844826", "text": "def test_product_get(self):\n\n product = 
self.products['Yellow Wool Jumper']\n        result = model.product_get(self.db, product['id'])\n\n        # check a few fields\n        self.assertEqual(product['id'], result['id'])\n        self.assertEqual(product['name'], result['name'])", "title": "" }, { "docid": "cd18e1d24022b6ca0d4d8d8371b1340c", "score": "0.67698896", "text": "def test_valid_retrieve_product(self):\n        response = self.client.get(\n            r('products:product-detail', pk=self.valid_pk), format='json')\n        self.assertEqual(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "2874e3d87fe3d73f814b517502d9fe1f", "score": "0.67581475", "text": "def get(self, request):\n\n        reviews = Review.objects.filter(user=request.user)\n\n        if request.GET.get('product'):\n            reviews.filter(product__slug=request.GET.get('product'))\n\n        if not reviews:\n            return Response(status=status.HTTP_204_NO_CONTENT)\n\n        serializer = ReviewSerializer(reviews, many=True)\n        return Response(serializer.data)", "title": "" }, { "docid": "e2ff35075bf78741ac3178e754f5dad7", "score": "0.6736622", "text": "def retrieve(self, request, pk=None):\n\n        return Response({'http_method':'GET'})", "title": "" }, { "docid": "c32086c67fd1b4a68e4543ea546f9fcb", "score": "0.67365336", "text": "def retrieve(self,request,pk=None):\n\t\treturn Response({'message':'GET'})", "title": "" }, { "docid": "c91c02f60eee46f2721f2ced623f7a72", "score": "0.673558", "text": "def get(self):\n        parser = reqparse.RequestParser()\n        parser.add_argument(\"category_id\", type=int, default=None, required=True)\n        args = parser.parse_args()\n        return c.get_product_list(args)", "title": "" }, { "docid": "769be5c4ac2cbcbe8d32e1512d0792fa", "score": "0.6734989", "text": "def retrieve(self, request, pk=None):\n\n        return Response({'http_method': 'GET'})", "title": "" }, { "docid": "eedbf37c7f41126c7de2a3f3b3a0d8c0", "score": "0.67247903", "text": "def retrieve(self, request, pk = None):\n        return Response({'http_method':'GET'})", "title": "" }, { "docid": "9244b8d6d941f7277b1bbac525f0c588", "score": "0.6713015", "text": "def get_products():\n    return requests.get(API_URL + \"products\").json()", "title": "" }, { "docid": "27386e599b9c1629d1187a2bc27759d3", "score": "0.6705706", "text": "def retrieve(self,request,pk=None):\n        return Response({'http_method':'GET'})", "title": "" }, { "docid": "21b2575288ea732e121e598f883bfa8b", "score": "0.6705331", "text": "def get_product_info():\n    iid = request.json[\"idProducto\"]  # item id\n    uid = request.json.get(\"idSocio\", None)  # user id\n    return safe_return(db.get_product_info, iid, uid)", "title": "" }, { "docid": "0efe1b664315de5e20c4c6d8d38c06a3", "score": "0.67022336", "text": "def retrieve(self, request, pk=None):\n        return Response({'method': 'Retrieve(GET)'})", "title": "" }, { "docid": "96c4a3c54f55648259279c6e6655a850", "score": "0.6679882", "text": "def product(request, product_id):\n\n    
product = get_object_or_404(Product, pk=product_id)\n\n context = {\n 'product': product,\n }\n\n return render(request, 'products/product.html', context)", "title": "" }, { "docid": "caaf9a0ec911620fe9837cf4783862f9", "score": "0.6678344", "text": "def get_product(self, id_):\n for _product in self.products:\n if _product.id == id_:\n return _product\n raise LookupError(\"Product Not in DB\")", "title": "" }, { "docid": "bdcc0a0c7fb38ca8de3875271a3188e7", "score": "0.66685796", "text": "def retrieve(self,request,pk= None):\n return Response({\"http_method\":'GET'})", "title": "" }, { "docid": "561cf5fe187abb4c4aaaeb31d0e7d438", "score": "0.6626251", "text": "def get_specific_product(self,id):\n return self.records.get_data(\"id\",id)", "title": "" }, { "docid": "8f9533d0a88a17a5584f4e5a9b4c1f15", "score": "0.66077554", "text": "def get(self):\n \"\"\"\n @api {get} /product/:productId Get a seller\n @apiVersion 1.0.0\n @apiGroup Product\n\n @apiUrlParam {Integer} productId\n \"\"\"\n return self", "title": "" }, { "docid": "ca6ce37c09ee029fc78589d0d68df51e", "score": "0.6592273", "text": "def get(self):\n products = self.user.get_all_products()\n # check_product = [product for product in products if product[\"product_name\"]==product_name]\n if products:\n return make_response(jsonify({\"products\":products}),200)\n else:\n return make_response(jsonify({\"message\":\"No products available\"}))\n # return make_response(jsonify({\"message\":\"No products available\"}),204)", "title": "" }, { "docid": "57d907064e96f15a9dcea227f448679a", "score": "0.65551853", "text": "def product_detail(request, product_id):\n\n product = get_object_or_404(Product, pk=product_id)\n\n # product singular to return on product with that id.\n context = {\n 'product': product,\n }\n\n return render(request, 'products/product_detail.html', context)", "title": "" }, { "docid": "a6042c939de14b349349b6becf95a5e6", "score": "0.6554168", "text": "def view_product():\n try:\n db = get_db()\n products = db.execute(\"SELECT product_id, product_name FROM Product\")\n return render_template(\"product/view_product.html\", result=products)\n except sqlite3.Error as error:\n print(error)\n return render_template(\"error_occured.html\")", "title": "" }, { "docid": "855b856751eb0f7522ce525dadd6712c", "score": "0.65521985", "text": "def test_retrieve_product(self):\n serializer = ProductSerializer(self.product, many=False)\n response = self.client.get(\n r('products:product-detail', pk=self.valid_pk), format='json')\n self.assertEqual(response.data, serializer.data)", "title": "" }, { "docid": "940df445847be49979a91a2c3dcb9508", "score": "0.6545566", "text": "def get_product(self, product_name=False, product_id=False):\n if not product_name and not product_id:\n self.error(\"Can't look for a PRODUCT without NAME or ID\")\n return False\n self.debug('Getting PRODUCT \"{}\" from database'\n ''.format(product_name or product_id))\n #TODO: get PRODUCT from database using psycopg2\n prod_obj = _MODEL_PRODUCT(self_id=1, name='demo', stock=0, recipe_id=1)\n return prod_obj", "title": "" }, { "docid": "182bf005f5859cb8a643954ae5b75d64", "score": "0.6534951", "text": "def product_detail(request, pk):\n product = get_object_or_404(Product, pk=pk)\n return render(request, \"product_detail.html\", {'product': product})", "title": "" }, { "docid": "2fe4c94fb5ff849a5efc1ed97dc52730", "score": "0.6522919", "text": "def retrieve(self, request, pk=None):\n return Response({'message': 'get methode'})", "title": "" }, { "docid": 
"68be0141094391e73c9f58e719df9b9e", "score": "0.65121883", "text": "def get_request(id ,url): \n\n http = urllib3.PoolManager()\n r = http.request('GET', f'http://localhost:5000/product/{id}',\n headers={'Content-Type': 'application/json'})\n\n new_product = json.loads(r.data)\n #print(y['name'])\n return product(new_product['name'], new_product['description'], new_product['price'], new_product['qty'])", "title": "" }, { "docid": "bd3bc2787ec997b08418eb6e90c85000", "score": "0.6511772", "text": "def getSpecific(self, **kwargs):\n\n allParams = ['productId', 'id']\n\n params = locals()\n for (key, val) in params['kwargs'].iteritems():\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method getSpecific\" % key)\n params[key] = val\n del params['kwargs']\n\n resourcePath = '/products/{productId}/product-versions/{id}'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'GET'\n\n queryParams = {}\n headerParams = {}\n formParams = {}\n files = {}\n bodyParam = None\n\n \n\n \n\n \n if ('productId' in params):\n replacement = str(self.apiClient.toPathValue(params['productId']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'productId' + '}',\n replacement)\n \n if ('id' in params):\n replacement = str(self.apiClient.toPathValue(params['id']))\n replacement = urllib.quote(replacement)\n resourcePath = resourcePath.replace('{' + 'id' + '}',\n replacement)\n \n\n \n\n \n\n postData = (formParams if formParams else bodyParam)\n\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n postData, headerParams, files=files)\n\treturn response", "title": "" }, { "docid": "37b3e5266320fd2aa3ab8e8f4de8c889", "score": "0.64975417", "text": "def get_product(request):\r\n try:\r\n token = request.META.get(\"HTTP_AUTHORIZATION\")\r\n payload = jwt.decode(token, \"Temage\")\r\n identity = payload['id']\r\n product_id = json.loads(request.body.decode('utf-8'))['productID']\r\n product = Product.objects.get(id=product_id)\r\n content = {}\r\n user_info = {}\r\n user_info['username'] = product.creator.user.username\r\n user_info['id'] = product.creator.user.id\r\n user_info['avator'] = settings.MEDIA_PATH + str(product.creator.avator)\r\n if product.creator.user.id == identity:\r\n content['can_be_delete'] = 1\r\n else:\r\n content['can_be_delete'] = 0\r\n user = Profile.objects.get(user__id=identity)\r\n collect = user.collection\r\n been_owned = collect.cards.filter(product_id = product_id)\r\n if been_owned:\r\n content['has_been_colleted'] = 1\r\n else:\r\n content['has_been_collected'] = 0\r\n content['id'] = product_id\r\n content['text'] = product.html\r\n content['creator'] = user_info\r\n content['title'] = product.title\r\n themes = product.theme.all()\r\n themelist = []\r\n for theme in themes:\r\n themelist.append(theme.name)\r\n content['style'] = themelist\r\n return HttpResponse(json.dumps(content), status=200, content_type=\"application/json\")\r\n except:\r\n return HttpResponse(json.dumps(\"something wrong\"), status=400, content_type=\"application/json\")", "title": "" }, { "docid": "a4d9a81b83c9da8416970bbfd96d3465", "score": "0.6488897", "text": "def retrieve(self, request, pk=None):\n try:\n vendor = Vendor.objects.get(pk=pk)\n serializer = VendorSerializer(vendor, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "title": "" }, { "docid": "b9fd4cd315bd3d7721ea969ec5b510db", "score": "0.64838034", 
"text": "def test_invalid_retrieve_product(self):\n response = self.client.get(\n r('products:product-detail', pk=self.invalid_pk), format='json')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "title": "" }, { "docid": "058b089eb965087edb035db4799bb162", "score": "0.6476916", "text": "def retrieve(self,request,pk=None):\n return Response({'method':'RETRIEVE'})", "title": "" }, { "docid": "7be9860444f6d9c205074cbdbf7d1f15", "score": "0.64646006", "text": "def show(self, req, id):\n # FIXME(nmg): should catch exception if any\n query = self.db.get_veg(id)\n LOG.info(query)\n\n if not query:\n return Fault(webob.exc.HTTPNotFound())\n\n item = {\n 'id': str(query['_id']),\n 'name': query['name'],\n 'photo': query['photo'],\n 'price': query['price'],\n 'mprice': query['mprice'],\n 'size': query['size'],\n 'origin': query['origin'],\n 'desc': query['desc']\n }\n\n return HttpResponse(item)", "title": "" }, { "docid": "51af46ab163f08d6d67313e04d7dbb11", "score": "0.64368194", "text": "def find_by_id(\n self,\n product_id: str,\n ) -> Product:\n pass", "title": "" }, { "docid": "700bf702cee983647e8ed68bfae83f32", "score": "0.639829", "text": "def getProductByID(id):\n product = session.query(ProductItem)\\\n .filter(ProductItem.product_id == id).first()\n return product", "title": "" }, { "docid": "c20ee3a68049df9c66b85e0ef462bfd4", "score": "0.6396573", "text": "def test_get(self):\n response = self.client.get(r('products:product-list'), format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "title": "" }, { "docid": "4bd64a9ae5f2156ae8c459dda8841cfc", "score": "0.6395936", "text": "def get(self):\n if not product_list:\n return {\"message\": \"no product saved\"}, 404\n return product_list", "title": "" }, { "docid": "2a199e998efbb92ab8af29cf28e2c7a8", "score": "0.637678", "text": "def fetch_products():\n products = product_controller.get_all_products()\n if products:\n return jsonify({\"Products\": products}), 200\n return jsonify({\"message\": \"No products available\"}), 404", "title": "" }, { "docid": "5f37ddb2ab6e6caa130e1927becae96f", "score": "0.6373812", "text": "def get(self, request, id, format=None):\n cat_obj = Products.objects.filter(id=id, is_active=True).all()\n product_list = []\n for each in cat_obj:\n product_list.append({each.id: each.name})\n return Response(product_list, status=status.HTTP_200_OK)", "title": "" }, { "docid": "344e290cfa669d45955b8d5e4a6be784", "score": "0.63417745", "text": "def get_a_product(self, product_id):\n\n return self.database.Product.get_by_id(product_id)", "title": "" }, { "docid": "4339f026fdc33069673871a31200ca4a", "score": "0.6329463", "text": "def get_product_details(product_id,access_token):\r\n product_url = \"https://api.kroger.com/v1/products/\" + product_id\r\n headers = {\r\n \"Accept\": \"application/json\",\r\n \"Authorization\": \"Bearer \" + access_token,\r\n }\r\n response = requests.get(product_url,headers=headers)\r\n #print(response.status_code)\r\n if(response.status_code == requests.codes.ok):\r\n return json.loads(response.text) # Return the product details as python dictionary\r\n\r\n print(\"ERROR:Could not get product detail for product_id:\"+product_id)\r\n return None", "title": "" }, { "docid": "2498f142d6532e2afa9ae1abd68c6ffd", "score": "0.6306155", "text": "def get(self):\n try:\n vend_controller = Vend_Controller()\n products = vend_controller.view_all_products()\n return { 'message': 'Success', \"data\" : products } , 200\n except Exception as e:\n return 
self.error_controller.handle(e)", "title": "" }, { "docid": "350d562d32073ceb8cf34070fce7d52e", "score": "0.6297312", "text": "def test_get_product_by_id(self):\n pass", "title": "" }, { "docid": "078e17c5bf92fb90461545a1b1d3cc44", "score": "0.6295105", "text": "def get(self, request, format=None):\n cat_obj = Products.objects.filter(is_active=True).all()\n product_list = []\n for each in cat_obj:\n product_list.append({each.id: each.name})\n return Response(product_list, status=status.HTTP_200_OK)", "title": "" }, { "docid": "a9acc997f3826cb78d328600174deecc", "score": "0.6254769", "text": "def test_get_product_by_id(test_app, test_database, add_product):\n test_database.session.query(Product).delete()\n product = add_product(test_product)\n client = test_app.test_client()\n response = client.get(f\"/products/{product.id}\")\n data = response.json\n assert response.status_code == 200\n for key in test_product.keys():\n assert data[key] == test_product[key]", "title": "" }, { "docid": "48be21e1cf00226d249a83aaa11103c0", "score": "0.61984587", "text": "def retrieve(self, request, pk=None):\n try:\n resource = Resource.objects.get(pk=pk)\n serializer = ResourceSerializer(resource, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "title": "" }, { "docid": "c5aa106a5897872920a36a1381945912", "score": "0.61776114", "text": "def retrieve(self,request):\n return Response({'http_method':'GET'})", "title": "" }, { "docid": "24b67ebe96af43fdcb7295eca2c383ec", "score": "0.6152506", "text": "def get(self, uri, query=None, **kwargs):\n return self.fetch('get', uri, query, **kwargs)", "title": "" }, { "docid": "68915964a24d032a0a39d6863fc5dad0", "score": "0.61452615", "text": "def products(self, request):\n user_id = int(request.query_params.get('user_id'))\n\n products = Products.objects.all_products(user_id)\n\n return Response(products)", "title": "" }, { "docid": "508720d89bd31393b7e0f30545f12af7", "score": "0.61174357", "text": "def get(self, **kwargs):\n return self._make_request('get', kwargs)", "title": "" }, { "docid": "886eb8f3c7ccf2f781f221cb28a0402b", "score": "0.61054057", "text": "def am_get_product_info():\n return get_single_product(external_facing=False)", "title": "" }, { "docid": "a5b7d52cfdc90998a5698a663e80697c", "score": "0.6091989", "text": "def get_product_detail(\n self, collection_id: str, user: Dict[str, Any] = None\n ) -> dict:\n try:\n LOGGER.info(\"%s product requested\", collection_id)\n\n product_record = None\n if settings.IS_CSW_SERVER and collection_id in settings.WHITELIST:\n product_record = self.csw_session.get_product(collection_id)\n elif settings.IS_CSW_SERVER_DC and collection_id in settings.WHITELIST_DC:\n product_record = self.csw_session_dc.get_product(collection_id)\n elif settings.IS_HDA_WEKEO and collection_id in settings.WHITELIST_WEKEO:\n product_record = self.hda_session.get_product(collection_id)\n if not product_record:\n return ServiceException(400, self._get_user_id(user),\n f\"The requested collection {collection_id} does not exist on the backend.\",\n internal=False, links=[]).to_dict()\n\n # Check user permission\n # TODO: implement better logic for checking user permissions\n # Unauthorized\n if collection_id in ('TUW_SIG0_S1') and not user:\n return ServiceException(401, self._get_user_id(user), \"This collection is not publicly accessible.\",\n internal=False, links=[]).to_dict()\n # Forbidden (does not have permissions)\n elif collection_id == 'TUW_SIG0_S1' and 
user \\\n and self.csw_session_dc.data_access not in user[\"profile\"][\"data_access\"]:\n return ServiceException(403, self._get_user_id(user),\n \"User is not authorized to access this collection.\", internal=False,\n links=[]).to_dict()\n\n response = CollectionSchema().dump(product_record)\n LOGGER.debug(\"response:\\n%s\", pformat(response))\n return {\"status\": \"success\", \"code\": 200, \"data\": response}\n except Exception as exp:\n return ServiceException(500, self._get_user_id(user), str(exp)).to_dict()", "title": "" }, { "docid": "d1b7531f23d3919546a9340648ecd7a0", "score": "0.6088593", "text": "def test_detail_get(self) -> None:\n client: Client = Client()\n Product.objects.create(**Config.DATABASE_EXPECTED)\n code_product: dict = Config.DATABASE_EXPECTED[\"code\"]\n response: HttpResponse = client.get(f\"/products/{code_product}/\")\n assert response.status_code == 200", "title": "" }, { "docid": "7d029d2a4363dd960a60911766825f22", "score": "0.6081455", "text": "def retrieve(self, request, pk=None):\n try:\n # `pk` is a parameter to this function, and\n # Django parses it from the URL route parameter\n # http://localhost:8000/categories/2\n #\n # The `2` at the end of the route becomes `pk`\n category = Category.objects.get(pk=pk)\n serializer = CategorySerializer(category, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)", "title": "" }, { "docid": "56a363bf1bd29c611a7df21953b51ebe", "score": "0.6070192", "text": "def get_product(product_id):\n current_time = datetime.utcnow()\n yesterday = current_time - timedelta(days=1)\n products = models.storage.get_session().query(Product).filter(Product.product_id == product_id).filter(\n Product.created_at > yesterday).all()\n if len(products) > 0:\n return products[0]\n else:\n return None", "title": "" }, { "docid": "14e1f88bf29271be5f145fd195e69553", "score": "0.6059371", "text": "def get(self, request, *args, **kwargs):\r\n return self.retrieve(request, *args, **kwargs)", "title": "" } ]