Dataset schema (29 columns, one row per source file; ⌀ = column contains nulls):
hexsha: stringlengths 40..40
size: int64 5..2.06M
ext: stringclasses 11 values
lang: stringclasses 1 value
max_stars_repo_path: stringlengths 3..251
max_stars_repo_name: stringlengths 4..130
max_stars_repo_head_hexsha: stringlengths 40..78
max_stars_repo_licenses: sequencelengths 1..10
max_stars_count: int64 1..191k ⌀
max_stars_repo_stars_event_min_datetime: stringlengths 24..24 ⌀
max_stars_repo_stars_event_max_datetime: stringlengths 24..24 ⌀
max_issues_repo_path: stringlengths 3..251
max_issues_repo_name: stringlengths 4..130
max_issues_repo_head_hexsha: stringlengths 40..78
max_issues_repo_licenses: sequencelengths 1..10
max_issues_count: int64 1..116k ⌀
max_issues_repo_issues_event_min_datetime: stringlengths 24..24 ⌀
max_issues_repo_issues_event_max_datetime: stringlengths 24..24 ⌀
max_forks_repo_path: stringlengths 3..251
max_forks_repo_name: stringlengths 4..130
max_forks_repo_head_hexsha: stringlengths 40..78
max_forks_repo_licenses: sequencelengths 1..10
max_forks_count: int64 1..105k ⌀
max_forks_repo_forks_event_min_datetime: stringlengths 24..24 ⌀
max_forks_repo_forks_event_max_datetime: stringlengths 24..24 ⌀
content: stringlengths 1..1.05M
avg_line_length: float64 1..1.02M
max_line_length: int64 3..1.04M
alphanum_fraction: float64 0..1
0a6664a131eebc11f4bbd4774aef93f20aa62a4d | 7,261 | py | Python | game.py | akaeme/BlackJackBot | 04970107202a24059f8da933233fba7df9f3a0ef | ["MIT"] | null | null | null | game.py | akaeme/BlackJackBot | 04970107202a24059f8da933233fba7df9f3a0ef | ["MIT"] | null | null | null | game.py | akaeme/BlackJackBot | 04970107202a24059f8da933233fba7df9f3a0ef | ["MIT"] | null | null | null |
#encoding: utf8
__author__ = 'Diogo Gomes'
__email__ = '[email protected]'
__license__ = "GPL"
__version__ = "0.1"
import copy
import card
from shoe import Shoe
from dealer import Dealer
from player import Player
BET_MULTIPLIER = 2
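# A minimal sketch of how BET_MULTIPLIER might be used, assuming it is the
# payout factor for a winning hand; the helper below is illustrative only and
# not part of the original game logic, whose body is not included in this row.
def payout(bet, player_won):
    # A winning hand returns the stake times the multiplier; a loss pays nothing.
    return bet * BET_MULTIPLIER if player_won else 0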
| 36.305 | 208 | 0.493596 |
0a66d37f0e15138eb333c83b7140c80ba5e24e15 | 284 | py | Python | loops/for/for3.py | camipozas/python-exercises | c8c02d2b9ff77f21592c99038e10434aba08dbc7 | ["MIT"] | null | null | null | loops/for/for3.py | camipozas/python-exercises | c8c02d2b9ff77f21592c99038e10434aba08dbc7 | ["MIT"] | null | null | null | loops/for/for3.py | camipozas/python-exercises | c8c02d2b9ff77f21592c99038e10434aba08dbc7 | ["MIT"] | null | null | null |
# Write a program that prints the sum of all multiples of 7 found between 0 and 100.
# Summing all the multiples of 7 from 0 to 100.
total = 0
for i in range(101):
if i % 7 == 0:
total = total+i
print("Sumatoria de los mltiplos de 7:", total)
| 28.4 | 110 | 0.679577 |
0a67bdcc24a12daa838689d0d299113ff13d2c1e | 7,044 | py | Python | lib/TWCManager/Status/HASSStatus.py | Saftwerk/TWCManager | 9b17c063ada80fc159db82fe6e3ad8c4ca071a1a | ["Unlicense"] | 1 | 2021-12-26T03:41:22.000Z | 2021-12-26T03:41:22.000Z | lib/TWCManager/Status/HASSStatus.py | Saftwerk/TWCManager | 9b17c063ada80fc159db82fe6e3ad8c4ca071a1a | ["Unlicense"] | null | null | null | lib/TWCManager/Status/HASSStatus.py | Saftwerk/TWCManager | 9b17c063ada80fc159db82fe6e3ad8c4ca071a1a | ["Unlicense"] | null | null | null |
# HomeAssistant Status Output
# Publishes the provided sensor key and value pair to a HomeAssistant instance
import logging
import time
from ww import f
logger = logging.getLogger(__name__.rsplit(".")[-1])
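# The class body of this module is not included in this row. Below is a
# minimal sketch of publishing a sensor key/value pair to Home Assistant's
# REST states API, assuming a host, port and long-lived access token are
# available; the function name and entity naming scheme are assumptions, not
# the original implementation.
import requests

def publish_sensor(host, port, token, key, value):
    # POST the value as the state of an entity such as sensor.twcmanager_<key>
    url = f"http://{host}:{port}/api/states/sensor.twcmanager_{key}"
    headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"}
    try:
        requests.post(url, json={"state": value}, headers=headers, timeout=5)
    except requests.exceptions.RequestException:
        logger.debug("Failed to publish %s to HomeAssistant", key)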
| 33.383886 | 104 | 0.52598 |
0a682a6477a9ae21b7ff09cd8fd4db9201909c6a | 809 | py | Python | Archive/routes/home_routes.py | taycurran/TwitOff | 6e2ee13f83fa86c80988a91b3b41ed0958688c3c | ["MIT"] | null | null | null | Archive/routes/home_routes.py | taycurran/TwitOff | 6e2ee13f83fa86c80988a91b3b41ed0958688c3c | ["MIT"] | 3 | 2021-06-08T21:05:06.000Z | 2022-01-13T02:20:50.000Z | Archive/routes/home_routes.py | taycurran/TwitOff | 6e2ee13f83fa86c80988a91b3b41ed0958688c3c | ["MIT"] | null | null | null |
from flask import Blueprint, jsonify, request, render_template
home_routes = Blueprint("home_routes", __name__)
# # Add config for database
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
# # stop tracking modifications on sqlalchemy config
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# # ? app.config["TWITTER_API_CLIENT"] = twitter
# # Have the database know about the app
# DB.init_app(app)
| 25.28125 | 64 | 0.678616 |
0a6a0fd024fe59393b29eb7bb5c4f5bdd676e60b | 8,845 | py | Python | intent/scripts/classification/ctn_to_classifier.py | rgeorgi/intent | 9920798c126f6d354029f7bb0a345e7cdb649f3a | ["MIT"] | 3 | 2016-08-05T01:11:57.000Z | 2017-08-26T15:35:51.000Z | intent/scripts/classification/ctn_to_classifier.py | rgeorgi/intent | 9920798c126f6d354029f7bb0a345e7cdb649f3a | ["MIT"] | 2 | 2016-03-01T22:41:24.000Z | 2016-09-14T18:39:25.000Z | intent/scripts/classification/ctn_to_classifier.py | rgeorgi/intent | 9920798c126f6d354029f7bb0a345e7cdb649f3a | ["MIT"] | null | null | null |
from argparse import ArgumentParser
from collections import defaultdict
import glob
import os
import pickle
from random import shuffle, seed
import sys
from tempfile import mkdtemp
import shutil
import logging
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
CTN_LOG = logging.getLogger('CTN_CLASS')
CTN_LOG.setLevel(logging.DEBUG)
logging.basicConfig()
from intent.igt.metadata import set_intent_method, get_intent_method
from intent.interfaces.stanford_tagger import StanfordPOSTagger
from intent.pos.TagMap import TagMap
from intent.utils.env import tagger_model, proj_root
from xigt.codecs import xigtxml
from xigt.consts import ALIGNMENT
from intent.eval.pos_eval import poseval
from intent.igt.consts import GLOSS_WORD_ID, POS_TIER_TYPE, LANG_WORD_ID, GLOSS_WORD_TYPE, POS_TIER_ID, \
INTENT_TOKEN_TYPE, INTENT_POS_PROJ, LANG_WORD_TYPE, TRANS_WORD_TYPE, TRANS_WORD_ID, MANUAL_POS, INTENT_POS_CLASS
from intent.igt.rgxigt import RGCorpus, strip_pos, RGIgt, RGTokenTier, RGTier, gen_tier_id, RGToken, \
ProjectionTransGlossException, word_align
from intent.interfaces.mallet_maxent import MalletMaxent
from intent.scripts.classification.xigt_to_classifier import instances_to_classifier
from intent.utils.token import POSToken, GoldTagPOSToken
from intent.igt.igtutils import rgp
__author__ = 'rgeorgi'
"""
The purpose of this module is to evaluate the POS-line classifiers trained on
the CTN corpus.
"""
def eval_classifier(c, inst_list, context_feats=False, posdict=None):
"""
:param c: The classifier
:param inst_list: A list of Igt instances to test against. Must already have POS tags.
"""
gold_sents = []
eval_sents = []
to_dump = RGCorpus()
for inst in inst_list:
to_tag = inst.copy()
strip_pos(to_tag)
# Do the classification.
to_tag.classify_gloss_pos(c, lowercase=True,
feat_next_gram=context_feats,
feat_prev_gram=context_feats,
posdict=posdict)
to_dump.append(to_tag)
# Fix the tags...
# fix_ctn_gloss_line(to_tag, tag_method=INTENT_POS_CLASS)
# Now, retrieve eval/gold.
eval_tags = [v.value() for v in to_tag.get_pos_tags(GLOSS_WORD_ID, tag_method=INTENT_POS_CLASS)]
gold_tags = [v.value() for v in inst.get_pos_tags(GLOSS_WORD_ID, tag_method=MANUAL_POS)]
tag_tokens = [POSToken('a', label=l) for l in eval_tags]
gold_tokens= [POSToken('a', label=l) for l in gold_tags]
if not len(tag_tokens) == len(gold_tokens):
print("LENGTH OF SEQUENCE IS MISMATCHED")
continue
gold_sents.append(gold_tokens)
eval_sents.append(tag_tokens)
xigtxml.dump(open('./enriched_ctn_dev.xml', 'w'), to_dump)
return poseval(eval_sents, gold_sents, details=True,csv=True, matrix=True)
def fix_ctn_gloss_line(inst, tag_method=None):
"""
Given a CTN gloss line, do some specific fixes to attempt to fix the CTN tag mapping.
:param inst:
:type inst:RGIgt
"""
gpos_tier = inst.get_pos_tags(GLOSS_WORD_ID, tag_method=tag_method)
# Get the gloss words
for gw in inst.gloss:
new_tag = None
if gw.value().lower() in ['foc','top','seq','add','emph','cit','rep']:
new_tag = 'PRT'
elif gw.value().lower() in ['but','and','or']:
new_tag = 'CONJ'
elif 'dem' in gw.value().lower():
new_tag = 'PRON'
elif gw.value().lower() in ['for','in']:
new_tag = 'ADP'
elif gw.value().lower() in ['the']:
new_tag = 'DET'
if new_tag:
gpos = gpos_tier.find(alignment=gw.id)
if not gpos:
gpt = RGToken(id=gpos_tier.askItemId(), alignment=gw.id, text=new_tag)
gpos_tier.add(gpt)
else:
gpos.text = new_tag
if __name__ == '__main__':
ctn_train = './data/xml-files/ctn/ctn_train.xml'
ctn_dev = './data/xml-files/ctn/ctn_dev.xml'
ctn_dev_processed = './data/xml-files/ctn/ctn_dev_processed.xml'
ctn_train_processed = './data/xml-files/ctn/ctn_train_processed.xml'
posdict = pickle.load(open('./data/dictionaries/CTN.dict', 'rb'))
# print("Loading CTN Dev Corpus...", end=" ", flush=True)
# dev_xc = RGCorpus.load(ctn_dev)
# print("Done.")
#
# print("Loading CTN Train corpus...", end=" ", flush=True)
# train_xc = RGCorpus.load(ctn_train)
# print("Done.")
print("Initializing tagger...", end=" ", flush=True)
tagger = StanfordPOSTagger(tagger_model)
print("Done.")
# =============================================================================
# 1) Start by projecting the language line to the gloss line in the dev set,
# remapping it from the CTN tagset to the universal tagset along the way.
# =============================================================================
#
# print("Processing DEV corpus...", end=' ', flush=True)
# for inst in dev_xc:
# word_align(inst.gloss, inst.lang)
# inst.project_lang_to_gloss(tagmap = './data/tagset_mappings/ctn.txt')
# fix_ctn_gloss_line(inst, tag_method=MANUAL_POS)
# inst.tag_trans_pos(tagger)
# inst.heur_align() # Align trans/gloss lines heuristically
# inst.project_trans_to_gloss() # Now, project heuristically.
# print('done.')
#
# xigtxml.dump(open(ctn_dev_processed, 'w', encoding='utf-8'), dev_xc)
#
#
# print("Processing TRAIN Corpus...", end=' ', flush=True)
# # Get the language line words projected onto the gloss...
# for inst in train_xc:
# word_align(inst.gloss, inst.lang)
# inst.project_lang_to_gloss(tagmap = './data/tagset_mappings/ctn.txt')
# inst.tag_trans_pos(tagger)
# inst.heur_align()
# inst.project_trans_to_gloss()
# fix_ctn_gloss_line(inst, tag_method=INTENT_POS_PROJ)
#
# print("Done.")
#
# xigtxml.dump(open(ctn_train_processed, 'w', encoding='utf-8'), train_xc)
# sys.exit()
print("Loading Processed CTN Train corpus...", end=" ", flush=True)
train_xc = RGCorpus.load(ctn_train_processed)
print("Done.")
print("Loading Processed CTN Dev corpus...", end=" ", flush=True)
dev_xc = RGCorpus.load(ctn_dev_processed)
print("Done.")
#
# # =============================================================================
# # 2) Train a classifier based on the projected gloss line.
# # =============================================================================
#
index_list = [35,70,106,141,284,569,854,1139,1424,1708,1993,7120]
for train_stop_index in index_list:
train_instances = list(train_xc)[0:train_stop_index]
print('* '*50)
tokens = 0
for inst in train_instances:
tokens += len(inst.gloss)
print("Now training with {} tokens, {} instances.".format(tokens, train_stop_index))
print("Training Classifier...", end=" ", flush=True)
c = instances_to_classifier(train_instances, './ctn-train.class',
tag_method=MANUAL_POS,
posdict=posdict,
context_feats=True,
feat_path='./ctn-train_feats.txt')
print("Done.")
# c = MalletMaxent('/Users/rgeorgi/Documents/code/dissertation/gc.classifier')
# c = MalletMaxent('./ctn_class.class.classifier')
print("Evaluating classifier...", end=" ", flush=True)
eval_classifier(c, dev_xc, posdict=posdict, context_feats=True)
print("Done.")
# eval_proj(dev_xc)
| 33.25188 | 116 | 0.614019 |
0a6abcccf806b40379eafeffa1d5d6385d6c8a7c | 1,358 | py | Python | watchdog/back-end/v0.3.0/watchdog/app/resource/video.py | Havana3351/Low-cost-remote-monitor | 9f86a62b8515c0f9fddda31f25548680f0ad8e2f | ["MIT"] | 18 | 2021-12-03T13:18:07.000Z | 2022-03-30T20:20:17.000Z | watchdog/back-end/v1.0.0/watchdogV1-3/app/resource/video.py | Fairywyt/Low-cost-remote-monitor | 263b98d969251d2dbef5fb5e4d42a58075e744fa | ["MIT"] | null | null | null | watchdog/back-end/v1.0.0/watchdogV1-3/app/resource/video.py | Fairywyt/Low-cost-remote-monitor | 263b98d969251d2dbef5fb5e4d42a58075e744fa | ["MIT"] | 4 | 2022-03-22T09:58:00.000Z | 2022-03-28T08:57:17.000Z |
from flask_restful import Resource
from flask import Response
import os
import cv2
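# The resource class of this module is not included in this row. A minimal
# sketch of a flask_restful resource that returns a single JPEG frame from the
# default camera is shown below; the class name and camera index are
# assumptions, not the original implementation.
class VideoFrame(Resource):
    def get(self):
        cap = cv2.VideoCapture(0)      # open the default camera
        ok, frame = cap.read()         # grab one frame
        cap.release()
        if not ok:
            return {"message": "camera unavailable"}, 503
        ok, jpeg = cv2.imencode(".jpg", frame)
        return Response(jpeg.tobytes(), mimetype="image/jpeg")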
picturecounter = 1 #
| 26.115385 | 111 | 0.563328 |
0a6b4ad6f031ba8193614f726faf3a710def3c48 | 22,385 | py | Python | codes/ambfix.py | valgur/LEOGPS | f289f279ef55980a0e3fd82b3b3686e41c474a2e | ["MIT"] | null | null | null | codes/ambfix.py | valgur/LEOGPS | f289f279ef55980a0e3fd82b3b3686e41c474a2e | ["MIT"] | null | null | null | codes/ambfix.py | valgur/LEOGPS | f289f279ef55980a0e3fd82b3b3686e41c474a2e | ["MIT"] | null | null | null |
#!/usr/bin/env python3
'''
###############################################################################
###############################################################################
## ##
## _ ___ ___ ___ ___ ___ ##
## | | | __ / \ / __| _ | __| ##
## | |__| __ ( ) | (_ | _|__ \ ##
## |____|___ \___/ \___|_| \___/ ##
## v 1.0 (Stable) ##
## ##
## FILE DESCRIPTION: ##
## ##
## This is the classical LAMBDA method that was originally authored by ##
## Teunissen, Jonge, and Tiberius (1993). The code was later written in ##
## MATLAB by Dr Sandra Verhagen and Dr Bofeng Li. It takes in a vector of ##
## float ambiguities to the integer least-squares problem, and covariance ##
## of the float ambiguities. It then runs the LAMBDA's ILS search-&-shrink ##
## and spits out the ambiguity integers. The other 5 methods in original ##
## LAMBDA MATLAB code are not supported here (feel free to edit the code ##
## and implement it youself!). The default ncands = 2, as per original code. ##
## All support functions from the original MATLAB code (decorrel, ldldecom) ##
## have been nested within the main function as sub functions. ##
## ##
## INPUTS: ##
## ##
## - ahat : numpy array of float ambiguities ##
## - Qahat : numpy covariance matrix for float ambiguities ##
## - ncands : number of candidates (optional parameter, default = 2) ##
## ##
## OUTPUT: ##
## ##
## - afixed : Array of size (n x ncands) with the estimated integer ##
## candidates, sorted according to the corresponding squared ##
## norms, best candidate first. ##
## - sqnorm : Distance between integer candidate and float ambiguity ##
## vectors in the metric of the variance-covariance matrix. ##
## ##
## REMARKS: ##
## ##
## Besides above changes, mostly syntax changes to this Python version: ##
## - Everything is identical EXCEPT MATLAB is ones-based indexing. ##
## - Python is zeros-based indexing, and range function does not ##
## include the upper limit index. Thus, only indices have changed. ##
## - Example in MATLAB: for i = 1:5 => {1,2,3,4,5} ##
## - Equivalently in Python: for i in range(0,5) => {0,1,2,3,4} ##
## - Indices are thus updated accordingly. ##
## ##
## DEVELOPER: Professor Peter Teunissen (TU Delft) ##
## ORIGINAL AUTHOR: Sandra Verhagen and Bofeng Li (TU Delft) ##
## AUTHOR MODIFIED: 26-07-2019, by Samuel Y.W. Low, with permissions. ##
## ##
###############################################################################
###############################################################################
'''
import numpy as np
def LAMBDA( ahat, Qahat, ncands = 2 ):
###########################################################################
###########################################################################
# [afixed, sqnorm] = LAMBDA( ahat, Qahat, ncands )
#
# This is the main routine of the LAMBDA software package. By default the
# ILS method will be used for integer estimation based on the provided
# float ambiguity vector ahat and associated variance-covariance matrix
# Qahat. In this Pythonic version (modified by Samuel Low, 2019), only
# the ILS method is implemented. For other techniques: integer rounding,
# bootstrapping or Partial Ambiguity Resolution (PAR), the user is free
# to modify this code and adapt it to their own needs.
#
# NOTE 1: LAMBDA always first applies a decorrelation before the integer
# estimation (for ILS this is required to guarantee an efficient search,
# for rounding and bootstrapping it is required in order to get higher
# success rates).
#
# INPUTS:
#
# ahat: Float ambiguities (must be a column!)
# Qahat: Variance/covariance matrix of ambiguities
# ncands: number of candidates (optional parameter, default = 2)
#
# OUTPUTS:
#
# afixed: Array of size (n x ncands) with the estimated integer
# candidates, sorted according to the corresponding squared
# norms, best candidate first.
# sqnorm: Distance between integer candidate and float ambiguity vectors
# in the metric of the variance-covariance matrix Qahat.
# Only available for ILS.
#
# -------------------------------------------------------------------------
# Release date : 1-SEPT-2012
# Authors : Bofeng LI and Sandra VERHAGEN
#
# GNSS Research Centre, Curtin University
# Mathematical Geodesy and Positioning, Delft University of Technology
# -------------------------------------------------------------------------
#
# REFERENCES:
# 1. LAMBDA Software Package: Matlab implementation, Version 3.0.
# Documentation provided with this software package.
# 2. Teunissen P (1993) Least-squares estimation of the integer GPS
# ambiguities. In: Invited lecture, section IV theory and methodology,
# IAG General Meeting, Beijing, China
# 3. Teunissen P (1995) The least-squares ambiguity decorrelation
# adjustment: a method for fast GPS ambiguity estitmation. J Geod
# 70:651-7
# 4. De Jonge P, Tiberius C (1996) The LAMBDA method of intger ambiguity
# estimation:implementation aspects.
# 5. Chang X ,Yang X, Zhou T (2005) MLAMBDA: a modified LAMBDA method for
# integer least-squares estimation
###########################################################################
###########################################################################
''' A function for obtaining the decimals only from float arrays '''
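    # A minimal sketch of the helper described above, whose body is not
    # included in this copy: it splits each float ambiguity into an integer
    # part (incr) and a fractional remainder in (-1, 1), which is what the
    # main routine below expects; the truncation convention is an assumption.
    def floatrem(ahat):
        ahat = np.asarray(ahat, dtype=float)
        incr = np.trunc(ahat)        # integer part, truncated toward zero
        return ahat - incr, incr     # fractional remainder, removed integers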
###########################################################################
###########################################################################
''' A function to perform LtDL decomposition of the covariance matrix '''
###########################################################################
###########################################################################
''' Decorrelation function for LAMBDA '''
###########################################################################
###########################################################################
###########################################################################
###########################################################################
''' Initialisation and some initial sanity checks... '''
# Initialise all output variables
sqnorm = np.array([])
# Test inputs: Is the Q-matrix symmetric?
if np.array_equal(Qahat,Qahat.transpose()) == False:
print('Variance-covariance matrix is not symmetric!')
return None
# Test inputs: Is the Q-matrix positive-definite?
if np.sum(np.linalg.eig(Qahat)[0] > 0.0) != len(Qahat):
print('Variance-covariance matrix is not positive definite!')
return None
# Test inputs: Does Q-matrix and amb vector have identical dimensions?
if len(ahat) != len(Qahat):
print('Variance-covariance matrix and vector of ambiguities...')
print('... do not have identical dimensions!')
return None
###########################################################################
###########################################################################
''' Begin least-squares ambiguity decorrelation adjustment! '''
# Remove integer numbers from float solution, so that all values are
# between -1 and 1 (for computational convenience only)
ahat, incr = floatrem( ahat )
# Compute Z matrix based on the decomposition Q=L^T*D*L;
Qzhat, Z, L, D, zhat, iZt = decorrel( ahat, Qahat )
# Integer ambiguity vector search via search-and-shrink
zfixedff, sqnormff = ssearch( zhat, L, D, ncands )
# Perform the back-transformation and add the increments
afixed = np.matmul(iZt,zfixedff)
repmat = np.repeat(np.array([incr]),ncands,axis=0)
repmat = repmat.transpose()
afixed = afixed + repmat
afixed = afixed.transpose()
###########################################################################
###########################################################################
''' Returns best amb-fix, second best amb-fix, and the square norm '''
return afixed, sqnorm
###########################################################################
###########################################################################
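# A minimal usage sketch of LAMBDA() following its docstring above, on a small
# made-up 3x3 problem; it assumes the complete module (with its decorrelation
# and search-and-shrink helpers) is available.
if __name__ == '__main__':
    ahat = np.array([5.45, 3.10, 2.97])           # float ambiguities
    Qahat = np.array([[6.290, 5.978, 0.544],      # symmetric, positive-definite covariance
                      [5.978, 6.292, 2.340],
                      [0.544, 2.340, 6.288]])
    afixed, sqnorm = LAMBDA(ahat, Qahat, ncands=2)
    print(afixed)                                 # best and second-best integer candidates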
| 42.638095 | 104 | 0.3979 |
0a6bdfe36df3fc3c2674d86fa755f854cc5eacf6 | 133 | py | Python | summarizer/test_summarizer.py | bmcilw1/text-summary | f594fd4f41279a6e11262ac859cfbdad6aaf1703 | ["MIT"] | null | null | null | summarizer/test_summarizer.py | bmcilw1/text-summary | f594fd4f41279a6e11262ac859cfbdad6aaf1703 | ["MIT"] | null | null | null | summarizer/test_summarizer.py | bmcilw1/text-summary | f594fd4f41279a6e11262ac859cfbdad6aaf1703 | ["MIT"] | null | null | null |
from summarizer.summarizer import summarize
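# The rest of this small test file is not included in this row. A minimal
# sketch of a test it might contain, assuming summarize(text) accepts a string
# and returns a string:
def test_summarize_returns_text():
    result = summarize("First sentence. Second sentence. Third sentence.")
    assert isinstance(result, str)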
| 26.6 | 56 | 0.796992 |
0a6d2f3733dce67a2fafd219a662c5c458e102f9 | 1,774 | py | Python | XORCipher/XOREncrypt.py | KarthikGandrala/DataEncryption | 6ed4dffead345bc9f7010ac2ea9afbff958c85af | ["MIT"] | 1 | 2021-07-12T06:05:45.000Z | 2021-07-12T06:05:45.000Z | XORCipher/XOREncrypt.py | KarthikGandrala/Encrypt-Your-Data | 6ed4dffead345bc9f7010ac2ea9afbff958c85af | ["MIT"] | null | null | null | XORCipher/XOREncrypt.py | KarthikGandrala/Encrypt-Your-Data | 6ed4dffead345bc9f7010ac2ea9afbff958c85af | ["MIT"] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Function to encrypt message using key is defined
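# The function body is not included in this row. A minimal sketch of a
# standard XOR cipher, assuming a string message and key; the original name
# and signature are not known. XORing each character with the corresponding
# key character (key repeated cyclically) encrypts, and applying the same
# function again with the same key decrypts.
def encrypt_decrypt(message, key):
    return "".join(
        chr(ord(ch) ^ ord(key[i % len(key)])) for i, ch in enumerate(message)
    )
# e.g. encrypt_decrypt(encrypt_decrypt("hello", "k"), "k") == "hello"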
| 23.653333 | 79 | 0.558061 |
0a6e68db8c94071ad8a29d0149ef1ef93e54c4c1 | 634 | py | Python | 02-Use-functions/21-Opening_a_file/secret_message.py | francisrod01/udacity_python_foundations | 2a384cf35ce7eff547c88097cdc45cc4e8fc6041 | ["MIT"] | null | null | null | 02-Use-functions/21-Opening_a_file/secret_message.py | francisrod01/udacity_python_foundations | 2a384cf35ce7eff547c88097cdc45cc4e8fc6041 | ["MIT"] | null | null | null | 02-Use-functions/21-Opening_a_file/secret_message.py | francisrod01/udacity_python_foundations | 2a384cf35ce7eff547c88097cdc45cc4e8fc6041 | ["MIT"] | null | null | null |
#!/usr/bin/python3
import os
import random
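# The definition of rename_files() is not included in this row. A minimal
# sketch based on the print statement below, assuming it prepends a random
# digit to every file name in the given folder; the original behaviour may
# differ.
def rename_files(path):
    for name in os.listdir(path):
        new_name = str(random.randint(0, 9)) + name
        os.rename(os.path.join(path, name), os.path.join(path, new_name))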
print("# Python program - Adding random numbers to beginning of filename.")
rename_files("./prank")
| 25.36 | 83 | 0.679811 |
0a6eef44c90456b4e29cb5273e1126093472758f | 101,780 | py | Python | xarray/core/variable.py | timgates42/xarray | bf0fe2caca1d2ebc4f1298f019758baa12f68b94 | ["Apache-2.0"] | null | null | null | xarray/core/variable.py | timgates42/xarray | bf0fe2caca1d2ebc4f1298f019758baa12f68b94 | ["Apache-2.0"] | null | null | null | xarray/core/variable.py | timgates42/xarray | bf0fe2caca1d2ebc4f1298f019758baa12f68b94 | ["Apache-2.0"] | 1 | 2021-07-13T07:06:10.000Z | 2021-07-13T07:06:10.000Z |
import copy
import functools
import itertools
import numbers
import warnings
from collections import defaultdict
from datetime import timedelta
from distutils.version import LooseVersion
from typing import (
Any,
Dict,
Hashable,
Mapping,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
import numpy as np
import pandas as pd
import xarray as xr # only for Dataset and DataArray
from . import arithmetic, common, dtypes, duck_array_ops, indexing, nputils, ops, utils
from .indexing import (
BasicIndexer,
OuterIndexer,
PandasIndexAdapter,
VectorizedIndexer,
as_indexable,
)
from .npcompat import IS_NEP18_ACTIVE
from .options import _get_keep_attrs
from .pycompat import (
cupy_array_type,
dask_array_type,
integer_types,
is_duck_dask_array,
)
from .utils import (
OrderedSet,
_default,
decode_numpy_dict_values,
drop_dims_from_indexers,
either_dict_or_kwargs,
ensure_us_time_resolution,
infix_dims,
is_duck_array,
)
NON_NUMPY_SUPPORTED_ARRAY_TYPES = (
(
indexing.ExplicitlyIndexed,
pd.Index,
)
+ dask_array_type
+ cupy_array_type
)
# https://github.com/python/mypy/issues/224
BASIC_INDEXING_TYPES = integer_types + (slice,) # type: ignore
VariableType = TypeVar("VariableType", bound="Variable")
"""Type annotation to be used when methods of Variable return self or a copy of self.
When called from an instance of a subclass, e.g. IndexVariable, mypy identifies the
output as an instance of the subclass.
Usage::
class Variable:
def f(self: VariableType, ...) -> VariableType:
...
"""
def as_variable(obj, name=None) -> "Union[Variable, IndexVariable]":
"""Convert an object into a Variable.
Parameters
----------
obj : object
Object to convert into a Variable.
- If the object is already a Variable, return a shallow copy.
- Otherwise, if the object has 'dims' and 'data' attributes, convert
it into a new Variable.
- If all else fails, attempt to convert the object into a Variable by
unpacking it into the arguments for creating a new Variable.
name : str, optional
If provided:
- `obj` can be a 1D array, which is assumed to label coordinate values
along a dimension of this given name.
- Variables with name matching one of their dimensions are converted
into `IndexVariable` objects.
Returns
-------
var : Variable
The newly created variable.
"""
from .dataarray import DataArray
    # TODO: consider extending this method to automatically handle Iris and
    # pandas objects.
if isinstance(obj, DataArray):
# extract the primary Variable from DataArrays
obj = obj.variable
if isinstance(obj, Variable):
obj = obj.copy(deep=False)
elif isinstance(obj, tuple):
try:
obj = Variable(*obj)
except (TypeError, ValueError) as error:
# use .format() instead of % because it handles tuples consistently
raise error.__class__(
"Could not convert tuple of form "
"(dims, data[, attrs, encoding]): "
"{} to Variable.".format(obj)
)
elif utils.is_scalar(obj):
obj = Variable([], obj)
elif isinstance(obj, (pd.Index, IndexVariable)) and obj.name is not None:
obj = Variable(obj.name, obj)
elif isinstance(obj, (set, dict)):
raise TypeError("variable {!r} has invalid type {!r}".format(name, type(obj)))
elif name is not None:
data = as_compatible_data(obj)
if data.ndim != 1:
raise MissingDimensionsError(
"cannot set variable %r with %r-dimensional data "
"without explicit dimension names. Pass a tuple of "
"(dims, data) instead." % (name, data.ndim)
)
obj = Variable(name, data, fastpath=True)
else:
raise TypeError(
"unable to convert object into a variable without an "
"explicit list of dimensions: %r" % obj
)
if name is not None and name in obj.dims:
# convert the Variable into an Index
if obj.ndim != 1:
raise MissingDimensionsError(
"%r has more than 1-dimension and the same name as one of its "
"dimensions %r. xarray disallows such variables because they "
"conflict with the coordinates used to label "
"dimensions." % (name, obj.dims)
)
obj = obj.to_index_variable()
return obj
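# A minimal usage sketch of the tuple and ``name`` forms documented in the
# as_variable() docstring above; the sample data is invented, and the calls
# are wrapped in a helper so nothing runs at import time.
def _as_variable_examples():
    v = as_variable(("x", [1, 2, 3]))        # (dims, data) tuple form
    t = as_variable([10, 20, 30], name="t")  # 1-D data labelled by name -> IndexVariable
    return v, t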
def _maybe_wrap_data(data):
"""
Put pandas.Index and numpy.ndarray arguments in adapter objects to ensure
they can be indexed properly.
NumpyArrayAdapter, PandasIndexAdapter and LazilyOuterIndexedArray should
all pass through unmodified.
"""
if isinstance(data, pd.Index):
return PandasIndexAdapter(data)
return data
def _possibly_convert_objects(values):
"""Convert arrays of datetime.datetime and datetime.timedelta objects into
datetime64 and timedelta64, according to the pandas convention. Also used for
validating that datetime64 and timedelta64 objects are within the valid date
range for ns precision, as pandas will raise an error if they are not.
"""
return np.asarray(pd.Series(values.ravel())).reshape(values.shape)
def as_compatible_data(data, fastpath=False):
"""Prepare and wrap data to put in a Variable.
- If data does not have the necessary attributes, convert it to ndarray.
- If data has dtype=datetime64, ensure that it has ns precision. If it's a
pandas.Timestamp, convert it to datetime64.
- If data is already a pandas or xarray object (other than an Index), just
use the values.
Finally, wrap it up with an adapter if necessary.
"""
if fastpath and getattr(data, "ndim", 0) > 0:
# can't use fastpath (yet) for scalars
return _maybe_wrap_data(data)
if isinstance(data, Variable):
return data.data
if isinstance(data, NON_NUMPY_SUPPORTED_ARRAY_TYPES):
return _maybe_wrap_data(data)
if isinstance(data, tuple):
data = utils.to_0d_object_array(data)
if isinstance(data, pd.Timestamp):
# TODO: convert, handle datetime objects, too
data = np.datetime64(data.value, "ns")
if isinstance(data, timedelta):
data = np.timedelta64(getattr(data, "value", data), "ns")
# we don't want nested self-described arrays
data = getattr(data, "values", data)
if isinstance(data, np.ma.MaskedArray):
mask = np.ma.getmaskarray(data)
if mask.any():
dtype, fill_value = dtypes.maybe_promote(data.dtype)
data = np.asarray(data, dtype=dtype)
data[mask] = fill_value
else:
data = np.asarray(data)
if not isinstance(data, np.ndarray):
if hasattr(data, "__array_function__"):
if IS_NEP18_ACTIVE:
return data
else:
raise TypeError(
"Got an NumPy-like array type providing the "
"__array_function__ protocol but NEP18 is not enabled. "
"Check that numpy >= v1.16 and that the environment "
'variable "NUMPY_EXPERIMENTAL_ARRAY_FUNCTION" is set to '
'"1"'
)
# validate whether the data is valid data types.
data = np.asarray(data)
if isinstance(data, np.ndarray):
if data.dtype.kind == "O":
data = _possibly_convert_objects(data)
elif data.dtype.kind == "M":
data = _possibly_convert_objects(data)
elif data.dtype.kind == "m":
data = _possibly_convert_objects(data)
return _maybe_wrap_data(data)
def _as_array_or_item(data):
"""Return the given values as a numpy array, or as an individual item if
it's a 0d datetime64 or timedelta64 array.
Importantly, this function does not copy data if it is already an ndarray -
otherwise, it will not be possible to update Variable values in place.
This function mostly exists because 0-dimensional ndarrays with
dtype=datetime64 are broken :(
https://github.com/numpy/numpy/issues/4337
https://github.com/numpy/numpy/issues/7619
TODO: remove this (replace with np.asarray) once these issues are fixed
"""
if isinstance(data, cupy_array_type):
data = data.get()
else:
data = np.asarray(data)
if data.ndim == 0:
if data.dtype.kind == "M":
data = np.datetime64(data, "ns")
elif data.dtype.kind == "m":
data = np.timedelta64(data, "ns")
return data
def load(self, **kwargs):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return this variable.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
if is_duck_dask_array(self._data):
self._data = as_compatible_data(self._data.compute(**kwargs))
elif not is_duck_array(self._data):
self._data = np.asarray(self._data)
return self
def compute(self, **kwargs):
"""Manually trigger loading of this variable's data from disk or a
remote source into memory and return a new variable. The original is
left unaltered.
Normally, it should not be necessary to call this method in user code,
because all xarray functions should either work on deferred data or
load data automatically.
Parameters
----------
**kwargs : dict
Additional keyword arguments passed on to ``dask.array.compute``.
See Also
--------
dask.array.compute
"""
new = self.copy(deep=False)
return new.load(**kwargs)
def __dask_postcompute__(self):
array_func, array_args = self._data.__dask_postcompute__()
return (
self._dask_finalize,
(array_func, array_args, self._dims, self._attrs, self._encoding),
)
def __dask_postpersist__(self):
array_func, array_args = self._data.__dask_postpersist__()
return (
self._dask_finalize,
(array_func, array_args, self._dims, self._attrs, self._encoding),
)
def to_base_variable(self):
"""Return this variable as a base xarray.Variable"""
return Variable(
self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True
)
to_variable = utils.alias(to_base_variable, "to_variable")
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return IndexVariable(
self.dims, self._data, self._attrs, encoding=self._encoding, fastpath=True
)
to_coord = utils.alias(to_index_variable, "to_coord")
def to_index(self):
"""Convert this variable to a pandas.Index"""
return self.to_index_variable().to_index()
def to_dict(self, data=True):
"""Dictionary representation of variable."""
item = {"dims": self.dims, "attrs": decode_numpy_dict_values(self.attrs)}
if data:
item["data"] = ensure_us_time_resolution(self.values).tolist()
else:
item.update({"dtype": str(self.dtype), "shape": self.shape})
return item
def _parse_dimensions(self, dims):
if isinstance(dims, str):
dims = (dims,)
dims = tuple(dims)
if len(dims) != self.ndim:
raise ValueError(
"dimensions %s must have the same length as the "
"number of data dimensions, ndim=%s" % (dims, self.ndim)
)
return dims
def _item_key_to_tuple(self, key):
if utils.is_dict_like(key):
return tuple(key.get(dim, slice(None)) for dim in self.dims)
else:
return key
def _broadcast_indexes(self, key):
"""Prepare an indexing key for an indexing operation.
Parameters
-----------
key: int, slice, array-like, dict or tuple of integer, slice and array-like
Any valid input for indexing.
Returns
-------
dims : tuple
Dimension of the resultant variable.
indexers : IndexingTuple subclass
Tuple of integer, array-like, or slices to use when indexing
self._data. The type of this argument indicates the type of
indexing to perform, either basic, outer or vectorized.
new_order : Optional[Sequence[int]]
Optional reordering to do on the result of indexing. If not None,
the first len(new_order) indexing should be moved to these
positions.
"""
key = self._item_key_to_tuple(key) # key is a tuple
# key is a tuple of full size
key = indexing.expanded_indexer(key, self.ndim)
# Convert a scalar Variable to an integer
key = tuple(
k.data.item() if isinstance(k, Variable) and k.ndim == 0 else k for k in key
)
# Convert a 0d-array to an integer
key = tuple(
k.item() if isinstance(k, np.ndarray) and k.ndim == 0 else k for k in key
)
if all(isinstance(k, BASIC_INDEXING_TYPES) for k in key):
return self._broadcast_indexes_basic(key)
self._validate_indexers(key)
# Detect it can be mapped as an outer indexer
# If all key is unlabeled, or
# key can be mapped as an OuterIndexer.
if all(not isinstance(k, Variable) for k in key):
return self._broadcast_indexes_outer(key)
# If all key is 1-dimensional and there are no duplicate labels,
# key can be mapped as an OuterIndexer.
dims = []
for k, d in zip(key, self.dims):
if isinstance(k, Variable):
if len(k.dims) > 1:
return self._broadcast_indexes_vectorized(key)
dims.append(k.dims[0])
elif not isinstance(k, integer_types):
dims.append(d)
if len(set(dims)) == len(dims):
return self._broadcast_indexes_outer(key)
return self._broadcast_indexes_vectorized(key)
def _broadcast_indexes_basic(self, key):
dims = tuple(
dim for k, dim in zip(key, self.dims) if not isinstance(k, integer_types)
)
return dims, BasicIndexer(key), None
def _validate_indexers(self, key):
""" Make sanity checks """
for dim, k in zip(self.dims, key):
if isinstance(k, BASIC_INDEXING_TYPES):
pass
else:
if not isinstance(k, Variable):
k = np.asarray(k)
if k.ndim > 1:
raise IndexError(
"Unlabeled multi-dimensional array cannot be "
"used for indexing: {}".format(k)
)
if k.dtype.kind == "b":
if self.shape[self.get_axis_num(dim)] != len(k):
raise IndexError(
"Boolean array size {:d} is used to index array "
"with shape {:s}.".format(len(k), str(self.shape))
)
if k.ndim > 1:
raise IndexError(
"{}-dimensional boolean indexing is "
"not supported. ".format(k.ndim)
)
if getattr(k, "dims", (dim,)) != (dim,):
raise IndexError(
"Boolean indexer should be unlabeled or on the "
"same dimension to the indexed array. Indexer is "
"on {:s} but the target dimension is {:s}.".format(
str(k.dims), dim
)
)
def _nonzero(self):
""" Equivalent numpy's nonzero but returns a tuple of Varibles. """
# TODO we should replace dask's native nonzero
# after https://github.com/dask/dask/issues/1076 is implemented.
nonzeros = np.nonzero(self.data)
return tuple(Variable((dim), nz) for nz, dim in zip(nonzeros, self.dims))
def __getitem__(self: VariableType, key) -> VariableType:
"""Return a new Variable object whose contents are consistent with
getting the provided key from the underlying data.
NB. __getitem__ and __setitem__ implement xarray-style indexing,
where if keys are unlabeled arrays, we index the array orthogonally
with them. If keys are labeled array (such as Variables), they are
broadcasted with our usual scheme and then the array is indexed with
the broadcasted key, like numpy's fancy indexing.
If you really want to do indexing like `x[x > 0]`, manipulate the numpy
array `x.values` directly.
"""
dims, indexer, new_order = self._broadcast_indexes(key)
data = as_indexable(self._data)[indexer]
if new_order:
data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
def _finalize_indexing_result(self: VariableType, dims, data) -> VariableType:
"""Used by IndexVariable to return IndexVariable objects when possible."""
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
def _getitem_with_mask(self, key, fill_value=dtypes.NA):
"""Index this Variable with -1 remapped to fill_value."""
# TODO(shoyer): expose this method in public API somewhere (isel?) and
# use it for reindex.
# TODO(shoyer): add a sanity check that all other integers are
# non-negative
# TODO(shoyer): add an optimization, remapping -1 to an adjacent value
# that is actually indexed rather than mapping it to the last value
# along each axis.
if fill_value is dtypes.NA:
fill_value = dtypes.get_fill_value(self.dtype)
dims, indexer, new_order = self._broadcast_indexes(key)
if self.size:
if is_duck_dask_array(self._data):
# dask's indexing is faster this way; also vindex does not
# support negative indices yet:
# https://github.com/dask/dask/pull/2967
actual_indexer = indexing.posify_mask_indexer(indexer)
else:
actual_indexer = indexer
data = as_indexable(self._data)[actual_indexer]
mask = indexing.create_mask(indexer, self.shape, data)
# we need to invert the mask in order to pass data first. This helps
# pint to choose the correct unit
# TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed
data = duck_array_ops.where(np.logical_not(mask), data, fill_value)
else:
# array cannot be indexed along dimensions of size 0, so just
# build the mask directly instead.
mask = indexing.create_mask(indexer, self.shape)
data = np.broadcast_to(fill_value, getattr(mask, "shape", ()))
if new_order:
data = duck_array_ops.moveaxis(data, range(len(new_order)), new_order)
return self._finalize_indexing_result(dims, data)
def __setitem__(self, key, value):
"""__setitem__ is overloaded to access the underlying numpy values with
orthogonal indexing.
See __getitem__ for more details.
"""
dims, index_tuple, new_order = self._broadcast_indexes(key)
if not isinstance(value, Variable):
value = as_compatible_data(value)
if value.ndim > len(dims):
raise ValueError(
"shape mismatch: value array of shape %s could not be "
"broadcast to indexing result with %s dimensions"
% (value.shape, len(dims))
)
if value.ndim == 0:
value = Variable((), value)
else:
value = Variable(dims[-value.ndim :], value)
# broadcast to become assignable
value = value.set_dims(dims).data
if new_order:
value = duck_array_ops.asarray(value)
value = value[(len(dims) - value.ndim) * (np.newaxis,) + (Ellipsis,)]
value = duck_array_ops.moveaxis(value, new_order, range(len(new_order)))
indexable = as_indexable(self._data)
indexable[index_tuple] = value
def copy(self, deep=True, data=None):
"""Returns a copy of this object.
If `deep=True`, the data array is loaded into memory and copied onto
the new object. Dimensions, attributes and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Whether the data array is loaded into memory and copied onto
the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
When `data` is used, `deep` is ignored.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
Examples
--------
Shallow copy versus deep copy
>>> var = xr.Variable(data=[1, 2, 3], dims="x")
>>> var.copy()
<xarray.Variable (x: 3)>
array([1, 2, 3])
>>> var_0 = var.copy(deep=False)
>>> var_0[0] = 7
>>> var_0
<xarray.Variable (x: 3)>
array([7, 2, 3])
>>> var
<xarray.Variable (x: 3)>
array([7, 2, 3])
Changing the data using the ``data`` argument maintains the
structure of the original object, but with the new data. Original
object is unaffected.
>>> var.copy(data=[0.1, 0.2, 0.3])
<xarray.Variable (x: 3)>
array([0.1, 0.2, 0.3])
>>> var
<xarray.Variable (x: 3)>
array([7, 2, 3])
See Also
--------
pandas.DataFrame.copy
"""
if data is None:
data = self._data
if isinstance(data, indexing.MemoryCachedArray):
# don't share caching between copies
data = indexing.MemoryCachedArray(data.array)
if deep:
data = copy.deepcopy(data)
else:
data = as_compatible_data(data)
if self.shape != data.shape:
raise ValueError(
"Data shape {} must match shape of object {}".format(
data.shape, self.shape
)
)
# note:
# dims is already an immutable tuple
# attributes and encoding will be copied when the new Array is created
return self._replace(data=data)
def _replace(
self, dims=_default, data=_default, attrs=_default, encoding=_default
) -> "Variable":
if dims is _default:
dims = copy.copy(self._dims)
if data is _default:
data = copy.copy(self.data)
if attrs is _default:
attrs = copy.copy(self._attrs)
if encoding is _default:
encoding = copy.copy(self._encoding)
return type(self)(dims, data, attrs, encoding, fastpath=True)
def __copy__(self):
return self.copy(deep=False)
def __deepcopy__(self, memo=None):
# memo does nothing but is required for compatibility with
# copy.deepcopy
return self.copy(deep=True)
# mutable objects should not be hashable
# https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore
_array_counter = itertools.count()
def chunk(self, chunks={}, name=None, lock=False):
"""Coerce this array's data into a dask arrays with the given chunks.
If this variable is a non-dask array, it will be converted to dask
array. If it's a dask array, it will be rechunked to the given chunk
sizes.
        If chunks are not provided for one or more dimensions, chunk
sizes along that dimension will not be updated; non-dask arrays will be
converted into dask arrays with a single block.
Parameters
----------
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``.
name : str, optional
Used to generate the name for this array in the internal dask
            graph. Does not need to be unique.
lock : optional
Passed on to :py:func:`dask.array.from_array`, if the array is not
already as dask array.
Returns
-------
chunked : xarray.Variable
"""
import dask
import dask.array as da
if chunks is None:
warnings.warn(
"None value for 'chunks' is deprecated. "
"It will raise an error in the future. Use instead '{}'",
category=FutureWarning,
)
chunks = {}
if utils.is_dict_like(chunks):
chunks = {self.get_axis_num(dim): chunk for dim, chunk in chunks.items()}
data = self._data
if is_duck_dask_array(data):
data = data.rechunk(chunks)
else:
if isinstance(data, indexing.ExplicitlyIndexed):
# Unambiguously handle array storage backends (like NetCDF4 and h5py)
# that can't handle general array indexing. For example, in netCDF4 you
# can do "outer" indexing along two dimensions independent, which works
# differently from how NumPy handles it.
# da.from_array works by using lazy indexing with a tuple of slices.
# Using OuterIndexer is a pragmatic choice: dask does not yet handle
# different indexing types in an explicit way:
# https://github.com/dask/dask/issues/2883
data = indexing.ImplicitToExplicitIndexingAdapter(
data, indexing.OuterIndexer
)
if LooseVersion(dask.__version__) < "2.0.0":
kwargs = {}
else:
# All of our lazily loaded backend array classes should use NumPy
# array operations.
kwargs = {"meta": np.ndarray}
else:
kwargs = {}
if utils.is_dict_like(chunks):
chunks = tuple(chunks.get(n, s) for n, s in enumerate(self.shape))
data = da.from_array(data, chunks, name=name, lock=lock, **kwargs)
return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)
def _as_sparse(self, sparse_format=_default, fill_value=dtypes.NA):
"""
use sparse-array as backend.
"""
import sparse
# TODO: what to do if dask-backended?
if fill_value is dtypes.NA:
dtype, fill_value = dtypes.maybe_promote(self.dtype)
else:
dtype = dtypes.result_type(self.dtype, fill_value)
if sparse_format is _default:
sparse_format = "coo"
try:
as_sparse = getattr(sparse, f"as_{sparse_format.lower()}")
except AttributeError:
raise ValueError(f"{sparse_format} is not a valid sparse format")
data = as_sparse(self.data.astype(dtype), fill_value=fill_value)
return self._replace(data=data)
def _to_dense(self):
"""
Change backend from sparse to np.array
"""
if hasattr(self._data, "todense"):
return self._replace(data=self._data.todense())
return self.copy(deep=False)
def isel(
self: VariableType,
indexers: Mapping[Hashable, Any] = None,
missing_dims: str = "raise",
**indexers_kwargs: Any,
) -> VariableType:
"""Return a new array indexed along the specified dimension(s).
Parameters
----------
**indexers : {dim: indexer, ...}
Keyword arguments with names matching dimensions and values given
by integers, slice objects or arrays.
missing_dims : {"raise", "warn", "ignore"}, default: "raise"
What to do if dimensions that should be selected from are not present in the
DataArray:
- "raise": raise an exception
- "warning": raise a warning, and ignore the missing dimensions
- "ignore": ignore the missing dimensions
Returns
-------
obj : Array object
A new Array with the selected data and dimensions. In general,
the new variable's data will be a view of this variable's data,
unless numpy fancy indexing was triggered by using an array
indexer, in which case the data will be a copy.
"""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "isel")
indexers = drop_dims_from_indexers(indexers, self.dims, missing_dims)
key = tuple(indexers.get(dim, slice(None)) for dim in self.dims)
return self[key]
def squeeze(self, dim=None):
"""Return a new object with squeezed data.
Parameters
----------
dim : None or str or tuple of str, optional
Selects a subset of the length one dimensions. If a dimension is
selected with length greater than one, an error is raised. If
None, all length one dimensions are squeezed.
Returns
-------
squeezed : same type as caller
            This object, but with all or a subset of the dimensions of
length 1 removed.
See Also
--------
numpy.squeeze
"""
dims = common.get_squeeze_dims(self, dim)
return self.isel({d: 0 for d in dims})
def shift(self, shifts=None, fill_value=dtypes.NA, **shifts_kwargs):
"""
Return a new Variable with shifted data.
Parameters
----------
shifts : mapping of the form {dim: offset}
Integer offset to shift along each of the given dimensions.
Positive offsets shift to the right; negative offsets shift to the
left.
fill_value: scalar, optional
Value to use for newly missing values
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but shifted data.
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "shift")
result = self
for dim, count in shifts.items():
result = result._shift_one_dim(dim, count, fill_value=fill_value)
return result
def pad(
self,
pad_width: Mapping[Hashable, Union[int, Tuple[int, int]]] = None,
mode: str = "constant",
stat_length: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
constant_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
end_values: Union[
int, Tuple[int, int], Mapping[Hashable, Tuple[int, int]]
] = None,
reflect_type: str = None,
**pad_width_kwargs: Any,
):
"""
Return a new Variable with padded data.
Parameters
----------
pad_width : mapping of hashable to tuple of int
Mapping with the form of {dim: (pad_before, pad_after)}
describing the number of values padded along each dimension.
{dim: pad} is a shortcut for pad_before = pad_after = pad
mode : str, default: "constant"
See numpy / Dask docs
stat_length : int, tuple or mapping of hashable to tuple
Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
values at edge of each axis used to calculate the statistic value.
constant_values : scalar, tuple or mapping of hashable to tuple
Used in 'constant'. The values to set the padded values for each
axis.
end_values : scalar, tuple or mapping of hashable to tuple
Used in 'linear_ramp'. The values used for the ending value of the
linear_ramp and that will form the edge of the padded array.
reflect_type : {"even", "odd"}, optional
Used in "reflect", and "symmetric". The "even" style is the
default with an unaltered reflection around the edge value. For
the "odd" style, the extended part of the array is created by
subtracting the reflected values from two times the edge value.
**pad_width_kwargs
One of pad_width or pad_width_kwargs must be provided.
Returns
-------
padded : Variable
Variable with the same dimensions and attributes but padded data.
"""
pad_width = either_dict_or_kwargs(pad_width, pad_width_kwargs, "pad")
# change default behaviour of pad with mode constant
if mode == "constant" and (
constant_values is None or constant_values is dtypes.NA
):
dtype, constant_values = dtypes.maybe_promote(self.dtype)
else:
dtype = self.dtype
# create pad_options_kwargs, numpy requires only relevant kwargs to be nonempty
if isinstance(stat_length, dict):
stat_length = self._pad_options_dim_to_index(
stat_length, fill_with_shape=True
)
if isinstance(constant_values, dict):
constant_values = self._pad_options_dim_to_index(constant_values)
if isinstance(end_values, dict):
end_values = self._pad_options_dim_to_index(end_values)
# workaround for bug in Dask's default value of stat_length https://github.com/dask/dask/issues/5303
if stat_length is None and mode in ["maximum", "mean", "median", "minimum"]:
stat_length = [(n, n) for n in self.data.shape] # type: ignore
# change integer values to a tuple of two of those values and change pad_width to index
for k, v in pad_width.items():
if isinstance(v, numbers.Number):
pad_width[k] = (v, v)
pad_width_by_index = self._pad_options_dim_to_index(pad_width)
# create pad_options_kwargs, numpy/dask requires only relevant kwargs to be nonempty
pad_option_kwargs = {}
if stat_length is not None:
pad_option_kwargs["stat_length"] = stat_length
if constant_values is not None:
pad_option_kwargs["constant_values"] = constant_values
if end_values is not None:
pad_option_kwargs["end_values"] = end_values
if reflect_type is not None:
pad_option_kwargs["reflect_type"] = reflect_type # type: ignore
array = duck_array_ops.pad(
self.data.astype(dtype, copy=False),
pad_width_by_index,
mode=mode,
**pad_option_kwargs,
)
return type(self)(self.dims, array)
def roll(self, shifts=None, **shifts_kwargs):
"""
        Return a new Variable with rolled data.
Parameters
----------
shifts : mapping of hashable to int
Integer offset to roll along each of the given dimensions.
Positive offsets roll to the right; negative offsets roll to the
left.
**shifts_kwargs
The keyword arguments form of ``shifts``.
One of shifts or shifts_kwargs must be provided.
Returns
-------
shifted : Variable
Variable with the same dimensions and attributes but rolled data.
"""
shifts = either_dict_or_kwargs(shifts, shifts_kwargs, "roll")
result = self
for dim, count in shifts.items():
result = result._roll_one_dim(dim, count)
return result
def transpose(self, *dims) -> "Variable":
"""Return a new Variable object with transposed dimensions.
Parameters
----------
*dims : str, optional
By default, reverse the dimensions. Otherwise, reorder the
dimensions to this order.
Returns
-------
transposed : Variable
The returned object has transposed data and dimensions with the
same attributes as the original.
Notes
-----
This operation returns a view of this variable's data. It is
lazy for dask-backed Variables but not for numpy-backed Variables.
See Also
--------
numpy.transpose
"""
if len(dims) == 0:
dims = self.dims[::-1]
dims = tuple(infix_dims(dims, self.dims))
axes = self.get_axis_num(dims)
if len(dims) < 2 or dims == self.dims:
# no need to transpose if only one dimension
# or dims are in same order
return self.copy(deep=False)
data = as_indexable(self._data).transpose(axes)
return type(self)(dims, data, self._attrs, self._encoding, fastpath=True)
def set_dims(self, dims, shape=None):
"""Return a new variable with given set of dimensions.
This method might be used to attach new dimension(s) to variable.
When possible, this operation does not copy this variable's data.
Parameters
----------
dims : str or sequence of str or dict
Dimensions to include on the new variable. If a dict, values are
used to provide the sizes of new dimensions; otherwise, new
dimensions are inserted with length 1.
Returns
-------
Variable
"""
if isinstance(dims, str):
dims = [dims]
if shape is None and utils.is_dict_like(dims):
shape = dims.values()
missing_dims = set(self.dims) - set(dims)
if missing_dims:
raise ValueError(
"new dimensions %r must be a superset of "
"existing dimensions %r" % (dims, self.dims)
)
self_dims = set(self.dims)
expanded_dims = tuple(d for d in dims if d not in self_dims) + self.dims
if self.dims == expanded_dims:
# don't use broadcast_to unless necessary so the result remains
# writeable if possible
expanded_data = self.data
elif shape is not None:
dims_map = dict(zip(dims, shape))
tmp_shape = tuple(dims_map[d] for d in expanded_dims)
expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape)
else:
expanded_data = self.data[(None,) * (len(expanded_dims) - self.ndim)]
expanded_var = Variable(
expanded_dims, expanded_data, self._attrs, self._encoding, fastpath=True
)
return expanded_var.transpose(*dims)
def stack(self, dimensions=None, **dimensions_kwargs):
"""
Stack any number of existing dimensions into a single new dimension.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
dimensions : mapping of hashable to tuple of hashable
Mapping of form new_name=(dim1, dim2, ...) describing the
names of new dimensions, and the existing dimensions that
they replace.
**dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
stacked : Variable
Variable with the same attributes but stacked data.
See also
--------
Variable.unstack
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "stack")
result = self
for new_dim, dims in dimensions.items():
result = result._stack_once(dims, new_dim)
return result
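    # A minimal doctest-style sketch of round-tripping stack with the unstack
    # method defined below; the sample array and shapes are invented.
    #
    #   >>> v = Variable(("x", "y"), np.arange(6).reshape(2, 3))
    #   >>> v.stack(z=("x", "y")).dims
    #   ('z',)
    #   >>> v.stack(z=("x", "y")).unstack(z={"x": 2, "y": 3}).dims
    #   ('x', 'y')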
def unstack(self, dimensions=None, **dimensions_kwargs):
"""
Unstack an existing dimension into multiple new dimensions.
New dimensions will be added at the end, and the order of the data
along each new dimension will be in contiguous (C) order.
Parameters
----------
dimensions : mapping of hashable to mapping of hashable to int
Mapping of the form old_dim={dim1: size1, ...} describing the
names of existing dimensions, and the new dimensions and sizes
that they map to.
**dimensions_kwargs
The keyword arguments form of ``dimensions``.
One of dimensions or dimensions_kwargs must be provided.
Returns
-------
unstacked : Variable
Variable with the same attributes but unstacked data.
See also
--------
Variable.stack
"""
dimensions = either_dict_or_kwargs(dimensions, dimensions_kwargs, "unstack")
result = self
for old_dim, dims in dimensions.items():
result = result._unstack_once(dims, old_dim)
return result
def reduce(
self,
func,
dim=None,
axis=None,
keep_attrs=None,
keepdims=False,
**kwargs,
):
"""Reduce this array by applying `func` along some dimension(s).
Parameters
----------
func : callable
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of reducing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
the reduction is calculated over the flattened array (by calling
`func(x)` without an axis argument).
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
keepdims : bool, default: False
If True, the dimensions which are reduced are left in the result
as dimensions of size one
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
if dim == ...:
dim = None
if dim is not None and axis is not None:
raise ValueError("cannot supply both 'axis' and 'dim' arguments")
if dim is not None:
axis = self.get_axis_num(dim)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", r"Mean of empty slice", category=RuntimeWarning
)
if axis is not None:
data = func(self.data, axis=axis, **kwargs)
else:
data = func(self.data, **kwargs)
if getattr(data, "shape", ()) == self.shape:
dims = self.dims
else:
removed_axes = (
range(self.ndim) if axis is None else np.atleast_1d(axis) % self.ndim
)
if keepdims:
# Insert np.newaxis for removed dims
slices = tuple(
np.newaxis if i in removed_axes else slice(None, None)
for i in range(self.ndim)
)
if getattr(data, "shape", None) is None:
# Reduce has produced a scalar value, not an array-like
data = np.asanyarray(data)[slices]
else:
data = data[slices]
dims = self.dims
else:
dims = [
adim for n, adim in enumerate(self.dims) if n not in removed_axes
]
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
attrs = self._attrs if keep_attrs else None
return Variable(dims, data, attrs=attrs)
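# Illustrative usage sketch (assumed data): reducing over "y" with np.sum
# removes that dimension and keeps "x".
# >>> v = Variable(("x", "y"), np.arange(6).reshape(2, 3))
# >>> v.reduce(np.sum, dim="y").shape
# (2,)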
def equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the same dimensions and values;
otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
This method is necessary because `v1 == v2` for Variables
does element-wise comparisons (like numpy.ndarrays).
"""
other = getattr(other, "variable", other)
try:
return self.dims == other.dims and (
self._data is other._data or equiv(self.data, other.data)
)
except (TypeError, AttributeError):
return False
def broadcast_equals(self, other, equiv=duck_array_ops.array_equiv):
"""True if two Variables have the values after being broadcast against
each other; otherwise False.
Variables can still be equal (like pandas objects) if they have NaN
values in the same locations.
"""
try:
self, other = broadcast_variables(self, other)
except (ValueError, AttributeError):
return False
return self.equals(other, equiv=equiv)
def identical(self, other, equiv=duck_array_ops.array_equiv):
"""Like equals, but also checks attributes."""
try:
return utils.dict_equiv(self.attrs, other.attrs) and self.equals(
other, equiv=equiv
)
except (TypeError, AttributeError):
return False
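# Illustrative sketch (assumed values): equals ignores attributes, while
# identical also requires them to match.
# >>> a = Variable("x", [1, 2], attrs={"units": "m"})
# >>> b = Variable("x", [1, 2])
# >>> a.equals(b), a.identical(b)
# (True, False)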
def no_conflicts(self, other, equiv=duck_array_ops.array_notnull_equiv):
"""True if the intersection of two Variable's non-null data is
equal; otherwise false.
Variables can thus still be equal if there are locations where either,
or both, contain NaN values.
"""
return self.broadcast_equals(other, equiv=equiv)
def quantile(
self, q, dim=None, interpolation="linear", keep_attrs=None, skipna=True
):
"""Compute the qth quantile of the data along the specified dimension.
Returns the qth quantiles(s) of the array elements.
Parameters
----------
q : float or sequence of float
Quantile to compute, which must be between 0 and 1
inclusive.
dim : str or sequence of str, optional
Dimension(s) over which to apply quantile.
interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}, default: "linear"
This optional parameter specifies the interpolation method to
use when the desired quantile lies between two data points
``i < j``:
* linear: ``i + (j - i) * fraction``, where ``fraction`` is
the fractional part of the index surrounded by ``i`` and
``j``.
* lower: ``i``.
* higher: ``j``.
* nearest: ``i`` or ``j``, whichever is nearest.
* midpoint: ``(i + j) / 2``.
keep_attrs : bool, optional
If True, the variable's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
Returns
-------
quantiles : Variable
If `q` is a single quantile, then the result
is a scalar. If multiple percentiles are given, first axis of
the result corresponds to the quantile and a quantile dimension
is added to the return array. The other dimensions are the
dimensions that remain after the reduction of the array.
See Also
--------
numpy.nanquantile, pandas.Series.quantile, Dataset.quantile,
DataArray.quantile
"""
from .computation import apply_ufunc
_quantile_func = np.nanquantile if skipna else np.quantile
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
scalar = utils.is_scalar(q)
q = np.atleast_1d(np.asarray(q, dtype=np.float64))
if dim is None:
dim = self.dims
if utils.is_scalar(dim):
dim = [dim]
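# Helper assumed by the apply_ufunc call below (its definition is not shown
# above); a minimal reconstruction moves the quantile axis to the end so it
# becomes the output core dimension that apply_ufunc expects.
def _wrapper(npa, **kwargs):
    return np.moveaxis(_quantile_func(npa, **kwargs), 0, -1)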
axis = np.arange(-1, -1 * len(dim) - 1, -1)
result = apply_ufunc(
_wrapper,
self,
input_core_dims=[dim],
exclude_dims=set(dim),
output_core_dims=[["quantile"]],
output_dtypes=[np.float64],
dask_gufunc_kwargs=dict(output_sizes={"quantile": len(q)}),
dask="parallelized",
kwargs={"q": q, "axis": axis, "interpolation": interpolation},
)
# for backward compatibility
result = result.transpose("quantile", ...)
if scalar:
result = result.squeeze("quantile")
if keep_attrs:
result.attrs = self._attrs
return result
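# Illustrative usage sketch (assumed data): a scalar q gives a 0-d result,
# while a sequence of quantiles adds a leading "quantile" dimension.
# >>> v = Variable("x", [0.0, 1.0, 2.0, 3.0])
# >>> v.quantile(0.5, dim="x")          # 0-d Variable containing 1.5
# >>> v.quantile([0.25, 0.75], dim="x").dims
# ('quantile',)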
def rank(self, dim, pct=False):
"""Ranks the data.
Equal values are assigned a rank that is the average of the ranks that
would have been otherwise assigned to all of the values within that
set. Ranks begin at 1, not 0. If `pct`, computes percentage ranks.
NaNs in the input array are returned as NaNs.
The `bottleneck` library is required.
Parameters
----------
dim : str
Dimension over which to compute rank.
pct : bool, optional
If True, compute percentage ranks, otherwise compute integer ranks.
Returns
-------
ranked : Variable
See Also
--------
Dataset.rank, DataArray.rank
"""
import bottleneck as bn
data = self.data
if is_duck_dask_array(data):
raise TypeError(
"rank does not work for arrays stored as dask "
"arrays. Load the data via .compute() or .load() "
"prior to calling this method."
)
elif not isinstance(data, np.ndarray):
raise TypeError(
"rank is not implemented for {} objects.".format(type(data))
)
axis = self.get_axis_num(dim)
func = bn.nanrankdata if self.dtype.kind == "f" else bn.rankdata
ranked = func(data, axis=axis)
if pct:
count = np.sum(~np.isnan(data), axis=axis, keepdims=True)
ranked /= count
return Variable(self.dims, ranked)
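# Illustrative usage sketch (assumed data; requires the bottleneck package):
# >>> Variable("x", [3.0, 1.0, 2.0]).rank("x")
# <xarray.Variable (x: 3)>
# array([3., 1., 2.])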
def rolling_window(
self, dim, window, window_dim, center=False, fill_value=dtypes.NA
):
"""
Make a rolling_window along dim and add a new_dim to the last place.
Parameters
----------
dim : str
Dimension over which to compute rolling_window.
For nd-rolling, should be list of dimensions.
window : int
Window size of the rolling
For nd-rolling, should be list of integers.
window_dim : str
New name of the window dimension.
For nd-rolling, should be a list of strings.
center : bool, default: False
If True, pad fill_value for both ends. Otherwise, pad in the head
of the axis.
fill_value
value to be filled.
Returns
-------
Variable that is a view of the original array with a added dimension of
size w.
The return dim: self.dims + (window_dim, )
The return shape: self.shape + (window, )
Examples
--------
>>> v = Variable(("a", "b"), np.arange(8).reshape((2, 4)))
>>> v.rolling_window("b", 3, "window_dim")
<xarray.Variable (a: 2, b: 4, window_dim: 3)>
array([[[nan, nan, 0.],
[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.]],
<BLANKLINE>
[[nan, nan, 4.],
[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.]]])
>>> v.rolling_window("b", 3, "window_dim", center=True)
<xarray.Variable (a: 2, b: 4, window_dim: 3)>
array([[[nan, 0., 1.],
[ 0., 1., 2.],
[ 1., 2., 3.],
[ 2., 3., nan]],
<BLANKLINE>
[[nan, 4., 5.],
[ 4., 5., 6.],
[ 5., 6., 7.],
[ 6., 7., nan]]])
"""
if fill_value is dtypes.NA: # np.nan is passed
dtype, fill_value = dtypes.maybe_promote(self.dtype)
array = self.astype(dtype, copy=False).data
else:
dtype = self.dtype
array = self.data
if isinstance(dim, list):
assert len(dim) == len(window)
assert len(dim) == len(window_dim)
assert len(dim) == len(center)
else:
dim = [dim]
window = [window]
window_dim = [window_dim]
center = [center]
axis = [self.get_axis_num(d) for d in dim]
new_dims = self.dims + tuple(window_dim)
return Variable(
new_dims,
duck_array_ops.rolling_window(
array, axis=axis, window=window, center=center, fill_value=fill_value
),
)
def coarsen(
self, windows, func, boundary="exact", side="left", keep_attrs=None, **kwargs
):
"""
Apply reduction function.
"""
windows = {k: v for k, v in windows.items() if k in self.dims}
if not windows:
return self.copy()
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
if keep_attrs:
_attrs = self.attrs
else:
_attrs = None
reshaped, axes = self._coarsen_reshape(windows, boundary, side)
if isinstance(func, str):
name = func
func = getattr(duck_array_ops, name, None)
if func is None:
raise NameError(f"{name} is not a valid method.")
return self._replace(data=func(reshaped, axis=axes, **kwargs), attrs=_attrs)
def _coarsen_reshape(self, windows, boundary, side):
"""
Construct a reshaped-array for coarsen
"""
if not utils.is_dict_like(boundary):
boundary = {d: boundary for d in windows.keys()}
if not utils.is_dict_like(side):
side = {d: side for d in windows.keys()}
# remove unrelated dimensions
boundary = {k: v for k, v in boundary.items() if k in windows}
side = {k: v for k, v in side.items() if k in windows}
for d, window in windows.items():
if window <= 0:
raise ValueError(f"window must be > 0. Given {window}")
variable = self
for d, window in windows.items():
# trim or pad the object
size = variable.shape[self._get_axis_num(d)]
n = int(size / window)
if boundary[d] == "exact":
if n * window != size:
raise ValueError(
"Could not coarsen a dimension of size {} with "
"window {}".format(size, window)
)
elif boundary[d] == "trim":
if side[d] == "left":
variable = variable.isel({d: slice(0, window * n)})
else:
excess = size - window * n
variable = variable.isel({d: slice(excess, None)})
elif boundary[d] == "pad": # pad
pad = window * n - size
if pad < 0:
pad += window
if side[d] == "left":
pad_width = {d: (0, pad)}
else:
pad_width = {d: (pad, 0)}
variable = variable.pad(pad_width, mode="constant")
else:
raise TypeError(
"{} is invalid for boundary. Valid option is 'exact', "
"'trim' and 'pad'".format(boundary[d])
)
shape = []
axes = []
axis_count = 0
for i, d in enumerate(variable.dims):
if d in windows:
size = variable.shape[i]
shape.append(int(size / windows[d]))
shape.append(windows[d])
axis_count += 1
axes.append(i + axis_count)
else:
shape.append(variable.shape[i])
return variable.data.reshape(shape), tuple(axes)
def isnull(self, keep_attrs: bool = None):
"""Test each value in the array for whether it is a missing value.
Returns
-------
isnull : Variable
Same type and shape as object, but the dtype of the data is bool.
See Also
--------
pandas.isnull
Examples
--------
>>> var = xr.Variable("x", [1, np.nan, 3])
>>> var
<xarray.Variable (x: 3)>
array([ 1., nan, 3.])
>>> var.isnull()
<xarray.Variable (x: 3)>
array([False, True, False])
"""
from .computation import apply_ufunc
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
return apply_ufunc(
duck_array_ops.isnull,
self,
dask="allowed",
keep_attrs=keep_attrs,
)
def notnull(self, keep_attrs: bool = None):
"""Test each value in the array for whether it is not a missing value.
Returns
-------
notnull : Variable
Same type and shape as object, but the dtype of the data is bool.
See Also
--------
pandas.notnull
Examples
--------
>>> var = xr.Variable("x", [1, np.nan, 3])
>>> var
<xarray.Variable (x: 3)>
array([ 1., nan, 3.])
>>> var.notnull()
<xarray.Variable (x: 3)>
array([ True, False, True])
"""
from .computation import apply_ufunc
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
return apply_ufunc(
duck_array_ops.notnull,
self,
dask="allowed",
keep_attrs=keep_attrs,
)
def __array_wrap__(self, obj, context=None):
return Variable(self.dims, obj)
def _to_numeric(self, offset=None, datetime_unit=None, dtype=float):
"""A (private) method to convert datetime array to numeric dtype
See duck_array_ops.datetime_to_numeric
"""
numeric_array = duck_array_ops.datetime_to_numeric(
self.data, offset, datetime_unit, dtype
)
return type(self)(self.dims, numeric_array, self._attrs)
def _unravel_argminmax(
self,
argminmax: str,
dim: Union[Hashable, Sequence[Hashable], None],
axis: Union[int, None],
keep_attrs: Optional[bool],
skipna: Optional[bool],
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Apply argmin or argmax over one or more dimensions, returning the result as a
dict of DataArray that can be passed directly to isel.
"""
if dim is None and axis is None:
warnings.warn(
"Behaviour of argmin/argmax with neither dim nor axis argument will "
"change to return a dict of indices of each dimension. To get a "
"single, flat index, please use np.argmin(da.data) or "
"np.argmax(da.data) instead of da.argmin() or da.argmax().",
DeprecationWarning,
stacklevel=3,
)
argminmax_func = getattr(duck_array_ops, argminmax)
if dim is ...:
# In future, should do this also when (dim is None and axis is None)
dim = self.dims
if (
dim is None
or axis is not None
or not isinstance(dim, Sequence)
or isinstance(dim, str)
):
# Return int index if single dimension is passed, and is not part of a
# sequence
return self.reduce(
argminmax_func, dim=dim, axis=axis, keep_attrs=keep_attrs, skipna=skipna
)
# Get a name for the new dimension that does not conflict with any existing
# dimension
newdimname = "_unravel_argminmax_dim_0"
count = 1
while newdimname in self.dims:
newdimname = f"_unravel_argminmax_dim_{count}"
count += 1
stacked = self.stack({newdimname: dim})
result_dims = stacked.dims[:-1]
reduce_shape = tuple(self.sizes[d] for d in dim)
result_flat_indices = stacked.reduce(argminmax_func, axis=-1, skipna=skipna)
result_unravelled_indices = duck_array_ops.unravel_index(
result_flat_indices.data, reduce_shape
)
result = {
d: Variable(dims=result_dims, data=i)
for d, i in zip(dim, result_unravelled_indices)
}
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
if keep_attrs:
for v in result.values():
v.attrs = self.attrs
return result
def argmin(
self,
dim: Union[Hashable, Sequence[Hashable]] = None,
axis: int = None,
keep_attrs: bool = None,
skipna: bool = None,
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Index or indices of the minimum of the Variable over one or more dimensions.
If a sequence is passed to 'dim', then the result is returned as a dict of
Variables, which can be passed directly to isel(). If a single str is passed to
'dim' then a Variable with dtype int is returned.
If there are multiple minima, the indices of the first one found will be
returned.
Parameters
----------
dim : hashable, sequence of hashable or ..., optional
The dimensions over which to find the minimum. By default, finds minimum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will return a dict with indices for all
dimensions; to return a dict with all dimensions now, pass '...'.
axis : int, optional
Axis over which to apply `argmin`. Only one of the 'dim' and 'axis' arguments
can be supplied.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Variable or dict of Variable
See also
--------
DataArray.argmin, DataArray.idxmin
"""
return self._unravel_argminmax("argmin", dim, axis, keep_attrs, skipna)
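# Illustrative usage sketch (assumed data): passing a sequence of dimensions
# returns a dict of index Variables that can be fed straight to isel().
# >>> v = Variable(("x", "y"), np.arange(6).reshape(2, 3))
# >>> indices = v.argmin(dim=["x", "y"])
# >>> sorted(indices)                  # both indices point at the (0, 0) element
# ['x', 'y']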
def argmax(
self,
dim: Union[Hashable, Sequence[Hashable]] = None,
axis: int = None,
keep_attrs: bool = None,
skipna: bool = None,
) -> Union["Variable", Dict[Hashable, "Variable"]]:
"""Index or indices of the maximum of the Variable over one or more dimensions.
If a sequence is passed to 'dim', then the result is returned as a dict of
Variables, which can be passed directly to isel(). If a single str is passed to
'dim' then a Variable with dtype int is returned.
If there are multiple maxima, the indices of the first one found will be
returned.
Parameters
----------
dim : hashable, sequence of hashable or ..., optional
The dimensions over which to find the maximum. By default, finds maximum over
all dimensions - for now returning an int for backward compatibility, but
this is deprecated, in future will return a dict with indices for all
dimensions; to return a dict with all dimensions now, pass '...'.
axis : int, optional
Axis over which to apply `argmax`. Only one of the 'dim' and 'axis' arguments
can be supplied.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
Returns
-------
result : Variable or dict of Variable
See also
--------
DataArray.argmax, DataArray.idxmax
"""
return self._unravel_argminmax("argmax", dim, axis, keep_attrs, skipna)
ops.inject_all_ops_and_reduce_methods(Variable)
def copy(self, deep=True, data=None):
"""Returns a copy of this object.
`deep` is ignored since data is stored in the form of
pandas.Index, which is already immutable. Dimensions, attributes
and encodings are always copied.
Use `data` to create a new object with the same structure as
original but entirely new data.
Parameters
----------
deep : bool, optional
Deep is ignored when data is given. Whether the data array is
loaded into memory and copied onto the new object. Default is True.
data : array_like, optional
Data to use in the new object. Must have same shape as original.
Returns
-------
object : Variable
New object with dimensions, attributes, encodings, and optionally
data copied from original.
"""
if data is None:
data = self._data.copy(deep=deep)
else:
data = as_compatible_data(data)
if self.shape != data.shape:
raise ValueError(
"Data shape {} must match shape of object {}".format(
data.shape, self.shape
)
)
return type(self)(self.dims, data, self._attrs, self._encoding, fastpath=True)
def to_index_variable(self):
"""Return this variable as an xarray.IndexVariable"""
return self
to_coord = utils.alias(to_index_variable, "to_coord")
def to_index(self):
"""Convert this variable to a pandas.Index"""
# n.b. creating a new pandas.Index from an old pandas.Index is
# basically free as pandas.Index objects are immutable
assert self.ndim == 1
index = self._data.array
if isinstance(index, pd.MultiIndex):
# set default names for multi-index unnamed levels so that
# we can safely rename dimension / coordinate later
valid_level_names = [
name or "{}_level_{}".format(self.dims[0], i)
for i, name in enumerate(index.names)
]
index = index.set_names(valid_level_names)
else:
index = index.set_names(self.name)
return index
def get_level_variable(self, level):
"""Return a new IndexVariable from a given MultiIndex level."""
if self.level_names is None:
raise ValueError("IndexVariable %r has no MultiIndex" % self.name)
index = self.to_index()
return type(self)(self.dims, index.get_level_values(level))
# for backwards compatibility
Coordinate = utils.alias(IndexVariable, "Coordinate")
def _broadcast_compat_variables(*variables):
"""Create broadcast compatible variables, with the same dimensions.
Unlike the result of broadcast_variables(), some variables may have
dimensions of size 1 instead of the size of the broadcast dimension.
"""
dims = tuple(_unified_dims(variables))
return tuple(var.set_dims(dims) if var.dims != dims else var for var in variables)
def broadcast_variables(*variables):
"""Given any number of variables, return variables with matching dimensions
and broadcast data.
The data on the returned variables will be a view of the data on the
corresponding original arrays, but dimensions will be reordered and
inserted so that both broadcast arrays have the same dimensions. The new
dimensions are sorted in order of appearance in the first variable's
dimensions followed by the second variable's dimensions.
"""
dims_map = _unified_dims(variables)
dims_tuple = tuple(dims_map)
return tuple(
var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables
)
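# Illustrative usage sketch (assumed shapes): the returned variables share the
# union of dimensions, with data broadcast to the common shape.
# >>> a = Variable(("x",), np.arange(2))
# >>> b = Variable(("y",), np.arange(3))
# >>> [v.shape for v in broadcast_variables(a, b)]
# [(2, 3), (2, 3)]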
def concat(variables, dim="concat_dim", positions=None, shortcut=False):
"""Concatenate variables along a new or existing dimension.
Parameters
----------
variables : iterable of Variable
Arrays to stack together. Each variable is expected to have
matching dimensions and shape except for along the stacked
dimension.
dim : str or DataArray, optional
Name of the dimension to stack along. This can either be a new
dimension name, in which case it is added along axis=0, or an
existing dimension name, in which case the location of the
dimension is unchanged. Where to insert the new dimension is
determined by the first variable.
positions : None or list of array-like, optional
List of integer arrays which specifies the integer positions to which
to assign each dataset along the concatenated dimension. If not
supplied, objects are concatenated in the provided order.
shortcut : bool, optional
This option is used internally to speed-up groupby operations.
If `shortcut` is True, some checks of internal consistency between
arrays to concatenate are skipped.
Returns
-------
stacked : Variable
Concatenated Variable formed by stacking all the supplied variables
along the given dimension.
"""
variables = list(variables)
if all(isinstance(v, IndexVariable) for v in variables):
return IndexVariable.concat(variables, dim, positions, shortcut)
else:
return Variable.concat(variables, dim, positions, shortcut)
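# Illustrative usage sketch (assumed data): concatenating along a new dimension
# "t" stacks the inputs along a new leading axis.
# >>> parts = [Variable("x", [1, 2]), Variable("x", [3, 4])]
# >>> concat(parts, dim="t").shape
# (2, 2)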
def assert_unique_multiindex_level_names(variables):
"""Check for uniqueness of MultiIndex level names in all given
variables.
Not public API. Used for checking consistency of DataArray and Dataset
objects.
"""
level_names = defaultdict(list)
all_level_names = set()
for var_name, var in variables.items():
if isinstance(var._data, PandasIndexAdapter):
idx_level_names = var.to_index_variable().level_names
if idx_level_names is not None:
for n in idx_level_names:
level_names[n].append(f"{n!r} ({var_name})")
if idx_level_names:
all_level_names.update(idx_level_names)
for k, v in level_names.items():
if k in variables:
v.append("(%s)" % k)
duplicate_names = [v for v in level_names.values() if len(v) > 1]
if duplicate_names:
conflict_str = "\n".join(", ".join(v) for v in duplicate_names)
raise ValueError("conflicting MultiIndex level name(s):\n%s" % conflict_str)
# Check for conflicts between level names and dimensions GH:2299
for k, v in variables.items():
for d in v.dims:
if d in all_level_names:
raise ValueError(
"conflicting level / dimension names. {} "
"already exists as a level name.".format(d)
)
| 36.664265 | 124 | 0.589674 |
0a6f4ad99174d6090d3cbdb6c9bcc1d787eae3b4 | 223 | py | Python | codeforces.com/1186A/solution.py | zubtsov/competitive-programming | 919d63130144347d7f6eddcf8f5bc2afb85fddf3 | [
"MIT"
] | null | null | null | codeforces.com/1186A/solution.py | zubtsov/competitive-programming | 919d63130144347d7f6eddcf8f5bc2afb85fddf3 | [
"MIT"
] | null | null | null | codeforces.com/1186A/solution.py | zubtsov/competitive-programming | 919d63130144347d7f6eddcf8f5bc2afb85fddf3 | [
"MIT"
] | null | null | null | number_of_participants, number_of_pens, number_of_notebooks = map(int, input().split())
if number_of_pens >= number_of_participants and number_of_notebooks >= number_of_participants:
print('Yes')
else:
print('No')
| 31.857143 | 94 | 0.7713 |
0a7052f7029ee061d74d603abefe9574ef7b3461 | 114 | py | Python | DLA/__main__.py | StanczakDominik/DLA | bf63592a5ac96ffef639e7a0c80d7d52ff776322 | [
"MIT"
] | null | null | null | DLA/__main__.py | StanczakDominik/DLA | bf63592a5ac96ffef639e7a0c80d7d52ff776322 | [
"MIT"
] | null | null | null | DLA/__main__.py | StanczakDominik/DLA | bf63592a5ac96ffef639e7a0c80d7d52ff776322 | [
"MIT"
] | null | null | null | from DLA import main_single
d = main_single(1, gotosize=[1e4, 5e4])
d.plot_particles()
d.plot_mass_distribution()
| 22.8 | 39 | 0.780702 |
0a70ca1b5958248a2b51b4d49a2d791ec9ec77e7 | 36,386 | py | Python | pyamf/tests/test_util.py | bulutistan/Py3AMF | 3de53095b52fe2bf82b69ba5ad0b894b53045f7e | [
"MIT"
] | 42 | 2017-04-17T11:40:25.000Z | 2021-09-19T09:59:31.000Z | pyamf/tests/test_util.py | bulutistan/Py3AMF | 3de53095b52fe2bf82b69ba5ad0b894b53045f7e | [
"MIT"
] | 8 | 2017-07-27T07:39:30.000Z | 2021-10-19T09:49:09.000Z | pyamf/tests/test_util.py | bulutistan/Py3AMF | 3de53095b52fe2bf82b69ba5ad0b894b53045f7e | [
"MIT"
] | 15 | 2017-05-16T12:46:33.000Z | 2021-09-20T02:30:57.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) The PyAMF Project.
# See LICENSE.txt for details.
"""
Tests for AMF utilities.
@since: 0.1.0
"""
import unittest
from datetime import datetime
from io import BytesIO
import pyamf
from pyamf import util
from pyamf.tests.util import replace_dict
PosInf = 1e300000
NegInf = -1e300000
NaN = PosInf / PosInf
| 27.461132 | 81 | 0.542516 |
0a71715157a2be752f2c46cd1b41f44aab6ece59 | 3,087 | py | Python | e-valuator.py | keocol/e-valuator | c2bab22e3debf08263fef57ee4135312a2bb2b0d | [
"MIT"
] | null | null | null | e-valuator.py | keocol/e-valuator | c2bab22e3debf08263fef57ee4135312a2bb2b0d | [
"MIT"
] | null | null | null | e-valuator.py | keocol/e-valuator | c2bab22e3debf08263fef57ee4135312a2bb2b0d | [
"MIT"
] | null | null | null | import dns.resolver
import sys
import colorama
import platform
from colorama import init, Fore, Back, Style
import re
# pip install -r requirements.txt (colorama)
os = platform.platform()
if os.find('Windows')!= (-1):
init(convert=True)
print("""
\x1B[3mSimple Python3 Script for Checking SPF & DMARC Records.\x1B[0m
""" + '\n')
Domain = input('Domain: ')
# Checking SPF
print ('\n[+] Checking SPF Record...')
try:
obj_answer = dns.resolver.resolve(Domain, 'TXT')
except:
sys.exit(Fore.RED + "\n[+] Domain can't be resolved! Check the domain name and try again..")
answer = str(obj_answer.response)
cond = answer.find("v=spf")
if cond != -1:
print ('[+] SPF Record Found!')
spf_pos= answer.find("v=spf")
spf_end_tmp= (answer[spf_pos:].find("\n"))-1
spf_end= answer[spf_pos:spf_pos+spf_end_tmp]
print (Fore.GREEN + '[+] Domain: ' + Domain)
print (Fore.GREEN + '[+] SPF Record: ' +spf_end)
neutral_check = answer.find('?all')
fail_check = answer.find('-all')
soft_check = answer.find('~all')
pass_check = answer.find('+all')
if neutral_check != -1:
print (Fore.RED +'[+] Result: ?all IS FOUND!! Domain emails can be spoofed!')
elif fail_check != -1:
print (Fore.GREEN +'[+] Result: -all is found. SPF is correctly configured.')
elif soft_check != -1:
print (Fore.GREEN +'[+] Result: ~all is found. SPF is correctly configured.')
elif pass_check != -1:
print (Fore.RED +'[+] Result: +all DOMAIN IS VERY BADLY CONFIGURED! Domain emails can be spoofed!')
else:
print (Fore.RED +'[+] Result: No condition is set for "all"! Domain emails can be spoofed!')
else:
print (Fore.RED +'[+] No SPF Record Found!!')
# Checking DMARC
print (Fore.WHITE + '\n\n[+] Checking DMARC Policy..')
try:
obj2_answer = dns.resolver.resolve('_dmarc.'+ Domain, 'TXT')
except:
sys.exit(Fore.RED + "[+] The domain doesn't have DMARC policy configured!")
answer2 = str(obj2_answer.response)
print (Fore.WHITE + '[+] DMARC Policy Found!')
none_check = re.search("[\;\s]p\=none\;", answer2)
reject_check = re.search("[\;\s]p\=reject\;", answer2)
quarantine_check = re.search("[\;\s]p\=quarantine\;", answer2)
if none_check:
print (Fore.RED + '[+] Result: DMARC Policy is set as none! Domain emails can be spoofed!')
if reject_check:
print (Fore.GREEN + '[+] Result: DMARC Policy is set as reject! Domain emails are safe from spoofing.')
if quarantine_check:
print (Fore.GREEN + '[+] Result: DMARC Policy is set as quarantine! Domain emails are safe from spoofing.') | 32.15625 | 108 | 0.547781 |
6a50e8edb03f4c7852b3cc7809ccd49216f25af1 | 2,655 | py | Python | api/server.py | qh73xe/HowAboutNatume | 8d994a1e16e2153dc200097d8f8b43713d76a3d5 | [
"MIT"
] | null | null | null | api/server.py | qh73xe/HowAboutNatume | 8d994a1e16e2153dc200097d8f8b43713d76a3d5 | [
"MIT"
] | 7 | 2020-03-24T15:37:48.000Z | 2021-06-01T22:01:22.000Z | api/server.py | qh73xe/HowAboutNatume | 8d994a1e16e2153dc200097d8f8b43713d76a3d5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*
""" ask.api ."""
from json import dumps
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.options import parse_command_line
from tornado.web import Application, RequestHandler
from tornado.options import define, options
from tokenizer import get_entity
from logger import getLogger
LOGGER = getLogger('API_MODULE')
define("port", default=8000, help="run on the given port", type=int)
if __name__ == "__main__":
parse_command_line()
app = Application(handlers=[(r"/", AskHandler)])
http_server = HTTPServer(app)
http_server.listen(options.port)
IOLoop.instance().start()
| 27.947368 | 75 | 0.529567 |
6a5258097a7cb4af2ef28cde1153d8db7884fd80 | 3,012 | py | Python | proxy/http/chunk_parser.py | GDGSNF/proxy.py | 3ee2824217286df3c108beadf3185eee35c28b49 | [
"BSD-3-Clause"
] | null | null | null | proxy/http/chunk_parser.py | GDGSNF/proxy.py | 3ee2824217286df3c108beadf3185eee35c28b49 | [
"BSD-3-Clause"
] | null | null | null | proxy/http/chunk_parser.py | GDGSNF/proxy.py | 3ee2824217286df3c108beadf3185eee35c28b49 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
from typing import NamedTuple, Tuple, List, Optional
from ..common.utils import bytes_, find_http_line
from ..common.constants import CRLF, DEFAULT_BUFFER_SIZE
ChunkParserStates = NamedTuple(
'ChunkParserStates', [
('WAITING_FOR_SIZE', int),
('WAITING_FOR_DATA', int),
('COMPLETE', int),
],
)
chunkParserStates = ChunkParserStates(1, 2, 3)
| 35.857143 | 86 | 0.581009 |
6a53c43c787fb87b95985049d6273d36fc7dbdab | 31,240 | py | Python | nova/pci/stats.py | 10088/nova | 972c06c608f0b00e9066d7f581fd81197065cf49 | [
"Apache-2.0"
] | null | null | null | nova/pci/stats.py | 10088/nova | 972c06c608f0b00e9066d7f581fd81197065cf49 | [
"Apache-2.0"
] | null | null | null | nova/pci/stats.py | 10088/nova | 972c06c608f0b00e9066d7f581fd81197065cf49 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import typing as ty
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
from nova import exception
from nova import objects
from nova.objects import fields
from nova.objects import pci_device_pool
from nova.pci.request import PCI_REMOTE_MANAGED_TAG
from nova.pci import utils
from nova.pci import whitelist
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# TODO(stephenfin): We might want to use TypedDict here. Refer to
# https://mypy.readthedocs.io/en/latest/kinds_of_types.html#typeddict for
# more information.
Pool = ty.Dict[str, ty.Any]
| 41.708945 | 79 | 0.628073 |
6a54175f824a3a8a92a61c38d426dd45948b4848 | 364 | py | Python | Use.py | Codingprivacy/Multiple-Rename | 486289e8158487dad058cd8f781ac27bc9a5fc02 | [
"MIT"
] | 2 | 2018-04-01T06:16:33.000Z | 2018-05-04T18:57:50.000Z | Use.py | codingprivacy/Multiple-Rename | 486289e8158487dad058cd8f781ac27bc9a5fc02 | [
"MIT"
] | null | null | null | Use.py | codingprivacy/Multiple-Rename | 486289e8158487dad058cd8f781ac27bc9a5fc02 | [
"MIT"
] | null | null | null | import multiple
multiple.rename("C:/Users/Username/Desktop",'new_name',33,'.exe')
"""this above lines renames all the files of the folder Desktop to 'new_name' and
count starts from 33 to further (we can also provide 1 to start it from 1) and
extension is given '.exe'
hence the files will be renamed like :
1. new_name33.exe
2. new_name34.exe and so on
""" | 28 | 81 | 0.739011 |
6a55c2af9ac7243f141edb694902ca98eb95a939 | 278 | py | Python | ReadSymLink.py | ohel/pyorbital-gizmod-tweaks | 4c02783d1c6287df508351467a5c203a11430b07 | [
"Unlicense"
] | null | null | null | ReadSymLink.py | ohel/pyorbital-gizmod-tweaks | 4c02783d1c6287df508351467a5c203a11430b07 | [
"Unlicense"
] | null | null | null | ReadSymLink.py | ohel/pyorbital-gizmod-tweaks | 4c02783d1c6287df508351467a5c203a11430b07 | [
"Unlicense"
] | null | null | null | import os
def readlinkabs(l):
"""
Return an absolute path for the destination
of a symlink
"""
if not (os.path.islink(l)):
return None
p = os.readlink(l)
if os.path.isabs(p):
return p
return os.path.join(os.path.dirname(l), p)
| 18.533333 | 48 | 0.582734 |
6a55f8c89efdf9367ae5e51c6555c781fae366b6 | 1,368 | py | Python | examples/capture_circular.py | IanTBlack/picamera2 | 4d31a56cdb0d8360e71927e754fc6bef50bec360 | [
"BSD-2-Clause"
] | 71 | 2022-02-15T14:24:34.000Z | 2022-03-29T16:36:46.000Z | examples/capture_circular.py | IanTBlack/picamera2 | 4d31a56cdb0d8360e71927e754fc6bef50bec360 | [
"BSD-2-Clause"
] | 37 | 2022-02-16T12:35:45.000Z | 2022-03-31T13:18:42.000Z | examples/capture_circular.py | IanTBlack/picamera2 | 4d31a56cdb0d8360e71927e754fc6bef50bec360 | [
"BSD-2-Clause"
] | 15 | 2022-02-16T12:12:57.000Z | 2022-03-31T15:17:58.000Z | #!/usr/bin/python3
import time
import numpy as np
from picamera2.encoders import H264Encoder
from picamera2.outputs import CircularOutput
from picamera2 import Picamera2
lsize = (320, 240)
picam2 = Picamera2()
video_config = picam2.video_configuration(main={"size": (1280, 720), "format": "RGB888"},
lores={"size": lsize, "format": "YUV420"})
picam2.configure(video_config)
picam2.start_preview()
encoder = H264Encoder(1000000, repeat=True)
encoder.output = CircularOutput()
picam2.encoder = encoder
picam2.start()
picam2.start_encoder()
w, h = lsize
prev = None
encoding = False
ltime = 0
while True:
cur = picam2.capture_buffer("lores")
cur = cur[:w * h].reshape(h, w)
if prev is not None:
# Measure pixels differences between current and
# previous frame
mse = np.square(np.subtract(cur, prev)).mean()
if mse > 7:
if not encoding:
epoch = int(time.time())
encoder.output.fileoutput = "{}.h264".format(epoch)
encoder.output.start()
encoding = True
print("New Motion", mse)
ltime = time.time()
else:
if encoding and time.time() - ltime > 5.0:
encoder.output.stop()
encoding = False
prev = cur
picam2.stop_encoder()
| 27.918367 | 89 | 0.604532 |
6a561a673ebb04da901d20e99ce9c86e3955a26e | 8,933 | py | Python | Bio/NeuralNetwork/Gene/Pattern.py | barendt/biopython | 391bcdbee7f821bff3e12b75c635a06bc1b2dcea | [
"PostgreSQL"
] | 3 | 2017-10-23T21:53:57.000Z | 2019-09-23T05:14:12.000Z | Bio/NeuralNetwork/Gene/Pattern.py | eoc21/biopython | c0f8db8f55a506837c320459957a0ce99b0618b6 | [
"PostgreSQL"
] | null | null | null | Bio/NeuralNetwork/Gene/Pattern.py | eoc21/biopython | c0f8db8f55a506837c320459957a0ce99b0618b6 | [
"PostgreSQL"
] | 6 | 2020-02-26T16:34:20.000Z | 2020-03-04T15:34:00.000Z | """Generic functionality useful for all gene representations.
This module contains classes which can be used for all the different
types of patterns available for representing gene information (i.e. motifs,
signatures and schemas). These are the general classes which should be
able to handle any of the different specific patterns.
"""
# standard library
import random
# biopython
from Bio import utils
from Bio.Seq import Seq, MutableSeq
| 35.169291 | 110 | 0.622187 |
6a565a6b3597c1dbb9a2e86bdaf31bd17e76951c | 58 | py | Python | neslter/parsing/nut/__init__.py | WHOIGit/nes-lter-ims | d4cc96c10da56ca33286af84d669625b67170522 | [
"MIT"
] | 3 | 2019-01-24T16:32:50.000Z | 2021-11-05T02:18:12.000Z | neslter/parsing/nut/__init__.py | WHOIGit/nes-lter-ims | d4cc96c10da56ca33286af84d669625b67170522 | [
"MIT"
] | 45 | 2019-05-23T15:15:32.000Z | 2022-03-15T14:09:20.000Z | neslter/parsing/nut/__init__.py | WHOIGit/nes-lter-ims | d4cc96c10da56ca33286af84d669625b67170522 | [
"MIT"
] | null | null | null | from .nut import parse_nut, format_nut, merge_nut_bottles
| 29 | 57 | 0.844828 |
6a57cefd47f3150e0a9d0bbdcd3affcfe90d72c9 | 15,520 | py | Python | legtool/tabs/servo_tab.py | jpieper/legtool | ab3946051bd16817b61d3073ce7be8bd27af90d0 | [
"Apache-2.0"
] | 10 | 2015-09-23T19:28:06.000Z | 2021-04-27T02:32:27.000Z | legtool/tabs/servo_tab.py | jpieper/legtool | ab3946051bd16817b61d3073ce7be8bd27af90d0 | [
"Apache-2.0"
] | null | null | null | legtool/tabs/servo_tab.py | jpieper/legtool | ab3946051bd16817b61d3073ce7be8bd27af90d0 | [
"Apache-2.0"
] | 9 | 2015-10-16T07:26:18.000Z | 2021-01-13T07:18:35.000Z | # Copyright 2014 Josh Pieper, [email protected].
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import trollius as asyncio
from trollius import Task, From, Return
import PySide.QtCore as QtCore
import PySide.QtGui as QtGui
from ..servo import selector
from .common import BoolContext
from . import gazebo_config_dialog
| 35.514874 | 78 | 0.593814 |
6a588636dc362efae84b790a87924f429a4e4039 | 33,745 | py | Python | epsilon/juice.py | twisted/epsilon | 783910e1829688e95719a7d3151ec3e2cbb101fd | [
"MIT"
] | 4 | 2017-09-01T18:49:11.000Z | 2020-04-21T10:11:33.000Z | epsilon/juice.py | twisted/epsilon | 783910e1829688e95719a7d3151ec3e2cbb101fd | [
"MIT"
] | 35 | 2015-01-16T22:12:44.000Z | 2021-07-11T11:28:58.000Z | epsilon/juice.py | twisted/epsilon | 783910e1829688e95719a7d3151ec3e2cbb101fd | [
"MIT"
] | 8 | 2015-01-24T17:43:58.000Z | 2019-09-01T12:38:41.000Z | # -*- test-case-name: epsilon.test.test_juice -*-
# Copyright 2005 Divmod, Inc. See LICENSE file for details
import warnings, pprint
import keyword
import io
import six
from twisted.internet.main import CONNECTION_LOST
from twisted.internet.defer import Deferred, maybeDeferred, fail
from twisted.internet.protocol import ServerFactory, ClientFactory
from twisted.internet.ssl import Certificate
from twisted.python.failure import Failure
from twisted.python import log, filepath
from epsilon.liner import LineReceiver
from epsilon.compat import long
from epsilon import extime
ASK = '_ask'
ANSWER = '_answer'
COMMAND = '_command'
ERROR = '_error'
ERROR_CODE = '_error_code'
ERROR_DESCRIPTION = '_error_description'
LENGTH = '_length'
BODY = 'body'
debug = False
# juice.Box => JuiceBox
Box = JuiceBox
def normalizeKey(key):
lkey = six.ensure_str(key).lower().replace('-', '_')
if keyword.iskeyword(lkey):
return lkey.title()
return lkey
def parseJuiceHeaders(lines):
"""
Create a JuiceBox from a list of header lines.
@param lines: a list of lines.
@type lines: a list of L{bytes}
"""
b = JuiceBox()
key = None
for L in lines:
if L[0:1] == b' ':
# continuation
assert key is not None
b[key] += six.ensure_str(b'\r\n' + L[1:])
continue
parts = L.split(b': ', 1)
if len(parts) != 2:
raise MalformedJuiceBox("Wrong number of parts: %r" % (L,))
key, value = parts
key = normalizeKey(key)
b[key] = six.ensure_str(value)
return int(b.pop(LENGTH, 0)), b
# Temporary backwards compatibility for Exponent
Body = String
VERSIONS = [1]
parse = _ParserHelper.parse
parseString = _ParserHelper.parseString
| 33.410891 | 133 | 0.602874 |
6a5913eb8964167841ec2eb740f4b32d39ad706a | 7,290 | py | Python | bonsai3/simulator_client.py | kirillpol-ms/bonsai3-py | ede9c2c1d25d784d61b7cbf1438a257b5d592274 | [
"MIT"
] | null | null | null | bonsai3/simulator_client.py | kirillpol-ms/bonsai3-py | ede9c2c1d25d784d61b7cbf1438a257b5d592274 | [
"MIT"
] | 3 | 2020-06-01T18:43:55.000Z | 2020-08-14T17:44:54.000Z | bonsai3/simulator_client.py | BonsaiAI/bonsai3-py | 29158cc58f39604fa96e10e41ff00fc195f6b315 | [
"MIT"
] | 2 | 2020-06-16T14:24:17.000Z | 2020-08-13T00:27:31.000Z | """
Client for simulator requests
"""
__copyright__ = "Copyright 2020, Microsoft Corp."
# pyright: strict
from random import uniform
import time
from typing import Union
import jsons
import requests
from .exceptions import RetryTimeoutError, ServiceError
from .logger import Logger
from .simulator_protocol import (
ServiceConfig,
SimulatorEvent,
SimulatorEventRequest,
SimulatorInterface,
)
log = Logger()
_RETRYABLE_ERROR_CODES = {502, 503, 504}
_MAXIMUM_BACKOFF_SECONDS = 60
_BACKOFF_BASE_MULTIPLIER_MILLISECONDS = 50
| 38.167539 | 120 | 0.58834 |
6a5a09a1f1eb09c5b1fb6c4e179dd1021a0b354e | 47,088 | py | Python | perturbed_images_generation_multiProcess.py | gwxie/Synthesize-Distorted-Image-and-Its-Control-Points | ed6de3e05a7ee1f3aecf65fcbb87c11d2ede41e7 | [
"Apache-2.0"
] | 8 | 2022-03-27T18:37:57.000Z | 2022-03-30T09:17:26.000Z | perturbed_images_generation_multiProcess.py | gwxie/Synthesize-Distorted-Image-and-Its-Control-Points | ed6de3e05a7ee1f3aecf65fcbb87c11d2ede41e7 | [
"Apache-2.0"
] | null | null | null | perturbed_images_generation_multiProcess.py | gwxie/Synthesize-Distorted-Image-and-Its-Control-Points | ed6de3e05a7ee1f3aecf65fcbb87c11d2ede41e7 | [
"Apache-2.0"
] | 1 | 2022-03-31T02:22:58.000Z | 2022-03-31T02:22:58.000Z | '''
GuoWang xie
set up: 2020-1-9
integrate img and label into one file
-- fiducial1024_v1
'''
import argparse
import sys, os
import pickle
import random
import collections
import json
import numpy as np
import scipy.io as io
import scipy.misc as m
import matplotlib.pyplot as plt
import glob
import math
import time
import threading
import multiprocessing as mp
from multiprocessing import Pool
import re
import cv2
# sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN
import utils
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--path',
default='./scan/new/', type=str,
help='the path of origin img.')
parser.add_argument('--bg_path',
default='./background/', type=str,
help='the path of bg img.')
parser.add_argument('--output_path',
default='./output/', type=str,
help='the path of origin img.')
# parser.set_defaults(output_path='test')
parser.add_argument('--count_from', '-p', default=0, type=int,
metavar='N', help='print frequency (default: 10)') # print frequency
parser.add_argument('--repeat_T', default=0, type=int)
parser.add_argument('--sys_num', default=6, type=int)
args = parser.parse_args()
xgw(args)
| 53.692132 | 380 | 0.720417 |
6a5a90584312df812f9d84d198fd00ed22ebcb67 | 3,042 | py | Python | tweet_evaluator.py | tw-ddis/Gnip-Tweet-Evaluation | c5c847698bd6deb891870e5cf2514dfe78caa1c2 | [
"MIT"
] | 3 | 2019-11-14T11:46:27.000Z | 2021-01-16T06:04:46.000Z | tweet_evaluator.py | pen-corsica/Gnip-Tweet-Evaluation | c5c847698bd6deb891870e5cf2514dfe78caa1c2 | [
"MIT"
] | 1 | 2017-09-19T22:59:03.000Z | 2017-09-19T23:06:12.000Z | tweet_evaluator.py | pen-corsica/Gnip-Tweet-Evaluation | c5c847698bd6deb891870e5cf2514dfe78caa1c2 | [
"MIT"
] | 4 | 2016-06-13T16:34:32.000Z | 2017-08-01T20:20:56.000Z | #!/usr/bin/env python
import argparse
import logging
try:
import ujson as json
except ImportError:
import json
import sys
import datetime
import os
import importlib
from gnip_tweet_evaluation import analysis,output
"""
Perform audience and/or conversation analysis on a set of Tweets.
"""
logger = logging.getLogger('analysis')
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-n","--identifier",dest="unique_identifier", default='0',type=str,
help="a unique name to identify the conversation/audience; default is '%(default)s'")
parser.add_argument("-c","--do-conversation-analysis",dest="do_conversation_analysis",action="store_true",default=False,
help="do conversation analysis on Tweets")
parser.add_argument("-a","--do-audience-analysis",dest="do_audience_analysis",action="store_true",default=False,
help="do audience analysis on users")
parser.add_argument("-i","--input-file-name",dest="input_file_name",default=None,
help="file containing Tweet data; take input from stdin if not present")
parser.add_argument('-o','--output-dir',dest='output_directory',default=os.environ['HOME'] + '/tweet_evaluation/',
help='directory for output files; default is %(default)s')
parser.add_argument('-b','--baseline-input-file',dest='baseline_input_name',default=None,
help='Tweets against which to run a relative analysis')
args = parser.parse_args()
# get the time right now, to use in output naming
time_now = datetime.datetime.now()
output_directory = '{0}/{1:04d}/{2:02d}/{3:02d}/'.format(args.output_directory.rstrip('/')
,time_now.year
,time_now.month
,time_now.day
)
# get the empty results object, which defines the measurements to be run
results = analysis.setup_analysis(do_conversation = args.do_conversation_analysis, do_audience = args.do_audience_analysis)
baseline_results = None
if args.baseline_input_name is not None:
baseline_results = analysis.setup_analysis(do_conversation = args.do_conversation_analysis, do_audience = args.do_audience_analysis)
# manage input sources, file opening, and deserialization
if args.input_file_name is not None:
tweet_generator = analysis.deserialize_tweets(open(args.input_file_name))
else:
tweet_generator = analysis.deserialize_tweets(sys.stdin)
# run analysis
analysis.analyze_tweets(tweet_generator, results)
# run baseline analysis, if requested
if baseline_results is not None:
baseline_tweet_generator = analysis.deserialize_tweets(open(args.baseline_input_name))
analysis.analyze_tweets(baseline_tweet_generator, baseline_results)
results = analysis.compare_results(results,baseline_results)
# dump the output
output.dump_results(results, output_directory, args.unique_identifier)
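# Example invocation (illustrative only; the file and identifier names are
# assumptions): run both analyses on Tweets read from a file and write the
# results under ./results.
# python tweet_evaluator.py -c -a -i tweets.json -n my_campaign -o ./results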
| 42.25 | 140 | 0.72288 |
6a5b61c287644aa1eac5b1af996dc433d21c0841 | 2,621 | py | Python | app.py | admiral-aokiji/whatsapp-bot | 5a0b0d4afddc679cda3670771934cb472629587a | [
"MIT"
] | null | null | null | app.py | admiral-aokiji/whatsapp-bot | 5a0b0d4afddc679cda3670771934cb472629587a | [
"MIT"
] | null | null | null | app.py | admiral-aokiji/whatsapp-bot | 5a0b0d4afddc679cda3670771934cb472629587a | [
"MIT"
] | null | null | null | from flask import Flask, request
import os
from twilio.twiml.messaging_response import MessagingResponse
from selenium import webdriver
chrome_options = webdriver.ChromeOptions()
chrome_options.binary_location = os.environ.get("GOOGLE_CHROME_BIN")
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-dev-shm-usage")
chrome_options.add_argument("--no-sandbox")
driver = webdriver.Chrome(executable_path=os.environ.get("CHROMEDRIVER_PATH"), chrome_options=chrome_options)
app = Flask(__name__)
import utils
if __name__ == "__main__":
app.run(host="localhost", port=5000, debug=True)
| 39.119403 | 217 | 0.645174 |
6a5b876bee110f96f947af456cbf93cb78d5e1bc | 94 | py | Python | nflfastpy/errors.py | hchaozhe/nflfastpy | 11e4894d7fee4ff8baac2c08b000a39308b41143 | [
"MIT"
] | 47 | 2020-10-24T10:10:51.000Z | 2022-03-07T19:48:05.000Z | nflfastpy/errors.py | jbf302/nflfastpy | c1e2365966e0f0f8efeb651be804d84caba57807 | [
"MIT"
] | 3 | 2021-05-03T11:58:00.000Z | 2021-11-14T16:17:30.000Z | nflfastpy/errors.py | jbf302/nflfastpy | c1e2365966e0f0f8efeb651be804d84caba57807 | [
"MIT"
] | 7 | 2020-12-14T15:03:12.000Z | 2021-11-17T23:41:37.000Z | """
Custom exceptions for nflfastpy module
""" | 15.666667 | 38 | 0.755319 |
6a5ce615b33cd197b365d6e3673610f15fbcf59b | 12,289 | py | Python | assignment1/cs231n/classifiers/neural_net.py | zeevikal/CS231n-spring2018 | 50691a947b877047099e7a1fe99a3fdea4a4fcf8 | [
"MIT"
] | null | null | null | assignment1/cs231n/classifiers/neural_net.py | zeevikal/CS231n-spring2018 | 50691a947b877047099e7a1fe99a3fdea4a4fcf8 | [
"MIT"
] | 3 | 2019-12-09T06:04:00.000Z | 2019-12-09T06:05:23.000Z | assignment1/cs231n/classifiers/neural_net.py | zeevikal/CS231n-spring2018 | 50691a947b877047099e7a1fe99a3fdea4a4fcf8 | [
"MIT"
] | null | null | null | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
| 45.854478 | 85 | 0.487509 |
6a5cfd1895fbfd5a40ac1b9716a706c236f16372 | 2,309 | py | Python | dynamic_setting/tests/test_models.py | koralarts/django-dynamic-settings | 8a3c5f44ad71f6d8fb78af9e7a3f5a380dd3d318 | [
"MIT"
] | 2 | 2015-02-11T05:07:19.000Z | 2015-11-24T17:49:03.000Z | dynamic_setting/tests/test_models.py | koralarts/django-dynamic-settings | 8a3c5f44ad71f6d8fb78af9e7a3f5a380dd3d318 | [
"MIT"
] | 1 | 2018-03-02T13:26:08.000Z | 2018-03-02T13:26:08.000Z | dynamic_setting/tests/test_models.py | koralarts/django-dynamic-settings | 8a3c5f44ad71f6d8fb78af9e7a3f5a380dd3d318 | [
"MIT"
] | null | null | null | from django.test import TestCase
from dynamic_setting.models import Setting
| 36.078125 | 80 | 0.628411 |
6a5d7ccdf81701102bd40960b2c34a8fefe0bff7 | 3,973 | py | Python | homeassistant/components/zamg/weather.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/zamg/weather.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/zamg/weather.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Sensor for data from Austrian Zentralanstalt für Meteorologie."""
from __future__ import annotations
import logging
import voluptuous as vol
from homeassistant.components.weather import (
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
PLATFORM_SCHEMA,
WeatherEntity,
)
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
# Reuse data and API logic from the sensor implementation
from .sensor import (
ATTRIBUTION,
CONF_STATION_ID,
ZamgData,
closest_station,
zamg_stations,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION_ID): cv.string,
vol.Inclusive(
CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.longitude,
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the ZAMG weather platform."""
name = config.get(CONF_NAME)
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
station_id = config.get(CONF_STATION_ID) or closest_station(
latitude, longitude, hass.config.config_dir
)
if station_id not in zamg_stations(hass.config.config_dir):
_LOGGER.error(
"Configured ZAMG %s (%s) is not a known station",
CONF_STATION_ID,
station_id,
)
return
probe = ZamgData(station_id=station_id)
try:
probe.update()
except (ValueError, TypeError) as err:
_LOGGER.error("Received error from ZAMG: %s", err)
return
add_entities([ZamgWeather(probe, name)], True)
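# Illustrative configuration sketch (values are assumptions) as it would appear
# in Home Assistant's configuration.yaml for this platform:
# weather:
#   - platform: zamg
#     name: Home
#     station_id: "11035"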
| 28.582734 | 87 | 0.678832 |
6a5e2a2e683b7b168a4a8789ce91b511ae5da26d | 19,403 | py | Python | rasa/model.py | martasls/rasa | 6e535a847f6be0c05e7b89208f16a53d2c478629 | [
"Apache-2.0"
] | null | null | null | rasa/model.py | martasls/rasa | 6e535a847f6be0c05e7b89208f16a53d2c478629 | [
"Apache-2.0"
] | null | null | null | rasa/model.py | martasls/rasa | 6e535a847f6be0c05e7b89208f16a53d2c478629 | [
"Apache-2.0"
] | null | null | null | import copy
import glob
import hashlib
import logging
import os
import shutil
from subprocess import CalledProcessError, DEVNULL, check_output # skipcq:BAN-B404
import tempfile
import typing
from pathlib import Path
from typing import Any, Text, Tuple, Union, Optional, List, Dict, NamedTuple
from packaging import version
from rasa.constants import MINIMUM_COMPATIBLE_VERSION
import rasa.shared.utils.io
import rasa.utils.io
from rasa.cli.utils import create_output_path
from rasa.shared.utils.cli import print_success
from rasa.shared.constants import (
CONFIG_KEYS_CORE,
CONFIG_KEYS_NLU,
CONFIG_KEYS,
DEFAULT_DOMAIN_PATH,
DEFAULT_MODELS_PATH,
DEFAULT_CORE_SUBDIRECTORY_NAME,
DEFAULT_NLU_SUBDIRECTORY_NAME,
)
from rasa.exceptions import ModelNotFound
from rasa.utils.common import TempDirectoryPath
if typing.TYPE_CHECKING:
from rasa.shared.importers.importer import TrainingDataImporter
logger = logging.getLogger(__name__)
# Type alias for the fingerprint
Fingerprint = Dict[Text, Union[Text, List[Text], int, float]]
FINGERPRINT_FILE_PATH = "fingerprint.json"
FINGERPRINT_CONFIG_KEY = "config"
FINGERPRINT_CONFIG_CORE_KEY = "core-config"
FINGERPRINT_CONFIG_NLU_KEY = "nlu-config"
FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY = "config-without-epochs"
FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY = "domain"
FINGERPRINT_NLG_KEY = "nlg"
FINGERPRINT_RASA_VERSION_KEY = "version"
FINGERPRINT_STORIES_KEY = "stories"
FINGERPRINT_NLU_DATA_KEY = "messages"
FINGERPRINT_NLU_LABELS_KEY = "nlu_labels"
FINGERPRINT_PROJECT = "project"
FINGERPRINT_TRAINED_AT_KEY = "trained_at"
SECTION_CORE = Section(
name="Core model",
relevant_keys=[
FINGERPRINT_CONFIG_KEY,
FINGERPRINT_CONFIG_CORE_KEY,
FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY,
FINGERPRINT_STORIES_KEY,
FINGERPRINT_RASA_VERSION_KEY,
],
)
SECTION_NLU = Section(
name="NLU model",
relevant_keys=[
FINGERPRINT_CONFIG_KEY,
FINGERPRINT_CONFIG_NLU_KEY,
FINGERPRINT_NLU_DATA_KEY,
FINGERPRINT_RASA_VERSION_KEY,
],
)
SECTION_NLG = Section(name="NLG responses", relevant_keys=[FINGERPRINT_NLG_KEY])
def get_model(model_path: Text = DEFAULT_MODELS_PATH) -> TempDirectoryPath:
"""Get a model and unpack it. Raises a `ModelNotFound` exception if
no model could be found at the provided path.
Args:
model_path: Path to the zipped model. If it's a directory, the latest
trained model is returned.
Returns:
Path to the unpacked model.
"""
if not model_path:
raise ModelNotFound("No path specified.")
elif not os.path.exists(model_path):
raise ModelNotFound(f"No file or directory at '{model_path}'.")
if os.path.isdir(model_path):
model_path = get_latest_model(model_path)
if not model_path:
raise ModelNotFound(
f"Could not find any Rasa model files in '{model_path}'."
)
elif not model_path.endswith(".tar.gz"):
raise ModelNotFound(f"Path '{model_path}' does not point to a Rasa model file.")
try:
model_relative_path = os.path.relpath(model_path)
except ValueError:
model_relative_path = model_path
logger.info(f"Loading model {model_relative_path}...")
return unpack_model(model_path)
def get_latest_model(model_path: Text = DEFAULT_MODELS_PATH) -> Optional[Text]:
"""Get the latest model from a path.
Args:
model_path: Path to a directory containing zipped models.
Returns:
Path to latest model in the given directory.
"""
if not os.path.exists(model_path) or os.path.isfile(model_path):
model_path = os.path.dirname(model_path)
list_of_files = glob.glob(os.path.join(model_path, "*.tar.gz"))
if len(list_of_files) == 0:
return None
return max(list_of_files, key=os.path.getctime)
def unpack_model(
model_file: Text, working_directory: Optional[Union[Path, Text]] = None
) -> TempDirectoryPath:
"""Unpack a zipped Rasa model.
Args:
model_file: Path to zipped model.
working_directory: Location where the model should be unpacked to.
If `None` a temporary directory will be created.
Returns:
Path to unpacked Rasa model.
"""
import tarfile
if working_directory is None:
working_directory = tempfile.mkdtemp()
# All files are in a subdirectory.
try:
with tarfile.open(model_file, mode="r:gz") as tar:
tar.extractall(working_directory)
logger.debug(f"Extracted model to '{working_directory}'.")
except Exception as e:
logger.error(f"Failed to extract model at {model_file}. Error: {e}")
raise
return TempDirectoryPath(working_directory)
def get_model_subdirectories(
unpacked_model_path: Text,
) -> Tuple[Optional[Text], Optional[Text]]:
"""Return paths for Core and NLU model directories, if they exist.
If neither directories exist, a `ModelNotFound` exception is raised.
Args:
unpacked_model_path: Path to unpacked Rasa model.
Returns:
Tuple (path to Core subdirectory if it exists or `None` otherwise,
path to NLU subdirectory if it exists or `None` otherwise).
"""
core_path = os.path.join(unpacked_model_path, DEFAULT_CORE_SUBDIRECTORY_NAME)
nlu_path = os.path.join(unpacked_model_path, DEFAULT_NLU_SUBDIRECTORY_NAME)
if not os.path.isdir(core_path):
core_path = None
if not os.path.isdir(nlu_path):
nlu_path = None
if not core_path and not nlu_path:
raise ModelNotFound(
"No NLU or Core data for unpacked model at: '{}'.".format(
unpacked_model_path
)
)
return core_path, nlu_path
def create_package_rasa(
training_directory: Text,
output_filename: Text,
fingerprint: Optional[Fingerprint] = None,
) -> Text:
"""Create a zipped Rasa model from trained model files.
Args:
training_directory: Path to the directory which contains the trained
model files.
output_filename: Name of the zipped model file to be created.
fingerprint: A unique fingerprint to identify the model version.
Returns:
Path to zipped model.
"""
import tarfile
if fingerprint:
persist_fingerprint(training_directory, fingerprint)
output_directory = os.path.dirname(output_filename)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
with tarfile.open(output_filename, "w:gz") as tar:
for elem in os.scandir(training_directory):
tar.add(elem.path, arcname=elem.name)
shutil.rmtree(training_directory)
return output_filename
def project_fingerprint() -> Optional[Text]:
"""Create a hash for the project in the current working directory.
Returns:
project hash
"""
try:
remote = check_output( # skipcq:BAN-B607,BAN-B603
["git", "remote", "get-url", "origin"], stderr=DEVNULL
)
return hashlib.sha256(remote).hexdigest()
except (CalledProcessError, OSError):
return None
def fingerprint_from_path(model_path: Text) -> Fingerprint:
"""Load a persisted fingerprint.
Args:
model_path: Path to directory containing the fingerprint.
Returns:
The fingerprint or an empty dict if no fingerprint was found.
"""
if not model_path or not os.path.exists(model_path):
return {}
fingerprint_path = os.path.join(model_path, FINGERPRINT_FILE_PATH)
if os.path.isfile(fingerprint_path):
return rasa.shared.utils.io.read_json_file(fingerprint_path)
else:
return {}
def persist_fingerprint(output_path: Text, fingerprint: Fingerprint):
"""Persist a model fingerprint.
Args:
output_path: Directory in which the fingerprint should be saved.
fingerprint: The fingerprint to be persisted.
"""
path = os.path.join(output_path, FINGERPRINT_FILE_PATH)
rasa.shared.utils.io.dump_obj_as_json_to_file(path, fingerprint)
def did_section_fingerprint_change(
fingerprint1: Fingerprint, fingerprint2: Fingerprint, section: Section
) -> bool:
"""Check whether the fingerprint of a section has changed."""
for k in section.relevant_keys:
if fingerprint1.get(k) != fingerprint2.get(k):
logger.info(f"Data ({k}) for {section.name} section changed.")
return True
return False
def move_model(source: Text, target: Text) -> bool:
"""Move two model directories.
Args:
source: The original folder which should be merged in another.
target: The destination folder where it should be moved to.
Returns:
`True` if the merge was successful, else `False`.
"""
try:
shutil.move(source, target)
return True
except Exception as e:
logging.debug(f"Could not merge model: {e}")
return False
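# NOTE: `FingerprintComparisonResult` is used below but its definition is missing from
# this excerpt. The class below is a minimal hedged stand-in (assumed API) covering only
# the attributes and methods that `should_retrain` and `package_model` rely on.
class FingerprintComparisonResult:
    """Tracks which sub-models have to be retrained after comparing fingerprints."""
    def __init__(
        self,
        core: bool = True,
        nlu: bool = True,
        nlg: bool = True,
        force_training: bool = False,
    ) -> None:
        # Default to retraining everything, e.g. when no old model exists yet.
        self.core = core
        self.nlu = nlu
        self.nlg = nlg
        self.force_training = force_training
    def should_retrain_core(self) -> bool:
        return self.force_training or self.core
    def should_retrain_nlg(self) -> bool:
        return self.should_retrain_core() or self.nlg
    def should_retrain_nlu(self) -> bool:
        return self.force_training or self.nlu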
def should_retrain(
new_fingerprint: Fingerprint,
old_model: Text,
train_path: Text,
has_e2e_examples: bool = False,
force_training: bool = False,
) -> FingerprintComparisonResult:
"""Check which components of a model should be retrained.
Args:
new_fingerprint: The fingerprint of the new model to be trained.
old_model: Path to the old zipped model file.
train_path: Path to the directory in which the new model will be trained.
has_e2e_examples: Whether the new training data contains e2e examples.
force_training: Indicates if the model needs to be retrained even if the data
has not changed.
Returns:
A FingerprintComparisonResult object indicating whether Rasa Core and/or Rasa
NLU needs to be retrained or not.
"""
fingerprint_comparison = FingerprintComparisonResult()
if old_model is None or not os.path.exists(old_model):
return fingerprint_comparison
with unpack_model(old_model) as unpacked:
last_fingerprint = fingerprint_from_path(unpacked)
old_core, old_nlu = get_model_subdirectories(unpacked)
fingerprint_comparison = FingerprintComparisonResult(
core=did_section_fingerprint_change(
last_fingerprint, new_fingerprint, SECTION_CORE
),
nlu=did_section_fingerprint_change(
last_fingerprint, new_fingerprint, SECTION_NLU
),
nlg=did_section_fingerprint_change(
last_fingerprint, new_fingerprint, SECTION_NLG
),
force_training=force_training,
)
# We should retrain core if nlu data changes and there are e2e stories.
if has_e2e_examples and fingerprint_comparison.should_retrain_nlu():
fingerprint_comparison.core = True
core_merge_failed = False
if not fingerprint_comparison.should_retrain_core():
target_path = os.path.join(train_path, DEFAULT_CORE_SUBDIRECTORY_NAME)
core_merge_failed = not move_model(old_core, target_path)
fingerprint_comparison.core = core_merge_failed
if not fingerprint_comparison.should_retrain_nlg() and core_merge_failed:
# If moving the Core model failed, we should also retrain NLG
fingerprint_comparison.nlg = True
if not fingerprint_comparison.should_retrain_nlu():
target_path = os.path.join(train_path, "nlu")
fingerprint_comparison.nlu = not move_model(old_nlu, target_path)
return fingerprint_comparison
def can_finetune(
last_fingerprint: Fingerprint,
new_fingerprint: Fingerprint,
core: bool = False,
nlu: bool = False,
) -> bool:
"""Checks if components of a model can be finetuned with incremental training.
Args:
last_fingerprint: The fingerprint of the old model to potentially be fine-tuned.
new_fingerprint: The fingerprint of the new model.
core: Check sections for finetuning a core model.
nlu: Check sections for finetuning an nlu model.
Returns:
`True` if the old model can be finetuned, `False` otherwise.
"""
section_keys = [
FINGERPRINT_CONFIG_WITHOUT_EPOCHS_KEY,
]
if core:
section_keys.append(FINGERPRINT_DOMAIN_WITHOUT_NLG_KEY)
if nlu:
section_keys.append(FINGERPRINT_NLU_LABELS_KEY)
fingerprint_changed = did_section_fingerprint_change(
last_fingerprint,
new_fingerprint,
Section(name="finetune", relevant_keys=section_keys),
)
old_model_above_min_version = version.parse(
last_fingerprint.get(FINGERPRINT_RASA_VERSION_KEY)
) >= version.parse(MINIMUM_COMPATIBLE_VERSION)
return old_model_above_min_version and not fingerprint_changed
def package_model(
fingerprint: Fingerprint,
output_directory: Text,
train_path: Text,
fixed_model_name: Optional[Text] = None,
model_prefix: Text = "",
) -> Text:
"""
Compress a trained model.
Args:
fingerprint: fingerprint of the model
output_directory: path to the directory in which the model should be stored
train_path: path to uncompressed model
fixed_model_name: name of the compressed model file
model_prefix: prefix of the compressed model file
Returns: path to 'tar.gz' model file
"""
output_directory = create_output_path(
output_directory, prefix=model_prefix, fixed_name=fixed_model_name
)
create_package_rasa(train_path, output_directory, fingerprint)
print_success(
"Your Rasa model is trained and saved at '{}'.".format(
os.path.abspath(output_directory)
)
)
return output_directory
def get_model_for_finetuning(
previous_model_file: Optional[Union[Path, Text]]
) -> Optional[Text]:
"""Gets validated path for model to finetune.
Args:
previous_model_file: Path to model file which should be used for finetuning or
a directory in case the latest trained model should be used.
Returns:
Path to model archive. `None` if there is no model.
"""
if Path(previous_model_file).is_dir():
logger.debug(
f"Trying to load latest model from '{previous_model_file}' for "
f"finetuning."
)
return get_latest_model(previous_model_file)
if Path(previous_model_file).is_file():
return previous_model_file
logger.debug(
"No valid model for finetuning found as directory either "
"contains no model or model file cannot be found."
)
return None
| 31.345719 | 89 | 0.687368 |
6a5e9ccfe0101a01a8c7498e619dd38d0b22d208 | 2,484 | py | Python | algorithmic_trading/backester_framework_test.py | CatalaniCD/quantitative_finance | c752516a43cd80914dcc8411aadd7b15a258d6a4 | [
"MIT"
] | 1 | 2021-08-20T19:17:10.000Z | 2021-08-20T19:17:10.000Z | algorithmic_trading/backester_framework_test.py | CatalaniCD/quantitative_finance | c752516a43cd80914dcc8411aadd7b15a258d6a4 | [
"MIT"
] | null | null | null | algorithmic_trading/backester_framework_test.py | CatalaniCD/quantitative_finance | c752516a43cd80914dcc8411aadd7b15a258d6a4 | [
"MIT"
] | 1 | 2021-10-04T07:44:02.000Z | 2021-10-04T07:44:02.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 16 11:20:01 2021
@author: q
GOAL : develop a backtester from a .py framework / library
# installation :
pip install backtesting
# Documentation
Index :
- Manuals
- Tutorials
- Example Strategies
- FAQ
- License
- API Reference Documentation
source : https://kernc.github.io/backtesting.py/doc/backtesting/
# Features
* Simple, well-documented API
* Blazing fast execution
* Built-in optimizer
* Library of composable base strategies and utilities
* Indicator-library-agnostic
* Supports any financial instrument with candlestick data
* Detailed results
* Interactive visualizations
"""
# =============================================================================
# imports and settings
# =============================================================================
# data handling
import pandas as pd
import numpy as np
# import backtesting and set options
import backtesting
# Set notebook False
backtesting.set_bokeh_output(notebook=False)
from backtesting import Backtest, Strategy
from backtesting.lib import crossover, cross
from backtesting.test import SMA, GOOG
# =============================================================================
# strategy definition
# =============================================================================
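# NOTE: the strategy class referenced in the backtest below is missing from this
# excerpt. This is a minimal hedged sketch, not the original implementation: a
# long-only rule that buys when the close rises above its SMA and exits when it
# falls back below. The window length of 10 is an assumed value.
class PriceAboveSMA(Strategy):
    n = 10  # SMA window (assumption)
    def init(self):
        # Precompute the simple moving average of the closing price
        self.sma = self.I(SMA, self.data.Close, self.n)
    def next(self):
        price = self.data.Close[-1]
        if not self.position and price > self.sma[-1]:
            self.buy()
        elif self.position and price < self.sma[-1]:
            self.position.close()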
# =============================================================================
# Program Execution
# =============================================================================
if __name__ == '__main__':
""" Instantiate the Backtester """
backtester = Backtest(GOOG, PriceAboveSMA, commission=.002,
exclusive_orders=True, cash = 10000)
PLOT = True
""" Run a Single Backtest """
stats = backtester.run()
print(stats)
if PLOT: backtester.plot()
| 25.090909 | 79 | 0.515298 |
6a5edc2a9e4d4da78b37be28e1fdb8023841826f | 2,215 | py | Python | Sec_10_expr_lambdas_fun_integradas/f_generators.py | PauloAlexSilva/Python | 690913cdcfd8bde52d9ddd15e3c838e6aef27730 | [
"MIT"
] | null | null | null | Sec_10_expr_lambdas_fun_integradas/f_generators.py | PauloAlexSilva/Python | 690913cdcfd8bde52d9ddd15e3c838e6aef27730 | [
"MIT"
] | null | null | null | Sec_10_expr_lambdas_fun_integradas/f_generators.py | PauloAlexSilva/Python | 690913cdcfd8bde52d9ddd15e3c838e6aef27730 | [
"MIT"
] | null | null | null | """
Generator Expressions
Covered in earlier lessons:
- List Comprehension;
- Dictionary Comprehension;
- Set Comprehension.
Not covered:
- Tuple Comprehension ... because those are called Generators
nomes = ['Carlos', 'Camila', 'Carla', 'Cristiana', 'Cristina', 'Vanessa']
print(any([nome[0] == 'C' for nome in nomes]))
# The same thing could have been done with a Generator
nomes = ['Carlos', 'Camila', 'Carla', 'Cristiana', 'Cristina', 'Vanessa']
print(any(nome[0] == 'C' for nome in nomes))
# List Comprehension
res = [nome[0] == 'C' for nome in nomes]
print(type(res))
print(res)  # [True, True, True, True, True, False]
# Generator - more efficient
res2 = (nome[0] == 'C' for nome in nomes)
print(type(res2))
print(res2)
# What does getsizeof() do? -> it returns the number of bytes that the element
# passed as a parameter occupies in memory
from sys import getsizeof
# Shows how many bytes the string 'Paulo' occupies in memory.
# The longer the string, the more space it occupies.
print(getsizeof('Paulo'))
print(getsizeof('The longer the string, the more space it occupies.'))
print(getsizeof(9))
print(getsizeof(91))
print(getsizeof(12345667890))
print(getsizeof(True))
from sys import getsizeof
# Building a list of numbers with a List Comprehension
list_comp = getsizeof([x * 10 for x in range(1000)])
# Building a set of numbers with a Set Comprehension
set_comp = getsizeof({x * 10 for x in range(1000)})
# Building a dict of numbers with a Dictionary Comprehension
dic_comp = getsizeof({x: x * 10 for x in range(1000)})
# Building the same numbers with a Generator Expression
gen = getsizeof(x * 10 for x in range(1000))
print('To do the same thing we spend in memory: ')
print(f'List Comprehension: {list_comp} bytes!')
print(f'Set Comprehension: {set_comp} bytes!')
print(f'Dictionary Comprehension: {dic_comp} bytes!')
print(f'Generator Expression: {gen} bytes!')
Sample output:
To do the same thing we spend in memory:
List Comprehension: 8856 bytes!
Set Comprehension: 32984 bytes!
Dictionary Comprehension: 36960 bytes!
Generator Expression: 112 bytes!
"""
# Can we iterate over a Generator Expression? Yes
gen = (x * 10 for x in range(1000))
print(gen)
print(type(gen))
for num in gen:
    print(num)
| 24.340659 | 92 | 0.719187 |
6a5f51cf2ae3a67fb99172b7bd4214f43d0d42bc | 269 | py | Python | python/ordenacao.py | valdirsjr/learning.data | a4b72dfd27f55f2f04120644b73232bf343f71e3 | [
"MIT"
] | null | null | null | python/ordenacao.py | valdirsjr/learning.data | a4b72dfd27f55f2f04120644b73232bf343f71e3 | [
"MIT"
] | null | null | null | python/ordenacao.py | valdirsjr/learning.data | a4b72dfd27f55f2f04120644b73232bf343f71e3 | [
"MIT"
] | null | null | null | numero1 = int(input("Enter the first number: "))
numero2 = int(input("Enter the second number: "))
numero3 = int(input("Enter the third number: "))
if numero1 < numero2 and numero2 < numero3:
    print("ascending")
else:
    print("not in ascending order") | 38.428571 | 50 | 0.69145 |
6a5f7c637685db9897573cf124a2ab2c3a9ea578 | 408 | py | Python | _sources/5-extra/opg-parameters-sneeuwvlok_solution.py | kooi/ippt-od | f1ba44ccfb72e6fcdfdc392fbfbec3e37c47b354 | [
"MIT"
] | 1 | 2018-08-21T21:05:41.000Z | 2018-08-21T21:05:41.000Z | _sources/5-extra/opg-parameters-sneeuwvlok_solution.py | kooi/ippt-od | f1ba44ccfb72e6fcdfdc392fbfbec3e37c47b354 | [
"MIT"
] | null | null | null | _sources/5-extra/opg-parameters-sneeuwvlok_solution.py | kooi/ippt-od | f1ba44ccfb72e6fcdfdc392fbfbec3e37c47b354 | [
"MIT"
] | null | null | null | import turtle
tina = turtle.Turtle()
tina.shape("turtle")
tina.speed(10)
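# NOTE: the original definition of `sneeuwvlok` ("snowflake") is missing from this
# excerpt. The sketch below is an assumed reconstruction that matches the call used
# underneath: it draws `branches` spokes of length `length` around the centre point.
def sneeuwvlok(length, branches):
    for _ in range(branches):
        tina.forward(length)
        tina.backward(length)
        tina.right(360 / branches)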
sneeuwvlok(30, 6)
| 21.473684 | 72 | 0.644608 |
6a5ff44d20ced0eb4ad46edf90219db489f08973 | 5,153 | py | Python | nikola/plugins/task_render_listings.py | servalproject/nikola | 4d78504d93597894f3da4a434dfafdec907601a7 | [
"MIT"
] | 1 | 2015-12-14T21:38:33.000Z | 2015-12-14T21:38:33.000Z | nikola/plugins/task_render_listings.py | servalproject/nikola | 4d78504d93597894f3da4a434dfafdec907601a7 | [
"MIT"
] | null | null | null | nikola/plugins/task_render_listings.py | servalproject/nikola | 4d78504d93597894f3da4a434dfafdec907601a7 | [
"MIT"
] | null | null | null | # Copyright (c) 2012 Roberto Alsina y otros.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import unicode_literals, print_function
import os
from pygments import highlight
from pygments.lexers import get_lexer_for_filename, TextLexer
from pygments.formatters import HtmlFormatter
from nikola.plugin_categories import Task
from nikola import utils
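# NOTE: the plugin class that belongs to this file is missing from this excerpt. The
# helper below is only an illustrative hedged sketch of its core idea -- rendering a
# source file to syntax-highlighted HTML with pygments; the name `render_listing` and
# its signature are assumptions, not the original Nikola implementation.
def render_listing(in_name, out_name):
    with open(in_name, "r") as in_file:
        data = in_file.read()
    try:
        lexer = get_lexer_for_filename(in_name)
    except Exception:
        lexer = TextLexer()  # fall back to plain text when no lexer matches
    code = highlight(data, lexer, HtmlFormatter(cssclass="code", linenos="table"))
    with open(out_name, "w+") as out_file:
        out_file.write(code)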
| 39.335878 | 81 | 0.523578 |
6a60999063f76386f01b79b85ecc655ec0929c57 | 25,232 | py | Python | csld/phonon/head.py | jsyony37/csld | b0e6d5845d807174f24ca7b591bc164c608c99c8 | [
"MIT"
] | null | null | null | csld/phonon/head.py | jsyony37/csld | b0e6d5845d807174f24ca7b591bc164c608c99c8 | [
"MIT"
] | null | null | null | csld/phonon/head.py | jsyony37/csld | b0e6d5845d807174f24ca7b591bc164c608c99c8 | [
"MIT"
] | null | null | null | # Include all modules here in order to cite them
from numpy import *
from numpy.linalg import *
import string
import os
import scipy
import scipy.sparse
#import rwposcar
#import anaxdat
import math
#define touch file
if False:
mkdir("xixi/")
#define rm file
#define check file(1 exist; else0)
#define check the file status (print the status)
#define readallline function
#define write1dmat
#define one number to file
#define write2dmat
#define write2dMTX
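# NOTE: the helpers named in the comments above are not defined in this excerpt. The two
# functions below are assumed reconstructions of write2dMTX/read2dMTX (write/read a dense
# 2-D matrix via the Matrix Market format), just enough for the self-test that follows.
import scipy.io
def write2dMTX(mat, filename):
    scipy.io.mmwrite(filename, scipy.sparse.csr_matrix(mat))
def read2dMTX(filename):
    return scipy.io.mmread(filename).toarray().tolist()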
#test
if False:
Amat=[[0,1],[2,0],[0,0],[0,16]]
print(Amat)
write2dMTX(Amat, "test.mtx")
print(read2dMTX("test.mtx"))
#define read1dmat
#read float
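# NOTE: write1dmat/read1dmat are used in the test below but not defined in this excerpt.
# These are assumed reconstructions: write a 1-D list one value per line, read it back as floats.
def write1dmat(mat, filename):
    with open(filename, "w") as f:
        f.write("\n".join(map(str, mat)) + "\n")
def read1dmat(filename):
    with open(filename) as f:
        return [float(line) for line in f if line.strip()]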
if False:
haha=[1,2,3,4,5]
write1dmat(haha, "haha")
xixi=read1dmat("haha")
print(xixi)
#define read2dmat (this is a relatively fast way: iter or chunk read)
#test
#mat=read2dmat("C-isoo.mat")
#print len(mat)
#print len(mat[0])
#define writeorb(orb)
#test
if False:
SCinfo={'invSCmat': [[-0.25, 0.25, 0.25], [0.25, -0.25, 0.25], [0.25, 0.25, -0.25]], 'SCmat': [[0.0, 2.0, 2.0], [2.0, 0.0, 2.0], [2.0, 2.0, 0.0]], 'SCref': [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0], [1, 1, 1], [1, 1, 2], [1, 2, 1], [1, 2, 2], [2, 1, 1], [2, 1, 2], [2, 2, 1], [2, 2, 2], [2, 2, 3], [2, 3, 2], [3, 2, 2], [3, 3, 3]], 'SCpos': [[0.75, 0.25, 0.5], [0.25, 0.75, 0.5], [0.5, 0.25, 0.75], [0.5, 0.75, 0.25], [0.25, 0.5, 0.75], [0.75, 0.5, 0.25], [0.785, 0.785, 0.0], [0.215, 0.215, 0.0], [0.0, 0.215, 0.215], [0.0, 0.785, 0.785], [0.785, 0.0, 0.785], [0.215, 0.0, 0.215], [0.5239, 0.0, 0.7543], [0.7543, 0.0, 0.5239], [0.4761, 0.2304, 0.4761], [0.2457, 0.7696, 0.2457], [0.5239, 0.7543, 0.0], [0.7543, 0.5239, 0.0], [0.2457, 0.2457, 0.7696], [0.4761, 0.4761, 0.2304], [0.7696, 0.2457, 0.2457], [0.2304, 0.4761, 0.4761], [0.0, 0.5239, 0.7543], [0.0, 0.7543, 0.5239], [0.0, 0.0, 0.0], [0.4636, 0.0, 0.0], [0.0, 0.0, 0.4636], [0.5364, 0.5364, 0.5364], [0.0, 0.4636, 0.0], [0.75, 1.25, 1.5], [0.25, 1.75, 1.5], [0.5, 1.25, 1.75], [0.5, 1.75, 1.25], [0.25, 1.5, 1.75], [0.75, 1.5, 1.25], [0.785, 1.785, 1.0], [0.215, 1.215, 1.0], [0.0, 1.215, 1.215], [0.0, 1.785, 1.785], [0.785, 1.0, 1.785], [0.215, 1.0, 1.215], [0.5239, 1.0, 1.7543], [0.7543, 1.0, 1.5239], [0.4761, 1.2304, 1.4761], [0.2457, 1.7696, 1.2457], [0.5239, 1.7543, 1.0], [0.7543, 1.5239, 1.0], [0.2457, 1.2457, 1.7696], [0.4761, 1.4761, 1.2304], [0.7696, 1.2457, 1.2457], [0.2304, 1.4761, 1.4761], [0.0, 1.5239, 1.7543], [0.0, 1.7543, 1.5239], [0.0, 1.0, 1.0], [0.4636, 1.0, 1.0], [0.0, 1.0, 1.4636], [0.5364, 1.5364, 1.5364], [0.0, 1.4636, 1.0], [1.75, 0.25, 1.5], [1.25, 0.75, 1.5], [1.5, 0.25, 1.75], [1.5, 0.75, 1.25], [1.25, 0.5, 1.75], [1.75, 0.5, 1.25], [1.785, 0.785, 1.0], [1.215, 0.215, 1.0], [1.0, 0.215, 1.215], [1.0, 0.785, 1.785], [1.785, 0.0, 1.785], [1.215, 0.0, 1.215], [1.5239, 0.0, 1.7543], [1.7543, 0.0, 1.5239], [1.4761, 0.2304, 1.4761], [1.2457, 0.7696, 1.2457], [1.5239, 0.7543, 1.0], [1.7543, 0.5239, 1.0], [1.2457, 0.2457, 1.7696], [1.4761, 0.4761, 1.2304], [1.7696, 0.2457, 1.2457], [1.2304, 0.4761, 1.4761], [1.0, 0.5239, 1.7543], [1.0, 0.7543, 1.5239], [1.0, 0.0, 1.0], [1.4636, 0.0, 1.0], [1.0, 0.0, 1.4636], [1.5364, 0.5364, 1.5364], [1.0, 0.4636, 1.0], [1.75, 1.25, 0.5], [1.25, 1.75, 0.5], [1.5, 1.25, 0.75], [1.5, 1.75, 0.25], [1.25, 1.5, 0.75], [1.75, 1.5, 0.25], [1.785, 1.785, 0.0], [1.215, 1.215, 0.0], [1.0, 1.215, 0.215], [1.0, 1.785, 0.785], [1.785, 1.0, 0.785], [1.215, 1.0, 0.215], [1.5239, 1.0, 0.7543], [1.7543, 1.0, 0.5239], [1.4761, 1.2304, 0.4761], [1.2457, 1.7696, 0.2457], [1.5239, 1.7543, 0.0], [1.7543, 1.5239, 0.0], [1.2457, 1.2457, 0.7696], [1.4761, 1.4761, 0.2304], [1.7696, 1.2457, 0.2457], [1.2304, 1.4761, 0.4761], [1.0, 1.5239, 0.7543], [1.0, 1.7543, 0.5239], [1.0, 1.0, 0.0], [1.4636, 1.0, 0.0], [1.0, 1.0, 0.4636], [1.5364, 1.5364, 0.5364], [1.0, 1.4636, 0.0], [1.75, 1.25, 1.5], [1.25, 1.75, 1.5], [1.5, 1.25, 1.75], [1.5, 1.75, 1.25], [1.25, 1.5, 1.75], [1.75, 1.5, 1.25], [1.785, 1.785, 1.0], [1.215, 1.215, 1.0], [1.0, 1.215, 1.215], [1.0, 1.785, 1.785], [1.785, 1.0, 1.785], [1.215, 1.0, 1.215], [1.5239, 1.0, 1.7543], [1.7543, 1.0, 1.5239], [1.4761, 1.2304, 1.4761], [1.2457, 1.7696, 1.2457], [1.5239, 1.7543, 1.0], [1.7543, 1.5239, 1.0], [1.2457, 1.2457, 1.7696], [1.4761, 1.4761, 1.2304], [1.7696, 1.2457, 1.2457], [1.2304, 1.4761, 1.4761], [1.0, 1.5239, 1.7543], [1.0, 1.7543, 1.5239], [1.0, 1.0, 1.0], [1.4636, 1.0, 1.0], [1.0, 1.0, 1.4636], [1.5364, 1.5364, 1.5364], [1.0, 1.4636, 1.0], [1.75, 1.25, 2.5], [1.25, 
1.75, 2.5], [1.5, 1.25, 2.75], [1.5, 1.75, 2.25], [1.25, 1.5, 2.75], [1.75, 1.5, 2.25], [1.785, 1.785, 2.0], [1.215, 1.215, 2.0], [1.0, 1.215, 2.215], [1.0, 1.785, 2.785], [1.785, 1.0, 2.785], [1.215, 1.0, 2.215], [1.5239, 1.0, 2.7543], [1.7543, 1.0, 2.5239], [1.4761, 1.2304, 2.4761], [1.2457, 1.7696, 2.2457], [1.5239, 1.7543, 2.0], [1.7543, 1.5239, 2.0], [1.2457, 1.2457, 2.7696], [1.4761, 1.4761, 2.2304], [1.7696, 1.2457, 2.2457], [1.2304, 1.4761, 2.4761], [1.0, 1.5239, 2.7543], [1.0, 1.7543, 2.5239], [1.0, 1.0, 2.0], [1.4636, 1.0, 2.0], [1.0, 1.0, 2.4636], [1.5364, 1.5364, 2.5364], [1.0, 1.4636, 2.0], [1.75, 2.25, 1.5], [1.25, 2.75, 1.5], [1.5, 2.25, 1.75], [1.5, 2.75, 1.25], [1.25, 2.5, 1.75], [1.75, 2.5, 1.25], [1.785, 2.785, 1.0], [1.215, 2.215, 1.0], [1.0, 2.215, 1.215], [1.0, 2.785, 1.785], [1.785, 2.0, 1.785], [1.215, 2.0, 1.215], [1.5239, 2.0, 1.7543], [1.7543, 2.0, 1.5239], [1.4761, 2.2304, 1.4761], [1.2457, 2.7696, 1.2457], [1.5239, 2.7543, 1.0], [1.7543, 2.5239, 1.0], [1.2457, 2.2457, 1.7696], [1.4761, 2.4761, 1.2304], [1.7696, 2.2457, 1.2457], [1.2304, 2.4761, 1.4761], [1.0, 2.5239, 1.7543], [1.0, 2.7543, 1.5239], [1.0, 2.0, 1.0], [1.4636, 2.0, 1.0], [1.0, 2.0, 1.4636], [1.5364, 2.5364, 1.5364], [1.0, 2.4636, 1.0], [1.75, 2.25, 2.5], [1.25, 2.75, 2.5], [1.5, 2.25, 2.75], [1.5, 2.75, 2.25], [1.25, 2.5, 2.75], [1.75, 2.5, 2.25], [1.785, 2.785, 2.0], [1.215, 2.215, 2.0], [1.0, 2.215, 2.215], [1.0, 2.785, 2.785], [1.785, 2.0, 2.785], [1.215, 2.0, 2.215], [1.5239, 2.0, 2.7543], [1.7543, 2.0, 2.5239], [1.4761, 2.2304, 2.4761], [1.2457, 2.7696, 2.2457], [1.5239, 2.7543, 2.0], [1.7543, 2.5239, 2.0], [1.2457, 2.2457, 2.7696], [1.4761, 2.4761, 2.2304], [1.7696, 2.2457, 2.2457], [1.2304, 2.4761, 2.4761], [1.0, 2.5239, 2.7543], [1.0, 2.7543, 2.5239], [1.0, 2.0, 2.0], [1.4636, 2.0, 2.0], [1.0, 2.0, 2.4636], [1.5364, 2.5364, 2.5364], [1.0, 2.4636, 2.0], [2.75, 1.25, 1.5], [2.25, 1.75, 1.5], [2.5, 1.25, 1.75], [2.5, 1.75, 1.25], [2.25, 1.5, 1.75], [2.75, 1.5, 1.25], [2.785, 1.785, 1.0], [2.215, 1.215, 1.0], [2.0, 1.215, 1.215], [2.0, 1.785, 1.785], [2.785, 1.0, 1.785], [2.215, 1.0, 1.215], [2.5239, 1.0, 1.7543], [2.7543, 1.0, 1.5239], [2.4761, 1.2304, 1.4761], [2.2457, 1.7696, 1.2457], [2.5239, 1.7543, 1.0], [2.7543, 1.5239, 1.0], [2.2457, 1.2457, 1.7696], [2.4761, 1.4761, 1.2304], [2.7696, 1.2457, 1.2457], [2.2304, 1.4761, 1.4761], [2.0, 1.5239, 1.7543], [2.0, 1.7543, 1.5239], [2.0, 1.0, 1.0], [2.4636, 1.0, 1.0], [2.0, 1.0, 1.4636], [2.5364, 1.5364, 1.5364], [2.0, 1.4636, 1.0], [2.75, 1.25, 2.5], [2.25, 1.75, 2.5], [2.5, 1.25, 2.75], [2.5, 1.75, 2.25], [2.25, 1.5, 2.75], [2.75, 1.5, 2.25], [2.785, 1.785, 2.0], [2.215, 1.215, 2.0], [2.0, 1.215, 2.215], [2.0, 1.785, 2.785], [2.785, 1.0, 2.785], [2.215, 1.0, 2.215], [2.5239, 1.0, 2.7543], [2.7543, 1.0, 2.5239], [2.4761, 1.2304, 2.4761], [2.2457, 1.7696, 2.2457], [2.5239, 1.7543, 2.0], [2.7543, 1.5239, 2.0], [2.2457, 1.2457, 2.7696], [2.4761, 1.4761, 2.2304], [2.7696, 1.2457, 2.2457], [2.2304, 1.4761, 2.4761], [2.0, 1.5239, 2.7543], [2.0, 1.7543, 2.5239], [2.0, 1.0, 2.0], [2.4636, 1.0, 2.0], [2.0, 1.0, 2.4636], [2.5364, 1.5364, 2.5364], [2.0, 1.4636, 2.0], [2.75, 2.25, 1.5], [2.25, 2.75, 1.5], [2.5, 2.25, 1.75], [2.5, 2.75, 1.25], [2.25, 2.5, 1.75], [2.75, 2.5, 1.25], [2.785, 2.785, 1.0], [2.215, 2.215, 1.0], [2.0, 2.215, 1.215], [2.0, 2.785, 1.785], [2.785, 2.0, 1.785], [2.215, 2.0, 1.215], [2.5239, 2.0, 1.7543], [2.7543, 2.0, 1.5239], [2.4761, 2.2304, 1.4761], [2.2457, 2.7696, 1.2457], [2.5239, 2.7543, 1.0], [2.7543, 2.5239, 1.0], [2.2457, 
2.2457, 1.7696], [2.4761, 2.4761, 1.2304], [2.7696, 2.2457, 1.2457], [2.2304, 2.4761, 1.4761], [2.0, 2.5239, 1.7543], [2.0, 2.7543, 1.5239], [2.0, 2.0, 1.0], [2.4636, 2.0, 1.0], [2.0, 2.0, 1.4636], [2.5364, 2.5364, 1.5364], [2.0, 2.4636, 1.0], [2.75, 2.25, 2.5], [2.25, 2.75, 2.5], [2.5, 2.25, 2.75], [2.5, 2.75, 2.25], [2.25, 2.5, 2.75], [2.75, 2.5, 2.25], [2.785, 2.785, 2.0], [2.215, 2.215, 2.0], [2.0, 2.215, 2.215], [2.0, 2.785, 2.785], [2.785, 2.0, 2.785], [2.215, 2.0, 2.215], [2.5239, 2.0, 2.7543], [2.7543, 2.0, 2.5239], [2.4761, 2.2304, 2.4761], [2.2457, 2.7696, 2.2457], [2.5239, 2.7543, 2.0], [2.7543, 2.5239, 2.0], [2.2457, 2.2457, 2.7696], [2.4761, 2.4761, 2.2304], [2.7696, 2.2457, 2.2457], [2.2304, 2.4761, 2.4761], [2.0, 2.5239, 2.7543], [2.0, 2.7543, 2.5239], [2.0, 2.0, 2.0], [2.4636, 2.0, 2.0], [2.0, 2.0, 2.4636], [2.5364, 2.5364, 2.5364], [2.0, 2.4636, 2.0], [2.75, 2.25, 3.5], [2.25, 2.75, 3.5], [2.5, 2.25, 3.75], [2.5, 2.75, 3.25], [2.25, 2.5, 3.75], [2.75, 2.5, 3.25], [2.785, 2.785, 3.0], [2.215, 2.215, 3.0], [2.0, 2.215, 3.215], [2.0, 2.785, 3.785], [2.785, 2.0, 3.785], [2.215, 2.0, 3.215], [2.5239, 2.0, 3.7543], [2.7543, 2.0, 3.5239], [2.4761, 2.2304, 3.4761], [2.2457, 2.7696, 3.2457], [2.5239, 2.7543, 3.0], [2.7543, 2.5239, 3.0], [2.2457, 2.2457, 3.7696], [2.4761, 2.4761, 3.2304], [2.7696, 2.2457, 3.2457], [2.2304, 2.4761, 3.4761], [2.0, 2.5239, 3.7543], [2.0, 2.7543, 3.5239], [2.0, 2.0, 3.0], [2.4636, 2.0, 3.0], [2.0, 2.0, 3.4636], [2.5364, 2.5364, 3.5364], [2.0, 2.4636, 3.0], [2.75, 3.25, 2.5], [2.25, 3.75, 2.5], [2.5, 3.25, 2.75], [2.5, 3.75, 2.25], [2.25, 3.5, 2.75], [2.75, 3.5, 2.25], [2.785, 3.785, 2.0], [2.215, 3.215, 2.0], [2.0, 3.215, 2.215], [2.0, 3.785, 2.785], [2.785, 3.0, 2.785], [2.215, 3.0, 2.215], [2.5239, 3.0, 2.7543], [2.7543, 3.0, 2.5239], [2.4761, 3.2304, 2.4761], [2.2457, 3.7696, 2.2457], [2.5239, 3.7543, 2.0], [2.7543, 3.5239, 2.0], [2.2457, 3.2457, 2.7696], [2.4761, 3.4761, 2.2304], [2.7696, 3.2457, 2.2457], [2.2304, 3.4761, 2.4761], [2.0, 3.5239, 2.7543], [2.0, 3.7543, 2.5239], [2.0, 3.0, 2.0], [2.4636, 3.0, 2.0], [2.0, 3.0, 2.4636], [2.5364, 3.5364, 2.5364], [2.0, 3.4636, 2.0], [3.75, 2.25, 2.5], [3.25, 2.75, 2.5], [3.5, 2.25, 2.75], [3.5, 2.75, 2.25], [3.25, 2.5, 2.75], [3.75, 2.5, 2.25], [3.785, 2.785, 2.0], [3.215, 2.215, 2.0], [3.0, 2.215, 2.215], [3.0, 2.785, 2.785], [3.785, 2.0, 2.785], [3.215, 2.0, 2.215], [3.5239, 2.0, 2.7543], [3.7543, 2.0, 2.5239], [3.4761, 2.2304, 2.4761], [3.2457, 2.7696, 2.2457], [3.5239, 2.7543, 2.0], [3.7543, 2.5239, 2.0], [3.2457, 2.2457, 2.7696], [3.4761, 2.4761, 2.2304], [3.7696, 2.2457, 2.2457], [3.2304, 2.4761, 2.4761], [3.0, 2.5239, 2.7543], [3.0, 2.7543, 2.5239], [3.0, 2.0, 2.0], [3.4636, 2.0, 2.0], [3.0, 2.0, 2.4636], [3.5364, 2.5364, 2.5364], [3.0, 2.4636, 2.0], [3.75, 3.25, 3.5], [3.25, 3.75, 3.5], [3.5, 3.25, 3.75], [3.5, 3.75, 3.25], [3.25, 3.5, 3.75], [3.75, 3.5, 3.25], [3.785, 3.785, 3.0], [3.215, 3.215, 3.0], [3.0, 3.215, 3.215], [3.0, 3.785, 3.785], [3.785, 3.0, 3.785], [3.215, 3.0, 3.215], [3.5239, 3.0, 3.7543], [3.7543, 3.0, 3.5239], [3.4761, 3.2304, 3.4761], [3.2457, 3.7696, 3.2457], [3.5239, 3.7543, 3.0], [3.7543, 3.5239, 3.0], [3.2457, 3.2457, 3.7696], [3.4761, 3.4761, 3.2304], [3.7696, 3.2457, 3.2457], [3.2304, 3.4761, 3.4761], [3.0, 3.5239, 3.7543], [3.0, 3.7543, 3.5239], [3.0, 3.0, 3.0], [3.4636, 3.0, 3.0], [3.0, 3.0, 3.4636], [3.5364, 3.5364, 3.5364], [3.0, 3.4636, 3.0]], 'SC': [[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]], 'order': [81, 33, 1, 65, 49, 17, 137, 129, 97, 105, 121, 113, 
274, 285, 257, 363, 219, 213, 298, 193, 333, 225, 250, 243, 385, 461, 442, 401, 451, 85, 37, 5, 69, 53, 21, 141, 133, 101, 109, 125, 117, 278, 281, 261, 367, 223, 209, 302, 197, 329, 229, 254, 247, 389, 457, 446, 405, 455, 83, 35, 3, 67, 51, 19, 139, 131, 99, 107, 123, 115, 276, 287, 259, 361, 217, 215, 300, 195, 335, 227, 252, 241, 387, 463, 444, 403, 449, 82, 34, 2, 66, 50, 18, 138, 130, 98, 106, 122, 114, 273, 286, 258, 364, 220, 214, 297, 194, 334, 226, 249, 244, 386, 462, 441, 402, 452, 43, 93, 75, 10, 29, 58, 186, 177, 145, 157, 171, 161, 371, 379, 353, 265, 314, 306, 201, 289, 233, 321, 349, 341, 393, 425, 409, 433, 417, 87, 39, 7, 71, 55, 23, 143, 135, 103, 111, 127, 119, 280, 283, 263, 365, 221, 211, 304, 199, 331, 231, 256, 245, 391, 459, 448, 407, 453, 86, 38, 6, 70, 54, 22, 142, 134, 102, 110, 126, 118, 277, 282, 262, 368, 224, 210, 301, 198, 330, 230, 253, 248, 390, 458, 445, 406, 456, 47, 89, 79, 14, 25, 62, 190, 181, 149, 153, 175, 165, 375, 383, 357, 269, 318, 310, 205, 293, 237, 325, 345, 337, 397, 429, 413, 437, 421, 84, 36, 4, 68, 52, 20, 140, 132, 100, 108, 124, 116, 275, 288, 260, 362, 218, 216, 299, 196, 336, 228, 251, 242, 388, 464, 443, 404, 450, 41, 95, 73, 12, 31, 60, 188, 179, 147, 159, 169, 163, 369, 377, 355, 267, 316, 308, 203, 291, 235, 323, 351, 343, 395, 427, 411, 435, 419, 44, 94, 76, 9, 30, 57, 185, 178, 146, 158, 172, 162, 372, 380, 354, 266, 313, 305, 202, 290, 234, 322, 350, 342, 394, 426, 410, 434, 418, 88, 40, 8, 72, 56, 24, 144, 136, 104, 112, 128, 120, 279, 284, 264, 366, 222, 212, 303, 200, 332, 232, 255, 246, 392, 460, 447, 408, 454, 45, 91, 77, 16, 27, 64, 192, 183, 151, 155, 173, 167, 373, 381, 359, 271, 320, 312, 207, 295, 239, 327, 347, 339, 399, 431, 415, 439, 423, 48, 90, 80, 13, 26, 61, 189, 182, 150, 154, 176, 166, 376, 384, 358, 270, 317, 309, 206, 294, 238, 326, 346, 338, 398, 430, 414, 438, 422, 42, 96, 74, 11, 32, 59, 187, 180, 148, 160, 170, 164, 370, 378, 356, 268, 315, 307, 204, 292, 236, 324, 352, 344, 396, 428, 412, 436, 420, 46, 92, 78, 15, 28, 63, 191, 184, 152, 156, 174, 168, 374, 382, 360, 272, 319, 311, 208, 296, 240, 328, 348, 340, 400, 432, 416, 440, 424], 'invSC': [[0.5, 0.0, 0.0], [0.0, 0.5, 0.0], [0.0, 0.0, 0.5]]}
writeSCinfo(SCinfo, "SCinfo")
haha=readSCinfo("SCinfo")
print(haha['SC'])
print(haha['invSC'])
print(haha['SCref'])
print(haha['SCpos'])
print(haha['SCmat'])
print(haha['invSCmat'])
print(haha['order'])
#writeclus(clus,"uniqueC")
#print "\n".join(map(str, readclus("uniqueC")))
#test
if False:
orbset=[[[[[0.75, 0.25, 0.5]], 1, 1, [[[0.0, 0.0, 0.0], 1]]], [[[0.75, 0.5, 0.25]], 1, 2, [[[0.0, 0.0, 0.0], 6]]], [[[0.5, 0.25, -0.25]], 1, 3, [[[0.0, 0.0, -1.0], 3]]], [[[0.25, -0.25, -0.5]], 1, 4, [[[0.0, -1.0, -1.0], 2]]], [[[0.5, -0.25, 0.25]], 1, 5, [[[0.0, -1.0, 0.0], 4]]], [[[0.25, -0.5, -0.25]], 1, 6, [[[0.0, -1.0, -1.0], 5]]]],[[[[0.7696, 0.2457, 0.2457], [0.0, -0.215, -0.215]], 42, 1, [[[0.0, 0.0, 0.0], 21], [[0.0, -1.0, -1.0], 10]]], [[[0.5238999999999999, 0.0, -0.2457], [0.215, 0.0, 0.215]], 42, 3, [[[-0.0, 0.0, -1.0], 13], [[0.0, 0.0, 0.0], 12]]], [[[0.5238999999999999, -0.2457, 0.0], [0.215, 0.215, 0.0]], 42, 5, [[[-0.0, -1.0, 0.0], 17], [[0.0, 0.0, 0.0], 8]]], [[[-0.2457, 0.0, 0.5238999999999999], [0.215, 0.0, 0.215]], 42, 7, [[[-1.0, 0.0, -0.0], 14], [[0.0, 0.0, 0.0], 12]]], [[[0.2457, 0.2457, 0.7696], [-0.215, -0.215, 0.0]], 42, 9, [[[0.0, 0.0, 0.0], 19], [[-1.0, -1.0, 0.0], 7]]], [[[0.0, -0.2457, 0.5238999999999999], [0.0, 0.215, 0.215]], 42, 11, [[[0.0, -1.0, -0.0], 24], [[0.0, 0.0, 0.0], 9]]], [[[-0.7696, -0.5238999999999999, -0.5238999999999999], [0.0, -0.215, -0.215]], 42, 13, [[[-1.0, -1.0, -1.0], 22], [[0.0, -1.0, -1.0], 10]]], [[[-0.5238999999999999, -0.5238999999999999, -0.7696], [-0.215, -0.215, 0.0]], 42, 15, [[[-1.0, -1.0, -1.0], 20], [[-1.0, -1.0, 0.0], 7]]], [[[-0.5238999999999999, -0.7696, -0.5238999999999999], [-0.215, 0.0, -0.215]], 42, 17, [[[-1.0, -1.0, -1.0], 15], [[-1.0, 0.0, -1.0], 11]]], [[[-0.2457, 0.5238999999999999, 0.0], [0.215, 0.215, 0.0]], 42, 19, [[[-1.0, -0.0, 0.0], 18], [[0.0, 0.0, 0.0], 8]]], [[[0.2457, 0.7696, 0.2457], [-0.215, 0.0, -0.215]], 42, 21, [[[0.0, 0.0, 0.0], 16], [[-1.0, 0.0, -1.0], 11]]], [[[0.0, 0.5238999999999999, -0.2457], [0.0, 0.215, 0.215]], 42, 23, [[[0.0, -0.0, -1.0], 23], [[0.0, 0.0, 0.0], 9]]]]]
print("\n".join(map(str,orbset)))
writeorb(orbset,"test-orb")
print("\n")
print("\n".join(map(str,readorb("test-orb"))))
#def read fit.ou
#test:
if False:
print(readfit("fit.out-mu1"))
| 65.537662 | 12,884 | 0.507411 |
6a60c251c96da7b05351011b63ba88125eca7fb7 | 9,790 | py | Python | sdk/python/pulumi_azure_native/storage/storage_account_static_website.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/storage/storage_account_static_website.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/storage/storage_account_static_website.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['StorageAccountStaticWebsiteArgs', 'StorageAccountStaticWebsite']
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
error404_document: Optional[pulumi.Input[str]] = None,
index_document: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = StorageAccountStaticWebsiteArgs.__new__(StorageAccountStaticWebsiteArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["error404_document"] = error404_document
__props__.__dict__["index_document"] = index_document
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["container_name"] = None
super(StorageAccountStaticWebsite, __self__).__init__(
'azure-native:storage:StorageAccountStaticWebsite',
resource_name,
__props__,
opts)
| 48.226601 | 199 | 0.674157 |
6a61c6ef3ad58f9b8003931de1870b0f5ad404c7 | 1,247 | py | Python | python/example_code/s3/s3-python-example-get-bucket-policy.py | onehitcombo/aws-doc-sdk-examples | 03e2e0c5dee75c5decbbb99e849c51417521fd82 | [
"Apache-2.0"
] | 3 | 2021-01-19T20:23:17.000Z | 2021-01-19T21:38:59.000Z | python/example_code/s3/s3-python-example-get-bucket-policy.py | onehitcombo/aws-doc-sdk-examples | 03e2e0c5dee75c5decbbb99e849c51417521fd82 | [
"Apache-2.0"
] | null | null | null | python/example_code/s3/s3-python-example-get-bucket-policy.py | onehitcombo/aws-doc-sdk-examples | 03e2e0c5dee75c5decbbb99e849c51417521fd82 | [
"Apache-2.0"
] | 2 | 2019-12-27T13:58:00.000Z | 2020-05-21T18:35:40.000Z | # Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an S3 client
s3 = boto3.client('s3')
# Call to S3 to retrieve the policy for the given bucket
result = s3.get_bucket_policy(Bucket='my-bucket')
print(result)
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[s3-python-example-get-bucket-policy.py demonstrates how to retrieve the bucket policy of an Amazon S3 bucket.]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[Amazon S3]
# snippet-service:[s3]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2018-06-25]
# snippet-sourceauthor:[jschwarzwalder (AWS)]
| 35.628571 | 133 | 0.735365 |
6a61f1e1f810996e1c76609bf6e7fcc907c4da57 | 2,020 | py | Python | lang/py/aingle/test/gen_interop_data.py | AIngleLab/aae | 6e95f89fad60e62bb5305afe97c72f3278d8e04b | [
"Apache-2.0"
] | null | null | null | lang/py/aingle/test/gen_interop_data.py | AIngleLab/aae | 6e95f89fad60e62bb5305afe97c72f3278d8e04b | [
"Apache-2.0"
] | null | null | null | lang/py/aingle/test/gen_interop_data.py | AIngleLab/aae | 6e95f89fad60e62bb5305afe97c72f3278d8e04b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import aingle.codecs
import aingle.datafile
import aingle.io
import aingle.schema
NULL_CODEC = "null"
CODECS_TO_VALIDATE = aingle.codecs.KNOWN_CODECS.keys()
DATUM = {
"intField": 12,
"longField": 15234324,
"stringField": "hey",
"boolField": True,
"floatField": 1234.0,
"doubleField": -1234.0,
"bytesField": b"12312adf",
"nullField": None,
"arrayField": [5.0, 0.0, 12.0],
"mapField": {"a": {"label": "a"}, "bee": {"label": "cee"}},
"unionField": 12.0,
"enumField": "C",
"fixedField": b"1019181716151413",
"recordField": {"label": "blah", "children": [{"label": "inner", "children": []}]},
}
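# NOTE: the original `generate` implementation is missing from this excerpt. The sketch
# below is an assumed reconstruction: it writes DATUM into one container file per known
# codec, deriving per-codec file names from the requested output path.
def generate(schema_path, output_path):
    with open(schema_path) as schema_file:
        interop_schema = aingle.schema.parse(schema_file.read())
    for codec in CODECS_TO_VALIDATE:
        filename = output_path
        if codec != NULL_CODEC:
            base, ext = os.path.splitext(output_path)
            filename = f"{base}_{codec}{ext}"
        with aingle.datafile.DataFileWriter(
            open(filename, "wb"), aingle.io.DatumWriter(), interop_schema, codec=codec
        ) as dfw:
            dfw.append(DATUM)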
if __name__ == "__main__":
generate(sys.argv[1], sys.argv[2])
| 31.5625 | 127 | 0.681188 |
6a630004921c5a5ff2ec4e4b2d0a96b0bf000baa | 897 | py | Python | data_io/util/value_blob_erosion.py | Rekrau/PyGreentea | 457d7dc5be12b15c3c7663ceaf6d74301de56e43 | [
"BSD-2-Clause"
] | null | null | null | data_io/util/value_blob_erosion.py | Rekrau/PyGreentea | 457d7dc5be12b15c3c7663ceaf6d74301de56e43 | [
"BSD-2-Clause"
] | 4 | 2016-04-22T15:39:21.000Z | 2016-11-15T21:23:58.000Z | data_io/util/value_blob_erosion.py | Rekrau/PyGreentea | 457d7dc5be12b15c3c7663ceaf6d74301de56e43 | [
"BSD-2-Clause"
] | 4 | 2017-05-12T00:17:55.000Z | 2019-07-01T19:23:32.000Z | import numpy as np
from scipy import ndimage
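# NOTE: the module body is missing from this excerpt. Given the file name and imports,
# the function below is a purely illustrative hedged sketch of per-value blob erosion
# (erode each labelled value's mask independently); its name and signature are assumptions.
def erode_value_blobs(array, steps=1, values_to_ignore=(0,)):
    result = np.zeros_like(array)
    for value in np.unique(array):
        if value in values_to_ignore:
            continue
        mask = array == value
        eroded = ndimage.binary_erosion(mask, iterations=steps)
        result[eroded] = value
    return result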
| 42.714286 | 101 | 0.733556 |
6a631c95edefbd6ccab71b999ffa359886535e5b | 32,032 | py | Python | astropy/units/tests/test_logarithmic.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 3 | 2018-03-20T15:09:16.000Z | 2021-05-27T11:17:33.000Z | astropy/units/tests/test_logarithmic.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | astropy/units/tests/test_logarithmic.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # coding: utf-8
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Test the Logarithmic Units and Quantities
"""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
from ...extern import six
from ...extern.six.moves import zip
import pickle
import itertools
import pytest
import numpy as np
from numpy.testing.utils import assert_allclose
from ...tests.helper import assert_quantity_allclose
from ... import units as u, constants as c
lu_units = [u.dex, u.mag, u.decibel]
lu_subclasses = [u.DexUnit, u.MagUnit, u.DecibelUnit]
lq_subclasses = [u.Dex, u.Magnitude, u.Decibel]
pu_sample = (u.dimensionless_unscaled, u.m, u.g/u.s**2, u.Jy)
def test_predefined_string_roundtrip():
"""Ensure roundtripping; see #5015"""
with u.magnitude_zero_points.enable():
assert u.Unit(u.STmag.to_string()) == u.STmag
assert u.Unit(u.ABmag.to_string()) == u.ABmag
assert u.Unit(u.M_bol.to_string()) == u.M_bol
assert u.Unit(u.m_bol.to_string()) == u.m_bol
def test_inequality():
"""Check __ne__ works (regresssion for #5342)."""
lu1 = u.mag(u.Jy)
lu2 = u.dex(u.Jy)
lu3 = u.mag(u.Jy**2)
lu4 = lu3 - lu1
assert lu1 != lu2
assert lu1 != lu3
assert lu1 == lu4
def test_unit_multiple_possible_equivalencies(self):
lu = u.mag(u.Jy)
assert lu.is_equivalent(pu_sample)
class TestLogUnitArithmetic(object):
def test_multiplication_division(self):
"""Check that multiplication/division with other units is only
possible when the physical unit is dimensionless, and that this
turns the unit into a normal one."""
lu1 = u.mag(u.Jy)
with pytest.raises(u.UnitsError):
lu1 * u.m
with pytest.raises(u.UnitsError):
u.m * lu1
with pytest.raises(u.UnitsError):
lu1 / lu1
for unit in (u.dimensionless_unscaled, u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lu1 / unit
lu2 = u.mag(u.dimensionless_unscaled)
with pytest.raises(u.UnitsError):
lu2 * lu1
with pytest.raises(u.UnitsError):
lu2 / lu1
# But dimensionless_unscaled can be cancelled.
assert lu2 / lu2 == u.dimensionless_unscaled
# With dimensionless, normal units are OK, but we return a plain unit.
tf = lu2 * u.m
tr = u.m * lu2
for t in (tf, tr):
assert not isinstance(t, type(lu2))
assert t == lu2.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lu2.physical_unit)
# Now we essentially have a LogUnit with a prefactor of 100,
# so should be equivalent again.
t = tf / u.cm
with u.set_enabled_equivalencies(u.logarithmic()):
assert t.is_equivalent(lu2.function_unit)
assert_allclose(t.to(u.dimensionless_unscaled, np.arange(3.)/100.),
lu2.to(lu2.physical_unit, np.arange(3.)))
# If we effectively remove lu1, a normal unit should be returned.
t2 = tf / lu2
assert not isinstance(t2, type(lu2))
assert t2 == u.m
t3 = tf / lu2.function_unit
assert not isinstance(t3, type(lu2))
assert t3 == u.m
# For completeness, also ensure non-sensical operations fail
with pytest.raises(TypeError):
lu1 * object()
with pytest.raises(TypeError):
slice(None) * lu1
with pytest.raises(TypeError):
lu1 / []
with pytest.raises(TypeError):
1 / lu1
def test_addition_subtraction_to_non_units_fails(self):
lu1 = u.mag(u.Jy)
with pytest.raises(TypeError):
lu1 + 1.
with pytest.raises(TypeError):
lu1 - [1., 2., 3.]
def test_complicated_addition_subtraction(self):
"""for fun, a more complicated example of addition and subtraction"""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
lu_dm = u.mag(dm0)
lu_absST = u.STmag - lu_dm
assert lu_absST.is_equivalent(u.erg/u.s/u.AA)
def test_pickle():
lu1 = u.dex(u.cm/u.s**2)
s = pickle.dumps(lu1)
lu2 = pickle.loads(s)
assert lu1 == lu2
def test_hashable():
lu1 = u.dB(u.mW)
lu2 = u.dB(u.m)
lu3 = u.dB(u.mW)
assert hash(lu1) != hash(lu2)
assert hash(lu1) == hash(lu3)
luset = {lu1, lu2, lu3}
assert len(luset) == 2
class TestLogQuantityCreation(object):
def test_from_view(self):
# Cannot view a physical quantity as a function quantity, since the
# values would change.
q = [100., 1000.] * u.cm/u.s**2
with pytest.raises(TypeError):
q.view(u.Dex)
# But fine if we have the right magnitude.
q = [2., 3.] * u.dex
lq = q.view(u.Dex)
assert isinstance(lq, u.Dex)
assert lq.unit.physical_unit == u.dimensionless_unscaled
assert np.all(q == lq)
def test_using_quantity_class(self):
"""Check that we can use Quantity if we have subok=True"""
# following issue #5851
lu = u.dex(u.AA)
with pytest.raises(u.UnitTypeError):
u.Quantity(1., lu)
q = u.Quantity(1., lu, subok=True)
assert type(q) is lu._quantity_class
def test_conversion_to_and_from_physical_quantities():
"""Ensures we can convert from regular quantities."""
mst = [10., 12., 14.] * u.STmag
flux_lambda = mst.physical
mst_roundtrip = flux_lambda.to(u.STmag)
# check we return a logquantity; see #5178.
assert isinstance(mst_roundtrip, u.Magnitude)
assert mst_roundtrip.unit == mst.unit
assert_allclose(mst_roundtrip.value, mst.value)
wave = [4956.8, 4959.55, 4962.3] * u.AA
flux_nu = mst.to(u.Jy, equivalencies=u.spectral_density(wave))
mst_roundtrip2 = flux_nu.to(u.STmag, u.spectral_density(wave))
assert isinstance(mst_roundtrip2, u.Magnitude)
assert mst_roundtrip2.unit == mst.unit
assert_allclose(mst_roundtrip2.value, mst.value)
def test_quantity_decomposition():
lq = 10.*u.mag(u.Jy)
assert lq.decompose() == lq
assert lq.decompose().unit.physical_unit.bases == [u.kg, u.s]
assert lq.si == lq
assert lq.si.unit.physical_unit.bases == [u.kg, u.s]
assert lq.cgs == lq
assert lq.cgs.unit.physical_unit.bases == [u.g, u.s]
class TestLogQuantityViews(object):
    pass  # test methods omitted in this excerpt
class TestLogQuantitySlicing(object):
    pass  # test methods omitted in this excerpt
class TestLogQuantityArithmetic(object):
def test_multiplication_division(self):
"""Check that multiplication/division with other quantities is only
possible when the physical unit is dimensionless, and that this turns
the result into a normal quantity."""
lq = u.Magnitude(np.arange(1., 11.)*u.Jy)
with pytest.raises(u.UnitsError):
lq * (1.*u.m)
with pytest.raises(u.UnitsError):
(1.*u.m) * lq
with pytest.raises(u.UnitsError):
lq / lq
for unit in (u.m, u.mag, u.dex):
with pytest.raises(u.UnitsError):
lq / unit
lq2 = u.Magnitude(np.arange(1, 11.))
with pytest.raises(u.UnitsError):
lq2 * lq
with pytest.raises(u.UnitsError):
lq2 / lq
with pytest.raises(u.UnitsError):
lq / lq2
# but dimensionless_unscaled can be cancelled
r = lq2 / u.Magnitude(2.)
assert r.unit == u.dimensionless_unscaled
assert np.all(r.value == lq2.value/2.)
# with dimensionless, normal units OK, but return normal quantities
tf = lq2 * u.m
tr = u.m * lq2
for t in (tf, tr):
assert not isinstance(t, type(lq2))
assert t.unit == lq2.unit.function_unit * u.m
with u.set_enabled_equivalencies(u.logarithmic()):
with pytest.raises(u.UnitsError):
t.to(lq2.unit.physical_unit)
t = tf / (50.*u.cm)
# now we essentially have the same quantity but with a prefactor of 2
assert t.unit.is_equivalent(lq2.unit.function_unit)
assert_allclose(t.to(lq2.unit.function_unit), lq2._function_view*2)
def test_complicated_addition_subtraction(self):
"""For fun, a more complicated example of addition and subtraction."""
dm0 = u.Unit('DM', 1./(4.*np.pi*(10.*u.pc)**2))
DMmag = u.mag(dm0)
m_st = 10. * u.STmag
dm = 5. * DMmag
M_st = m_st - dm
assert M_st.unit.is_equivalent(u.erg/u.s/u.AA)
assert np.abs(M_st.physical /
(m_st.physical*4.*np.pi*(100.*u.pc)**2) - 1.) < 1.e-15
class TestLogQuantityComparisons(object):
    pass  # test methods omitted in this excerpt
class TestLogQuantityMethods(object):
    pass  # test methods omitted in this excerpt
| 37.031214 | 80 | 0.59094 |
6a64620ee9819bca0e28e6f332c50299811770b5 | 13,981 | py | Python | djconnectwise/tests/mocks.py | kti-sam/django-connectwise | 28484faad9435892a46b8ce4a3c957f64c299971 | [
"MIT"
] | null | null | null | djconnectwise/tests/mocks.py | kti-sam/django-connectwise | 28484faad9435892a46b8ce4a3c957f64c299971 | [
"MIT"
] | null | null | null | djconnectwise/tests/mocks.py | kti-sam/django-connectwise | 28484faad9435892a46b8ce4a3c957f64c299971 | [
"MIT"
] | null | null | null | import os
from mock import patch
from datetime import datetime, date, time
import json
import responses
from . import fixtures
from django.utils import timezone
CW_MEMBER_IMAGE_FILENAME = 'AnonymousMember.png'
def create_mock_call(method_name, return_value, side_effect=None):
"""Utility function for mocking the specified function or method"""
_patch = patch(method_name, side_effect=side_effect)
mock_get_call = _patch.start()
if not side_effect:
mock_get_call.return_value = return_value
return mock_get_call, _patch
def get(url, data, headers=None, status=200):
"""Set up requests mock for given URL and JSON-serializable data."""
get_raw(url, json.dumps(data), "application/json", headers, status=status)
def get_raw(url, data, content_type="application/octet-stream", headers=None,
status=200):
"""Set up requests mock for given URL."""
responses.add(
responses.GET,
url,
body=data,
status=status,
content_type=content_type,
adding_headers=headers,
)
def get_member_avatar():
"""Return the avatar image data in the tests directory."""
cw_member_image_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
CW_MEMBER_IMAGE_FILENAME
)
with open(cw_member_image_path, 'rb') as anonymous_image_file:
return anonymous_image_file.read()
| 36.126615 | 78 | 0.800801 |
6a648d570a29d5a4d4e0f9f5bffd72aadfab36cb | 2,632 | py | Python | visual_odometry/visual_odometry.py | vineeths96/Visual-Odometry | 88d96a23a0bde9c05de1f4dddcca8b6c4bd817e7 | [
"MIT"
] | 2 | 2021-07-20T03:49:54.000Z | 2022-01-19T13:43:51.000Z | visual_odometry/visual_odometry.py | vineeths96/Visual-Odometry | 88d96a23a0bde9c05de1f4dddcca8b6c4bd817e7 | [
"MIT"
] | null | null | null | visual_odometry/visual_odometry.py | vineeths96/Visual-Odometry | 88d96a23a0bde9c05de1f4dddcca8b6c4bd817e7 | [
"MIT"
] | 3 | 2021-11-28T06:23:23.000Z | 2021-12-05T17:09:00.000Z | from .monovideoodometry import MonoVideoOdometry
from .parameters import *
# cv2 and numpy are used directly below; import them explicitly rather than relying
# on the wildcard import above to provide them.
import cv2
import numpy as np
def visual_odometry(
image_path="./input/sequences/10/image_0/",
pose_path="./input/poses/10.txt",
fivepoint=False,
):
"""
Plots the estimated odometry path using either five point estimation or eight point estimation
:param image_path: Path to the directory of camera images
:param pose_path: Path to the directory of pose file
:param fivepoint: Whether to use five point or eight point method
:return: None
"""
vo = MonoVideoOdometry(image_path, pose_path, FOCAL, PP, K, LUCAS_KANADE_PARAMS, fivepoint)
trajectory = np.zeros(shape=(800, 1200, 3))
frame_count = 0
while vo.hasNextFrame():
frame_count += 1
frame = vo.current_frame
cv2.imshow("Frame", frame)
k = cv2.waitKey(1)
if k == 27:
break
vo.process_frame()
estimated_coordinates = vo.get_mono_coordinates()
true_coordinates = vo.get_true_coordinates()
print("MSE Error: ", np.linalg.norm(estimated_coordinates - true_coordinates))
print("x: {}, y: {}, z: {}".format(*[str(pt) for pt in estimated_coordinates]))
print("True_x: {}, True_y: {}, True_z: {}".format(*[str(pt) for pt in true_coordinates]))
draw_x, draw_y, draw_z = [int(round(x)) for x in estimated_coordinates]
true_x, true_y, true_z = [int(round(x)) for x in true_coordinates]
trajectory = cv2.circle(trajectory, (true_x + 400, true_z + 100), 1, list((0, 0, 255)), 4)
trajectory = cv2.circle(trajectory, (draw_x + 400, draw_z + 100), 1, list((0, 255, 0)), 4)
cv2.putText(
trajectory,
"Actual Position:",
(140, 90),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(255, 255, 255),
1,
)
cv2.putText(trajectory, "Red", (270, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
cv2.putText(
trajectory,
"Estimated Odometry Position:",
(30, 120),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(255, 255, 255),
1,
)
cv2.putText(
trajectory,
"Green",
(270, 120),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(0, 255, 0),
1,
)
cv2.imshow("trajectory", trajectory)
if frame_count % 5 == 0:
cv2.imwrite(f"./results/trajectory/trajectory_{frame_count}.png", trajectory)
cv2.imwrite(f"./results/trajectory.png", trajectory)
cv2.destroyAllWindows()
| 32.097561 | 98 | 0.578647 |
6a659a66fbda946ae307b1633f49b480eec28005 | 886 | py | Python | tf-2-data-parallelism/src/utils.py | Amirosimani/amazon-sagemaker-script-mode | ea8d7d6b1b0613dffa793c9ae247cfd8868034ec | [
"Apache-2.0"
] | 144 | 2019-02-05T21:03:30.000Z | 2022-03-24T15:24:32.000Z | tf-2-data-parallelism/src/utils.py | kirit93/amazon-sagemaker-script-mode | 095af07488889bb2655b741749d8740d3e11a49e | [
"Apache-2.0"
] | 22 | 2019-03-04T04:18:02.000Z | 2022-03-09T00:21:36.000Z | tf-2-data-parallelism/src/utils.py | kirit93/amazon-sagemaker-script-mode | 095af07488889bb2655b741749d8740d3e11a49e | [
"Apache-2.0"
] | 94 | 2019-02-05T21:03:33.000Z | 2022-01-16T07:29:15.000Z | import os
import numpy as np
import tensorflow as tf
| 36.916667 | 89 | 0.76298 |
6a65a78ac7de33dc7adca445fb1aae94ba18f829 | 10,269 | py | Python | scripts/run_rbf_comparison_car_air_top5.py | CaptainCandy/influence-release | a152486a1c130fb5f907259c6692b9fe0d2ef6d0 | [
"MIT"
] | null | null | null | scripts/run_rbf_comparison_car_air_top5.py | CaptainCandy/influence-release | a152486a1c130fb5f907259c6692b9fe0d2ef6d0 | [
"MIT"
] | null | null | null | scripts/run_rbf_comparison_car_air_top5.py | CaptainCandy/influence-release | a152486a1c130fb5f907259c6692b9fe0d2ef6d0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 16:26:35 2019
@author: Administrator
"""
# Forked from run_rbf_comparison.py
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import math
import copy
import numpy as np
import pandas as pd
import sklearn.linear_model as linear_model
import sklearn.preprocessing as preprocessing
import scipy
import scipy.linalg as slin
import scipy.sparse.linalg as sparselin
import scipy.sparse as sparse
import random
import sys
sys.path.append("C:/Tang/influence-release-master") #
from load_vehicles import load_vehicles
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import base
from sklearn.metrics.pairwise import rbf_kernel
from influence.inceptionModel import BinaryInceptionModel
from influence.smooth_hinge import SmoothHinge
from influence.binaryLogisticRegressionWithLBFGS import BinaryLogisticRegressionWithLBFGS
import influence.dataset as dataset
from influence.dataset import DataSet
from influence.dataset_poisoning import generate_inception_features
#%%
num_classes = 2
num_train_ex_per_class = 40
num_test_ex_per_class = 300
dataset_name = 'carair_%s_%s' % (num_train_ex_per_class, num_test_ex_per_class)
image_data_sets = load_vehicles(
num_train_ex_per_class=num_train_ex_per_class,
num_test_ex_per_class=num_test_ex_per_class)
weight_decay = 0.001
initial_learning_rate = 0.001
keep_probs = None
decay_epochs = [1000, 10000]
#%%
### Generate kernelized feature vectors
X_train = image_data_sets.train.x
X_test = image_data_sets.test.x
Y_train = np.copy(image_data_sets.train.labels) * 2 - 1
Y_test = np.copy(image_data_sets.test.labels) * 2 - 1
num_train = X_train.shape[0]
num_test = X_test.shape[0]
X_stacked = np.vstack((X_train, X_test))
gamma = 0.05
weight_decay = 0.0001
K = rbf_kernel(X_stacked, gamma = gamma / num_train)
# =============================================================================
# L = slin.cholesky(K, lower=True)
# L_train = L[:num_train, :num_train]
# L_test = L[num_train:, :num_train]
# =============================================================================
K_train = K[:num_train, :num_train]
K_test = K[num_train:, :num_train]
### Compare top 5 influential examples from each network
test_idx = 0
## RBF
input_channels = 1
weight_decay = 0.001
batch_size = num_train
initial_learning_rate = 0.001
keep_probs = None
max_lbfgs_iter = 1000
use_bias = False
decay_epochs = [1000, 10000]
tf.reset_default_graph()
X_train = image_data_sets.train.x
Y_train = image_data_sets.train.labels * 2 - 1
train = DataSet(K_train, Y_train)
test = DataSet(K_test, Y_test)
data_sets = base.Datasets(train=train, validation=None, test=test)
input_dim = data_sets.train.x.shape[1]
# Train with hinge
print('Train rbf with hinge...')
rbf_model = SmoothHinge(
temp=0,
use_bias=use_bias,
input_dim=input_dim,
weight_decay=weight_decay,
num_classes=num_classes,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=False,
train_dir='output7',
log_dir='log',
model_name='carair_rbf_hinge_t-0')
rbf_model.train()
hinge_W = rbf_model.sess.run(rbf_model.params)[0]
# Then load weights into smoothed version
print('Load weights into smoothed version...')
tf.reset_default_graph()
rbf_model = SmoothHinge(
temp=0.001,
use_bias=use_bias,
input_dim=input_dim,
weight_decay=weight_decay,
num_classes=num_classes,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=False,
train_dir='output7',
log_dir='log',
model_name='car_air_rbf_hinge_t-0.001')
params_feed_dict = {}
params_feed_dict[rbf_model.W_placeholder] = hinge_W
rbf_model.sess.run(rbf_model.set_params_op, feed_dict=params_feed_dict)
rbf_predicted_loss_diffs = rbf_model.get_influence_on_test_loss(
[test_idx],
np.arange(len(rbf_model.data_sets.train.labels)),
force_refresh=True)
#%%
## Inception
dataset_name = 'carair_40_300'
test_idx = 0
# Generate inception features
print('Generate inception features...')
img_side = 299
num_channels = 3
num_train_ex_per_class = 40
num_test_ex_per_class = 300
batch_size = 20 #TODO:
# reset the default TF graph/session before building the Inception model
tf.reset_default_graph()
full_model_name = '%s_inception' % dataset_name
# inception
full_model = BinaryInceptionModel(
img_side=img_side,
num_channels=num_channels,
weight_decay=weight_decay,
num_classes=num_classes,
batch_size=batch_size,
data_sets=image_data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=True,
train_dir='output9',
log_dir='log',
model_name=full_model_name)
# inception
train_inception_features_val = generate_inception_features(
full_model,
image_data_sets.train.x,
image_data_sets.train.labels,
batch_size=batch_size)
test_inception_features_val = generate_inception_features(
full_model,
image_data_sets.test.x,
image_data_sets.test.labels,
batch_size=batch_size)
train = DataSet(
train_inception_features_val,
image_data_sets.train.labels)
test = DataSet(
test_inception_features_val,
image_data_sets.test.labels)
# train_f = np.load('G:/output/%s_inception_features_new_train.npz' % dataset_name)
# train = DataSet(train_f['inception_features_val'], train_f['labels'])
# test_f = np.load('G:/output/%s_inception_features_new_test.npz' % dataset_name)
# test = DataSet(test_f['inception_features_val'], test_f['labels'])
validation = None
# the Inception feature vectors are 2048-dimensional
data_sets = base.Datasets(train=train, validation=validation, test=test)
# train_f = np.load('G:/output/%s_inception_features_new_train.npz' % dataset_name)
# train = DataSet(train_f['inception_features_val'], train_f['labels'])
# test_f = np.load('G:/output/%s_inception_features_new_test.npz' % dataset_name)
# test = DataSet(test_f['inception_features_val'], test_f['labels'])
# validation = None
# data_sets = base.Datasets(train=train, validation=validation, test=test)
# train a fully-connected (logistic regression) layer on top of the Inception features
print('Train logistic regression after inception...')
input_dim = 2048
weight_decay = 0.001
batch_size = 20
initial_learning_rate = 0.001
keep_probs = None
decay_epochs = [1000, 10000]
max_lbfgs_iter = 1000
num_classes = 2
tf.reset_default_graph()
inception_model = BinaryLogisticRegressionWithLBFGS(
input_dim=input_dim,
weight_decay=weight_decay,
max_lbfgs_iter=max_lbfgs_iter,
num_classes=num_classes,
batch_size=batch_size,
data_sets=data_sets,
initial_learning_rate=initial_learning_rate,
keep_probs=keep_probs,
decay_epochs=decay_epochs,
mini_batch=False,
train_dir='output9',
log_dir='log',
model_name='%s_inception_onlytop' % dataset_name)
inception_model.train()
# =============================================================================
# inception_predicted_loss_diffs = inception_model.get_influence_on_test_loss(
# [test_idx],
# np.arange(len(inception_model.data_sets.train.labels)),
# force_refresh=True)
#
# x_test = X_test[test_idx, :]
# y_test = Y_test[test_idx]
#
#
# distances = dataset.find_distances(x_test, X_train)
# flipped_idx = Y_train != y_test
# rbf_margins_test = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_test_feed_dict)
# rbf_margins_train = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_train_feed_dict)
# inception_Y_pred_correct = get_Y_pred_correct_inception(inception_model)
#
#
# np.savez(
# 'output7/rbf_carair_results_%s' % test_idx,
# test_idx=test_idx,
# distances=distances,
# flipped_idx=flipped_idx,
# rbf_margins_test=rbf_margins_test,
# rbf_margins_train=rbf_margins_train,
# inception_Y_pred_correct=inception_Y_pred_correct,
# rbf_predicted_loss_diffs=rbf_predicted_loss_diffs,
# inception_predicted_loss_diffs=inception_predicted_loss_diffs
# )
# =============================================================================
#%%
print('Save results...')
#rand_test = random.sample(range(1, 600),50)
#np.savez('output7/rand_test_point', rand_test=rand_test)
for test_idx in range(1, 600):
rbf_predicted_loss_diffs = rbf_model.get_influence_on_test_loss(
[test_idx],
np.arange(len(rbf_model.data_sets.train.labels)),
force_refresh=True)
inception_predicted_loss_diffs = inception_model.get_influence_on_test_loss(
[test_idx],
np.arange(len(inception_model.data_sets.train.labels)),
force_refresh=True)
x_test = X_test[test_idx, :]
y_test = Y_test[test_idx]
distances = dataset.find_distances(x_test, X_train)
flipped_idx = Y_train != y_test
rbf_margins_test = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_test_feed_dict)
rbf_margins_train = rbf_model.sess.run(rbf_model.margin, feed_dict=rbf_model.all_train_feed_dict)
inception_Y_pred_correct = get_Y_pred_correct_inception(inception_model)
np.savez(
'output9/rbf_carair_results_%s' % test_idx,
test_idx=test_idx,
distances=distances,
flipped_idx=flipped_idx,
rbf_margins_test=rbf_margins_test,
rbf_margins_train=rbf_margins_train,
inception_Y_pred_correct=inception_Y_pred_correct,
rbf_predicted_loss_diffs=rbf_predicted_loss_diffs,
inception_predicted_loss_diffs=inception_predicted_loss_diffs
)
| 30.930723 | 101 | 0.7435 |
6a6623a4cf3e4c5b80fbcffbafebb173294bba30 | 1,478 | py | Python | data/external/repositories_2to3/145085/kaggle_Microsoft_Malware-master/kaggle_Microsoft_malware_small/find_4g.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/145085/kaggle_Microsoft_Malware-master/kaggle_Microsoft_malware_small/find_4g.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/145085/kaggle_Microsoft_Malware-master/kaggle_Microsoft_malware_small/find_4g.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | import sys
import pickle
##########################################################
# usage
# pypy find_4g.py xid_train.p ../../data/train
# xid_train.p is a list like ['loIP1tiwELF9YNZQjSUO',''....] to specify
# the order of samples in training data
# ../../data/train is the path of original train data
##########################################################
xid_name=sys.argv[1]
data_path=sys.argv[2]
xid=pickle.load(open(xid_name, 'rb')) #xid_train.p or xid_test.p
newc=pickle.load(open('newc.p', 'rb'))
newc2=pickle.load(open('cutcmd3g_for_4g.p', 'rb'))
cmd4g={}
for i in newc2:
for j in newc:
cmd4g[(i[0],i[1],i[2],j)]=0
print(newc)
for c,f in enumerate(xid):
count={}
fo=open(data_path+'/'+f+'.asm')
tot=0
a=-1
b=-1
d=-1
e=-1
for line in fo:
xx=line.split()
for x in xx:
if x in newc:
a=b
b=d
d=e
e=x
if (a,b,d,e) in cmd4g:
if (a,b,d,e) not in count:
count[(a,b,d,e)]=0
count[(a,b,d,e)]+=1
tot+=1
fo.close()
if True:#c%10000==0:
print(c*1.0/len(xid),tot)
for i in count:
cmd4g[i]=count[i]+cmd4g[i]
del count
cmd4gx={}
for i in cmd4g:
if cmd4g[i]>0:
cmd4gx[i]=cmd4g[i]
print(len(cmd4gx))
pickle.dump(cmd4gx,open('cmd4g.p','wb'))
| 25.050847 | 72 | 0.451962 |
6a6651ad80b45cc4756ccfc411bd482091aff56e | 50 | py | Python | src/domain/enums/__init__.py | Antonio-Gabriel/easepay_backend | 9aaf4de27c9cc906911ae46ee61c75c6d92dc826 | [
"MIT"
] | 1 | 2021-11-24T09:18:19.000Z | 2021-11-24T09:18:19.000Z | src/domain/enums/__init__.py | Antonio-Gabriel/easepay_backend | 9aaf4de27c9cc906911ae46ee61c75c6d92dc826 | [
"MIT"
] | null | null | null | src/domain/enums/__init__.py | Antonio-Gabriel/easepay_backend | 9aaf4de27c9cc906911ae46ee61c75c6d92dc826 | [
"MIT"
] | null | null | null | from .months import Months
from .sizes import Size | 25 | 26 | 0.82 |
6a6655e14286bbfcb799353c5812e25b7720b10d | 1,512 | py | Python | pygments/lexers/trafficscript.py | blu-base/pygments | da799d14818ed538bf937684a19ce779ddde9446 | [
"BSD-2-Clause"
] | 1 | 2015-06-08T14:52:49.000Z | 2015-06-08T14:52:49.000Z | pygments/lexers/trafficscript.py | blu-base/pygments | da799d14818ed538bf937684a19ce779ddde9446 | [
"BSD-2-Clause"
] | 1 | 2022-03-13T09:17:24.000Z | 2022-03-13T09:18:02.000Z | pygments/lexers/trafficscript.py | blu-base/pygments | da799d14818ed538bf937684a19ce779ddde9446 | [
"BSD-2-Clause"
] | null | null | null | """
pygments.lexers.trafficscript
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for RiverBed's TrafficScript (RTS) language.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer
from pygments.token import String, Number, Name, Keyword, Operator, Text, Comment
__all__ = ['RtsLexer']
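# The lexer class body was not preserved in this dump. The skeleton below is an
# illustrative reconstruction only: it shows the usual RegexLexer layout
# (metadata plus a token table), but the token rules are simplified
# placeholders rather than the original TrafficScript rules.
class RtsLexer(RegexLexer):
    """
    For Riverbed Stingray Traffic Manager (TrafficScript) source code.
    """
    name = 'TrafficScript'
    aliases = ['trafficscript', 'rts']
    filenames = ['*.rts']

    tokens = {
        'root': [
            (r'#.*', Comment),
            (r'"(\\\\|\\"|[^"])*"', String),
            (r"'(\\\\|\\'|[^'])*'", String),
            (r'\d+(\.\d+)?', Number),
            (r'\$[A-Za-z_]\w*', Name.Variable),
            (r'\b(if|else|for|while|do|sub|return|break|continue)\b', Keyword),
            (r'[-+*/%=<>!&|^.~?:]+', Operator),
            (r'[A-Za-z_][\w.]*', Name),
            (r'[(){}\[\];,]', Text),
            (r'\s+', Text),
        ],
    }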
| 29.076923 | 83 | 0.433201 |
6a681ede8ff42ae46d972ef7a200eff04f8f87d4 | 20,333 | py | Python | pandas/tests/indexes/test_common.py | dimithras/pandas | d321be6e2a43270625abf671d9e59f16529c4b48 | [
"BSD-3-Clause"
] | 1 | 2020-10-29T17:32:26.000Z | 2020-10-29T17:32:26.000Z | pandas/tests/indexes/test_common.py | BhavarthShah/pandas | efb068f25b911ff3009d5692eb831df35bb042e5 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/indexes/test_common.py | BhavarthShah/pandas | efb068f25b911ff3009d5692eb831df35bb042e5 | [
"BSD-3-Clause"
] | null | null | null | """
Collection of tests asserting things that should be true for
any index subclass. Makes use of the `indices` fixture defined
in pandas/tests/indexes/conftest.py.
"""
import re
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.common import is_period_dtype, needs_i8_conversion
import pandas as pd
from pandas import (
CategoricalIndex,
DatetimeIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
TimedeltaIndex,
)
import pandas._testing as tm
| 37.79368 | 88 | 0.629764 |
6a6837a4b97157cac91cdd54ef662d5a158d6207 | 22,699 | py | Python | tests/test_dynamics.py | leasanchez/BiorbdOptim | 28fac818af031668ecd82bc1929f78303c5d58d2 | [
"MIT"
] | 34 | 2020-12-14T17:09:41.000Z | 2022-03-31T17:03:37.000Z | tests/test_dynamics.py | pariterre/bioptim | 4064138e7d3fce34e21d488df19941937ce30557 | [
"MIT"
] | 229 | 2020-09-30T16:53:40.000Z | 2022-03-29T21:11:46.000Z | tests/test_dynamics.py | fbailly/bioptim | 3a5473ee7c39d645d960611596a45b044e8ccf58 | [
"MIT"
] | 15 | 2020-11-20T12:32:59.000Z | 2022-01-22T22:59:08.000Z | import pytest
import numpy as np
from casadi import MX, SX
import biorbd_casadi as biorbd
from bioptim.dynamics.configure_problem import ConfigureProblem
from bioptim.dynamics.dynamics_functions import DynamicsFunctions
from bioptim.interfaces.biorbd_interface import BiorbdInterface
from bioptim.misc.enums import ControlType
from bioptim.optimization.non_linear_program import NonLinearProgram
from bioptim.optimization.optimization_vector import OptimizationVector
from bioptim.dynamics.configure_problem import DynamicsFcn, Dynamics
from .utils import TestUtils
| 37.958194 | 120 | 0.491784 |
6a68e42c5242acff02618aac8ab6c6c44bb61d29 | 1,312 | py | Python | polyaxon/event_manager/event_manager.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | polyaxon/event_manager/event_manager.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | polyaxon/event_manager/event_manager.py | elyase/polyaxon | 1c19f059a010a6889e2b7ea340715b2bcfa382a0 | [
"MIT"
] | null | null | null | from hestia.manager_interface import ManagerInterface
from event_manager import event_actions
| 31.238095 | 90 | 0.634909 |
6a6b124cb7b2cd1d6d09ae5b84d5b49e63612508 | 679 | py | Python | test_f_login_andy.py | KotoLLC/peacenik-tests | 760f7799ab2b9312fe0cce373890195151c48fce | [
"Apache-2.0"
] | null | null | null | test_f_login_andy.py | KotoLLC/peacenik-tests | 760f7799ab2b9312fe0cce373890195151c48fce | [
"Apache-2.0"
] | null | null | null | test_f_login_andy.py | KotoLLC/peacenik-tests | 760f7799ab2b9312fe0cce373890195151c48fce | [
"Apache-2.0"
] | null | null | null | from helpers import * | 35.736842 | 75 | 0.696613 |
6a6b9fd92e89d1958b00048f55376ec87fde6db2 | 7,696 | py | Python | docker/src/clawpack-5.3.1/riemann/src/shallow_1D_py.py | ian-r-rose/visualization | ed6d9fab95eb125e7340ab3fad3ed114ed3214af | [
"CC-BY-4.0"
] | 11 | 2017-01-04T18:19:48.000Z | 2021-02-21T01:46:33.000Z | docker/src/clawpack-5.3.1/riemann/src/shallow_1D_py.py | ian-r-rose/visualization | ed6d9fab95eb125e7340ab3fad3ed114ed3214af | [
"CC-BY-4.0"
] | 8 | 2016-09-22T20:49:51.000Z | 2019-09-06T23:28:13.000Z | docker/src/clawpack-5.3.1/riemann/src/shallow_1D_py.py | ian-r-rose/visualization | ed6d9fab95eb125e7340ab3fad3ed114ed3214af | [
"CC-BY-4.0"
] | 13 | 2016-09-22T20:20:06.000Z | 2020-07-13T14:48:32.000Z | #!/usr/bin/env python
# encoding: utf-8
r"""
Riemann solvers for the shallow water equations.
The available solvers are:
* Roe - Use Roe averages to caluclate the solution to the Riemann problem
* HLL - Use a HLL solver
* Exact - Use a newton iteration to calculate the exact solution to the
Riemann problem
.. math::
q_t + f(q)_x = 0
where
.. math::
q(x,t) = \left [ \begin{array}{c} h \\ h u \end{array} \right ],
the flux function is
.. math::
f(q) = \left [ \begin{array}{c} h u \\ hu^2 + 1/2 g h^2 \end{array}\right ].
and :math:`h` is the water column height, :math:`u` the velocity and :math:`g`
is the gravitational acceleration.
:Authors:
Kyle T. Mandli (2009-02-05): Initial version
"""
# ============================================================================
# Copyright (C) 2009 Kyle T. Mandli <[email protected]>
#
# Distributed under the terms of the Berkeley Software Distribution (BSD)
# license
# http://www.opensource.org/licenses/
# ============================================================================
import numpy as np
num_eqn = 2
num_waves = 2
def shallow_roe_1D(q_l,q_r,aux_l,aux_r,problem_data):
r"""
Roe shallow water solver in 1d::
ubar = (sqrt(u_l) + sqrt(u_r)) / (sqrt(h_l) + sqrt(h_r))
cbar = sqrt( 0.5 * g * (h_l + h_r))
W_1 = | 1 | s_1 = ubar - cbar
| ubar - cbar |
W_2 = | 1 | s_1 = ubar + cbar
| ubar + cbar |
a1 = 0.5 * ( - delta_hu + (ubar + cbar) * delta_h ) / cbar
a2 = 0.5 * ( delta_hu - (ubar - cbar) * delta_h ) / cbar
*problem_data* should contain:
- *g* - (float) Gravitational constant
- *efix* - (bool) Boolean as to whether a entropy fix should be used, if
not present, false is assumed
:Version: 1.0 (2009-02-05)
"""
# Array shapes
num_rp = q_l.shape[1]
# Output arrays
wave = np.empty( (num_eqn, num_waves, num_rp) )
s = np.zeros( (num_waves, num_rp) )
amdq = np.zeros( (num_eqn, num_rp) )
apdq = np.zeros( (num_eqn, num_rp) )
# Compute roe-averaged quantities
ubar = ( (q_l[1,:]/np.sqrt(q_l[0,:]) + q_r[1,:]/np.sqrt(q_r[0,:])) /
(np.sqrt(q_l[0,:]) + np.sqrt(q_r[0,:])) )
cbar = np.sqrt(0.5 * problem_data['grav'] * (q_l[0,:] + q_r[0,:]))
# Compute Flux structure
delta = q_r - q_l
a1 = 0.5 * (-delta[1,:] + (ubar + cbar) * delta[0,:]) / cbar
a2 = 0.5 * ( delta[1,:] - (ubar - cbar) * delta[0,:]) / cbar
# Compute each family of waves
wave[0,0,:] = a1
wave[1,0,:] = a1 * (ubar - cbar)
s[0,:] = ubar - cbar
wave[0,1,:] = a2
wave[1,1,:] = a2 * (ubar + cbar)
s[1,:] = ubar + cbar
if problem_data['efix']:
raise NotImplementedError("Entropy fix has not been implemented.")
else:
s_index = np.zeros((2,num_rp))
for m in xrange(num_eqn):
for mw in xrange(num_waves):
s_index[0,:] = s[mw,:]
amdq[m,:] += np.min(s_index,axis=0) * wave[m,mw,:]
apdq[m,:] += np.max(s_index,axis=0) * wave[m,mw,:]
return wave, s, amdq, apdq
def shallow_hll_1D(q_l,q_r,aux_l,aux_r,problem_data):
r"""
HLL shallow water solver ::
W_1 = Q_hat - Q_l s_1 = min(u_l-c_l,u_l+c_l,lambda_roe_1,lambda_roe_2)
W_2 = Q_r - Q_hat s_2 = max(u_r-c_r,u_r+c_r,lambda_roe_1,lambda_roe_2)
Q_hat = ( f(q_r) - f(q_l) - s_2 * q_r + s_1 * q_l ) / (s_1 - s_2)
*problem_data* should contain:
- *g* - (float) Gravitational constant
:Version: 1.0 (2009-02-05)
"""
# Array shapes
num_rp = q_l.shape[1]
num_eqn = 2
num_waves = 2
# Output arrays
wave = np.empty( (num_eqn, num_waves, num_rp) )
s = np.empty( (num_waves, num_rp) )
amdq = np.zeros( (num_eqn, num_rp) )
apdq = np.zeros( (num_eqn, num_rp) )
# Compute Roe and right and left speeds
ubar = ( (q_l[1,:]/np.sqrt(q_l[0,:]) + q_r[1,:]/np.sqrt(q_r[0,:])) /
(np.sqrt(q_l[0,:]) + np.sqrt(q_r[0,:])) )
cbar = np.sqrt(0.5 * problem_data['grav'] * (q_l[0,:] + q_r[0,:]))
u_r = q_r[1,:] / q_r[0,:]
c_r = np.sqrt(problem_data['grav'] * q_r[0,:])
u_l = q_l[1,:] / q_l[0,:]
c_l = np.sqrt(problem_data['grav'] * q_l[0,:])
# Compute Einfeldt speeds
s_index = np.empty((4,num_rp))
s_index[0,:] = ubar+cbar
s_index[1,:] = ubar-cbar
s_index[2,:] = u_l + c_l
s_index[3,:] = u_l - c_l
s[0,:] = np.min(s_index,axis=0)
s_index[2,:] = u_r + c_r
s_index[3,:] = u_r - c_r
s[1,:] = np.max(s_index,axis=0)
# Compute middle state
q_hat = np.empty((2,num_rp))
q_hat[0,:] = ((q_r[1,:] - q_l[1,:] - s[1,:] * q_r[0,:]
+ s[0,:] * q_l[0,:]) / (s[0,:] - s[1,:]))
q_hat[1,:] = ((q_r[1,:]**2/q_r[0,:] + 0.5 * problem_data['grav'] * q_r[0,:]**2
- (q_l[1,:]**2/q_l[0,:] + 0.5 * problem_data['grav'] * q_l[0,:]**2)
- s[1,:] * q_r[1,:] + s[0,:] * q_l[1,:]) / (s[0,:] - s[1,:]))
# Compute each family of waves
wave[:,0,:] = q_hat - q_l
wave[:,1,:] = q_r - q_hat
# Compute variations
s_index = np.zeros((2,num_rp))
for m in xrange(num_eqn):
for mw in xrange(num_waves):
s_index[0,:] = s[mw,:]
amdq[m,:] += np.min(s_index,axis=0) * wave[m,mw,:]
apdq[m,:] += np.max(s_index,axis=0) * wave[m,mw,:]
return wave, s, amdq, apdq
def shallow_fwave_1d(q_l, q_r, aux_l, aux_r, problem_data):
r"""Shallow water Riemann solver using fwaves
Also includes support for bathymetry but be wary if you think you might have
dry states as this has not been tested.
*problem_data* should contain:
- *grav* - (float) Gravitational constant
- *sea_level* - (float) Datum from which the dry-state is calculated.
:Version: 1.0 (2014-09-05)
"""
g = problem_data['grav']
num_rp = q_l.shape[1]
num_eqn = 2
num_waves = 2
# Output arrays
fwave = np.empty( (num_eqn, num_waves, num_rp) )
s = np.empty( (num_waves, num_rp) )
amdq = np.zeros( (num_eqn, num_rp) )
apdq = np.zeros( (num_eqn, num_rp) )
# Extract state
u_l = np.where(q_l[0,:] - problem_data['sea_level'] > 1e-3,
q_l[1,:] / q_l[0,:], 0.0)
u_r = np.where(q_r[0,:] - problem_data['sea_level'] > 1e-3,
q_r[1,:] / q_r[0,:], 0.0)
phi_l = q_l[0,:] * u_l**2 + 0.5 * g * q_l[0,:]**2
phi_r = q_r[0,:] * u_r**2 + 0.5 * g * q_r[0,:]**2
# Speeds
s[0,:] = u_l - np.sqrt(g * q_l[0,:])
s[1,:] = u_r + np.sqrt(g * q_r[0,:])
delta1 = q_r[1,:] - q_l[1,:]
delta2 = phi_r - phi_l + g * 0.5 * (q_r[0,:] + q_l[0,:]) * (aux_r[0,:] - aux_l[0,:])
beta1 = (s[1,:] * delta1 - delta2) / (s[1,:] - s[0,:])
beta2 = (delta2 - s[0,:] * delta1) / (s[1,:] - s[0,:])
fwave[0,0,:] = beta1
fwave[1,0,:] = beta1 * s[0,:]
fwave[0,1,:] = beta2
fwave[1,1,:] = beta2 * s[1,:]
for m in xrange(num_eqn):
for mw in xrange(num_waves):
amdq[m,:] += (s[mw,:] < 0.0) * fwave[m,mw,:]
apdq[m,:] += (s[mw,:] >= 0.0) * fwave[m,mw,:]
return fwave, s, amdq, apdq
def shallow_exact_1D(q_l,q_r,aux_l,aux_r,problem_data):
r"""
Exact shallow water Riemann solver
.. warning::
This solver has not been implemented.
"""
raise NotImplementedError("The exact swe solver has not been implemented.")
| 31.801653 | 88 | 0.511954 |
6a6cf8239e9dd6960a26d7ae881835b1d30a1dd5 | 10,408 | py | Python | nuitka/Constants.py | juanfra684/Nuitka | 0e276895fadabefb598232f2ccf8cc7736c9a85b | [
"Apache-2.0"
] | 1 | 2020-04-13T18:56:02.000Z | 2020-04-13T18:56:02.000Z | nuitka/Constants.py | juanfra684/Nuitka | 0e276895fadabefb598232f2ccf8cc7736c9a85b | [
"Apache-2.0"
] | 1 | 2020-07-11T17:53:56.000Z | 2020-07-11T17:53:56.000Z | nuitka/Constants.py | juanfra684/Nuitka | 0e276895fadabefb598232f2ccf8cc7736c9a85b | [
"Apache-2.0"
] | null | null | null | # Copyright 2020, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Module for constants in Nuitka.
This contains tools to compare, classify and test constants.
"""
import math
from types import BuiltinFunctionType
from nuitka.Builtins import builtin_type_names
from nuitka.PythonVersions import python_version
from .__past__ import ( # pylint: disable=I0021,redefined-builtin
iterItems,
long,
unicode,
xrange,
)
from .Builtins import (
builtin_anon_names,
builtin_anon_value_list,
builtin_exception_values_list,
builtin_named_values_list,
)
NoneType = type(None)
# These built-in type references are kind of constant too. The list should be
# complete.
constant_builtin_types = (
int,
str,
float,
list,
tuple,
set,
dict,
slice,
complex,
xrange,
NoneType,
)
if python_version >= 300:
constant_builtin_types += (bytes,)
else:
constant_builtin_types += (
unicode,
long,
# This has no name in Python, but the natural one in C-API.
builtin_anon_names["instance"],
)
def isMutable(constant):
""" Is a constant mutable
    That means a user of a reference to it can modify it. Strings are
a prime example of immutable, dictionaries are mutable.
"""
# Many cases and all return, that is how we do it here,
# pylint: disable=too-many-return-statements
constant_type = type(constant)
if constant_type in (
str,
unicode,
complex,
int,
long,
bool,
float,
NoneType,
range,
bytes,
slice,
xrange,
type,
BuiltinFunctionType,
):
return False
elif constant_type in (dict, list, set, bytearray):
return True
elif constant_type is tuple:
for value in constant:
if isMutable(value):
return True
return False
elif constant_type is frozenset:
for value in constant:
if isMutable(value):
return True
return False
elif constant is Ellipsis:
return False
elif constant is NotImplemented:
return False
else:
assert False, repr(constant)
def isHashable(constant):
""" Is a constant hashable
    That means a user of a reference to it can use it for dicts and set
    keys. This is distinct from mutable; there is one type that is not
    mutable, and still not hashable: slices.
"""
# Many cases and all return, that is how we do it here,
# pylint: disable=too-many-return-statements
constant_type = type(constant)
if constant_type in (
str,
unicode,
complex,
int,
long,
bool,
float,
NoneType,
xrange,
bytes,
type,
BuiltinFunctionType,
):
return True
elif constant_type in (dict, list, set, slice, bytearray):
return False
elif constant_type is tuple:
for value in constant:
if not isHashable(value):
return False
return True
elif constant_type is frozenset:
for value in constant:
if not isHashable(value):
return False
return True
elif constant is Ellipsis:
return True
else:
assert False, constant_type
def isCompileTimeConstantValue(value):
""" Determine if a value will be usable at compile time.
"""
# This needs to match code in makeCompileTimeConstantReplacementNode
if isConstant(value):
return True
elif type(value) is type:
return True
else:
return False
| 25.635468 | 96 | 0.600596 |
6a6d56d36f5446ad1de42a20d6e31bc1aa3492a2 | 13,724 | py | Python | functions/predictionLambda/botocore/endpoint.py | chriscoombs/aws-comparing-algorithms-performance-mlops-cdk | 6d3888f3ecd667ee76dc473edba37a608786ed2e | [
"Apache-2.0"
] | 40 | 2020-07-11T10:07:51.000Z | 2021-12-11T17:09:20.000Z | functions/predictionLambda/botocore/endpoint.py | chriscoombs/aws-comparing-algorithms-performance-mlops-cdk | 6d3888f3ecd667ee76dc473edba37a608786ed2e | [
"Apache-2.0"
] | 21 | 2019-11-10T05:38:06.000Z | 2022-03-10T15:07:48.000Z | functions/predictionLambda/botocore/endpoint.py | chriscoombs/aws-comparing-algorithms-performance-mlops-cdk | 6d3888f3ecd667ee76dc473edba37a608786ed2e | [
"Apache-2.0"
] | 37 | 2020-07-09T23:12:30.000Z | 2022-03-16T11:15:58.000Z | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import logging
import time
import threading
from botocore.vendored import six
from botocore.awsrequest import create_request_object
from botocore.exceptions import HTTPClientError
from botocore.httpsession import URLLib3Session
from botocore.utils import is_valid_endpoint_url, get_environ_proxies
from botocore.hooks import first_non_none_response
from botocore.history import get_global_history_recorder
from botocore.response import StreamingBody
from botocore import parsers
logger = logging.getLogger(__name__)
history_recorder = get_global_history_recorder()
DEFAULT_TIMEOUT = 60
MAX_POOL_CONNECTIONS = 10
def convert_to_response_dict(http_response, operation_model):
"""Convert an HTTP response object to a request dict.
This converts the requests library's HTTP response object to
a dictionary.
:type http_response: botocore.vendored.requests.model.Response
:param http_response: The HTTP response from an AWS service request.
:rtype: dict
:return: A response dictionary which will contain the following keys:
* headers (dict)
* status_code (int)
* body (string or file-like object)
"""
response_dict = {
'headers': http_response.headers,
'status_code': http_response.status_code,
'context': {
'operation_name': operation_model.name,
}
}
if response_dict['status_code'] >= 300:
response_dict['body'] = http_response.content
elif operation_model.has_event_stream_output:
response_dict['body'] = http_response.raw
elif operation_model.has_streaming_output:
length = response_dict['headers'].get('content-length')
response_dict['body'] = StreamingBody(http_response.raw, length)
else:
response_dict['body'] = http_response.content
return response_dict
| 42.09816 | 81 | 0.6587 |
6a6dcc4d9c3e1b2437b6c8b26173ce12b1dfa929 | 7,761 | py | Python | week2/Assignment2Answer.py | RayshineRen/Introduction_to_Data_Science_in_Python | b19aa781a8f8d0e25853c4e86dadd4c9bebbcd71 | [
"MIT"
] | 1 | 2020-09-22T15:06:02.000Z | 2020-09-22T15:06:02.000Z | week2/Assignment2Answer.py | RayshineRen/Introduction_to_Data_Science_in_Python | b19aa781a8f8d0e25853c4e86dadd4c9bebbcd71 | [
"MIT"
] | 1 | 2020-11-03T14:11:02.000Z | 2020-11-03T14:24:50.000Z | week2/Assignment2Answer.py | RayshineRen/Introduction_to_Data_Science_in_Python | b19aa781a8f8d0e25853c4e86dadd4c9bebbcd71 | [
"MIT"
] | 2 | 2020-09-22T05:27:09.000Z | 2020-11-05T10:39:49.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 18 21:56:15 2020
@author: Ray
@email: [email protected]
@wechat: RayTing0305
"""
'''
Question 1
Write a function called proportion_of_education which returns the proportion of children in the dataset who had a mother with the education levels equal to less than high school (<12), high school (12), more than high school but not a college graduate (>12) and college degree.
This function should return a dictionary in the form of (use the correct numbers, do not round numbers):
{"less than high school":0.2,
"high school":0.4,
"more than high school but not college":0.2,
"college":0.2}
'''
import scipy.stats as stats
import numpy as np
import pandas as pd
df = pd.read_csv("./assets/NISPUF17.csv")
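# NOTE: the original solution functions were stripped from this dump. The
# sketches added below are hedged reconstructions, not the author's code; they
# assume the standard NIS-PUF 2017 column names (EDUC1, CBF_01, P_NUMFLU,
# P_NUMVRC, HAD_CPOX, SEX) and their usual 1/2/3/4 codings.
def proportion_of_education():
    # EDUC1 (assumed coding): 1 = <12 years, 2 = 12 years,
    # 3 = >12 years but not a college graduate, 4 = college graduate
    educ = df['EDUC1']
    total = len(educ)
    return {
        "less than high school": (educ == 1).sum() / total,
        "high school": (educ == 2).sum() / total,
        "more than high school but not college": (educ == 3).sum() / total,
        "college": (educ == 4).sum() / total,
    }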
assert type(proportion_of_education())==type({}), "You must return a dictionary."
assert len(proportion_of_education()) == 4, "You have not returned a dictionary with four items in it."
assert "less than high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "more than high school but not college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct"
'''
Question 2
Let's explore the relationship between being fed breastmilk as a child and getting a seasonal influenza vaccine from a healthcare provider. Return a tuple of the average number of influenza vaccines for those children we know received breastmilk as a child and those who we know did not.
This function should return a tuple in the form (use the correct numbers:
(2.5, 0.1)
'''
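# Hedged sketch (assumes CBF_01: 1 = was breastfed, 2 = was not; P_NUMFLU =
# number of seasonal influenza doses received). Illustrative, not the
# author's original solution.
def average_influenza_doses():
    subset = df[['CBF_01', 'P_NUMFLU']].dropna()
    breastfed = subset[subset['CBF_01'] == 1]['P_NUMFLU'].mean()
    not_breastfed = subset[subset['CBF_01'] == 2]['P_NUMFLU'].mean()
    return (breastfed, not_breastfed)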
assert len(average_influenza_doses())==2, "Return two values in a tuple, the first for yes and the second for no."
'''
Question 3
It would be interesting to see if there is any evidence of a link between vaccine effectiveness and sex of the child. Calculate the ratio of the number of children who contracted chickenpox but were vaccinated against it (at least one varicella dose) versus those who were vaccinated but did not contract chicken pox. Return results by sex.
This function should return a dictionary in the form of (use the correct numbers):
{"male":0.2,
"female":0.4}
Note: To aid in verification, the chickenpox_by_sex()['female'] value the autograder is looking for starts with the digits 0.0077.
'''
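# Hedged sketch (assumes P_NUMVRC = number of varicella doses, HAD_CPOX:
# 1 = yes, 2 = no, SEX: 1 = male, 2 = female). Illustrative reconstruction.
def chickenpox_by_sex():
    vaccinated = df[df['P_NUMVRC'] >= 1]
    result = {}
    for label, sex_code in (("male", 1), ("female", 2)):
        group = vaccinated[vaccinated['SEX'] == sex_code]
        had = (group['HAD_CPOX'] == 1).sum()
        did_not = (group['HAD_CPOX'] == 2).sum()
        result[label] = had / did_not
    return result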
assert len(chickenpox_by_sex())==2, "Return a dictionary with two items, the first for males and the second for females."
'''
Question 4
A correlation is a statistical relationship between two variables. If we wanted to know if vaccines work, we might look at the correlation between the use of the vaccine and whether it results in prevention of the infection or disease [1]. In this question, you are to see if there is a correlation between having had the chicken pox and the number of chickenpox vaccine doses given (varicella).
Some notes on interpreting the answer. The had_chickenpox_column is either 1 (for yes) or 2 (for no), and the num_chickenpox_vaccine_column is the number of doses a child has been given of the varicella vaccine. A positive correlation (e.g., corr > 0) means that an increase in had_chickenpox_column (which means more nos) would also increase the values of num_chickenpox_vaccine_column (which means more doses of vaccine). If there is a negative correlation (e.g., corr < 0), it indicates that having had chickenpox is related to an increase in the number of vaccine doses.
Also, pval is the probability that we observe a correlation between had_chickenpox_column and num_chickenpox_vaccine_column which is greater than or equal to a particular value, occurring by chance. A small pval means that the observed correlation is highly unlikely to have occurred by chance. In this case, pval should be very small (it will end in e-18, indicating a very small number).
[1] This isn't really the full picture, since we are not looking at when the dose was given. It's possible that children had chickenpox and then their parents went to get them the vaccine. Does this dataset have the data we would need to investigate the timing of the dose?
'''
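# Hedged sketch of the correlation described above (same column-name
# assumptions as the sketches earlier in this file).
def corr_chickenpox():
    subset = df[['HAD_CPOX', 'P_NUMVRC']].dropna()
    subset = subset[subset['HAD_CPOX'].isin([1, 2])]
    corr, pval = stats.pearsonr(subset['HAD_CPOX'], subset['P_NUMVRC'])
    return corr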
| 53.895833 | 576 | 0.74024 |
6a6dde8d68a99fd68fff6d0aa6d0f4f64dc22408 | 4,018 | py | Python | backup/26.py | accordinglyto/dferte | d4b8449c1633973dc538c9e72aca5d37802a4ee4 | [
"MIT"
] | null | null | null | backup/26.py | accordinglyto/dferte | d4b8449c1633973dc538c9e72aca5d37802a4ee4 | [
"MIT"
] | 8 | 2020-11-13T18:55:17.000Z | 2022-03-12T00:34:40.000Z | backup/26.py | accordinglyto/dferte | d4b8449c1633973dc538c9e72aca5d37802a4ee4 | [
"MIT"
] | null | null | null | from numpy import genfromtxt
import matplotlib.pyplot as plt
import mpl_finance
import numpy as np
import uuid
import matplotlib
# Input your csv file here with historical data
ad = genfromtxt(f"../financial_data/SM.csv", delimiter=",", dtype=str)
# output = []
# with open("STOCKbluechip.csv") as f:
# output = [str(s) for line in f.readlines() for s in line[:-1].split(",")]
# for stock in output:
pd = ad
buy_dir = "../data/train/buy/"
sell_dir = "../data/train/sell/"
iter = 0
for x in range(len(pd)):
graphwerk(iter, iter + 12)
iter = iter + 2
| 28.097902 | 128 | 0.558238 |
6a6e5f7f79247bb69a8f187a793986d06aaf806b | 3,091 | py | Python | streams/readers/arff_reader.py | JanSurft/tornado | 2c07686c5358d2bcb15d6edac3126ad9346c3c76 | [
"MIT"
] | 103 | 2017-10-01T20:24:58.000Z | 2022-03-16T09:09:10.000Z | streams/readers/arff_reader.py | JanSurft/tornado | 2c07686c5358d2bcb15d6edac3126ad9346c3c76 | [
"MIT"
] | 2 | 2019-09-17T11:06:26.000Z | 2021-11-08T23:57:46.000Z | streams/readers/arff_reader.py | JanSurft/tornado | 2c07686c5358d2bcb15d6edac3126ad9346c3c76 | [
"MIT"
] | 28 | 2018-12-18T00:43:10.000Z | 2022-03-04T08:39:47.000Z | """
The Tornado Framework
By Ali Pesaranghader
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
"""
import re
from data_structures.attribute import Attribute
from dictionary.tornado_dictionary import TornadoDic
| 38.6375 | 100 | 0.525396 |
6a6f28bb63a4999e5f2dcb27c1de7d562bafcd05 | 1,664 | py | Python | Experimente/Experiment ID 8/run-cifar10-v7.py | MichaelSchwabe/conv-ebnas-abgabe | f463d7bbd9b514597e19d25007913f7994cbbf7c | [
"MIT"
] | 6 | 2021-11-03T07:20:48.000Z | 2021-11-10T08:20:44.000Z | Experimente/Experiment ID 8/run-cifar10-v7.py | MichaelSchwabe/conv-ebnas-abgabe | f463d7bbd9b514597e19d25007913f7994cbbf7c | [
"MIT"
] | 1 | 2021-11-02T21:10:51.000Z | 2021-11-02T21:11:05.000Z | Experimente/Experiment ID 8/run-cifar10-v7.py | MichaelSchwabe/conv-ebnas-abgabe | f463d7bbd9b514597e19d25007913f7994cbbf7c | [
"MIT"
] | null | null | null | from __future__ import print_function
from keras.datasets import mnist
from keras.datasets import cifar10
from keras.utils.np_utils import to_categorical
import numpy as np
from keras import backend as K
from evolution import Evolution
from genome_handler import GenomeHandler
import tensorflow as tf
#import mlflow.keras
#import mlflow
#import mlflow.tensorflow
#mlflow.tensorflow.autolog()
#mlflow.keras.autolog()
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
K.set_image_data_format("channels_last")
#(x_train, y_train), (x_test, y_test) = mnist.load_data()
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2],x_train.shape[3]).astype('float32') / 255
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], x_test.shape[3]).astype('float32') / 255
# nCLasses
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
#y_train.shape
dataset = ((x_train, y_train), (x_test, y_test))
genome_handler = GenomeHandler(max_conv_layers=4,
max_dense_layers=2, # includes final dense layer
max_filters=512,
max_dense_nodes=1024,
input_shape=x_train.shape[1:],
n_classes=10)
evo = Evolution(genome_handler, data_path="log/evo_cifar10_gen40_pop10_e20.csv")
model = evo.run(dataset=dataset,
num_generations=40,
pop_size=10,
epochs=20,metric='acc')
#epochs=10,metric='loss')
print(model.summary()) | 37.818182 | 120 | 0.676683 |
6a703f7100900fb7196e6525d9f4720fdc63dbae | 11,514 | py | Python | CarModel.py | JaredFG/Multiagentes-Unity | 37f7ec5c0588865ef08b50df83566a43d817bebf | [
"MIT"
] | null | null | null | CarModel.py | JaredFG/Multiagentes-Unity | 37f7ec5c0588865ef08b50df83566a43d817bebf | [
"MIT"
] | null | null | null | CarModel.py | JaredFG/Multiagentes-Unity | 37f7ec5c0588865ef08b50df83566a43d817bebf | [
"MIT"
] | 1 | 2022-02-10T20:33:44.000Z | 2022-02-10T20:33:44.000Z | '''
Authors: Eduardo Rodríguez López A01749381
Rebeca Rojas Pérez A01751192
Jared Abraham Flores Guarneros A01379868
Eduardo Aguilar Chas A01749375
'''
from random import random
from mesa.visualization.modules import CanvasGrid
from mesa.visualization.ModularVisualization import ModularServer
from mesa.batchrunner import BatchRunner
from mesa.datacollection import DataCollector
from mesa.space import MultiGrid
from mesa import Agent , Model
from mesa.time import RandomActivation
# Class for creating the car agents
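# NOTE: the original class bodies were not preserved in this dump. The sketch
# below only illustrates the mesa Agent pattern that the class labels in this
# file refer to (here, the car agent); the attribute names and the empty step
# logic are assumptions, not the authors' original implementation.
class Car(Agent):
    def __init__(self, unique_id, model):
        super().__init__(unique_id, model)
        self.direction = None  # direction of travel on the current street cell

    def step(self):
        # The real agent would move along the street direction and stop at
        # red traffic lights; this placeholder does nothing.
        pass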
# Class for creating the traffic-light agents.
# Class for creating the street agents.
# Class for creating the model. | 41.566787 | 120 | 0.538822 |
6a705dbb2cd1b609cc2090d60bc5b82810db8095 | 1,684 | py | Python | qcodes/widgets/display.py | nulinspiratie/Qcodes | d050d38ac83f532523a39549c3247dfa6096a36e | [
"MIT"
] | 2 | 2017-02-27T06:02:39.000Z | 2019-06-03T04:56:59.000Z | qcodes/widgets/display.py | nulinspiratie/Qcodes | d050d38ac83f532523a39549c3247dfa6096a36e | [
"MIT"
] | 50 | 2017-04-12T04:03:15.000Z | 2022-03-09T00:41:43.000Z | qcodes/widgets/display.py | nulinspiratie/Qcodes | d050d38ac83f532523a39549c3247dfa6096a36e | [
"MIT"
] | null | null | null | """Helper for adding content stored in a file to a jupyter notebook."""
import os
from pkg_resources import resource_string
from IPython.display import display, Javascript, HTML
# Originally I implemented this using regular open() and read(), so it
# could use relative paths from the importing file.
#
# But for distributable packages, pkg_resources.resource_string is the
# best way to load data files, because it works even if the package is
# in an egg or zip file. See:
# http://pythonhosted.org/setuptools/setuptools.html#accessing-data-files-at-runtime
def display_auto(qcodes_path, file_type=None):
"""
Display some javascript, css, or html content in a jupyter notebook.
Content comes from a package-relative file path. Will use the file
extension to determine file type unless overridden by file_type
Args:
qcodes_path (str): the path to the target file within the qcodes
package, like 'widgets/widgets.js'
file_type (Optional[str]): Override the file extension to determine
what type of file this is. Case insensitive, supported values
are 'js', 'css', and 'html'
"""
contents = resource_string('qcodes', qcodes_path).decode('utf-8')
if file_type is None:
ext = os.path.splitext(qcodes_path)[1].lower()
elif 'js' in file_type.lower():
ext = '.js'
elif 'css' in file_type.lower():
ext = '.css'
else:
ext = '.html'
if ext == '.js':
display(Javascript(contents))
elif ext == '.css':
display(HTML('<style>' + contents + '</style>'))
else:
# default to html. Anything else?
display(HTML(contents))
| 35.083333 | 84 | 0.672803 |
6a71f08eeecbd606e19448cf8f9c90856e40cbac | 6,697 | py | Python | hubcontrol.py | smr99/lego-hub-tk | d3b86847873fa80deebf993ccd44b4d3d8f9bf40 | [
"MIT"
] | 16 | 2021-02-17T01:59:39.000Z | 2022-03-29T05:10:12.000Z | hubcontrol.py | smr99/lego-hub-tk | d3b86847873fa80deebf993ccd44b4d3d8f9bf40 | [
"MIT"
] | 15 | 2021-04-20T04:01:36.000Z | 2022-02-01T02:46:30.000Z | hubcontrol.py | smr99/lego-hub-tk | d3b86847873fa80deebf993ccd44b4d3d8f9bf40 | [
"MIT"
] | 9 | 2021-04-18T20:29:21.000Z | 2022-03-31T11:50:04.000Z | #! /usr/bin/python3
import base64
from data.ProgramHubLogger import ProgramHubLogger
from datetime import datetime
import logging
import os
import sys
from ui.MotionSensor import MotionSensorWidget
from ui.PositionStatus import PositionStatusWidget
from ui.DevicePortWidget import DevicePortWidget
from ui.ConnectionWidget import ConnectionWidget
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtWidgets import QApplication, QPushButton, QWidget
from comm.HubClient import ConnectionState, HubClient
from data.HubMonitor import HubMonitor
from data.HubStatus import HubStatus
from ui.DeviceStatusWidget import DeviceStatusWidget
from utils.setup import setup_logging
logger = logging.getLogger("App")
log_filename = os.path.dirname(__file__) + "/logs/hubcontrol.log"
setup_logging(log_filename)
logger.info("LEGO status app starting up")
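# NOTE: the original MainWindow class is missing from this dump. The stub
# below is a hedged placeholder with the constructor signature used further
# down (MainWindow(hub_client, hub_monitor)); the real window composes the
# imported connection/status/port widgets, whose constructor arguments are not
# shown here, so only a bare central widget is wired up.
class MainWindow(QMainWindow):
    def __init__(self, hub_client, hub_monitor, parent=None):
        super().__init__(parent)
        self._client = hub_client
        self._monitor = hub_monitor
        central = QWidget(self)
        layout = QVBoxLayout(central)
        layout.addWidget(QLabel("Waiting for hub connection..."))
        self.setCentralWidget(central)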
hc = HubClient()
monitor = HubMonitor(hc)
monitor.logger = ProgramHubLogger('logs/program')
app = QApplication(sys.argv)
window = MainWindow(hc, monitor)
window.setWindowTitle('LEGO Hub Status')
window.show()
hc.start()
sys.exit(app.exec_())
| 33.823232 | 119 | 0.667164 |
6a725ee4987cc406e04ed4e04ead31dbd1e9b6ea | 1,088 | py | Python | To-D0-App-main/base/views.py | shagun-agrawal/To-Do-App | 083081690fe9d291f13c0452a695a092b7544ab2 | [
"MIT"
] | 1 | 2021-04-08T14:12:38.000Z | 2021-04-08T14:12:38.000Z | To-D0-App-main/base/views.py | shagun-agrawal/To-Do-App | 083081690fe9d291f13c0452a695a092b7544ab2 | [
"MIT"
] | null | null | null | To-D0-App-main/base/views.py | shagun-agrawal/To-Do-App | 083081690fe9d291f13c0452a695a092b7544ab2 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from django.contrib.auth.views import LoginView
from .models import Task
# Create your views here.
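# The original view classes were not preserved in this dump. The classes below
# are a minimal, hedged sketch of typical class-based views for the imported
# Task model; the 'tasks' URL name used for success_url is an assumption.
class TaskList(ListView):
    model = Task
    context_object_name = 'tasks'


class TaskDetail(DetailView):
    model = Task
    context_object_name = 'task'


class TaskCreate(CreateView):
    model = Task
    fields = '__all__'
    success_url = reverse_lazy('tasks')


class TaskUpdate(UpdateView):
    model = Task
    fields = '__all__'
    success_url = reverse_lazy('tasks')


class TaskDelete(DeleteView):
    model = Task
    context_object_name = 'task'
    success_url = reverse_lazy('tasks')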
| 24.727273 | 73 | 0.714154 |
6a72d886218147f91e76b4f7f571b23929432026 | 966 | py | Python | tests/unit_tests/test_nn/test_converters/test_tensorflow/test_Dropout.py | samysweb/dnnv | 58fb95b7300914d9da28eed86c39eca473b1aaef | [
"MIT"
] | 5 | 2022-01-28T20:30:34.000Z | 2022-03-17T09:26:52.000Z | tests/unit_tests/test_nn/test_converters/test_tensorflow/test_Dropout.py | samysweb/dnnv | 58fb95b7300914d9da28eed86c39eca473b1aaef | [
"MIT"
] | 9 | 2022-01-27T03:50:28.000Z | 2022-02-08T18:42:17.000Z | tests/unit_tests/test_nn/test_converters/test_tensorflow/test_Dropout.py | samysweb/dnnv | 58fb95b7300914d9da28eed86c39eca473b1aaef | [
"MIT"
] | 2 | 2022-02-03T17:32:43.000Z | 2022-03-24T16:38:49.000Z | import numpy as np
from dnnv.nn.converters.tensorflow import *
from dnnv.nn.operations import *
TOL = 1e-6
| 26.108108 | 48 | 0.63354 |
6a7328e83cbca070a32d28d91e1af148c593184e | 4,202 | py | Python | smarts/zoo/worker.py | idsc-frazzoli/SMARTS | bae0a6ea160330921edc94a7161a4e8cf72a1974 | [
"MIT"
] | 554 | 2020-10-16T02:30:35.000Z | 2022-03-29T14:13:00.000Z | smarts/zoo/worker.py | idsc-frazzoli/SMARTS | bae0a6ea160330921edc94a7161a4e8cf72a1974 | [
"MIT"
] | 917 | 2020-10-17T00:10:31.000Z | 2022-03-31T23:00:47.000Z | smarts/zoo/worker.py | idsc-frazzoli/SMARTS | bae0a6ea160330921edc94a7161a4e8cf72a1974 | [
"MIT"
] | 135 | 2020-10-20T01:44:49.000Z | 2022-03-27T04:51:31.000Z | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Run an agent in its own (independent) process.
What Agent code does is out of our direct control, so we want to avoid any interactions with global state that might be present in the SMARTS process.
To protect and isolate Agents from any pollution of global state in the main SMARTS process, we spawn Agents in their fresh and independent python process.
This script is called from within SMARTS to instantiate a remote agent.
The protocol is as follows:
1. SMARTS calls: worker.py --port 5467 # sets a unique port per agent
2. worker.py will begin listening on port 5467.
3. SMARTS connects to (ip, 5467) as a client.
4. SMARTS calls `build()` rpc with `AgentSpec` as input.
5. worker.py receives the `AgentSpec` instances and builds the Agent.
6. SMARTS calls `act()` rpc with observation as input and receives the actions as response from worker.py.
"""
import argparse
import importlib
import logging
import os
import signal
import sys
from concurrent import futures
import grpc
from smarts.zoo import worker_pb2_grpc, worker_servicer
# Front-load some expensive imports as to not block the simulation
modules = [
"smarts.core.utils.pybullet",
"smarts.core.utils.sumo",
"smarts.core.sumo_road_network",
"numpy",
"sklearn",
"shapely",
"scipy",
"trimesh",
"panda3d",
"gym",
"ray",
]
for mod in modules:
try:
importlib.import_module(mod)
except ImportError:
if mod == "ray":
print(
"You need to install the ray dependency using pip install -e .[train] first"
)
if mod == "panda3d":
print(
"You need to install the panda3d dependency using pip install -e .[camera-obs] first"
)
pass
# End front-loaded imports
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(f"worker.py - pid({os.getpid()})")
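# The serve() implementation referenced below was not preserved in this dump.
# The sketch that follows is a hedged reconstruction based on the protocol in
# the module docstring; the exact names of the generated gRPC helper
# (add_WorkerServicer_to_server) and of the servicer class inside
# `worker_servicer` are assumptions.
def serve(port):
    ip = "[::]"
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
    worker_pb2_grpc.add_WorkerServicer_to_server(
        worker_servicer.WorkerServicer(), server
    )
    server.add_insecure_port(f"{ip}:{port}")
    server.start()
    log.debug(f"Worker - ip({ip}), port({port}), pid({os.getpid()}): Started serving.")

    def stop_server(unused_signum, unused_frame):
        # Stop serving on SIGINT/SIGTERM so SMARTS can tear the worker down.
        server.stop(0)
        log.debug(f"Worker - pid({os.getpid()}): Stopped serving.")

    signal.signal(signal.SIGINT, stop_server)
    signal.signal(signal.SIGTERM, stop_server)
    server.wait_for_termination()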
if __name__ == "__main__":
parser = argparse.ArgumentParser("Run an agent in an independent process.")
parser.add_argument(
"--port",
type=int,
required=True,
help="Port to listen for remote client connections.",
)
args = parser.parse_args()
serve(args.port)
| 34.442623 | 155 | 0.702285 |
6a744ccf0662773e4f40dc632d9bd05f720ada5c | 2,846 | py | Python | week2/problems/problem2.py | Nburkhal/mit-cs250 | a3d32a217deb2cfa1b94d8188bef73c0742b1245 | [
"MIT"
] | null | null | null | week2/problems/problem2.py | Nburkhal/mit-cs250 | a3d32a217deb2cfa1b94d8188bef73c0742b1245 | [
"MIT"
] | null | null | null | week2/problems/problem2.py | Nburkhal/mit-cs250 | a3d32a217deb2cfa1b94d8188bef73c0742b1245 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Now write a program that calculates the minimum fixed monthly payment needed
in order pay off a credit card balance within 12 months.
By a fixed monthly payment, we mean a single number which does not change each month,
but instead is a constant amount that will be paid each month.
In this problem, we will not be dealing with a minimum monthly payment rate.
The following variables contain values as described below:
balance - the outstanding balance on the credit card
annualInterestRate - annual interest rate as a decimal
The program should print out one line: the lowest monthly payment
that will pay off all debt in under 1 year, for example:
Lowest Payment: 180
Assume that the interest is compounded monthly according to
the balance at the end of the month (after the payment for that month is made).
The monthly payment must be a multiple of $10 and is the same for all months.
Notice that it is possible for the balance to become negative
using this payment scheme, which is okay. A summary of the required math is found below:
Monthly interest rate = (Annual interest rate) / 12.0
Monthly unpaid balance = (Previous balance) - (Minimum fixed monthly payment)
Updated balance each month = (Monthly unpaid balance) + (Monthly interest rate x Monthly unpaid balance)
Test Case 1:
balance = 3329
annualInterestRate = 0.2
Result Your Code Should Generate:
-------------------
Lowest Payment: 310
Test Case 2:
balance = 4773
annualInterestRate = 0.2
Result Your Code Should Generate:
-------------------
Lowest Payment: 440
Test Case 3:
balance = 3926
annualInterestRate = 0.2
Result Your Code Should Generate:
-------------------
Lowest Payment: 360
"""
# Establish variables that we know / needed for the evaluation.
# Counter optional
balance = 3329
annualInterestRate = 0.2
monthlyInterestRate = annualInterestRate / 12
monthlyPayment = 0
updatedBalance = balance
counter = 0
# Will loop through everything until we find a rate that will reduce updatedBalance to 0.
while updatedBalance > 0:
# Was stated that payments needed to happen in increments of $10
monthlyPayment += 10
# To reset balance back to actual balance when loop inevitably fails.
updatedBalance = balance
month = 1
# For 12 months and while balance is not 0...
while month <= 12 and updatedBalance > 0:
# Subtract the ($10*n) amount
updatedBalance -= monthlyPayment
# Compound the interest AFTER making monthly payment
interest = monthlyInterestRate * updatedBalance
updatedBalance += interest
# Increase month counter
month += 1
counter += 1
print("Lowest Payment: ", monthlyPayment)
print("Number of iterations: ", counter)
| 37.447368 | 104 | 0.713282 |
6a74aa2ca33901c3b2b2f9d3fd978d06054719fb | 2,410 | py | Python | leetcode_python/Sort/sort-characters-by-frequency.py | yennanliu/CS_basics | 3c50c819897a572ff38179bfb0083a19b2325fde | [
"Unlicense"
] | 18 | 2019-08-01T07:45:02.000Z | 2022-03-31T18:05:44.000Z | leetcode_python/Sort/sort-characters-by-frequency.py | yennanliu/CS_basics | 3c50c819897a572ff38179bfb0083a19b2325fde | [
"Unlicense"
] | null | null | null | leetcode_python/Sort/sort-characters-by-frequency.py | yennanliu/CS_basics | 3c50c819897a572ff38179bfb0083a19b2325fde | [
"Unlicense"
] | 15 | 2019-12-29T08:46:20.000Z | 2022-03-08T14:14:05.000Z | # V0
import collections
# V0'
# IDEA : collections.Counter(s).most_common
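# NOTE: the Solution class bodies were not preserved in this dump; the class
# below is a minimal reconstruction of the V0 idea stated above
# (collections.Counter + most_common), sufficient for the test cases further
# down. It is illustrative, not the author's original code.
class Solution(object):
    def frequencySort(self, s):
        """Return the characters of s joined in order of descending frequency."""
        count = collections.Counter(s)
        # most_common() sorts by count; ties keep first-seen (insertion) order.
        return ''.join(ch * freq for ch, freq in count.most_common())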
# V1
# IDEA : SORT
# https://blog.csdn.net/fuxuemingzhu/article/details/79437548
import collections
### Test case:
s=Solution()
assert s.frequencySort(['a','b','c','c']) == 'ccab'
assert s.frequencySort(['a']) == 'a'
assert s.frequencySort(['a','A','c','c']) == 'ccaA'
assert s.frequencySort(['c','c','c']) == 'ccc'
assert s.frequencySort([]) == ''
assert s.frequencySort(['','','']) == ''
# V1'
# http://bookshadow.com/weblog/2016/11/02/leetcode-sort-characters-by-frequency/
# V2
import collections
# V2'
# Time: O(n)
# Space: O(n)
import collections
| 27.078652 | 87 | 0.568465 |
6a750b59d5869e7c84ae1fcca0d133a7dbf28cce | 9,165 | py | Python | eval/scripts/human/html_gen.py | chateval/chatevalv2 | 7ba96d81842db00427a6d6351d5cea76a8766450 | [
"Apache-2.0"
] | 5 | 2018-06-11T19:47:23.000Z | 2020-03-04T01:29:15.000Z | eval/scripts/human/html_gen.py | chateval/app | 7ba96d81842db00427a6d6351d5cea76a8766450 | [
"Apache-2.0"
] | 12 | 2018-07-11T18:50:13.000Z | 2022-02-10T10:45:58.000Z | eval/scripts/human/html_gen.py | chateval/app | 7ba96d81842db00427a6d6351d5cea76a8766450 | [
"Apache-2.0"
] | 1 | 2018-06-29T14:52:16.000Z | 2018-06-29T14:52:16.000Z | """Stores all the helper functions that generate html"""
import random
def generate_2choice_html(example):
'''Makes html for ranking form for the specified row index.
Returns the HTML for a table of radio buttons used for ranking,
as well as a count of the total number of radio buttons.
'''
# Check for duplicates.
if example.target_lines[0] == example.target_lines[1]:
return "", 0
# Find all the non-duplicate target indices.
target_indices = [0, 1]
# Randomize the order targets are shown in.
random.shuffle(target_indices)
num_targets = len(target_indices)
source_html = ''
speaker = 'A'
for utterance in example.source_line_utterances():
source_html += '<h4>Speaker %s: %s</h4>' % (speaker, utterance)
speaker = 'A' if speaker == 'B' else 'B'
html = """
<br/>
<div class="panel panel-default btn-group">
%s
<br/>
<table>
""" % (source_html)
html += """
<tr>
<td>Speaker %s: %s</td>""" % (speaker, example.target_lines[target_indices[0]])
html += """
<td>
<label class="btn">
<input type="radio" class="%s" name="%s-target-%s" data-col="1" value="1"/>
</label>
</td>
</tr>""" % (example.key, example.key, target_indices[0])
html += """
<tr>
<td>Speaker %s: %s</td>""" % (speaker, example.target_lines[target_indices[1]])
html += """
<td>
<label class="btn">
<input type="radio" class="%s" name="%s-target-%s" data-col="1" value="1"/>
</label>
</td>
</tr>""" % (example.key, example.key, target_indices[1])
html += """
<tr>
<td>It's a tie.</td>
<td>
<label class="btn">
<input type="radio" class="%s" name="%s-target-tie" data-col="1" value="1"/>
</label>
</td>
</tr>""" % (example.key, example.key)
html += """
</table>
</div>
"""
return html, 1
def generate_ranking_tables_html(example):
'''Makes html for ranking form for the specified row index.
Returns the HTML for a table of radio buttons used for ranking,
as well as a count of the total number of radio buttons.
'''
# Find all the non-duplicate target indices.
target_indices = []
for idx in range(len(example.target_lines)):
current = example.target_lines[idx]
if current not in example.target_lines[0:idx] or idx == 0:
target_indices.append(idx)
# Randomize the order targets are shown in.
random.shuffle(target_indices)
num_targets = len(target_indices)
html = """
<br/>
<div class="panel panel-default btn-group">
<h4>Speaker A: %s</h4>
<table>
<tr>
<th></th>
""" % example.source_line
for idx in range(num_targets):
if idx == 0:
tag = 'best'
elif idx == num_targets - 1:
tag = 'worst'
else:
tag = ''
html += '<th align="center">%s<br>%s</th>' % (tag, idx+1)
html += "</tr>"
for idx in target_indices:
html += """
<tr>
<td>Speaker B: %s</td>""" % (example.target_lines[idx])
# Add a row of radio buttons whose length is the number of options.
for jdx in range(num_targets):
html += """
<td>
<label class="btn">
<input type="radio" class="%s" name="%s-target-%s" data-col="%s" value="%s"/>
</label>
</td>""" % (example.key, example.key, idx, jdx, jdx)
html += "</tr>"
html += """
</table>
</div>
"""
return html, num_targets | 33.448905 | 295 | 0.59509 |
6a75a5070f34939725c30b7941b46fda26295424 | 1,582 | py | Python | python3_module_template/subproject/myexample.py | sdpython/python_project_template | e365b29ba9a7dfd2688f68eb7ff2b84a6a82cb57 | [
"MIT"
] | null | null | null | python3_module_template/subproject/myexample.py | sdpython/python_project_template | e365b29ba9a7dfd2688f68eb7ff2b84a6a82cb57 | [
"MIT"
] | null | null | null | python3_module_template/subproject/myexample.py | sdpython/python_project_template | e365b29ba9a7dfd2688f68eb7ff2b84a6a82cb57 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@file
@brief This is the documentation of this module (myexampleb).
"""
| 18.611765 | 58 | 0.54488 |
6a75c6bcf2a235fe76f46e51c4cc31283811626a | 2,534 | py | Python | simulation/dataset_G_1q_X_Z_N1.py | eperrier/QDataSet | 383b38b9b4166848f72fac0153800525e66b477b | [
"MIT"
] | 42 | 2021-08-17T02:27:59.000Z | 2022-03-26T16:00:57.000Z | simulation/dataset_G_1q_X_Z_N1.py | eperrier/QDataSet | 383b38b9b4166848f72fac0153800525e66b477b | [
"MIT"
] | 1 | 2021-09-25T11:15:20.000Z | 2021-09-27T04:18:25.000Z | simulation/dataset_G_1q_X_Z_N1.py | eperrier/QDataSet | 383b38b9b4166848f72fac0153800525e66b477b | [
"MIT"
] | 6 | 2021-08-17T02:28:04.000Z | 2022-03-22T07:11:48.000Z | ##############################################
"""
This module generates a dataset
"""
##############################################
# preamble
import numpy as np
from utilites import Pauli_operators, simulate, CheckNoise
################################################
# meta parameters
name = "G_1q_X_Z_N1"
################################################
# quantum parameters
dim = 2 # dimension of the system
Omega = 12 # qubit energy gap
static_operators = [0.5*Pauli_operators[3]*Omega] # drift Hamiltonian
dynamic_operators = [0.5*Pauli_operators[1]] # control Hamiltonian
noise_operators = [0.5*Pauli_operators[3]] # noise Hamiltonian
initial_states = [
np.array([[0.5,0.5],[0.5,0.5]]), np.array([[0.5,-0.5],[-0.5,0.5]]),
np.array([[0.5,-0.5j],[0.5j,0.5]]),np.array([[0.5,0.5j],[-0.5j,0.5]]),
np.array([[1,0],[0,0]]), np.array([[0,0],[0,1]])
] # intial state of qubit
measurement_operators = Pauli_operators[1:] # measurement operators
##################################################
# simulation parameters
T = 1 # Evolution time
M = 1024 # Number of time steps
num_ex = 10000 # Number of examples
batch_size = 50 # batch size for TF
##################################################
# noise parameters
K = 2000 # Number of realizations
noise_profile = [1] # Noise type
###################################################
# control parameters
pulse_shape = "Gaussian" # Control pulse shape
num_pulses = 5 # Number of pulses per sequence
####################################################
# Generate the dataset
sim_parameters = dict( [(k,eval(k)) for k in ["name", "dim", "Omega", "static_operators", "dynamic_operators", "noise_operators", "measurement_operators", "initial_states", "T", "M", "num_ex", "batch_size", "K", "noise_profile", "pulse_shape", "num_pulses"] ])
CheckNoise(sim_parameters)
simulate(sim_parameters)
#################################################### | 56.311111 | 261 | 0.404893 |
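The dataset_G_1q_X_Z_N1.py script above gathers its module-level settings into sim_parameters with eval(). A minimal eval-free equivalent is sketched below; it assumes the same variable names as the script and is only an illustration, not part of the original repository.

# Sketch (not from the original repo): collect the parameters without eval().
param_names = [
    "name", "dim", "Omega", "static_operators", "dynamic_operators",
    "noise_operators", "measurement_operators", "initial_states",
    "T", "M", "num_ex", "batch_size", "K", "noise_profile",
    "pulse_shape", "num_pulses",
]
sim_parameters = {k: globals()[k] for k in param_names}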
6a75e8b3a7f6a8bf44de0912c3cdfced6251b233 | 55 | py | Python | configs/mmdet/detection/detection_tensorrt_static-300x300.py | zhiqwang/mmdeploy | 997d111a6f4ca9624ab3b36717748e6ce002037d | [
"Apache-2.0"
] | 746 | 2021-12-27T10:50:28.000Z | 2022-03-31T13:34:14.000Z | configs/mmdet/detection/detection_tensorrt_static-300x300.py | zhiqwang/mmdeploy | 997d111a6f4ca9624ab3b36717748e6ce002037d | [
"Apache-2.0"
] | 253 | 2021-12-28T05:59:13.000Z | 2022-03-31T18:22:25.000Z | configs/mmdet/detection/detection_tensorrt_static-300x300.py | zhiqwang/mmdeploy | 997d111a6f4ca9624ab3b36717748e6ce002037d | [
"Apache-2.0"
] | 147 | 2021-12-27T10:50:33.000Z | 2022-03-30T10:44:20.000Z | _base_ = ['../_base_/base_tensorrt_static-300x300.py']
| 27.5 | 54 | 0.745455 |
6a7641f27315b4a34aa454452b185ab3ffeddc05 | 505 | py | Python | user_service/user_service/api.py | Ziang-Lu/Flask-Blog | 8daf901a0ea0e079ad24a61fd7f16f1298514d4c | [
"MIT"
] | null | null | null | user_service/user_service/api.py | Ziang-Lu/Flask-Blog | 8daf901a0ea0e079ad24a61fd7f16f1298514d4c | [
"MIT"
] | 2 | 2020-06-09T08:40:42.000Z | 2021-04-30T21:20:35.000Z | user_service/user_service/api.py | Ziang-Lu/Flask-Blog | 8daf901a0ea0e079ad24a61fd7f16f1298514d4c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
API definition module.
"""
from flask import Blueprint
from flask_restful import Api
from .resources.user import UserAuth, UserItem, UserList, UserFollow
# Create an API-related blueprint
api_bp = Blueprint(name='api', import_name=__name__)
api = Api(api_bp)
api.add_resource(UserList, '/users')
api.add_resource(UserItem, '/users/<int:id>')
api.add_resource(UserAuth, '/user-auth')
api.add_resource(
UserFollow, '/user-follow/<int:follower_id>/<followed_username>'
)
| 22.954545 | 68 | 0.740594 |
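The api.py module above only registers resources; the classes it imports live in .resources.user and are not part of this listing. A hypothetical sketch of one such Flask-RESTful resource, shown purely for illustration:

from flask_restful import Resource

class UserItemSketch(Resource):
    # Hypothetical stand-in for the imported UserItem resource.
    def get(self, id: int):
        return {'id': id, 'username': 'example'}, 200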
6a77df2fb34c60a66cb0710a264af376f888be93 | 2,112 | py | Python | advanced/itertools_funcs.py | ariannasg/python3-essential-training | 9b52645f5ccb57d2bda5d5f4a3053681a026450a | [
"MIT"
] | 1 | 2020-06-02T08:37:41.000Z | 2020-06-02T08:37:41.000Z | advanced/itertools_funcs.py | ariannasg/python3-training | 9b52645f5ccb57d2bda5d5f4a3053681a026450a | [
"MIT"
] | null | null | null | advanced/itertools_funcs.py | ariannasg/python3-training | 9b52645f5ccb57d2bda5d5f4a3053681a026450a | [
"MIT"
] | null | null | null | #!usr/bin/env python3
import itertools
# itertools is a module that's not technically a set of built-in functions but
# it is part of the standard library that comes with python.
# it's useful for creating and using iterators.
if __name__ == "__main__":
main()
# CONSOLE OUTPUT:
# some infinite iterators
# Joe
# John
# Mike
# Joe
# John
# 100
# 103
# 106
# some non-infinite iterators
# [10, 15, 35, 65, 105, 155, 195, 225]
# [10, 10, 20, 30, 40, 50, 50, 50]
# [10, 5, 5, 5, 5, 5, 5, 5]
# ['A', 'B', 'C', 'D', '1', '2', '3', '4']
# [40, 50, 40, 30]
# [10, 5, 20, 30]
| 29.333333 | 78 | 0.673295 |
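In the itertools_funcs.py listing above, main() itself is not included, only its console output. The following hypothetical main() is consistent with that output; the exact data and predicates are assumptions made for illustration.

import itertools

def main():
    # some infinite iterators
    for name in itertools.islice(itertools.cycle(['Joe', 'John', 'Mike']), 5):
        print(name)                                   # Joe John Mike Joe John
    for n in itertools.islice(itertools.count(100, 3), 3):
        print(n)                                      # 100 103 106
    # some non-infinite iterators
    data = [10, 5, 20, 30, 40, 50, 40, 30]
    print(list(itertools.accumulate(data)))           # [10, 15, 35, 65, 105, 155, 195, 225]
    print(list(itertools.accumulate(data, max)))      # [10, 10, 20, 30, 40, 50, 50, 50]
    print(list(itertools.accumulate(data, min)))      # [10, 5, 5, 5, 5, 5, 5, 5]
    print(list(itertools.chain('ABCD', '1234')))      # ['A', 'B', 'C', 'D', '1', '2', '3', '4']
    print(list(itertools.dropwhile(lambda x: x <= 30, data)))  # [40, 50, 40, 30]
    print(list(itertools.takewhile(lambda x: x <= 30, data)))  # [10, 5, 20, 30]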
6a78c291da971309bc64ed073bb014f9b709c144 | 119 | py | Python | examples/plain_text_response.py | lukefx/stardust | 4d9e399ffba9d4a47a2f428b59b5abf4c5bd41ad | [
"MIT"
] | 2 | 2020-11-27T10:30:38.000Z | 2020-12-22T16:48:49.000Z | examples/plain_text_response.py | lukefx/stardust | 4d9e399ffba9d4a47a2f428b59b5abf4c5bd41ad | [
"MIT"
] | null | null | null | examples/plain_text_response.py | lukefx/stardust | 4d9e399ffba9d4a47a2f428b59b5abf4c5bd41ad | [
"MIT"
] | null | null | null | from starlette.responses import PlainTextResponse
| 19.833333 | 49 | 0.789916 |
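The stardust example above is a single import. A minimal Starlette app that actually returns a PlainTextResponse might look like the sketch below; the route and app wiring are assumed, not taken from the project.

from starlette.applications import Starlette
from starlette.responses import PlainTextResponse
from starlette.routing import Route

async def homepage(request):
    return PlainTextResponse('Hello, world!')

app = Starlette(routes=[Route('/', homepage)])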
6a78c857a857449cf31704c6af0759d610215a2d | 25,852 | py | Python | pypyrus_logbook/logger.py | t3eHawk/pypyrus_logbook | bd647a1c355b07e8df28c0d7298fcfe68cd9572e | [
"MIT"
] | null | null | null | pypyrus_logbook/logger.py | t3eHawk/pypyrus_logbook | bd647a1c355b07e8df28c0d7298fcfe68cd9572e | [
"MIT"
] | null | null | null | pypyrus_logbook/logger.py | t3eHawk/pypyrus_logbook | bd647a1c355b07e8df28c0d7298fcfe68cd9572e | [
"MIT"
] | 2 | 2019-02-06T08:05:43.000Z | 2019-02-06T08:06:35.000Z | import atexit
import datetime as dt
import os
import platform
import pypyrus_logbook as logbook
import sys
import time
import traceback
from .conf import all_loggers
from .formatter import Formatter
from .header import Header
from .output import Root
from .record import Record
from .sysinfo import Sysinfo
def write(self, record):
"""Direct write to the output.
Parameters
----------
record : Record
The argument is used to send it to the output `root`.
"""
self.__check_file_stats()
self.root.write(record)
pass
def record(self, rectype, message, error=False, **kwargs):
"""Basic method to write records.
Parameters
----------
rectype : str
By default method creates the record with the type NONE.
That can be changed but depends on available record types.
All registered record types are stored in the instance attribute
rectypes. If you wish to use own record type or change the
presentaion of exeisting one then edit this dictinary.
message : str
The message that must be written.
error : bool, optional
If record is error then set that parameter to `True`.
**kwargs
The keyword arguments used for additional forms (variables) for
record and message formatting.
"""
if self.filters.get(rectype, True) is True:
record = Record(self, rectype, message, error=error, **kwargs)
self.write(record)
pass
def info(self, message, **kwargs):
"""Send INFO record to output."""
rectype = 'info'
self.record(rectype, message, **kwargs)
pass
def debug(self, message, **kwargs):
"""Send DEBUG record to the output."""
rectype = 'debug'
self.record(rectype, message, **kwargs)
pass
def error(self, message=None, rectype='error', format=None, alarming=False,
level=1, **kwargs):
"""Send ERROR record to the output.
If exception in current traceback exists then method will format the
exception according to `formatter.error` string presentation. If
`formatter.error` is set to `False` the exception will be just printed
in original Python style.
Also method will send an alarm if alarming attribute is `True`, email
output is enabled and SMTP server is configurated.
If one of the limit triggers worked then application will be aborted.
Parameters
----------
message : str, optional
The message that must be written instead of exception.
rectype : str, optional
The type of error according to `rectypes` dictionary.
format : str, optional
The format of the error message.
alarming : bool
The argument is used to enable or disable the alarming mechanism
for this certain call.
level : int
The argument is used to describe the error level.
**kwargs
The keyword arguments used for additional forms (variables) for
record and message formatting.
"""
self._with_error = True
self._count_errors += 1
format = self.formatter.error if format is None else format
# Parse the error.
err_type, err_value, err_tb = sys.exc_info()
if message is None and err_type is not None:
if isinstance(format, str) is True:
err_name = err_type.__name__
err_value = err_value
for tb in traceback.walk_tb(err_tb):
f_code = tb[0].f_code
err_file = os.path.abspath(f_code.co_filename)
err_line = tb[1]
err_obj = f_code.co_name
self.record(rectype, message, error=True,
err_name=err_name, err_value=err_value,
err_file=err_file, err_line=err_line,
err_obj=err_obj, **kwargs)
elif format is False:
exception = traceback.format_exception(err_type, err_value,
err_tb)
message = '\n'
message += ''.join(exception)
self.record(rectype, message, **kwargs)
else:
message = message or ''
self.record(rectype, message, **kwargs)
# Break execution in case of critical error if permitted.
# The alarm will be generated at exit if it is configured.
if self._control is True:
if level >= self._maxlevel:
sys.exit()
if self._maxerrors is not False:
if self._count_errors > self._maxerrors:
sys.exit()
# Send alarm if execution was not aborted but alarm is needed.
if alarming is True:
self.root.email.alarm()
pass
def warning(self, message=None, **kwargs):
"""Send WARNING error record to the output."""
self.error(message, rectype='warning', level=0, **kwargs)
pass
def critical(self, message=None, **kwargs):
"""Send CRITICAL error record to the output."""
self.error(message, rectype='critical', level=2, **kwargs)
pass
def head(self):
"""Send header to the output."""
string = self.header.create()
self.write(string)
pass
def subhead(self, string):
"""Send subheader as upper-case text between two border lines to the
output.
Parameters
----------
string : str
The text that will be presented as subheader.
"""
bound = f'{self.formatter.div*self.formatter.length}\n'
string = f'{bound}\t{string}\n{bound}'.upper()
self.write(string)
pass
def line(self, message):
"""Send raw text with the new line to the output.
Parameters
----------
message : str
The message that must be written.
"""
self.write(f'{message}\n')
pass
def bound(self, div=None, length=None):
"""Write horizontal border in the output. Useful when need to separate
different blocks of information.
Parameters
----------
div : str, optional
Symbol that is used to bulid the bound.
length : int, optional
Lenght of the bound.
"""
border = self.formatter.div * self.formatter.length
self.write(border + '\n')
pass
def blank(self, number=1):
"""Write blank lines in the output.
Parameters
----------
number : int, optional
The number of the blank lines that must be written.
"""
string = '\n'*number
self.write(string)
pass
def ok(self, **kwargs):
"""Print INFO message with OK."""
rectype = 'info'
message = self.messages['ok']
self.record(rectype, message, **kwargs)
pass
def success(self, **kwargs):
"""Print INFO message with SUCCESS."""
rectype = 'info'
message = self.messages['success']
self.record(rectype, message, **kwargs)
pass
def fail(self, **kwargs):
"""Print INFO message with FAIL."""
rectype = 'info'
message = self.messages['fail']
self.record(rectype, message, **kwargs)
pass
def restart(self):
"""Restart logging. Will open new file."""
self._start_date = dt.datetime.now()
self.__calculate_restart_date()
if self.root.file.status is True:
self.root.file.new()
if self.header.used is True:
self.head()
pass
def send(self, *args, **kwargs):
"""Send email message. Note that SMTP server connection must be
configured.
"""
self.root.email.send(*args, **kwargs)
pass
def set(self, **kwargs):
"""Update values in table. Note that DB connection must be
configured.
"""
self.root.table.write(**kwargs)
pass
def __calculate_restart_date(self):
"""Calculate the date when logger must be restarted according to
maxdays parameter.
"""
self.__restart_date = (self._start_date
+ dt.timedelta(days=self._maxdays))
pass
def __check_file_stats(self):
"""Check the output file statistics to catch when current file must be
closed and new one must be opened.
"""
if self.root.file.status is True:
if self._maxsize is not False:
if self.root.file.size is not None:
if self.root.file.size > self._maxsize:
self.restart()
return
if self._maxdays is not False:
if self.__restart_date.day == dt.datetime.now().day:
self.restart()
return
| 39.348554 | 79 | 0.592952 |
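The pypyrus_logbook logger.py excerpt above shows the Logger methods but not the class header or constructor. A hypothetical usage sketch, grounded only in the docstrings above — how the instance is created is an assumption:

log = Logger()                    # assumed: the class this module defines
log.head()                        # write the header block
log.info('Process started')
try:
    1 / 0
except ZeroDivisionError:
    log.error()                   # formats the active exception
log.ok()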
6a79e21ee2f5d7ad67e69bd27f9206807683db56 | 488 | py | Python | darling_ansible/python_venv/lib/python3.7/site-packages/oci/object_storage/transfer/constants.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | null | null | null | darling_ansible/python_venv/lib/python3.7/site-packages/oci/object_storage/transfer/constants.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | null | null | null | darling_ansible/python_venv/lib/python3.7/site-packages/oci/object_storage/transfer/constants.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | 1 | 2020-06-25T03:12:58.000Z | 2020-06-25T03:12:58.000Z | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
MEBIBYTE = 1024 * 1024
STREAMING_DEFAULT_PART_SIZE = 10 * MEBIBYTE
DEFAULT_PART_SIZE = 128 * MEBIBYTE
OBJECT_USE_MULTIPART_SIZE = 128 * MEBIBYTE
| 54.222222 | 245 | 0.772541 |
6a7a250cd753510b2923ce0ec46a2aae0ee1d50c | 1,028 | py | Python | scraper/news/spiders/millardayo.py | ZendaInnocent/news-api | 71465aea50e0b1cea08a421d72156cbe7ed8a952 | [
"Apache-2.0"
] | 3 | 2021-11-15T08:43:53.000Z | 2021-11-15T19:44:56.000Z | scraper/news/spiders/millardayo.py | ZendaInnocent/news-api | 71465aea50e0b1cea08a421d72156cbe7ed8a952 | [
"Apache-2.0"
] | null | null | null | scraper/news/spiders/millardayo.py | ZendaInnocent/news-api | 71465aea50e0b1cea08a421d72156cbe7ed8a952 | [
"Apache-2.0"
] | 1 | 2021-11-15T08:43:58.000Z | 2021-11-15T08:43:58.000Z | # Spider for MillardAyo.com
import scrapy
from bs4 import BeautifulSoup
| 27.783784 | 65 | 0.521401 |
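The millardayo.py spider above keeps only its imports. A hypothetical skeleton using those imports is sketched below; the class name, start URL and selectors are illustrative assumptions, not the original implementation.

class MillardAyoSpider(scrapy.Spider):
    name = 'millardayo'
    start_urls = ['https://millardayo.com/']

    def parse(self, response):
        soup = BeautifulSoup(response.body, 'html.parser')
        for link in soup.find_all('a'):
            yield {'title': link.get_text(strip=True), 'url': link.get('href')}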
6a7ab579c54d59a8d5d95bf8ca299d1a0ccc36a3 | 10,384 | py | Python | sdks/python/apache_beam/runners/portability/expansion_service_test.py | stephenoken/beam | 4797f310b6671de6fd703502520f4b012b655c82 | [
"Apache-2.0"
] | 3 | 2020-08-28T17:47:26.000Z | 2021-08-17T06:38:58.000Z | sdks/python/apache_beam/runners/portability/expansion_service_test.py | stephenoken/beam | 4797f310b6671de6fd703502520f4b012b655c82 | [
"Apache-2.0"
] | 5 | 2020-11-13T19:06:10.000Z | 2021-11-10T19:56:12.000Z | sdks/python/apache_beam/runners/portability/expansion_service_test.py | stephenoken/beam | 4797f310b6671de6fd703502520f4b012b655c82 | [
"Apache-2.0"
] | 1 | 2018-09-30T05:34:06.000Z | 2018-09-30T05:34:06.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import argparse
import logging
import signal
import sys
import typing
import grpc
from past.builtins import unicode
import apache_beam as beam
import apache_beam.transforms.combiners as combine
from apache_beam.coders import StrUtf8Coder
from apache_beam.pipeline import PipelineOptions
from apache_beam.portability.api import beam_expansion_api_pb2_grpc
from apache_beam.portability.api.external_transforms_pb2 import ExternalConfigurationPayload
from apache_beam.runners.portability import expansion_service
from apache_beam.transforms import ptransform
from apache_beam.transforms.external import ImplicitSchemaPayloadBuilder
from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor
# This script provides an expansion service and example ptransforms for running
# external transform test cases. See external_test.py for details.
_LOGGER = logging.getLogger(__name__)
TEST_PREFIX_URN = "beam:transforms:xlang:test:prefix"
TEST_MULTI_URN = "beam:transforms:xlang:test:multi"
TEST_GBK_URN = "beam:transforms:xlang:test:gbk"
TEST_CGBK_URN = "beam:transforms:xlang:test:cgbk"
TEST_COMGL_URN = "beam:transforms:xlang:test:comgl"
TEST_COMPK_URN = "beam:transforms:xlang:test:compk"
TEST_FLATTEN_URN = "beam:transforms:xlang:test:flatten"
TEST_PARTITION_URN = "beam:transforms:xlang:test:partition"
def parse_string_payload(input_byte):
payload = ExternalConfigurationPayload()
payload.ParseFromString(input_byte)
coder = StrUtf8Coder()
return {
k: coder.decode_nested(v.payload)
for k,
v in payload.configuration.items()
}
server = None
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
main(sys.argv)
| 32.654088 | 92 | 0.74557 |
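In expansion_service_test.py above, the server startup code is elided. A rough sketch of how such an expansion service is typically served over gRPC is given below; the servicer class name and its constructor argument are assumptions, not confirmed by this listing.

def _serve(port):
    server = grpc.server(UnboundedThreadPoolExecutor())
    beam_expansion_api_pb2_grpc.add_ExpansionServiceServicer_to_server(
        expansion_service.ExpansionServiceServicer(PipelineOptions()), server)
    server.add_insecure_port('localhost:%s' % port)
    server.start()
    return server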
6a7bee943837f03f68168bdd6b1277bb1e2654a4 | 268 | py | Python | db.py | RunnerPro/RunnerProApi | 2e0aba17cba2a019b6d102bc4eac2fd60f164156 | [
"MIT"
] | null | null | null | db.py | RunnerPro/RunnerProApi | 2e0aba17cba2a019b6d102bc4eac2fd60f164156 | [
"MIT"
] | null | null | null | db.py | RunnerPro/RunnerProApi | 2e0aba17cba2a019b6d102bc4eac2fd60f164156 | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
from settings import DB_URI
Session = sessionmaker(autocommit=False, autoflush=False, bind=create_engine(DB_URI))
session = scoped_session(Session)
| 33.5 | 85 | 0.850746 |
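db.py above wires up a SQLAlchemy scoped session. A short usage sketch, assuming DB_URI points at a reachable database:

from sqlalchemy import text

result = session.execute(text('SELECT 1')).scalar()
session.remove()  # give the session back once the unit of work is done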
6a7c09860b07db2134a799e024cf2d3ffbf7dc17 | 11,429 | py | Python | python/tvm/contrib/nvcc.py | ntanhbk44/tvm | f89a929f09f7a0b0ccd0f4d46dc2b1c562839087 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | python/tvm/contrib/nvcc.py | ntanhbk44/tvm | f89a929f09f7a0b0ccd0f4d46dc2b1c562839087 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | python/tvm/contrib/nvcc.py | ntanhbk44/tvm | f89a929f09f7a0b0ccd0f4d46dc2b1c562839087 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Utility to invoke nvcc compiler in the system"""
from __future__ import absolute_import as _abs
import subprocess
import os
import warnings
import tvm._ffi
from tvm.runtime import ndarray as nd
from . import utils
from .._ffi.base import py_str
def compile_cuda(code, target="ptx", arch=None, options=None, path_target=None):
"""Compile cuda code with NVCC from env.
Parameters
----------
code : str
The cuda code.
target : str
The target format
arch : str
The architecture
options : str or list of str
The additional options
path_target : str, optional
Output file.
Return
------
cubin : bytearray
The bytearray of the cubin
"""
temp = utils.tempdir()
if target not in ["cubin", "ptx", "fatbin"]:
raise ValueError("target must be in cubin, ptx, fatbin")
temp_code = temp.relpath("my_kernel.cu")
temp_target = temp.relpath("my_kernel.%s" % target)
with open(temp_code, "w") as out_file:
out_file.write(code)
if arch is None:
if nd.gpu(0).exist:
# auto detect the compute arch argument
arch = "sm_" + "".join(nd.gpu(0).compute_version.split("."))
else:
raise ValueError("arch(sm_xy) is not passed, and we cannot detect it from env")
file_target = path_target if path_target else temp_target
cmd = ["nvcc"]
cmd += ["--%s" % target, "-O3"]
if isinstance(arch, list):
cmd += arch
else:
cmd += ["-arch", arch]
if options:
if isinstance(options, str):
cmd += [options]
elif isinstance(options, list):
cmd += options
else:
raise ValueError("options must be str or list of str")
cmd += ["-o", file_target]
cmd += [temp_code]
# NOTE: ccbin option can be used to tell nvcc where to find the c++ compiler
# just in case it is not in the path. On Windows it is not in the path by default.
# However, we cannot use TVM_CXX_COMPILER_PATH because the runtime env.
# Because it is hard to do runtime compiler detection, we require nvcc is configured
# correctly by default.
# if cxx_compiler_path != "":
# cmd += ["-ccbin", cxx_compiler_path]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = code
msg += "\nCompilation error:\n"
msg += py_str(out)
raise RuntimeError(msg)
data = bytearray(open(file_target, "rb").read())
if not data:
raise RuntimeError("Compilation error: empty result is generated")
return data
def find_cuda_path():
"""Utility function to find cuda path
Returns
-------
path : str
Path to cuda root.
"""
if "CUDA_PATH" in os.environ:
return os.environ["CUDA_PATH"]
cmd = ["which", "nvcc"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
out = py_str(out)
if proc.returncode == 0:
return os.path.realpath(os.path.join(str(out).strip(), "../.."))
cuda_path = "/usr/local/cuda"
if os.path.exists(os.path.join(cuda_path, "bin/nvcc")):
return cuda_path
raise RuntimeError("Cannot find cuda path")
def get_cuda_version(cuda_path):
"""Utility function to get cuda version
Parameters
----------
cuda_path : str
Path to cuda root.
Returns
-------
version : float
The cuda version
"""
version_file_path = os.path.join(cuda_path, "version.txt")
if not os.path.exists(version_file_path):
# Debian/Ubuntu repackaged CUDA path
version_file_path = os.path.join(cuda_path, "lib", "cuda", "version.txt")
try:
with open(version_file_path) as f:
version_str = f.readline().replace("\n", "").replace("\r", "")
return float(version_str.split(" ")[2][:2])
except FileNotFoundError:
pass
cmd = [os.path.join(cuda_path, "bin", "nvcc"), "--version"]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
out = py_str(out)
if proc.returncode == 0:
release_line = [l for l in out.split("\n") if "release" in l][0]
release_fields = [s.strip() for s in release_line.split(",")]
release_version = [f[1:] for f in release_fields if f.startswith("V")][0]
major_minor = ".".join(release_version.split(".")[:2])
return float(major_minor)
raise RuntimeError("Cannot read cuda version file")
def get_target_compute_version(target=None):
"""Utility function to get compute capability of compilation target.
Looks for the arch in three different places, first in the target attributes, then the global
scope, and finally the GPU device (if it exists).
Parameters
----------
target : tvm.target.Target, optional
The compilation target
Returns
-------
compute_version : str
compute capability of a GPU (e.g. "8.0")
"""
# 1. Target
if target:
if "arch" in target.attrs:
compute_version = target.attrs["arch"]
major, minor = compute_version.split("_")[1]
return major + "." + minor
# 2. Global scope
from tvm.autotvm.env import AutotvmGlobalScope # pylint: disable=import-outside-toplevel
if AutotvmGlobalScope.current.cuda_target_arch:
major, minor = AutotvmGlobalScope.current.cuda_target_arch.split("_")[1]
return major + "." + minor
# 3. GPU
if tvm.gpu(0).exist:
return tvm.gpu(0).compute_version
warnings.warn(
"No CUDA architecture was specified or GPU detected."
"Try specifying it by adding '-arch=sm_xx' to your target."
)
return None
def parse_compute_version(compute_version):
"""Parse compute capability string to divide major and minor version
Parameters
----------
compute_version : str
compute capability of a GPU (e.g. "6.0")
Returns
-------
major : int
major version number
minor : int
minor version number
"""
split_ver = compute_version.split(".")
try:
major = int(split_ver[0])
minor = int(split_ver[1])
return major, minor
except (IndexError, ValueError) as err:
# pylint: disable=raise-missing-from
raise RuntimeError("Compute version parsing error: " + str(err))
def have_fp16(compute_version):
"""Either fp16 support is provided in the compute capability or not
Parameters
----------
compute_version: str
compute capability of a GPU (e.g. "6.0")
"""
major, minor = parse_compute_version(compute_version)
# fp 16 support in reference to:
# https://docs.nvidia.com/cuda/cuda-c-programming-guide/#arithmetic-instructions
if major == 5 and minor == 3:
return True
if major >= 6:
return True
return False
def have_int8(compute_version):
"""Either int8 support is provided in the compute capability or not
Parameters
----------
compute_version : str
compute capability of a GPU (e.g. "6.1")
"""
major, _ = parse_compute_version(compute_version)
if major >= 6:
return True
return False
def have_tensorcore(compute_version=None, target=None):
"""Either TensorCore support is provided in the compute capability or not
Parameters
----------
compute_version : str, optional
compute capability of a GPU (e.g. "7.0").
target : tvm.target.Target, optional
The compilation target, will be used to determine arch if compute_version
isn't specified.
"""
if compute_version is None:
if tvm.gpu(0).exist:
compute_version = tvm.gpu(0).compute_version
else:
if target is None or "arch" not in target.attrs:
warnings.warn(
"Tensorcore will be disabled due to no CUDA architecture specified."
"Try specifying it by adding '-arch=sm_xx' to your target."
)
return False
compute_version = target.attrs["arch"]
# Compute version will be in the form "sm_{major}{minor}"
major, minor = compute_version.split("_")[1]
compute_version = major + "." + minor
major, _ = parse_compute_version(compute_version)
if major >= 7:
return True
return False
def have_cudagraph():
"""Either CUDA Graph support is provided"""
try:
cuda_path = find_cuda_path()
cuda_ver = get_cuda_version(cuda_path)
if cuda_ver < 10.0:
return False
return True
except RuntimeError:
return False
def have_bf16(compute_version):
"""Either bf16 support is provided in the compute capability or not
Parameters
----------
compute_version : str
compute capability of a GPU (e.g. "8.0")
"""
major, _ = parse_compute_version(compute_version)
if major >= 8:
return True
return False
| 30.155673 | 97 | 0.627351 |
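A small usage sketch for compile_cuda() from the nvcc.py module above; the kernel is a throwaway example and a locally installed CUDA toolchain is assumed.

kernel_src = r'''
extern "C" __global__ void add_one(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] += 1.0f;
}
'''
ptx = compile_cuda(kernel_src, target='ptx', arch='sm_70')
print(len(ptx), 'bytes of compiled ptx')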
6a7c3c233cce04ab709c5ec217d50c3347a2a2a8 | 2,429 | py | Python | calc/history/calculations.py | dhruvshah1996/Project3 | d87ad37f6cf2de0d3402c71d21b25258946aad69 | [
"MIT"
] | null | null | null | calc/history/calculations.py | dhruvshah1996/Project3 | d87ad37f6cf2de0d3402c71d21b25258946aad69 | [
"MIT"
] | null | null | null | calc/history/calculations.py | dhruvshah1996/Project3 | d87ad37f6cf2de0d3402c71d21b25258946aad69 | [
"MIT"
] | null | null | null | """Calculation history Class"""
from calc.calculations.addition import Addition
from calc.calculations.subtraction import Subtraction
from calc.calculations.multiplication import Multiplication
from calc.calculations.division import Division | 40.483333 | 86 | 0.712227 |
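calculations.py above keeps only its docstring and imports; the history container itself is elided. A purely hypothetical sketch of such a class — the names and behavior below are assumptions, not the project's actual API:

class CalculationsSketch:
    history = []

    @classmethod
    def add_calculation(cls, calculation):
        cls.history.append(calculation)
        return True

    @classmethod
    def clear_history(cls):
        cls.history.clear()
        return True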
6a7d299369e55fc318f13ff176616da2592dab8c | 526 | py | Python | Python/17 - 081 - extraindo dados de uma lista.py | matheusguerreiro/python | f39a1b92409f11cbe7fef5d9261f863f9e0fac0d | [
"MIT"
] | null | null | null | Python/17 - 081 - extraindo dados de uma lista.py | matheusguerreiro/python | f39a1b92409f11cbe7fef5d9261f863f9e0fac0d | [
"MIT"
] | null | null | null | Python/17 - 081 - extraindo dados de uma lista.py | matheusguerreiro/python | f39a1b92409f11cbe7fef5d9261f863f9e0fac0d | [
"MIT"
] | null | null | null | # Aula 17 (Listas (Parte 1))
valores = []
while True:
valor = int(input('Digite um Valor ou -1 para Finalizar: '))
if valor < 0:
print('\nFinalizando...')
break
else:
valores.append(valor)
print(f'Foram digitados {len(valores)} números')
valores.sort(reverse=True)
print(f'Lista ordenada de forma decrescente: {valores}')
if 5 in valores:
valores.reverse()
    print(f'O valor 5 foi digitado e está na {valores.index(5)} posição.')
else:
    print('Valor 5 não encontrado na lista.')
| 26.3 | 74 | 0.652091 |
6a7d44f1e562967fd6fedbdfc2867ad65df6f217 | 2,163 | py | Python | yekpay/migrations/0014_auto_20181120_1453.py | maryam-afzp/django-yekpay | f7b9d7914035ea4f27238eba9e0c70227cc65046 | [
"MIT"
] | 3 | 2020-05-17T18:33:22.000Z | 2021-12-06T08:31:42.000Z | yekpay/migrations/0014_auto_20181120_1453.py | Glyphack/django-yekpay | 8c4a44853207be4ff0b1711c0524fb0201859b19 | [
"MIT"
] | null | null | null | yekpay/migrations/0014_auto_20181120_1453.py | Glyphack/django-yekpay | 8c4a44853207be4ff0b1711c0524fb0201859b19 | [
"MIT"
] | 4 | 2019-11-14T14:16:49.000Z | 2021-12-06T08:31:44.000Z | # Generated by Django 2.0.9 on 2018-11-20 11:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 30.041667 | 121 | 0.57374 |
6a7e1204635ee097ea34ac3eae2b9d121e4f7471 | 203 | py | Python | polus-cell-nuclei-segmentation/src/dsb2018_topcoders/albu/src/pytorch_zoo/inplace_abn/modules/__init__.py | nishaq503/polus-plugins-dl | 511689e82eb29a84761538144277d1be1af7aa44 | [
"MIT"
] | null | null | null | polus-cell-nuclei-segmentation/src/dsb2018_topcoders/albu/src/pytorch_zoo/inplace_abn/modules/__init__.py | nishaq503/polus-plugins-dl | 511689e82eb29a84761538144277d1be1af7aa44 | [
"MIT"
] | 1 | 2021-09-09T23:22:16.000Z | 2021-09-09T23:22:16.000Z | polus-cell-nuclei-segmentation/src/dsb2018_topcoders/albu/src/pytorch_zoo/inplace_abn/modules/__init__.py | nishaq503/polus-plugins-dl | 511689e82eb29a84761538144277d1be1af7aa44 | [
"MIT"
] | 4 | 2021-06-22T13:54:52.000Z | 2022-01-26T19:23:39.000Z | from .bn import ABN, InPlaceABN, InPlaceABNWrapper, InPlaceABNSync, InPlaceABNSyncWrapper
from .misc import GlobalAvgPool2d
from .residual import IdentityResidualBlock
from .dense import DenseModule
| 40.6 | 90 | 0.842365 |
6a7e7d0b939c716cda0bb6e7629a5a7ce8b56ac7 | 10,911 | py | Python | python/pyarrow/tests/test_compute.py | kylebrandt/arrow | 515197dfe6e83d6fa6fe82bfec134f41b222b748 | [
"Apache-2.0"
] | null | null | null | python/pyarrow/tests/test_compute.py | kylebrandt/arrow | 515197dfe6e83d6fa6fe82bfec134f41b222b748 | [
"Apache-2.0"
] | null | null | null | python/pyarrow/tests/test_compute.py | kylebrandt/arrow | 515197dfe6e83d6fa6fe82bfec134f41b222b748 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pytest
import pyarrow as pa
import pyarrow.compute
all_array_types = [
('bool', [True, False, False, True, True]),
('uint8', np.arange(5)),
('int8', np.arange(5)),
('uint16', np.arange(5)),
('int16', np.arange(5)),
('uint32', np.arange(5)),
('int32', np.arange(5)),
('uint64', np.arange(5, 10)),
('int64', np.arange(5, 10)),
('float', np.arange(0, 0.5, 0.1)),
('double', np.arange(0, 0.5, 0.1)),
('string', ['a', 'b', None, 'ddd', 'ee']),
('binary', [b'a', b'b', b'c', b'ddd', b'ee']),
(pa.binary(3), [b'abc', b'bcd', b'cde', b'def', b'efg']),
(pa.list_(pa.int8()), [[1, 2], [3, 4], [5, 6], None, [9, 16]]),
(pa.large_list(pa.int16()), [[1], [2, 3, 4], [5, 6], None, [9, 16]]),
(pa.struct([('a', pa.int8()), ('b', pa.int8())]), [
{'a': 1, 'b': 2}, None, {'a': 3, 'b': 4}, None, {'a': 5, 'b': 6}]),
]
numerical_arrow_types = [
pa.int8(),
pa.int16(),
pa.int64(),
pa.uint8(),
pa.uint16(),
pa.uint64(),
pa.float32(),
pa.float64()
]
def test_take_indices_types():
arr = pa.array(range(5))
for indices_type in ['uint8', 'int8', 'uint16', 'int16',
'uint32', 'int32', 'uint64', 'int64']:
indices = pa.array([0, 4, 2, None], type=indices_type)
result = arr.take(indices)
result.validate()
expected = pa.array([0, 4, 2, None])
assert result.equals(expected)
for indices_type in [pa.float32(), pa.float64()]:
indices = pa.array([0, 4, 2], type=indices_type)
with pytest.raises(NotImplementedError):
arr.take(indices)
def test_filter_chunked_array():
arr = pa.chunked_array([["a", None], ["c", "d", "e"]])
expected_drop = pa.chunked_array([["a"], ["e"]])
expected_null = pa.chunked_array([["a"], [None, "e"]])
for mask in [
# mask is array
pa.array([True, False, None, False, True]),
# mask is chunked array
pa.chunked_array([[True, False, None], [False, True]]),
# mask is python object
[True, False, None, False, True]
]:
result = arr.filter(mask)
assert result.equals(expected_drop)
result = arr.filter(mask, null_selection_behavior="emit_null")
assert result.equals(expected_null)
def test_compare_chunked_array_mixed():
arr = pa.array([1, 2, 3, 4, None])
arr_chunked = pa.chunked_array([[1, 2, 3], [4, None]])
arr_chunked2 = pa.chunked_array([[1, 2], [3, 4, None]])
expected = pa.chunked_array([[True, True, True, True, None]])
for result in [
arr == arr_chunked,
arr_chunked == arr,
arr_chunked == arr_chunked2,
]:
assert result.equals(expected)
| 32.281065 | 77 | 0.59967 |
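The test_compute.py excerpt above exercises take() and filter(). The snippet below shows the same behavior outside pytest, using the imports already shown; it is a quick illustration and not part of the original test file.

arr = pa.array(range(5))
print(arr.take(pa.array([0, 4, 2, None], type=pa.int8())))   # [0, 4, 2, null]
chunked = pa.chunked_array([['a', None], ['c', 'd', 'e']])
mask = [True, False, None, False, True]
print(chunked.filter(mask))                                   # nulls dropped
print(chunked.filter(mask, null_selection_behavior='emit_null'))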
6a7ebe45370c220d4cb3303c8715bdc2a5f264ae | 7,074 | py | Python | python/sdk/client/api/log_api.py | ashwinath/merlin | 087a7fa6fb21e4c771d64418bd58873175226ca1 | [
"Apache-2.0"
] | null | null | null | python/sdk/client/api/log_api.py | ashwinath/merlin | 087a7fa6fb21e4c771d64418bd58873175226ca1 | [
"Apache-2.0"
] | null | null | null | python/sdk/client/api/log_api.py | ashwinath/merlin | 087a7fa6fb21e4c771d64418bd58873175226ca1 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Merlin
API Guide for accessing Merlin's model management, deployment, and serving functionalities # noqa: E501
OpenAPI spec version: 0.7.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from client.api_client import ApiClient
| 39.3 | 185 | 0.607577 |
6a7ef047892a808f5b9e319a809d26915f83c93f | 2,207 | py | Python | openmdao/solvers/nonlinear/nonlinear_block_jac.py | bollwyvl/OpenMDAO | 4d7a31b2bb39674e2be0d6a13cbe22de3f5353af | [
"Apache-2.0"
] | null | null | null | openmdao/solvers/nonlinear/nonlinear_block_jac.py | bollwyvl/OpenMDAO | 4d7a31b2bb39674e2be0d6a13cbe22de3f5353af | [
"Apache-2.0"
] | null | null | null | openmdao/solvers/nonlinear/nonlinear_block_jac.py | bollwyvl/OpenMDAO | 4d7a31b2bb39674e2be0d6a13cbe22de3f5353af | [
"Apache-2.0"
] | 1 | 2018-07-27T06:39:15.000Z | 2018-07-27T06:39:15.000Z | """Define the NonlinearBlockJac class."""
from openmdao.recorders.recording_iteration_stack import Recording
from openmdao.solvers.solver import NonlinearSolver
from openmdao.utils.mpi import multi_proc_fail_check
| 32.940299 | 82 | 0.584957 |
6a7ef877f9a75af565239a4f498da3558863fc35 | 7,766 | py | Python | tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py | Smokrow/tensorflow | debd66dae1c9a49d36ea006c97facf06b4ac25cb | [
"Apache-2.0"
] | 1 | 2018-09-08T08:26:31.000Z | 2018-09-08T08:26:31.000Z | tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py | Smokrow/tensorflow | debd66dae1c9a49d36ea006c97facf06b4ac25cb | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py | Smokrow/tensorflow | debd66dae1c9a49d36ea006c97facf06b4ac25cb | [
"Apache-2.0"
] | 1 | 2020-02-15T14:34:36.000Z | 2020-02-15T14:34:36.000Z | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the MapAndFilterFusion optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.contrib.data.python.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
if __name__ == "__main__":
test.main()
| 34.515556 | 80 | 0.638424 |
6a7f52df743becc0516c5282308cd0c5db04737d | 16,979 | py | Python | meerk40t/lihuiyu/lihuiyuemulator.py | jpirnay/meerk40t | 10d4e41a8c5e2bb95a504904273699e115822b9b | [
"MIT"
] | null | null | null | meerk40t/lihuiyu/lihuiyuemulator.py | jpirnay/meerk40t | 10d4e41a8c5e2bb95a504904273699e115822b9b | [
"MIT"
] | null | null | null | meerk40t/lihuiyu/lihuiyuemulator.py | jpirnay/meerk40t | 10d4e41a8c5e2bb95a504904273699e115822b9b | [
"MIT"
] | null | null | null | from meerk40t.core.cutcode import CutCode, RawCut
from meerk40t.core.parameters import Parameters
from meerk40t.core.units import UNITS_PER_MIL
from meerk40t.kernel import Module
from meerk40t.numpath import Numpath
from meerk40t.svgelements import Color
def header_write(self, data):
"""
Write data to the emulator including the header. This is intended for saved .egv files which include a default
header.
"""
if self.header_skipped:
self.write(data)
else:
data = LihuiyuParser.remove_header(data)
self.write(data)
def write_packet(self, packet):
self.write(packet[1:31])
def write(self, data):
for b in data:
self.process(b, chr(b))
class EGVBlob:
def __init__(self, data: bytearray, name=None):
self.name = name
self.data = data
self.operation = "blob"
self._cutcode = None
self._cut = None
| 32.777992 | 118 | 0.52777 |
6a7f701b1440f625bfec8817f0a39a899231c69f | 105,704 | py | Python | tencentcloud/dbbrain/v20210527/models.py | lleiyyang/tencentcloud-sdk-python | e6e6a4ce89286673b2322ae92d3c2fbf8665aa0b | [
"Apache-2.0"
] | 465 | 2018-04-27T09:54:59.000Z | 2022-03-29T02:18:01.000Z | tencentcloud/dbbrain/v20210527/models.py | lleiyyang/tencentcloud-sdk-python | e6e6a4ce89286673b2322ae92d3c2fbf8665aa0b | [
"Apache-2.0"
] | 91 | 2018-04-27T09:48:11.000Z | 2022-03-12T08:04:04.000Z | tencentcloud/dbbrain/v20210527/models.py | lleiyyang/tencentcloud-sdk-python | e6e6a4ce89286673b2322ae92d3c2fbf8665aa0b | [
"Apache-2.0"
] | 232 | 2018-05-02T08:02:46.000Z | 2022-03-30T08:02:48.000Z | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
| 31.310427 | 195 | 0.597149 |
6a7fd9c2a4520acac2ad0d4b073014e3ffeaa218 | 20,152 | py | Python | oauth/provider.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 2,027 | 2019-11-12T18:05:48.000Z | 2022-03-31T22:25:04.000Z | oauth/provider.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 496 | 2019-11-12T18:13:37.000Z | 2022-03-31T10:43:45.000Z | oauth/provider.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 249 | 2019-11-12T18:02:27.000Z | 2022-03-22T12:19:19.000Z | # Ported to Python 3
# Originally from https://github.com/DeprecatedCode/oauth2lib/blob/d161b010f8a596826050a09e5e94d59443cc12d9/oauth2lib/provider.py
import json
import logging
from requests import Response
from io import StringIO
try:
from werkzeug.exceptions import Unauthorized
except ImportError:
Unauthorized = Exception
from oauth import utils
def generate_access_token(self):
"""Generate a random access token.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def generate_refresh_token(self):
"""Generate a random refresh token.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def get_authorization_code(self, response_type, client_id, redirect_uri, **params):
"""Generate authorization code HTTP response.
:param response_type: Desired response type. Must be exactly "code".
:type response_type: str
:param client_id: Client ID.
:type client_id: str
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:rtype: requests.Response
"""
# Ensure proper response_type
if response_type != "code":
err = "unsupported_response_type"
return self._make_redirect_error_response(redirect_uri, err)
# Check redirect URI
is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri)
if not is_valid_redirect_uri:
return self._invalid_redirect_uri_response()
# Check conditions
is_valid_client_id = self.validate_client_id(client_id)
is_valid_access = self.validate_access()
scope = params.get("scope", "")
is_valid_scope = self.validate_scope(client_id, scope)
# Return proper error responses on invalid conditions
if not is_valid_client_id:
err = "unauthorized_client"
return self._make_redirect_error_response(redirect_uri, err)
if not is_valid_access:
err = "access_denied"
return self._make_redirect_error_response(redirect_uri, err)
if not is_valid_scope:
err = "invalid_scope"
return self._make_redirect_error_response(redirect_uri, err)
# Generate authorization code
code = self.generate_authorization_code()
# Save information to be used to validate later requests
self.persist_authorization_code(client_id=client_id, code=code, scope=scope)
# Return redirection response
params.update(
{"code": code, "response_type": None, "client_id": None, "redirect_uri": None}
)
redirect = utils.build_url(redirect_uri, params)
return self._make_response(headers={"Location": redirect}, status_code=302)
def refresh_token(self, grant_type, client_id, client_secret, refresh_token, **params):
"""Generate access token HTTP response from a refresh token.
:param grant_type: Desired grant type. Must be "refresh_token".
:type grant_type: str
:param client_id: Client ID.
:type client_id: str
:param client_secret: Client secret.
:type client_secret: str
:param refresh_token: Refresh token.
:type refresh_token: str
:rtype: requests.Response
"""
# Ensure proper grant_type
if grant_type != "refresh_token":
return self._make_json_error_response("unsupported_grant_type")
# Check conditions
is_valid_client_id = self.validate_client_id(client_id)
is_valid_client_secret = self.validate_client_secret(client_id, client_secret)
scope = params.get("scope", "")
is_valid_scope = self.validate_scope(client_id, scope)
data = self.from_refresh_token(client_id, refresh_token, scope)
is_valid_refresh_token = data is not None
# Return proper error responses on invalid conditions
if not (is_valid_client_id and is_valid_client_secret):
return self._make_json_error_response("invalid_client")
if not is_valid_scope:
return self._make_json_error_response("invalid_scope")
if not is_valid_refresh_token:
return self._make_json_error_response("invalid_grant")
# Discard original refresh token
self.discard_refresh_token(client_id, refresh_token)
# Generate access tokens once all conditions have been met
access_token = self.generate_access_token()
token_type = self.token_type
expires_in = self.token_expires_in
refresh_token = self.generate_refresh_token()
# Save information to be used to validate later requests
self.persist_token_information(
client_id=client_id,
scope=scope,
access_token=access_token,
token_type=token_type,
expires_in=expires_in,
refresh_token=refresh_token,
data=data,
)
# Return json response
return self._make_json_response(
{
"access_token": access_token,
"token_type": token_type,
"expires_in": expires_in,
"refresh_token": refresh_token,
}
)
def get_token(self, grant_type, client_id, client_secret, redirect_uri, code, **params):
"""Generate access token HTTP response.
:param grant_type: Desired grant type. Must be "authorization_code".
:type grant_type: str
:param client_id: Client ID.
:type client_id: str
:param client_secret: Client secret.
:type client_secret: str
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:param code: Authorization code.
:type code: str
:rtype: requests.Response
"""
# Ensure proper grant_type
if grant_type != "authorization_code":
return self._make_json_error_response("unsupported_grant_type")
# Check conditions
is_valid_client_id = self.validate_client_id(client_id)
is_valid_client_secret = self.validate_client_secret(client_id, client_secret)
is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri)
scope = params.get("scope", "")
is_valid_scope = self.validate_scope(client_id, scope)
data = self.from_authorization_code(client_id, code, scope)
is_valid_grant = data is not None
# Return proper error responses on invalid conditions
if not (is_valid_client_id and is_valid_client_secret):
return self._make_json_error_response("invalid_client")
if not is_valid_grant or not is_valid_redirect_uri:
return self._make_json_error_response("invalid_grant")
if not is_valid_scope:
return self._make_json_error_response("invalid_scope")
# Discard original authorization code
self.discard_authorization_code(client_id, code)
# Generate access tokens once all conditions have been met
access_token = self.generate_access_token()
token_type = self.token_type
expires_in = self.token_expires_in
refresh_token = self.generate_refresh_token()
# Save information to be used to validate later requests
self.persist_token_information(
client_id=client_id,
scope=scope,
access_token=access_token,
token_type=token_type,
expires_in=expires_in,
refresh_token=refresh_token,
data=data,
)
# Return json response
return self._make_json_response(
{
"access_token": access_token,
"token_type": token_type,
"expires_in": expires_in,
"refresh_token": refresh_token,
}
)
def get_authorization_code_from_uri(self, uri):
"""Get authorization code response from a URI. This method will
ignore the domain and path of the request, instead
automatically parsing the query string parameters.
:param uri: URI to parse for authorization information.
:type uri: str
:rtype: requests.Response
"""
params = utils.url_query_params(uri)
try:
if "response_type" not in params:
raise TypeError("Missing parameter response_type in URL query")
if "client_id" not in params:
raise TypeError("Missing parameter client_id in URL query")
if "redirect_uri" not in params:
raise TypeError("Missing parameter redirect_uri in URL query")
return self.get_authorization_code(**params)
except TypeError as exc:
self._handle_exception(exc)
# Catch missing parameters in request
err = "invalid_request"
if "redirect_uri" in params:
u = params["redirect_uri"]
return self._make_redirect_error_response(u, err)
else:
return self._invalid_redirect_uri_response()
except StandardError as exc:
self._handle_exception(exc)
# Catch all other server errors
err = "server_error"
u = params["redirect_uri"]
return self._make_redirect_error_response(u, err)
def get_token_from_post_data(self, data):
"""Get a token response from POST data.
:param data: POST data containing authorization information.
:type data: dict
:rtype: requests.Response
"""
try:
# Verify OAuth 2.0 Parameters
for x in ["grant_type", "client_id", "client_secret"]:
if not data.get(x):
raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x))
# Handle get token from refresh_token
if "refresh_token" in data:
return self.refresh_token(**data)
# Handle get token from authorization code
for x in ["redirect_uri", "code"]:
if not data.get(x):
raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x))
return self.get_token(**data)
except TypeError as exc:
self._handle_exception(exc)
# Catch missing parameters in request
return self._make_json_error_response("invalid_request")
except StandardError as exc:
self._handle_exception(exc)
# Catch all other server errors
return self._make_json_error_response("server_error")
class OAuthError(Unauthorized):
"""OAuth error, including the OAuth error reason."""
class ResourceAuthorization(object):
"""A class containing an OAuth 2.0 authorization."""
is_oauth = False
is_valid = None
token = None
client_id = None
expires_in = None
error = None
class ResourceProvider(Provider):
"""OAuth 2.0 resource provider. This class provides an interface
to validate an incoming request and authenticate resource access.
Certain methods MUST be overridden in a subclass, thus this
class cannot be directly used as a resource provider.
These are the methods that must be implemented in a subclass:
get_authorization_header(self)
# Return header string for key "Authorization" or None
validate_access_token(self, access_token, authorization)
# Set is_valid=True, client_id, and expires_in attributes
# on authorization if authorization was successful.
# Return value is ignored
"""
def get_authorization(self):
"""Get authorization object representing status of authentication."""
auth = self.authorization_class()
header = self.get_authorization_header()
if not header or not header.split:
return auth
header = header.split()
if len(header) > 1 and header[0] == "Bearer":
auth.is_oauth = True
access_token = header[1]
self.validate_access_token(access_token, auth)
if not auth.is_valid:
auth.error = "access_denied"
return auth
| 35.730496 | 129 | 0.654575 |
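The provider.py docstring above says a ResourceProvider subclass must implement get_authorization_header() and validate_access_token(). Below is a minimal illustrative subclass backed by a plain dict; it is not the implementation Quay actually uses.

class DictResourceProvider(ResourceProvider):
    def __init__(self, headers, tokens):
        self._headers = headers   # e.g. {'Authorization': 'Bearer abc'}
        self._tokens = tokens     # access_token -> client_id

    def get_authorization_header(self):
        return self._headers.get('Authorization')

    def validate_access_token(self, access_token, authorization):
        client_id = self._tokens.get(access_token)
        if client_id is not None:
            authorization.is_valid = True
            authorization.client_id = client_id
            authorization.expires_in = 3600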
6a811562ddff805b40048018c138048e412a8c98 | 773 | py | Python | main.py | TomHacker/ImageCluster | c4262e08a61c50b6d850ba29bc4d56d21c789aa9 | [
"Apache-2.0"
] | 10 | 2019-04-08T06:46:35.000Z | 2019-10-31T11:10:32.000Z | main.py | HandsomeBrotherShuaiLi/ImageCluster | c4262e08a61c50b6d850ba29bc4d56d21c789aa9 | [
"Apache-2.0"
] | 3 | 2020-06-02T01:24:18.000Z | 2021-05-20T04:53:26.000Z | main.py | HandsomeBrotherShuaiLi/ImageCluster | c4262e08a61c50b6d850ba29bc4d56d21c789aa9 | [
"Apache-2.0"
] | 1 | 2019-05-23T11:08:04.000Z | 2019-05-23T11:08:04.000Z | from model import ImageCluster
m=ImageCluster(
base_model='vgg16',#your feature map extractor model
resorted_img_folder='resorted_data',#the folder for clustered images
cluster_algo='kmeans',#cluster algorithm
base_img_folder='data',
maxK=150,#the max k num is 30, which means ImageCluster calculates every k in range(2,30+1)
)
# calculate the feature maps
# m.get_feature_map(
# resize_shape=(224,224) # (w,h) a tuple for resizing the input images to the same shape
# )
# #clustering for feature maps
# m.imagecluster()
#As we can see, 21 may be the best cluster number for this dataset.
#So,we can call the resorted_img function to label the images under different folders
m.resorted_img(
    selected_k_num=100  # an int number in range [2, maxK]
)
| 36.809524 | 95 | 0.750323 |
6a8199a221f44d9fef4df3ccc6d623b0243a377c | 1,058 | py | Python | tests/dummies.py | arvindmuralie77/gradsflow | d6ec5bc517dcf714cd4ecb91a7f702dce6bded3f | [
"Apache-2.0"
] | 253 | 2021-08-17T17:42:25.000Z | 2022-03-25T07:59:41.000Z | tests/dummies.py | arvindmuralie77/gradsflow | d6ec5bc517dcf714cd4ecb91a7f702dce6bded3f | [
"Apache-2.0"
] | 161 | 2021-08-17T16:28:08.000Z | 2022-03-27T02:36:45.000Z | tests/dummies.py | arvindmuralie77/gradsflow | d6ec5bc517dcf714cd4ecb91a7f702dce6bded3f | [
"Apache-2.0"
] | 35 | 2021-08-23T16:26:15.000Z | 2022-03-26T17:08:15.000Z | # Copyright (c) 2021 GradsFlow. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from gradsflow.models import Model
| 32.060606 | 75 | 0.697543 |
6a82018dc0f7662911572e4ff805c96d468e9254 | 2,330 | py | Python | JumpscaleCore/tools/executor/ExecutorSerial.py | gneumann333/jumpscaleX_core | 777d249fa3668c6e802c2f765f4b82fb39c3e5fa | [
"Apache-2.0"
] | 1 | 2020-06-21T11:18:52.000Z | 2020-06-21T11:18:52.000Z | JumpscaleCore/tools/executor/ExecutorSerial.py | gneumann333/jumpscaleX_core | 777d249fa3668c6e802c2f765f4b82fb39c3e5fa | [
"Apache-2.0"
] | 644 | 2019-08-25T10:19:56.000Z | 2020-12-23T09:41:04.000Z | JumpscaleCore/tools/executor/ExecutorSerial.py | gneumann333/jumpscaleX_core | 777d249fa3668c6e802c2f765f4b82fb39c3e5fa | [
"Apache-2.0"
] | 11 | 2019-08-29T21:38:50.000Z | 2020-06-21T11:18:55.000Z | from Jumpscale import j
JSBASE = j.baseclasses.object
from .ExecutorBase import *
import serial
| 27.093023 | 108 | 0.615021 |
6a836399736ccfbfdcec602215566bd6e9ae598c | 2,201 | py | Python | melisa/utils/snowflake.py | MelisaDev/melisa | 53fee10d8c1bf4dd716bc90096c16f096e11bfbf | [
"MIT"
] | 5 | 2022-03-11T19:51:28.000Z | 2022-03-13T16:28:58.000Z | melisa/utils/snowflake.py | jungledev1/melisa | 835e4b644e50b5038599ecbd1bfa510a0d3200e9 | [
"MIT"
] | 2 | 2022-03-19T18:09:39.000Z | 2022-03-23T12:18:49.000Z | melisa/utils/snowflake.py | jungledev1/melisa | 835e4b644e50b5038599ecbd1bfa510a0d3200e9 | [
"MIT"
] | 1 | 2022-03-23T07:30:04.000Z | 2022-03-23T07:30:04.000Z | # Copyright MelisaDev 2022 - Present
# Full MIT License can be found in `LICENSE.txt` at the project root.
from __future__ import annotations
| 30.150685 | 92 | 0.63562 |
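snowflake.py above is reduced to its header; the helper itself is not shown. For illustration only, a generic Discord-style snowflake decoder — the epoch constant and bit shift are the standard values for Discord IDs, not taken from this file:

DISCORD_EPOCH = 1420070400000  # milliseconds

def snowflake_timestamp_ms(snowflake: int) -> int:
    # Upper 42 bits hold milliseconds since the Discord epoch.
    return (snowflake >> 22) + DISCORD_EPOCH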
6a83ea727e6668f4f022e77a641fbd9d212a22e3 | 8,749 | py | Python | feed/serializers/extensions.py | cul-it/arxiv-rss | 40c0e859528119cc8ba3700312cb8df095d95cdd | [
"MIT"
] | 4 | 2020-06-29T15:05:37.000Z | 2022-02-02T10:28:28.000Z | feed/serializers/extensions.py | arXiv/arxiv-feed | 82923d062e2524df94c22490cf936a988559ce66 | [
"MIT"
] | 12 | 2020-03-06T16:45:00.000Z | 2022-03-02T15:36:14.000Z | feed/serializers/extensions.py | cul-it/arxiv-rss | 40c0e859528119cc8ba3700312cb8df095d95cdd | [
"MIT"
] | 2 | 2020-12-06T16:30:06.000Z | 2021-11-05T12:29:08.000Z | """Classes derived from the Feedgen extension classes."""
from typing import Dict, List, Optional
from lxml import etree
from lxml.etree import Element
from flask import current_app
from feedgen.ext.base import BaseEntryExtension, BaseExtension
from feed.domain import Author, Media
| 30.590909 | 79 | 0.529318 |