content: string (lengths 22 – 815k)
id: int64 (0 – 4.91M)
def initialize_window(icon, title, width, height, graphical):  # pragma: no cover
    """
    Initialize the graphics environment and the window.

    Parameters
    ----------
    icon : Surface
        Window icon
    title : str
        Window name
    width : int
        Window width
    height : int
        Window height
    graphical : bool
        Whether the window should be displayed

    Returns
    -------
    Surface * Surface
        A pair (game surface, surface to display)
    """
    game = pygame.Surface((width, height))
    if graphical:
        pygame.display.set_icon(load_image(icon))
        pygame.display.set_caption(title)
        return (game, pygame.display.set_mode((width, height), flags=pygame.RESIZABLE))
    return (game, None)
5,357,000
def get_contour_verts(cn):
    """unpack the SVM contour values"""
    contours = []
    # for each contour line
    for cc in cn.collections:
        paths = []
        # for each separate section of the contour line
        for pp in cc.get_paths():
            xy = []
            # for each segment of that section
            for vv in pp.iter_segments():
                xy.append(vv[0])
            paths.append(np.vstack(xy))
        contours.append(paths)
    return contours
5,357,001
def play(data_type, stream_id, name, start=-2, duration=-1, reset=False):
    """
    Construct a 'play' message to start receiving audio/video data from
    publishers on the server.

    :param data_type: int the RTMP datatype.
    :param stream_id: int the stream which the message will be sent on.
    :param name: str the name of the stream that is published/recorded on the server.
    :param start: N/A.
    :param duration: N/A.
    :param reset: N/A.
    """
    # TODO: Add start, duration, reset(?) Will it work with 'play'?
    msg = {'msg': data_type,
           'stream_id': stream_id,
           'command': [u'play', 0, None, u'' + str(name)]}
    return msg
5,357,002
def _compute_covariances(precisions_chol):
    """Compute covariances from Cholesky decomposition of the precision matrices.

    Parameters
    ----------
    precisions_chol : array-like, shape (n_components, n_features, n_features)
        The Cholesky decomposition of the sample precisions.

    Returns
    -------
    covariances : array-like
        The covariance matrices corresponding to the given precision matrices.
    """
    n_components, n_features, _ = precisions_chol.shape
    covariances = np.empty((n_components, n_features, n_features))
    for k, prec_chol in enumerate(precisions_chol):
        cov_chol = sl.solve_triangular(prec_chol, np.eye(n_features), lower=True).T
        covariances[k] = np.dot(cov_chol, cov_chol.T)
    return covariances
5,357,003
def test_load_by_file_path():
    """
    Test if the image can be loaded by passing a filepath as string.
    """
    image = ShdlcFirmwareImage(
        EKS2.get('HEXFILE'),
        EKS2.get('BL_ADDR'),
        EKS2.get('APP_ADDR'))
    assert image.size > 0
5,357,004
def rad2deg(angle):
    """
    Convert radian to degree.

    Parameters
    ----------
    angle : float
        Angle in radians

    Returns
    -------
    degree : float
        Angle in degrees
    """
    return (180. / PI) * angle
5,357,005
def parse_kinetics_splits(level): """Parse Kinetics-400 dataset into "train", "val", "test" splits. Args: level (int): Directory level of data. 1 for the single-level directory, 2 for the two-level directory. Returns: list: "train", "val", "test" splits of Kinetics-400. """ def convert_label(s, keep_whitespaces=False): """Convert label name to a formal string. Remove redundant '"' and convert whitespace to '_'. Args: s (str): String to be converted. keep_whitespaces(bool): Whether to keep whitespace. Default: False. Returns: str: Converted string. """ if not keep_whitespaces: return s.replace('"', '').replace(' ', '_') else: return s.replace('"', '') def line_to_map(x, test=False): """A function to map line string to vid and label. Args: x (str): A single line from Kinetics-400 csv file. test (bool): Indicate whether the line comes from test annotation file. Returns: tuple[str, str]: (vid, label), vid is the video id, label is the video label. """ if test: # vid = f'{x[0]}_{int(x[1]):06d}_{int(x[2]):06d}' vid = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}' label = -1 # label unknown return vid, label else: vid = f'{x[1]}_{int(float(x[2])):06d}_{int(float(x[3])):06d}' if level == 2: vid = f'{convert_label(x[0])}/{vid}' else: assert level == 1 label = class_mapping[convert_label(x[0])] return vid, label train_file = 'data/kinetics400/annotations/kinetics_train.csv' val_file = 'data/kinetics400/annotations/kinetics_val.csv' test_file = 'data/kinetics400/annotations/kinetics_test.csv' csv_reader = csv.reader(open(train_file)) # skip the first line next(csv_reader) labels_sorted = sorted(set([convert_label(row[0]) for row in csv_reader])) class_mapping = {label: i for i, label in enumerate(labels_sorted)} csv_reader = csv.reader(open(train_file)) next(csv_reader) train_list = [line_to_map(x) for x in csv_reader] csv_reader = csv.reader(open(val_file)) next(csv_reader) val_list = [line_to_map(x) for x in csv_reader] csv_reader = csv.reader(open(test_file)) next(csv_reader) test_list = [line_to_map(x, test=True) for x in csv_reader] splits = ((train_list, val_list, test_list), ) return splits
5,357,006
def execute(command, shell=True):
    """
    Execute command using os package and return output to log file

    :param command: The command to be executed
    :type command: str
    :param shell: Takes either True or False
    :type shell: boolean
    :return: Run the command in the background and save the output to the logging file.
    """
    loginfo(command)
    p = Popen(command.split(), stderr=PIPE, stdout=PIPE)
    output, error = p.communicate()
    # communicate() returns bytes, so decode (not encode) before logging.
    if output != b"":
        loginfo(output.decode('utf-8'))
    if error != b"":
        logwarning(error.decode('utf-8'))
5,357,007
def rotateContoursAbout(contours, about, degrees=90, ccw=True):
    """\
    Rotate the given contours the given number of degrees about the point about
    in a clockwise or counter-clockwise direction.
    """
    rt = Transform.rotationAbout(about, degrees, ccw)
    return rt.applyToContours(contours)
5,357,008
def coordinate_addition(v, b, h, w, A, B, psize):
    """
    Shape:
        Input:  (b, H*W*A, B, P*P)
        Output: (b, H*W*A, B, P*P)
    """
    assert h == w
    v = v.view(b, h, w, A, B, psize)
    coor = torch.arange(h, dtype=torch.float32) / h
    coor_h = torch.cuda.FloatTensor(1, h, 1, 1, 1, psize).fill_(0.)
    coor_w = torch.cuda.FloatTensor(1, 1, w, 1, 1, psize).fill_(0.)
    coor_h[0, :, 0, 0, 0, 0] = coor
    coor_w[0, 0, :, 0, 0, 1] = coor
    v = v + coor_h + coor_w
    v = v.view(b, h * w * A, B, psize)
    return v
5,357,009
def ticket() -> str:
    """Generate the ticket parameter required for requests to the Ebai (饿百) API."""
    return str(uuid.uuid1()).upper()
5,357,010
def create_WchainCNOT_layered_ansatz(qc: qiskit.QuantumCircuit,
                                     thetas: np.ndarray,
                                     num_layers: int = 1):
    """Create WchainCNOT layered ansatz

    Args:
        - qc (qiskit.QuantumCircuit): init circuit
        - thetas (np.ndarray): parameters
        - num_layers (int): number of layers

    Returns:
        - qiskit.QuantumCircuit
    """
    n = qc.num_qubits
    if not isinstance(num_layers, int):
        num_layers = num_layers['num_layers']
    if len(thetas) != num_layers * (n * 3):
        raise Exception(
            'Number of parameters must be equal n_layers * num_qubits * 3')
    for i in range(0, num_layers):
        phis = thetas[i * (n * 3):(i + 1) * (n * 3)]
        qc = create_WchainCNOT(qc)
        qc.barrier()
        qc = create_rz_nqubit(qc, phis[:n])
        qc = create_rx_nqubit(qc, phis[n:n * 2])
        qc = create_rz_nqubit(qc, phis[n * 2:n * 3])
    return qc
5,357,011
def test_simple_cases(testdir):
    """Verify a simple passing test and a simple failing test.
    The failing test is marked as xfail to have it skipped."""
    testdir.makepyfile(
        """
        import pytest
        from seleniumbase import BaseCase

        class MyTestCase(BaseCase):
            def test_passing(self):
                self.assert_equal('yes', 'yes')

            @pytest.mark.xfail
            def test_failing(self):
                self.assert_equal('yes', 'no')
        """
    )
    result = testdir.inline_run("--headless", "--rs")
    assert result.matchreport("test_passing").passed
    assert result.matchreport("test_failing").skipped
5,357,012
def birth_brander(): """ This pipeline operator will add or update a "birth" attribute for passing individuals. If the individual already has a birth, just let it float by with the original value. If it doesn't, assign the individual the current birth ID, and then increment the global, stored birth count. We don't increment a birth ID in the ctor because that overall birth count will bloat due to clone operations. Inserting this operator into the pipeline will ensure that each individual that passes through is properly "branded" with a unique birth ID. However, care must be made to ensure that the initial population is similarly branded. Provides: * brand_population() to brand an entire population all at once, which is useful for branding initial populations. * brand() for explicitly branding a single individual :param next_thing: preceding individual in the pipeline :return: branded individual """ # incremented with each birth num_births = itertools.count() # sometimes next_thing is a population, so we need this to track that # the next individual in the population iterator = None def brand(individual): """ brand the given individual :param individual: to be branded :return: branded individual """ if not hasattr(individual, "birth"): # Only assign a birth ID if they don't already have one individual.birth = next(num_births) return individual def brand_population(population): """ We want to brand an entire population in one go Usually used to brand an initial population is one shot. :param population: to be branded :return: branded population """ return [brand(i) for i in population] def do_birth_branding(next_thing): """ This has the flexibility of being inserted in a pipeline such that the preceding pipeline is a population or a generator that provides an individual. It'll flexibly handle either situation. :param next_thing: either the next individual in the pipeline or a population of individuals to be branded :return: branded individual """ nonlocal num_births nonlocal iterator while True: if is_iterable(next_thing): # We're being passed in a single individual in a pipeline next_thing = next(next_thing) else: # We're being passed a test_sequence/population if iterator is None: iterator = iter(next_thing) next_thing = next(iterator) next_thing = brand(next_thing) yield next_thing do_birth_branding.brand_population = brand_population return do_birth_branding
5,357,013
def find_center_vo(tomo, ind=None, smin=-50, smax=50, srad=6, step=0.5, ratio=0.5, drop=20, smooth=True): """ Find rotation axis location using Nghia Vo's method. :cite:`Vo:14`. Parameters ---------- tomo : ndarray 3D tomographic data. ind : int, optional Index of the slice to be used for reconstruction. smin, smax : int, optional Coarse search radius. Reference to the horizontal center of the sinogram. srad : float, optional Fine search radius. step : float, optional Step of fine searching. ratio : float, optional The ratio between the FOV of the camera and the size of object. It's used to generate the mask. drop : int, optional Drop lines around vertical center of the mask. smooth : bool, optional Whether to apply additional smoothing or not. Returns ------- float Rotation axis location. """ tomo = dtype.as_float32(tomo) if ind is None: ind = tomo.shape[1] // 2 _tomo = tomo[:, ind, :] # Reduce noise by smooth filters. Use different filters for coarse and fine search _tomo_cs = ndimage.filters.gaussian_filter(_tomo, (3, 1)) if smooth else _tomo _tomo_fs = ndimage.filters.median_filter(_tomo, (2, 2)) if smooth else _tomo # Coarse and fine searches for finding the rotation center. if _tomo.shape[0] * _tomo.shape[1] > 4e6: # If data is large (>2kx2k) _tomo_coarse = downsample(np.expand_dims(_tomo_cs,1), level=2)[:, 0, :] init_cen = _search_coarse(_tomo_coarse, smin / 4.0, smax / 4.0, ratio, drop) fine_cen = _search_fine(_tomo_fs, srad, step, init_cen*4, ratio, drop) else: init_cen = _search_coarse(_tomo_cs, smin, smax, ratio, drop) fine_cen = _search_fine(_tomo_fs, srad, step, init_cen, ratio, drop) logger.debug('Rotation center search finished: %i', fine_cen) return fine_cen
5,357,014
def random_fit_nonnegative(values, n):
    """
    Generates n random values using a normal distribution fitted from values used as argument.
    Returns only non-negative values.

    :param values: array/list to use as model for the random data
    :param n: number of random elements to return
    :returns: an array of n random non-negative numbers
    """
    values = np.array(values)
    mean = np.mean(values)
    sd = np.std(values)

    random_values = np.empty(0)
    offset = 0.05  # 5% offset to compensate values less than 0
    while len(random_values) < n:
        random_values = np.round(np.random.normal(mean, sd, round(n * (1 + offset))))
        random_values = random_values[random_values >= 0]
        # If the while loop check fails, next time will try with a larger offset
        offset *= 2

    # slice n first elements and shape the array to int
    return random_values[:n].astype("int")
5,357,015
def passthrough(world1, world2, signaling_pipe): """ Simple passthrough filter: wait for changes on a world world1 and propagate these changes to world world2. """ name = "passthrough_filter_%s_to_%s" % (world1, world2) with underworlds.Context(name) as ctx: world1 = ctx.worlds[world1] world2 = ctx.worlds[world2] try: print("Waiting for changes...") while not signaling_pipe.poll(): #print("%f -- %s waiting" % (time.time(), name)) change = world1.scene.waitforchanges(0.5) #print("%f -- %s Done waiting (last change: %s)" % (time.time(), name, str(change))) if change is not None: id, op = change #print("%f -- propagating from %s to %s" % (time.time(), name, world1, world2)) world2.scene.update_and_propagate(world1.scene.nodes[id]) change = None except Exception as e: import traceback traceback.print_exc() print("Stopping passthrough") print("Passthrough stopped")
5,357,016
def connected_components(weak_crossings=None, strong_crossings=None, probe_adjacency_list=None, join_size=None, channels=None): """Find all connected components in binary arrays of threshold crossings. Parameters ---------- weak_crossings : array `(n_samples, n_channels)` array with weak threshold crossings strong_crossings : array `(n_samples, n_channels)` array with strong threshold crossings probe_adjacency_list : dict A dict `{channel: [neighbors]}` channels : array An (n_channels,) array with a list of all non-dead channels join_size : int The number of samples defining the tolerance in time for finding connected components Returns ------- A list of lists of pairs `(samp, chan)` of the connected components in the 2D array `weak_crossings`, where a pair is adjacent if the samples are within `join_size` of each other, and the channels are adjacent in `probe_adjacency_list`, the channel graph. Note ---- The channel mapping assumes that column #i in the data array is channel #i in the probe adjacency graph. """ if probe_adjacency_list is None: probe_adjacency_list = {} if channels is None: channels = [] # If the channels aren't referenced at all but exist in 'channels', add a # trivial self-connection so temporal floodfill will work. If this channel # is dead, it should be removed from 'channels'. probe_adjacency_list.update({i: {i} for i in channels if not probe_adjacency_list.get(i)}) # Make sure the values are sets. probe_adjacency_list = {c: set(cs) for c, cs in probe_adjacency_list.items()} if strong_crossings is None: strong_crossings = weak_crossings assert weak_crossings.shape == strong_crossings.shape # Set of connected component labels which contain at least one strong # node. strong_nodes = set() n_s, n_ch = weak_crossings.shape join_size = int(join_size or 0) # An array with the component label for each node in the array label_buffer = np.zeros((n_s, n_ch), dtype=np.int32) # Component indices, a dictionary with keys the label of the component # and values a list of pairs (sample, channel) belonging to that component comp_inds = {} # mgraph is the channel graph, but with edge node connected to itself # because we want to include ourself in the adjacency. Each key of the # channel graph (a dictionary) is a node, and the value is a set of nodes # which are connected to it by an edge mgraph = {} for source, targets in probe_adjacency_list.items(): # we add self connections mgraph[source] = targets.union([source]) # Label of the next component c_label = 1 # For all pairs sample, channel which are nonzero (note that numpy .nonzero # returns (all_i_s, all_i_ch), a pair of lists whose values at the # corresponding place are the sample, channel pair which is nonzero. The # lists are also returned in sorted order, so that i_s is always increasing # and i_ch is always increasing for a given value of i_s. izip is an # iterator version of the Python zip function, i.e. does the same as zip # but quicker. zip(A,B) is a list of all pairs (a,b) with a in A and b in B # in order (i.e. (A[0], B[0]), (A[1], B[1]), .... In conclusion, the next # line loops through all the samples i_s, and for each sample it loops # through all the channels. for i_s, i_ch in zip(*weak_crossings.nonzero()): # The next two lines iterate through all the neighbours of i_s, i_ch # in the graph defined by graph in the case of edges, and # j_s from i_s-join_size to i_s. 
for j_s in range(i_s - join_size, i_s + 1): # Allow us to leave out a channel from the graph to exclude bad # channels if i_ch not in mgraph: continue for j_ch in mgraph[i_ch]: # Label of the adjacent element. adjlabel = label_buffer[j_s, j_ch] # If the adjacent element is nonzero we need to do something. if adjlabel: curlabel = label_buffer[i_s, i_ch] if curlabel == 0: # If current element is still zero, we just assign # the label of the adjacent element to the current one. label_buffer[i_s, i_ch] = adjlabel # And add it to the list for the labelled component. comp_inds[adjlabel].append((i_s, i_ch)) elif curlabel != adjlabel: # If the current element is unequal to the adjacent # one, we merge them by reassigning the elements of the # adjacent component to the current one. # samps_chans is an array of pairs sample, channel # currently assigned to component adjlabel. samps_chans = np.array(comp_inds[adjlabel], dtype=np.int32) # samps_chans[:, 0] is the sample indices, so this # gives only the samp,chan pairs that are within # join_size of the current point. # TODO: is this the right behaviour? If a component can # have a width bigger than join_size I think it isn't! samps_chans = samps_chans[i_s - samps_chans[:, 0] <= join_size] # Relabel the adjacent samp,chan points with current # label. samps, chans = samps_chans[:, 0], samps_chans[:, 1] label_buffer[samps, chans] = curlabel # Add them to the current label list, and remove the # adjacent component entirely. comp_inds[curlabel].extend(comp_inds.pop(adjlabel)) # Did not deal with merge condition, now fixed it # seems... # WARNING: might this "in" incur a performance hit # here...? if adjlabel in strong_nodes: strong_nodes.add(curlabel) strong_nodes.remove(adjlabel) # NEW: add the current component label to the set of all # strong nodes, if the current node is strong. if curlabel > 0 and strong_crossings[i_s, i_ch]: strong_nodes.add(curlabel) if label_buffer[i_s, i_ch] == 0: # If nothing is adjacent, we have the beginnings of a new # component, # so we label it, create a new list for the new # component which is given label c_label, # then increase c_label for the next new component afterwards. label_buffer[i_s, i_ch] = c_label comp_inds[c_label] = [(i_s, i_ch)] if strong_crossings[i_s, i_ch]: strong_nodes.add(c_label) c_label += 1 # Only return the values, because we don't actually need the labels. comps = [comp_inds[key] for key in comp_inds.keys() if key in strong_nodes] return comps
5,357,017
def channel_values(channel_freqs, channel_samples, dt, t):
    """Computes value of channels with given frequencies, samples, sample size and current time.

    Args:
        channel_freqs (array): 1d array of channel frequencies
        channel_samples (array): 2d array of channel samples, the first index being time step
            and the second index indexing channel
        dt (float): size of each sample
        t (float): current time

    Returns:
        array: array of channel values at the given time
    """
    sample_idx = int(t // dt)
    if sample_idx >= len(channel_samples):
        sample_idx = len(channel_samples) - 1

    sample_vals = channel_samples[sample_idx]

    return np.real(sample_vals * np.exp(1j * 2 * np.pi * channel_freqs * t))
5,357,018
def bandpass_voxels(realigned_file, bandpass_freqs, sample_period = None): """ Performs ideal bandpass filtering on each voxel time-series. Parameters ---------- realigned_file : string Path of a realigned nifti file. bandpass_freqs : tuple Tuple containing the bandpass frequencies. (LowCutoff, HighCutoff) sample_period : float, optional Length of sampling period in seconds. If not specified, this value is read from the nifti file provided. Returns ------- bandpassed_file : string Path of filtered output (nifti file). """ import os import nibabel as nb import numpy as np def ideal_bandpass(data, sample_period, bandpass_freqs): #Derived from YAN Chao-Gan 120504 based on REST. from scipy.fftpack import fft, ifft # sample_period = T # LowCutoff = 10. # HighCutoff = 15. # data = x def nextpow2(n): x = np.log2(n) return 2**np.ceil(x) sample_freq = 1./sample_period sample_length = data.shape[0] data_p = np.zeros(nextpow2(sample_length)) data_p[:sample_length] = data LowCutoff, HighCutoff = bandpass_freqs if(LowCutoff is None): #No lower cutoff (low-pass filter) low_cutoff_i = 0 elif(LowCutoff > sample_freq/2.): #Cutoff beyond fs/2 (all-stop filter) low_cutoff_i = int(data_p.shape[0]/2) else: low_cutoff_i = np.ceil(LowCutoff*data_p.shape[0]*sample_period).astype('int') if(HighCutoff > sample_freq/2. or HighCutoff is None): #Cutoff beyond fs/2 or unspecified (become a highpass filter) high_cutoff_i = int(data_p.shape[0]/2) else: high_cutoff_i = np.fix(HighCutoff*data_p.shape[0]*sample_period).astype('int') freq_mask = np.zeros_like(data_p, dtype='bool') freq_mask[low_cutoff_i:high_cutoff_i+1] = True freq_mask[data_p.shape[0]-high_cutoff_i:data_p.shape[0]+1-low_cutoff_i] = True f_data = fft(data_p) f_data[freq_mask != True] = 0. data_bp = np.real_if_close(ifft(f_data)[:sample_length]) return data_bp nii = nb.load(realigned_file) data = nii.get_data().astype('float64') mask = (data != 0).sum(-1) != 0 Y = data[mask].T Yc = Y - np.tile(Y.mean(0), (Y.shape[0], 1)) if not sample_period: hdr = nii.get_header() sample_period = float(hdr.get_zooms()[3]) # Sketchy check to convert TRs in millisecond units if sample_period > 20.0: sample_period /= 1000.0 print 'Frequency filtering using sample period: ', sample_period, 'sec' Y_bp = np.zeros_like(Y) for j in range(Y.shape[1]): Y_bp[:,j] = ideal_bandpass(Yc[:,j], sample_period, bandpass_freqs) data[mask] = Y_bp.T img = nb.Nifti1Image(data, header=nii.get_header(), affine=nii.get_affine()) bandpassed_file = os.path.join(os.getcwd(), 'bandpassed_demeaned_filtered.nii.gz') img.to_filename(bandpassed_file) return bandpassed_file
5,357,019
def get_small_corpus(num=10000):
    """
    Get a small text corpus, used for debugging the network model
    :param num: the first num/2 couplets of the corpus
    :return: by default, a list of the first 500 couplets (1,000 lines)
    """
    list = getFile('/total_list.json')
    return list[:num]
5,357,020
def groupby_apply2(df_1, df_2, cols, f, tqdn=True):
    """Apply a function `f` that takes two dataframes and returns a dataframe.
    Groups inputs by `cols`, evaluates for each group, and concatenates the result.
    """
    d_1 = {k: v for k, v in df_1.groupby(cols)}
    d_2 = {k: v for k, v in df_2.groupby(cols)}

    if tqdn:
        from tqdm import tqdm_notebook
        progress = tqdm_notebook
    else:
        progress = lambda x: x

    arr = []
    for k in progress(d_1):
        arr.append(f(d_1[k], d_2[k]))

    return pd.concat(arr)
5,357,021
def login(request): """Logs in the user if given credentials are valid""" username = request.data['username'] password = request.data['password'] try: user = User.objects.get(username=username) except: user = None if user is not None: encoded = user.password hasher = PBKDF2PasswordHasher() login_valid = hasher.verify(password, encoded) if login_valid: key = username + str(datetime.datetime.now()) key = hasher.encode(key, 'key', 10) life = datetime.datetime.now() + datetime.timedelta(hours=14) timezone = pytz.timezone("America/Bogota") life_aware = timezone.localize(life) loginsession = LoginSession(key=key, life=life_aware, user=user) loginsession.save() request.session['loginsession'] = key data = { 'success': True, 'key': key } return Response(data, status=status.HTTP_200_OK, content_type='application/json') data = { 'success': False, 'message':"Nombre de usuario o contraseña incorrectos" } return Response(data, status=status.HTTP_200_OK, content_type='application/json')
5,357,022
def replace_data_in_gbq_table(project_id, table_id, complete_dataset):
    """ replacing data in Google Cloud Table """
    complete_dataset.to_gbq(
        destination_table=table_id,
        project_id=project_id,
        credentials=credentials,
        if_exists="replace",
    )
    return None
5,357,023
def default_pubkey_inner(ctx):
    """Default expression for "pubkey_inner": tap.inner_pubkey."""
    return get(ctx, "tap").inner_pubkey
5,357,024
def presenter(poss, last_move):
    """ Present the choices to the user. """
    prop = "CHOIX :" + "\n"
    prop += " espace : arrière" + "\n"
    prop += " entrée : automatique" + "\n"
    prop += " autre : hasard" + "\n"
    for i, p in enumerate(poss):
        star = " "
        if last_move == p:
            star = "*"
        prop += " {}{}:{}\n".format(star, i, p)
    prop += "\n" * (8 - len(poss))
    print(prop)
5,357,025
def assert_array_almost_equal(x: numpy.ndarray, y: List[complex]):
    """
    usage.scipy: 6
    """
    ...
5,357,026
def quantize_8(image):
    """Converts and quantizes an image to 2^8 discrete levels in [0, 1]."""
    q8 = tf.image.convert_image_dtype(image, tf.uint8, saturate=True)
    return tf.cast(q8, tf.float32) * (1.0 / 255.0)
5,357,027
def get_clients(wlc, *vargs, **kvargs):
    """
    create a single dictionary containing information
    about all associated stations.
    """
    rsp = wlc.rpc.get_stat_user_session_status()
    ret_data = {}
    for session in rsp.findall('.//USER-SESSION-STATUS'):
        locstat = session.find('.//USER-LOCATION-MEMBER')
        ret_data[session.get('mac-addr')] = dict(session.attrib)
        ret_data[session.get('mac-addr')].update(locstat.attrib)
    return ret_data
5,357,028
def coalmine(eia923_dfs, eia923_transformed_dfs): """Transforms the coalmine_eia923 table. Transformations include: * Remove fields implicated elsewhere. * Drop duplicates with MSHA ID. Args: eia923_dfs (dict): Each entry in this dictionary of DataFrame objects corresponds to a page from the EIA923 form, as reported in the Excel spreadsheets they distribute. eia923_transformed_dfs (dict): A dictionary of DataFrame objects in which pages from EIA923 form (keys) correspond to normalized DataFrames of values from that page (values). Returns: dict: eia923_transformed_dfs, a dictionary of DataFrame objects in which pages from EIA923 form (keys) correspond to normalized DataFrames of values from that page (values). """ # These are the columns that we want to keep from FRC for the # coal mine info table. coalmine_cols = ['mine_name', 'mine_type', 'state', 'county_id_fips', 'mine_id_msha'] # Make a copy so we don't alter the FRC data frame... which we'll need # to use again for populating the FRC table (see below) cmi_df = eia923_dfs['fuel_receipts_costs'].copy() # Keep only the columns listed above: cmi_df = _coalmine_cleanup(cmi_df) cmi_df = cmi_df[coalmine_cols] # If we actually *have* an MSHA ID for a mine, then we have a totally # unique identifier for that mine, and we can safely drop duplicates and # keep just one copy of that mine, no matter how different all the other # fields associated with the mine info are... Here we split out all the # coalmine records that have an MSHA ID, remove them from the CMI # data frame, drop duplicates, and then bring the unique mine records # back into the overall CMI dataframe... cmi_with_msha = cmi_df[cmi_df['mine_id_msha'] > 0] cmi_with_msha = cmi_with_msha.drop_duplicates(subset=['mine_id_msha', ]) cmi_df.drop(cmi_df[cmi_df['mine_id_msha'] > 0].index) cmi_df.append(cmi_with_msha) cmi_df = cmi_df.drop_duplicates(subset=['mine_name', 'state', 'mine_id_msha', 'mine_type', 'county_id_fips']) # drop null values if they occur in vital fields.... cmi_df.dropna(subset=['mine_name', 'state'], inplace=True) # we need an mine id to associate this coalmine table with the frc # table. In order to do that, we need to create a clean index, like # an autoincremeted id column in a db, which will later be used as a # primary key in the coalmine table and a forigen key in the frc table # first we reset the index to get a clean index cmi_df = cmi_df.reset_index() # then we get rid of the old index cmi_df = cmi_df.drop(labels=['index'], axis=1) # then name the index id cmi_df.index.name = 'mine_id_pudl' # then make the id index a column for simpler transferability cmi_df = cmi_df.reset_index() cmi_df = PUDL_META.get_resource("coalmine_eia923").encode(cmi_df) eia923_transformed_dfs['coalmine_eia923'] = cmi_df return eia923_transformed_dfs
5,357,029
def _throw_object_x_at_y(): """ Interesting interactions: * If anything is breakable :return: """ all_pickupable_objects_x = env.all_objects_with_properties({'pickupable': True}) x_weights = [10.0 if (x['breakable'] or x['mass'] > 4.0) else 1.0 for x in all_pickupable_objects_x] if len(all_pickupable_objects_x) == 0: raise ValueError('No pickupable objects') all_objects_y = env.all_objects_with_properties({'pickupable': True}) y_weights = [10.0 if (y['breakable'] and not y['pickupable']) else ( 4.0 if y['breakable'] else 1.0) for y in all_objects_y] object_x = all_pickupable_objects_x[_weighted_choice(x_weights)] object_y = all_objects_y[_weighted_choice(y_weights)] if object_x['objectId'] == object_y['objectId']: raise ValueError('objects are the same?') ##################### hardness_options = {'softly': 10.0, 'normally': 100.0, 'aggressively': 1000.0} hardness = random.choice(sorted(hardness_options.keys())) renv = RecordingEnv(env, text=f'Throw $1 at $2 {hardness}.', main_object_ids=(object_x['objectId'], object_y['objectId']) ) s_a = pickup_object(renv, object_x['objectId'], navigate=True) print("Pickup {} succeeds".format(object_x['objectId']), flush=True) path2use = path_to_object(renv.env, object_y, angle_noise=0, dist_to_obj_penalty=0.1) while len(path2use) > 0 and path2use[-1]['action'].startswith(('Rotate', 'Look')): path2use.pop(-1) for p in path2use: renv.step(p) # Teleport, throw, then snap back to grid # Face object old_pos = renv.env.get_agent_location() new_pos = {k: v for k, v in old_pos.items()} new_pos['rotation'] = rotation_angle_to_object(object_y, renv.env.get_agent_location()) new_pos['horizon'] = horizon_angle_to_object(object_y, renv.env.get_agent_location()) renv.env.teleport_agent_to(**new_pos, ignore_y_diffs=True, only_initially_reachable=False) if not renv.env.last_action_success: raise ValueError("teleport failed") if renv.env.get_agent_location()['y'] < -10: raise ValueError("negative coords") s_b = renv.step(dict(action='ThrowObject', moveMagnitude=hardness_options[hardness], forceAction=True)) # If something broke then things are interesting is_interesting = s_b and any([(x['isBroken'] or 'Cracked' in x['objectType']) for x in renv.new_items.values()]) renv.env.teleport_agent_to(**old_pos, ignore_y_diffs=True) return renv, is_interesting
5,357,030
def concatenate_best_aligned_pus(peeling_level, aligned_pu_pdb, all_pu_ref):
    """
    Write the full PDB of the best aligned PUs by concatenating them in the right order.

    Args:
        peeling_level (int): Actual peeling level
        aligned_pu_pdb (str): Path to the PDB into which will be written all the best aligned PUs
        all_pu_ref (list of tuples): Full list of the PU bounds of the actual peeling level
    """
    open_mode = "w" if os.path.isfile(aligned_pu_pdb) else "a"
    with open(aligned_pu_pdb, open_mode) as f_out:
        # Loop over the PUs
        for pu_index in range(len(all_pu_ref)):
            # Find PDB of the best PU for this index
            aln_max_pu_file = "tmp/{}_aln_max_pu_{}.pdb".format(peeling_level + 1, pu_index + 1)
            # Concatenate to the full PDB
            with open(aln_max_pu_file, "r") as f_in:
                for line in f_in:
                    f_out.write(line)
5,357,031
def GraficarConstantesAsintoticas(historiaCABIS, historiaCANR, historiaCANRM, historiaCASEC, funcion):
    """
    Receives the histories of the asymptotic convergence constants; these must be
    valid, otherwise the lack of data will be indicated on the plot.
    It also needs the function, to use it as an identifying title for the plot.
    """
    plt.figure()
    axes = plt.gca()
    axes.set_ylim([0, 1.5])

    graficar(historiaCABIS, 'Biseccion', 'blue')
    graficar(historiaCANR, 'Newton-Raphson', 'red')
    graficar(historiaCANRM, 'NR modificado', 'orange')
    graficar(historiaCASEC, 'Secante', 'green')

    plt.xlabel('Iteración')
    plt.ylabel('Lambda')
    plt.legend(loc='best')
    plt.title(funcion)
    plt.grid(True)
    plt.show()
5,357,032
def plot( X, color_by=None, color_map="Spectral", colors=None, edges=None, axis_limits=None, background_color=None, marker_size=1.0, figsize_inches=(8.0, 8.0), savepath=None, ): """Plot an embedding, in one, two, or three dimensions. This function plots embeddings. The input embedding's dimension should be at most 3. The embedding is visualized as a scatter plot. The points can optionally be colored according to categorical or continuous values, or according to a pre-defined sequence of colors. Additionally, edges can optionally be superimposed. Arguments --------- X: array-like The embedding to plot, of shape ``(n_items, embedding_dim)``. The second dimension should be 1, 2, or 3. color_by: array-like, optional A sequence of values, one for each item, which should be used to color each embedding vector. These values may either be categorical or continuous. For example, if ``n_items`` is 4, .. code:: python3 np.ndarray(['dog', 'cat', 'zebra', 'cat']) np.ndarray([0, 1, 1, 2] np.ndarray([0.1, 0.5, 0.31, 0.99] are all acceptable. The first two are treated as categorical, the third is continuous. A finite number of colors is used when the values are categorical, while a spectrum of colors is used when the values are continuous. color_map: str or matplotlib colormap instance Color map to use when resolving ``color_by`` to colors; ignored when ``color_by`` is None. colors: array-like, optional A sequence of colors, one for each item, specifying the exact color each item should be colored. Each row must represent an RGBA value. Only one of ``color_by`` and ``colors`` should be non-None. edges: array-like, optional List of edges to superimpose over the scatter plot, shape ``(any, 2)`` axis_limits: tuple, optional tuple ``(limit_low, limit_high)`` of axis limits, applied to both the x and y axis. background_color: str, optional color of background marker_size: float, optional size of each point in the scatter plot figsize_inches: tuple size of figures in inches: ``(width_inches, height_inches)`` savepath: str, optional path to save the plot. Returns ------- matplotlib.Axes: Axis on which the embedding is plotted. """ if color_by is not None and colors is not None: raise ValueError("Only one of 'color_by` and `colors` can be non-None") ax = _plot( X=X, color_by=color_by, cmap=color_map, colors=colors, edges=edges, lim=axis_limits, background_color=background_color, s=marker_size, figsize=figsize_inches, ) if savepath is not None: plt.savefig(savepath) return ax
5,357,033
def test_dqn_pong():
    """Test tf/dqn_pong.py with reduced replay buffer size for reduced memory
    consumption.
    """
    env = os.environ.copy()
    env['GARAGE_EXAMPLE_TEST_N_EPOCHS'] = '1'
    assert subprocess.run(
        [str(EXAMPLES_ROOT_DIR / 'tf/dqn_pong.py'), '--buffer_size', '5'],
        check=False,
        env=env).returncode == 0
5,357,034
def CausalConvIntSingle(val, time, kernel):
    """
    Computing convolution of time varying data with given kernel function.
    """
    ntime = time.size
    dt_temp = np.diff(time)
    dt = np.r_[time[0], dt_temp]
    out = np.zeros_like(val)
    for i in range(1, ntime):
        temp = 0.
        if i == 0:
            temp += val[0] * kernel(time[i] - time[0]) * dt[0] * 0.5
        for k in range(1, i + 1):
            temp += val[k - 1] * kernel(time[i] - time[k - 1]) * dt[k] * 0.5
            temp += val[k] * kernel(time[i] - time[k]) * dt[k] * 0.5
        out[i] = temp
    return out
5,357,035
def sbox1(v):
    """AES inverse S-Box."""
    w = mpc.to_bits(v)
    z = mpc.vector_add(w, B)
    y = mpc.matrix_prod([z], A1, True)[0]
    x = mpc.from_bits(y)**254
    return x
5,357,036
def _get_photon_info_COS(tag, x1d, traceloc='stsci'): """ Add spectral units (wavelength, cross dispersion distance, energy/area) to the photon table in the fits data unit "tag". For G230L, you will get several 'xdisp' columns -- one for each segment. This allows for the use of overlapping background regions. Parameters ---------- tag x1d traceloc Returns ------- xdisp, order """ if x1d is not None: xd, xh = x1d[1].data, x1d[1].header det = tag[0].header['detector'] segment = tag[0].header['segment'] data_list = [] for i,t in enumerate(tag): if t.name != 'EVENTS': continue td,th = t.data, t.header """ Note: How STScI extracts the spectrum is unclear. Using 'y_lower/upper_outer' from the x1d reproduces the x1d gross array, but these results in an extraction ribbon that has a varying height and center -- not the parallelogram that is described in the Data Handbook as of 2015-07-28. The parameters in the xtractab reference file differ from those populated in the x1d header. So, I've punted and stuck with using the x1d header parameters because it is easy and I think it will make little difference for most sources. The largest slope listed in the xtractab results in a 10% shift in the spectral trace over the length of the detector. In general, I should just check to be sure the extraction regions I'm using are reasonable. """ data = [td[s] for s in ['time', 'wavelength', 'epsilon', 'dq', 'pha']] if det == 'NUV': # all "orders" (segments) of the NUV spectra fall on the same detector and are just offset in y, # I'll just duplicate the events for each spectrum segs = [s[-1] for s in xd['segment']] orders = list(range(len(segs))) else: seg = segment[-1] segs = [seg] orders = [0 if seg == 'A' else 1] for order, seg in zip(orders, segs): if not (traceloc == 'stsci' or type(traceloc) in [int, float]) and det == 'NUV': raise NotImplementedError('NUV detector has multiple traces on the same detector, so custom traceloc ' 'has not been implemented.') if traceloc == 'stsci': yspec = xh['SP_LOC_'+seg] elif traceloc == 'median': Npixx = th['talen2'] x, y = td['xfull'], td['yfull'] yspec = _median_trace(x, y, Npixx, 8) elif traceloc == 'lya': Npixy = th['talen3'] yspec = _lya_trace(td['wavelength'], td['yfull'], Npixy) elif type(traceloc) in [int, float]: yspec = float(traceloc) else: raise ValueError('traceloc={} not recognized.'.format(traceloc)) xdisp = td['yfull'] - yspec order_vec = _np.ones_like(xdisp, 'i2')*order if det == 'NUV': w = data[1] keep = (xdisp > -15.) & (xdisp < 15.) x = td['xfull'] xref, wref = x[keep], w[keep] isort = _np.argsort(xref) xref, wref = xref[isort], wref[isort] wnew = _np.interp(x, xref, wref) data_list.append(data[:1] + [wnew] + data[2:] + [xdisp, order_vec]) else: data_list.append(data + [xdisp, order_vec]) data = list(map(_np.hstack, list(zip(*data_list)))) return data
5,357,037
def maximum_value(tab):
    """
    brief: return maximum value of the list
    args:
        tab: a list of numeric values; expects at least one positive value
    return:
        the max value of the list
        the index of the max value
    raises:
        ValueError if expected a list as input
        ValueError if no positive value found
    """
    if not isinstance(tab, list):
        raise ValueError('Expected a list as input')
    valMax = 0.0
    valMaxIndex = -1
    nPositiveValues = 0
    for i in range(len(tab)):
        if tab[i] >= 0 and tab[i] > valMax:
            valMax = float(tab[i])
            valMaxIndex = i
            nPositiveValues += 1
    if nPositiveValues <= 0:
        raise ValueError('No positive value found')
    return valMax, valMaxIndex
5,357,038
def writer(q: Queue):
    """Receives messages from queue and writes to file"""
    while True:
        try:
            data = q.get()
            logger.info(data)
            if data.get("service_exit", False):
                raise SerialDevicePoolError("Restarting device pool")
            name = data["name"]
            row = data["row"]
            row["time"] = row["time"].isoformat()
            date = row["time"][0:10]
            path = os.path.join(DATAPATH, name, f"{date}.csv")
            should_write_header = not os.path.exists(path)
            os.makedirs(os.path.dirname(path), exist_ok=True)
            with open(path, "a") as f:
                writer = csv.DictWriter(f, fieldnames=row.keys())
                if should_write_header:
                    writer.writeheader()
                writer.writerow(row)
        except (SerialDevicePoolError, KeyboardInterrupt, SystemExit):
            raise
5,357,039
def create_index_from_registry(registry_path, index_path, parser):
    """Generate an index file from the IEEE registry file."""
    oui_parser = parser(registry_path)
    oui_parser.attach(FileIndexer(index_path))
    oui_parser.parse()
5,357,040
def pdf(x, k, loc, scale):
    """
    Probability density function for the Weibull distribution (for minima).

    This is a three-parameter version of the distribution.  The more typical
    two-parameter version has just the parameters k and scale.
    """
    with mpmath.extradps(5):
        x = mpmath.mpf(x)
        k, loc, scale = _validate_params(k, loc, scale)
        if x == loc:
            if k < 1:
                return mpmath.mp.inf
            elif k == 1:
                return 1/scale
            else:
                return mpmath.mp.zero
        if x < loc:
            return mpmath.mp.zero
        return mpmath.exp(logpdf(x, k, loc, scale))
5,357,041
def add_flight():
    """Allows users to add flights."""
    if request.method == "GET":
        return render_template("add.html", airports=AIRPORTS)
    else:
        # Move request.form into a dictionary that's a bit shorter to access than request.form
        form = dict(request.form)

        # Change hour and minute into integers
        form["hour"] = int(form["hour"])
        form["minute"] = int(form["minute"])

        # TODO: Return error message if hour not in valid 0-23 range
        # TODO: Return error message if minute not in valid 0-59 range
        # TODO: Return error message if either airport not in AIRPORTS, or if they're equal

        # Insert into database
        insert_flight(db, form)

        # TODO: Redirect user to homepage
        return "TODO"
5,357,042
def rescale_column_test(img, img_shape, gt_bboxes, gt_label, gt_num): """rescale operation for image of eval""" img_data, scale_factor = mmcv.imrescale(img, (config.img_width, config.img_height), return_scale=True) if img_data.shape[0] > config.img_height: img_data, scale_factor2 = mmcv.imrescale(img_data, (config.img_height, config.img_height), return_scale=True) scale_factor = scale_factor*scale_factor2 pad_h = config.img_height - img_data.shape[0] pad_w = config.img_width - img_data.shape[1] assert ((pad_h >= 0) and (pad_w >= 0)) pad_img_data = np.zeros((config.img_height, config.img_width, 3)).astype(img_data.dtype) pad_img_data[0:img_data.shape[0], 0:img_data.shape[1], :] = img_data img_shape = np.append(img_shape, (scale_factor, scale_factor)) img_shape = np.asarray(img_shape, dtype=np.float32) return (pad_img_data, img_shape, gt_bboxes, gt_label, gt_num)
5,357,043
async def start_workers( search_requests: Union[List[SearchIndexRequest], asyncio.Queue] ) -> Set[str]: """Runs the pipeline using asyncio concurrency with three main coroutines: - get results: fetch the search requests queue, perform the search and output the results - download and parse the body: fetch the results queue, download and parse the body and meta from S3 - persist the data: fetch the 'to_persist' queue and serialize the data to ndjson files """ results_queue = asyncio.Queue(maxsize=settings.MAX_RESULTS_QUEUE_SIZE) to_persist_queue = asyncio.Queue(maxsize=settings.MAX_PERSIST_QUEUE_SIZE) search_end_event = asyncio.Event() download_end_event = asyncio.Event() if isinstance(search_requests, asyncio.Queue): search_requests_queue = search_requests else: search_requests_queue = asyncio.Queue() for request in search_requests: await search_requests_queue.put(request) num_search_requests = search_requests_queue.qsize() logger.info( f"Starting pipeline. Total of {num_search_requests} search index requests to process." ) async with create_client() as client: gateway = CDXGateway(client) search_indexes = SearchIndexesExecutor( gateway, results_queue, search_requests_queue ) search_indexes_task = asyncio.create_task(search_indexes.run()) download_task = asyncio.create_task( download_executor( results_queue, to_persist_queue, client, search_end_event, ) ) store_results_task = asyncio.create_task( store_results(to_persist_queue, download_end_event, os.getpid()) ) while not search_indexes_task.done(): await asyncio.sleep(1) logger.debug( f"Search index requests pending: {search_requests_queue.qsize()}" ) else: search_end_event.set() while not download_task.done(): await asyncio.sleep(1) else: download_end_event.set() while not store_results_task.done(): await asyncio.sleep(1) for task in [search_indexes_task, download_task, store_results_task]: exc = task.exception() if exc: logger.exception(exc_info=exc) logger.info("Pipeline finished, exiting.") return store_results_task.result()
5,357,044
def resolvability_query(m, walks_):
    """
    :param m: cost matrix
    :param walks_: list of 0-percolation followed by its index of redundancy
                   as returned by percolation_finder
    :return: M again untouched, followed by the list of $0$-percolation with
             minimal index of redundancy, and with a flag, True if the minimal
             index is 0 and so we have already our solution, False otherwise.
    """
    min_redundancy = np.min(walks_[1::2])
    filtered_walks = [walks_[i] for i in list(range(len(walks_)))[::2]
                      if walks_[i + 1] == min_redundancy]
    if min_redundancy == 0:
        flag = True
    else:
        flag = False
    return [m, filtered_walks, flag]
5,357,045
def teardown_module():
    # type: () -> None
    """
    Removes any created stats files, if any.
    """
    for stats_file in TEST_STATS_FILES:
        if os.path.exists(stats_file):
            os.remove(stats_file)
5,357,046
def lock_parent_directory(filename, timeout=10):
    """
    Context manager that acquires a lock on the parent directory of the given
    file path.  This will block until the lock can be acquired, or the timeout
    time has expired (whichever occurs first).

    :param filename: file path of the parent directory to be locked
    :param timeout: timeout (in seconds)
    """
    return lock_path(os.path.dirname(filename), timeout=timeout)
5,357,047
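A minimal usage sketch for the lock_parent_directory helper above, assuming (as the docstring implies) that lock_path returns a context manager. The file path and append_entry helper are hypothetical, for illustration only:

# Hypothetical example: serialize writers that touch files in the same directory.
filename = "/srv/node/sda1/containers/db.pending"  # illustrative path
with lock_parent_directory(filename, timeout=5):
    # The parent directory is locked here, so it is safe to create or
    # rename files inside it without racing other processes.
    append_entry(filename)  # hypothetical helper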
def test_invalid_domain_names_options(absolute_path): """End-to-End test to check domain names options validation works.""" process = subprocess.Popen( [ 'flake8', '--isolated', '--select', 'WPS', # values from `allowed-domain-names` cannot intersect with # `--forbidden-domain-names` '--allowed-domain-names', 'item,items,handle,visitor', '--forbidden-domain-names', 'handle,visitor,node', absolute_path('fixtures', 'noqa.py'), ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, encoding='utf8', ) _, stderr = process.communicate() assert process.returncode == 1 assert 'ValueError' in stderr assert 'handle' in stderr assert 'visitor' in stderr
5,357,048
def dsh(
    incidence1: float, solar_az1: float, incidence2: float, solar_az2: float
):
    """Returns the Shadow-Tip Distance (dsh) as detailed in Becker et al. (2015).

    The input angles are assumed to be in radians.

    This is defined as the distance between the tips of the shadows in the two
    images for a hypothetical vertical post of unit height.  The "shadow length"
    describes the shadow of a hypothetical pole so it applies whether there are
    actually shadows in the image or not.  It's a simple and consistent
    geometrical way to quantify the difference in illumination.  This quantity
    is computed analogously to dp.
    """

    def shx(inc: float, sunazgnd: float):
        return -1 * math.tan(inc) * math.cos(sunazgnd)

    def shy(inc: float, sunazgnd: float):
        return math.tan(inc) * math.sin(sunazgnd)

    shx1 = shx(incidence1, solar_az1)
    shx2 = shx(incidence2, solar_az2)
    shy1 = shy(incidence1, solar_az1)
    shy2 = shy(incidence2, solar_az2)

    return math.sqrt(math.pow(shx1 - shx2, 2) + math.pow(shy1 - shy2, 2))
5,357,049
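An illustrative call to the dsh function above (angles in radians; the numbers are made up). When both images share the same illumination geometry the shadow tips coincide and dsh is 0:

import math

# Identical incidence and solar azimuth in both images -> zero shadow-tip distance.
print(dsh(math.radians(45), math.radians(90), math.radians(45), math.radians(90)))  # -> 0.0

# Slightly different geometry gives a small positive distance.
print(dsh(math.radians(45), math.radians(90), math.radians(50), math.radians(100)))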
def fix_commitment(mod, g, tmp):
    """
    Fix committed capacity based on number of committed units and unit size
    """
    mod.Commit_Capacity_MW[g, tmp] = \
        mod.fixed_commitment[g, mod.prev_stage_tmp_map[tmp]]
    mod.Commit_Capacity_MW[g, tmp].fixed = True
5,357,050
def similarity(vec1, vec2):
    """Cosine similarity."""
    return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
5,357,051
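A short usage example for the similarity function above (assumes NumPy arrays; the vectors are illustrative):

import numpy as np

vec_a = np.array([1.0, 2.0, 3.0])
vec_b = np.array([2.0, 4.0, 6.0])   # parallel to vec_a, so similarity is 1.0
vec_c = np.array([-3.0, 0.0, 1.0])  # orthogonal to vec_a (dot product is 0)

print(similarity(vec_a, vec_b))  # -> 1.0 (up to floating-point error)
print(similarity(vec_a, vec_c))  # -> 0.0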
def epv00(date1, date2): """ Earth position and velocity, heliocentric and barycentric, with respect to the Barycentric Celestial Reference System. :param date1, date2: TDB as a two-part Julian date. :type date1, date2: float :returns: a tuple of two items: * heliocentric Earth position velocity as a numpy.matrix of shape \ 2x3. * barycentric Earth position/velocity as a numpy.matrix of shape \ 2x3. :raises: :exc:`UserWarning` if the date falls outside the range 1900-2100. .. seealso:: |MANUAL| page 79 """ pvh = _np.asmatrix(_np.zeros(shape=(2,3), dtype=float, order='C')) pvb = _np.asmatrix(_np.zeros(shape=(2,3), dtype=float, order='C')) s = _sofa.iauEpv00(date1, date2, pvh, pvb) if s != 0: _warnings.warn(_epv00_msg[s], UserWarning, 2) return pvh, pvb
5,357,052
def label(Z, n):
    """Correctly label clusters in unsorted dendrogram."""
    uf = LinkageUnionFind(n)
    for i in range(n - 1):
        x, y = int(Z[i, 0]), int(Z[i, 1])
        x_root, y_root = uf.find(x), uf.find(y)
        if x_root < y_root:
            Z[i, 0], Z[i, 1] = x_root, y_root
        else:
            Z[i, 0], Z[i, 1] = y_root, x_root
        Z[i, 3] = uf.merge(x_root, y_root)
5,357,053
def comp_pack(ctx: Context):
    """Force packing resources."""
    for ent in ctx.vmf.by_class['comp_pack']:
        ent.remove()
        for key, value in ent.keys.items():
            # Not important.
            if key in {'classname', 'origin', 'angles', 'hammerid'}:
                continue
            # We allow numeric suffixes for multiple - generic45.
            try:
                res_type = PACK_TYPES[key.rstrip('0123456789').casefold()]
            except KeyError:
                LOGGER.warning(
                    'Unknown resource type: "{}" @ {}',
                    key,
                    ent['origin'],
                )
                res_type = FileType.GENERIC
            ctx.pack.pack_file(value, res_type)
5,357,054
def algorithm_conflict(old_config, new_config):
    """Generate an algorithm configuration conflict"""
    return conflicts.AlgorithmConflict(old_config, new_config)
5,357,055
def nin():
    """
    :return:
    """
    def nin_block(num_channels, kernel_size, strides, padding):
        blk = nn.Sequential()
        blk.add(nn.Conv2D(num_channels, kernel_size, strides, padding, activation='relu'),
                nn.Conv2D(num_channels, kernel_size=1, activation='relu'),
                nn.Conv2D(num_channels, kernel_size=1, activation='relu'))
        return blk

    net = nn.Sequential()
    net.add(nin_block(96, kernel_size=11, strides=4, padding=0),
            nn.MaxPool2D(pool_size=3, strides=2),
            nin_block(256, kernel_size=5, strides=1, padding=2),
            nn.MaxPool2D(pool_size=3, strides=2),
            nin_block(384, kernel_size=3, strides=1, padding=1),
            nn.MaxPool2D(pool_size=3, strides=2),
            nn.Dropout(0.5),
            # There are 10 label classes
            nin_block(10, kernel_size=3, strides=1, padding=1),
            # The global average pooling layer automatically sets the window
            # shape to the height and width of the input
            nn.GlobalAvgPool2D(),
            # Convert the four-dimensional output into a two-dimensional
            # output of shape (batch size, 10)
            nn.Flatten())

    X = nd.random.uniform(shape=(1, 1, 224, 224))
    net.initialize()
    for layer in net:
        X = layer(X)
        print(layer.name, 'output shape:\t', X.shape)

    lr, num_epochs, batch_size, ctx = 0.1, 5, 128, d2l.try_gpu()
    net.initialize(force_reinit=True, ctx=ctx, init=init.Xavier())
    trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': lr})
    train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=224)
    d2l.train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx, num_epochs)
5,357,056
def factorial(n):
    """ Return the product of the integers 1 through n.

    n must be a nonnegative integer.
    """
    return product(range(2, n + 1))
5,357,057
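A quick illustrative check of the factorial helper above, assuming product returns 1 for an empty sequence (which is what makes factorial(0) == 1):

assert factorial(0) == 1    # empty product
assert factorial(1) == 1
assert factorial(5) == 120  # 2 * 3 * 4 * 5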
def nftparams():
    """ Show params of all NFTs
    """
    nfts = Nfts()
    t = PrettyTable(["key", "value"])
    t.align = "l"
    params = nfts.get_nft_params()
    for key in params:
        t.add_row([key, str(params[key])])
    print(t)
5,357,058
def get_status_lines(frames, check_transposed=True):
    """
    Extract status lines from the given frames.

    `frames` can be 2D array (one frame), 3D array (stack of frames, first index is frame number), or list of array.
    Automatically check if the status line is present; return ``None`` if it's not.
    If ``check_transposed==True``, check for the case where the image is transposed (i.e., line becomes a column).
    """
    if isinstance(frames, list):
        return [get_status_lines(f, check_transposed=check_transposed) for f in frames]
    if frames.shape[-1] >= 4:
        lines = _extract_line(frames, True)
        if _check_magic(lines):
            return lines
        lines = _extract_line(frames, False)
        if _check_magic(lines):
            return lines
    if check_transposed:
        tframes = frames.T if frames.ndim == 2 else frames.transpose((0, 2, 1))
        return get_status_lines(tframes, check_transposed=False)
    return None
5,357,059
def returnLendingHistory(*, start, end, limit=None):
    """
    Returns your lending history within a time range.

    :param session: Aiohttp client session object
    :param String api_key: The API key
    :param String secret_key: The API secret key
    :param String start: UNIX timestamp. Every returned value will have a timestamps greater or equal.
    :param String end: UNIX timestamp. Every returned value will have a timestamps lower or equal.
    :param String limit: Maximum number of returned values.
    """
    pass
5,357,060
async def receiver():
    """receive messages with polling"""
    pull = ctx.socket(zmq.PULL)
    pull.connect(url)
    poller = Poller()
    poller.register(pull, zmq.POLLIN)
    while True:
        events = await poller.poll()
        if pull in dict(events):
            print("recving", events)
            msg = await pull.recv_multipart()
            print('recvd', msg)
5,357,061
def quote():
    """Get stock quote."""
    if request.method == "POST":
        quote = lookup(request.form.get("symbol"))
        if quote == None:
            return apology("invalid symbol", 400)
        return render_template("quoted.html", quote=quote)
    # User reached route via GET (as by clicking a link or via redirect)
    else:
        return render_template("quote.html")
5,357,062
def stats(api, containers=None, stream=True):
    """Get container stats container

    When stream is set to true, the raw HTTPResponse is returned.
    """
    path = "/containers/stats"
    params = {'stream': stream}
    if containers is not None:
        params['containers'] = containers

    try:
        response = api.get(path, params=params)
        if stream:
            return response
        return json.loads(str(response.read(), 'utf-8'))
    except errors.NotFoundError as e:
        api.raise_not_found(e, e.response, errors.ContainerNotFound)
5,357,063
def sendTGMessage(prepared_data):
    """
    Prepared data should be json which includes at least `chat_id` and `text`
    """
    message_url = Variables.BOT_URL + prepared_data["reqType"]
    requests.post(message_url, json=prepared_data["data"])
5,357,064
def add_engineered(features):
    """Add engineered features to features dict.

    Args:
        features: dict, dictionary of input features.

    Returns:
        features: dict, dictionary with engineered features added.
    """
    features["londiff"] = features["dropofflon"] - features["pickuplon"]
    features["latdiff"] = features["dropofflat"] - features["pickuplat"]
    features["euclidean"] = tf.math.sqrt(
        features["londiff"]**2 + features["latdiff"]**2)
    return features
5,357,065
def test_3tris():
    """3 triangles"""
    conv = ToPointsAndSegments()
    polygons = [
        [[(0, 0), (1, 0), (0.5, -0.5), (0, 0)]],
        [[(1, 0.5), (2, 0.5), (1.5, 1), (1, 0.5)]],
        [[(2, 0), (3, 0), (2.5, -0.5), (2, 0)]],
    ]
    for polygon in polygons:
        conv.add_polygon(polygon)
    return conv, 24, 16, 8
5,357,066
def test_envvar_windows(
    cd_tmp_path: Path, cp_config: CpConfigTypeDef, monkeypatch: MonkeyPatch
) -> None:
    """Test envvars for Windows."""
    monkeypatch.setattr("platform.system", MagicMock(return_value="Windows"))
    monkeypatch.delenv("MSYSTEM", raising=False)
    cp_config("simple_env_vars", cd_tmp_path)
    runner = CliRunner()
    result0 = runner.invoke(cli, ["envvars"])
    assert result0.exit_code == 0
    assert result0.output == PSH_OUTPUT
    monkeypatch.setenv("MSYSTEM", "MINGW")
    result1 = runner.invoke(cli, ["envvars"])
    assert result1.output == POSIX_OUTPUT
5,357,067
def program_modules_with_functions(module_type, function_templates):
    """ list the programs implementing a given set of functions
    """
    prog_lsts = [program_modules_with_function(module_type, function_template)
                 for function_template in function_templates]
    # get the intersection of all of them
    progs = _reduce(set.intersection, map(set, prog_lsts))
    return tuple(sorted(progs))
5,357,068
def tasmax_below_tasmin(
    tasmax: xarray.DataArray,
    tasmin: xarray.DataArray,
) -> xarray.DataArray:
    """Check if tasmax values are below tasmin values for any given day.

    Parameters
    ----------
    tasmax : xarray.DataArray
    tasmin : xarray.DataArray

    Returns
    -------
    xarray.DataArray, [bool]

    Examples
    --------
    To gain access to the flag_array:

    >>> from xclim.core.dataflags import tasmax_below_tasmin
    >>> ds = xr.open_dataset(path_to_tas_file)
    >>> flagged = tasmax_below_tasmin(ds.tasmax, ds.tasmin)
    """
    tasmax_lt_tasmin = _sanitize_attrs(tasmax < tasmin)
    description = "Maximum temperature values found below minimum temperatures."
    tasmax_lt_tasmin.attrs["description"] = description
    tasmax_lt_tasmin.attrs["units"] = ""
    return tasmax_lt_tasmin
5,357,069
def laplace_attention(q, k, v, scale, normalize):
    """
    Laplace exponential attention

    Parameters
    ----------
    q : torch.Tensor
        Shape (batch_size, m, k_dim)

    k : torch.Tensor
        Shape (batch_size, n, k_dim)

    v : torch.Tensor
        Shape (batch_size, n, v_dim)

    scale : float
        scale in the L1 distance

    normalize : bool
        do the weights sum to 1?

    Returns
    -------
    r : torch.Tensor
        Shape (batch_size, m, v_dim)
    """
    k = k.unsqueeze(1)  # shape [B, 1, n, k_dim]
    q = q.unsqueeze(2)  # shape [B, m, 1, k_dim]
    unnorm_weights = - torch.abs((k - q) / scale)  # shape [B, m, n, k_dim]
    # Average over the key dimension (the original referenced `weights` here
    # before it was defined, which would raise a NameError).
    unnorm_weights = torch.mean(unnorm_weights, dim=-1)  # shape [B, m, n]
    if normalize:
        weight_fn = F.softmax
    else:
        weight_fn = lambda x: 1 + torch.tanh(x)
    weights = weight_fn(unnorm_weights)  # shape [B, m, n]
    r = torch.einsum('bij,bjk->bik', weights, v)  # shape [B, m, v_dim]
    return r
5,357,070
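A shape-check sketch for `laplace_attention`, assuming PyTorch is installed and imported as in the snippet; the tensor sizes are arbitrary.

import torch

q = torch.randn(2, 5, 8)   # (batch_size, m, k_dim)
k = torch.randn(2, 7, 8)   # (batch_size, n, k_dim)
v = torch.randn(2, 7, 16)  # (batch_size, n, v_dim)
r = laplace_attention(q, k, v, scale=1.0, normalize=True)
print(r.shape)  # torch.Size([2, 5, 16])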
def test_range_integrity_check_fails(full_range_stream_fresh, error_msg):
    """
    Adding a range that overlaps the central 'mid-body' of an existing range,
    without first trimming the existing range, makes the existing range
    automatically 'split' its RangeSet to 'give way' to the new range. This is
    the behaviour of RangeDict when a new key's range intersects an existing
    range key: keys remain unique, but it leaves 'loose ends', since the
    singleton RangeSet of the existing range splits into a doublet RangeSet of
    pre- and post- subranges. The integrity check should therefore raise.
    """
    with raises(ValueError, match=error_msg):
        full_range_stream_fresh._ranges.add(rng=Range(4, 6), value=123)
        full_range_stream_fresh.check_range_integrity()
5,357,071
def instantiate(class_name, *args, **kwargs): """Helper to dynamically instantiate a class from a name.""" split_name = class_name.split(".") module_name = split_name[0] class_name = ".".join(split_name[1:]) module = __import__(module_name) class_ = getattr(module, class_name) return class_(*args, **kwargs)
5,357,072
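A quick sketch of `instantiate`, building a standard-library class by its dotted name; note the helper assumes a single top-level module before the first dot.

od = instantiate("collections.OrderedDict", [("a", 1), ("b", 2)])
print(od)  # an OrderedDict built dynamically from its dotted name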
def fawa(pv_or_state, grid=None, levels=None, interpolate=None): """Finite-Amplitude Wave Activity according to Nakamura and Zhu (2010). - If the first parameter is not a `barotropic.State`, `grid` must be specified. - `levels` specifies the number of contours generated for the equivalent latitude zonalization. - By default, FAWA is returned on the computed equivalent latitudes. To obtain FAWA interpolated to a specific set of latitudes, specify these with the `interpolate` parameter. Returns a tuple containing FAWA and its latitude coordinates. """ grid, pv = _get_grid_vars(["pv"], grid, pv_or_state) # Compute zonalized background state of PV qq, yy = grid.zonalize_eqlat(pv, levels=levels, interpolate=None, quad="sptrapz") # Use formulation that integrates PV over areas north of PV # contour/equivalent latitude and then computes difference q_int = np.vectorize(lambda q: grid.quad_sptrapz(pv, pv - q)) y_int = np.vectorize(lambda y: grid.quad_sptrapz(pv, grid.lat - y)) # Normalize by zonal circumference at each latitude fawa = (q_int(qq) - y_int(yy)) / grid.circumference(yy) # Interpolate to a given set of latitudes if specified if interpolate is not None: fawa = np.interp(interpolate, yy, fawa, left=0, right=0) yy = interpolate return fawa, yy
5,357,073
def bmm_update(context, bmm_id, values, session=None): """ Updates Bare Metal Machine record. """ if not session: session = get_session_dodai() session.begin() bmm_ref = bmm_get(context, bmm_id, session=session) bmm_ref.update(values) bmm_ref.save(session=session) return bmm_ref
5,357,074
def to_unified(entry): """ Convert to a unified entry """ assert isinstance(entry, StatementEntry) date = datetime.datetime.strptime(entry.Date, '%d/%m/%Y').date() return UnifiedEntry(date, entry.Reference, method=entry.Transaction_Type, credit=entry.Money_In, debit=entry.Money_Out)
5,357,075
def evaluate(args, model, data): """ This function samples images via Gibbs sampling chain in order to inspect the marginal distribution of the visible variables. Args: args: parse_args input command-line arguments (hyperparameters). model: model to sample from. data: data to measure pseudo_log_likelihood (pll) that model assign to it (if pll is used). """ for e in range(args.n_eval_samples): model.sample_v_marg(epoch=-(e + 1))
5,357,076
def call_crawlers(dataset_list): """ Call crawlers to get latest data. """ for dataset_name in dataset_list: crawler_module = importlib.import_module( 'thousandaire.crawlers.%s' % dataset_name) crawler = crawler_module.Crawler(dataset_name) cur_data = DataLoader([dataset_name]).get_all()[dataset_name] last_date, new_data = crawler.update() if not os.path.isdir(DATA_DIR): os.mkdir(DATA_DIR) data_path = os.path.join(DATA_DIR, dataset_name) with open(data_path, 'wb') as file: for key in new_data: if key in cur_data.keys(): cur_data[key].extend(new_data[key]) else: cur_data[key] = new_data[key] pickle.dump(cur_data, file) crawler.set_last_modified_date(last_date)
5,357,077
def test_tree(): """Trains a Tree""" X, y = getdata() clf = buildtree(X, y) clf.predict([[2., 2.]])
5,357,078
def edit_profile(): """ POST endpoint that edits the student profile. """ user = get_current_user() json = g.clean_json user.majors = Major.objects.filter(id__in=json['majors']) user.minors = Minor.objects.filter(id__in=json['minors']) user.interests = Tag.objects.filter(id__in=json['interests']) user.save() return _fetch_user_profile(user)
5,357,079
def _stringcoll(coll):
    """
    Predicate function to determine whether COLL is a non-empty
    collection (list/tuple) containing only strings.

    Arguments:
    - `coll`: the collection to check

    Return: bool
    Exceptions: None
    """
    if isinstance(coll, (list, tuple)) and coll:
        return len([s for s in coll if isinstance(s, six.string_types)]) == len(coll)
    return False
5,357,080
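A few quick checks of `_stringcoll` (assuming `six` is importable, as the snippet requires).

print(_stringcoll(["a", "b"]))  # True  - non-empty list of strings
print(_stringcoll(("a", 1)))    # False - contains a non-string
print(_stringcoll([]))          # False - empty collection
print(_stringcoll("abc"))       # False - a bare string is not a list/tuple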
def create_user_db_context( database=Database(), *args, **kwargs): """ Create a context manager for an auto-configured :func:`msdss_users_api.tools.create_user_db_func` function. Parameters ---------- database : :class:`msdss_base_database:msdss_base_database.core.Database` Database to use for managing users. *args, **kwargs Additional arguments passed to :func:`msdss_users_api.tools.create_user_db_func`. Return ------ dict Returns a dictionary with the following keys: * ``get_user_db_context`` (:func:`contextlib.asynccontextmanager`): function returned from :func:`contextlib.asynccontextmanager` created from an auto-configured :func:`msdss_users_api.tools.create_user_db_func` function * ``get_user_db`` (func): user db function from :func:`msdss_users_api.tools.create_user_db_func` * ``async_database`` (:class:`databases:databases.Database`): auto-configured :class:`databases:databases.Database` from env vars * ``database_engine`` (:class:`sqlalchemy:sqlalchemy.engine.Engine`): auto-configured :class:`sqlalchemy:sqlalchemy.engine.Engine` from env vars Author ------ Richard Wen <[email protected]> Example ------- .. jupyter-execute:: from msdss_users_api.tools import * results = create_user_db_context() get_user_db_context = results['get_user_db_context'] async_database = results['async_database'] """ # (create_user_db_func_db) Create databases database_engine = database._connection async_database = databases.Database(str(database_engine.url)) # (get_user_db_context_return) Return user db context get_user_db = create_user_db_func(database_engine=database_engine, async_database=async_database, *args, **kwargs) out = dict( get_user_db_context=contextlib.asynccontextmanager(get_user_db), get_user_db=get_user_db, async_database=async_database, database_engine=database_engine ) return out
5,357,081
def config(base_config): """:py:class:`nemo_nowcast.Config` instance from YAML fragment to use as config for unit tests.""" config_file = Path(base_config.file) with config_file.open("at") as f: f.write( textwrap.dedent( """\ file group: allen vhfr fvcom runs: host: arbutus.cloud run types: nowcast x2: results: /nemoShare/MEOPAR/SalishSea/fvcom-nowcast-x2/ forecast x2: results: /nemoShare/MEOPAR/SalishSea/fvcom-forecast-x2/ nowcast r12: results: /nemoShare/MEOPAR/SalishSea/fvcom-nowcast-r12/ results archive: nowcast x2: /opp/fvcom/nowcast-x2/ forecast x2: /opp/fvcom/forecast-x2/ nowcast r12: /opp/fvcom/nowcast-r12/ """ ) ) config_ = nemo_nowcast.Config() config_.load(config_file) return config_
5,357,082
def enable_image_registry_default_route(): """Enables the Image Registry default route with the Custom Resource Definition https://docs.openshift.com/container-platform/latest/registry/configuring-registry-operator.html#registry-operator-default-crd_configuring-registry-operator """ oc_patch_args = [ "patch", "configs.imageregistry.operator.openshift.io/cluster", "--patch", '{"spec":{"defaultRoute":true}}', "--type", "merge", ] execute_oc_command(oc_patch_args)
5,357,083
def check_nodes_in_graph(graph, nodes): """ Validate if nodes are in graph :param graph: graph that should contain nodes :type graph: :graphit:GraphBase :param nodes: nodes to check :type nodes: :py:list :return: True if validation successful :rtype: :py:bool :raises GraphitNodeNotFound """ nodes_not_present = [nid for nid in nodes if nid not in graph.nodes.keys()] if nodes_not_present: raise GraphitNodeNotFound('Nodes not in graph: {0}'.format(repr(nodes_not_present).strip('[]')))
5,357,084
def __check_complete_list(list_, nb_max, def_value):
    """
    Make sure the list is long enough, completing it in place with a default
    value if it is too short.

    :param list_: list to check
    :param nb_max: maximum length of the list
    :param def_value: if the list is too short, it is padded with this value
    :return: boolean, False if the list is too long
    """
    if len(list_) <= nb_max:
        list_.extend([def_value] * (nb_max - len(list_)))
        return True
    else:
        return False
5,357,085
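A usage sketch for `__check_complete_list`, assuming it is defined at module level so the double-underscore name is not mangled; note the list is padded in place.

values = [1, 2]
print(__check_complete_list(values, 5, 0))     # True
print(values)                                  # [1, 2, 0, 0, 0]
print(__check_complete_list([1, 2, 3], 2, 0))  # False - list longer than nb_max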
def _fill_function(func, globals, defaults, closure, dct): """ Fills in the rest of function data into the skeleton function object that were created via _make_skel_func(). """ func.func_globals.update(globals) func.func_defaults = defaults func.func_dict = dct if len(closure) != len(func.func_closure): raise pickle.UnpicklingError("closure lengths don't match up") for i in range(len(closure)): _change_cell_value(func.func_closure[i], closure[i]) return func
5,357,086
def symlink_gfid_to_path(brick, gfid):
    """
    Each directory is symlinked to a file named after its GFID in the
    .glusterfs directory of the brick backend. Using readlink we get
    PARGFID/basename of the dir, and we follow readlink recursively
    until PARGFID is ROOT_GFID.
    """
    if gfid == ROOT_GFID:
        return ""

    out_path = ""
    while True:
        path = os.path.join(brick, ".glusterfs", gfid[0:2], gfid[2:4], gfid)
        path_readlink = os.readlink(path)
        pgfid = os.path.dirname(path_readlink)
        out_path = os.path.join(os.path.basename(path_readlink), out_path)
        if pgfid == "../../00/00/%s" % ROOT_GFID:
            break
        gfid = os.path.basename(pgfid)
    return out_path
5,357,087
def main(): """ Main function """ argument_spec = vmware_argument_spec() argument_spec.update( state=dict(type='str', default='present', choices=['present', 'absent']), datacenter=dict(type='str', required=False, aliases=['datacenter_name']), cluster=dict(type='str', required=True, aliases=['cluster_name']), group_name=dict(type='str', required=True), vms=dict(type='list', elements='str'), hosts=dict(type='list', elements='str') ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[['vms', 'hosts']], required_one_of=[['vms', 'hosts']] ) try: # Create instance of VmwareDrsGroupMemberManager vmware_drs_group = VmwareDrsGroupMemberManager(module=module) vmware_drs_group.manage_drs_group_members() # Set results results = dict(msg=vmware_drs_group.message, failed=False, changed=vmware_drs_group.changed, drs_group_member_info=vmware_drs_group.result) except Exception as error: results = dict(failed=True, msg="Error: %s" % error) if results['failed']: module.fail_json(**results) module.exit_json(**results)
5,357,088
def resize_img(img, size, keep_aspect_ratio=True): """resize image using pillow Args: img (PIL.Image): pillow image object size(int or tuple(in, int)): width of image or tuple of (width, height) keep_aspect_ratio(bool): maintain aspect ratio relative to width Returns: (PIL.Image): pillow image """ if isinstance(size, int): size = (size, size) # get ratio width, height = img.size requested_width = size[0] if keep_aspect_ratio: ratio = width / requested_width requested_height = height / ratio else: requested_height = size[1] size = (int(requested_width), int(requested_height)) img = img.resize(size, resample=PIL.Image.LANCZOS) return img
5,357,089
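A minimal sketch of `resize_img`, assuming Pillow is installed and imported as `PIL` (as the snippet expects).

import PIL.Image

img = PIL.Image.new("RGB", (640, 480))
thumb = resize_img(img, 320)                                   # aspect ratio kept
print(thumb.size)                                              # (320, 240)
square = resize_img(img, (100, 100), keep_aspect_ratio=False)  # forced size
print(square.size)                                             # (100, 100)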
def calculate_clip_from_vd(circuit: DiodeCircuit, v_d: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """ :returns: v_in, v_out """ Rs = circuit.Rs Is = circuit.diode.Is n = circuit.diode.n Vt = circuit.diode.Vt Rp = circuit.Rp Rd = circuit.Rd Id = Is * (np.exp(v_d / (n * Vt)) - 1.0) Vd = v_d if Rp is None: Vin = Vd + (Rd * Id) + (Rs * Id) else: Vin = Vd + (Rd * Id) + (Rs * Id) + (Vd * Rs / Rp) + (Id * Rd * Rs / Rp) Vout = Vd + Id * Rd return Vin, Vout
5,357,090
def make_timeseries(input_files, roi_file, output_dir, labels=None,
                    regressor_files=None, regressors=None, as_voxels=False,
                    discard_scans=None, n_jobs=1, **masker_kwargs):
    """Extract timeseries data from input files using an ROI file to demarcate
    the region(s) of interest. This is the main function of this module.

    Parameters
    ----------
    input_files : list of niimg-like
        List of input NIfTI functional images
    roi_file : str
        Image that contains region mask(s). Can either be a single binary mask
        for a single region, or a numerically labeled atlas file. 0 must
        indicate background (non-region voxels).
    output_dir : str
        Save directory.
    labels : str or list of str
        ROI names which are in order of ascending numeric labels in roi_file.
        Default is None
    regressor_files : list of str, optional
        Confound .csv files for each run. Default is None
    regressors : list of str, optional
        Either a) list of regressor names to select from `regressor_files`
        headers, b) a predefined load_confounds strategy, or c) a list of
        flexible load_confounds strategies. If None, all regressors in
        `regressor_files` are used. Default is None
    as_voxels : bool, optional
        Extract out individual voxel timecourses rather than mean timecourse
        of the ROI, by default False. NOTE: This is only available for binary
        masks, not for atlas images (yet)
    discard_scans : int, optional
        The number of scans to discard at the start of each functional image,
        prior to any sort of extraction and post-processing. This prevents
        unstabilized signals at the start from being included in signal
        standardization, etc.
    n_jobs : int, optional
        Number of processes to use for extraction if parallelization is
        desired. Default is 1 (no parallelization)
    **masker_kwargs
        Keyword arguments for `nilearn.input_data` Masker objects.
    """
    masker = _set_masker(roi_file, input_files, output_dir, as_voxels,
                         **masker_kwargs)

    # set as list of NoneType if no regressor files; makes it easy for
    # iterations
    if regressor_files is None:
        regressor_files = [regressor_files] * len(input_files)

    # no parallelization
    if n_jobs == 1:
        for i, img in enumerate(input_files):
            _mask_and_save(masker, img, output_dir, regressor_files[i],
                           regressors, as_voxels, labels, discard_scans)
    else:
        # repeat parameters are held constant for all parallelized iterations
        args = zip(
            repeat(masker),
            input_files,  # iterate over
            repeat(output_dir),
            regressor_files,  # iterate over, paired with input_files
            repeat(regressors),
            repeat(as_voxels),
            repeat(labels),
            repeat(discard_scans)
        )
        with multiprocessing.Pool(processes=n_jobs) as pool:
            pool.starmap(_mask_and_save, args)
5,357,091
def tex_coord(x, y, n=8): """ Return the bounding vertices of the texture square. """ m = 1.0 / n dx = x * m dy = y * m return dx, dy, dx + m, dy, dx + m, dy + m, dx, dy + m
5,357,092
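A quick check of `tex_coord` for tile (1, 0) on the default 8x8 texture atlas.

print(tex_coord(1, 0))
# (0.125, 0.0, 0.25, 0.0, 0.25, 0.125, 0.125, 0.125)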
def compareImage(renwin, img_fname, threshold=10): """Compares renwin's (a vtkRenderWindow) contents with the image file whose name is given in the second argument. If the image file does not exist the image is generated and stored. If not the image in the render window is compared to that of the figure. This function also handles multiple images and finds the best matching image. """ global _NO_IMAGE if _NO_IMAGE: return w2if = vtk.vtkWindowToImageFilter() w2if.ReadFrontBufferOff() w2if.SetInput(renwin) w2if.Update() try: compareImageWithSavedImage(w2if, img_fname, threshold) except RuntimeError: w2if.ReadFrontBufferOn() compareImageWithSavedImage(w2if, img_fname, threshold) return
5,357,093
def uniindtemp_compute(da: xr.DataArray, thresh: str = "0.0 degC", freq: str = "YS"):
    """Mean departure of temperature from a threshold.

    Subtracts `thresh` (converted to the units of `da`) from the input series
    and returns the mean of that difference resampled at frequency `freq`,
    keeping the input units.
    """
    out = da - convert_units_to(thresh, da)
    out = out.resample(time=freq).mean()
    out.attrs["units"] = da.units
    return out
5,357,094
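A hedged sketch of `uniindtemp_compute` on synthetic daily data; it assumes `xarray`, `pandas`, `numpy`, and xclim's `convert_units_to` are available in the module's namespace, as the snippet implies.

import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range("2000-01-01", periods=365, freq="D")
da = xr.DataArray(np.random.uniform(-10, 25, size=365), dims="time",
                  coords={"time": time}, attrs={"units": "degC"})
out = uniindtemp_compute(da, thresh="0.0 degC", freq="YS")
print(out.values)  # annual mean of (tas - threshold), in degC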
def verify_parentheses(parentheses_string: str) -> bool: """Takes input string of only '{},[],()' and evaluates to True if valid.""" open_parentheses = [] valid_parentheses_set = {'(', ')', '[', ']', '{', '}'} parentheses_pairs = { ')' : '(', ']' : '[', '}' : '{' } if len(parentheses_string) % 2 != 0: return False for character in parentheses_string: if character not in valid_parentheses_set: raise ValueError("Only parentheses may be part of input string.") if character in {'(', '[', '{'}: open_parentheses.append(character) if character in {')', ']', '}'}: if len(open_parentheses) == 0: return False elif open_parentheses[-1] != parentheses_pairs[character]: return False del open_parentheses[-1] if len(open_parentheses) > 0: return False return True
5,357,095
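A few quick checks of `verify_parentheses`.

print(verify_parentheses("({[]})"))  # True  - properly nested
print(verify_parentheses("([)]"))    # False - interleaved pairs
print(verify_parentheses("((("))     # False - unclosed (and odd length)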
def _stableBaselineTrainingAndExecution(env, typeAgent, numberOptions, mode):
    """Function to execute Baseline algorithms."""
    if typeAgent == 2:
        model = A2C(MlpPolicy, env, verbose=1)
    else:
        model = PPO2(MlpPolicy, env, verbose=1)
    print("Training model....")
    startTime = time()
    model.learn(total_timesteps=DEFAULT_TRAINING_RANGE)
    trainingTime = time() - startTime
    print("Model trained in " + str(trainingTime) + ".")
    print("Starting episodes....")
    totalSteps, numberEpisodes, studentTotalScore, projectTotalScore, skillsTotalScore = 0, 0, 0., 0., 0.
    bestResult = []
    bestStudentScore = 0.0
    bestStudentAssigned = 0
    sumStudentAssigned = 0.0
    allStudentsAssigned = []
    allProjectAssignations = []
    allSteps = []
    allResults = []
    allAverageStudentScore = []
    allAverageProjectScore = []
    allAverageSkillsScore = []
    allStudentScores = []
    allProjectScores = []
    progressBar = Bar("-> Execution progress:", max=DEFAULT_EXECUTION_RANGE)
    for i in range(DEFAULT_EXECUTION_RANGE):
        state = env.reset(1)
        steps, reward = 0, 0
        done = False
        print("Execution " + str(i))
        while not done:
            action, _state = model.predict(state)
            state, reward, done, info = env.step(action)
            # env.render()
            steps += 1
        numberEpisodes += 1
        allSteps.append(steps)
        averageStudentScore, averageProjectScore, averageSkillsScore, studentScores, projectScores, studentsAssigned, projectAssignations = env.stepScores()
        allResults.append(env.finalState())
        allAverageStudentScore.append(averageStudentScore)
        allAverageProjectScore.append(averageProjectScore)
        allAverageSkillsScore.append(averageSkillsScore)
        allStudentScores.append(studentScores)
        allProjectScores.append(projectScores)
        allStudentsAssigned.append(studentsAssigned)
        allProjectAssignations.append(projectAssignations)
        averageStudentAssigned = sum(studentsAssigned) / numberOptions
        sumStudentAssigned += sum(studentsAssigned) / numberOptions
        if averageStudentAssigned >= bestStudentAssigned and averageStudentScore > bestStudentScore:
            bestStudentAssigned = averageStudentAssigned
            bestStudentScore = averageStudentScore
            bestResult = env.finalState()
        progressBar.next()
    progressBar.finish()
    print("Execution done.")
    print(trainingTime)
    if mode == 0:
        _executionAnalysis(numberEpisodes, allStudentScores, allProjectScores, allSteps, bestStudentAssigned,
                           numberOptions, allStudentsAssigned, allProjectAssignations, sumStudentAssigned)
    return bestResult
5,357,096
def string_to_hexadecimale_device_name(name: str) -> str: """Encode string device name to an appropriate hexadecimal value. Args: name: the desired name for encoding. Return: Hexadecimal representation of the name argument. """ length = len(name) if 1 < length < 33: hex_name = hexlify(name.encode()) zeros_pad = ("00" * (32 - length)).encode() return (hex_name + zeros_pad).decode() raise ValueError("name length can vary from 2 to 32")
5,357,097
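A quick check of `string_to_hexadecimale_device_name`; the device name is illustrative.

encoded = string_to_hexadecimale_device_name("boiler")
print(encoded[:12])  # '626f696c6572' - hex of 'boiler'
print(len(encoded))  # 64 - zero-padded to 32 bytes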
def createTeam( firstIndex, secondIndex, isRed, first = 'DefensiveAgent', second = 'OffensiveAgent'): """ This function should return a list of two agents that will form the team, initialized using firstIndex and secondIndex as their agent index numbers. isRed is True if the red team is being created, and will be False if the blue team is being created. As a potentially helpful development aid, this function can take additional string-valued keyword arguments ("first" and "second" are such arguments in the case of this function), which will come from the --redOpts and --blueOpts command-line arguments to capture.py. For the nightly contest, however, your team will be created without any extra arguments, so you should make sure that the default behavior is what you want for the nightly contest. """ return [eval(first)(firstIndex), eval(second)(secondIndex)]
5,357,098
def str_dice(die): """Return a string representation of die. >>> str_dice(dice(1, 6)) 'die takes on values from 1 to 6' """ return 'die takes on values from {0} to {1}'.format(smallest(die), largest(die))
5,357,099