Columns:
  id         : string (lengths 1 to 265)
  text       : string (lengths 6 to 5.19M)
  dataset_id : string (7 distinct values)
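A minimal sketch of how a dataset with this schema could be streamed and filtered using the Hugging Face datasets library; the repo id "user/python-code-corpus" is a hypothetical placeholder, since this preview does not name the hosted dataset:

    # Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub
    # under a hypothetical repo id; swap in the real path before running.
    from datasets import load_dataset

    ds = load_dataset("user/python-code-corpus", split="train", streaming=True)

    # Keep only the StarcoderdataPython rows, one of the 7 dataset_id classes above.
    for row in ds:
        if row["dataset_id"] == "StarcoderdataPython":
            print(row["id"], len(row["text"]))  # row id and sample size in characters
            break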
1762195
<filename>Alice/Sorting/Insertion_Sort.py<gh_stars>1-10
class InsertionSort:
    def __init__(self, array):
        self.array = array

    def result(self):
        for i in range(1, len(self.array)):
            for j in range(i - 1, -1, -1):
                if self.array[j] > self.array[j + 1]:
                    self.array[j], self.array[j + 1] = self.array[j + 1], self.array[j]
        return self.array


test = list(map(int, input().split(' ')))
t = InsertionSort(test)
print(t.result())
StarcoderdataPython
3226610
<gh_stars>0
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from tqdm import tqdm


class EvolutionaryParameterOptimizer:
    """
    Evolutionary Algorithm Optimizer for arbitrary parameter_discretization
    which allows it to solve integer optimizations
    """

    def __init__(self, parameter_ranges, parameter_discretization, fitness_function,
                 population_size=20, replacement_proportion=0.5, generations=20,
                 minimize=False, show_evolution=False,
                 random_variation_probability=0.1, crossover_probability=0.7):
        self.minimize = minimize
        self.show_evolution = show_evolution
        if self.show_evolution:
            plt.ion()
            self.cb = None
        self.population_size = population_size
        assert 0 < self.population_size
        self.replacement_proportion = replacement_proportion
        assert 0.0 < self.replacement_proportion <= 1.0
        self.parameter_ranges = np.array(parameter_ranges)
        self.parameter_discretization = np.array(parameter_discretization)
        assert len(self.parameter_ranges) == len(self.parameter_discretization)
        self.fitness_function = fitness_function
        self.best_individual = None
        self.best_fitness = float("inf") if self.minimize else -float("inf")
        self.fitnesses = np.zeros(self.population_size)
        self.parameter_interval = self.parameter_ranges[:, 1] - self.parameter_ranges[:, 0]
        self.population = np.random.random((self.population_size, len(self.parameter_ranges))) *\
            self.parameter_interval + self.parameter_ranges[:, 0]
        self.population -= (self.population % self.parameter_discretization)
        self.generations = generations
        assert 0 < self.generations
        self.random_variation_probability = random_variation_probability
        assert 0.0 <= self.random_variation_probability <= 1.0
        self.crossover_probability = self.random_variation_probability + crossover_probability
        assert 0.0 <= self.crossover_probability <= 1.0
        self.fitness_cache = dict()
        self.best_fitnesses = []
        self.optimize()

    def evaluate_fitness(self):
        fitnesses = []
        for individual in self.population:
            if tuple(individual) in self.fitness_cache:
                fitness = self.fitness_cache[tuple(individual)]
            else:
                fitness = self.fitness_function(*individual)
                self.fitness_cache[tuple(individual)] = fitness
            fitnesses.append(fitness)
            if (self.minimize and fitness < self.best_fitness) or (not self.minimize and fitness > self.best_fitness):
                self.best_fitness = fitness
                self.best_individual = individual
        self.fitnesses = np.array(fitnesses)
        self.best_fitnesses.append(self.best_fitness)

    def replacement(self):
        population_fitness_variation = self.get_population_fitness_variation(noise_level=0.01)
        survivors_inx = sorted(range(self.population_size),
                               key=lambda x: population_fitness_variation[x],
                               reverse=True)[0:int(self.population_size * self.replacement_proportion)]
        self.population = self.population[survivors_inx]
        self.fitnesses = self.fitnesses[survivors_inx]

    def get_population_fitness_variation(self, noise_level=0.0):
        population_fitness_variation = -1 * self.fitnesses if self.minimize else self.fitnesses
        population_fitness_variation += np.random.normal(0, noise_level, population_fitness_variation.shape)
        population_fitness_variation -= population_fitness_variation.min()
        return population_fitness_variation

    def population_variation(self):
        population_fitness_variation = self.get_population_fitness_variation(noise_level=0.01)
        new_population = list(self.population)
        total_fitness_variation = sum(population_fitness_variation)
        _selection_weights = (population_fitness_variation / total_fitness_variation)
        while len(new_population) < self.population_size:
            rnd = np.random.random()
            if rnd < self.random_variation_probability:
                # go random (random_variation_probability of time)
                child = np.random.random((len(self.parameter_ranges))) * self.parameter_interval \
                    + self.parameter_ranges[:, 0]
                child -= (child % self.parameter_discretization)
            elif rnd < self.crossover_probability:
                # sexual reproduction (crossover_probability of time)
                father_index, mother_index = np.random.choice(range(len(self.population)), 2,
                                                              replace=False, p=_selection_weights)
                father, mother = self.population[father_index], self.population[mother_index]
                child = (father + mother) / 2
                child -= (child % self.parameter_discretization)
            else:
                # asexual reproduction (rest of time)
                parent_index = np.random.choice(range(len(self.population)), 1,
                                                replace=False, p=_selection_weights)[0]
                parent = self.population[parent_index]
                child = []
                for i in range(len(parent)):
                    s = int(np.std(self.population[:, i]) + 1)
                    d = s * int(np.random.normal(0, 10)) * self.parameter_discretization[i]
                    child_param = parent[i] + d
                    child_param = min(self.parameter_ranges[i][1], child_param)
                    child_param = max(self.parameter_ranges[i][0], child_param)
                    child.append(child_param)
            new_population.append(np.array(child))
        self.population = np.array(new_population)

    def optimize(self):
        for _ in tqdm(range(self.generations)):
            self.population_variation()
            self.evaluate_fitness()
            if self.show_evolution:
                self.show()
            self.replacement()

    def show(self):
        if len(self.best_individual) == 2:
            plt.cla()
            xs = self.population
            x, y = xs[:, 0], xs[:, 1]
            z = self.fitnesses
            sc = plt.scatter(x, y, c=z, marker='o', cmap=cm.jet, label="all_fitnesses")
            if self.best_individual is not None:
                plt.scatter(self.best_individual[0], self.best_individual[1],
                            c='r', marker='^', label="best_fitness")
            if self.cb is None:
                self.cb = plt.colorbar(sc)
            plt.xlim(*self.parameter_ranges[0])
            plt.ylim(*self.parameter_ranges[1])
            plt.pause(0.00001)
        else:
            plt.cla()
            plt.plot(self.best_fitnesses)
            plt.pause(0.00001)
StarcoderdataPython
3356701
<gh_stars>10-100
#! /usr/bin/env python
# -*- coding: utf-8 -*-

"""
@version: ??
@author: li
@file: rnn_model_attention.py
@time: 2018/3/27 5:41 PM
"""
import tensorflow as tf
from attention import attention
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'


class TRNNConfig(object):
    embedding_dim = 64   # word embedding dimension
    seq_length = 600     # sequence length
    num_classes = 10     # number of classes
    vocab_size = 5000    # vocabulary size

    num_layers = 2       # number of hidden layers
    hidden_dim = 128     # number of hidden units
    rnn = 'gru'          # lstm or gru

    attention = True
    attention_size = 50

    dropout_keep_prob = 0.8
    learning_rate = 1e-3

    batch_size = 128
    num_epochs = 10

    print_per_batch = 100
    save_per_batch = 10


class TextRNN(object):
    def __init__(self, config):
        self.config = config
        self._build_graph()

    def lstm_cell(self):
        return tf.contrib.rnn.BasicLSTMCell(self.config.hidden_dim, state_is_tuple=True)

    def gru_cell(self):
        return tf.contrib.rnn.GRUCell(self.config.hidden_dim)

    def dropout(self):
        if self.config.rnn == 'lstm':
            cell = self.lstm_cell()
        else:
            cell = self.gru_cell()
        return tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=self.dropout_keep_prob)

    def _build_graph(self):
        with tf.variable_scope("InputData"):
            # input_x: [batch_size, seq_length]
            self.input_x = tf.placeholder(tf.int32, [None, self.config.seq_length], name='input_x')
            # input_y: [batch_size, num_classes]
            self.input_y = tf.placeholder(tf.int32, [None, self.config.num_classes], name='input_y')
            self.dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')

        with tf.device('/cpu:0'), tf.name_scope('embedding_layer'):
            # embedding: [vocab_size, embedding_dim]
            self.embedding = tf.get_variable('embedding', [self.config.vocab_size, self.config.embedding_dim])
            embedding_inputs = tf.nn.embedding_lookup(self.embedding, self.input_x)

        with tf.name_scope("RNN"):
            cells = [self.dropout() for _ in range(self.config.num_layers)]
            rnn_cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
            self._outputs, _ = tf.nn.dynamic_rnn(cell=rnn_cell, inputs=embedding_inputs, dtype=tf.float32)
            print('shape_of_outputs: %s' % self._outputs.get_shape())

        if self.config.attention is True:
            with tf.name_scope('AttentionLayer'):
                # Attention layer
                attention_output, self.alphas = attention(self._outputs, self.config.attention_size,
                                                          return_alphas=True)
                last = tf.nn.dropout(attention_output, self.dropout_keep_prob)
        else:
            last = self._outputs[:, -1, :]  # take the output of the last time step as the result
            # print('shape_of_outputs: %s' % last.get_shape())

        with tf.name_scope("ScoreLayer"):
            # Fully connected layer
            fc = tf.layers.dense(last, self.config.hidden_dim, name='fc1')
            fc = tf.contrib.layers.dropout(fc, self.dropout_keep_prob)
            fc = tf.nn.relu(fc)

            self.logits = tf.layers.dense(fc, self.config.num_classes, name='fc2')
            self.y_pred_cls = tf.argmax(tf.nn.softmax(self.logits), 1)

        with tf.name_scope("OptimizerLayer"):
            # loss function: cross entropy
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.input_y)
            self.loss = tf.reduce_mean(cross_entropy)
            # optimizer
            self.optim = tf.train.AdamOptimizer(learning_rate=self.config.learning_rate).minimize(self.loss)

        with tf.name_scope("Accuracy"):
            correct_pred = tf.equal(tf.argmax(self.input_y, 1), self.y_pred_cls)
            self.acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
StarcoderdataPython
1730111
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Helper functions to train a model."""
StarcoderdataPython
2500
# -*- coding: utf-8 -*-

__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
StarcoderdataPython
3229433
from .produces import *
from .dependencies import *
StarcoderdataPython
107326
<reponame>utsavdey/Fundamentals_Of_Deep_Learning_Assignments<filename>Assignment1/grad.py<gh_stars>0
import numpy as np


def cross_entropy_grad(y_hat, label):  # grad w.r.t. output activation
    temp = np.zeros_like(y_hat)
    # If the initial guess is very wrong, this gradient will explode. This places a limit on that.
    if y_hat[label] < 10 ** -8:
        y_hat[label] = 10 ** -8
    temp[label] = -1 / (y_hat[label])
    norm = np.linalg.norm(temp)
    if norm > 100.0:
        return temp * 100.0 / norm
    else:
        return temp


def squared_error_grad(y_hat, label):  # grad w.r.t. output activation
    temp = np.copy(y_hat)
    temp[label] -= 1
    temp = 2 * temp
    temp = temp / len(y_hat)
    norm = np.linalg.norm(temp)
    if norm > 100.0:
        return temp * 100.0 / norm
    else:
        return temp


def output_grad(y_hat, label, loss_type):
    if loss_type == 'cross_entropy':
        return cross_entropy_grad(y_hat=y_hat, label=label)
    elif loss_type == 'squared_error':
        return squared_error_grad(y_hat=y_hat, label=label)


def last_grad(y_hat, label):  # grad w.r.t. the last layer
    temp = np.copy(y_hat)
    temp[label] = temp[label] - 1
    norm = np.linalg.norm(temp)
    if norm > 100.0:
        return temp * 100.0 / norm
    else:
        return temp


# This function helps in calculating the gradient w.r.t. the a_i's when the
# activation function is sigmoid. We have passed the h_i's.
def sigmoid_grad(post_activation):
    return np.multiply(post_activation, 1 - post_activation)


# This function helps in calculating the gradient w.r.t. the a_i's when the
# activation function is tanh. We have passed the h_i's.
def tanh_grad(post_activation):
    return 1 - np.power(post_activation, 2)


# This function helps in calculating the gradient w.r.t. the a_i's when the
# activation function is relu.
def relu_grad(pre_activation_vector):
    grad = np.copy(pre_activation_vector)
    # making +ve and 0 components 1
    grad[grad >= 0] = 1
    # making -ve components 0
    grad[grad < 0] = 0
    return grad


def a_grad(network, transient_gradient, layer):  # grad w.r.t. the a_i's of the layer
    if network[layer]['context'] == 'sigmoid':
        active_grad_ = sigmoid_grad(network[layer]['h'])
    elif network[layer]['context'] == 'tanh':
        active_grad_ = tanh_grad(network[layer]['h'])
    elif network[layer]['context'] == 'relu':
        active_grad_ = relu_grad(network[layer]['a'])
    # hadamard multiplication
    temp = np.multiply(transient_gradient[layer]['h'], active_grad_)
    norm = np.linalg.norm(temp)
    if norm > 100.0:
        return temp * 100.0 / norm
    else:
        return temp


def h_grad(network, transient_gradient, layer):  # grad w.r.t. the h_i of the layer
    temp = network[layer + 1]['weight'].transpose() @ transient_gradient[layer + 1]['a']
    norm = np.linalg.norm(temp)
    if norm > 100.0:
        return temp * 100.0 / norm
    else:
        return temp


def w_grad(network, transient_gradient, layer, x):
    if layer == 0:
        temp = transient_gradient[layer]['a'] @ x.transpose()
    else:
        temp = transient_gradient[layer]['a'] @ network[layer - 1]['h'].transpose()
    norm = np.linalg.norm(temp)
    if norm > 10000.0:
        return temp * 10000.0 / norm
    else:
        return temp
StarcoderdataPython
1696441
import logging
from requests import request
import json

logger = logging.getLogger("ghproject")


def post_request(url: str, data: dict, headers: dict):
    """Send data to GitHub API

    Parameters
    ----------
    url : str
        url to request
    data : dict
        data to be sent to url
    headers : dict
        GitHub authentication details

    Returns
    -------
    list
        content of request
    """
    payload = json.dumps(data)
    r = request("POST", url=url, data=payload, headers=headers)
    if r.status_code in [201, 202]:
        logger.debug("Successfully executed the POST request")
        return json.loads(r.content)
    elif r.status_code == 401:
        logger.warning("Status: 401 Unauthorized")
    else:
        logger.warning(json.loads(r.content)["errors"][0]["message"])


def get_request(url: str, headers: dict):
    """Retrieve data from GitHub API

    Parameters
    ----------
    url : str
        url to request
    headers : dict
        authentication details

    Returns
    -------
    list
        content of request
    """
    r = request("GET", url=url, headers=headers)
    if r.status_code == 200:
        logger.debug("Successfully executed the GET request")
        return json.loads(r.content)
    elif r.status_code == 401:
        logger.warning("Status: 401 Unauthorized")
        return None
    else:
        logger.error(json.loads(r.content)["errors"][0]["message"])
        return None


def verify_authentication(url, headers):
    """Verify authentication to GitHub repository

    Parameters
    ----------
    url : str
        url to request
    headers : dict
        authentication details
    """
    r = request("GET", url=url, headers=headers)
    if r.status_code == 200:
        logger.info(f"Authentication successful to {url}")
    elif r.status_code == 401:
        logger.warning("Status: 401 Unauthorized")
    else:
        logger.error(f"Status {r.status_code}, {json.loads(r.content)}")
StarcoderdataPython
1654194
from app import app
from .routes import france_routes

app.register_blueprint(france_routes)
StarcoderdataPython
4820516
import pytest


def test_get_sample_ecommerce_dataset():
    from relevanceai.datasets import get_ecommerce_1_dataset
    assert len(get_ecommerce_1_dataset(number_of_documents=100)) == 100


def test_get_games_dataset_subset():
    from relevanceai.datasets import get_games_dataset
    assert len(get_games_dataset(number_of_documents=100)) == 100


@pytest.mark.skip(reason="Min time to insight")
def test_get_games_dataset_full():
    from relevanceai.datasets import get_games_dataset
    assert len(get_games_dataset(number_of_documents=None)) == 365


@pytest.mark.skip(reason="Skipping as large file in memory")
def test_get_online_retail_dataset_subset():
    from relevanceai.datasets import get_online_retail_dataset
    assert len(get_online_retail_dataset(number_of_documents=1000)) == 1000


@pytest.mark.skip(reason="Skipping as large file in memory")
def test_get_online_retail_dataset_full():
    from relevanceai.datasets import get_online_retail_dataset
    assert len(get_online_retail_dataset(number_of_documents=None)) == 406829


def test_get_news_dataset_subset():
    from relevanceai.datasets import get_news_dataset
    assert len(get_news_dataset(number_of_documents=100)) == 100


@pytest.mark.skip(reason="Min time to insight")
def test_get_news_dataset_full():
    from relevanceai.datasets import get_news_dataset
    assert len(get_news_dataset(number_of_documents=None)) == 250


def test_get_ecommerce_dataset_subset():
    from relevanceai.datasets import get_ecommerce_3_dataset
    assert len(get_ecommerce_3_dataset(number_of_documents=1000)) == 1000


@pytest.mark.skip(reason="Min time to insight")
def test_get_ecommerce_dataset_full():
    from relevanceai.datasets import get_ecommerce_3_dataset
    assert len(get_ecommerce_3_dataset(number_of_documents=None)) == 15528
StarcoderdataPython
1764786
<filename>lowhaio.py
import asyncio
import contextlib
import ipaddress
import logging
import urllib.parse
import ssl
import socket

from aiodnsresolver import (
    TYPES,
    DnsError,
    Resolver,
    ResolverLoggerAdapter,
)


class HttpError(Exception):
    pass


class HttpConnectionError(HttpError):
    pass


class HttpDnsError(HttpConnectionError):
    pass


class HttpTlsError(HttpConnectionError):
    pass


class HttpDataError(HttpError):
    pass


class HttpConnectionClosedError(HttpDataError):
    pass


class HttpHeaderTooLong(HttpDataError):
    pass


class HttpLoggerAdapter(logging.LoggerAdapter):
    def process(self, msg, kwargs):
        return \
            ('[http] %s' % (msg,), kwargs) if not self.extra else \
            ('[http:%s] %s' % (','.join(str(v) for v in self.extra.values()), msg), kwargs)


def get_logger_adapter_default(extra):
    return HttpLoggerAdapter(logging.getLogger('lowhaio'), extra)


def get_resolver_logger_adapter_default(http_extra):
    def _get_resolver_logger_adapter_default(resolver_extra):
        http_adapter = HttpLoggerAdapter(logging.getLogger('aiodnsresolver'), http_extra)
        return ResolverLoggerAdapter(http_adapter, resolver_extra)
    return _get_resolver_logger_adapter_default


async def empty_async_iterator():
    while False:
        yield


get_current_task = \
    asyncio.current_task if hasattr(asyncio, 'current_task') else \
    asyncio.Task.current_task


def streamed(data):
    async def _streamed():
        yield data
    return _streamed


async def buffered(data):
    return b''.join([chunk async for chunk in data])


def get_nonblocking_sock():
    sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=socket.IPPROTO_TCP)
    sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
    sock.setblocking(False)
    return sock


def set_tcp_cork(sock):
    sock.setsockopt(socket.SOL_TCP, socket.TCP_CORK, 1)  # pylint: disable=no-member


def unset_tcp_cork(sock):
    sock.setsockopt(socket.SOL_TCP, socket.TCP_CORK, 0)  # pylint: disable=no-member


async def send_body_async_gen_bytes(logger, loop, sock, socket_timeout, body, body_args, body_kwargs):
    logger.debug('Sending body')
    num_bytes = 0
    async for chunk in body(*body_args, **dict(body_kwargs)):
        num_bytes += len(chunk)
        await send_all(loop, sock, socket_timeout, chunk)
    logger.debug('Sent body bytes: %s', num_bytes)


async def send_header_tuples_of_bytes(logger, loop, sock, socket_timeout, http_version, method,
                                      parsed_url, params, headers):
    logger.debug('Sending header')
    outgoing_qs = urllib.parse.urlencode(params, doseq=True).encode()
    outgoing_path = urllib.parse.quote(parsed_url.path).encode()
    outgoing_path_qs = outgoing_path + \
        ((b'?' + outgoing_qs) if outgoing_qs != b'' else b'')
    host_specified = any(True for key, value in headers if key == b'host')
    headers_with_host = \
        headers if host_specified else \
        ((b'host', parsed_url.hostname.encode('idna')),) + headers
    await send_all(loop, sock, socket_timeout, b'%s %s %s\r\n%s\r\n' % (
        method, outgoing_path_qs, http_version, b''.join(
            b'%s:%s\r\n' % (key, value)
            for (key, value) in headers_with_host
        )
    ))
    logger.debug('Sent header')


def Pool(
        get_dns_resolver=Resolver,
        get_sock=get_nonblocking_sock,
        get_ssl_context=ssl.create_default_context,
        sock_pre_message=set_tcp_cork if hasattr(socket, 'TCP_CORK') else lambda _: None,
        sock_post_message=unset_tcp_cork if hasattr(socket, 'TCP_CORK') else lambda _: None,
        send_header=send_header_tuples_of_bytes,
        send_body=send_body_async_gen_bytes,
        http_version=b'HTTP/1.1',
        keep_alive_timeout=15,
        recv_bufsize=16384,
        max_header_length=16384,
        socket_timeout=10,
        get_logger_adapter=get_logger_adapter_default,
        get_resolver_logger_adapter=get_resolver_logger_adapter_default,
):
    loop = \
        asyncio.get_running_loop() if hasattr(asyncio, 'get_running_loop') else \
        asyncio.get_event_loop()
    ssl_context = get_ssl_context()

    logger_extra = {}
    logger = get_logger_adapter({})

    dns_resolve, dns_resolver_clear_cache = get_dns_resolver(
        get_logger_adapter=get_resolver_logger_adapter_default(logger_extra),
    )

    pool = {}

    async def request(method, url, params=(), headers=(),
                      body=empty_async_iterator, body_args=(), body_kwargs=(),
                      get_logger_adapter=get_logger_adapter,
                      get_resolver_logger_adapter=get_resolver_logger_adapter,
                      ):
        parsed_url = urllib.parse.urlsplit(url)

        logger_extra = {'lowhaio_method': method.decode(), 'lowhaio_url': url}
        logger = get_logger_adapter(logger_extra)

        try:
            ip_addresses = (ipaddress.ip_address(parsed_url.hostname),)
        except ValueError:
            try:
                ip_addresses = await dns_resolve(
                    parsed_url.hostname, TYPES.A,
                    get_logger_adapter=get_resolver_logger_adapter(logger_extra),
                )
            except DnsError as exception:
                raise HttpDnsError() from exception

        key = (parsed_url.scheme, parsed_url.netloc)
        sock = get_from_pool(logger, key, ip_addresses)

        if sock is None:
            sock = get_sock()
            try:
                logger.debug('Connecting: %s', sock)
                await connect(sock, parsed_url, str(ip_addresses[0]))
                logger.debug('Connected: %s', sock)
            except asyncio.CancelledError:
                sock.close()
                raise
            except Exception as exception:
                sock.close()
                raise HttpConnectionError() from exception
            except BaseException:
                sock.close()
                raise

            try:
                if parsed_url.scheme == 'https':
                    logger.debug('TLS handshake started')
                    sock = tls_wrapped(sock, parsed_url.hostname)
                    await tls_complete_handshake(loop, sock, socket_timeout)
                    logger.debug('TLS handshake completed')
            except asyncio.CancelledError:
                sock.close()
                raise
            except Exception as exception:
                sock.close()
                raise HttpTlsError() from exception
            except BaseException:
                sock.close()
                raise

        try:
            sock_pre_message(sock)
            await send_header(logger, loop, sock, socket_timeout, http_version, method,
                              parsed_url, params, headers)
            await send_body(logger, loop, sock, socket_timeout, body, body_args, body_kwargs)
            sock_post_message(sock)

            code, version, response_headers, unprocessed = await recv_header(sock)
            logger.debug('Received header with code: %s', code)
            connection, body_length, body_handler = connection_length_body_handler(
                logger, method, version, response_headers)
            response_body = response_body_generator(
                logger, sock, unprocessed, key, connection, body_length, body_handler)
        except asyncio.CancelledError:
            sock.close()
            raise
        except Exception as exception:
            sock.close()
            if isinstance(exception, HttpDataError):
                raise
            raise HttpDataError() from exception
        except BaseException:
            sock.close()
            raise

        return code, response_headers, response_body

    def get_from_pool(logger, key, ip_addresses):
        try:
            socks = pool[key]
        except KeyError:
            logger.debug('Connection not in pool: %s', key)
            return None

        while socks:
            _sock, close_callback = next(iter(socks.items()))
            close_callback.cancel()
            del socks[_sock]

            try:
                connected_ip = ipaddress.ip_address(_sock.getpeername()[0])
            except OSError:
                logger.debug('Unable to get peer name: %s', _sock)
                _sock.close()
                continue

            if connected_ip not in ip_addresses:
                logger.debug('Not current for domain, closing: %s', _sock)
                _sock.close()
                continue

            logger.debug('Reusing connection %s', _sock)
            if _sock.fileno() != -1:
                return _sock

        del pool[key]

    def add_to_pool(key, sock):
        try:
            key_pool = pool[key]
        except KeyError:
            key_pool = {}
            pool[key] = key_pool

        key_pool[sock] = loop.call_later(keep_alive_timeout, close_by_keep_alive_timeout, key, sock)

    def close_by_keep_alive_timeout(key, sock):
        logger.debug('Closing by timeout: %s,%s', key, sock)
        sock.close()
        del pool[key][sock]
        if not pool[key]:
            del pool[key]

    async def connect(sock, parsed_url, ip_address):
        scheme = parsed_url.scheme
        _, _, port_specified = parsed_url.netloc.partition(':')
        port = \
            port_specified if port_specified != '' else \
            443 if scheme == 'https' else \
            80
        address = (ip_address, port)
        await loop.sock_connect(sock, address)

    def tls_wrapped(sock, host):
        return ssl_context.wrap_socket(sock, server_hostname=host, do_handshake_on_connect=False)

    async def recv_header(sock):
        unprocessed = b''
        while True:
            unprocessed += await recv(loop, sock, socket_timeout, recv_bufsize)
            try:
                header_end = unprocessed.index(b'\r\n\r\n')
            except ValueError:
                if len(unprocessed) >= max_header_length:
                    raise HttpHeaderTooLong()
                continue
            else:
                break

        header_bytes, unprocessed = unprocessed[:header_end], unprocessed[header_end + 4:]
        lines = header_bytes.split(b'\r\n')
        code = lines[0][9:12]
        version = lines[0][5:8]
        response_headers = tuple(
            (key.strip().lower(), value.strip())
            for line in lines[1:]
            for (key, _, value) in (line.partition(b':'),)
        )

        return code, version, response_headers, unprocessed

    async def response_body_generator(logger, sock, unprocessed, key,
                                      connection, body_length, body_handler):
        try:
            generator = body_handler(logger, sock, body_length, unprocessed)
            unprocessed = None  # So can be garbage collected
            logger.debug('Receiving body')
            num_bytes = 0
            async for chunk in generator:
                yield chunk
                num_bytes += len(chunk)
            logger.debug('Received transfer-decoded body bytes: %s', num_bytes)
        except BaseException:
            sock.close()
            raise
        else:
            if connection == b'keep-alive':
                logger.debug('Keeping connection alive: %s', sock)
                add_to_pool(key, sock)
            else:
                logger.debug('Closing connection: %s', sock)
                sock.close()

    def connection_length_body_handler(logger, method, version, response_headers):
        headers_dict = dict(response_headers)
        transfer_encoding = headers_dict.get(b'transfer-encoding', b'identity')
        logger.debug('Effective transfer-encoding: %s', transfer_encoding)
        connection = \
            b'close' if keep_alive_timeout == 0 else \
            headers_dict.get(b'connection', b'keep-alive').lower() if version == b'1.1' else \
            headers_dict.get(b'connection', b'close').lower()
        logger.debug('Effective connection: %s', connection)
        body_length = \
            0 if method == b'HEAD' else \
            0 if connection == b'keep-alive' and b'content-length' not in headers_dict else \
            None if b'content-length' not in headers_dict else \
            int(headers_dict[b'content-length'])
        uses_identity = (method == b'HEAD' or transfer_encoding == b'identity')
        body_handler = \
            identity_handler_known_body_length if uses_identity and body_length is not None else \
            identity_handler_unknown_body_length if uses_identity else \
            chunked_handler

        return connection, body_length, body_handler

    async def identity_handler_known_body_length(logger, sock, body_length, unprocessed):
        logger.debug('Expected incoming body bytes: %s', body_length)
        total_remaining = body_length

        if unprocessed and total_remaining:
            total_remaining -= len(unprocessed)
            yield unprocessed

        while total_remaining:
            unprocessed = None  # So can be garbage collected
            unprocessed = await recv(loop, sock, socket_timeout, min(recv_bufsize, total_remaining))
            total_remaining -= len(unprocessed)
            yield unprocessed

    async def identity_handler_unknown_body_length(logger, sock, _, unprocessed):
        logger.debug('Unknown incoming body length')
        if unprocessed:
            yield unprocessed

        unprocessed = None  # So can be garbage collected
        try:
            while True:
                yield await recv(loop, sock, socket_timeout, recv_bufsize)
        except HttpConnectionClosedError:
            pass

    async def chunked_handler(_, sock, __, unprocessed):
        while True:
            # Fetch until have chunk header
            while b'\r\n' not in unprocessed:
                if len(unprocessed) >= max_header_length:
                    raise HttpHeaderTooLong()
                unprocessed += await recv(loop, sock, socket_timeout, recv_bufsize)

            # Find chunk length
            chunk_header_end = unprocessed.index(b'\r\n')
            chunk_header_hex = unprocessed[:chunk_header_end]
            chunk_length = int(chunk_header_hex, 16)

            # End of body signalled by a 0-length chunk
            if chunk_length == 0:
                while b'\r\n\r\n' not in unprocessed:
                    if len(unprocessed) >= max_header_length:
                        raise HttpHeaderTooLong()
                    unprocessed += await recv(loop, sock, socket_timeout, recv_bufsize)
                break

            # Remove chunk header
            unprocessed = unprocessed[chunk_header_end + 2:]

            # Yield whatever amount of chunk we have already, which
            # might be nothing
            chunk_remaining = chunk_length
            in_chunk, unprocessed = \
                unprocessed[:chunk_remaining], unprocessed[chunk_remaining:]
            if in_chunk:
                yield in_chunk
            chunk_remaining -= len(in_chunk)

            # Fetch and yield rest of chunk
            while chunk_remaining:
                unprocessed += await recv(loop, sock, socket_timeout, recv_bufsize)
                in_chunk, unprocessed = \
                    unprocessed[:chunk_remaining], unprocessed[chunk_remaining:]
                chunk_remaining -= len(in_chunk)
                yield in_chunk

            # Fetch until have chunk footer, and remove
            while len(unprocessed) < 2:
                unprocessed += await recv(loop, sock, socket_timeout, recv_bufsize)
            unprocessed = unprocessed[2:]

    async def close(
            get_logger_adapter=get_logger_adapter,
            get_resolver_logger_adapter=get_resolver_logger_adapter,
    ):
        logger_extra = {}
        logger = get_logger_adapter(logger_extra)
        logger.debug('Closing pool')
        await dns_resolver_clear_cache(
            get_logger_adapter=get_resolver_logger_adapter(logger_extra),
        )
        for key, socks in pool.items():
            for sock, close_callback in socks.items():
                logger.debug('Closing: %s,%s', key, sock)
                close_callback.cancel()
                sock.close()
        pool.clear()

    return request, close


async def send_all(loop, sock, socket_timeout, data):
    try:
        latest_num_bytes = sock.send(data)
    except (BlockingIOError, ssl.SSLWantWriteError):
        latest_num_bytes = 0
    else:
        if latest_num_bytes == 0:
            raise HttpConnectionClosedError()

    if latest_num_bytes == len(data):
        return

    total_num_bytes = latest_num_bytes

    def writer():
        nonlocal total_num_bytes
        try:
            latest_num_bytes = sock.send(data_memoryview[total_num_bytes:])
        except (BlockingIOError, ssl.SSLWantWriteError):
            pass
        except Exception as exception:
            loop.remove_writer(fileno)
            if not result.done():
                result.set_exception(exception)
        else:
            total_num_bytes += latest_num_bytes
            if latest_num_bytes == 0 and not result.done():
                loop.remove_writer(fileno)
                result.set_exception(HttpConnectionClosedError())
            elif total_num_bytes == len(data) and not result.done():
                loop.remove_writer(fileno)
                result.set_result(None)
            else:
                reset_timeout()

    result = asyncio.Future()
    fileno = sock.fileno()
    loop.add_writer(fileno, writer)
    data_memoryview = memoryview(data)

    try:
        with timeout(loop, socket_timeout) as reset_timeout:
            return await result
    finally:
        loop.remove_writer(fileno)


async def recv(loop, sock, socket_timeout, recv_bufsize):
    incoming = await _recv(loop, sock, socket_timeout, recv_bufsize)
    if not incoming:
        raise HttpConnectionClosedError()
    return incoming


async def _recv(loop, sock, socket_timeout, recv_bufsize):
    try:
        return sock.recv(recv_bufsize)
    except (BlockingIOError, ssl.SSLWantReadError):
        pass

    def reader():
        try:
            chunk = sock.recv(recv_bufsize)
        except (BlockingIOError, ssl.SSLWantReadError):
            pass
        except Exception as exception:
            loop.remove_reader(fileno)
            if not result.done():
                result.set_exception(exception)
        else:
            loop.remove_reader(fileno)
            if not result.done():
                result.set_result(chunk)

    result = asyncio.Future()
    fileno = sock.fileno()
    loop.add_reader(fileno, reader)

    try:
        with timeout(loop, socket_timeout):
            return await result
    finally:
        loop.remove_reader(fileno)


async def tls_complete_handshake(loop, ssl_sock, socket_timeout):
    try:
        return ssl_sock.do_handshake()
    except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
        pass

    def handshake():
        try:
            ssl_sock.do_handshake()
        except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
            reset_timeout()
        except Exception as exception:
            loop.remove_reader(fileno)
            loop.remove_writer(fileno)
            if not done.done():
                done.set_exception(exception)
        else:
            loop.remove_reader(fileno)
            loop.remove_writer(fileno)
            if not done.done():
                done.set_result(None)

    done = asyncio.Future()
    fileno = ssl_sock.fileno()
    loop.add_reader(fileno, handshake)
    loop.add_writer(fileno, handshake)

    try:
        with timeout(loop, socket_timeout) as reset_timeout:
            return await done
    finally:
        loop.remove_reader(fileno)
        loop.remove_writer(fileno)


@contextlib.contextmanager
def timeout(loop, max_time):
    cancelling_due_to_timeout = False
    current_task = get_current_task()

    def cancel():
        nonlocal cancelling_due_to_timeout
        cancelling_due_to_timeout = True
        current_task.cancel()

    def reset():
        nonlocal handle
        handle.cancel()
        handle = loop.call_later(max_time, cancel)

    handle = loop.call_later(max_time, cancel)

    try:
        yield reset
    except asyncio.CancelledError:
        if cancelling_due_to_timeout:
            raise asyncio.TimeoutError()
        raise
    finally:
        handle.cancel()
StarcoderdataPython
1738392
#!/usr/bin/env python2
"""
builtin_process.py - Builtins that deal with processes or modify process state.

This is sort of the opposite of builtin_pure.py.
"""
from __future__ import print_function

import signal  # for calculating numbers

from _devbuild.gen import arg_types
from _devbuild.gen.runtime_asdl import (
    cmd_value, cmd_value__Argv,
    wait_status_e, wait_status__Proc, wait_status__Pipeline, wait_status__Cancelled,
)
from _devbuild.gen.syntax_asdl import source
from asdl import runtime
from core import alloc
from core import dev
from core import error
from core.pyerror import e_usage
from core import main_loop
from core.pyutil import stderr_line
from core import vm
from core.pyerror import log
from frontend import args
from frontend import flag_spec
from frontend import reader
from frontend import signal_def
from frontend import typed_args
from mycpp import mylib
from mycpp.mylib import iteritems, tagswitch

import posix_ as posix

from typing import List, Dict, Optional, Any, cast, TYPE_CHECKING

if TYPE_CHECKING:
    from _devbuild.gen.syntax_asdl import command_t
    from core.process import ExternalProgram, FdState, JobState, Waiter
    from core.pyos import SignalState
    from core.state import Mem, SearchPath
    from core.ui import ErrorFormatter
    from frontend.parse_lib import ParseContext

if mylib.PYTHON:
    EXEC_SPEC = flag_spec.FlagSpec('exec')


class Exec(vm._Builtin):

    def __init__(self, mem, ext_prog, fd_state, search_path, errfmt):
        # type: (Mem, ExternalProgram, FdState, SearchPath, ErrorFormatter) -> None
        self.mem = mem
        self.ext_prog = ext_prog
        self.fd_state = fd_state
        self.search_path = search_path
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value__Argv) -> int
        arg_r = args.Reader(cmd_val.argv, spids=cmd_val.arg_spids)
        arg_r.Next()  # skip 'exec'
        _ = args.Parse(EXEC_SPEC, arg_r)  # no flags now, but accepts --

        # Apply redirects in this shell.
        # NOTE: Redirects were processed earlier.
        if arg_r.AtEnd():
            self.fd_state.MakePermanent()
            return 0

        environ = self.mem.GetExported()
        i = arg_r.i
        cmd = cmd_val.argv[i]
        argv0_path = self.search_path.CachedLookup(cmd)
        if argv0_path is None:
            self.errfmt.Print_('exec: %r not found' % cmd,
                               span_id=cmd_val.arg_spids[1])
            raise SystemExit(127)  # exec builtin never returns

        # shift off 'exec'
        c2 = cmd_value.Argv(cmd_val.argv[i:], cmd_val.arg_spids[i:],
                            cmd_val.typed_args)

        self.ext_prog.Exec(argv0_path, c2, environ)  # NEVER RETURNS
        assert False, "This line should never be reached"  # makes mypy happy


class Wait(vm._Builtin):
    """
    wait: wait [-n] [id ...]

    Wait for job completion and return exit status.

    Waits for each process identified by an ID, which may be a process ID or a
    job specification, and reports its termination status.  If ID is not
    given, waits for all currently active child processes, and the return
    status is zero.  If ID is a job specification, waits for all processes in
    that job's pipeline.

    If the -n option is supplied, waits for the next job to terminate and
    returns its exit status.

    Exit Status:
    Returns the status of the last ID; fails if ID is invalid or an invalid
    option is given.
    """

    def __init__(self, waiter, job_state, mem, tracer, errfmt):
        # type: (Waiter, JobState, Mem, dev.Tracer, ErrorFormatter) -> None
        self.waiter = waiter
        self.job_state = job_state
        self.mem = mem
        self.tracer = tracer
        self.errfmt = errfmt

    def Run(self, cmd_val):
        # type: (cmd_value__Argv) -> int
        with dev.ctx_Tracer(self.tracer, 'wait', cmd_val.argv):
            return self._Run(cmd_val)

    def _Run(self, cmd_val):
        # type: (cmd_value__Argv) -> int
        attrs, arg_r = flag_spec.ParseCmdVal('wait', cmd_val)
        arg = arg_types.wait(attrs.attrs)

        job_ids, arg_spids = arg_r.Rest2()

        if arg.n:
            # log('*** wait -n')

            # wait -n returns the exit status of the JOB.
            # You don't know WHICH process, which is odd.

            # TODO: this should wait for the next JOB, which may be multiple
            # processes.
            # Bash has a wait_for_any_job() function, which loops until the jobs
            # table changes.
            #
            # target_count = self.job_state.NumRunning() - 1
            # while True:
            #   if not self.waiter.WaitForOne():
            #     break
            #
            #   if self.job_state.NumRunning == target_count:
            #     break

            # log('wait next')
            result = self.waiter.WaitForOne(False)
            if result == 0:  # OK
                return self.waiter.last_status
            elif result == -1:  # nothing to wait for
                return 127
            else:
                return result  # signal

        if len(job_ids) == 0:
            # log('*** wait')
            i = 0
            while True:
                # BUG: If there is a STOPPED process, this will hang forever, because
                # we don't get ECHILD.
                # Not sure it matters since you can now Ctrl-C it.
                result = self.waiter.WaitForOne(False)
                if result != 0:
                    break  # nothing to wait for, or interrupted
                i += 1
                if self.job_state.NoneAreRunning():
                    break

            return 0 if result == -1 else result

        # Get list of jobs.  Then we need to check if they are ALL stopped.
        # Returns the exit code of the last one on the COMMAND LINE, not the exit
        # code of last one to FINISH.
        status = 1  # error
        for i, job_id in enumerate(job_ids):
            span_id = arg_spids[i]

            # The % syntax is sort of like ! history sub syntax, with various queries.
            # https://stackoverflow.com/questions/35026395/bash-what-is-a-jobspec
            if job_id.startswith('%'):
                raise error.Usage(
                    "doesn't support bash-style jobspecs (got %r)" % job_id,
                    span_id=span_id)

            # Does it look like a PID?
            try:
                pid = int(job_id)
            except ValueError:
                raise error.Usage('expected PID or jobspec, got %r' % job_id,
                                  span_id=span_id)

            job = self.job_state.JobFromPid(pid)
            if job is None:
                self.errfmt.Print_("%s isn't a child of this shell" % pid,
                                   span_id=span_id)
                return 127

            wait_status = job.JobWait(self.waiter)

            UP_wait_status = wait_status
            with tagswitch(wait_status) as case:
                if case(wait_status_e.Proc):
                    wait_status = cast(wait_status__Proc, UP_wait_status)
                    status = wait_status.code
                elif case(wait_status_e.Pipeline):
                    wait_status = cast(wait_status__Pipeline, UP_wait_status)
                    # TODO: handle PIPESTATUS?  Is this right?
                    status = wait_status.codes[-1]
                elif case(wait_status_e.Cancelled):
                    wait_status = cast(wait_status__Cancelled, UP_wait_status)
                    status = wait_status.code
                else:
                    raise AssertionError()

        return status


class Jobs(vm._Builtin):
    """List jobs."""

    def __init__(self, job_state):
        # type: (JobState) -> None
        self.job_state = job_state

    def Run(self, cmd_val):
        # type: (cmd_value__Argv) -> int
        attrs, arg_r = flag_spec.ParseCmdVal('jobs', cmd_val)
        arg = arg_types.jobs(attrs.attrs)

        # note: we always use 'jobs -l' format, so -l is a no-op
        self.job_state.DisplayJobs()

        if arg.debug:
            self.job_state.DebugPrint()

        return 0


class Fg(vm._Builtin):
    """Put a job in the foreground"""

    def __init__(self, job_state, waiter):
        # type: (JobState, Waiter) -> None
        self.job_state = job_state
        self.waiter = waiter

    def Run(self, cmd_val):
        # type: (cmd_value__Argv) -> int
        # Note: 'fg' currently works with processes, but not pipelines.  See issue
        # #360.  Part of it is that we should use posix.killpg().
        pid = self.job_state.GetLastStopped()
        if pid == -1:
            log('No job to put in the foreground')
            return 1

        # TODO: Print job ID rather than the PID
        log('Continue PID %d', pid)
        posix.kill(pid, signal.SIGCONT)

        return self.job_state.WhenContinued(pid, self.waiter)


class Bg(vm._Builtin):
    """Put a job in the background"""

    def __init__(self, job_state):
        # type: (JobState) -> None
        self.job_state = job_state

    def Run(self, cmd_val):
        # type: (cmd_value__Argv) -> int
        # How does this differ from 'fg'?  It doesn't wait and it sets controlling
        # terminal?
        raise error.Usage("isn't implemented")


class _TrapHandler(object):
    """A function that is called by Python's signal module.

    Similar to process.SubProgramThunk.
    """

    def __init__(self, node, nodes_to_run, sig_state, tracer):
        # type: (command_t, List[command_t], SignalState, dev.Tracer) -> None
        self.node = node
        self.nodes_to_run = nodes_to_run
        self.sig_state = sig_state
        self.tracer = tracer

    def __call__(self, sig_num, unused_frame):
        # type: (int, Any) -> None
        """For Python's signal module."""
        self.tracer.PrintMessage(
            'Received signal %d.  Will run handler in main loop' % sig_num)

        self.sig_state.last_sig_num = sig_num  # for interrupted 'wait'
        self.nodes_to_run.append(self.node)

    def __str__(self):
        # type: () -> str
        # Used by trap -p
        # TODO: Abbreviate with fmt.PrettyPrint?
        return '<Trap %s>' % self.node


def _GetSignalNumber(sig_spec):
    # type: (str) -> int

    # POSIX lists the numbers that are required.
    # http://pubs.opengroup.org/onlinepubs/9699919799/
    #
    # Added 13 for SIGPIPE because autoconf's 'configure' uses it!
    if sig_spec.strip() in ('1', '2', '3', '6', '9', '13', '14', '15'):
        return int(sig_spec)

    # INT is an alias for SIGINT
    if sig_spec.startswith('SIG'):
        sig_spec = sig_spec[3:]
    return signal_def.GetNumber(sig_spec)


_HOOK_NAMES = ['EXIT', 'ERR', 'RETURN', 'DEBUG']


# TODO:
#
# bash's default -p looks like this:
# trap -- '' SIGTSTP
# trap -- '' SIGTTIN
# trap -- '' SIGTTOU
#
# CPython registers different default handlers.  The C++ rewrite should make
# OVM match sh/bash more closely.

class Trap(vm._Builtin):

    def __init__(self, sig_state, traps, nodes_to_run, parse_ctx, tracer, errfmt):
        # type: (SignalState, Dict[str, _TrapHandler], List[command_t], ParseContext, dev.Tracer, ErrorFormatter) -> None
        self.sig_state = sig_state
        self.traps = traps
        self.nodes_to_run = nodes_to_run
        self.parse_ctx = parse_ctx
        self.arena = parse_ctx.arena
        self.tracer = tracer
        self.errfmt = errfmt

    def _ParseTrapCode(self, code_str):
        # type: (str) -> command_t
        """
        Returns:
          A node, or None if the code is invalid.
        """
        line_reader = reader.StringLineReader(code_str, self.arena)
        c_parser = self.parse_ctx.MakeOshParser(line_reader)

        # TODO: the SPID should be passed through argv.
        src = source.ArgvWord('trap', runtime.NO_SPID)
        with alloc.ctx_Location(self.arena, src):
            try:
                node = main_loop.ParseWholeFile(c_parser)
            except error.Parse as e:
                self.errfmt.PrettyPrintError(e)
                return None

        return node

    def Run(self, cmd_val):
        # type: (cmd_value__Argv) -> int
        attrs, arg_r = flag_spec.ParseCmdVal('trap', cmd_val)
        arg = arg_types.trap(attrs.attrs)

        if arg.p:  # Print registered handlers
            for name, value in iteritems(self.traps):
                # The unit tests rely on this being one line.
                # bash prints a line that can be re-parsed.
                print('%s %s' % (name, value.__class__.__name__))

            return 0

        if arg.l:  # List valid signals and hooks
            for name in _HOOK_NAMES:
                print(' %s' % name)
            for name, int_val in signal_def.AllNames():
                print('%2d %s' % (int_val, name))

            return 0

        code_str = arg_r.ReadRequired('requires a code string')
        sig_spec, sig_spid = arg_r.ReadRequired2('requires a signal or hook name')

        # sig_key is NORMALIZED sig_spec: a signal number string or string hook
        # name.
        sig_key = None  # type: Optional[str]
        sig_num = None

        if sig_spec in _HOOK_NAMES:
            sig_key = sig_spec
        elif sig_spec == '0':  # Special case
            sig_key = 'EXIT'
        else:
            sig_num = _GetSignalNumber(sig_spec)
            if sig_num is not None:
                sig_key = str(sig_num)

        if sig_key is None:
            self.errfmt.Print_("Invalid signal or hook %r" % sig_spec,
                               span_id=cmd_val.arg_spids[2])
            return 1

        # NOTE: sig_spec isn't validated when removing handlers.
        if code_str == '-':
            if sig_key in _HOOK_NAMES:
                try:
                    del self.traps[sig_key]
                except KeyError:
                    pass
                return 0

            if sig_num is not None:
                try:
                    del self.traps[sig_key]
                except KeyError:
                    pass

                self.sig_state.RemoveUserTrap(sig_num)
                return 0

            raise AssertionError('Signal or trap')

        # Try parsing the code first.

        # TODO: If simple_trap is on (for oil:basic), then it must be a function
        # name?  And then you wrap it in 'try'?

        node = self._ParseTrapCode(code_str)
        if node is None:
            return 1  # ParseTrapCode() prints an error for us.

        # Register a hook.
        if sig_key in _HOOK_NAMES:
            if sig_key in ('ERR', 'RETURN', 'DEBUG'):
                stderr_line("osh warning: The %r hook isn't implemented", sig_spec)
            self.traps[sig_key] = _TrapHandler(node, self.nodes_to_run,
                                               self.sig_state, self.tracer)
            return 0

        # Register a signal.
        if sig_num is not None:
            handler = _TrapHandler(node, self.nodes_to_run, self.sig_state,
                                   self.tracer)
            # For signal handlers, the traps dictionary is used only for debugging.
            self.traps[sig_key] = handler
            if sig_num in (signal.SIGKILL, signal.SIGSTOP):
                self.errfmt.Print_("Signal %r can't be handled" % sig_spec,
                                   span_id=sig_spid)
                # Other shells return 0, but this seems like an obvious error
                return 1
            self.sig_state.AddUserTrap(sig_num, handler)
            return 0

        raise AssertionError('Signal or trap')

# Example:
# trap -- 'echo "hi there" | wc ' SIGINT
#
# Then hit Ctrl-C.


class Umask(vm._Builtin):

    def __init__(self):
        # type: () -> None
        """Dummy constructor for mycpp."""
        pass

    def Run(self, cmd_val):
        # type: (cmd_value__Argv) -> int
        argv = cmd_val.argv[1:]
        if len(argv) == 0:
            # umask() has a dumb API: you can't get it without modifying it first!
            # NOTE: dash disables interrupts around the two umask() calls, but that
            # shouldn't be a concern for us.  Signal handlers won't call umask().
            mask = posix.umask(0)
            posix.umask(mask)  #
            print('0%03o' % mask)  # octal format
            return 0

        if len(argv) == 1:
            a = argv[0]
            try:
                new_mask = int(a, 8)
            except ValueError:
                # NOTE: This happens if we have '8' or '9' in the input too.
                stderr_line("osh warning: umask with symbolic input isn't implemented")
                return 1
            else:
                posix.umask(new_mask)
                return 0

        e_usage('umask: unexpected arguments')


class Fork(vm._Builtin):

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value__Argv) -> int
        attrs, arg_r = flag_spec.ParseCmdVal('fork', cmd_val)

        arg, span_id = arg_r.Peek2()
        if arg is not None:
            e_usage('got unexpected argument %r' % arg, span_id=span_id)

        block = typed_args.GetOneBlock(cmd_val.typed_args)
        if block is None:
            e_usage('expected a block')

        return self.shell_ex.RunBackgroundJob(block)


class ForkWait(vm._Builtin):

    def __init__(self, shell_ex):
        # type: (vm._Executor) -> None
        self.shell_ex = shell_ex

    def Run(self, cmd_val):
        # type: (cmd_value__Argv) -> int
        attrs, arg_r = flag_spec.ParseCmdVal('forkwait', cmd_val)

        arg, span_id = arg_r.Peek2()
        if arg is not None:
            e_usage('got unexpected argument %r' % arg, span_id=span_id)

        block = typed_args.GetOneBlock(cmd_val.typed_args)
        if block is None:
            e_usage('expected a block')

        return self.shell_ex.RunSubshell(block)
StarcoderdataPython
162937
# Generated by Django 2.2.13 on 2020-10-09 17:57
import django.db.models.deletion
import django.utils.timezone
import model_utils.fields
from django.db import migrations, models

import waldur_core.core.fields
import waldur_core.core.models
import waldur_core.core.shims
import waldur_core.core.validators
import waldur_core.structure.models


class Migration(migrations.Migration):

    dependencies = [
        ('taggit', '0003_taggeditem_add_unique_index'),
        ('openstack', '0005_error_traceback'),
    ]

    operations = [
        migrations.CreateModel(
            name='Router',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name='ID',
                    ),
                ),
                (
                    'created',
                    model_utils.fields.AutoCreatedField(
                        default=django.utils.timezone.now,
                        editable=False,
                        verbose_name='created',
                    ),
                ),
                (
                    'modified',
                    model_utils.fields.AutoLastModifiedField(
                        default=django.utils.timezone.now,
                        editable=False,
                        verbose_name='modified',
                    ),
                ),
                (
                    'description',
                    models.CharField(
                        blank=True, max_length=2000, verbose_name='description'
                    ),
                ),
                (
                    'name',
                    models.CharField(
                        max_length=150,
                        validators=[waldur_core.core.validators.validate_name],
                        verbose_name='name',
                    ),
                ),
                ('uuid', waldur_core.core.fields.UUIDField()),
                ('backend_id', models.CharField(blank=True, max_length=255)),
                (
                    'service_project_link',
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name='routers',
                        to='openstack.OpenStackServiceProjectLink',
                    ),
                ),
                (
                    'tags',
                    waldur_core.core.shims.TaggableManager(
                        blank=True,
                        help_text='A comma-separated list of tags.',
                        related_name='router_router_openstack',
                        through='taggit.TaggedItem',
                        to='taggit.Tag',
                        verbose_name='Tags',
                    ),
                ),
                (
                    'tenant',
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='routers',
                        to='openstack.Tenant',
                    ),
                ),
            ],
            options={'abstract': False,},
            bases=(
                waldur_core.core.models.DescendantMixin,
                waldur_core.core.models.BackendModelMixin,
                waldur_core.structure.models.StructureLoggableMixin,
                models.Model,
            ),
        ),
    ]
StarcoderdataPython
160587
<gh_stars>0
from . import immix

CACHEABLE = True
methods = ("imagesforperson",)


def run(args, method):
    if "q" not in args:
        raise Exception("No query given")
    q = args.get("q")
    if method == "imagesforperson":
        return immix.imagesforperson(q)
StarcoderdataPython
3367183
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019~2999 - Cologler <<EMAIL>>
# ----------
#
# ----------

from typing import Tuple

import click
from click.testing import CliRunner
from click_anno import command, click_app, attrs


def test_basic_arguments():
    @command
    def touch(filename):
        click.echo(filename)

    result = CliRunner().invoke(touch, ['foo.txt'])
    assert result.exit_code == 0
    assert result.output == 'foo.txt\n'


def test_variadic_arguments():
    @command
    def copy(src: tuple, dst):
        for fn in src:
            click.echo('move %s to folder %s' % (fn, dst))

    result = CliRunner().invoke(copy, ['foo.txt', 'bar.txt', 'my_folder'])
    assert result.exit_code == 0
    assert result.output == 'move foo.txt to folder my_folder\nmove bar.txt to folder my_folder\n'


def test_basic_value_options():
    @command
    def dots(n=1):
        click.echo('.' * n)

    result = CliRunner().invoke(dots, [])
    assert result.exit_code == 0
    assert result.output == '.\n'

    result_5 = CliRunner().invoke(dots, ['--n', '5'])
    assert result_5.exit_code == 0
    assert result_5.output == '.....\n'


def test_required_value_options():
    @command
    def dots(*, n: int):
        click.echo('.' * n)

    result = CliRunner().invoke(dots, ['--n=2'])
    assert result.exit_code == 0
    assert result.output == '..\n'


def test_multi_value_options():
    @command
    def findme(*, pos: Tuple[float, float]):
        click.echo('%s / %s' % pos)

    result = CliRunner().invoke(findme, ['--pos', '2.0', '3.0'])
    assert result.exit_code == 0
    assert result.output == '2.0 / 3.0\n'


def test_tuples_as_multi_value_options():
    @command
    def putitem(*, item: (str, int)):
        click.echo('name=%s id=%d' % item)

    result = CliRunner().invoke(putitem, ['--item', 'peter', '1338'])
    assert result.exit_code == 0
    assert result.output == 'name=peter id=1338\n'

    @command
    def putitem(*, item: Tuple[str, int]):
        click.echo('name=%s id=%d' % item)

    result = CliRunner().invoke(putitem, ['--item', 'peter', '1338'])
    assert result.exit_code == 0
    assert result.output == 'name=peter id=1338\n'


def test_boolean_flags():
    import click
    from click_anno import command
    from click_anno.types import flag

    @command
    def func(shout: flag):
        click.echo(f'{shout!r}')

    result = CliRunner().invoke(func, ['--shout'])
    assert result.exit_code == 0
    assert result.output == 'True\n'


def test_inject_context():
    @command
    def sync(a, ctx: click.Context, b):  # `ctx` can be any location
        assert isinstance(ctx, click.Context)
        click.echo(f'{a}, {b}')

    result = CliRunner().invoke(sync, ['1', '2'])
    assert result.exit_code == 0
    assert result.output == '1, 2\n'


def test_group():
    @click_app
    class App:
        def __init__(self):
            click.echo('Running')

        def sync(self):
            click.echo('Syncing')

    result = CliRunner().invoke(App, ['sync'])
    assert result.exit_code == 0
    assert result.output == 'Running\nSyncing\n'


def test_group_invocation_without_command():
    @click_app
    @attrs(invoke_without_command=True)
    class App:
        def __init__(self, ctx: click.Context):
            if ctx.invoked_subcommand is None:
                click.echo('I was invoked without subcommand')
            else:
                click.echo('I am about to invoke %s' % ctx.invoked_subcommand)

        def sync(self):
            click.echo('The subcommand')

    result = CliRunner().invoke(App, [])
    assert result.exit_code == 0
    assert result.output == 'I was invoked without subcommand\n'

    result = CliRunner().invoke(App, ['sync'])
    assert result.exit_code == 0
    assert result.output == 'I am about to invoke sync\nThe subcommand\n'


def test_alias():
    @click_app
    class App:
        def sync(self):
            click.echo('Syncing')

        alias = sync

    result = CliRunner().invoke(App, ['sync'])
    assert result.exit_code == 0
    assert result.output == 'Syncing\n'

    result = CliRunner().invoke(App, ['alias'])
    assert result.exit_code == 0
    assert result.output == 'Syncing\n'


def test_enum():
    import click
    from click_anno import command
    from enum import Enum, auto

    class HashTypes(Enum):
        md5 = auto()
        sha1 = auto()

    @command
    def digest(hash_type: HashTypes):
        assert isinstance(hash_type, HashTypes)
        click.echo(hash_type)

    result = CliRunner().invoke(digest, ['md5'])
    assert result.exit_code == 0
    assert result.output == 'HashTypes.md5\n'
StarcoderdataPython
3264657
<filename>modules/Eventlog/lv1_os_win_event_logs_shared_folder.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, sys, re
from datetime import datetime

from utility import database


class Shared_Folder_Information:
    par_id = ''
    case_id = ''
    evd_id = ''
    task = ''
    time = ''
    user_sid = ''
    event_id = ''
    source = ''
    event_id_description = ''


def EVENTLOGSHAREDFOLDER(configuration):
    # db = database.Database()
    # db.open()

    shared_folder_list = []
    shared_folder_count = 0

    query = f"SELECT data, event_id, time_created, source, user_sid FROM lv1_os_win_evt_total WHERE (evd_id='{configuration.evidence_id}') and (event_id like '4656' and source like '%Security.evtx%') or (event_id like '4663' and source like '%Security.evtx%') or (event_id like '5140' and source like '%Security.evtx%') or (event_id like '30804' and source like '%SMBClient%') or (event_id like '30805' and source like '%SMBClient%') or (event_id like '30806' and source like '%SMBClient%') or (event_id like '30807' and source like '%SMBClient%') or (event_id like '30808' and source like '%SMBClient%')"

    # result_query = db.execute_query_mul(query)
    result_query = configuration.cursor.execute_query_mul(query)

    for result_data in result_query:
        shared_folder_information = Shared_Folder_Information()
        try:
            shared_folder_list.append(shared_folder_information)
            shared_folder_list[shared_folder_count].event_id = result_data[1]
            shared_folder_list[shared_folder_count].time = result_data[2]
            shared_folder_list[shared_folder_count].source = result_data[3]
            shared_folder_list[shared_folder_count].user_sid = result_data[4]
            shared_folder_list[shared_folder_count].task = 'File Shared'
            if result_data[1] == '4656':
                shared_folder_list[shared_folder_count].event_id_description = 'Request a handle to an object'
            elif result_data[1] == '4663':
                shared_folder_list[shared_folder_count].event_id_description = 'Attempt to access an object'
            elif result_data[1] == '5140':
                shared_folder_list[shared_folder_count].event_id_description = 'A network share object was accessed'
            elif result_data[1] == '30804':
                shared_folder_list[shared_folder_count].event_id_description = 'A network connection was disconnected'
            elif result_data[1] == '30805':
                shared_folder_list[shared_folder_count].event_id_description = 'The client lost its session to the server'
            elif result_data[1] == '30806':
                shared_folder_list[shared_folder_count].event_id_description = 'The client re-established its session to the server'
            elif result_data[1] == '30807':
                shared_folder_list[shared_folder_count].event_id_description = 'The connection to the share was lost'
            elif result_data[1] == '30808':
                shared_folder_list[shared_folder_count].event_id_description = 'The connection to the share was re-established'
            shared_folder_count = shared_folder_count + 1
        except:
            print("EVENT LOG SHARED FOLDER ERROR")

    # db.close()
    return shared_folder_list
StarcoderdataPython
105971
<gh_stars>0
from dataclasses import dataclass
from typing import Optional

from torch.utils.data import DataLoader


@dataclass
class DataLoaderCollection:
    train_loader: DataLoader
    test_loader: DataLoader
    valid_loader: Optional[DataLoader] = None
StarcoderdataPython
4801934
'''
Preprocessing that extracts word data from a corpus in order to build a model.
'''
import re
from collections import Counter

import MeCab
from tqdm import tqdm

from modules.models.DbModel import DbModel
from modules.corpus_processing.WikiProcessing import WikiProcessing
from config.config import CONFIG


class WordProcessing():
    '''
    Class that computes word frequencies from sentences and maps words to indices.
    '''

    def __init__(self, corpus, docs_files):
        self.corpus = corpus
        self.docs_files = docs_files
        self.file_num = len(docs_files)
        # Use Ochasen so that base forms are used instead of Japanese surface forms
        self.tagger = MeCab.Tagger('-Ochasen')

    def extract_words_fequency(self):
        words_frequency = Counter({})
        unnecessary_words = []
        pbar = tqdm(total=self.file_num)
        pbar.set_description('extract words fequency')
        for docs_file in self.docs_files:
            pbar.update(1)
            with open(docs_file) as doc:
                paragraph_sentence_list = \
                    self.corpus.separate_paragraph_array(doc)
                words_list = []
                for paragraph_sentence in paragraph_sentence_list:
                    node = self.tagger.parseToNode(paragraph_sentence)
                    paragraph_words = []
                    while node:
                        word = node.feature.split(",")[6]
                        # Extract only Japanese expressions
                        if re.search(r'[ぁ-んァ-ヶ一-龥]+', word):
                            paragraph_words.append(word)
                        else:
                            if word not in unnecessary_words:
                                unnecessary_words.append(word)
                        node = node.next
                    words_list.extend(paragraph_words)
                words_frequency += Counter(words_list)
        pbar.close()
        return words_frequency

    def _create_word2idx_dict(self, words_info):
        word_stoi = {}
        for word_info in words_info:
            word_stoi[word_info[1]] = word_info[0]
        return word_stoi

    def transfer_sentence_word2idx(self, words_info, db_model):
        word_stoi = self._create_word2idx_dict(words_info)
        pbar = tqdm(total=self.file_num)
        pbar.set_description('transfer sentence word2idx')
        for wiki_file in self.docs_files:
            pbar.update(1)
            with open(wiki_file) as doc:
                inserted_info = []
                paragraph_sentence_list = \
                    self.corpus.separate_paragraph_array(doc)
                for paragraph_sentence in paragraph_sentence_list:
                    node = self.tagger.parseToNode(paragraph_sentence)
                    paragraph_words = ''
                    # paragraph_words = []
                    while node:
                        word = node.feature.split(",")[6]
                        # Extract only Japanese expressions
                        if re.search(r'[ぁ-んァ-ヶ一-龥]+', word):
                            paragraph_words += str(word_stoi[word]) + ', '
                        node = node.next
                    # if len(paragraph_words) > 0:
                    if paragraph_words:
                        paragraph_words = paragraph_words[:-2]
                        inserted_info.append((
                            wiki_file,
                            paragraph_sentence,
                            paragraph_words
                        ))
                # Register the sentence information in the DB
                db_model.insert_records_sentences_table(inserted_info)
        pbar.close()


def preprocess():
    wiki = WikiProcessing()
    wiki_files = wiki.get_files()
    wiki_word_processing = WordProcessing(wiki, wiki_files)
    wiki_words_frequency = wiki_word_processing.extract_words_fequency()
    # Register the word information in the DB
    db_model = DbModel(CONFIG['db_file_name'], True)
    db_model.insert_records_words_table(wiki_words_frequency)
    words_info = db_model.select_all_records_words_table()
    wiki_word_processing.transfer_sentence_word2idx(words_info, db_model)
    db_model.close_connection()
StarcoderdataPython
4842171
<filename>src/graph/visu_sol.py
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from itertools import product, combinations

fig = plt.figure()
ax = None


def create_subfig(row, col, place):
    global ax
    ax = fig.add_subplot(row, col, place, projection="3d")


def wiresphere_spere(
    resolution_theta: complex = 20j, resolution_phi: complex = 10j, color="black"
) -> None:
    # draw a wireframe unit sphere; the imaginary step makes np.mgrid
    # interpret the value as a point count rather than a step size
    u, v = np.mgrid[0 : 2 * np.pi : resolution_theta, 0 : np.pi : resolution_phi]
    x = np.cos(u) * np.sin(v)
    y = np.sin(u) * np.sin(v)
    z = np.cos(v)
    ax.plot_wireframe(x, y, z, color=color)


def surface_spere(
    r=1, alpha=1, resolution_theta=20j, resolution_phi=10j, color="black"
) -> None:
    # draw a solid sphere of radius r
    u, v = np.mgrid[0 : 2 * np.pi : resolution_theta, 0 : np.pi : resolution_phi]
    x = r * np.cos(u) * np.sin(v)
    y = r * np.sin(u) * np.sin(v)
    z = r * np.cos(v)
    ax.plot_surface(x, y, z, color=color, alpha=alpha)


def add_point(x, y, z, color="g", size=50, alpha=1):
    ax.scatter(x, y, z, color=color, s=size, alpha=alpha)


def kugelkappe(x, y, z, r, resolution=20j, color="g"):
    M = np.array([x, y, z])
    if y == 0 and z == 0:
        e1 = np.array([0, 1, 0])
        e2 = np.array([0, 0, 1])
    else:
        e1 = np.array([0, z, -y])
        A = np.array([M, e1, [1, 0, 0]])
        B = np.array([0, 0, 1])
        e2 = np.linalg.solve(A, B)
    M = M * np.cos(r)
    e1 = e1 / (np.sum(e1 ** 2) ** 0.5)
    e2 = e2 / (np.sum(e2 ** 2) ** 0.5)
    a = np.sin(r)
    phi_arr = np.mgrid[0 : 2 * np.pi : resolution]
    px = M[0] + a * np.cos(phi_arr) * e1[0] + a * np.sin(phi_arr) * e2[0]
    py = M[1] + a * np.cos(phi_arr) * e1[1] + a * np.sin(phi_arr) * e2[1]
    pz = M[2] + a * np.cos(phi_arr) * e1[2] + a * np.sin(phi_arr) * e2[2]
    ax.scatter(px, py, pz, color=color, s=10)


def plot_show():
    plt.show()
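
# Hedged usage sketch (not part of the original module): the call order
# below is an assumption based on how the helpers share the module-level
# `ax`; create_subfig must run first so `ax` is set.
if __name__ == "__main__":
    create_subfig(1, 1, 1)
    wiresphere_spere()                    # unit sphere as a wireframe
    add_point(0.0, 0.0, 1.0, color="r")   # mark the north pole
    kugelkappe(0.0, 0.0, 1.0, r=0.3)      # spherical cap around that point
    plot_show()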
StarcoderdataPython
1713660
"""Module that defines the message class.""" class Message(object): """Defines the message to be added in the response.""" def __init__(self, message: str) -> None: """ Instances the Message Class. Args: message: Aditional information of the response. """ self.msg = message def serialize(self) -> dict: """ Serialize the Message object into a dictionary. Returns: A dictionary with the msg of the Message object. """ return { 'msg': self.msg, }
StarcoderdataPython
3312458
from ._circuit_conversions import export_to_qiskit, import_from_qiskit
from ._openfermion_conversions import qiskitpauli_to_qubitop, qubitop_to_qiskitpauli
StarcoderdataPython
3359624
<reponame>itsyaboyrocket/ttleveleditor<filename>leveleditor/ObjectMgr.py
"""
Defines ObjectMgr
"""

from ObjectMgrBase import *


class ObjectMgr(ObjectMgrBase):
    """ ObjectMgr will create, manage, update objects in the scene """

    def __init__(self, editor):
        ObjectMgrBase.__init__(self, editor)
StarcoderdataPython
3235790
<filename>tests/test_connection_spid.py
import ctds

from .base import TestExternalDatabase
from .compat import long_


class TestConnectionSpid(TestExternalDatabase):
    '''Unit tests related to the Connection.spid attribute.
    '''

    def test___doc__(self):
        self.assertEqual(
            ctds.Connection.spid.__doc__,
            '''\
The SQL Server Session Process ID (SPID) for the connection or
:py:data:`None` if the connection is closed.

:rtype: int
'''
        )

    def test_read(self):
        with self.connect() as connection:
            self.assertTrue(isinstance(connection.spid, long_))
            with connection.cursor() as cursor:
                cursor.execute('SELECT @@SPID;')
                spid = cursor.fetchone()[0]
            if self.freetds_version >= (1, 0, 0): # pragma: nocover
                self.assertEqual(connection.spid, spid)

        self.assertEqual(connection.spid, None)

    def test_write(self):
        with self.connect() as connection:
            try:
                connection.spid = 9
            except AttributeError:
                pass
            else:
                self.fail('.spid did not fail as expected') # pragma: nocover

            try:
                connection.spid = None
            except AttributeError:
                pass
            else:
                self.fail('.spid did not fail as expected') # pragma: nocover
StarcoderdataPython
1654803
import cv2
import numpy


def merge_rectangle_contours(rectangle_contours):
    merged_contours = [rectangle_contours[0]]
    for rec in rectangle_contours[1:]:
        for i in range(len(merged_contours)):
            x_min = rec[0][0]
            y_min = rec[0][1]
            x_max = rec[2][0]
            y_max = rec[2][1]
            merged_x_min = merged_contours[i][0][0]
            merged_y_min = merged_contours[i][0][1]
            merged_x_max = merged_contours[i][2][0]
            merged_y_max = merged_contours[i][2][1]
            if x_min >= merged_x_min and y_min >= merged_y_min and x_max <= merged_x_max and y_max <= merged_y_max:
                # this rectangle lies entirely inside an already merged one
                break
            else:
                if i == len(merged_contours)-1:
                    merged_contours.append(rec)
    return merged_contours


def get_image_text(img, engine='cnocr'):
    # Placeholder: no OCR engine is wired in yet; this stub returns the
    # engine name instead of the recognized text.
    text = 'cnocr'
    return text


def contour_area_filter(binary, contours, thresh=1500):
    rectangle_contours = []
    h, w = binary.shape
    for contour in contours:
        if thresh < cv2.contourArea(contour) < 0.2*h*w:
            rectangle_contours.append(contour)
    return rectangle_contours


def get_roi_image(img, rectangle_contour):
    roi_image = img[rectangle_contour[0][1]:rectangle_contour[2][1],
                    rectangle_contour[0][0]:rectangle_contour[1][0]]
    return roi_image


def get_pop_v(image):
    """
    calculate a value indicating whether a pop-up window exists
    :param image: image path
    :return: mean of v channel
    """
    img = cv2.imread('capture/'+image)
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(img_hsv)
    return numpy.mean(v)


def get_rectangle_contours(binary):
    _, contours, _ = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    rectangle_contours = []
    for counter in contours:
        x, y, w, h = cv2.boundingRect(counter)
        cnt = numpy.array([[x, y], [x + w, y], [x + w, y + h], [x, y + h]])
        rectangle_contours.append(cnt)
    rectangle_contours = sorted(rectangle_contours, key=cv2.contourArea, reverse=True)[:100]
    rectangle_contours = contour_area_filter(binary, rectangle_contours)
    rectangle_contours = merge_rectangle_contours(rectangle_contours)
    return rectangle_contours


def get_center_pos(contour):
    x = int((contour[0][0]+contour[1][0])/2)
    y = int((contour[1][1]+contour[2][1])/2)
    return [x, y]


def get_label_pos(contour):
    center = get_center_pos(contour)
    x = int((int((center[0]+contour[2][0])/2)+contour[2][0])/2)
    y = int((int((center[1]+contour[2][1])/2)+contour[2][1])/2)
    return [x, y]


def draw_contours(img, contours, color="info"):
    if color == "info":
        cv2.drawContours(img, contours, -1, (255, 145, 30), 3)
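
# Hedged usage sketch (not part of the original module): the file name and
# threshold value are placeholders chosen for illustration, and the
# 3-value findContours unpacking above assumes OpenCV 3.x.
if __name__ == "__main__":
    img = cv2.imread("capture/example.png")  # hypothetical screenshot
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
    boxes = get_rectangle_contours(binary)
    draw_contours(img, boxes)
    print(get_label_pos(boxes[0]) if boxes else "no boxes found")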
StarcoderdataPython
199353
<reponame>toyoshi/myCobotROS
#!/usr/bin/env python2
# license removed for brevity
import time, subprocess

import rospy
from sensor_msgs.msg import JointState
from std_msgs.msg import Header
from visualization_msgs.msg import Marker

from pymycobot.mycobot import MyCobot


def talker():
    pub = rospy.Publisher('joint_states', JointState, queue_size=10)
    pub_marker = rospy.Publisher('visualization_marker', Marker, queue_size=10)
    rospy.init_node('display', anonymous=True)
    rate = rospy.Rate(30)  # 30hz

    # pub joint state
    joint_state_send = JointState()
    joint_state_send.header = Header()

    joint_state_send.name = [
        'joint2_to_joint1', 'joint3_to_joint2', 'joint4_to_joint3',
        'joint5_to_joint4', 'joint6_to_joint5', 'joint6output_to_joint6'
    ]
    joint_state_send.velocity = [0]
    joint_state_send.effort = []

    marker_ = Marker()
    marker_.header.frame_id = '/joint1'
    marker_.ns = 'my_namespace'

    while not rospy.is_shutdown():
        joint_state_send.header.stamp = rospy.Time.now()

        angles = mycobot.get_radians()
        data_list = []
        for index, value in enumerate(angles):
            if index != 2:
                value *= -1
            data_list.append(value)

        joint_state_send.position = data_list

        pub.publish(joint_state_send)

        coords = mycobot.get_coords()
        rospy.loginfo('{}'.format(coords))

        # marker
        marker_.header.stamp = rospy.Time.now()
        marker_.type = marker_.SPHERE
        marker_.action = marker_.ADD
        marker_.scale.x = 0.04
        marker_.scale.y = 0.04
        marker_.scale.z = 0.04

        # marker position initial
        # print(coords)
        if not coords:
            coords = [0, 0, 0, 0, 0, 0]
            rospy.loginfo('error [101]: can not get coord values')

        marker_.pose.position.x = coords[1] / 1000 * -1
        marker_.pose.position.y = coords[0] / 1000
        marker_.pose.position.z = coords[2] / 1000

        marker_.color.a = 1.0
        marker_.color.g = 1.0

        pub_marker.publish(marker_)

        rate.sleep()


if __name__ == '__main__':
    port = subprocess.check_output(['echo -n /dev/ttyUSB*'], shell=True).decode()
    mycobot = MyCobot(port)
    try:
        talker()
    except rospy.ROSInterruptException:
        pass
StarcoderdataPython
1704987
<reponame>raminjafary/ethical-hacking<gh_stars>0
#!/usr/bin/python2.7

# discover URLs in the domain by extracting the href links from each page
# and crawling them recursively to collect every URL

import requests
import re
import urlparse

target_url = "http://192.168.44.101"
target_links = []


def extract_links_from(url):
    response = requests.get(url)
    return re.findall('(?:href=")(.*?)"', response.content)


def crawl(url):
    href_links = extract_links_from(url)
    for link in href_links:
        link = urlparse.urljoin(url, link)

        if "#" in link:
            # a "#" fragment points back to the same page, so strip it to
            # avoid crawling the same page again and again
            link = link.split("#")[0]

        if target_url in link and link not in target_links:
            # skip URLs we have already seen
            target_links.append(link)
            print "[+]urls --->", link
            # crawl the newly discovered link recursively
            crawl(link)


crawl(target_url)
StarcoderdataPython
3251438
from aiogram import types
from aiogram.dispatcher import FSMContext
from tortoise.exceptions import IntegrityError

from keyboards import inline
from keyboards.inline import back_callback, faculties, delete_callback, create_callback
from keyboards.inline.admin import cancel, cancel_or_delete, cancel_or_create
from loader import dp, bot
from models import Admin, subgroup, User, Subgroup
from states.admin import AdminStates
from states.admin.edit_subgroup import EditSubgroupStates
from utils.misc import get_current_admin, get_current_user
from middlewares import _


@get_current_admin()
@dp.callback_query_handler(delete_callback.filter(category='subgroup'), state=EditSubgroupStates.edit)
async def delete_subgroup(callback: types.CallbackQuery, admin: Admin, state: FSMContext):
    data = await state.get_data()
    subgroup_id = data.get('subgroup')
    subgroup = await Subgroup.filter(id=subgroup_id).first()
    await subgroup.delete()
    await state.update_data(subgroup=None)
    await callback.answer(_('Подгруппа была успешно удалена'))
    await admin.fetch_related('group')
    keyboard = await inline.admin.edit_subgroups.get_keyboard(admin.group.id)
    await callback.message.edit_text(_('Выбери подгруппу или добавь новую:'), reply_markup=keyboard)
    await AdminStates.subgroups.set()


@get_current_admin()
@dp.callback_query_handler(back_callback.filter(category='cancel'),
                           state=[AdminStates.subgroups, EditSubgroupStates.create, EditSubgroupStates.edit])
async def back_from_subgroup(callback: types.CallbackQuery, admin: Admin, state: FSMContext):
    await callback.answer()
    await admin.fetch_related("group")
    keyboard = await inline.admin.edit_subgroups.get_keyboard(admin.group.id)
    await callback.message.edit_text(_('Выбери подгруппу или добавь новую:'), reply_markup=keyboard)
    await AdminStates.subgroups.set()


@get_current_admin()
@get_current_user()
@dp.callback_query_handler(create_callback.filter(category='subgroup'), state=EditSubgroupStates.edit)
async def save_subgroup(callback: types.CallbackQuery, state: FSMContext, user: User, admin: Admin):
    data = await state.get_data()
    subgroup_id = data.get('subgroup')
    subgroup = await Subgroup.filter(id=subgroup_id).first()
    subgroup.title = data.get('new_subgroup') if data.get('new_subgroup') else None
    await subgroup.save()
    await admin.fetch_related('group')
    keyboard = await inline.admin.edit_subgroups.get_keyboard(admin.group.id)
    await callback.answer(_("Подгруппа успешно переименована"))
    await bot.edit_message_text(_("Выбери подгруппу или добавь новую"), reply_markup=keyboard,
                                chat_id=user.tele_id, message_id=data.get("current_msg"))
    await AdminStates.subgroups.set()


@get_current_user()
@get_current_admin()
@dp.message_handler(state=EditSubgroupStates.create)
async def create_subgroup(msg: types.Message, state: FSMContext, user: User, admin: Admin):
    data = await state.get_data()
    await msg.delete()
    await admin.fetch_related('group')
    await Subgroup.create(title=msg.text, group=admin.group)
    keyboard = await inline.admin.edit_subgroups.get_keyboard(admin.group.id)
    await bot.edit_message_text(_("Выбери подгруппу или добавь новую:"), reply_markup=keyboard,
                                chat_id=user.tele_id, message_id=data.get("current_msg"))
    await AdminStates.subgroups.set()


@get_current_user()
@dp.message_handler(state=EditSubgroupStates.edit)
async def edit_subgroup(msg: types.Message, state: FSMContext, user: User):
    data = await state.get_data()
    subgroup_id = data.get('subgroup')
    await msg.delete()
    if subgroup_id:
        subgroup = await Subgroup.filter(id=subgroup_id).first()
        keyboard = await cancel_or_create.get_keyboard("subgroup")
        await state.update_data(new_subgroup=msg.text)
        await bot.edit_message_text(
            _('Ты пытаешься изменить название подгруппы "{}" на "{}"'.format(subgroup.title, msg.text)),
            reply_markup=keyboard, chat_id=user.tele_id, message_id=data.get("current_msg"))


@get_current_admin()
@dp.callback_query_handler(state=AdminStates.subgroups)
async def edit_subgroups(callback: types.CallbackQuery, admin: Admin, state: FSMContext):
    await callback.answer()
    if callback.data == "add-subgroup":
        await callback.message.edit_text(_('Введи название для новой подгруппы:'), reply_markup=cancel.keyboard)
        await EditSubgroupStates.create.set()
    elif callback.data.startswith("subgroup-"):
        subgroup_id = callback.data.split('-')[-1]
        subgroup = await Subgroup.filter(id=int(subgroup_id)).first()
        keyboard = await cancel_or_delete.get_keyboard("subgroup")
        await callback.message.edit_text(_('Напиши название для подгруппы - {}, чтобы изменить'.format(subgroup.title)),
                                         reply_markup=keyboard)
        await EditSubgroupStates.edit.set()
        await state.update_data(subgroup=int(subgroup_id))


@dp.message_handler(state=AdminStates.subgroups)
async def clear(msg: types.Message):
    await msg.delete()
StarcoderdataPython
3307459
import numpy as np
from PIL import Image


def rebuild(dir):
    images = np.zeros([7, 168, 168])
    lvector = np.zeros([7, 3])  # the direction of light

    for num in range(7):
        image = Image.open(dir + '/train/' + str(num + 1) + '.bmp')
        images[num] = np.asarray(image)

    for line in open(dir + '/train.txt'):
        i, ang1, ang2 = line.strip().split(",")
        i = int(i)
        ang1 = int(ang1)
        ang2 = int(ang2)
        # convert the two angles into a unit direction vector
        lvector[i - 1] = (np.sin(np.pi * ang1 / 180) * np.cos(np.pi * ang2 / 180),
                          np.sin(np.pi * ang2 / 180),
                          np.cos(np.pi * ang1 / 180) * np.cos(np.pi * ang2 / 180))
    lvector = -lvector

    vector = np.zeros([3, 168, 168])
    alpha = np.zeros([168, 168])
    b = np.zeros([3, 168, 168])

    # Photometric stereo: per pixel, solve the normal equations
    # (L^T L) b = L^T I for b = albedo * normal.
    for j in range(168):
        for k in range(168):
            b[:, j, k] = np.linalg.solve(np.dot(lvector.T, lvector), np.dot(lvector.T, images[:, j, k]))
            alpha[j, k] = np.linalg.norm(b[:, j, k], ord=2)  # albedo
            temp = b[:, j, k] / alpha[j, k]  # unit normal
            if b[:, j, k][-1] > 0:
                vector[:, j, k] = -temp
            else:
                vector[:, j, k] = temp

    # Refine b by gradient descent on the clipped re-rendering error.
    while True:
        pred = np.clip(np.einsum('ij,jkl->ikl', lvector, b), 0, 255)
        grad_b = (2 / 7) * np.sum(
            np.einsum('ijk,il->iljk', (pred - images), lvector) * ((pred > 0)[:, np.newaxis, :, :]),
            axis=0)
        b = b - grad_b
        if np.abs(np.max(grad_b)) <= 0.1:
            break

    return vector, b
StarcoderdataPython
134965
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User

# from .models import Profile
#
# class ProfileInline(admin.StackedInline):
#     """
#
#     """
#     model = Profile
#     can_delete = False
#     verbose_name_plural = 'Profile'
#     fk_name = 'user'
#
# class CustomUserAdmin(UserAdmin):
#     inlines = (ProfileInline, )
#
#     def get_inline_instances(self, request, obj=None):
#         if not obj:
#             return list()
#         return super(CustomUserAdmin, self).get_inline_instances(request, obj)
#
#
# admin.site.unregister(User)
# admin.site.register(User, CustomUserAdmin)

# class UserStripeAdmin(admin.ModelAdmin):
#     class Meta:
#         model = UserStripe
#
# admin.site.register(UserStripe, UserStripeAdmin)
StarcoderdataPython
1652681
# Generated by Django 2.2.2 on 2019-08-31 20:05

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('source_optics', '0020_statistic_days_before_joined'),
    ]

    operations = [
        migrations.AddField(
            model_name='statistic',
            name='days_active',
            field=models.IntegerField(blank=True, default=0),
        ),
        migrations.AddField(
            model_name='statistic',
            name='longevity',
            field=models.IntegerField(blank=True, default=0),
        ),
    ]
StarcoderdataPython
4823341
from urllib.error import URLError, HTTPError
import logging
from time import time, sleep

from .configuration import config
from .jsonapi import JsonApi
from .util import random_secret

log = logging.getLogger(__name__)


class Vault(JsonApi):

    def __init__(self, endpoint, token=None):
        super().__init__(endpoint + '/v1/')
        self.token = token

    def request(self, *args, headers=None, **kwargs):
        headers = dict(headers or {})
        if self.token:
            headers['X-Vault-Token'] = self.token
        return super().request(*args, headers=headers, **kwargs)

    def ensure_engine(self, timeout=30):
        t0 = time()
        while time() - t0 < int(timeout):
            try:
                mounts = self.get('sys/mounts')
                break
            except URLError as e:
                log.warning('vault GET sys/mounts: %s', e)
                sleep(2)
        else:
            raise RuntimeError(f"Vault is down after {timeout}s")

        if 'liquid/' not in mounts['data']:
            log.info("Creating kv secrets engine `liquid`")
            self.post('sys/mounts/liquid', {'type': 'kv'})

    def list(self, prefix=''):
        return self.get(f'{prefix}?list=true')['data']['keys']

    def read(self, path):
        try:
            return self.get(path)['data']
        except HTTPError as e:
            if e.code == 404:
                return None
            raise

    def set(self, path, payload):
        return self.put(path, payload)

    def ensure_secret(self, path, get_value):
        if not self.read(path):
            log.info(f"Generating value for {path}")
            self.set(path, get_value())

    def ensure_secret_key(self, path):
        self.ensure_secret(path, lambda: {'secret_key': random_secret()})


vault = Vault(config.vault_url, config.vault_token)
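
# Hedged usage sketch (not part of the original module; shown as comments
# because the relative imports above keep this file from running as a
# script). The endpoint, token, and secret path are placeholder values:
#
#   v = Vault("http://127.0.0.1:8200", token="dev-only-token")
#   v.ensure_engine(timeout=10)          # mounts the `liquid` kv engine once
#   v.ensure_secret_key("liquid/example-app")
#   print(v.read("liquid/example-app"))  # {'secret_key': '...'}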
StarcoderdataPython
1784515
import click


def print_buckets(buckets):
    click.echo(' buckets')
    for bucket in buckets:
        frozen = '[frozen]' if bucket['frozen'] else ''
        click.echo('   + %-25s' % (bucket['name'] + frozen), nl=False)
        if bucket['urls']:
            urls = '(%s)' % ', '.join(bucket['urls'].keys())
        else:
            urls = 'empty'
        click.echo(urls)


def print_bucket(bucket):
    frozen = '[frozen]' if bucket['frozen'] else ''
    click.echo(' bucket: %-25s' % (bucket['name'] + frozen))
    click.echo(' Caches:')
    for name, info in bucket['urls'].items():
        click.echo('   %10s => %s' % (name, info['url']))


def print_objects(info):
    objects = info.get('objects')
    if not objects:
        click.echo(' No sources in this bucket')
        return
    for obj in objects:
        click.echo(' - {}'.format(obj['source']))
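
# Hedged usage sketch (not part of the original module): the bucket dict
# below mirrors the keys the printers access ('name', 'frozen', 'urls').
if __name__ == "__main__":
    bucket = {
        "name": "photos",
        "frozen": False,
        "urls": {"local": {"url": "http://127.0.0.1:8000/photos"}},
    }
    print_buckets([bucket])
    print_bucket(bucket)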
StarcoderdataPython
1656173
<filename>11/aoc_11.py
import os

import numpy as np

INPUT = os.path.join(os.path.dirname(__file__), "input.txt")

with open(INPUT) as f:
    lines = f.readlines()

lines = [list(l.rstrip()) for l in lines]
lines_arr = np.array(lines).astype(int)


def within_bounds(x, y, lower, upper):
    full = np.transpose(np.vstack((x, y)))
    r = [np.all(n >= lower) and np.all(n <= upper) for n in full]
    return np.where(np.invert(r))


def flash(octo_field, flash_count=0, flash_field=None):
    if flash_field is None:
        flash_field = np.ones_like(octo_field)

    flashing = np.where(octo_field > 9)
    n_flashing = flashing[0].size
    if n_flashing == 0:
        return flash_count, octo_field

    flash_count += n_flashing
    octo_field[flashing] = 0
    flash_field[flashing] = 0

    # Generate all Neighbors
    for x_offset in range(-1, 2):
        for y_offset in range(-1, 2):
            x, y = flashing[0] + x_offset, flashing[1] + y_offset
            # Remove Indices Out of Bounds - Literal Edge cases
            illegal_indices = within_bounds(x, y, 0, 9)
            x = np.delete(x, illegal_indices)
            y = np.delete(y, illegal_indices)
            octo_field[x, y] += 1

    # Already flashed octopi stay 0
    octo_field *= flash_field
    return flash(octo_field, flash_count, flash_field)


# Part 1
octopi = lines_arr.copy()
total = 0
for t in range(100):
    octopi += 1
    flash_count, octopi = flash(octopi)
    total += flash_count
print(total)

# Part 2
octopi = lines_arr.copy()
step = 0
while True:
    octopi += 1
    step += 1
    flash_count, octopi = flash(octopi)
    if flash_count == 100:
        break
print(step)
StarcoderdataPython
1749100
<gh_stars>0
from lx16a import *

"""
# This is the port that the controller board is connected to
# This will be different for different computers
# On Windows, try the ports COM1, COM2, COM3, etc...
# On Raspbian, try each port in /dev/

LX16A.initialize('/dev/ttyUSB0')

servo1 = LX16A(1)
servo2 = LX16A(2)
servo3 = LX16A(3)
servo4 = LX16A(4)
servo5 = LX16A(5)
servo6 = LX16A(6)
servo7 = LX16A(7)
servo8 = LX16A(8)

t = 0
"""


def HealthCheck(servo1, servo2, servo3, servo4, servo5, servo6, servo7, servo8):
    # Query initial motor positions (servo 8 is skipped, as in the original)
    init1 = servo1.getPhysicalPos()
    init2 = servo2.getPhysicalPos()
    init3 = servo3.getPhysicalPos()
    init4 = servo4.getPhysicalPos()
    init5 = servo5.getPhysicalPos()
    init6 = servo6.getPhysicalPos()
    init7 = servo7.getPhysicalPos()
    #init8 = servo8.getPhysicalPos()

    # Check that each motor is receiving sufficient power and is not
    # overheating; the identical per-servo checks are folded into one loop.
    lock = 0
    servos = [servo1, servo2, servo3, servo4, servo5, servo6, servo7, servo8]
    for number, servo in enumerate(servos, start=1):
        if max(servo.vInLimitRead()) > servo.vInRead() > min(servo.vInLimitRead()):
            volt = 0
        else:
            volt = 1
            print('Voltage Error: servo %d:' % number, servo.vInRead())

        if servo.tempRead() > servo.tempMaxLimitRead():
            temp = 1
            print('Temperature Error: servo %d' % number)
        else:
            temp = 0

        servo.LEDErrorWrite(temp, volt, lock)

    #Checksum / COM errors
    #if servo1.checksum()
StarcoderdataPython
1799719
__version__ = '0.4.0.dev0+git'
StarcoderdataPython
3206805
import logging

import pygame

from ._game_scene_manager import IProvideGameScenes
from ._game_session import IGameSession

logger = logging.getLogger(__name__)


class BlockingGameSession(IGameSession):
    _scene_manager: IProvideGameScenes

    def __init__(self, scene_manager: IProvideGameScenes) -> None:
        self._scene_manager = scene_manager

    def start(self) -> None:
        logger.debug("starting game session")
        pygame.display.set_caption("Our Game")
        scene = self._scene_manager.get_scene()
        scene.start()
        while not scene.should_quit():
            scene.tick()
        logger.debug("exiting game session")

    def wait_for_completion(self) -> None:
        pass

    def stop(self) -> None:
        pass
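
# Hedged usage sketch (not part of the original module; shown as comments
# because the relative imports above keep this file from running as a
# script). Any object exposing get_scene() returning a scene with start(),
# should_quit(), and tick() satisfies the session:
#
#   class OneShotScene:
#       def start(self): pass
#       def should_quit(self): return True
#       def tick(self): pass
#
#   class StubManager:
#       def get_scene(self): return OneShotScene()
#
#   BlockingGameSession(StubManager()).start()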
StarcoderdataPython
1628868
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Test for nigt optimizer."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import test

from nigt_optimizer.nigt_optimizer import NIGTOptimizer


class NIGTOptimizerTest(test.TestCase):

    def testRunsMinimize(self):
        nigt = NIGTOptimizer(1.0)
        w = tf.Variable([3.0])
        loss = tf.square(w)
        update_op = nigt.minimize(loss, var_list=[w])
        if not tf.executing_eagerly():
            self.evaluate(tf.initializers.global_variables())
        for _ in range(3):
            self.evaluate(update_op)


if __name__ == '__main__':
    test.main()
StarcoderdataPython
3244468
# Copyright L.P.Klyne 2013
# Licenced under 3 clause BSD licence

# $Id: TestHttpAction.py 3499 2010-02-02 08:55:42Z philipp.schuster $
#
# Unit testing for WebBrick library functions (WbAccess.py)
# See http://pyunit.sourceforge.net/pyunit.html
#
# NOTE: this is not strictly a unit test, in that it requires a WebBrick to be
# available at the specified IP address

import sys, logging, time
import unittest

from MiscLib.DomHelpers import *

from EventLib.Event import Event, makeEvent
from EventHandlers.BaseHandler import *
from EventHandlers.EventRouterLoad import EventRouterLoader
import EventHandlers.tests.TestEventLogger as TestEventLogger

import Events
from Utils import *

# NOTE
# *******
# This only does a single twisted test as there seems to be an issue stopping
# and starting twisted during tests.
#

# Configuration for the tests
testConfigHttpAction = """<?xml version="1.0" encoding="utf-8"?>
<eventInterfaces>
    <eventInterface module='EventHandlers.tests.TestEventLogger' name='TestEventLogger'>
        <eventtype type="">
            <eventsource source="" >
                <event>
                    <!-- interested in all events of this type -->
                </event>
            </eventsource>
        </eventtype>
    </eventInterface>

    <eventInterface module='EventHandlers.TwistedReactor' name='TwistedReactor' debug="yes" />

    <eventInterface module='EventHandlers.HttpAction' name='HttpAction' usetwisted='1'>
        <eventtype type="http://id.webbrick.co.uk/events/webbrick/TD">
            <!-- events from a source of a specific type -->
            <eventsource source="webbrick/100/TD/0" >
                <!-- all events from a single source -->
                <event>
                    <params>
                    </params>
                    <url cmd="GET" address="localhost:20999" uri="/test?medianame=ITunes&amp;mediacmd=volup" />
                    Test
                </event>
            </eventsource>
        </eventtype>
    </eventInterface>
</eventInterfaces>
"""

testConfigTwisted = """<?xml version="1.0" encoding="utf-8"?>
<eventInterfaces>
    <eventInterface module='EventHandlers.tests.TestEventLogger' name='TestEventLogger'>
        <eventtype type="">
            <eventsource source="" >
                <event>
                    <!-- interested in all events of this type -->
                </event>
            </eventsource>
        </eventtype>
    </eventInterface>

    <eventInterface module='EventHandlers.TwistedReactor' name='TwistedReactor' debug="yes" />

    <eventInterface module='EventHandlers.HttpAction' name='HttpAction' usetwisted='1'>
        <eventtype type="http://id.webbrick.co.uk/events/webbrick/TD">
            <eventsource source="webbrick/100/TD/0" >
                <event>
                    <url cmd="GET" address="localhost:20999" uri="/test?medianame=ITunes&amp;mediacmd=volup" />
                </event>
            </eventsource>
        </eventtype>
    </eventInterface>
</eventInterfaces>
"""

testConfigError = """<?xml version="1.0" encoding="utf-8"?>
<eventInterfaces>
    <eventInterface module='EventHandlers.tests.TestEventLogger' name='TestEventLogger'>
        <eventtype type="">
            <eventsource source="" >
                <event>
                    <!-- interested in all events of this type -->
                </event>
            </eventsource>
        </eventtype>
    </eventInterface>

    <eventInterface module='EventHandlers.TwistedReactor' name='TwistedReactor' debug="yes" />

    <eventInterface module='EventHandlers.HttpAction' name='HttpAction' usetwisted='0'>
        <eventtype type="http://id.webbrick.co.uk/events/webbrick/TD">
            <!-- events from a source of a specific type -->
            <eventsource source="webbrick/100/TD/0" >
                <!-- all events from a single source -->
                <event>
                    <params>
                    </params>
                    <url cmd="GET" address="localhost:59999" uri="/test?medianame=ITunes&amp;mediacmd=volup" />
                    Test
                </event>
            </eventsource>
        </eventtype>
    </eventInterface>
</eventInterfaces>
"""

testConfigHttpAction2 = """<?xml version="1.0" encoding="utf-8"?>
<eventInterfaces>
    <eventInterface module='EventHandlers.tests.TestEventLogger' name='TestEventLogger'>
        <eventtype type="">
            <eventsource source="" >
                <event>
                    <!-- interested in all events of this type -->
                </event>
            </eventsource>
        </eventtype>
    </eventInterface>

    <eventInterface module='EventHandlers.TwistedReactor' name='TwistedReactor' debug="yes" />

    <eventInterface module='EventHandlers.HttpAction' name='HttpAction' usetwisted='0'>
        <eventtype type="http://id.webbrick.co.uk/events/webbrick/DO">
            <!-- events from a source of a specific type -->
            <eventsource source="webbrick/100/DO/0" >
                <!-- all events from a single source -->
                <event>
                    <url cmd="GET" address="localhost:20999" uri="/test?state=%(state)s" />
                </event>
            </eventsource>
        </eventtype>
    </eventInterface>
</eventInterfaces>
"""

testConfigWebbrickRedirect = """<?xml version="1.0" encoding="utf-8"?>
<eventInterfaces>
    <eventInterface module='EventHandlers.tests.TestEventLogger' name='TestEventLogger'>
        <eventtype type="">
            <eventsource source="" >
                <event>
                    <!-- interested in all events of this type -->
                </event>
            </eventsource>
        </eventtype>
    </eventInterface>

    <eventInterface module='EventHandlers.TwistedReactor' name='TwistedReactor' debug="yes" />

    <eventInterface module='EventHandlers.HttpAction' name='HttpAction' usetwisted='0'>
        <eventtype type="http://id.webbrick.co.uk/events/webbrick/TD">
            <!-- events from a source of a specific type -->
            <eventsource source="webbrick/100/TD/0" >
                <!-- all events from a single source -->
                <event>
                    <url cmd="GET" address="localhost:20999" uri="/hid.spi?COM=DO0N:" />
                </event>
            </eventsource>
        </eventtype>
    </eventInterface>
</eventInterfaces>
"""


class TestHttpAction(unittest.TestCase):

    def setUp(self):
        self._log = logging.getLogger( "TestHttpAction" )
        self._log.debug( "\n\nsetUp" )
        self.httpServer = None
        self.httpServer = TestHttpServer()
        self.httpServer.start()
        self.router = None
        self.loader = None

    def tearDown(self):
        self._log.debug( "\n\ntearDown" )
        if self.loader:
            self.loader.stop()  # all tasks
            self.loader = None
            self.router = None
        if self.httpServer:
            self.httpServer.stop()
            self.httpServer = None
        time.sleep(5)

    # Actual tests follow

    def testHttpAction(self):
        self._log.debug( "\n\ntestHttpAction" )
        self.loader = EventRouterLoader()
        self.loader.loadHandlers( getDictFromXmlString(testConfigHttpAction) )
        self.loader.start()  # all tasks
        self.router = self.loader.getEventRouter()
        time.sleep(1)

        self.router.publish( EventAgent("TestHttpAction"), Events.evtTD0 )  # 0 Off
        self.router.publish( EventAgent("TestHttpAction"), Events.evtTD1 )  # 1 Off

        maxTime = 10
        while (len(self.httpServer.requests()) < 1) and (maxTime > 0):
            maxTime -= 1
            time.sleep(1)
        TestEventLogger.logEvents()

        # now look for correct url requests
        self._log.debug( "testHttpAction %s", self.httpServer.requests() )
        self.assertEqual( len(self.httpServer.requests()), 1)
        self.assertEqual( self.httpServer.requests()[0], "/test?medianame=ITunes&mediacmd=volup" )

    def testHttpAction2Requests(self):
        self._log.debug( "\ntestHttpAction2Requests" )
        self.loader = EventRouterLoader()
        self.loader.loadHandlers( getDictFromXmlString(testConfigHttpAction) )
        self.loader.start()  # all tasks
        self.router = self.loader.getEventRouter()
        time.sleep(1)

        self.router.publish( EventAgent("TestHttpAction"), Events.evtTD0 )  # 0 Off
        self.router.publish( EventAgent("TestHttpAction"), Events.evtTD0 )  # 0 Off

        maxTime = 10
        while (len(self.httpServer.requests()) < 2) and (maxTime > 0):
            maxTime -= 1
            time.sleep(1)
        TestEventLogger.logEvents()

        # now look for correct url requests
        self._log.debug( "testHttpAction %s", self.httpServer.requests() )
        self.assertEqual( len(self.httpServer.requests()), 2)
        self.assertEqual( self.httpServer.requests()[0], "/test?medianame=ITunes&mediacmd=volup" )
        self.assertEqual( self.httpServer.requests()[1], "/test?medianame=ITunes&mediacmd=volup" )

    def testHttpAction2RequestsSpaced(self):
        self._log.debug( "\ntestHttpAction2RequestsSpaced" )
        self.loader = EventRouterLoader()
        self.loader.loadHandlers( getDictFromXmlString(testConfigHttpAction) )
        self.loader.start()  # all tasks
        self.router = self.loader.getEventRouter()
        time.sleep(1)

        self.router.publish( EventAgent("TestHttpAction"), Events.evtTD0 )  # 0 Off
        maxTime = 10
        while (len(self.httpServer.requests()) < 1) and (maxTime > 0):
            maxTime -= 1
            time.sleep(1)

        self.router.publish( EventAgent("TestHttpAction"), Events.evtTD0 )  # 0 Off
        maxTime = 10
        while (len(self.httpServer.requests()) < 2) and (maxTime > 0):
            maxTime -= 1
            time.sleep(1)
        TestEventLogger.logEvents()

        # now look for correct url requests
        self._log.debug( "testHttpAction %s", self.httpServer.requests() )
        self.assertEqual( len(self.httpServer.requests()), 2)
        self.assertEqual( self.httpServer.requests()[0], "/test?medianame=ITunes&mediacmd=volup" )
        self.assertEqual( self.httpServer.requests()[1], "/test?medianame=ITunes&mediacmd=volup" )

    def testTwisted(self):
        self._log.debug( "\ntestTwisted" )
        self.loader = EventRouterLoader()
        self.loader.loadHandlers( getDictFromXmlString(testConfigTwisted) )
        self.loader.start()  # all tasks
        self.router = self.loader.getEventRouter()
        time.sleep(1)

        self.router.publish( EventAgent("TestHttpAction"), Events.evtTD0 )  # 0 Off
        self.router.publish( EventAgent("TestHttpAction"), Events.evtTD1 )  # 1 Off

        maxTime = 10
        while (len(self.httpServer.requests()) < 1) and (maxTime > 0):
            maxTime -= 1
            time.sleep(1)
        TestEventLogger.logEvents()

        # now look for correct url requests
        self._log.debug( "testTwisted %s", self.httpServer.requests() )
        self.assertEqual( len(self.httpServer.requests()), 1)
        self.assertEqual( self.httpServer.requests()[0], "/test?medianame=ITunes&mediacmd=volup" )

    def testHttpError(self):
        # TODO grab error log
        self._log.debug( "\ntestHttpError" )
        '''
        NOTE: This is expected to throw an Error!
        '''
        self.loader = EventRouterLoader()
        self.loader.loadHandlers( getDictFromXmlString(testConfigError) )
        self.loader.start()  # all tasks
        self.router = self.loader.getEventRouter()
        time.sleep(1)

        self.router.publish( EventAgent("TestHttpAction"), Events.evtTD0 )  # 0 Off
        self.router.publish( EventAgent("TestHttpAction"), Events.evtTD1 )  # 1 Off

        time.sleep(5)
        TestEventLogger.logEvents()

        # now look for correct url requests, no responses
        self._log.debug( "testHttpAction %s", self.httpServer.requests() )
        self.assertEqual( len(self.httpServer.requests()), 0)

    def testHttpAction2(self):
        self._log.debug( "\n\ntestHttpAction2" )
        self.loader = EventRouterLoader()
        self.loader.loadHandlers( getDictFromXmlString(testConfigHttpAction2) )
        self.loader.start()  # all tasks
        self.router = self.loader.getEventRouter()
        time.sleep(1)

        self.router.publish( EventAgent("TestHttpAction"), Events.evtDO_0_on )  # 0 Off
        self.router.publish( EventAgent("TestHttpAction"), Events.evtTD1 )  # 1 Off

        maxTime = 10
        while (len(self.httpServer.requests()) < 1) and (maxTime > 0):
            maxTime -= 1
            time.sleep(1)
        TestEventLogger.logEvents()

        # now look for correct url requests
        self._log.debug( "testHttpAction2 %s", self.httpServer.requests() )
        self.assertEqual( len(self.httpServer.requests()), 1)
        self.assertEqual( self.httpServer.requests()[0], "/test?state=1" )

    def testWebbrickRedirect(self):
        self._log.debug( "\ntestWebbrickRedirect" )
        self.loader = EventRouterLoader()
        self.loader.loadHandlers( getDictFromXmlString(testConfigWebbrickRedirect) )
        self.loader.start()  # all tasks
        self.router = self.loader.getEventRouter()
        time.sleep(1)

        self.router.publish( EventAgent("TestHttpAction"), Events.evtTD0 )  # 0 Off
        self.router.publish( EventAgent("TestHttpAction"), Events.evtTD1 )  # 1 Off

        maxTime = 10
        while (len(self.httpServer.requests()) < 1) and (maxTime > 0):
            maxTime -= 1
            time.sleep(1)
        TestEventLogger.logEvents()

        # now look for correct url requests
        self._log.debug( "testWebbrickRedirect %s", self.httpServer.requests() )
        self.assertEqual( len(self.httpServer.requests()), 1)
        self.assertEqual( self.httpServer.requests()[0], "/hid.spi?COM=DO0N:" )

    def testDummy(self):
        return


from MiscLib import TestUtils

def getTestSuite(select="unit"):
    """
    Get test suite

    select  is one of the following:
        "unit"        return suite of unit tests only
        "component"   return suite of unit and component tests
        "all"         return suite of unit, component and integration tests
        "pending"     return suite of pending tests
        name          a single named test to be run
    """
    testdict = {
        "unit": [
            "testHttpAction",
            "testHttpAction2Requests",
            "testHttpAction2RequestsSpaced",
            "testHttpError",
            "testHttpAction2",
            "testTwisted",
            "testWebbrickRedirect"
        ],
        "component": [
            "testDummy"
        ],
        "integration": [
            "testDummy"
        ],
        "pending": [
            "testDummy"
        ]
    }
    return TestUtils.getTestSuite(TestHttpAction, testdict, select=select)

# Run unit tests directly from command line
if __name__ == "__main__":
    TestUtils.runTests("TestHttpAction.log", getTestSuite, sys.argv)
StarcoderdataPython
1729342
#!/usr/bin/env python3

"""
Day 8 for Advent of Code 2015
Link to problem description: http://adventofcode.com/day/8

author: <NAME> (nitsas)
language: Python 3.4.2
date: December, 2015
usage: $ python3 runme.py input.txt
       or
       $ runme.py input.txt
       (where input.txt is the input file and $ the prompt)
"""


import sys
import re


def clean_up(line):
    # remove leading and trailing quotes
    line = line[1:-1]
    # convert escaped backslash character to 'a'
    line = line.replace(r'\\', 'a')
    # convert escaped double quote character to 'a'
    line = line.replace(r'\"', 'a')
    # convert any valid ascii code to 'a'
    line = re.sub(r'\\x[0-9a-fA-F]{2}', 'a', line)
    return line


def unclean_up(line):
    # replace leading and trailing quotes with 'aa' each
    line = 'aa' + line[1:-1] + 'aa'
    # add more a's to account for the extra leading and trailing quotes
    line = 'a' + line + 'a'
    # replace backslashes with 'aa'
    line = line.replace('\\', 'aa')
    # replace double quote characters with 'aa'
    line = line.replace('"', 'aa')
    return line


def solve_part_1(lines):
    total_code_length = sum(len(line) for line in lines)
    total_in_memory_length = sum(len(clean_up(line)) for line in lines)
    return total_code_length - total_in_memory_length


def solve_part_2(lines):
    total_code_length = sum(len(line) for line in lines)
    total_escaped_length = sum(len(unclean_up(line)) for line in lines)
    return total_escaped_length - total_code_length


def main(filename=None):
    # get the input file
    if filename is None:
        if len(sys.argv) == 2:
            # get the filename from the command line
            filename = sys.argv[1]
        else:
            # no filename given
            print('Usage: runme.py input_file')
            return 1
    with open(filename, 'r') as file_:
        lines = file_.read().splitlines()
    print('part 1:', solve_part_1(lines))
    print('part 2:', solve_part_2(lines))
    return 0


# run function 'main' if this file is being run in the command line
# (vs being imported as a module)
if __name__ == "__main__":
    status = main()
    sys.exit(status)
StarcoderdataPython
1711692
class PkgTypeEnum:
    REQUIREMENT, DEPENDENCY = range(2)
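
# Hedged sketch (not in the original module): the same two constants
# expressed with the standard-library enum module, shown only as a modern
# equivalent of the range(2) idiom above. PkgType is a hypothetical name
# added for illustration.
if __name__ == "__main__":
    from enum import Enum

    class PkgType(Enum):
        REQUIREMENT = 0
        DEPENDENCY = 1

    assert PkgType.REQUIREMENT.value == PkgTypeEnum.REQUIREMENT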
StarcoderdataPython
166575
import logging
from typing import Optional, Dict, List

import pysolr
import serpy

from manifest_server.helpers.fields import StaticField
from manifest_server.helpers.identifiers import get_identifier, IIIF_V3_CONTEXT
from manifest_server.helpers.metadata import v3_metadata_block, get_links
from manifest_server.helpers.serializers import ContextDictSerializer
from manifest_server.helpers.solr import SolrManager, SolrResult
from manifest_server.helpers.solr_connection import SolrConnection
from manifest_server.iiif.v3.manifests.canvas import Canvas
from manifest_server.iiif.v3.manifests.structure import create_v3_structures

log = logging.getLogger(__name__)


def create_v3_manifest(request, manifest_id: str, config: Dict) -> Optional[Dict]:
    fq: List = ["type:object", f"id:{manifest_id}"]
    record: pysolr.Results = SolrConnection.search("*:*", fq=fq, rows=1)

    if record.hits == 0:
        return None

    object_record = record.docs[0]
    manifest: Manifest = Manifest(object_record, context={"request": request, "config": config})

    return manifest.data


class Manifest(ContextDictSerializer):
    """
    The main class for constructing a IIIF Manifest. Implemented as a serpy
    serializer. This docstring will serve as the documentation for this class,
    as well as the other serializer classes.

    The ContextDictSerializer superclass provides a 'context' object on this
    class. This can be used to pass values down through the various child
    classes, provided they are also given the same context. This lets us pass
    along things like the original request object, and the server
    configuration object, without needing to resolve it externally.

    For classes that implement de-referenceable objects, they provide a method
    field that will return None if that object is being embedded in a
    manifest, or the IIIF v3 context array if it's being de-referenced
    directly.

    When the values of this class are serialized, any fields that have a value
    of None will not be emitted in the output. Refer to the `to_value` method
    on the superclass for the implementation and docstring for this function.
    """
    ctx = StaticField(
        value=IIIF_V3_CONTEXT,
        label="@context"
    )
    mid = serpy.MethodField(
        label="id"
    )
    mtype = StaticField(
        value="Manifest",
        label="type"
    )
    label = serpy.MethodField()
    summary = serpy.MethodField()
    metadata = serpy.MethodField()
    homepage = serpy.MethodField()
    provider = serpy.MethodField()
    nav_date = serpy.MethodField(
        label='navDate'
    )
    logo = serpy.MethodField()
    thumbnail = serpy.MethodField()
    required_statement = serpy.MethodField(
        label="requiredStatement"
    )
    part_of = serpy.MethodField(
        label="partOf"
    )
    behaviour = serpy.MethodField(
        label="behavior"
    )
    items = serpy.MethodField()
    structures = serpy.MethodField()
    viewing_direction = serpy.StrField(
        attr="viewing_direction_s",
        label="viewingDirection",
        required=False
    )

    def get_mid(self, obj: SolrResult) -> str:
        req = self.context.get('request')
        conf = self.context.get('config')
        manifest_tmpl: str = conf['templates']['manifest_id_tmpl']
        return get_identifier(req, obj.get('id'), manifest_tmpl)

    def get_label(self, obj: SolrResult) -> Dict:
        return {"en": [f"{obj.get('full_shelfmark_s')}"]}

    def get_summary(self, obj: SolrResult) -> Dict:
        return {"en": [f"{obj.get('summary_s')}"]}

    def get_required_statement(self, obj: SolrResult) -> Dict:
        return {
            "label": {"en": ["Terms of Use"]},
            "value": {"en": [obj.get("use_terms_sni", None)]}
        }

    def get_part_of(self, obj: SolrResult) -> Optional[List]:
        colls: List[str] = obj.get('all_collections_link_smni')
        if not colls:
            return None

        req = self.context.get('request')
        cfg = self.context.get('config')
        tmpl: str = cfg['templates']['collection_id_tmpl']

        ret: List[Dict] = []
        for collection in colls:
            cid, label = collection.split("|")
            ret.append({
                "id": get_identifier(req, cid, tmpl),
                "type": "Collection",
                "label": {"en": [label]}
            })

        return ret

    def get_homepage(self, obj: SolrResult) -> List:
        req = self.context.get('request')
        cfg = self.context.get('config')
        tmpl: str = cfg['templates']['digital_bodleian_permalink_tmpl']
        uuid: str = obj.get("id")

        conn: SolrManager = SolrManager(SolrConnection)
        fq: List = ['type:link', f"object_id:{uuid}"]
        conn.search("*:*", fq=fq)

        links: List = [{
            'id': get_identifier(req, uuid, tmpl),
            'type': "Text",
            "label": {"en": ["View on Digital Bodleian"]},
            "format": "text/html",
            "language": ["en"]
        }]

        if conn.hits > 0:
            for r in conn.results:
                links.append({
                    'id': r.get('target_s'),
                    'type': "Text",
                    "label": {"en": [r.get('label_s')]},
                    "format": "text/html",
                    "language": ["en"]
                })

        return links

    def get_logo(self, obj: SolrResult) -> Optional[List]:
        logo_uuid: str = obj.get("logo_id")
        if not logo_uuid:
            return None

        req = self.context.get('request')
        cfg = self.context.get('config')
        image_tmpl: str = cfg['templates']['image_id_tmpl']
        logo_ident: str = get_identifier(req, logo_uuid, image_tmpl)
        thumbsize: str = cfg['common']['thumbsize']

        logo_service: List = [{
            "id": f"{logo_ident}/full/{thumbsize},/0/default.jpg",
            "type": "Image",
            "service": {
                "type": "ImageService2",
                "profile": "level1",
                "id": logo_ident
            }
        }]

        return logo_service

    def get_provider(self, obj: SolrResult) -> Optional[List]:
        """
        If a URI for the organization is not provided, we will not show any
        information about the organization.

        :param obj: A Solr record.
        :return: A 'provider' block.
        """
        uri: Optional[str] = obj.get("institution_uri_s", None)
        if not uri:
            return None

        org_name: Optional[str] = obj.get("holding_institution_s", None)
        org_homepage: Optional[str] = obj.get("institution_homepage_sni", None)

        provider_block: List = [{
            "id": uri,
            "type": "Agent",
            "label": {"en": [org_name]},
            "homepage": {
                "id": org_homepage,
                "type": "Text",
                "label": {"en": [org_name]},
                "format": "text/html"
            },
        }]

        return provider_block

    def get_thumbnail(self, obj: SolrResult) -> Optional[List]:
        image_uuid: str = obj.get('thumbnail_id')
        if not image_uuid:
            return None

        req = self.context.get('request')
        cfg = self.context.get('config')
        image_tmpl: str = cfg['templates']['image_id_tmpl']
        image_ident: str = get_identifier(req, image_uuid, image_tmpl)
        thumbsize: str = cfg['common']['thumbsize']

        thumb_service: List = [{
            "id": f"{image_ident}/full/{thumbsize},/0/default.jpg",
            "service": {
                "type": "ImageService2",
                "profile": "level1",
                "id": image_ident
            }
        }]

        return thumb_service

    def get_behaviour(self, obj: SolrResult) -> List:
        vtype = obj.get('viewing_type_s')
        if vtype and vtype in ["map", "sheet", "binding", "photo"]:
            return ["individuals"]
        return ["paged"]

    def get_metadata(self, obj: SolrResult) -> Optional[List[Dict]]:
        # description_sm is already included in the summary
        metadata: List = get_links(obj, 3)
        metadata += v3_metadata_block(obj)
        return metadata

    def get_items(self, obj: SolrResult) -> Optional[List]:
        req = self.context.get('request')
        cfg = self.context.get('config')
        obj_id: str = obj.get('id')

        # Check if the canvases have annotations. We don't actually
        # need to retrieve them, just get the number of hits.
        has_annotations_res = SolrConnection.search(
            "*:*",
            fq=["type:annotationpage", f"object_id:{obj_id}"],
            rows=0
        )
        has_annotations = has_annotations_res.hits > 0

        manager: SolrManager = SolrManager(SolrConnection)
        fq: List = ["type:surface", f"object_id:{obj_id}"]
        sort: str = "sort_i asc"
        fl: List = ["*,[child parentFilter=type:surface childFilter=type:image]"]
        rows: int = 100
        manager.search("*:*", fq=fq, fl=fl, sort=sort, rows=rows)

        if manager.hits == 0:
            return None

        return Canvas(manager.results,
                      context={"request": req, "config": cfg,
                               "has_annotations": has_annotations},
                      many=True).data

    def get_structures(self, obj: SolrResult) -> Optional[List[Dict]]:
        return create_v3_structures(self.context.get("request"), obj.get("id"),
                                    self.context.get("config"))

    def get_nav_date(self, obj: SolrResult) -> Optional[str]:
        year: Optional[int] = obj.get('start_date_i') or obj.get('end_date_i')
        if year is None:
            return None
        return f"{year}-01-01T00:00:00Z"
StarcoderdataPython
139325
<gh_stars>1-10
from flask import Blueprint

project = Blueprint("project", __name__)

from app.project import routes  # noqa: E402, F401
StarcoderdataPython
3367290
from db import words


def test_get_id_for_word(db_conn):
    cursor = db_conn.cursor()
    assert words.get_id_for_word(cursor, '&c') == (1,)
    # Should also test when the word *doesn't* exist in the database
    assert words.get_id_for_word(cursor, 'rgnthm') is None


def test_get_word_for_id(db_conn):
    cursor = db_conn.cursor()
    # Test the affirmative case
    assert words.get_word_for_id(cursor, 35810) == ('boogaloo',)
    # Let's test on a few that obviously aren't valid ids
    assert words.get_word_for_id(cursor, -1) is None
    assert words.get_word_for_id(cursor, 1.5) is None
    assert words.get_word_for_id(cursor, 'wjksjksjkadbf') is None


def test_word_exists(db_conn):
    cursor = db_conn.cursor()
    assert words.word_exists(cursor, 'boogaloo')
    assert words.word_exists(cursor, '&c')
    assert not words.word_exists(cursor, 'rgnthm')
    assert not words.word_exists(cursor, 4)


def test_get_word_list(db_conn):
    cursor = db_conn.cursor()
    dct = words.get_word_list(cursor)
    assert len(dct) == 354971
    assert 'boogaloo' in dct
    assert 'rgnthm' not in dct
    dct = words.get_word_list(cursor, ids=True)
    assert all(len(value) == 2 for value in dct.values())


def test_append_word(db_conn):
    cursor = db_conn.cursor()
    words.append_word(cursor, 'rgnthm')
    assert words.word_exists(cursor, 'rgnthm')
StarcoderdataPython
1794891
# Weighted average: sum(n_i * w_i) / sum(w_i)
n = int(input())
numbers = list(map(int, input().split()))
weights = list(map(int, input().split()))

result = 0
for index in range(n):
    result += numbers[index] * weights[index]

result /= sum(weights)
print(round(result, 1))
StarcoderdataPython
19776
<reponame>paulkramme/mit-license-adder
#!/usr/bin/python2

import tempfile
import sys
import datetime

mit_license = ("""\
/*
MIT License

Copyright (c) 2016 <NAME>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
""")


class FileModifierError(Exception):
    pass


class FileModifier(object):

    def __init__(self, fname):
        self.__write_dict = {}
        self.__filename = fname
        self.__tempfile = tempfile.TemporaryFile()
        with open(fname, 'rb') as fp:
            for line in fp:
                self.__tempfile.write(line)
        self.__tempfile.seek(0)

    def write(self, s, line_number='END'):
        if line_number != 'END' and not isinstance(line_number, (int, float)):
            raise FileModifierError("Line number %s is not a valid number" % line_number)
        try:
            self.__write_dict[line_number].append(s)
        except KeyError:
            self.__write_dict[line_number] = [s]

    def writeline(self, s, line_number='END'):
        self.write('%s\n' % s, line_number)

    def writelines(self, s, line_number='END'):
        for ln in s:
            self.writeline(ln, line_number)

    def __popline(self, index, fp):
        try:
            ilines = self.__write_dict.pop(index)
            for line in ilines:
                fp.write(line)
        except KeyError:
            pass

    def close(self):
        self.__exit__(None, None, None)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        with open(self.__filename, 'w') as fp:
            for index, line in enumerate(self.__tempfile.readlines()):
                self.__popline(index, fp)
                fp.write(line)
            for index in sorted(self.__write_dict):
                for line in self.__write_dict[index]:
                    fp.write(line)
        self.__tempfile.close()


filename = sys.argv[1]
#license = sys.argv[1]

print "Licenseadder by <NAME>"

with FileModifier(filename) as fp:
    fp.writeline(mit_license, 0)
StarcoderdataPython
4843267
<gh_stars>1-10
import os
import sys

try:
    from balsam.core import models
    from balsam.core.models import ApplicationDefinition, BalsamJob
    from balsam.service import service
except:
    pass

from deephyper.core.cli.utils import generate_other_arguments
from deephyper.core.exceptions import DeephyperRuntimeError
from deephyper.problem import BaseProblem, NaProblem
from deephyper.search.util import banner, generic_loader

APPS = {
    "HPS": {
        "AMBS": f"{sys.executable} -m deephyper.search.hps.ambs",
    },
    "NAS": {
        "AMBS": f"{sys.executable} -m deephyper.search.nas.ambs",
        "RANDOM": f"{sys.executable} -m deephyper.search.nas.random",
        "REGEVO": f"{sys.executable} -m deephyper.search.nas.regevo",
        "AGEBO": f"{sys.executable} -m deephyper.search.nas.agebo",
    },
}


def add_subparser(subparsers):
    subparser_name = "balsam-submit"
    function_to_call = main

    subparser = subparsers.add_parser(
        subparser_name, help="Create and submit an HPS or NAS job directly via Balsam."
    )

    subparser.add_argument("mode", choices=["nas", "hps"], help="Type of search")
    subparser.add_argument(
        "search",
        choices=["ambs", "regevo", "random", "agebo", "ambsmixed", "regevomixed"],
        help="Search strategy",
    )
    subparser.add_argument("workflow", help="Unique workflow name")
    subparser.add_argument(
        "-p", "--problem", required=True, help="Problem definition path or object"
    )
    subparser.add_argument(
        "-r", "--run", required=False, help="Run function path or object"
    )
    subparser.add_argument(
        "-t", "--time-minutes", type=int, required=True, help="Job walltime (minutes)"
    )
    subparser.add_argument(
        "-n", "--nodes", type=int, required=True, help="Number of nodes"
    )
    subparser.add_argument("-q", "--queue", required=True, help="Queue to submit into")
    subparser.add_argument("-A", "--project", required=True, help="Project Name")
    subparser.add_argument(
        "-j",
        "--job-mode",
        required=True,
        help="Launcher job mode",
        choices=["mpi", "serial"],
    )
    subparser.add_argument(
        "--num-evals-per-node",
        default=1,
        type=int,
        help="Number of evaluations performed on each node. Only valid if evaluator==balsam and balsam job-mode is 'serial'.",
    )

    subparser.set_defaults(func=function_to_call)


def main(mode: str, search: str, workflow: str, problem: str, run: str, **kwargs) -> None:
    """Create & submit the DeepHyper search via Balsam"""

    job = pre_submit(mode, search, workflow, problem, run)

    if os.path.exists(problem):  # the problem was given as a PATH
        problem = os.path.abspath(problem)

    if run and os.path.exists(run):  # the run function was given as a PATH
        run = os.path.abspath(run)

    print(
        f"Creating BalsamJob using application {job.application}...", end="", flush=True
    )
    setup_job(job, problem, run, **kwargs)
    print("OK")

    print("Performing job submission...")
    submit_qlaunch(
        kwargs["project"],
        kwargs["queue"],
        kwargs["nodes"],
        kwargs["time_minutes"],
        kwargs["job_mode"],
        workflow,
    )
    banner(f"Success. The search will run at: {job.working_directory}")


def validate(mode: str, search: str, workflow: str, problem: str, run: str) -> str:
    """Validate problem, run, and workflow"""

    # validate the mode
    if not (mode.upper() in APPS):
        raise DeephyperRuntimeError(f"The mode '{mode}' is not valid!")

    # validate the search
    if not (search.upper() in APPS[mode.upper()]):
        raise DeephyperRuntimeError(f"The search '{search}' is not valid!")

    app = f"{mode.upper()}-{search.upper()}"

    print(f"Validating Problem({problem})...", end="", flush=True)
    prob = generic_loader(problem, "Problem")
    assert isinstance(prob, (NaProblem, BaseProblem)), f"{prob} is not a Problem instance"
    print("OK", flush=True)

    # validate run
    if run:  # it is not mandatory to pass a run function for NAS
        print("Validating run...", end="", flush=True)
        run = generic_loader(run, "run")
        assert callable(run), f"{run} must be a a callable"
        print("OK", flush=True)
    else:
        if mode == "hps":
            raise DeephyperRuntimeError(f"No '--run' was passed for the mode 'hps'")

    qs = BalsamJob.objects.filter(workflow=workflow)
    if qs.exists():
        raise DeephyperRuntimeError(
            f"There are already jobs matching workflow {workflow}. "
            f"Please remove these, or use a unique workflow name"
        )

    return app


def bootstrap_apps():
    """Ensure Balsam ApplicationDefinitions are populated"""
    for mode, mode_apps in APPS.items():
        for app_name, app_exe in mode_apps.items():
            app, created = ApplicationDefinition.objects.get_or_create(
                name=f"{mode}-{app_name}", defaults={"executable": app_exe}
            )
            if not created:
                app.executable = app_exe
                app.save()


def pre_submit(
    mode: str, search: str, workflow: str, problem: str, run: str
):
    """Validate command line; prepare apps"""

    app = validate(mode, search, workflow, problem, run)

    # creating the APPS in the balsam DB
    print("Bootstrapping apps...", end="", flush=True)
    bootstrap_apps()
    print("OK")

    job = BalsamJob(name=workflow, workflow=workflow, application=app)
    return job


def setup_job(job, problem, run, **kwargs):
    job.args = f"--evaluator balsam --problem {problem}"

    #! it is not required for NAS to pass a run function
    if run:
        job.args += f" --run {run}"

    invalid_keys = ["time_minutes", "nodes", "queue", "project", "job_mode"]
    for k in invalid_keys:
        kwargs.pop(k)
    args = generate_other_arguments(**kwargs)
    if len(args) > 0:
        job.args += f" {args}"

    job.save()
    return job


def submit_qlaunch(project, queue, nodes, time_minutes, job_mode, wf_filter):
    """Submit Balsam launcher job to batch scheduler"""
    QueuedLaunch = models.QueuedLaunch
    qlaunch = QueuedLaunch(
        project=project,
        queue=queue,
        nodes=nodes,
        wall_minutes=time_minutes,
        job_mode=job_mode,
        wf_filter=wf_filter,
        prescheduled_only=False,
    )
    qlaunch.save()
    service.submit_qlaunch(qlaunch, verbose=True)
# File: onmt/encoders/__init__.py
"""Module defining encoders."""
from onmt.encoders.encoder import EncoderBase
from onmt.encoders.transformer import TransformerEncoder
from onmt.encoders.rnn_encoder import RNNEncoder
from onmt.encoders.cnn_encoder import CNNEncoder
from onmt.encoders.mean_encoder import MeanEncoder
from onmt.encoders.audio_encoder import AudioEncoder
from onmt.encoders.image_encoder import ImageEncoder

str2enc = {"rnn": RNNEncoder, "brnn": RNNEncoder, "cnn": CNNEncoder,
           "transformer": TransformerEncoder, "img": ImageEncoder,
           "audio": AudioEncoder, "mean": MeanEncoder}

__all__ = ["EncoderBase", "TransformerEncoder", "RNNEncoder", "CNNEncoder",
           "MeanEncoder", "str2enc"]
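
# The `str2enc` registry maps encoder-type option strings to classes; a
# minimal lookup sketch (constructor arguments are omitted on purpose,
# since they depend on onmt's option parsing):
if __name__ == "__main__":
    encoder_cls = str2enc["transformer"]
    print(encoder_cls.__name__)  # -> TransformerEncoder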
def foo():
    """
    Parameters:
        a: foo

    Returns:
        None
    """
# Repo: jhoe123/Elastos.Hive.Node
# -*- coding: utf-8 -*-

URL_V2 = '/api/v2'
URL_SIGN_IN = '/did/signin'
URL_AUTH = '/did/auth'
URL_BACKUP_AUTH = '/did/backup_auth'
URL_SERVER_INTERNAL_BACKUP = '/vault-backup-service/backup'
URL_SERVER_INTERNAL_RESTORE = '/vault-backup-service/restore'
URL_SERVER_INTERNAL_STATE = '/vault-backup-service/state'

BACKUP_FILE_SUFFIX = '.backup'

DID = 'did'
USR_DID = 'user_did'
APP_DID = 'app_did'
OWNER_ID = 'owner_id'
CREATE_TIME = 'create_time'
MODIFY_TIME = 'modify_time'
SIZE = 'size'
STATE = 'state'
STATE_RUNNING = 'running'
STATE_FINISH = 'finish'
STATE_FAILED = 'failed'
ORIGINAL_SIZE = 'original_size'
IS_UPGRADED = 'is_upgraded'
CID = 'cid'
COUNT = 'count'

COL_ORDERS = 'vault_order'
COL_ORDERS_SUBSCRIPTION = 'subscription'
COL_ORDERS_PRICING_NAME = 'pricing_name'
COL_ORDERS_ELA_AMOUNT = 'ela_amount'
COL_ORDERS_ELA_ADDRESS = 'ela_address'
COL_ORDERS_PROOF = 'proof'
COL_ORDERS_STATUS = 'status'
COL_ORDERS_STATUS_NORMAL = 'normal'
COL_ORDERS_STATUS_PAID = 'paid'
COL_ORDERS_STATUS_ARCHIVE = 'archive'

COL_RECEIPTS = 'vault_receipt'
COL_RECEIPTS_ID = 'receipt_id'
COL_RECEIPTS_ORDER_ID = 'order_id'
COL_RECEIPTS_TRANSACTION_ID = 'transaction_id'
COL_RECEIPTS_PAID_DID = 'paid_did'

COL_IPFS_FILES = 'ipfs_files'
COL_IPFS_FILES_PATH = 'path'
COL_IPFS_FILES_SHA256 = 'sha256'
COL_IPFS_FILES_IS_FILE = 'is_file'
COL_IPFS_FILES_IPFS_CID = 'ipfs_cid'

COL_IPFS_CID_REF = 'ipfs_cid_ref'
COL_IPFS_BACKUP_CLIENT = 'ipfs_backup_client'
COL_IPFS_BACKUP_SERVER = 'ipfs_backup_server'

BACKUP_TARGET_TYPE = 'type'
BACKUP_TARGET_TYPE_HIVE_NODE = 'hive_node'
BACKUP_TARGET_TYPE_GOOGLE_DRIVER = 'google_driver'

BACKUP_REQUEST_ACTION = 'action'
BACKUP_REQUEST_ACTION_BACKUP = 'backup'
BACKUP_REQUEST_ACTION_RESTORE = 'restore'

BACKUP_REQUEST_STATE = 'state'
BACKUP_REQUEST_STATE_STOP = 'stop'
BACKUP_REQUEST_STATE_INPROGRESS = 'process'
BACKUP_REQUEST_STATE_SUCCESS = 'success'
BACKUP_REQUEST_STATE_FAILED = 'failed'
BACKUP_REQUEST_STATE_MSG = 'state_msg'

BACKUP_REQUEST_TARGET_HOST = 'target_host'
BACKUP_REQUEST_TARGET_DID = 'target_did'
BACKUP_REQUEST_TARGET_TOKEN = 'target_token'

# For backup subscription.
BKSERVER_REQ_ACTION = 'req_action'
BKSERVER_REQ_STATE = 'req_state'
BKSERVER_REQ_STATE_MSG = 'req_state_msg'
BKSERVER_REQ_CID = 'req_cid'
BKSERVER_REQ_SHA256 = 'req_sha256'
BKSERVER_REQ_SIZE = 'req_size'

# @deprecated
URL_BACKUP_SERVICE = '/api/v2/internal_backup/service'
URL_BACKUP_FINISH = '/api/v2/internal_backup/finished_confirmation'
URL_BACKUP_FILES = '/api/v2/internal_backup/files'
URL_BACKUP_FILE = '/api/v2/internal_backup/file'
URL_BACKUP_PATCH_HASH = '/api/v2/internal_backup/patch_hash'
URL_BACKUP_PATCH_DELTA = '/api/v2/internal_backup/patch_delta'
URL_BACKUP_PATCH_FILE = '/api/v2/internal_backup/patch_file'
URL_RESTORE_FINISH = '/api/v2/internal_restore/finished_confirmation'
URL_IPFS_BACKUP_PIN_CIDS = '/api/v2/ipfs-backup-internal/pin_cids'
URL_IPFS_BACKUP_GET_DBFILES = '/api/v2/ipfs-backup-internal/get_dbfiles'
URL_IPFS_BACKUP_STATE = '/api/v2/ipfs-backup-internal/state'


def get_unique_dict_item_from_list(dict_list: list):
    if not dict_list:
        return list()
    # De-duplicate by treating each dict's items as a hashable key.
    return list({frozenset(item.items()): item for item in dict_list}.values())
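
# `get_unique_dict_item_from_list` keys each dict by the frozenset of its
# items, so exact duplicates collapse to one entry (values must be hashable).
# A quick illustration with made-up data:
if __name__ == "__main__":
    rules = [
        {'order_id': 1, 'status': COL_ORDERS_STATUS_PAID},
        {'order_id': 1, 'status': COL_ORDERS_STATUS_PAID},   # duplicate, dropped
        {'order_id': 2, 'status': COL_ORDERS_STATUS_NORMAL},
    ]
    print(get_unique_dict_item_from_list(rules))
    # -> two dicts remain (output order of the de-duplication is not guaranteed)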
# dispy Guild type and guild API stubs
from dispy.types.User import User
import asyncio


class Guild:  # no support for threads
    def __init__(self, args: dict) -> None:
        self.id = int(args['id'])
        self.name = args['name']
        self.icon = args.get('icon', None)
        self.icon_hash = args.get('icon_hash', None)
        self.splash = args.get('splash', None)
        self.discovery_splash = args.get('discovery_splash', None)
        # JSON booleans arrive as real bools, so avoid comparing to 'true'.
        self.owner = bool(args.get('owner', False))
        self.owner_id = int(args['owner_id'])
        self.permissions = args.get('permissions', None)
        self.region = args.get('region', None)
        # Default to 0 so int() never receives None.
        self.afk_channel_id = int(args.get('afk_channel_id', 0))
        self.afk_timeout = int(args.get('afk_timeout', 0))
        self.widget_enabled = bool(args.get('widget_enabled', False))
        self.widget_channel_id = int(args.get('widget_channel_id', 0))
        self.verification_level = int(args['verification_level'])
        self.default_message_notifications = int(args['default_message_notifications'])
        self.explicit_content_filter = int(args['explicit_content_filter'])
        self.roles = args['roles']
        self.emojis = args['emojis']
        self.features = args['features']
        self.mfa_level = int(args['mfa_level'])
        self.application_id = int(args.get('application_id', 0))
        self.system_channel_id = int(args.get('system_channel_id', 0))
        self.system_channel_flags = int(args['system_channel_flags'])
        self.rules_channel_id = int(args.get('rules_channel_id', 0))
        self.joined_at = args.get('joined_at', None)  # ISO8601 timestamp
        self.large = bool(args.get('large', False))
        self.unavailable = bool(args.get('unavailable', False))
        self.member_count = int(args.get('member_count', 0))
        self.voice_states = args.get('voice_states', None)
        self.members = args.get('members', None)
        self.channels = args.get('channels', None)
        self.presences = args.get('presences', None)
        self.max_presences = int(args.get('max_presences', 0))
        self.max_members = int(args.get('max_members', 0))
        self.vanity_url_code = args.get('vanity_url_code', None)
        self.description = args.get('description', None)
        self.banner = args.get('banner', None)
        self.premium_tier = int(args['premium_tier'])
        self.premium_subscription_count = int(args.get('premium_subscription_count', 0))
        self.preferred_locale = args['preferred_locale']
        self.public_updates_channel_id = int(args.get('public_updates_channel_id', 0))
        self.max_video_channel_users = int(args.get('max_video_channel_users', 0))
        self.approximate_member_count = int(args.get('approximate_member_count', 0))
        self.approximate_presence_count = int(args.get('approximate_presence_count', 0))
        self.welcome_screen = args.get('welcome_screen', None)
        self.nsfw_level = int(args['nsfw_level'])
        self.stage_instances = args.get('stage_instances', None)


async def create_guild(guild_dict) -> Guild:
    # API: POST /guilds
    pass


async def get_guild(guild_id) -> Guild:
    # API: GET /guilds/{guild.id}
    pass


async def get_guild_preview(guild_id):
    # Returns a guild preview object.
    # API: GET /guilds/{guild.id}/preview
    pass


async def modify_guild(guild_dict) -> Guild:
    # API: PATCH /guilds/{guild.id}
    pass


async def delete_guild(guild_id):
    # API: DELETE /guilds/{guild.id}
    pass


async def get_guild_channels(guild_id):
    # Returns a list of channels.
    # API: GET /guilds/{guild.id}/channels
    pass


async def create_guild_channel(guild_id, channel):
    # Returns a channel.
    # API: POST /guilds/{guild.id}/channels
    pass


async def modify_guild_channel_positions(guild_id, channels):
    # Returns an empty 204.
    # API: PATCH /guilds/{guild.id}/channels
    pass


async def get_guild_member(guild_id, user_id) -> User:
    # API: GET /guilds/{guild.id}/members/{user.id}
    pass


async def list_guild_members(guild_id):
    # Returns a list of guild member objects.
    # API: GET /guilds/{guild.id}/members
    pass


async def search_guild_members(guild_id, query):
    # Returns a list of guild members.
    # API: GET /guilds/{guild.id}/members/search
    pass


async def add_guild_member(guild_id, user_id):
    # Returns 201 Created or 204 No Content.
    # API: PUT /guilds/{guild.id}/members/{user.id}
    pass


async def modify_guild_member(guild_id, user_id):
    # Returns a 200 OK.
    # API: PATCH /guilds/{guild.id}/members/{user.id}
    pass


async def modify_self_nickname(guild_id):
    # 200 on success.
    # API: PATCH /guilds/{guild.id}/members/@me/nick
    pass


async def add_guild_member_role(guild_id, user_id, role_id):
    # 204 empty on success.
    # API: PUT /guilds/{guild.id}/members/{user.id}/roles/{role.id}
    pass


async def remove_guild_member_role(guild_id, user_id, role_id):
    # 204 empty on success.
    # API: DELETE /guilds/{guild.id}/members/{user.id}/roles/{role.id}
    pass


async def remove_guild_member(guild_id, user_id):
    # 204 empty on success.
    # API: DELETE /guilds/{guild.id}/members/{user.id}
    pass


async def get_guild_bans(guild_id):
    # Returns a list of ban objects.
    # API: GET /guilds/{guild.id}/bans
    pass


async def get_guild_ban(guild_id, user_id):
    # Returns a ban object or 404.
    # API: GET /guilds/{guild.id}/bans/{user.id}
    pass


async def create_guild_ban(guild_id, user_id):
    # 204 empty on success.
    # API: PUT /guilds/{guild.id}/bans/{user.id}
    pass


async def remove_guild_ban(guild_id, user_id):
    # 204 empty on success.
    # API: DELETE /guilds/{guild.id}/bans/{user.id}
    pass


async def get_guild_roles(guild_id):
    # Returns a list of role objects.
    # API: GET /guilds/{guild.id}/roles
    pass


async def create_guild_role(guild_id, role):
    # Returns a role object.
    # API: POST /guilds/{guild.id}/roles
    pass


async def modify_guild_role_positions(guild_id, params):
    # Returns the list of the guild's role objects.
    # API: PATCH /guilds/{guild.id}/roles
    pass


async def modify_guild_role(guild_id, role):
    # Returns the updated role.
    # API: PATCH /guilds/{guild.id}/roles/{role.id}
    pass


async def delete_guild_role(guild_id, role_id):
    # Returns 204 on success.
    # API: DELETE /guilds/{guild.id}/roles/{role.id}
    pass


async def get_guild_prune_count(guild_id, params):
    # API: GET /guilds/{guild.id}/prune
    pass


async def begin_guild_prune(guild_id, params):
    # API: POST /guilds/{guild.id}/prune
    pass


async def get_guild_voice_regions(guild_id):
    # API: GET /guilds/{guild.id}/regions
    pass


async def get_guild_invites(guild_id):
    # API: GET /guilds/{guild.id}/invites
    pass


async def get_guild_integrations(guild_id):
    # API: GET /guilds/{guild.id}/integrations
    pass


async def delete_guild_integration(guild_id, integration):
    # API: DELETE /guilds/{guild.id}/integrations/{integration.id}
    pass


async def get_guild_widget_settings(guild_id):
    # API: GET /guilds/{guild.id}/widget
    pass


async def modify_guild_widget(guild_id, widget):
    # API: PATCH /guilds/{guild.id}/widget
    pass


async def get_guild_widget(guild_id):
    # API: GET /guilds/{guild.id}/widget.json
    pass


async def get_guild_vanity_url(guild_id):
    # API: GET /guilds/{guild.id}/vanity-url
    pass


async def get_guild_widget_image(guild_id):
    # API: GET /guilds/{guild.id}/widget.png
    pass


async def get_guild_welcome_screen(guild_id):
    # API: GET /guilds/{guild.id}/welcome-screen
    pass


async def modify_guild_welcome_screen(guild_id, params):
    # API: PATCH /guilds/{guild.id}/welcome-screen
    pass


async def modify_self_user_state(guild_id, params):
    # API: PATCH /guilds/{guild.id}/voice-states/@me
    pass


async def modify_user_voice_state(guild_id, user, params):
    # API: PATCH /guilds/{guild.id}/voice-states/{user.id}
    pass
#!/usr/bin/env python3

# This file is Copyright (c) 2016-2020 <NAME> <<EMAIL>>
# This file is Copyright (c) 2018-2019 <NAME> <<EMAIL>>
# License: BSD

import os  # needed below for os.path.join (missing in the original)
import argparse

from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer

from litex.build.generic_platform import *
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.interconnect import stream
from litex.soc.cores.uart import UARTWishboneBridge
from litex.soc.cores.usb_fifo import phy_description

from litepcie.phy.s7pciephy import S7PCIEPHY

from gateware.usb import USBCore
from gateware.etherbone import Etherbone
from gateware.tlp import TLP
from gateware.msi import MSI
from gateware.ft601 import FT601Sync

from litescope import LiteScopeAnalyzer

# CRG ----------------------------------------------------------------------------------------------

class _CRG(Module):
    def __init__(self, platform, sys_clk_freq):
        self.clock_domains.cd_sys = ClockDomain()
        self.clock_domains.cd_usb = ClockDomain()

        # # #

        # sys
        sys_clk_100 = platform.request("clk100")
        platform.add_period_constraint(sys_clk_100, 1e9/100e6)
        self.submodules.pll = pll = S7PLL(speedgrade=-1)
        pll.register_clkin(sys_clk_100, 100e6)
        pll.create_clkout(self.cd_sys, sys_clk_freq)

        # usb
        usb_clk100 = platform.request("usb_fifo_clock")
        platform.add_period_constraint(usb_clk100, 1e9/100e6)
        self.comb += self.cd_usb.clk.eq(usb_clk100)
        self.specials += AsyncResetSynchronizer(self.cd_usb, ResetSignal("pcie"))

# PCIeScreamer -------------------------------------------------------------------------------------

class PCIeScreamer(SoCMini):
    usb_map = {
        "wishbone": 0,
        "tlp":      1
    }

    def __init__(self, platform, with_analyzer=True, with_loopback=False):
        sys_clk_freq = int(100e6)

        # SoCMini ----------------------------------------------------------------------------------
        SoCMini.__init__(self, platform, sys_clk_freq, ident="PCIe Screamer", ident_version=True)

        # CRG --------------------------------------------------------------------------------------
        self.submodules.crg = _CRG(platform, sys_clk_freq)

        # Serial Wishbone Bridge -------------------------------------------------------------------
        self.submodules.bridge = UARTWishboneBridge(platform.request("serial"), sys_clk_freq, baudrate=3e6)
        self.add_wb_master(self.bridge.wishbone)

        # PCIe PHY ---------------------------------------------------------------------------------
        self.submodules.pcie_phy = S7PCIEPHY(platform, platform.request("pcie_x1"))
        self.add_csr("pcie_phy")

        # USB FT601 PHY ----------------------------------------------------------------------------
        self.submodules.usb_phy = FT601Sync(platform.request("usb_fifo"), dw=32, timeout=1024)

        # USB Loopback -----------------------------------------------------------------------------
        if with_loopback:
            self.submodules.usb_loopback_fifo = stream.SyncFIFO(phy_description(32), 2048)
            self.comb += [
                self.usb_phy.source.connect(self.usb_loopback_fifo.sink),
                self.usb_loopback_fifo.source.connect(self.usb_phy.sink)
            ]
        # USB Core ---------------------------------------------------------------------------------
        else:
            self.submodules.usb_core = USBCore(self.usb_phy, sys_clk_freq)

            # USB <--> Wishbone --------------------------------------------------------------------
            self.submodules.etherbone = Etherbone(self.usb_core, self.usb_map["wishbone"])
            self.add_wb_master(self.etherbone.master.bus)

            # USB <--> TLP -------------------------------------------------------------------------
            self.submodules.tlp = TLP(self.usb_core, self.usb_map["tlp"])
            self.comb += [
                self.pcie_phy.source.connect(self.tlp.sender.sink),
                self.tlp.receiver.source.connect(self.pcie_phy.sink)
            ]

        # Wishbone --> MSI -------------------------------------------------------------------------
        self.submodules.msi = MSI()
        self.comb += self.msi.source.connect(self.pcie_phy.msi)
        self.add_csr("msi")

        # Led blink --------------------------------------------------------------------------------
        usb_counter = Signal(32)
        self.sync.usb += usb_counter.eq(usb_counter + 1)
        self.comb += platform.request("user_led", 0).eq(usb_counter[26])

        pcie_counter = Signal(32)
        self.sync.pcie += pcie_counter.eq(pcie_counter + 1)
        self.comb += platform.request("user_led", 1).eq(pcie_counter[26])

        # Analyzer ---------------------------------------------------------------------------------
        if with_analyzer:
            analyzer_signals = [
                self.pcie_phy.sink,
                self.pcie_phy.source,
            ]
            self.submodules.analyzer = LiteScopeAnalyzer(analyzer_signals, 1024, csr_csv="test/analyzer.csv")
            self.add_csr("analyzer")

# Build --------------------------------------------------------------------------------------------

def main():
    parser = argparse.ArgumentParser(description="PCIe Screamer Test Gateware")
    parser.add_argument("--m2",            action="store_true", help="use M2 variant of PCIe Screamer")
    parser.add_argument("--with-analyzer", action="store_true", help="enable Analyzer")
    parser.add_argument("--with-loopback", action="store_true", help="enable USB Loopback")
    parser.add_argument("--build",         action="store_true", help="Build bitstream")
    parser.add_argument("--load",          action="store_true", help="Load bitstream")
    parser.add_argument("--flash",         action="store_true", help="Flash bitstream")
    args = parser.parse_args()

    if args.m2:
        from platforms.pcie_screamer_m2 import Platform
    else:
        from platforms.pcie_screamer import Platform

    platform = Platform()
    soc = PCIeScreamer(platform, args.with_analyzer, args.with_loopback)
    builder = Builder(soc, csr_csv="test/csr.csv")
    builder.build(run=args.build)

    if args.load:
        prog = soc.platform.create_programmer()
        prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))

    if args.flash:
        prog = soc.platform.create_programmer()
        prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bin"))

if __name__ == "__main__":
    main()
# File: program1.py
# This is my first program in python. Trying various things and capabilities of python

from datetime import datetime
import getpass

todaysDate = datetime.today().strftime('%d-%m-%Y')
welcomeMessage = "Hello " + getpass.getuser() + ', welcome!'
userActions = 0

print('\n-------------------------------------------------------------------')
print('Date: ', todaysDate)
print(welcomeMessage)
print('This program is just an introduction to Python programming language')
print('-------------------------------------------------------------------')

userName = input('What\'s your name dear: ')
userActions = userActions + 1
print('\nOkay ', userName, ' let\'s start!')

userAge = int(input('What\'s your age: '))
userActions = userActions + 1
if userAge < 18:
    print('>>So you are not an adult yet.\n')
else:
    print('>>Oh you are an adult.\n')

print('You did a total of ', userActions, ' actions. Perfect job!')
print('Goodbye, ', getpass.getuser(), ' || Program exiting...')
# Repo: cedadev/cloudhands-provider-iface
"""JASMIN Cloud

JASMIN Cloud Provider Interface package - command line client for Edge Gateway
interface
"""
__author__ = "<NAME>"
__date__ = "24/03/14"
__copyright__ = "(C) 2014 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__revision__ = "$Id$"

import os
import logging
import argparse
import getpass
import xml.etree.ElementTree as ET

from jasmincloud.provider.vcloud.network.client import EdgeGatewayClient

log = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description='vCloud Director Edge Gateway interface')

    parser.add_argument('--config-file', '-f', dest='config_filepath',
                        help='Path to Configuration file which sets connection '
                             'parameters and which command to execute.')

    parser.add_argument('--log-level', '-l', dest='log_level',
                        help='Set log level for output to stdout. Choose one '
                             'of %r, default is silent mode.' %
                             [logging.getLevelName(i)
                              for i in range(logging.DEBUG, logging.CRITICAL + 1, 10)])

    args = parser.parse_args()

    if args.log_level is not None:
        logging.basicConfig(format='%(asctime)s %(message)s',
                            level=logging.getLevelName(args.log_level))

    if args.config_filepath is None:
        raise SystemExit('Error: no configuration file set.%s%s' %
                         (os.linesep, parser.format_help()))

    edgegateway_clnt = EdgeGatewayClient.from_settings_file(args.config_filepath)

    global_settings = edgegateway_clnt.settings[EdgeGatewayClient.SETTINGS_GLOBAL]
    if global_settings['password'] is None:
        # Prompt for password from command line if not set in settings file
        # (the call was redacted in the source dump; getpass.getpass matches
        # the import and the prompt below)
        global_settings['password'] = getpass.getpass(
            'Enter password for user %r: ' % global_settings['username'])

    # Connect to vCloud Director service
    edgegateway_clnt.connect_from_settings()

    # Check actions to execute from settings file section - allow one connection
    # section followed by an action section - first filter out connect section
    action_name = None
    for section_name in list(edgegateway_clnt.settings.keys()):
        if section_name != EdgeGatewayClient.SETTINGS_GLOBAL:
            action_name = section_name
            break

    # Retrieving the current configuration settings applies to all actions
    edgegateway_configs = edgegateway_clnt.get_config(
        vdc_name=global_settings['vdc_name'],
        names=global_settings['edgegateway_name'])

    if action_name is None:
        # Default to display the current configuration
        print(ET.tostring(edgegateway_configs[0]._elem))

    elif action_name == EdgeGatewayClient.SETTINGS_ROUTE_HOST:
        settings = edgegateway_clnt.settings[action_name]

        # NAT host IP from VDC to outside
        edgegateway_clnt.set_host_routing(edgegateway_configs[0],
                                          settings['iface_name'],
                                          settings['internal_ip'],
                                          settings['external_ip'])

        result = edgegateway_clnt.post_config(edgegateway_configs[0])

    elif action_name == EdgeGatewayClient.SETTINGS_RM_NAT_RULES:
        settings = edgegateway_clnt.settings[action_name]

        # Remove NAT rules by identifier
        edgegateway_clnt.remove_nat_rules(edgegateway_configs[0],
                                          settings['nat_rule_ids'])

        result = edgegateway_clnt.post_config(edgegateway_configs[0])
        log.debug(ET.tostring(result._elem))

    elif action_name == EdgeGatewayClient.SETTINGS_CANCEL_TASKS:
        settings = edgegateway_clnt.settings[action_name]

        # Purge tasks waiting to be executed
        result = edgegateway_clnt.cancel_tasks(edgegateway_configs[0],
                                               task_uris=settings['task_uris'])


if __name__ == '__main__':
    main()
# Repo: miiiingi/algorithmstudy
import re

p = re.compile('a[e]c')
m = p.match('adec')
# 'a[e]c' only matches the literal string 'aec', so this match is None;
# guard before calling .group() to avoid an AttributeError.
if m:
    print(m.group())
else:
    print('no match')
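
# A widened character class accepts alternatives at one position, but still
# consumes exactly one character:
p2 = re.compile('a[de]c')
print(p2.match('adc').group())  # 'adc' -- [de] matched 'd'
print(p2.match('aec').group())  # 'aec' -- [de] matched 'e'
print(p2.match('adec'))         # None -- [de] cannot span two characters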
# File: Examples/study/chain_genetic_algorithm_multinode/compute_execution_time_multinode.py
import Source.io_util as io
import json


def get_n_maximum(iteration_partial_results, n_nodes):
    time = [r['exec_time'] for n, r in iteration_partial_results.items() if n != 'selection']
    time.sort(reverse=True)
    return time[:n_nodes]


if __name__ == "__main__":
    import os

    multinode_meta_file = os.path.join(os.environ['FCM'], 'Examples', 'compute',
                                       'genetic_algorithm_multinode', 'results',
                                       'sota_models_cifar10-40-dev_validation',
                                       'cifar10_8nodes_800population_400offspring_0',
                                       'multinode_metainfo.json')
    execution_time = 0
    n_nodes = 8
    R_all = {}

    with open(multinode_meta_file, 'r') as handle:
        results = json.load(handle)
        for i, iteration in enumerate(results):
            max_exec_offspring_time = sum(get_n_maximum(iteration, 1))
            selection_time = iteration['selection']['exec_time']
            execution_time += max_exec_offspring_time + selection_time

            # Gather the results
            R_iter = io.read_pickle(os.path.join(os.environ['FCM'], iteration['selection']['R']))
            R_all.update(R_iter)

            print("Iteration %d: Offspring+evaluate %f, Selection %f" %
                  (i, max_exec_offspring_time, selection_time))

    print(R_all.keys())
    print("Execution time: %f" % execution_time)

    # Plot solutions
    import Examples.study.plot as myplt
    myplt.plot_accuracy_time_old(R_all)
    myplt.show()
#!/usr/bin/python3
# File: broffeact/__init__.py

import datetime as dt
import os
import re
import argparse
from jinja2 import Template


class bcolors:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


class Status:
    NOT_STARTED, PARSING, DONE = range(3)


def printWarning(msg):
    print(bcolors.WARNING + "WARNING:" + bcolors.ENDC, msg)


def printFAIL(msg):
    print(bcolors.FAIL + "FAIL:" + bcolors.ENDC, msg)


def printOK(word, msg):
    print(bcolors.OKBLUE + word + bcolors.ENDC, msg)


# --------------------------------
class AgDoc:
    """Documentation generator."""

    defaults = {
        'template_filename': 'default',
        'prefix_module': 'M',
        'prefix_comment': '#',
        'prefix_export': 'exp',
        'file_output': 'doc.html',
        'extensions': ['coffee'],
        'directories': ['.'],
        'component_terms': ['createFactory'],
        'proptypes_term': 'React.PropTypes',
    }

    STATS = {
        "nbr_exported": 0,
        "nbr_commented": 0,
        "nbr_modules_unused": 0,
    }

    def __init__(self, defaults):
        self.defaults.update(defaults)
        self.init_output_dir()

    def init_output_dir(self):
        dir = os.path.dirname(self.defaults["file_output"])
        if not os.path.exists(dir) and dir != "":
            os.makedirs(dir)

    def genDoc(self):
        self.writeHTMLDoc(self.walk())

    def isValidExtension(self, name):
        """Check if a file has a valid extension to parse"""
        file_ext = os.path.splitext(name)[1]
        return file_ext and file_ext[1:] in self.defaults["extensions"]

    def walk(self):
        """A little promenade into the application, returning the parsed
        comments as a list of tuples"""
        print("SCANNING IN:", self.defaults["directories"])
        a, d, j = os.path.abspath, os.path.dirname, os.path.join
        main_root_len = len(a((d(d(d(os.path.realpath(__file__)))))))
        values = []
        for dirname in self.defaults["directories"]:
            for root, dirs, files in os.walk(dirname, topdown=True):
                files = sorted(f for f in files if self.isValidExtension(f))
                for name in files:
                    parsed = self.parse(a(j(root, name)), main_root_len)
                    if len(parsed[1]) + len(parsed[2]) + len(parsed[3]) > 0:
                        values.append(parsed)
        return values

    def parse(self, file_name, strip_len):
        """Parse the file"""
        printOK("PARSING:", file_name)
        p_module = self.defaults['prefix_module']

        with open(file_name, 'r') as f:
            comments_dic = {}
            prev_lines, modules, used_modules = [], set(), set()
            status = Status.NOT_STARTED
            for line in f:
                line = line.strip()
                if status == Status.NOT_STARTED:
                    if line.lstrip().startswith(p_module):
                        status = Status.PARSING
                elif status == Status.PARSING:
                    module, sep, rest = line.partition(':')
                    if sep == ':':
                        modules.add(module)
                    else:
                        status = Status.DONE
                else:
                    used_modules |= set(
                        re.findall(
                            r'\b{}\.([\w\d_]+)\b'.format(p_module), line
                        )
                    )
                    # Export part
                    if self.isExportedLine(line):
                        exported_name = self.getExportedName(line)
                        printOK("FOUND EXPORTED: ", exported_name)
                        self.STATS["nbr_exported"] += 1
                        comment = self.getComment(line, prev_lines)
                        comments_dic[exported_name] = comment
                        if comment[2]:
                            self.STATS["nbr_commented"] += 1
                        else:
                            printWarning("'{}' not commented in {}".format(
                                exported_name, file_name
                            ))
                prev_lines.append(line)

        # Check for unused modules
        unused_modules = modules - used_modules
        for unused in unused_modules:
            self.STATS["nbr_modules_unused"] += 1
            printWarning("Unused module: '" + unused + "'")

        return (
            file_name[strip_len:].lstrip('/'),
            modules,
            unused_modules,
            comments_dic
        )

    def getComment(self, line, prev_lines):
        """Get the comment(s) from prev_lines of a component"""
        isComponent = any(
            term in line for term in self.defaults['component_terms']
        )
        if isComponent:
            com, isCommented = self.getCompComments(line, prev_lines)
            comment = ("c", com, isCommented)
        else:
            com, isCommented = self.getFuncComments(line, prev_lines)
            comment = ("f", com, isCommented)
        return comment

    def getCompComments(self, line, prev_lines):
        comments, ignore_lines, propTypes = '', True, []
        for prev_line in reversed(prev_lines):
            comment = prev_line
            if ignore_lines:
                if self.defaults["proptypes_term"] in comment:
                    propTypes.append(
                        comment.replace(
                            self.defaults["proptypes_term"] + ".", ""
                        ).replace(".isRequired", " (required)")
                    )
                ignore_lines = 'createClass' not in comment
                continue
            if not comment.startswith(self.defaults["prefix_comment"]):
                break
            comments = comment + '\n' + comments
        if propTypes and comments:
            comments = (
                "PropTypes:\n-" + "\n-".join(propTypes) + "\n" + comments
            )
        return comments, comments != ""

    def getFuncComments(self, line, prev_lines):
        comments = ''
        for prev_line in reversed(prev_lines):
            if not prev_line.startswith(self.defaults["prefix_comment"]):
                break
            comments = prev_line + '\n' + comments
        # Adding args if there is a comment
        if comments:
            m = re.search(r'\((.*)\)', line)
            if m:
                comments = "ARGS: {}\n".format(m.group(1)) + comments
        return comments, comments != ""

    def isExportedLine(self, line):
        """Check if the line contains the exportPrefix"""
        return line.startswith(self.defaults["prefix_export"] + ".")

    def getExportedName(self, line):
        export, sep, rest = line.partition('=')
        assert sep == '=', 'Should be "=" found "{}"'.format(sep)
        prefix_export, sep, export = export.partition('.')
        assert sep == '.', 'Should be "." found "{}"'.format(sep)
        assert prefix_export.strip() == self.defaults['prefix_export'],\
            'Should be {} found {}'.format(
                self.defaults['prefix_export'], prefix_export
            )
        return export

    def writeHTMLDoc(self, values):
        """values = (file_name, modules, modules_unused, comments_dic)"""
        tpl_filename = os.path.dirname(os.path.realpath(__file__)) + \
            "/templates/" + self.defaults["template_filename"] + ".html"
        try:
            t = Template(open(tpl_filename, 'r').read())
        except:
            printFAIL("Couldn't open template: " +
                      self.defaults["template_filename"])
            return

        params = {
            'values': values,
            'exported_cnt': self.STATS['nbr_exported'],
            'commented_cnt': self.STATS['nbr_commented'],
            'unused_cnt': self.STATS['nbr_modules_unused'],
            'commented_percentage': (
                0 if self.STATS['nbr_exported'] == 0
                else self.STATS['nbr_commented'] * 100 / self.STATS['nbr_exported']
            ),
            'last_modification': dt.datetime.strftime(
                dt.datetime.utcnow(), '%A, %d %B %Y %H:%M:%S'
            )
        }

        f = open(self.defaults['file_output'], 'w')
        f.write(t.render(**params))
        f.close()


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--verbosity", help="increase output verbosity")
    parser.add_argument("-o", "--output", help="output filename",
                        default="doc/index.html")
    parser.add_argument("-i", "--input", default="app/",
                        help="input directories (eg: app/,extra/)")
    parser.add_argument("-t", "--template", default="default",
                        help="HTML template file (eg: default)")
    args = parser.parse_args()

    options = {}
    options["file_output"] = args.output
    options["directories"] = args.input.split(",")
    options["template_filename"] = args.template
    AgDoc(options).genDoc()


# Moved below main() -- in the original the guard ran before main() was
# defined, which raises NameError when executed as a script.
if __name__ == '__main__':
    main()
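
# Besides the CLI, the generator can be driven programmatically; a
# hypothetical sketch (paths are illustrative):
#
#   options = {
#       "directories": ["app/"],
#       "file_output": "doc/index.html",
#       "template_filename": "default",
#   }
#   AgDoc(options).genDoc()   # scans app/ for .coffee files, writes doc/index.html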
# Package __init__: re-export the CharGen entry point.
from .chargen import CharGen
from aiogram import types

from ..services.database import RedisDB
from ..utils import text


async def command_reset(message: types.Message, database: RedisDB):
    await database.clear_received_urls(user_id=message.from_user.id)
    await message.reply(
        text=text.get_text(
            language_code=message.from_user.language_code,
            text_name="urls_reset",
        )
    )
from typing import List

import numpy as np

from .mexpress import native_parse_f64, native_parse_f32


def _transform_x(x):
    x = np.asarray(x)
    # since we want to have both options f(*x) and f(x) we need to check the dimension
    return x if x.ndim == 1 else x.squeeze()


class Mexpress:
    def __init__(self, interfex, dtype) -> None:
        self.interfex = interfex
        self._grad = None
        self._hess = None
        self.dtype = dtype
        self.n_vars = self.interfex.n_vars()

    def _make_grad(self):
        if self._grad is None:
            self._grad = [
                self.interfex.partial(i) for i in range(self.interfex.n_vars())
            ]
        return self._grad

    def __call__(self, *x):
        return self.interfex(_transform_x(x))

    def partial(self, i: int) -> "Mexpress":
        return Mexpress(self.interfex.partial(i), self.dtype)

    def grad(self, *x) -> np.ndarray:
        grad_ = self._make_grad()
        x = _transform_x(x)
        return np.array([di(x) for di in grad_], dtype=self.dtype)

    def hess(self, *x) -> np.ndarray:
        grad_ = self._make_grad()
        if self._hess is None:
            # Only the upper triangle is materialized; symmetry fills the rest.
            self._hess = [
                [grad_i.partial(c) for c in range(r, self.interfex.n_vars())]
                for r, grad_i in enumerate(grad_)
            ]
        x = _transform_x(x)
        hess = np.zeros((self.n_vars, self.n_vars), dtype=self.dtype)
        for r in range(self.n_vars):
            for c in range(r, self.n_vars):
                hess[r, c] = self._hess[r][c - r](x)
            for c in range(0, r):
                hess[r, c] = hess[c, r]
        return hess

    def __str__(self) -> str:
        return self.interfex.unparse()


def parse_f64(s: str) -> Mexpress:
    return Mexpress(native_parse_f64(s.replace("**", "^")), dtype=np.float64)


def parse_f32(s: str) -> Mexpress:
    return Mexpress(native_parse_f32(s.replace("**", "^")), dtype=np.float32)
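
# A quick sketch of the API above; the 'x0'/'x1' variable naming in the
# expression string is an assumption about the native parser's conventions,
# not something this wrapper defines.
if __name__ == "__main__":
    f = parse_f64("x0**2 * x1 + x1")   # '**' is rewritten to '^' before parsing
    print(f(2.0, 3.0))                 # scalar value
    print(f.grad(2.0, 3.0))            # first derivatives, shape (2,)
    print(f.hess(2.0, 3.0))            # Hessian, shape (2, 2), symmetric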
#!/usr/bin/env python
# import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib.patches as mpatches
import matplotlib
import os
# import random
# import time

ext = ".eps"

matplotlib.rcParams.update({'font.size': 17})


def activityDayCount():
    labels = "2", "3", "4", "5", "6~9", "> 9"
    data = [474334, 192039, 97792, 56249, 81316, 25208]
    colors = [
        'lightcoral', 'lightblue', 'yellowgreen', 'gold', 'pink', 'orange'
    ]
    # labels = "2", "3", "4", "5", "6", "7", "8", "9", "> 9"
    # data = [474334, 192039, 97792, 56249, 34578, 22223, 14902, 9613, 25208]
    # colors = [
    #     'lightcoral', 'lightblue', 'yellowgreen', 'gold', 'green',
    #     'blue', 'purple', 'pink', 'orange', 'grey'
    # ]
    # cmap = plt.cm.prism
    # colors = cmap(np.linspace(0., 1., len(labels)))
    plt.axis('equal')
    plt.pie(
        data, labels=labels, autopct='%1.1f%%', shadow=True,
        colors=colors, startangle=90, pctdistance=0.81
        # labeldistance=0.9
    )
    filename = "./ActivityDay" + ext
    plt.savefig(filename)
    # os.system("open " + filename)


def powerActivityInterval():
    labels = "2", "3", "4", "5", "6~8", "> 8"
    data = [
        302574, 206186, 63559, 49204,
        21519 + 18555 + 9879,
        8945 + 5526 + 4941 + 3319 + 2873 + 2137 + 1825 + 912 + 1188 + 944
        + 1022 + 828 + 701 + 542 + 655 + 474 + 419 + 253 + 283 + 275 + 374
        + 358 + 623
        # 449895, 135273, 54127, 26426,
        # 14913 + 8692 + 5768,
        # 3788 + 2737 + 1947 + 1551 + 1249 + 937 + 766 + 694 + 515
        # + 431 + 272 + 227 + 194 + 135 + 88 + 110 + 52 + 41 + 38 + 15 + 10 + 1
        # + 1
    ]
    colors = [
        'lightcoral', 'lightblue', 'yellowgreen', 'gold', '#FF7BAC',
        '#8B9FC0', 'purple', 'pink'
    ]
    # colors = [
    #     'lightcoral', 'lightblue', 'yellowgreen', 'gold', 'pink',
    #     'orange'
    # ]
    plt.axis('equal')
    plt.pie(
        data, labels=labels, autopct='%1.1f%%', shadow=True,
        colors=colors, startangle=90, pctdistance=0.83
    )
    filename = "./ActivityInterval" + ext
    plt.savefig(filename)
    os.system("open " + filename)


def continusActivity():
    labels = "2", "3", "4", "5", "6~8", "> 8"
    data = [
        339315, 227716, 80042, 44103,
        21721 + 12851 + 7419,
        4782 + 10453
    ]
    colors = [
        'lightcoral', 'lightblue', 'yellowgreen', 'gold', '#FF7BAC',
        '#8B9FC0', 'purple', 'pink'
    ]
    # colors = [
    #     'lightcoral', 'lightblue', 'yellowgreen', 'gold', 'pink',
    #     'orange'
    # ]
    plt.axis('equal')
    plt.pie(
        data, labels=labels, autopct='%1.1f%%', shadow=True,
        colors=colors, startangle=90, pctdistance=0.8
    )
    filename = "./ActivityDay" + ext
    plt.savefig(filename)
    # os.system("open " + filename)


def numberIterations():
    x = [1, 2, 3, 5, 8, 10, 12, 15]
    y = [
        0.5, 0.63141093174, 0.697941532851, 0.689625707331,
        0.677482215081, 0.67721843094, 0.677048542104, 0.679407512327
    ]
    plt.plot(x, y, marker='.')
    plt.axis([0, 16, 0.45, 0.75])
    plt.xlabel("# of iterations")
    plt.ylabel("AUC")
    filename = "./NumberIterations" + ext
    plt.savefig(filename)
    # os.system("open " + filename)


def parameterNumTrees():
    x = [3, 4, 6, 8, 10, 20, 30, 40, 50, 60, 70, 80, 90]
    auc = [
        0.535178949203, 0.542328048322, 0.53965360195, 0.540522688697,
        0.539786324703, 0.538543954391, 0.538709940309, 0.538486978138,
        0.538511488698, 0.53841030334, 0.537832963885, 0.537795069212,
        0.538533024443
    ]
    plt.xlabel("\# of trees in Random Forest")
    plt.ylabel("Measurement of prediction result")
    plt.plot(x, auc, 'r')
    rc('text', usetex=True)
    filename = "./ParameterNumTrees" + ext
    plt.savefig(filename)
    # os.system("open " + filename)


def parameterMaxDepth():
    x = [1, 3, 6, 9, 10, 12, 15, 18]
    auc = [
        0.5, 0.530167110714, 0.534175309461, 0.537991447336,
        0.538511488698, 0.538747477529, 0.53919721591, 0.538927390763
    ]
    plt.xlabel("Max tree depth in Random Forest")
    plt.ylabel("Measurement of prediction result")
    plt.plot(x, auc, 'r')
    rc('text', usetex=True)
    filename = "./ParameterMaxDepth" + ext
    plt.savefig(filename)
    os.system("open " + filename)


def parameterGamma():
    x = [
        0, 0.025, 0.05, 0.075, 0.1, 0.125, 0.15, 0.175, 0.2, 0.225,
        0.25, 0.275, 0.3
    ]
    auc = [
        0.801180577625, 0.803572573946, 0.80489984546, 0.80377223374,
        0.813605079096, 0.813130073065, 0.813966457008, 0.816743640758,
        0.816091040286, 0.8112706937, 0.813802511038, 0.804321627593,
        0.798891057248
    ]
    precision = [
        0.653261933302, 0.602246598249, 0.630421127424, 0.6117061573,
        0.471436915914, 0.467027128287, 0.4620003597, 0.454860270582,
        0.438033756609, 0.373874506211, 0.370803143271, 0.310969203119,
        0.281924350052
    ]
    recall = [
        0.64107751479, 0.656484733573, 0.653340275095, 0.654794689601,
        0.724548740985, 0.719634026102, 0.723789756312, 0.733451003213,
        0.741407202533, 0.769272115752, 0.777675255774, 0.813956968845,
        0.841884015142
    ]
    plt.plot(x, auc, 'r-', label="AUC", marker='s')
    plt.plot(x, precision, 'b--', label="Precision", marker='o')
    plt.plot(x, recall, 'g-.', label="Recall", marker='x')
    rc('text', usetex=True)
    plt.xlabel("Parameter $\gamma$")
    plt.ylabel("Measurement of prediction result")
    plt.legend(
        bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=3,
        mode="expand", borderaxespad=0.
    )
    # handles = [
    #     mpatches.Patch(color='red', label='AUC'),
    #     mpatches.Patch(color='blue', label='Precision'),
    #     mpatches.Patch(color='green', label='Recall')
    # ]
    # plt.legend(
    #     handles=handles, bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
    #     ncol=3, mode="expand", borderaxespad=0.
    # )
    filename = "./ParameterGamma" + ext
    plt.savefig(filename)
    # os.system("open " + filename)


def fixedSampleActivities():
    x = range(0, 32)
    u1 = [
        0, 0, 3, 9, 3, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    ]
    u2 = [
        0, 0, 0, 0, 8, 0, 0, 3, 1, 5, 0, 0, 1, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
    ]
    u3 = [
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 3, 0, 0,
        0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 3, 2, 0
    ]
    plt.plot(x, u1, 'r-', label="User a", marker='s')
    plt.plot(x, u2, 'g--', label="User b", marker='o')
    plt.plot(x, u3, 'b-.', label="User c", marker='x')
    plt.axis([0, 35, 0, 10])
    rc('text', usetex=True)
    plt.xlabel("Day")
    plt.ylabel("Number of Activities")
    plt.legend(
        bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=3,
        mode="expand", borderaxespad=0.
    )
    filename = "./FixedSampleActivities" + ext
    plt.savefig(filename)
    # os.system("open " + filename)


# fixedSampleActivities()
# parameterMaxDepth()
# parameterNumTrees()
# powerActivityInterval()
parameterGamma()
# numberIterations()
# continusActivity()
# activityDayCount()
# plt.show()
# PyTorch GNN model zoo (edge/node/gated/recurrent GNN variants).
# NOTE: legacy `SparseMM()(a, b)` instance calls were replaced with
# `SparseMM.apply(a, b)`, which static autograd Functions require.
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import pdb, time
from torch.autograd import Variable

torch.manual_seed(12)

SIGMA = 1
EPSILON = 1e-5


class GatedConv1d(nn.Module):
    def __init__(self, input_channels, output_channels, kernel_size, stride,
                 padding=0, dilation=1, activation=None):
        super(GatedConv1d, self).__init__()
        self.activation = activation
        self.sigmoid = nn.Sigmoid()
        self.h = nn.Conv1d(input_channels, output_channels, kernel_size,
                           stride, padding, dilation)
        self.g = nn.Conv1d(input_channels, output_channels, kernel_size,
                           stride, padding, dilation)

    def forward(self, x):
        if self.activation is None:
            h = self.h(x)
        else:
            h = self.activation(self.h(x))
        g = self.sigmoid(self.g(x))
        return h * g


class GatedConvTranspose1d(nn.Module):
    def __init__(self, input_channels, output_channels, kernel_size, stride,
                 padding=0, output_padding=0, dilation=1, activation=None):
        super(GatedConvTranspose1d, self).__init__()
        self.activation = activation
        self.sigmoid = nn.Sigmoid()
        self.h = nn.ConvTranspose1d(input_channels, output_channels, kernel_size,
                                    stride, padding, output_padding,
                                    dilation=dilation)
        self.g = nn.ConvTranspose1d(input_channels, output_channels, kernel_size,
                                    stride, padding, output_padding,
                                    dilation=dilation)

    def forward(self, x):
        # start_time = time.time()
        if self.activation is None:
            h = self.h(x)
        else:
            h = self.activation(self.h(x))
        # print("here:", time.time() - start_time)
        g = self.sigmoid(self.g(x))
        # print("here1:", time.time() - start_time)
        return h * g


class ConvTranspose1d(nn.Module):
    def __init__(self, input_channels, output_channels, kernel_size, stride,
                 padding=0, output_padding=0, dilation=1, activation=None):
        super(ConvTranspose1d, self).__init__()
        self.activation = activation
        self.sigmoid = nn.Sigmoid()
        self.h = nn.ConvTranspose1d(input_channels, output_channels, kernel_size,
                                    stride, padding, output_padding,
                                    dilation=dilation)

    def forward(self, x):
        # start_time = time.time()
        if self.activation is None:
            h = self.h(x)
        else:
            h = self.activation(self.h(x))
        # print("here:", time.time() - start_time)
        return h


class edgeGNN(nn.Module):
    def __init__(self, nfeat, nhid, nOut, nNodes, dropout, nEdgF=1):
        super(edgeGNN, self).__init__()
        self.fc_node_1_1 = nn.Linear(nfeat, nhid)
        self.fc_node_1_2 = nn.Linear(nhid, nhid)
        self.fc_edge_1_1 = nn.Linear(nhid * 2 + nEdgF, nhid)
        self.fc_edge_1_2 = nn.Linear(nhid, nhid)
        self.fc_node_2_1 = nn.Linear(nhid * 2, nhid)
        self.fc_node_2_2 = nn.Linear(nhid, nhid)
        self.fc_edge_2_1 = nn.Linear(nhid * 2 + nEdgF, nhid)
        self.fc_edge_2_2 = nn.Linear(nhid, nhid)
        self.ln1 = LayerNorm(nhid)
        self.ln2 = LayerNorm(nhid)
        self.dropout = dropout
        self.act = nn.ReLU()
        self.n2e = nn.Linear(2 * nhid, nOut)
        self.g2e = nn.Sequential(nn.Conv1d(nNodes, int(nNodes / 2), 1), nn.ReLU(),
                                 nn.Conv1d(int(nNodes / 2), 1, 1))
        self.sparseMM = SparseMM.apply

    def forward(self, x, n2e_in, n2e_out, xE):
        # pdb.set_trace()
        # First GNN layer
        # Node MLP
        x = self.act(self.fc_node_1_1(x))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.act(self.fc_node_1_2(x))
        # Node to edge
        x_in = self.sparseMM(n2e_in, x)
        x_out = self.sparseMM(n2e_out, x)
        x = torch.cat([x_in, x_out, xE], 1)
        # Edge MLP
        x = self.act(self.fc_edge_1_1(x))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.act(self.fc_edge_1_2(x))
        # x = self.fc_edge_1_2(x)
        # Edge to node
        x_in = self.sparseMM(n2e_in.transpose(0, 1), x)
        x_out = self.sparseMM(n2e_out.transpose(0, 1), x)
        x = torch.cat([x_in, x_out], 1)

        # Second GNN layer
        # Node MLP
        x = self.act(self.fc_node_2_1(x))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.act(self.fc_node_2_2(x))
        # Node to edge
        x_in = self.sparseMM(n2e_in, x)
        x_out = self.sparseMM(n2e_out, x)
        x = torch.cat([x_in, x_out, xE], 1)
        # Edge MLP
        x = self.act(self.fc_edge_2_1(x))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.act(self.fc_edge_2_2(x))
        # x = self.fc_edge_2_2(x)
        # Edge to node
        x_in = self.sparseMM(n2e_in.transpose(0, 1), x)
        x_out = self.sparseMM(n2e_out.transpose(0, 1), x)
        x = torch.cat([x_in, x_out], 1)

        x = self.n2e(x.unsqueeze(0))
        z = self.g2e(x)
        return z.squeeze(1)


class recEdgeGNN(nn.Module):
    def __init__(self, nfeat, nhid, nOut, dropout, niter):
        super(recEdgeGNN, self).__init__()
        self.fc_node_1_1 = nn.Linear(nfeat, 2 * nhid)
        self.fc_node_1_2 = nn.Linear(nhid, nhid)
        self.fc_edge_1_1 = nn.Linear(nhid * 2, nhid)
        self.fc_edge_1_2 = nn.Linear(nhid, nhid)
        self.fc_node_2_1 = nn.Linear(nhid * 2, nhid)
        self.fc_node_2_2 = nn.Linear(nhid, nhid)
        self.ln1 = LayerNorm(nhid)
        self.ln2 = LayerNorm(nhid)
        self.dropout = dropout
        self.niter = niter
        self.e2p = nn.Linear(2 * nhid, nOut)  # embedding to prediction

    def forward(self, x, n2e_in, n2e_out):
        x = F.relu(self.fc_node_1_1(x))
        for _ in range(self.niter):
            # Node MLP
            x = F.relu(self.fc_node_2_1(x))
            x = F.dropout(x, self.dropout, training=self.training)
            x = F.relu(self.fc_node_2_2(x))
            # Node to edge
            x_in = SparseMM.apply(n2e_in, x)
            x_out = SparseMM.apply(n2e_out, x)
            x = torch.cat([x_in, x_out], 1)
            # Edge MLP
            x = F.relu(self.fc_edge_1_1(x))
            x = F.dropout(x, self.dropout, training=self.training)
            x = F.relu(self.fc_edge_1_2(x))
            # Edge to node
            x_in = SparseMM.apply(n2e_in.transpose(0, 1), x)
            x_out = SparseMM.apply(n2e_out.transpose(0, 1), x)
            x = torch.cat([x_in, x_out], 1)
        # pdb.set_trace()
        return x, self.e2p(x.mean(0).view(1, -1))


class GraphAttentionLayer(nn.Module):
    """
    Simple Graph Attention Layer, with separate processing of self-connection.
    Equation format from https://docs.dgl.ai/en/latest/tutorials/models/1_gnn/9_gat.html
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphAttentionLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight_neighbor = nn.Parameter(
            torch.Tensor(in_features, out_features))
        self.weight_self = nn.Parameter(torch.Tensor(in_features, out_features))
        self.a = nn.Parameter(torch.Tensor(2 * out_features, 1))
        nn.init.xavier_uniform_(self.a.data, gain=1.414)
        self.alpha = 0.2
        self.leakyRelu = nn.LeakyReLU(self.alpha, inplace=True)
        self.softmax = nn.Softmax(dim=1)
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.weight_neighbor.size(1))
        self.weight_neighbor.data.uniform_(-stdv, stdv)
        self.weight_self.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj, n2e_in, n2e_out):
        N = adj.shape[0]
        act_self = torch.mm(input, self.weight_self)
        # Transform node activations Eq. (1)
        h = torch.mm(input, self.weight_neighbor)
        # Compute pairwise edge features (terms inside Eq. (2))
        h_in = torch.mm(n2e_in, h)
        h_out = torch.mm(n2e_out, h)
        hEdge = torch.cat([h_in, h_out], 1)
        # Apply leakyReLU and weights for attention coefficients Eq. (2)
        e = self.leakyRelu(torch.matmul(hEdge, self.a))
        # Apply softmax per node Eq. (3) -- sparse implementation; assumes a
        # constant number of neighbours per node, as in the original.
        idx = adj.coalesce().indices()
        val = adj.coalesce().values()
        numNgbrs = int((idx[0] == 0).sum())
        attention = self.softmax(e.view(-1, numNgbrs)).view(-1)
        # Weigh neighbours with attention by re-weighting the adjacency values.
        # (The original `adj._values = adj._values * attention` was broken --
        # _values is a method -- so the sparse tensor sketched in the source's
        # commented-out line is built instead.)
        alpha = torch.sparse.FloatTensor(idx, val * attention, (N, N))
        # Compute node updates with attention Eq. (4)
        act_neighbor = SparseMM.apply(alpha, h)
        output = act_self + act_neighbor
        if self.bias is not None:
            return output + self.bias
        else:
            return output


class MLP(nn.Module):
    def __init__(self, nfeat, nNodes, nhid, nOut, dropout):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(nfeat, nhid)
        self.fc2 = nn.Linear(nhid, nhid)
        self.fc3 = nn.Linear(nhid, nhid)
        self.fc4 = nn.Linear(nhid, nhid)
        self.dropout = dropout
        # self.ln1 = LayerNorm(nhid)
        self.bn1 = nn.BatchNorm1d(nNodes)
        # self.g2e = nn.Linear(nhid, nhid)  # graph to embedding
        self.e2p = nn.Linear(nhid, nOut, bias=True)  # embedding to prediction
        self.act = nn.LeakyReLU()
        # self.act = nn.ReLU()

    def forward(self, inputs, adj):
        x = self.act(self.fc1(inputs))
        x = self.bn1(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.act(self.fc2(x))
        # x = F.dropout(x, self.dropout, training=self.training)
        # x = self.act(self.fc3(x))
        # x = F.dropout(x, self.dropout, training=self.training)
        # x = self.act(self.fc4(x))
        return x, self.e2p(x.mean(1))


class GAT(nn.Module):
    def __init__(self, nfeat, nNodes, nhid, nOut, dropout):
        super(GAT, self).__init__()
        # self.fc1 = nn.Linear(nfeat, nhid)
        # self.fc2 = nn.Linear(nhid, nhid)
        self.gc1 = GraphAttentionLayer(nfeat, nhid)
        self.gc2 = GraphAttentionLayer(nhid, nhid)
        self.dropout = dropout
        # self.ln1 = LayerNorm(nhid)
        # self.g2e = nn.Linear(nhid, 1)  # graph to embedding
        self.e2p = nn.Linear(nhid, nOut)  # embedding to prediction

    def encode(self, x, adj, n2e_in, n2e_out):
        # pdb.set_trace()
        x = F.relu(self.gc1(x, adj, n2e_in, n2e_out))
        x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.gc2(x, adj, n2e_in, n2e_out))
        return x

    def forward(self, inputs, adj, n2e_in, n2e_out):
        z = self.encode(inputs, adj, n2e_in, n2e_out)
        # z = self.g2e(z)
        # return z, self.e2p(z.transpose(0, 1))
        return z, self.e2p(z.mean(0).view(1, -1))


class nodeGNN(nn.Module):
    def __init__(self, nfeat, nNodes, nhid, nOut, dropout):
        super(nodeGNN, self).__init__()
        self.fc1 = nn.Linear(nfeat, nhid)
        # self.fc2 = nn.Linear(nhid, nhid)
        self.gc1 = GraphConvolution(nhid, nhid)
        self.gc2 = GraphConvolution(nhid, nhid)
        # self.gc3 = GraphConvolutionFirstOrder(nhid, nhid)
        # self.gc4 = GraphConvolutionFirstOrder(nhid, nhid)
        self.dropout = dropout
        # self.ln1 = LayerNorm(nhid)
        # self.g2e = nn.Linear(nhid, 1)  # graph to embedding
        self.e2p = nn.Linear(nhid, nOut)  # embedding to prediction

    def encode(self, x, adj):
        # pdb.set_trace()
        x = F.relu(self.fc1(x))
        # x = F.dropout(x, self.dropout, training=self.training)
        # x = F.relu(self.fc2(x))
        # x = self.ln1(x)
        x = F.relu(self.gc1(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
        x = F.relu(self.gc2(x, adj))
        # x = F.dropout(x, self.dropout, training=self.training)
        # x = F.relu(self.gc3(x, adj))
        # x = F.dropout(x, self.dropout, training=self.training)
        # x = F.relu(self.gc4(x, adj))
        return x

    def forward(self, inputs, adj):
        # pdb.set_trace()
        z = self.encode(inputs, adj)
        # z = self.g2e(z)
        # return z, self.e2p(z.transpose(0, 1))
        return z, self.e2p(z.mean(1))


class gatedGNN(nn.Module):
    def __init__(self, nfeat, nNodes, nhid, nOut, dropout):
        super(gatedGNN, self).__init__()
        self.gate1 = nn.Linear(nhid, 1)
        self.gate2 = nn.Linear(nhid, 1)
        self.gc1 = GraphConvolutionFirstOrder(nfeat, nhid)
        self.gc2 = GraphConvolutionFirstOrder(nhid, nhid)
        self.gc3 = GraphConvolutionFirstOrder(nhid, nhid)
        # self.gc4 = GraphConvolutionFirstOrder(nhid, nhid)
        self.dropout = dropout
        # self.ln1 = LayerNorm(nhid)
        # self.g2e = nn.Linear(nhid, 1)  # graph to embedding
        self.e2p = nn.Linear(nhid, nOut)  # embedding to prediction

    def encode(self, x, adj):
        x = F.relu(self.gc1(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
        x_in = x
        r = F.softmax(self.gate1(x), dim=0)
        z = F.softmax(self.gate2(x), dim=0)
        x = F.relu(self.gc2(x * r, adj))
        # x = F.relu(r * x_in)
        # Gated residual update: interpolate between input and new activation.
        x = (1 - z) * x_in + z * x
        # x = F.relu(self.gc3(x, adj))
        return x

    def forward(self, inputs, adj):
        z = self.encode(inputs, adj)
        # pdb.set_trace()
        # z = self.g2e(z)
        # return z, self.e2p(z.transpose(0, 1))
        return z, self.e2p(z.mean(0).view(1, -1))


class recGNN(nn.Module):
    def __init__(self, nfeat, nNodes, nhid, nOut, dropout, nIter, idxRange):
        super(recGNN, self).__init__()
        self.iter = nIter
        self.fc1 = nn.Linear(nfeat, nhid)
        self.fc2 = nn.Linear(nhid, nhid)
        # self.gc = GraphConvolutionFirstOrder(nhid, nhid)
        self.gc = GraphConvolution(nhid, nhid)
        self.dropout = dropout
        # self.e2p = nn.Linear(nhid, nOut)  # embedding to prediction
        self.e2p = nn.Linear(nhid, nOut)  # embedding to regression output
        self.g2e = nn.Linear(8, 1)
        self.idxRange = idxRange
        self.ln1 = LayerNorm(nhid)
        self.ln2 = LayerNorm(nhid)
        self.bn1 = nn.BatchNorm1d(nhid)
        # self.act = nn.ELU()
        # self.act = nn.LeakyReLU()
        self.act = nn.ReLU()
        # self.outAct = nn.Hardtanh()

    def encode(self, x, adj):
        # pdb.set_trace()
        x = self.act(self.fc1(x))
        x = F.dropout(x, self.dropout, training=self.training)
        # x = self.ln1(x)
        x = self.act(self.fc2(x))
        # Apply the same graph convolution recurrently.
        for _ in range(self.iter):
            x = self.act(self.gc(x, adj))
        return x

    def decode(self, x):
        return self.e2p(x.mean(1))

    def multiGraphDecode(self, x):
        lIdx = self.idxRange[0, 0]
        uIdx = self.idxRange[1, 0]
        z = x[lIdx:uIdx].mean(1).view(1, -1)
        # pdb.set_trace()
        for i in range(1, self.idxRange.shape[1]):
            lIdx = self.idxRange[0, i]
            uIdx = self.idxRange[1, i]
            z = torch.cat((z, x[lIdx:uIdx].mean(0).view(1, -1)))
        z = self.ln2(z)
        z = F.relu(self.g2e(z.transpose(0, 1)).transpose(0, 1))
        return self.e2p(z).view(1, -1)

    def forward(self, inputs, adj):
        # pdb.set_trace()
        emb = self.encode(inputs, adj)
        z = self.decode(emb)
        # z = self.multiGraphDecode(emb)
        return emb, z


class SparseMM(torch.autograd.Function):
    """
    Sparse x dense matrix multiplication with autograd support.
    Implementation by <NAME>: https://discuss.pytorch.org/t/does-pytorch-support-autograd-on-sparse-matrix/6156/7
    """

    @staticmethod
    def forward(ctx, matrix1, matrix2):
        ctx.save_for_backward(matrix1, matrix2)
        return torch.mm(matrix1, matrix2)

    @staticmethod
    def backward(ctx, grad_output):
        matrix1, matrix2 = ctx.saved_tensors
        grad_matrix1 = grad_matrix2 = None
        if ctx.needs_input_grad[0]:
            grad_matrix1 = torch.mm(grad_output, matrix2.t())
        if ctx.needs_input_grad[1]:
            grad_matrix2 = torch.mm(matrix1.t(), grad_output)
        return grad_matrix1, grad_matrix2


class SparseMMnonStat(torch.autograd.Function):
    """
    Sparse x dense matrix multiplication with autograd support
    (legacy, non-static variant kept for reference).
    Implementation by <NAME>: https://discuss.pytorch.org/t/does-pytorch-support-autograd-on-sparse-matrix/6156/7
    """

    def forward(self, matrix1, matrix2):
        self.save_for_backward(matrix1, matrix2)
        return torch.mm(matrix1, matrix2)

    def backward(self, grad_output):
        matrix1, matrix2 = self.saved_tensors
        grad_matrix1 = grad_matrix2 = None
        if self.needs_input_grad[0]:
            grad_matrix1 = torch.mm(grad_output, matrix2.t())
        if self.needs_input_grad[1]:
            grad_matrix2 = torch.mm(matrix1.t(), grad_output)
        return grad_matrix1, grad_matrix2


class GraphConvolution(nn.Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.Tensor(in_features, out_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        # Flatten any leading batch dimension before the dense matmul.
        support = torch.mm(input.view(-1, input.shape[-1]), self.weight)
        sparseMM = SparseMM.apply
        output = sparseMM(adj, support)
        output = output.view(input.shape)
        if self.bias is not None:
            return output + self.bias
        else:
            return output


class GraphConvolutionFirstOrder(nn.Module):
    """
    Simple GCN layer, with separate processing of self-connection
    """

    def __init__(self, in_features, out_features, bias=True):
        super(GraphConvolutionFirstOrder, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight_neighbor = nn.Parameter(
            torch.Tensor(in_features, out_features))
        self.weight_self = nn.Parameter(torch.Tensor(in_features, out_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.weight_neighbor.size(1))
        self.weight_neighbor.data.uniform_(-stdv, stdv)
        self.weight_self.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)

    def forward(self, input, adj):
        act_self = torch.mm(input, self.weight_self)
        support_neighbor = torch.mm(input, self.weight_neighbor)
        act_neighbor = SparseMM.apply(adj, support_neighbor)
        output = act_self + act_neighbor
        if self.bias is not None:
            return output + self.bias
        else:
            return output


class LayerNorm(nn.Module):
    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(features))
        self.beta = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        return self.gamma * (x - mean) / (std + self.eps) + self.beta
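
# Minimal sanity check of the LayerNorm module above (shapes arbitrary):
# each row should come out with near-zero mean and near-unit std.
if __name__ == "__main__":
    ln = LayerNorm(16)
    xx = torch.randn(4, 16)
    yy = ln(xx)
    print(yy.mean(-1))  # ~0 per row
    print(yy.std(-1))   # ~1 per row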
import balanced

balanced.configure('<KEY>')  # API secret redacted in the source dump
api_key = balanced.APIKey()
api_key.save()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import logging


def TagNumDict(version_file_path):
    tag_num_dict = {}
    with open(version_file_path, 'r') as file:
        for line in file.readlines():
            line = line.strip()
            tag = line.split('=')[0]
            num = line.split('=')[1]
            if not num.isdigit():
                raise ValueError("version number must be digit!")
            tag_num_dict[tag] = int(num)
    return tag_num_dict


def CurrentVersion(version_file_path):
    nums_list = list(TagNumDict(version_file_path).values())
    return _VersionStr(nums_list)


def _IncreaseVersion(nums_list, index):
    if not nums_list or index <= 0:
        raise Exception('nums list is empty or index <= 0')
    index = index - 1
    if index >= len(nums_list):
        index = len(nums_list) - 1
    nums_list[index] += 1
    # Reset every component to the right of the bumped one.
    for i in range(index + 1, len(nums_list)):
        nums_list[i] = 0


def _ToFile(version_dict, version_file_path):
    with open(version_file_path, 'w') as file:
        for key in version_dict:
            line = '%s=%s\n' % (key, version_dict[key])
            file.write(line)


def _VersionStr(num_list):
    version = ''
    for num in num_list:
        version += '%s.' % num
    if not version:
        raise ValueError("version string is empty")
    return version[:-1]


def Handle(version_index, version_file_path):
    tag_num_dict = TagNumDict(version_file_path)
    version_nums = list(tag_num_dict.values())
    if version_index <= 0:
        return _VersionStr(version_nums)

    logging.debug('before change: ' + str(version_nums))
    _IncreaseVersion(version_nums, version_index)
    logging.debug('after change: ' + str(version_nums))

    version_dict = dict(zip(tag_num_dict.keys(), version_nums))
    _ToFile(version_dict, version_file_path)
    return _VersionStr(version_nums)


if __name__ == "__main__":
    print(CurrentVersion('version.properties'))
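
# Example: with a hypothetical version.properties containing
#   major=1
#   minor=2
#   patch=3
# bumping index 2 increments the second component and zeroes everything
# to its right:
#   Handle(2, 'version.properties')       -> '1.3.0'
#   CurrentVersion('version.properties')  -> '1.3.0'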
from django.urls import path

from petstagram.pets.views import *

urlpatterns = [
    path('', AllPetsView.as_view(), name='list pets'),
    path('create/', CreatePetView.as_view(), name='create pet'),
    path('edit/<int:pk>', EditPetView.as_view(), name='edit pet'),
    path('delete/<int:pk>', DeletePetView.as_view(), name='delete pet'),
    path('details/<int:pk>', PetDetailsView.as_view(), name='pet details'),
    path('like/<int:pk>', like_pet, name='like pet'),
    path('comment/<int:pk>', comment_pet, name='comment pet'),
]
98468
from setuptools import setup

with open('requirements.txt', 'r') as f:
    requirements = f.read().splitlines()

setup_config = {
    'name': 'pylfire',
    'version': 0.1,
    'packages': ['pylfire'],
    'install_requires': requirements,
    'author': '<NAME>',
    'author_email': '<EMAIL>',
    'url': 'https://github.com/elfi-dev/zoo/pylfire',
    'licence': 'BSD'
}

setup(**setup_config)
1636646
# -*- coding: utf-8 -*-
"""Custom exceptions for the kuaidaili (kdl) client."""

import sys


class KdlException(Exception):
    """Base exception class"""

    def __init__(self, code=None, message=None):
        self.code = code
        if sys.version_info[0] < 3 and isinstance(message, unicode):
            message = message.encode("utf8")
        self.message = message
        self._hint_message = "[KdlException] code: {} message: {}".format(self.code, self.message)

    @property
    def hint_message(self):
        return self._hint_message

    @hint_message.setter
    def hint_message(self, value):
        self._hint_message = value

    def __str__(self):
        if sys.version_info[0] < 3 and isinstance(self.hint_message, unicode):
            self.hint_message = self.hint_message.encode("utf8")
        return self.hint_message


class KdlStatusError(KdlException):
    """Raised for unexpected HTTP status codes"""

    def __init__(self, code, message):
        super(KdlStatusError, self).__init__(code, message)
        self.hint_message = "[KdlStatusError] status_code: {}, message: {}".format(self.code, self.message)


class KdlNameError(KdlException):
    """Raised for invalid argument names"""

    def __init__(self, message, code=-2):
        super(KdlNameError, self).__init__(code, message)
        self.hint_message = "[KdlNameError] message: {}".format(self.message)


class KdlTypeError(KdlException):
    """Raised for invalid argument types"""

    def __init__(self, message, code=-1):
        super(KdlTypeError, self).__init__(code, message)
        self.hint_message = "[KdlTypeError] message: {}".format(self.message)
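
# Illustrative usage sketch (not part of the original file): raising and
# catching one of the exception types defined above.
if __name__ == '__main__':
    try:
        raise KdlStatusError(403, "forbidden")
    except KdlException as e:
        print(e)  # [KdlStatusError] status_code: 403, message: forbidden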
90046
# src/driver.py
from cloudshell.shell.core.session.cloudshell_session import CloudShellSessionContext
from cloudshell.traffic.driver import TrafficControllerDriver

from ixc_handler import IxcHandler


class IxChariotControllerDriver(TrafficControllerDriver):

    def __init__(self):
        super(self.__class__, self).__init__()
        self.handler = IxcHandler()

    def load_config(self, context, ixc_config):
        """ Load IxChariot configuration and select end points.

        :type context: cloudshell.shell.core.driver_context.ResourceRemoteCommandContext
        :param ixc_config: IxChariot configuration name.
        """
        session_id = self.handler.load_config(context, ixc_config)
        my_api = CloudShellSessionContext(context).get_api()
        my_api.WriteMessageToReservationOutput(context.reservation.reservation_id,
                                               ixc_config + ' loaded, endpoints reserved')
        return session_id

    def start_test(self, context, blocking):
        """
        :type context: cloudshell.shell.core.driver_context.ResourceRemoteCommandContext
        """
        self.handler.start_test(blocking)

    def stop_test(self, context):
        """
        :type context: cloudshell.shell.core.driver_context.ResourceRemoteCommandContext
        """
        self.handler.stop_test()

    def get_statistics(self, context, view_name, output_type):
        """ Get statistics for a specific view.

        :type context: cloudshell.shell.core.driver_context.ResourceRemoteCommandContext
        :param view_name: requested statistics view name.
        :param output_type: CSV/PDF.
        """
        return self.handler.get_statistics(context, view_name, output_type)

    def end_session(self, context):
        self.handler.end_session()

    def del_session(self, context):
        self.handler.del_session()

    #
    # Parent commands are not visible so we re-define them in the child.
    #

    def initialize(self, context):
        super(self.__class__, self).initialize(context)

    def cleanup(self):
        super(self.__class__, self).cleanup()

    def keep_alive(self, context, cancellation_context):
        super(self.__class__, self).keep_alive(context, cancellation_context)
3204127
import threading
import time

# In-memory key-value store shared across threads.
f = {}


def create(key, value, timeout=0):
    if key in f:
        print("key already exists")
    else:
        if key.isalpha():
            if len(f) < (1024 * 1024 * 1024) and value <= (16 * 1024 * 1024):
                if timeout == 0:
                    l = [value, timeout]
                else:
                    l = [value, time.time() + timeout]
                if len(key) <= 32:
                    f[key] = l
            else:
                print("Memory limit exceeded")
        else:
            print("Invalid key_name")


def read(key):
    if key not in f:
        print("Please enter a valid key. Not in database")
    else:
        b = f[key]
        if b[1] != 0:
            if time.time() < b[1]:
                result = str(key) + ":" + str(b[0])
                return result
            else:
                print("time-to-live of", key, "has expired")
        else:
            result = str(key) + ":" + str(b[0])
            return result
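
# Illustrative usage sketch (not part of the original file): create a key with
# a 2-second time-to-live and read it back.
if __name__ == '__main__':
    create("greeting", 42, timeout=2)
    print(read("greeting"))  # greeting:42
    time.sleep(3)
    read("greeting")         # prints the expiry message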
129979
from brian import *
from brian.hears import *
from matplotlib import cm, rcParams
import sys, os, glob

sys.path = [os.path.normpath(os.path.join(os.path.split(__file__)[0], '../'))] + sys.path

from common import *
1643761
# -*- coding: utf-8 -*-
"""
@author: guqiuyang
@description: user class wrapper
@create time: 2018/12/18
"""

import json
import logging

import usertable
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from hashlib import sha1


class User(object):
    def __init__(self, sql_json=None):
        self.sql_json = sql_json

    def LoadInfo(self):
        # helper function
        print('get user info from mysql')

    def logger(self):
        """Logging helper"""
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        log = logging.getLogger(__name__)
        return log

    def session(self):
        """Create a database session"""
        f = open(self.sql_json)
        sql_dict = json.load(f)
        engine = create_engine('mysql+pymysql://' + sql_dict['user'] + ':' + sql_dict['password']
                               + '@' + sql_dict['host'] + ':3306/' + sql_dict['database'],
                               encoding='utf-8', echo=True)
        # Create the DBSession type:
        DBSession = sessionmaker(bind=engine)
        sion = DBSession()
        return sion

    def register(self, user_json):
        """Register a user"""
        f = open(user_json)
        user_dict = json.load(f)
        session = self.session()
        try:
            self.session().query(usertable.UserInfo).filter(
                usertable.UserInfo.u_phonenumber == user_dict['phone']).one()
            self.logger().info("This phone number is already registered")
        except:
            user_pwd = sha1(user_dict['pwd'].encode('utf-8')).hexdigest()
            print(user_pwd, len(user_pwd))
            user = usertable.UserInfo(u_userpassword=user_pwd,
                                      u_sex=user_dict['sex'],
                                      u_birthday=user_dict['birthday'],
                                      u_mail=user_dict['mail'],
                                      u_phonenumber=user_dict['phone'])
            session.add(user)
            session.commit()
            session.close()
            self.logger().info("User registered successfully")

    def SignOut(self):
        """Sign the user out"""
        pass

    def Login(self, user_json):
        """Log the user in"""
        f = open(user_json)
        user_dict = json.load(f)
        user_input_pwd = sha1(user_dict['pwd'].encode('utf-8')).hexdigest()
        try:
            # .one() is needed here: query() alone returns a Query object,
            # not a row with a u_userpassword attribute.
            userinfo = self.session().query(usertable.UserInfo).filter(
                usertable.UserInfo.u_phonenumber == user_dict['phone']).one()
            print(userinfo)
            if userinfo.u_userpassword == user_input_pwd:
                self.logger().info("Login succeeded")
                return True
            else:
                self.logger().info("Login failed")
        except:
            self.logger().info("Please check the account and password")

    def DeleteUser(self, name, passwd):
        print('delete a user')

    def test(self):
        self.session().query(usertable.UserInfo)
        self.session().close()


if __name__ == '__main__':
    user_json = "E:\\Project\\easydo\\scripts\\userinfo.json"
    user = User(sql_json="E:\\Project\\easydo\\scripts\\sqlinfo.json")
    # user.register(user_json)
    # user.Login(user_json)
    print(user.test())
3239643
""" Services model module. """ # Django imports from django.db import models from django.utils.translation import gettext_lazy as _ from django.core.validators import MaxValueValidator # Utils Abstract model from hisitter.utils.abstract_users import HisitterModel class Service(HisitterModel): """ This model hast the main activity in the application recieving the user-client, and the user-bbs, on the service of attention to the child or children. """ user_client = models.ForeignKey( "users.Client", verbose_name=_("Customer"), related_name='client_service', on_delete=models.CASCADE ) user_bbs = models.ForeignKey( "users.Babysitter", verbose_name=_("Babysitter"), related_name='bbs_service', on_delete=models.CASCADE ) date = models.DateField( auto_now=False, auto_now_add=False, ) SHIFTS = [ ('morning', 'morning'), ('afternoon', 'afternoon'), ('evening', 'evening'), ('night', 'night') ] shift = models.CharField( max_length=10, choices=SHIFTS, default='morning' ) duration = models.DurationField( blank=True, null=True ) address = models.CharField( _("Address"), max_length=255 ) lat = models.DecimalField( _("Latitude"), max_digits=10, decimal_places=6, blank=True, null=True ) long = models.DecimalField( _("Latitude"), max_digits=10, decimal_places=6, blank=True, null=True ) max_validator = MaxValueValidator( limit_value=10, message='The maximum number of children per babysitter must be 10' ) count_children = models.PositiveSmallIntegerField(validators=[max_validator]) special_cares = models.CharField( _("Special Cares"), max_length=255, help_text='Write the special cares to consider for each child', blank=True, null=True ) is_active = models.BooleanField( _("Service Active"), default=True ) service_start = models.DateTimeField( _("Datetime service starts"), auto_now=False, auto_now_add=False, blank=True, null=True ) service_end = models.DateTimeField( _("Datetime service ends"), auto_now=False, auto_now_add=False, blank=True, null=True ) total_cost = models.DecimalField( _("Total service cost"), max_digits=7, decimal_places=2, blank=True, null=True ) def __str__(self): return 'id ' + str(self.id) + ', ' + str(self.user_client) + f'{str(self.user_bbs)}, {str(self.date)}'
1631807
# Iterate over two lists
alist = ["Red", "Blue", "Green"]
blist = [22, 33, 44]
for ii, jj in zip(alist, blist):
    print(f"There are {jj} {ii} balls.")

# Update the counts
for idx, val in enumerate(alist):
    if val == "Blue":
        blist[idx] *= 100

for ii, jj in zip(alist, blist):
    print(f"There are {jj} {ii} balls.")
88189
# packages/gtmapi/lmsrvlabbook/api/mutations/bundledapp.py
import graphene

from gtmcore.inventory.inventory import InventoryManager
from gtmcore.environment.bundledapp import BundledAppManager

from lmsrvcore.auth.user import get_logged_in_username, get_logged_in_author
from lmsrvlabbook.api.objects.environment import Environment


class SetBundledApp(graphene.relay.ClientIDMutation):
    """Mutation to add or update a bundled app configuration to a project"""

    class Input:
        owner = graphene.String(required=True, description="Owner of the labbook")
        labbook_name = graphene.String(required=True, description="Name of the labbook")
        app_name = graphene.String(required=True, description="Name of the bundled app")
        description = graphene.String(required=True, description="Description of the bundled app")
        port = graphene.Int(required=True, description="Port internally exposed")
        command = graphene.String(description="Optional command run to start the bundled app if needed")

    environment = graphene.Field(Environment)

    @classmethod
    def mutate_and_get_payload(cls, root, info, owner, labbook_name, app_name, description, port,
                               command=None, client_mutation_id=None):
        username = get_logged_in_username()
        lb = InventoryManager().load_labbook(username, owner, labbook_name,
                                             author=get_logged_in_author())
        bam = BundledAppManager(lb)
        with lb.lock():
            bam.add_bundled_app(port, app_name, description, command)

        return SetBundledApp(environment=Environment(name=labbook_name, owner=owner))


class RemoveBundledApp(graphene.relay.ClientIDMutation):
    """Mutation to remove a bundled app from a container"""

    class Input:
        owner = graphene.String(required=True, description="Owner of the labbook")
        labbook_name = graphene.String(required=True, description="Name of the labbook")
        app_name = graphene.String(required=True, description="Name of the bundled app")

    environment = graphene.Field(Environment)

    @classmethod
    def mutate_and_get_payload(cls, root, info, owner, labbook_name, app_name, client_mutation_id=None):
        username = get_logged_in_username()
        lb = InventoryManager().load_labbook(username, owner, labbook_name,
                                             author=get_logged_in_author())
        bam = BundledAppManager(lb)
        with lb.lock():
            bam.remove_bundled_app(app_name)

        # Return this mutation's own payload type (the original returned
        # SetBundledApp here, which looks like a copy-paste slip).
        return RemoveBundledApp(environment=Environment(name=labbook_name, owner=owner))
3207975
# tests/test_sudo.py
# -*- coding: utf-8 -*-
import pytest
import plumbum
from plumbum import local
from plumbum._testtools import skip_on_windows

# This is a separate file to make separating the (ugly) sudo command easier.
# For example, you can now run test_local directly without typing a password.


class TestSudo:
    @skip_on_windows
    def test_as_user(self):
        with local.as_root():
            local["date"]()
123513
import json
import os
import sys
import time
from multiprocessing import Process

import requests
import websocket

from .objects import Key, Object


class ResponseError(Exception):
    pass


class Client():
    def __init__(self, callback):
        self.callback = callback

        self.host = os.getenv('GIMULATOR_HOST')
        if self.host is None:
            raise EnvironmentError

        self.id = os.getenv('CLIENT_ID')
        if self.id is None:
            raise EnvironmentError

        self.session = requests.Session()
        self.session.headers = {"Content-Type": "application/json"}

        self.register()

        self.ws_header = 'Cookie: token=' + self.session.cookies.get_dict()['token']
        self.ws = websocket.WebSocket()

        self.p = Process(target=self.on_message)
        self.p.start()

    def on_message(self):
        while True:
            while True:
                try:
                    self.ws.connect(url=self.get_url('socket'), header=[self.ws_header])
                except ConnectionRefusedError:
                    self.p.terminate()
                except:
                    time.sleep(2)
                else:
                    break

            while True:
                try:
                    json_result = self.ws.recv()
                    result = json.loads(json_result)
                    self.callback(Object.from_dict(result))
                except KeyboardInterrupt:
                    self.p.terminate()
                    sys.exit(0)
                except:
                    break

    def get_url(self, endpoint):
        if endpoint == 'socket':
            return 'ws://' + self.host + '/' + endpoint
        return 'http://' + self.host + '/' + endpoint

    def register(self):
        data = json.dumps({"ID": self.id})
        response = self.session.post(self.get_url('register'), data=data)
        if response.status_code < 200 or response.status_code > 299:
            raise ResponseError(response.text)
        self.session.cookies = response.cookies

    def get(self, key: Key) -> Object:
        data = {'Key': key.__dict__, 'Value': None}
        response = self.session.post(self.get_url('get'), data=json.dumps(data))
        if response.status_code < 200 or response.status_code > 299:
            raise ResponseError(response.text)
        return Object.from_dict(json.loads(response.text))

    def set(self, obj: Object):
        data = {'Key': obj.Key.__dict__, 'Value': obj.Value}
        response = self.session.post(self.get_url('set'), data=json.dumps(data))
        if response.status_code < 200 or response.status_code > 299:
            raise ResponseError(response.text)

    def delete(self, obj: Object):
        data = {'Key': obj.Key.__dict__, 'Value': obj.Value}
        response = self.session.post(self.get_url('delete'), data=json.dumps(data))
        if response.status_code < 200 or response.status_code > 299:
            raise ResponseError(response.text)

    def find(self, key: Key) -> list:
        data = {'Key': key.__dict__, 'Value': None}
        response = self.session.post(self.get_url('find'), data=json.dumps(data))
        if response.status_code < 200 or response.status_code > 299:
            raise ResponseError(response.text)

        # Parse the response body, not the Response object itself
        # (the original passed `response` straight to json.loads).
        items = json.loads(response.text)
        result = []
        for item in items:
            result.append(Object.from_dict(item))
        return result

    def watch(self, key: Key):
        data = {'Key': key.__dict__, 'Value': None}
        response = self.session.post(self.get_url('watch'), data=json.dumps(data))
        if response.status_code < 200 or response.status_code > 299:
            raise ResponseError(response.text)
1798071
from django.contrib.auth.decorators import login_required
from django.shortcuts import render


# Create your views here.
@login_required
def checkout(request):
    context = {}
    template = 'checkout.html'
    return render(request, template, context)
165511
import os

import tweepy

TWITTER_API_KEY = os.getenv('TWITTER_API_KEY')
TWITTER_API_KEY_SECRET = os.getenv('TWITTER_API_KEY_SECRET')
TWITTER_ACCESS_TOKEN = os.getenv('TWITTER_ACCESS_TOKEN')
TWITTER_ACCESS_TOKEN_SECRET = os.getenv('TWITTER_ACCESS_TOKEN_SECRET')

# Authenticate to Twitter (pass the variables, not their names as string
# literals as the original did)
auth = tweepy.OAuthHandler(TWITTER_API_KEY, TWITTER_API_KEY_SECRET)
auth.set_access_token(TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_TOKEN_SECRET)

# Create API object
api = tweepy.API(auth)

try:
    api.verify_credentials()
    print("Authentication OK")
except:
    print("Error during authentication")

api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)

timeline = api.home_timeline()
for tweet in timeline:
    print(f"{tweet.user.name} said {tweet.text}")

# Create a tweet
# api.update_status("Hello Pvrple Bot Test three by Tweepy")

user = api.get_user("MikezGarcia")
print("User details:")
print(user.name)
print(user.description)
print(user.location)

print("Last 20 Followers:")
for follower in user.followers():
    print(follower.name)

# api.create_friendship("pvrple_blvck_sa")
api.update_profile(description="I AM Pvrple Blvck. by TweepyBot")

# Like most recent tweet
'''tweets = api.home_timeline(count=1)
tweet = tweets[0]
print(f"Liking tweet {tweet.id} of {tweet.author.name}")
api.create_favorite(tweet.id)'''

# See blocked contacts
for block in api.blocks():
    print(block.name)

# Search tweets
for tweet in api.search(q="Bitcoin", lang="en", rpp=10):
    print(f"{tweet.user.name}: {tweet.text}")

# Trends
trends_result = api.trends_place(1)
for trend in trends_result[0]["trends"]:
    print(trend["name"])
1673935
# python3
# Copyright 2021 InstaDeep Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import abstractmethod
from typing import Any, Iterator, List

import dm_env


class ParallelEnvWrapper(dm_env.Environment):
    """
    Abstract class for parallel environment wrappers.
    """

    @abstractmethod
    def env_done(self) -> bool:
        """
        Returns a bool indicating if all agents in the env are done.
        """

    @property
    @abstractmethod
    def agents(self) -> List:
        """
        Returns the active agents in the env.
        """

    @property
    @abstractmethod
    def possible_agents(self) -> List:
        """
        Returns all the possible agents in the env.
        """


class SequentialEnvWrapper(ParallelEnvWrapper):
    """
    Abstract class for sequential environment wrappers.
    """

    @abstractmethod
    def agent_iter(self, max_iter: int) -> Iterator:
        """
        Returns an iterator that yields the current agent in the env.
            max_iter: Maximum number of iterations (to limit infinite
                loops/iterations).
        """

    @property
    @abstractmethod
    def current_agent(self) -> Any:
        """
        Returns the currently selected agent.
        """
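
# Illustrative sketch (not part of the original file): a hypothetical concrete
# wrapper only has to fill in the abstract members above (plus the usual
# dm_env.Environment methods such as reset/step/observation_spec/action_spec).
# The `_done` dict used here is an assumption for the demo.
#
#     class MyParallelWrapper(ParallelEnvWrapper):
#         def env_done(self) -> bool:
#             return all(self._done.values())
#
#         @property
#         def agents(self) -> List:
#             return [a for a, d in self._done.items() if not d]
#
#         @property
#         def possible_agents(self) -> List:
#             return list(self._done.keys())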
3277063
# Copyright (c) 2019-2020 <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

import cvxpy as cp
import numpy as np
from numpy import linalg


def sr_proxgd(sample, dictionary, Delta, alpha, lam, x0, eps, xstar):
    x_old = 0
    x_new = x0
    x_list, err_list = [x_new], []

    m = dictionary.shape[0]
    n = dictionary.shape[1]
    In = np.eye(n)

    E = np.real(linalg.eigvals((1 / m) * dictionary.T @ dictionary + alpha * In))
    L = E.max()
    eta = 1 / L

    iter = 0
    while np.linalg.norm(x_new - xstar) ** 2 > eps:
        x_old = x_new
        iter = iter + 1

        # y = x_old - eta * ((dictionary.T@dictionary + alpha*In)@x_old
        #                    - dictionary.T @ sample)
        y = x_old - eta * ((1 / m) * (dictionary.T @ (dictionary @ x_old - sample))
                           + alpha * x_old)

        z = cp.Variable(shape=n)
        cost = (L / 2) * cp.sum_squares(z - y) + lam * cp.norm(Delta @ z, 1)
        prob = cp.Problem(cp.Minimize(cost))
        prob.solve(solver=cp.CVXOPT, verbose=False)
        # print('Solver status: {}'.format(prob.status))

        # Check for error.
        if prob.status != cp.OPTIMAL:
            raise Exception("Solver did not converge!")

        x_new = z.value
        x_list.append(x_new)
        err_list.append(np.linalg.norm(x_new - xstar))
        # print("[", iter, "] Error", np.linalg.norm(x_new - xstar))

    return x_list[-1], x_list, err_list
3221042
#
# This file is part of pysnmp-apps software.
#
# Copyright (c) 2005-2017, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pysnmp/license.html
#
from pysnmp_apps.cli import base
from pysnmp.entity import config
from pysnmp import error

authProtocols = {
    'MD5': config.usmHMACMD5AuthProtocol,
    'SHA': config.usmHMACSHAAuthProtocol,
    'SHA224': config.usmHMAC128SHA224AuthProtocol,
    'SHA256': config.usmHMAC192SHA256AuthProtocol,
    'SHA384': config.usmHMAC256SHA384AuthProtocol,
    'SHA512': config.usmHMAC384SHA512AuthProtocol,
    'NONE': config.usmNoAuthProtocol
}

privProtocols = {
    'DES': config.usmDESPrivProtocol,
    '3DES': config.usm3DESEDEPrivProtocol,
    'AES': config.usmAesCfb128Protocol,
    'AES128': config.usmAesCfb128Protocol,
    'AES192': config.usmAesCfb192Protocol,
    'AES192BLMT': config.usmAesBlumenthalCfb192Protocol,
    'AES256': config.usmAesCfb256Protocol,
    'AES256BLMT': config.usmAesBlumenthalCfb256Protocol,
    'NONE': config.usmNoPrivProtocol
}


def getUsage():
    return """\
SNMPv1/v2c security options:
   -c COMMUNITY          SNMP community string (e.g. public)
SNMPv3 security options:
   -u SECURITY-NAME      SNMP USM user security name (e.g. bert)
   -l SECURITY-LEVEL     security level (noAuthNoPriv|authNoPriv|authPriv)
   -a AUTH-PROTOCOL      authentication protocol ID (%s)
   -A PASSPHRASE         authentication protocol pass phrase (8+ chars)
   -x PRIV-PROTOCOL      privacy protocol ID (%s)
   -X PASSPHRASE         privacy protocol pass phrase (8+ chars)
   -E CONTEXT-ENGINE-ID  context engine ID (e.g. 800000020109840301)
   -e ENGINE-ID          security SNMP engine ID (e.g. 800000020109840301)
   -n CONTEXT-NAME       SNMP context name (e.g. bridge1)
   -Z BOOTS,TIME         destination SNMP engine boots/time
""" % ('|'.join(sorted([x for x in authProtocols if x != 'NONE'])),
       '|'.join(sorted([x for x in privProtocols if x != 'NONE'])))


# Scanner

class SMScannerMixIn:
    # SNMPv1/v2

    def t_community(self, s):
        r' -c '
        self.rv.append(base.ConfigToken('community'))

    # SNMPv3

    def t_authProtocol(self, s):
        r' -a '
        self.rv.append(base.ConfigToken('authProtocol'))

    def t_authKey(self, s):
        r' -A '
        self.rv.append(base.ConfigToken('authKey'))

    def t_privProtocol(self, s):
        r' -x '
        self.rv.append(base.ConfigToken('privProtocol'))

    def t_privKey(self, s):
        r' -X '
        self.rv.append(base.ConfigToken('privKey'))

    def t_securityName(self, s):
        r' -u '
        self.rv.append(base.ConfigToken('securityName'))

    def t_securityLevel(self, s):
        r' -l '
        self.rv.append(base.ConfigToken('securityLevel'))

    def t_engineID(self, s):
        r' -e '
        self.rv.append(base.ConfigToken('engineID'))

    def t_contextEngineId(self, s):
        r' -E '
        self.rv.append(base.ConfigToken('contextEngineId'))

    def t_contextName(self, s):
        r' -n '
        self.rv.append(base.ConfigToken('contextName'))

    def t_engineBoots(self, s):
        r' -Z '
        self.rv.append(base.ConfigToken('engineBoots'))


# Parser

class SMParserMixIn:
    def p_smSpec(self, args):
        '''
        Option ::= SnmpV1Option
        Option ::= SnmpV3Option

        SnmpV1Option ::= Community
        Community ::= community string
        Community ::= community whitespace string

        SnmpV3Option ::= AuthProtocol
        SnmpV3Option ::= AuthKey
        SnmpV3Option ::= PrivProtocol
        SnmpV3Option ::= PrivKey
        SnmpV3Option ::= SecurityName
        SnmpV3Option ::= SecurityLevel
        SnmpV3Option ::= EngineID
        SnmpV3Option ::= ContextEngineId
        SnmpV3Option ::= ContextName
        SnmpV3Option ::= EngineBoots

        AuthProtocol ::= authProtocol string
        AuthProtocol ::= authProtocol whitespace string
        AuthKey ::= authKey string
        AuthKey ::= authKey whitespace string
        PrivProtocol ::= privProtocol string
        PrivProtocol ::= privProtocol whitespace string
        PrivKey ::= privKey string
        PrivKey ::= privKey whitespace string
        SecurityName ::= securityName string
        SecurityName ::= securityName whitespace string
        SecurityLevel ::= securityLevel string
        SecurityLevel ::= securityLevel whitespace string
        EngineID ::= engineID string
        EngineID ::= engineID whitespace string
        ContextEngineId ::= contextEngineId string
        ContextEngineId ::= contextEngineId whitespace string
        ContextName ::= contextName string
        ContextName ::= contextName whitespace string
        EngineBoots ::= engineBoots string
        EngineBoots ::= engineBoots whitespace string
        '''


# Generator

class __SMGenerator(base.GeneratorTemplate):
    # SNMPv1/v2
    def n_Community(self, cbCtx, node):
        snmpEngine, ctx = cbCtx
        if len(node) > 2:
            ctx['communityName'] = node[2].attr
        else:
            ctx['communityName'] = node[1].attr

    # SNMPv3
    def n_AuthProtocol(self, cbCtx, node):
        snmpEngine, ctx = cbCtx
        if len(node) > 2:
            p = node[2].attr.upper()
        else:
            p = node[1].attr.upper()
        try:
            ctx['authProtocol'] = authProtocols[p]
        except KeyError:
            raise error.PySnmpError('Unknown authentication protocol "%s"' % p)

    def n_AuthKey(self, cbCtx, node):
        snmpEngine, ctx = cbCtx
        if len(node) > 2:
            p = node[2].attr
        else:
            p = node[1].attr
        if len(p) < 8:
            raise error.PySnmpError('Short authentication key (8+ chars required)')
        ctx['authKey'] = p

    def n_PrivProtocol(self, cbCtx, node):
        snmpEngine, ctx = cbCtx
        if len(node) > 2:
            p = node[2].attr.upper()
        else:
            p = node[1].attr.upper()
        try:
            ctx['privProtocol'] = privProtocols[p]
        except KeyError:
            raise error.PySnmpError('Unknown privacy protocol "%s"' % p)

    def n_PrivKey(self, cbCtx, node):
        snmpEngine, ctx = cbCtx
        if len(node) > 2:
            p = node[2].attr
        else:
            p = node[1].attr
        if len(p) < 8:
            raise error.PySnmpError('Short privacy key (8+ chars required)')
        ctx['privKey'] = p

    def n_SecurityName(self, cbCtx, node):
        snmpEngine, ctx = cbCtx
        if len(node) > 2:
            ctx['securityName'] = node[2].attr
        else:
            ctx['securityName'] = node[1].attr

    def n_SecurityLevel(self, cbCtx, node):
        snmpEngine, ctx = cbCtx
        if len(node) > 2:
            ctx['securityLevel'] = node[2].attr
        else:
            ctx['securityLevel'] = node[1].attr

    def n_EngineID(self, cbCtx, node):
        snmpEngine, ctx = cbCtx
        if len(node) > 2:
            ctx['engineID'] = node[2].attr
        else:
            ctx['engineID'] = node[1].attr

    def n_ContextEngineId(self, cbCtx, node):
        snmpEngine, ctx = cbCtx
        if len(node) > 2:
            ctx['contextEngineId'] = node[2].attr
        else:
            ctx['contextEngineId'] = node[1].attr

    def n_ContextName(self, cbCtx, node):
        snmpEngine, ctx = cbCtx
        if len(node) > 2:
            ctx['contextName'] = node[2].attr
        else:
            ctx['contextName'] = node[1].attr

    def n_EngineBoots(self, cbCtx, node):  # XXX
        snmpEngine, ctx = cbCtx
        if len(node) > 2:
            ctx['engineBoots'] = node[2].attr
        else:
            ctx['engineBoots'] = node[1].attr
        if ',' in ctx['engineBoots']:
            ctx['engineBoots'], ctx['engineTime'] = ctx['engineBoots'].split(',', 1)
        else:
            ctx['engineTime'] = 0


def generator(cbCtx, ast):
    snmpEngine, ctx = cbCtx
    __SMGenerator().preorder(cbCtx, ast)
    # Commit collected data
    if ctx['versionId'] == 3:
        if 'securityName' not in ctx:
            raise error.PySnmpError('Security name not specified')
        if 'securityLevel' not in ctx:
            raise error.PySnmpError('Security level not specified')
        if ctx['securityLevel'] == 'noAuthNoPriv':
            if 'authKey' in ctx:
                del ctx['authKey']
            if 'privKey' in ctx:
                del ctx['privKey']
        elif ctx['securityLevel'] == 'authNoPriv':
            if 'privKey' in ctx:
                del ctx['privKey']
        if 'authKey' in ctx:
            if 'authProtocol' not in ctx:
                ctx['authProtocol'] = config.usmHMACMD5AuthProtocol
        else:
            ctx['authProtocol'] = config.usmNoAuthProtocol
            ctx['authKey'] = None
        if 'privKey' in ctx:
            if 'privProtocol' not in ctx:
                ctx['privProtocol'] = config.usmDESPrivProtocol
        else:
            ctx['privProtocol'] = config.usmNoPrivProtocol
            ctx['privKey'] = None
        config.addV3User(
            snmpEngine,
            ctx['securityName'],
            ctx['authProtocol'], ctx['authKey'],
            ctx['privProtocol'], ctx['privKey']
        )
        # edit SNMP engine boots/uptime
        if 'engineBoots' in ctx:
            snmpEngineBoots, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineBoots')
            snmpEngineBoots.setSyntax(
                snmpEngineBoots.getSyntax().clone(ctx['engineBoots'])
            )
        if 'engineTime' in ctx:
            snmpEngineTime, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineTime')
            snmpEngineTime.setSyntax(
                snmpEngineTime.getSyntax().clone(ctx['engineTime'])
            )
    else:  # SNMPv1/v2c
        if 'communityName' not in ctx:
            raise error.PySnmpError('Community name not specified')
        ctx['securityName'] = 'my-agent'
        ctx['securityLevel'] = 'noAuthNoPriv'
        config.addV1System(
            snmpEngine, ctx['securityName'], ctx['communityName']
        )
    ctx['paramsName'] = '%s-params' % ctx['securityName']
    config.addTargetParams(
        snmpEngine, ctx['paramsName'], ctx['securityName'],
        ctx['securityLevel'], ctx['versionId']
    )
17194
# poly_gauss_coil model
# conversion of Poly_GaussCoil.py
# converted by <NAME>, Mar 2016

r"""
This empirical model describes the scattering from *polydisperse* polymer
chains in theta solvents or polymer melts, assuming a Schulz-Zimm type
molecular weight distribution.

To describe the scattering from *monodisperse* polymer chains, see the
:ref:`mono-gauss-coil` model.

Definition
----------

.. math::

    I(q) = \text{scale} \cdot I_0 \cdot P(q) + \text{background}

where

.. math::

    I_0 &= \phi_\text{poly} \cdot V
           \cdot (\rho_\text{poly}-\rho_\text{solv})^2 \\
    P(q) &= 2 [(1 + UZ)^{-1/U} + Z - 1] / [(1 + U) Z^2] \\
    Z &= [(q R_g)^2] / (1 + 2U) \\
    U &= (Mw / Mn) - 1 = \text{polydispersity ratio} - 1 \\
    V &= M / (N_A \delta)

Here, $\phi_\text{poly}$ is the volume fraction of polymer, $V$ is the
volume of a polymer coil, $M$ is the molecular weight of the polymer,
$N_A$ is Avogadro's Number, $\delta$ is the bulk density of the polymer,
$\rho_\text{poly}$ is the sld of the polymer, $\rho_\text{solv}$ is the
sld of the solvent, and $R_g$ is the radius of gyration of the polymer coil.

The 2D scattering intensity is calculated in the same way as the 1D,
but where the $q$ vector is redefined as

.. math::

    q = \sqrt{q_x^2 + q_y^2}

References
----------

.. [#] O Glatter and O Kratky (editors), *Small Angle X-ray Scattering*,
   Academic Press, (1982) Page 404
.. [#] <NAME>, <NAME>, *Polymers and Neutron Scattering*,
   Oxford Science Publications, (1996)
.. [#] <NAME>, *Small Angle Neutron Scattering* in *Modern Techniques for
   Polymer Characterisation*, Wiley, (1999)
.. [#] http://www.ncnr.nist.gov/staff/hammouda/distance_learning/chapter_28.pdf

Authorship and Verification
----------------------------

* **Author:**
* **Last Modified by:**
* **Last Reviewed by:**
"""

import numpy as np
from numpy import inf, expm1, power

name = "poly_gauss_coil"
title = "Scattering from polydisperse polymer coils"

description = """
    Evaluates the scattering from
    polydisperse polymer chains.
    """
category = "shape-independent"

# pylint: disable=bad-whitespace, line-too-long
#   ["name", "units", default, [lower, upper], "type", "description"],
parameters = [
    ["i_zero", "1/cm", 70.0, [0.0, inf], "", "Intensity at q=0"],
    ["rg", "Ang", 75.0, [0.0, inf], "", "Radius of gyration"],
    ["polydispersity", "None", 2.0, [1.0, inf], "", "Polymer Mw/Mn"],
    ]
# pylint: enable=bad-whitespace, line-too-long
# NB: Scale and Background are implicit parameters on every model


def Iq(q, i_zero, rg, polydispersity):
    # pylint: disable = missing-docstring
    u = polydispersity - 1.0
    z = q**2 * (rg**2 / (1.0 + 2.0*u))

    # need to trap the case of the polydispersity being 1 (ie, monodisperse!)
    if polydispersity == 1.0:
        result = 2.0 * (expm1(-z) + z)
        index = q != 0.
        result[index] /= z[index]**2
        result[~index] = 1.0
    else:
        # Taylor series around z=0 of (2*(1+uz)^(-1/u) + z - 1) / (z^2(u+1))
        p = [
            #(-1 - 20*u - 155*u**2 - 580*u**3 - 1044*u**4 - 720*u**5) / 2520.,
            #(+1 + 14*u + 71*u**2 + 154*u**3 + 120*u**4) / 360.,
            #(-1 - 9*u - 26*u**2 - 24*u**3) / 60.,
            (+1 + 5*u + 6*u**2) / 12.,
            (-1 - 2*u) / 3.,
            (+1),
            ]
        result = 2.0 * (power(1.0 + u*z, -1.0/u) + z - 1.0) / (1.0 + u)
        index = z > 1e-4
        result[index] /= z[index]**2
        result[~index] = np.polyval(p, z[~index])
    return i_zero * result
Iq.vectorized = True  # Iq accepts an array of q values


def random():
    """Return a random parameter set for the model."""
    rg = 10**np.random.uniform(0, 4)
    #rg = 1e3
    polydispersity = 10**np.random.uniform(0, 3)
    pars = dict(
        #scale=1, background=0,
        i_zero=1e7,  # i_zero is a simple scale
        rg=rg,
        polydispersity=polydispersity,
    )
    return pars

demo = dict(scale=1.0, i_zero=70.0, rg=75.0, polydispersity=2.0, background=0.0)

# these unit test values taken from SasView 3.1.2
tests = [
    [{'scale': 1.0, 'i_zero': 70.0, 'rg': 75.0,
      'polydispersity': 2.0, 'background': 0.0},
     [0.0106939, 0.469418], [57.6405, 0.169016]],
    ]
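
# Illustrative usage sketch (not part of the original file): evaluate the
# model's 1D intensity on a small q grid with the demo parameters.
if __name__ == '__main__':
    q = np.logspace(-3, 0, 5)
    print(Iq(q, i_zero=70.0, rg=75.0, polydispersity=2.0))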
27639
import abc
from typing import List

from utils.datatypes import Source


class DataLoaderInterface(object):

    # @staticmethod added for consistency with get_filetypes; the original
    # declared get_name without self or a staticmethod decorator.
    @staticmethod
    @abc.abstractmethod
    def get_name() -> str:
        '''Returns an internal name for this loader'''
        raise NotImplementedError("users must define a name for this loader")

    @staticmethod
    @abc.abstractmethod
    def get_filetypes() -> List[str]:
        '''Returns a list of file types supported by this data loader'''
        raise NotImplementedError('users must define a list of supported filetypes.')

    @abc.abstractmethod
    def load_data_from_file(self, filepath: str) -> Source:
        '''Reads a file and extracts lines of text. Returns one section per page.'''
        raise NotImplementedError("users must define a function to load data from a file.")
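
# Illustrative sketch (not part of the original file): a hypothetical loader
# filling in the interface. `Source` comes from utils.datatypes, so its
# constructor arguments here are assumptions for the demo.
#
#     class TextLoader(DataLoaderInterface):
#         @staticmethod
#         def get_name() -> str:
#             return 'text'
#
#         @staticmethod
#         def get_filetypes() -> List[str]:
#             return ['txt']
#
#         def load_data_from_file(self, filepath: str) -> Source:
#             with open(filepath) as f:
#                 return Source(f.read().splitlines())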
3368881
import os
import pickle

import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
import tqdm


def createDataCSV(dataset):
    labels = []
    texts = []
    dataType = []
    label_map = {}

    name_map = {'wiki31k': 'Wiki10-31K',
                'wiki500k': 'Wiki-500K',
                'amazoncat13k': 'AmazonCat-13K',
                'amazon670k': 'Amazon-670K',
                'eurlex4k': 'Eurlex-4K'}

    assert dataset in name_map
    dataset = name_map[dataset]

    fext = '_texts.txt' if dataset == 'Eurlex-4K' else '_raw_texts.txt'
    with open(f'../data/{dataset}/train{fext}', encoding="utf-8") as f:
        for i in tqdm.tqdm(f):
            texts.append(i.replace('\n', ''))
            dataType.append('train')

    with open(f'../data/{dataset}/test{fext}', encoding="utf-8") as f:
        for i in tqdm.tqdm(f):
            texts.append(i.replace('\n', ''))
            dataType.append('test')

    with open(f'../data/{dataset}/train_labels.txt', encoding="utf-8") as f:
        for i in tqdm.tqdm(f):
            for l in i.replace('\n', '').split():
                label_map[l] = 0
            labels.append(i.replace('\n', ''))

    with open(f'../data/{dataset}/test_labels.txt', encoding="utf-8") as f:
        print(len(label_map))
        for i in tqdm.tqdm(f):
            for l in i.replace('\n', '').split():
                label_map[l] = 0
            labels.append(i.replace('\n', ''))
        print(len(label_map))

    assert len(texts) == len(labels) == len(dataType)

    df_row = {'text': texts, 'label': labels, 'dataType': dataType}

    for i, k in enumerate(sorted(label_map.keys())):
        label_map[k] = i

    df = pd.DataFrame(df_row)
    print('label map', len(label_map))
    return df, label_map


class MDataset(Dataset):
    def __init__(self, df, mode, tokenizer, label_map, max_length,
                 token_type_ids=None, group_y=None, candidates_num=None):
        assert mode in ["train", "valid", "test"]
        self.mode = mode
        self.df, self.n_labels, self.label_map = df[df.dataType == self.mode], len(label_map), label_map
        self.len = len(self.df)
        self.tokenizer, self.max_length, self.group_y = tokenizer, max_length, group_y
        self.multi_group = False
        self.token_type_ids = token_type_ids
        self.candidates_num = candidates_num

        if group_y is not None:
            # group y mode
            self.candidates_num, self.group_y, self.n_group_y_labels = candidates_num, [], group_y.shape[0]
            self.map_group_y = np.empty(len(label_map), dtype=np.long)
            for idx, labels in enumerate(group_y):
                self.group_y.append([])
                for label in labels:
                    self.group_y[-1].append(label_map[label])
                self.map_group_y[self.group_y[-1]] = idx
                self.group_y[-1] = np.array(self.group_y[-1])
            self.group_y = np.array(self.group_y)

    def __getitem__(self, idx):
        max_len = self.max_length
        review = self.df.text.values[idx].lower()
        labels = [self.label_map[i] for i in self.df.label.values[idx].split() if i in self.label_map]

        review = ' '.join(review.split()[:max_len])

        text = review
        if self.token_type_ids is not None:
            input_ids = self.token_type_ids[idx]
            if input_ids[-1] == 0:
                input_ids = input_ids[input_ids != 0]
            input_ids = input_ids.tolist()
        elif hasattr(self.tokenizer, 'encode_plus'):
            input_ids = self.tokenizer.encode(
                'filling empty' if len(text) == 0 else text,
                add_special_tokens=True,
                max_length=max_len
            )
        else:
            # fast tokenizer
            input_ids = self.tokenizer.encode(
                'filling empty' if len(text) == 0 else text,
                add_special_tokens=True
            ).ids

        if len(input_ids) == 0:
            print('zero string')
            assert 0
        if len(input_ids) > self.max_length:
            input_ids[self.max_length - 1] = input_ids[-1]
            input_ids = input_ids[:self.max_length]

        attention_mask = [1] * len(input_ids)
        token_type_ids = [0] * len(input_ids)

        padding_length = self.max_length - len(input_ids)
        input_ids = input_ids + ([0] * padding_length)
        attention_mask = attention_mask + ([0] * padding_length)
        token_type_ids = token_type_ids + ([0] * padding_length)

        input_ids = torch.tensor(input_ids)
        attention_mask = torch.tensor(attention_mask)
        token_type_ids = torch.tensor(token_type_ids)

        if self.group_y is not None:
            label_ids = torch.zeros(self.n_labels)
            label_ids = label_ids.scatter(0, torch.tensor(labels),
                                          torch.tensor([1.0 for i in labels]))
            group_labels = self.map_group_y[labels]
            if self.multi_group:
                group_labels = np.concatenate(group_labels)
            group_label_ids = torch.zeros(self.n_group_y_labels)
            group_label_ids = group_label_ids.scatter(0, torch.tensor(group_labels),
                                                      torch.tensor([1.0 for i in group_labels]))
            candidates = np.concatenate(self.group_y[group_labels], axis=0)
            if len(candidates) < self.candidates_num:
                sample = np.random.randint(self.n_group_y_labels,
                                           size=self.candidates_num - len(candidates))
                candidates = np.concatenate([candidates, sample])
            elif len(candidates) > self.candidates_num:
                candidates = np.random.choice(candidates, self.candidates_num, replace=False)

            if self.mode == 'train':
                return input_ids, attention_mask, token_type_ids, \
                    label_ids[candidates], group_label_ids, candidates
            else:
                return input_ids, attention_mask, token_type_ids, \
                    label_ids, group_label_ids, candidates

        # NOTE: the original file contained a leftover "root fc layer" block
        # here that referenced undefined names (layers_group_labels,
        # self.map_children, layers_group_labels_ids, layers_candidates) from
        # a hierarchical variant of this dataset; it is kept below, commented
        # out, because it could not run as written.
        # group_labels = layers_group_labels[0]
        # group_label_ids = torch.zeros(len(self.map_children[0]))
        # group_label_ids = group_label_ids.scatter(0, torch.tensor(group_labels),
        #                                           torch.tensor([1.0 for i in group_labels]))
        # layers_group_labels_ids.append(group_label_ids)
        # if self.mode == 'train':
        #     return input_ids, attention_mask, token_type_ids, \
        #         layers_group_labels_ids[::-1], layers_candidates[::-1]
        # else:
        #     return input_ids, attention_mask, token_type_ids, layers_group_labels + [labels]

        label_ids = torch.zeros(self.n_labels)
        label_ids = label_ids.scatter(0, torch.tensor(labels),
                                      torch.tensor([1.0 for i in labels]))
        return input_ids, attention_mask, token_type_ids, label_ids

    def __len__(self):
        return self.len
3304157
# 2021 - <NAME> - www.manualdocodigo.com.br

import sys

from PyQt5 import QtCore, QtWidgets, uic
from PyQt5.QtCore import QFile, QTextStream


class MainWindow(QtWidgets.QMainWindow):
    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)

        self.filename = ""

        # Load the UI Page
        uic.loadUi("mainwindow.ui", self)

        self.actionopen.triggered.connect(self.open)
        self.actionsave.triggered.connect(self.save)
        self.actionsave_as.triggered.connect(self.saveAs)
        self.lineEditAddress.textChanged.connect(self.serCursorPosition)

    def open(self):
        fName, filter = QtWidgets.QFileDialog.getOpenFileName(self, "OpenFile")
        f = QtCore.QFile(fName)
        f.open(QtCore.QFile.ReadOnly)
        data = f.readAll()
        self.hexwidget.setData(data)
        self.filename = fName

    def save(self):
        if self.filename:
            data = self.hexwidget.getData()
            f = open(self.filename, "wb")
            f.write(data)
            f.close()
            print("Saved successfully...")
        else:
            print("No file to save")

    def saveAs(self):
        fName, _ = QtWidgets.QFileDialog.getSaveFileName(self, "Save File")
        if fName:
            self.filename = fName
            self.save()
        else:
            print("Invalid File")

    def serCursorPosition(self):
        try:
            address = int(self.lineEditAddress.text(), 16)
            self.hexwidget.setCursorPosition(address)
        except:
            print("Invalid hexadecimal number")


def main():
    app = QtWidgets.QApplication(sys.argv)

    # Theme test from:
    # https://github.com/Alexhuszagh/BreezeStyleSheets
    if False:
        file = QFile("./dark.qss")
        file.open(QFile.ReadOnly | QFile.Text)
        stream = QTextStream(file)
        app.setStyleSheet(stream.readAll())

    main = MainWindow()
    main.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
130254
def generate_instance_name(name):
    out = name[0].lower()
    for char in name[1:]:
        if char.isupper():
            out += "_%s" % char.lower()
        else:
            out += char
    return out


def generate_human_name(name):
    out = name[0]
    for char in name[1:]:
        if char.isupper():
            out += " %s" % char.lower()
        else:
            out += char
    return out
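
# Illustrative usage sketch (not part of the original file):
#
#     generate_instance_name('MyAdminCommand')  # -> 'my_admin_command'
#     generate_human_name('MyAdminCommand')     # -> 'My admin command'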
3387690
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import json
import math
import string
import collections

import numpy as np
from keras import backend
from keras import layers
from six.moves import xrange

from nets.layers import BatchNormalization

BASE_WEIGHTS_PATH = (
    'https://github.com/Callidior/keras-applications/'
    'releases/download/efficientnet/')

WEIGHTS_HASHES = {
    'efficientnet-b0': ('163292582f1c6eaca8e7dc7b51b01c61'
                        '5b0dbc0039699b4dcd0b975cc21533dc',
                        'c1421ad80a9fc67c2cc4000f666aa507'
                        '89ce39eedb4e06d531b0c593890ccff3'),
    'efficientnet-b1': ('d0a71ddf51ef7a0ca425bab32b7fa7f1'
                        '6043ee598ecee73fc674d9560c8f09b0',
                        '75de265d03ac52fa74f2f510455ba64f'
                        '9c7c5fd96dc923cd4bfefa3d680c4b68'),
    'efficientnet-b2': ('bb5451507a6418a574534aa76a91b106'
                        'f6b605f3b5dde0b21055694319853086',
                        '433b60584fafba1ea3de07443b74cfd3'
                        '2ce004a012020b07ef69e22ba8669333'),
    'efficientnet-b3': ('03f1fba367f070bd2545f081cfa7f3e7'
                        '6f5e1aa3b6f4db700f00552901e75ab9',
                        'c5d42eb6cfae8567b418ad3845cfd63a'
                        'a48b87f1bd5df8658a49375a9f3135c7'),
    'efficientnet-b4': ('98852de93f74d9833c8640474b2c698d'
                        'b45ec60690c75b3bacb1845e907bf94f',
                        '<KEY>'
                        'd9d91ea64877e8d9c38b6c1e0767c411'),
    'efficientnet-b5': ('30172f1d45f9b8a41352d4219bf930ee'
                        '3339025fd26ab314a817ba8918fefc7d',
                        '9d197bc2bfe29165c10a2af8c2ebc675'
                        '07f5d70456f09e584c71b822941b1952'),
    'efficientnet-b6': ('f5270466747753485a082092ac9939ca'
                        'a546eb3f09edca6d6fff842cad938720',
                        '1d0923bb038f2f8060faaf0a0449db4b'
                        '96549a881747b7c7678724ac79f427ed'),
    'efficientnet-b7': ('876a41319980638fa597acbbf956a82d'
                        '10819531ff2dcb1a52277f10c7aefa1a',
                        '60b56ff3a8daccc8d96edfd40b204c11'
                        '3e51748da657afd58034d54d3cec2bac')
}

BlockArgs = collections.namedtuple('BlockArgs', [
    'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
    'expand_ratio', 'id_skip', 'strides', 'se_ratio'
])
# defaults will be a public argument for namedtuple in Python 3.7
# https://docs.python.org/3/library/collections.html#collections.namedtuple
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)

DEFAULT_BLOCKS_ARGS = [
    BlockArgs(kernel_size=3, num_repeat=1, input_filters=32, output_filters=16,
              expand_ratio=1, id_skip=True, strides=[1, 1], se_ratio=0.25),
    BlockArgs(kernel_size=3, num_repeat=2, input_filters=16, output_filters=24,
              expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
    BlockArgs(kernel_size=5, num_repeat=2, input_filters=24, output_filters=40,
              expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
    BlockArgs(kernel_size=3, num_repeat=3, input_filters=40, output_filters=80,
              expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
    BlockArgs(kernel_size=5, num_repeat=3, input_filters=80, output_filters=112,
              expand_ratio=6, id_skip=True, strides=[1, 1], se_ratio=0.25),
    BlockArgs(kernel_size=5, num_repeat=4, input_filters=112, output_filters=192,
              expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
    BlockArgs(kernel_size=3, num_repeat=1, input_filters=192, output_filters=320,
              expand_ratio=6, id_skip=True, strides=[1, 1], se_ratio=0.25)
]

CONV_KERNEL_INITIALIZER = {
    'class_name': 'VarianceScaling',
    'config': {
        'scale': 2.0,
        'mode': 'fan_out',
        # EfficientNet actually uses an untruncated normal distribution for
        # initializing conv layers, but keras.initializers.VarianceScaling use
        # a truncated distribution.
        # We decided against a custom initializer for better serializability.
        'distribution': 'normal'
    }
}

DENSE_KERNEL_INITIALIZER = {
    'class_name': 'VarianceScaling',
    'config': {
        'scale': 1. / 3.,
        'mode': 'fan_out',
        'distribution': 'uniform'
    }
}


def get_swish(**kwargs):
    # Accept (and ignore) keyword arguments: the builder below calls
    # get_swish(**kwargs); the original bare signature would fail whenever
    # kwargs were non-empty.
    def swish(x):
        return x * backend.sigmoid(x)

    return swish


def get_dropout():
    class FixedDropout(layers.Dropout):
        def _get_noise_shape(self, inputs):
            if self.noise_shape is None:
                return self.noise_shape

            symbolic_shape = backend.shape(inputs)
            noise_shape = [symbolic_shape[axis] if shape is None else shape
                           for axis, shape in enumerate(self.noise_shape)]
            return tuple(noise_shape)

    return FixedDropout


def round_filters(filters, width_coefficient, depth_divisor):
    filters *= width_coefficient
    new_filters = int(filters + depth_divisor / 2) // depth_divisor * depth_divisor
    new_filters = max(depth_divisor, new_filters)
    if new_filters < 0.9 * filters:
        new_filters += depth_divisor
    return int(new_filters)


def round_repeats(repeats, depth_coefficient):
    return int(math.ceil(depth_coefficient * repeats))


def mb_conv_block(inputs, block_args, activation, drop_rate=None, prefix='', freeze_bn=False):
    has_se = (block_args.se_ratio is not None) and (0 < block_args.se_ratio <= 1)
    bn_axis = 3

    Dropout = get_dropout()

    filters = block_args.input_filters * block_args.expand_ratio
    if block_args.expand_ratio != 1:
        x = layers.Conv2D(filters, 1,
                          padding='same',
                          use_bias=False,
                          kernel_initializer=CONV_KERNEL_INITIALIZER,
                          name=prefix + 'expand_conv')(inputs)
        x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'expand_bn')(x)
        x = layers.Activation(activation, name=prefix + 'expand_activation')(x)
    else:
        x = inputs

    # Depthwise Convolution
    x = layers.DepthwiseConv2D(block_args.kernel_size,
                               strides=block_args.strides,
                               padding='same',
                               use_bias=False,
                               depthwise_initializer=CONV_KERNEL_INITIALIZER,
                               name=prefix + 'dwconv')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'bn')(x)
    x = layers.Activation(activation, name=prefix + 'activation')(x)

    # Squeeze and Excitation phase
    if has_se:
        num_reduced_filters = max(1, int(
            block_args.input_filters * block_args.se_ratio
        ))
        se_tensor = layers.GlobalAveragePooling2D(name=prefix + 'se_squeeze')(x)

        target_shape = (1, 1, filters) if backend.image_data_format() == 'channels_last' else (filters, 1, 1)
        se_tensor = layers.Reshape(target_shape, name=prefix + 'se_reshape')(se_tensor)
        se_tensor = layers.Conv2D(num_reduced_filters, 1,
                                  activation=activation,
                                  padding='same',
                                  use_bias=True,
                                  kernel_initializer=CONV_KERNEL_INITIALIZER,
                                  name=prefix + 'se_reduce')(se_tensor)
        se_tensor = layers.Conv2D(filters, 1,
                                  activation='sigmoid',
                                  padding='same',
                                  use_bias=True,
                                  kernel_initializer=CONV_KERNEL_INITIALIZER,
                                  name=prefix + 'se_expand')(se_tensor)
        if backend.backend() == 'theano':
            # For the Theano backend, we have to explicitly make
            # the excitation weights broadcastable.
            pattern = ([True, True, True, False]
                       if backend.image_data_format() == 'channels_last'
                       else [True, False, True, True])
            se_tensor = layers.Lambda(
                lambda x: backend.pattern_broadcast(x, pattern),
                name=prefix + 'se_broadcast')(se_tensor)
        x = layers.multiply([x, se_tensor], name=prefix + 'se_excite')

    # Output phase
    x = layers.Conv2D(block_args.output_filters, 1,
                      padding='same',
                      use_bias=False,
                      kernel_initializer=CONV_KERNEL_INITIALIZER,
                      name=prefix + 'project_conv')(x)
    # x = BatchNormalization(freeze=freeze_bn, axis=bn_axis, name=prefix + 'project_bn')(x)
    x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'project_bn')(x)
    if block_args.id_skip and all(
            s == 1 for s in block_args.strides
    ) and block_args.input_filters == block_args.output_filters:
        if drop_rate and (drop_rate > 0):
            x = Dropout(drop_rate,
                        noise_shape=(None, 1, 1, 1),
                        name=prefix + 'drop')(x)
        x = layers.add([x, inputs], name=prefix + 'add')

    return x


def EfficientNet(width_coefficient,
                 depth_coefficient,
                 default_resolution,
                 dropout_rate=0.2,
                 drop_connect_rate=0.2,
                 depth_divisor=8,
                 blocks_args=DEFAULT_BLOCKS_ARGS,
                 model_name='efficientnet',
                 include_top=True,
                 weights='imagenet',
                 input_tensor=None,
                 input_shape=None,
                 pooling=None,
                 classes=1000,
                 freeze_bn=False,
                 **kwargs):
    features = []

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        img_input = input_tensor

    bn_axis = 3
    activation = get_swish(**kwargs)

    # Build stem
    x = img_input
    x = layers.Conv2D(round_filters(32, width_coefficient, depth_divisor), 3,
                      strides=(2, 2),
                      padding='same',
                      use_bias=False,
                      kernel_initializer=CONV_KERNEL_INITIALIZER,
                      name='stem_conv')(x)
    # x = BatchNormalization(freeze=freeze_bn, axis=bn_axis, name='stem_bn')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x)
    x = layers.Activation(activation, name='stem_activation')(x)

    # Build blocks
    num_blocks_total = sum(block_args.num_repeat for block_args in blocks_args)
    block_num = 0
    for idx, block_args in enumerate(blocks_args):
        assert block_args.num_repeat > 0
        # Update block input and output filters based on depth multiplier.
        block_args = block_args._replace(
            input_filters=round_filters(block_args.input_filters,
                                        width_coefficient, depth_divisor),
            output_filters=round_filters(block_args.output_filters,
                                         width_coefficient, depth_divisor),
            num_repeat=round_repeats(block_args.num_repeat, depth_coefficient))

        # The first block needs to take care of stride and filter size increase.
        drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
        x = mb_conv_block(x, block_args,
                          activation=activation,
                          drop_rate=drop_rate,
                          prefix='block{}a_'.format(idx + 1),
                          freeze_bn=freeze_bn
                          )
        block_num += 1
        if block_args.num_repeat > 1:
            # pylint: disable=protected-access
            block_args = block_args._replace(
                input_filters=block_args.output_filters, strides=[1, 1])
            # pylint: enable=protected-access
            for bidx in xrange(block_args.num_repeat - 1):
                drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
                block_prefix = 'block{}{}_'.format(
                    idx + 1,
                    string.ascii_lowercase[bidx + 1]
                )
                x = mb_conv_block(x, block_args,
                                  activation=activation,
                                  drop_rate=drop_rate,
                                  prefix=block_prefix,
                                  freeze_bn=freeze_bn
                                  )
                block_num += 1
        if idx < len(blocks_args) - 1 and blocks_args[idx + 1].strides[0] == 2:
            features.append(x)
        elif idx == len(blocks_args) - 1:
            features.append(x)
    return features


def EfficientNetB0(include_top=True, weights='imagenet', input_tensor=None,
                   input_shape=None, pooling=None, classes=1000, **kwargs):
    return EfficientNet(1.0, 1.0, 224, 0.2, model_name='efficientnet-b0',
                        include_top=include_top, weights=weights,
                        input_tensor=input_tensor, input_shape=input_shape,
                        pooling=pooling, classes=classes, **kwargs)


def EfficientNetB1(include_top=True, weights='imagenet', input_tensor=None,
                   input_shape=None, pooling=None, classes=1000, **kwargs):
    return EfficientNet(1.0, 1.1, 240, 0.2, model_name='efficientnet-b1',
                        include_top=include_top, weights=weights,
                        input_tensor=input_tensor, input_shape=input_shape,
                        pooling=pooling, classes=classes, **kwargs)


def EfficientNetB2(include_top=True, weights='imagenet', input_tensor=None,
                   input_shape=None, pooling=None, classes=1000, **kwargs):
    return EfficientNet(1.1, 1.2, 260, 0.3, model_name='efficientnet-b2',
                        include_top=include_top, weights=weights,
                        input_tensor=input_tensor, input_shape=input_shape,
                        pooling=pooling, classes=classes, **kwargs)


def EfficientNetB3(include_top=True, weights='imagenet', input_tensor=None,
                   input_shape=None, pooling=None, classes=1000, **kwargs):
    return EfficientNet(1.2, 1.4, 300, 0.3, model_name='efficientnet-b3',
                        include_top=include_top, weights=weights,
                        input_tensor=input_tensor, input_shape=input_shape,
                        pooling=pooling, classes=classes, **kwargs)


def EfficientNetB4(include_top=True, weights='imagenet', input_tensor=None,
                   input_shape=None, pooling=None, classes=1000, **kwargs):
    return EfficientNet(1.4, 1.8, 380, 0.4, model_name='efficientnet-b4',
                        include_top=include_top, weights=weights,
                        input_tensor=input_tensor, input_shape=input_shape,
                        pooling=pooling, classes=classes, **kwargs)


def EfficientNetB5(include_top=True, weights='imagenet', input_tensor=None,
                   input_shape=None, pooling=None, classes=1000, **kwargs):
    return EfficientNet(1.6, 2.2, 456, 0.4, model_name='efficientnet-b5',
                        include_top=include_top, weights=weights,
                        input_tensor=input_tensor, input_shape=input_shape,
                        pooling=pooling, classes=classes, **kwargs)


def EfficientNetB6(include_top=True, weights='imagenet', input_tensor=None,
                   input_shape=None, pooling=None, classes=1000, **kwargs):
    return EfficientNet(1.8, 2.6, 528, 0.5, model_name='efficientnet-b6',
                        include_top=include_top, weights=weights,
                        input_tensor=input_tensor, input_shape=input_shape,
                        pooling=pooling, classes=classes, **kwargs)


def EfficientNetB7(include_top=True, weights='imagenet', input_tensor=None,
                   input_shape=None, pooling=None, classes=1000, **kwargs):
    return EfficientNet(2.0, 3.1, 600, 0.5, model_name='efficientnet-b7',
                        include_top=include_top, weights=weights,
                        input_tensor=input_tensor, input_shape=input_shape,
                        pooling=pooling, classes=classes, **kwargs)
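
# Illustrative usage sketch (not part of the original file): this variant of
# EfficientNet returns a list of intermediate feature tensors rather than a
# compiled model, so a backbone can be wrapped like this (224x224x3 input for
# B0 is an assumption based on its default resolution):
#
#     from keras.models import Model
#     inputs = layers.Input(shape=(224, 224, 3))
#     features = EfficientNetB0(input_tensor=inputs)
#     backbone = Model(inputs, features)  # one output per appended feature map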
3259341
class Environment(object):
    def __init__(self, loader, autoescape):
        pass


class Template(object):
    def __init__(self, source, autoescape):
        pass


def select_autoescape(files=[]):
    def autoescape(template_name):
        pass
    return autoescape


class FileSystemLoader(object):
    def __init__(self, searchpath):
        pass
3376188
"""Start game"""

from Skier import Game


def main():
    game = Game()
    # if "keyboard_game=False", need to use "step(action)" to play
    game.start(keyboard_game=True, increase_speed=1, low_speed=6, max_speed=15)


if __name__ == '__main__':
    main()
3332411
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu

"""
Problem 8: find the largest 100 numbers among 1,000,000 numbers, keeping the
time complexity as low as possible.

Approach 1: local elimination. Take the first 100 elements and sort them as a
sequence L. Then take each remaining element x and compare it with the
smallest of the 100 sorted elements; if x is larger, drop that smallest
element and insert x into L using insertion sort. Repeat until all elements
are scanned. Complexity: O(1,000,000 * 100).

Approach 2: quicksort-style partitioning. After each partition, only keep the
part larger than the pivot; once that part shrinks to a bit over 100
elements, sort it with a conventional algorithm and take the first 100.
Complexity: O(1,000,000 * 100).

Approach 3: as mentioned in earlier problems, use a min-heap of 100 elements.
Complexity: O(1,000,000 * lg 100).

Implementation below:
"""

import heapq   # heap module
import random  # random number generation

test_list = []  # test list

for i in range(1000000):  # generate 1,000,000 numbers, each in [0, 100,000,000)
    test_list.append(random.random() * 100000000)

ret1 = heapq.nlargest(10, test_list)   # the 10 largest of the 1,000,000 numbers
ret2 = heapq.nsmallest(10, test_list)  # the 10 smallest of the 1,000,000 numbers
print(ret1, ret2)
51117
def is_string(thing):
    try:
        return isinstance(thing, basestring)
    except NameError:
        return isinstance(thing, str)
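
# Illustrative usage sketch (not part of the original file): on Python 3 the
# basestring lookup raises NameError, so the str check is used instead.
if __name__ == '__main__':
    print(is_string("hello"))  # True
    print(is_string(42))       # False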
3318167
from django.shortcuts import render

from .tasks import add


def calc(req):
    if req.method == "POST":
        add.send(float(req.POST["a"]), float(req.POST["b"]))
    return render(req, "calc.html")
3217966
<filename>input_data/mnist/mnist_input.py
# Copyright 2018 <NAME> All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import tensorflow as tf
import numpy as np
import os


def _single_process(image, label, specs, cropped_size):
    """Map function to process a single instance of the dataset object.

    Args:
        image: numpy array image object, (28, 28), 0 ~ 255 uint8;
        label: numpy array label, (,);
        specs: dataset specifications;
        cropped_size: image size after cropping.
    Returns:
        feature: a dictionary containing image and label.
    """
    if specs['distort']:
        if cropped_size <= specs['image_size']:
            if specs['split'] == 'train':
                # random cropping
                image = tf.random_crop(image, [cropped_size, cropped_size])
                # random rotation within -15° ~ 15°; the angle must be a
                # tensor so a fresh value is drawn per example (a Python-level
                # random.uniform would be evaluated only once, when this map
                # function is traced into the graph).
                image = tf.contrib.image.rotate(
                    image,
                    tf.random_uniform([], -0.26179938779, 0.26179938779))
                # expand image dimensions into (HWC)
                image = tf.expand_dims(image, -1)
            elif specs['split'] == 'test':
                # expand image dimensions into (HWC)
                image = tf.expand_dims(image, -1)
                # central cropping (requires HWC)
                image = tf.image.resize_image_with_crop_or_pad(
                    image, cropped_size, cropped_size)
    else:
        # expand image dimensions into (HWC)
        image = tf.expand_dims(image, -1)  # (HWC)
    # convert from 0 ~ 255 to 0. ~ 1.
    image = tf.cast(image, tf.float32) * (1. / 255.)
    # transpose image into (CHW)
    image = tf.transpose(image, [2, 0, 1])  # (CHW)

    feature = {
        'image': image,
        'label': tf.one_hot(label, 10)
    }
    return feature


def _feature_process(feature):
    """Map function to process batched data inside the feature dictionary.

    Args:
        feature: a dictionary containing image and label.
    Returns:
        batched_feature: a dictionary containing images and labels.
    """
    batched_feature = {
        'images': feature['image'],
        'labels': feature['label'],
    }
    return batched_feature


def inputs(total_batch_size, num_gpus, max_epochs, cropped_size,
           data_dir, split, distort=True):
    """Construct inputs for the mnist dataset.

    Args:
        total_batch_size: total number of images per batch;
        num_gpus: number of GPUs available to use;
        max_epochs: maximum epochs to go through the model;
        cropped_size: image size after cropping;
        data_dir: path to the mnist tfrecords data directory;
        split: 'train' or 'test', which split of the dataset to read from;
        distort: whether to distort the images, including random cropping
            and rotations.
    Returns:
        batched_dataset: Dataset object; each instance is a feature dictionary.
        specs: dataset specifications.
    """
    assert split == 'train' or split == 'test'

    """Dataset specs"""
    specs = {
        'split': split,
        'total_size': None,  # total size of one epoch
        'steps_per_epoch': None,  # number of steps per epoch
        'total_batch_size': int(total_batch_size),
        'num_gpus': int(num_gpus),
        'batch_size': int(total_batch_size / num_gpus),
        'max_epochs': int(max_epochs),  # number of epochs to repeat
        'image_size': 28,
        'depth': 1,
        'num_classes': 10,
        'distort': distort
    }

    if cropped_size is None:
        cropped_size = specs['image_size']
    assert cropped_size <= specs['image_size']

    """Load data from numpy array file"""
    with np.load(os.path.join(data_dir, 'mnist.npz')) as f:
        images, labels = f['x_%s' % split], f['y_%s' % split]
        # images: 0 ~ 255 uint8
        # labels: 0 ~ 9 uint8
        assert images.shape[0] == labels.shape[0]
        specs['total_size'] = int(images.shape[0])
        specs['steps_per_epoch'] = int(specs['total_size'] // specs['total_batch_size'])

    """Process dataset object"""
    # read from numpy array
    dataset = tf.data.Dataset.from_tensor_slices((images, labels))  # ((28, 28), (,))
    # prefetch examples
    dataset = dataset.prefetch(
        buffer_size=specs['batch_size'] * specs['num_gpus'] * 2)
    # shuffle (if 'train') and repeat `max_epochs`
    if split == 'train':
        dataset = dataset.apply(tf.contrib.data.shuffle_and_repeat(
            buffer_size=specs['batch_size'] * specs['num_gpus'] * 10,
            count=specs['max_epochs']))
    else:
        dataset = dataset.repeat(specs['max_epochs'])
    # process single example
    dataset = dataset.map(
        lambda image, label: _single_process(image, label, specs, cropped_size),
        num_parallel_calls=3)
    specs['image_size'] = cropped_size  # after processing a single example,
                                        # the image has been cropped to cropped_size.
    # stack into batches
    batched_dataset = dataset.batch(specs['batch_size'])
    # process into feature
    batched_dataset = batched_dataset.map(
        _feature_process,
        num_parallel_calls=3)
    # prefetch to improve the performance
    batched_dataset = batched_dataset.prefetch(specs['num_gpus'])

    return batched_dataset, specs
StarcoderdataPython
1769181
""" udf.py written in Python3 author: <NAME> <<EMAIL>> """ import numpy as np from refunction import Refunction # Ensure that pandas DataFrame columns are not changed def required_dataframe_columns(columns): """ Specify required columns in pandas DataFrames Parameters ---------- columns : list of str Column names to ensure are represented. Returns ------- bool Whether or not the requirement columns are in the DataFrame """ def _required_dataframe_columns(df, columns): return np.isin(columns, df.columns).all() return Refunction(_required_dataframe_columns, columns=columns)
StarcoderdataPython
3380454
<gh_stars>1-10
from unittest import main

from .helpers import api, test_base

request_dynamic_parts = ["1337", "3.141592653589793", "dynamic_part_of_url"]
request_dynamic = "/".join(str(x) for x in request_dynamic_parts)


class RouteDynamicCases:
    class RouteDynamicBase(test_base.TestBase):
        def test_dynamic_int_in_json(self):
            json = self.res.json()
            arg = 'dynamic_int'
            self.assertIn(arg, json)
            self.assertEqual(json[arg], request_dynamic_parts[0])

        def test_dynamic_float_in_json(self):
            json = self.res.json()
            arg = 'dynamic_float'
            self.assertIn(arg, json)
            self.assertEqual(json[arg], request_dynamic_parts[1])

        def test_dynamic_path_in_json(self):
            json = self.res.json()
            arg = 'dynamic_path'
            self.assertIn(arg, json)
            self.assertEqual(json[arg], request_dynamic_parts[2])


class TestRouteDynamicGet(RouteDynamicCases.RouteDynamicBase,
                          test_base.TestBase, api.APITests):
    @classmethod
    def setup(cls):
        cls.res = api.get(
            cls.v['AGW'] + '/test_route_dynamic_get/' + request_dynamic
        )


class TestRouteDynamicPost(RouteDynamicCases.RouteDynamicBase,
                           test_base.TestBase, api.APITests):
    @classmethod
    def setup(cls):
        cls.res = api.post(
            cls.v['AGW'] + '/test_route_dynamic_post/' + request_dynamic,
            json={"empty": "content"}
        )


class TestRouteDynamicPut(RouteDynamicCases.RouteDynamicBase,
                          test_base.TestBase, api.APITests):
    @classmethod
    def setup(cls):
        cls.res = api.put(
            cls.v['AGW'] + '/test_route_dynamic_put/' + request_dynamic,
            json={"empty": "content"}
        )


class TestRouteDynamicPatch(RouteDynamicCases.RouteDynamicBase,
                            test_base.TestBase, api.APITests):
    @classmethod
    def setup(cls):
        cls.res = api.patch(
            cls.v['AGW'] + '/test_route_dynamic_patch/' + request_dynamic,
            json={"empty": "content"}
        )


class TestRouteDynamicMissingArgs(test_base.TestBase, api.APITests):
    @classmethod
    def setup(cls):
        cls.res = api.get(
            cls.v['AGW'] + '/test_route_dynamic_get/1234/'
        )
        cls.expected_status_code = 404
        cls.expected_content_type = "text/html; charset=UTF-8"


if __name__ == '__main__':
    main()
StarcoderdataPython
4832090
<reponame>emcannaert/CMSSW import FWCore.ParameterSet.Config as cms primitiveRPCProducer = cms.EDProducer("L1TMuonRPCTriggerPrimitivesProducer", Primitiverechitlabel = cms.InputTag("rpcdigis"), Mapsource = cms.string('L1Trigger/L1TMuon/data/rpc/Linkboard_rpc_roll_mapping_lb_chamber2.txt'), ApplyLinkBoardCut = cms.bool(True), LinkBoardCut = cms.int32(2), # Number of clusters per linkboard greater than (default >2) are rejected ClusterSizeCut = cms.int32(3), # Clustersize greater than (default >3) is rejected maskSource = cms.string('File'), maskvecfile = cms.FileInPath('RecoLocalMuon/RPCRecHit/data/RPCMaskVec.dat'), deadSource = cms.string('File'), deadvecfile = cms.FileInPath('RecoLocalMuon/RPCRecHit/data/RPCDeadVec.dat'), recAlgoConfig = cms.PSet(), recAlgo = cms.string('RPCRecHitStandardAlgo') ) from Configuration.Eras.Modifier_phase2_muon_cff import phase2_muon phase2_muon.toModify(primitiveRPCProducer, ApplyLinkBoardCut = cms.bool(False)) phase2_muon.toModify(primitiveRPCProducer, ClusterSizeCut = cms.int32(4))
StarcoderdataPython
1614772
<reponame>Hvass-Labs/SwarmOps ######################################################################## # SwarmOps - Heuristic optimization for Python. # Copyright (C) 2003-2016 <NAME>. # See the file README.md for instructions. # See the file LICENSE.txt for license details. # SwarmOps on the internet: http://www.Hvass-Labs.org/ ######################################################################## ######################################################################## # Parent-classes for doing optimization. These provide various logistics, # printing of status messages, running multiple optimizations, etc. ######################################################################## import numpy as np from swarmops.FitnessTrace import FitnessTrace from swarmops import tools ################################################## class SingleRun: """ Parent-class for performing a single optimization run. The class provides various logistics that are common to all optimizers. If you make a new optimizer, then you should derive from this class. """ def __init__(self, max_evaluations, run_number=0, trace_len=0, display_interval=0): """ Create object instance and initialize variables for keeping track of the optimization progress. Then call self._optimize() to perform the actual optimization. :param max_evaluations: Maximum number of fitness evaluations for the problem. :param run_number: The optimization run number, if performing multiple optimization runs. :param trace_len: Approximate length of the fitness-trace. Default is zero which means that no fitness-trace will be created. :param display_interval: Approximate interval between printing status messages. Default is zero which means that no status messages will be printed. :return: Object instance. """ # Copy arguments to instance variables. self.max_evaluations = max_evaluations self.run_number = run_number self.display_interval = display_interval # Initialize the counter for the next status-display. self.display_next = 0 # Create an object used for tracing the fitness at regular intervals. self.fitness_trace = FitnessTrace(trace_len, max_evaluations) # Initialize best-known position and fitness. self.best = None self.best_fitness = np.inf # Print status at the beginning of the optimization run. self._start_run() # Perform the actual optimization iterations. self._optimize() # Print status at the end of the optimization run. self._end_run() def _optimize(self): """ Perform the actual optimization. This function should be implemented by the child-class. The function does not return anything, instead it will call self._update_best() to update the best-known solution during optimization. """ # Raise an exception if the child-class has not implemented this function. raise NotImplementedError def _start_run(self): """ Print status at the beginning of the optimization run. """ if self.display_interval > 0: msg = "Starting optimization run {0} using {1} ..." print(msg.format(self.run_number, self.name)) def _iteration(self, i): """ Print status and trace fitness during optimization. This function should be called regularly during optimization. """ if self.display_interval > 0 and i >= self.display_next: # Print the status. msg = "Run: {0}, Iteration: {1}, Best Fitness: {2:.4e}" print(msg.format(self.run_number, i, self.best_fitness)) # Increment the counter for the next status-display. self.display_next = i + self.display_interval # Trace the fitness. 
self.fitness_trace.trace(i, self.best_fitness) def _end_run(self): """ Print status at the end of the optimization run. """ if self.display_interval > 0: # Print the status. msg = "Finished optimization run {0}, Best fitness: {1:.4e}" print(msg.format(self.run_number, self.best_fitness)) def _update_best(self, fitness, x): """ Update the best-known solution and fitness if an improvement. WARNING: This function does NOT copy the array x so you must ensure that the optimizer does not modify the array you pass in as x here. :param fitness: New fitness. :param x: New solution. :return: Boolean whether the fitness was an improvement. """ # If the fitness is an improvement over the best-known fitness. improvement = fitness < self.best_fitness if improvement: # Update the best-known fitness and position. self.best_fitness = fitness self.best = x return improvement ################################################## class MultiRun: """ Perform multiple optimization runs with an optimizer and calculate statistics on the results. This has been separated into its own class for the sake of modularity so it is easier to read and maintain. There is only a tiny overhead of instantiating the SingleRun-class for each run. """ def __init__(self, optimizer, num_runs, problem, parallel=True, *args, **kwargs): """ Create object instance and perform multiple optimization runs. To retrieve the results, access the object variables afterwards. The parameters are the same as for the Optimize-class, except for the following: :param optimizer: Class for the optimizer e.g. PSO or DE. :param num_runs: Number of optimization runs to perform. :param problem: The problem to be optimized. Instance of Problem-class. :param parallel: Perform the optimization runs in parallel (True) or serial (False). :param args: Arguments to pass along to the Optimize-class. :param kwargs: Arguments to pass along to the Optimize-class. :return: Object instance. Get the optimization results from the object's variables. - best_solution is the best-found solution. - best_fitness is the associated fitness of the best-found solution. - fitness_trace is a 2-d numpy array with the fitness trace of each run. """ # Copy arguments to instance variables. self.problem = problem self.optimizer = optimizer # Store the args and kwargs to be passed on to the optimizer. self.args = args self.kwargs = kwargs if not parallel: # Run the optimizer multiple times on one processor. self.runs = [self._optimize(run_number=i) for i in range(num_runs)] else: import multiprocessing as mp # Create a pool of workers sized according to the CPU cores available. pool = mp.Pool() # Run the optimizer multiple times in parallel. # We must use a helper-function for this. self.runs = pool.map(self._optimize_parallel, range(num_runs)) # Close the pool of workers and wait for them all to finish. pool.close() pool.join() # Put the best solutions from all the optimization runs into an array. self.solution = np.array([run.best for run in self.runs]) # Put the best fitness from all the optimization runs into an array. self.fitness = np.array([run.best_fitness for run in self.runs]) # Put the fitness traces from all the optimization runs into an array. # This only works when the fitness-traces are all the same length. # If you make changes to the optimizers so they can stop early, then # you may have to change this. self.fitness_trace = np.array([run.fitness_trace.fitness for run in self.runs]) # Index for the best fitness (minimization). 
i = np.argmin(self.fitness)

        # Best optimization run. This is an instance of the SingleRun-class.
        self.best_run = self.runs[i]

        # Best fitness of all the optimization runs.
        self.best_fitness = self.fitness[i]

        # Best solution of all the optimization runs.
        self.best = self.solution[i]

    def _optimize(self, run_number):
        """
        Helper-function used for execution of an optimization run. Non-parallel.

        :param run_number: Counter for the optimization run.
        :return: Instance of the SingleRun-class.
        """

        return self.optimizer(problem=self.problem, run_number=run_number,
                              *self.args, **self.kwargs)

    def _optimize_parallel(self, run_number):
        """
        Helper-function used for parallel execution of an optimization run.

        :param run_number: Counter for the optimization run.
        :return: Instance of the SingleRun-class.
        """

        # Create a new Pseudo-Random Number Generator for the thread.
        tools.new_prng()

        # Do the optimization.
        return self._optimize(run_number=run_number)

    def refine(self):
        """
        Refine the best result from heuristic optimization using SciPy's
        L-BFGS-B method. This may significantly improve the results on some
        optimization problems, but it is sometimes very slow to execute.

        NOTE: This function imports SciPy, which should make it possible to
        use the rest of this source-code library even if SciPy is not
        installed. SciPy should first be loaded when calling this function.

        :return:
            A tuple with:
            - The best fitness found.
            - The best solution found.
        """

        # SciPy requires bounds in another format.
        bounds = list(zip(self.problem.lower_bound, self.problem.upper_bound))

        # Start SciPy optimization at best found solution.
        import scipy.optimize
        res = scipy.optimize.minimize(fun=self.problem.fitness,
                                      x0=self.best,
                                      method="L-BFGS-B",
                                      bounds=bounds)

        # Get best fitness and parameters.
        refined_fitness = res.fun
        refined_solution = res.x

        return refined_fitness, refined_solution

    def print_statistics(self):
        """
        Print statistics for the fitness.
        """

        # Print header.
        print("{0} - Optimized by {1}".format(self.problem.name_full, self.optimizer.name))
        print("Fitness Statistics:")

        # Print mean and standard deviation.
        print("- Mean:\t\t{0:.4e}".format(self.fitness.mean()))
        print("- Std.:\t\t{0:.4e}".format(self.fitness.std()))

        # Print quartiles. Note that np.percentile() takes percentages
        # in the range 0-100, not fractions.
        print("- Min:\t\t{0:.4e}".format(self.fitness.min()))
        print("- 1st Qrt.:\t{0:.4e}".format(np.percentile(self.fitness, 25)))
        print("- Median:\t{0:.4e}".format(np.percentile(self.fitness, 50)))
        print("- 3rd Qrt.:\t{0:.4e}".format(np.percentile(self.fitness, 75)))
        print("- Max:\t\t{0:.4e}".format(self.fitness.max()))

    def plot_fitness_trace(self, y_log_scale=True, filename=None):
        """
        Plot the fitness traces.

        NOTE: This function imports matplotlib, which should make it possible
        to use the rest of this source-code library even if it is not
        installed. matplotlib should first be loaded when calling this function.

        :param y_log_scale: Use log-scale for y-axis.
        :param filename: Output filename e.g. "foo.svg". If None then plot to screen.
        :return: Nothing.
        """

        import matplotlib.pyplot as plt

        # Setup plotting.
        plt.grid()

        # Axis labels.
        plt.xlabel("Iteration")
        plt.ylabel("Fitness (Lower is better)")

        # Title.
        title = "{0} - Optimized by {1}".format(self.problem.name_full, self.optimizer.name)
        plt.title(title)

        # Use log-scale for Y-axis.
        if y_log_scale:
            plt.yscale("log", nonposy="clip")

        # Plot the fitness-trace for each optimization run.
        for run in self.runs:
            # Array with iteration counter for the optimization run.
            iteration = run.fitness_trace.iteration

            # Array with fitness-trace for the optimization run.
            fitness_trace = run.fitness_trace.fitness

            # Plot the fitness-trace. The explicit color keyword is used on
            # its own; combining it with a format-string color would be
            # redundant, as the keyword takes precedence.
            plt.plot(iteration, fitness_trace, '-', color='black', alpha=0.25)

        # Plot to screen or file.
        if filename is None:
            # Plot to screen.
            plt.show()
        else:
            # Plot to file.
            plt.savefig(filename, bbox_inches='tight')
            plt.close()


##################################################
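
# Minimal usage sketch (an addition for illustration). `PSO` is only an
# example name taken from the docstrings above, and `MyProblem` stands for a
# hypothetical problem instance; `max_evaluations` and `trace_len` are
# forwarded to the SingleRun constructor via **kwargs.
#
#   from swarmops.PSO import PSO    # assumed module path
#
#   multi = MultiRun(optimizer=PSO, num_runs=8, problem=MyProblem(),
#                    parallel=True, max_evaluations=20000, trace_len=100)
#   multi.print_statistics()
#   refined_fitness, refined_solution = multi.refine()
#   multi.plot_fitness_trace(filename="trace.svg")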
StarcoderdataPython
4840940
#!/usr/bin/env python3
#
# Authors: <NAME>

import sys
import json
import itertools
import logging

from hypertrace import do_hypertrace, mkabs

logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logger = logging.getLogger(__name__)

clean = False
if len(sys.argv) > 1:
    configfile = sys.argv[1]
    if "--clean" in sys.argv:
        clean = True
else:
    configfile = "./config.json"
configfile = mkabs(configfile)
logger.info("Using config file `%s`", configfile)

with open(configfile) as f:
    config = json.load(f)

wavelength_file = mkabs(config["wavelength_file"])
reflectance_file = mkabs(config["reflectance_file"])
if "libradtran_template_file" in config:
    raise Exception("`libradtran_template_file` is deprecated. Use `rtm_template_file` instead.")
rtm_template_file = mkabs(config["rtm_template_file"])
lutdir = mkabs(config["lutdir"])
outdir = mkabs(config["outdir"])

if clean and outdir.exists():
    import shutil
    shutil.rmtree(outdir)

isofit_config = config["isofit"]
hypertrace_config = config["hypertrace"]

# Make RTM paths absolute
vswir_settings = isofit_config["forward_model"]["radiative_transfer"]["radiative_transfer_engines"]["vswir"]
for key in ["lut_path", "template_file", "engine_base_dir"]:
    if key in vswir_settings:
        vswir_settings[key] = str(mkabs(vswir_settings[key]))

# Create iterable config permutation object
ht_iter = itertools.product(*hypertrace_config.values())
logger.info("Starting Hypertrace workflow.")
for ht in ht_iter:
    argd = dict()
    for key, value in zip(hypertrace_config.keys(), ht):
        argd[key] = value
    logger.info("Running config: %s", argd)
    do_hypertrace(isofit_config, wavelength_file, reflectance_file,
                  rtm_template_file, lutdir, outdir, **argd)
logger.info("Workflow completed successfully.")
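
# For reference, the shape of config.json implied by the reads above; every
# value below is a placeholder added for illustration, not taken from the
# original project:
#
#   {
#     "wavelength_file": "path/to/wavelengths.txt",
#     "reflectance_file": "path/to/reflectance.dat",
#     "rtm_template_file": "path/to/rtm_template.json",
#     "lutdir": "luts",
#     "outdir": "output",
#     "isofit": {
#       "forward_model": {
#         "radiative_transfer": {
#           "radiative_transfer_engines": {
#             "vswir": {"lut_path": "...", "template_file": "...",
#                       "engine_base_dir": "..."}
#           }
#         }
#       }
#     },
#     "hypertrace": {"some_parameter": [0.1, 0.2], "other_parameter": [1]}
#   }
#
# The script takes the Cartesian product of all "hypertrace" lists and calls
# do_hypertrace once per combination.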
StarcoderdataPython
141659
<filename>examples/simple.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. codeauthor:: <NAME> <<EMAIL>>
"""
import os

from flask import Flask

from flask_tat.engine import TATClient

os.environ.setdefault('TAT_URL', 'http://192.168.3.11')
os.environ.setdefault('TAT_USERNAME', 'test')
os.environ.setdefault('TAT_PASSWORD', '<PASSWORD>')
os.environ.setdefault("TAT_TOPIC", "MyTopic")

app = Flask(__name__)
app.config.update(dict(
    TAT_URL=os.getenv('TAT_URL'),
    TAT_USERNAME=os.getenv('TAT_USERNAME'),
    TAT_PASSWORD=os.getenv('TAT_PASSWORD'),
))
app.app_context().push()

tat_client = TATClient(app)

print(tat_client.message_list(
    topic=os.getenv('TAT_TOPIC'),
    limit=5
))
StarcoderdataPython
4842565
<gh_stars>10-100 from qtools.qtpy import QtCore, QtGui from qtools.qtpy.QtCore import Qt from galry import Manager, ordict import numpy as np __all__ = ['BindingManager', 'Bindings'] class BindingManager(Manager): """Manager several sets of bindings (or interaction modes) and allows to switch between several modes. """ def reset(self): """Reset the modes.""" self.bindings = [] self.index = 0 def add(self, *bindings): """Add one or several bindings to the binding manager. Arguments: * bindings: a list of classes deriving from UserActionBinding. """ bindings = [b for b in bindings if b not in self.bindings] self.bindings.extend(bindings) def remove(self, binding): """Remove a binding. Arguments: * binding: the binding to remove. """ if binding in self.bindings: self.bindings.remove(binding) def set(self, binding): """Set the given binding as the active one. Arguments: * binding: the current binding. """ if not isinstance(binding, Bindings): # here, we assume that binding is a class, so we take the first # existing binding that is an instance of this class binding = [b for b in self.bindings if isinstance(b, binding)][0] self.add(binding) self.index = self.bindings.index(binding) return self.get() def get(self): """Return the current binding.""" if self.index < len(self.bindings): return self.bindings[self.index] else: return None def switch(self): """Switch the active binding. The bindings cycle through all bindings. """ self.index = np.mod(self.index + 1, len(self.bindings)) return self.get() class Bindings(object): """Base class for action-events bindings set. An instance of this class contains all bindings between user actions, and interaction events, that are possible within a given interaction mode. An interaction mode is entirely defined by such set of bindings. The GalryWidget provides a mechanism for switching between successive modes. """ def __init__(self): self.base_cursor = 'ArrowCursor' self.text = None self.binding = ordict() self.descriptions = ordict() self.initialize_default() self.initialize() def set_base_cursor(self, cursor=None): """Define the base cursor in this mode.""" if cursor is None: cursor = 'OpenHandCursor' # set the base cursor self.base_cursor = cursor def get_base_cursor(self): return self.base_cursor def initialize_default(self): """Set bindings that are common to any interaction mode.""" self.set('KeyPress', 'SwitchInteractionMode', key='I') def initialize(self): """Registers all bindings through commands to self.set(). To be overriden. """ pass def set(self, action, event, key_modifier=None, key=None, param_getter=None, description=None): """Register an action-event binding. Arguments: * action: a UserAction string. * event: a InteractionEvent string. * key_modifier=None: the key modifier as a string. * key=None: when the action is KeyPress, the key that was pressed. 
* param_getter=None: a function that takes an ActionParameters dict as argument and returns a parameter object that will be passed to InteractionManager.process_event(event, parameter) """ if isinstance(key, basestring): key = getattr(Qt, 'Key_' + key) if isinstance(key_modifier, basestring): key_modifier = getattr(Qt, 'Key_' + key_modifier) # if param_getter is a value and not a function, we convert it # to a constant function if not hasattr(param_getter, '__call__'): param = param_getter param_getter = lambda p: param self.binding[(action, key_modifier, key)] = (event, param_getter) if description: self.descriptions[(action, key_modifier, key)] = description def get(self, action, key_modifier=None, key=None): """Return the event and parameter getter function associated to a function. Arguments: * action: the user action. * key_modifier=None: the key modifier. * key=None: the key if the action is `KeyPress`. """ return self.binding.get((action, key_modifier, key), (None, None)) def get_description(self, action, key_modifier=None, key=None): return self.descriptions.get((action, key_modifier, key), None) # Help methods # ------------ def generate_text(self): special_keys = { None: '', QtCore.Qt.Key_Control: 'CTRL', QtCore.Qt.Key_Shift: 'SHIFT', QtCore.Qt.Key_Alt: 'ALT', } texts = {} for (action, key_modifier, key), (event, _) in self.binding.iteritems(): desc = self.get_description(action, key_modifier, key) # key string if key: key = QtGui.QKeySequence(key).toString() else: key = '' # key modifier key_modifier = special_keys[key_modifier] if key_modifier: key_modifier = key_modifier + ' + ' # get binding text if action == 'KeyPress': bstr = 'Press ' + key_modifier + key + ' : ' + event else: bstr = key_modifier + action + ' : ' + event if desc: bstr += ' ' + desc if event not in texts: texts[event] = [] texts[event].append(bstr) # sort events self.text = "\n".join(["\n".join(sorted(texts[key])) for key in sorted(texts.iterkeys())]) def get_text(self): if not self.text: self.generate_text() return self.text def __repr__(self): return self.get_text()
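

# Illustrative subclass added for clarity. Only `Bindings`, `set` and
# `set_base_cursor` come from the code above; the action and event names
# below (except 'KeyPress') are hypothetical placeholders.
class ExampleBindings(Bindings):
    def initialize(self):
        self.set_base_cursor('OpenHandCursor')
        # Forward the mouse displacement to the interaction manager.
        self.set('LeftClickMove', 'Pan',
                 param_getter=lambda p: p['mouse_position_diff'],
                 description='pan the view')
        # A constant parameter is converted to a getter automatically by set().
        self.set('KeyPress', 'Reset', key='R', description='reset the view')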
StarcoderdataPython
4819357
import sys

import click
from PyInquirer import prompt

from contxt.cli.clients import Clients
from contxt.utils.serializer import Serializer
from contxt.utils.config import ContextException


@click.group()
def env() -> None:
    """Contxt Environment Functions"""


@env.command()
@click.pass_obj
def current(clients: Clients) -> None:
    """Get current environment settings"""
    contxt_env = clients.contxt_env

    print('=== Defaults ===')
    for key, val in contxt_env.config.defaults.items():
        print(f'  {key} -> {val}')

    print('=== Context ====')
    for service, environment in contxt_env.config.currentContext.items():
        print(f'  {service} -> {environment.environment}')
        try:
            service_env = contxt_env.get_config_for_service_name(service)
            print(f'    {service_env.clientId}, {service_env.apiEnvironment.baseUrl}, {service_env.apiEnvironment.authProvider}')
        except ContextException:
            print(f'Context not found for service {service} and env {environment.environment}')


@env.command()
@click.argument('SERVICE_NAME')
@click.pass_obj
def detail(clients: Clients, service_name: str) -> None:
    contxt_env = clients.contxt_env
    # get_config_for_service_name raises ContextException for unknown
    # services (see `current` above), so catch it as well as a None result.
    try:
        service_env = contxt_env.get_config_for_service_name(service_name)
    except ContextException:
        service_env = None
    if not service_env:
        print(f'Service not found for: {service_name}')
        sys.exit(1)
    print(Serializer.to_pretty_cli(service_env))


@env.command()
@click.argument('SERVICE_NAME')
@click.pass_obj
def add(clients: Clients, service_name: str) -> None:
    contxt_env = clients.contxt_env

    cli_or_machine = {
        'type': 'list',
        'name': 'selected_type',
        'message': f'Choose authentication method for new context for {service_name}',
        'choices': ['CLI Auth', 'Machine Auth']
    }

    # PyInquirer exposes `prompt` as a function, so it is called directly.
    answers = prompt(cli_or_machine)

    if answers['selected_type'] == 'CLI Auth':
        questions = [
            {
                'type': 'input',
                'name': 'auth_provider',
                'message': 'Enter your auth provider (X.auth0.com usually)'
            },
            {
                'type': 'input',
                'name': 'base_url',
                'message': 'Enter base URL for service'
            },
            {
                'type': 'input',
                'name': 'client_id',
                'message': 'Enter Client ID for service'
            },
            {
                'type': 'input',
                'name': 'environment',
                'message': 'What name would you like to use for this environment?'
            }
        ]
        answers = prompt(questions)
        print(answers)
    # Machine auth
    else:
        pass


@env.command()
@click.pass_obj
@click.argument('SERVICE_NAME')
def set_context(clients: Clients, service_name: str) -> None:
    """Set context for a particular service"""
    contxt_env = clients.contxt_env

    service_envs = contxt_env.get_possible_configs_for_service_name(service_name)

    if not len(service_envs):
        print(f'No environments found for service: {service_name}')
        sys.exit(1)

    choices = {
        'type': 'list',
        'name': 'selected_context',
        'message': f'Choose context for {service_name}',
        'choices': [f'{env.environment}' for env in service_envs]
    }

    answers = prompt(choices)

    contxt_env.set_context_for_service_name(service_name, answers['selected_context'])
    print(answers)
StarcoderdataPython