Columns: id: string (2 to 8 chars) / text: string (16 to 264k chars) / dataset_id: string (1 distinct value)
1709006
<reponame>mounaiban/padsweb<filename>padsweb/forms.py
#
#
# Public Archive of Days Since Timers
# Form Classes
#
#

from django import forms
from django.utils import timezone
from padsweb.settings import *
from padsweb.strings import labels
from padsweb.misc import get_timezones_all

# Python Beginner's PROTIP:
#
# Due to the way Django (1.11.2) initialises forms, for some of the
# Fields in the Forms herein, the initial state had to be set from the
# __init__ constructor, using a dictionary named 'initial'.
# A good example is ChoiceField.
#
# See: https://stackoverflow.com/a/11400559
# See Also: http://avilpage.com/2015/03/django-form-gotchas-dynamic-initial.html

class TimerGroupForm(forms.Form):
    timer_group = forms.ChoiceField(
        label=labels['TIMER_GROUP'],)

    def __init__(self, *args, **kwargs):
        # Pop the custom kwarg with a default instead of a bare
        # except/pass, which could leave the name undefined below.
        timer_group_choices = kwargs.pop('timer_group_choices', [])
        super().__init__(*args, **kwargs)
        # TODO: Find out why choices must be extended in both choices
        # and widget.choices, and why widget.choices wasn't automatically
        # extended along with choices.
        self.fields['timer_group'].widget.choices.extend(
            timer_group_choices)
        self.fields['timer_group'].choices.extend(
            timer_group_choices)

class TimerGroupNamesForm(forms.Form):
    group_names = forms.CharField(
        label=labels['TIMER_GROUP_FIELD'],
        max_length=MAX_MESSAGE_LENGTH_SHORT,
        min_length=1,)

class QuickListImportForm(TimerGroupForm):
    password = forms.CharField(
        label=labels['PASSWORD_QL'],
        max_length=MAX_MESSAGE_LENGTH_SHORT,
        strip=True,)
    timer_group = forms.ChoiceField(
        label=labels['QL_IMPORT_TO_GROUP'],
        choices=[('', labels['NONE'])],
        required=False,)

class TimerRenameForm(forms.Form):
    description = forms.CharField(
        label=labels['DESCRIPTION'],
        max_length=MAX_MESSAGE_LENGTH_SHORT,
        strip=True,)

class NewTimerForm(forms.Form):
    description = forms.CharField(
        label=labels['DESCRIPTION'],
        max_length=MAX_MESSAGE_LENGTH_SHORT,)
    first_history_message = forms.CharField(
        label=labels['TIMER_FIRST_HISTORY'],
        max_length=MAX_MESSAGE_LENGTH_SHORT,
        initial=labels['TIMER_DEFAULT_CREATION_REASON'],)
    year = forms.DecimalField(
        label='Year',
        max_value=9999,
        min_value=1,
        max_digits=4,
        decimal_places=0,)
    month = forms.ChoiceField(
        label=labels['MONTH'],
        choices=(
            (1, labels['GREGORIAN_MONTH_1']),
            (2, labels['GREGORIAN_MONTH_2']),
            (3, labels['GREGORIAN_MONTH_3']),
            (4, labels['GREGORIAN_MONTH_4']),
            (5, labels['GREGORIAN_MONTH_5']),
            (6, labels['GREGORIAN_MONTH_6']),
            (7, labels['GREGORIAN_MONTH_7']),
            (8, labels['GREGORIAN_MONTH_8']),
            (9, labels['GREGORIAN_MONTH_9']),
            (10, labels['GREGORIAN_MONTH_10']),
            (11, labels['GREGORIAN_MONTH_11']),
            (12, labels['GREGORIAN_MONTH_12']),
        ),
    )
    day = forms.DecimalField(
        label=labels['DAY'],
        max_digits=2,
        max_value=31,
        min_value=1,
        decimal_places=0,)
    hour = forms.DecimalField(
        label=labels['HOUR'],
        max_digits=2,
        max_value=23,
        min_value=0,
        decimal_places=0,)
    minute = forms.DecimalField(
        label=labels['MINUTE'],
        max_digits=2,
        max_value=59,
        min_value=0,
        decimal_places=0,)
    second = forms.DecimalField(
        label=labels['SECOND'],
        max_digits=2,
        max_value=59,
        min_value=0,
        decimal_places=0,)
    use_current_date_time = forms.BooleanField(
        label=labels['TIMER_USE_CURRENT_DATE_TIME'],
        required=False,
        initial=False,)
    historical = forms.BooleanField(
        label=labels['TIMER_CREATE_HISTORICAL'],
        required=False,
        initial=False,)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Important!
        self.start_datetime = timezone.now()
        self.initial = {
            'year': self.start_datetime.year,
            'month': self.start_datetime.month,
            'day': self.start_datetime.day,
            'hour': self.start_datetime.hour,
            'minute': self.start_datetime.minute,
            'second': self.start_datetime.second,
        }

class NewTimerGroupForm(forms.Form):
    name = forms.CharField(
        label=labels['NAME'],
        strip=True,
        max_length=MAX_NAME_LENGTH_SHORT,)

class PasswordChangeForm(forms.Form):
    old_password = forms.CharField(
        label=labels['PASSWORD_CURRENT'],
        widget=forms.PasswordInput,
        strip=False,
        max_length=MAX_MESSAGE_LENGTH_SHORT,
        min_length=MIN_USER_PASSWORD_LENGTH,)
    new_password = forms.CharField(
        label=labels['PASSWORD_NEW'],
        widget=forms.PasswordInput,
        strip=False,
        max_length=MAX_MESSAGE_LENGTH_SHORT,
        min_length=MIN_USER_PASSWORD_LENGTH,)
    new_password_confirm = forms.CharField(
        label=labels['PASSWORD_NEW_CONFIRM'],
        widget=forms.PasswordInput,
        strip=False,
        max_length=MAX_MESSAGE_LENGTH_SHORT,
        min_length=MIN_USER_PASSWORD_LENGTH,)
    user_id = forms.CharField(
        widget=forms.HiddenInput,)

class SignInForm(forms.Form):
    username = forms.SlugField(
        label=labels['USERNAME'],
        max_length=MAX_NAME_LENGTH_SHORT,
    )
    password = forms.CharField(
        label=labels['PASSWORD'],
        widget=forms.PasswordInput,
        strip=False,
        max_length=MAX_MESSAGE_LENGTH_SHORT,
        min_length=1,)

class SignInQuickListForm(forms.Form):
    password = forms.CharField(
        label=labels['PASSWORD_QL'],
        max_length=MAX_MESSAGE_LENGTH_SHORT,
        strip=True,)

class SignUpForm(forms.Form):
    username = forms.SlugField(
        label=labels['USERNAME'],
        max_length=MAX_NAME_LENGTH_SHORT,
        min_length=MIN_USERNAME_LENGTH,)
    password = forms.CharField(
        label=labels['PASSWORD'],
        widget=forms.PasswordInput,
        strip=False,
        max_length=MAX_MESSAGE_LENGTH_SHORT,
        min_length=MIN_USER_PASSWORD_LENGTH,)
    password_confirm = forms.CharField(
        label=labels['PASSWORD_<PASSWORD>'],
        widget=forms.PasswordInput,
        strip=False,
        max_length=MAX_MESSAGE_LENGTH_SHORT,
        min_length=MIN_USER_PASSWORD_LENGTH,)

class ReasonForm(forms.Form):
    reason = forms.CharField(
        label=labels['REASON'],
        initial=labels['REASON_NONE'],
        max_length=MAX_MESSAGE_LENGTH_SHORT,
        min_length=1,)

class TimeZoneForm(forms.Form):
    time_zone = forms.ChoiceField(
        label=labels['TIME_ZONE'],
        choices=get_timezones_all(),)
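A minimal sketch of how the dynamic-choices pattern in TimerGroupForm is meant to be driven from a view. The view body and `my_choices` data are illustrative assumptions, not part of the original module:

```python
# Hypothetical usage of TimerGroupForm's timer_group_choices kwarg.
from padsweb.forms import TimerGroupForm

def timer_group_view(request):
    # Choices gathered at request time, e.g. from the signed-in user's groups.
    my_choices = [(1, 'Work'), (2, 'Personal')]  # assumed data
    form = TimerGroupForm(request.POST or None,
                          timer_group_choices=my_choices)
    ...
```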
StarcoderdataPython
9665256
import numpy as np
import matplotlib.pyplot as plt


def sigmoid(val):
    return 1/(1 + np.exp(-val))


def stable_coeff(alpha_1, alpha_2):
    a_1 = 2*np.tanh(alpha_1)
    a_2 = np.abs(a_1) + (2 - np.abs(a_1))*sigmoid(alpha_2) - 1
    return a_1, a_2


def roots_polynomial(a_1, a_2):
    delta = a_1**2 - 4 * a_2
    idx_real = delta > 0  # classify before casting to complex
    # np.complex is deprecated; the builtin complex works as a dtype and
    # lets np.sqrt return complex roots when delta < 0
    delta = delta.astype(complex)
    root_1 = (-a_1 + np.sqrt(delta))/2
    root_2 = (-a_1 - np.sqrt(delta))/2
    return root_1, root_2, idx_real


if __name__ == '__main__':
    N = 10000
    alpha_1 = np.random.randn(N)*1
    alpha_2 = np.random.randn(N)*1
    a_1, a_2 = stable_coeff(alpha_1, alpha_2)
    r_1, r_2, idx_real = roots_polynomial(a_1, a_2)

    fig, ax = plt.subplots()
    ax.plot(a_1, a_2, '*')
    ax.plot(a_1[idx_real], a_2[idx_real], 'k*')
    ax.set_xlabel('a_1')
    ax.set_ylabel('a_2')
    ax.set_xlim([-2, 2])
    ax.set_ylim([-2, 2])

    fig, ax = plt.subplots()
    ax.plot(np.real(r_1), np.imag(r_1), 'r*')
    ax.plot(np.real(r_2), np.imag(r_2), 'r*')
    ax.plot(np.real(r_1)[idx_real], np.imag(r_1)[idx_real], 'k*')
    ax.plot(np.real(r_2)[idx_real], np.imag(r_2)[idx_real], 'k*')
    ax.set_xlim([-1.2, 1.2])
    ax.set_ylim([-1.2, 1.2])

    perc_real = np.sum(idx_real) / N * 100
    print(f"Real poles in {perc_real:.1f}% of cases")
    plt.show()  # display both figures when run as a script
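Why `stable_coeff` always yields a stable second-order polynomial z^2 + a_1*z + a_2 (both roots strictly inside the unit circle): tanh bounds a_1 to (-2, 2), and the sigmoid term places a_2 in the open interval (|a_1| - 1, 1), which is exactly the interior of the stability triangle |a_2| < 1 and |a_1| < 1 + a_2. A quick numerical sanity check of that claim, reusing the functions above (a sketch, not part of the original script):

```python
# Every (a_1, a_2) from stable_coeff should satisfy the stability-triangle
# conditions, so both roots of z^2 + a_1*z + a_2 lie inside the unit circle.
alpha_1 = np.random.randn(100000)
alpha_2 = np.random.randn(100000)
a_1, a_2 = stable_coeff(alpha_1, alpha_2)
assert np.all(np.abs(a_2) < 1) and np.all(np.abs(a_1) < 1 + a_2)
r_1, r_2, _ = roots_polynomial(a_1, a_2)
assert np.all(np.abs(r_1) < 1) and np.all(np.abs(r_2) < 1)
```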
StarcoderdataPython
8055538
from .. import IrccGenerator, IrccType
from motor_typing import TYPE_CHECKING

_UNSIGNED_VERSIONS = {'i8': 'u8', 'i16': 'u16', 'i32': 'u32', 'i64': 'u64'}


class IrccCTypes(IrccGenerator):
    _VECTOR_TYPES = {}     # type: Dict[str, str]

    def __init__(self, file):
        # type: (TextIO) -> None
        IrccGenerator.__init__(self, file)

    def type_void(self):
        # type: () -> IrccType
        return IrccType(['', '', 'void', ''])

    def type_builtin(self, builtin):
        # type: (str) -> IrccType
        # typedefs in the source map the builtins
        return IrccType(['', '', builtin, ''],
                        ['', '', _UNSIGNED_VERSIONS.get(builtin, builtin), ''])

    def type_declared(self, name):
        # type: (str) -> IrccType
        return IrccType(['', '', name, ''])

    def make_array(self, type, size):
        # type: (IrccType, int) -> IrccType
        return IrccType(['', '', '', '[%d]' % (size)], base=type, sep='')

    def make_ptr(self, type):
        # type: (IrccType) -> IrccType
        return IrccType(['', '', '*', ''], base=type, sep='')

    def make_struct(self, fields):
        # type: (List[Tuple[IrccType, str]]) -> IrccType
        struct_body = ' '.join('%s;' % type.format(['', '', name, '']) for type, name in fields)
        return IrccType(['struct', '{ %s }' % struct_body, '', ''])

    def make_const(self, type):
        # type: (IrccType) -> IrccType
        return IrccType(['', '', 'const', ''], ['', '', 'const', ''], type)

    def make_address_space(self, type, address_space):
        # type: (IrccType, int) -> IrccType
        return type

    def make_vector(self, type, count):
        # type: (IrccType, int) -> IrccType
        base_type = type._declaration[2]
        base_type_unsigned = type._unsigned_declaration[2]
        assert base_type in ['i8', 'i16', 'i32', 'i64', 'float']
        assert count in [2, 4, 8, 16]
        assert type._declaration[0] == ''
        assert type._declaration[1] == ''
        assert type._declaration[3] == ''
        return IrccType(
            ['', '', self._VECTOR_TYPES[base_type] % {'size': count}, ''],
            ['', '', self._VECTOR_TYPES[base_type_unsigned] % {'size': count}, '']
        )


if TYPE_CHECKING:
    from typing import Dict, List, TextIO, Tuple
    from ircc import IrccType
StarcoderdataPython
6584555
<reponame>Zhiyuan-w/DeepReg
"""Provide helper functions or classes for defining loss or metrics."""

from typing import List, Optional, Union

import tensorflow as tf

from deepreg.loss.kernel import cauchy_kernel1d
from deepreg.loss.kernel import gaussian_kernel1d_sigma as gaussian_kernel1d


class MultiScaleMixin(tf.keras.losses.Loss):
    """
    Mixin class for multi-scale loss.

    It applies the loss at different scales (gaussian or cauchy smoothing).
    It is assumed that loss values are between 0 and 1.
    """

    kernel_fn_dict = dict(gaussian=gaussian_kernel1d, cauchy=cauchy_kernel1d)

    def __init__(
        self,
        scales: Optional[Union[List, float, int]] = None,
        kernel: str = "gaussian",
        **kwargs,
    ):
        """
        Init.

        :param scales: list of scalars or None, if None, do not apply any scaling.
        :param kernel: gaussian or cauchy.
        :param kwargs: additional arguments.
        """
        super().__init__(**kwargs)
        if kernel not in self.kernel_fn_dict:
            raise ValueError(
                f"Kernel {kernel} is not supported."
                f"Supported kernels are {list(self.kernel_fn_dict.keys())}"
            )
        if scales is not None and not isinstance(scales, list):
            scales = [scales]
        self.scales = scales
        self.kernel = kernel

    def call(self, y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:
        """
        Use super().call to calculate loss at different scales.

        :param y_true: ground-truth tensor, shape = (batch, dim1, dim2, dim3).
        :param y_pred: predicted tensor, shape = (batch, dim1, dim2, dim3).
        :return: multi-scale loss, shape = (batch, ).
        """
        if self.scales is None:
            return super().call(y_true=y_true, y_pred=y_pred)
        kernel_fn = self.kernel_fn_dict[self.kernel]
        losses = []
        for s in self.scales:
            if s == 0:
                # no smoothing
                losses.append(
                    super().call(
                        y_true=y_true,
                        y_pred=y_pred,
                    )
                )
            else:
                losses.append(
                    super().call(
                        y_true=separable_filter(
                            tf.expand_dims(y_true, axis=4), kernel_fn(s)
                        )[..., 0],
                        y_pred=separable_filter(
                            tf.expand_dims(y_pred, axis=4), kernel_fn(s)
                        )[..., 0],
                    )
                )
        loss = tf.add_n(losses)
        loss = loss / len(self.scales)
        return loss

    def get_config(self) -> dict:
        """Return the config dictionary for recreating this class."""
        config = super().get_config()
        config["scales"] = self.scales
        config["kernel"] = self.kernel
        return config


class NegativeLossMixin(tf.keras.losses.Loss):
    """Mixin class to revert the sign of the loss value."""

    def __init__(self, **kwargs):
        """
        Init without required arguments.

        :param kwargs: additional arguments.
        """
        super().__init__(**kwargs)
        self.name = self.name + "Loss"

    def call(self, y_true: tf.Tensor, y_pred: tf.Tensor) -> tf.Tensor:
        """
        Revert the sign of loss.

        :param y_true: ground-truth tensor.
        :param y_pred: predicted tensor.
        :return: negated loss.
        """
        return -super().call(y_true=y_true, y_pred=y_pred)


def separable_filter(tensor: tf.Tensor, kernel: tf.Tensor) -> tf.Tensor:
    """
    Create a 3d separable filter.

    Here `tf.nn.conv3d` accepts the `filters` argument of shape
    (filter_depth, filter_height, filter_width, in_channels, out_channels),
    where the first axis of `filters` is the depth not batch,
    and the input to `tf.nn.conv3d` is of shape
    (batch, in_depth, in_height, in_width, in_channels).

    :param tensor: shape = (batch, dim1, dim2, dim3, 1)
    :param kernel: shape = (dim4,)
    :return: shape = (batch, dim1, dim2, dim3, 1)
    """
    strides = [1, 1, 1, 1, 1]
    kernel = tf.cast(kernel, dtype=tensor.dtype)

    tensor = tf.nn.conv3d(
        tf.nn.conv3d(
            tf.nn.conv3d(
                tensor,
                filters=tf.reshape(kernel, [-1, 1, 1, 1, 1]),
                strides=strides,
                padding="SAME",
            ),
            filters=tf.reshape(kernel, [1, -1, 1, 1, 1]),
            strides=strides,
            padding="SAME",
        ),
        filters=tf.reshape(kernel, [1, 1, -1, 1, 1]),
        strides=strides,
        padding="SAME",
    )
    return tensor
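A usage sketch for the multi-scale machinery above. The `MeanAbsError` base loss here is a toy assumption made for illustration (it reduces each volume to one value per batch element, as the mixin expects); DeepReg's real losses mix `MultiScaleMixin` in the same way:

```python
import tensorflow as tf

class MeanAbsError(tf.keras.losses.Loss):
    """Toy base loss (assumed): one value per batch element."""
    def call(self, y_true, y_pred):
        return tf.reduce_mean(tf.abs(y_true - y_pred), axis=[1, 2, 3])

class MultiScaleMAE(MultiScaleMixin, MeanAbsError):
    # MRO: MultiScaleMixin.call smooths the inputs at each scale,
    # then delegates to MeanAbsError.call via super().
    pass

loss_fn = MultiScaleMAE(scales=[0, 1, 2], kernel="gaussian")
y_true = tf.random.uniform((2, 8, 8, 8))
y_pred = tf.random.uniform((2, 8, 8, 8))
print(loss_fn.call(y_true=y_true, y_pred=y_pred).shape)  # (2,)
```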
StarcoderdataPython
3243759
<gh_stars>1-10
from .. import DB_BASE as Base
from sqlalchemy import Column, Integer, Sequence, Text


class BlogInfoORM(Base):
    __tablename__ = 'tb_blog_info'
    __table_args__ = {'comment': 'Blog profile information table'}

    id = Column(Integer, Sequence("tb_blog_info_id_seq"), primary_key=True)
    about_content = Column(Text, nullable=False, comment='Content of the "About Me" section')


__all__ = ["BlogInfoORM"]
StarcoderdataPython
11370888
# -*- coding: utf-8 -*-
import os
import numpy as np
import statsmodels.api as sm  # recommended import according to the docs
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats.mstats as mstats
from common import globals as glob
import seaborn as sns
sns.set(color_codes=True)
from scipy import stats

BIN_SIZE = 5

def draw_hist(df, country, name):
    plt.cla()
    plt.figure(1, figsize=(9, 6))
    x = df[country].dropna().values
    # np.linspace needs an integer sample count
    bins = np.linspace(0, max(x), int(max(x)/BIN_SIZE))
    #plt.hist(x, bins, alpha=0.5, label=[country])
    sns.distplot(x, bins=bins, kde=False, fit=stats.erlang)
    plt.legend(loc='upper right')
    plt.title('Histogram for distribution of Starbucks stores across cities in ' + name)
    dname = os.path.join(glob.OUTPUT_DIR_NAME, glob.EDA_DIR, 'more', country)
    os.makedirs(dname, exist_ok=True)
    fname = os.path.join(dname, 'stores_hist.png')
    plt.savefig(fname)

def draw_ecdf(df, country, name):
    #ecdf
    # Create a figure instance
    plt.cla()
    plt.figure(1, figsize=(9, 6))
    x = df[country].dropna().values
    ecdf = sm.distributions.ECDF(x)
    y = ecdf(x)
    plt.step(x, y)
    ax = plt.gca()
    ax.grid(True)
    major_ticks = np.arange(0, 1, 0.1)
    #minor_ticks = np.arange(0, 101, 5)
    ax.set_yticks(major_ticks)
    #ax.set_yticks(minor_ticks, minor=True)
    major_ticks = np.arange(0, max(x), BIN_SIZE*5)
    ax.set_xticks(major_ticks)
    plt.title('ECDF for distribution of Starbucks stores across cities in ' + name)
    dname = os.path.join(glob.OUTPUT_DIR_NAME, glob.EDA_DIR, 'more', country)
    os.makedirs(dname, exist_ok=True)
    fname = os.path.join(dname, 'stores_ecdf.png')
    plt.savefig(fname)

def draw_combined_hist(df, countries, country_names, winsorize=False):
    #make a combined histogram
    plt.cla()
    plt.figure(1, figsize=(5, 3))
    TRIM = 0.05 if winsorize == True else 0.0
    m = 0
    #get the max value amongst all series that we are going to plot
    for c in countries:
        x = df[c].dropna().values
        x = mstats.winsorize(x, (0, TRIM))
        if max(x) > m:
            m = max(x)
    #we have the max value, now plot each series, bins are decided based on max
    i = 0
    for c in countries:
        x = df[c].dropna().values
        x = mstats.winsorize(x, (0, TRIM))
        bins = np.linspace(0, m, int(m))  # integer sample count for linspace
        plt.hist(x, bins, alpha=0.5, label=country_names[i])
        i += 1
    plt.legend(loc='upper right')
    plt.title('Histogram for distribution of Starbucks stores\n across cities in a country')
    name = 'hist.png' if winsorize == False else 'winsorized_hist.png'
    fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.EDA_DIR, 'more', name)
    plt.savefig(fname)
    #plt.show()

def draw_combined_boxplot(df, countries, country_names, winsorize=False):
    plt.cla()
    # Create the boxplot
    plt.figure(1, figsize=(5, 6))
    TRIM = 0.05 if winsorize == True else 0.0
    data = []
    for c in countries:
        x = df[c].dropna().values
        x = mstats.winsorize(x, (0, TRIM))
        data.append(x)
    plt.boxplot(data, labels=country_names, whis='range')
    plt.title('Boxplot for distribution of Starbucks stores\n across cities in a country')
    #plt.xticks(rotation = 45)
    # Save the figure
    name = 'boxplot.png' if winsorize == False else 'winsorized_boxplot.png'
    fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.EDA_DIR, 'more', name)
    plt.savefig(fname)

def explore_distribution_across_countries(df):
    countries = ['US', 'GB', 'AE', 'KW', 'KR', 'CA']
    country_names = ['United States', 'United Kingdom', 'UAE', 'Kuwait', 'Republic of Korea', 'Canada']
    df3 = pd.DataFrame(columns=countries)
    #kind of kludgy way of doing this but ok..
    max_l = 0
    city_w_max_stores = []
    count_in_city_w_max_stores = []
    for country in countries:
        df2 = df[df['country'] == country]
        distribution = df2['city'].value_counts()
        l = len(distribution)
        if l > max_l:
            max_l = l
        # .ix is removed in modern pandas; .iloc gives positional access
        glob.log.info('Max number of stores (%s, %d)' % (distribution.index[0], distribution.iloc[0]))
        city_w_max_stores.append(distribution.index[0])
        count_in_city_w_max_stores.append(distribution.iloc[0])

    df_temp = pd.DataFrame(columns=['country', 'city_with_most_starbucks_stores', 'count_in_city_with_most_stores'])
    df_temp['country'] = country_names
    df_temp['city_with_most_starbucks_stores'] = city_w_max_stores
    df_temp['count_in_city_with_most_stores'] = count_in_city_w_max_stores
    fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.EDA_DIR, 'more', 'cities_withmost_stores.csv')
    df_temp.to_csv(fname, index=False, encoding='utf-8')

    for country in countries:
        df2 = df[df['country'] == country]
        distribution = df2['city'].value_counts()
        #df3 = pd.DataFrame(columns=['stores'])
        values = distribution.values
        if len(values) < max_l:
            padding = max_l - len(values)
            values = np.append(values, np.repeat(np.nan, padding))
        df3[country] = values

    #now make plots for individual countries and combinations
    #histogram
    i = 0
    for country in countries:
        draw_hist(df3, country, country_names[i])
        draw_ecdf(df3, country, country_names[i])
        i += 1

    #combined histogram, followed by a winsorized version
    draw_combined_hist(df3, countries, country_names)
    draw_combined_hist(df3, countries, country_names, True)

    #make a boxplot, followed by a winsorized version
    draw_combined_boxplot(df3, countries, country_names)
    draw_combined_boxplot(df3, countries, country_names, True)

def run():
    glob.log.info('about to begin additional analysis...')
    fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.COMBINED_DATASET_CSV)
    df_combined = pd.read_csv(fname)

    fname = os.path.join(glob.OUTPUT_DIR_NAME, glob.SB_CSV_FILE_W_FEATURES)
    df_sb = pd.read_csv(fname)

    explore_distribution_across_countries(df_sb)

if __name__ == "__main__":
    # execute only if run as a script; run() takes no arguments,
    # so don't pass sys.argv (which was also never imported)
    run()
StarcoderdataPython
5139914
<reponame>vainotuisk/icecreamratings
__version__ = '16.0'
StarcoderdataPython
363050
<filename>challenges/String_Info_Calculator/poller/for-release/machine.py
#!/usr/bin/env python
#
# Copyright (C) 2014 <NAME>ustries <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

from generator.actions import Actions, Variable
from random import choice, randint
from os.path import dirname, abspath, join
import sys
import struct

sys.path.append(join(dirname(dirname(dirname(abspath(__file__)))), "support"))
import support as sp
from common import DEBUG


class StrInfo(Actions):
    INBUFSZ = 1000
    DEBUG_CONTENT = False
    TERM = '\x03'
    GIMME = "Give me your sentences.{0}".format(TERM)
    DONE = "Here are your results...{0}".format(TERM)

    def _get_content(self):
        all_chars = [chr(x) for x in range(128)]
        all_chars.remove(self.TERM)
        result = ''
        b_len = randint(5, self.INBUFSZ-1)  # TERM is added at the end.
        remaining = b_len
        while 0 < remaining:
            if self.chance(0.7):
                result += sp.random_alpha(0, 10 if remaining > 10 else remaining)
            elif self.chance(0.3):
                result += sp.random_digits(0, 10 if remaining > 10 else remaining)
            else:
                result += ''.join(choice(all_chars) for _ in range(0, 10 if remaining > 10 else remaining))
            if len(result) >= b_len:
                break
            if self.chance(0.7):
                result += ' '
            else:
                result += '.'
            if self.chance(0.1):
                result += '\n'
            remaining = b_len - len(result)

        if True == self.DEBUG_CONTENT:
            result = "Bob's discount randumb stor-E gener8r for only $12.99. Come on in.\nGet your storie today!\n"

        if True == DEBUG:
            r = '\\'.join(hex(ord(c)) for c in result)
            print "result hex: {0}".format(r)
            print "result: {0}".format(result)

        return result

    def _get_results(self, s):
        '''
        unsigned int hash;
        unsigned short chars;
        unsigned short words;
        unsigned short sentences;
        unsigned short paragraphs;
        unsigned short printable;
        unsigned short non_printable;
        unsigned short letters;
        unsigned short numbers;
        unsigned short symbols;
        unsigned short session_id;
        '''
        r = {'chars': 0, 'words': 0, 'sentences': 0, 'paragraphs': 0,
             'printable': 0, 'non_printable': 0, 'letters': 0,
             'numbers': 0, 'symbols': 0, 'session_id': 0}
        r = self._compute_char_type(s, r)
        r = self._compute_grammar_components(s, r)
        r = self._compute_session_id(r)
        return r

    def _compute_char_type(self, s, r):
        for ch in s:
            if True == sp.is_non_printable(ord(ch)):
                r['non_printable'] += 1
            else:
                r['printable'] += 1
                if True == sp.is_digit(ord(ch)):
                    r['numbers'] += 1
                elif True == sp.is_letter(ord(ch)):
                    r['letters'] += 1
                else:
                    r['symbols'] += 1
        if True == DEBUG:
            print "type: {0}".format(r)
        return r

    def _compute_grammar_components(self, s, r):
        ch_prev = '\x04'
        for ch in s:
            r['chars'] += 1
            if True == sp.is_word_end_char(ch) and \
               True == sp.is_letter(ord(ch_prev)):
                r['words'] += 1
            if True == sp.is_sentence_end_char(ch_prev):
                if ' ' == ch:
                    r['sentences'] += 1
                elif True == sp.is_paragraph_end_char(ch):
                    r['sentences'] += 1
                    r['paragraphs'] += 1
            ch_prev = ch
        if True == DEBUG:
            print "grammar: {0}".format(r)
        return r

    def _compute_session_id(self, r):
        '''
        const char *magic_sauce = (const char*)FLAG_PAGE;
        char session_id[2] = 0;
        unsigned short *p_sid = (unsigned short *)session_id;
        session_id[0] = magic_sauce[0] ^ magic_sauce[1000];
        session_id[1] = magic_sauce[2000] ^ magic_sauce[3000];
        r->session_id = *p_sid;
        '''
        b1 = ord(self.magic_page[0]) ^ ord(self.magic_page[1000])
        b2 = ord(self.magic_page[2000]) ^ ord(self.magic_page[3000])
        r['session_id'] = (b2 << 8) + b1
        return r

    def start(self):
        self.delay(50)
        pass

    def banner(self):
        self.read(delim=self.TERM, expect=self.GIMME)

    def content(self):
        s = self._get_content()
        self.write(s + self.TERM)
        r_clear = self._get_results(s)
        if True == DEBUG:
            print "clear: {0}".format(r_clear)
        r_packed = sp.pack_results(r_clear)
        if True == DEBUG:
            r = '\\'.join(hex(ord(c)) for c in r_packed)
            print "packed: {0}".format(r)
        self.read(delim=self.TERM, expect=self.DONE)
        self.read(length=4)  # read the hash
        self.read(length=len(r_packed), expect=r_packed)  # read the results
        # self.read(length=2) # read the session_id value

    def end(self):
        return
StarcoderdataPython
8182110
def even_numbers(maximum):
    return_string = ""
    for x in range(2, maximum+1, 2):
        return_string += str(x) + " "
    return return_string.strip()

print(even_numbers(6))   # Should be 2 4 6
print(even_numbers(10))  # Should be 2 4 6 8 10
print(even_numbers(1))   # No numbers displayed
print(even_numbers(3))   # Should be 2
print(even_numbers(0))   # No numbers displayed
StarcoderdataPython
123877
<reponame>gablin/python-verisure
"""
A python module for reading and changing status of verisure devices
through verisure app API.
"""

__all__ = [
    'Error',
    'LoginError',
    'ResponseError',
    'Session'
]

from .session import (  # NOQA
    Error,
    LoginError,
    ResponseError,
    Session
)

ALARM_ARMED_HOME = 'ARMED_HOME'
ALARM_ARMED_AWAY = 'ARMED_AWAY'
ALARM_DISARMED = 'DISARMED'

LOCK_LOCKED = 'LOCKED'
LOCK_UNLOCKED = 'UNLOCKED'

SMARTPLUG_ON = 'on'
SMARTPLUG_OFF = 'off'
StarcoderdataPython
3228726
<gh_stars>1-10
# Transform Bible verse lists into a format that Accordance accepts.

import re
from romnum import romNumVal
from urllib import quote

def escapePath(value):
    return quote(value.encode('utf_8'))

def escapeQuery(value):
    value = value.replace(' ', '_')
    return quote(value.encode('utf_8'), ':;,._')

def accordURL(text, verses):
    return 'accord://read/{}?{}'.format(escapePath(text), escapeQuery(verses))

# Group book and chapter refs into a shorter form: Gen 1:1; Gen 1:3; Gen 2:1 -> Gen 1:1,3; 2:1
def consolidate(verses):
    result = ''
    curbook = ''
    curchap = 0
    for verse in verses.split(';'):
        verse = verse.strip()
        sep = '; '
        match = re.search('\s*(.*?)\s+(\d*:?\d+)', verse)
        if match != None:
            book = match.group(1)
            ref = match.group(2)
            match = re.search('(\d+):(\d+)', ref)
            if match != None:
                ch = match.group(1)
                vs = match.group(2)
            else:
                ch = 1
                vs = ref
            if book == curbook:
                verse = ref
                if curchap == ch:
                    sep = ','
                    verse = vs
                else:
                    curchap = ch
            else:
                curbook = book
                curchap = ch
        if result != '':
            result = result + sep
        result = result + verse
    return result

def rn(match):
    return str(romNumVal(match.group(1)))

# Handles Roman numerals and extraneous punctuation.
# NOTE: re.sub's fourth positional argument is count, not flags; the original
# passed re.U positionally, silently capping substitutions at 32 per call.
def normalize(refs):
    refs = re.sub(r'\s+', r' ', refs, flags=re.U)                     # combine redundant spaces
    refs = re.sub(r'\s+([,;])', r'\1', refs, flags=re.U)              # remove space before punctuation
    refs = re.sub(r'\\u2013', r'-', refs, flags=re.U)                 # en dash to hyphen
    refs = re.sub(r'\s*-\s*', r'-', refs, flags=re.U)                 # remove spaces around hyphen
    refs = re.sub(r'\b([clxvi]+)\b', rn, refs, flags=re.U | re.I)     # decode Roman numeral
    refs = re.sub(r'(\d+)[.:] *(\d+)', r'\1:\2', refs, flags=re.U)    # use : for verses
    refs = re.sub(r'(\w+)\.', r'\1', refs, flags=re.U)                # remove . after book number
    refs = re.sub(r'(\d+)\s+([A-Za-z]+)', r'\1\2', refs, flags=re.U)  # join book num and name
    refs = re.sub(r'([,;]){2,}', r'\1', refs, flags=re.U)             # combine redundant punctuation
    refs = re.sub(r'^[ ,;]+|[ ,;]+$', r'', refs, flags=re.U)          # leading/trailing junk
    return refs

if __name__ == '__main__':
    import fileinput
    for line in fileinput.input():
        print(consolidate(normalize(line)))
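A quick round-trip sketch of the pipeline, assuming `romnum.romNumVal` decodes lowercase Roman numerals the way the regex expects; the sample input and output are illustrative:

```python
# Hypothetical example (Python 2, matching the module above):
refs = "Gen. i. 1; Gen. i. 3; Gen. ii. 1"
print(consolidate(normalize(refs)))
# normalize -> "Gen 1:1; Gen 1:3; Gen 2:1"
# consolidate -> "Gen 1:1,3; 2:1"
```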
StarcoderdataPython
5084052
# encoding: utf-8
import numpy as np
import tensorflow as tf
import os
import cv2
from tqdm import tqdm
import re
import sys
sys.path.append('..')
from config import cfg


def convert(size, box):
    dw = 1./size[0]
    dh = 1./size[1]
    x = (box[0] + box[1])/2.0
    y = (box[2] + box[3])/2.0
    w = box[1] - box[0]
    h = box[3] - box[2]
    x = x*dw
    w = w*dw
    y = y*dh
    h = h*dh
    return [float(x), float(y), float(w), float(h)]


def load_file(file_path):
    '''
    load imgs_path, classes and labels
    '''
    imgs_path = []
    #classes = []
    #labels = []
    labels_and_classes = []
    with open(file_path, 'r') as f:
        lines = f.readlines()
        for line in lines:
            img_path = '/diskdt/dataset/bar_chart_dataset/bar_chart_batch_1/' + line.strip().split(' ')[0]
            #cls = int(line.strip().split(' ')[1])
            cls_and_label = [float(i) for i in line.strip().split(' ')[1:]]
            if len(cls_and_label) > 30*5:
                continue
            cls_and_label = np.asarray(cls_and_label).reshape(-1, 5)[:, [0, 1, 3, 2, 4]]
            cls_and_bb = []
            for i in range(cls_and_label.shape[0]):
                cls = [float(cls_and_label[i][0])]
                bb = convert((600, 600), cls_and_label[i][1:])
                bb.extend(cls)
                cls_and_bb.extend(bb)
            if cls_and_label.shape[0] < 30:
                cls_and_bb = cls_and_bb + [0, 0, 0, 0, 0]*(30-int(cls_and_label.shape[0]))
            imgs_path.append(img_path)
            #classes.append(cls)
            #labels.append(label)
            #label.append(cls)
            labels_and_classes.append(cls_and_bb)
    return np.asarray(imgs_path), np.array(labels_and_classes)


def extract_image(image_path, height, width, is_resize=True):
    '''
    get b->g->r image data
    '''
    image = cv2.imread(image_path)
    if is_resize:
        print('is_resize')
        image = cv2.resize(image, (width, height))
    #image_data = np.array(image, dtype='float32') / 255.0
    image_data = np.array(image, dtype='uint8')
    return image_data


def run_encode(file_path, tf_records_filename):
    '''
    encode func
    '''
    imgs_path, labels_and_classes = load_file(file_path)
    height, width = 1080, 1920
    imgs = []
    writer = tf.python_io.TFRecordWriter(tf_records_filename)

    for i in tqdm(range(imgs_path.shape[0])):
        img = extract_image(imgs_path[i], height, width, is_resize=False)
        img = img.tostring()
        label_and_class = labels_and_classes[i].flatten().tolist()
        example = tf.train.Example(features=tf.train.Features(feature={
            'label_and_class': tf.train.Feature(float_list=tf.train.FloatList(value=label_and_class)),
            'feature': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img]))
        }))
        writer.write(example.SerializeToString())
    writer.close()


if __name__ == '__main__':
    file_path = re.sub(r'prepare_data', '', os.getcwd()) + 'data/train_list/train_list.txt'
    tf_records_filename = cfg.data_path
    run_encode(file_path, tf_records_filename)
11332195
<reponame>AnonymusRaccoon/dotfiles<gh_stars>1-10
# Store interactive Python shell history in ~/.cache/python_history
# instead of ~/.python_history.
#
# Create the following .config/pythonstartup.py file
# and export its path using PYTHONSTARTUP environment variable:
#
# export PYTHONSTARTUP="${XDG_CONFIG_HOME:-$HOME/.config}/pythonstartup.py"

import atexit
import os
import readline

histfile = os.path.join(os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache")),
                        "python_history")
try:
    readline.read_history_file(histfile)
    # default history len is -1 (infinite), which may grow unruly
    readline.set_history_length(1000)
except FileNotFoundError:
    pass

atexit.register(readline.write_history_file, histfile)
StarcoderdataPython
3401049
<gh_stars>1-10
from django.shortcuts import render, HttpResponse, redirect
from courses.models import Course


def home(request):
    return render(request, 'home.html', )


def aboutUs(request):
    return render(request, 'about.html', {})


def Contactus(request):
    if request.method == 'POST':
        # work for its backend!!
        # NOTE: first_name was referenced but never defined in the original;
        # read it from the POST data so the render call works.
        first_name = request.POST.get('first_name', '')
        return render(request, '../accounts/templates/thanks.html', {
            'user': first_name,
            'message': 'Thank You for your invaluable feedback!! We will reach out to you as soon as possible.',
            'msg_cnt': 'Your feedback has been sent successfully!!'
        })
    else:
        return render(request, 'contact.html', {})
StarcoderdataPython
8080261
from .model_base import Baseline
from .model_eval_op import TwoStreamSwitchBNOp
StarcoderdataPython
5181969
"""Modules that handle the events the bot recognizes and reacts to"""
StarcoderdataPython
1812199
#sample script to be executed by pipeViewer Console
from model.node_element import NodeElement

highest_access_count = 0
for element in context.GetElements():
    if isinstance(element, NodeElement):
        split = element.GetProperty('data').split(':')
        if len(split) < 2:
            continue  # either Start or End
        accesses = int(split[1])
        if accesses > highest_access_count:
            highest_access_count = accesses

for element in context.GetElements():
    if isinstance(element, NodeElement):
        split = element.GetProperty('data').split(':')
        if len(split) < 2:
            continue  # either Start or End
        accesses = int(split[1])
        color_val = int((1 - accesses/(highest_access_count*1.0))*255)
        context.dbhandle.database.AddMetadata(element.GetProperty('name'),
                                              {'color': (color_val, 255, 255)})
StarcoderdataPython
11313696
<reponame>miott/genielibs<filename>pkgs/ops-pkg/src/genie/libs/ops/stp/iosxr/stp.py
'''
Stp Genie Ops Object for IOSXR - CLI.
'''
# Genie
from genie.libs.ops.stp.stp import Stp as SuperStp
from genie.ops.base import Context

# Parser
from genie.libs.parser.iosxr.show_spanning_tree import ShowSpanningTreeMst, \
                                                       ShowSpanningTreeMstag, \
                                                       ShowSpanningTreePvrst, \
                                                       ShowSpanningTreePvrsTag, \
                                                       ShowSpanningTreePvsTag


class Stp(SuperStp):
    '''STP Genie Ops Object'''

    def learn(self, mst_domain=None, mstag_domain=None, pvst_id=None,
              pvrstag_domain=None, pvstag_domain=None):
        '''Learn stp Ops'''

        ########################################################################
        #                                info
        ########################################################################
        # global - N/A
        #     bridge_assurance - N/A
        #     etherchannel_misconfig_guard - N/A
        #     bpduguard_timeout_recovery - N/A
        #     loop_guard - N/A
        #     bpdu_guard - N/A
        #     bpdu_filter - N/A
        # mstp
        #     mst_domain
        #         domain - N/A
        #         name - N/A
        #         revision - N/A
        #         max_hop - N/A
        #         hello_time - N/A
        #         max_age - N/A
        #         forwarding_delay - N/A
        #         hold_count - N/A
        #         mst_instances
        #             mst_id
        #                 mst_id
        #                 vlan
        #                 bridge_priority
        #                 bridge_address
        #                 designated_root_priority
        #                 designated_root_address
        #                 root_port - N/A
        #                 root_cost
        #                 hold_time - N/A
        #                 topology_changes - N/A
        #                 time_since_topology_change - N/A
        #                 interfaces
        #                     m_interface
        #                         name
        #                         cost
        #                         port_priority
        #                         port_num
        #                         role
        #                         port_state
        #                         designated_root_priority - N/A
        #                         designated_root_address - N/A
        #                         designated_cost - N/A
        #                         designated_bridge_priority
        #                         designated_bridge_address
        #                         designated_port_priority
        #                         designated_port_num
        #                         forward_transitions - N/A
        #                         counters - N/A
        #                             bpdu_sent - N/A
        #                             bpdu_received - N/A
        #         interfaces - N/A
        #             m_interface - N/A
        #                 name - N/A
        #                 edge_port - N/A
        #                 link_type - N/A
        #                 guard - N/A
        #                 bpdu_guard - N/A
        #                 bpdu_filter - N/A
        # mstag
        #     mag_domain
        #         domain
        #         interfaces
        #             mag_interface
        #                 interface
        #                 name
        #                 revision
        #                 bridge_id
        #                 preempt_delay
        #                 preempt_delay_state - N/A
        #                 max_age
        #                 provider_bridge
        #                 port_id
        #                 external_cost
        #                 hello_time
        #                 active
        #                 counters - N/A
        #                     bpdu_sent - N/A
        #                 instances
        #                     mag_id
        #                         instance
        #                         root_id
        #                         vlans
        #                         priority
        #                         root_priority
        #                         port_priority
        #                         cost
        #                         counters
        #                             topology_changes
        # pvst
        #     pvst_id
        #         pvst_id
        #         max_age - N/A
        #         hold_count - N/A
        #         forwarding_delay - N/A
        #         hello_time - N/A
        #         vlans
        #             vlan_id
        #                 vlan_id
        #                 hello_time - N/A
        #                 max_age - N/A
        #                 forwarding_delay - N/A
        #                 bridge_priority
        #                 configured_bridge_priority - N/A
        #                 sys_id_ext
        #                 bridge_address
        #                 designated_root_priority
        #                 designated_root_address
        #                 root_port - N/A
        #                 root_cost - N/A
        #                 hold_time - N/A
        #                 topology_changes - N/A
        #                 time_since_topology_change - N/A
        #                 interface
        #                     v_interface
        #                         name
        #                         cost
        #                         port_priority
        #                         port_num
        #                         role
        #                         port_state
        #                         designated_root_priority - N/A
        #                         designated_root_address - N/A
        #                         designated_cost - N/A
        #                         designated_bridge_priority
        #                         designated_bridge_address
        #                         designated_port_priority
        #                         designated_port_num
        #                         forward_transitions - N/A
        #                         counters - N/A
        #                             bpdu_sent - N/A
        #                             bpdu_received - N/A
        #         interfaces - N/A
        #             p_interface - N/A
        #                 name - N/A
        #                 edge_port - N/A
        #                 link_type - N/A
        #                 guard - N/A
        #                 bpdu_guard - N/A
        #                 bpdu_filter - N/A
        #                 hello_time - N/A
        # rapid_pvst - N/A
        #     pvst_id - N/A
        #         pvst_id - N/A
        #         max_age - N/A
        #         hold_count - N/A
        #         forwarding_delay - N/A
        #         hello_time - N/A
        #         vlans - N/A
        #             vlan_id - N/A
        #                 vlan_id - N/A
        #                 hello_time - N/A
        #                 max_age - N/A
        #                 forwarding_delay - N/A
        #                 bridge_priority - N/A
        #                 configured_bridge_priority - N/A
        #                 sys_id_ext - N/A
        #                 bridge_address - N/A
        #                 designated_root_priority - N/A
        #                 designated_root_address - N/A
        #                 root_port - N/A
        #                 root_cost - N/A
        #                 hold_time - N/A
        #                 topology_changes - N/A
        #                 time_since_topology_change - N/A
        #                 interface - N/A
        #                     v_interface - N/A
        #                         name - N/A
        #                         cost - N/A
        #                         port_priority - N/A
        #                         port_num - N/A
        #                         role - N/A
        #                         port_state - N/A
        #                         designated_root_priority - N/A
        #                         designated_root_address - N/A
        #                         designated_cost - N/A
        #                         designated_bridge_priority - N/A
        #                         designated_bridge_address - N/A
        #                         designated_port_priority - N/A
        #                         designated_port_num - N/A
        #                         forward_transitions - N/A
        #                         counters - N/A
        #                             bpdu_sent - N/A
        #                             bpdu_received - N/A
        #         interfaces - N/A
        #             p_interface - N/A
        #                 name - N/A
        #                 edge_port - N/A
        #                 link_type - N/A
        #                 guard - N/A
        #                 bpdu_guard - N/A
        #                 bpdu_filter - N/A
        #                 hello_time - N/A
        # pvrstag
        #     prag_domain
        #         domain
        #         interfaces
        #             prag_interface
        #                 interface
        #                 vlans
        #                     prag_vlan
        #                         root_priority
        #                         root_id - N/A
        #                         root_cost
        #                         priority - N/A
        #                         bridge_id
        #                         port_priority
        #                         max_age
        #                         hello_time
        #                         preempt_delay
        #                         preempt_delay_state
        #                         sub_interface
        #                         sub_interface_state
        #                         port_id
        #                         active
        #                         counters
        #                             bpdu_sent - N/A
        #                             topology_changes
        # pvstag
        #     pag_domain
        #         domain
        #         interfaces
        #             pag_interface
        #                 interface
        #                 vlans
        #                     pag_vlan
        #                         root_priority
        #                         root_id - N/A
        #                         root_cost
        #                         priority - N/A
        #                         bridge_id
        #                         port_priority
        #                         max_age
        #                         hello_time
        #                         preempt_delay
        #                         preempt_delay_state
        #                         sub_interface
        #                         sub_interface_state
        #                         port_id
        #                         active
        #                         counters
        #                             bpdu_sent - N/A
        #                             topology_changes

        mstp_domain_instances_src = '[mstp][(?P<mstp_domain>.*)][mst_instances][(?P<mst_id>.*)]'
        mstp_domain_instances_des = 'info[mstp][(?P<mstp_domain>.*)][mst_instances][(?P<mst_id>.*)]'

        if mst_domain:
            for key in ['mst_id', 'vlan', 'bridge_priority', 'bridge_address',
                        'designated_root_priority', 'designated_root_address',
                        'root_cost', ]:
                self.add_leaf(cmd=ShowSpanningTreeMst,
                              src=mstp_domain_instances_src + '[%s]' % key,
                              dest=mstp_domain_instances_des + '[%s]' % key,
                              mst=mst_domain)

        mstp_domain_interfaces_src = mstp_domain_instances_src + '[interfaces][(?P<m_interface>.*)]'
        mstp_domain_interfaces_des = mstp_domain_instances_des + '[interfaces][(?P<m_interface>.*)]'

        if mst_domain:
            for key in ['name', 'cost', 'port_priority', 'port_num', 'role',
                        'port_state', 'designated_cost',
                        'designated_bridge_priority',
                        'designated_bridge_address',
                        'designated_port_priority', 'designated_port_num']:
                self.add_leaf(cmd=ShowSpanningTreeMst,
                              src=mstp_domain_interfaces_src + '[%s]' % key,
                              dest=mstp_domain_interfaces_des + '[%s]' % key,
                              mst=mst_domain)

        mstag_src = '[mstag][(?P<mstag>.*)]'
        mstag_des = 'info[mstag][(?P<mstag>.*)]'

        if mstag_domain:
            self.add_leaf(cmd=ShowSpanningTreeMstag,
                          src=mstag_src + '[domain]',
                          dest=mstag_des + '[domain]',
                          mag_domain=mstag_domain)

        mstag_interfaces_src = mstag_src + '[interfaces][(?P<m_interface>.*)]'
        mstag_interfaces_des = mstag_des + '[interfaces][(?P<m_interface>.*)]'

        if mstag_domain:
            for key in ['interface', 'preempt_delay', 'name', 'revision',
                        'max_age', 'provider_bridge', 'bridge_id', 'port_id',
                        'external_cost', 'hello_time', 'active']:
                self.add_leaf(cmd=ShowSpanningTreeMstag,
                              src=mstag_interfaces_src + '[%s]' % key,
                              dest=mstag_interfaces_des + '[%s]' % key,
                              mag_domain=mstag_domain)

        mstag_instances_src = mstag_src + '[interfaces][instances][(?P<m_instance>.*)]'
        mstag_instances_des = mstag_des + '[interfaces][instances][(?P<m_instance>.*)]'

        if mstag_domain:
            for key in ['instance', 'vlans', 'priority', 'port_priority',
                        'cost', 'root_priority']:
                self.add_leaf(cmd=ShowSpanningTreeMstag,
                              src=mstag_instances_src + '[%s]' % key,
                              dest=mstag_instances_des + '[%s]' % key,
                              mag_domain=mstag_domain)

            self.add_leaf(cmd=ShowSpanningTreeMstag,
                          src=mstag_instances_src + '[counters][topology_changes]',
                          dest=mstag_instances_des + '[counters][topology_changes]',
                          mag_domain=mstag_domain)

        pvst_src = '[pvst][(?P<pvst>.*)]'
        pvst_des = 'info[pvst][(?P<pvst>.*)]'

        if pvst_id:
            self.add_leaf(cmd=ShowSpanningTreePvrst,
                          src=pvst_src + '[pvst_id]',
                          dest=pvst_des + '[pvst_id]',
                          pvst_id=pvst_id)

        pvst_vlans_src = pvst_src + '[vlans][(?P<vlans>.*)]'
        pvst_vlans_des = pvst_des + '[vlans][(?P<vlans>.*)]'

        if pvst_id:
            for key in ['vlan_id', 'designated_root_priority',
                        'designated_root_address', 'bridge_priority',
                        'sys_id_ext', 'bridge_address']:
                self.add_leaf(cmd=ShowSpanningTreePvrst,
                              src=pvst_vlans_src + '[%s]' % key,
                              dest=pvst_vlans_des + '[%s]' % key,
                              pvst_id=pvst_id)

        pvst_vlans_interface_src = pvst_vlans_src + '[interface][(?P<m_interface>.*)]'
        pvst_vlans_interface_des = pvst_vlans_des + '[interface][(?P<m_interface>.*)]'

        if pvst_id:
            for key in ['name', 'cost', 'role', 'port_priority', 'port_num',
                        'port_state', 'designated_bridge_priority',
                        'designated_bridge_address',
                        'designated_port_priority', 'designated_port_num']:
                self.add_leaf(cmd=ShowSpanningTreePvrst,
                              src=pvst_vlans_interface_src + '[%s]' % key,
                              dest=pvst_vlans_interface_des + '[%s]' % key,
                              pvst_id=pvst_id)

        pvrstag_src = '[pvrstag][(?P<pvrstag>.*)]'
        pvrstag_des = 'info[pvrstag][(?P<pvrstag>.*)]'

        if pvrstag_domain:
            self.add_leaf(cmd=ShowSpanningTreePvrsTag,
                          src=pvrstag_src + '[domain]',
                          dest=pvrstag_des + '[domain]',
                          pvrstag_domain=pvrstag_domain)

        pvrstag_interfaces_src = pvrstag_src + '[interfaces][(?P<m_interface>.*)]'
        pvrstag_interfaces_des = pvrstag_des + '[interfaces][(?P<m_interface>.*)]'

        if pvrstag_domain:
            self.add_leaf(cmd=ShowSpanningTreePvrsTag,
                          src=pvrstag_interfaces_src + '[interface]',
                          dest=pvrstag_interfaces_des + '[interface]',
                          pvrstag_domain=pvrstag_domain)

        pvrstag_vlans_src = pvrstag_interfaces_src + '[vlans][(?P<vlans>.*)]'
        pvrstag_vlans_des = pvrstag_interfaces_des + '[vlans][(?P<vlans>.*)]'

        if pvrstag_domain:
            for key in ['preempt_delay', 'preempt_delay_state',
                        'sub_interface', 'sub_interface_state', 'max_age',
                        'root_priority', 'root_cost', 'bridge_id',
                        'port_priority', 'port_id', 'hello_time', 'active']:
                self.add_leaf(cmd=ShowSpanningTreePvrsTag,
                              src=pvrstag_vlans_src + '[%s]' % key,
                              dest=pvrstag_vlans_des + '[%s]' % key,
                              pvrstag_domain=pvrstag_domain)

            self.add_leaf(cmd=ShowSpanningTreePvrsTag,
                          src=pvrstag_vlans_src + '[counters][topology_changes]',
                          dest=pvrstag_vlans_des + '[counters][topology_changes]',
                          pvrstag_domain=pvrstag_domain)

        pvstag_src = '[pvstag][(?P<pvrstag>.*)]'
        pvstag_des = 'info[pvstag][(?P<pvrstag>.*)]'

        if pvstag_domain:
            self.add_leaf(cmd=ShowSpanningTreePvsTag,
                          src=pvstag_src + '[domain]',
                          dest=pvstag_des + '[domain]',
                          pvstag_domain=pvstag_domain)

        pvstag_interfaces_src = pvstag_src + '[interfaces][(?P<m_interface>.*)]'
        pvstag_interfaces_des = pvstag_des + '[interfaces][(?P<m_interface>.*)]'

        if pvstag_domain:
            self.add_leaf(cmd=ShowSpanningTreePvsTag,
                          src=pvstag_interfaces_src + '[interface]',
                          dest=pvstag_interfaces_des + '[interface]',
                          pvstag_domain=pvstag_domain)

        pvstag_vlans_src = pvstag_interfaces_src + '[vlans][(?P<vlans>.*)]'
        pvstag_vlans_des = pvstag_interfaces_des + '[vlans][(?P<vlans>.*)]'

        if pvstag_domain:
            for key in ['preempt_delay', 'preempt_delay_state',
                        'sub_interface', 'sub_interface_state', 'max_age',
                        'root_priority', 'root_cost', 'bridge_id',
                        'port_priority', 'port_id', 'hello_time', 'active']:
                self.add_leaf(cmd=ShowSpanningTreePvsTag,
                              src=pvstag_vlans_src + '[%s]' % key,
                              dest=pvstag_vlans_des + '[%s]' % key,
                              pvstag_domain=pvstag_domain)

            self.add_leaf(cmd=ShowSpanningTreePvsTag,
                          src=pvstag_vlans_src + '[counters][topology_changes]',
                          dest=pvstag_vlans_des + '[counters][topology_changes]',
                          pvstag_domain=pvstag_domain)

        # make to write in cache
        self.make(final_call=True)
StarcoderdataPython
4837467
<filename>src/SyntaxHighlight.py
"""
defines syntax_highlight which turns str into CSS/HTML formatted syntax
highlighted TikZ code
"""
import re

try:
    from pygments import highlight, lexers
    from pygments.styles import get_style_by_name
    from pygments.formatters import HtmlFormatter
except ImportError:
    highlight, lexers, get_style_by_name, HtmlFormatter = 4 * [None]


def pygments_syntax_highlight(method, text):
    """Return CSS/HTML syntax-highlighted TikZ code using Pygments."""
    print(method)
    style = get_style_by_name(method)
    formatter = HtmlFormatter(full=True, noclasses=True, style=style)
    lex = lexers.get_lexer_by_name("latex")
    return highlight(text, lex, formatter)


def alternative_syntax_highlight(text):
    """Return syntax-highlighted text for LaTeX code using regex."""

    def replace_pivot_spaces(text):
        # TODO replace whole function with a regular expression substitution
        """Replace leading spaces to HTML &nbsp."""
        exit_condition = True
        while exit_condition:
            exit_condition = False
            for i in range(len(text)-1):
                if text[i] == '\n' and text[i+1] == ' ':
                    numspace = 1
                    while text[i+1+numspace] == ' ':
                        numspace += 1
                    text = text[:i+1] + numspace*'&nbsp;' + text[i+1+numspace:]
                    exit_condition = True
        return text

    match = re.findall(r'[^\\](%.*)\n', text)  # find all inline comments
    does_not_contain_this_word = ''
    for i in range(5, 10000):  # find a word not present in the text
        if text.find(i * 'a') == -1:
            does_not_contain_this_word = i * 'a'
            break
    for i, result in enumerate(match):
        # replace and index the inline comments in the text
        text = text.replace(result, str(i) + does_not_contain_this_word)
    # highlight begin-end environments
    text = re.sub(r'(\\begin{.+}|\\end{.*})',
                  r'<span style="color:DarkCyan;font-weight:bold">\1</span>', text)
    # highlight commands starting with "\"
    text = re.sub(r'(?!\\begin|\\end)(\\\w+)',
                  r'<span style="color:brown;font-weight:bold">\1</span>', text)
    # highlight %\, \\, \#
    text = re.sub(r'(\\%|\\\\|\\#)',
                  r'<span style="color:brown;font-weight:bold">\1</span>', text)
    # highlight numbers
    text = re.sub(r'(\W)(\d*\.?\d+)(\W)',
                  r'\1<span style="color:green">\2</span>\3', text)
    text = replace_pivot_spaces(text)
    text = text.replace('\n', '<br>')
    for i, result in enumerate(match):
        # load back the inline comments and add syntax highlight
        text = text.replace(str(i) + does_not_contain_this_word,
                            f'<span style="color:blue;font-weight:bold">{result}</span>')
    return text


def syntax_highlight(method, text):
    """Highlight syntax either with or without Pygments."""
    method = '' if method[0] == '$' else method[1:]
    if not method:
        return alternative_syntax_highlight(text)
    try:
        return pygments_syntax_highlight(method, text)
    except ImportError:
        return alternative_syntax_highlight(text)
StarcoderdataPython
1838556
from selenium import webdriver
import time


class JdSpider:
    def __init__(self):
        self.driver = webdriver.Chrome()
        self.driver.get(url='https://www.jd.com/')
        self.driver.find_element_by_xpath('//*[@id="key"]').send_keys("<PASSWORD>")
        self.driver.find_element_by_xpath('//*[@id="search"]/div/div[2]/button').click()
        time.sleep(2)

    def get_one_page(self):
        """Fetch the product data on the current page."""
        self.driver.execute_script(
            'window.scrollTo(0,document.body.scrollHeight)'
        )
        # Always leave some time for the page elements to load
        time.sleep(2)
        # Extract the data
        li_list = self.driver.find_elements_by_xpath('//*[@id="J_goodsList"]/ul/li')
        for li in li_list:
            print(li.text)
            print('*' * 50)

    def crawl(self):
        while True:
            self.get_one_page()
            if self.driver.page_source.find("pn-next disabled") == -1:
                self.driver.find_element_by_xpath('//*[@id="J_bottomPage"]/span[1]/a[9]').click()
                time.sleep(1)
            else:
                self.driver.quit()
                break


if __name__ == '__main__':
    spider = JdSpider()
    spider.crawl()
StarcoderdataPython
22477
#!C:\Users\stpny\Downloads\grasp_public-master\grasp_public-master\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'imageio==2.5.0','console_scripts','imageio_remove_bin'
__requires__ = 'imageio==2.5.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('imageio==2.5.0', 'console_scripts', 'imageio_remove_bin')()
    )
StarcoderdataPython
8198582
<filename>src/run.py
import tempfile
from subprocess import check_output, run


def run_script(cwd, script, return_output=False):
    output = ""
    print(f"Running script:\n{script}")
    with tempfile.NamedTemporaryFile() as f:
        f.write(script.encode("utf-8"))
        f.seek(0)
        if return_output:
            output = (
                check_output(["/bin/bash", "-euo", "pipefail", f.name], cwd=cwd)
                .decode("utf-8")
                .strip()
            )
        else:
            run(["/bin/bash", "-euo", "pipefail", f.name], cwd=cwd, check=True)
    if return_output:
        print(f"Returning output:\n{output}")
    return output
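A small usage sketch for `run_script`; the working directory and script body here are illustrative, not from the original module:

```python
# Capture the output of a short shell script run under bash -euo pipefail.
out = run_script("/tmp", "echo hello", return_output=True)
assert out == "hello"

# Or run for side effects only; a non-zero exit raises CalledProcessError.
run_script("/tmp", "mkdir -p demo_dir")
```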
StarcoderdataPython
146426
<reponame>applejenny66/snoopy<filename>utils.py
# utils.py

import numpy as np
import cv2
import os


def clearall():
    import shutil
    shutil.rmtree('./test')
    os.mkdir('./test')


def blankarray(shape):
    array = np.zeros(shape)
    for x in range(0, shape[0]):
        for y in range(0, shape[1]):
            array[x, y, 0] = array[x, y, 1] = array[x, y, 2] = 255
    return (array)
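`blankarray` fills a white image pixel by pixel; for 3-channel shapes the same result can be produced without Python loops. A vectorized sketch (an alternative, not part of the original file):

```python
import numpy as np

def blankarray_fast(shape):
    # Equivalent to blankarray for (H, W, 3) shapes: every channel set to 255.
    return np.full(shape, 255, dtype=float)
```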
StarcoderdataPython
11302519
import fs.path

from .utils import Docs


class TestRegression(Docs):
    def setUp(self):
        super(TestRegression, self).setUp()
        self.base_folder = fs.path.join("tests", "regression_tests")

    def test_helper_and_partial(self):
        expected = (
            "<h1>People</h1>"
            + "<ul><li>Bill 100</li><li>Bob 90</li><li>Mark 25</li></ul>"
        )
        folder = "level-11-helpers-and-partials"
        self.run_moban(
            [
                "moban",
                "-pd",
                "helper_and_partial",
                "-c",
                "data.json",
                "--template-type",
                "hbs",
                '{{>header}}{{#list people}}{{name}} {{age}}{{/list}}',
            ],
            folder,
            [("moban.output", expected)],
        )
StarcoderdataPython
11356631
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import range
from builtins import super
import mock
import string
import unittest
from parameterized import parameterized
import random
import json
from pprint import pprint
from bhive import Hive, exceptions
from bhive.amount import Amount
from bhive.memo import Memo
from bhive.version import version as bsteem_version
from bhive.wallet import Wallet
from bhive.witness import Witness
from bhive.account import Account
from bhivegraphenebase.account import PrivateKey
from bhive.instance import set_shared_hive_instance
from bhive.nodelist import NodeList
from bhive.hiveconnect import HiveConnect

# Py3 compatibility
import sys

core_unit = "STM"


class Testcases(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        nodelist = NodeList()
        nodelist.update_nodes(hive_instance=Hive(node=nodelist.get_nodes(exclude_limited=False), num_retries=10))
        cls.bts = Hive(
            node=nodelist.get_nodes(exclude_limited=True),
            nobroadcast=True,
            unsigned=True,
            data_refresh_time_seconds=900,
            num_retries=10)
        cls.account = Account("test", full=True, hive_instance=cls.bts)

    def test_transfer(self):
        bts = self.bts
        acc = self.account
        acc.hive.txbuffer.clear()
        tx = acc.transfer(
            "test1", 1.000, "HIVE", memo="test")
        sc2 = HiveConnect(hive_instance=bts)
        url = sc2.url_from_tx(tx)
        url_test = 'https://hiveconnect.com/sign/transfer?from=test&to=test1&amount=1.000+HIVE&memo=test'
        self.assertEqual(len(url), len(url_test))
        self.assertEqual(len(url.split('?')), 2)
        self.assertEqual(url.split('?')[0], url_test.split('?')[0])

        url_parts = (url.split('?')[1]).split('&')
        url_test_parts = (url_test.split('?')[1]).split('&')
        self.assertEqual(len(url_parts), 4)
        self.assertEqual(len(list(set(url_parts).intersection(set(url_test_parts)))), 4)

    def test_login_url(self):
        bts = self.bts
        sc2 = HiveConnect(hive_instance=bts)
        url = sc2.get_login_url("localhost", scope="login,vote")
        url_test = 'https://hiveconnect.com/oauth2/authorize?client_id=None&redirect_uri=localhost&scope=login,vote'
        self.assertEqual(len(url), len(url_test))
        self.assertEqual(len(url.split('?')), 2)
        self.assertEqual(url.split('?')[0], url_test.split('?')[0])

        url_parts = (url.split('?')[1]).split('&')
        url_test_parts = (url_test.split('?')[1]).split('&')
        self.assertEqual(len(url_parts), 3)
        self.assertEqual(len(list(set(url_parts).intersection(set(url_test_parts)))), 3)
StarcoderdataPython
3451101
# ==============================================================================
# Copyright (c) 2018, Yamagishi Laboratory, National Institute of Informatics
# Author: <NAME> (<EMAIL>)
# All rights reserved.
# ==============================================================================
"""  """

import tensorflow as tf


class MultiSpeakerPreNet(tf.layers.Layer):

    def __init__(self, out_units, speaker_embed, is_training, drop_rate=0.5,
                 trainable=True, name=None, dtype=None, **kwargs):
        super(MultiSpeakerPreNet, self).__init__(trainable=trainable, name=name, dtype=dtype, **kwargs)
        self.out_units = out_units
        self.drop_rate = drop_rate
        self.is_training = is_training
        self.dense0 = tf.layers.Dense(out_units, activation=tf.nn.relu, dtype=dtype)
        self.dense = tf.layers.Dense(out_units, activation=tf.nn.relu, dtype=dtype)
        self.speaker_projection = tf.layers.Dense(out_units, activation=tf.nn.softsign, dtype=dtype)
        self.speaker_embed = speaker_embed

    def build(self, _):
        self.built = True

    def call(self, inputs, **kwargs):
        dense0 = self.dense0(inputs)
        dense0 += self.speaker_projection(self.speaker_embed)
        dense = self.dense(dense0)
        dropout = tf.layers.dropout(dense, rate=self.drop_rate, training=self.is_training)
        return dropout

    def compute_output_shape(self, input_shape):
        return self.dense.compute_output_shape(input_shape)
StarcoderdataPython
8128161
# encoding: utf-8

from flask import Blueprint

import ckan.plugins as p
import ckan.plugins.toolkit as tk


def fancy_route(package_type: str):
    return u'Hello, {}'.format(package_type)


def fancy_new_route(package_type: str):
    return u'Hello, new {}'.format(package_type)


def fancy_resource_route(package_type: str, id: str):
    return u'Hello, {}:{}'.format(package_type, id)


class ExampleIDatasetFormPlugin(p.SingletonPlugin, tk.DefaultDatasetForm):
    p.implements(p.IDatasetForm)

    def is_fallback(self):
        return False

    def package_types(self):
        return [u'fancy_type']

    def prepare_dataset_blueprint(self, package_type: str, bp: Blueprint):
        bp.add_url_rule(u'/fancy-route', view_func=fancy_route)
        bp.add_url_rule(u'/new', view_func=fancy_new_route)
        return bp

    def prepare_resource_blueprint(self, package_type: str, bp: Blueprint):
        bp.add_url_rule(u'/new', view_func=fancy_resource_route)
        return bp
StarcoderdataPython
12800276
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from scipy import fftpack
from scipy import signal
import os
# import soundfile as sf
#import pyAudioAnalysis
#module to output the sound
from playsound import playsound
#metadata is a python file which contains a dictionary of all the speakers and their details
# import metadata

"""
lets assume the users are tagged the following indices:
Speaker 0 : jackson
Speaker 1 : nicolas
Speaker 2 : theo
Speaker 3 : yweweler
"""

#length of every sample is 1 second
sample_length = 1  #in seconds
samples = []
sample_rate = []
#name of the folder containing the samples
dataset_folder = "recordings"
current_dir = os.listdir()
main_dir = os.getcwd()
os.chdir(current_dir[current_dir.index(dataset_folder)])
sample_dir = os.getcwd()
all_samples = os.listdir()
# all_samples.sort()
print((all_samples[2]))


class Feature_Extraction:
    """
    this class extracts the features from the input dataset,
    A list of probable features are:
    1) Compactness.
    2) Magnitude spectrum.
    3) Mel-frequency cepstral coefficients.
    4) Pitch.
    5) Power Spectrum.
    6) RMS.
    7) Rhythm.
    8) Spectral Centroid.
    9) Spectral Flux.
    10) Spectral RollOff Point.
    11) Spectral Variability.
    12) Zero Crossings
    """

    def __init__(self, sample, mutiple=False):
        self.sample = sample
        # assign the parameter instead of hard-coding False
        self.mutiple = mutiple

    def ZCR(self):
        ZCR = []
        for i in range(len(self.sample)):
            #for zero crossing it is necessary that the data points are centered around a mean value
            # self.sample[i] = self.sample[i] - self.sample[i].mean()
            pos = self.sample[i] > 0
            npos = ~pos
            ZCR.append(len(((pos[:-1] & npos[1:]) | (npos[:-1] & pos[1:])).nonzero()[0]))
        return np.array(ZCR)
        #the for loop for zcr calculation takes about 342 milliseconds more
        #for a single input value to compute than the stackoverflow one
        # ZCR = 0
        # for i in range(1, len(sample)):
        #     prev_sign = np.signbit(sample[i-1])
        #     cur_sign = np.signbit(sample[i])
        #     if (cur_sign != prev_sign):
        #         ZCR += 1

    def spect(self):
        freq = []
        time = []
        power = []
        for i in range(len(self.sample)):
            f, t, p = signal.spectrogram(self.sample[i], fs=8000, window='blackman')  #works good
            freq.append(list(f))
            time.append(t)
            power.append(p)
        return np.array(freq), np.array(time), np.array(power)

    def plot_spectogram(self):
        # select a random file from the dataset
        random = np.random.randint(0, 2000)
        freq, t, power = signal.spectrogram(self.sample[random], fs=8000, window='blackman')
        plt.figure(1)
        plt.subplot(311)
        plt.plot(self.sample[random])
        plt.subplot(312)
        plt.specgram(self.sample[random], Fs=8000)
        plt.colorbar()
        plt.subplot(313)
        plt.pcolormesh(t, freq, power)
        pass

    def print_ZCR(self):
        print(self.ZCR())


def extract_labels(all_samples):
    """
    this function extracts the labels and speakers from the dataset
    """
    labels = []
    speakers = []
    print("Extracting Labels")
    for i in range(len(all_samples)):
        temp = all_samples[i]
        temp = (temp[0:-3].split("_"))
        labels.append(temp[0])
        speakers.append(temp[1])
        if i % 16 == 0:
            print("-", end="")
    print("\nLabels Extracted\n")
    return np.array(labels), np.array(speakers)


def import_data(all_samples):
    """
    this function imports all the wave files in the dataset
    """
    samples = []
    sample_rate = []
    print("Loading Dataset")
    for i in range(len(all_samples)):
        s_len, s = wavfile.read(all_samples[i])
        samples.append(s)
        sample_rate.append(s_len)
        if i % 16 == 0:
            print("-", end="")
    print("\nDataset Loaded\n")
    return np.array(samples), sample_rate


def normalize_data(samples):
    print("Normalizing Data ")
    for i in range(len(samples)):
        samples[i] = (samples[i] - samples[i].mean()) / samples[i].std()
        if i % 16 == 0:
            print("-", end="")
    print("\nData Normalized\n")
    return samples


def extract_features(sample):
    # sample = np.array(sample)
    sample = sample - sample.mean()
    # sample = sample[sample!=0]
    #code from stackoverflow
    # pos = sample>0
    # npos = ~pos
    # return len(((pos[:-1] & npos[1:]) | (npos[:-1] & pos[1:])).nonzero()[0])
    # return ZCR
    return signal.spectrogram(sample, fs=8000, window='blackman')  #works good
    # pass


def visualize_data(sample, mutiple="False"):
    """
    one or multiple samples can be given as input to this function;
    it makes the figures accordingly.
    set mutiple to true if multiple samples are passed
    """
    for i in range(sample.shape[0]):
        plt.figure(i)
        plt.subplot(411)
    pass


def zero_padding(samples):
    """
    this function pads the samples to make every sample an equal length;
    it cuts off excess values and adds zeros to the insufficient ones.
    making the length a power of 2 makes the calculation of the fft
    faster and more convenient
    """
    for i in range(len(samples)):
        length = len(samples[i])
        diff = int(abs(4096-length) / 2)
        diff = abs(4096-length)
        pad0 = int(diff/2)
        pad1 = diff-pad0
        if (length == 4096):
            continue
        elif (length < 4096):
            samples[i] = np.pad(samples[i], (pad0, pad1))
        else:
            #chopping the signals with a higher number of data points
            samples[i] = samples[i][pad0:-pad1]
    return samples


def make_dataframe(all_samples):
    """
    """
    samples, sample_rate = import_data(all_samples)
    labels, speakers = extract_labels(all_samples)
    # samples = zero_padding(samples)
    samples = zero_padding(samples)
    #putting the parsed data into a dataframe
    data = np.transpose(np.array([samples, sample_rate, labels, speakers]))
    data = pd.DataFrame(data, columns=['Audio_Data', 'Sample_Rate', 'Labels', 'Speakers'])
    #exploring the formed data frame
    # print(data.head(10))
    #saving the dataframe into a file (excel file, html table file)
    # data.to_excel(main_dir+"/parsed_data.xlsx")
    # data.to_html(main_dir+"/parsed_data.html")
    return data


def train_test_split(samples):
    pass


samples, sample_rate = import_data(all_samples)
labels, speakers = extract_labels(all_samples)
# samples = zero_padding(samples)
samples = zero_padding(samples)
samples = normalize_data(samples)
labels = labels.astype("uint8")

#putting the parsed data into a dataframe
data = np.transpose(np.array([samples, sample_rate, labels, speakers]))
data = pd.DataFrame(data, columns=['Audio_Data', 'Sample_Rate', 'Labels', 'Speakers'])

features = Feature_Extraction(samples)
freq, t, power = features.spect()
ZCR = features.ZCR()
print(freq[0].shape, t.shape, power.shape, ZCR.shape)
features.plot_spectogram()
print(type(freq[0]), type(t), type(power))
# print(freq[0:2])
# data["frequency"] = freq
# data.insert(2,"frequency",freq)
features_ZCR = pd.DataFrame(ZCR, columns=["ZCR"])
data = pd.concat([data, features_ZCR], axis=1)
# data.to_excel(main_dir+"/parsed_zcr.xlsx")
# data.to_html(main_dir+"/parsed_zcr.html")
print(data.info())
plt.figure(2)
plt.hist(data["ZCR"])
plt.show()
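To see what the vectorized zero-crossing count in `Feature_Extraction.ZCR` actually measures, here is a toy check on one period of a sine wave (a sketch, independent of the dataset above):

```python
import numpy as np

# One full sine period, sampled at 8 kHz: the waveform enters the positive
# half once and leaves it once, so the boolean-transition count is 2.
sig = np.sin(np.linspace(0, 2 * np.pi, 8000, endpoint=False))
pos = sig > 0
npos = ~pos
zcr = len(((pos[:-1] & npos[1:]) | (npos[:-1] & pos[1:])).nonzero()[0])
print(zcr)  # -> 2
```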
StarcoderdataPython
351334
x = int(input())
d = [0] * 1000001
for i in range(2, x + 1):
    d[i] = d[i - 1] + 1
    if i % 2 == 0:
        d[i] = min(d[i], d[i // 2] + 1)
    if i % 3 == 0:
        d[i] = min(d[i], d[i // 3] + 1)
print(d[x])
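# Added usage sketch (not part of the original solution): d[i] holds the
# minimum number of operations (subtract 1, divide by 2, divide by 3) needed
# to reduce i to 1. For example, with x = 10:
#   d[10] = min(d[9] + 1, d[5] + 1) = min(2 + 1, 3 + 1) = 3
# corresponding to the path 10 -> 9 -> 3 -> 1.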
StarcoderdataPython
6554118
<filename>custom_functions.py<gh_stars>0
# This module contains custom functions for data wrangling
import pandas as pd


def remove_column_substr(df, substr):
    '''
    remove substring from a column name in a pandas dataframe
    :param df: pandas dataframe
    :param substr: substring to remove
    :return: pandas dataframe with new column names
    '''
    df.columns = df.columns.str.replace(substr, '')
    return df
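# Added usage sketch (the column names here are hypothetical, not from the
# original module):
#   df = pd.DataFrame({"sales_2020": [1], "sales_2021": [2]})
#   df = remove_column_substr(df, "sales_")
#   list(df.columns)  # -> ['2020', '2021']
# Note that str.replace treats substr as a regex in older pandas versions, so
# special characters may need escaping.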
StarcoderdataPython
6591958
#!/usr/bin/env python
from rftool.rf import get_args, main

if __name__ == '__main__':
    args = get_args()
    main(args.file, args.cv)
StarcoderdataPython
11302325
from hashlib import sha256

# genesis block
hash = sha256(sha256(
    bytearray.fromhex(
        "0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a29ab5f49ffff001d1dac2b7c"
    )).digest()).digest().hex()
print(hash)

# block from test_data.txt
hash = sha256(sha256(
    bytearray.fromhex(
        "010000007CC5C24C39FD05792C2865999E25FE87E066A37D7A08DC351F1400000000000049B6276425052B2FC24BFB784ECA55369B2B9E8B4180C869778CACB2BED171159395E64D2194261A0E33337A"
    )).digest()).digest().hex()
print(hash)
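# Added note (a sketch, not in the original script): Bitcoin displays block
# hashes byte-reversed relative to the raw double-SHA256 digest of the 80-byte
# header. To obtain the familiar leading-zeros form, reverse the digest bytes:
#   digest = sha256(sha256(bytearray.fromhex(header_hex)).digest()).digest()
#   print(digest[::-1].hex())  # genesis block: 000000000019d6689c085ae1...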
StarcoderdataPython
1683176
<filename>sampling/potential_LAMMPS.py<gh_stars>1-10
##############################################################################
# Python-force-field-parameterization-workflow:
# A Python Library for performing force-field optimization
#
# Authors: <NAME>, <NAME>
#
# Python-force-field-parameterization-workflow is free software;
# you can redistribute it and/or modify it under the terms of the
# MIT License
# You should have received a copy of the MIT License along with the package.
##############################################################################
"""
This module contains a class to invoke LAMMPS optimization.

The executable "optimize" is invoked from the command-line interface.
It will call "main()", which then calls the function "optimize_main".
Some other command-line programs related to this package can be developed,
and invoked in an analogous fashion.

The "optimize_main" is composed of several instances from different modules,
which are laid out in a procedure-oriented fashion so that the user can
easily understand the whole workflow. This should make the customization
more transparent.
"""
# Standard library:
import logging
import os
import itertools
import sys

# Local library:

# Third-party libraries:
# GMSO
import mbuild as mb
import gmso
from gmso.external.convert_mbuild import from_mbuild
from gmso.formats.top import write_top
from gmso.formats import write_lammpsdata
from unyt import unyt_quantity

# This module defines the force field output format for LAMMPS

# Customized pair_style functions return:
# A dictionary with the filename as "key", and its content as "values"


def choose_lammps_potential(ptype, force_field_parameters):
    potential_logger = logging.getLogger(__name__)
    potential_logger.debug("function:choose_lammps_potential "
                           "entered successfully")
    # a list of available LAMMPS potential functionals:
    potential_type = {
        "tersoff": __pair_style_tersoff,
        "tersoff/table": __pair_style_tersoff,
        "stillinger_weber": __pair_style_sw,
        "lj/cut": __pair_style_lj_cut,
        "buck/coul/long": __pair_style_buck_coul_long,
        "lj/smooth/linear": __pair_style_lj_smooth_linear_GMSO
    }

    # raise the errors and exit the program if the
    # requested potential is not defined
    if (ptype not in potential_type.keys()):
        potential_logger.error(
            "ERROR: LAMMPS potential type: "
            " %s is invalid: \n" % ptype +
            "Solutions: \n" +
            "1. Check the spelling\n" +
            "2. Define a customized force field "
            "named %s in potential.py\n" % ptype +
            "Currently available potential types are: " +
            " , ".join(pt for pt in potential_type.keys()) + "\n")
        sys.exit("Error messages found in the log file")

    # choose the chosen output force field
    chosen_potential = potential_type[ptype]
    output_force_field_dict = chosen_potential(ptype, force_field_parameters)
    potential_logger.debug("function:choose_lammps_potential "
                           "returned successfully; Potential type: "
                           "%s is used ..." % ptype)
    return output_force_field_dict


def propagate_force_field(wk_folder_tple, output_force_field_dict):
    potential_logger = logging.getLogger(__name__)
    potential_logger.debug("function: propagate_force_field "
                           "entered successfully !")
    for every_type in wk_folder_tple:
        for each_folder in every_type:
            for output_file in output_force_field_dict:
                output_content = output_force_field_dict[output_file]
                if (len(output_content) > 1 and
                        output_content[0] == "TurnOnGMSO"):
                    pass
                    # write_lammpsdata = output_content[1]
                    # filename = os.path.join(each_folder, output_file)
                    # write_lammpsdata(output_content[2], filename, output_content[-1])
                else:
                    filename = os.path.join(each_folder, output_file)
                    with open(filename, "w") as output:
                        for line in output_content:
                            output.write(line)
    potential_logger.debug("function:propagate_force_field "
                           "returned successfully; force-field parameters ")
    return None


def set_lj_smooth_linear_GMSO(top, force_field_parameters):
    ff = gmso.ForceField('ar.xml')
    ar_type = ff.atom_types['Ar']
    ar_type.parameters["sigma"] = unyt_quantity(force_field_parameters[1], 'angstrom')
    ar_type.parameters["epsilon"] = unyt_quantity(force_field_parameters[0], "6.947694845464e-21*J")
    for site in top.sites:
        site.atom_type = ar_type
    top.update_topology()
    return top


def __pair_style_lj_smooth_linear_GMSO(ptype, force_field_parameters):
    # output dictionary:
    force_field_dict = {}
    # Generate a small box of Argon atoms using mBuild
    ar = mb.Compound(name='Ar')
    # (1.3954 g/cm^3 / 39.948 amu) * (3 nm)^3
    packed_system = mb.fill_box(
        compound=ar,
        n_compounds=512,
        box=mb.Box([4.22187, 4.22187, 4.22187]),
    )
    # Convert system to a backend object
    top = from_mbuild(packed_system)
    lamp_data_name = "ar.lmp"
    force_field_dict[lamp_data_name] = ("TurnOnGMSO",
                                        write_lammpsdata,
                                        top,
                                        "atomic",
                                        set_lj_smooth_linear_GMSO)
    return force_field_dict


def __pair_style_lj_cut(ptype, force_field_parameters):
    # output dictionary:
    force_field_dict = {}
    # define the filename
    include_file = "force_field_parameters"
    # define the command for each filename
    lammps_cmd_comment = "#pair style: %s is used \n" % ptype
    lammps_cmd_1 = "pair_style lj/cut %.3f" % force_field_parameters[0]
    lammps_cmd_2 = "pair_coeff * * %.9f %.9f" % (force_field_parameters[1],
                                                 force_field_parameters[2])
    lammps_cmd_3 = "pair_modify tail yes"
    force_field_dict[include_file] = (lammps_cmd_comment,
                                      lammps_cmd_1,
                                      lammps_cmd_2,
                                      lammps_cmd_3)
    return force_field_dict


def __pair_style_sw(ptype, force_field_parameters):
    # output dictionary:
    force_field_dict = {}
    # define the filename
    potential_file = "mW.sw"
    # define the filename
    include_file = "force_field_parameters"
    lammps_cmd_comment = "#pair style: %s is used \n" % ptype
    element = "WT"
    command1 = "pair_style sw\n"
    command2 = ("pair_coeff" + " " + "* *" + " " + potential_file +
                " " + element + "\n")
    pair_command = ((element + " ") * 3 +
                    " ".join(str(para) for para in force_field_parameters))
    force_field_dict[include_file] = (lammps_cmd_comment, command1, command2)
    force_field_dict[potential_file] = (pair_command)
    return force_field_dict


def __pair_style_tersoff(ptype, force_field_parameters):
    # output dictionary:
    force_field_dict = {}
    # define the filename
    potential_file = "WT_ML-BOP.tersoff"
    # define the filename
    include_file = "force_field_parameters"
    lammps_cmd_comment = "# pair style: %s is used \n" % ptype
    element = "WT"
    if ("table" in ptype):
        command1 = "pair_style tersoff/table\n"
    else:
        command1 = "pair_style tersoff\n"
    command2 = "pair_coeff" + " * * " + potential_file + " " + element
    pair_command = ((element + " ") * 3 +
                    " ".join(str(para) for para in force_field_parameters))
    force_field_dict[include_file] = (lammps_cmd_comment, command1, command2)
    force_field_dict[potential_file] = (pair_command)
    return force_field_dict


def __pair_style_buck_coul_long(ptype, force_field_parameters):
    # output dictionary:
    force_field_dict = {}
    # define the filename
    include_file = "force_field_parameters"
    # comment of included potential file
    lammps_cmd_comment = "#pair style: %s is used \n" % ptype
    # lammps commands:
    lammps_command_1 = ("pair_style buck/coul/long %.3f"
                        % force_field_parameters[0])
    lammps_command_2 = ("pair_coeff 1 1 %.5f %.5f %.5f"
                        % (force_field_parameters[1],
                           force_field_parameters[2],
                           force_field_parameters[3]))
    lammps_command_3 = ("pair_coeff 2 2 %.5f %.5f %.5f"
                        % (force_field_parameters[4],
                           force_field_parameters[5],
                           force_field_parameters[6]))
    lammps_command_4 = ("pair_coeff 1 2 %.5f %.5f %.5f"
                        % (force_field_parameters[7],
                           force_field_parameters[8],
                           force_field_parameters[9]))
    lammps_command_5 = "kspace_style pppm 1.0e-4"
    force_field_dict[include_file] = (lammps_cmd_comment,
                                      lammps_command_1,
                                      lammps_command_2,
                                      lammps_command_3,
                                      lammps_command_4,
                                      lammps_command_5)
    return force_field_dict
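# Added usage sketch (the parameter values are made up for illustration):
#   ff_dict = choose_lammps_potential("lj/cut", [10.0, 0.2381, 3.405])
#   # ff_dict["force_field_parameters"] then holds the LAMMPS commands
#   # ("#pair style: lj/cut is used \n", "pair_style lj/cut 10.000",
#   #  "pair_coeff * * 0.238100000 3.405000000", "pair_modify tail yes"),
#   # and propagate_force_field(((folder,),), ff_dict) writes them into
#   # each working folder.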
StarcoderdataPython
3467485
(dog+cat).cat.print(10)
f"hi{dog}cat"
StarcoderdataPython
6609124
<gh_stars>100-1000
import sys
import os
import io
from contextlib import contextmanager
from unittest import TestCase

from sqflint import parse_args, entry_point


@contextmanager
def captured_output():
    new_out, new_err = io.StringIO(), io.StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = new_out, new_err
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout, sys.stderr = old_out, old_err


class ParseCode(TestCase):

    def test_stdin(self):
        with captured_output() as (out, err):
            sys.stdin = io.StringIO()
            sys.stdin.write('hint _x')
            sys.stdin.seek(0)
            entry_point([])
        self.assertEqual(
            out.getvalue(),
            '[1,5]:warning:Local variable "_x" is not from this scope (not private)\n')

    def test_parser_error(self):
        with captured_output() as (out, err):
            sys.stdin = io.StringIO()
            sys.stdin.write('hint (_x')
            sys.stdin.seek(0)
            entry_point([])
        self.assertEqual(
            out.getvalue(),
            '[1,5]:error:Parenthesis "(" not closed\n')

    def test_directory(self):
        args = parse_args(['--directory', 'tests/test_dir'])
        self.assertEqual('tests/test_dir', args.directory)

    def test_directory_invalid(self):
        with self.assertRaises(Exception) as context:
            parse_args(['--directory', 'i_dont_exist'])
        self.assertTrue('is not a valid path' in str(context.exception))

    def test_exclude(self):
        args = parse_args(['--exclude', 'tests/test_dir',
                           '--exit', 'w',
                           '--exclude', 'tests/test_dir/test.sqf'])
        self.assertEqual(['tests/test_dir', 'tests/test_dir/test.sqf'], args.exclude)

    def test_exit_code(self):
        with captured_output():
            exit_code = entry_point(['tests/test_dir/test.sqf'])
        self.assertEqual(exit_code, 0)

        # there are no errors, only a warning
        with captured_output():
            exit_code = entry_point(['tests/test_dir/test.sqf', '-e', 'e'])
        self.assertEqual(exit_code, 0)

        with captured_output():
            exit_code = entry_point(['tests/test_dir/test.sqf', '-e', 'w'])
        self.assertEqual(exit_code, 1)

    def test_filename_run(self):
        with captured_output() as (out, err):
            entry_point(['tests/test_dir/test.sqf'])
        self.assertEqual(out.getvalue(),
                         '[1,5]:warning:Local variable "_0" is not from this scope (not private)\n')

    def test_directory_run(self):
        with captured_output() as (out, err):
            entry_point(['--directory', 'tests/test_dir'])
        self.assertEqual(
            out.getvalue(),
            'test.sqf\n\t[1,5]:warning:Local variable "_0" is not from this scope (not private)\n'
            'test1.sqf\n\t[1,5]:warning:Local variable "_1" is not from this scope (not private)\n'
            'subdir/test2.sqf\n\t[1,5]:warning:Local variable "_2" is not from this scope (not private)\n'
            'subdir/test3.sqf\n\t[1,5]:warning:Local variable "_3" is not from this scope (not private)\n')

    def test_directory_run_with_exclusion(self):
        with captured_output() as (out, err):
            entry_point(['--directory', 'tests/test_dir', '--exclude', 'subdir', '-x', 'test.\.sqf'])
        self.assertEqual(
            out.getvalue(),
            'test.sqf\n\t[1,5]:warning:Local variable "_0" is not from this scope (not private)\n'
            'tests/test_dir/test1.sqf EXCLUDED\n'
            'tests/test_dir/subdir EXCLUDED\n')

    def test_directory_run_to_file(self):
        entry_point(['--directory', 'tests/test_dir', '-o', 'tests/result.txt'])
        with open('tests/result.txt') as f:
            result = f.read()
        try:
            os.remove('tests/result.txt')
        except OSError:
            pass
        self.assertEqual(
            result,
            'test.sqf\n\t[1,5]:warning:Local variable "_0" is not from this scope (not private)\n'
            'test1.sqf\n\t[1,5]:warning:Local variable "_1" is not from this scope (not private)\n'
            'subdir/test2.sqf\n\t[1,5]:warning:Local variable "_2" is not from this scope (not private)\n'
            'subdir/test3.sqf\n\t[1,5]:warning:Local variable "_3" is not from this scope (not private)\n')
StarcoderdataPython
3214224
<reponame>zeou1/maggot_models<filename>notebooks/114.0-BDP-flow-revisited.py
# %% [markdown]
# ##
from src.hierarchy import signal_flow
from src.data import load_metagraph
from src.visualization import matrixplot
from src.visualization import CLASS_COLOR_DICT
from src.io import savefig
import os
from src.graph import preprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

FNAME = os.path.basename(__file__)[:-3]
print(FNAME)


def stashfig(name, **kws):
    savefig(name, foldername=FNAME, save_on=True, **kws)


VERSION = "2020-03-09"
print(f"Using version {VERSION}")

graph_types = ["G", "Gad", "Gaa", "Gdd", "Gda"]

# load the data
graphs = []
for graph_type in graph_types:
    threshold = 0
    weight = "weight"
    mg = load_metagraph(graph_type, VERSION)
    mg = preprocess(
        mg,
        threshold=threshold,
        sym_threshold=False,
        remove_pdiff=True,
        binarize=False,
        weight=weight,
    )
    print(
        f"Preprocessed graph {graph_type} with threshold={threshold}, weight={weight}"
    )
    graphs.append(mg)

# %% [markdown]
# ## signal flow sort and plot
sns.set_context("talk", font_scale=1.25)

graph_sfs = []
for mg, graph_type in zip(graphs, graph_types):
    meta = mg.meta
    sf = signal_flow(mg.adj)
    meta["signal_flow"] = -sf
    graph_sfs.append(sf)
    fig, ax = plt.subplots(1, 1, figsize=(20, 20))
    matrixplot(
        mg.adj,
        ax=ax,
        col_meta=meta,
        row_meta=meta,
        col_item_order="signal_flow",
        row_item_order="signal_flow",
        col_colors="Merge Class",
        row_colors="Merge Class",
        col_palette=CLASS_COLOR_DICT,
        row_palette=CLASS_COLOR_DICT,
        plot_type="scattermap",
        sizes=(2.5, 5),
    )
    fig.suptitle(f"{graph_type}, signal flow sorted", y=0.91)
    stashfig(f"sf-sort-scattermap-{graph_type}")

# %% [markdown]
# ## plot the rank orders for each
from scipy.stats import rankdata

sfs = []
rank_sfs = []
for mg, name in zip(graphs, graph_types):
    sf = mg.meta["signal_flow"].copy()
    sf.name = name
    sfs.append(sf)
    rank_sf = rankdata(sf)
    rank_sf = pd.Series(index=sf.index, data=rank_sf, name=name)
    rank_sfs.append(rank_sf)

sf_df = pd.DataFrame(sfs).T
sns.pairplot(sf_df)

# %% [markdown]
# ##
rank_sf_df = pd.DataFrame(rank_sfs).T
rank_sf_df.loc[meta.index, "class"] = meta["Merge Class"]

pg = sns.PairGrid(
    rank_sf_df, vars=graph_types, hue="class", palette=CLASS_COLOR_DICT, corner=True
)
pg.map_offdiag(sns.scatterplot, s=5, alpha=0.5, linewidth=0)


def tweak(x, y, **kws):
    ax = plt.gca()
    if len(x) > 0:
        xmax = np.nanmax(x)
        xtop = ax.get_xlim()[-1]
        if xmax > xtop:
            ax.set_xlim([-1, xmax + 1])
    if len(y) > 0:
        ymax = np.nanmax(y)
        ytop = ax.get_ylim()[-1]
        if ymax > ytop:
            ax.set_ylim([-1, ymax + 1])
    ax.set_xticks([])
    ax.set_yticks([])


def remove_diag(x, **kws):
    ax = plt.gca()
    ax.axis("off")
    ax.set_xticks([])
    ax.set_yticks([])
    ax.spines["bottom"].set_visible(False)


def hide_current_axis(*args, **kwds):
    plt.gca().set_visible(False)


pg.map_offdiag(tweak)
pg.map_diag(remove_diag)
stashfig("rank-sf-pairs")

# %% [markdown]
# ## plot sorted by some kind of random walk thingy
StarcoderdataPython
1710803
<gh_stars>10-100
import smbus
from time import sleep

from skeleton import InputSkeleton


class InputDevice(InputSkeleton):
    """A driver for Adafruit-developed Raspberry Pi character LCD&button shields
    based on MCP23017, either Adafruit-made or Chinese-made.

    Tested on hardware compatible with the Adafruit schematic and working with
    Adafruit libraries, but not on genuine Adafruit hardware.
    """

    default_mapping = [
        "KEY_KPENTER",
        "KEY_RIGHT",
        "KEY_DOWN",
        "KEY_UP",
        "KEY_LEFT"]

    previous_data = 0

    def __init__(self, addr=0x20, bus=1, **kwargs):
        """Initialises the ``InputDevice`` object.

        Kwargs:

        * ``bus``: I2C bus number.
        * ``addr``: I2C address of the expander.
        """
        self.bus_num = bus
        self.bus = smbus.SMBus(self.bus_num)
        if type(addr) in [str, unicode]:
            addr = int(addr, 16)
        self.addr = addr
        self.init_expander()
        InputSkeleton.__init__(self, **kwargs)

    def init_expander(self):
        """Initialises the IO expander."""
        self.setMCPreg(0x00, 0x1F)
        self.setMCPreg(0x0C, 0x1F)

    def runner(self):
        """Polling loop (the only option on this shield, since the interrupt
        pin is not connected)."""
        button_states = []
        while not self.stop_flag:
            if self.enabled:
                data = (~self.readMCPreg(0x12) & 0x1F)
                if data != self.previous_data:
                    self.process_data(data)
                    self.previous_data = data
            sleep(0.01)

    def process_data(self, data):
        """Checks data received from the IO expander and classifies changes as
        either "button up" or "button down" events.
        On "button up", calls send_key with the corresponding button name from
        ``self.mapping``.
        """
        data_difference = data ^ self.previous_data
        changed_buttons = []
        for i in range(8):
            if data_difference & 1 << i:
                changed_buttons.append(i)
        for button_number in changed_buttons:
            if not data & 1 << button_number:
                self.send_key(self.mapping[button_number])

    def setMCPreg(self, reg, val):
        """Sets the MCP23017 register."""
        self.bus.write_byte_data(self.addr, reg, val)

    def readMCPreg(self, reg):
        """Reads the MCP23017 register."""
        return self.bus.read_byte_data(self.addr, reg)


if __name__ == "__main__":
    id = InputDevice(addr=0x20, threaded=False)
    id.runner()
StarcoderdataPython
3303416
<filename>src/im_task_webapp2/__init__.py
from im_task import _launch_task, get_taskroute

import webapp2
from google.appengine.ext import webapp


def get_webapp_url():
    return "%s/(.*)" % get_taskroute()


class TaskHandler(webapp.RequestHandler):
    def post(self, name):
        _launch_task(self.request.body, name, self.request.headers)


class TaskHandler2(webapp2.RequestHandler):
    def post(self, name):
        _launch_task(self.request.body, name, self.request.headers)


def addrouteforwebapp(routes):
    routes.append((get_webapp_url(), TaskHandler))


def addrouteforwebapp2(routes):
    routes.append((get_webapp_url(), TaskHandler2))
StarcoderdataPython
19045
<reponame>bbueno5000/BuildAnAIStartUpDemo
# renamed the package import so it no longer shadows the Flask app below
import app as app_package
import flask
import flask_debugtoolbar

app = flask.Flask(__name__)
app.config.from_object('app.config')
db = flask.ext.sqlalchemy.SQLAlchemy(app)
mail = flask.ext.mail.Mail(app)
app.config['DEBUG_TB_TEMPLATE_EDITOR_ENABLED'] = True
app.config['DEBUG_TB_PROFILER_ENABLED'] = True
toolbar = flask_debugtoolbar.DebugToolbarExtension(app)
bcrypt = flask.ext.bcrypt.Bcrypt(app)
app.register_blueprint(app_package.views.user.userbp)
login_manager = flask.ext.login.LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'userbp.signin'


@login_manager.user_loader
def load_user(email):
    """
    DOCSTRING
    """
    return app_package.models.User.query.filter(app_package.models.User.email == email).first()
StarcoderdataPython
8129396
<gh_stars>0
s = input('Enter string to get frequency of its words: ')
words = s.split()
d = {}
for word in words:
    if word not in d:
        d[word] = 1
    else:
        d[word] = d[word] + 1
print('Words and their frequencies are')
for i in d:
    print(i, d[i])
StarcoderdataPython
3368078
<reponame>Tobias2023/dolosse
"""
file: test_data.py
brief: File containing test data for the various Pixie16 test fixtures.
author: <NAME>
date: November 02, 2019
"""
from struct import pack


def pack_data(data, type):
    """
    Packs an iterable into a bytes like object
    :param data: The data that we'll pack into the bytes object.
    :param type: The data type that we'll pack into, e.g. unsigned int,
                 long int, double.
    :return: The bytes like object that we've packed from the list.
    """
    payload = b''
    for element in data:
        payload += pack(type, element)
    return payload


class Pixie16TestData:
    def __init__(self):
        pass

    @staticmethod
    def qdc(as_bytes=False):
        data = [123, 456, 789, 987, 654, 321, 147, 258]
        if as_bytes:
            return pack_data(data, 'I')
        return data

    @staticmethod
    def trace(as_bytes=False):
        data = [437, 436, 434, 434, 437, 437, 438, 435, 434, 438, 439, 437,
                438, 434, 435, 439, 438, 434, 434, 435, 437, 440, 439, 435,
                437, 439, 438, 435, 436, 436, 437, 439, 435, 433, 434, 436,
                439, 441, 436, 437, 439, 438, 438, 435, 434, 434, 438, 438,
                434, 434, 437, 440, 439, 438, 434, 436, 439, 439, 437, 436,
                434, 436, 438, 437, 436, 437, 440, 440, 439, 436, 435, 437,
                501, 1122, 2358, 3509, 3816, 3467, 2921, 2376, 1914, 1538,
                1252, 1043, 877, 750, 667, 619, 591, 563, 526, 458, 395, 403,
                452, 478, 492, 498, 494, 477, 460, 459, 462, 461, 460, 456,
                452, 452, 455, 453, 446, 441, 440, 444, 456, 459, 451, 450,
                447, 445, 449, 456, 456, 455]
        if as_bytes:
            return pack_data(data, "H")
        return data

    @staticmethod
    def external_timestamp(as_bytes=False):
        data = [987654321, 1596]
        if as_bytes:
            return pack_data(data, "I")
        return data

    @staticmethod
    def esums(as_bytes=False, decoded=False):
        data = [12, 13, 14, 1164725159]
        if as_bytes:
            return pack_data(data, "I")
        if decoded:
            data[3] = 3780.728271484375
        return data

    @staticmethod
    def header(freq=250, firmware=30474, as_bytes=False, decoded=False):
        if freq == 250 and firmware == 30474:
            data = [540717, 123456789, 26001, 2345]
            if decoded:
                data = {
                    'channel': 13,
                    'slot': 2,
                    'crate': 0,
                    'header_length': 4,
                    'event_length': 4,
                    'finish_code': 0,
                    'event_time_low': 123456789,
                    'event_time_high': 26001,
                    'cfd_fractional_time': 0,
                    'cfd_trigger_source_bit': 0,
                    'cfd_forced_trigger_bit': 0,
                    'energy': 2345,
                    'trace_length': 0,
                    'trace_out_of_range': 0
                }
            if as_bytes:
                return pack_data(data, 'I')
            return data

    @staticmethod
    def header_with_trace(freq=250, firmware=30474, as_bytes=False, decoded=False):
        if freq == 250 and firmware == 30474:
            data = [8667181, 123456789, 26001, 8128809]
            if decoded:
                data = {
                    'channel': 13,
                    'slot': 2,
                    'crate': 0,
                    'header_length': 4,
                    'event_length': 66,
                    'finish_code': 0,
                    'event_time_low': 123456789,
                    'event_time_high': 26001,
                    'cfd_fractional_time': 0,
                    'cfd_trigger_source_bit': 0,
                    'cfd_forced_trigger_bit': 0,
                    'energy': 2345,
                    'trace_length': 124,
                    'trace_out_of_range': 0
                }
            if as_bytes:
                return pack_data(data, 'I')
            return data
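# Added usage sketch (assumes a little-endian host, as on x86, since
# struct.pack uses native byte order with these format characters):
#   Pixie16TestData.qdc()               # -> [123, 456, 789, ...]
#   Pixie16TestData.qdc(as_bytes=True)  # -> b'{\x00\x00\x00\xc8\x01\x00\x00...'
#   pack_data([1], 'I')                 # -> b'\x01\x00\x00\x00'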
StarcoderdataPython
309009
"""gecasmo is a package for estimating click models""" from .GCM import GCM from .clickdefinitionreader import ClickDefinition
StarcoderdataPython
3498174
<reponame>sarangbhagwat/Bioindustrial-Park
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020, <NAME> <<EMAIL>>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
import biosteam as bst
from .. import PY37
from . import (utils,
               _process_settings,
               _chemicals,
               _system,
               _tea,
)

__all__ = [*utils.__all__,
           *_process_settings.__all__,
           *_chemicals.__all__,
           *_system.__all__,
           *_tea.__all__,
           'lipidcane_sys',
           'lipidcane_tea',
           'flowsheet',
]

from .utils import *
from ._process_settings import *
from ._chemicals import *
from ._system import *
from ._tea import *

_system_loaded = False
_chemicals_loaded = False


def load_chemicals():
    global chemicals, _chemicals_loaded
    chemicals = create_chemicals()
    _chemicals_loaded = True


def load(name):
    import biosteam as bst
    from biosteam import main_flowsheet as F, UnitGroup
    global lipidcane_sys, lipidcane_tea, specs, flowsheet, _system_loaded
    global unit_groups
    if not _chemicals_loaded:
        load_chemicals()
    flowsheet = bst.Flowsheet('lipidcane2g')
    F.set_flowsheet(flowsheet)
    bst.settings.set_thermo(chemicals)
    load_process_settings()
    dct = globals()
    u = flowsheet.unit
    s = flowsheet.stream
    if name == 'divided 1 and 2g front end oil separation':
        lipidcane_sys = create_lipidcane_to_biodiesel_and_ethanol_divided_1_and_2g_front_end_oil_separation()
        area_names = [
            'Feedstock handling',
            'Juicing',
            'Biod. prod.',
            'Conv. ferm.',
            'Pretreatment',
            'Cofementation',
            'Ethanol sep.',
            'Wastewater treatment',
            'Boiler turbogenerator',
            'Utilities',
            'Storage',
        ]
        bst.rename_unit(u.BT, 1000)
        bst.rename_units([u.FT, u.CWP, u.CIP_package, u.ADP, u.CT, u.PWC], 1100)
        bst.rename_units([i for i in lipidcane_sys.units if bst.is_storage_unit(i)], 1200)
    elif name == 'divided 1 and 2g hydrolyzate oil separation':
        area_names = [
            'Feedstock handling',
            'Juicing',
            'Biod. prod.',
            'Conv. ferm.',
            'Pretreatment',
            'Cofementation',
            'Ethanol sep.',
            'Wastewater treatment',
            'Boiler turbogenerator',
            'Utilities',
            'Storage',
        ]
        lipidcane_sys = create_lipidcane_to_biodiesel_and_ethanol_divided_1_and_2g_hydrolyzate_oil_separation()
        bst.rename_unit(u.BT, 1000)
        bst.rename_units([u.FT, u.CWP, u.CIP_package, u.ADP, u.CT, u.PWC], 1100)
        bst.rename_units([i for i in lipidcane_sys.units if bst.is_storage_unit(i)], 1200)
    elif name == '1g':
        lipidcane_sys = create_lipidcane_to_biodiesel_and_ethanol_1g()
    elif name == 'sugarcane 2g':
        global sugarcane_sys
        sugarcane_sys = create_sugarcane_to_ethanol_2g()
    else:
        raise NotImplementedError(name)
    unit_groups = UnitGroup.group_by_area(lipidcane_sys.units)
    if '2g' in name:
        # lipidcane_tea = create_tea(lipidcane_sys)
        # for i in lipidcane_tea.TEAs: i.operating_days = 200
        # lipidcane_sys.simulate()
        for i, j in zip(unit_groups, area_names):
            i.name = j
        partial_lipidcane_tea = create_tea(lipidcane_sys)
        partial_lipidcane_tea.operating_days = 200
        cornstover_sys = trim_to_cornstover_hot_water_cellulosic_ethanol(lipidcane_sys)
        partial_cornstover_tea = create_tea(cornstover_sys)
        partial_cornstover_tea.operating_days = 130
        lipidcane_tea = bst.AgileTEA([partial_lipidcane_tea, partial_cornstover_tea], 0.1)
        lipidcane_sys.simulate()
        lipidcane_tea.save_scenario(0)
        cornstover_sys.simulate()
        lipidcane_tea.save_scenario(1)
        dct.update(flowsheet.to_dict())
        lipidcane_tea.IRR = 0.10
        s.ethanol.price = lipidcane_tea.solve_price(s.ethanol)
    else:
        lipidcane_tea = create_tea(lipidcane_sys)
        for i in lipidcane_tea.TEAs:
            i.operating_days = 200
        try:
            lipidcane_sys.simulate()
        except Exception as e:
            raise e
        else:
            lipidcane_tea.IRR = 0.10
            s.ethanol.price = lipidcane_tea.solve_price(s.ethanol)
        finally:
            dct.update(flowsheet.to_dict())


def ethanol_price():
    return lipidcane_tea.solve_price(flowsheet.stream.ethanol) * 2.98668849
StarcoderdataPython
3495438
<filename>src/staff/urls.py
from django.urls import path
from django.shortcuts import render
from django.contrib.admin.views.decorators import staff_member_required

from .views import ConsistencyTestView


def home_view(request):
    return render(request, "staff/home.html", {})


urlpatterns = [
    path("", home_view, name="staff-home"),
    path(
        "consistency-test",
        staff_member_required(ConsistencyTestView.as_view()),
        name="staff-consistency_test",
    ),
]
StarcoderdataPython
178586
import networkx as nx
import numpy as np


def project3d(points, direction):
    """
    Projection function: projects a set of 3D points onto 2D.
    The y direction within the projection plane is the projection of the z axis
    (if the projection normal is the z axis itself, the y direction is the
    projection of the x axis instead).
    :param points: set of 3D points
    :param direction: normal vector (u, v, w) of the projection plane; the
        plane passes through the origin (0, 0, 0)
    """
    d = direction / np.linalg.norm(direction)
    y0 = np.array([1, 0, 0]) if np.array([0, 0, 1]).dot(d) == 1 else np.array([0, 0, 1])
    y1 = y0 - np.dot(d, y0) * d
    norm_y = y1 / np.linalg.norm(y1)
    x0 = np.cross(norm_y, d)
    norm_x = x0 / np.linalg.norm(x0)
    pos = {}
    for k in points:
        p0 = np.array(points[k])
        p1 = p0 - np.dot(d, p0) * d
        pos[k] = (np.dot(norm_y, p1), np.dot(norm_x, p1))
    return pos


class Graph:
    """
    A graph class wrapping nx.Graph.
    """

    def __init__(self, name, nx_graph=None):
        self.name = name
        self.info = {}
        self.g = nx.Graph(nx_graph)

    def __len__(self):
        return len(self.nodes())

    def __getitem__(self, node):
        return self.g[node]

    def copy(self):
        return Graph(self.name, self.g)

    def add_node(self, node, **attr):
        self.g.add_node(node, **attr)

    def add_edge(self, node1, node2, **attr):
        self.g.add_edge(node1, node2, **attr)

    def remove_node(self, node):
        self.g.remove_node(node)

    def nodes(self):
        return self.g.nodes

    def edges(self):
        return self.g.edges

    def degree(self, node=None):
        if node is not None:
            return self.g.degree[node]
        return self.g.degree

    def subgraph(self, nodes):
        return Graph(self.name, self.g.subgraph(nodes))

    def max_subgraph(self):
        mc = max(nx.connected_components(self.g), key=len)
        return Graph(self.name, self.g.subgraph(mc))

    def is_connected(self):
        return nx.is_connected(self.g)

    def get_node_attributes(self, attr):
        return nx.get_node_attributes(self.g, attr)

    def get_edge_attributes(self, attr):
        return nx.get_edge_attributes(self.g, attr)

    def draw_graph(self, axes, highlight=None, direction=(0, 0, 1), rotation=None):
        """Draw a 2D projection of the graph with matplotlib."""
        axes.clear()
        points = self.get_node_attributes('location')
        if rotation is not None:
            for k in points:
                points[k] = np.dot(points[k], rotation)
        pos = project3d(points, np.array(direction))
        label = self.get_node_attributes('label')
        edge_label = self.get_edge_attributes('dist')
        nx.draw_networkx(self.g, pos, alpha=0.7, with_labels=False, edge_color='.4', ax=axes)
        if highlight is not None:
            nx.draw_networkx_nodes(self.g, pos=pos, nodelist=highlight, node_color='r', ax=axes)
        nx.draw_networkx_labels(self.g, pos, labels=label, ax=axes)
        nx.draw_networkx_edge_labels(self.g, pos, edge_labels=edge_label, ax=axes)
        axes.axis('off')

    def draw_3d_graph(self, axes, highlight=None):
        """Draw the graph in 3D with matplotlib."""
        axes.clear()
        points = self.get_node_attributes('location')
        label = self.get_node_attributes('label')
        if highlight is None:
            highlight = []
        for key, value in points.items():
            c = 'blue'  # ordinary atoms are blue
            if key in highlight:
                c = 'red'  # highlighted atoms are shown in red
            xi, yi, zi = value
            axes.scatter(xi, yi, zi, label[key], c=c, alpha=0.9)
        for i, j in enumerate(self.edges()):
            # connect the coordinates of the two end atoms to draw the chemical bond
            x = np.array((points[j[0]][0], points[j[1]][0]))
            y = np.array((points[j[0]][1], points[j[1]][1]))
            z = np.array((points[j[0]][2], points[j[1]][2]))
            axes.plot(x, y, z, c='black', alpha=0.9)

    def number_of_edges(self, u, v):
        return self.g.number_of_edges(u, v)
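# Added usage sketch (the points are hypothetical, not from the original module):
#   points = {'a': (1.0, 0.0, 0.0), 'b': (0.0, 1.0, 0.0)}
#   pos = project3d(points, np.array([0.0, 0.0, 1.0]))
#   # pos maps each key to its 2D coordinates in the projection plane z = 0;
#   # Graph.draw_graph uses the same call to flatten node 'location'
#   # attributes before handing them to networkx's drawing functions.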
StarcoderdataPython
3303285
from ...utils.json_schema import method_signature_to_json_schema, JsonParameter, JsonSchemaDocument
from ..existing_table_handling import ExistingTableHandling
from records_mover.records.delimited.hints import Hints
from typing import Any, Dict, List, Callable
from ...mover_types import JsonSchema


HINT_PARAMETERS = [
    JsonParameter(hint_enum.value.hint_name,
                  hint_enum.value.json_schema_document(),
                  optional=True)
    for hint_enum in list(Hints)
]


def method_to_json_schema(method: Callable[..., Any]) -> JsonSchema:
    special_handling: Dict[str, List[JsonParameter]] = {
        'google_cloud_creds':
            [JsonParameter('gcp_creds_name', JsonSchemaDocument('string'))],
        'db_engine':
            [JsonParameter('db_name', JsonSchemaDocument('string'))],
        'records_format':
            ([JsonParameter('variant', JsonSchemaDocument('string'), optional=True)] +
             HINT_PARAMETERS),
        'initial_hints': HINT_PARAMETERS,
        'existing_table_handling':
            [JsonParameter('existing_table',
                           JsonSchemaDocument('string',
                                              enum=[k.lower()
                                                    for k in ExistingTableHandling.__members__],
                                              default='delete_and_overwrite'),
                           optional=True)],
    }

    parameters_to_ignore = [
        'self',
        # maybe in the future we can have people point us to a JSON
        # file for hints...
        'hints',
        # ...and maybe this particular way of passing in dicts:
        'add_user_perms_for',
        'add_group_perms_for',
        # oh yeah, and a way to pass a filename in for those that have
        # hand-crafted a records schema file yet don't have a full
        # records directory...
        'records_schema'
    ]

    return method_signature_to_json_schema(method,
                                           special_handling=special_handling,
                                           parameters_to_ignore=parameters_to_ignore)
StarcoderdataPython
1744921
<filename>python/exercicios mundo 1/ex004/ex006.py
# The same teacher from challenge 19 wants to draw the presentation order of
# the students' assignments. Write a program that reads the names of four
# students and shows the drawn order.
import random

n1 = str(input('primeiro aluno: '))
n2 = str(input('segundo aluno: '))
n3 = str(input('terceiro aluno: '))
n4 = str(input('quarto aluno: '))
lista = [n1, n2, n3, n4]
random.shuffle(lista)
print('a ordem da apresentação será ')
print(lista)
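# Example of one possible run (names are made up; the printed order is random
# by construction):
#   inputs: Ana, Bruno, Carla, Davi
#   output: a ordem da apresentação será ['Carla', 'Ana', 'Davi', 'Bruno']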
StarcoderdataPython
6413054
from ..layers.HeteroLinear import HeteroMLPLayer
from ..layers.GeneralGNNLayer import MultiLinearLayer


def HGNNPreMP(args, node_types, num_pre_mp, in_dim, hidden_dim):
    """
    HGNNPreMP: the dimensions are in_dim, hidden_dim, hidden_dim, ...
    Note: the final layer has an activation.

    Parameters
    ----------
    args
    node_types
    num_pre_mp
    in_dim
    hidden_dim

    Returns
    -------

    """
    if num_pre_mp > 0:
        linear_dict = {}
        for ntype in node_types:
            linear_dict[ntype] = [in_dim]
            for _ in range(num_pre_mp):
                linear_dict[ntype].append(hidden_dim)
        # return inside the if-block, so linear_dict is always defined here
        return HeteroMLPLayer(linear_dict, act=args.activation,
                              dropout=args.dropout,
                              has_l2norm=args.has_l2norm,
                              has_bn=args.has_bn,
                              final_act=True)


def HGNNPostMP(args, node_types, num_post_mp, hidden_dim, out_dim):
    """
    HGNNPostMP: the dimensions are hidden_dim, hidden_dim, ..., out_dim.
    Note: the final layer has no activation.

    Parameters
    ----------
    args
    node_types
    num_post_mp
    hidden_dim
    out_dim

    Returns
    -------

    """
    if num_post_mp > 0:
        linear_dict = {}
        for ntype in node_types:
            linear_dict[ntype] = [hidden_dim]
            for _ in range(num_post_mp - 1):
                linear_dict[ntype].append(hidden_dim)
            linear_dict[ntype].append(out_dim)
        # return inside the if-block, so linear_dict is always defined here
        return HeteroMLPLayer(linear_dict, act=args.activation,
                              dropout=args.dropout,
                              has_l2norm=args.has_l2norm,
                              has_bn=args.has_bn,
                              final_act=False)

# def GNNPreMP(args, in_dim, hidden_dim):
#     linear_list = [in_dim] + args.layers_pre_mp * [hidden_dim]
#     return MultiLinearLayer(linear_list, dropout=args.dropout, act=args.activation, has_bn=args.has_bn,
#                             has_l2norm=args.has_l2norm)
#
#
# def GNNPostMP(args, hidden_dim, out_dim):
#     linear_list = args.layers_pre_mp * [hidden_dim] + [out_dim]
#     return MultiLinearLayer(linear_list, dropout=args.dropout, act=args.activation, has_bn=args.has_bn,
#                             has_l2norm=args.has_l2norm)
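# Added sketch of the per-node-type dimension lists these helpers build
# (node type names and sizes are assumed for illustration):
#   HGNNPreMP(args, ['user', 'item'], num_pre_mp=2, in_dim=64, hidden_dim=32)
#   # builds linear_dict == {'user': [64, 32, 32], 'item': [64, 32, 32]},
#   # i.e. an MLP 64 -> 32 -> 32 per node type, with activation on the last layer.
#   HGNNPostMP(args, ['user', 'item'], num_post_mp=2, hidden_dim=32, out_dim=8)
#   # builds linear_dict == {'user': [32, 32, 8], 'item': [32, 32, 8]},
#   # with no activation on the last layer.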
StarcoderdataPython
1948674
<gh_stars>1-10
from abnormal import AB
import proxies


def get_proxies():
    return proxies.get_proxies()


def test_create_ab():
    ab = AB(get_proxies())
    assert ab


def test_get_proxies():
    working_proxies = get_proxies()
    assert len(working_proxies) > 100
StarcoderdataPython
1620904
import unittest
import unittest.mock
import warnings
import pytest

from qiskit import QuantumCircuit
from cirq import ParamResolver
from qiskit.providers import JobStatus

from azure.quantum.job.job import Job
from azure.quantum.qiskit import AzureQuantumProvider
from azure.quantum.cirq import AzureQuantumService
from azure.quantum.cirq.targets.target import Target

from common import QuantumTestBase, ZERO_UID


class TestQiskit(QuantumTestBase):
    """TestIonq

    Tests the azure.quantum.target.ionq module.
    """

    mock_create_job_id_name = "create_job_id"
    create_job_id = Job.create_job_id

    def get_test_job_id(self):
        return ZERO_UID if self.is_playback else Job.create_job_id()

    def _3_qubit_ghz(self):
        circuit = QuantumCircuit(3, 3)
        circuit.name = "Qiskit Sample - 3-qubit GHZ circuit"
        circuit.h(0)
        circuit.cx(0, 1)
        circuit.cx(1, 2)
        circuit.measure([0, 1, 2], [0, 1, 2])
        return circuit

    @pytest.mark.ionq
    @pytest.mark.live_test
    def test_plugins_submit_qiskit_to_ionq(self):
        circuit = self._3_qubit_ghz()
        self._test_qiskit_submit_ionq(circuit=circuit, num_shots=500, num_shots_actual=500)

    @pytest.mark.ionq
    @pytest.mark.live_test
    def test_plugins_submit_qiskit_qobj_to_ionq(self):
        from qiskit import assemble
        circuit = self._3_qubit_ghz()
        qobj = assemble(circuit)
        self._test_qiskit_submit_ionq(circuit=qobj, num_shots=1024, num_shots_actual=1024)

    def _qiskit_wait_to_complete(self, qiskit_job, provider):
        job = qiskit_job._azure_job
        self.pause_recording()
        try:
            job.wait_until_completed(timeout_secs=60)
        except TimeoutError:
            self.resume_recording()
            warnings.warn(f"Qiskit Job {job.id} exceeded timeout. Skipping fetching results.")
        else:
            self.resume_recording()
            self.assertEqual(JobStatus.DONE, qiskit_job.status())
            qiskit_job = provider.get_job(job.id)
            self.assertEqual(JobStatus.DONE, qiskit_job.status())

    def _test_qiskit_submit_ionq(self, circuit, num_shots, num_shots_actual):
        with unittest.mock.patch.object(
            Job,
            self.mock_create_job_id_name,
            return_value=self.get_test_job_id(),
        ):
            workspace = self.create_workspace()
            provider = AzureQuantumProvider(workspace=workspace)
            assert "azure-quantum-qiskit" in provider._workspace.user_agent
            backend = provider.get_backend("ionq.simulator")

            qiskit_job = backend.run(
                circuit=circuit,
                shots=num_shots
            )

            # Make sure the job is completed before fetching the results
            # playback currently does not work for repeated calls
            # See: https://github.com/microsoft/qdk-python/issues/118
            if self.in_recording:
                self._qiskit_wait_to_complete(qiskit_job, provider)

            if JobStatus.DONE == qiskit_job.status():
                result = qiskit_job.result()
                assert result.data()["counts"] == {
                    '000': num_shots_actual // 2, '111': num_shots_actual // 2
                }
                assert result.data()["probabilities"] == {'000': 0.5, '111': 0.5}
                counts = result.get_counts()
                assert counts == result.data()["counts"]

    @pytest.mark.ionq
    @pytest.mark.live_test
    def test_plugins_retrieve_job(self):
        with unittest.mock.patch.object(
            Job,
            self.mock_create_job_id_name,
            return_value=self.get_test_job_id(),
        ):
            workspace = self.create_workspace()
            provider = AzureQuantumProvider(workspace=workspace)
            backend = provider.get_backend("ionq.simulator")
            circuit = self._3_qubit_ghz()
            qiskit_job = backend.run(
                circuit=circuit,
                num_shots=100
            )

            # Make sure the job is completed before fetching the results
            # playback currently does not work for repeated calls
            # See: https://github.com/microsoft/qdk-python/issues/118
            if self.in_recording:
                self._qiskit_wait_to_complete(qiskit_job, provider)

            if JobStatus.DONE == qiskit_job.status():
                fetched_job = backend.retrieve_job(qiskit_job.id())
                assert fetched_job.id() == qiskit_job.id()
                result = fetched_job.result()
                assert result.data() == {
                    'counts': {
                        '000': 250,
                        '111': 250
                    },
                    'probabilities': {
                        '000': 0.5,
                        '111': 0.5
                    }
                }

    @pytest.mark.honeywell
    @pytest.mark.live_test
    def test_plugins_submit_qiskit_to_honeywell(self):
        self._test_qiskit_submit_honeywell(num_shots=None)

    def _test_qiskit_submit_honeywell(self, num_shots):
        with unittest.mock.patch.object(
            Job,
            self.mock_create_job_id_name,
            return_value=self.get_test_job_id(),
        ):
            workspace = self.create_workspace()
            provider = AzureQuantumProvider(workspace=workspace)
            backend = provider.get_backend("honeywell.hqs-lt-s1-apival")
            assert "honeywell.hqs-lt-s1-apival" in backend.backend_names
            assert backend.backend_names[0] in [
                t.name for t in workspace.get_targets(provider_id="honeywell")
            ]

            circuit = self._3_qubit_ghz()
            qiskit_job = backend.run(
                circuit=circuit,
                num_shots=num_shots
            )

            # Make sure the job is completed before fetching the results
            # playback currently does not work for repeated calls
            # See: https://github.com/microsoft/qdk-python/issues/118
            if self.in_recording:
                self._qiskit_wait_to_complete(qiskit_job, provider)

            if JobStatus.DONE == qiskit_job.status():
                result = qiskit_job.result()
                assert result.data()["counts"] == {'000': 500}
                assert result.data()["probabilities"] == {'000': 1.0}


class TestCirq(QuantumTestBase):
    mock_create_job_id_name = "create_job_id"

    def get_test_job_id(self):
        return ZERO_UID if self.is_playback else Job.create_job_id()

    def _3_qubit_ghz_cirq(self):
        import cirq

        # Create qubits
        q0 = cirq.LineQubit(0)
        q1 = cirq.LineQubit(1)
        q2 = cirq.LineQubit(2)

        # Create a circuit
        circuit = cirq.Circuit(
            cirq.H(q0),  # H gate
            cirq.CNOT(q0, q1),
            cirq.CNOT(q1, q2),
            cirq.measure(q0, key='q0'),
            cirq.measure(q1, key='q1'),
            cirq.measure(q2, key='q2'),
        )
        return circuit

    def test_plugins_cirq_user_agent(self):
        # VCR is incompatible with parametrized tests
        for app_id in [
            "test-user-agent",
            "test-very-very-very-very-very-very-very-very-long-user-agent"
        ]:
            workspace = self.create_workspace(user_agent=app_id)
            service = AzureQuantumService(workspace=workspace)
            assert app_id in service._workspace.user_agent
            assert "-azure-quantum-cirq" in service._workspace.user_agent

    @pytest.mark.honeywell
    @pytest.mark.ionq
    @pytest.mark.live_test
    def test_plugins_cirq_get_targets(self):
        workspace = self.create_workspace()
        service = AzureQuantumService(workspace=workspace)
        assert "azure-quantum-cirq" in service._workspace.user_agent
        targets = service.targets()
        target_names = [t.name for t in targets]
        assert all([isinstance(t, Target) for t in targets])
        assert "honeywell.hqs-lt-s1-apival" in target_names
        assert "ionq.simulator" in target_names

    @pytest.mark.ionq
    @pytest.mark.live_test
    def test_plugins_ionq_cirq(self):
        with unittest.mock.patch.object(
            Job,
            self.mock_create_job_id_name,
            return_value=self.get_test_job_id(),
        ):
            workspace = self.create_workspace()
            service = AzureQuantumService(workspace=workspace)
            try:
                run_result = service.run(
                    program=self._3_qubit_ghz_cirq(),
                    repetitions=500,
                    target="ionq.simulator",
                    timeout_seconds=60
                )
            except TimeoutError as e:
                # Pass on timeout
                warnings.warn("IonQ execution exceeded timeout. Skipping fetching results.")
                if self.is_playback:
                    raise e
            except RuntimeError as e:
                # cirq_ionq currently throws a RuntimeError both if the job
                # failed and on timeout.
                # See: https://github.com/quantumlib/Cirq/issues/4507
                if 'Job failed' in str(e) or self.is_playback:
                    warnings.warn(f"IonQ job execution failed: {str(e)}")
                    raise e
                else:
                    warnings.warn("IonQ execution exceeded timeout. Skipping fetching results.")
            else:
                job = service.get_job(self.get_test_job_id())
                job_result = job.results().to_cirq_result()
                for result in [run_result, job_result]:
                    assert "q0" in result.measurements
                    assert "q1" in result.measurements
                    assert "q2" in result.measurements
                    assert len(result.measurements["q0"]) == 500
                    assert len(result.measurements["q1"]) == 500
                    assert len(result.measurements["q2"]) == 500
                    assert result.measurements["q0"].sum() == result.measurements["q1"].sum()
                    assert result.measurements["q1"].sum() == result.measurements["q2"].sum()

    @pytest.mark.honeywell
    @pytest.mark.live_test
    def test_plugins_honeywell_cirq(self):
        with unittest.mock.patch.object(
            Job,
            self.mock_create_job_id_name,
            return_value=self.get_test_job_id(),
        ):
            workspace = self.create_workspace()
            service = AzureQuantumService(workspace=workspace)
            program = self._3_qubit_ghz_cirq()
            try:
                run_result = service.run(
                    program=program,
                    repetitions=500,
                    target="honeywell.hqs-lt-s1-apival",
                    timeout_seconds=60
                )
            except TimeoutError as e:
                # Pass on timeout
                warnings.warn("Honeywell execution exceeded timeout. Skipping fetching results.")
                if self.is_playback:
                    raise e
            except RuntimeError as e:
                # cirq_ionq currently throws a RuntimeError both if the job
                # failed and on timeout.
                # See: https://github.com/quantumlib/Cirq/issues/4507
                if 'Job failed' in str(e) or self.is_playback:
                    warnings.warn(f"Honeywell job execution failed: {str(e)}")
                    raise e
                else:
                    warnings.warn("Honeywell execution exceeded timeout. Skipping fetching results.")
            else:
                job_no_program = service.get_job(self.get_test_job_id())
                job_with_program = service.get_job(
                    self.get_test_job_id(), program=program)
                target = service._target_factory.create_target(
                    provider_id="honeywell", name="honeywell.hqs-lt-s1-apival")
                job_result1 = target._to_cirq_result(
                    result=job_no_program.results(),
                    param_resolver=ParamResolver({}))
                job_result2 = target._to_cirq_result(
                    result=job_with_program.results(),
                    param_resolver=ParamResolver({}))
                for result in [run_result, job_result1, job_result2]:
                    assert "q0" in result.measurements
                    assert "q1" in result.measurements
                    assert "q2" in result.measurements
                    assert len(result.measurements["q0"]) == 500
                    assert len(result.measurements["q1"]) == 500
                    assert len(result.measurements["q2"]) == 500
                    assert result.measurements["q0"].sum() == result.measurements["q1"].sum()
                    assert result.measurements["q1"].sum() == result.measurements["q2"].sum()
StarcoderdataPython
1685
import unittest

from networks.QoS import QoS
from networks.connections.mathematical_connections import FunctionalDegradation
from networks.slicing import SliceConceptualGraph
from utils.location import Location


class TestBaseStationLinear(unittest.TestCase):
    def setUp(self):
        self.name = "network"
        self.wireless_connection_type = "LinearDegradation"
        self.backhaul_qos = {'latency': {'delay': '3.0ms', 'deviation': '1.0ms'},
                             'bandwidth': '100.0mbps', 'error_rate': '1.0%'}
        self.midhaul_qos = {'latency': {'delay': '3.0ms', 'deviation': '1.0ms'},
                            'bandwidth': '100.0mbps', 'error_rate': '1.0%'}
        self.parameters = dict(
            best_qos={'latency': {'delay': '5.0ms', 'deviation': '2.0ms'},
                      'bandwidth': '10.0mbps', 'error_rate': '1.0%'},
            worst_qos={'latency': {'delay': '100.0ms', 'deviation': '20.0ms'},
                       'bandwidth': '5.0mbps', 'error_rate': '2.0%'},
            radius="5km")
        self.network = SliceConceptualGraph(self.name, self.midhaul_qos,
                                            self.backhaul_qos, self.parameters)

    def test_creation(self):
        self.assertEqual(self.network.get_name(), "network")

    def test_get_empty_nodes(self):
        self.assertEqual(self.network.get_nodes(), {})

    def test_add_node(self):
        name, lat, lon = 'node', 33, 40
        self.network.set_RU(lat, lon)
        self.network.add_node(name, lat, lon)
        self.assertEqual(self.network.get_nodes(), {'node': Location(lat, lon)})
        with self.assertRaises(SliceConceptualGraph.NetworkSliceException):
            self.network.add_node('node', 33, 40)

    def test_get_empty_RUs(self):
        self.assertEqual(self.network.get_RUs(), {})

    def test_set_basestation(self):
        lat, lon = 33, 40
        self.network.set_RU(lat, lon)
        self.assertEqual(self.network.get_RUs(), {f'{lat}-{lon}': Location(lat, lon)})
        with self.assertRaises(SliceConceptualGraph.NetworkSliceException):
            self.network.set_RU(lat, lon)

    def test_constructor(self):
        with self.assertRaises(FunctionalDegradation.FunctionDegradationNetworkException):
            SliceConceptualGraph('test', {}, {}, {})
            SliceConceptualGraph('test', self.midhaul_qos, {}, {})
            SliceConceptualGraph('test', {}, self.backhaul_qos, {})
            SliceConceptualGraph('test', {}, {}, self.parameters)

    def test_get_qos(self):
        self.assertEqual(self.network.get_backhaul(), QoS(self.backhaul_qos))

    def test_set_qos(self):
        self.network.set_backhaul(QoS.minimum_qos_dict)
        self.assertEqual(self.network.get_backhaul(), QoS(QoS.minimum_qos_dict))

    def test_qos_from_distance(self):
        self.assertEqual(self.network.get_qos_from(5).get_formated_qos(),
                         self.parameters.get('worst_qos'))
        self.assertEqual(self.network.get_qos_from(0.0).get_formated_qos(),
                         self.parameters.get('best_qos'))

    def test_get_node_location(self):
        lat, lon = 33, 40
        self.network.set_RU(lat, lon)
        self.network.add_node('test', 10, 10)
        self.assertEqual(self.network.get_node_location('test2'), None)
        self.assertEqual(self.network.get_node_location('test'), Location(10, 10))

    def test_has_to_pass_through_backhaul(self):
        self.network.set_RU(10, 10)
        self.network.set_RU(20, 20)
        self.network.add_node('source1', 10, 10)
        self.network.add_node('destination1', 10, 10)
        self.network.add_node('destination2', 20, 20)

    def test_set_RUs(self):
        self.network.set_RUs([{'lat': 10, 'lon': 10}, {'lat': 5, 'lon': 5}])
        self.assertEqual(self.network.get_RUs(),
                         {'10-10': Location(**{'lat': 10, 'lon': 10}),
                          '5-5': Location(**{'lat': 5, 'lon': 5})})
        lat, lon = 33, 40
        self.network.set_RU(lat, lon)
        with self.assertRaises(SliceConceptualGraph.NetworkSliceException):
            self.network.set_RUs([{'lat': 10, 'lon': 10}, {'lat': 5, 'lon': 5}])

    def test_set_node_location(self):
        lat, lon = 33, 40
        self.network.set_RU(lat, lon)
        self.network.add_node('destination1', 10, 10)
        self.network.set_node_location('destination1', 20, 20)
        self.assertEqual(self.network.get_node_location('destination1'), Location(20, 20))

        with self.assertRaises(Location.LocationException):
            self.network.set_node_location('destination1', 'test', 20)

        with self.assertRaises(Location.LocationException):
            self.network.set_node_location('destination1', 20, 'test')


class TestBaseLog2Degradation(unittest.TestCase):
    def setUp(self):
        self.name = "network"
        self.wireless_connection_type = "Log2Degradation"
        self.midhaul_qos = {'latency': {'delay': '3.0ms', 'deviation': '1.0ms'},
                            'bandwidth': '100.0mbps', 'error_rate': '1.0%'}
        self.backhaul_qos = {'latency': {'delay': '3.0ms', 'deviation': '1.0ms'},
                             'bandwidth': '100.0mbps', 'error_rate': '1.0%'}
        self.parameters = dict(
            best_qos={'latency': {'delay': '5.0ms', 'deviation': '2.0ms'},
                      'bandwidth': '10.0mbps', 'error_rate': '1.0%'},
            worst_qos={'latency': {'delay': '100.0ms', 'deviation': '20.0ms'},
                       'bandwidth': '5.0mbps', 'error_rate': '2.0%'},
            radius="5km")
        self.network = SliceConceptualGraph(self.name, self.midhaul_qos,
                                            self.backhaul_qos, self.parameters)

    def test_creation(self):
        self.assertEqual(self.network.get_name(), "network")

    def test_get_empty_nodes(self):
        self.assertEqual(self.network.get_nodes(), {})

    def test_add_node(self):
        name, lat, lon = 'node', 33, 40
        with self.assertRaises(SliceConceptualGraph.NetworkSliceException):
            self.network.add_node(name, lat, lon)
        self.network.set_RU(33, 40, 0)
        self.network.add_node(name, lat, lon)
        self.assertEqual(self.network.get_nodes(), {'node': Location(lat, lon)})
        with self.assertRaises(SliceConceptualGraph.NetworkSliceException):
            self.network.add_node('node', 33, 40)

    def test_get_empty_RUs(self):
        self.assertEqual(self.network.get_RUs(), {})

    def test_set_basestation(self):
        lat, lon = 33, 40
        self.network.set_RU(lat, lon)
        self.assertEqual(self.network.get_RUs(), {f'{lat}-{lon}': Location(lat, lon)})
        with self.assertRaises(SliceConceptualGraph.NetworkSliceException):
            self.network.set_RU(lat, lon)

    def test_constructor(self):
        with self.assertRaises(FunctionalDegradation.FunctionDegradationNetworkException):
            SliceConceptualGraph('test', {}, {}, {})
            SliceConceptualGraph('test', self.midhaul_qos, {}, {})
            SliceConceptualGraph('test', {}, self.backhaul_qos, {})
            SliceConceptualGraph('test', {}, {}, self.parameters)

    def test_get_qos(self):
        self.assertEqual(self.network.get_backhaul(), QoS(self.backhaul_qos))

    def test_set_qos(self):
        self.network.set_backhaul(QoS.minimum_qos_dict)
        self.assertEqual(self.network.get_backhaul(), QoS(QoS.minimum_qos_dict))

    def test_qos_from_distance(self):
        self.assertEqual(self.network.get_qos_from(5).get_formated_qos(),
                         self.parameters.get('worst_qos'))
        self.assertEqual(self.network.get_qos_from(0.0).get_formated_qos(),
                         self.parameters.get('best_qos'))

    def test_get_node_location(self):
        lat, lon = 33, 40
        self.network.set_RU(lat, lon)
        self.network.add_node('test', 10, 10)
        self.assertEqual(self.network.get_node_location('test2'), None)
        self.assertEqual(self.network.get_node_location('test'), Location(10, 10))

    def test_set_RUs(self):
        self.network.set_RUs([{'lat': 10, 'lon': 10}, {'lat': 5, 'lon': 5}])
        self.assertEqual(self.network.get_RUs(),
                         {'10-10': Location(**{'lat': 10, 'lon': 10}),
                          '5-5': Location(**{'lat': 5, 'lon': 5})})
        with self.assertRaises(SliceConceptualGraph.NetworkSliceException):
            self.network.set_RUs([{'lat': 10, 'lon': 10}, {'lat': 5, 'lon': 5}])

    def test_set_node_location(self):
        lat, lon = 33, 40
        self.network.set_RU(lat, lon)
        self.network.add_node('destination1', 10, 10)
        self.network.set_node_location('destination1', 20, 20)
        self.assertEqual(self.network.get_node_location('destination1'), Location(20, 20))

        with self.assertRaises(Location.LocationException):
            self.network.set_node_location('destination1', 'test', 20)

        with self.assertRaises(Location.LocationException):
            self.network.set_node_location('destination1', 20, 'test')
StarcoderdataPython
8050898
import pytest

from godot import RID, Environment, Node


def test_base():
    v = RID()
    assert type(v) == RID


def test_equal():
    v1 = RID()
    v2 = RID()
    assert v1 == v2
    # Environment is a Resource which provides a unique rid per instance
    res_a = Environment()
    v_a_1 = RID(res_a)
    assert v_a_1 != v1
    v_a_2 = RID(res_a)
    assert v_a_1 == v_a_2
    res_b = Environment()
    v_b = RID(res_b)
    assert not v_a_1 == v_b  # Force use of __eq__


@pytest.mark.parametrize("arg", [None, 0, "foo"])
def test_bad_equal(arg):
    arr = RID(Environment())
    assert arr != arg


def test_bad_equal_with_rid():
    # Doing `RID(Environment())` would cause garbage collection of the enclosed
    # Environment object and possible reuse of its id
    env1 = Environment()
    env2 = Environment()
    rid1 = RID(env1)
    rid2 = RID(env2)
    assert rid1 != rid2


def test_lt():
    env1 = Environment()
    env2 = Environment()
    rid1 = RID(env1)
    rid2 = RID(env2)
    # Ordering is based on the resource pointer, so we cannot know the order ahead of time
    small, big = sorted([rid1, rid2])
    assert small < big
    assert big > small
    assert not small > big
    assert not big < small


def test_repr():
    v = RID()
    assert repr(v) == "<RID(id=0)>"


@pytest.mark.parametrize("arg", [42, "dummy", RID()])
def test_bad_instantiate(arg):
    with pytest.raises(TypeError):
        RID(arg)


def test_bad_instantiate_with_not_resource(generate_obj):
    # Node doesn't inherit from Resource
    node = generate_obj(Node)
    with pytest.raises(TypeError):
        RID(node)


@pytest.mark.parametrize("args", [["get_id", int, ()]], ids=lambda x: x[0])
def test_methods(args):
    v = RID()
    # Don't test the methods' validity, only the bindings
    field, ret_type, params = args
    assert hasattr(v, field)
    method = getattr(v, field)
    assert callable(method)
    ret = method(*params)
    assert type(ret) == ret_type


# @pytest.mark.parametrize('args', [
#     (Vector2(0, 0), Vector2(2, 3)),
#     (Vector2(3, 2), Vector2(-1, 1)),
#     (Vector2(-1, -1), Vector2(3, 4)),
# ], ids=lambda x: x[0])
# def test_lt(args):
#     param, result = args
#     calc = Vector2(2, 3) - param
#     assert calc == result


# @pytest.mark.parametrize('arg', [
#     None, 1, 'dummy'
# ], ids=lambda x: x[0])
# def test_bad_add(arg):
#     with pytest.raises(TypeError):
#         Vector2(2, 3) + arg
StarcoderdataPython
1891373
import numpy as np
import cv2
from config import *
from keras.utils.np_utils import to_categorical


def pre_process_img(img, colorChannel='RGB'):
    # first remove the hood of the car
    img = img[TOPCROP:BOTTOMCROP, ...]
    # normalize image
    return img / 255. - 0.5


def pre_process_label(label):
    # only use the third index
    label = label[:, :, 2]
    # set the car hood to zero
    mask = (label[-115:, :] == 10)
    mask = np.pad(mask, ((485, 0), (0, 0)), mode='constant')
    label[mask] = 0
    label = label[TOPCROP:BOTTOMCROP, :]
    # set pixels labeled as lane markings (value=6) to be the same as the road surface (value=7)
    mask = (label == 6)
    label[mask] = 7
    # now set anything that is not vehicle or drivable surface to 'Other'
    mask = (label == 7) + (label == 10)
    label[~mask] = 0
    # now set vehicles and roads
    label[(label == 7)] = 1    # roads
    label[(label == 10)] = 2   # vehicles
    return label


def augment_data(img, label):
    choice = np.random.choice(['flip', 'flip', 'shift', 'noise', 'rotate'])
    if choice == 'rotate':
        M_rot = cv2.getRotationMatrix2D((W / 2, H / 2), np.random.randint(-15, 15), 1)
        img = cv2.warpAffine(img, M_rot, (W, H))
        label = cv2.warpAffine(label, M_rot, (W, H))
    elif choice == 'shift':
        M_shift = np.float32([[1, 0, np.random.randint(0, 50)],
                              [0, 1, np.random.randint(0, 50)]])
        img = cv2.warpAffine(img, M_shift, (W, H))
        label = cv2.warpAffine(label, M_shift, (W, H))
    elif choice == 'noise':
        mean = 0.0  # some constant
        std = 0.1   # some constant (standard deviation)
        img = img + np.random.normal(mean, std, img.shape)
    elif choice == 'zoom':
        # note: 'zoom' is not in the choice list above, so this branch is
        # currently unreachable
        zoomfactor = np.random.randint(1, 8)
        M_zoom = cv2.getRotationMatrix2D((W / 2, H / 2), 0, zoomfactor)
        img = cv2.warpAffine(img, M_zoom, (W, H))
        label = cv2.warpAffine(label, M_zoom, (W, H))
    elif choice == 'flip':
        img = cv2.flip(img, 1)
        label = cv2.flip(label, 1)
    return img, label


def data_generator(imgs, labels, batchSize=50, augment=True):
    # initialize pointer
    idx, n = 0, len(imgs)
    # input data
    X_in = np.zeros((batchSize, H, W, C), dtype='float32')
    X_out = np.zeros((batchSize, H, W, numClasses), dtype='float32')
    # yield data with or w/o augmentation
    while True:
        for i in range(batchSize):
            img = cv2.imread(imgs[idx % n])
            img = pre_process_img(img)
            # get label data
            label = cv2.imread(labels[idx % n])
            label = pre_process_label(label)
            if augment:
                img, label = augment_data(img, label)
            X_in[i, :, :, :] = img.astype('float32')
            X_out[i, :, :, :] = to_categorical(label, num_classes=numClasses) \
                .reshape((H, W, numClasses)).astype('float32')
            # increment counter
            idx += 1
        yield X_in, X_out
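# Added usage sketch (the file lists are hypothetical; W, H, C, numClasses,
# TOPCROP and BOTTOMCROP come from config.py):
#   gen = data_generator(train_imgs, train_labels, batchSize=8, augment=True)
#   X, y = next(gen)  # X: (8, H, W, C) float32, y: (8, H, W, numClasses) one-hot
#   model.fit_generator(gen, steps_per_epoch=len(train_imgs) // 8)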
StarcoderdataPython
1687288
from features.numpy_sift import SIFTDescriptor
import numpy as np

import features.feature_utils
from features.DetectorDescriptorTemplate import DetectorAndDescriptor


class np_sift(DetectorAndDescriptor):
    def __init__(self, peak_thresh=10.0):
        super(np_sift, self).__init__(
            name='np_sift',
            is_detector=True,
            is_descriptor=True,
            is_both=True,
            patch_input=True)
        self.peak_thresh = peak_thresh
        self.descriptor = None

    def detect_feature(self, image):
        pass

    def extract_descriptor(self, image, feature):
        pass

    def extract_all(self, image):
        pass

    def extract_descriptor_from_patch(self, patches):
        patch_num = patches.shape[0]
        w = patches.shape[2]
        # re-create the descriptor only when the patch size changes
        if self.descriptor is None or self.descriptor.patchSize != w:
            self.descriptor = SIFTDescriptor(w)
        descriptors = np.zeros((patch_num, 128))
        for i in range(patch_num):
            patch = features.feature_utils.all_to_gray(patches[i, :, :, :])
            patch = patch[:, :, 0]
            descriptors[i, :] = self.descriptor.describe(patch).flatten()
        return descriptors
StarcoderdataPython
1986993
<filename>Game/PlayerLogic/Actions/Double.py
import IAction


# BUG: Double hits and stands as well?
class Double(IAction.IAction):

    def legal(self, player, admin):
        return player.cardCount() == 2 and player.isActive()

    def effect(self, player, admin):
        admin.dealCard(player)
        player.active = False
        admin.wagers[player] *= 2

    def actionName(self):
        return "Double"
StarcoderdataPython
5133202
<filename>main.py
#!/usr/bin/env python3
import requests
import json
from dotenv import load_dotenv
import os
import mariadb
import datetime
import time
import dateutil.relativedelta
from alive_progress import alive_bar

load_dotenv()
PI_IP = os.getenv("PI_IP")
API_KEY = os.getenv("API_KEY")
DB_USER = os.getenv("DB_USER")
DB_PASSWD = os.getenv("<PASSWORD>")
DB_IP = os.getenv("DB_IP")
DB_DATABASE = os.getenv("DB_DATABASE")

try:
    conn = mariadb.connect(
        user=DB_USER,
        password=<PASSWORD>,
        host=DB_IP,
        port=3306,
        database=DB_DATABASE
    )
except mariadb.Error as e:
    print(f"Error connecting to MariaDB Platform: {e}")
    # exit()


def scannet():
    c = conn.cursor()
    print("[*] Creating table if it doesn't exist...")
    c.execute('''CREATE TABLE IF NOT EXISTS network
                 (addres VARCHAR(255), name VARCHAR(255), macaddr VARCHAR(255),
                  maccompany VARCHAR(255), lastquery INT(255),
                  PRIMARY KEY(addres));''')
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:89.0) Gecko/20100101 Firefox/89.0',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Language': 'en-US,en;q=0.5',
        'X-Requested-With': 'XMLHttpRequest',
        'Connection': 'keep-alive',
        'Sec-GPC': '1',
        'DNT': '1',
    }
    params = (
        ('auth', API_KEY),
        ('network', ''),
        ('_', '1624463201689'),
    )
    response = requests.get(f'http://{PI_IP}/admin/api_db.php', headers=headers, params=params)
    print("[*] Sending request...")
    devices = response.text
    json_data = json.loads(devices)
    json_data = json_data['network']
    print("[*] Sending data to database...")
    # print(f"{rd.years} years, {rd.months} months, {rd.days} days, {rd.hours} hours, {rd.minutes} minutes and {rd.seconds} seconds")
    with alive_bar(len(json_data)) as bar:
        for entry in json_data:
            bar()
            host = "Not found"
            hostname = None
            macaddr = None
            maccompany = None
            lastQuery = None
            if entry['lastQuery']:
                curr_time = time.time()
                other_time = entry['lastQuery']
                dt1 = datetime.datetime.fromtimestamp(other_time)
                dt2 = datetime.datetime.fromtimestamp(curr_time)
                rd = dateutil.relativedelta.relativedelta(dt2, dt1)
                if rd.days >= 1:
                    pass
                else:
                    lastQuery = entry['lastQuery']
            if entry['ip']:
                host = entry['ip'][0]
                bar.text(host)
            if entry['name']:
                hostname = entry['name'][0]
            if entry['hwaddr']:
                macaddr = entry['hwaddr']
            if entry['macVendor']:
                maccompany = entry['macVendor']
            c.execute("replace into network values (?,?,?,?,?);",
                      (host, hostname, macaddr, maccompany, lastQuery))
    conn.commit()
    # print("[*] Network scan has completed and has been uploaded...")


def client():
    cur = conn.cursor()
    cur.execute('SELECT distinct * from network;')
    print("[*] Fetching results from database...")
    for entry in cur:
        host = None
        hostname = None
        macaddr = None
        maccompany = None
        lastQuery = None
        if entry[0]:
            host = entry[0]
        if entry[1]:
            hostname = entry[1]
        if entry[2]:
            macaddr = entry[2]
        if entry[3]:
            maccompany = entry[3]
        if entry[4]:
            lastQuery = entry[4]
        print(host, hostname, macaddr, maccompany, lastQuery)
    conn.close()


def main():
    scannet()
    # client()


if __name__ == "__main__":
    main()
StarcoderdataPython
8083321
#!/usr/bin/env python
import subprocess, shlex, json


def get_threat_list():
    isi_threat_list_raw = "isi antivirus reports threats list --format json -a -z"
    isi_threat_list_split = shlex.split(isi_threat_list_raw)
    isi_threat_list_cmd = subprocess.Popen(isi_threat_list_split, stdout=subprocess.PIPE)
    isi_threat_list_strings = isi_threat_list_cmd.communicate()[0]
    isi_threat_list_results = json.loads(isi_threat_list_strings)
    return isi_threat_list_results


def release_threats():
    isi_threat_list_results = get_threat_list()
    for i in isi_threat_list_results:
        isi_release_quarantine_raw = "isi antivirus release --verbose"
        isi_release_quarantine_split = shlex.split(isi_release_quarantine_raw)
        isi_release_quarantine_split.append(i["file"])
        isi_release_quarantine_cmd = subprocess.Popen(isi_release_quarantine_split, stdout=subprocess.PIPE)
        isi_release_quarantine_results = isi_release_quarantine_cmd.communicate()[0]
        print(isi_release_quarantine_results)


def main():
    release_threats()


if __name__ == '__main__':
    main()
StarcoderdataPython
1642261
<filename>Python/136_SingleNumber.py
class Solution(object):
    def singleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # Using XOR to find the single number.
        # Because every number appears twice, while N^N=0, 0^N=N,
        # XOR is commutative, so the order of elements does not matter.
        # Finally, it will be res = 0 ^ singlenumber ==> res = singlenumber
        res = 0
        for num in nums:
            res ^= num
        return res


nums = [1, 1, 5, 5, 3, 4, 4, 9, 9, 8, 8, 7, 7]
foo = Solution()
print(foo.singleNumber(nums))
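# Quick numeric check of the XOR identities the comments above rely on
# (an illustrative addition, not part of the original solution):
assert 5 ^ 5 == 0          # N ^ N = 0
assert 0 ^ 7 == 7          # 0 ^ N = N
assert (1 ^ 3) ^ 1 == 3    # commutativity lets the paired values cancel in any order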
StarcoderdataPython
42166
<gh_stars>0
# Author: Charles
# Public account: Charles's Pikachu
# Making a small Python game series - FlappyBird
import Bird
import Pipe
import pygame
from pygame.locals import *

# define some constants
WIDTH, HEIGHT = 640, 480


# main function
def main():
    # initialization
    pygame.init()
    screen = pygame.display.set_mode((WIDTH, HEIGHT), 0, 32)
    pygame.display.set_caption('FlappyBird-公众号: Charles的皮卡丘')  # public account: Charles's Pikachu
    # load images
    background_img = pygame.image.load("./resources/images/background.png")
    gameover_img = pygame.image.load("./resources/images/gameover.png")
    # load audio
    jump_sound = pygame.mixer.Sound("./resources/audios/jump.wav")
    jump_sound.set_volume(6)
    pygame.mixer.music.load('./resources/audios/moonlight.wav')
    pygame.mixer.music.play(-1, 0.0)
    pygame.mixer.music.set_volume(12)
    # set the font
    font = pygame.font.Font("./resources/fonts/simkai.ttf", 24)
    # clock
    clock = pygame.time.Clock()
    # the bird
    bird = Bird.Bird(HEIGHT, WIDTH)
    # pipes
    pipes = []
    # time
    time0 = 0
    time_interval = 2
    # score
    SCORE = 0
    running = True
    # main loop
    while running:
        # draw the background
        screen.fill(0)
        for x in range(WIDTH // background_img.get_width() + 1):
            for y in range(HEIGHT // background_img.get_height() + 1):
                screen.blit(background_img, (x * 100, y * 100))
        time_passed = clock.tick() / 1000
        # events
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit(0)
            if event.type == pygame.KEYDOWN:
                if event.key == K_SPACE:
                    jump_sound.play()
                    bird.cur_jump_height = 0
                    bird.is_jump = True
        # draw the bird
        bird.update(time_passed)
        screen.blit(bird.rotated_bird, bird.rect)
        if bird.is_dead():
            running = False
        # draw the pipes
        time1 = pygame.time.get_ticks() / 1000
        if time1 - time0 > time_interval:
            time0 = time1
            pipes.append(Pipe.Pipe(HEIGHT, WIDTH))
        for i, pipe in enumerate(pipes):
            pipe.update(time_passed)
            for p in pipe.pipe:
                screen.blit(p.img, p.rect)
            if bird.rect.left > pipe.x + Pipe.pipeHead().width and not pipe.add_score:
                SCORE += 1
                pipe.add_score = True
            if pipe.x + Pipe.pipeHead().width < 0:
                pipes.pop(i)
            # collision detection
            if pygame.sprite.spritecollide(bird, pipe.pipe, False, None):
                if bird.rect.left < pipe.x + (Pipe.pipeHead().width + Pipe.pipeBody().width) / 2:
                    running = False
        # display the score
        scoreText = font.render('Score: ' + str(SCORE), True, (0, 0, 0))
        scoreRect = scoreText.get_rect()
        scoreRect.topleft = [10, 10]
        screen.blit(scoreText, scoreRect)
        pygame.display.flip()
        pygame.display.update()
    screen.blit(gameover_img, (0, 0))
    pygame.display.flip()
    pygame.display.update()
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                exit(0)


if __name__ == "__main__":
    main()
StarcoderdataPython
12833266
class NotInsideTransaction(Exception):
    def __init__(self):
        message = 'Trying to perform an operation that needs to be inside transaction'
        super(NotInsideTransaction, self).__init__(message)


class MixedPositionalAndNamedArguments(Exception):
    def __init__(self):
        message = 'Cannot mix positional and named arguments in query'
        super(MixedPositionalAndNamedArguments, self).__init__(message)
StarcoderdataPython
3319739
from unittest import mock

from django.db.utils import IntegrityError
from django.test import Client, SimpleTestCase, TestCase
from django.urls import resolve, reverse

from users.api import CheckVerificationCodeView, EmailView
from users.models import Emails
from users.tasks import send_verification_code
from users.utils import (
    generate_verification_code,
    is_code_correct,
    is_code_in_redis,
    set_verification_code,
)


class TestUrls(SimpleTestCase):
    def test_user_email_is_resolved(self):
        url = reverse("send-verification-code")
        self.assertEqual(resolve(url).func.view_class, EmailView)

    def test_verification_code_check_is_resolved(self):
        url = reverse("check-verification-code")
        self.assertEqual(
            resolve(url).func.view_class, CheckVerificationCodeView
        )


class TestViews(TestCase):
    def setUp(self):
        self.client = Client()
        self.send_verification_code_url = reverse("send-verification-code")
        self.check_verification_code_url = reverse("check-verification-code")

    def test_verification_code_POST_no_data_provided(self):
        response = self.client.post(self.send_verification_code_url)
        print(response)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json().get("email")[0], "This field is required."
        )

    @mock.patch("users.utils.redis.StrictRedis.get", return_value=False)
    @mock.patch("users.api.is_code_in_redis", return_value=False)
    def test_verification_code_POST_successful(
        self, mock_is_code_in_redis, mock_r_get
    ):
        response = self.client.post(
            self.send_verification_code_url, data={"email": "<EMAIL>"}
        )
        self.assertTrue(mock_is_code_in_redis.called)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "please check your inbox")

    @mock.patch("users.api.is_code_in_redis", return_value=True)
    def test_verification_code_POST_wait_120_sec(self, mock_is_code_in_redis):
        response = self.client.post(
            self.send_verification_code_url, data={"email": "<EMAIL>"}
        )
        self.assertTrue(mock_is_code_in_redis.called)
        self.assertEqual(response.status_code, 202)
        self.assertEqual(
            response.json().get("detail"), "please wait 120 seconds"
        )

    @mock.patch("users.tasks.send_mail")
    def test_send_verification_code_func(self, mock_send_mail):
        ret = send_verification_code(email="<EMAIL>", code="234123")
        self.assertTrue(mock_send_mail.called)

    def test_check_verification_POST_no_data(self):
        response = self.client.post(self.check_verification_code_url)
        self.assertEqual(response.status_code, 400)

    def test_check_verification_POST_invalid_email(self):
        response = self.client.post(
            self.check_verification_code_url, data={"email": "not_an_email"}
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json().get("email")[0], "Enter a valid email address."
        )

    def test_check_verification_POST_invalid_non_digit_code(self):
        response = self.client.post(
            self.check_verification_code_url, data={"code": "abcdef"}
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json().get("code")[0], "Only digits are allowed"
        )

    def test_check_verification_POST_invalid_short_code(self):
        response = self.client.post(
            self.check_verification_code_url, data={"code": "12345"}
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json().get("code")[0], "Code is only 6 digits long"
        )

    def test_check_verification_POST_invalid_long_code(self):
        response = self.client.post(
            self.check_verification_code_url, data={"code": "1234567"}
        )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json().get("code")[0], "Code is only 6 digits long"
        )

    @mock.patch("users.api.is_code_correct", return_value=False)
    def test_check_verification_POST_not_valid_code(self, mock_is_code_correct):
        response = self.client.post(
            self.check_verification_code_url,
            data={"email": "<EMAIL>", "code": "123456"},
        )
        self.assertTrue(mock_is_code_correct.called)
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response.json().get("detail"), "Code is not correct")

    @mock.patch("users.api.is_code_correct", return_value=True)
    def test_check_verification_POST_valid_code(self, mock_is_code_correct):
        response = self.client.post(
            self.check_verification_code_url,
            data={"email": "<EMAIL>", "code": "123456"},
        )
        self.assertTrue(mock_is_code_correct.called)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json().get("detail"), "successful")
        email_obj = Emails.objects.get(email="<EMAIL>")
        self.assertTrue(email_obj.is_active)


class TestModels(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.email_obj = Emails.objects.create(
            email="<EMAIL>", is_active=True
        )

    def test_email_field__uniqueness(self):
        with self.assertRaises(IntegrityError):
            Emails.objects.create(email="<EMAIL>")

    def test_email_model_str(self):
        self.assertEqual(
            str(self.email_obj),
            f"{self.email_obj.email} (active: {self.email_obj.is_active})",
        )


class TestUtils(SimpleTestCase):
    def test_generate_verification_code_length(self):
        self.assertEqual(len(generate_verification_code()), 6)

    def test_generate_verification_code_is_digit(self):
        self.assertTrue(generate_verification_code().isdigit())

    def test_generate_verification_code_returns_str(self):
        self.assertTrue(type(generate_verification_code()) == str)

    @mock.patch("users.utils.redis.StrictRedis.set", return_value=True)
    def test_set_verification_code(self, mock_set_verification_code):
        ret = set_verification_code("<EMAIL>", "123456")
        self.assertTrue(mock_set_verification_code.called)
        self.assertTrue(ret)

    @mock.patch("users.utils.redis.StrictRedis.get", return_value=True)
    def test_is_code_in_redis(self, mock_is_code_in_redis):
        self.assertTrue(is_code_in_redis("<EMAIL>"))
        self.assertTrue(mock_is_code_in_redis.called)

    @mock.patch("users.utils.redis.StrictRedis.get", return_value=b"123123")
    def test_is_code_correct(self, mock_is_code_correct):
        self.assertTrue(is_code_correct("<EMAIL>", 123123))
        self.assertTrue(mock_is_code_correct.called)

    @mock.patch("users.utils.redis.StrictRedis.get", return_value="123123")
    def test_is_code_correct_not_working_with_str_return_from_redis(
        self, mock_is_code_correct
    ):
        # returns false because the data that is fetched from redis
        # should be in byte type.
        # if you call `.decode()` on it, it will raise an AttributeError
        self.assertFalse(is_code_correct("<EMAIL>", "123123"))
        self.assertTrue(mock_is_code_correct.called)
StarcoderdataPython
389856
<filename>galois/factor.py<gh_stars>0
"""
A module containing routines for integer factorization.
"""
import bisect
import functools
import math
import random

import numpy as np

from .math_ import isqrt
from .overrides import set_module
from .prime import PRIMES, is_prime

__all__ = ["prime_factors", "is_smooth"]


def trial_division_factor(n):
    max_factor = isqrt(n)
    max_prime_idx = bisect.bisect_right(PRIMES, max_factor)
    p, e = [], []
    for prime in PRIMES[0:max_prime_idx]:
        degree = 0
        while n % prime == 0:
            degree += 1
            n //= prime
        if degree > 0:
            p.append(prime)
            e.append(degree)
            if n == 1:
                break
    return p, e, n


@functools.lru_cache(maxsize=1024)
def pollard_rho_factor(n, c=1):
    """
    References
    ----------
    * Section 3.2.2 from https://cacr.uwaterloo.ca/hac/about/chap3.pdf
    """
    f = lambda x: (x**2 + c) % n

    a, b, d = 2, 2, 1
    while True:
        a = f(a)
        # NOTE: the "hare" advances four steps per "tortoise" step here;
        # classic Floyd cycle detection advances it two.
        b = f(f(b))
        b = f(f(b))
        d = math.gcd(a - b, n)

        if 1 < d < n:
            return d
        if d == n:
            return None  # Failure


# def fermat_factors(n):
#     a = isqrt(n) + 1
#     b2 = a**2 - n
#     while isqrt(b2)**2 != b2:
#         a += 1
#         b2 = a**2 - n
#     b = isqrt(b2)
#     return a - b, a + b


@set_module("galois")
def prime_factors(n):
    """
    Computes the prime factors of the positive integer :math:`n`.

    The integer :math:`n` can be factored into
    :math:`n = p_1^{e_1} p_2^{e_2} \\dots p_{k-1}^{e_{k-1}}`.

    **Steps**:

    1. Test if :math:`n` is prime. If so, return `[n], [1]`.
    2. Use trial division with a list of primes up to :math:`10^6`.
       If no residual factors, return the discovered prime factors.
    3. Use Pollard's Rho algorithm to find a non-trivial factor of the
       residual. Continue until all are found.

    Parameters
    ----------
    n : int
        The positive integer to be factored.

    Returns
    -------
    list
        Sorted list of :math:`k` prime factors
        :math:`p = [p_1, p_2, \\dots, p_{k-1}]` with
        :math:`p_1 < p_2 < \\dots < p_{k-1}`.
    list
        List of corresponding prime powers
        :math:`e = [e_1, e_2, \\dots, e_{k-1}]`.

    Examples
    --------
    .. ipython:: python

        p, e = galois.prime_factors(120)
        p, e

        # The product of the prime powers is the factored integer
        np.multiply.reduce(np.array(p) ** np.array(e))

    Prime factorization of 1 less than a large prime.

    .. ipython:: python

        prime = 1000000000000000035000061
        galois.is_prime(prime)
        p, e = galois.prime_factors(prime - 1)
        p, e
        np.multiply.reduce(np.array(p) ** np.array(e))
    """
    if not isinstance(n, (int, np.integer)):
        raise TypeError(f"Argument `n` must be an integer, not {type(n)}.")
    if not n > 1:
        raise ValueError(f"Argument `n` must be greater than 1, not {n}.")
    n = int(n)

    # Step 1
    if is_prime(n):
        return [n], [1]

    # Step 2
    p, e, n = trial_division_factor(n)

    # Step 3
    while n > 1 and not is_prime(n):
        f = pollard_rho_factor(n)  # A non-trivial factor
        while f is None:
            # Try again with a different random function f(x)
            f = pollard_rho_factor(n, c=random.randint(2, n // 2))
        if is_prime(f):
            degree = 0
            while n % f == 0:
                degree += 1
                n //= f
            p.append(f)
            e.append(degree)
        else:
            raise RuntimeError(f"Encountered a very large composite {f}. Please report this as a GitHub issue at https://github.com/mhostetter/galois/issues.")

    if n > 1:
        p.append(n)
        e.append(1)

    return p, e


@set_module("galois")
def is_smooth(n, B):
    """
    Determines if the positive integer :math:`n` is :math:`B`-smooth,
    i.e. all its prime factors satisfy :math:`p \\le B`.

    The :math:`2`-smooth numbers are the powers of :math:`2`. The
    :math:`5`-smooth numbers are known as *regular numbers*. The
    :math:`7`-smooth numbers are known as *humble numbers* or
    *highly composite numbers*.

    Parameters
    ----------
    n : int
        A positive integer.
    B : int
        The smoothness bound.

    Returns
    -------
    bool
        `True` if :math:`n` is :math:`B`-smooth.

    Examples
    --------
    .. ipython:: python

        galois.is_smooth(2**10, 2)
        galois.is_smooth(10, 5)
        galois.is_smooth(12, 5)
        galois.is_smooth(60**2, 5)
    """
    if not isinstance(n, (int, np.integer)):
        raise TypeError(f"Argument `n` must be an integer, not {type(n)}.")
    if not isinstance(B, (int, np.integer)):
        raise TypeError(f"Argument `B` must be an integer, not {type(B)}.")
    if not n > 0:
        raise ValueError(f"Argument `n` must be positive, not {n}.")
    if not B >= 2:
        raise ValueError(f"Argument `B` must be at least 2, not {B}.")

    if n == 1:
        return True
    else:
        p, _ = prime_factors(n)
        return p[-1] <= B
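# A minimal usage sketch for pollard_rho_factor (an illustrative addition;
# the module's public API is prime_factors/is_smooth). 8051 = 83 * 97 is the
# classic textbook composite for Pollard's rho:
if __name__ == "__main__":
    f = pollard_rho_factor(8051)
    print(f)  # a non-trivial factor of 8051 (83 or 97), or None on failure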
StarcoderdataPython
180966
<filename>p1_navigation/plots.py<gh_stars>0
import matplotlib.pyplot as plt


def plot_loss(history):
    history['agent_avg_loss'].plot(label='avg_loss')
    history['agent_avg_loss'].rolling(10).mean().plot(label='rolling(10) mean of avg_loss')
    plt.title('Agent average loss')
    plt.xlabel('Episodes')
    plt.ylabel('Loss')
    plt.legend()


def plot_score(history):
    history['score'].plot(label='score')
    history['score'].rolling(100).mean().plot(label='rolling(100) mean of score')
    plt.title('Agent score')
    plt.xlabel('Episodes')
    plt.ylabel('Score')
    plt.legend()
StarcoderdataPython
1931642
<gh_stars>1-10
#!/usr/bin/env python3
# This script converts MeCab analysis result to Juman++ training data
import sys
import csv
import random


def escape(x):
    if '"' in x or ',' in x:
        replaced = x.replace('"', '""')
        return f'"{replaced}"'
    else:
        return x


FIELD_NAMES = [
    "pos1", "pos2", "pos3", "pos4",
    "cType", "cForm",
    "lForm", "lemma",
    "orth", "pron", "orthBase", "pronBase",
    "goshu",
    "iType", "iForm", "fType", "fForm",
    "iConType", "fConType", "type",
    "kana", "kanaBase",
    "form", "formBase",
    "aType", "aConType", "aModType",
    "lid", "lemma_id",
]

TRAIN_FIELDS = {"pos1", "pos2", "pos3", "pos4", "cType", "cForm", "lForm",
                "lemma", "orth", "pron", "orthBase", "pronBase", "goshu",
                "type", "aType", "aConType", "aModType"}

HIRAGANA = 'ぁあぃいぅうぇえぉおかがきぎくぐけげこごさざしじすずせぜそぞただちぢっつづてでとどなにぬねのはばぱひびぴふぶぷへべぺほぼぽまみむめもゃやゅゆょよらりるれろわをんーゎゐゑゕゖゔゝゞ・「」。、'
FULL_KATA = 'ァアィイゥウェエォオカガキギクグケゲコゴサザシジスズセゼソゾタダチヂッツヅテデトドナニヌネノハバパヒビピフブプヘベペホボポマミムメモャヤュユョヨラリルレロワヲンーヮヰヱヵヶヴヽヾ・「」。、'
KATA_TO_HIRA = str.maketrans(FULL_KATA, HIRAGANA)


class Unidic(object):
    def __init__(self):
        self.lookup = {}

    @staticmethod
    def makeKey(fparts):
        pos1 = fparts[0] or '*'
        pos2 = fparts[1] or '*'
        pos3 = fparts[2] or '*'
        pos4 = fparts[3] or '*'
        cType = fparts[4] or '*'
        cForm = fparts[5] or '*'
        lForm = fparts[6] or '*'
        lemma = fparts[7] or '*'
        pron = fparts[9] or '*'
        pronBase = fparts[11] or '*'
        goshu = fparts[12] or '*'
        type = fparts[19] or '*'
        aType = fparts[24] or '*'
        aConType = fparts[25] or '*'
        aModType = fparts[26] or '*'
        return (
            pos1, pos2, pos3, pos4,
            cType, cForm,
            lForm, lemma,
            pron, pronBase,
            goshu, type,
            aType, aConType, aModType
        )

    def add(self, fparts):
        if len(fparts) < 29:
            return
        orth = fparts[8]
        orthBase = fparts[10]
        key = Unidic.makeKey(fparts)
        rds = self.lookup.setdefault(key, [])
        val = (orth, orthBase)
        # check the (orth, orthBase) pair itself; the original tested the
        # bare orth string against a list of pairs, which never matched
        if val not in rds:
            rds.append(val)

    def get(self, fparts):
        if len(fparts) < 27:
            return None
        orth = fparts[8]
        orthBase = fparts[10]
        key = Unidic.makeKey(fparts)
        retval = orth, orthBase
        items = self.lookup.get(key, None)
        if items is None:
            return None
        notMe = [item for item in items if item != retval]
        if len(notMe) == 0:
            return None
        return random.choice(notMe)

    def deleteSingles(self):
        todelete = []
        for k, v in self.lookup.items():
            if len(v) == 1:
                todelete.append(k)
        for i in todelete:
            self.lookup.pop(i)


def readUnidic(file):
    unidic = Unidic()
    with open(file, 'rt', newline='', encoding='utf-8') as fd:
        for line in csv.reader(fd):
            unidic.add(line[4:])
    unidic.deleteSingles()
    return unidic


def makeDict(fields):
    res = {}
    for i in range(len(fields)):
        val = fields[i]
        if val != '*' and len(val) > 0 and FIELD_NAMES[i] in TRAIN_FIELDS:
            res[FIELD_NAMES[i]] = val
    return res


def writePart(lines, fd, unidic):
    for line in lines:
        fparts = line[1:]
        dic = makeDict(fparts)
        surf = line[0]
        if unidic is not None:
            changed = unidic.get(fparts)
            if changed is not None:
                surf = changed[0]
                dic['orth'] = surf
                dic['orthBase'] = changed[1]
        fd.write('\t')
        fd.write(surf)
        for k in dic:
            v = dic[k]
            fd.write('\t')
            fd.write(k)
            fd.write(':')
            fd.write(v)
        fd.write('\n')
    fd.write('\n')


def writeFull(lines, fd, unidic):
    for line in lines:
        surf = line[0]
        fparts = line[1:]
        features = [
            surf,
            fparts[0], fparts[1], fparts[2], fparts[3],
            fparts[4], fparts[5], fparts[6], fparts[7],
            fparts[8], fparts[9], fparts[10], fparts[11],
            fparts[12], fparts[19], fparts[24], fparts[25],
            fparts[26]
        ]
        if unidic is not None:
            changed = unidic.get(fparts)
            if changed is not None:
                orth, orthBase = changed
                features[0] = orth
                features[9] = orth
                features[11] = orthBase
        print(",".join(escape(x) for x in features), file=fd)
    print("EOS", file=fd)


def process(fd, full, part, unidic):
    lines = []
    is_full = True
    for line in csv.reader(fd):
        if len(line) == 1:
            if is_full:
                writeFull(lines, full, None)
                writeFull(lines, full, unidic)
            else:
                writePart(lines, part, None)
                writePart(lines, part, unidic)
            lines.clear()
            is_full = True
            continue
        lines.append(line)
        if len(line) < 10:
            is_full = False


def main():
    unidic = readUnidic(sys.argv[1])
    files = sys.argv[2:]
    for file in files:
        full_name = file + ".full-tdata"
        part_name = file + ".part-tdata"
        with open(file, 'rt', encoding='utf-8', newline='') as fd:
            with open(full_name, 'wt', encoding='utf-8') as full:
                with open(part_name, 'wt', encoding='utf-8') as part:
                    process(fd, full, part, unidic)


if __name__ == '__main__':
    main()
StarcoderdataPython
352601
""" Created on May 25, 2016 @author: xiul, t-zalipt """ import numpy as np ################################################################################ # Some helper functions ################################################################################ def unique_states(training_data): unique = [] for datum in training_data: if contains(unique, datum[0]): pass else: unique.append(datum[0].copy()) return unique def contains(unique, candidate_state): for state in unique: if np.array_equal(state, candidate_state): return True else: pass return False
StarcoderdataPython
8135739
<filename>2021-08-11/valid_lab_results.py
"""
Valid Lab Results | Cannabis Data Science

Authors:
    UFO Software, LLC
    <NAME> <<EMAIL>>
Created: Thursday, July 29, 2021 21:51
Updated: 8/10/2021

License GPLv3+: GNU GPL version 3 or later https://gnu.org/licenses/gpl.html
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
"""
import pandas as pd
import numpy as np
from pathlib import Path
import os
import seaborn as sns
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix


def get_lab_results_df():
    # Reduce the size of the dataframe's memory footprint by specifying data
    # types; comment out columns you are not using to further decrease the
    # memory footprint.
    # NOTE (stated per-line in the original): if you are not using Dask,
    # change the float dtypes below to float16.
    col_dtypes = {
        'global_id': 'string',
        #'#mme_id': 'category',
        #'user_id': 'string',
        #'external_id': 'string',
        #'inventory_id': 'string',
        'status': 'category',
        #'testing_status': 'category',
        #'batch_id': 'string',
        #'parent_lab_result_id': 'string',
        #'og_parent_lab_result_id': 'string',
        #'copied_from_lab_id': 'string',
        #'lab_user_id': 'string',
        'type': 'category',
        #'foreign_matter': 'bool',
        #'moisture_content_percent': 'float16',
        #'growth_regulators_ppm': 'float16',
        #'cannabinoid_status': 'category',
        #'cannabinoid_editor': 'float32',
        #'cannabinoid_d9_thca_percent': 'float16',
        #'cannabinoid_d9_thca_mg_g': 'float16',
        #'cannabinoid_d9_thc_percent': 'float16',
        #'cannabinoid_d9_thc_mg_g': 'float16',
        #'cannabinoid_d8_thc_percent': 'float16',
        #'cannabinoid_d8_thc_mg_g': 'float16',
        #'cannabinoid_cbd_percent': 'float16',
        #'cannabinoid_cbd_mg_g': 'float16',
        #'cannabinoid_cbda_percent': 'float16',
        #'cannabinoid_cbda_mg_g': 'float16',
        #'cannabinoid_cbdv_percent': 'float16',
        #'cannabinoid_cbg_percent': 'float16',
        #'cannabinoid_cbg_mg_g': 'float16',
        #'terpenoid_pinene_percent': 'float16',
        #'terpenoid_pinene_mg_g': 'float16',
        #'microbial_status': 'category',
        #'microbial_editor': 'string',
        #'microbial_bile_tolerant_cfu_g': 'float16',
        #'microbial_pathogenic_e_coli_cfu_g': 'float16',
        #'microbial_salmonella_cfu_g': 'float16',
        #'mycotoxin_status': 'category',
        #'mycotoxin_editor': 'string',
        #'mycotoxin_aflatoxins_ppb': 'float16',
        #'mycotoxin_ochratoxin_ppb': 'float16',
        #'metal_status': 'category',
        #'metal_editor': 'string',
        #'metal_arsenic_ppm': 'float16',
        #'metal_cadmium_ppm': 'float16',
        #'metal_lead_ppm': 'float16',
        #'metal_mercury_ppm': 'float16',
        #'pesticide_status': 'category',
        #'pesticide_editor': 'string',
        #'pesticide_abamectin_ppm': 'float16',
        #'pesticide_acequinocyl_ppm': 'float16',
        #'pesticide_bifenazate_ppm': 'float16',
        #'pesticide_cyfluthrin_ppm': 'float16',
        #'pesticide_cypermethrin_ppm': 'float16',
        #'pesticide_etoxazole_ppm': 'float16',
        #'pesticide_flonicamid_ppm': 'float',
        #'pesticide_fludioxonil_ppm': 'float16',
        #'pesticide_imidacloprid_ppm': 'float16',
        #'pesticide_myclobutanil_ppm': 'float16',
        #'pesticide_spinosad_ppm': 'float16',
        #'pesticide_spirotetramet_ppm': 'float16',
        #'pesticide_thiamethoxam_ppm': 'float16',
        #'pesticide_trifloxystrobin_ppm': 'float16',
        #'solvent_status': 'category',
        #'solvent_editor': 'string',
        #'solvent_butanes_ppm': 'float16',
        #'solvent_heptane_ppm': 'float16',
        #'solvent_propane_ppm': 'float16',
        #'notes': 'float32',
        #'thc_percent': 'float16',
        'intermediate_type': 'category',
        #'moisture_content_water_activity_rate': 'float16',
        #'solvent_acetone_ppm': 'float16',
        #'solvent_benzene_ppm': 'float16',
        #'solvent_cyclohexane_ppm': 'float16',
        #'solvent_chloroform_ppm': 'float16',
        #'solvent_dichloromethane_ppm': 'float16',
        #'solvent_ethyl_acetate_ppm': 'float16',
        #'solvent_hexanes_ppm': 'float16',
        #'solvent_isopropanol_ppm': 'float16',
        #'solvent_methanol_ppm': 'float16',
        #'solvent_pentanes_ppm': 'float16',
        #'solvent_toluene_ppm': 'float16',
        #'solvent_xylene_ppm': 'float16',
        #'pesticide_acephate_ppm': 'float16',
        #'pesticide_acetamiprid_ppm': 'float16',
        #'pesticide_aldicarb_ppm': 'float16',
        #'pesticide_azoxystrobin_ppm': 'float16',
        #'pesticide_bifenthrin_ppm': 'float16',
        #'pesticide_boscalid_ppm': 'float16',
        #'pesticide_carbaryl_ppm': 'float16',
        #'pesticide_carbofuran_ppm': 'float16',
        #'pesticide_chlorantraniliprole_ppm': 'float16'
    }

    date_cols = [
        'created_at',
        #'deleted_at',
        #'updated_at',
        #'tested_at',
        #'received_at'  # deprecated
    ]

    # combine the column names to load only the columns you are using
    cols = list(col_dtypes.keys()) + date_cols

    lab_results_df = pd.read_csv(file_path / 'LabResults_0.csv',
                                 sep='\t',
                                 encoding='utf-16',
                                 usecols=cols,
                                 dtype=col_dtypes,
                                 parse_dates=date_cols,
                                 skipinitialspace=True)

    # All the datasets in the WA data use global_id, but it has a different
    # meaning for each dataset, which makes the data difficult to understand
    # and causes issues with Pandas when trying to perform operations on more
    # than one dataframe.
    lab_results_df.rename(columns={'global_id': 'lab_results_id'}, inplace=True)

    # dataframe with rows from the original dataframe where the lab_results_id is nan
    null_lab_results_id_df = lab_results_df.loc[lab_results_df.lab_results_id.isna()]

    # drop rows with nan lab_results_ids
    lab_results_df.dropna(subset=['lab_results_id'], inplace=True)

    # extract the lab_id from the lab_results_id
    # lab_ids are embedded in the lab_results_id in the form "WAL##."
    lab_results_df['lab_id'] = lab_results_df.lab_results_id.map(
        lambda x: x[x.find('WAL'): x.find('.')])

    # dataframe with the rows that did not contain a valid lab_id in the
    # lab_results_id (no substring in the form of "WAL##.")
    invalid_lab_id_df = lab_results_df.loc[(lab_results_df.lab_id == '')]

    # remove the rows with invalid lab_ids from the dataframe
    lab_results_df = lab_results_df.loc[~(lab_results_df.lab_id == '')]

    return lab_results_df, null_lab_results_id_df, invalid_lab_id_df


if __name__ == '__main__':
    # change the file path to match where your data is stored
    file_path = Path('../.datasets')
    pd.set_option('display.max_columns', None)
    pd.options.display.float_format = "{:.2f}".format
    # Read in the data
    lab_results_df, null_lab_results_id_df, invalid_lab_id_df = get_lab_results_df()
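# Quick illustration of the lab_id extraction used above (an illustrative
# addition; the sample id below is made up for demonstration):
if __name__ == '__main__':
    sample_id = 'abcdef-WAL07.123'
    print(sample_id[sample_id.find('WAL'):sample_id.find('.')])  # 'WAL07'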
StarcoderdataPython
5179843
<gh_stars>1-10
import pathlib
from setuptools import setup
import versioneer

# The directory containing this file
HERE = pathlib.Path(__file__).parent

# The text of the README file
README = (HERE / "README.md").read_text()

# This call to setup() does all the work
setup(
    name="anonymizedf",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description="a convenient way to anonymize your data for analytics",
    long_description=README,
    long_description_content_type="text/markdown",
    url="https://github.com/AlexFrid/anonymizedf",
    author="<NAME>",
    author_email="<EMAIL>",
    license="BSD",
    classifiers=[
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Scientific/Engineering",
    ],
    keywords='faker pandas anonymize fake mock data',
    packages=["anonymizedf"],
    include_package_data=True,
    install_requires=["pandas", "faker"]
)
StarcoderdataPython
5063596
<filename>utils2.py
import numpy as np
import sklearn
import gtg
import os
from math import log
import random

random.seed(314)


def one_hot(labels, nr_classes):
    labells = labels[:, 0]
    labells = labells.astype(int)
    label_one_hot = np.zeros((labells.size, nr_classes))
    label_one_hot[np.arange(labells.size), labells] = 1
    return label_one_hot


def create_mapping(nr_objects, percentage_labels):
    mapping = np.arange(nr_objects)
    np.random.shuffle(mapping)
    nr_labelled = int(percentage_labels * nr_objects)
    labelled = mapping[:nr_labelled]
    unlabelled = mapping[nr_labelled:]
    return np.sort(labelled), np.sort(unlabelled)


def create_mapping2(labels, percentage_labels):
    nr_classes = int(labels.max() + 1)
    labelled, unlabelled = [], []
    for n_class in range(nr_classes):  # the original used Python 2's xrange
        class_labels = list(np.where(labels == n_class)[0])
        split = int(percentage_labels * len(class_labels))
        random.shuffle(class_labels)
        labelled += class_labels[:split]
        unlabelled += class_labels[split:]
    return np.array(labelled), np.array(unlabelled)


def gen_init_rand_probability(labels, labelled, unlabelled, nr_classes):
    labels_one_hot = np.zeros((labels.shape[0], nr_classes))
    for element in labelled:
        labels_one_hot[element, int(labels[element])] = 1.0
    for element in unlabelled:
        labels_one_hot[element, :] = np.full((1, nr_classes), 1.0 / nr_classes)
    return labels_one_hot


def gen_init_probability(W, labels, labelled, unlabelled):
    """
    :param W: similarity matrix to generate the labels for the unlabelled observations
    :param labels: labels of the already labelled observations
    :return:
    """
    n = W.shape[0]
    k = int(log(n) + 1.)
    # labelled, unlabelled = create_mapping(n, perc_lab)
    W = W[np.ix_(unlabelled, labelled)]

    ps = np.zeros(labels.shape)
    ps[labelled] = labels[labelled]

    max_k_inds = labelled[np.argpartition(W, -k, axis=1)[:, -k:]]
    tmp = np.zeros((unlabelled.shape[0], labels.shape[1]))
    for row in max_k_inds.T:
        tmp += labels[row]
    tmp /= float(k)
    ps[unlabelled] = tmp
    return ps


def get_accuracy(W, softmax_features, labels, labelled, unlabelled, testing_set_size):
    """
    This function computes the accuracy in the testing set

    :param fc7_features: fc7 features for both training and testing set
    :param softmax_features: softmax features for both training and testing set
    :param labels: labels for both training and testing set
    :param accuracy_cnn: the accuracy of cnn (baseline)
    :param testing_set_size: the size of the testing set
    :return: accuracy of our method, accuracy of cnn
    """
    P_new = gtg.gtg(W, softmax_features, labelled, unlabelled, max_iter=1, labels=labels)
    conf = sklearn.metrics.confusion_matrix(labels[unlabelled, :], (P_new[unlabelled, :]).argmax(axis=1))
    return float(conf.trace()) / conf.sum(), P_new


def gen_gtg_label_file(fnames, names_folds, labels_GT, out_fname):
    with open(out_fname, 'w') as file:
        for i in range(len(fnames)):
            splitted_name = fnames[i][0].split('/')
            new_name = splitted_name[4] + '/' + splitted_name[5] + ' ' + names_folds[labels_GT[i]] + "\n"
            file.write(new_name)


def only_labelled_file(fnames, labelled, out_fname):
    with open(out_fname, 'w') as file:
        for i in range(len(fnames)):
            splitted_name = fnames[i][0].split('/')
            if i in labelled:
                new_name = splitted_name[4] + '/' + splitted_name[5] + ' ' + splitted_name[4] + "\n"
                file.write(new_name)


def unit_test():
    """
    unit_test for gen_init_probability function
    :return:
    """
    np.random.seed(314)
    # unlab = 0, 1, 3.  lab = 2, 4, 5
    W = np.array([[5, 3, (8), 4, (9), (1)],
                  [1, 2, (3), 4, (7), (9)],
                  [7, 1, 2, 8, 4, 3],
                  [9, 7, (4), 3, (2), (1)],
                  [5, 7, 4, 2, 8, 6],
                  [6, 4, 5, 3, 1, 2]])
    labels = np.array([[0, 1, 0, 0, 0],
                       [1, 0, 0, 0, 0],
                       [0, 0, 1, 0, 0],
                       [0, 0, 0, 1, 0],
                       [0, 0, 0, 0, 1],
                       [0, 1, 0, 0, 0]])
    res = np.array([[0, 0, 0.5, 0, 0.5],
                    [0, 0.5, 0, 0, 0.5],
                    [0, 0, 1, 0, 0],
                    [0, 0, 0.5, 0, 0.5],
                    [0, 0, 0, 0, 1],
                    [0, 1, 0, 0, 0]])
    # The original call passed 0.5, which does not match the signature of
    # gen_init_probability; the labelled/unlabelled index sets below are
    # taken from the comment above and reproduce the expected `res` rows
    # for the unlabelled observations.
    print(gen_init_probability(W, labels,
                               np.array([2, 4, 5]), np.array([0, 1, 3])))
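# The neighbourhood size k used in gen_init_probability grows logarithmically
# with the number of observations n (an illustrative addition, not part of
# the original module):
if __name__ == '__main__':
    for n in (100, 1000, 10000):
        print(n, int(log(n) + 1.))  # 100 -> 5, 1000 -> 7, 10000 -> 10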
StarcoderdataPython
11371801
import functools
import operator
import time

import torch
from sklearn.metrics import (
    classification_report,
    f1_score,
    precision_recall_fscore_support,
)
from torch import optim as optim, nn as nn

from models import Model

"""
def fit(
    TEXT,
    train_dl,
    valid_dl,
    config,
    conv_depth,
    dense_depth,
    hidden_dim=100,
    lr=1e-3,
    kernel_size=3,
    pool_size=2,
    similarity="dot",
    loss="BCELoss",
    validate_each_epoch=True,
    trainable=False,
):
    model = Model(
        TEXT,
        hidden_dim=hidden_dim,
        conv_depth=conv_depth,
        dense_depth=dense_depth,
        similarity=similarity,
        max_len=20,
        kernel_size=kernel_size,
        pool_size=pool_size,
        trainable=trainable,
    )
    opt = optim.Adam(model.parameters(), lr=lr)
    loss_func = getattr(nn, loss)()
    model.train()
    if validate_each_epoch:
        y_true = [v[2] for v in test_dl]
        y_true = functools.reduce(operator.iconcat, y_true, [])
    print("Start training")
    for epoch in range(1, config["epochs"] + 1):
        running_loss = 0.0
        t0 = time.time()
        for left, right, y in train_dl:
            opt.zero_grad()
            preds = model([left, right])
            loss = loss_func(preds, torch.unsqueeze(y, 1))
            loss.backward()
            opt.step()
            running_loss += loss.data.item()
        epoch_loss = running_loss / len(train_dl)
        print(
            "Epoch: {}, Elapsed: {:.2f}s, Training Loss: {:.4f}".format(
                epoch, time.time() - t0, epoch_loss
            )
        )
        if validate_each_epoch:
            # calculate the validation loss for this epoch
            predictions = []
            val_loss = 0.0
            model.eval()  # turn on evaluation mode
            for left, right, y in valid_dl:
                preds = model([left, right])
                loss = loss_func(preds, torch.unsqueeze(y, 1))
                val_loss += loss.data.item()
            val_loss /= len(valid_dl)
            print("Validate epoch: {}, Val Loss: {:.4f}".format(epoch, val_loss))
    return model


def evaluate(model, test_dl, print_results=True):
    y_true = [v[2] for v in test_dl]
    y_true = functools.reduce(operator.iconcat, y_true, [])
    predictions = []
    model.eval()  # turn on evaluation mode
    for left, right, y in test_dl:
        preds = model([left, right])
        predictions.extend(preds.data > 0.5)
    if print_results:
        print(classification_report(y_true, predictions))
    return f1_score(y_true, predictions, average="weighted") + 1e-10
"""


def fit(
    TEXT,
    train_dl,
    valid_dl,
    config,
    hidden_dim,
    conv_depth,
    kernel_size,
    dense_depth1,
    dense_depth2,
    lr=1e-3,
    pool_size=2,
    similarity="dot",
    loss="CrossEntropyLoss",
    validate_each_epoch=True,
    trainable=False,
):
    model = Model(
        TEXT,
        hidden_dim=hidden_dim,
        conv_depth=conv_depth,
        kernel_size=kernel_size,
        pool_size=pool_size,
        dense_depth1=dense_depth1,
        dense_depth2=dense_depth2,
        max_len=20,
        similarity=similarity,
        trainable=trainable,
    )
    opt = optim.Adam(model.parameters(), lr=lr)
    loss_func = getattr(nn, loss)()
    model.train()
    if validate_each_epoch:
        y_true = [v[2] for v in valid_dl]
        y_true = functools.reduce(operator.iconcat, y_true, [])
    print("Start training")
    for epoch in range(1, config["epochs"] + 1):
        running_loss = 0.0
        t0 = time.time()
        for left, right, y in train_dl:
            opt.zero_grad()
            preds = model([left, right])
            loss = loss_func(preds, y.long())
            loss.backward()
            opt.step()
            running_loss += loss.data.item()
        epoch_loss = running_loss / len(train_dl)
        print(
            "Epoch: {}, Elapsed: {:.2f}s, Training Loss: {:.4f}".format(
                epoch, time.time() - t0, epoch_loss
            )
        )
        if validate_each_epoch:
            # calculate the validation loss for this epoch
            predictions = []
            val_loss = 0.0
            model.eval()  # turn on evaluation mode
            for left, right, y in valid_dl:
                preds = model([left, right])
                loss = loss_func(preds, y.long())
                val_loss += loss.data.item()
                predictions.extend(torch.argmax(torch.log_softmax(preds, dim=1), dim=1))
            val_loss /= len(valid_dl)
            prec, rec, f1, _ = precision_recall_fscore_support(
                y_true, predictions, labels=[0, 1], average="weighted"
            )
            print(
                "Validate epoch: {}, Val Loss: {:.4f}, Prec: {:.4f}, Rec: {:.4f}, F1: {:.4f}".format(
                    epoch, val_loss, prec, rec, f1
                )
            )
    return model


def evaluate(model, test_dl, print_results=True):
    y_true = [v[2] for v in test_dl]
    y_true = functools.reduce(operator.iconcat, y_true, [])
    predictions = []
    model.eval()  # turn on evaluation mode
    for left, right, y in test_dl:
        preds = model([left, right])
        predictions.extend(torch.argmax(torch.log_softmax(preds, dim=1), dim=1))
    if print_results:
        print(classification_report(y_true, predictions))
    return f1_score(y_true, predictions, average="weighted") + 1e-10
StarcoderdataPython
12809214
<reponame>xuhang57/atmosphere
"""
Service Provider model for atmosphere.
"""
from django.db import models
from django.utils import timezone

from core.models.provider import Provider


class NodeController(models.Model):
    """
    NodeControllers are specific to a provider
    They have a dedicated, static IP address and a human readable name
    To use the image manager they must also provide a valid private ssh key
    """
    provider = models.ForeignKey(Provider)
    alias = models.CharField(max_length=256)
    hostname = models.CharField(max_length=256)
    port = models.IntegerField(default=22)
    private_ssh_key = models.TextField()
    start_date = models.DateTimeField(default=timezone.now)
    end_date = models.DateTimeField(null=True, blank=True)

    def ssh_key_added(self):
        return len(self.private_ssh_key) > 0
    ssh_key_added.boolean = True

    def __unicode__(self):
        return "%s - %s" % (self.alias, self.hostname)

    class Meta:
        db_table = 'node_controller'
        app_label = 'core'
StarcoderdataPython
5007430
<reponame>ArniDagur/auto-rental
"""
# Data file specification:

## Fundamentals:
* Data files shall be given the extension .df
* Data files shall be encoded in valid UTF-8

## Revision ID:
* The first line of a data file shall contain the file's revision ID.
* The revision ID may be any positive number that has the following
  properties:
    1. A revision ID must be unique for each revision.
    2. The number representing the revision ID must get bigger as time
       progresses.
* An example of a number that fulfills the given requirements is the number
  of seconds since 1. January 1970 (epoch time).

## Data columns:
TODO: Expand further...

Data file example:
```
1543597775.6957102
TODO: Expand further...
```
"""
from time import time


def new_revision_id():
    epoch_time = time()
    return float(epoch_time)


class FileData:
    def __init__(self, file_name):
        self.__file_name = file_name
        # Start from an empty cache so read()/write() can compare revisions.
        # (The original set these attributes only when the file was missing,
        # which made the first successful read() fail with AttributeError.)
        self.__revision_id = -1.0
        self.__cache = []
        try:
            self.read()
        except FileNotFoundError:
            # File did not exist; let's fix that
            self.__revision_id = new_revision_id()
            self.write()

    def read(self):
        """
        Reads the data file located at self.__file_name, and sets the
        following attributes:
            self.__revision_id
            self.__cache
        Does nothing if cache data is newer than file data.
        """
        with open(self.__file_name, 'r', encoding='utf-8') as df:
            lines = df.readlines()
            df_revision_id = float(lines[0])
            if self.__revision_id < df_revision_id:
                # The data that's in the file is newer than what we've got
                # in our cache, so adopt the file's revision and contents.
                self.__revision_id = df_revision_id
                self.__cache = [line.rstrip('\n') for line in lines[1:]]

    def write(self):
        """
        If cache data is newer than file data, or if no file exists at
        self.__file_name, write the cache data to the file located at
        self.__file_name.
        """
        # 'a+' creates the file when it is missing; the mode 'wr+' used in
        # the original is not a valid open() mode and raises ValueError.
        with open(self.__file_name, 'a+', encoding='utf-8') as df:
            df.seek(0)
            lines = df.readlines()
            if not lines:
                df_revision_id = -1
            else:
                df_revision_id = float(lines[0])
            if self.__revision_id > df_revision_id:
                # The data that's in our cache is newer than what is in the
                # datafile. Write to file what's in our cache...
                # (A minimal completion of the original stub, assuming one
                # cache entry per line after the revision ID.)
                df.seek(0)
                df.truncate()
                df.write(str(self.__revision_id) + '\n')
                for line in self.__cache:
                    df.write(line + '\n')
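# Sanity sketch for the revision-ID rules in the spec above (an illustrative
# addition, not part of the original module): epoch-based IDs are floats that
# grow as time progresses, so a later ID compares greater in practice.
if __name__ == '__main__':
    first = new_revision_id()
    second = new_revision_id()
    print(second >= first)  # True (barring a system clock step backwards)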
StarcoderdataPython
329080
<gh_stars>0
import sys
import binascii

init_byte = 0x0f


def step(current_byte, in_bit):
    out_bit = in_bit ^ ((current_byte & 0x20) >> 5)
    next_byte = (current_byte & 0x01) << 7
    next_byte |= (current_byte & 0x18) << 2
    next_byte |= (current_byte & 0x80) >> 3
    next_byte |= (current_byte & 0x06) << 1
    next_byte |= (current_byte & 0x40) >> 5
    next_byte |= ((current_byte & 0x40) >> 6) ^ ((current_byte & 0x20) >> 5) ^ ((current_byte & 0x08) >> 3) ^ ((current_byte & 0x02) >> 1) ^ 0x01
    return next_byte, out_bit


def run(data, state=init_byte):
    ret = []
    for d in data:
        state, r = step(state, d)
        ret.append(r)
    return ret


def n2ba(n, bit_length=None):
    assert(n >= 0)
    if bit_length is None:
        bit_length = n.bit_length() if n > 0 else 1
    return [(n >> i) & 1 for i in range(bit_length)]


def ba2n(ba):
    n = 0
    for i in range(len(ba)):
        n |= ba[i] << i
    return n


def ba2s(ba):
    assert(len(ba) % 8 == 0)
    r = []
    c = 0
    for i in range(0, len(ba), 8):
        for j, e in zip(range(8), ba[i:i + 8]):
            c |= e << j
        r.insert(0, chr(c))
        c = 0
    return ''.join(r)


if __name__ == '__main__':
    code = '2FED5B7FEB81D3C44E39E4AEA346010240E0CBBB'
    ba = n2ba(int(code, 16), len(code) * 4)
    for i in range(256):
        ret = ba2s(run(ba, i))
        if ret[:5] == '34C3_':
            print(ret)
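# Round-trip sketch for the bit helpers above (an illustrative addition, not
# part of the original solver): n2ba emits least-significant-bit-first arrays
# and ba2n inverts it.
assert n2ba(0b101101) == [1, 0, 1, 1, 0, 1]
assert ba2n(n2ba(0b101101)) == 0b101101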
StarcoderdataPython
4884150
import logging
from dataclasses import astuple, fields

from pywps import LiteralInput
from ravenpy.models import GR4JCN

from raven import config

from . import wpsio as wio
from .wps_raven import RavenProcess

LOGGER = logging.getLogger("PYWPS")

"""
Notes
-----
The configuration files for RAVEN's GR4J-Cemaneige model are in
models/raven-gr4j-cemaneige. All parameters that could potentially be
user-defined are tagged using {}. These tags need to be replaced by actual
values before the model is launched.
"""

params_defaults = GR4JCN.Params(
    GR4J_X1=0.529,
    GR4J_X2=-3.396,
    GR4J_X3=407.29,
    GR4J_X4=1.072,
    CEMANEIGE_X1=16.9,
    CEMANEIGE_X2=0.947,
)

params = LiteralInput(
    "params",
    "Comma separated list of model parameters",
    abstract="Parameters: " + ", ".join(f.name for f in fields(params_defaults)),
    data_type="string",
    default=", ".join(map(str, astuple(params_defaults))),
    min_occurs=0,
    max_occurs=config.max_parallel_processes,
)


class RavenGR4JCemaNeigeProcess(RavenProcess):
    """
    RAVEN emulator for the GR4J-Cemaneige model.

    This process runs the GR4J-Cemaneige model using a RAVEN emulator.
    Users need to provide netCDF input files for rain, snow minimum and
    maximum temperature as well as potential evapotranspiration. To run
    diagnostics, observed stream flows are also required.
    """
    identifier = "raven-gr4j-cemaneige"
    abstract = "Raven GR4J + CEMANEIGE hydrological model"
    title = ""
    version = ""
    model_cls = GR4JCN
    tuple_inputs = {"params": GR4JCN.Params}

    inputs = [
        wio.ts,
        wio.nc_spec,
        params,
        wio.start_date,
        wio.end_date,
        wio.nc_index,
        wio.duration,
        wio.run_name,
        wio.hrus,
        wio.area,
        wio.latitude,
        wio.longitude,
        wio.elevation,
        wio.evaporation,
        wio.rain_snow_fraction,
        wio.rvc,
    ]
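# Illustrative sketch of the {} tag substitution described in the Notes above
# (an addition; the template line below is made up, the real templates live
# under models/raven-gr4j-cemaneige):
if __name__ == "__main__":
    template = ":Params {GR4J_X1} {GR4J_X2} {GR4J_X3} {GR4J_X4}"
    print(template.format(GR4J_X1=0.529, GR4J_X2=-3.396,
                          GR4J_X3=407.29, GR4J_X4=1.072))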
StarcoderdataPython
8137965
#!/usr/bin/env python2
import signal, socket, pickle, zlib, os

signal.signal(signal.SIGCHLD, signal.SIG_IGN)

s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(("0.0.0.0", 1024))
s.listen(5)

entries = {}


def rl():
    l = ""
    while not l.endswith("\n"):
        c = s.recv(1)
        assert(c)
        l += c
    return l[:-1]


def spam_list():
    s.sendall("Listing %d passwords:\n" % len(entries))
    for k, v in entries.iteritems():
        s.sendall("%s: %s\n" % (k, v))
    s.sendall("---\n")


def spam_add():
    s.sendall("Enter name of the site: ")
    name = rl()
    s.sendall("Enter password: ")
    pasw = rl()
    entries[name] = pasw
    s.sendall("Password successfully added.\n")


def spam_del():
    s.sendall("Enter name of the site which should be deleted: ")
    name = rl()
    if name not in entries:
        s.sendall("Entry not found.\n")
    else:
        del entries[name]
        s.sendall("Entry successfully deleted.\n")


def spam_backup():
    s.sendall("Your backup: %s\n" % zlib.compress(pickle.dumps(entries)).encode("base64"))


def spam_restore():
    s.sendall("Paste your backup here: ")
    backup = rl()
    global entries
    entries = pickle.loads(zlib.decompress(backup.decode("base64")))
    s.sendall("Successfully restored %d entries\n" % len(entries))


while 1:
    c, _ = s.accept()
    p = os.fork()
    if p != 0:
        c.close()
        continue
    else:
        s.close()
        break

s = c
del c

s.sendall("Welcome to Super Password Authentication Manager (SPAM)!\n")
while 1:
    while 1:
        s.sendall("Menu:\n")
        s.sendall("1) List Passwords\n")
        s.sendall("2) Add a Password\n")
        s.sendall("3) Remove a Password\n")
        s.sendall("4) Backup Passwords\n")
        s.sendall("5) Restore backup\n")
        l = rl()
        if len(l) == 1 and l in "12345":
            [spam_list, spam_add, spam_del, spam_backup, spam_restore][int(l) - 1]()
        else:
            s.sendall("Invalid choice.\n")
StarcoderdataPython
325500
<gh_stars>1-10
from face.lbpcascade_animeface import LibCascadeAnimeFace


class FaceModel:
    model = None

    def __init__(self, name):
        if name == "lbpcascade_animeface":
            self.model = LibCascadeAnimeFace()

    def show(self, img_path, args):
        self.model.show(img_path, args)


if __name__ == "__main__":
    model = FaceModel("lbpcascade_animeface")
    model.show("I:\\work\\WORK\\ReclassifyAnimeCG\\ReclassifyAnimeCG\\data-sample\\train\\Emilia\\1.jpg", None)
StarcoderdataPython
11256980
<reponame>dolboBobo/python3_ios
"""
===================
Centered Ticklabels
===================

sometimes it is nice to have ticklabels centered.  Matplotlib currently
associates a label with a tick, and the label can be aligned
'center', 'left', or 'right' using the horizontal alignment property::

    ax.xaxis.set_tick_params(horizontalalignment='right')

but this doesn't help center the label between ticks.  One solution
is to "fake it".  Use the minor ticks to place a tick centered
between the major ticks.  Here is an example that labels the months,
centered between the ticks
"""

import numpy as np
import matplotlib.cbook as cbook
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt

# load some financial data; apple's stock price
with cbook.get_sample_data('aapl.npz') as fh:
    r = np.load(fh)['price_data'].view(np.recarray)
r = r[-250:]  # get the last 250 days
# Matplotlib works better with datetime.datetime than np.datetime64, but the
# latter is more portable.
date = r.date.astype('O')

fig, ax = plt.subplots()
ax.plot(date, r.adj_close)

ax.xaxis.set_major_locator(dates.MonthLocator())
ax.xaxis.set_minor_locator(dates.MonthLocator(bymonthday=15))
ax.xaxis.set_major_formatter(ticker.NullFormatter())
ax.xaxis.set_minor_formatter(dates.DateFormatter('%b'))

for tick in ax.xaxis.get_minor_ticks():
    tick.tick1line.set_markersize(0)
    tick.tick2line.set_markersize(0)
    tick.label1.set_horizontalalignment('center')

imid = len(r) // 2
ax.set_xlabel(str(date[imid].year))
plt.show()
StarcoderdataPython
9686603
import numpy as np

from neuralnet.utils import prep_batch
from neuralnet.optimizers import GradientDescent


class NeuralNet:
    def __init__(self, hidden, initializer, optimizer):
        self.hidden = hidden
        self.initializer = initializer
        self.training_error = []
        self.validation_error = []
        self.layer = None
        self.optimizer = optimizer

    def __compile(self, X, y, seed):
        "Prepare for training."
        self.weight = self.__weights(y.shape[1], X.shape[1], self.hidden, seed=seed)
        self.layer = [0 for i in range(len(self.weight) + 1)]
        self.error = [0 for i in range(len(self.weight))]
        self.delta = self.error.copy()
        self.n_features = X.shape[1]
        self.n_samples = X.shape[0]

    def __weights(self, row, col, hidden, seed):
        "Create connections."
        np.random.seed(seed)
        num_hidden = len(hidden)
        result = []
        result.append(self.initializer(col, hidden[0]))  # input weights
        for i in range(1, num_hidden):
            # hidden layer connections with neighbor layers
            result.append(self.initializer(hidden[i - 1], hidden[i]))
        result.append(self.initializer(hidden[-1], row))  # output weights
        return result

    def train(self, X, y, epoch=100, batch_size=16, val_X=None, val_y=None, seed=None):
        if self.layer is None:
            self.__compile(X, y, seed)
        for _ in range(epoch):
            for _X, _y in prep_batch(batch_size, X, y):
                self.layer[0] = _X
                # Calculate forward through the network. (feed forward)
                for i in range(1, len(self.layer)):
                    self.layer[i] = self.optimizer.activation.forward(
                        np.dot(self.layer[i - 1], self.weight[i - 1]))
                self.weight = self.optimizer.optimize(_y, self.weight, self.layer)
            self.training_error.append(self.optimizer.loss.error(y, self.predict(X)))
            if val_X is not None:
                self.validation_error.append(self.optimizer.loss.error(val_y, self.predict(val_X)))
        return self

    def predict(self, X):
        x = X.copy()
        for weight in self.weight:
            x = self.optimizer.activation.forward(np.dot(x, weight))
        return x
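# Shape sketch for the weight layout built by __weights (an illustrative
# addition; the zero-matrix initializer is a stand-in for a real one, and the
# name-mangled attribute is used only to poke at the private helper):
if __name__ == "__main__":
    init = lambda rows, cols: np.zeros((rows, cols))
    net = NeuralNet(hidden=[8, 8], initializer=init, optimizer=None)
    ws = net._NeuralNet__weights(row=3, col=4, hidden=[8, 8], seed=0)
    print([w.shape for w in ws])  # [(4, 8), (8, 8), (8, 3)]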
StarcoderdataPython
6442121
<reponame>Arenhart/Portfolio # -*- coding: utf-8 -*- """ Created on Tue Feb 4 14:10:31 2020 @author: <NAME> """ from skimage import draw from skimage import filters from skimage import morphology import numpy as np import math import csv import matplotlib.pyplot as plt import io from scipy import ndimage from numba import jit, njit, prange, int32, int64, uint32 from numba.typed import Dict import tkinter.filedialog as filedialog import PIL.Image as pil from reportlab.pdfgen import canvas from reportlab.lib.units import cm, inch from reportlab.lib.pagesizes import A4 from reportlab.lib.utils import ImageReader DICT_PATH = 'dimensions_list.csv' def add_region(arr, position, size, porosity, shape): view = arr[position[0] : position[0] + size[0], position[1] : position[1] + size[1]] target_porosity= - math.log(1-porosity) target_area = size[0] * size[1] * target_porosity pore_area = shape[0] * shape[1] * math.pi n_pores = int(target_area // pore_area) for _ in range(n_pores): x = np.random.randint(0,size[0]) y = np.random.randint(0,size[1]) rr, cc = draw.ellipse(x, y, shape[0], shape[1], shape = size) view[rr, cc] = 1 def old_correlation(arr): ''' Legacy code, only for binary correlation ''' x, y = arr.shape corr_x = np.zeros(x//2) corr_y = np.zeros(y//2) corr_x[0] = arr.sum()/arr.size corr_y[0] = arr.sum()/arr.size for i in range(1,x//2): corr_x[i] = (arr[i:,:] * arr[:-i,:]).sum() / (arr.size - arr.shape[1] * i) for i in range(1,y//2): corr_y[i] = (arr[:,i:] * arr[:,:-i]).sum() / (arr.size - arr.shape[0] * i) return corr_x, corr_y def covariance_3d(arr): ''' 3D correlation for scalar arrays ''' x, y, z = arr.shape corr = np.zeros((3, max(x,y,z)-1)) corr[:,0] = 1 for ax in (0,1,2): for i in range(1, arr.shape[ax] - 1): null_index = slice(None, None, None) left_index = slice(i, None) right_index = slice(-i) left_slice = [null_index,] * 3 left_slice[ax] = left_index left_slice = tuple(left_slice) right_slice = [null_index,] * 3 right_slice[ax] = right_index right_slice = tuple(right_slice) corr[ax][i] =1 - ((((arr[left_slice] - arr[right_slice])**2).sum() / (arr[left_slice]).size) / limit_covariance(arr[left_slice],arr[right_slice])) return corr def limit_covariance(arr1, arr2): low = min(arr1.min(), arr2.min()) high = max(arr2.max(), arr2.max()) bins = int(min(high - low + 1, np.sqrt(arr1.size + arr2.size))) count_1, edges_1 = np.histogram(arr1, bins = bins) count_2, edges_2 = np.histogram(arr2, bins = bins) if 'int' in str(arr1.dtype): disp = edges_1[1] edges_1 -= disp edges_1+= 2*disp * np.linspace(0, 1, num = edges_1.size) if 'int' in str(arr2.dtype): disp = edges_2[1] edges_2 -= disp edges_2 += 2*disp * np.linspace(0, 1, num = edges_2.size) total_1 = count_1.sum() count_1 = count_1 / total_1 total_2 = count_2.sum() count_2 = count_2 / total_2 covariance = 0 for i, j in ((a,b) for a in range(bins) for b in range(bins)): mean_i = (edges_1[i] + edges_1[i+1]) / 2 mean_j = (edges_2[j] + edges_2[j+1]) / 2 probability = count_1[i] * count_2[j] value = (mean_i - mean_j) ** 2 covariance += probability * value return covariance ''' def correlation(arr): x, y = arr.shape corr_x = np.zeros(x) corr_y = np.zeros(y) for i in range(x): corr_y += np.correlate(arr[i,:], arr[i,:], mode = 'full')[-y:] corr_y /= arr.size for i in range(y): corr_x += np.correlate(arr[:,i], arr[:,i], mode = 'full')[-x:] corr_x /= arr.size return corr_x, corr_y def vector_correlation(v): corr = [] corr.append(np.sum(v*v)) for i in range(1,len(v)): corr.append(np.sum(v[i:] * v[:-i])) ''' def draw_image(arr, name = 'default.png'): 
    fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(10, 6))
    corr_x, corr_y = old_correlation(arr)
    phi = arr.mean()
    phi2 = phi ** 2
    x_size = max((len(corr_x), len(corr_y)))
    ax1.imshow(arr)
    ax1.axis('off')
    ax2.plot(corr_x)
    ax2.plot(corr_y)
    ax2.plot((0, x_size), (phi2, phi2), color='0.75', ls='--')
    plt.savefig(f'REV/{name}')


def rve_porosity(img, individual_results=False):
    x, y, z = img.shape
    divisors = np.array(img.shape)
    divisors = np.log2(divisors).astype('uint16')
    total_divisions = divisors.min()
    extra_divisions = divisors - total_divisions
    results = []
    partial_results = []
    for i in range(1, total_divisions):
        partial = []
        for steps in ((x_step, y_step, z_step)
                      for x_step in range(2 ** (i + extra_divisions[0]))
                      for y_step in range(2 ** (i + extra_divisions[1]))
                      for z_step in range(2 ** (i + extra_divisions[2]))):
            length = 2 ** (total_divisions - i)
            porosity = img[length * steps[0]: length * (1 + steps[0]),
                           length * steps[1]: length * (1 + steps[1]),
                           length * steps[2]: length * (1 + steps[2])].mean()
            results.append((i, porosity))
            partial.append((porosity))
        partial = np.array(partial)
        partial_results.append((i, partial.mean(), partial.std()))
    if individual_results:
        return np.array(partial_results), np.array(results)
    else:
        return np.array(partial_results)


@jit(nopython=True, parallel=False)
def maxi_balls_serial(img, image_edt):
    out = np.zeros(img.shape)
    distances = image_edt
    for x in prange(img.shape[0]):
        for y in range(img.shape[1]):
            for z in range(img.shape[2]):
                if x == y and y == z and x % 10 == 0:
                    print(x)
                radius = distances[x, y, z]
                if radius <= 0.5:
                    continue
                point = np.array((x, y, z))
                b_size = int(radius) * 2 + 1
                b = create_sphere(b_size)
                if b.shape[0] % 2 == 0:
                    b = b[0:-1, 0:-1, 0:-1]
                b_radius = b.shape[0] // 2
                lower_bounds = point - b_radius
                upper_bounds = point + b_radius + 1
                for ax, value in enumerate(lower_bounds):
                    if value < 0:
                        index = [slice(None, None), ] * 3
                        index[ax] = slice(-value, None)
                        b = b[(index[0], index[1], index[2])]
                        lower_bounds[ax] = 0
                for ax, value in enumerate(upper_bounds):
                    if value > img.shape[ax]:
                        index = [slice(None, None), ] * 3
                        index[ax] = slice(None, img.shape[ax] - value)
                        b = b[(index[0], index[1], index[2])]
                        upper_bounds[ax] = img.shape[ax]
                #image_slice = ([slice(lower_bounds[i], upper_bounds[i]) for i in range(3)])
                #image_slice = (image_slice[0], image_slice[1], image_slice[2])
                #print(type(radius), b.dtype, img.dtype)
                sub_img = img[lower_bounds[0]:upper_bounds[0],
                              lower_bounds[1]:upper_bounds[1],
                              lower_bounds[2]:upper_bounds[2]].astype(np.float64)
                b_value = b * radius
                #print(b_value.dtype, sub_img.dtype, b_value.shape, sub_img.shape, '\n')
                b_value = b_value * sub_img
                out[lower_bounds[0]:upper_bounds[0],
                    lower_bounds[1]:upper_bounds[1],
                    lower_bounds[2]:upper_bounds[2]] = np.where(
                        b_value > out[lower_bounds[0]:upper_bounds[0],
                                      lower_bounds[1]:upper_bounds[1],
                                      lower_bounds[2]:upper_bounds[2]],
                        b_value,
                        out[lower_bounds[0]:upper_bounds[0],
                            lower_bounds[1]:upper_bounds[1],
                            lower_bounds[2]:upper_bounds[2]])
    print('Finished Maxiballs')
    return out


def maxi_balls(img):
    calculated_spheres = Dict.empty(key_type=int64, value_type=uint32[:, :, ::1],)
    image_edt = ndimage.morphology.distance_transform_edt(img).astype('uint32')
    return _maxi_balls(img, image_edt, calculated_spheres)
    #distances = ndimage.morphology.distance_transform_edt(img)#.astype('uint16')


@jit(nopython=True, parallel=False)
def _maxi_balls(img, image_edt, calculated_spheres):
    x_max, y_max, z_max = img.shape
    out = np.zeros(img.shape).astype(np.uint64)
    distances = image_edt
    progress_steps = (y_max * z_max) // 20
    progress = 0
    for z in prange(z_max):
        for y in range(y_max):
            for x in range(x_max):
                radius = distances[x, y, z]
                if radius <= 0.5:
                    continue
                point = np.array((x, y, z))
                b_size = int(radius) * 2 + 1
                # Reuse structuring spheres cached by diameter.
                registered_key = 0
                for key in calculated_spheres.keys():
                    if b_size == key:
                        b = calculated_spheres[b_size]
                        registered_key = 1
                        break
                if registered_key == 0:
                    calculated_spheres[b_size] = create_sphere(b_size)
                    b = calculated_spheres[b_size]
                if b.shape[0] % 2 == 0:
                    b = b[0:-1, 0:-1, 0:-1]
                b_radius = b.shape[0] // 2
                lower_bounds = point - b_radius
                upper_bounds = point + b_radius + 1
                for ax, value in enumerate(lower_bounds):
                    if value < 0:
                        index = [slice(None, None), ] * 3
                        index[ax] = slice(-value, None)
                        b = b[(index[0], index[1], index[2])]
                        lower_bounds[ax] = 0
                for ax, value in enumerate(upper_bounds):
                    if value > img.shape[ax]:
                        index = [slice(None, None), ] * 3
                        index[ax] = slice(None, img.shape[ax] - value)
                        b = b[(index[0], index[1], index[2])]
                        upper_bounds[ax] = img.shape[ax]
                sub_out = out[lower_bounds[0]:upper_bounds[0],
                              lower_bounds[1]:upper_bounds[1],
                              lower_bounds[2]:upper_bounds[2]]
                sub_img = img[lower_bounds[0]:upper_bounds[0],
                              lower_bounds[1]:upper_bounds[1],
                              lower_bounds[2]:upper_bounds[2]]
                b_value = b * radius
                b_value = b_value * sub_img
                #print(sub_out, b_value, radius, radius <= 0.5, (x, y, z))
                inscribe_spheres(sub_out, b_value, b.shape)
    print('Finished Maxiballs')
    return out


@njit(parallel=False)
def inscribe_spheres(img, values, shape):
    for x in prange(shape[0]):
        for y in range(shape[1]):
            for z in range(shape[2]):
                if values[x, y, z] > img[x, y, z]:
                    img[x, y, z] = values[x, y, z]


@jit(nopython=True)
def create_sphere(diameter):
    ind = np.zeros((3, diameter, diameter, diameter))
    for i in range(1, diameter):
        ind[0, i, :, :] = i
        ind[1, :, i, :] = i
        ind[2, :, :, i] = i
    ind -= (diameter - 1) / 2
    ball = np.sqrt(ind[0, :, :, :] ** 2 + ind[1, :, :, :] ** 2 + ind[2, :, :, :] ** 2)
    radius = (diameter - 1) / 2
    ball = (ball <= radius).astype(np.uint32)
    return ball


def run(debug=False):
    samples = []
    sample_names = filedialog.askopenfilenames()
    saved_dimensions = {}
    try:
        with open(DICT_PATH, mode='r') as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            for row in reader:
                x, y, z = [int(i.strip()) for i in row[1].split(',')]
                saved_dimensions[row[0]] = (x, y, z)
    except FileNotFoundError:
        pass
    modified_dict = False
    for sample in sample_names:
        if sample in saved_dimensions.keys():
            samples.append((sample, saved_dimensions[sample]))
        else:
            dimensions = input(f'Amostra: {sample} Dimensões ( [x,y,z] separadas por virgula): ')
            x, y, z = [int(i.strip()) for i in dimensions.split(',')]
            samples.append((sample, (x, y, z)))
            saved_dimensions[sample] = f'{x},{y},{z}'
            modified_dict = True
    if modified_dict:
        with open(DICT_PATH, mode='w', newline='') as csvfile:
            writer = csv.writer(csvfile, delimiter=',')
            for key in saved_dimensions.keys():
                writer.writerow([key, saved_dimensions[key]])
    for sample in samples:
        path = sample[0]
        x, y, z = sample[1]
        img = np.fromfile(path, dtype='uint8')
        img = img.reshape((z, y, x), order='C')
        img = np.transpose(img)
        mb_img = maxi_balls(img)
        results = covariance_3d(mb_img)
        hist = np.histogram(mb_img.flatten(), bins=20)
        np.savetxt(path[:-4] + '_covariogram.csv', results, delimiter=';')
        np.savetxt(path[:-4] + '_histogram_count.csv', hist[0], delimiter=';')
        np.savetxt(path[:-4] + '_histogram_edges.csv', hist[1], delimiter=';')


def plt_as_variable(fig):
    buffer = io.BytesIO()
    fig.savefig(buffer)
    buffer.seek(0)
    return pil.open(buffer)


def consolidate_covariogram_results():
    folder = filedialog.askdirectory()
    reference_file = 'dimensions_list.csv'
    str_cov = '_covariogram.csv'
    str_hist_count = '_histogram_count.csv'
    str_hist_edges = '_histogram_edges.csv'
    volumes = {}
    with open(folder + '//' + reference_file, newline='', encoding='utf-8') as file:
        csv_reader = csv.reader(file, delimiter=',')
        for line in csv_reader:
            key = line[0].split('/')[-1][:-4]
            val = {}
            val['size'] = eval(f'({line[1]})')
            val['res'] = int(line[2])
            for new_key, i in (('rock_type', 3), ('pore_type', 4), ('pore_size', 5)):
                try:
                    val[new_key] = line[i]
                except IndexError:
                    val[new_key] = 'Indefinido'
            volumes[key] = val
    for sample in volumes.keys():
        with open(f'{folder}//{sample}{str_cov}', newline='') as file:
            csv_reader = csv.reader(file, delimiter=';')
            volumes[sample]['covariogram_x'] = [float(i) for i in next(csv_reader)]
            volumes[sample]['covariogram_y'] = [float(i) for i in next(csv_reader)]
            volumes[sample]['covariogram_z'] = [float(i) for i in next(csv_reader)]
        with open(f'{folder}//{sample}{str_hist_count}', newline='') as file:
            count = [float(i) for i in file]
            volumes[sample]['histogram_count'] = count
        with open(f'{folder}//{sample}{str_hist_edges}', newline='') as file:
            count = [float(i) for i in file]
            volumes[sample]['histogram_edges'] = count
    return volumes


def color_gen():
    colors = ('blue', 'green', 'red', 'cyan', 'magenta', 'yellow',
              'darkgreen', 'gold', 'crimson', 'tomato', 'pink', 'indigo')
    i = 0
    while i < len(colors):
        yield colors[i]
        i += 1


def generate_report(volumes, output_file='report.pdf'):
    c = canvas.Canvas(output_file)
    for sample in volumes.keys():
        c.drawString(3 * cm, 27 * cm, f'Amostra: {sample}')
        c.drawString(3 * cm, 4 * cm, f'Tipo de rocha: {volumes[sample]["rock_type"]}')
        c.drawString(3 * cm, 3.5 * cm, f'Tipo de poro: {volumes[sample]["pore_type"]}')
        c.drawString(3 * cm, 3 * cm, f'Tamanho de poro: {volumes[sample]["pore_size"]}')
        fig = plt.figure(dpi=300, figsize=[15 * (cm / inch), 18 * (cm / inch)])
        ax1 = fig.add_subplot(211, title='Covariograma',
                              xlabel='Distância [nm]', ylabel='Covariância')
        ax2 = fig.add_subplot(212, title='Distribuição de tamanho de poro',
                              xlabel='Tamanho de poro [nm]', ylabel='Frequência')
        res = volumes[sample]['res']
        for cov in ('covariogram_x', 'covariogram_y', 'covariogram_z'):
            vals = volumes[sample][cov]
            x_axis = [i * res for i in range(len(vals))]
            ax1.plot(x_axis, vals, label=cov)
        ax1.legend()
        edges = volumes[sample]['histogram_edges']
        bins = [((edges[i] + edges[i + 1]) / 2) * res for i in range(len(edges) - 1)]
        count = volumes[sample]['histogram_count']
        for i in range(1, len(count) - 1):
            if count[i] == 0 and count[i - 1] > 0 and count[i + 1] > 0:
                count[i] = (count[i - 1] + count[i + 1]) / 2
        bins = bins[1:]
        count = count[1:]
        ax2.plot(bins, count)
        img = ImageReader(plt_as_variable(fig))
        c.drawImage(img, 3 * cm, 5 * cm, width=cm * 15, height=cm * 20)
        c.showPage()
        plt.close(fig)
    for prop in ('rock_type', 'pore_type', 'pore_size'):
        c.setStrokeColor('black')
        c.drawString(3 * cm, 27 * cm, f'Propriedade: {prop}')
        fig = plt.figure(dpi=300, figsize=[15 * (cm / inch), 18 * (cm / inch)])
        ax1 = fig.add_subplot(211, xscale='log')
        ax2 = fig.add_subplot(212, xscale='log')
        col_gen = color_gen()
        vals = [volumes[i][prop] for i in volumes.keys()]
        vals = list(set(vals))
        legend_pos = 4
        for val in vals:
            color = next(col_gen)
            c.setStrokeColor(color)
            c.setFillColor(color)
            c.drawString(3 * cm, legend_pos * cm, f'{val}')
            legend_pos -= 0.5
            samples = [i for i in volumes.keys() if volumes[i][prop] == val]
            for sample in samples:
                res = volumes[sample]['res']
                vals = volumes[sample]['covariogram_z']
                x_axis = [i * res for i in range(len(vals))]
                ax1.plot(x_axis, vals, color=color, linewidth=1)
                edges = volumes[sample]['histogram_edges']
                bins = [((edges[i] + edges[i + 1]) / 2) * res for i in range(len(edges) - 1)]
                count = volumes[sample]['histogram_count']
                for i in range(1, len(count) - 1):
                    if count[i] == 0 and count[i - 1] > 0 and count[i + 1] > 0:
                        count[i] = (count[i - 1] + count[i + 1]) / 2
                bins = bins[1:]
                count = count[1:]
                total = sum(count)
                count = [i / total for i in count]
                ax2.plot(bins, count, color=color, linewidth=1)
        img = ImageReader(plt_as_variable(fig))
        c.drawImage(img, 3 * cm, 5 * cm, width=cm * 15, height=cm * 20)
        c.showPage()
        plt.close(fig)
    c.save()


'''
Examples:

arr = np.zeros((2000,2000))
add_region(arr, (0,0), (2000,2000), 0.55, (20,10))
draw_image(arr, 'teste3.png')

arr = np.zeros((2000,2000))
add_region(arr, (0,0), (1000,2000), 0.55, (20,10))
add_region(arr, (1000,0), (1000,2000), 0.25, (60,60))
draw_image(arr, 'teste4.png')

arr = np.zeros((2000,2000))
add_region(arr, (0,0), (1000,2000), 0.55, (20,10))
add_region(arr, (1000,0), (1000,2000), 0.25, (5,5))
draw_image(arr, 'teste5.png')

arr = np.zeros((2000,2000))
add_region(arr, (0,0), (1000,2000), 0.55, (20,10))
add_region(arr, (1000,0), (1000,2000), 0.55, (5,5))
draw_image(arr, 'teste6.png')

arr = np.zeros((2000,2000))
add_region(arr, (0,0), (2000,2000), 0.35, (3,3))
add_region(arr, (0,0), (2000,2000), 0.25, (100,100))
draw_image(arr, 'teste1.png')

arr = np.zeros((2000,2000))
add_region(arr, (0,0), (300,2000), 0.25, (10,10))
add_region(arr, (300,0), (300,2000), 0.55, (3,3))
add_region(arr, (600,0), (100,2000), 0.25, (10,10))
add_region(arr, (700,0), (500,2000), 0.55, (3,3))
add_region(arr, (1200,0), (300,2000), 0.25, (10,10))
add_region(arr, (1300,0), (200,2000), 0.55, (3,3))
add_region(arr, (1500,0), (500,2000), 0.25, (10,10))
draw_image(arr, 'teste2.png')

arr = np.zeros((2000,2000))
add_region(arr, (0,0), (2000,2000), 0.25, (10,10))
add_region(arr, (100,100), (400,400), 0.55, (3,3))
add_region(arr, (50,600), (400,400), 0.55, (3,3))
add_region(arr, (1500,600), (400,400), 0.55, (3,3))
add_region(arr, (1000,100), (400,400), 0.55, (3,3))
add_region(arr, (1000,1000), (400,400), 0.55, (3,3))
add_region(arr, (1300,500), (400,400), 0.55, (3,3))
add_region(arr, (500,1300), (400,400), 0.55, (3,3))
draw_image(arr, 'teste7.png')

arr = np.zeros((2000,2000))
arr[300:600,:] = 1
arr[700:1200,:] = 1
arr[1300:1500,:] = 1
draw_image(arr, 'teste8.png')

sample image:

path = r'D:\\Desktop\\amostra imagem'
dol = np.fromfile(path+r'\dol_crop.raw', dtype = 'int16')
dol = dol.reshape((512,512,256), order = 'F')
thresh = filters.threshold_otsu(dol)
dol = (dol <= thresh).astype('uint8')
dol_p = maxi_balls(dol)
dol_n = maxi_balls(1 - dol)
dol_np = dol_p - dol_n
cov_r = covariance_3d(dol)
cov_p = covariance_3d(dol_p)
cov_n = covariance_3d(dol_n)
cov_np = covariance_3d(dol_np)
2 * p * (1-p) #dol_r limit covariance
limit_covariance(dol_p)
limit_covariance(dol_n)
limit_covariance(dol_np)

for i,j in (('x',0),('y',1),('z',2)):
    np.savetxt(f'cov_p_{i}.txt', cov_p[j], delimiter = ';')
for i,j in (('x',0),('y',1),('z',2)):
    np.savetxt(f'cov_n_{i}.txt', cov_n[j], delimiter = ';')
for i,j in (('x',0),('y',1),('z',2)):
    np.savetxt(f'cov_np_{i}.txt', cov_np[j], delimiter = ';')
for i,j in (('x',0),('y',1),('z',2)):
    np.savetxt(f'cov_r_{i}.txt', cov_r[j], delimiter = ';')

plt.imsave('dol_np_slice.png', dol_np[:,:,100], cmap = 'seismic')
plt.imsave('dol_n_slice.png', dol_n[:,:,100], cmap = 'plasma')
plt.imsave('dol_p_slice.png', dol_p[:,:,100], cmap = 'plasma')

mb = maxi_balls(arr,edt)
maxi_balls.parallel_diagnostics(level=2)
'''
StarcoderdataPython
3222130
<filename>stubs/micropython-v1_12-pyboard/stm.py<gh_stars>0
"""
Module: 'stm' on micropython-v1.12-pyboard
"""
# MCU: {'ver': 'v1.12', 'port': 'pyboard', 'arch': 'armv7emsp', 'sysname': 'pyboard', 'release': '1.12.0', 'name': 'micropython', 'mpy': 7685, 'version': '1.12.0', 'machine': 'PYBv1.1 with STM32F405RG', 'build': '', 'nodename': 'pyboard', 'platform': 'pyboard', 'family': 'micropython'}
# Stubber: 1.5.4
from typing import Any

ADC1 = 1073815552  # type: int
ADC123_COMMON = 1073816320  # type: int
ADC2 = 1073815808  # type: int
ADC3 = 1073816064  # type: int
ADC_CR1 = 4  # type: int
ADC_CR2 = 8  # type: int
ADC_DR = 76  # type: int
ADC_HTR = 36  # type: int
ADC_JDR1 = 60  # type: int
ADC_JDR2 = 64  # type: int
ADC_JDR3 = 68  # type: int
ADC_JDR4 = 72  # type: int
ADC_JOFR1 = 20  # type: int
ADC_JOFR2 = 24  # type: int
ADC_JOFR3 = 28  # type: int
ADC_JOFR4 = 32  # type: int
ADC_JSQR = 56  # type: int
ADC_LTR = 40  # type: int
ADC_SMPR1 = 12  # type: int
ADC_SMPR2 = 16  # type: int
ADC_SQR1 = 44  # type: int
ADC_SQR2 = 48  # type: int
ADC_SQR3 = 52  # type: int
ADC_SR = 0  # type: int
CAN1 = 1073767424  # type: int
CAN2 = 1073768448  # type: int
CRC = 1073885184  # type: int
CRC_CR = 8  # type: int
CRC_DR = 0  # type: int
CRC_IDR = 4  # type: int
DAC = 1073771520  # type: int
DAC1 = 1073771520  # type: int
DAC_CR = 0  # type: int
DAC_DHR12L1 = 12  # type: int
DAC_DHR12L2 = 24  # type: int
DAC_DHR12LD = 36  # type: int
DAC_DHR12R1 = 8  # type: int
DAC_DHR12R2 = 20  # type: int
DAC_DHR12RD = 32  # type: int
DAC_DHR8R1 = 16  # type: int
DAC_DHR8R2 = 28  # type: int
DAC_DHR8RD = 40  # type: int
DAC_DOR1 = 44  # type: int
DAC_DOR2 = 48  # type: int
DAC_SR = 52  # type: int
DAC_SWTRIGR = 4  # type: int
DBGMCU = 3758366720  # type: int
DBGMCU_APB1FZ = 8  # type: int
DBGMCU_APB2FZ = 12  # type: int
DBGMCU_CR = 4  # type: int
DBGMCU_IDCODE = 0  # type: int
DMA1 = 1073897472  # type: int
DMA2 = 1073898496  # type: int
DMA_HIFCR = 12  # type: int
DMA_HISR = 4  # type: int
DMA_LIFCR = 8  # type: int
DMA_LISR = 0  # type: int
EXTI = 1073822720  # type: int
EXTI_EMR = 4  # type: int
EXTI_FTSR = 12  # type: int
EXTI_IMR = 0  # type: int
EXTI_PR = 20  # type: int
EXTI_RTSR = 8  # type: int
EXTI_SWIER = 16  # type: int
FLASH = 1073888256  # type: int
FLASH_ACR = 0  # type: int
FLASH_CR = 16  # type: int
FLASH_KEYR = 4  # type: int
FLASH_OPTCR = 20  # type: int
FLASH_OPTCR1 = 24  # type: int
FLASH_OPTKEYR = 8  # type: int
FLASH_SR = 12  # type: int
GPIOA = 1073872896  # type: int
GPIOB = 1073873920  # type: int
GPIOC = 1073874944  # type: int
GPIOD = 1073875968  # type: int
GPIOE = 1073876992  # type: int
GPIOF = 1073878016  # type: int
GPIOG = 1073879040  # type: int
GPIOH = 1073880064  # type: int
GPIOI = 1073881088  # type: int
GPIO_AFR0 = 32  # type: int
GPIO_AFR1 = 36  # type: int
GPIO_BSRR = 24  # type: int
GPIO_BSRRH = 26  # type: int
GPIO_BSRRL = 24  # type: int
GPIO_IDR = 16  # type: int
GPIO_LCKR = 28  # type: int
GPIO_MODER = 0  # type: int
GPIO_ODR = 20  # type: int
GPIO_OSPEEDR = 8  # type: int
GPIO_OTYPER = 4  # type: int
GPIO_PUPDR = 12  # type: int
I2C1 = 1073763328  # type: int
I2C2 = 1073764352  # type: int
I2C3 = 1073765376  # type: int
I2C_CCR = 28  # type: int
I2C_CR1 = 0  # type: int
I2C_CR2 = 4  # type: int
I2C_DR = 16  # type: int
I2C_OAR1 = 8  # type: int
I2C_OAR2 = 12  # type: int
I2C_SR1 = 20  # type: int
I2C_SR2 = 24  # type: int
I2C_TRISE = 32  # type: int
I2S2EXT = 1073755136  # type: int
I2S3EXT = 1073758208  # type: int
IWDG = 1073754112  # type: int
IWDG_KR = 0  # type: int
IWDG_PR = 4  # type: int
IWDG_RLR = 8  # type: int
IWDG_SR = 12  # type: int
PWR = 1073770496  # type: int
PWR_CR = 0  # type: int
PWR_CSR = 4  # type: int
RCC = 1073887232  # type: int
RCC_AHB1ENR = 48  # type: int
RCC_AHB1LPENR = 80  # type: int
RCC_AHB1RSTR = 16  # type: int
RCC_AHB2ENR = 52  # type: int
RCC_AHB2LPENR = 84  # type: int
RCC_AHB2RSTR = 20  # type: int
RCC_AHB3ENR = 56  # type: int
RCC_AHB3LPENR = 88  # type: int
RCC_AHB3RSTR = 24  # type: int
RCC_APB1ENR = 64  # type: int
RCC_APB1LPENR = 96  # type: int
RCC_APB1RSTR = 32  # type: int
RCC_APB2ENR = 68  # type: int
RCC_APB2LPENR = 100  # type: int
RCC_APB2RSTR = 36  # type: int
RCC_BDCR = 112  # type: int
RCC_CFGR = 8  # type: int
RCC_CIR = 12  # type: int
RCC_CR = 0  # type: int
RCC_CSR = 116  # type: int
RCC_PLLCFGR = 4  # type: int
RCC_PLLI2SCFGR = 132  # type: int
RCC_SSCGR = 128  # type: int
RNG = 1342572544  # type: int
RNG_CR = 0  # type: int
RNG_DR = 8  # type: int
RNG_SR = 4  # type: int
RTC = 1073752064  # type: int
RTC_ALRMAR = 28  # type: int
RTC_ALRMASSR = 68  # type: int
RTC_ALRMBR = 32  # type: int
RTC_ALRMBSSR = 72  # type: int
RTC_BKP0R = 80  # type: int
RTC_BKP10R = 120  # type: int
RTC_BKP11R = 124  # type: int
RTC_BKP12R = 128  # type: int
RTC_BKP13R = 132  # type: int
RTC_BKP14R = 136  # type: int
RTC_BKP15R = 140  # type: int
RTC_BKP16R = 144  # type: int
RTC_BKP17R = 148  # type: int
RTC_BKP18R = 152  # type: int
RTC_BKP19R = 156  # type: int
RTC_BKP1R = 84  # type: int
RTC_BKP2R = 88  # type: int
RTC_BKP3R = 92  # type: int
RTC_BKP4R = 96  # type: int
RTC_BKP5R = 100  # type: int
RTC_BKP6R = 104  # type: int
RTC_BKP7R = 108  # type: int
RTC_BKP8R = 112  # type: int
RTC_BKP9R = 116  # type: int
RTC_CALIBR = 24  # type: int
RTC_CALR = 60  # type: int
RTC_CR = 8  # type: int
RTC_DR = 4  # type: int
RTC_ISR = 12  # type: int
RTC_PRER = 16  # type: int
RTC_SHIFTR = 44  # type: int
RTC_SSR = 40  # type: int
RTC_TAFCR = 64  # type: int
RTC_TR = 0  # type: int
RTC_TSDR = 52  # type: int
RTC_TSSSR = 56  # type: int
RTC_TSTR = 48  # type: int
RTC_WPR = 36  # type: int
RTC_WUTR = 20  # type: int
SDIO = 1073818624  # type: int
SPI1 = 1073819648  # type: int
SPI2 = 1073756160  # type: int
SPI3 = 1073757184  # type: int
SPI_CR1 = 0  # type: int
SPI_CR2 = 4  # type: int
SPI_CRCPR = 16  # type: int
SPI_DR = 12  # type: int
SPI_I2SCFGR = 28  # type: int
SPI_I2SPR = 32  # type: int
SPI_RXCRCR = 20  # type: int
SPI_SR = 8  # type: int
SPI_TXCRCR = 24  # type: int
SYSCFG = 1073821696  # type: int
SYSCFG_CMPCR = 32  # type: int
SYSCFG_EXTICR0 = 8  # type: int
SYSCFG_EXTICR1 = 12  # type: int
SYSCFG_EXTICR2 = 16  # type: int
SYSCFG_EXTICR3 = 20  # type: int
SYSCFG_MEMRMP = 0  # type: int
SYSCFG_PMC = 4  # type: int
TIM1 = 1073807360  # type: int
TIM10 = 1073824768  # type: int
TIM11 = 1073825792  # type: int
TIM12 = 1073747968  # type: int
TIM13 = 1073748992  # type: int
TIM14 = 1073750016  # type: int
TIM2 = 1073741824  # type: int
TIM3 = 1073742848  # type: int
TIM4 = 1073743872  # type: int
TIM5 = 1073744896  # type: int
TIM6 = 1073745920  # type: int
TIM7 = 1073746944  # type: int
TIM8 = 1073808384  # type: int
TIM9 = 1073823744  # type: int
TIM_ARR = 44  # type: int
TIM_BDTR = 68  # type: int
TIM_CCER = 32  # type: int
TIM_CCMR1 = 24  # type: int
TIM_CCMR2 = 28  # type: int
TIM_CCR1 = 52  # type: int
TIM_CCR2 = 56  # type: int
TIM_CCR3 = 60  # type: int
TIM_CCR4 = 64  # type: int
TIM_CNT = 36  # type: int
TIM_CR1 = 0  # type: int
TIM_CR2 = 4  # type: int
TIM_DCR = 72  # type: int
TIM_DIER = 12  # type: int
TIM_DMAR = 76  # type: int
TIM_EGR = 20  # type: int
TIM_OR = 80  # type: int
TIM_PSC = 40  # type: int
TIM_RCR = 48  # type: int
TIM_SMCR = 8  # type: int
TIM_SR = 16  # type: int
UART4 = 1073761280  # type: int
UART5 = 1073762304  # type: int
USART1 = 1073811456  # type: int
USART2 = 1073759232  # type: int
USART3 = 1073760256  # type: int
USART6 = 1073812480  # type: int
USART_BRR = 8  # type: int
USART_CR1 = 12  # type: int
USART_CR2 = 16  # type: int
USART_CR3 = 20  # type: int
USART_DR = 4  # type: int
USART_GTPR = 24  # type: int
USART_SR = 0  # type: int
WWDG = 1073753088  # type: int
WWDG_CFR = 4  # type: int
WWDG_CR = 0  # type: int
WWDG_SR = 8  # type: int
mem16: Any  ## <class 'mem'> = <16-bit memory>
mem32: Any  ## <class 'mem'> = <32-bit memory>
mem8: Any  ## <class 'mem'> = <8-bit memory>
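# Illustrative register access on a pyboard (added sketch, not part of the
# generated stub): mem32 is indexed by peripheral base address plus register
# offset, both exported above.
#   import stm
#   idr = stm.mem32[stm.GPIOA + stm.GPIO_IDR]   # read GPIOA input data register
#   stm.mem32[stm.GPIOA + stm.GPIO_ODR] = idr   # write it back to the output register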
StarcoderdataPython
1995254
# Write the benchmarking functions here.
# See "Writing benchmarks" in the asv docs for more information.
import numpy as np
import xarray as xr
from scipy.stats import norm

from xskillscore import (
    brier_score,
    crps_ensemble,
    crps_gaussian,
    crps_quadrature,
    threshold_brier_score,
)

from . import parameterized, randn, requires_dask

PROBABILISTIC_METRICS = [
    crps_ensemble,
    crps_gaussian,
    crps_quadrature,
    brier_score,
    threshold_brier_score,
]

including_crps_quadrature = False

large_lon_lat = 2000
large_lon_lat_chunksize = large_lon_lat // 2
nmember = 4


class Generate:
    """Generate random fct and obs to be benchmarked."""

    timeout = 600
    repeat = (2, 5, 20)

    def make_ds(self, nmember, nx, ny):
        # ds
        self.obs = xr.Dataset()
        self.fct = xr.Dataset()
        self.nmember = nmember
        self.nx = nx  # 4 deg
        self.ny = ny  # 4 deg

        frac_nan = 0.0

        members = np.arange(1, 1 + self.nmember)
        lons = xr.DataArray(
            np.linspace(0, 360, self.nx),
            dims=('lon',),
            attrs={'units': 'degrees east', 'long_name': 'longitude'},
        )
        lats = xr.DataArray(
            np.linspace(-90, 90, self.ny),
            dims=('lat',),
            attrs={'units': 'degrees north', 'long_name': 'latitude'},
        )
        self.fct['tos'] = xr.DataArray(
            randn((self.nmember, self.nx, self.ny), frac_nan=frac_nan),
            coords={'member': members, 'lon': lons, 'lat': lats},
            dims=('member', 'lon', 'lat'),
            name='tos',
            encoding=None,
            attrs={'units': 'foo units', 'description': 'a description'},
        )
        self.obs['tos'] = xr.DataArray(
            randn((self.nx, self.ny), frac_nan=frac_nan),
            coords={'lon': lons, 'lat': lats},
            dims=('lon', 'lat'),
            name='tos',
            encoding=None,
            attrs={'units': 'foo units', 'description': 'a description'},
        )
        self.fct.attrs = {'history': 'created for xarray benchmarking'}
        self.obs.attrs = {'history': 'created for xarray benchmarking'}

        # set nans for land sea mask
        self.fct = self.fct.where(
            (abs(self.fct.lat) > 20) | (self.fct.lat < 100) | (self.fct.lat > 160)
        )
        self.obs = self.obs.where(
            (abs(self.obs.lat) > 20) | (self.obs.lat < 100) | (self.obs.lat > 160)
        )


class Compute_small(Generate):
    """A benchmark xskillscore.metric for small xr.DataArrays"""

    def setup(self, *args, **kwargs):
        self.make_ds(nmember, 90, 45)  # 4 degree grid

    @parameterized('metric', PROBABILISTIC_METRICS)
    def time_xskillscore_probabilistic_small(self, metric):
        """Take time for xskillscore.metric."""
        if metric is crps_gaussian:
            mu = 0.5
            sig = 0.2
            metric(self.obs['tos'], mu, sig)
        elif metric is crps_quadrature:
            if not including_crps_quadrature:
                pass
            else:
                xmin, xmax, tol = -10, 10, 1e-6
                cdf_or_dist = norm
                metric(self.obs['tos'], cdf_or_dist, xmin, xmax, tol)
        elif metric is crps_ensemble:
            metric(self.obs['tos'], self.fct['tos'])
        elif metric is threshold_brier_score:
            threshold = 0.5
            metric(self.obs['tos'], self.fct['tos'], threshold)
        elif metric is brier_score:
            metric(self.obs['tos'] > 0.5, (self.fct['tos'] > 0.5).mean('member'))

    @parameterized('metric', PROBABILISTIC_METRICS)
    def peakmem_xskillscore_probabilistic_small(self, metric):
        """Take peak memory for xskillscore.metric."""
        if metric is crps_gaussian:
            mu = 0.5
            sig = 0.2
            metric(self.obs['tos'], mu, sig)
        elif metric is crps_quadrature:
            if not including_crps_quadrature:
                pass
            else:
                xmin, xmax, tol = -10, 10, 1e-6
                cdf_or_dist = norm
                metric(self.obs['tos'], cdf_or_dist, xmin, xmax, tol)
        elif metric is crps_ensemble:
            metric(self.obs['tos'], self.fct['tos'])
        elif metric is threshold_brier_score:
            threshold = 0.5
            metric(self.obs['tos'], self.fct['tos'], threshold)
        elif metric is brier_score:
            metric(self.obs['tos'] > 0.5, (self.fct['tos'] > 0.5).mean('member'))
class Compute_large(Generate):
    """A benchmark xskillscore.metric for large xr.DataArrays."""

    def setup(self, *args, **kwargs):
        self.make_ds(nmember, large_lon_lat, large_lon_lat)

    @parameterized('metric', PROBABILISTIC_METRICS)
    def time_xskillscore_probabilistic_large(self, metric):
        """Take time for xskillscore.metric."""
        if metric is crps_gaussian:
            mu = 0.5
            sig = 0.2
            metric(self.obs['tos'], mu, sig)
        elif metric is crps_quadrature:
            if not including_crps_quadrature:
                pass
            else:
                xmin, xmax, tol = -10, 10, 1e-6
                cdf_or_dist = norm
                metric(self.obs['tos'], cdf_or_dist, xmin, xmax, tol)
        elif metric is crps_ensemble:
            metric(self.obs['tos'], self.fct['tos'])
        elif metric is threshold_brier_score:
            threshold = 0.5
            metric(self.obs['tos'], self.fct['tos'], threshold)
        elif metric is brier_score:
            metric(self.obs['tos'] > 0.5, (self.fct['tos'] > 0.5).mean('member'))

    @parameterized('metric', PROBABILISTIC_METRICS)
    def peakmem_xskillscore_probabilistic_large(self, metric):
        """Take peak memory for xskillscore.metric."""
        if metric is crps_gaussian:
            mu = 0.5
            sig = 0.2
            metric(self.obs['tos'], mu, sig)
        elif metric is crps_quadrature:
            if not including_crps_quadrature:
                pass
            else:
                xmin, xmax, tol = -10, 10, 1e-6
                cdf_or_dist = norm
                metric(self.obs['tos'], cdf_or_dist, xmin, xmax, tol)
        elif metric is crps_ensemble:
            metric(self.obs['tos'], self.fct['tos'])
        elif metric is threshold_brier_score:
            threshold = 0.5
            metric(self.obs['tos'], self.fct['tos'], threshold)
        elif metric is brier_score:
            metric(self.obs['tos'] > 0.5, (self.fct['tos'] > 0.5).mean('member'))


class Compute_large_dask(Generate):
    """A benchmark xskillscore.metric for large xr.DataArrays with dask."""

    def setup(self, *args, **kwargs):
        requires_dask()
        self.make_ds(nmember, large_lon_lat, large_lon_lat)
        self.obs = self.obs.chunk(
            {'lon': large_lon_lat_chunksize, 'lat': large_lon_lat_chunksize}
        )
        self.fct = self.fct.chunk(
            {'lon': large_lon_lat_chunksize, 'lat': large_lon_lat_chunksize}
        )

    @parameterized('metric', PROBABILISTIC_METRICS)
    def time_xskillscore_probabilistic_large_dask(self, metric):
        """Take time for xskillscore.metric."""
        if metric is crps_gaussian:
            mu = 0.5
            sig = 0.2
            metric(self.obs['tos'], mu, sig).compute()
        elif metric is crps_quadrature:
            if not including_crps_quadrature:
                pass
            else:
                xmin, xmax, tol = -10, 10, 1e-6
                cdf_or_dist = norm
                metric(self.obs['tos'], cdf_or_dist, xmin, xmax, tol).compute()
        elif metric is crps_ensemble:
            metric(self.obs['tos'], self.fct['tos']).compute()
        elif metric is threshold_brier_score:
            threshold = 0.5
            metric(self.obs['tos'], self.fct['tos'], threshold).compute()
        elif metric is brier_score:
            metric(
                self.obs['tos'] > 0.5, (self.fct['tos'] > 0.5).mean('member')
            ).compute()

    @parameterized('metric', PROBABILISTIC_METRICS)
    def peakmem_xskillscore_probabilistic_large_dask(self, metric):
        """Take peak memory for xskillscore.metric."""
        if metric is crps_gaussian:
            mu = 0.5
            sig = 0.2
            metric(self.obs['tos'], mu, sig).compute()
        elif metric is crps_quadrature:
            if not including_crps_quadrature:
                pass
            else:
                xmin, xmax, tol = -10, 10, 1e-6
                cdf_or_dist = norm
                metric(self.obs['tos'], cdf_or_dist, xmin, xmax, tol).compute()
        elif metric is crps_ensemble:
            metric(self.obs['tos'], self.fct['tos']).compute()
        elif metric is threshold_brier_score:
            threshold = 0.5
            metric(self.obs['tos'], self.fct['tos'], threshold).compute()
        elif metric is brier_score:
            metric(
                self.obs['tos'] > 0.5, (self.fct['tos'] > 0.5).mean('member')
            ).compute()
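# Illustrative invocation (an assumption; requires an asv configuration file
# in the benchmark repo -- the regex below is hypothetical):
#   asv run --bench probabilistic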
StarcoderdataPython
9762342
<reponame>scolemann/mlfinlab
"""
Implements the Combinatorial Purged Cross-Validation class from Chapter 12
"""

from itertools import combinations
from typing import List

import pandas as pd
import numpy as np

from sklearn.model_selection import KFold

from .cross_validation import ml_get_train_times


class CombinatorialPurgedKFold(KFold):
    """
    Implements Combinatorial Purged Cross Validation (CPCV) from Chapter 12

    The train is purged of observations overlapping test-label intervals
    Test set is assumed contiguous (shuffle=False), w/o training samples in between

    :param n_splits: The number of splits. Default to 3
    :param samples_info_sets: The information range on which each record is constructed from
        *samples_info_sets.index*: Time when the information extraction started.
        *samples_info_sets.value*: Time when the information extraction ended.
    :param pct_embargo: Percent that determines the embargo size.
    """

    def __init__(self,
                 n_splits: int = 3,
                 n_test_splits: int = 2,
                 samples_info_sets: pd.Series = None,
                 pct_embargo: float = 0.):

        if not isinstance(samples_info_sets, pd.Series):
            raise ValueError('The samples_info_sets param must be a pd.Series')
        super(CombinatorialPurgedKFold, self).__init__(n_splits, shuffle=False, random_state=None)

        self.samples_info_sets = samples_info_sets
        self.pct_embargo = pct_embargo
        self.n_test_splits = n_test_splits
        # Dictionary of backtest paths, number of paths = n_splits - 1
        self.backtest_paths = {k: list(range(self.n_splits)) for k in range(self.n_splits - 1)}

    def _generate_combinatorial_test_ranges(self, splits_indices: dict) -> List:
        """
        Using start and end indices of test splits from KFolds and number of
        test_splits (self.n_test_splits), generates combinatorial test ranges splits

        :param splits_indices: (dict) of test fold integer index: [start test index, end test index]
        :return: (list) of combinatorial test splits ([start index, end index])
        """

        # Possible test splits for each fold
        combinatorial_splits = list(combinations(list(splits_indices.keys()), self.n_test_splits))
        combinatorial_test_ranges = []  # List of test indices formed from combinatorial splits
        for combination in combinatorial_splits:
            temp_test_indices = []  # Array of test indices for current split combination
            for int_index in combination:
                temp_test_indices.append(splits_indices[int_index])
            combinatorial_test_ranges.append(temp_test_indices)
        return combinatorial_test_ranges

    # noinspection PyPep8Naming
    def split(self, X: pd.DataFrame, y: pd.Series = None, groups=None):
        """
        The main method to call for the PurgedKFold class

        :param X: The pd.DataFrame samples dataset that is to be split
        :param y: The pd.Series sample labels series
        :param groups: array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into train/test set.
        :return: This method yields tuples of (train, test) where train and test are lists of sample indices
        """
        if X.shape[0] != self.samples_info_sets.shape[0]:
            raise ValueError("X and the 'samples_info_sets' series param must be the same length")

        test_ranges: [(int, int)] = [(ix[0], ix[-1] + 1) for ix in
                                     np.array_split(np.arange(X.shape[0]), self.n_splits)]
        splits_indices = {}
        for index, [start_ix, end_ix] in enumerate(test_ranges):
            splits_indices[index] = [start_ix, end_ix]

        combinatorial_test_ranges = self._generate_combinatorial_test_ranges(splits_indices)

        embargo: int = int(X.shape[0] * self.pct_embargo)
        for test_splits in combinatorial_test_ranges:

            # Embargo
            test_times = pd.Series(
                index=[self.samples_info_sets[ix[0]] for ix in test_splits],
                data=[
                    self.samples_info_sets[ix[1] - 1] if ix[1] - 1 + embargo >= X.shape[0]
                    else self.samples_info_sets[ix[1] - 1 + embargo]
                    for ix in test_splits
                ])

            test_indices = []
            for [start_ix, end_ix] in test_splits:
                test_indices.extend(list(range(start_ix, end_ix)))

            # Purge
            train_times = ml_get_train_times(self.samples_info_sets, test_times)

            # Get indices
            train_indices = []
            for train_ix in train_times.index:
                train_indices.append(self.samples_info_sets.index.get_loc(train_ix))
            yield np.array(train_indices), np.array(test_indices)
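# Minimal usage sketch (added for illustration; the toy data below is an
# assumption -- each label's information is taken to span three days):
#   n = 12
#   index = pd.date_range('2020-01-01', periods=n, freq='D')
#   info_sets = pd.Series(index=index, data=index + pd.Timedelta(days=3))
#   X = pd.DataFrame({'feat': np.arange(n)}, index=index)
#   cv = CombinatorialPurgedKFold(n_splits=4, n_test_splits=2,
#                                 samples_info_sets=info_sets, pct_embargo=0.01)
#   for train_idx, test_idx in cv.split(X):
#       pass  # fit on X.iloc[train_idx], evaluate on X.iloc[test_idx]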
StarcoderdataPython
11328220
import logging
import unittest

from vodem.api import save_sms


class TestSaveSms(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.valid_response = {}

    @unittest.skip('skip')
    def test_logs_defaults(self):
        params = {'SMSMessage': '0074006500730074'}
        with self.assertLogs(level=logging.DEBUG) as cm:
            save_sms(params)
StarcoderdataPython
9686363
<filename>DQM/L1TMonitor/python/L1TdeGEMTPG_cfi.py
import FWCore.ParameterSet.Config as cms

l1tdeGEMTPGCommon = cms.PSet(
    monitorDir = cms.string("L1TEMU/L1TdeGEMTPG"),
    verbose = cms.bool(False),
    ## when multiple chambers are enabled, order them by station number!
    chambers = cms.vstring("GE11"),
    dataEmul = cms.vstring("data", "emul"),
    clusterVars = cms.vstring("size", "pad", "bx"),
    clusterNBin = cms.vuint32(20, 384, 10),
    clusterMinBin = cms.vdouble(0, 0, -5),
    clusterMaxBin = cms.vdouble(20, 384, 5),
    ## GEM VFAT data is not captured in BX's other than BX0
    ## For a good comparison, leave out those data clusters
    useDataClustersOnlyInBX0 = cms.bool(True),
    B904Setup = cms.bool(False),
)

from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
l1tdeGEMTPG = DQMEDAnalyzer(
    "L1TdeGEMTPG",
    l1tdeGEMTPGCommon,
    data = cms.InputTag("emtfStage2Digis"),
    emul = cms.InputTag("valMuonGEMPadDigiClusters"),
)
StarcoderdataPython
1792856
#!/usr/bin/env python3

import unittest

from torch.testing._internal.common_distributed import MultiProcessTestCase
from torch.testing._internal.common_utils import TEST_WITH_ASAN, run_tests
from torch.testing._internal.distributed.rpc.rpc_test import RpcTest


@unittest.skipIf(
    TEST_WITH_ASAN, "Skip ASAN as torch + multiprocessing spawn have known issues"
)
class RpcTestWithSpawn(MultiProcessTestCase, RpcTest):

    def setUp(self):
        super(RpcTestWithSpawn, self).setUp()
        self._spawn_processes()


if __name__ == "__main__":
    run_tests()
StarcoderdataPython
9760190
from torch.testing._internal.jit_utils import JitTestCase
import io
import os
import sys

import torch
import torch._C

# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)

if __name__ == '__main__':
    raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
                       "\tpython test/test_jit.py TESTNAME\n\n"
                       "instead.")


def to_test_backend(module, method_compile_spec):
    return torch._C._jit_to_test_backend(module, {"forward": method_compile_spec})


def to_test_backend_multi(module, method_compile_spec):
    return torch._C._jit_to_test_backend(module, method_compile_spec)


class MyModule(torch.nn.Module):
    def __init__(self):
        super(MyModule, self).__init__()

    def forward(self, x, h):
        return self.accum(x, h), self.sub_accum(x, h)

    def accum(self, x, h):
        return x + h

    def sub_accum(self, x, h):
        return x - h


class TestBackends(JitTestCase):
    def setUp(self):
        super().setUp()
        # Create Python, JIT and backend versions of MyModule.
        self.module = MyModule()
        self.scripted_module = torch.jit.script(MyModule())
        self.lowered_module = to_test_backend_multi(
            self.scripted_module._c,
            {"accum": {"": ""}, "sub_accum": {"": ""}, "forward": {"": ""}})

    def compare_py_jit_backend(self, name, input):
        """
        This is a helper function for comparing the outputs of self.module
        (Python), self.scripted_module (JIT) and self.lowered_module (backend)
        when the method named 'name' is invoked using 'input'.
        """
        # Get handles for Python, JIT and backend methods.
        python_method = self.module.__getattribute__(name)
        jit_method = self.scripted_module.__getattr__(name)
        backend_method = self.lowered_module.__getattr__(name)

        # Run methods.
        python_output = python_method(input, input)
        jit_output = jit_method(input, input)
        backend_output = backend_method(input, input)

        # The answers returned by Python, JIT and to_backend should all match.
        self.assertEqual(python_output, backend_output)
        self.assertEqual(jit_output, backend_output)

    def test_simple(self):
        """
        This is a simple test that compiles MyModule for the test backend and
        ensures it produces the correct answers for each method.
        """
        # Test execution with backend against Python and JIT.
        input = torch.randn(5)

        # Test all three module methods.
        self.compare_py_jit_backend("accum", input)
        self.compare_py_jit_backend("sub_accum", input)
        self.compare_py_jit_backend("forward", input)

    def test_save_load(self):
        """
        This method tests that a lowered module still produces the same output
        as a Python module and ScriptModule after saving and loading.
        """
        # Save the lowered module.
        buffer = io.BytesIO()
        torch.jit.save(self.lowered_module, buffer)

        # Save the compile spec to compare against the version retrieved after loading.
        pre_compile_spec = self.lowered_module.__getattr__("__method_compile_spec")

        # Load the lowered module.
        buffer.seek(0)
        self.lowered_module = torch.jit.load(buffer)

        # Get the compile spec after loading.
        post_compile_spec = self.lowered_module.__getattr__("__method_compile_spec")

        # Compile specs should match.
        self.assertEqual(pre_compile_spec, post_compile_spec)

        # Test execution with backend against Python and JIT.
        input = torch.randn(5)

        # Test all three module methods.
        self.compare_py_jit_backend("accum", input)
        self.compare_py_jit_backend("sub_accum", input)
        self.compare_py_jit_backend("forward", input)
StarcoderdataPython
1637631
<reponame>MeiK-h/JudgeLight
import sys

from distutils.core import Extension, setup

sources = [
    'JudgeLight/JudgeLightRunner/judgelightrunner.c',
    'JudgeLight/JudgeLightRunner/jl_runner.c',
    'JudgeLight/JudgeLightRunner/jl_memory.c',
    'JudgeLight/JudgeLightRunner/jl_limit.c',
    'JudgeLight/JudgeLightRunner/jl_convert.c',
    'JudgeLight/JudgeLightRunner/jl_rules.c',
]

setup(
    name='JudgeLight',
    version='2.0.7',
    ext_modules=[Extension('JudgeLight/JudgeLightRunner', sources=sources)],
    packages=['JudgeLight'],
    license='MIT',
    author='MeiK',
    author_email='<EMAIL>',
    url='https://github.com/MeiK2333/JudgeLight',
)
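# Typical build/install commands for a distutils setup like this one
# (illustrative, run from the repo root):
#   python setup.py build_ext --inplace
#   python setup.py install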
StarcoderdataPython
9737083
#!/usr/bin/env python3


def bytes_to_array(dat, sz):
    # Parse space-separated hex bytes into little-endian integers, sz bytes each.
    dat = dat.split()
    arr = []
    for i in range(0, len(dat), sz):
        arr.append(int(''.join(reversed(dat[i:i + sz])), 16))
    return arr


key = "IdontKnowWhatsGoingOn"
s = "08 00 00 00 06 00 00 00 2c 00 00 00 3a 00 00 00 32 00 00 00 30 00 00 00 1c 00 00 00 5c 00 00 00 01 00 00 00 32 00 00 00 1a 00 00 00 12 00 00 00 45 00 00 00 1d 00 00 00 20 00 00 00 30 00 00 00 0d 00 00 00 1b 00 00 00 03 00 00 00 7c 00 00 00 13 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00"
s = bytes_to_array(s, 4)

pw = []
for i in range(len(key)):
    pw.append(ord(key[i]) ^ s[i])

print(''.join(map(chr, pw)))
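# Sanity check (added for illustration): XOR is self-inverse, so re-encoding
# the recovered string with the same key must reproduce the captured dwords.
#   assert [ord(p) ^ ord(k) for p, k in zip(map(chr, pw), key)] == s[:len(key)]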
StarcoderdataPython
5153235
#!/usr/bin/env python

"""
Converts an event price (tax-included) to a retail price (pre-tax) or vice versa.

Event prices are calculated by applying a discount to the online price,
adding sales tax, and rounding to the nearest dollar.

Retail prices are calculated by removing sales tax, inverting the discount,
rounding to the nearest dime, and subtracting a penny.

To generate a table of event to retail prices::

    echo "event,retail";\\
    i=1; while [ "$i" -le 10 ]; do\\
        echo "$i,`./calc_price.py retail $i`";\\
        i=`echo $i+1|bc`;\\
    done
"""

from __future__ import print_function
import argparse
import math
import Tkinter
import tkMessageBox


def calc_event_price(retail_price, discount_percent, sales_tax_percent):
    """Calculate the event price from the retail price."""
    # Apply discount.
    price = float(retail_price) * (1.0 - float(discount_percent) / 100.0)
    # Impose sales tax.
    price = price * (1.0 + float(sales_tax_percent) / 100.0)
    # Round to the nearest dollar.
    price = max(1.0, math.floor(price + 0.5))
    # Return as a float.
    return price


def calc_retail_price(event_price, discount_percent, sales_tax_percent):
    """Calculate the retail price from the event price."""
    # Remove sales tax.
    price = float(event_price) / (1.0 + float(sales_tax_percent) / 100.0)
    # Unapply discount.
    price = price / (1.0 - float(discount_percent) / 100.0)
    # Round to the nearest dime and subtract a penny.
    price = max(0.01, math.floor(price * 10.0 + 0.5) / 10.0 - 0.01)
    # Return as a float.
    return price


def calc_wholesale_price(retail_price, wholesale_fraction):
    """Calculate the wholesale price from the retail price."""
    # Round the price to the nearest dollar.
    retail_price = float(retail_price)
    if retail_price > 1.0:
        price = math.floor(retail_price + 0.5)
    else:
        price = retail_price
    # Apply the wholesale fraction.
    return price * wholesale_fraction


class AppGUI(object):
    """Application graphical user interface."""

    # Width of price text entry boxes.
    PRICE_WIDTH = 10

    # pylint: disable=no-self-use
    def __init__(self, args, root):
        self.args = args
        self.root = root
        self.frame = Tkinter.Frame(root)
        self.frame.pack()

        # Retail price group.
        retail_group = Tkinter.LabelFrame(
            self.frame,
            text="Retail (website) price",
            padx=5,
            pady=5
        )
        retail_group.pack(padx=10, pady=10)

        self.retail_entry = Tkinter.Entry(
            retail_group,
            width=AppGUI.PRICE_WIDTH
        )
        self.retail_entry.bind('<Return>', self.calc_from_retail_price)
        self.retail_entry.pack(side=Tkinter.LEFT)

        calc_from_retail_button = Tkinter.Button(
            retail_group,
            text="Calc event and wholesale",
            command=self.calc_from_retail_price
        )
        calc_from_retail_button.bind('<Return>', self.calc_from_retail_price)
        calc_from_retail_button.pack(side=Tkinter.LEFT, padx=(10, 0))

        # Event price group.
        event_group = Tkinter.LabelFrame(
            self.frame,
            text="Event (discounted) price",
            padx=5,
            pady=5
        )
        event_group.pack(padx=10, pady=10)

        self.event_entry = Tkinter.Entry(
            event_group,
            width=AppGUI.PRICE_WIDTH
        )
        self.event_entry.bind('<Return>', self.calc_from_event_price)
        self.event_entry.pack(side=Tkinter.LEFT)

        calc_from_event_button = Tkinter.Button(
            event_group,
            text="Calc retail and wholesale",
            command=self.calc_from_event_price
        )
        calc_from_event_button.bind('<Return>', self.calc_from_event_price)
        calc_from_event_button.pack(side=Tkinter.LEFT, padx=(10, 0))

        # Wholesale price group.
        wholesale_group = Tkinter.LabelFrame(
            self.frame,
            text="Wholesale price",
            padx=5,
            pady=5
        )
        wholesale_group.pack(fill=Tkinter.X, padx=10, pady=10)

        self.wholesale_entry = Tkinter.Entry(
            wholesale_group,
            width=AppGUI.PRICE_WIDTH
        )
        self.wholesale_entry.bind('<Return>', self.calc_from_wholesale_price)
        self.wholesale_entry.pack(side=Tkinter.LEFT)

        calc_from_wholesale_button = Tkinter.Button(
            wholesale_group,
            text="Calc retail and event",
            command=self.calc_from_wholesale_price
        )
        calc_from_wholesale_button.bind(
            '<Return>',
            self.calc_from_wholesale_price
        )
        calc_from_wholesale_button.pack(side=Tkinter.LEFT, padx=(10, 0))

        # Quit button
        self.quit_button = Tkinter.Button(
            self.frame,
            text="Quit",
            command=self.frame.quit
        )
        self.quit_button.pack(side=Tkinter.LEFT, padx=5, pady=5)

        # Give focus to the retail_entry.
        self.retail_entry.focus_set()

    def get_price(self, entry, entry_name):
        """Get float value from text entry box."""
        price_str = entry.get()
        if price_str is None:
            self.error("No {} price specified".format(entry_name))
            return None
        try:
            price_float = float(price_str)
        except ValueError:
            self.error(
                "{} price of '{}' is not a valid number".format(
                    entry_name,
                    price_str
                )
            )
            return None
        return price_float

    def set_price(self, entry, price):
        """Set price in text entry box."""
        entry.delete(0, Tkinter.END)
        entry.insert(0, price)
        # Copy to clipboard.
        # Does not work now that there are 2 derived prices.
        # self.root.clipboard_clear()
        # self.root.clipboard_append(price)

    def calc_from_retail_price(self, _=None):
        """
        Calculate prices derived from retail price action.
        Second arg is event when called via <Return>.
        """
        retail_price = self.get_price(self.retail_entry, "Retail")
        if retail_price is None:
            return
        event_price = calc_event_price(
            retail_price,
            self.args.discount_percent,
            self.args.avg_tax_percent
        )
        self.set_price(self.event_entry, "{:,.2f}".format(event_price))
        wholesale_price = calc_wholesale_price(
            retail_price,
            self.args.wholesale_fraction
        )
        self.set_price(self.wholesale_entry, "{:,.2f}".format(wholesale_price))

    def calc_from_event_price(self, _=None):
        """
        Calculate prices derived from event price action.
        Second arg is event when called via <Return>.
        """
        event_price = self.get_price(self.event_entry, "Event")
        if event_price is None:
            return
        retail_price = calc_retail_price(
            event_price,
            self.args.discount_percent,
            self.args.avg_tax_percent
        )
        self.set_price(self.retail_entry, "{:,.2f}".format(retail_price))
        wholesale_price = calc_wholesale_price(
            retail_price,
            self.args.wholesale_fraction
        )
        self.set_price(self.wholesale_entry, "{:,.2f}".format(wholesale_price))

    def calc_from_wholesale_price(self, _=None):
        """
        Calculate prices derived from wholesale price action.
        Second arg is event when called via <Return>.
        """
        wholesale_price = self.get_price(self.wholesale_entry, "Wholesale")
        if wholesale_price is None:
            return
        retail_price = wholesale_price / self.args.wholesale_fraction
        self.set_price(self.retail_entry, "{:,.2f}".format(retail_price))
        event_price = calc_event_price(
            retail_price,
            self.args.discount_percent,
            self.args.avg_tax_percent
        )
        self.set_price(self.event_entry, "{:,.2f}".format(event_price))

    def error(self, msg):
        """Display an error message."""
        tkMessageBox.showerror("Error", msg)


def main():
    """main"""
    arg_parser = argparse.ArgumentParser(
        description="Calculate pre-tax price based upon tax-included price "
                    "or vice-versa."
    )
    arg_parser.add_argument(
        "--discount",
        type=float,
        dest="discount_percent",
        metavar="PCT",
        default=30,
        help="discount in percent (default=%(default).0f)"
    )
    arg_parser.add_argument(
        "--avg-tax",
        type=float,
        dest="avg_tax_percent",
        metavar="PCT",
        default=8.3,
        help="average sales tax rate in percent (default=%(default).2f)"
    )
    arg_parser.add_argument(
        "--whsle-frac",
        type=float,
        dest="wholesale_fraction",
        metavar="FRAC",
        default=0.5,
        help="wholesale price fraction (default=%(default).2f)"
    )

    subparsers = arg_parser.add_subparsers(help='operations')

    subparsers.add_parser("gui", help="display graphical interface")

    event_parser = subparsers.add_parser(
        "event",
        help="calculate event price"
    )
    event_parser.add_argument(
        "retail_price",
        type=float,
        help="price"
    )

    retail_parser = subparsers.add_parser(
        "retail",
        help="calculate retail price"
    )
    retail_parser.add_argument(
        "event_price",
        type=float,
        help="price"
    )

    # Parse command line arguments.
    args = arg_parser.parse_args()

    # Convert and print price.
    if "retail_price" in args:
        price = calc_event_price(
            args.retail_price,
            args.discount_percent,
            args.avg_tax_percent
        )
        print("{:,.0f}".format(price))
    elif "event_price" in args:
        price = calc_retail_price(
            args.event_price,
            args.discount_percent,
            args.avg_tax_percent
        )
        print("{:,.2f}".format(price))
    else:
        root = Tkinter.Tk()
        root.title("Calc Product Price")
        AppGUI(args, root)
        root.mainloop()

    return 0


if __name__ == "__main__":
    main()
StarcoderdataPython
4802137
print(*map(lambda x: x.count(x[0]), [input()]))
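# Added explanatory note: the one-liner above reads a line from stdin and
# prints how many times its first character occurs in it (x.count(x[0]),
# where x is the input string and x[0] is counted within x itself).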
StarcoderdataPython
6478164
<filename>apps/backend/subscription/constants.py
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from __future__ import absolute_import, unicode_literals

from celery.schedules import crontab

# Action polling interval (seconds)
ACTION_POLLING_INTERVAL = 1

# Action polling timeout
ACTION_POLLING_TIMEOUT = 60 * 3 * ACTION_POLLING_INTERVAL

# Trigger period for automatic subscription deployment
SUBSCRIPTION_UPDATE_INTERVAL = crontab(hour="*", minute="*/15", day_of_week="*", day_of_month="*", month_of_year="*")

# Cleanup period for subscription tasks
INSTANCE_CLEAR_INTERVAL = crontab(minute="*/5", hour="*", day_of_week="*", day_of_month="*", month_of_year="*")

# Task timeout: how long after create_time a task is considered timed out,
# guarding against pipeline jobs that hang in the background
TASK_TIMEOUT = 60 * 15

# Maximum number of retries
MAX_RETRY_TIME = 3

# Automatic deployment: maximum number of subscriptions contained in a single
# slice of subscription configs (empirically, one subscription takes 1-2s)
SUBSCRIPTION_UPDATE_SLICE_SIZE = 20

# Number of hosts per task
TASK_HOST_LIMIT = 500

# Cache time for subscription scope instances
SUBSCRIPTION_SCOPE_CACHE_TIME = 60 * 60
StarcoderdataPython
5094602
#!/usr/bin/env python
# Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.

import contextlib
import json
import logging
import os
import socket
import sys
import tempfile
import time
import unittest

ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(
    __file__.decode(sys.getfilesystemencoding()))))
sys.path.insert(0, ROOT_DIR)
sys.path.insert(0, os.path.join(ROOT_DIR, 'third_party'))

from depot_tools import auto_stub
from depot_tools import fix_encoding
from third_party import requests
from utils import authenticators
from utils import auth_server
from utils import net
from utils import oauth
from libs import luci_context

import net_utils


def global_test_setup():
    # Terminate HTTP server in tests 50x faster. Impacts performance though.
    auth_server._HTTPServer.poll_interval = 0.01


def call_rpc(scopes):
    ctx = luci_context.read('local_auth')
    r = requests.post(
        url='http://127.0.0.1:%d/rpc/LuciLocalAuthService.GetOAuthToken' %
            ctx['rpc_port'],
        data=json.dumps({
            'scopes': scopes,
            'secret': ctx['secret'],
        }),
        headers={'Content-Type': 'application/json'})
    return r.json()


@contextlib.contextmanager
def local_auth_server(token_cb, **overrides):
    class MockedProvider(object):
        def generate_token(self, scopes):
            return token_cb(scopes)

    s = auth_server.LocalAuthServer()
    try:
        local_auth = s.start(MockedProvider())
        local_auth.update(overrides)
        with luci_context.write(local_auth=local_auth):
            yield
    finally:
        s.stop()


class LocalAuthServerTest(auto_stub.TestCase):
    epoch = 12345678

    def setUp(self):
        super(LocalAuthServerTest, self).setUp()
        self.mock_time(0)

    def mock_time(self, delta):
        self.mock(time, 'time', lambda: self.epoch + delta)

    def test_works(self):
        calls = []

        def token_gen(scopes):
            calls.append(scopes)
            return auth_server.AccessToken('tok', time.time() + 300)

        with local_auth_server(token_gen):
            # Grab initial token.
            resp = call_rpc(['B', 'B', 'A', 'C'])
            self.assertEqual(
                {u'access_token': u'tok', u'expiry': self.epoch + 300}, resp)
            self.assertEqual([('A', 'B', 'C')], calls)
            del calls[:]

            # Reuses cached token until it is close to expiration.
            self.mock_time(200)
            resp = call_rpc(['B', 'A', 'C'])
            self.assertEqual(
                {u'access_token': u'tok', u'expiry': self.epoch + 300}, resp)
            self.assertFalse(calls)

            # Expired. Generated new one.
            self.mock_time(300)
            resp = call_rpc(['A', 'B', 'C'])
            self.assertEqual(
                {u'access_token': u'tok', u'expiry': self.epoch + 600}, resp)
            self.assertEqual([('A', 'B', 'C')], calls)

    def test_handles_token_errors(self):
        fatal = False
        code = 123

        def token_gen(_scopes):
            raise auth_server.TokenError(code, 'error message', fatal=fatal)

        with local_auth_server(token_gen):
            self.assertEqual(
                {u'error_code': 123, u'error_message': u'error message'},
                call_rpc(['B', 'B', 'A', 'C']))

            # Non-fatal errors aren't cached.
            code = 456
            self.assertEqual(
                {u'error_code': 456, u'error_message': u'error message'},
                call_rpc(['B', 'B', 'A', 'C']))

            # Fatal errors are cached.
            fatal = True
            code = 789
            self.assertEqual(
                {u'error_code': 789, u'error_message': u'error message'},
                call_rpc(['B', 'B', 'A', 'C']))

            # Same cached error.
            code = 111
            self.assertEqual(
                {u'error_code': 789, u'error_message': u'error message'},
                call_rpc(['B', 'B', 'A', 'C']))

    def test_http_level_errors(self):
        def token_gen(_scopes):
            self.fail('must not be called')

        with local_auth_server(token_gen):
            # Wrong URL.
            ctx = luci_context.read('local_auth')
            r = requests.post(
                url='http://127.0.0.1:%d/blah/LuciLocalAuthService.GetOAuthToken' %
                    ctx['rpc_port'],
                data=json.dumps({
                    'scopes': ['A', 'B', 'C'],
                    'secret': ctx['secret'],
                }),
                headers={'Content-Type': 'application/json'})
            self.assertEqual(404, r.status_code)

            # Wrong HTTP method.
            r = requests.get(
                url='http://127.0.0.1:%d/rpc/LuciLocalAuthService.GetOAuthToken' %
                    ctx['rpc_port'],
                data=json.dumps({
                    'scopes': ['A', 'B', 'C'],
                    'secret': ctx['secret'],
                }),
                headers={'Content-Type': 'application/json'})
            self.assertEqual(501, r.status_code)

            # Wrong content type.
            r = requests.post(
                url='http://127.0.0.1:%d/rpc/LuciLocalAuthService.GetOAuthToken' %
                    ctx['rpc_port'],
                data=json.dumps({
                    'scopes': ['A', 'B', 'C'],
                    'secret': ctx['secret'],
                }),
                headers={'Content-Type': 'application/xml'})
            self.assertEqual(400, r.status_code)

            # Bad JSON.
            r = requests.post(
                url='http://127.0.0.1:%d/rpc/LuciLocalAuthService.GetOAuthToken' %
                    ctx['rpc_port'],
                data='not a json',
                headers={'Content-Type': 'application/json'})
            self.assertEqual(400, r.status_code)

    def test_validation(self):
        def token_gen(_scopes):
            self.fail('must not be called')

        with local_auth_server(token_gen):
            def must_fail(err, body, code=400):
                ctx = luci_context.read('local_auth')
                r = requests.post(
                    url='http://127.0.0.1:%d/rpc/LuciLocalAuthService.GetOAuthToken' %
                        ctx['rpc_port'],
                    data=json.dumps(body),
                    headers={'Content-Type': 'application/json'})
                self.assertEqual(code, r.status_code)
                self.assertIn(err, r.text)

            must_fail('"scopes" is required', {})
            must_fail('"scopes" is required', {'scopes': []})
            must_fail('"scopes" must be a list of strings', {'scopes': 'abc'})
            must_fail('"scopes" must be a list of strings', {'scopes': [1]})
            must_fail('"secret" is required', {'scopes': ['a']})
            must_fail('"secret" must be a string', {'scopes': ['a'], 'secret': 123})
            must_fail(
                'Invalid "secret"', {'scopes': ['a'], 'secret': 'abc'}, code=403)


class LocalAuthHttpServiceTest(auto_stub.TestCase):
    """Tests for LocalAuthServer and LuciContextAuthenticator."""

    epoch = 12345678

    def setUp(self):
        super(LocalAuthHttpServiceTest, self).setUp()
        self.mock_time(0)

    def mock_time(self, delta):
        self.mock(time, 'time', lambda: self.epoch + delta)

    @staticmethod
    def mocked_http_service(url='http://example.com', perform_request=None):

        class MockedRequestEngine(object):
            def perform_request(self, request):
                return perform_request(request) if perform_request else None

            @classmethod
            def timeout_exception_classes(cls):
                return ()

            @classmethod
            def parse_request_exception(cls, exc):
                del exc  # Unused argument
                return None, None

        return net.HttpService(
            url,
            authenticator=authenticators.LuciContextAuthenticator(),
            engine=MockedRequestEngine())

    def test_works(self):
        service_url = 'http://example.com'
        request_url = '/some_request'
        response = 'True'
        token = '<PASSWORD>'

        def token_gen(scopes):
            self.assertEqual(1, len(scopes))
            self.assertEqual(oauth.OAUTH_SCOPES, scopes[0])
            return auth_server.AccessToken(token, time.time() + 300)

        def handle_request(request):
            self.assertTrue(
                request.get_full_url().startswith(service_url + request_url))
            self.assertEqual('', request.body)
            self.assertEqual(u'Bearer %s' % token, request.headers['Authorization'])
            return net_utils.make_fake_response(response, request.get_full_url())

        with local_auth_server(token_gen):
            service = self.mocked_http_service(perform_request=handle_request)
            self.assertEqual(service.request(request_url, data={}).read(), response)

    def test_bad_secret(self):
        service_url = 'http://example.com'
        request_url = '/some_request'
        response = 'False'

        def token_gen(scopes):
            del scopes  # Unused argument
            self.fail('must not be called')

        def handle_request(request):
            self.assertTrue(
                request.get_full_url().startswith(service_url + request_url))
            self.assertEqual('', request.body)
            self.assertIsNone(request.headers.get('Authorization'))
            return net_utils.make_fake_response(response, request.get_full_url())

        with local_auth_server(token_gen, secret='invalid'):
            service = self.mocked_http_service(perform_request=handle_request)
            self.assertEqual(service.request(request_url, data={}).read(), response)

    def test_bad_port(self):
        request_url = '/some_request'

        def token_gen(scopes):
            del scopes  # Unused argument
            self.fail('must not be called')

        def handle_request(request):
            del request  # Unused argument
            self.fail('must not be called')

        # this little dance should pick an unused port, bind it and then close it,
        # trusting that the OS will not reallocate it between now and when the http
        # client attempts to use it as a local_auth service. This is better than
        # picking a static port number, as there's at least some guarantee that the
        # port WASN'T in use before this test ran.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(('localhost', 0))
        port = sock.getsockname()[1]
        sock.close()

        with local_auth_server(token_gen, rpc_port=port):
            service = self.mocked_http_service(perform_request=handle_request)
            with self.assertRaises(socket.error):
                self.assertRaises(service.request(request_url, data={}).read())

    def test_expired_token(self):
        service_url = 'http://example.com'
        request_url = '/some_request'
        response = 'False'
        token = '<PASSWORD>'

        def token_gen(scopes):
            self.assertEqual(1, len(scopes))
            self.assertEqual(oauth.OAUTH_SCOPES, scopes[0])
            return auth_server.AccessToken(token, time.time())

        def handle_request(request):
            self.assertTrue(
                request.get_full_url().startswith(service_url + request_url))
            self.assertEqual('', request.body)
            self.assertIsNone(request.headers.get('Authorization'))
            return net_utils.make_fake_response(response, request.get_full_url())

        with local_auth_server(token_gen):
            service = self.mocked_http_service(perform_request=handle_request)
            self.assertEqual(service.request(request_url, data={}).read(), response)
StarcoderdataPython
11312336
<gh_stars>0
import numpy as np
from collections import deque  # deque lives in collections, not queue


class Memory:
    """Sets up the memory element"""

    def __init__(self, max_size):
        """Initializes the memory element"""
        self.buffer = deque(maxlen=max_size)

    def add(self, experience):
        """Adds player experience to the memory element"""
        self.buffer.append(experience)

    def sample(self, batch_size):
        """Collects player experience from the memory element"""
        buffer_size = len(self.buffer)
        index = np.random.choice(np.arange(buffer_size),
                                 size=batch_size,
                                 replace=False)
        return [self.buffer[i] for i in index]
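
# --- Added usage sketch (not part of the original file) ---
# Exercises the Memory buffer with dummy (state, action, reward) tuples;
# the tuple layout is an illustrative assumption.
if __name__ == "__main__":
    replay = Memory(max_size=100)
    for step in range(10):
        replay.add((step, step % 4, float(step)))  # (state, action, reward)
    print(replay.sample(batch_size=4))  # four distinct random experiences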
StarcoderdataPython
6644463
import numpy as np  # currently unused; presumably kept for the eventual implementation


class ExperienceReplay:
    """Stub for a uniform experience replay buffer; only the capacity is
    stored so far."""

    def __init__(self, size):
        self.size = size


class PrioritisedExperienceReplay:
    """Stub for a prioritised experience replay buffer; only the capacity is
    stored so far."""

    def __init__(self, size):
        self.size = size
StarcoderdataPython
8042298
load("@local_config_env//:env.bzl", "FELICIA_ROOT") load("@local_config_python//:py.bzl", "PYTHON_BIN") LastChangeInfo = provider("lastchange") def _lastchange_impl(ctx): outputs = [ctx.actions.declare_file("LASTCHANGE"), ctx.actions.declare_file("LASTCHANGE.committime")] tool_path = ctx.expand_location("$(location //third_party/chromium/build/util:lastchange.py)", [ctx.attr._tool]) # Should run on FELICIA_ROOT, otherwise can't run git command. ctx.actions.run_shell( tools = ctx.files._tool, outputs = outputs, progress_message = "Generating LASTCHANGE", use_default_shell_env = True, command = "%s %s --source-dir %s --output LASTCHANGE && mv LASTCHANGE LASTCHANGE.committime %s" % ( PYTHON_BIN, tool_path, FELICIA_ROOT, outputs[0].dirname, ), ) return [ DefaultInfo(files = depset(outputs)), LastChangeInfo(lastchange = depset(outputs)), ] lastchange = rule( implementation = _lastchange_impl, attrs = { "_tool": attr.label( allow_single_file = True, default = Label("//third_party/chromium/build/util:lastchange.py"), ), }, )
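
# --- Added usage sketch (not part of the original file) ---
# Hypothetical BUILD wiring for the rule above; the load path is an
# assumption:
#
#   load("//build:lastchange.bzl", "lastchange")
#
#   lastchange(
#       name = "lastchange",
#   )
#
# Downstream rules can then read the generated LASTCHANGE and
# LASTCHANGE.committime files from DefaultInfo, or consume them through the
# LastChangeInfo provider's `lastchange` field.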
StarcoderdataPython
3212855
<gh_stars>0 """ This file provides a wrapper around resnet and vote_fc and is useful for inference since it fuses both forward passes in one. """ import torch import torch.nn as nn from models import get_pose_net, SMPL # from models.resnet import resnet50 # from models.sc_layers_share_global6d import SCFC_Share from models import HMR_HR # from models.geometric_layers import orthographic_projection, rodrigues, quat2mat import numpy as np class HSModel(nn.Module): def __init__(self, cfg, is_train, smpl_mean_params, pretrained_checkpoint=None): super(HSModel, self).__init__() self.hrnet = get_pose_net(cfg, is_train) # hidden_neuron_list = [4096,4096] self.hmr_hr = HMR_HR(cfg, smpl_mean_params) # self.smpl = SMPL() if pretrained_checkpoint is not None: checkpoint = torch.load(pretrained_checkpoint) try: self.hrnet.load_state_dict(checkpoint['hrnet']) except KeyError: print('Warning: hrnet was not found in checkpoint') try: self.hmr_hr.load_state_dict(checkpoint['hmr_hr']) except KeyError: print('Warning: hmr_hr was not found in checkpoint') def forward(self, image): """Fused forward pass for the 2 networks Inputs: image: size = (B, 3, 224, 224) Returns: Regressed SMPL shape: size = (B, 6890, 3) Weak-perspective camera: size = (B, 3) SMPL pose parameters (as rotation matrices): size = (B, 24, 3, 3) SMPL shape parameters: size = (B, 10) """ batch_size = image.shape[0] with torch.no_grad(): outputs = self.hrnet(image) pred_rotmat, pred_shape, pred_cam = self.hmr_hr(outputs) # pred_camera = pred_camera_with_global_rot[:,:3] #(B,3) # pred_global_rot = pred_camera_with_global_rot[:,3:][:,None,:] #(B,1,4) # pose_cube = pred_theta.view(-1, 4) # (batch_size * 24, 4) # R = quat2mat(pose_cube).view(batch_size, 23, 3, 3) # pred_rotmat = R.view(batch_size, 23, 3, 3) # pred_global_rot = pred_global_rot.view(batch_size, 1, 3, 3) # pred_rotmat = torch.cat((pred_global_rot,pred_rotmat),dim=1) #(B,24,3,3) # pred_vertices = self.smpl(pred_rotmat, pred_beta) return outputs, pred_rotmat, pred_shape, pred_cam
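
# --- Added usage sketch (not part of the original file) ---
# Hedged inference example; `cfg`, the mean-params file, and the checkpoint
# path are assumptions, and the input shape follows the forward() docstring:
#
#   model = HSModel(cfg, is_train=False,
#                   smpl_mean_params='data/smpl_mean_params.npz',
#                   pretrained_checkpoint='data/checkpoint.pt')
#   model.eval()
#   with torch.no_grad():
#       outputs, pred_rotmat, pred_shape, pred_cam = model(
#           torch.zeros(1, 3, 224, 224))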
StarcoderdataPython
12863384
""" This script creates users in a JAMF Pro Server instance from an LDAP query. """ # Copyright 2020 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. import sys from collections import namedtuple from multiprocessing.pool import ThreadPool from typing import List from json.decoder import JSONDecodeError import ldap import requests from ldap.controls import SimplePagedResultsControl from conf import ( JAMF_PASSWORD, JAMF_URL, JAMF_USERNAME, LDAP_BIND_PASSWORD, LDAP_BIND_URI, LDAP_BIND_USERNAME, LDAP_FILTER, LDAP_INSECURE, LDAP_SEARCH_DN_LIST, ) JAMF_AUTH = requests.auth.HTTPBasicAuth(JAMF_USERNAME, JAMF_PASSWORD) SESSION = requests.Session() User = namedtuple("User", ["sAMAccountName", "email", "last_name", "first_name"]) def eprint(*args, **kwargs): """Like print, but outputs to stderr.""" print(*args, file=sys.stderr, **kwargs) def results_for_dn(directory: ldap.ldapobject, base_dn: str, filter: str) -> List[User]: """Returns a list of User objects found in the directory object for filter :param directory: A ldap.LDAPObject that has already been bound to a directory. :param base_dn: The base of the directory tree to run the search filter against. :param filter: The LDAP search filter to run on base_dn using directory. """ req_ctrl = SimplePagedResultsControl(True, size=5000, cookie="") known_ldap_resp_ctrls = { SimplePagedResultsControl.controlType: SimplePagedResultsControl, } # Send search request msgid = directory.search_ext( base_dn, ldap.SCOPE_SUBTREE, filterstr=LDAP_FILTER, serverctrls=[req_ctrl] ) results = [] while True: __, result_data, __, serverctrls = directory.result3( msgid, resp_ctrl_classes=known_ldap_resp_ctrls ) results.extend( [ User( ldap_entry["sAMAccountName"][0].decode(), ldap_entry["mail"][0].decode(), ldap_entry["sn"][0].decode(), ldap_entry["givenName"][0].decode(), ) for __, ldap_entry in result_data ] ) page_controls = [ control for control in serverctrls if control.controlType == SimplePagedResultsControl.controlType ] if page_controls: if page_controls[0].cookie: # Copy cookie from response control to request control req_ctrl.cookie = page_controls[0].cookie msgid = directory.search_ext( base_dn, ldap.SCOPE_SUBTREE, filterstr=LDAP_FILTER, serverctrls=[req_ctrl], ) else: break else: eprint("Warning: Server ignores RFC 2696 control.") break return results def create_user_in_jamf(user: User): """ Creates a user in the JPS :param user: A User object which will be used to create the JPS user. 
This function uses the following module variables: * SESSION must be a requests.Session instance * JAMF_AUTH must be a requests.auth interface instance * JAMF_URL must be the full base URL of a JAMF instance. """ eprint("Attempting to create", user.sAMAccountName) xml = """ <user> <name>{name}</name> <full_name>{last_name}, {first_name}</full_name> <email>{email}</email> </user> """.format( name=user.sAMAccountName, last_name=user.last_name, first_name=user.first_name, email=user.email, ).encode() r = SESSION.post( JAMF_URL + "/JSSResource/users/id/-1", data=xml, headers={"Content-Type": "application/xml", "Accept": "application/xml"}, auth=JAMF_AUTH, ) try: r.raise_for_status() except requests.exceptions.RequestException as e: eprint("Failed to create user with username", user.sAMAccountName) eprint(e) eprint(r.text) else: print(user.sAMAccountName) def main(): eprint("Binding to LDAP...") if LDAP_INSECURE: ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) directory = ldap.initialize(LDAP_BIND_URI) directory.protocol_version = 3 directory.simple_bind_s(who=LDAP_BIND_USERNAME, cred=LDAP_BIND_PASSWORD) eprint("Searching directory for users...") ldap_users = [] for base_dn in LDAP_SEARCH_DN_LIST: eprint("Searching DN", base_dn, "with filter", LDAP_FILTER) ldap_users.extend(results_for_dn(directory, base_dn, LDAP_FILTER)) directory.unbind_s() directory = None eprint("Total LDAP users:", len(ldap_users)) eprint("Asking JPS for its user list...") jamf_user_request = requests.get( JAMF_URL + "/JSSResource/users", auth=JAMF_AUTH, headers={"Accept": "application/json"}, ) try: jamf_user_json = jamf_user_request.json() except JSONDecodeError: eprint(jamf_user_request.text) eprint("Failed to decode /JSSResource/users response as JSON.") sys.exit(1) jamf_usernames = frozenset([user["name"] for user in jamf_user_json["users"]]) eprint("Total JAMF users:", len(jamf_usernames)) missing_users = [ user for user in ldap_users if user.sAMAccountName not in jamf_usernames ] eprint("Users to create:", len(missing_users)) with ThreadPool(10) as pool: results = pool.map(create_user_in_jamf, missing_users) eprint("Done. Created users:", len(results)) if __name__ == "__main__": main()
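
# --- Added configuration sketch (not part of the original file) ---
# A hedged example of the `conf` module this script imports from; every value
# below is a placeholder assumption, not a real deployment setting.
#
#   # conf.py
#   JAMF_URL = "https://jamf.example.org:8443"
#   JAMF_USERNAME = "api-user"
#   JAMF_PASSWORD = "change-me"
#   LDAP_BIND_URI = "ldaps://ldap.example.org"
#   LDAP_BIND_USERNAME = "cn=svc-jamf,ou=services,dc=example,dc=org"
#   LDAP_BIND_PASSWORD = "change-me"
#   LDAP_INSECURE = False
#   LDAP_FILTER = "(&(objectClass=user)(mail=*))"
#   LDAP_SEARCH_DN_LIST = ["ou=people,dc=example,dc=org"]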
StarcoderdataPython
9770450
import collections from copy import deepcopy import meshio import numpy from ._common import ( get_local_index, get_meshio_version, get_new_meshio_cells, get_old_meshio_cells, meshio_data, ) from ._properties import ( _connections, _face_areas, _face_normals, _faces, _materials, _volumes, ) __all__ = [ "Cells", "Mesh", "from_meshio", ] Cells = collections.namedtuple("Cells", ["type", "data"]) class Mesh(object): """toughio mesh. This class is updated following the latest :module:`meshio` version and brings backward compatibility with its previous versions. Parameters ---------- points : array_like (n_points, 3) Cooordinates of points. cells : list of tuples (cell_type, data) Connectivity of cells. point_data : dict or None, optional, default None Point data arrays. cell_data : dict or None, optional, default None Cell data arrays. field_data : dict or None, optional, default None Field data names. point_sets : dict or None, optional, default None Sets of points. cell_sets : dict or None, optional, default None Sets of cells. """ def __init__( self, points, cells, point_data=None, cell_data=None, field_data=None, point_sets=None, cell_sets=None, ): self.points = points self.cells = cells self.point_data = point_data if point_data else {} self.cell_data = cell_data if cell_data else {} self.field_data = field_data if field_data else {} self.point_sets = point_sets if point_sets else {} self.cell_sets = cell_sets if cell_sets else {} def __repr__(self): lines = [ "<toughio mesh object>", " Number of points: {}".format(len(self.points)), ] if len(self.cells) > 0: lines.append(" Number of cells:") for tpe, elems in self.cells: lines.append(" {}: {}".format(tpe, len(elems))) else: lines.append(" No cells.") if self.point_sets: lines.append(" Point sets: {}".format(", ".join(self.point_sets.keys()))) if self.point_data: lines.append(" Point data: {}".format(", ".join(self.point_data.keys()))) if self.cell_data: lines.append(" Cell data: {}".format(", ".join(self.cell_data.keys()))) return "\n".join(lines) def extrude_to_3d(self, height=1.0, axis=2, inplace=True): """Convert a 2D mesh to 3D by extruding cells along given axis. Parameters ---------- height : scalar or array_like, optional, default 1.0 Height of extrusion. axis : int (0, 1 or 2), optional, default 2 Axis along which extrusion is performed. inplace : bool, optional, default True If `False`, return a new :class:`toughio.Mesh`. Returns ------- toughio.Mesh Extruded mesh (only if ``inplace == False``). 
""" if axis not in [0, 1, 2]: raise ValueError("axis must be 0, 1 or 2.") mesh = self if inplace else deepcopy(self) height = [height] if isinstance(height, (int, float)) else height npts, nh = len(mesh.points), len(height) if mesh.points.shape[1] == 3: if len(set(mesh.points[:, axis])) != 1: raise ValueError("Cannot extrude mesh along axis {}.".format(axis)) else: mesh.points = numpy.column_stack((mesh.points, numpy.zeros(npts))) if axis != 2: mesh.points[:, [axis, 2]] = mesh.points[:, [2, axis]] extra_points = numpy.array(mesh.points) for h in height: extra_points[:, axis] += h mesh.points = numpy.vstack((mesh.points, extra_points)) extruded_types = { "triangle": "wedge", "quad": "hexahedron", } cells = [] for ic, c in enumerate(mesh.cells): if c.type in extruded_types.keys(): extruded_type = extruded_types[c.type] nr, nc = c.data.shape cell = Cells(extruded_type, numpy.tile(c.data, (nh, 2))) for i in range(nh): ibeg, iend = i * nr, (i + 1) * nr cell.data[ibeg:iend, :nc] += i * npts cell.data[ibeg:iend, nc:] += (i + 1) * npts cells.append(cell) if mesh.cell_data: for k, v in mesh.cell_data.items(): v[ic] = numpy.tile(v[ic], nh) mesh.cells = cells if mesh.field_data: for k in mesh.field_data.keys(): mesh.field_data[k][1] = 3 if not inplace: return mesh def prune_duplicates(self, inplace=True): """Delete duplicate points and cells. Parameters ---------- inplace : bool, optional, default True If `False`, return a new :class:`toughio.Mesh`. Returns ------- toughio.Mesh Pruned mesh (only if ``inplace == False``). Note ---- Does not preserve points order from original array in mesh. """ mesh = self if inplace else deepcopy(self) cells = [[c.type, c.data] for c in mesh.cells] # Prune duplicate points unique_points, pind, pinv = numpy.unique( mesh.points, axis=0, return_index=True, return_inverse=True, ) if len(unique_points) < len(mesh.points): mesh.points = unique_points for k, v in mesh.point_data.items(): mesh.point_data[k] = v[pind] for ic, (k, v) in enumerate(cells): cells[ic][1] = pinv[v] # Prune duplicate cells for ic, (k, v) in enumerate(cells): vsort = numpy.sort(v, axis=1) _, order = numpy.unique(vsort, axis=0, return_index=True) cells[ic][1] = v[order] if mesh.cell_data: for kk, vv in mesh.cell_data.items(): mesh.cell_data[kk][ic] = vv[ic][order] mesh.cells = cells if not inplace: return mesh def split(self, arr): """Split input array into subarrays for each cell block in mesh. Parameters ---------- arr : array_like Input array. Returns ------- list of array_like List of subarrays. """ if len(arr) != self.n_cells: raise ValueError() sizes = numpy.cumsum([len(c.data) for c in self.cells]) return numpy.split(numpy.asarray(arr), sizes[:-1]) def to_meshio(self): """Convert mesh to :class:`meshio.Mesh`. Returns ------- meshio.Mesh Output mesh. """ keys = ["points", "point_data", "field_data"] kwargs = {key: getattr(self, key) for key in keys} version = get_meshio_version() if version[0] >= 4: kwargs.update( { "cells": self.cells, "cell_data": self.cell_data, "point_sets": self.point_sets, "cell_sets": self.cell_sets, } ) else: cells, cell_data = get_old_meshio_cells(self.cells, self.cell_data) kwargs.update( {"cells": cells, "cell_data": cell_data, "node_sets": self.point_sets,} ) return meshio.Mesh(**kwargs) def to_pyvista(self): """Convert mesh to :class:`pyvista.UnstructuredGrid`. Returns ------- pyvista.UnstructuredGrid Output mesh. 
""" try: import pyvista from ._common import ( meshio_to_vtk_type, vtk_type_to_numnodes, ) except ImportError: raise ImportError( "Converting to pyvista.UnstructuredGrid requires pyvista to be installed." ) # Extract cells from toughio.Mesh object offset = [] cells = [] cell_type = [] next_offset = 0 for c in self.cells: vtk_type = meshio_to_vtk_type[c.type] numnodes = vtk_type_to_numnodes[vtk_type] offset += [next_offset + i * (numnodes + 1) for i in range(len(c.data))] cells.append( numpy.hstack((numpy.full((len(c.data), 1), numnodes), c.data)).ravel() ) cell_type += [vtk_type] * len(c.data) next_offset = offset[-1] + numnodes + 1 # Extract cell data from toughio.Mesh object cell_data = {k: numpy.concatenate(v) for k, v in self.cell_data.items()} # Create pyvista.UnstructuredGrid object points = self.points if points.shape[1] == 2: points = numpy.hstack((points, numpy.zeros((len(points), 1)))) mesh = pyvista.UnstructuredGrid( numpy.array(offset), numpy.concatenate(cells), numpy.array(cell_type), numpy.array(points, numpy.float64), ) # Set point data mesh.point_arrays.update( {k: numpy.array(v, numpy.float64) for k, v in self.point_data.items()} ) # Set cell data mesh.cell_arrays.update(cell_data) return mesh def to_tough(self, filename="MESH", **kwargs): """Write TOUGH `MESH` file. Parameters ---------- filename : str, optional, default 'MESH' Output file name. """ self.write(filename, file_format="tough", **kwargs) def read_output(self, file_or_output, time_step=-1): """Import TOUGH results to the mesh. Parameters ---------- file_or_output : str, namedtuple or list of namedtuple Input file name or output data. time_step : int, optional, default -1 Data for given time step to import. Default is last time step. """ from .. import read_output from .._io._helpers import Output, Save, _reorder_labels if not isinstance(file_or_output, (str, list, tuple, Output, Save)): raise TypeError() if not isinstance(time_step, int): raise TypeError() if isinstance(file_or_output, str): out = read_output(file_or_output) else: out = file_or_output if not isinstance(out, (Output, Save)): if not (-len(out) <= time_step < len(out)): raise ValueError() out = out[time_step] if len(out.labels) != self.n_cells: raise ValueError() out = _reorder_labels(out, self.labels) self.cell_data.update(out.data) def write(self, filename, file_format=None, **kwargs): """Write mesh to file. Parameters ---------- filename : str Output file name. file_format : str or None, optional, default None Output file format. If `None`, it will be guessed from file's extension. To write TOUGH MESH, `file_format` must be specified as 'tough' (no specific extension exists for TOUGH MESH). """ from ._helpers import write write(filename, self, file_format, **kwargs) def plot(self, *args, **kwargs): """Display mesh using :method:`pyvista.UnstructuredGrid.plot``.""" mesh = self.to_pyvista() mesh.plot(*args, **kwargs) def add_point_data(self, label, data): """Add a new point data array. Parameters ---------- label : str Point data array name. data : array_like Point data array. """ if not isinstance(label, str): raise TypeError() if not isinstance(data, (list, tuple, numpy.ndarray)): raise TypeError() if len(data) != self.n_points: raise ValueError() self.point_data[label] = numpy.asarray(data) def add_cell_data(self, label, data): """Add a new cell data array. Parameters ---------- label : str Cell data array name. data : array_like Cell data array. 
""" if not isinstance(label, str): raise TypeError() if not isinstance(data, (list, tuple, numpy.ndarray)): raise TypeError() if len(data) != self.n_cells: raise ValueError() self.cell_data[label] = self.split(data) def set_material(self, material, xlim=None, ylim=None, zlim=None): """Set material to cells in box. Set material for cells within box selection defined by `xlim`, `ylim` and `zlim`. Parameters ---------- material : str Material name. xlim : array_like or None, optional, default None Minimum and maximum values in X direction. ylim : array_like or None, optional, default None Minimum and maximum values in Y direction. zlim : array_like or None, optional, default None Minimum and maximum values in Z direction. Raises ------ AssertionError If any input argument is not valid. """ def isinbounds(x, bounds): return ( numpy.logical_and(x >= min(bounds), x <= max(bounds)) if bounds is not None else numpy.ones(len(x), dtype=bool) ) if not isinstance(material, str): raise TypeError() if not (xlim is not None or ylim is not None or zlim is not None): raise TypeError() if not ( xlim is None or (isinstance(xlim, (list, tuple, numpy.ndarray)) and len(xlim) == 2) ): raise ValueError() if not ( ylim is None or (isinstance(ylim, (list, tuple, numpy.ndarray)) and len(ylim) == 2) ): raise ValueError() if not ( zlim is None or (isinstance(zlim, (list, tuple, numpy.ndarray)) and len(zlim) == 2) ): raise ValueError() x, y, z = numpy.concatenate(self.centers).T mask_x = isinbounds(x, xlim) mask_y = isinbounds(y, ylim) mask_z = isinbounds(z, zlim) mask = numpy.logical_and(numpy.logical_and(mask_x, mask_y), mask_z) if mask.any(): data = numpy.concatenate(self.cell_data["material"]) imat = ( self.field_data[material][0] if material in self.field_data.keys() else data.max() + 1 ) data[mask] = imat self.add_cell_data("material", data) self.field_data[material] = numpy.array([imat, 3]) def near(self, point): """Return local index of cell nearest to query point. Parameters ---------- point : array_like Coordinates of point to query. Returns ------- tuple Local index of cell as a tuple (iblock, icell). 
""" if not isinstance(point, (list, tuple, numpy.ndarray)): raise TypeError() if numpy.ndim(point) != 1: raise ValueError() if len(point) != self.points.shape[1]: raise ValueError() centers = numpy.concatenate(self.centers) idx = numpy.arange(self.n_cells) idx = idx[numpy.argmin(numpy.linalg.norm(centers - point, axis=1))] return get_local_index(self, idx) @property def points(self): """Return coordinates of points.""" return self._points @points.setter def points(self, value): self._points = value @property def cells(self): """Return connectivity of cells.""" if self._cells: return self._cells else: return [Cells(k, v) for k, v in self._cells_dict.items()] @cells.setter def cells(self, value): if isinstance(value, dict): self._cells = [] self._cells_dict = value else: self._cells = [Cells(k, v) for k, v in value] self._cells_dict = {} @property def cells_dict(self): """Return connectivity of cells (``meshio < 4.0.0``).""" if self._cells: return get_old_meshio_cells(self._cells) else: return self._cells_dict @property def point_data(self): """Return point data arrays.""" return self._point_data @point_data.setter def point_data(self, value): self._point_data = value @property def cell_data(self): """Return cell data arrays.""" return self._cell_data @cell_data.setter def cell_data(self, value): self._cell_data = value @property def field_data(self): """Return field data names.""" return self._field_data @field_data.setter def field_data(self, value): self._field_data = value @property def point_sets(self): """Return sets of points.""" return self._point_sets @point_sets.setter def point_sets(self, value): self._point_sets = value @property def cell_sets(self): """Return sets of cells.""" return self._cell_sets @cell_sets.setter def cell_sets(self, value): self._cell_sets = value @property def n_points(self): """Return number of points.""" return len(self.points) @property def n_cells(self): """Return number of cells.""" return sum(len(c.data) for c in self.cells) @property def labels(self): """Return labels of cell in mesh.""" from ._common import labeler return self.split([labeler(i) for i in range(self.n_cells)]) @property def centers(self): """Return node centers of cell in mesh.""" return [self.points[c.data].mean(axis=1) for c in self.cells] @property def materials(self): """Return materials of cell in mesh.""" return _materials(self) @property def faces(self): """Return connectivity of faces of cell in mesh.""" out = _faces(self) arr = numpy.full((self.n_cells, 6, 4), -1) for i, x in enumerate(out): arr[i, : len(x[0]), : x[0].shape[1]] = x[0] if len(x) > 1: arr[i, len(x[0]) : len(x[0]) + len(x[1]), : x[1].shape[1]] = x[1] return self.split(arr) @property def face_normals(self): """Return normal vectors of faces in mesh.""" return [ numpy.array([face for face in faces]) for faces in self.split(_face_normals(self)) ] @property def face_areas(self): """Return areas of faces in mesh.""" return [ numpy.array([face for face in faces]) for faces in self.split(_face_areas(self)) ] @property def volumes(self): """Return volumes of cell in mesh.""" return _volumes(self) @property def connections(self): """Return mesh connections. Assume conformity and that points and cells are uniquely defined in mesh. Note ---- Only for 3D meshes and first order cells. """ return self.split(_connections(self)) def from_meshio(mesh): """Convert a :class:`meshio.Mesh` to :class:`toughio.Mesh`. Parameters ---------- mesh : meshio.Mesh Input mesh. Returns ------- toughio.Mesh Output mesh. 
""" if mesh.cell_data: version = get_meshio_version() if version[0] >= 4: cells = mesh.cells cell_data = mesh.cell_data else: cells, cell_data = get_new_meshio_cells(mesh.cells, mesh.cell_data) for k in cell_data.keys(): if k in meshio_data: cell_data["material"] = cell_data.pop(k) break else: cell_data = {} out = Mesh( points=mesh.points, cells=cells, point_data=mesh.point_data, cell_data=cell_data, field_data=mesh.field_data, point_sets=( mesh.point_sets if hasattr(mesh, "point_sets") else mesh.node_sets if hasattr(mesh, "node_sets") else None ), cell_sets=mesh.cell_sets if hasattr(mesh, "cell_sets") else None, ) if "material" not in out.cell_data.keys(): imat = ( numpy.max([v[0] for v in mesh.field_data.values() if v[1] == 3]) + 1 if mesh.field_data else 1 ) out.cell_data["material"] = out.split(numpy.full(out.n_cells, imat, dtype=int)) out.field_data["dfalt"] = numpy.array([imat, 3]) return out
StarcoderdataPython
9638171
import logging
import re

# STOP: Do not make changes to this file! This file contains defaults for the open group server and
# is intended to be replaced on upgrade. If you want to override any of these defaults you should
# instead set the variable you care about in `config.py`, which overrides values specified here.

# The paths we use for storage; if relative these are in the current working directory of the server
# process running sogs.
DB_PATH = 'sogs.db'
DB_SCHEMA_FILE = 'schema.sql'
KEY_FILE = 'key_x25519'

# Base url for generating links. Can be http, https, and may or may not include a port. Note that if
# using https you need to set up proper HTTPS certificates, for example using certbot to obtain a
# free Let's Encrypt certificate.
URL_BASE = 'http://example.net'

# The log level.
LOG_LEVEL = logging.WARNING

# Default upload expiry time, in days.
UPLOAD_DEFAULT_EXPIRY_DAYS = 15

# We truncate filenames if the sanitized name (not including the initial 'ID_') is longer than this.
UPLOAD_FILENAME_MAX = 60

# When a filename exceeds UPLOAD_FILENAME_MAX, we keep this many characters from the beginning,
# append "...", and then append enough from the end (i.e. max - this - 3) to hit the _MAX value.
UPLOAD_FILENAME_KEEP_PREFIX = 40
UPLOAD_FILENAME_KEEP_SUFFIX = 17

# Maximum size of a file upload that we accept, in bytes. Note that onion requests have a hard
# limit of 10MB for a fully-wrapped request, and that Session uploads with base64 encoding,
# so this is deliberately set conservatively below that limit.
UPLOAD_FILE_MAX_SIZE = 6_000_000

# Regex that matches *invalid* characters in a user-supplied filename (if any); any matches of this
# regex get replaced with a single _ when writing the file to disk. The default is intended to
# strip out enough so that the filename is storable portably on modern OS filesystems.
UPLOAD_FILENAME_BAD = re.compile(r"[^\w+\-.'()@\[\]]+")

# How long without activity before we drop user-room activity info, in days.
ROOM_ACTIVE_PRUNE_THRESHOLD = 60

# The default user activity cutoff that is used to report a room's current "active users" count; the
# unit is in days.
ROOM_DEFAULT_ACTIVE_THRESHOLD = 7

# How long we keep message edit/deletion history, in days.
MESSAGE_HISTORY_PRUNE_THRESHOLD = 30

# Dev import testing option, to be removed in the future.
IMPORT_ADJUST_MS = 0

# File containing "bad" words for filtration. This feature is temporary and will be removed once
# more robust bot/spam filtering is available.
BAD_WORDS_FILE = 'badwords.txt'
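
# --- Added override sketch (not part of the original file) ---
# Per the STOP comment above, local changes belong in `config.py`; a minimal
# hedged example (the values shown are assumptions):
#
#   # config.py
#   import logging
#
#   URL_BASE = 'https://sogs.example.org'
#   LOG_LEVEL = logging.INFO
#   UPLOAD_DEFAULT_EXPIRY_DAYS = 30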
StarcoderdataPython
3380572
<gh_stars>0 import numpy as np import tensorflow as tf import h5py from sklearn.preprocessing import OneHotEncoder import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import time # Download data from .mat file into numpy array print('==> Experiment 5d') # Functions for initializing neural nets parameters def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float64) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape, dtype=tf.float64) return tf.Variable(initial) filepath = '/pylon2/ci560sp/mint96/exp5d_d15_1s.mat' print('==> Loading data from {}'.format(filepath)) f = h5py.File(filepath) data_train = np.array(f.get('trainingFeatures')) # y_train = np.array(f.get('trainingLabels')) data_val = np.array(f.get('validationFeatures')) # y_val = np.array(f.get('validationLabels')) X_test = np.array(f.get('window_testFeatures')) y_test = np.array(f.get('window_testLabels')) del f print('==> Data sizes:',data_train.shape, data_val.shape, X_test.shape, y_test.shape) # Transform labels into on-hot encoding form enc = OneHotEncoder() #y_train = enc.fit_transform(y_train.copy()).astype(int).toarray() #y_val = enc.fit_transform(y_val.copy()).astype(int).toarray() y_test = enc.fit_transform(y_test.copy()).astype(int).toarray() sub_window_size_list = [1, 2, 4, 8, 16] ploty_train_all = [] ploty_val_all = [] ploty_test_all = [] hidden_layer_size = 0 for sub_window_size in sub_window_size_list: ''' NN config parameters ''' num_features = 169*sub_window_size num_frames = 16 hidden_layer_size = hidden_layer_size + 800 num_classes = y_test.shape[1] print("Number of features:", num_features) print("Number of frames:",num_frames) print("Hidden layer size:", hidden_layer_size) # Reshape input features X_train = np.reshape(data_train,(-1, num_features)) X_val = np.reshape(data_val,(-1,num_features)) print("Input sizes:", X_train.shape, X_val.shape) y_train = [] y_val = [] # Add Labels for label in range(num_classes): for sampleCount in range(X_train.shape[0]/num_classes): y_train.append([label]) for sampleCount in range(X_val.shape[0]/num_classes): y_val.append([label]) X_train = np.concatenate((X_train, y_train), axis=1) X_val = np.concatenate((X_val, y_val), axis=1) # Shuffle np.random.shuffle(X_train) np.random.shuffle(X_val) # Separate coefficients and labels y_train = X_train[:, -1].reshape(-1, 1) X_train = X_train[:, :-1] y_val = X_val[:, -1].reshape(-1, 1) X_val = X_val[:, :-1] print('==> Data sizes:',X_train.shape, y_train.shape, X_val.shape, y_val.shape) y_train = enc.fit_transform(y_train.copy()).astype(int).toarray() y_val = enc.fit_transform(y_val.copy()).astype(int).toarray() # Done Processing Data plotx = [] ploty_train = [] ploty_val = [] ploty_test = [] # Set-up NN layers x = tf.placeholder(tf.float64, [None, num_features]) W1 = weight_variable([num_features, hidden_layer_size]) b1 = bias_variable([hidden_layer_size]) # Hidden layer activation function: ReLU h1 = tf.nn.relu(tf.matmul(x, W1) + b1) W2 = weight_variable([hidden_layer_size, num_classes]) b2 = bias_variable([num_classes]) # Softmax layer (Output), dtype = float64 y = tf.matmul(h1, W2) + b2 y_group = tf.reshape(tf.reduce_mean(y, 0),[-1, num_classes]) # NN desired value (labels) y_ = tf.placeholder(tf.float64, [None, num_classes]) # Loss function cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)) cross_entropy_group = -tf.reduce_sum(y_ * tf.log(tf.nn.softmax(y_group))) train_step = 
tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) sess = tf.InteractiveSession() correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) correct_prediction_group = tf.equal(tf.argmax(y_group, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64)) accuracy_group = tf.reduce_mean(tf.cast(correct_prediction_group, tf.float64)) sess.run(tf.global_variables_initializer()) # Training numTrainingVec = len(X_train) batchSize = 500 numEpochs = 1000 startTime = time.time() for epoch in range(numEpochs): for i in range(0,numTrainingVec,batchSize): # Batch Data batchEndPoint = min(i+batchSize, numTrainingVec) trainBatchData = X_train[i:batchEndPoint] trainBatchLabel = y_train[i:batchEndPoint] train_step.run(feed_dict={x: trainBatchData, y_: trainBatchLabel}) # Print accuracy if epoch%50 == 0 or epoch == numEpochs-1: plotx.append(epoch) train_error = cross_entropy.eval(feed_dict={x:X_train, y_: y_train}) validation_error = cross_entropy.eval(feed_dict={x:X_val, y_: y_val}) train_acc = accuracy.eval(feed_dict={x:X_train, y_: y_train}) val_acc = accuracy.eval(feed_dict={x:X_val, y_: y_val}) ploty_train.append(train_error) ploty_val.append(validation_error) print("epoch: %d, train acc %g, val acc %g, train error %g, val error %g"%(epoch, train_acc, val_acc, train_error, validation_error)) # Evaluating multi-frame validation set total_error = 0 total_acc = 0 for t in range(len(X_test)): this_x = X_test[t] this_y = [y_test[t]] x_image = np.reshape(this_x, (-1, num_features)) total_error = total_error + cross_entropy_group.eval(feed_dict={x:x_image, y_: this_y}) total_acc = total_acc + accuracy_group.eval(feed_dict={x:x_image, y_: this_y}) ploty_test.append(total_error/len(X_test)) print("====> Window acc %g, err: %g"%(total_acc/len(X_test),ploty_test[-1])) endTime = time.time() print("Elapse Time:", endTime - startTime) ploty_train_all.append(ploty_train) ploty_val_all.append(ploty_val) ploty_test_all.append(ploty_test) print('==> Generating error plot...') errfig = plt.figure() trainErrPlot = errfig.add_subplot(111) trainErrPlot.set_xlabel('Number of Epochs') trainErrPlot.set_ylabel('Cross-Entropy Error') trainErrPlot.set_title('Error vs Number of Epochs') trainErrPlot.scatter(plotx, ploty_train) valErrPlot = errfig.add_subplot(111) valErrPlot.scatter(plotx, ploty_val, c='r') testErrPlot = errfig.add_subplot(111) testErrPlot.scatter(plotx, ploty_test, c='g') errfig.savefig('exp5d_s'+str(sub_window_size)+'_result.png') # Final Result print("Validation accuracy:",total_acc/len(X_test)) print("Validation error:", ploty_test[-1]) print("============================================") # END OF OUTERMOST FOR-LOOP print('==> Generating FINAL training error plot...') errfig = plt.figure() s1_Plot = errfig.add_subplot(111) s1_Plot.set_xlabel('Number of Epochs') s1_Plot.set_ylabel('Cross-Entropy Error') s1_Plot.set_title('Error vs Number of Epochs') s1_Plot.scatter(plotx, ploty_train_all[0]) s2_Plot = errfig.add_subplot(111) s2_Plot.scatter(plotx, ploty_train_all[1], c='r') s4_Plot = errfig.add_subplot(111) s4_Plot.scatter(plotx, ploty_train_all[2], c='g') s8_Plot = errfig.add_subplot(111) s8_Plot.scatter(plotx, ploty_train_all[3], c='yellow') s16_Plot = errfig.add_subplot(111) s16_Plot.scatter(plotx, ploty_train_all[4], c='magenta') errfig.savefig('exp5d_final_train_error.png') print('==> Generating FINAL validation error plot...') errfig = plt.figure() s1_Plot = errfig.add_subplot(111) s1_Plot.set_xlabel('Number of Epochs') s1_Plot.set_ylabel('Cross-Entropy Error') 
s1_Plot.set_title('Error vs Number of Epochs') s1_Plot.scatter(plotx, ploty_val_all[0]) s2_Plot = errfig.add_subplot(111) s2_Plot.scatter(plotx, ploty_val_all[1], c='r') s4_Plot = errfig.add_subplot(111) s4_Plot.scatter(plotx, ploty_val_all[2], c='g') s8_Plot = errfig.add_subplot(111) s8_Plot.scatter(plotx, ploty_val_all[3], c='yellow') s16_Plot = errfig.add_subplot(111) s16_Plot.scatter(plotx, ploty_val_all[4], c='magenta') errfig.savefig('exp5d_final_val_error.png') print('==> Generating FINAL window validation error plot...') errfig = plt.figure() s1_Plot = errfig.add_subplot(111) s1_Plot.set_xlabel('Number of Epochs') s1_Plot.set_ylabel('Cross-Entropy Error') s1_Plot.set_title('Error vs Number of Epochs') s1_Plot.scatter(plotx, ploty_test_all[0]) s2_Plot = errfig.add_subplot(111) s2_Plot.scatter(plotx, ploty_test_all[1], c='r') s4_Plot = errfig.add_subplot(111) s4_Plot.scatter(plotx, ploty_test_all[2], c='g') s8_Plot = errfig.add_subplot(111) s8_Plot.scatter(plotx, ploty_test_all[3], c='yellow') s16_Plot = errfig.add_subplot(111) s16_Plot.scatter(plotx, ploty_test_all[4], c='magenta') errfig.savefig('exp5d_final_test_error.png')
StarcoderdataPython
6406172
<reponame>AshuMaths1729/Sudoku-Vision<filename>data/gen_n10_data.py
import numpy as np
import cv2 as cv

fileName = "n10.data"


def gendata():
    # Solid black 50x50 base image, converted to a single grayscale channel.
    img = np.zeros((50, 50, 3), np.uint8)
    img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    roi = img[:, :]
    # Each sample row is the class label (10) followed by the flattened
    # pixels; the second sample is the inverted (all-white) image.
    roi_1 = np.append([10], roi)
    roi_2 = np.append([10], cv.bitwise_not(roi))
    res = np.array([roi_1, roi_2])
    np.savetxt(fileName, res, delimiter=',', fmt='%u')
    print("Generate data completed")


gendata()
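
# --- Added usage sketch (not part of the original file) ---
# Reads the generated file back, mirroring the np.savetxt format above:
# each row is the class label (10) followed by 50*50 flattened pixels.
def load_n10_data(path=fileName):
    data = np.loadtxt(path, delimiter=',', dtype=np.uint8)
    labels = data[:, 0]
    images = data[:, 1:].reshape(-1, 50, 50)
    return labels, images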
StarcoderdataPython