Dataset schema (each record below lists its field values in this column order, ending with the file content):

Column                  Dtype            Range / values
blob_id                 stringlengths    40 – 40
directory_id            stringlengths    40 – 40
path                    stringlengths    3 – 281
content_id              stringlengths    40 – 40
detected_licenses       sequencelengths  0 – 57
license_type            stringclasses    2 values
repo_name               stringlengths    6 – 116
snapshot_id             stringlengths    40 – 40
revision_id             stringlengths    40 – 40
branch_name             stringclasses    313 values
visit_date              timestamp[us]    –
revision_date           timestamp[us]    –
committer_date          timestamp[us]    –
github_id               int64            18.2k – 668M
star_events_count       int64            0 – 102k
fork_events_count       int64            0 – 38.2k
gha_license_id          stringclasses    17 values
gha_event_created_at    timestamp[us]    –
gha_created_at          timestamp[us]    –
gha_language            stringclasses    107 values
src_encoding            stringclasses    20 values
language                stringclasses    1 value
is_vendor               bool             2 classes
is_generated            bool             2 classes
length_bytes            int64            4 – 6.02M
extension               stringclasses    78 values
content                 stringlengths    2 – 6.02M
authors                 sequencelengths  1 – 1
author                  stringlengths    0 – 175
e172c4d221cb93b78fdf15d990b35e7e7e7fd500
48894ae68f0234e263d325470178d67ab313c73e
/scripts/noc-wf.py
9a461df838cfb1119d145697b6241de9a1a2e87f
[ "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference" ]
permissive
DreamerDDL/noc
7f949f55bb2c02c15ac2cc46bc62d957aee43a86
2ab0ab7718bb7116da2c3953efd466757e11d9ce
refs/heads/master
2021-05-10T18:22:53.678588
2015-06-29T12:28:20
2015-06-29T12:28:20
118,628,133
0
0
null
2018-01-23T15:19:51
2018-01-23T15:19:51
null
UTF-8
Python
false
false
663
py
#!./bin/python
# -*- coding: utf-8 -*-
##----------------------------------------------------------------------
## noc-wf daemon
##----------------------------------------------------------------------
## Copyright (C) 2007-2011 The NOC Project
## See LICENSE for details
##----------------------------------------------------------------------

if __name__ == "__main__":
    from noc.wf.wf.daemon import WFDaemon
    from noc.lib.debug import error_report
    from noc.main.models import CustomField

    CustomField.install_fields()
    try:
        WFDaemon().process_command()
    except SystemExit:
        pass
    except Exception:
        error_report()
d8d41ae91224bf4537d44f0b7ab5700facdbe029
7be65d5792bcb270673c7b0279fecdb82ba6bbac
/server-minimal.py
1c382f34cb22e9ec4ccd08d567e31896a03d0792
[]
no_license
niko9797/make_opcua
63dcb6796a5ded50015b8c03cf2a19189c564d3b
eff4fd2c124699d6519022c29ced3a962d0beb09
refs/heads/master
2021-01-24T03:52:46.148866
2018-02-26T13:52:23
2018-02-26T13:52:23
122,910,789
0
0
null
null
null
null
UTF-8
Python
false
false
960
py
import sys
sys.path.insert(0, "..")
import time

from opcua import ua, Server

if __name__ == "__main__":

    # setup our server
    server = Server()
    server.set_endpoint("opc.tcp://localhost:4841/freeopcua/server/")

    # setup our own namespace, not really necessary but should as spec
    uri = "http://examples.freeopcua.github.io"
    idx = server.register_namespace(uri)

    # get Objects node, this is where we should put our nodes
    objects = server.get_objects_node()

    # populating our address space
    myobj = objects.add_object(idx, "MyObject")
    myvar = myobj.add_variable(idx, "MyVariable", 6.7)
    myvar.set_writable()    # Set MyVariable to be writable by clients

    # starting!
    server.start()

    try:
        count = 0
        while True:
            time.sleep(1)
            count += 0.1
            myvar.set_value(count)
    finally:
        # close connection, remove subscriptions, etc
        server.stop()
2b5ac8164f91997e6ead1e624f33df2c4683565b
6eb957a3690c3c758feb84725ccccded529bd50b
/POO/Seccion.py
196a144aef94039a3d3bb6103a9c122167d4855a
[]
no_license
MRpintoM/Ejercicio_3Py
38b8c13a8946cf57f47bd3c341dbc4504c709766
5e90e36e43632b5bf8c9560805d329bedf3bdeac
refs/heads/master
2023-03-08T01:37:18.861742
2021-02-23T02:19:42
2021-02-23T02:19:42
341,399,662
0
0
null
null
null
null
UTF-8
Python
false
false
164
py
class Seccion():
    def registerSeccion(self):
        __seccion = input("Ingresa una sección:")
        __regseccion = (str(__seccion))
        return __regseccion
31640ba88e52306b8f9a5469864d401ce4d992e4
f101fe75892da8d7b5258d22bd31534d47f39ec1
/feature.py
039980b31ea2d443121913c748e60ed024f11554
[]
no_license
xianjunxia/Acoustic-event-detection-with-feature-space-attention-based-convolution-recurrent-neural-network
2ae9d4d0148f5082cc6739f753bf750e1940ecfb
d2a7b36700e798e0da02d3efebb27cd340235f36
refs/heads/master
2020-03-22T17:11:53.028900
2018-07-10T05:15:32
2018-07-10T05:15:32
140,379,734
1
0
null
null
null
null
UTF-8
Python
false
false
8,150
py
import wave import numpy as np import utils #import librosa from IPython import embed import os from sklearn import preprocessing import scipy.io as sio def load_audio(filename, mono=True, fs=44100): file_base, file_extension = os.path.splitext(filename) if file_extension == '.wav': _audio_file = wave.open(filename) # Audio info sample_rate = _audio_file.getframerate() sample_width = _audio_file.getsampwidth() number_of_channels = _audio_file.getnchannels() number_of_frames = _audio_file.getnframes() # Read raw bytes data = _audio_file.readframes(number_of_frames) _audio_file.close() # Convert bytes based on sample_width num_samples, remainder = divmod(len(data), sample_width * number_of_channels) if remainder > 0: raise ValueError('The length of data is not a multiple of sample size * number of channels.') if sample_width > 4: raise ValueError('Sample size cannot be bigger than 4 bytes.') if sample_width == 3: # 24 bit audio a = np.empty((num_samples, number_of_channels, 4), dtype=np.uint8) raw_bytes = np.fromstring(data, dtype=np.uint8) a[:, :, :sample_width] = raw_bytes.reshape(-1, number_of_channels, sample_width) a[:, :, sample_width:] = (a[:, :, sample_width - 1:sample_width] >> 7) * 255 audio_data = a.view('<i4').reshape(a.shape[:-1]).T else: # 8 bit samples are stored as unsigned ints; others as signed ints. dt_char = 'u' if sample_width == 1 else 'i' a = np.fromstring(data, dtype='<%s%d' % (dt_char, sample_width)) audio_data = a.reshape(-1, number_of_channels).T if mono: # Down-mix audio audio_data = np.mean(audio_data, axis=0) # Convert int values into float audio_data = audio_data / float(2 ** (sample_width * 8 - 1) + 1) # Resample if fs != sample_rate: audio_data = librosa.core.resample(audio_data, sample_rate, fs) sample_rate = fs return audio_data, sample_rate return None, None def load_desc_file(_desc_file): _desc_dict = dict() cnt = 1 for line in open(_desc_file): #print(cnt) cnt = cnt + 1 words = line.strip().split('\t') name = words[0].split('/')[-1] if name not in _desc_dict: _desc_dict[name] = list() _desc_dict[name].append([float(words[2]), float(words[3]), __class_labels[words[-1]]]) return _desc_dict def extract_mbe(_y, _sr, _nfft, _nb_mel): spec, n_fft = librosa.core.spectrum._spectrogram(y=_y, n_fft=_nfft, hop_length=_nfft/2, power=1) ''' import matplotlib.pyplot as plot print(y.shape) plot.subplot(411) Pxx, freqs, bins, im = plot.specgram(y, NFFT=_nfft, Fs=44100, noverlap=_nfft/2) print('freqs_{}'.format(freqs)) print(freqs.shape) print(spec.shape) plot.subplot(412) mel_basis = librosa.filters.mel(sr=_sr, n_fft=_nfft, n_mels=_nb_mel) print(mel_basis.shape) import scipy.io as sio sio.savemat("/data/users/21799506/Data/DCASE2017_Data/Evaluation/feat/Melbank",{'arr_0':mel_basis}) plot.plot(mel_basis[:,500]) plot.subplot(413) plot.plot(mel_basis[1,:]) plot.subplot(414) mbe = np.log(np.dot(mel_basis, spec)) print(mbe.shape) plot.plot(np.log(np.dot(mel_basis, spec))) plot.show() exit() ''' mel_basis = librosa.filters.mel(sr=_sr, n_fft=_nfft, n_mels=_nb_mel) return np.log(np.dot(mel_basis, spec)) # ################################################################### # Main script starts here # ################################################################### is_mono = True __class_labels = { 'brakes squeaking': 0, 'car': 1, 'children': 2, 'large vehicle': 3, 'people speaking': 4, 'people walking': 5 } # location of data. 
#folds_list = [1, 2, 3, 4] folds_list = [0] evaluation_setup_folder = '/data/users/21799506/Data/DCASE2017_Data/Evaluation/evaluation_setup/' audio_folder = '/data/users/21799506/Data/DCASE2017_Data/Evaluation/audio/' # Output feat_folder = '/data/users/21799506/Data/DCASE2017_Data/Evaluation/feat/' utils.create_folder(feat_folder) # User set parameters nfft = 2048 win_len = nfft hop_len = win_len / 2 nb_mel_bands = 40 sr = 44100 # ----------------------------------------------------------------------- # Feature extraction and label generation # ----------------------------------------------------------------------- # Load labels train_file = os.path.join(evaluation_setup_folder, 'street_fold{}_train.txt'.format(0)) evaluate_file = os.path.join(evaluation_setup_folder, 'street_fold{}_evaluate.txt'.format(0)) print(train_file) desc_dict = load_desc_file(train_file) desc_dict.update(load_desc_file(evaluate_file)) # contains labels for all the audio in the dataset ''' # Extract features for all audio files, and save it along with labels for audio_filename in os.listdir(audio_folder): audio_file = os.path.join(audio_folder, audio_filename) print('Extracting features and label for : {}'.format(audio_file)) y, sr = load_audio(audio_file, mono=is_mono, fs=sr) mbe = None if is_mono: mbe = extract_mbe(y, sr, nfft, nb_mel_bands).T else: for ch in range(y.shape[0]): mbe_ch = extract_mbe(y[ch, :], sr, nfft, nb_mel_bands).T if mbe is None: mbe = mbe_ch else: mbe = np.concatenate((mbe, mbe_ch), 1) label = np.zeros((mbe.shape[0], len(__class_labels))) tmp_data = np.array(desc_dict[audio_filename]) frame_start = np.floor(tmp_data[:, 0] * sr / hop_len).astype(int) frame_end = np.ceil(tmp_data[:, 1] * sr / hop_len).astype(int) se_class = tmp_data[:, 2].astype(int) for ind, val in enumerate(se_class): label[frame_start[ind]:frame_end[ind], val] = 1 tmp_feat_file = os.path.join(feat_folder, '{}_{}.npz'.format(audio_filename, 'mon' if is_mono else 'bin')) np.savez(tmp_feat_file, mbe, label) ''' # ----------------------------------------------------------------------- # Feature Normalization # ----------------------------------------------------------------------- for fold in folds_list: train_file = os.path.join(evaluation_setup_folder, 'street_fold{}_train.txt'.format(0)) evaluate_file = os.path.join(evaluation_setup_folder, 'street_fold{}_evaluate.txt'.format(0)) train_dict = load_desc_file(train_file) test_dict = load_desc_file(evaluate_file) X_train, Y_train, X_test, Y_test = None, None, None, None for key in train_dict.keys(): tmp_feat_file = os.path.join(feat_folder, '{}_{}.npz'.format(key, 'mon' if is_mono else 'bin')) dmp = np.load(tmp_feat_file) tmp_mbe, tmp_label = dmp['arr_0'], dmp['arr_1'] if X_train is None: X_train, Y_train = tmp_mbe, tmp_label else: X_train, Y_train = np.concatenate((X_train, tmp_mbe), 0), np.concatenate((Y_train, tmp_label), 0) for key in test_dict.keys(): tmp_feat_file = os.path.join(feat_folder, '{}_{}.npz'.format(key, 'mon' if is_mono else 'bin')) dmp = np.load(tmp_feat_file) tmp_mbe, tmp_label = dmp['arr_0'], dmp['arr_1'] if X_test is None: X_test, Y_test = tmp_mbe, tmp_label else: X_test, Y_test = np.concatenate((X_test, tmp_mbe), 0), np.concatenate((Y_test, tmp_label), 0) # Normalize the training data, and scale the testing data using the training data weights scaler = preprocessing.StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.transform(X_test) normalized_feat_file = os.path.join(feat_folder, 'mbe_{}_fold{}_GAN_allthreeclass.npz'.format('mon' 
if is_mono else 'bin', fold)) np.savez(normalized_feat_file, X_train, Y_train, X_test, Y_test) print(X_train.shape) print('normalized_feat_file : {}'.format(normalized_feat_file))
44856c368483e969256dc97c44a426028c3bbf50
980841fc87bba9a00d849f372528b888453b89ba
/Python 3 Scripting for System Administrators/Accepting Simple Positional Arguments.py
20e611ada99bb5acedb5953be31816bf9a56f018
[]
no_license
Frijke1978/LinuxAcademy
c682eedb48ed637ffe28a55cdfbc7d33ba635779
5100f96b5ba56063042ced3b2737057016caaff3
refs/heads/master
2022-03-24T12:28:25.413483
2019-12-21T12:27:02
2019-12-21T12:27:02
229,418,319
0
1
null
null
null
null
UTF-8
Python
false
false
2,205
py
Accepting Simple Positional Arguments

Most of the scripts and utilities that we work with accept positional arguments instead of prompting us for information after we’ve run the command. The simplest way for us to do this in Python is to use the sys module’s argv attribute. Let’s try this out by writing a small script that echoes our first argument back to us:

~/bin/param_echo

#!/usr/bin/env python3.6
import sys

print(f"First argument {sys.argv[0]}")

After we make this executable and give it a shot, we see that the first argument is the script itself:

$ chmod u+x ~/bin/param_echo
$ param_echo testing
First argument /home/user/bin/param_echo

That’s not quite what we wanted, but now we know that argv will contain the script and we’ll need to get the index of 1 for our first argument. Let’s adjust our script to echo all of the arguments except the script name and then echo the first positional argument by itself:

~/bin/param_echo

#!/usr/bin/env python3.6
import sys

print(f"Positional arguments: {sys.argv[1:]}")
print(f"First argument: {sys.argv[1]}")

Trying the same command again, we get a much different result:

$ param_echo testing
Positional arguments: ['testing']
First argument: testing

$ param_echo testing testing12 'another argument'
Positional arguments: ['testing', 'testing12', 'another argument']
First argument: testing

$ param_echo
Positional arguments: []
Traceback (most recent call last):
  File "/home/user/bin/param_echo", line 6, in <module>
    print(f"First argument: {sys.argv[1]}")
IndexError: list index out of range

This shows us a few things about working with argv:

- Positional arguments are based on spaces unless we explicitly wrap the argument in quotes.
- We can get a slice of the first index and after without worrying about it being empty.
- We risk an IndexError if we assume that there will be an argument for a specific position and one isn't given.

Using sys.argv is the simplest way to allow our scripts to accept positional arguments. In the next video, we’ll explore a standard library package that will allow us to provide a more robust command line experience with help text, named arguments, and flags.
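The closing sentence above points at a standard-library package with help text, named arguments, and flags; that package is argparse. As an illustrative sketch only (not part of the original lesson), the echo script rewritten with argparse might look like this:

#!/usr/bin/env python3.6
# Hypothetical argparse version of param_echo (illustrative, not from the lesson)
import argparse

parser = argparse.ArgumentParser(description="Echo positional arguments back")
parser.add_argument("words", nargs="*", help="arguments to echo")
args = parser.parse_args()

print(f"Positional arguments: {args.words}")
if args.words:
    print(f"First argument: {args.words[0]}")

Run with no arguments it prints an empty list instead of raising IndexError, and -h/--help comes for free.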
d3905ca9265658e5bf4b7a91a378ed0ea340b520
ac5e52a3fc52dde58d208746cddabef2e378119e
/exps-gsn-edf/gsn-edf_ut=3.0_rd=1_rw=0.04_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=82/sched.py
304905f0cc9f12230fa3ed58eca351b59ad910a9
[]
no_license
ricardobtxr/experiment-scripts
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
7bcebff7ac2f2822423f211f1162cd017a18babb
refs/heads/master
2023-04-09T02:37:41.466794
2021-04-25T03:27:16
2021-04-25T03:27:16
358,926,457
0
0
null
null
null
null
UTF-8
Python
false
false
337
py
-X FMLP -Q 0 -L 2 105 400
-X FMLP -Q 0 -L 2 85 250
-X FMLP -Q 0 -L 2 70 250
-X FMLP -Q 1 -L 2 66 200
-X FMLP -Q 1 -L 2 64 250
-X FMLP -Q 1 -L 2 50 200
-X FMLP -Q 2 -L 1 41 150
-X FMLP -Q 2 -L 1 40 125
-X FMLP -Q 2 -L 1 34 100
-X FMLP -Q 3 -L 1 33 200
-X FMLP -Q 3 -L 1 20 250
-X FMLP -Q 3 -L 1 10 100
c52b1c6c264352ca8f6c39666cd4b3d7f7e23005
555944b5b196fc6e52db6abd913c8cd7eaa00c0a
/HW7/test_FizzBuzz.py
6537d71235828ab0a8b797cdae6248a4aba9fce4
[]
no_license
shinhoj01/CS362
bdd4d8fd1ee5e5f866d5a3432478701890204af6
34dddbb108eff3448af84f8687772202f0dcbfef
refs/heads/main
2023-03-22T00:45:15.184379
2021-03-03T10:19:33
2021-03-03T10:19:33
334,580,473
0
0
null
null
null
null
UTF-8
Python
false
false
381
py
import pytest
from FizzBuzz import fizzbuzz


class TestCase:
    def test(self):
        # lists of inputs and outputs
        inp = (2, 3, 5, 9, 10, 15, 55, 75, 99, 100)
        otp = [2, "Fizz", "Buzz", "Fizz", "Buzz", "FizzBuzz",
               "Buzz", "FizzBuzz", "Fizz", "Buzz"]
        # Used map to apply the function to list
        res = list(map(fizzbuzz, inp))
        assert res == otp
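The FizzBuzz module under test is not part of this record; a minimal implementation consistent with the expected outputs above would be (hypothetical sketch):

# Hypothetical FizzBuzz.py matching the expectations in the test above (not included in this record)
def fizzbuzz(n):
    if n % 15 == 0:
        return "FizzBuzz"
    if n % 3 == 0:
        return "Fizz"
    if n % 5 == 0:
        return "Buzz"
    return n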
84ea5c637ee27630e4f66977e073aaeeb817e3da
d127e063dd6578a08f48cd4fdff626047a6ee080
/todo/admin.py
9f2b1bfd5e480f55eac46b92ed7cef23e5a60730
[]
no_license
nagarjunnas/Todo
571c33fd6eac3dc603da7283e4d60e88c9362910
2610b86243056b4e8d0d84c61deae8a3518e7307
refs/heads/master
2020-04-01T04:56:22.858222
2018-10-13T15:37:25
2018-10-13T15:37:25
152,882,708
0
0
null
null
null
null
UTF-8
Python
false
false
1,171
py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import csv

from django.http import HttpResponse
from django.contrib import admin

from .models import Todo

# Register your models here.


class TodoAdmin(admin.ModelAdmin):
    search_fields = ['title']
    list_filter = ('status', 'created_at', 'modified_at', 'created_date_time')
    actions = ["export_as_csv"]
    list_display = ('title', 'description', 'created_date_time', 'status', 'created_at', 'modified_at')

    def export_as_csv(self, request, queryset):
        meta = self.model._meta
        field_names_headers = [field.name.title().replace('_', ' ') for field in meta.fields]
        field_names = [field.name for field in meta.fields]
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename=todos.csv'
        writer = csv.writer(response)
        writer.writerow(field_names_headers)
        for obj in queryset:
            row = writer.writerow([getattr(obj, field) for field in field_names])
        return response

    export_as_csv.short_description = "Export Selected as CSV"


admin.site.register(Todo, TodoAdmin)
0e647dd279872f9ca98db25c23550b1a1e7e5fb4
df83f97ed2c6dd199005e96bc7c494cfb3b49f8c
/GeeksForGeeks/String Rotations.py
42ed217509cdfcaf23e1e662e437f71bfb0dfa7b
[]
no_license
poojan14/Python-Practice
45f0b68b0ad2f92bbf0b92286602d64f3b1ae992
ed98acc788ba4a1b53bec3d0757108abb5274c0f
refs/heads/master
2022-03-27T18:24:18.130598
2019-12-25T07:26:09
2019-12-25T07:26:09
null
0
0
null
null
null
null
UTF-8
Python
false
false
469
py
'''
Given strings s1 and s2, you need to find if s2 is a rotated
version of the string s1. The strings are lowercase.
'''

if __name__ == '__main__':
    T = int(input())
    for _ in range(T):
        s1 = input()
        s2 = input()
        if len(s1) == len(s2):
            tmp = s1 + s1          # It gives all possible rotations
            if s2 in tmp:          # of a string.
                print(1)
            else:
                print(0)
        else:
            print(0)
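The doubling trick the comment describes can be exercised on its own; a small self-contained check (the function name is illustrative, not from the sample):

def is_rotation(s1, s2):
    # s2 is a rotation of s1 iff the lengths match and s2 occurs inside s1 + s1
    return len(s1) == len(s2) and s2 in s1 + s1

assert is_rotation("abcd", "cdab")
assert not is_rotation("abcd", "acbd")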
7bd9dbb485425614b3b564465bc42b23de0ad1ab
6cf7035780de933f98ad533ecbaf18744f5546a6
/src/apps/accounts/apps.py
770feb5cbdc38a5ec0883c47455350c1d452abed
[]
no_license
Lakanbi37/Social
2a21ac7a7507f1e762c8261950539f946f956b8e
6221d34dac6fd31d68866c76c8f1d19afaffe49f
refs/heads/master
2023-01-30T18:55:38.200523
2020-04-30T04:18:58
2020-04-30T04:18:58
259,552,475
0
0
null
null
null
null
UTF-8
Python
false
false
149
py
from django.apps import AppConfig


class AccountsConfig(AppConfig):
    name = 'apps.accounts'
    label = "accounts"
    verbose_name = "Accounts"
7c9975bd2527fff01822ea1ebc940d8c3c0a8bc8
37d8a02e4976a8ca516500d5b9d2fa6626c2b9e3
/A_Scorecard/example/test/scorecard_functions_V3_test.py
6bb0dd6dc82772ca9a4635b439dd71e7090a3287
[]
no_license
sucre111/xiaoxiang_fengkong_peixun
b0bb59243346fc02fea8126d729af1fb29bf907d
5eac4e3011e5bbc7e59e79296c12e81074166551
refs/heads/master
2021-09-17T05:16:19.362017
2018-06-28T11:33:29
2018-06-28T11:33:29
null
0
0
null
null
null
null
UTF-8
Python
false
false
986
py
# coding = utf-8
import pandas as pd

target = "y"
from example.scorecard_functions_V3 import *

train_data_file = "D:/conf_test/A_Scorecard/application.csv"


def BadRateEncodingTest():
    df = pd.read_csv(train_data_file, encoding='latin1')
    # Process the label: "Fully Paid" marks a good borrower; "Charged Off" marks a defaulter
    df['y'] = df['loan_status'].map(lambda x: int(x == 'Charged Off'))
    col = "home_ownership"
    regroup = BinBadRate(df, col, target, grantRateIndicator=0)[1]
    print("regroup:")
    print(regroup)
    temp_regroup = regroup[[col, 'bad_rate']].set_index([col])
    print("temp group:")
    print(temp_regroup)
    br_dict = regroup[[col, 'bad_rate']].set_index([col]).to_dict(orient='index')
    print("br_dict:")
    print(br_dict)
    for k, v in br_dict.items():
        print(k)
        print(v)
        br_dict[k] = v['bad_rate']
    badRateEnconding = df[col].map(lambda x: br_dict[x])


if __name__ == "__main__":
    BadRateEncodingTest()
af34fd1034f5561c8b73ec840986022f67f088ed
803f0fbc5973ff31fd5faca5c0f2981b2c52a591
/Python/tensorflow/variable.py
c1d7bc9416c924c4de82917774c881ca2d032ea4
[]
no_license
MiohitoKiri5474/CodesBackUp
00ab52bd55732b8164a42cffd664407878f4390e
4247fa032c8e88259dcc3992a21c510b6f2e8850
refs/heads/master
2023-08-09T14:46:10.445697
2023-08-04T01:12:58
2023-08-04T01:12:58
126,162,563
3
1
null
null
null
null
UTF-8
Python
false
false
313
py
import tensorflow as tf

B = tf.Variable(10)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # variables must be initialized before they are used
    print(sess.run(B))
    print(sess.run(B.assign(100)))  # a variable can be updated with assign, and the op must be run inside a Session
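This sample targets the TensorFlow 1.x graph/Session API. For comparison only (not part of the record), the same experiment under TensorFlow 2.x eager execution could look like:

# Hypothetical TensorFlow 2.x equivalent: eager execution, no Session or initializer needed
import tensorflow as tf

B = tf.Variable(10)
print(B.numpy())    # 10
B.assign(100)
print(B.numpy())    # 100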
67d99b5c3eced18b0bdf7022c3511339e5b942b1
e79c8521fb55586e356e17caef7358152ab3a21f
/ismo/bin/generate_samples.py
294b1cb7a48b322adb5a2e97d43c94f586d54277
[ "MIT" ]
permissive
kjetil-lye/iterative_surrogate_optimization
1e06a0727b8385926eab81bbf3d8133b8ceed1f1
f5de412daab1180612837f4c950203ad87d62f7e
refs/heads/master
2023-04-14T03:28:42.715302
2020-10-20T07:25:26
2020-10-20T07:25:26
187,802,115
6
3
MIT
2023-02-02T06:47:00
2019-05-21T09:08:20
Python
UTF-8
Python
false
false
2,211
py
#!/bin/env python
import os.path

if __name__ == '__main__':
    import argparse

    from ismo.samples import create_sample_generator
    import numpy as np

    parser = argparse.ArgumentParser(description="""
Generate samples and write them to file using numpy.savetxt.

Each row represents a single sample, and each value represents each component of the given sample.

Example use would be:

    y = numpy.loadtxt('filename.txt')
    # y[k,i] is component i of sample k
""")

    parser.add_argument('--generator', type=str, default='monte-carlo',
                        help='Name of generator to use, either "monte-carlo" or "sobol"')

    parser.add_argument('--dimension', type=int, required=True,
                        help="Number of dimensions")

    parser.add_argument('--number_of_samples', type=int, required=True,
                        help='Number of samples to generate')

    parser.add_argument('--start', type=int, default=0,
                        help='The first sample (in other words, number of samples to skip first)')

    parser.add_argument('--output_file', type=str, required=True,
                        help='Output filename (full path)')

    parser.add_argument('--output_append', action='store_true',
                        help='Append output to end of file')

    args = parser.parse_args()

    generator = create_sample_generator(args.generator)

    samples = generator(args.number_of_samples, args.dimension, start=args.start)

    if args.output_append:
        if os.path.exists(args.output_file):
            previous_samples = np.loadtxt(args.output_file)

            if len(previous_samples.shape) == 1:
                # In case of a 1D array, we need to make sure to treat it as a two-dim array.
                previous_samples = previous_samples.reshape((previous_samples.shape[0], 1))

            new_samples = np.zeros((samples.shape[0] + previous_samples.shape[0], args.dimension))
            new_samples[:previous_samples.shape[0], :] = previous_samples
            new_samples[previous_samples.shape[0]:, :] = samples

            samples = new_samples

    np.savetxt(args.output_file, samples)
67db2b1feed5ce6b57cddb306d4a56163cf97ada
27b599705364707392d66cff4df9e38f28e54433
/ex50/projects/gothonweb/app.py
9114cef9a3766c083c1378c783d9f2d0113410e6
[]
no_license
huanglao2002/lphw
a615598568fe089dda230829c323a2cbe2c1af4f
4fce5374613e713c02b5b86fc39c290c496ed1ba
refs/heads/master
2023-01-23T15:17:13.442619
2020-11-25T13:30:55
2020-11-25T13:30:55
315,808,466
0
0
null
null
null
null
UTF-8
Python
false
false
292
py
from flask import Flask
from flask import render_template

app = Flask(__name__)


@app.route('/')
def index():
    # greeting = "Hello Jim"
    # return render_template("index.html", greeting=greeting)
    return render_template("index.html")


if __name__ == "__main__":
    app.run()
76848a321556dc72d28c4478cf14ea644c690541
11194e8da8d5d496bdadd82ae03c9b6109dc4f6a
/id_PERM/AH/019_AH_PERM.py
0d47195763f7e9d9c25cce4cdf9a89e2897bc8fc
[]
no_license
prepiscak/beatson_rosalind
f4e37ec87dd6d6206c09dbdb78a6ae829efb69fb
529465bdc2edb83eafb687a729513e2e50aff4db
refs/heads/master
2020-04-27T01:31:10.631357
2019-09-26T09:58:27
2019-09-26T09:58:27
173,968,130
0
0
null
null
null
null
UTF-8
Python
false
false
328
py
#!/usr/bin/python3
# usage: 019_PERM.py n

# import some stuff
import sys
from itertools import permutations as perm

# get int from command line
n = int(sys.argv[1])

# get all the permutations of the set 1:n in a list of lists
perm_list = list(perm(set(range(1, n + 1))))

# print
print(len(perm_list))
for p in perm_list:
    print(*p)
9bfe15dacc41979c5b07071075e289d74a471d5f
ac2a27debbc62fb4ccd71e550d4cdeb167674d43
/firewall/test/sim_firewall_tcp/run.py
88080576ee170f4b67a4b9e6ed1eb48ac6e28e5c
[]
no_license
TopologyMapping/netfpga-tutorial
8a4227594e96d1f68443c3bc241165abadb051a1
3457b3dda94b5a90d3dbc66cb367764adb826f14
refs/heads/master
2020-04-12T14:55:27.511141
2016-05-18T15:38:29
2016-05-18T15:38:29
31,033,110
2
2
null
null
null
null
UTF-8
Python
false
false
2,415
py
#!/bin/env python

from NFTest import *
from NFTest import simReg

phy2loop0 = ('../connections/conn', 'nf2c0')

nftest_init(sim_loop=[], hw_config=[phy2loop0])
nftest_start()

pdrop = [1210, 80, 22, 667]  # Ports to drop 1210, 80, 22, 667

nftest_regwrite((reg_defines.FIREWALL_DPORT1_REG()), pdrop[0])
nftest_regwrite((reg_defines.FIREWALL_DPORT2_REG()), pdrop[1])
nftest_regwrite((reg_defines.FIREWALL_DPORT3_REG()), pdrop[2])
nftest_regwrite((reg_defines.FIREWALL_DPORT4_REG()), pdrop[3])

nftest_regread_expect((reg_defines.FIREWALL_DPORT1_REG()), pdrop[0])
nftest_regread_expect((reg_defines.FIREWALL_DPORT2_REG()), pdrop[1])
nftest_regread_expect((reg_defines.FIREWALL_DPORT3_REG()), pdrop[2])
nftest_regread_expect((reg_defines.FIREWALL_DPORT4_REG()), pdrop[3])

simReg.regDelay(1000)  # 1us

nftest_regread_expect((reg_defines.SRAM_BASE_ADDR()), (pdrop[2] << 16 | pdrop[3]))
nftest_regread_expect((reg_defines.SRAM_BASE_ADDR() + 4), (pdrop[0] << 16 | pdrop[1]))

HDR = scapy.TCP()
TTL = 64
eth_hdr = 14
ipv4_hdr = 20
tcp_hdr = 20

LOAD = ''
length = 10
for genr in range(length):
    LOAD += chr(randint(0, 255))

#DA = "0xD0:0x27:0x88:0xBC:0xA8:0xE9"
#SA = "0x0:0x4E:0x46:0x32:0x43:0x0"
#DST_IP = '192.168.101.10'
#SRC_IP = '192.168.101.20'

PORTS = [567, pdrop[0], 876, pdrop[3], 21, pdrop[2], 37, pdrop[1]]
NUM_PKTS = len(PORTS)
NUM_PKTS_DROPPED = 4

i = 0
while(i < NUM_PKTS):
    HDR.dport = PORTS[i]
    HDR.sport = PORTS[NUM_PKTS - 1 - i]
    HDR.flags = 0b00010
    DA = "0xD0:0x27:0x88:0xBC:0xA8:0x%02x" % (i)
    SA = "0x0:0x4E:0x46:0x32:0x43:0x%02x" % (i)
    DST_IP = '192.168.101.%0.3i' % (i)
    SRC_IP = '192.168.101.%0.3i' % (i + 1)
    pkt = scapy.Ether(dst=DA, src=SA)/scapy.IP(dst=DST_IP, src=SRC_IP, ttl=TTL)/HDR/LOAD
    #pkt.len = (len(LOAD))+eth_hdr+ipv4_hdr+tcp_hdr
    #pkt.seq = i*(50)
    nftest_send_phy('nf2c0', pkt)
    print "SRC_IP: %s" % (SRC_IP)
#    if(PORTS[i] not in pdrop):
    if(PORTS[i] != pdrop[0] and PORTS[i] != pdrop[1] and PORTS[i] != pdrop[2] and PORTS[i] != pdrop[3]):
        pkt = scapy.Ether(dst=DA, src=SA)/scapy.IP(dst=DST_IP, src=SRC_IP, ttl=TTL-1)/HDR/LOAD
        #pkt.len = (len(LOAD))+eth_hdr+ipv4_hdr+tcp_hdr
        #pkt.seq = i*(50)
        nftest_expect_dma('nf2c0', pkt)
    i = i + 1

nftest_barrier()

simReg.regDelay(1000)  # 1us

print "Checking pkt errors"
# check counter values
nftest_regread_expect(reg_defines.MAC_GRP_0_RX_QUEUE_NUM_PKTS_STORED_REG(), NUM_PKTS)

nftest_finish()
[ "bob@crunchbang" ]
bob@crunchbang
d68125104a4b0d63b31ac8183cc83ed87465a499
c7f6f03449bc1cdbda9f62db66ac4aefefd836ea
/preprocessing.py
032424323a888ee4c19e23d1b9c650231c21b5e9
[]
no_license
paul-freeman/aars
d8d382b1f11ed2e25c72e811fd8bac1a5459b298
f054a8cb7fdc27bbca8e2001329b6a5cfbc470ad
refs/heads/master
2021-11-21T20:51:23.462501
2021-08-09T06:36:24
2021-08-09T06:36:24
229,409,003
0
0
null
null
null
null
UTF-8
Python
false
false
9,576
py
import sys import glob import os.path import json AA_LIST = ['ala', 'asn', 'asp', 'gln', 'leu', 'glu', 'gly', 'his', 'ile', 'lys', 'arg', 'met', 'phe', 'pro', 'pyl', 'sep', 'ser', 'thr', 'val', 'cys', 'trp', 'tyr'] KINGDOM_LIST = ['bact', 'arch'] def parse_fasta(path): fasta_data = [] with open(path) as lines: for line in lines: if line[0] == '>': xs = line[1:].strip().split('_') if not (xs and xs[0] and xs[0].lower() in AA_LIST): raise RuntimeError( "Amino Acid ({}) not recognized in {}".format( xs[0], line ) ) aa = xs.pop(0).lower() if (xs and xs[0] and xs[0].lower() == 'reg'): xs.pop(0) regions = True else: regions = False if not (xs and xs[0] and xs[0].lower() in KINGDOM_LIST): raise RuntimeError( "Kingdom ({}) not recognized in {}".format( xs[0], line ) ) kingdom = xs.pop(0).lower() pdb = None if xs and xs[0] and len(xs[0]) == 4: pdb = xs.pop(0).lower() if not (xs and xs[0] and len(xs[0]) == 1): raise RuntimeError( "'{}' not recognized as first letter of genus in {}".format( xs[0], line ) ) letter = xs.pop(0).upper() try: genus, num = '_'.join(xs).lower().split('/') if genus[-3:] == "aln": genus = genus[:-4] except ValueError: genus, num = xs[0].lower(), "0" fasta_data.append({ 'aa': aa, 'kingdom': kingdom, 'regions': regions, 'pdb': pdb, 'letter': letter, 'genus': genus, 'num': num }) return fasta_data def search_data_folder(fasta_data, ext): """Glob the `data` folder looking for matches""" if fasta_data['pdb']: f1 = '{}*_'.format(fasta_data['pdb']) + ext else: f1 = '{}{}_{}_{}'.format( fasta_data['letter'], fasta_data['genus'], fasta_data['aa'], ext ) return glob.glob('data/**/*' + f1, recursive=True) def search_supplemental_folder(fasta_data, ext): """Glob the `supplemental` folder looking for matches""" if fasta_data['pdb']: f = '{}_*_{}_*_{}.fasta'.format( fasta_data['aa'], fasta_data['pdb'], ('aa' if ext == 'aa' else 'nuc') ) else: f = '{}_{}_{}_{}_{}.fasta'.format( fasta_data['aa'], fasta_data['kingdom'], fasta_data['letter'], fasta_data['genus'], ('aa' if ext == 'aa' else 'nuc') ) fastas = glob.glob('data/supplemental/*' + f, recursive=False) if fastas: return fastas # nothing found: check for txt extension if fasta_data['pdb']: f = '{}_*_{}_*_{}.txt'.format( fasta_data['aa'], fasta_data['pdb'], ('aa' if ext == 'aa' else 'nuc') ) else: f = '{}_{}_{}_{}_{}.txt'.format( fasta_data['aa'], fasta_data['kingdom'], fasta_data['letter'], fasta_data['genus'], ('aa' if ext == 'aa' else 'nuc') ) txts = glob.glob('data/supplemental/*' + f, recursive=False) return txts def search_downloads_folder(fasta_data, ext): """Search the Downloads folder looking for matches""" downloads = os.path.join(os.path.expanduser('~'), 'Downloads') if fasta_data['pdb']: f = '{}_*_{}_*_{}.fasta'.format( fasta_data['aa'], fasta_data['pdb'], ('aa' if ext == 'aa' else 'nuc') ) else: f = '{}_{}_{}_{}_{}.fasta'.format( fasta_data['aa'], fasta_data['kingdom'], fasta_data['letter'], fasta_data['genus'], ('aa' if ext == 'aa' else 'nuc') ) return glob.glob('{}/*'.format(downloads) + f, recursive=False) def write_standardized_data(fasta_data): """Look through Alex's data and write file (if found) in standard format.""" for ext in ['aa', 'nuc']: out_path = 'data/{}.{}'.format(make_filename(fasta_data), ext) if not os.path.exists(out_path): g1 = search_downloads_folder(fasta_data, ext) if not g1: g1 = search_supplemental_folder(fasta_data, ext) # possible location in Alex's data g2 = [] if not g1: g2 = search_data_folder(fasta_data, ext) # SPECIAL CASE 1 if fasta_data['genus'] == 'obscuriglobus': f = 
'Gemmata_{}_{}'.format( fasta_data['aa'], ext ) g2 = glob.glob('data/**/*' + f, recursive=True) # SPECIAL CASE 2 if fasta_data['aa'] == 'leu' and not g2: f = '{}{}_{}ALPHA_{}'.format( fasta_data['letter'], fasta_data['genus'], fasta_data['aa'], ext ) g2 = glob.glob('data/**/*' + f, recursive=True) # SPECIAL CASE 3 if fasta_data['genus'] == 'asiaticus': f = 'CAmoebophilusAsiaticus_{}_{}'.format( fasta_data['aa'], ext ) g2 = glob.glob('data/**/*' + f, recursive=True) g = g1 + g2 if not g: # print('Missing data for: {}'.format(out_path)) continue else: with open(g[0]) as f_in: with open(out_path, 'w') as f_out: for line in f_in: f_out.write(line) def write_binary_data(filename): dat = parse_fasta(filename) new_dat = [] isMissingData = False for fasta_data in dat: prefix = make_filename(fasta_data) aa_file = 'data/{}.aa'.format(prefix) nuc_file = 'data/{}.nuc'.format(prefix) try: os.remove(aa_file + '.bad') except FileNotFoundError: pass try: os.remove(nuc_file + '.bad') except FileNotFoundError: pass aa_dat = read_fasta_file(aa_file)[2] nuc_dat = read_fasta_file(nuc_file)[2] if not aa_dat or not nuc_dat: if not aa_dat: print("Missing aa data for " + prefix) if not nuc_dat: print("Missing nuc data for " + prefix) isMissingData = True continue if len(aa_dat) * 3 + 3 == len(nuc_dat): nuc_dat = nuc_dat[:-3] elif len(aa_dat) * 3 != len(nuc_dat): err = "Data incorrect length: {}: ({} aas) ({} nucs): expected ({} nucs)".format( prefix, len(aa_dat), len(nuc_dat), len(aa_dat) * 3 ) try: os.rename(aa_file, aa_file + '.bad') except FileExistsError: pass try: os.rename(nuc_file, nuc_file + '.bad') except FileExistsError: pass print(err) isMissingData = True continue raise RuntimeError(err) fasta_data['aa_dat'] = aa_dat fasta_data['nuc_dat'] = nuc_dat new_dat.append(fasta_data) if isMissingData: pass with open(filename + '.json', 'w') as json_file: json.dump(new_dat, json_file, indent=2) def make_filename(fasta_data): return '_'.join( [x for x in [ fasta_data['aa'], fasta_data['kingdom'], fasta_data['pdb'], fasta_data['letter'], fasta_data['genus'], # fasta_data['num'] ] if x] ) def read_fasta_file(path): """read the data from the fasta file""" if path is None: return None, None, None try: header, gi, dat = None, None, '' with open(path) as path_p: for next_dat in path_p.readlines(): if next_dat.strip() == '': continue if next_dat[0] == '>': header = next_dat.strip() if next_dat[0:4] == '>gi|': try: gi = next_dat[4:].split('|')[0].split()[0] except IndexError: gi = None else: gi = None continue else: dat += next_dat.strip() return header, gi, dat except FileNotFoundError: return None, None, None def main(filename): for fasta_data in parse_fasta(filename): write_standardized_data(fasta_data) write_binary_data(filename) if __name__ == "__main__": main(sys.argv[1])
7befe330fcfccd5eda39f24b075dfc42fad72e4e
895a414a8467be8532bbac52eaa199ed2cfd5d75
/greedy/PriyankaAndToys.py
8d9ec6347229a25186662a215f599e9822c284c8
[]
no_license
burakkurt/Hackerrank_Python
92e0c4c17edd8a3e57ad9ae1ba4e2e2dd459f983
a22e632c59100bcebb775b2c1c4551640336ba38
refs/heads/master
2021-01-12T14:16:41.608708
2016-10-06T15:53:57
2016-10-06T15:53:57
70,005,682
0
0
null
null
null
null
UTF-8
Python
false
false
362
py
numToys = int(raw_input());
toys = map(int, raw_input().split());
toys.sort();

priceLeft = toys[0];
priceRight = priceLeft + 4;
numToysToBuy = 1;

for i in range(1, numToys):
    if(toys[i] >= priceLeft and toys[i] <= priceRight):
        continue;
    else:
        numToysToBuy += 1;
        priceLeft = toys[i];
        priceRight = priceLeft + 4;

print numToysToBuy;
d8edf4a0202d663a88b7fda1373d4c25ec1d3f06
b183c98f7749a015ca420940be85f8af6c004bb3
/medium/78.py
0ce95a3bbd0bad7e58ef50181285396d20b4dc60
[ "Apache-2.0" ]
permissive
oneTaken/leetcode
b8cfa7e0ff42de2eaef8b64cceef4f183006612e
f9357d839ac8fa6333b0d7eeb2028ba28a63764c
refs/heads/master
2020-03-12T10:08:12.200753
2018-05-05T05:12:24
2018-05-05T05:12:24
130,566,847
0
0
null
null
null
null
UTF-8
Python
false
false
352
py
class Solution:
    def subsets(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        import itertools
        ans = []
        for i in range(len(nums) + 1):
            _ans = itertools.combinations(nums, i)
            _ans = list(map(list, _ans))
            ans.extend(_ans)
        return ans
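A quick driver (not part of the submission) showing the order in which this combinations-based solution emits subsets:

if __name__ == "__main__":
    print(Solution().subsets([1, 2, 3]))
    # [[], [1], [2], [3], [1, 2], [1, 3], [2, 3], [1, 2, 3]]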
049ada4f72aaa8b3ab20c0db52e19f9c60c95d9d
7343859ea0cd609a74a5683aaa3729398c329d43
/visitors/admin.py
e40ec64eb3ff609de7c64fd3573e9bac1e9c71d8
[]
no_license
qiubite31/tw_visitor
6a7ab00bad476ef8180d5888290a7895a93b49d0
b08715d32096c9139d396efc15077666ce1cd5e9
refs/heads/master
2020-05-20T07:27:19.842678
2019-03-07T15:13:30
2019-03-07T15:13:30
63,789,216
2
0
null
null
null
null
UTF-8
Python
false
false
132
py
from django.contrib import admin

from .models import ArrivalRecord

# Register your models here.
admin.site.register(ArrivalRecord)
[ "Dragon Lin" ]
Dragon Lin
ad03c6c89d53f7c088759d2f9b0a1bb92b0aa033
654aba6352851ff88d8acfa04658529c75509b74
/scrapy_sample/scrapy_sample/items.py
885749a754e1d85322e9cb98999bc35d4b94340c
[ "Apache-2.0" ]
permissive
neilnee/octopus
2ecb93b6a83a85826782238f515cebba9c0c72a9
7981e8a926f0ea9d5a09bea6e4828fdc0f7f0e62
refs/heads/master
2021-08-31T16:22:18.744762
2017-12-22T02:10:17
2017-12-22T02:10:17
104,200,838
0
0
null
null
null
null
UTF-8
Python
false
false
308
py
# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class HuxiuItem(scrapy.Item):
    title = scrapy.Field()
    link = scrapy.Field()
    desc = scrapy.Field()
    posttime = scrapy.Field()
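Items declared this way are filled like dictionaries inside a spider callback; a hypothetical usage sketch (the spider and selectors are illustrative, not part of this record):

# Hypothetical spider callback using HuxiuItem (illustrative only)
def parse(self, response):
    item = HuxiuItem()
    item['title'] = response.css('h1::text').get()
    item['link'] = response.url
    item['desc'] = response.css('meta[name=description]::attr(content)').get()
    yield item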
3b04f91b01c4c46ccf2a29e2a33fa5ec59f1e0e0
16c24cba9ca47b27dafed2595159b5970fbebbd2
/shan/build_dataset.py
b608756cb823e9f56c47209f56d0fad525ba939c
[]
no_license
jakisou/SHAN
834c80271c2a0ad85c12667d7ddd0187d3aa431a
7e5b5f4970808407f2dc9498e8600bc85b18a4c9
refs/heads/master
2022-04-07T23:49:11.382667
2020-02-29T07:50:12
2020-02-29T07:50:12
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,115
py
import random
import pickle
import numpy as np
import copy

max_length = 90

random.seed(1234)

with open('../Data/remap.pkl', 'rb') as f:
    reviews_df = pickle.load(f)
    item_cate_list = pickle.load(f)
    user_count, item_count, cate_count, example_count = pickle.load(f)

train_set = []
test_set = []
for reviewerID, hist in reviews_df.groupby('reviewerID'):
    pos_list = hist['asin'].tolist()
    tim_list = hist['unixReviewTime'].tolist()

    def gen_neg():
        neg = pos_list[0]
        while neg in pos_list:
            neg = random.randint(0, item_count - 1)
        return neg

    neg_list = [gen_neg() for i in range(len(pos_list))]
    length = len(pos_list)
    valid_length = min(length, max_length)
    i = 0
    tim_list_session = list(set(tim_list))
    tim_list_session.sort()
    pre_session = []
    for t in tim_list_session:
        count = tim_list.count(t)
        new_session = pos_list[i:i + count]
        if t == tim_list_session[0]:
            pre_session.extend(new_session)
        else:
            if i + count < valid_length - 1:
                pre_session_copy = copy.deepcopy(pre_session)
                train_set.append((reviewerID, pre_session_copy, new_session, pos_list[i + count], 1))
                train_set.append((reviewerID, pre_session_copy, new_session, neg_list[i + count], 0))
                pre_session.extend(new_session)
            else:
                pos_item = pos_list[i]
                if count > 1:
                    pos_item = random.choice(new_session)
                    new_session.remove(pos_item)
                neg_index = pos_list.index(pos_item)
                pos_neg = (pos_item, neg_list[neg_index])
                test_set.append((reviewerID, pre_session, new_session, pos_neg))
                break
        i += count

random.shuffle(train_set)
random.shuffle(test_set)

assert len(test_set) == user_count

with open('dataset.pkl', 'wb') as f:
    pickle.dump(train_set, f, pickle.HIGHEST_PROTOCOL)
    pickle.dump(test_set, f, pickle.HIGHEST_PROTOCOL)
    pickle.dump((user_count, item_count), f, pickle.HIGHEST_PROTOCOL)
ccb94a4d32c9ff95e32281b681554a1c8059a209
79d41f92e0c0018bd83fc1fafed8e481fc5d3d41
/migrations/versions/84084dd1c091_.py
72d4fb99e71faf80c704c0595eefb4eadde8471e
[]
no_license
fiveCubes/udacity_Fyyur
9cc109778fa8d8d7cd6c19f09384d8e65401de68
0228a85bacc0c563805ef300ea3899bd0ce5a293
refs/heads/master
2022-12-06T15:50:10.583937
2020-09-04T22:18:28
2020-09-04T22:18:28
292,956,940
0
0
null
null
null
null
UTF-8
Python
false
false
1,242
py
"""empty message Revision ID: 84084dd1c091 Revises: 4192e26b4382 Create Date: 2020-09-02 15:37:31.769251 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. revision = '84084dd1c091' down_revision = '4192e26b4382' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_table('Show') # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('Show', sa.Column('id', sa.INTEGER(), server_default=sa.text('nextval(\'"Show_id_seq"\'::regclass)'), autoincrement=True, nullable=False), sa.Column('venue_id', sa.INTEGER(), autoincrement=False, nullable=False), sa.Column('artist_id', sa.INTEGER(), autoincrement=False, nullable=False), sa.Column('start_time', postgresql.TIMESTAMP(), autoincrement=False, nullable=False), sa.ForeignKeyConstraint(['artist_id'], ['Artist.id'], name='Show_artist_id_fkey'), sa.ForeignKeyConstraint(['venue_id'], ['Venue.id'], name='Show_venue_id_fkey'), sa.PrimaryKeyConstraint('id', name='Show_pkey') ) # ### end Alembic commands ###
067c502cd6a0bed78cd8a82684f8da8fa51c15dc
1c85f6ba14b7762cf14fc5453b07a93dc735afc2
/python/algorithms/hw0/randomhw/insertion_sort.py
1a0f8c31c2d4e29570591ab625bf0c579d84a1ff
[]
no_license
jashook/ev6
557bceb82d4e0e241c51f7ba27cc4cfa00f98408
97e7787b23fae38719538daf19a6ab119519e662
refs/heads/master
2021-01-22T23:16:06.702451
2013-10-12T18:39:44
2013-10-12T18:39:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,003
py
#!/usr/bin/python

################################################################################
################################################################################
#
# Author: Jarret Shook
#
# Module: insertion_sort.py
#
# Modifications:
#
# 29-Jan-13: Version 1.0: Created
#
# Timeperiod: ev6
#
################################################################################
################################################################################

import sys

################################################################################
################################################################################

def insertion_sort(s):
    """ Input: list s to be sorted
        Output: sorted list
    """
    arr = range(len(s)); del arr[0];

    for i in arr:
        j = i
        data = s[j]
        while (j > 0 and s[j] < s[j-1]):
            s[j] = s[j - 1]
            s[j-1] = data
            j = j - 1;

    return s

#if __name__ == "__main__":
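The commented-out main guard suggests a driver was planned; a minimal one, assuming the file's original Python 2 semantics (range() returning a list, so del arr[0] works), might be:

# Hypothetical driver (assumes Python 2, as in the rest of the file)
if __name__ == "__main__":
    print(insertion_sort([5, 2, 4, 6, 1, 3]))   # [1, 2, 3, 4, 5, 6]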
92c64ad45c5b329b5722db23cdfe5a17175b703f
8ea3fbf18c58e4905a894b7c44059b726f9d522a
/ch4ex2.py
7b917d45392afc2e801a6064fabbbc0e9b7811f5
[]
no_license
leslieawicke/thinkpython
ecc17f91d173462b77da96f9a9ed72cda65cf9f5
231468f471df696561fbe81085f109c0a7fc8373
refs/heads/master
2021-04-29T07:23:59.413862
2018-03-09T02:04:13
2018-03-09T02:04:13
121,819,725
0
0
null
null
null
null
UTF-8
Python
false
false
562
py
import math
import turtle
from ch4ex import arc

bob = turtle.Turtle()
radius = 250
n = 5
overlap = 45

def petal(t, r, n, o):
    """draws a petal shape with given radius (r) and angle (a)
    derived from the number of petals desired (n). t = turtle"""
    a = 360/n + o
    arc(t, r, a)
    bob.lt(180 - a)
    arc(t, r, a)
    bob.lt(180 - o)

def flower(t, r, n, o):
    for i in range(n):
        petal(t, r, n, o)

flower(bob, radius, n, overlap)

turtle.mainloop()

# the angle of bob's turn at the end of his first arc should be 180 degrees minus the angle of the arc he just drew.
1a95afb8fe2a0cbbec27d84d31a8ca32f302e201
d1847e96c14a7d06aeab2a557eb25b1c6d5170d7
/Python Web App/myapp.py
65c164ffd18f5fef19f59380536518c22555e13e
[]
no_license
ANA-POTJE/WEB_Applications
5dc043b9b63ed5ddb1cc8a17dba4d5de6fb68712
c9c0869b9f8787eb8e400a4f774f9ba387e3bf71
refs/heads/master
2022-11-09T07:53:30.720297
2020-06-18T14:27:53
2020-06-18T14:27:53
273,253,091
0
0
null
null
null
null
UTF-8
Python
false
false
1,230
py
import yfinance as yf
import streamlit as st

st.write("""
# Simple Stock Price App

Shown are the stock closing price and volume of Google!
""")

# https://towardsdatascience.com/how-to-get-stock-data-using-python-c0de1df17e75

# define the ticker symbol
tickerSymbol = 'GOOGL'
# get data on this ticker
tickerData = yf.Ticker(tickerSymbol)
# get the historical prices for this ticker
tickerDf = tickerData.history(period='1d', start='2010-5-31', end='2020-5-31')
# Open  High  Low  Close  Volume  Dividends  Stock Splits

st.line_chart(tickerDf.Close)
st.line_chart(tickerDf.Volume)

# Running the web app
# After saving the code into a file called myapp.py, fire up the command prompt
# (or Power Shell in Microsoft Windows) and run the following command:
#####
##### WORKED IN ANACONDA PROMPT!!! (conda activate env first!)
#####
#   streamlit run myapp.py
#
# Next, we should see the following message:
#   > streamlit run myapp.py
#   You can now view your Streamlit app in your browser.
#   Local URL: http://localhost:8501
#   Network URL: http://10.0.0.11:8501
#
# In a short moment, an internet browser window should pop up and direct you to the
# created web app by taking you to http://localhost:8501 as shown below.
cb6129890bf338a65a0d59e425a58c5f8b914d32
498c3189f21f4545eb9829a9c63c6ef6dcce229e
/Algorithms/Strings/Palindrome-Index.py
e5274a4cd869d229daf74495d91fbd15ff84551e
[]
no_license
Damian1724/Hackerrank
af83c74a5a5aa6b4e1684c7a7133571c8dd7d2f8
9c58363e6214eabb4b55330e276c7b414273beee
refs/heads/master
2020-03-17T12:26:18.543723
2018-12-06T23:52:43
2018-12-06T23:52:43
133,103,693
0
0
null
null
null
null
UTF-8
Python
false
false
896
py
"""
Author: Damian Cruz
source: HackerRank (https://www.hackerrank.com)
problem name: Algorithms > Strings > Palindrome-Index
problem url: https://www.hackerrank.com/challenges/palindrome-index/problem
"""

def checking(word, pos):
    a = 0
    b = len(word) - 1
    while a < b:
        if a == pos: a += 1
        if b == pos: b -= 1
        if word[a] != word[b]:
            return False
        a += 1
        b -= 1
    return True

cases = int(input())
for i in range(cases):
    s = input()
    j = 0
    k = len(s) - 1
    answer = 0
    valor = False
    while j < k:
        if s[j] != s[k] and s[j+1] == s[k]:
            valor = checking(s, j)
            if valor:
                answer = j
                break
        if s[j] != s[k] and s[j] == s[k-1]:
            valor = checking(s, k)
            if valor:
                answer = k
                break
        j += 1
        k -= 1
    if valor: print(answer)
    else: print(-1)
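For small inputs the two-pointer scan above can be cross-checked against a brute-force search; a hypothetical reference sketch (not part of the submission):

# Hypothetical brute-force cross-check: try removing each index and test for a palindrome
def palindrome_index_bruteforce(s):
    if s == s[::-1]:
        return -1
    for i in range(len(s)):
        t = s[:i] + s[i+1:]
        if t == t[::-1]:
            return i
    return -1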
070bacfa4034a83dfc962b353c22e495e86f20fd
31ae8cf31da9729a93155fff20f079caf853df98
/objects.py
22bb6a1b37da920c06e7828cf0553a7de20b89e4
[]
no_license
cuthb3rt/physics_sim
afd3ac1e00f64fdefb1ea10c9c1223f60f34c4b9
39cee1ca7457ea7a90cdafef41b795210fa7697e
refs/heads/master
2021-01-19T02:05:37.173117
2016-07-21T20:39:17
2016-07-21T20:39:17
29,778,320
1
0
null
null
null
null
UTF-8
Python
false
false
2,301
py
__author__ = 'Andy'

import physics
import vec_math as vm


class Particle():
    """
    Particle class
    mass = float
    i_p = 3vec
    i_v = 3vec
    """
    NUM_PARTICLES = 1
    ALL_PARTICLES = []

    def __repr__(self):
        # return "ID: %s; Mass: %s; Position: %s; Velocity: %s; Acceleration: %s" % (self.id, self.m, self.x, self.v, self.a)
        return "%s\tm: %s\tx: %s\tv: %s\ta: %s" % (self.id, self.m, vm.v_pretty(self.x), vm.v_pretty(self.v), vm.v_pretty(self.a))

    def __init__(self, m, i_x, i_v):
        self.id = Particle.NUM_PARTICLES
        Particle.NUM_PARTICLES += 1
        Particle.ALL_PARTICLES.append(self)
        self.m = m
        self.x = [float(i) for i in i_x]
        self.v = [float(i) for i in i_v]
        self.a = vm.NULL_VEC
        self.proper_time = 0

    def update_acceleration(self):
        """
        Calculate acceleration due to all other particles
        TODO force_dict = {self.id: [0, 0, 0]}
        set up a dictionary of the force due to each particle
        then can have already calculated the force due to most particles by the end time...
        will almost halve sim times
        :return:
        """
        res_f = [0, 0, 0]  # resultant force so far
        for particle in Particle.ALL_PARTICLES:
            if not particle.id == self.id:  # don't count force due to itself...
                force = physics.calculate_force(self, particle)
                res_f = vm.v_add(res_f, force)
                # print "Force on %s due to %s = %s" % (self.id, particle.id, res_f)
        # print res_f
        self.a = vm.v_div(res_f, self.m)

    def update_velocity(self, delta_t):
        """
        Assume that velocity is initial plus acceleration*time interval
        :return:
        """
        # print self.a
        self.v = vm.v_add(self.v, vm.v_mult(self.a, delta_t))

    def update_position(self, delta_t):
        """
        Assume that new position is old position plus velocity*time interval
        :return:
        """
        self.x = vm.v_add(self.x, vm.v_mult(self.v, delta_t))

    def iterate(self, delta_t):
        self.update_acceleration()
        self.update_velocity(delta_t)
        self.update_position(delta_t)
        self.proper_time += delta_t
a17893e3403ed935e013c8026c259ffe22a74959
64ef95039cec3c508b8e3ab911a3318121778119
/day_3_ai_boot_camp_.py
40deaa3367a0ea97035e9ce5b03d418a833f1188
[]
no_license
ALEENA-KT/Practical-AI-Bootcamp
c98f752112e8febb7e7d324ded177f5d36dd0180
0a12a5124e4587decec21354f0f0dbbc40ea4fc9
refs/heads/main
2023-08-18T02:40:34.992591
2021-09-13T18:22:45
2021-09-13T18:22:45
404,694,854
0
0
null
2021-09-09T12:13:57
2021-09-09T11:25:19
null
UTF-8
Python
false
false
3,442
py
# -*- coding: utf-8 -*- """Day 3 AI BOOT CAMP .ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1e_Ee9jcv9rIfmnVXTXQAktNKBwu0GejP """ import tensorflow_datasets as tfds print(tfds.list_builders()) dataloader = tfds.load("cifar10", as_supervised=True) train, test = dataloader["train"], dataloader["test"] import tensorflow as tf directory_url = 'https://storage.googleapis.com/download.tensorflow.org/data/illiad/' file_names = ['cowper.txt', 'derby.txt', 'butler.txt'] file_paths = [ tf.keras.utils.get_file(file_name, directory_url + file_name) for file_name in file_names ] dataset = tf.data.TextLineDataset(file_paths) import torch from torch.utils.data import Dataset from torchvision import datasets from torchvision.transforms import ToTensor import matplotlib.pyplot as plt training_data = datasets.FashionMNIST( root="data", train=True, download=True, transform=ToTensor() ) test_data = datasets.FashionMNIST( root="data", train=False, download=True, transform=ToTensor() ) import tensorflow as tf directory_url = 'https://storage.googleapis.com/download.tensorflow.org/data/illiad/' file_names = ['cowper.txt', 'derby.txt', 'butler.txt'] file_paths = [ tf.keras.utils.get_file(file_name, directory_url + file_name) for file_name in file_names ] dataset = tf.data.TextLineDataset(file_paths) for line in dataset.take(5): print(line.numpy()) import torch from torch.utils.data import Dataset from torchvision import datasets from torchvision.transforms import ToTensor from torch.utils.data import DataLoader import matplotlib.pyplot as plt training_data = datasets.FashionMNIST( root="data", train=True, download=True, transform=ToTensor() ) test_data = datasets.FashionMNIST( root="data", train=False, download=True, transform=ToTensor() ) train_dataloader = DataLoader(training_data, batch_size=64, shuffle=True) test_dataloader = DataLoader(test_data, batch_size=64, shuffle=True) train_features, train_labels = next(iter(train_dataloader)) print(f"Feature batch shape: {train_features.size()}") print(f"Labels batch shape: {train_labels.size()}") img = train_features[0].squeeze() label = train_labels[0] plt.imshow(img, cmap="gray") plt.show() print(f"Label: {label}") import tensorflow_datasets as tfds dataloader = tfds.load("cifar10", as_supervised=True) train, test = dataloader["train"], dataloader["test"] train = train.map( lambda image, label: (tf.image.convert_image_dtype(image, tf.float32), label) ).cache().map( lambda image, label: (tf.image.random_flip_left_right(image), label) ).map( lambda image, label: (tf.image.random_contrast(image, lower=0.0, upper=1.0), label) ).shuffle( 100 ).batch( 64 ).repeat() import tensorflow as tf directory_url = 'https://storage.googleapis.com/download.tensorflow.org/data/illiad/' file_names = ['cowper.txt', 'derby.txt', 'butler.txt'] file_paths = [ tf.keras.utils.get_file(file_name, directory_url + file_name) for file_name in file_names ] dataset = tf.data.TextLineDataset(file_paths) import tensorflow_datasets as tfds from tensorflow.keras.utils import to_categorical import tensorflow as tf import matplotlib.pyplot as plt from tensorflow.keras.callbacks import EarlyStopping import tensorflow.keras.backend as K import numpy as np from lrfinder import LRFinder
95a262821a56a75e0139657f3d8ad7f45772edde
b1edca0e9ea6171493d8cb4232f5c1fc35e853ed
/SVF/tissue-bw-prop.py
3c9aa9a048fcd01b451a31f5b95daed9a591e3d3
[]
no_license
leoguignard/Mouse-Atlas
bcceeb2ba56d5adeb1ab2a5590c28b73f84dd865
d2985fbb6251b60a5830a4d5e7e112a92738a665
refs/heads/master
2018-12-20T03:58:10.587284
2018-09-17T05:59:33
2018-09-17T05:59:33
123,352,501
1
0
null
null
null
null
UTF-8
Python
false
false
14,741
py
# This file is subject to the terms and conditions defined in # file 'LICENSE', which is part of this source code package. # Author: Leo Guignard (guignardl...@[email protected]) from IO import imread, imsave, SpatialImage from scipy import ndimage as nd import numpy as np import os from multiprocessing import Pool from TGMMlibraries import lineageTree from scipy import interpolate import sys def get_spherical_coordinates(x, y, z): ''' Computes spherical coordinates for an x, y, z Cartesian position ''' r = np.linalg.norm([x, y, z]) theta = np.arctan2(y, x) phi = np.arccos(z/r) alpha = (np.pi/2 + np.arctan2(x, z)) % (2*np.pi) return r, theta, phi, alpha def write_header_am_2(f, nb_points, length): ''' Header for Amira .am files ''' f.write('# AmiraMesh 3D ASCII 2.0\n') f.write('define VERTEX %d\n'%(nb_points*2)) f.write('define EDGE %d\n'%nb_points) f.write('define POINT %d\n'%((length)*nb_points)) f.write('Parameters {\n') f.write('\tContentType "HxSpatialGraph"\n') f.write('}\n') f.write('VERTEX { float[3] VertexCoordinates } @1\n') f.write('EDGE { int[2] EdgeConnectivity } @2\n') f.write('EDGE { int NumEdgePoints } @3\n') f.write('POINT { float[3] EdgePointCoordinates } @4\n') f.write('VERTEX { float Vcolor } @5\n') f.write('VERTEX { int Vbool } @6\n') f.write('EDGE { float Ecolor } @7\n') f.write('VERTEX { int Vbool2 } @8\n') def write_to_am_2(path_format, LT_to_print, t_b = None, t_e = None, length = 5, manual_labels = None, default_label = 5, new_pos = None): ''' Writes a lineageTree into an Amira readable data (.am format). Args: path_format: string, path to the output. It should contain 1 %03d where the time step will be entered LT_to_print: lineageTree, lineageTree to write t_b: int, first time point to write (if None, min(LT.to_take_time) is taken) t_e: int, last time point to write (if None, max(LT.to_take_time) is taken) note: if there is no 'to_take_time' attribute, LT_to_print.time_nodes is considered instead (historical) length: int, length of the track to print (how many time before). manual_labels: {id: label, }, dictionary that maps cell ids to default_label: int, default value for the manual label new_pos: {id: [x, y, z]}, dictionary that maps a 3D position to a cell ID. if new_pos == None (default) then LT_to_print.pos is considered. 
''' if not hasattr(LT_to_print, 'to_take_time'): LT_to_print.to_take_time = LT_to_print.time_nodes if t_b is None: t_b = min(LT_to_print.to_take_time.keys()) if t_e is None: t_e = max(LT_to_print.to_take_time.keys()) if new_pos is None: new_pos = LT_to_print.pos if manual_labels is None: manual_labels = {} for t in range(t_b, t_e + 1): f = open(path_format%t, 'w') nb_points = len(LT_to_print.to_take_time[t]) write_header_am_2(f, nb_points, length) points_v = {} for C in LT_to_print.to_take_time[t]: C_tmp = C positions = [] for i in xrange(length): C_tmp = LT_to_print.predecessor.get(C_tmp, [C_tmp])[0] positions.append(new_pos[C_tmp]) points_v[C] = positions f.write('@1\n') for i, C in enumerate(LT_to_print.to_take_time[t]): f.write('%f %f %f\n'%tuple(points_v[C][0])) f.write('%f %f %f\n'%tuple(points_v[C][-1])) f.write('@2\n') for i, C in enumerate(LT_to_print.to_take_time[t]): f.write('%d %d\n'%(2*i, 2*i+1)) f.write('@3\n') for i, C in enumerate(LT_to_print.to_take_time[t]): f.write('%d\n'%(length)) f.write('@4\n') tmp_velocity = {} for i, C in enumerate(LT_to_print.to_take_time[t]): for p in points_v[C]: f.write('%f %f %f\n'%tuple(p)) f.write('@5\n') for i, C in enumerate(LT_to_print.to_take_time[t]): f.write('%f\n'%(manual_labels.get(C, default_label))) f.write('%f\n'%(0)) f.write('@6\n') for i, C in enumerate(LT_to_print.to_take_time[t]): f.write('%d\n'%(int(manual_labels.get(C, default_label) != default_label))) f.write('%d\n'%(0)) f.write('@7\n') for i, C in enumerate(LT_to_print.to_take_time[t]): f.write('%f\n'%(np.linalg.norm(points_v[C][0] - points_v[C][-1]))) f.write('@8\n') for i, C in enumerate(LT_to_print.to_take_time[t]): f.write('%d\n'%(1)) f.write('%d\n'%(0)) f.close() def read_param_file(): ''' Asks for, reads and formats the parameter file ''' p_param = raw_input('Please enter the path to the parameter file/folder:\n') p_param = p_param.replace('"', '') p_param = p_param.replace("'", '') p_param = p_param.replace(" ", '') if p_param[-4:] == '.csv': f_names = [p_param] else: f_names = [os.path.join(p_param, f) for f in os.listdir(p_param) if '.csv' in f and not '~' in f] for file_name in f_names: f = open(file_name) lines = f.readlines() f.close() param_dict = {} i = 0 nb_lines = len(lines) while i < nb_lines: l = lines[i] split_line = l.split(',') param_name = split_line[0] if param_name in ['labels', 'downsampling']: name = param_name out = [] while (name == param_name or param_name == '') and i < nb_lines: if split_line[1].isdigit(): out += [int(split_line[1])] else: out += [float(split_line[1])] i += 1 if i < nb_lines: l = lines[i] split_line = l.split(',') param_name = split_line[0] param_dict[name] = np.array(out) elif param_name in ['label_names']: name = param_name out = [] while (name == param_name or param_name == '') and i < nb_lines: out += [split_line[1].replace('\n', '').replace('\r', '')] i += 1 if i < nb_lines: l = lines[i] split_line = l.split(',') param_name = split_line[0] param_dict[name] = np.array(out) else: param_dict[param_name] = split_line[1].strip() i += 1 if param_name == 'time': param_dict[param_name] = int(split_line[1]) path_LT = param_dict.get('path_to_LT', '.') path_VF = param_dict.get('path_to_VF', '.') path_mask = param_dict.get('path_to_mask', '.') t = param_dict.get('time', 0) path_out_am = param_dict.get('path_to_am', '.') labels = param_dict.get('labels', []) DS = param_dict.get('downsampling', []) ani = np.float(param_dict.get('anisotropy', 1.)) path_DB = param_dict.get('path_DB', '.') path_div = param_dict.get('path_div', None) 
path_bary = param_dict.get('path_bary', None) label_names = param_dict.get('label_names', None) invert = param_dict.get('invert', '1') != '0' return (path_LT, path_VF, path_mask, t, path_out_am, labels, DS, path_DB, path_div, path_bary, label_names, ani, invert) def get_division_mapping(path_div, VF): ''' Computes the mapping between found divisions and SVF objects Args: path_div: sting, name of the division file VF: lineageTree ''' ass_div = {} if path_div is not None: f = open(path_div) lines = f.readlines() f.close() divisions_per_time = {} for l in lines[1:]: x, y, z, t = np.array(l.split(',')[:-1]).astype(float) if t in VF.time_nodes: divisions_per_time.setdefault(int(t), []).append(np.array([x, y, z]) * [1, 1, 5]) div_in_VF = {} dist_to_div = {} for t, d in divisions_per_time.iteritems(): idx3d, data = VF.get_idx3d(t) dist, idxs = idx3d.query(d) div_C = np.array(data)[idxs] dist_to_div.update(dict(zip(div_C, dist))) ass_div.update(dict(zip(div_C, d))) return ass_div def write_DB(path_DB, path_div, VF, tracking_value, tb, te): ''' Write the csv database in Database.csv Args: path_DB: string, path to the output database path_div: string, path to the potential division file VF: lineageTree tracking_value: {int: int, }, dictionary that maps an object id to a label tb: int, first time point to write te: int, last time point to write ''' ass_div = get_division_mapping(path_div) f2 = open(path_DB + 'Database.csv', 'w') f2.write('id, mother_id, x, y, z, r, theta, phi, t, label, D-x, D-y, D-z, D-r, D-theta, D-phi\n') for t in range(tb, te+1): for c in VF.time_nodes[t]: S_p = (-1, -1, -1) if VF.predecessor.get(c, []) != []: M_id = VF.predecessor[c][0] else: M_id = -1 P = tuple(VF.pos[c]) if path_bary is not None: S_p = tuple(get_spherical_coordinates(*(barycenters[t] - VF.pos[c]))[:-1]) L = tracking_value.get(c, -1) D_P = tuple(ass_div.get(c, [-1, -1, -1])) if path_bary is not None: D_S_p = (-1, -1, -1) if not c in ass_div else tuple(get_spherical_coordinates(*(barycenters[t] - ass_div[c]))[:-1]) else: D_S_p = (-1, -1, -1) f2.write(('%d, %d, %.5f, %.5f, %.5f, %.5f, %.5f, %.5f, %d, %d,' + '%.5f, %.5f, %.5f, %.5f, %.5f, %.5f\n')%((c, M_id) + P + S_p + (t, L) + D_P + D_S_p)) f2.close() def get_barycenter(fname, tb, te): ''' Reads and coes a linear piecewise interpolation/extrapolation barycenters Args: fname: string, name of the barycenter file (each line as 'x, y, z, t') tb: first time point to interpolate te: last time point to interpolate Returns: barycenters_interp: {int:[float, float, float], }, dictionary mapping a time point to the interpolated barycenter at that time barycenters: {int:[float, float, float], }, dictionary mapping a time point to the barycenter for each time in fname ''' f = open(fname) lines = f.readlines() f.close() barycenters = {} for l in lines: split_l = l.split(',') try: barycenters[int(split_l[-1])] = tuple(float(v) for v in split_l[:-1]) except Exception as e: pass times = sorted(barycenters) Xb, Yb, Zb = np.array([barycenters[t] for t in times]).T Xb_f = interpolate.InterpolatedUnivariateSpline(times, Xb, k=1) Yb_f = interpolate.InterpolatedUnivariateSpline(times, Yb, k=1) Zb_f = interpolate.InterpolatedUnivariateSpline(times, Zb, k=1) Ti = np.arange(tb - 1, te + 2) barycenters_interp = dict(zip(Ti, zip(Xb_f(Ti), Yb_f(Ti), Zb_f(Ti)))) return barycenters_interp, barycenters if __name__ == '__main__': (path_LT, path_VF, path_mask, t, path_out_am, labels, DS, path_DB, path_div, path_bary, label_names, ani, invert) = read_param_file() if not 
os.path.exists(path_out_am): os.makedirs(path_out_am) if not os.path.exists('mask_images/'): os.makedirs('mask_images/') VF = lineageTree(path_VF) tb = VF.t_b te = VF.t_e if path_bary is not None: try: barycenters, b_dict = get_barycenter(path_bary, tb, te) except Exception as e: print "Wrong file path to barycenter, please specify the path to the .csv file." print "The process will continue as if no barycenter were provided," print "disabling the computation of the spherical coordinates" print "error raised: ", e path_bary = None im = imread(path_mask) for l in labels: masked_im = im == l tmp = nd.binary_opening(masked_im, iterations = 3) tmp = nd.binary_closing(tmp, iterations = 4) imsave('mask_images/%03d.tif'%l, SpatialImage(tmp).astype(np.uint8)) mask_dir = 'mask_images/' masks = sorted([('mask_images/%03d.tif'%l, label_names[i]) for i, l in enumerate(labels)], cmp=lambda x1, x2:cmp(x1[1], x2[1])) masks = [m[0] for m in masks] init_cells = {m: set() for m in range(len(masks))} x_max, y_max, z_max = 0, 0, 0 for i, path_mask in enumerate(masks): if invert: mask = imread(path_mask).transpose(1, 0, 2) mask = mask[:,::-1,:] else: mask = imread(path_mask) max_vals = np.array(mask.shape) - 1 for c in VF.time_nodes[t]: pos_rounded = np.floor(VF.pos[c]/(np.array(DS)*[1.,1.,ani])).astype(np.int) pos_rounded = tuple(np.min([max_vals, pos_rounded], axis = 0)) if mask[pos_rounded]: init_cells[i].add(c) tracking_value = {} for t, cs in init_cells.iteritems(): for c in cs: to_treat = [c] tracking_value.setdefault(c, set()).add(t) while to_treat != []: c_tmp = to_treat.pop() next_cells = VF.successor.get(c_tmp, []) to_treat += next_cells for n in next_cells: tracking_value.setdefault(n, set()).add(t) to_treat = [c] tracking_value.setdefault(c, set()).add(t) while to_treat != []: c_tmp = to_treat.pop() next_cells = VF.predecessor.get(c_tmp, []) to_treat += next_cells for n in next_cells: tracking_value.setdefault(n, set()).add(t) tracking_value = {k:np.sum(list(v)) for k, v in tracking_value.iteritems() if len(v) == 1} write_to_am_2(path_out_am + '/seg_t%04d.am', VF, t_b= tb, t_e= te, manual_labels = tracking_value, default_label = np.max(tracking_value.values())+1, length = 7) for im_p in masks: os.remove(im_p) write_DB(path_DB, path_div, VF, tracking_value, tb, te)
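A note on usage, not part of the original script: read_param_file above expects a CSV in which each row is a parameter name followed by its value, and the multi-valued parameters (labels, downsampling, label_names) continue over consecutive rows whose first field either repeats the name or is left empty. A minimal sketch of such a file, with placeholder paths and label numbers, could look like:

path_to_LT,/data/sample/lineage_tree.bin
path_to_VF,/data/sample/svf.bin
path_to_mask,/data/sample/tissue_mask.tif
time,100
path_to_am,/data/sample/amira_out
path_DB,/data/sample/db/
anisotropy,5.0
invert,1
labels,1
,2
label_names,head
,tail
downsampling,4
,4
,1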
5f8e1a0fa5bcf1fa053e4be9b091e851fbbb20e0
63870f39c2fd700e5474247a4dfc3cb7cbfea7ac
/Power.py
b66588bdb8fe3987dcee302c91a7948cac0ff742
[]
no_license
DaviPMello27/PythonImageProcessing
2616e306a79d0d26c7af5106ee0110b6e7bd1d3d
c348cf1a38880a76ddb3e8a8edd3cfbbc21e5df3
refs/heads/master
2020-08-23T14:15:59.250354
2019-11-08T20:49:10
2019-11-08T20:49:10
216,636,278
0
0
null
null
null
null
UTF-8
Python
false
false
1,307
py
import matplotlib.pyplot as plt
import cv2

def showImage(title, pos, effect = None):
    image = plt.subplot(pos)
    image.set_title(title)
    image.set_yticks([]), image.set_xticks([])
    # img is single-channel once it has been converted to grayscale below,
    # so only run the BGR->RGB conversion while it still has 3 channels.
    if img.ndim == 3:
        image.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), cmap = effect)
    else:
        image.imshow(img, cmap = effect)

def calculateIntensity(image, plot):
    intervals = ()
    intensities = []
    for i in range(256):
        intervals = intervals + (i,)
        intensities.append(0)
    # Build a normalized intensity histogram: each pixel adds its share (in percent).
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            g = image[y, x]
            intensities[g] += 100/(image.shape[0] * image.shape[1])
    graph.set_title("Intensity")
    graph.set_xlabel("Intensity")
    plot.bar(intervals, intensities, align = "edge", width = 0.3)

def transformPow(image, fact):
    # Power-law (gamma) transform: s = (255 / 255**fact) * r**fact
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            image[y,x] = (255/255**fact) * (image[y,x] ** fact)

imgName = input("Filename: ")
intens = float(input("Type the value of the intensity factor: "))
img = cv2.imread(imgName)
graph, graph2 = plt.subplot(224), plt.subplot(223)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
showImage("Grayscale Image", 221, "gray")
calculateIntensity(img, graph2)
transformPow(img, intens)
showImage("Pow Image", 222)
calculateIntensity(img, graph)
plt.show()
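The transformPow routine above applies the power-law (gamma) mapping s = (255 / 255**fact) * r**fact one pixel at a time, which is slow in pure Python. The same mapping can be written as a single vectorized NumPy expression; the sketch below is an alternative formulation, not part of the original repository:

import numpy as np

def transform_pow_vectorized(image, fact):
    # Same mapping as transformPow, applied to the whole array at once:
    # compute in float, keep values in [0, 255], and convert back to 8-bit.
    scaled = (255.0 / 255.0 ** fact) * np.power(image.astype(np.float64), fact)
    return np.clip(scaled, 0, 255).astype(np.uint8)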
5273b3149612f78e9ebeacd06c2f9328d000c15e
56c5cbd3629c206fe31da740a3213040464a5483
/driver/views.py
fce79dd6a5c59fe9aa1c00a9f05c50185b4b3a9e
[]
no_license
AhmedYasser27/Fleet-MS
f211df471743eb78130ebc858d52a6016f8951f1
06e2395406956482f56de1df5029985bf37c4441
refs/heads/master
2022-12-10T01:16:59.008445
2020-09-04T20:56:45
2020-09-04T20:56:45
292,945,204
0
0
null
null
null
null
UTF-8
Python
false
false
2,378
py
from django.shortcuts import render,get_object_or_404,redirect
from django.http import HttpResponse
from django.http import Http404
from django.template import loader
from .models import Driver
from .forms import DriverForm

# Create your views here.

def index(request):
    if request.user.is_authenticated:
        form=DriverForm()
        return render(request,'driver/index.html',{'form':form})
    else:
        return redirect("http://localhost:8000/home/404")

def driver(request):
    if request.POST:
        form=DriverForm(request.POST)
        if form.is_valid():
            form.save()
            success_message='Driver registered'
            form=DriverForm()
            return render(request,'driver/index.html',{'form':form,'success' : success_message})
    else:
        if request.user.is_authenticated:
            form=DriverForm()
            error_message='Something went wrong error'
            return render(request,'driver/index.html',{ 'form' : form ,'error':error_message})
        else:
            return redirect("http://localhost:8000/home/404")

def drivers(request):
    if request.POST:
        form=DriverForm(request.POST)
        return render(request,'driver/index.html',{'form':form})
    else:
        if request.user.is_authenticated:
            drivers = Driver.objects.all()
            return render(request,'driver/driverlist.html',{ 'drivers' : drivers ,'user':request.user})
        else:
            return redirect("http://localhost:8000/home/404")

def delete(request,id):
    if request.POST:
        return render(request,'driver/index.html',{'form':form})
    else:
        if request.user.is_authenticated:
            drivers = Driver.objects.get(id=id)
            drivers.delete()
            return redirect('http://localhost:8000/driver/drivers')
        else:
            return redirect("http://localhost:8000/home/404")

def edit(request,id):
    if request.method == "POST":
        driver=Driver.objects.get(id=id)
        form=DriverForm(request.POST,instance=driver)
        if form.is_valid():
            form.save()
            return redirect('http://localhost:8000/driver/drivers')
    elif request.user.is_authenticated:
        driver=Driver.objects.get(id=id)
        form=DriverForm(instance=driver)
        return render(request,'driver/driverEdit.html',{ 'form' : form ,'id':id})
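The views above redirect to hard-coded http://localhost:8000/... URLs, which stop working as soon as the host, port, or URL prefix changes. Django's redirect() also accepts a URL pattern name, so a more portable variant of, say, the delete view could look like the sketch below; the URL names 'driver-list' and 'home-404' are hypothetical and would have to be defined in the project's urls.py:

from django.shortcuts import redirect
from .models import Driver

def delete(request, id):
    if not request.user.is_authenticated:
        return redirect('home-404')        # hypothetical name of the 404 view
    Driver.objects.get(id=id).delete()
    return redirect('driver-list')         # hypothetical name of the driver list view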
9c726b92873e564d1807d53aeb25eb416f88fba3
e6c65e2e354336a4bea5b6a4ccbccd3682915fe2
/out-bin/py/google/fhir/seqex/bundle_to_seqex_test.runfiles/pypi__apache_beam_2_9_0/apache_beam/runners/worker/sideinputs_test.py
57d59bfa69ad81880b5237c6baf3ea3f0406a320
[ "Apache-2.0" ]
permissive
rasalt/fhir-datalab
c30ab773d84983dd04a37e9d0ddec8bf2824b8a4
3e329fc8b4226d3e3a4a7c23c306a86e7a9ea0de
refs/heads/master
2021-10-09T05:51:04.593416
2018-12-21T18:11:03
2018-12-22T05:38:32
162,744,237
0
0
null
null
null
null
UTF-8
Python
false
false
155
py
/home/rkharwar/.cache/bazel/_bazel_rkharwar/0ddaa3627472ad9d1367a008236ce2f5/external/pypi__apache_beam_2_9_0/apache_beam/runners/worker/sideinputs_test.py
9b65587d2edd34c01f8d3c9311f82ec3d053bda6
0d54e167332199c80e75fa00489dac6c590e3ff3
/MFE.py
15057e6203a76b55cfff767ae4d8b46692216e6f
[]
no_license
TudorCretu/PI-LSTM
4ab1cea8e2ec62a31ce41f6c49b367ca9e47f638
ea2efa71f722746900915c38bb2729805282c82a
refs/heads/master
2022-07-22T15:01:57.924019
2020-05-18T16:32:28
2020-05-18T16:32:28
264,999,161
4
0
null
null
null
null
UTF-8
Python
false
false
15,477
py
import numpy as np import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec import h5py Lx = 1.75 * np.pi Lz = 1.2 * np.pi Re = 600 X = 0 Y = 1 Z = 2 alpha = 2 * np.pi / Lx beta = np.pi / 2 gamma = 2 * np.pi / Lz Kay = np.sqrt(alpha ** 2 + gamma ** 2) Kby = np.sqrt(beta ** 2 + gamma ** 2) Kaby = np.sqrt(alpha ** 2 + beta ** 2 + gamma ** 2) N8 = 2 * np.sqrt(2) / np.sqrt((alpha ** 2 + gamma ** 2) * (4 * alpha ** 2 + 4 * gamma ** 2 + np.pi ** 2)) # Domain is 0 < x < Lx ; -1 < y < 1; 0 < z < Lz def da1(a): return beta ** 2 / Re - beta ** 2 / Re * a[0] - np.sqrt(3 / 2) * beta * gamma / Kaby * a[5] * a[7] + np.sqrt( 3 / 2) * beta * gamma / Kby * a[1] * a[2] def da2(a): return -(4 * beta ** 2 / 3 + gamma ** 2) * a[1] / Re + 5 * np.sqrt(2) * gamma ** 2 / (3 * np.sqrt(3) * Kay) * a[3] \ * a[5] - \ gamma ** 2 / (np.sqrt(6) * Kay) * a[4] * a[6] - alpha * beta * gamma / (np.sqrt(6) * Kay * Kaby) * a[4] * a[ 7] - \ np.sqrt(3 / 2) * beta * gamma / Kby * (a[0] * a[2] + a[2] * a[8]) def da3(a): return -(beta ** 2 + gamma ** 2) / Re * a[2] + 2 / np.sqrt(6) * alpha * beta * gamma / (Kay * Kby) * ( a[3] * a[6] + a[4] * a[5]) + \ (beta ** 2 * (3 * alpha ** 2 + gamma ** 2) - 3 * gamma ** 2 * (alpha ** 2 + gamma ** 2)) / ( np.sqrt(6) * Kaby * Kby * Kay) * a[3] * a[7] def da4(a): return -(3 * alpha ** 2 + 4 * beta ** 2) / (3 * Re) * a[3] - alpha / np.sqrt(6) * a[0] * a[4] - 10 / ( 3 * np.sqrt(6)) * alpha ** 2 / Kay * a[1] * a[5] - \ np.sqrt(3 / 2) * alpha * beta * gamma / (Kay * Kby) * a[2] * a[6] - np.sqrt( 3 / 2) * alpha ** 2 * beta ** 2 / (Kay * Kby * Kaby) * a[2] * a[7] - \ alpha / np.sqrt(6) * a[4] * a[8] def da5(a): return -(alpha ** 2 + beta ** 2) / Re * a[4] + alpha / np.sqrt(6) * a[0] * a[3] + alpha ** 2 / (np.sqrt(6) * Kay) \ * a[1] * a[6] - \ alpha * beta * gamma / (np.sqrt(6) * Kay * Kaby) * a[1] * a[7] + alpha / np.sqrt(6) * a[3] * a[ 8] + 2 / np.sqrt(6) * alpha * beta * gamma / (Kay * Kby) * a[2] * a[5] def da6(a): return -(3 * alpha ** 2 + 4 * beta ** 2 + 3 * gamma ** 2) / (3 * Re) * a[5] + alpha / np.sqrt(6) * a[0] * a[6] + \ np.sqrt(3 / 2) * beta * gamma / Kaby * a[0] * a[7] + 10 / (3 * np.sqrt(6)) * ( alpha ** 2 - gamma ** 2) / Kay * a[1] * a[3] - \ 2 * np.sqrt(2 / 3) * alpha * beta * gamma / (Kay * Kby) * a[2] * a[4] + alpha / np.sqrt(6) * a[6] * a[ 8] + np.sqrt(3 / 2) * beta * gamma / Kaby * a[7] * a[8] def da7(a): return -(alpha ** 2 + beta ** 2 + gamma ** 2) / Re * a[6] - alpha / np.sqrt(6) * (a[0] * a[5] + a[5] * a[8]) + \ np.sqrt(1 / 6) * (gamma ** 2 - alpha ** 2) / Kay * a[1] * a[4] + np.sqrt(1 / 6) * alpha * beta * gamma / ( Kay * Kby) * a[2] * a[3] def da8(a): return -(alpha ** 2 + beta ** 2 + gamma ** 2) / Re * a[7] + 2 / np.sqrt(6) * alpha * beta * gamma / (Kay * Kaby) * \ a[1] * a[4] + \ gamma ** 2 * (3 * alpha ** 2 - beta ** 2 + 3 * gamma ** 2) / (np.sqrt(6) * Kay * Kby * Kaby) * a[2] * a[3] def da9(a): return -9 * beta ** 2 / Re * a[8] + np.sqrt(3 / 2) * beta * gamma / Kby * a[1] * a[2] - np.sqrt( 3 / 2) * beta * gamma / Kaby * a[5] * a[7] def model(a): return np.array([da1(a), da2(a), da3(a), da4(a), da5(a), da6(a), da7(a), da8(a), da9(a)]) def u1(p): return np.array([np.sqrt(2) * np.sin(np.pi * p[Y] / 2), 0, 0]) def u2(p): return np.array([4/np.sqrt(3) * np.cos(np.pi * p[Y] / 2)**2 * np.cos(gamma*p[Z]), 0, 0]) def u3(p): return 2/np.sqrt(4 * gamma**2 + np.pi**2) * np.array([0, 2 * gamma * np.cos(np.pi * p[Y] / 2) * np.cos(gamma*p[Z]), np.pi * np.sin(np.pi * p[Y] / 2) * np.sin(gamma * p[Z])]) def u4(p): return np.array([0, 0, 4/np.sqrt(3) * np.cos(alpha * p[X]) * 
np.cos(np.pi * p[Y] / 2)**2]) def u5(p): return np.array([0, 0, 2 * np.sin(alpha * p[X]) * np.sin(np.pi * p[Y] / 2)]) def u6(p): return 4*np.sqrt(2)/np.sqrt(3 * (alpha**2 + gamma**2)) * np.array([ -gamma*np.cos(alpha*p[X])*np.cos(np.pi*p[Y]/2)**2*np.sin(gamma*p[Z]), 0, alpha*np.sin(alpha*p[X])*np.cos(np.pi*p[Y]/2)**2*np.cos(gamma*p[Z])]) def u7(p): return 2*np.sqrt(2)/np.sqrt(alpha**2 + gamma**2) * np.array([ gamma*np.sin(alpha*p[X])*np.sin(np.pi*p[Y]/2)*np.sin(gamma*p[Z]), 0, alpha*np.cos(alpha*p[X])*np.sin(np.pi*p[Y]/2)*np.cos(gamma*p[Z])]) def u8(p): return N8 * np.array([np.pi * alpha * np.sin(alpha*p[X])*np.sin(np.pi*p[Y]/2)*np.sin(gamma*p[Z]), 2*(alpha**2 + gamma**2) * np.cos(alpha*p[X]) * np.cos(np.pi*p[Y]/2) * np.sin(gamma*p[Z]), -np.pi * gamma * np.cos(alpha*p[X]) * np.sin(np.pi*p[Y]/2) * np.cos(gamma*p[Z])]) def u9(p): return np.array([np.sqrt(2) * np.sin(3 * np.pi * p[Y] / 2), 0, 0]) def make_grid(nx, ny, nz): x = np.linspace(0, Lx, nx) y = np.linspace(-1, 1, ny) z = np.linspace(0, Lz, nz) return x, y, z def generate_u(x, y, z): u_0 = np.zeros([9, len(x), len(y), len(z), 3]) for ix, px in enumerate(x): for iy, py in enumerate(y): for iz, pz in enumerate(z): u_0[0][ix][iy][iz] = u1([px, py, pz]) u_0[1][ix][iy][iz] = u2([px, py, pz]) u_0[2][ix][iy][iz] = u3([px, py, pz]) u_0[3][ix][iy][iz] = u4([px, py, pz]) u_0[4][ix][iy][iz] = u5([px, py, pz]) u_0[5][ix][iy][iz] = u6([px, py, pz]) u_0[6][ix][iy][iz] = u7([px, py, pz]) u_0[7][ix][iy][iz] = u8([px, py, pz]) u_0[8][ix][iy][iz] = u9([px, py, pz]) return u_0 def calculate_velocities(x, y, z, a0, u_0): u = np.zeros([len(x), len(y), len(z), 3]) for ix, px in enumerate(x): for iy, py in enumerate(y): for iz, pz in enumerate(z): u[ix][iy][iz] += a0[0] * u_0[0, ix, iy, iz] u[ix][iy][iz] += a0[1] * u_0[1, ix, iy, iz] u[ix][iy][iz] += a0[2] * u_0[2, ix, iy, iz] u[ix][iy][iz] += a0[3] * u_0[3, ix, iy, iz] u[ix][iy][iz] += a0[4] * u_0[4, ix, iy, iz] u[ix][iy][iz] += a0[5] * u_0[5, ix, iy, iz] u[ix][iy][iz] += a0[6] * u_0[6, ix, iy, iz] u[ix][iy][iz] += a0[7] * u_0[7, ix, iy, iz] u[ix][iy][iz] += a0[8] * u_0[8, ix, iy, iz] return u def calculate_vorticity(x, y, z, u): w = np.zeros([len(u), len(x), len(y), len(z), 3]) dx = x[1] - x[0] dy = y[1] - y[0] dz = z[1] - z[0] dux_dy, dux_dz = np.gradient(u[:, :, :, :, X], dy, dz, axis=(2, 3)) duy_dx, duy_dz = np.gradient(u[:, :, :, :, Y], dx, dz, axis=(1, 3)) duz_dx, duz_dy = np.gradient(u[:, :, :, :, Z], dx, dy, axis=(1, 2)) w[:, :, :, :, X] = duz_dy - duy_dz w[:, :, :, :, Y] = dux_dz - duz_dx w[:, :, :, :, Z] = duy_dx - dux_dy return w def plot_mean_profile(a): x, y, z = make_grid(10, 100, 10) u_0 = generate_u(x, y, z) u = calculate_velocities(x, y, z, a, u_0) ux_mean = np.zeros([len(y)]) for ix, px in enumerate(x): for iy, py in enumerate(y): for iz, pz in enumerate(z): ux_mean[iy] += u[ix][iy][iz][X] N = len(x) * len(z) ux_mean /= N axes = plt.gca() axes.set_xlim([-1, 1]) axes.set_ylim([-1, 1]) axes.set(xlabel="$u_x$", ylabel='y') axes.plot(ux_mean, y) plt.show() def plot_statistics(history, true_future, prediction, model_name=None): fig, axs = plt.subplots(nrows=3, ncols=3, figsize=(12, 12)) gs1 = gridspec.GridSpec(3, 3) print("started statistics") x, y, z = make_grid(25, 50, 25) u_0 = generate_u(x, y, z) sl = slice(0, None, 50) true_future = true_future[sl] prediction = prediction[sl] true_u = [] for a in true_future: true_u.append(calculate_velocities(x, y, z, a, u_0)) true_u = np.array(true_u) predicted_u = [] for a in prediction: predicted_u.append(calculate_velocities(x, y, z, a, u_0)) 
predicted_u = np.array(predicted_u) print("started plotting") u_mean_true = np.average(true_u, axis=(0, 1, 3)) u_mean_predicted = np.average(predicted_u, axis=(0, 1, 3)) ux_mean_true = u_mean_true[:, X] ux_mean_predicted = u_mean_predicted[:, X] ux_square_mean_true = np.average(np.square(true_u - np.mean(true_u, axis=0)), axis=(0, 1, 3))[:, X] ux_square_mean_predicted = np.average(np.square(predicted_u - np.mean(predicted_u, axis=0)), axis=(0, 1, 3))[:, X] ux_third_mean_true = np.average(np.power(true_u, 3), axis=(0, 1, 3))[:, X] ux_third_mean_predicted = np.average(np.power(predicted_u, 3), axis=(0, 1, 3))[:, X] ux_fourth_mean_true = np.average(np.power(true_u, 4), axis=(0, 1, 3))[:, X] ux_fourth_mean_predicted = np.average(np.power(predicted_u, 4), axis=(0, 1, 3))[:, X] # # v_square_true = np.add(np.square(true_u[:,:,:,:,Y]), np.square(true_u[:,:,:,:,Z])) # v_square_true = # # v_square_predicted = np.add(np.square(predicted_u[:,:,:,:,Y]), np.square(predicted_u[:,:,:,:,Z])) # v_square_predicted = v_square_mean_true = np.average(np.square(true_u[:, :, :, :, Y]), axis=(0, 1, 3)) v_square_mean_predicted = np.average(np.square(predicted_u[:, :, :, :, Y]), axis=(0, 1, 3)) uv_mean_true = np.average(np.multiply(true_u[:, :, :, :, Y], true_u[:,:,:,:,X]), axis=(0, 1, 3)) uv_mean_predicted = np.average(np.multiply(predicted_u[:, :, :, :, Y], predicted_u[:,:,:,:,X]), axis=(0, 1, 3)) w_true = calculate_vorticity(x, y, z, true_u) w_pred = calculate_vorticity(x, y, z, predicted_u) wx_rms_true = np.std(w_true[:, :, :, :, X] - np.mean(w_true[:, :, :, :, X], axis=0), axis=(0, 1, 3)) wy_rms_true = np.std(w_true[:, :, :, :, Y] - np.mean(w_true[:, :, :, :, Y], axis=0), axis=(0, 1, 3)) wz_rms_true = np.std(w_true[:, :, :, :, Z] - np.mean(w_true[:, :, :, :, Z], axis=0), axis=(0, 1, 3)) wx_rms_pred = np.std(w_pred[:, :, :, :, X] - np.mean(w_pred[:, :, :, :, X], axis=0), axis=(0, 1, 3)) wy_rms_pred = np.std(w_pred[:, :, :, :, Y] - np.mean(w_pred[:, :, :, :, Y], axis=0), axis=(0, 1, 3)) wz_rms_pred = np.std(w_pred[:, :, :, :, Z] - np.mean(w_pred[:, :, :, :, Z], axis=0), axis=(0, 1, 3)) ax = plt.subplot(gs1[0]) # ax.set_xlim([-1, 1]) ax.set_ylim([-1, 1]) ax.set(xlabel=r"$\overline{u}$", ylabel='y') ax.plot(ux_mean_true, y, label='True profile') ax.plot(ux_mean_predicted, y, label='Predicted Profile') ax.legend(loc='upper left') ax = plt.subplot(gs1[1]) # ax.set_xlim([-1, 1]) ax.set_ylim([-1, 1]) ax.set(xlabel=r"$\overline{u^2}$", ylabel='y') ax.plot(ux_square_mean_true, y, label='True profile') ax.plot(ux_square_mean_predicted, y, label='Predicted Profile') ax.legend(loc='upper left') ax = plt.subplot(gs1[2]) # ax.set_xlim([-1, 1]) ax.set_ylim([-1, 1]) ax.set(xlabel=r"$\overline{uv}$", ylabel='y') ax.plot(uv_mean_true, y, label='True profile') ax.plot(uv_mean_predicted, y, label='Predicted Profile') ax.legend(loc='upper left') ax = plt.subplot(gs1[3]) # ax.set_xlim([-1, 1]) ax.set_ylim([-1, 1]) ax.set(xlabel=r"$\overline{v^2}$", ylabel='y') ax.plot(v_square_mean_true, y, label='True profile') ax.plot(v_square_mean_predicted, y, label='Predicted Profile') ax.legend(loc='upper left') ax = plt.subplot(gs1[4]) # ax.set_xlim([-1, 1]) ax.set_ylim([-1, 1]) ax.set(xlabel=r"$\overline{u^3}$", ylabel='y') ax.plot(ux_third_mean_true, y, label='True profile') ax.plot(ux_third_mean_predicted, y, label='Predicted Profile') ax.legend(loc='upper left') ax = plt.subplot(gs1[5]) # ax.set_xlim([-1, 1]) ax.set_ylim([-1, 1]) ax.set(xlabel=r"$\overline{u^4}$", ylabel='y') ax.plot(ux_fourth_mean_true, y, label='True profile') 
ax.plot(ux_fourth_mean_predicted, y, label='Predicted Profile') ax.legend(loc='upper left') ax = plt.subplot(gs1[6]) # ax.set_xlim([-1, 1]) ax.set_ylim([-1, 1]) ax.set(xlabel=r"$\omega_{x,rms}$", ylabel='y') ax.plot(wx_rms_true, y, label='True profile') ax.plot(wx_rms_pred, y, label='Predicted Profile') ax.legend(loc='upper left') ax = plt.subplot(gs1[7]) # ax.set_xlim([-1, 1]) ax.set_ylim([-1, 1]) ax.set(xlabel=r"$\omega_{y,rms}$", ylabel='y') ax.plot(wy_rms_true, y, label='True profile') ax.plot(wy_rms_pred, y, label='Predicted Profile') ax.legend(loc='upper left') ax = plt.subplot(gs1[8]) # ax.set_xlim([-1, 1]) ax.set_ylim([-1, 1]) ax.set(xlabel=r"$\omega_{z,rms}$", ylabel='y') ax.plot(wz_rms_true, y, label='True profile') ax.plot(wz_rms_pred, y, label='Predicted Profile') ax.legend(loc='upper left') plt.show() def plot_dataset(fln): # train time 0 -> 52K # valid time 52K -> 68K # test time 68K -> 80K # train time 0 -> 208K # valid time 208K -> 272K # test time 272K -> 320K hf = h5py.File(fln, 'r') u = np.array(hf.get('/u')) t = np.array(hf.get('/t')) plt.figure(1) plt.subplot(511) plt.plot(t, u[:, 0]) plt.subplot(512) plt.plot(t, u[:, 1]) plt.subplot(513) plt.plot(t, u[:, 2]) plt.subplot(514) plt.plot(t, u[:, 3]) plt.subplot(515) plt.plot(t, u[:, 4]) plt.figure(2) plt.subplot(511) plt.plot(t, u[:, 5]) plt.subplot(512) plt.plot(t, u[:, 6]) plt.subplot(513) plt.plot(t, u[:, 7]) plt.subplot(514) plt.plot(t, u[:, 8]) plt.show() plt.figure(3) from scipy.signal import find_peaks u0 = u[:, 0] peaks, properties = find_peaks(u0, prominence=0.3, width=100) plt.plot(u0) plt.plot(peaks, u0[peaks], "x") plt.vlines(x=peaks, ymin=u0[peaks] - properties["prominences"], ymax = u0[peaks], color = "C1") plt.hlines(y=properties["width_heights"], xmin=properties["left_ips"], xmax = properties["right_ips"], color = "C1") plt.show() print(peaks) plt.figure(4) from scipy.signal import find_peaks u0 = u[:, 0] peaks, properties = find_peaks(u0, prominence=0.5, width=100) plt.plot(u0) plt.plot(peaks, u0[peaks], "x") plt.vlines(x=peaks, ymin=u0[peaks] - properties["prominences"], ymax = u0[peaks], color = "C1") plt.hlines(y=properties["width_heights"], xmin=properties["left_ips"], xmax = properties["right_ips"], color = "C1") plt.show() print(peaks) if __name__ == '__main__': plot_dataset('data/MFE.h5')
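MFE.py defines the nine-mode model through model(a), which returns the time derivative of the 9-component amplitude vector, but the __main__ block only plots a pre-computed dataset. Integrating the system directly is straightforward with SciPy; the sketch below assumes the file is importable as MFE, and the initial amplitudes and time span are illustrative values rather than settings from the original project:

import numpy as np
from scipy.integrate import solve_ivp
from MFE import model  # right-hand side defined above

def rhs(t, a):
    # solve_ivp expects f(t, y); the model depends only on the amplitudes.
    return model(a)

a0 = np.array([1.0, 0.07, 0.07, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])  # illustrative initial condition
sol = solve_ivp(rhs, (0.0, 1000.0), a0, method='RK45', max_step=1.0)
print(sol.y.shape)  # (9, number of accepted time steps)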
450531988b753188c27f8d2709248ab92e7c0e5c
cf0fd44aa791b5ee547b436c14700ff45ac7944e
/panorama-stitching/stitch.py
4c22bdf66a48c93a0d58684e9c4257931d5308df
[]
no_license
danield0garu/computer-vision
1186e2ba76312af4382df6663961f45635aa4e3d
b0805859c8ae1fa255b7e6c892394adc06e749cc
refs/heads/master
2021-09-10T21:21:49.679892
2018-04-02T12:01:12
2018-04-02T12:01:12
112,088,082
0
0
null
null
null
null
UTF-8
Python
false
false
1,226
py
from pyimagesearch.panorama import Stitcher
import argparse
import imutils
import cv2

# construct the argument parse and parse the arguments
"""
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True,
	help="path to the first image")
ap.add_argument("-s", "--second", required=True,
	help="path to the second image")
args = vars(ap.parse_args())
"""

# load the two images and resize them to have a width of 400 pixels
# (for faster processing)
imageA = cv2.imread("images/image1.jpg")
imageB = cv2.imread("images/image2.jpg")
#imageA = cv2.imread("images/office2.jpg")
#imageB = cv2.imread("images/office1.jpg")
#imageA = cv2.imread("images/officeOutsideLeft.jpg")
#imageB = cv2.imread("images/officeOutsideRight.jpg")
#imageA = cv2.imread("images/colegLeft.jpg")
#imageB = cv2.imread("images/colegRight.jpg")
imageA = imutils.resize(imageA, width=400)
imageB = imutils.resize(imageB, width=400)

# stitch the images together to create a panorama
stitcher = Stitcher()
(result, vis) = stitcher.stitch([imageA, imageB], showMatches=True)

# show the images
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.imshow("Keypoint Matches", vis)
cv2.imshow("Result", result)
cv2.waitKey(0)
8835abd75b08767de42f6adfcaa8726b27a17627
2a40963fc6af9a2fcf917bb2dba4d223d3249987
/apps/course/urls.py
0c5515c5bd2dc6260c06a37e53cfe06aa8dee805
[]
no_license
hiimkelvin/courses_django
63736337a50919b6948811ffd51fdaa5fe7b7c74
6f3fd0d2d85dd918b1154ac63de891bd61e9caad
refs/heads/master
2021-01-20T01:32:59.570678
2017-04-24T18:26:44
2017-04-24T18:26:44
89,293,991
1
0
null
null
null
null
UTF-8
Python
false
false
239
py
from django.conf.urls import url
from . import views

urlpatterns = [
    url(r'^$', views.index),
    url(r'^addnew$', views.addnew),
    url(r'^confirm/(?P<id>\d+)$', views.confirm_page),
    url(r'^remove/(?P<id>\d+)$', views.remove)
]
7af9568b63838fbfb2206906589c9512df1ae16a
6512957d7a359c633aaaed63b9fd44eb132b0d0f
/parser.py
2db19836c75dfd97b0365cdb2154995535083a95
[]
no_license
j-ibad/cpsc323-compiler-py
78aabb57de8af4e7399c0a51454e465ec90d9ac8
6f9835ff74cc9b74ccb93733aef65ae7ba189318
refs/heads/main
2023-04-20T13:49:31.690254
2021-05-13T02:28:05
2021-05-13T02:28:05
361,184,289
0
0
null
null
null
null
UTF-8
Python
false
false
24,447
py
#!/usr/bin/env python3 ''' Project Name: Project 2 - Syntax Analyzer Class: CPSC 323 - 02, Spring 2021 Professor: Prof. Anthony Le Authors: Winnie Pan Josh Ibad Titus Sudarno Thomas-James Le ''' import lexer import sys import re import getopt #Global variable to help when multiple files are processed firstFile = True #List of types recognized by compiler types = ["int", "float", "bool"] ''' TreeNode class TreeNode class, for representing non-leaf nodes for the internal Parse Tree. The internal Parse Tree uses the TreeNode clas for non-leaf nodes, storing the type of the non-terminal expression, along with an adjacency list of its children. Leaf nodes are simply stored as tokens. ''' class TreeNode: '''Constructor: Creates an internal, non-leaf node for the ParseTree storing the non-terminal expression, and instantiates an empty adjacency list of its children. @param val - Non-terminal expression of node ''' def __init__(self, val): self.val = val self.children = [] ''' Adds a child to the adjacency list of the TreeNode. By default, adds the new child to the tail of the list. @param child - Child of TreeNode to be added to the adjacency list @param index - Index at list in which to add child. Defaults to tail ''' def addChild(self, child, index=None): if index is None: self.children.append(child) else: self.children.insert(index, child) ''' Prints the subtree recursively, in preorder fashion. First prints the type of the current node, then prints its children. If the child is a non-leaf node, then the function is called recursively on the non-leaf TreeNode. If the child is a leaf node, the token is simply printed. Printing keeps track of the level of the tree, and formats the output with the spacer. @param level - Current height of the tree. Defaults to 0 for root @param spacer - String to prepend to all print statements for spacing ''' def printSubtree(self, level=0, spacer=""): print(spacer + '["%s"\theight: %d, ' % (self.val, level) + "Children: {") for child in self.children: if isinstance(child, TreeNode): child.printSubtree(level+1, spacer + " ") else: print(spacer, end=" ") print(child) print(spacer + "} End of (%s, %d)]" % (self.val, level)) ''' Parser Class Class for parsing a file and performing syntax analysis. The class internally calls a lexer on the input file. The resultant token-lexeme list is then passed to the Parser for Syntax Analysis, using the Recursive Descent Parser method. The Parser prints tokens along with production grammar rules matched to them. After the whole file is analyzed, the resultant parse tree is also printed. ''' class Parser: # Constructor # Runs the lexer to analyze the input file. Then, performs syntax analysis # on the tokens received, outputting to the output file. # Generates a parse tree. def __init__(self, fIn, fOut): #Initilize tracking variables self.index = 0 self.filename = fIn self.token = None self.printBuffer = [] #Perform lexical analysis self.tokens = lexer.lexer(fIn) if self.tokens is None or self.tokens[0][0] == -1: print("Could not analyze file. 
Check if file exists and is readable.") exit() self.realStdOut = sys.stdout #File output global firstFile if fOut: sys.stdout = open(fOut, "w" if firstFile else "a+") firstFile = False #PARSE TREE VARIABLES print("[---Analysis of \"%s\"---]\n" % fIn) self.parseTree = self.statementList() print("\nPrinting Parse Tree:\n") self.parseTree.printSubtree() print("\n[---Successful end of \"%s\"---]\n" % fIn) sys.stdout = self.realStdOut # Iterates to the next token in the list, printing it to output. # If no more tokens to iterate over, an error is printed. def nextToken(self): if self.index >= len(self.tokens): #No more tokens error self.printError("Unexpected end of file. Expected more tokens.") self.token = self.tokens[self.index] #Write token #print("Token:\t%-16s Lexeme:\t%s" % (self.token[0], self.token[1])) print("Token:\t%-10s @%4d,%-4d Lexeme:\t%s" % (self.token[0], self.token[2][0], self.token[2][1], self.token[1])) self.index += 1 # Peeks at the next token if one exists. Otherwise, None is returned def peekToken(self): if self.index < len(self.tokens): return self.tokens[self.index] else: return None # Removes the next token from the token list, and sets it as current token. # Used for removing tokens which are appended to others when reinterpretted. def popNextToken(self): if self.index < len(self.tokens): self.token = self.tokens.pop(self.index) #Write token print("Token:\t%-16s @%4d,%-4d Lexeme:\t%s" % (self.token[0], self.token[2][0], self.token[2][1], self.token[1])) return self.token else: self.printError("Unexpected end of file. Expected more tokens.") # Prints an error message def printError(self, errorMsg): print("%s:%d:%d: Error: %s" % (self.filename, self.token[2][0], self.token[2][1], errorMsg)) if sys.stdout != self.realStdOut: sys.stdout = self.realStdOut print("%s:%d:%d: Error: %s" % (self.filename, self.token[2][0], self.token[2][1], errorMsg)) exit() # Special error that prints the unexpected token along with the error message def printUnexpectedError(self, errorMsg, errorType="Error"): print('%s:%d:%d: %s: Unexpected %s token "%s". %s' % (self.filename, self.token[2][0], self.token[2][1], errorType, self.token[0], self.token[1], errorMsg)) if sys.stdout != self.realStdOut: sys.stdout = self.realStdOut print('%s:%d:%d: %s: Unexpected %s token "%s". 
%s' % (self.filename, self.token[2][0], self.token[2][1], errorType, self.token[0], self.token[1], errorMsg)) exit() # Prints everything in the print buffer def flushPrintBuffer(self): while self.printBuffer: print( self.printBuffer.pop(0) ) # Expression # Production rules: <StatementList> -> <Statement> <StatementList> | <empty> # Represented in parse tree as non-leaf node with value "SL" # The root of the parse tree is a statement list def statementList(self, ending=None): subRoot = None currNode = None if isinstance(ending, list): while (self.peekToken() is not None) and (self.peekToken()[1] not in ending): #Create new Tree Node for SL nxtNode = TreeNode('SL') if subRoot is None: currNode = nxtNode subRoot = currNode else: currNode.addChild(nxtNode) #Adds SL as child of parent currNode = nxtNode self.printBuffer.append("\t<StatementList> -> <Statement> <StatementList> | <empty>") currNode.addChild( self.statement() ) if (self.peekToken() is not None) and (self.peekToken()[1] == ';'): self.nextToken() currNode.addChild( self.token ) elif isinstance(ending, str): while (self.peekToken() is not None) and (self.peekToken()[1] != ending): #Create new Tree Node for SL nxtNode = TreeNode('SL') if subRoot is None: currNode = nxtNode subRoot = currNode else: currNode.addChild(nxtNode) #Adds SL as child of parent currNode = nxtNode self.printBuffer.append("\t<StatementList> -> <Statement> <StatementList> | <empty>") currNode.addChild( self.statement() ) if (self.peekToken() is not None) and (self.peekToken()[1] == ';'): self.nextToken() currNode.addChild( self.token ) else: while self.peekToken() is not None: #Create new Tree Node for SL nxtNode = TreeNode('SL') if subRoot is None: currNode = nxtNode subRoot = currNode else: currNode.addChild(nxtNode) #Adds SL as child of parent currNode = nxtNode self.printBuffer.append("\t<StatementList> -> <Statement> <StatementList> | <empty>") currNode.addChild( self.statement() ) if (self.peekToken() is not None) and (self.peekToken()[1] == ';'): self.nextToken() currNode.addChild( self.token ) self.flushPrintBuffer() return subRoot # Statement # Production rules: <Statement> -> <Assign> | <Declarative> | begin <StatementList> end # if <Conditional> then <StatementList> else <StatementList> endif | # if <Conditional> then <StatementList> endif | # while <Conditional> do <StatementList> whileend | begin <StatementList> end # Represented in parse tree as non-leaf node with value "S" def statement(self): currNode = TreeNode("S") print("") #Padding between statements for a cleaner look self.nextToken() self.flushPrintBuffer() if self.token[1] == "begin": print("\t<Statement> -> begin <StatementList> end") currNode.addChild( self.token ) currNode.addChild( self.statementList("end") ) if self.peekToken() is not None and self.peekToken()[1] == "end": self.nextToken() currNode.addChild( self.token ) else: #ERROR: Needs "end" self.printError('Expected keyword "end" after statement-list') #Assignment and Declarations elif self.token[0] == "IDENTIFIER": print("\t<Statement> -> <Assign>") tmpToken = self.token tmpNode = self.assign() tmpNode.addChild( tmpToken, 0 ) currNode.addChild(tmpNode) elif self.token[1] in types: print("\t<Statement> -> <Declarative>") tmpToken = self.token tmpNode = self.declarative() tmpNode.addChild( tmpToken, 0 ) currNode.addChild(tmpNode) #Control structures elif self.token[1] == "if": currNode.addChild( self.token ) print("\t<Statement> -> if <Conditional> then <StatementList> endif | if <Conditional> then <StatementList> else 
<StatementList> endif") currNode.addChild( self.conditional() ) if self.peekToken() is not None and self.peekToken()[1] == "then": self.nextToken() currNode.addChild( self.token ) currNode.addChild( self.statementList(["else", "endif"]) ) if self.peekToken() is not None and self.peekToken()[1] == "else": self.nextToken() currNode.addChild( self.token ) currNode.addChild( self.statementList("endif") ) if self.peekToken() is not None and self.peekToken()[1] == "endif": self.nextToken() currNode.addChild( self.token ) else: #ERROR: Needs endif self.printError('Expected keyword "endif" after statement-list') else: #ERROR: Needs "then" self.printError('Expected keyword "then" before statement-list') elif self.token[1] == "while": currNode.addChild( self.token ) print("\t<Statement> -> while <Conditional> do <StatementList> whileend") currNode.addChild( self.conditional() ) if self.peekToken() is not None and self.peekToken()[1] == "do": self.nextToken() currNode.addChild( self.token ) currNode.addChild( self.statementList('whileend') ) if self.peekToken() is not None and self.peekToken()[1] == 'whileend': self.nextToken() currNode.addChild( self.token ) else: #ERROR: Needs "whileend" self.printError('Expected keyword "whileend" after statement-list') else: #ERROR: should have "do" self.printError('Expected keyword "do" before statement-list') elif self.token[1] == 'input': currNode.addChild( self.token ) self.nextToken() currNode.addChild( self.token ) if self.token[1] != '(': self.printUnexpectedError("Expected SEPARATOR '('.") self.nextToken() currNode.addChild( self.token ) if self.token[0] != 'IDENTIFIER': self.printUnexpectedError("Expected IDENTIFIER.") self.nextToken() currNode.addChild( self.token ) if self.token[1] != ')': self.printUnexpectedError("Expected SEPARATOR ')'.") elif self.token[1] == 'output': currNode.addChild( self.token ) self.nextToken() currNode.addChild( self.token ) if self.token[1] != '(': self.printUnexpectedError("Expected SEPARATOR '('.") currNode.addChild( self.expression() ) self.nextToken() currNode.addChild( self.token ) if self.token[1] != ')': self.printUnexpectedError("Expected SEPARATOR ')'.") else: #ERROR: Next token does not form a statement self.printUnexpectedError(' Was expecting a statement.') return currNode # Assign # Production rules: <Assign> -> <ID> = <Expression>; # Represented in parse tree as non-leaf node with value "A" def assign(self): currNode = TreeNode("A") tmpTok = self.peekToken() if tmpTok is not None and tmpTok[1] == "=": print("\t<Assign> -> <ID> = <Expression>;") self.nextToken() currNode.addChild( self.token ) currNode.addChild( self.expression() ) else: #ERROR: Expecting "=" for assignment statement. 
self.printUnexpectedError('Was expecting operator "=" for assignment statement') return currNode # Declarative # Production rules: <Declarative> -> <Type> <ID> <MoreIds>; | <empty> # <MoreIds> -> , <ID> <MoreIds> | <empty # Represented in parse tree as non-leaf node with value "D" # MoreIDs are represented as "MI" def declarative(self): subRoot = TreeNode("D") print("\t<Declarative> -> <Type> <ID> <MoreIds>; | <empty>") self.nextToken() #ID subRoot.addChild( self.token ) currNode = subRoot while self.peekToken() is not None and self.peekToken()[1] == ',': tmpNode = TreeNode("MI") self.nextToken() tmpNode.addChild( self.token ) if self.peekToken() is not None and (self.peekToken()[0] != "IDENTIFIER"): #ERROR: Invalid multiple declarative statement self.nextToken() self.printUnexpectedError('Was expecting an IDENTIFIER token for more declarations') print("\t<MoreIds> -> , <ID> <MoreIds> | <empty>") self.nextToken() tmpNode.addChild( self.token ) currNode.addChild( tmpNode ) currNode = tmpNode currNode.addChild( "<empty>" ) return subRoot # Expression # Production rules: <Expression> -> <Term> | <Term> + <Expression> | <Term> - <Expression> # Represented in parse tree as non-leaf node with value "E" # Note: Removal of left recursion is not performed. Rather, the grammar is flipped to not have # left recursion. This will be handled later by the object code generator. def expression(self): currNode = TreeNode("E") self.printBuffer.append("\t<Expression> -> <Term> | <Term> + <Expression> | <Term> - <Expression>") currNode.addChild( self.term() ) tmpTok = self.peekToken() if tmpTok is not None and tmpTok[1] in ['+', '-']: self.nextToken() currNode.addChild( self.token ) currNode.addChild( self.expression() ) self.flushPrintBuffer() return currNode # Term: # Production rules: <Term> -> <Factor> * <Term> | <Factor> / <Term> | <Factor> # Represented in parse tree as non-leaf node with value "T" # Note: Removal of left recursion is not performed. Rather, the grammar is flipped to not have # left recursion. This will be handled later by the object code generator. def term(self): currNode = TreeNode("T") self.printBuffer.append("\t<Term> -> <Factor> * <Term> | <Factor> / <Term> | <Factor>") currNode.addChild( self.factor() ) tmpTok = self.peekToken() if tmpTok is not None and tmpTok[1] in ['*', '/']: self.nextToken() currNode.addChild( self.token ) currNode.addChild( self.term() ) self.flushPrintBuffer() return currNode # Factor: # Production rules: <Factor> -> '(' <Expression> ')' | <ID> | ('+' | '-')?(<FLOAT> | ('.')?<INT>) | 'True' | 'False' # Represented in parse tree as non-leaf node with value "F" # Note: Additional processing of numbers are performed here to recognize all forms of numericals def factor(self): currNode = TreeNode("F") self.nextToken() currNode.addChild( self.token ) self.flushPrintBuffer() print("\t<Factor> -> '(' <Expression> ')' | <ID> | ('+' | '-')?(<FLOAT> | ('.')?<INT>) | 'True' | 'False' | input(ID) | output(E)") if self.token[1] == '(': currNode.addChild( self.expression() ) self.nextToken() currNode.addChild( self.token ) if self.token[1] != ')': #ERROR: Expected ')' after expression self.printUnexpectedError("Expected SEPARATOR ')' after expression") elif self.token[0] in ['IDENTIFIER', 'INTEGER', 'FLOAT'] or self.token[1] in ['True', 'False']: return currNode #IS VALID. 
Return to go back to callee function elif self.token[1] in ['+', '-']: #Treat as part of number tmpTok = self.popNextToken() if tmpTok[1] == '.': tmpTok2 = self.popNextToken() if tmpTok2[0] == 'INTEGER': self.tokens[self.index-1][1] = self.tokens[self.index-1][1] + tmpTok[1] + tmpTok2[1]#Append to front of number self.tokens[self.index-1][0] = 'FLOAT' else: self.printUnexpectedError("Expected float.") elif tmpTok[0] in ['INTEGER', 'FLOAT']: self.tokens[self.index-1][1] = self.tokens[self.index-1][1] + tmpTok[1] #Append to front of number self.tokens[self.index-1][0] = tmpTok[0] else: self.printUnexpectedError("Expected numerical token.") #self.printUnexpectedError("Expected a Factor in the form of ( <Expression> ), or an IDENTIFIER, or NUMERIC token", "Error: Invalid Factor") elif self.token[1] == '.': tmpTok = self.popNextToken() if tmpTok[0] == 'INTEGER': self.tokens[self.index-1][1] = self.tokens[self.index-1][1] + tmpTok[1]#Append to front of number self.tokens[self.index-1][0] = 'FLOAT' else: self.printUnexpectedError("Expected float.") else: #ERROR: Not a valid Factor. self.printUnexpectedError("Expected a Factor in the form of ( <Expression> ), or an IDENTIFIER, or NUMERIC token", "Error: Invalid Factor") return currNode # Conditional # Production rules: <Conditional> -> <Expression> <Relop> <Expression> | <Expression> | ( <Conditional> ) # Represented in parse tree as non-leaf node with value "C" def conditional(self): wrappedInParenthesis = (self.peekToken()[1] == '(') if wrappedInParenthesis: self.nextToken(); self.printBuffer.append("\t<Conditional> -> <Expression> <Relop> <Expression> | <Expression> | ( <Conditional> )") currNode = TreeNode("C") self.printBuffer.append("\t<Conditional> -> <Expression> <Relop> <Expression> | <Expression> | ( <Conditional> )") currNode.addChild( self.expression() ) tmpTok = self.peekToken() if tmpTok is not None: if tmpTok[1] == "<": self.nextToken() currNode.addChild( self.token ) tmpTok2 = self.peekToken() if tmpTok2 is not None and tmpTok2[1] in ['=', '>']: self.nextToken() #Eval as "<=" or "<>" currNode.addChild( self.token ) currNode.addChild( self.expression() ) #Eval as "<" elif tmpTok[1] == ">": self.nextToken() currNode.addChild( self.token ) tmpTok2 = self.peekToken() if tmpTok2 is not None and tmpTok2[1] == "=": self.nextToken() # Eval as >= currNode.addChild( self.token ) currNode.addChild( self.expression() ) #Eval as > elif tmpTok[1] == "=": self.nextToken() currNode.addChild( self.token ) tmpTok2 = self.peekToken() if tmpTok2 is not None: if tmpTok2[1] == '=': self.nextToken() currNode.addChild( self.token ) currNode.addChild( self.expression() )#Eval as == else: #Eval as assignment, counted as invalid self.printUnexpectedError("Expected RELATIVE OPERATOR between expressions. Did you mean '=='?") #OTHERWISE just a lone expression. (Valid) if wrappedInParenthesis: self.nextToken() if self.token[1] != ')': self.printUnexpectedError("Expected ')'.") self.flushPrintBuffer() return currNode def main(): #Read command line arguments mFlags, files = getopt.gnu_getopt(sys.argv[1:], "ho:", ["help"]) outFile = None #Process command line arguments for opt, arg in mFlags: if opt in ('-h', "--help"): print("USAGE: parser.py <FILE> [<FILE> ...] 
[-o <OUTFILE>]") exit() elif opt == '-o': outFile = arg else: print("Option '%s' not recognized" % opt) #Prompt for input if none given if len(files) < 1: #Prompt user for file name files = input("Input filename(s): ").split(",") if files is None or len(files[0]) == 0: print("A valid filename must be entered.") exit() for i in range(0, len(files)): files[i] = files[i].strip() #Remove leading and heading whitespace if not outFile: outFile = input("Output filename (default: console): ") if not outFile: outFile = None print("\tDefaulting to standard output.") #Perform syntax analysis on all input files parseForest = [] for filename in files: parser = Parser(filename, outFile) parseForest.append(parser.parseTree) #Return parse forest (list of parse trees from all input files) return parseForest #Execute main function only when directly executing script if __name__ == "__main__": main()
5fb69116e48dca7461402838a90ce882b621c688
6fa0c940edffaeb325205673b4c7643b2ebfffc4
/clicker/admin.py
dfb60e1999dfce5cae2f0891a8b4978f8c49920c
[]
no_license
RustamMullayanov/myDjangoClicker
5f564bab3cd067eebc712f2fff82939e63f6c8b7
caaa47f36904de92be0e16187b0acc707a4497ad
refs/heads/master
2023-05-29T00:22:59.039925
2021-06-13T09:30:49
2021-06-13T09:30:49
367,644,274
0
0
null
null
null
null
UTF-8
Python
false
false
120
py
from django.contrib import admin
from . import models

# Register your models here.
admin.site.register(models.Clicker)
d20beb7361baa30e1f49b5bce1bc4a1d3158cbba
7c61f236f81c642b43abeee79bd36802d92df7d9
/sandbox/envs/maze/point_env.py
4048496a5296e5d875c986a502a17f9cae4bd722
[]
no_license
ZiyeHu/goalgail
e9044b3863c608d2ccd4d49241cf3d0c09962eef
1d940c8efffd519a0a77c58c8adf03b9967aa81a
refs/heads/master
2022-12-11T14:26:28.781537
2020-09-17T16:08:24
2020-09-17T16:08:24
296,116,076
0
0
null
2020-09-16T18:30:54
2020-09-16T18:30:53
null
UTF-8
Python
false
false
7,554
py
from rllab.envs.base import Step from rllab.envs.mujoco.mujoco_env import MujocoEnv from rllab.core.serializable import Serializable from rllab.misc.overrides import overrides from rllab.misc import logger import numpy as np import math import random from sandbox.envs.base import StateGenerator from sandbox.envs.goal_env import GoalEnv from sandbox.envs.rewards import linear_threshold_reward # # def auto_str(cls): # def __str__(self): # return '%s(%s)' % ( # type(self).__name__, # ', '.join('%s=%s' % item for item in vars(self).items()) # ) # cls.__str__ = __str__ # return cls class PointEnv(GoalEnv, MujocoEnv, Serializable): FILE = 'point2.xml' def __str__(self): return '%s(%s)' % ( type(self).__name__, ', '.join('%s=%s' % item for item in vars(self).items()) ) def __init__(self, goal_generator=None, reward_dist_threshold=0.1, indicator_reward=True, append_goal=False, control_mode='linear', *args, **kwargs): """ :param goal_generator: Proceedure to sample and keep the goals :param reward_dist_threshold: :param control_mode: """ Serializable.quick_init(self, locals()) GoalEnv.__init__(self, goal_generator=goal_generator) self.control_mode = control_mode if goal_generator is None: self.update_goal_generator(StateGenerator()) else: self.update_goal_generator(goal_generator) self.reward_dist_threshold = reward_dist_threshold self.indicator_reward = indicator_reward self.append_goal = append_goal MujocoEnv.__init__(self, *args, **kwargs) @overrides def get_current_obs(self): """Append obs with current_goal""" pos = self.model.data.qpos.flat[:2] vel = self.model.data.qvel.flat[:2] if self.append_goal: return np.concatenate([ pos, vel, self.current_goal, ]) else: if self.control_mode == 'pos': return pos else: return np.concatenate([pos, vel]) @overrides def reset(self, init_state=None, goal=(1, 0), *args, **kwargs): # reset called when __init__, so needs goal! # import pdb; pdb.set_trace() """This does both the reset of mujoco, the forward and reset goal""" self.update_goal(goal=goal) qpos = np.zeros((self.model.nq, 1)) qvel = np.zeros((self.model.nv, 1)) # 0 velocity if init_state is not None: # you can reset only the com position! qpos[:2] = np.array(init_state[:2]).reshape((2, 1)) if np.array(init_state).size == 4: qvel[:2] = np.array(init_state[2:]).reshape((2, 1)) qpos[2:, :] = np.array(self.current_goal).reshape((2, 1)) # the goal is part of the mujoco!! self.set_state(qpos, qvel) # this is usually the usual reset self.current_com = self.model.data.com_subtree[0] # CF: this is very weird... gets 0, 2, 0.1 even when it's 0 self.dcom = np.zeros_like(self.current_com) return self.get_current_obs() def step(self, action): # print('PointEnv, the action taken is: ', action) if self.control_mode == 'linear': # action is directly the acceleration self.forward_dynamics(action) elif self.control_mode == 'angular': # action[0] is accel in forward (vel) direction, action[1] in orthogonal. vel = self.model.data.qvel.flat[:2] # Get the unit vector for velocity if np.linalg.norm(vel) < 1e-10: vel = np.array([1., 0.]) else: vel = vel / np.linalg.norm(vel) acc = np.zeros_like(vel) acc += action[0] * vel acc += action[1] * np.array([-vel[1], vel[0]]) self.forward_dynamics(acc) elif self.control_mode == 'pos': desired_pos = self.get_xy() + np.clip(action, -2, 2) / 10. for _ in range(200): self.forward_dynamics(desired_pos) # print(str(self.get_xy())) # print(str(self.model.data.qvel.flat[:2])) # print("desired_pos" + str(desired_pos)) # pos = self.get_xy() # pos += np.clip(action, -2, 2) / 10. 
# limit the action range to -0.2, 0.2 # self.set_xy(pos) else: raise NotImplementedError("Control mode not supported!") reward_dist = self._compute_dist_reward() # 1000 * self.reward_dist_threshold at goal, decreases with 1000 coef # print("reward", reward_dist) reward_ctrl = - np.square(action).sum() # reward = reward_dist + reward_ctrl reward = reward_dist dist = np.linalg.norm( self.get_body_com("torso") - self.get_body_com("target") ) ob = self.get_current_obs() # print('current obs:', ob) done = False if dist < self.reward_dist_threshold and self.indicator_reward: # print("**DONE***") done = True # print("reward", reward) return Step( ob, reward, done, reward_dist=reward_dist, reward_ctrl=reward_ctrl, distance=dist, ) @overrides @property def goal_observation(self): # transforms a state into a goal (projection, for example) return self.get_body_com("torso")[:2] def _compute_dist_reward(self): """Transforms dist to goal with linear_threshold_reward: gets -threshold * coef at dist=0, and decreases to 0""" dist = np.linalg.norm( self.get_body_com("torso") - self.get_body_com("target") ) if self.indicator_reward and dist <= self.reward_dist_threshold: return 1000 * self.reward_dist_threshold else: # return linear_threshold_reward(dist, threshold=self.reward_dist_threshold, coefficient=-10) return -10 * dist def set_state(self, qpos, qvel): assert qpos.shape == (self.model.nq, 1) and qvel.shape == (self.model.nv, 1) self.model.data.qpos = qpos self.model.data.qvel = qvel # self.model._compute_subtree() #pylint: disable=W0212 self.model.forward() def get_xy(self): qpos = self.model.data.qpos return qpos[0, 0], qpos[1, 0] def set_xy(self, xy): qpos = np.copy(self.model.data.qpos) qpos[0, 0] = xy[0] qpos[1, 0] = xy[1] self.model.data.qpos = qpos self.model.forward() @overrides def log_diagnostics(self, paths): # Process by time steps distances = [ np.mean(path['env_infos']['distance']) for path in paths ] goal_distances = [ path['env_infos']['distance'][0] for path in paths ] reward_dist = [ np.mean(path['env_infos']['reward_dist']) for path in paths ] reward_ctrl = [ np.mean(path['env_infos']['reward_ctrl']) for path in paths ] # Process by trajectories logger.record_tabular('GoalDistance', np.mean(goal_distances)) logger.record_tabular('MeanDistance', np.mean(distances)) logger.record_tabular('MeanRewardDist', np.mean(reward_dist)) logger.record_tabular('MeanRewardCtrl', np.mean(reward_ctrl))
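PointEnv above exposes the usual rllab MujocoEnv interface, so exercising it by hand amounts to constructing the environment, resetting it with a goal, and stepping it with 2-D actions. A minimal sketch, assuming a working rllab/MuJoCo setup and the point2.xml model referenced by the class; the goal, initial state, and random actions are illustrative:

import numpy as np
from sandbox.envs.maze.point_env import PointEnv

env = PointEnv(reward_dist_threshold=0.1, control_mode='linear')
obs = env.reset(init_state=[0.0, 0.0], goal=(1.0, 0.0))
for _ in range(100):
    action = np.random.uniform(-1.0, 1.0, size=2)     # acceleration in 'linear' mode
    next_obs, reward, done, info = env.step(action)   # Step(...) unpacks as a 4-tuple
    if done:                                          # within reward_dist_threshold of the goal
        break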
30394348b699ec0d39eb02cb0fca340cdf880ae9
ab1998970b4977b93466820e7a41227a57b21563
/local_lib/pykrige/ok3d.py
7298d0d67801077c78ed146584778128cbc036a1
[]
no_license
kamccormack/PEFiredrake
95b76f0c4e12b9b840fecb98e672621c4047455f
329bb214268fa04fbebd5b6d219941273a849728
refs/heads/master
2020-03-09T19:44:00.191375
2018-04-10T16:38:18
2018-04-10T16:38:18
128,964,966
1
0
null
null
null
null
UTF-8
Python
false
false
35,858
py
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals __doc__ = """Code by Benjamin S. Murphy [email protected] Dependencies: numpy scipy matplotlib Classes: OrdinaryKriging3D: Support for 3D Ordinary Kriging. References: P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology, (Cambridge University Press, 1997) 272 p. Copyright (c) 2015 Benjamin S. Murphy """ import numpy as np import scipy.linalg from scipy.spatial.distance import cdist import matplotlib.pyplot as plt from . import variogram_models from . import core class OrdinaryKriging3D: """class OrdinaryKriging3D Three-dimensional ordinary kriging Dependencies: numpy scipy matplotlib Inputs: X (array-like): X-coordinates of data points. Y (array-like): Y-coordinates of data points. Z (array-like): Z-coordinates of data points. Val (array-like): Values at data points. variogram_model (string, optional): Specified which variogram model to use; may be one of the following: linear, power, gaussian, spherical, exponential. Default is linear variogram model. To utilize as custom variogram model, specify 'custom'; you must also provide variogram_parameters and variogram_function. variogram_parameters (list, optional): Parameters that define the specified variogram model. If not provided, parameters will be automatically calculated such that the root-mean-square error for the fit variogram function is minimized. linear - [slope, nugget] power - [scale, exponent, nugget] gaussian - [sill, range, nugget] spherical - [sill, range, nugget] exponential - [sill, range, nugget] For a custom variogram model, the parameters are required, as custom variogram models currently will not automatically be fit to the data. The code does not check that the provided list contains the appropriate number of parameters for the custom variogram model, so an incorrect parameter list in such a case will probably trigger an esoteric exception someplace deep in the code. variogram_function (callable, optional): A callable function that must be provided if variogram_model is specified as 'custom'. The function must take only two arguments: first, a list of parameters for the variogram model; second, the distances at which to calculate the variogram model. The list provided in variogram_parameters will be passed to the function as the first argument. nlags (int, optional): Number of averaging bins for the semivariogram. Default is 6. weight (boolean, optional): Flag that specifies if semivariance at smaller lags should be weighted more heavily when automatically calculating variogram model. True indicates that weights will be applied. Default is False. (Kitanidis suggests that the values at smaller lags are more important in fitting a variogram model, so the option is provided to enable such weighting.) anisotropy_scaling_y (float, optional): Scalar stretching value to take into account anisotropy in the y direction. Default is 1 (effectively no stretching). Scaling is applied in the y direction in the rotated data frame (i.e., after adjusting for the anisotropy_angle_x/y/z, if anisotropy_angle_x/y/z is/are not 0). anisotropy_scaling_z (float, optional): Scalar stretching value to take into account anisotropy in the z direction. Default is 1 (effectively no stretching). Scaling is applied in the z direction in the rotated data frame (i.e., after adjusting for the anisotropy_angle_x/y/z, if anisotropy_angle_x/y/z is/are not 0). 
anisotropy_angle_x (float, optional): CCW angle (in degrees) by which to rotate coordinate system about the x axis in order to take into account anisotropy. Default is 0 (no rotation). Note that the coordinate system is rotated. X rotation is applied first, then y rotation, then z rotation. Scaling is applied after rotation. anisotropy_angle_y (float, optional): CCW angle (in degrees) by which to rotate coordinate system about the y axis in order to take into account anisotropy. Default is 0 (no rotation). Note that the coordinate system is rotated. X rotation is applied first, then y rotation, then z rotation. Scaling is applied after rotation. anisotropy_angle_z (float, optional): CCW angle (in degrees) by which to rotate coordinate system about the z axis in order to take into account anisotropy. Default is 0 (no rotation). Note that the coordinate system is rotated. X rotation is applied first, then y rotation, then z rotation. Scaling is applied after rotation. verbose (Boolean, optional): Enables program text output to monitor kriging process. Default is False (off). enable_plotting (Boolean, optional): Enables plotting to display variogram. Default is False (off). Callable Methods: display_variogram_model(): Displays semivariogram and variogram model. update_variogram_model(variogram_model, variogram_parameters=None, nlags=6, anisotropy_scaling=1.0, anisotropy_angle=0.0): Changes the variogram model and variogram parameters for the kriging system. Inputs: variogram_model (string): May be any of the variogram models listed above. May also be 'custom', in which case variogram_parameters and variogram_function must be specified. variogram_parameters (list, optional): List of variogram model parameters, as listed above. If not provided, a best fit model will be calculated as described above. variogram_function (callable, optional): A callable function that must be provided if variogram_model is specified as 'custom'. See above for more information. nlags (int, optional): Number of averaging bins for the semivariogram. Defualt is 6. weight (boolean, optional): Flag that specifies if semivariance at smaller lags should be weighted more heavily when automatically calculating variogram model. True indicates that weights will be applied. Default is False. anisotropy_scaling (float, optional): Scalar stretching value to take into account anisotropy. Default is 1 (effectively no stretching). Scaling is applied in the y-direction. anisotropy_angle (float, optional): Angle (in degrees) by which to rotate coordinate system in order to take into account anisotropy. Default is 0 (no rotation). switch_verbose(): Enables/disables program text output. No arguments. switch_plotting(): Enables/disable variogram plot display. No arguments. get_epsilon_residuals(): Returns the epsilon residuals of the variogram fit. No arguments. plot_epsilon_residuals(): Plots the epsilon residuals of the variogram fit in the order in which they were calculated. No arguments. get_statistics(): Returns the Q1, Q2, and cR statistics for the variogram fit (in that order). No arguments. print_statistics(): Prints out the Q1, Q2, and cR statistics for the variogram fit. NOTE that ideally Q1 is close to zero, Q2 is close to 1, and cR is as small as possible. execute(style, xpoints, ypoints, mask=None): Calculates a kriged grid. Inputs: style (string): Specifies how to treat input kriging points. Specifying 'grid' treats xpoints, ypoints, and zpoints as arrays of x, y,z coordinates that define a rectangular grid. 
Specifying 'points' treats xpoints, ypoints, and zpoints as arrays that provide coordinates at which to solve the kriging system. Specifying 'masked' treats xpoints, ypoints, zpoints as arrays of x, y, z coordinates that define a rectangular grid and uses mask to only evaluate specific points in the grid. xpoints (array-like, dim N): If style is specific as 'grid' or 'masked', x-coordinates of LxMxN grid. If style is specified as 'points', x-coordinates of specific points at which to solve kriging system. ypoints (array-like, dim M): If style is specified as 'grid' or 'masked', y-coordinates of LxMxN grid. If style is specified as 'points', y-coordinates of specific points at which to solve kriging system. Note that in this case, xpoints, ypoints, and zpoints must have the same dimensions (i.e., L = M = N). zpoints (array-like, dim L): If style is specified as 'grid' or 'masked', z-coordinates of LxMxN grid. If style is specified as 'points', z-coordinates of specific points at which to solve kriging system. Note that in this case, xpoints, ypoints, and zpoints must have the same dimensions (i.e., L = M = N). mask (boolean array, dim LxMxN, optional): Specifies the points in the rectangular grid defined by xpoints, ypoints, and zpoints that are to be excluded in the kriging calculations. Must be provided if style is specified as 'masked'. False indicates that the point should not be masked; True indicates that the point should be masked. backend (string, optional): Specifies which approach to use in kriging. Specifying 'vectorized' will solve the entire kriging problem at once in a vectorized operation. This approach is faster but also can consume a significant amount of memory for large grids and/or large datasets. Specifying 'loop' will loop through each point at which the kriging system is to be solved. This approach is slower but also less memory-intensive. Default is 'vectorized'. Outputs: kvalues (numpy array, dim LxMxN or dim Nx1): Interpolated values of specified grid or at the specified set of points. If style was specified as 'masked', kvalues will be a numpy masked array. sigmasq (numpy array, dim LxMxN or dim Nx1): Variance at specified grid points or at the specified set of points. If style was specified as 'masked', sigmasq will be a numpy masked array. References: P.K. Kitanidis, Introduction to Geostatistcs: Applications in Hydrogeology, (Cambridge University Press, 1997) 272 p. """ eps = 1.e-10 # Cutoff for comparison to zero variogram_dict = {'linear': variogram_models.linear_variogram_model, 'power': variogram_models.power_variogram_model, 'gaussian': variogram_models.gaussian_variogram_model, 'spherical': variogram_models.spherical_variogram_model, 'exponential': variogram_models.exponential_variogram_model} def __init__(self, x, y, z, val, variogram_model='linear', variogram_parameters=None, variogram_function=None, nlags=6, weight=False, anisotropy_scaling_y=1.0, anisotropy_scaling_z=1.0, anisotropy_angle_x=0.0, anisotropy_angle_y=0.0, anisotropy_angle_z=0.0, verbose=False, enable_plotting=False): # Code assumes 1D input arrays. Ensures that any extraneous dimensions # don't get in the way. Copies are created to avoid any problems with # referencing the original passed arguments. 
self.X_ORIG = np.atleast_1d(np.squeeze(np.array(x, copy=True))) self.Y_ORIG = np.atleast_1d(np.squeeze(np.array(y, copy=True))) self.Z_ORIG = np.atleast_1d(np.squeeze(np.array(z, copy=True))) self.VALUES = np.atleast_1d(np.squeeze(np.array(val, copy=True))) self.verbose = verbose self.enable_plotting = enable_plotting if self.enable_plotting and self.verbose: print("Plotting Enabled\n") self.XCENTER = (np.amax(self.X_ORIG) + np.amin(self.X_ORIG))/2.0 self.YCENTER = (np.amax(self.Y_ORIG) + np.amin(self.Y_ORIG))/2.0 self.ZCENTER = (np.amax(self.Z_ORIG) + np.amin(self.Z_ORIG))/2.0 self.anisotropy_scaling_y = anisotropy_scaling_y self.anisotropy_scaling_z = anisotropy_scaling_z self.anisotropy_angle_x = anisotropy_angle_x self.anisotropy_angle_y = anisotropy_angle_y self.anisotropy_angle_z = anisotropy_angle_z if self.verbose: print("Adjusting data for anisotropy...") self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = \ core.adjust_for_anisotropy_3d(np.copy(self.X_ORIG), np.copy(self.Y_ORIG), np.copy(self.Z_ORIG), self.XCENTER, self.YCENTER, self.ZCENTER, self.anisotropy_scaling_y, self.anisotropy_scaling_z, self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z) self.variogram_model = variogram_model if self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != 'custom': raise ValueError("Specified variogram model '%s' is not supported." % variogram_model) elif self.variogram_model == 'custom': if variogram_function is None or not callable(variogram_function): raise ValueError("Must specify callable function for custom variogram model.") else: self.variogram_function = variogram_function else: self.variogram_function = self.variogram_dict[self.variogram_model] if self.verbose: print("Initializing variogram model...") self.lags, self.semivariance, self.variogram_model_parameters = \ core.initialize_variogram_model_3d(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED, self.VALUES, self.variogram_model, variogram_parameters, self.variogram_function, nlags, weight) if self.verbose: if self.variogram_model == 'linear': print("Using '%s' Variogram Model" % 'linear') print("Slope:", self.variogram_model_parameters[0]) print("Nugget:", self.variogram_model_parameters[1], '\n') elif self.variogram_model == 'power': print("Using '%s' Variogram Model" % 'power') print("Scale:", self.variogram_model_parameters[0]) print("Exponent:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], '\n') elif self.variogram_model == 'custom': print("Using Custom Variogram Model") else: print("Using '%s' Variogram Model" % self.variogram_model) print("Sill:", self.variogram_model_parameters[0]) print("Range:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], '\n') if self.enable_plotting: self.display_variogram_model() if self.verbose: print("Calculating statistics on variogram model fit...") self.delta, self.sigma, self.epsilon = core.find_statistics_3d(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED, self.VALUES, self.variogram_function, self.variogram_model_parameters) self.Q1 = core.calcQ1(self.epsilon) self.Q2 = core.calcQ2(self.epsilon) self.cR = core.calc_cR(self.Q2, self.sigma) if self.verbose: print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR, '\n') def update_variogram_model(self, variogram_model, variogram_parameters=None, variogram_function=None, nlags=6, weight=False, anisotropy_scaling_y=1.0, anisotropy_scaling_z=1.0, anisotropy_angle_x=0.0, anisotropy_angle_y=0.0, 
anisotropy_angle_z=0.0): """Allows user to update variogram type and/or variogram model parameters.""" if anisotropy_scaling_y != self.anisotropy_scaling_y or anisotropy_scaling_z != self.anisotropy_scaling_z or \ anisotropy_angle_x != self.anisotropy_angle_x or anisotropy_angle_y != self.anisotropy_angle_y or \ anisotropy_angle_z != self.anisotropy_angle_z: if self.verbose: print("Adjusting data for anisotropy...") self.anisotropy_scaling_y = anisotropy_scaling_y self.anisotropy_scaling_z = anisotropy_scaling_z self.anisotropy_angle_x = anisotropy_angle_x self.anisotropy_angle_y = anisotropy_angle_y self.anisotropy_angle_z = anisotropy_angle_z self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED = \ core.adjust_for_anisotropy_3d(np.copy(self.X_ORIG), np.copy(self.Y_ORIG), np.copy(self.Z_ORIG), self.XCENTER, self.YCENTER, self.ZCENTER, self.anisotropy_scaling_y, self.anisotropy_scaling_z, self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z) self.variogram_model = variogram_model if self.variogram_model not in self.variogram_dict.keys() and self.variogram_model != 'custom': raise ValueError("Specified variogram model '%s' is not supported." % variogram_model) elif self.variogram_model == 'custom': if variogram_function is None or not callable(variogram_function): raise ValueError("Must specify callable function for custom variogram model.") else: self.variogram_function = variogram_function else: self.variogram_function = self.variogram_dict[self.variogram_model] if self.verbose: print("Updating variogram mode...") self.lags, self.semivariance, self.variogram_model_parameters = \ core.initialize_variogram_model_3d(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED, self.VALUES, self.variogram_model, variogram_parameters, self.variogram_function, nlags, weight) if self.verbose: if self.variogram_model == 'linear': print("Using '%s' Variogram Model" % 'linear') print("Slope:", self.variogram_model_parameters[0]) print("Nugget:", self.variogram_model_parameters[1], '\n') elif self.variogram_model == 'power': print("Using '%s' Variogram Model" % 'power') print("Scale:", self.variogram_model_parameters[0]) print("Exponent:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], '\n') elif self.variogram_model == 'custom': print("Using Custom Variogram Model") else: print("Using '%s' Variogram Model" % self.variogram_model) print("Sill:", self.variogram_model_parameters[0]) print("Range:", self.variogram_model_parameters[1]) print("Nugget:", self.variogram_model_parameters[2], '\n') if self.enable_plotting: self.display_variogram_model() if self.verbose: print("Calculating statistics on variogram model fit...") self.delta, self.sigma, self.epsilon = core.find_statistics_3d(self.X_ADJUSTED, self.Y_ADJUSTED, self.Z_ADJUSTED, self.VALUES, self.variogram_function, self.variogram_model_parameters) self.Q1 = core.calcQ1(self.epsilon) self.Q2 = core.calcQ2(self.epsilon) self.cR = core.calc_cR(self.Q2, self.sigma) if self.verbose: print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR, '\n') def display_variogram_model(self): """Displays variogram model with the actual binned data""" fig = plt.figure() ax = fig.add_subplot(111) ax.plot(self.lags, self.semivariance, 'r*') ax.plot(self.lags, self.variogram_function(self.variogram_model_parameters, self.lags), 'k-') plt.show() def switch_verbose(self): """Allows user to switch code talk-back on/off. 
Takes no arguments.""" self.verbose = not self.verbose def switch_plotting(self): """Allows user to switch plot display on/off. Takes no arguments.""" self.enable_plotting = not self.enable_plotting def get_epsilon_residuals(self): """Returns the epsilon residuals for the variogram fit.""" return self.epsilon def plot_epsilon_residuals(self): """Plots the epsilon residuals for the variogram fit.""" fig = plt.figure() ax = fig.add_subplot(111) ax.scatter(range(self.epsilon.size), self.epsilon, c='k', marker='*') ax.axhline(y=0.0) plt.show() def get_statistics(self): return self.Q1, self.Q2, self.cR def print_statistics(self): print("Q1 =", self.Q1) print("Q2 =", self.Q2) print("cR =", self.cR) def _get_kriging_matrix(self, n): """Assembles the kriging matrix.""" xyz = np.concatenate((self.X_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis], self.Z_ADJUSTED[:, np.newaxis]), axis=1) d = cdist(xyz, xyz, 'euclidean') a = np.zeros((n+1, n+1)) a[:n, :n] = - self.variogram_function(self.variogram_model_parameters, d) np.fill_diagonal(a, 0.) a[n, :] = 1.0 a[:, n] = 1.0 a[n, n] = 0.0 return a def _exec_vector(self, a, bd, mask): """Solves the kriging system as a vectorized operation. This method can take a lot of memory for large grids and/or large datasets.""" npt = bd.shape[0] n = self.X_ADJUSTED.shape[0] zero_index = None zero_value = False a_inv = scipy.linalg.inv(a) if np.any(np.absolute(bd) <= self.eps): zero_value = True zero_index = np.where(np.absolute(bd) <= self.eps) b = np.zeros((npt, n+1, 1)) b[:, :n, 0] = - self.variogram_function(self.variogram_model_parameters, bd) if zero_value: b[zero_index[0], zero_index[1], 0] = 0.0 b[:, n, 0] = 1.0 if (~mask).any(): mask_b = np.repeat(mask[:, np.newaxis, np.newaxis], n+1, axis=1) b = np.ma.array(b, mask=mask_b) x = np.dot(a_inv, b.reshape((npt, n+1)).T).reshape((1, n+1, npt)).T kvalues = np.sum(x[:, :n, 0] * self.VALUES, axis=1) sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1) return kvalues, sigmasq def _exec_loop(self, a, bd_all, mask): """Solves the kriging system by looping over all specified points. Less memory-intensive, but involves a Python-level loop.""" npt = bd_all.shape[0] n = self.X_ADJUSTED.shape[0] kvalues = np.zeros(npt) sigmasq = np.zeros(npt) a_inv = scipy.linalg.inv(a) for j in np.nonzero(~mask)[0]: # Note that this is the same thing as range(npt) if mask is not defined, bd = bd_all[j] # otherwise it takes the non-masked elements. if np.any(np.absolute(bd) <= self.eps): zero_value = True zero_index = np.where(np.absolute(bd) <= self.eps) else: zero_value = False zero_index = None b = np.zeros((n+1, 1)) b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd) if zero_value: b[zero_index[0], 0] = 0.0 b[n, 0] = 1.0 x = np.dot(a_inv, b) kvalues[j] = np.sum(x[:n, 0] * self.VALUES) sigmasq[j] = np.sum(x[:, 0] * -b[:, 0]) return kvalues, sigmasq def _exec_loop_moving_window(self, a_all, bd_all, mask, bd_idx): """Solves the kriging system by looping over all specified points. Uses only a certain number of closest points. Not very memory intensive, but the loop is done in pure Python. 
""" import scipy.linalg.lapack npt = bd_all.shape[0] n = bd_idx.shape[1] kvalues = np.zeros(npt) sigmasq = np.zeros(npt) for i in np.nonzero(~mask)[0]: b_selector = bd_idx[i] bd = bd_all[i] a_selector = np.concatenate((b_selector, np.array([a_all.shape[0] - 1]))) a = a_all[a_selector[:, None], a_selector] if np.any(np.absolute(bd) <= self.eps): zero_value = True zero_index = np.where(np.absolute(bd) <= self.eps) else: zero_value = False zero_index = None b = np.zeros((n+1, 1)) b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd) if zero_value: b[zero_index[0], 0] = 0.0 b[n, 0] = 1.0 x = scipy.linalg.solve(a, b) kvalues[i] = x[:n, 0].dot(self.VALUES[b_selector]) sigmasq[i] = - x[:, 0].dot(b[:, 0]) return kvalues, sigmasq def execute(self, style, xpoints, ypoints, zpoints, mask=None, backend='vectorized', n_closest_points=None): """Calculates a kriged grid and the associated variance. This is now the method that performs the main kriging calculation. Note that currently measurements (i.e., z values) are considered 'exact'. This means that, when a specified coordinate for interpolation is exactly the same as one of the data points, the variogram evaluated at the point is forced to be zero. Also, the diagonal of the kriging matrix is also always forced to be zero. In forcing the variogram evaluated at data points to be zero, we are effectively saying that there is no variance at that point (no uncertainty, so the value is 'exact'). In the future, the code may include an extra 'exact_values' boolean flag that can be adjusted to specify whether to treat the measurements as 'exact'. Setting the flag to false would indicate that the variogram should not be forced to be zero at zero distance (i.e., when evaluated at data points). Instead, the uncertainty in the point will be equal to the nugget. This would mean that the diagonal of the kriging matrix would be set to the nugget instead of to zero. Inputs: style (string): Specifies how to treat input kriging points. Specifying 'grid' treats xpoints, ypoints, and zpoints as arrays of x, y, and z coordinates that define a rectangular grid. Specifying 'points' treats xpoints, ypoints, and zpoints as arrays that provide coordinates at which to solve the kriging system. Specifying 'masked' treats xpoints, ypoints, and zpoints as arrays of x, y, and z coordinates that define a rectangular grid and uses mask to only evaluate specific points in the grid. xpoints (array-like, dim N): If style is specific as 'grid' or 'masked', x-coordinates of LxMxN grid. If style is specified as 'points', x-coordinates of specific points at which to solve kriging system. ypoints (array-like, dim M): If style is specified as 'grid' or 'masked', y-coordinates of LxMxN grid. If style is specified as 'points', y-coordinates of specific points at which to solve kriging system. Note that in this case, xpoints, ypoints, and zpoints must have the same dimensions (i.e., L = M = N). zpoints (array-like, dim L): If style is specified as 'grid' or 'masked', z-coordinates of LxMxN grid. If style is specified as 'points', z-coordinates of specific points at which to solve kriging system. Note that in this case, xpoints, ypoints, and zpoints must have the same dimensions (i.e., L = M = N). mask (boolean array, dim LxMxN, optional): Specifies the points in the rectangular grid defined by xpoints, ypoints, zpoints that are to be excluded in the kriging calculations. Must be provided if style is specified as 'masked'. 
False indicates that the point should not be masked, so the kriging system will be solved at the point. True indicates that the point should be masked, so the kriging system should will not be solved at the point. backend (string, optional): Specifies which approach to use in kriging. Specifying 'vectorized' will solve the entire kriging problem at once in a vectorized operation. This approach is faster but also can consume a significant amount of memory for large grids and/or large datasets. Specifying 'loop' will loop through each point at which the kriging system is to be solved. This approach is slower but also less memory-intensive. Default is 'vectorized'. n_closest_points (int, optional): For kriging with a moving window, specifies the number of nearby points to use in the calculation. This can speed up the calculation for large datasets, but should be used with caution. As Kitanidis notes, kriging with a moving window can produce unexpected oddities if the variogram model is not carefully chosen. Outputs: kvalues (numpy array, dim LxMxN or dim Nx1): Interpolated values of specified grid or at the specified set of points. If style was specified as 'masked', kvalues will be a numpy masked array. sigmasq (numpy array, dim LxMxN or dim Nx1): Variance at specified grid points or at the specified set of points. If style was specified as 'masked', sigmasq will be a numpy masked array. """ if self.verbose: print("Executing Ordinary Kriging...\n") if style != 'grid' and style != 'masked' and style != 'points': raise ValueError("style argument must be 'grid', 'points', or 'masked'") xpts = np.atleast_1d(np.squeeze(np.array(xpoints, copy=True))) ypts = np.atleast_1d(np.squeeze(np.array(ypoints, copy=True))) zpts = np.atleast_1d(np.squeeze(np.array(zpoints, copy=True))) n = self.X_ADJUSTED.shape[0] nx = xpts.size ny = ypts.size nz = zpts.size a = self._get_kriging_matrix(n) if style in ['grid', 'masked']: if style == 'masked': if mask is None: raise IOError("Must specify boolean masking array when style is 'masked'.") if mask.ndim != 3: raise ValueError("Mask is not three-dimensional.") if mask.shape[0] != nz or mask.shape[1] != ny or mask.shape[2] != nx: if mask.shape[0] == nx and mask.shape[2] == nz and mask.shape[1] == ny: mask = mask.swapaxes(0, 2) else: raise ValueError("Mask dimensions do not match specified grid dimensions.") mask = mask.flatten() npt = nz * ny * nx grid_z, grid_y, grid_x = np.meshgrid(zpts, ypts, xpts, indexing='ij') xpts = grid_x.flatten() ypts = grid_y.flatten() zpts = grid_z.flatten() elif style == 'points': if xpts.size != ypts.size and ypts.size != zpts.size: raise ValueError("xpoints, ypoints, and zpoints must have same dimensions " "when treated as listing discrete points.") npt = nx else: raise ValueError("style argument must be 'grid', 'points', or 'masked'") xpts, ypts, zpts = core.adjust_for_anisotropy_3d(xpts, ypts, zpts, self.XCENTER, self.YCENTER, self.ZCENTER, self.anisotropy_scaling_y, self.anisotropy_scaling_z, self.anisotropy_angle_x, self.anisotropy_angle_y, self.anisotropy_angle_z) if style != 'masked': mask = np.zeros(npt, dtype='bool') xyz_points = np.concatenate((zpts[:, np.newaxis], ypts[:, np.newaxis], xpts[:, np.newaxis]), axis=1) xyz_data = np.concatenate((self.Z_ADJUSTED[:, np.newaxis], self.Y_ADJUSTED[:, np.newaxis], self.X_ADJUSTED[:, np.newaxis]), axis=1) bd = cdist(xyz_points, xyz_data, 'euclidean') if n_closest_points is not None: from scipy.spatial import cKDTree tree = cKDTree(xyz_data) bd, bd_idx = tree.query(xyz_points, 
k=n_closest_points, eps=0.0) if backend == 'loop': kvalues, sigmasq = self._exec_loop_moving_window(a, bd, mask, bd_idx) else: raise ValueError("Specified backend '{}' not supported for moving window.".format(backend)) else: if backend == 'vectorized': kvalues, sigmasq = self._exec_vector(a, bd, mask) elif backend == 'loop': kvalues, sigmasq = self._exec_loop(a, bd, mask) else: raise ValueError('Specified backend {} is not supported for 3D ordinary kriging.'.format(backend)) if style == 'masked': kvalues = np.ma.array(kvalues, mask=mask) sigmasq = np.ma.array(sigmasq, mask=mask) if style in ['masked', 'grid']: kvalues = kvalues.reshape((nz, ny, nx)) sigmasq = sigmasq.reshape((nz, ny, nx)) return kvalues, sigmasq
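# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a hedged
# example of how OrdinaryKriging3D might be driven, assuming this module
# is importable as part of its package. The data values and grid below
# are made up for demonstration only; nothing here runs on import.
# ----------------------------------------------------------------------
def _example_usage():
    """Minimal sketch: krige a handful of scattered 3D points onto a small grid."""
    # Hypothetical scattered sample data.
    x = np.array([0.1, 0.2, 0.1, 0.5, 0.9])
    y = np.array([0.3, 0.1, 0.8, 0.5, 0.2])
    z = np.array([0.4, 0.7, 0.2, 0.9, 0.1])
    val = np.array([1.0, 0.8, 1.3, 0.5, 1.1])

    # Fit the kriging system with the default linear variogram model.
    ok3d = OrdinaryKriging3D(x, y, z, val, variogram_model='linear')

    # Regular grid on which to interpolate; for style='grid', execute()
    # returns the kriged values and the kriging variance, each shaped
    # (nz, ny, nx).
    gridx = np.linspace(0.0, 1.0, 5)
    gridy = np.linspace(0.0, 1.0, 5)
    gridz = np.linspace(0.0, 1.0, 5)
    kvalues, sigmasq = ok3d.execute('grid', gridx, gridy, gridz,
                                    backend='vectorized')
    return kvalues, sigmasq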
fcf1f6548924e0a7b8d556c5614e9aee7511b172
ffea8d9c5a742170fb21c5c95e3f84ce9ab2f3bd
/algorithms_find_unique_chars.py
43ee638f4f6311ce56d55f48d9b623bd298e71d9
[]
no_license
catechnix/greentree
3b8583bd4ccb1a506f3e24f03a6c1592f1664518
ffcd7b1b26fa6552b4f58ac9645151afb591628b
refs/heads/master
2023-04-08T23:41:15.502014
2021-04-03T03:48:07
2021-04-03T03:48:07
288,299,640
0
0
null
null
null
null
UTF-8
Python
false
false
1,043
py
# Given a string, find the first non-repeating character in it and return its index. # If it doesn't exist, return -1. # Note: all the input strings are already lowercase. #Approach 1 def solution(s): frequency = {} for i in s: if i not in frequency: frequency[i] = 1 else: frequency[i] +=1 for i in range(len(s)): if frequency[s[i]] == 1: return i return -1 print(solution('alphabet')) print(solution('barbados')) print(solution('crunchy')) print('###') #Approach 2 import collections def solution(s): # build hash map : character and how often it appears count = collections.Counter(s) # <-- gives back a dictionary with words occurrence count #Counter({'l': 1, 'e': 3, 't': 1, 'c': 1, 'o': 1, 'd': 1}) # find the index for idx, ch in enumerate(s): if count[ch] == 1: return idx return -1 print(solution('alphabet')) print(solution('barbados')) print(solution('crunchy'))
537cc1b377a1a29fe985de13d1284703ca373594
ebcc40516adba151e6a1c772223b0726899a26eb
/slicedimage/url/__init__.py
903fa8c5d102018aed1a5b5cd312397b50a9e499
[ "MIT" ]
permissive
spacetx/slicedimage
acf4a767f87b6ab78e657d85efad22ee241939f4
eb8e1d3899628db66cffed1370f2a7e6dd729c4f
refs/heads/master
2021-04-09T10:53:15.057821
2020-05-26T17:40:11
2020-05-26T17:40:11
125,316,414
7
4
MIT
2020-05-26T17:40:15
2018-03-15T05:24:24
Python
UTF-8
Python
false
false
19
py
from . import path
ade4325ffae0867072eb07d5294917e637b30a23
de4d26a724b966ca8d0b95ec3063b5b784129028
/UserData/UserApp/migrations/0002_auto_20190402_0505.py
cc02790701761a7d0486f6803b359929ae666412
[]
no_license
ChetanKoranga/UserRESTapi
88904a326a093842ad68628eed98ea5ca2a95de0
11342bef21be163c4faf79744e90e9848e3a89bf
refs/heads/master
2020-05-04T00:01:22.998117
2019-04-02T05:51:18
2019-04-02T05:51:18
178,876,580
0
0
null
null
null
null
UTF-8
Python
false
false
373
py
# Generated by Django 2.2 on 2019-04-02 05:05 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('UserApp', '0001_initial'), ] operations = [ migrations.AlterField( model_name='usermodel', name='zip', field=models.CharField(max_length=10), ), ]
e1ff873dc7162e68805ea496e18d054103fd202b
6246f61c6bb4143fc88d74c72f6d2e7936433ee9
/saper.py
d8ce9e5291c0319c76368b2ce8e78d6c68c45df6
[]
no_license
aramann/mineswapper
0663d1189d38ec0704d39e6b97f8690e80367b38
8fab851d7e948924e88c4101bc35e4745d7971b3
refs/heads/master
2021-06-13T20:36:39.147322
2017-03-06T14:28:31
2017-03-06T14:28:31
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,094
py
import random import tkinter as tk def gen_bomb(field): i = random.randint(1, m - 1) j = random.randint(1, n - 1) while field[i][j] == 'b': i = random.randint(1, m - 1) j = random.randint(1, n - 1) field[i][j] = 'b' return field # if field[i][j] == 'b': # return gen_field(field) # else: # field[i][j] = 'b' # return field def gen_field(field): for i in range(1, m): for j in range(1, n): cnt = 0 if field[i][j] == 'b': continue else: if field[i - 1][j - 1] == 'b': cnt += 1 if field[i - 1][j] == 'b': cnt += 1 if field[i - 1][j + 1] == 'b': cnt += 1 if field[i][j - 1] == 'b': cnt += 1 if field[i][j + 1] == 'b': cnt += 1 if field[i + 1][j - 1] == 'b': cnt += 1 if field[i + 1][j] == 'b': cnt += 1 if field[i + 1][j + 1] == 'b': cnt += 1 field[i][j] = cnt return field def opencell(i, j): if field[i][j] == 'b': for k in range(1, n): for l in range(1, m): if field[k][l] == 'b': buttons[k][l]["bg"] = 'red' buttons[k][l]["text"] = 'bomb' # exit() if field[i][j] == -1: return if field[i][j] == 0 and (i, j) not in walken: walken.append((i, j)) opencell(i - 1, j - 1) opencell(i - 1, j) opencell(i - 1, j - 1) opencell(i, j - 1) opencell(i, j + 1) opencell(i + 1, j - 1) opencell(i + 1, j) opencell(i + 1, j + 1) if field[i][j] == 0: buttons[i][j]["text"] = 'no' else: buttons[i][j]["text"] = field[i][j] if buttons[i][j] == 1: buttons[i][j]["fg"] = 'azure' elif buttons[i][j] == 2: buttons[i][j]["fg"] = 'green' elif buttons[i][j] == 3: buttons[i][j]["fg"] = 'red' elif buttons[i][j] == 4: buttons[i][j]["fg"] = 'purple' elif buttons[i][j] == 5: buttons[i][j]["fg"] = 'brown' elif buttons[i][j] == 6: buttons[i][j]["fg"] = 'yellow' elif buttons[i][j] == 7: buttons[i][j]["fg"] = 'orange' elif buttons[i][j] == 8: buttons[i][j]["fg"] = 'white' buttons[i][j]["bg"] = 'grey' def setflag(i, j): if buttons[i][j]["text"] == 'b': buttons[i][j]["text"] = '?' 
elif buttons[i][j]["text"] == '?': buttons[i][j]["text"] = '' else: buttons[i][j]["text"] = 'b' def _opencell(i, j): def opencell_(event): opencell(i, j) return opencell_ def _setflag(i, j): def setflag_(event): setflag(i, j) return setflag_ root = tk.Tk() print('Select level of difficulty(1 - easy (9x9 10 mines), 2 - medium (16x16 40 mines), 3 - hard (30x16 99 mines), 4 - custom') lvl = int(input()) if lvl == 1: n, m, bombs = 9, 9, 10 elif lvl == 2: n, m, bombs = 16, 16, 40 elif lvl == 3: n, m, bombs = 30, 16, 99 else: print('Enter size of the field (x, y) and number of bombs, spliting with space') n, m, bombs = map(int, input().split()) if n * m <= bombs: bombs = n * m - 1 field = [[0 for i in range(n + 1)] for j in range(m + 1)] for i in range(n + 1): field[0][i] = -1 field[-1][i] = -1 for i in range(m + 1): field[i][0] = -1 field[i][-1] = -1 for i in range(bombs): field = gen_bomb(field) field = gen_field(field) for i in range(m + 1): print(*field[i]) buttons = [[0 for i in range(0, n + 1)] for j in range(0, m + 1)] for i in range(n + 1): buttons[0][i] = -1 buttons[-1][i] = -1 for i in range(m + 1): buttons[i][0] = -1 buttons[i][-1] = -1 for i in range(1, m): for j in range(1, n): btn = tk.Button(root, text='', bg='grey') btn.bind("<Button-1>", _opencell(i, j)) btn.bind("<Button-2>", _setflag(i, j)) btn.grid(row=i, column=j) buttons[i][j] = btn walken = [] # btn = tk.Button(root, #родительское окно # text="Click me", #надпись на кнопке # width=30,height=5, #ширина и высота # bg="white",fg="black") # btn.bind("<Button-1>", opencell) # btn.pack() root.mainloop() # root = tk.Tk() # def Hello(event): # print("Yet another hello world") # # btn = tk.Button(root, #родительское окно # text="Click me", #надпись на кнопке # width=30,height=5, #ширина и высота # bg="white",fg="black") #цвет фона и надписи # btn.bind("<Button-1>", Hello) #при нажатии ЛКМ на кнопку вызывается функция Hello # btn.pack() #расположить кнопку на главном окне # root.mainloop()
b1755b3e3660b7295bc811bc77f3610c15e76d96
5f8b40db6465a2d28a2369d8b99c8d0db9d06e90
/samples/pad.py
9632098c4f8b2c26d1502e92c9d4ec77e99e9779
[]
no_license
asherkobin/GameOfLife
4d816ef8b7fcd46ddb3ef055ccfd172a53db9446
4fc9a9e568b58bb412b0586698c614211efc168a
refs/heads/master
2022-11-24T02:10:30.081982
2020-07-13T21:35:32
2020-07-13T21:35:32
267,113,638
0
0
null
null
null
null
UTF-8
Python
false
false
1,004
py
import curses import curses.panel def main(win): global stdscr global max_y,max_x,mult stdscr = win curses.initscr() curses.cbreak() curses.noecho() stdscr.keypad(1) curses.curs_set(0) y,x = 0,1 mult = 40 maxcoords = stdscr.getmaxyx() max_y, max_x = maxcoords[y],maxcoords[x] pad = curses.newpad(max_y,2000) drawstuff(pad) running = True while running: mvmt = stdscr.getch() if mvmt == ord('a'): mult += 1 pad.refresh(1,1,1,1,max_y,mult) if mvmt == ord('b'): mult += 1 pad.refresh(1,1,1,1,max_y,mult) if mvmt == ord('Q'): running = False return curses.doupdate() curses.endwin() def drawstuff(pad): for i in range(max_x): for j in range(max_y): if i % 2 == 0: pad.addch(j,i,'+') else: pad.addch(j,i,'-') if __name__ == '__main__': curses.wrapper(main)
843eaba7ba980ea2096e0e19da78db3ac8be4534
41682d817cd6aab0e73e9c0733a515d77ae7c540
/worker.py
8d3c50e7af3f697054a683db8b6a68711b57b3c1
[]
no_license
subhamsps/testRepo
d2cd328ec40ab5b37b220a0f449f8cbcf14993bc
3046507cb893e0d58a8722561118b494e06b5c3a
refs/heads/master
2020-04-29T02:33:56.717073
2019-04-08T17:47:20
2019-04-08T17:47:20
175,773,978
0
0
null
null
null
null
UTF-8
Python
false
false
839
py
# A simple Python program for traversal of a linked list # Node class class Node: # Function to initialise the node object def __init__(self, data): self.data = data # Assign data self.next = None # Initialize next as null # Linked List class contains a Node object class LinkedList: # Function to initialize head def __init__(self): self.head = None # This function prints contents of linked list # starting from head def printList(self): temp = self.head while (temp): print(temp.data) temp = temp.next # Code execution starts here if __name__=='__main__': # Start with the empty list llist = LinkedList() llist.head = Node(1) second = Node(2) third = Node(3) llist.head.next = second; # Link first node with second second.next = third; # Link second node with the third node llist.printList()
51c188fc3582d89f30984fe761bd4de74c07d286
711756b796d68035dc6a39060515200d1d37a274
/output_cog/optimized_24247.py
f41dd9eb54effc2fae8b2b76ddc93da38babc1a1
[]
no_license
batxes/exocyst_scripts
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
a6c487d5053b9b67db22c59865e4ef2417e53030
refs/heads/master
2020-06-16T20:16:24.840725
2016-11-30T16:23:16
2016-11-30T16:23:16
75,075,164
0
0
null
null
null
null
UTF-8
Python
false
false
10,840
py
import _surface import chimera try: import chimera.runCommand except: pass from VolumePath import markerset as ms try: from VolumePath import Marker_Set, Link new_marker_set=Marker_Set except: from VolumePath import volume_path_dialog d= volume_path_dialog(True) new_marker_set= d.new_marker_set marker_sets={} surf_sets={} if "Cog2_GFPN" not in marker_sets: s=new_marker_set('Cog2_GFPN') marker_sets["Cog2_GFPN"]=s s= marker_sets["Cog2_GFPN"] mark=s.place_marker((591.127, 550.172, 433.724), (0.89, 0.1, 0.1), 18.4716) if "Cog2_0" not in marker_sets: s=new_marker_set('Cog2_0') marker_sets["Cog2_0"]=s s= marker_sets["Cog2_0"] mark=s.place_marker((558.151, 528.977, 490.027), (0.89, 0.1, 0.1), 17.1475) if "Cog2_1" not in marker_sets: s=new_marker_set('Cog2_1') marker_sets["Cog2_1"]=s s= marker_sets["Cog2_1"] mark=s.place_marker((514.189, 493.443, 549.935), (0.89, 0.1, 0.1), 17.1475) if "Cog2_GFPC" not in marker_sets: s=new_marker_set('Cog2_GFPC') marker_sets["Cog2_GFPC"]=s s= marker_sets["Cog2_GFPC"] mark=s.place_marker((541.078, 422.008, 433.053), (0.89, 0.1, 0.1), 18.4716) if "Cog2_Anch" not in marker_sets: s=new_marker_set('Cog2_Anch') marker_sets["Cog2_Anch"]=s s= marker_sets["Cog2_Anch"] mark=s.place_marker((416.095, 453.45, 712.259), (0.89, 0.1, 0.1), 18.4716) if "Cog3_GFPN" not in marker_sets: s=new_marker_set('Cog3_GFPN') marker_sets["Cog3_GFPN"]=s s= marker_sets["Cog3_GFPN"] mark=s.place_marker((560.441, 539.013, 466.666), (1, 1, 0), 18.4716) if "Cog3_0" not in marker_sets: s=new_marker_set('Cog3_0') marker_sets["Cog3_0"]=s s= marker_sets["Cog3_0"] mark=s.place_marker((560.484, 539.715, 465.403), (1, 1, 0.2), 17.1475) if "Cog3_1" not in marker_sets: s=new_marker_set('Cog3_1') marker_sets["Cog3_1"]=s s= marker_sets["Cog3_1"] mark=s.place_marker((552.11, 550.537, 440.768), (1, 1, 0.2), 17.1475) if "Cog3_2" not in marker_sets: s=new_marker_set('Cog3_2') marker_sets["Cog3_2"]=s s= marker_sets["Cog3_2"] mark=s.place_marker((528.693, 538.373, 431.649), (1, 1, 0.2), 17.1475) if "Cog3_3" not in marker_sets: s=new_marker_set('Cog3_3') marker_sets["Cog3_3"]=s s= marker_sets["Cog3_3"] mark=s.place_marker((511.774, 558.115, 441.881), (1, 1, 0.2), 17.1475) if "Cog3_4" not in marker_sets: s=new_marker_set('Cog3_4') marker_sets["Cog3_4"]=s s= marker_sets["Cog3_4"] mark=s.place_marker((491.711, 546.71, 425.922), (1, 1, 0.2), 17.1475) if "Cog3_5" not in marker_sets: s=new_marker_set('Cog3_5') marker_sets["Cog3_5"]=s s= marker_sets["Cog3_5"] mark=s.place_marker((490.69, 571.213, 412.926), (1, 1, 0.2), 17.1475) if "Cog3_GFPC" not in marker_sets: s=new_marker_set('Cog3_GFPC') marker_sets["Cog3_GFPC"]=s s= marker_sets["Cog3_GFPC"] mark=s.place_marker((585.097, 551.597, 459.839), (1, 1, 0.4), 18.4716) if "Cog3_Anch" not in marker_sets: s=new_marker_set('Cog3_Anch') marker_sets["Cog3_Anch"]=s s= marker_sets["Cog3_Anch"] mark=s.place_marker((395.497, 593.978, 372.046), (1, 1, 0.4), 18.4716) if "Cog4_GFPN" not in marker_sets: s=new_marker_set('Cog4_GFPN') marker_sets["Cog4_GFPN"]=s s= marker_sets["Cog4_GFPN"] mark=s.place_marker((354.731, 547.327, 564.438), (0, 0, 0.8), 18.4716) if "Cog4_0" not in marker_sets: s=new_marker_set('Cog4_0') marker_sets["Cog4_0"]=s s= marker_sets["Cog4_0"] mark=s.place_marker((354.731, 547.327, 564.438), (0, 0, 0.8), 17.1475) if "Cog4_1" not in marker_sets: s=new_marker_set('Cog4_1') marker_sets["Cog4_1"]=s s= marker_sets["Cog4_1"] mark=s.place_marker((381.51, 546.932, 552.901), (0, 0, 0.8), 17.1475) if "Cog4_2" not in marker_sets: s=new_marker_set('Cog4_2') marker_sets["Cog4_2"]=s s= 
marker_sets["Cog4_2"] mark=s.place_marker((408.798, 544.379, 541.772), (0, 0, 0.8), 17.1475) if "Cog4_3" not in marker_sets: s=new_marker_set('Cog4_3') marker_sets["Cog4_3"]=s s= marker_sets["Cog4_3"] mark=s.place_marker((436.698, 538.561, 532.225), (0, 0, 0.8), 17.1475) if "Cog4_4" not in marker_sets: s=new_marker_set('Cog4_4') marker_sets["Cog4_4"]=s s= marker_sets["Cog4_4"] mark=s.place_marker((464.967, 534.516, 524.764), (0, 0, 0.8), 17.1475) if "Cog4_5" not in marker_sets: s=new_marker_set('Cog4_5') marker_sets["Cog4_5"]=s s= marker_sets["Cog4_5"] mark=s.place_marker((492.885, 537.772, 518.397), (0, 0, 0.8), 17.1475) if "Cog4_6" not in marker_sets: s=new_marker_set('Cog4_6') marker_sets["Cog4_6"]=s s= marker_sets["Cog4_6"] mark=s.place_marker((513.156, 551.557, 503.757), (0, 0, 0.8), 17.1475) if "Cog4_GFPC" not in marker_sets: s=new_marker_set('Cog4_GFPC') marker_sets["Cog4_GFPC"]=s s= marker_sets["Cog4_GFPC"] mark=s.place_marker((263.588, 534.134, 438.154), (0, 0, 0.8), 18.4716) if "Cog4_Anch" not in marker_sets: s=new_marker_set('Cog4_Anch') marker_sets["Cog4_Anch"]=s s= marker_sets["Cog4_Anch"] mark=s.place_marker((762.458, 564.868, 573.574), (0, 0, 0.8), 18.4716) if "Cog5_GFPN" not in marker_sets: s=new_marker_set('Cog5_GFPN') marker_sets["Cog5_GFPN"]=s s= marker_sets["Cog5_GFPN"] mark=s.place_marker((516.971, 541.55, 550.287), (0.3, 0.3, 0.3), 18.4716) if "Cog5_0" not in marker_sets: s=new_marker_set('Cog5_0') marker_sets["Cog5_0"]=s s= marker_sets["Cog5_0"] mark=s.place_marker((516.971, 541.55, 550.287), (0.3, 0.3, 0.3), 17.1475) if "Cog5_1" not in marker_sets: s=new_marker_set('Cog5_1') marker_sets["Cog5_1"]=s s= marker_sets["Cog5_1"] mark=s.place_marker((524.701, 520.043, 532.301), (0.3, 0.3, 0.3), 17.1475) if "Cog5_2" not in marker_sets: s=new_marker_set('Cog5_2') marker_sets["Cog5_2"]=s s= marker_sets["Cog5_2"] mark=s.place_marker((518.156, 496.095, 517.283), (0.3, 0.3, 0.3), 17.1475) if "Cog5_3" not in marker_sets: s=new_marker_set('Cog5_3') marker_sets["Cog5_3"]=s s= marker_sets["Cog5_3"] mark=s.place_marker((522.974, 469.313, 527.494), (0.3, 0.3, 0.3), 17.1475) if "Cog5_GFPC" not in marker_sets: s=new_marker_set('Cog5_GFPC') marker_sets["Cog5_GFPC"]=s s= marker_sets["Cog5_GFPC"] mark=s.place_marker((597.329, 492.17, 431.012), (0.3, 0.3, 0.3), 18.4716) if "Cog5_Anch" not in marker_sets: s=new_marker_set('Cog5_Anch') marker_sets["Cog5_Anch"]=s s= marker_sets["Cog5_Anch"] mark=s.place_marker((450.148, 440.633, 626.042), (0.3, 0.3, 0.3), 18.4716) if "Cog6_GFPN" not in marker_sets: s=new_marker_set('Cog6_GFPN') marker_sets["Cog6_GFPN"]=s s= marker_sets["Cog6_GFPN"] mark=s.place_marker((567.05, 508.111, 472.181), (0.21, 0.49, 0.72), 18.4716) if "Cog6_0" not in marker_sets: s=new_marker_set('Cog6_0') marker_sets["Cog6_0"]=s s= marker_sets["Cog6_0"] mark=s.place_marker((567.18, 507.83, 472.003), (0.21, 0.49, 0.72), 17.1475) if "Cog6_1" not in marker_sets: s=new_marker_set('Cog6_1') marker_sets["Cog6_1"]=s s= marker_sets["Cog6_1"] mark=s.place_marker((539.941, 505.099, 467.782), (0.21, 0.49, 0.72), 17.1475) if "Cog6_2" not in marker_sets: s=new_marker_set('Cog6_2') marker_sets["Cog6_2"]=s s= marker_sets["Cog6_2"] mark=s.place_marker((516.22, 513.736, 455.831), (0.21, 0.49, 0.72), 17.1475) if "Cog6_3" not in marker_sets: s=new_marker_set('Cog6_3') marker_sets["Cog6_3"]=s s= marker_sets["Cog6_3"] mark=s.place_marker((495.42, 532.128, 457.926), (0.21, 0.49, 0.72), 17.1475) if "Cog6_4" not in marker_sets: s=new_marker_set('Cog6_4') marker_sets["Cog6_4"]=s s= marker_sets["Cog6_4"] 
mark=s.place_marker((478.453, 554.246, 457.003), (0.21, 0.49, 0.72), 17.1475) if "Cog6_5" not in marker_sets: s=new_marker_set('Cog6_5') marker_sets["Cog6_5"]=s s= marker_sets["Cog6_5"] mark=s.place_marker((483.011, 579.139, 445.049), (0.21, 0.49, 0.72), 17.1475) if "Cog6_6" not in marker_sets: s=new_marker_set('Cog6_6') marker_sets["Cog6_6"]=s s= marker_sets["Cog6_6"] mark=s.place_marker((500.654, 596.305, 432.034), (0.21, 0.49, 0.72), 17.1475) if "Cog6_GFPC" not in marker_sets: s=new_marker_set('Cog6_GFPC') marker_sets["Cog6_GFPC"]=s s= marker_sets["Cog6_GFPC"] mark=s.place_marker((545.229, 600.839, 505.286), (0.21, 0.49, 0.72), 18.4716) if "Cog6_Anch" not in marker_sets: s=new_marker_set('Cog6_Anch') marker_sets["Cog6_Anch"]=s s= marker_sets["Cog6_Anch"] mark=s.place_marker((453.3, 585.077, 359.443), (0.21, 0.49, 0.72), 18.4716) if "Cog7_GFPN" not in marker_sets: s=new_marker_set('Cog7_GFPN') marker_sets["Cog7_GFPN"]=s s= marker_sets["Cog7_GFPN"] mark=s.place_marker((569.881, 572.648, 532.303), (0.7, 0.7, 0.7), 18.4716) if "Cog7_0" not in marker_sets: s=new_marker_set('Cog7_0') marker_sets["Cog7_0"]=s s= marker_sets["Cog7_0"] mark=s.place_marker((564.77, 547.607, 527.181), (0.7, 0.7, 0.7), 17.1475) if "Cog7_1" not in marker_sets: s=new_marker_set('Cog7_1') marker_sets["Cog7_1"]=s s= marker_sets["Cog7_1"] mark=s.place_marker((551.494, 493.359, 514.045), (0.7, 0.7, 0.7), 17.1475) if "Cog7_2" not in marker_sets: s=new_marker_set('Cog7_2') marker_sets["Cog7_2"]=s s= marker_sets["Cog7_2"] mark=s.place_marker((536.082, 439.216, 501.45), (0.7, 0.7, 0.7), 17.1475) if "Cog7_GFPC" not in marker_sets: s=new_marker_set('Cog7_GFPC') marker_sets["Cog7_GFPC"]=s s= marker_sets["Cog7_GFPC"] mark=s.place_marker((608.487, 438.957, 465.359), (0.7, 0.7, 0.7), 18.4716) if "Cog7_Anch" not in marker_sets: s=new_marker_set('Cog7_Anch') marker_sets["Cog7_Anch"]=s s= marker_sets["Cog7_Anch"] mark=s.place_marker((461.56, 366.071, 509.789), (0.7, 0.7, 0.7), 18.4716) if "Cog8_0" not in marker_sets: s=new_marker_set('Cog8_0') marker_sets["Cog8_0"]=s s= marker_sets["Cog8_0"] mark=s.place_marker((575.251, 474.626, 465.989), (1, 0.5, 0), 17.1475) if "Cog8_1" not in marker_sets: s=new_marker_set('Cog8_1') marker_sets["Cog8_1"]=s s= marker_sets["Cog8_1"] mark=s.place_marker((595.039, 471.362, 485.911), (1, 0.5, 0), 17.1475) if "Cog8_2" not in marker_sets: s=new_marker_set('Cog8_2') marker_sets["Cog8_2"]=s s= marker_sets["Cog8_2"] mark=s.place_marker((591.024, 491.415, 505.418), (1, 0.5, 0), 17.1475) if "Cog8_3" not in marker_sets: s=new_marker_set('Cog8_3') marker_sets["Cog8_3"]=s s= marker_sets["Cog8_3"] mark=s.place_marker((580.611, 493.353, 531.521), (1, 0.5, 0), 17.1475) if "Cog8_4" not in marker_sets: s=new_marker_set('Cog8_4') marker_sets["Cog8_4"]=s s= marker_sets["Cog8_4"] mark=s.place_marker((564.708, 490.26, 555.028), (1, 0.5, 0), 17.1475) if "Cog8_5" not in marker_sets: s=new_marker_set('Cog8_5') marker_sets["Cog8_5"]=s s= marker_sets["Cog8_5"] mark=s.place_marker((553.024, 482.707, 580.407), (1, 0.5, 0), 17.1475) if "Cog8_GFPC" not in marker_sets: s=new_marker_set('Cog8_GFPC') marker_sets["Cog8_GFPC"]=s s= marker_sets["Cog8_GFPC"] mark=s.place_marker((570.894, 525.42, 514.688), (1, 0.6, 0.1), 18.4716) if "Cog8_Anch" not in marker_sets: s=new_marker_set('Cog8_Anch') marker_sets["Cog8_Anch"]=s s= marker_sets["Cog8_Anch"] mark=s.place_marker((533.826, 440.217, 648.202), (1, 0.6, 0.1), 18.4716) for k in surf_sets.keys(): chimera.openModels.add([surf_sets[k]])
9ef7705adfd20289b94d32e635c9782513a6bf09
22168b5ae3a347f3a310ac23c87ffe4d313534bb
/s3backupper.py
2d4542231ca6bc4f31c20983b275a66a6e42f306
[]
no_license
iamanders/s3backupper
f93b8ae96b83524fdb0b5e07f5ef882bfa5d45b6
a2870eeaa93753246f7498d1af1dc591b605f082
refs/heads/master
2021-01-19T07:55:07.968577
2010-01-22T13:53:00
2010-01-22T13:53:00
482,263
1
0
null
null
null
null
UTF-8
Python
false
false
2,164
py
#!/usr/bin/python # -*- coding: utf-8 -*- import os import time import tarfile from boto.s3.connection import S3Connection from boto.s3.key import Key class uploader: """Class for gzip and upload files to Amazon S3""" def __init__(self, access_key, secret_key, crypt_key): self.access_key = access_key self.secret_key = secret_key self.crypt_key = crypt_key self.s3connection = S3Connection(access_key, secret_key) def the_magic(self, id, path_to_bup, bucketname, date_in_filename, crypt): file_contents = os.listdir(path_to_bup) if date_in_filename: s3_filename = "%s_%s.tar.gz" % (id, time.strftime("%Y-%m-%d-%H%M%S")) else: s3_filename = id + '.tar.gz' temp_filename = '/tmp/backup.tar.gz' #tar files files = os.listdir(path_to_bup) tar = tarfile.open(temp_filename, 'w:gz') for f in files: tar.add(path_to_bup + f) tar.close() #crypt the file? if crypt: os.system("gpg -c --passphrase '%s' --yes /tmp/backup.tar.gz" % self.crypt_key) temp_filename += '.gpg' s3_filename += '.gpg' #upload bucket = self.s3connection.get_bucket(bucketname) s3key = Key(bucket) s3key.key = s3_filename s3key.set_contents_from_filename(temp_filename) #clean the tmp folder os.remove('/tmp/backup.tar.gz') if crypt: os.remove('/tmp/backup.tar.gz.gpg') if __name__ == '__main__': access_key = 'PUT YOUR AMAZON ACCESS KEY HERE' secret_key = 'PUT YOUR AMAZON SECRET KEY HERE' crypt_key = 'PUT-A-SECRET-CRYPT-CODE-HERE' uploader = uploader(access_key, secret_key, crypt_key) #To backup to_backup = [ { 'id': 'foo', 'path': '/path/to/files1/', 'bucket': 'bucket1', 'date': True, 'crypt': True }, #{ 'id': 'bar', 'path': '/path/to/files2/', 'bucket': 'bucket2', 'date': False, 'Crypt': False }, ] print '' #Loop and upload for b in to_backup: print '%s - start backup %s' % (time.strftime("%Y-%m-%d %H:%M:%S"), b['id']) try: uploader.the_magic(b['id'], b['path'], b['bucket'], b['date'], b['crypt']) except: print '%s - FAIL %s' % (time.strftime("%Y-%m-%d %H:%M:%S"), b['id']) print '' print '%s - DONE!' % time.strftime("%Y-%m-%d %H:%M:%S") print ''
f3c5d20d29dd9b88627ce522e66785298e8855f1
498fcf34fa4482be5c9fefc488666e60edcf46c7
/supervised_learning/0x08-deep_cnns/6-transition_layer.py
b1f56c159fcbde725fe51e00dbf6f594f96be8dd
[]
no_license
MansourKef/holbertonschool-machine_learning
7dbc465def04c311c1afb0e8b8903cbe34c72ad3
19f78fc09f0ebeb9f27f3f76b98e7a0e9212fd22
refs/heads/main
2023-03-12T16:18:08.919099
2021-03-05T09:42:09
2021-03-05T09:42:09
317,303,125
0
0
null
null
null
null
UTF-8
Python
false
false
659
py
#!/usr/bin/env python3
"""Transition layer used in DenseNet-C: batch norm -> ReLU -> 1x1
convolution (with compression) -> 2x2 average pooling."""
import tensorflow.keras as K


def transition_layer(X, nb_filters, compression):
    """Builds a transition layer: compresses the number of filters by the
    given compression factor with a 1x1 convolution, then halves the
    spatial dimensions with 2x2 average pooling.

    Returns the output tensor and the new number of filters."""
    BN1 = K.layers.BatchNormalization(axis=3)(X)
    Relu1 = K.layers.Activation("relu")(BN1)
    conv1 = K.layers.Conv2D(int(compression * nb_filters),
                            kernel_size=(1, 1),
                            padding="same",
                            kernel_initializer="he_normal",
                            strides=(1, 1))(Relu1)
    pool5 = K.layers.AveragePooling2D(pool_size=(2, 2),
                                      strides=(2, 2))(conv1)
    return pool5, int(compression * nb_filters)
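# Illustrative usage sketch (not part of the original file): shows the
# expected call pattern. The 56x56x256 input shape and compression of 0.5
# are example values only (roughly what DenseNet-C sees after its first
# dense block), not requirements of the function.
if __name__ == "__main__":
    X = K.Input(shape=(56, 56, 256))
    Y, nb_filters = transition_layer(X, 256, 0.5)
    print(Y.shape)       # expected: (None, 28, 28, 128)
    print(nb_filters)    # expected: 128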
e2143b19b050bbbb72261ab6e4bd8ec5639ae272
9cc3011ae618d43f6f09a98f2ea4577bb57b3482
/week8/3.2.Цикл while/a.py
d927561ceda36772314c4c577b15165260fde5a5
[]
no_license
Alexey929/web-development
2992db5f18917641740886d8920c1486f61b84e9
c50a394734614bb5b05cfc0d43cb7ccd9e4087af
refs/heads/master
2023-01-13T20:29:06.663877
2020-04-28T13:41:32
2020-04-28T13:41:32
236,985,130
0
0
null
null
null
null
UTF-8
Python
false
false
120
py
import math x = int(input()) i = 1 root = math.sqrt(x) while i <= root: print(str(i * i) + " ", end='') i += 1
19e02e30a90fc1c0781c84ee261b118d7bd1b1bb
91652afbc75037f6c631dbe9c14c343514d07469
/examples/static.py
80e29ed6bb54b099e5dd92eeaef8911dc9804300
[]
no_license
BitTheByte/Pybook
beb2186077cdecd821a25b015522beeb3e1d4426
8385a9006b4c8577412fa75d7c2196e0e0c539a5
refs/heads/master
2023-06-11T02:57:21.458411
2023-06-04T18:58:26
2023-06-04T18:58:26
148,077,126
4
2
null
2023-06-04T18:58:27
2018-09-10T00:15:29
Python
UTF-8
Python
false
false
717
py
""" please move this example to the root directory """ from lib.session import * from lib.parser import * from lib.engine import * fbsession = login("[email protected]","Secret_Password123") # login with facebook def hi(msg): print msg return "HELLO FROM FUNCTION" """ def custom(message): print message return message + " WOW!" """ myreplies = { "hi":"Hello from python!", "failReply":"Sorry i don't understand :(", "func_hello":hi } options = { "keysearch" :1, # find the closest key replies "failReply" :0, # use a fail reply #"replyHook" :custom, use a custom function to generate answers } StaticMessageHook(fbsession,options,myreplies)
47204ab5273867d202c0c4bdbd8c953a99b17499
f9c223341e3c052705cc08291d2246399121f037
/LSR/lsr.py
3e5ea30d0781c904bd887def9f5932d263d6258a
[]
no_license
andreaeliasc/Lab3-Redes
185155d91e7f0eec6c59956751c830a19e2e197e
037f06a632d0e5972f150dc005c29cae232dcf48
refs/heads/main
2023-07-15T14:57:28.684337
2021-09-01T03:03:57
2021-09-01T03:03:57
401,242,615
0
1
null
null
null
null
UTF-8
Python
false
false
7,107
py
import asyncio from asyncio.tasks import sleep import slixmpp from getpass import getpass from aioconsole import ainput, aprint import time from utils import * class LSRClient(slixmpp.ClientXMPP): def __init__(self, jid, password, topo_file,names_file): slixmpp.ClientXMPP.__init__(self, jid, password) self.add_event_handler("session_start", self.start) self.add_event_handler("message", self.message) self.topo_file = topo_file self.names_file = names_file self.network = [] self.echo_sent = None self.LSP = { 'type': lsp, 'from': self.boundjid.bare, 'sequence': 1, 'neighbours':{} } self.id = get_ID(self.names_file, jid) self.neighbours_IDS = get_neighbors(self.topo_file, self.id) self.neighbours = [] self.neighbours_JID() async def start(self, event): self.send_presence() await self.get_roster() print("Press enter to start:") start = await ainput() for neighbour in self.neighbours: await self.send_hello_msg(neighbour) for neighbour in self.neighbours: await self.send_echo_message(neighbour, echo_send) self.network.append(self.LSP) self.loop.create_task(self.send_LSP()) await sleep(2) print("Type the jid of the user you want to message (or wait until someone messages you!)") send = await ainput() if send != None: message = await ainput('Type your message: ') #Waiting some time so that the network converges print("Waiting for network to converge") await sleep(17) print("Network converged, sending message") self.send_chat_message(self.boundjid.bare,send,steps=1,visited_nodes=[self.boundjid.bare],message=message) print("press enter to exit") exit = await ainput() self.disconnect() def neighbours_JID(self): for id in self.neighbours_IDS: neighbour_JID = get_JID(self.names_file, id) self.neighbours.append(neighbour_JID) async def message(self, msg): body = json_to_object(msg['body']) if body['type'] == hello: print("Hello from: ", msg['from']) elif body['type'] == echo_send: print("Echoing back to: ", msg['from']) await self.send_echo_message(body['from'],echo_response) elif body['type'] == echo_response: distance = time.time()-self.echo_sent print("Distance to ", msg['from'], ' is ', distance) self.LSP['neighbours'][body['from']] = distance elif body['type'] == lsp: new = await self.update_network(body) await self.flood_LSP(body, new) elif body['type'] == message_type: if body['to'] != self.boundjid.bare: print('Got a message that is not for me, sending it ') self.send_chat_message(source = body['from'],to = body['to'], steps=body['steps'] +1, distance=body['distance'],visited_nodes= body['visited_nodes'].append(self.boundjid.bare),message=body['mesage']) elif body['to'] == self.boundjid.bare: print('Got a message!') print(body['from'], " : ", body['mesage']) print(body) async def send_hello_msg(self,to, steps = 1): you = self.boundjid.bare to = to json = { 'type': hello, 'from':you, 'to': to, 'steps': steps } to_send = object_to_json(json) self.send_message(mto = to, mbody=to_send, mtype='chat') async def send_echo_message(self, to, type ,steps = 1): you = self.boundjid.bare to = to json = { 'type': type, 'from':you, 'to': to, 'steps': steps } to_send = object_to_json(json) self.send_message(mto = to, mbody=to_send, mtype='chat') self.echo_sent = time.time() async def send_LSP(self): while True: for neighbour in self.neighbours: lsp_to_send = object_to_json(self.LSP) self.send_message(mto =neighbour,mbody=lsp_to_send,mtype='chat') await sleep(2) self.LSP['sequence'] += 1 def send_chat_message(self,source,to,steps=0, distance = 0, visited_nodes = [],message="Hola mundo"): body ={ 
'type':message_type, 'from': source, 'to': to, 'steps': steps, 'distance': distance, 'visited_nodes':visited_nodes, 'mesage':message } path = self.calculate_path(self.boundjid.bare, to) body['distance'] += self.LSP['neighbours'][path[1]['from']] to_send = object_to_json(body) self.send_message(mto=path[1]['from'],mbody = to_send,mtype='chat') async def update_network(self, lsp): for i in range(0,len(self.network)): node = self.network[i] if lsp['from'] == node['from']: if lsp['sequence'] > node['sequence']: node['sequence'] = lsp['sequence'] node['neighbours'] = lsp['neighbours'] return 1 if lsp['sequence'] <= node['sequence']: return None self.network.append(lsp) return 1 def calculate_path(self, source, dest): distance = 0 visited = [] current_node = self.find_node_in_network(source) while current_node['from'] != dest: node_distances = [] neighbours = current_node['neighbours'] for neighbour in neighbours.keys(): if neighbour == dest: visited.append(current_node) current_node = self.find_node_in_network(neighbour) visited.append(current_node) return visited elif neighbour not in visited: distance_to_neighbour = neighbours[neighbour] node_distances.append(distance_to_neighbour) min_distance = min(node_distances) node_index = node_distances.index(min_distance) all_nodes = list(current_node['neighbours'].keys()) next_node_id = all_nodes[node_index] visited.append(current_node) next_node = self.find_node_in_network(next_node_id) current_node = next_node distance += min_distance return visited def find_node_in_network(self, id): for i in range(len(self.network)): node = self.network[i] if id in node['from']: return node return False async def flood_LSP(self, lsp, new): for neighbour in self.neighbours: if new and neighbour != lsp['from']: self.send_message(mto =neighbour,mbody=object_to_json(lsp),mtype='chat')
a003a04f25ae531bcff5fcc6b77658dab1d893f8
ca82e3c6084e697ecbdbf32d96c08293c5540287
/courses/python_data_structures_linked_lists/Exercise Files/Ch05/05_01/End/dll.py
50cac5b09fdcbb4bdfd6e43e8d6640dcd496bb4e
[]
no_license
bheki-maenetja/small-projects-py
8c8b35444ff2ecef7ad77e709392a9c860967ecc
18504d2e1f1ea48b612a4e469828682f426c9704
refs/heads/master
2023-08-17T00:38:06.208787
2023-08-16T16:25:22
2023-08-16T16:25:22
131,871,876
1
0
null
2023-08-14T23:44:23
2018-05-02T15:37:58
Python
UTF-8
Python
false
false
1,390
py
class DLLNode:
    def __init__(self, data):
        self.data = data
        self.next = None
        self.previous = None

    def __repr__(self):
        return "DLLNode object: data={}".format(self.data)

    def get_data(self):
        """Return the self.data attribute."""
        return self.data

    def set_data(self, new_data):
        """Replace the existing value of the self.data attribute with
        new_data parameter."""
        self.data = new_data

    def get_next(self):
        """Return the self.next attribute"""
        return self.next

    def set_next(self, new_next):
        """Replace the existing value of the self.next attribute with
        new_next parameter."""
        self.next = new_next

    def get_previous(self):
        """Return the self.previous attribute"""
        return self.previous

    def set_previous(self, new_previous):
        """Replace the existing value of the self.previous attribute with
        new_previous parameter."""
        self.previous = new_previous


class DLL:
    def __init__(self):
        self.head = None

    def __repr__(self):
        return "<DLL object: head={}>".format(self.head)

    def is_empty(self):
        return self.head is None

    def size(self):
        pass

    def search(self, data):
        pass

    def add_front(self, data):
        pass

    def remove(self, data):
        pass
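# Illustrative sketch (not part of the original exercise file): one way the
# stub methods above could be completed, kept in a separate subclass so the
# lesson skeleton stays untouched. The behaviour (count, membership test,
# insert at head, remove first match) is an assumption based on the method
# names, not the course's official solution.
class DLLCompleteSketch(DLL):
    def size(self):
        """Count nodes by walking the list from head."""
        count = 0
        current = self.head
        while current is not None:
            count += 1
            current = current.get_next()
        return count

    def search(self, data):
        """Return True if any node holds data, else False."""
        current = self.head
        while current is not None:
            if current.get_data() == data:
                return True
            current = current.get_next()
        return False

    def add_front(self, data):
        """Insert a new node at the head, wiring both next and previous links."""
        new_node = DLLNode(data)
        new_node.set_next(self.head)
        if self.head is not None:
            self.head.set_previous(new_node)
        self.head = new_node

    def remove(self, data):
        """Unlink the first node holding data, if present."""
        current = self.head
        while current is not None:
            if current.get_data() == data:
                if current.get_previous() is not None:
                    current.get_previous().set_next(current.get_next())
                else:
                    self.head = current.get_next()
                if current.get_next() is not None:
                    current.get_next().set_previous(current.get_previous())
                return
            current = current.get_next()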
c32ad72dc0b1b7022ee645d091b9a96888d5760e
373ecb4548a8412b50b685d1ec0e5cea59a9425f
/Week 13/gallons_convert.py
1b8e1392e2709b2e5541696f6c477f0ab07e0127
[]
no_license
stewartrowley/CSE110
847c04c036b62f718952a12a65163281e23fac2c
57c744aa1f2b40c5f9b13b26b6558196aef3d515
refs/heads/main
2023-08-15T22:25:50.487798
2021-09-29T02:34:24
2021-09-29T02:34:24
411,504,145
0
0
null
null
null
null
UTF-8
Python
false
false
978
py
def convert_gallons_to_ounces(gallons):
    """
    Function: convert_gallons_to_ounces
    Description: Converts gallons to ounces
    Params: gallons (float) - The amount in gallons
    Returns: float - The amount in ounces
    """
    return gallons * 128


def convert_gallons_to_liters(gallons):
    """
    Function: convert_gallons_to_liters
    Description: Converts gallons to liters
    Params: gallons (float) - The amount in gallons
    Returns: float - The amount in liters
    """
    # 1 US gallon is approximately 3.785 liters
    return gallons * 3.785


def main():
    starting_number = int(input("How many bottles of milk do you want to start with? "))

    for bottle_count in range(starting_number, 0, -1):
        ounces = convert_gallons_to_ounces(bottle_count)
        liters = convert_gallons_to_liters(bottle_count)
        print(f"{bottle_count} gallons ({ounces} oz, {liters:.2f} liters) of milk on the wall... ")


main()

# print(convert_gallons_to_liters())
21821ff603c4c08ad1e79cfc45c19e207342a87f
da8e3af2bfbad7109c19e37efe6b10bd5b1e0f56
/이코테/DP/q32_정수삼각형.py
b6fb0e0512b1f63802f795516949612640493eb5
[]
no_license
cheonyeji/algorithm_study
fead92b52e42d98a0fdf7f3395ed0a6676c8b972
ef2f5e50f874751a9ba8e9157a13f832f439618b
refs/heads/master
2023-09-01T00:41:24.600614
2023-08-31T07:39:45
2023-08-31T07:39:45
234,265,247
0
0
null
null
null
null
UTF-8
Python
false
false
521
py
# 2022-01-19 # 이코테 ch16 다이나믹 프로그래밍 # https://www.acmicpc.net/problem/1932 n = int(input()) tri = [] for _ in range(n): tri.append(list(map(int, input().split()))) for i in range(1, n): for j in range(0, i + 1): if j == 0: tri[i][j] += tri[i - 1][j] elif j == i: tri[i][j] += tri[i - 1][i - 1] else: tri[i][j] += max(tri[i - 1][j - 1], tri[i - 1][j]) print(max(tri[n - 1])) """ TC -> 30 5 7 3 8 8 1 0 2 7 4 4 4 5 2 6 5 """
8ad86d60b83d577d0c3cb1c91bfc2655109ff2b6
60130f382f2ffff27994a6fc573cd56c87d99450
/Requests/requeststests.py
336a072034b6c8686021d63c8e3b9100eff94194
[]
no_license
hellknight87/python
4be75dc8ffee810edaa485366f423d24f0fdec0a
ced1ff205e21ba5e73dfd35dcbefad5ba4d4a3cf
refs/heads/master
2021-04-03T06:28:36.208338
2018-05-09T06:11:30
2018-05-09T06:11:30
125,104,124
0
0
null
null
null
null
UTF-8
Python
false
false
197
py
import requests params = {"q": "pizza"} r = requests.get("http://www.bing.com/search", params=params) print("Status:", r.status_code) print(r.url) f = open("./page.html", "w+") f.write(r.text)
ccd8398a6f0d223f5b36f6d68625ad6b7499eeec
0d42d967b732a789705f9e08e538090f69808706
/flaskr/__init__.py
1ada9012763c5ef7962dc17079683c63cf8938b6
[ "MIT" ]
permissive
aicioara-old/flask_tutorial2
f812ed25f57430b48669587dd2e6905760df33bb
acb5c6fa2743f2f060ad6a3a26cc7eef56b6490b
refs/heads/master
2020-05-23T16:31:32.983214
2019-05-14T17:35:23
2019-05-14T17:35:23
186,851,392
0
0
null
null
null
null
UTF-8
Python
false
false
1,170
py
import os import datetime import click from flask import Flask from flask import current_app, g def create_app(test_config=None): # create and configure the app app = Flask(__name__, instance_relative_config=True) app.config.from_mapping( SECRET_KEY='dev', DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'), SQLALCHEMY_DATABASE_URI='sqlite:///' + os.path.join(app.instance_path, 'flaskr2.sqlite'), SQLALCHEMY_TRACK_MODIFICATIONS=False, ) if test_config is None: # load the instance config, if it exists, when not testing app.config.from_pyfile('config.py', silent=True) else: # load the test config if passed in app.config.from_mapping(test_config) # ensure the instance folder exists try: os.makedirs(app.instance_path) except OSError: pass from flaskr import commands commands.init_app(app) from .models import db db.init_app(app) from .views import auth app.register_blueprint(auth.bp) from .views import blog app.register_blueprint(blog.bp) app.add_url_rule('/', endpoint='index') return app
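# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the original tutorial file).
# The config keys below are example overrides only. With the Flask CLI,
# a factory like this is normally picked up via:
#   FLASK_APP=flaskr flask run
# ----------------------------------------------------------------------
def _example_create_test_app():
    """Sketch: the factory pattern lets tests build an isolated app instance."""
    app = create_app({"TESTING": True, "SECRET_KEY": "test"})
    return app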
14de26357470049d34384244bc6dfe2cabc076b9
a80ce010d5d9b459d07bc4ff838123e3d9e8a394
/conftest.py
82c66e9c1664f34e156e8abc834440a12310ad01
[]
no_license
teola/mail-test
df20e7d7e2f3ef178a85c777320222183f7a3eeb
6594455a1a9f5323cadb20bc4997327cca171c88
refs/heads/main
2023-05-07T00:44:42.754919
2021-06-01T14:10:36
2021-06-01T14:18:13
372,849,095
0
0
null
null
null
null
UTF-8
Python
false
false
193
py
import pytest import time from selenium import webdriver @pytest.fixture(scope="session") def browser(): driver = webdriver.Firefox() yield driver time.sleep(3) driver.quit()
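A minimal sketch of a test module that would consume the session-scoped browser fixture above (the URL and the title assertion are illustrative assumptions):
# test_example.py -- pytest injects the fixture by matching the parameter name
def test_page_opens(browser):
    browser.get("https://example.com")
    assert "Example" in browser.title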
1a7945122da319698aab18dff3ea548ff1990001
cd7557f4daedf3447673c67e13b1c67220905b0e
/Judgment Classifier.py
718395714852f46853f26e330aace481d2f0abae
[]
no_license
Jason1286/Copyright_88_Classifier
5774703773ac5816401ba2256777f74d0f9a0859
02ba028235c21aa79cae00727effb15a111b8568
refs/heads/main
2023-06-02T01:51:59.552419
2021-06-25T07:12:30
2021-06-25T07:12:30
380,103,097
0
0
null
null
null
null
UTF-8
Python
false
false
7,381
py
#!/usr/bin/env python # coding: utf-8 # 使用套件 import os import re import pandas as pd import numpy as np from itertools import compress # 人工標記結果 manual_label_df = pd.read_excel(r'C:\Users\ASUS VivoBook\Desktop\計算與法律分析\Final_Project\判決標註.xlsx', sheet_name = '工作表1') # read all sheets manual_label_id = list(manual_label_df['檔案編號']) manual_filename = ['verdict_' + str('{:03}'.format(x)) + '.txt' for x in sorted(manual_label_id)] # 建立自動判決結果dataframe dict2df = {'verdict':manual_filename, '判決書案號':list(manual_label_df['判決書案號']), '駁回_Auto':None,'駁回_Manual':manual_label_df['駁回'], '原告引用法條_Auto':None,'法官判決法條_Auto':None, '原告引用法條_Manual':manual_label_df['原告引用法條'], '法官判決法條_Manual':manual_label_df['法官判決法條'], '駁回_Diff':None,'原告引用法條_Diff':None,'法官判決法條_Diff':None } label_df = pd.DataFrame.from_dict(dict2df) label_df = label_df.set_index(['verdict']) # 讀去判決書 def read_verdict(entry): os.chdir(r'C:\Users\ASUS VivoBook\Desktop\計算與法律分析\Final_Project\All_Verdicts') f = open(entry, 'r', encoding = 'utf-8-sig') txt = f.readlines() txt = [re.sub('\n', '', x) for x in txt] txt = [x for x in txt if x != ''] return txt # 著作權法第88條項目提取 def case_detection(txt): c23_regex = re.compile(r'著作權法(第\d+條)?(、)?第(88|八十八)條(第)?(1|一)?(項)?(、)?(第)?(2|二)(項)?(、)?(或)?(第)?(3|三)項') c2_regex = re.compile(r'著作權法(第\d+條)?(、)?第(88|八十八)條第(1|一)?(項)?(、)?(第)?(2|二)項') c3_regex = re.compile(r'著作權法(第\d+條)?(、)?第(88|八十八)條第(1|一)?(項)?(、)?(第)?(3|三)項') cX_regex = re.compile(r'著作權法(第\d+條)?(、)?第(88|八十八)條(\S+)?') if bool(c23_regex.search(txt)) == True: return 4 elif bool(c2_regex.search(txt)) == True: return 2 elif bool(c3_regex.search(txt)) == True: return 3 else: return 99 def fill_dataframe(classify_, colname, filename): if 4 in classify_: label_df.loc[filename,colname] = 4 elif 3 in classify_: label_df.loc[filename,colname] = 3 elif 2 in classify_: label_df.loc[filename,colname] = 2 elif 99 in classify_: label_df.loc[filename,colname] = 99 elif classify_ == []: label_df.loc[filename,colname] = 99 # 著作權法第88條項目分類 def Classify(filename): current_verdict = read_verdict(filename) # dissmiss detection main_rex = re.compile('^主文') main_txt = [current_verdict[i] for i, x in enumerate(current_verdict) if main_rex.search(x) != None] rex1 = re.compile(r'(應?(連帶)?給付)(周年利率|週年利率|年息|年利率)?(百分之五|百分之5|5%|5%)?') if bool(rex1.search(main_txt[0])) == True: label_df.loc[filename,'駁回_Auto'] = 0 else: label_df.loc[filename,'駁回_Auto'] = 1 # 提取著作權法第88條相關條文 rex88 = re.compile(r'著作權法(第\d+條)?(、)?(第\d+項)?(、)?第(88|八十八)(、\d+-\d)?(、\d+){0,2}?條(第)?(1|一|2|二|3|三)?(項)?(及)?((、)?第(2|二)項)?((、)?第(3|三)項)?((、)?(2|二)項)?((、)?(3|三)項)?') filter1 = [current_verdict[i] for i, x in enumerate(current_verdict) if rex88.search(x) != None] filter1 # 原告引用法條 copyright88 = [filter1[i] for i, x in enumerate(filter1) if re.search(r'(原告|被告|被上訴人|上訴人|被害人|公司)', x) != None] copyright88 = [copyright88[i] for i, x in enumerate(copyright88) if not bool(re.search(r'(二造|爭點|抗辯|\?|\?|定有明文)', x)) == True] plaintiff = [copyright88[i] for i, x in enumerate(copyright88) if bool(re.search('請求(原告|被告|被害人|上訴人|被上訴人)?(等連帶負損害賠償責任)?', x)) == True] # 法官判決法條 court = [copyright88[i] for i, x in enumerate(copyright88) if bool(re.search('(為有理由|即有理由|洵屬正當|即非不合|核屬正當|應予准許|核屬合法適當|核屬有據|於法有據|即無不合)(,)?(應予准許)?', x)) == True] court_ = [x for x in court if x in plaintiff] plaintiff_ = [x for x in plaintiff if x not in court_] plaintiff_classify = list(set([case_detection(x) for x in plaintiff_])) court_classify = list(set([case_detection(x) for x in court_])) # 填入dataframe fill_dataframe(plaintiff_classify, '原告引用法條_Auto', filename) 
fill_dataframe(court_classify, '法官判決法條_Auto', filename) # 判斷分類對錯 if label_df.loc[filename, '駁回_Auto'] != label_df.loc[filename, '駁回_Manual']: label_df.loc[filename, '駁回_Diff'] = 1 else: label_df.loc[filename, '駁回_Diff'] = 0 if label_df.loc[filename, '原告引用法條_Auto'] != label_df.loc[filename, '原告引用法條_Manual']: label_df.loc[filename, '原告引用法條_Diff'] = 1 else: label_df.loc[filename, '原告引用法條_Diff'] = 0 if label_df.loc[filename, '法官判決法條_Auto'] != label_df.loc[filename, '法官判決法條_Manual']: label_df.loc[filename, '法官判決法條_Diff'] = 1 else: label_df.loc[filename, '法官判決法條_Diff'] = 0 def Copyright_88_Classifier(filename_lst): # 將挑選判決進行分類並填入表格 for filename in filename_lst: Classify(filename) # 結果分析 dismiss_wrong = label_df.loc[label_df['駁回_Diff'] == 1,:] both_wrong = label_df.loc[label_df.loc[:,['原告引用法條_Diff','法官判決法條_Diff']].sum(axis = 1) == 2,:] tmp = label_df.loc[label_df['原告引用法條_Diff'] == 1,:] plaintiff_wrong = tmp.loc[[ind for ind in list(tmp.index) if ind not in list(both_wrong.index)],:] tmp = label_df.loc[label_df['法官判決法條_Diff'] == 1,:] court_wrong = tmp.loc[[ind for ind in list(tmp.index) if ind not in list(both_wrong.index)],:] both_right = label_df.loc[label_df.loc[:,['原告引用法條_Diff','法官判決法條_Diff']].sum(axis = 1) == 0,:] cases_dct = {'both_wrong':both_wrong, 'plaintiff_wrong':plaintiff_wrong, 'court_wrong': court_wrong, 'both_right': both_right, 'dismiss_wrong': dismiss_wrong} summary_dict = {'Case':['僅原告引用法條分錯', '僅法官判決法條分錯','皆分錯','皆分對','總和'], 'amount':None,'proportion':None} summary_df = pd.DataFrame.from_dict(summary_dict) summary_df = summary_df.set_index(['Case']) summary_df.iloc[0,0:2] = [len(plaintiff_wrong), len(plaintiff_wrong)/len(label_df)] summary_df.iloc[1,0:2] = [len(court_wrong), len(court_wrong)/len(label_df)] summary_df.iloc[2,0:2] = [len(both_wrong), len(both_wrong)/len(label_df)] summary_df.iloc[3,0:2] = [len(both_right), len(both_right)/len(label_df)] summary_df.iloc[4,0:2] = summary_df.iloc[0:4,].sum(axis = 0) summary_df return label_df, summary_df, cases_dct label_df, summary_df, cases_dct = Copyright_88_Classifier(manual_filename)
4d4174c05cd9b20b5c5012cc3f5ba7fd0900d63d
669660bee94af2f9312ca84fb5ea956e35e551ac
/quickstart/views.py
f7fe0ca0347e4d744beeb57d36c23e64e4c4a31c
[]
no_license
jaymodi-ntech/demo3
d8bc9746788927fc7dc5cd72352c85f552b32ce7
f930bfe2b087a0a446ed9deeddf8205d895a18bd
refs/heads/master
2021-01-02T09:34:39.014444
2014-10-06T11:13:34
2014-10-06T11:13:34
null
0
0
null
null
null
null
UTF-8
Python
false
false
591
py
__author__ = 'mj' from django.shortcuts import render # Create your views here. from django.contrib.auth.models import User, Group from rest_framework import viewsets from quickstart.serializers import * class UserViewSet(viewsets.ModelViewSet): """ API endpoint that allows users to be viewed or edited. """ queryset = User.objects.all() serializer_class = UserSerializer class GroupViewSet(viewsets.ModelViewSet): """ API endpoint that allows groups to be viewed or edited. """ queryset = Group.objects.all() serializer_class = GroupSerializer
[ "ntech@n-tech.(none)" ]
ntech@n-tech.(none)
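The two viewsets above are normally exposed through a DRF router; a minimal sketch, assuming UserSerializer and GroupSerializer in quickstart/serializers.py are ordinary ModelSerializer subclasses:
# quickstart/urls.py (sketch)
from django.conf.urls import url, include
from rest_framework import routers
from quickstart import views

router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)

urlpatterns = [
    url(r'^', include(router.urls)),
]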
2282495d9f9f1ac8079c3e9d8dbe84bc6f9a1e8d
edbf80fb7ae7f411aaa1bdc58c1c5ed96c7aeec5
/app/gateways/OwlveyGateway.py
cd8c37ae458588cbdbea35a4e823f9290733298f
[ "Apache-2.0" ]
permissive
owlvey/power_tools
3eff4339855e6731b600915732f2a0a011688de8
cec83fb13a21ebd0592f8d203cc3705101c109b8
refs/heads/master
2022-07-18T07:55:17.259885
2020-05-15T14:21:20
2020-05-15T14:21:20
263,683,971
0
0
null
null
null
null
UTF-8
Python
false
false
8,664
py
import datetime import requests from app.components.ConfigurationComponent import ConfigurationComponent class OwlveyGateway: def __init__(self, configuration_component: ConfigurationComponent): self.host = configuration_component.owlvey_url self.identity = configuration_component.owlvey_identity self.token = None self.token_on = None self.client_id = configuration_component.owlvey_client self.client_secret = configuration_component.owlvey_secret @staticmethod def __validate_status_code(response): if response.status_code > 299: raise ValueError(response.text) def generate_token(self): payload = { "grant_type": "client_credentials", "scope": "api", "client_id": self.client_id, "client_secret": self.client_secret } response = requests.post(self.identity + "/connect/token", data=payload) OwlveyGateway.__validate_status_code(response) self.token_on = datetime.datetime.now() self.token = response.json() def __build_authorization_header(self): if self.token: expires_in = self.token["expires_in"] if (self.token_on + datetime.timedelta(seconds=expires_in + 30)) > datetime.datetime.now(): self.generate_token() else: self.generate_token() return { "Authorization": "Bearer " + self.token["access_token"] } def __internal_get(self, url): response = requests.get(url, headers=self.__build_authorization_header(), verify=False) OwlveyGateway.__validate_status_code(response) return response.json() def __internal_put(self, url, payload): response = requests.put(url, json=payload, headers=self.__build_authorization_header(), verify=False) OwlveyGateway.__validate_status_code(response) def __internal_delete(self, url, payload): response = requests.delete(url, json=payload, headers=self.__build_authorization_header(), verify=False) OwlveyGateway.__validate_status_code(response) def __internal_post(self, url, payload): response = requests.post(url, json=payload, headers=self.__build_authorization_header(), verify=False) OwlveyGateway.__validate_status_code(response) return response.json() def get_customers(self): return self.__internal_get(self.host + "/customers") def get_customer(self, name): customers = self.get_customers() for cus in customers: if cus['name'] == name: return cus return None def get_products(self, organization_id): return self.__internal_get(self.host + '/products?customerId={}'.format(organization_id)) def get_product(self, organization_id, name): products = self.get_products(organization_id) for product in products: if product['name'] == name: return product return None def get_syncs(self, product_id): return self.__internal_get(self.host + "/products/{}/sync".format(product_id)) def post_sync(self, product_id, name): return self.__internal_post(self.host + "/products/{}/sync/{}".format(product_id, name), {}) def put_last_anchor(self, product_id, name, target): self.__internal_put(self.host + "/products/{}/sync/{}".format(product_id, name), {"target": target.isoformat()}) def get_features(self, product_id): return self.__internal_get(self.host + "/features?productId={}".format(product_id)) def get_feature_detail(self, feature_id): return self.__internal_get(self.host + "/features/{}".format(feature_id)) def create_customer(self, name): return self.__internal_post(self.host + "/customers", {"name": name}) def create_product(self, customer_id, name): return self.__internal_post(self.host + "/products", {"customerId": customer_id, "name": name}) def create_service(self, product_id, name, slo): service = self.__internal_post(self.host + "/services", {"productId": product_id, "name": name}) 
service_id = service["id"] service["slo"] = slo self.__internal_put(self.host + "/services/" + str(service_id), service) return service def assign_indicator(self, feature_id, source_id): return self.__internal_put(self.host + "/features/{}/indicators/{}".format(feature_id, source_id), {}) def un_assign_indicator(self, feature_id, source_id): return self.__internal_delete(self.host + "/features/{}/indicators/{}".format(feature_id, source_id), {}) def create_feature(self, product_id, name): return self.__internal_post(self.host + "/features", {"productId": product_id, "name": name}) def create_incident(self, product_id, key, title, resolution_on: datetime, ttd, tte, ttf, url): response = requests.post(self.host + "/incidents", json={"productId": product_id, "key": key, "title": title }, verify=False) OwlveyGateway.__validate_status_code(response) incident_id = response.json()["id"] response = requests.put(self.host + "/incidents/{}".format(incident_id), json={"title": title, "ttd": ttd, "tte": tte, "ttf": ttf, "url": url, "affected": 1, "end": resolution_on.isoformat()}, verify=False) OwlveyGateway.__validate_status_code(response) return response.json() def assign_incident_feature(self, incident_id, feature_id): response = requests.put(self.host + "/incidents/{}/features/{}".format(incident_id, feature_id), verify=False) OwlveyGateway.__validate_status_code(response) def get_sources(self, product_id): return self.__internal_get(self.host + "/sources?productId={}".format(product_id)) def create_source(self, product_id, name, kind, group, good_definition: str = "", total_definition: str = ""): result = self.__internal_post(self.host + "/sources", { "productId": product_id, "name": name, "kind": kind, "group": group }) result["goodDefinition"] = good_definition result["totalDefinition"] = total_definition self.__internal_put(self.host + "/sources/{}".format(result["id"]), result) return result def create_sli(self, feature_id, source_id): self.__internal_put(self.host + "/features/{}/indicators/{}".format(feature_id, source_id), {}) def search_feature(self, product_id, name): return self.__internal_get(self.host + "/features/search?productId={}&name={}".format(product_id, name)) def create_source_item(self, source_id, start, total, good): return self.__internal_post(self.host + "/sourceItems", { "sourceId": source_id, "start": start.isoformat(), "end": start.isoformat(), "total": int(total), "good": int(good) }) def create_source_item_proportion(self, source_id, start, percent): result = self.__internal_post(self.host + "/sourceItems/proportion", { "sourceId": source_id, "start": start.isoformat(), "end": start.isoformat(), "proportion": percent, }) return result
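A usage sketch for the gateway above; the configuration object is a stand-in exposing only the attributes the constructor reads, and the URLs and credentials are placeholders:
from types import SimpleNamespace

conf = SimpleNamespace(
    owlvey_url="https://owlvey.example.com/api",      # placeholder host
    owlvey_identity="https://identity.example.com",   # placeholder identity server
    owlvey_client="client-id",                        # placeholder credentials
    owlvey_secret="client-secret",
)
gateway = OwlveyGateway(conf)
customers = gateway.get_customers()   # first call generates the OAuth token, then GETs /customers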
270b750136f37b35a8ec6301de7546fe80dc514e
8186514b510a801863229e3f9711c0c657e727e5
/assembly/qtable/qlist_q.py
c4d46f59661410f1d3c06c6df3d6c2b23370a997
[]
no_license
masknugget/mypyqt
274b2cbbf66c04927453815248f9c1bc5e65ca17
b86a49e4b8c7c8c3d8546ce1b49f8f3bb6332307
refs/heads/main
2023-08-17T13:30:11.451066
2021-09-27T14:14:54
2021-09-27T14:14:54
355,904,935
0
0
null
null
null
null
UTF-8
Python
false
false
2,241
py
# Custom widget -- implements a checkbox list with a "select all" option import sys from PyQt5.QtWidgets import QApplication, QListWidget, QCheckBox, QListWidgetItem from PyQt5.QtCore import Qt class FilteredList(QListWidget): # inherits from the list widget def __init__(self, textList, parent=None): super().__init__(parent) self.selectAll_ch = QCheckBox("全选(selectAll)") self.selectAll_ch.setCheckState(Qt.Checked) self.selectAll_ch.stateChanged[int].connect(self.on_selectAll) # item = QListWidgetItem(self) self.setItemWidget(item, self.selectAll_ch) # set the list widget item to a QCheckBox self.dict = dict() self.boxes = set() for index, text in enumerate(textList): ch = QCheckBox(text) ch.setCheckState(Qt.Unchecked) ch.stateChanged[int].connect(self.on_stateChanged) # item.setCheckState(Qt.Unchecked)# item = QListWidgetItem(self) self.setItemWidget(item, ch) self.boxes.add(ch) self.dict[index] = ch def on_selectAll(self, state): if state == 2: for ch in self.boxes: ch.setCheckState(2) if state == 0: for ch in self.boxes: ch.setCheckState(0) def on_stateChanged(self, state): ch = self.sender() if state: if len([ch for ch in self.boxes if ch.checkState()]) == self.count() - 1: # 0 unchecked, 1 partially checked, 2 fully checked #Qt.Unchecked #Qt.PartiallyChecked #Qt.Checked self.selectAll_ch.setCheckState(2) else: self.selectAll_ch.setCheckState(1) else: if len([k for k in self.boxes if k.checkState()]): self.selectAll_ch.setCheckState(1) else: self.selectAll_ch.setCheckState(0) def keyPressEvent(self, event): # Ctrl+A selects all if event.modifiers() & Qt.ControlModifier and event.key() == Qt.Key_A: self.selectAll_ch.setCheckState(2) if __name__ == '__main__': app = QApplication(sys.argv) myList = FilteredList(textList=["a", "b", "c", "d"]) myList.show() sys.exit(app.exec_())
6ff2f7bbff706bd61e5fbd3eab9118c72980e899
a7723fa70d4a7701b038b37d1913d917fd2e8e8f
/codeif/app/dashboard/writer/post/apps.py
f64003df9a3ac78b305400cd0e393cb9a0f03ee8
[]
no_license
Nym-Git/codeif-internship
e3f8656b1eb0a1baf095f80d262eb172cf88a027
5af8597cadf02b670dd593a804dcc0e0b3f6bb53
refs/heads/master
2023-07-16T03:03:55.343589
2021-08-26T14:11:48
2021-08-26T14:11:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
104
py
from django.apps import AppConfig class PostConfig(AppConfig): name = 'app.dashboard.writer.post'
12655a75caf61802783410d883ae5ec5680cefe5
b77cc1448ae2c68589c5ee24e1a0b1e53499e606
/asset/migrations/0005_auto_20171026_1532.py
eb4e2ea65956f0a359a6c7516eb7dbb444b94e2a
[]
no_license
PregTech-c/Hrp_system
a5514cf6b4c778bf7cc58e8a6e8120ac7048a0a7
11d8dd3221497c536dd7df9028b9991632055b21
refs/heads/master
2022-10-09T07:54:49.538270
2018-08-21T11:12:04
2018-08-21T11:12:04
145,424,954
1
1
null
2022-10-01T09:48:53
2018-08-20T13:58:31
JavaScript
UTF-8
Python
false
false
664
py
# -*- coding: utf-8 -*- # Generated by Django 1.10 on 2017-10-26 12:32 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('asset', '0004_auto_20171022_1404'), ] operations = [ migrations.AddField( model_name='asset', name='model', field=models.CharField(default='test', max_length=32), preserve_default=False, ), migrations.AlterField( model_name='asset', name='description', field=models.CharField(max_length=256, null=True), ), ]
069a57e3df39d947bfd19a9af8fff7b242cce5d6
04b1faa6394131c8119700f731894d607a88a100
/loja/models.py
7720d2797da7e27eb8189382485d3582eed88e0c
[]
no_license
tamilly/loja_pe
c9ae340cbbbb5df86fb27dca89e6cd8f6a2fff10
453becffc74a853a616ee005ad00fff9ecc56d41
refs/heads/master
2022-11-06T23:05:29.740496
2020-07-06T23:05:08
2020-07-06T23:05:08
277,662,440
0
0
null
null
null
null
UTF-8
Python
false
false
1,255
py
#Tamilly's code / 2020-01-07 from django.db import models from django.conf import settings from django.utils import timezone class User(models.Model): cpf = models.BigIntegerField(primary_key=True) name = models.CharField(max_length=100) email = models.EmailField(max_length=100) birth_date = models.DateField(auto_now=False, auto_now_add=False) password = models.CharField(max_length=20) admin = models.BooleanField() def add_user(self): self.save() def __str__(self): return self.name class Product(models.Model): # Model definition # Attributes type definition user = models.ForeignKey(User, on_delete=models.CASCADE) bar_code = models.IntegerField() name = models.CharField(max_length=100) description = models.TextField() created_date = models.DateTimeField(default=timezone.now) price = models.DecimalField(max_digits=10, decimal_places=3) categories = models.TextChoices('Categoria', 'BELEZA LIMPEZA ALIMENTO CASA OUTROS') category = models.CharField(blank=True, choices=categories.choices, max_length=10) validity = models.DateField(auto_now=False, auto_now_add=False) # Methods definition def add_product(self): self.created_date = timezone.now() self.save() def __str__(self): return self.name
ff81fb06f2512900c9797697d5792d8253d5a9af
a5631fae399c750eb4f8de58bcc16da0edb2e33d
/lessons/lesson14_RegressionDiagnostics_Imputations/Diagnostics_Walkthrough.py
a883a00d9e82960b5c8d4a34817556a3dbeee4e7
[]
no_license
robertdavidwest/GADS11-NYC-Summer2014
1fa50b577b1f6c62c43522d0823bc6fa50dcace2
d9cbdcef0f9f94a01597b04b9a24d867b127ce30
refs/heads/master
2020-12-24T23:28:17.395587
2014-08-20T01:46:09
2014-08-20T01:46:09
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,717
py
#<<<<<<< HEAD import csv import numpy as np import pandas as pd from dateutil import parser import pylab as pl import statsmodels.api as sm import matplotlib.pyplot as plt import random from sklearn.preprocessing import scale from numpy import inf import scipy.stats as stats import pylab #======= ### IMPORT DATA ### batting_salary = pd.read_csv("/Users/patrickmcnamara/Documents/GA_DataScience/Teaching/Summer14/GADS11-NYC-Summer2014/projects/Project2/baseball.csv") pitching = pd.read_csv("/Users/patrickmcnamara/Documents/GA_DataScience/Teaching/Summer14/GADS11-NYC-Summer2014/projects/Project2/pitching.csv") # DROP UNWANTED VARIABLES # batting_salary = batting_salary.drop(['lahmanID', 'managerID', 'birthYear', 'birthMonth', 'birthDay', 'birthCountry', 'birthState', 'birthCity', 'deathYear', 'deathMonth', 'deathDay', 'deathCountry', 'deathState', 'deathCity', 'nameFirst','nameLast', 'nameNote', 'nameGiven', 'nameNick','bats', 'throws', 'debut', 'finalGame', 'college','lahman40ID', 'lahman45ID', 'retroID', 'holtzID', 'bbrefID', 'deathDate', 'birthDate','teamID', 'lgID', 'stint','G_batting','X2B', 'X3B', 'CS', 'SO', 'IBB', 'HBP', 'SH', 'SF', 'GIDP', 'G_old', 'hofID'], axis =1) # KEEP VARIABLES IN PITCHING DATA THAT AREN'T IN BATTING DATA # keep_cols = list(set(pitching.columns)-set(batting_salary.columns)) keep_cols = keep_cols + ['playerID','yearID'] pitching = pitching[keep_cols] pitching = pitching.drop(['GIDP','SH','SF'], axis=1) pitching = pitching[['ERA','playerID','yearID']] # MERGE DATASETS # data = pd.merge(batting_salary, pitching, on=['playerID','yearID'], how='outer') data = data.drop(['playerID','yearID'], axis=1) # DROP PITCHERS FROM DATASET # index = data['ERA'].index[data['ERA'].apply(np.isnan)] slimdata = data.loc[index] slimdata = slimdata.drop(['ERA'], axis=1) ### CHECKING VARIABLE RELATIONSHIPS ### # SHRINKING THE DATA TO MAKE VISUALIZATIONS EASIER # slimdata['random'] = np.random.randn(len(slimdata)) slimdata = slimdata[slimdata.random > 1] del slimdata['random'] # COLLINEARITY HISTOGRAM # pd.tools.plotting.scatter_matrix(slimdata, alpha=0.2, diagonal='hist') plt.show() # LOG TRANSFORMATIONS WHERE NECESSARY # slimdata.SB = np.log(slimdata.SB) slimdata.HR = np.log(slimdata.HR) slimdata.BB = np.log(slimdata.BB) slimdata.RBI = np.log(slimdata.RBI) slimdata.salary = np.log(slimdata.salary) # REPLACE INF VALUES WITH NAN # slimdata = slimdata.replace([inf, -inf], np.nan) # DROP NAN # slimdata = slimdata.dropna() ### PLOTTING SCATTERPLOT MATRIX FOR COLLINEARITY ### # HISTOGRAM # pd.tools.plotting.scatter_matrix(slimdata, alpha=0.2, diagonal='hist') plt.show() # KERNEL DENSITY # pd.tools.plotting.scatter_matrix(slimdata, alpha=0.2, diagonal='kde') plt.show() ### RUNNING REGRESSION MODEL ### # CREATING INTERCEPT # slimdata['intercept'] = 1 # DEFINING IVs & DVs # X = slimdata.drop(['salary', 'intercept'], axis = 1) y = slimdata['salary'] # RUNNING REGRESSION # model = sm.OLS(y, X) results = model.fit() results.summary() ### NORMALIZATION ### # BOX PLOT FOR OUTLIERS # slimdata.boxplot() plt.show() # SCALING # Mean-center then divide by std dev data_norm = pd.DataFrame(scale(slimdata), index=slimdata.index, columns=slimdata.columns) data_norm.boxplot() plt.show() ### RUNNING REGRESSION MODEL AGAIN ### data_norm['intercept'] = 1 X = data_norm.drop(['salary'], axis = 1) y = data_norm['salary'] model2 = sm.OLS(y, X) results2 = model2.fit() results2.summary() ### INFLUENCE PLOT FOR SINGLE OBSERVATIONS ### fig, ax = plt.subplots(figsize=(10,10)) fig = sm.graphics.influence_plot(results2, 
ax=ax, criterion="cooks") plt.show() # INFLUENCE TABLE # influence = results2.get_influence() influence.summary_frame()['cooks_d'].order() # THE EFFECT OF RESHAPING/DROPPING VARIABLES # res_dropped = results.params / results2.params * 100 - 100 '''Create new regressions and see what these look like after dropping extremely influential variables''' ### RESIDUALS PLOT ### plt.scatter(results2.norm_resid(), results2.fittedvalues) plt.xlabel('Fitted Values') plt.ylabel('Normalized residuals') plt.show() '''Here we're looking for something resembling a shotgun blast. Random points with no identifiable structure''' ### LOOKING AT INDIVIDUAL VARIABLES ### # PARTIAL REGRESSION PLOTS # fig = plt.figure(figsize=(10,10)) fig = sm.graphics.plot_partregress_grid(results2, fig=fig) plt.show() '''Here we want to see a linear relationship''' fig = plt.figure(figsize=(10,10)) fig = sm.graphics.plot_regress_exog(results, 'RBI', fig=fig) plt.show() '''2x2 plot containing DV and fitted values with CIs vs. selected IV, residuals vs. the IV, a partial regression plot, and a CCPR plot. Don't worry about the CCPR plot'''
2906ecce75b2ac95aa33a8672781a51b33e0c125
93e42a3c0f546694992844b7f24668538fcfb01e
/controller.py
e36a7d116f3c83778b2186b56a18296dcafafbf7
[]
no_license
zongjunhao/tencent_relationship_map
a89def2fdf151e12cd2b6d599451a37ac611dab8
61d2c808f1e3f4ea014be5192cec56ee386780c2
refs/heads/master
2023-08-23T20:10:22.132142
2021-10-24T10:17:08
2021-10-24T10:17:08
416,812,111
1
0
null
null
null
null
UTF-8
Python
false
false
1,396
py
from flask import Flask, redirect, url_for, request from flask_cors import CORS import util app = Flask(__name__, static_folder="visualization", static_url_path="") CORS(app, supports_credentials=True) @app.route('/get_init_data') def get_init_data(): return util.generate_graph_data() @app.route('/get_raw_data') def get_raw_data(): return util.generate_raw_data() @app.route('/get_node_distribution') def get_node_distribution(): G = util.load_raw_relation() degree_distribution = util.get_degree_distribution(G) return degree_distribution @app.route('/get_degree_of_node') def get_degree_of_node(): node_id = request.args.get("node_id") G = util.load_raw_relation() return str(util.get_degree_of_node(G, int(node_id))) @app.route('/get_clustering_of_node') def get_clustering_of_node(): node_id = request.args.get("node_id") G = util.load_raw_relation() return str(util.get_clustering_of_node(G, int(node_id))) @app.route('/get_core_of_node') def get_core_of_node(): node_id = request.args.get("node_id") G = util.load_raw_relation() return str(util.get_core_of_node(G, int(node_id))) @app.route('/attack_graph', methods=['POST']) def attack_graph(): node_id = request.form['node_id'] graph = request.form['graph'] return util.attack_graph(graph, node_id) if __name__ == '__main__': app.run(debug=True)
d7a80960660ed56f591c130f94a6772a5d9f6e60
c30450b7794e8ae888a5916e20c74c17d014c6fa
/parser/summary.py
c8a0a835fa2b440f48949edd4f73d0a609967c61
[]
no_license
zacateras/yansp
cc9cfc49c1ee5e1558a5a8bddb6dd2f523b3a508
20e094047364a5cfcb0f5a293fbd979f14411023
refs/heads/master
2020-04-22T18:11:53.363848
2019-05-12T21:33:44
2019-05-12T21:33:44
170,569,331
0
0
null
null
null
null
UTF-8
Python
false
false
2,215
py
from utils.model import count_variables def summary_for_parser(parser): return { 'variables_all': count_variables(parser.variables), 'variables_all_trainable': count_variables(parser.trainable_variables), 'variables_word': count_variables(parser.word_model.variables) if hasattr(parser, 'word_model') else 0, 'variables_word_trainable': count_variables(parser.word_model.trainable_variables) if hasattr(parser, 'word_model') else 0, 'variables_char': count_variables(parser.char_model.variables) if hasattr(parser, 'char_model') else 0, 'variables_char_trainable': count_variables(parser.char_model.trainable_variables) if hasattr(parser, 'char_model') else 0, 'variables_core': count_variables(parser.core_model.variables) if hasattr(parser, 'core_model') else 0, 'variables_core_trainable': count_variables(parser.core_model.trainable_variables) if hasattr(parser, 'core_model') else 0, 'variables_lemma': count_variables(parser.lemma_model.variables) if hasattr(parser, 'lemma_model') else 0, 'variables_lemma_trainable': count_variables(parser.lemma_model.trainable_variables) if hasattr(parser, 'lemma_model') else 0, 'variables_upos': count_variables(parser.upos_model.variables) if hasattr(parser, 'upos_model') else 0, 'variables_upos_trainable': count_variables(parser.upos_model.trainable_variables) if hasattr(parser, 'upos_model') else 0, 'variables_feats': count_variables(parser.feats_model.variables) if hasattr(parser, 'feats_model') else 0, 'variables_feats_trainable': count_variables(parser.feats_model.trainable_variables) if hasattr(parser, 'feats_model') else 0, 'variables_head': count_variables(parser.head_model.variables) if hasattr(parser, 'head_model') else 0, 'variables_head_trainable': count_variables(parser.head_model.trainable_variables) if hasattr(parser, 'head_model') else 0, 'variables_deprel': count_variables(parser.deprel_model.variables) if hasattr(parser, 'deprel_model') else 0, 'variables_deprel_trainable': count_variables(parser.deprel_model.trainable_variables) if hasattr(parser, 'deprel_model') else 0, }
33282c89da89f060278ed17e50013ffdb1f88707
455c1cec4101254a0b7f50349e915411033a0af1
/supervised_learning/0x00-binary_classification/9-neural_network.py
5f65dc0fea7fe410b59fbce3194f1ddcd97e815b
[]
no_license
Daransoto/holbertonschool-machine_learning
30c9f2753463d57cac87f245b77c8d6655351e75
1e7cd1589e6e4896ee48a24b9ca85595e16e929d
refs/heads/master
2021-03-10T14:32:09.419389
2020-10-23T19:47:31
2020-10-23T19:47:31
246,461,514
0
1
null
null
null
null
UTF-8
Python
false
false
1,290
py
#!/usr/bin/env python3 """ Creates a neural network. """ import numpy as np class NeuralNetwork: """ Neural network class. """ def __init__(self, nx, nodes): """ Initializer for the neural network. """ if type(nx) != int: raise TypeError('nx must be an integer') if nx < 1: raise ValueError('nx must be a positive integer') if type(nodes) != int: raise TypeError('nodes must be an integer') if nodes < 1: raise ValueError('nodes must be a positive integer') self.__W1 = np.random.randn(nodes, nx) self.__b1 = np.zeros((nodes, 1)) self.__A1 = 0 self.__W2 = np.random.randn(1, nodes) self.__b2 = 0 self.__A2 = 0 @property def W1(self): """ Getter for W1. """ return self.__W1 @property def b1(self): """ Getter for b1. """ return self.__b1 @property def A1(self): """ Getter for A1. """ return self.__A1 @property def W2(self): """ Getter for W2. """ return self.__W2 @property def b2(self): """ Getter for b2. """ return self.__b2 @property def A2(self): """ Getter for A2. """ return self.__A2
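A short sketch showing how the class above is instantiated and what the property getters expose (shapes follow directly from the constructor; the layer sizes are example values):
nn = NeuralNetwork(nx=784, nodes=16)   # e.g. 784 input features, 16 hidden nodes
print(nn.W1.shape)   # (16, 784) -- hidden-layer weights
print(nn.b1.shape)   # (16, 1)   -- hidden-layer biases
print(nn.W2.shape)   # (1, 16)   -- output-layer weights
print(nn.A2)         # 0 until a forward pass sets the activated output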
e46294ff8a6718f3fb4b012273d0b1ce052e33ed
ca5334081b6fc6298becc0aac4b6ef5872b484e2
/comments/validators.py
079e918c70c9b1ce25fbb81d90f9340ac85afa63
[]
no_license
tachiefab/codewithtm
1b575f9884b19124303419fb5c5029e4ab7b3306
42ea8d761103a695c4428a5a1204a176cef2e3b5
refs/heads/master
2023-03-15T17:28:45.582101
2021-03-08T04:41:31
2021-03-08T04:41:31
284,474,285
0
0
null
null
null
null
UTF-8
Python
false
false
197
py
from django.core.exceptions import ValidationError def validate_content(value): content = value if content == "": raise ValidationError("Content cannot be blank") return value
82bf4f9dd14a53dadeab63ace0b26d76c4989687
866d527d9264765dc2ada2fcd523163e9d686061
/practices/baby_shark.py
c01dc9e39c3901d132766be6c7f50e4862e58490
[ "MIT" ]
permissive
kimjiwook0129/Coding-Interivew-Cheatsheet
910d245b83039d59302df71ac5776425ab1b92c2
574e6acecdb617b9c3cef7ec3b154ab183d8b99a
refs/heads/main
2023-08-21T03:57:43.328811
2021-10-02T08:59:27
2021-10-02T08:59:27
371,915,448
3
0
null
null
null
null
UTF-8
Python
false
false
1,835
py
# https://www.acmicpc.net/problem/16236 from collections import deque def bfs(table, curRow, curCol, N, sharkSize): visited = [[False] * N for _ in range(N)] q = deque([(0, curRow, curCol)]) dx, dy = [-1, 0, 0, 1], [0, -1, 1, 0] arrayOfPossibilities = [] while q: dis, row, col = q.popleft() visited[row][col] = True if len(arrayOfPossibilities) > 0 and dis > arrayOfPossibilities[0][2]: break if table[row][col] > 0 and table[row][col] < sharkSize: arrayOfPossibilities.append((row, col, dis)) for i in range(4): nx = row + dx[i] ny = col + dy[i] if nx >= 0 and nx < N and ny >= 0 and ny < N: if not visited[nx][ny]: if table[nx][ny] <= sharkSize: q.append((dis + 1, nx, ny)) else: visited[nx][ny] = True if len(arrayOfPossibilities) > 0: arrayOfPossibilities.sort() thisOne = arrayOfPossibilities[0] return ((thisOne[0], thisOne[1]), thisOne[2]) return ((1, 1), -1) if __name__ == "__main__": N = int(input()) table = [] curRow, curCol = 0, 0 for i in range(N): data = list(map(int, input().split())) if 9 in data: j = data.index(9) curRow, curCol = i, j table.append(data) sharkSize, sharkFeed = 2, 0 time = 0 while True: nextCoord, distance = bfs(table, curRow, curCol, N, sharkSize) if distance == -1: break time += distance table[curRow][curCol] = 0 curRow, curCol = nextCoord table[curRow][curCol] = 9 sharkFeed += 1 if sharkFeed == sharkSize: sharkSize += 1 sharkFeed = 0 print(time)
8982cf56ab7561232b24afc577a5d47a1a120f37
57d9d4e881ea308db01938c39afc20091d87a1ab
/src/web/views.py
f96b8cb43a75b61fe5d479b89a92e0b96f0a90c3
[]
no_license
logworthy/parku
14eea4351c366d5b0a03e56fa385fb8df0d71b90
e2fc9323d42abc4e616ac717ab4e7d009c1abe87
refs/heads/master
2020-04-06T06:54:25.282258
2014-08-30T03:56:39
2014-08-30T03:56:39
null
0
0
null
null
null
null
UTF-8
Python
false
false
102
py
from django.shortcuts import render def index(request): return render(request, "web/index.html")
c0fd1993ef34225c74db2d83b77f92209b7547fb
08355c36cafc4ad86a5e85a6a70204796a6722f6
/api/entity/image.py
219bd7cfa852a70ffeb8bf1b98f43b22c530210b
[]
no_license
vankcdhv/CosinSimilarity
0a3d114be3cfc8dec3e80ec88dbbea7d0f4bd690
70cb59329e55eecc0c57e9e9dbcca04e39ff3ea5
refs/heads/main
2023-04-19T22:08:03.653394
2021-05-11T02:07:48
2021-05-11T02:07:48
366,228,611
0
0
null
null
null
null
UTF-8
Python
false
false
703
py
from multipledispatch import dispatch class Image: @dispatch() def __init__(self): self.__id = 0 self.__postID = 0 self.__url = '' @dispatch(object) def __init__(self, row): self.__id = row[0] self.__postID = row[1] self.__url = row[2] @property def id(self): return self.__id @id.setter def id(self, value): self.__id = value @property def postID(self): return self.__postID @postID.setter def postID(self, value): self.__postID = value @property def url(self): return self.__url @url.setter def url(self, value): self.__url = value
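A short sketch of the two dispatched constructors above; the row layout (id, postID, url) mirrors the indexing in the second __init__, and the values are placeholders:
img_empty = Image()                          # zero-argument overload: id=0, postID=0, url=''
row = (7, 42, "http://example.com/a.jpg")    # e.g. a tuple fetched from a DB cursor
img_from_row = Image(row)                    # single-argument overload unpacks the row
print(img_from_row.id, img_from_row.postID, img_from_row.url)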
06f952c695c3533ca0dd029f3e93895af5b02c59
5c8139f1e57e06c7eaf603bd8fe74d9f22620513
/PartB/py删除链表的倒数第n个节点的位置的值2.py
ab9093a8ca2755b9b1f62111641d210996e07d4a
[]
no_license
madeibao/PythonAlgorithm
c8a11d298617d1abb12a72461665583c6a44f9d2
b4c8a75e724a674812b8a38c0202485776445d89
refs/heads/master
2023-04-03T07:18:49.842063
2021-04-11T12:02:40
2021-04-11T12:02:40
325,269,130
0
0
null
null
null
null
UTF-8
Python
false
false
915
py
# Remove the n-th node from the end of a linked list. class ListNode(object): def __init__(self, x): self.val = x self.next = None class Solution(object): def remove(self, head, n): dummy = ListNode(-1) dummy.next = head slow = dummy fast = dummy for i in range(n): fast = fast.next while fast and fast.next: fast = fast.next slow = slow.next slow.next = slow.next.next return dummy.next if __name__ == "__main__": s = Solution() n1 = ListNode(1) n2 = ListNode(2) n3 = ListNode(3) n4 = ListNode(4) n5 = ListNode(5) n6 = ListNode(6) n1.next = n2 n2.next = n3 n3.next = n4 n4.next = n5 n5.next = n6 n6.next = None k = 2 res = s.remove(n1, k) while res: print(res.val, end="->") res = res.next
47e8f9432798989895c7cbfef00d209e0fdc4bb3
45c870a3edf37781efd6059a3d879aedf9da7f7f
/custom_resize_drag_toolbar_pyqt5/example.py
cd9c9f2dad5a08e344715d5aaa95e6dcedafa101
[]
no_license
saladeen/custom_resize_drag_toolbar_pyqt5
e6dc8598df6b7d58bf3114bfa348db38c2b1512b
f38aa8b263b08fd0f94ea2e1428e873cdadce80e
refs/heads/main
2023-08-11T04:44:53.349929
2021-10-01T19:10:20
2021-10-01T19:10:20
412,588,371
0
0
null
null
null
null
UTF-8
Python
false
false
618
py
from PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout from PyQt5.QtCore import Qt import resizable_qwidget import toolbar import sys class ExampleWindow(resizable_qwidget.TestWindow): def __init__(self): super().__init__() layout = QHBoxLayout() layout.addWidget(toolbar.CustomToolbar(self, "Example")) layout.setAlignment(Qt.AlignTop) self.setLayout(layout) self.move(300, 300) self.resize(300, 300) if __name__ == "__main__": app = QApplication(sys.argv) mw = ExampleWindow() mw.show() sys.exit(app.exec_())
4611dd4f883110ac156a9299f169d60ae6b42feb
12a47a2aca78086c76dc4e669abfe00fa012f6c5
/day9.py
f51e18f0fadb0285ed3c50cf564b13ddd53747dc
[]
no_license
susieir/advent_of_code_2020
67e155ffff57b662b3edde0235da6290cfb2c331
f1881dad8c7d790654357c2cdb5a1830af560c12
refs/heads/main
2023-02-20T05:18:33.991166
2021-01-21T15:56:05
2021-01-21T15:56:05
330,618,452
0
0
null
null
null
null
UTF-8
Python
false
false
1,480
py
""" Advent of code puzzle - Day 9""" def create_input_list(filename): """ Reads the input data and stores as a list""" with open(filename, 'r') as fp: return [int(x) for x in fp.read().splitlines()] def inspect_check_number(filename, step=5, inc=0): """ Function that inspects whether the check number is valid by checking against a list of valid numbers""" # Set up the list cypher = create_input_list(filename) # Find the check number to be inspected check_number = cypher[step + inc] # Initialise check_list - stores possible valid check_numbers check_list = [] # Loop through the previous numbers for i in cypher[inc : step + inc]: for j in cypher[inc : step + inc]: if i != j: check_list.append(i + j) # Check if check_number is in check_list if check_number in check_list: return (check_number, "ok") else: return (check_number, "test failed") def main(filename, step = 5): """Iterates through each item in cypher""" c = 0 # initialise c cypher = create_input_list(filename) for c in range(len(cypher) - step): if inspect_check_number(filename, step, inc = c)[1] == "test failed": return inspect_check_number(filename, step, inc = c) break #print(inspect_check_number('day9.txt', inc = 9)) if __name__ == '__main__': print(main('day9.txt', step=25))
b0ebd397cc8459a46dd0ef18c330ccdc2c8d2efb
bef4b43dc0a93697dfb7befdf4434994d109d242
/extract_features.py
0bb7bcc29969f2399ab42483e98a35287f5e4aac
[]
no_license
karanjsingh/Object-detector
69d9e5154b9f73028760d6d76da1a0f55038cfea
9114e95f79e2dd77a3cbbbee92e4432e5c237362
refs/heads/master
2020-06-25T22:31:14.941147
2020-01-14T23:36:22
2020-01-14T23:36:22
199,440,746
1
0
null
2019-07-29T11:43:34
2019-07-29T11:34:47
null
UTF-8
Python
false
false
3,513
py
#import necessary packages from __future__ import print_function from sklearn.feature_extraction.image import extract_patches_2d from pyimagesearch.object_detection import helpers from pyimagesearch.utils import dataset from pyimagesearch.utils import conf from pyimagesearch.descriptors import hog from imutils import paths from scipy import io import numpy as np import argparse import random import cv2 import progressbar # construct an argument parser ap = argparse.ArgumentParser() ap.add_argument("-c","--conf",required=True,help="path to configuration file") args = vars(ap.parse_args()) #load configuration file conf= conf.Conf(args["conf"]) hog = hog.HOG(orientations=conf["orientations"], pixelsPerCell = tuple(conf["pixels_per_cell"]), cellsPerBlock=tuple(conf["cells_per_block"]) , normalise = conf["normalize"]) data=[] labels=[] # grab the ground-truth images and select a percentage of them for training trnPaths=list(paths.list_images(conf["image_dataset"])) trnPaths= random.sample(trnPaths, int(len(trnPaths)*conf["percent_gt_images"])) print("[INFO] describing training ROIs...") # set up the progress bar widgets = ["Extracting: ", progressbar.Percentage(), " ", progressbar.Bar(), " ", progressbar.ETA()] pbar = progressbar.ProgressBar(maxval=len(trnPaths), widgets=widgets).start() # loop over training paths for (i,trnPath) in enumerate(trnPaths): # load the image, convert it to grayscale, and extract the image ID from the path image = cv2.imread(trnPath) image = cv2.cvtColor(image , cv2.COLOR_BGR2GRAY) imageID = trnPath[trnPath.rfind("_")+1:].replace(".jpg","") # load the annotation file and extract the bounding box p="{}/annotation_{}.mat".format(conf["image_annotations"], imageID) bb=io.loadmat(p)["box_coord"][0] roi = helpers.crop_ct101_bb(image,bb,padding=conf["offset"],dstSize=tuple(conf["window_dim"])) # define the list of ROIs that will be described, based on whether or not the # horizontal flip of the image should be used rois = (roi, cv2.flip(roi, 1)) if conf["use_flip"] else (roi,) # loop over the ROIs for roi in rois: # extract features from the ROI and update the list of features and labels features = hog.describe(roi) data.append(features) labels.append(1) # update the progress bar pbar.update(i) # grab the distraction (negative) image paths and reset the progress bar pbar.finish() dstPaths= list(paths.list_images(conf["image_distractions"])) pbar = progressbar.ProgressBar(maxval=conf["num_distraction_images"], widgets=widgets).start() print("[INFO] describing distraction ROIs...") # loop over the desired number of distraction images for i in np.arange(0,conf["num_distraction_images"]): # randomly select a distraction image, load it, convert it to grayscale, and # then extract random patches from the image image = cv2.imread(random.choice(dstPaths)) image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) patches = extract_patches_2d(image, tuple(conf["window_dim"]), max_patches=conf["num_distractions_per_image"]) # loop over the patches for patch in patches: # extract features from the patch, then update the data and label list features = hog.describe(patch) data.append(features) labels.append(-1) # update the progress bar pbar.update(i) # dump the dataset to file pbar.finish() print("[INFO] dumping features and labels to file...") dataset.dump_dataset(data, labels, conf["features_path"], "features")
d41314fca2d3cdd3ddb87acbf55bd346e0642cd2
05b598f07c5f58c4278fd7d3e31b2a74f84d7fcb
/SConstruct
429244eff91806e3da6047183f46ddd3e9f0a1f8
[]
no_license
jpcummins/sortr
1e604e1a0b9aecf8ae4456489c7d6e692008b97f
329f0f1c9a89002088673945f2464740637f5612
refs/heads/master
2020-05-16T21:08:18.930490
2010-11-22T01:04:38
2010-11-22T01:04:38
957,342
1
0
null
null
null
null
UTF-8
Python
false
false
94
import os env = Environment(CCFLAGS = '-g -Wall -pedantic') env.Program('test', Glob('*.c'))
3add78b720f640eb549c134985d2b3184558de5d
151f71970df1e6e3734b1a9f834db51d02bdf2db
/tools/validator.py
6c54b18254dbdae4f036144f4dfdbc4dff7b9cf9
[]
no_license
mbiparva/slowfast-networks-pytorch
20e6ea77f58f4c67c40cda1234e6f30a234ef8aa
27da5fc479e38d7440d9651ee236bf4a296e7a55
refs/heads/master
2020-07-07T17:42:30.538222
2019-08-20T18:59:08
2019-08-20T18:59:08
203,425,393
21
4
null
null
null
null
UTF-8
Python
false
false
545
py
from tv_abc import TVBase import torch class Validator(TVBase): def __init__(self, mode, meters, device): super().__init__(mode, meters, device) def set_net_mode(self, net): net.eval() def batch_main(self, net, x_slow, x_fast, annotation): with torch.no_grad(): p = net.forward((x_slow, x_fast)) a = self.generate_gt(annotation) loss = net.loss_update(p, a, step=False) acc = self.evaluate(p, a) return {'loss': loss, 'label_accuracy': acc}
b7f47856158c111a86a9df8272bdabd459f506dc
8d0e0fa1062d575a0370bb842f7cafd85dc58ff9
/Graphics/Players_Graphic.py
3e9fece82cadf4d62cc54acc9677149cc92b6134
[]
no_license
KorneevVladislav/Blackjack
4aa1418bed12ddb17e0abd2324d814007939ac2a
abeeceba061e1fc874286ce002611694083ec0d9
refs/heads/master
2021-05-17T18:38:16.106196
2020-04-27T17:29:49
2020-04-27T17:29:49
250,922,343
0
0
null
null
null
null
UTF-8
Python
false
false
61
py
import pygame #def PlayerGraphics(): #def DealerGraphics():
68ad4b3925c2279ab9c55e808cc56b98321c3f2a
042688362e547b2c45427f90723196f5d9f56792
/応用編/37.改行コードの取得/os.linesep.py
a60233ee7ad6f2da8f510d8e3f3cb0754b671128
[]
no_license
Watapon1704/python_study
5a0b482f2cd5f4b02f4411a812b30ef260a8a7c5
a196c692ff5b232c108f301ce5e165bc781df55e
refs/heads/master
2020-03-12T00:59:31.671306
2019-03-27T12:48:44
2019-03-27T12:48:44
130,363,832
0
0
null
null
null
null
UTF-8
Python
false
false
85
py
import os test_str = 'python-izm.com' print(test_str.replace('.', os.linesep))
f10d585c637387ccc269aab61ce295e13ab11663
321e58ab3e6b2385bb3549aaaefd56a58c2a51e7
/python/atpic/perf_postgres.py
3c2b1312c886a38a2fa3d9e62deeb883a4697fb5
[]
no_license
alexmadon/atpic_photosharing
7829118d032344bd9a67818cd50e2c27a228d028
9fdddeb78548dadf946b1951aea0d0632e979156
refs/heads/master
2020-06-02T15:00:29.282979
2017-06-12T17:09:52
2017-06-12T17:09:52
94,095,494
0
0
null
null
null
null
UTF-8
Python
false
false
1,408
py
import atpic.database import time import pycurl import StringIO import cStringIO time1=time.time() for i in range(1,100): print i con=atpic.database.connect() listofdict=atpic.database.query("select 1",con) con.close() time2=time.time() print "==========" con=atpic.database.connect() for i in range(1,100): print i query="select id from artist_pic where id='%i'" % i listofdict=atpic.database.query(query,con) con.close() time3=time.time() # using Solr + curl new curl handle each time (new socket) #fp=open("/dev/null","w") fp=cStringIO.StringIO() for i in range(1,100): print i url="http://localhost:8983/solr/select/?q=pid:%i&fl=pid" % i c=pycurl.Curl() # c.setopt(c.WRITEDATA,fp); c.setopt(c.WRITEFUNCTION, fp.write) c.setopt(c.URL, url); c.perform() c.close() # print data fp.close() time4=time.time() # using Solr + curl same curl handle c=pycurl.Curl() fp=cStringIO.StringIO() for i in range(1,100): print i #c.setopt(c.WRITEDATA,fp); url="http://localhost:8983/solr/select/?q=pid:%i&fl=pid" % i c.setopt(c.WRITEFUNCTION, fp.write) c.setopt(c.URL, url); c.perform() c.close() fp.close() time5=time.time() print "Time1 %s" % (time2-time1) print "Time2 %s" % (time3-time2) print "Ratio=%f" % ((time2-time1)/(time3-time2)) print "Time3 %s" % (time4-time3) print "Time4 %s" % (time5-time4)
9ac60f6dc3755d4c8f3c20fd4d1cd54718994a90
2faf152deabb0476ac43d4754f3b529fd678a36d
/ch_18.py
3d923149df97df02941390334db1bf1ff1f74392
[]
no_license
Sakartu/matasano
46cba1325a01c41f6272f80b9fa698c6338c2e50
b42e5a2ce5daa2fcc6691873e995a4b0d05e03d2
refs/heads/master
2021-01-23T09:51:50.305296
2015-08-10T15:37:59
2015-08-10T15:37:59
32,535,769
0
0
null
null
null
null
UTF-8
Python
false
false
542
py
#!/usr/bin/env python3 # -*- coding: utf8 -*- """ Usage: test_ctr.py """ import base64 import util __author__ = 'peter' def main(): test = base64.b64decode('L77na/nrFsKvynd6HzOoG7GHTLXsTVu9qvY/2syLXzhPweyyMTJULu/6/kXX0KSvoOLSFQ==') assert util.aes_ctr_decrypt(test, b"YELLOW SUBMARINE") == b"Yo, VIP Let's kick it Ice, Ice, baby Ice, Ice, baby " k = util.get_random_bytes(16) m = b'This is an interesting message' assert util.aes_ctr_decrypt(util.aes_ctr_encrypt(m, k), k) == m if __name__ == '__main__': main()
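util.aes_ctr_encrypt/decrypt are not shown in this file; a rough sketch of a compatible AES-CTR helper for this challenge format (64-bit little-endian nonce followed by a 64-bit little-endian block counter), assuming pycryptodome as the block-cipher primitive:
import struct
from Crypto.Cipher import AES   # pycryptodome

def aes_ctr_encrypt(data, key, nonce=0):
    ecb = AES.new(key, AES.MODE_ECB)
    out = bytearray()
    for offset in range(0, len(data), 16):
        # keystream block = AES-ECB(key, nonce || block_counter), both little-endian 64-bit
        keystream = ecb.encrypt(struct.pack('<QQ', nonce, offset // 16))
        block = data[offset:offset + 16]
        out.extend(a ^ b for a, b in zip(block, keystream))
    return bytes(out)

aes_ctr_decrypt = aes_ctr_encrypt   # CTR is symmetric: decryption XORs the same keystream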
68cfaa5dc120b4df33a466b592bd6685a7dceb21
83aca2c2e2608d3a44b43c2bc9fa396e290580f9
/Faculty/PlagueGame/src/repository/file_repository.py
0007ecd2807ac5d2d2e4d86144fef2d96e340587
[]
no_license
biancadragomir/school-work
b91fb2e947435b9030d7e5ef8e2f5e362698d5eb
ca43abc656d2e0d87bc5a389d77de038fa220fdd
refs/heads/master
2020-04-03T05:49:58.245702
2018-10-28T10:07:46
2018-10-28T10:07:46
155,057,194
1
0
null
null
null
null
UTF-8
Python
false
false
1,359
py
from domain.entities import Person from repository.person_repository import PersonRepository class FileRepoException(Exception): def __init__(self, msg): self.__msg = msg def __str__(self): return self.__msg class FileRepo(PersonRepository): def __init__(self, fileName): PersonRepository.__init__(self) self.__fName = fileName self.__readFromFile() def __readFromFile(self): try: with open(self.__fName, 'r') as f: line = f.readline().strip() while line != "": args = line.split(",") person = Person(args[0], args[1], args[2]) PersonRepository.add(self, person) line = f.readline().strip() except IOError: raise FileRepoException("could not read file " + self.__fName) def __writeToFile(self): f = open(self.__fName, 'w') persons = PersonRepository.get_all(self) for p in persons: pers = str(p.id) + "," + p.immunization + "," + p.status pers += "\n" f.write(pers) f.close() def add(self, person): PersonRepository.add(self, person) self.__writeToFile() def remove(self, person): PersonRepository.remove(self, person) self.__writeToFile()
fd910a171a8cf2b17e59bea547030c2eb288ab75
32c993540a42ac5110e82ee9f23b1c7c9ce32332
/logicaldoc/__init__.py
2f8ffb535901cf92a58100dcf389fa5cab41f57e
[]
no_license
OpenCode/logicaldoc
2f7ef4287eac677b5033e4ed9b8c4eefd68c67f0
089a35c86db9c0bb024bc6bfcee699f84b8169ad
refs/heads/master
2020-05-21T06:00:46.577484
2019-03-27T13:54:30
2019-03-27T13:54:30
84,583,273
0
0
null
null
null
null
UTF-8
Python
false
false
203
py
# -*- coding: utf-8 -*- # © 2016 Francesco Apruzzese <[email protected]> # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). from .logicaldoc import LogicalDoc from .constant import *
abe8946f42327bcd1a8465e567045df15152ea4d
5e5a301bbd9887dee50d416e2a0de61b5c8133ad
/webhelper/models.py
85b0fce4b4ec05e137f39107ff5fca70e91959a7
[]
no_license
dmalikcs/django-webhelper
a3965656e1b95a9d715b442919dcd80ba45baa2f
d2f0e7ac3f72b16ba696525f7ac903c8d8c876ee
refs/heads/master
2020-06-02T08:28:33.194264
2014-04-15T16:56:45
2014-04-15T16:56:45
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,603
py
from django.db import models from django.contrib.sites.models import Site from django.core.validators import RegexValidator class SocialLinks(models.Model): ''' facebook linkedin twitter gluse rss ''' facebook = models.URLField( max_length=100, blank=True, null=True ) linkedin = models.URLField( max_length=100, blank=True, null=True ) twitter = models.URLField( max_length=100, blank=True, null=True ) gpluse = models.URLField( max_length=100, blank=True, null=True ) rss = models.URLField( max_length=100, blank=True, null=True ) site = models.OneToOneField(Site) class Meta: verbose_name = 'Social Links' verbose_name_plural = 'Soical Links' def __unicode__(self): return self.site.domain class BaseAddress(models.Model): ''' BaseAddress model extend in OfficeAddress/RegisterAddress ''' name = models.CharField( max_length=30, blank=True, null=True ) street_1 = models.CharField( max_length=500, blank=True, null=True ) street_2 = models.CharField( max_length=200, blank=True, null=True ) city = models.CharField( max_length=100, blank=True, null=True ) country = models.CharField( max_length=100, blank=True, null=True ) site = models.OneToOneField(Site) class Meta: abstract = True class RegisterAddress(BaseAddress): class Meta: verbose_name = 'Register Address' verbose_name_plural = 'Register Address' def __unicode__(self): return self.name class OfficeAddress(BaseAddress): class Meta: verbose_name = 'office Address' verbose_name = 'office Address' def __unicode__(self): return self.name class GeneralInfo(models.Model): phone_1 = models.CharField( max_length=15, blank=True, null=True, validators=[ RegexValidator( r'^[-\d+]+$', 'Enter the valid phone number' ), ] ) phone_2 = models.CharField( max_length=15, blank=True, null=True, validators=[ RegexValidator( r'^[-\d+]+$', 'Enter the valid phone number' ), ] ) phone_3 = models.CharField( max_length=15, blank=True, null=True, validators=[ RegexValidator( r'^[-\d+]+$', 'Enter the valid phone number' ), ] ) fax = models.CharField( max_length=15, blank=True, null=True, validators=[ RegexValidator( r'^[-\d+]+$', 'Enter the valid Fax number' ), ] ) tollfree = models.CharField( max_length=11, blank=True, null=True ) support_email = models.EmailField( blank=True, null=True ) sales_email = models.EmailField( blank=True, null=True ) Billing_email = models.EmailField( blank=True, null=True ) Website = models.URLField() site = models.OneToOneField(Site) class Meta: verbose_name = 'general Info' verbose_name_plural = 'general infos' def __unicode__(self): return self.site.domain
052349cced621bbf4fe2950e0da1e1f43cdde479
ece5aafef31d93ad9e344f71f5d33d19a7a87651
/model/pspnet2/cil.pspnet2.R101/eval_all.py
e2620a6800f92f1d3190d77d02c16e18fd42762b
[ "MIT" ]
permissive
lxxue/TorchSeg
f8325b97b55d4da7ea4a25ea812b122ab9ce661c
da5eae8c2c3734924a5178cf3c9e4dafb9c6c16f
refs/heads/master
2022-11-27T11:39:11.302225
2020-08-01T07:57:56
2020-08-01T07:57:56
274,429,653
0
2
MIT
2020-08-01T13:39:10
2020-06-23T14:37:22
Python
UTF-8
Python
false
false
360
py
import os import numpy as np epoch_nums = np.arange(200, 4100, step=200) print(epoch_nums) for e in epoch_nums: print("-----------------------------------------------------------------") print(e) os.system("python eval.py -e {} -p results_eval/epoch{}/".format(e, e)) print("-----------------------------------------------------------------")
4d9688118734773d4a204a1b601cc1ffda2a036e
dc987f2153345dfb383de804c112aa54be2199d7
/Code buoi hoc/Buoi 7/bt.py
a3530b4d9ecee6edfc2ad920e627c86ecbde8793
[]
no_license
lva123/python101-sinh-vien
c2d204a7dec26bcff2f212372468bb658beed0ac
cdc177f30404cd2175b11e6ad0e9877df7252497
refs/heads/main
2023-09-03T00:51:35.641239
2021-11-20T03:19:10
2021-11-20T03:19:10
429,988,208
0
0
null
2021-11-20T02:23:03
2021-11-20T02:23:02
null
UTF-8
Python
false
false
351
py
#!/bin/python3 import math import os import random import re import sys if __name__ == '__main__': n = int(input().strip()) if n % 2 != 0: print("Weird") else: if 2 <= n <= 5: print("Not weird") elif 6 <= n <= 20: print("Weird") elif 20 < n: print("Not weird")
532a4c353a1544432b498ed028eb0f65b6b9fc4d
e2860eb874db045fb8d0279566a935af907e5bdf
/ml/ml07_1_boston.py
b245a54bef04d78667e33b52f33e63088f0a8179
[]
no_license
MinseokCHAE/Bitcamp2_new
dda7990907cb136c2e709a345eec634dfdb6ac02
849adb5a330b621f1c681f0b5e92005d1281a44d
refs/heads/main
2023-08-31T03:28:18.068561
2021-10-05T00:48:52
2021-10-05T00:48:52
390,228,262
0
0
null
null
null
null
UTF-8
Python
false
false
1,847
py
import numpy as np import time from sklearn.metrics import r2_score from sklearn.model_selection import train_test_split from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler, QuantileTransformer, OneHotEncoder from sklearn.datasets import load_boston from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import Dense, Input, Conv1D, Flatten, MaxPooling1D, GlobalAveragePooling1D, Dropout from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.utils import to_categorical #1. data preprocessing boston = load_boston() x = boston.data y = boston.target x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=21) scaler = MinMaxScaler() scaler.fit(x_train) x_train = scaler.transform(x_train) x_test = scaler.transform(x_test) from sklearn.model_selection import KFold, cross_val_score, GridSearchCV from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import accuracy_score n_splits = 5 kfold = KFold(n_splits=n_splits, shuffle=True, random_state=21) parameter = [ {'n_estimators': [100,200]}, {'max_depth': [6, 8, 10, 12]}, {'min_samples_leaf': [3, 5, 7, 10]}, {'min_samples_split': [2, 3, 5, 10]}, {'n_jobs': [-1, 2, 4]} ] model = RandomForestRegressor() grid = GridSearchCV(model, parameter, cv=kfold) grid.fit(x_train, y_train) best_estimator = grid.best_estimator_ best_score = grid.best_score_ # y_pred = grid.predict(x_test) # acc_score = accuracy_score(y_test, y_pred) grid_score = grid.score(x_test, y_test) print('best parameter = ', best_estimator) print('best score = ', best_score) # print('acc score = ', acc_score) print('grid score = ', grid_score) # best parameter = RandomForestRegressor(min_samples_split=5) # best score = 0.830591307770115 # grid score = 0.8783616408326427
dbb6da85866332dca534ebaa601baddbff1949fb
059742e69e6842eea5fff25acc8329a08ea3eb86
/OauthDemo/bloggo/posts/urls.py
28d10dcf290be21c565343a95fdf034bcb0bbb13
[]
no_license
commanderchewbacca/Guides
856aa556fa263f32282b4607d64f1f02f98316d3
e07a5d71c556d46cebb54dc404dc73ce16057b7a
refs/heads/master
2020-03-11T00:22:07.701088
2018-04-15T02:08:32
2018-04-15T02:08:32
null
0
0
null
null
null
null
UTF-8
Python
false
false
380
py
from django.conf.urls import url from . import views app_name="posts" urlpatterns = [ url(r'^/$', views.index, name='index'), url(r'^about/$', views.about, name='about'), url(r'^details/(?P<pk>\d+)$', views.post_details, name='post_details'), url(r'^fitbitinfo', views.fitbit_info, name='registerfitbit'), url(r'^getactivitydata/', views.fitbit_callback), ]
78b580625bf05f9a4e3f617d22606d8993dc1471
07c27cbba56ffb1f2e391d2aaceefba039f68667
/bin/svg.py
0f7e900113122f37f95eb346261053c090c4287c
[]
no_license
rheiland/tool4nanobio
beb3914ad23638bb856454832c83ab3c6535ae86
e872ae02e7df784bcde0481b30c6d97a0ae3a517
refs/heads/master
2020-04-25T17:11:52.995649
2020-01-16T11:32:47
2020-01-16T11:32:47
172,938,698
3
3
null
null
null
null
UTF-8
Python
false
false
13,242
py
# SVG (Cell Plot) Tab import os from ipywidgets import Layout, Label, Text, Checkbox, Button, HBox, VBox, Box, \ FloatText, BoundedIntText, BoundedFloatText, HTMLMath, Dropdown, interactive, Output from collections import deque import xml.etree.ElementTree as ET import matplotlib.pyplot as plt import matplotlib.colors as mplc import numpy as np import zipfile import glob import platform from debug import debug_view hublib_flag = True if platform.system() != 'Windows': try: # print("Trying to import hublib.ui") from hublib.ui import Download except: hublib_flag = False else: hublib_flag = False class SVGTab(object): def __init__(self): # tab_height = '520px' # tab_layout = Layout(width='900px', # border='2px solid black', # height=tab_height, overflow_y='scroll') self.output_dir = '.' constWidth = '180px' # self.fig = plt.figure(figsize=(6, 6)) # self.fig = plt.figure(figsize=(7, 7)) max_frames = 1 self.svg_plot = interactive(self.plot_svg, frame=(0, max_frames), continuous_update=False) plot_size = '500px' plot_size = '700px' self.svg_plot.layout.width = plot_size self.svg_plot.layout.height = plot_size self.use_defaults = True self.show_nucleus = 0 # 0->False, 1->True in Checkbox! self.show_edge = 1 # 0->False, 1->True in Checkbox! self.scale_radius = 1.0 self.axes_min = 0.0 self.axes_max = 2000 # hmm, this can change (TODO?) self.max_frames = BoundedIntText( min=0, max=99999, value=max_frames, description='Max', layout=Layout(width='160px'), # layout=Layout(flex='1 1 auto', width='auto'), #Layout(width='160px'), ) self.max_frames.observe(self.update_max_frames) self.show_nucleus_checkbox= Checkbox( description='nucleus', value=False, disabled=False, layout=Layout(width=constWidth), # layout=Layout(flex='1 1 auto', width='auto'), #Layout(width='160px'), ) self.show_nucleus_checkbox.observe(self.show_nucleus_cb) self.show_edge_checkbox= Checkbox( description='edge', value=True, disabled=False, layout=Layout(width=constWidth), # layout=Layout(flex='1 1 auto', width='auto'), #Layout(width='160px'), ) self.show_edge_checkbox.observe(self.show_edge_cb) # row1 = HBox([Label('(select slider: drag or left/right arrows)'), # self.max_frames, VBox([self.show_nucleus_checkbox, self.show_edge_checkbox])]) # self.max_frames, self.show_nucleus_checkbox], layout=Layout(width='500px')) # self.tab = VBox([row1,self.svg_plot], layout=tab_layout) items_auto = [Label('select slider: drag or left/right arrows'), self.max_frames, self.show_nucleus_checkbox, self.show_edge_checkbox, ] #row1 = HBox([Label('(select slider: drag or left/right arrows)'), # max_frames, show_nucleus_checkbox, show_edge_checkbox], # layout=Layout(width='800px')) box_layout = Layout(display='flex', flex_flow='row', align_items='stretch', width='70%') row1 = Box(children=items_auto, layout=box_layout) if (hublib_flag): self.download_button = Download('svg.zip', style='warning', icon='cloud-download', tooltip='You need to allow pop-ups in your browser', cb=self.download_cb) download_row = HBox([self.download_button.w, Label("Download all cell plots (browser must allow pop-ups).")]) # self.tab = VBox([row1, self.svg_plot, self.download_button.w], layout=tab_layout) # self.tab = VBox([row1, self.svg_plot, self.download_button.w]) self.tab = VBox([row1, self.svg_plot, download_row]) else: self.tab = VBox([row1, self.svg_plot]) def update(self, rdir=''): # with debug_view: # print("SVG: update rdir=", rdir) if rdir: self.output_dir = rdir all_files = sorted(glob.glob(os.path.join(self.output_dir, 'snapshot*.svg'))) if len(all_files) > 0: 
last_file = all_files[-1] self.max_frames.value = int(last_file[-12:-4]) # assumes naming scheme: "snapshot%08d.svg" # with debug_view: # print("SVG: added %s files" % len(all_files)) def download_cb(self): file_str = os.path.join(self.output_dir, '*.svg') # print('zip up all ',file_str) with zipfile.ZipFile('svg.zip', 'w') as myzip: for f in glob.glob(file_str): myzip.write(f, os.path.basename(f)) # 2nd arg avoids full filename path in the archive def show_nucleus_cb(self, b): global current_frame if (self.show_nucleus_checkbox.value): self.show_nucleus = 1 else: self.show_nucleus = 0 # self.plot_svg(self,current_frame) self.svg_plot.update() def show_edge_cb(self, b): if (self.show_edge_checkbox.value): self.show_edge = 1 else: self.show_edge = 0 self.svg_plot.update() def update_max_frames(self,_b): self.svg_plot.children[0].max = self.max_frames.value def plot_svg(self, frame): # global current_idx, axes_max global current_frame current_frame = frame fname = "snapshot%08d.svg" % frame full_fname = os.path.join(self.output_dir, fname) # with debug_view: # print("plot_svg:", full_fname) if not os.path.isfile(full_fname): print("Once output files are generated, click the slider.") return xlist = deque() ylist = deque() rlist = deque() rgb_list = deque() # print('\n---- ' + fname + ':') # tree = ET.parse(fname) tree = ET.parse(full_fname) root = tree.getroot() # print('--- root.tag ---') # print(root.tag) # print('--- root.attrib ---') # print(root.attrib) # print('--- child.tag, child.attrib ---') numChildren = 0 for child in root: # print(child.tag, child.attrib) # print("keys=",child.attrib.keys()) if self.use_defaults and ('width' in child.attrib.keys()): self.axes_max = float(child.attrib['width']) # print("debug> found width --> axes_max =", axes_max) if child.text and "Current time" in child.text: svals = child.text.split() # title_str = "(" + str(current_idx) + ") Current time: " + svals[2] + "d, " + svals[4] + "h, " + svals[7] + "m" # title_str = "Current time: " + svals[2] + "d, " + svals[4] + "h, " + svals[7] + "m" title_str = svals[2] + "d, " + svals[4] + "h, " + svals[7] + "m" # print("width ",child.attrib['width']) # print('attrib=',child.attrib) # if (child.attrib['id'] == 'tissue'): if ('id' in child.attrib.keys()): # print('-------- found tissue!!') tissue_parent = child break # print('------ search tissue') cells_parent = None for child in tissue_parent: # print('attrib=',child.attrib) if (child.attrib['id'] == 'cells'): # print('-------- found cells, setting cells_parent') cells_parent = child break numChildren += 1 num_cells = 0 # print('------ search cells') for child in cells_parent: # print(child.tag, child.attrib) # print('attrib=',child.attrib) for circle in child: # two circles in each child: outer + nucleus # circle.attrib={'cx': '1085.59','cy': '1225.24','fill': 'rgb(159,159,96)','r': '6.67717','stroke': 'rgb(159,159,96)','stroke-width': '0.5'} # print(' --- cx,cy=',circle.attrib['cx'],circle.attrib['cy']) xval = float(circle.attrib['cx']) s = circle.attrib['fill'] # print("s=",s) # print("type(s)=",type(s)) if (s[0:3] == "rgb"): # if an rgb string, e.g. "rgb(175,175,80)" rgb = list(map(int, s[4:-1].split(","))) rgb[:] = [x / 255. for x in rgb] else: # otherwise, must be a color name rgb_tuple = mplc.to_rgb(mplc.cnames[s]) # a tuple rgb = [x for x in rgb_tuple] # test for bogus x,y locations (rwh TODO: use max of domain?) too_large_val = 10000. 
if (np.fabs(xval) > too_large_val): print("bogus xval=", xval) break yval = float(circle.attrib['cy']) if (np.fabs(yval) > too_large_val): print("bogus xval=", xval) break rval = float(circle.attrib['r']) # if (rgb[0] > rgb[1]): # print(num_cells,rgb, rval) xlist.append(xval) ylist.append(yval) rlist.append(rval) rgb_list.append(rgb) # For .svg files with cells that *have* a nucleus, there will be a 2nd if (self.show_nucleus == 0): #if (not self.show_nucleus): break num_cells += 1 # if num_cells > 3: # for debugging # print(fname,': num_cells= ',num_cells," --- debug exit.") # sys.exit(1) # break # print(fname,': num_cells= ',num_cells) xvals = np.array(xlist) yvals = np.array(ylist) rvals = np.array(rlist) rgbs = np.array(rgb_list) # print("xvals[0:5]=",xvals[0:5]) # print("rvals[0:5]=",rvals[0:5]) # print("rvals.min, max=",rvals.min(),rvals.max()) # rwh - is this where I change size of render window?? (YES - yipeee!) # plt.figure(figsize=(6, 6)) # plt.cla() title_str += " (" + str(num_cells) + " agents)" # plt.title(title_str) # plt.xlim(axes_min,axes_max) # plt.ylim(axes_min,axes_max) # plt.scatter(xvals,yvals, s=rvals*scale_radius, c=rgbs) # self.fig = plt.figure(figsize=(6, 6)) self.fig = plt.figure(figsize=(7, 7)) # axx = plt.axes([0, 0.05, 0.9, 0.9]) # left, bottom, width, height # axx = fig.gca() # print('fig.dpi=',fig.dpi) # = 72 # im = ax.imshow(f.reshape(100,100), interpolation='nearest', cmap=cmap, extent=[0,20, 0,20]) # ax.xlim(axes_min,axes_max) # ax.ylim(axes_min,axes_max) # convert radii to radii in pixels # ax2 = fig.gca() ax2 = self.fig.gca() N = len(xvals) rr_pix = (ax2.transData.transform(np.vstack([rvals, rvals]).T) - ax2.transData.transform(np.vstack([np.zeros(N), np.zeros(N)]).T)) rpix, _ = rr_pix.T markers_size = (144. * rpix / self.fig.dpi)**2 # = (2*rpix / fig.dpi * 72)**2 # markers_size = (2*rpix / fig.dpi * 72)**2 markers_size = markers_size/4000000. # print('max=',markers_size.max()) # ax.scatter(xvals,yvals, s=rvals*self.scale_radius, c=rgbs) # axx.scatter(xvals,yvals, s=markers_size, c=rgbs) #rwh - temp fix - Ah, error only occurs when "edges" is toggled on if (self.show_edge): try: plt.scatter(xvals,yvals, s=markers_size, c=rgbs, edgecolor='black', linewidth=0.5) except (ValueError): pass else: plt.scatter(xvals,yvals, s=markers_size, c=rgbs) plt.xlim(self.axes_min, self.axes_max) plt.ylim(self.axes_min, self.axes_max) # ax.grid(False) # axx.set_title(title_str) plt.title(title_str) # video-style widget - perhaps for future use # svg_play = widgets.Play( # interval=1, # value=50, # min=0, # max=100, # step=1, # description="Press play", # disabled=False, # ) # def svg_slider_change(change): # print('svg_slider_change: type(change)=',type(change),change.new) # plot_svg(change.new) #svg_play # svg_slider = widgets.IntSlider() # svg_slider.observe(svg_slider_change, names='value') # widgets.jslink((svg_play, 'value'), (svg_slider,'value')) # (svg_slider, 'value'), (plot_svg, 'value')) # svg_slider = widgets.IntSlider() # widgets.jslink((play, 'value'), (slider, 'value')) # widgets.HBox([svg_play, svg_slider]) # Using the following generates a new mpl plot; it doesn't use the existing plot! #svg_anim = widgets.HBox([svg_play, svg_slider]) #svg_tab = widgets.VBox([svg_dir, svg_plot, svg_anim], layout=tab_layout) #svg_tab = widgets.VBox([svg_dir, svg_anim], layout=tab_layout) #---------------------
8332e30937e9e1b5e5122db696b4431f00c38374
6223dc2e5de7921696cb34fb62142fd4a4efe361
/.metadata/.plugins/org.eclipse.core.resources/.history/51/40e6c6177739001412b5c17ef71e72e3
6db0fb731998676d3ddb05dbce7d5249db6922c6
[]
no_license
Mushirahmed/python_workspace
5ef477b2688e8c25b1372f546752501ee53d93e5
46e2ed783b17450aba29e4e2df7b656522b2b03b
refs/heads/master
2021-03-12T19:24:50.598982
2015-05-25T10:23:54
2015-05-25T10:23:54
24,671,376
0
1
null
2015-02-06T09:27:40
2014-10-01T08:40:33
Python
UTF-8
Python
false
false
1,442
#!/usr/bin/env python

import wx

def slider(parent, min, max, callback):
    """
    Return a wx.Slider object.

    @param min: minimum slider value
    @type min: float
    @param max: maximum slider value
    @type max: float
    @param callback: function of one arg invoked when slider moves.
    @rtype: wx.Slider
    """
    new_id = wx.NewId()
    s = wx.Slider(parent, new_id, (max+min)/2, min, max,
                  wx.DefaultPosition, wx.Size(250, -1),
                  wx.SL_HORIZONTAL | wx.SL_LABELS)
    wx.EVT_COMMAND_SCROLL(parent, new_id,
                          lambda evt: callback(evt.GetInt()))
    return s

# ----------------------------------------------------------------
# Demo app
# ----------------------------------------------------------------

if __name__ == '__main__':
    from gnuradio.wxgui import stdgui

    class demo_graph(stdgui.gui_flow_graph):
        def __init__(self, frame, panel, vbox, argv):
            stdgui.gui_flow_graph.__init__(self, frame, panel, vbox, argv)
            vbox.Add(slider(panel, 23, 47, self.my_callback1), 1, wx.ALIGN_CENTER)
            vbox.Add(slider(panel, -100, 100, self.my_callback2), 1, wx.ALIGN_CENTER)

        def my_callback1(self, val):
            print "cb1 = ", val

        def my_callback2(self, val):
            print "cb2 = ", val

    def main():
        app = stdgui.stdapp(demo_graph, "Slider Demo")
        app.MainLoop()

    main()
a161266ee413fb7f3bb8b94466c9d03314de7ee9
633b695a03e789f6aa644c7bec7280367a9252a8
/lmfit_gallery/documentation/fitting_withreport.py
412f4c07159b2a6fb06c2af10b0d239b29d68e3f
[]
no_license
tnakaicode/PlotGallery
3d831d3245a4a51e87f48bd2053b5ef82cf66b87
5c01e5d6e2425dbd17593cb5ecc973982f491732
refs/heads/master
2023-08-16T22:54:38.416509
2023-08-03T04:23:21
2023-08-03T04:23:21
238,610,688
5
2
null
null
null
null
UTF-8
Python
false
false
1,206
py
""" doc_fitting_withreport.py ========================= """ # <examples/doc_fitting_withreport.py> from numpy import exp, linspace, pi, random, sign, sin from lmfit import Parameters, fit_report, minimize p_true = Parameters() p_true.add('amp', value=14.0) p_true.add('period', value=5.46) p_true.add('shift', value=0.123) p_true.add('decay', value=0.032) def residual(pars, x, data=None): """Model a decaying sine wave and subtract data.""" vals = pars.valuesdict() amp = vals['amp'] per = vals['period'] shift = vals['shift'] decay = vals['decay'] if abs(shift) > pi/2: shift = shift - sign(shift)*pi model = amp * sin(shift + x/per) * exp(-x*x*decay*decay) if data is None: return model return model - data random.seed(0) x = linspace(0.0, 250., 1001) noise = random.normal(scale=0.7215, size=x.size) data = residual(p_true, x) + noise fit_params = Parameters() fit_params.add('amp', value=13.0) fit_params.add('period', value=2) fit_params.add('shift', value=0.0) fit_params.add('decay', value=0.02) out = minimize(residual, fit_params, args=(x,), kws={'data': data}) print(fit_report(out)) # <end examples/doc_fitting_withreport.py>
2472d991874e1382b2a57fe70f66ab353ff64c6b
1ec1d20f16a9bd9d51b8a7be9b7fa5fcec6a4f02
/main.py
79809bf14ee1eb4fa881034b745157b21fdc73eb
[]
no_license
billyjia1/UnfairBulletHell
b676f66a3c033e765f832e57a0f1bb3d8acbc394
8d623113ec1491e949164599e9db77217bb8c35d
refs/heads/main
2023-08-15T00:09:45.935440
2021-09-27T21:39:09
2021-09-27T21:39:09
409,778,226
0
0
null
null
null
null
UTF-8
Python
false
false
10,077
py
import pygame import os import time import random pygame.font.init() WIDTH, HEIGHT = 750, 750 WIN = pygame.display.set_mode((WIDTH, HEIGHT)) pygame.display.set_caption("Space Shooter Tutorial") # Load images RED_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_red_small.png")) GREEN_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_green_small.png")) BLUE_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_blue_small.png")) # Player player YELLOW_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_yellow.png")) # Lasers RED_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_red.png")) GREEN_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_green.png")) BLUE_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_blue.png")) YELLOW_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_yellow.png")) # Background BG = pygame.transform.scale(pygame.image.load(os.path.join("assets", "background-black.png")), (WIDTH, HEIGHT)) class Laser: def __init__(self, x, y, img): self.x = x self.y = y self.img = img self.mask = pygame.mask.from_surface(self.img) def draw(self, window): window.blit(self.img, (self.x, self.y)) def move(self, vel): self.y += vel self.x += random.randrange(-20,20, 5) def move_friendly(self, vel): self.y += vel def off_screen(self, height): return not(self.y <= height and self.y >= 0) def collision(self, obj): return collide(self, obj) class Ship: COOLDOWN = 30 def __init__(self, x, y, health=100): self.x = x self.y = y self.health = health self.ship_img = None self.laser_img = None self.lasers = [] self.cool_down_counter = 0 def draw(self, window): window.blit(self.ship_img, (self.x, self.y)) for laser in self.lasers: laser.draw(window) def move_lasers(self, vel, obj): self.cooldown() for laser in self.lasers: laser.move(vel) if laser.off_screen(HEIGHT): self.lasers.remove(laser) elif laser.collision(obj): obj.health -= 10 self.lasers.remove(laser) def cooldown(self): if self.cool_down_counter >= self.COOLDOWN: self.cool_down_counter = 0 elif self.cool_down_counter > 0: self.cool_down_counter += 1 def shoot(self): if self.cool_down_counter == 0: laser = Laser(self.x, self.y, self.laser_img) self.lasers.append(laser) self.cool_down_counter = 1 def get_width(self): return self.ship_img.get_width() def get_height(self): return self.ship_img.get_height() class Player(Ship): def __init__(self, x, y, health=1000): super().__init__(x, y, health) self.ship_img = YELLOW_SPACE_SHIP self.laser_img = YELLOW_LASER self.mask = pygame.mask.from_surface(self.ship_img) self.max_health = health def move_lasers(self, vel, objs): self.cooldown() for laser in self.lasers: laser.move_friendly(vel) if laser.off_screen(HEIGHT): self.lasers.remove(laser) else: for obj in objs: if laser.collision(obj): obj.health -= 10 if obj.health <= 0: objs.remove(obj) if laser in self.lasers: self.lasers.remove(laser) def draw(self, window): super().draw(window) self.healthbar(window) def healthbar(self, window): pygame.draw.rect(window, (255,0,0), (self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width(), 10)) pygame.draw.rect(window, (0,255,0), (self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width() * (self.health/self.max_health), 10)) # class Enemy(Ship): # COLOR_MAP = { # "red": (RED_SPACE_SHIP, RED_LASER), # "green": (GREEN_SPACE_SHIP, GREEN_LASER), # "blue": (BLUE_SPACE_SHIP, BLUE_LASER) # } # def __init__(self, x, y, color, health=100): # super().__init__(x, y, health) # 
self.ship_img, self.laser_img = self.COLOR_MAP[color] # self.mask = pygame.mask.from_surface(self.ship_img) # def move(self, vel): # self.y += vel # def shoot(self): # if self.cool_down_counter == 0: # laser = Laser(self.x-20, self.y, self.laser_img) # self.lasers.append(laser) # self.cool_down_counter = 1 class Red_Enemy(Ship): def __init__(self, x, y, health=100): super().__init__(x, y, health=health) self.ship_img, self.laser_img = RED_SPACE_SHIP, RED_LASER self.mask = pygame.mask.from_surface(self.ship_img) def move(self, vel): self.y += vel + 5 self.x += vel def shoot(self): if self.cool_down_counter == 0: laser = Laser(self.x - 100, self.y, self.laser_img) self.lasers.append(laser) self.cool_down_counter = 1 class Blue_Enemy(Ship): def __init__(self, x, y, health=100): super().__init__(x, y, health=health) self.ship_img, self.laser_img = BLUE_SPACE_SHIP, BLUE_LASER self.mask = pygame.mask.from_surface(self.ship_img) def move(self, vel): self.y += vel + 2 self.x += vel def shoot(self): if self.cool_down_counter == 0: laser = Laser(self.x - 100, self.y, self.laser_img) self.lasers.append(laser) self.cool_down_counter = 1 class Green_Enemy(Ship): def __init__(self, x, y, health=100): super().__init__(x, y, health=health) self.ship_img, self.laser_img = GREEN_SPACE_SHIP, GREEN_LASER self.mask = pygame.mask.from_surface(self.ship_img) def move(self, vel): self.y += vel def shoot(self): if self.cool_down_counter == 0: laser = Laser(self.x - 50, self.y, self.laser_img) self.lasers.append(laser) self.cool_down_counter = 1 def collide(obj1, obj2): offset_x = obj2.x - obj1.x offset_y = obj2.y - obj1.y return obj1.mask.overlap(obj2.mask, (offset_x, offset_y)) != None def main(): run = True FPS = 60 level = 0 lives = 5 main_font = pygame.font.SysFont("comicsans", 50) lost_font = pygame.font.SysFont("comicsans", 60) enemies = [] wave_length = 5 enemy_vel = 1 player_vel = 5 laser_vel = 5 player = Player(300, 630) clock = pygame.time.Clock() lost = False lost_count = 0 def redraw_window(): WIN.blit(BG, (0,0)) # draw text lives_label = main_font.render(f"Lives: {lives}", 1, (255,255,255)) level_label = main_font.render(f"Level: {level}", 1, (255,255,255)) WIN.blit(lives_label, (10, 10)) WIN.blit(level_label, (WIDTH - level_label.get_width() - 10, 10)) for enemy in enemies: enemy.draw(WIN) player.draw(WIN) if lost: lost_label = lost_font.render("You Lost!!", 1, (255,255,255)) WIN.blit(lost_label, (WIDTH/2 - lost_label.get_width()/2, 350)) pygame.display.update() while run: clock.tick(FPS) redraw_window() if lives <= 0 or player.health <= 0: lost = True lost_count += 1 if lost: if lost_count > FPS * 3: run = False else: continue if len(enemies) == 0: level += 1 wave_length += 5 for i in range(wave_length): enemy_r = Red_Enemy(random.randrange(50, WIDTH-100), random.randrange(-1500, -100)) enemy_b = Blue_Enemy(random.randrange(50, WIDTH-100), random.randrange(-1500, -100)) enemy_g = Green_Enemy(random.randrange(50, WIDTH-100), random.randrange(-1500, -100)) enemy_list = (enemy_b, enemy_r, enemy_g) enemies.append(random.choice(enemy_list)) for event in pygame.event.get(): if event.type == pygame.QUIT: quit() keys = pygame.key.get_pressed() if keys[pygame.K_a] and player.x - player_vel > 0: # left player.x -= player_vel if keys[pygame.K_d] and player.x + player_vel + player.get_width() < WIDTH: # right player.x += player_vel if keys[pygame.K_w] and player.y - player_vel > 0: # up player.y -= player_vel if keys[pygame.K_s] and player.y + player_vel + player.get_height() + 15 < HEIGHT: # down player.y += 
player_vel if keys[pygame.K_SPACE]: player.shoot() for enemy in enemies[:]: enemy.move(enemy_vel) enemy.move_lasers(laser_vel, player) if random.randrange(0, 120) == 1: enemy.shoot() if collide(player, enemy): enemy.health -= 50 player.health -= 10 if enemy.health <= 0: enemies.remove(enemy) elif enemy.y + enemy.get_height() > HEIGHT: lives -= 1 enemies.remove(enemy) player.move_lasers(-laser_vel, enemies) def main_menu(): title_font = pygame.font.SysFont("comicsans", 70) run = True while run: WIN.blit(BG, (0,0)) title_label = title_font.render("Press the mouse to begin...", 1, (255,255,255)) WIN.blit(title_label, (WIDTH/2 - title_label.get_width()/2, 350)) pygame.display.update() for event in pygame.event.get(): if event.type == pygame.QUIT: run = False if event.type == pygame.MOUSEBUTTONDOWN: main() pygame.quit() main_menu()
2d96b9e44c4d5cf68a3afe6db0a8a384018a4a30
0dafa1dff4429bb8893445d05202061ff4f9f710
/plotting_scripts/plot_sigloss_modeloop.py
1aba5bcdf53def99fce4f45924bf9b849ed55c96
[]
no_license
carinacheng/PAPERMethods_paper
8d879291eb4d30e3a0d98286fbab4dd42bba3ef7
9b657e0274842477643407db89916781ac948f80
refs/heads/master
2021-07-23T21:59:29.331251
2018-11-01T23:25:08
2018-11-01T23:25:08
95,481,809
0
0
null
null
null
null
UTF-8
Python
false
false
6,724
py
#! /usr/bin/env python import numpy as n import matplotlib.pyplot as p from scipy.optimize import curve_fit # Reads in power spectrum results from projecting out 0,1,2... modes # Plots power spectrum results before and after signal loss correction as a function of modes removed if True: # eigenmodes set to 1 path = 'plot_sigloss_modeloop_mode' startmode=0 nmodes=22 deltamode=1 xlabel='Number of modes down-weighted using inverse covariance weighting' f1 = '/project_' f2 = '_modes' loop = n.arange(startmode,nmodes,deltamode) if True: # added identity parameters in LOG space path_add = 'plot_sigloss_modeloop_add' startmode_add=-4 endmode_add=0 nmodes_add=20 #20000 xlabel_add='Strength of identity added: $\mathbf{\widehat{C}}$ + $\gamma$Tr$(\mathbf{\widehat{C}})\mathbf{I}$' f1_add = '/add_' f2_add = '_identity' loop_add = n.logspace(startmode_add,endmode_add,nmodes_add) # Read files sense=14419782.9029*2 #/ n.sqrt(2) # XXX from plotting one of the "project_#_modes" directories (divide by sqrt(2) for folded case) #sense=14419782.9029*2 # unfolded version PS_i_up = [] PS_f_up = [] PS_i = [] PS_f = [] k_ind = -3 # Read in range of projected eigenmodes for mode_num in loop: filename = path + f1 + str(mode_num) + f2 print 'Reading', filename print mode_num f = n.load(filename+'/pspec_final_sep0,1_full.npz') #kpl = f['kpl_fold'] # folded version kpl = f['kpl'] # unfolded version k = kpl[k_ind] #PS_i_up.append(2*n.array(f['pCv_fold_err_old'])[k_ind]) # folded version #PS_f_up.append(2*n.array(f['pCv_fold_err'])[k_ind]) #PS_i.append(n.abs(f['pCv_fold_old'])[k_ind]) #PS_f.append(n.abs(f['pCv_fold'])[k_ind]) PS_i_up.append(2*n.array(f['pCv_err_old'])[k_ind]) # unfolded version PS_f_up.append(2*n.array(f['pCv_err'])[k_ind]) PS_i.append(n.abs(f['pCv_old'])[k_ind]) PS_f.append(n.abs(f['pCv'])[k_ind]) # Read in added identity case as a second curve being plotted PS_i_up_add = [] PS_f_up_add = [] PS_i_add = [] PS_f_add = [] for mode_num in loop_add: filename = path_add + f1_add + str(mode_num) + f2_add print 'Reading', filename print mode_num f = n.load(filename + '/pspec_final_sep0,1_full.npz') #kpl = f['kpl_fold'] # folded version kpl = f['kpl'] k = kpl[k_ind] #PS_i_up_add.append(2*n.array(f['pCv_fold_err_old'])[k_ind]) # folded version #PS_f_up_add.append(2*n.array(f['pCv_fold_err'])[k_ind]) #PS_i_add.append(n.abs(f['pCv_fold_old'])[k_ind]) #PS_f_add.append(n.abs(f['pCv_fold'])[k_ind]) PS_i_up_add.append(2*n.array(f['pCv_err_old'])[k_ind]) # unfolded version PS_f_up_add.append(2*n.array(f['pCv_err'])[k_ind]) PS_i_add.append(n.abs(f['pCv_old'])[k_ind]) PS_f_add.append(n.abs(f['pCv'])[k_ind]) """ # Theory from Switzer et al. - first term only fixmode = 3 # start fix at 3rd mode since first few modes are dominated by systematics xs = n.arange(fixmode,nmodes,1.) 
# number of modes removed err_theory_firstterm = 1./(1 - xs/nmodes) normalization = PS_f_up[fixmode]/err_theory_firstterm[0] err_theory_firstterm = err_theory_firstterm*normalization # Fit N_ind (number of independent modes) in second term def func(mode_num, N_ind): fit = 1./((1-mode_num/nmodes)*(1-mode_num/N_ind))*PS_f_up[fixmode] normalization = PS_f_up[fixmode]/fit[0] return fit*normalization N_ind,_ = curve_fit(func, xs, PS_f_up[fixmode:], bounds=(0,1000)) err_theory_fit = 1./((1 - xs/nmodes)*(1 - xs/N_ind)) normalization = PS_f_up[fixmode]/err_theory_fit[0] err_theory_fit = err_theory_fit*normalization print "Fit for number of independent modes =", N_ind # Force fit for full equation if True: N_ind = 15 err_theory_fit = 1./((1 - xs/nmodes)*(1 - xs/N_ind)) normalization = PS_f_up[fixmode]/err_theory_fit[0] err_theory_fit = err_theory_fit*normalization """ # Best PS (Identity Mult) f = n.load('plot_sigloss_modeloop_identitymult.npz') #ps_mult = n.abs(f['pCv'][k_ind]) + 2*f['pCv_err'][k_ind] # point + 2err #ps_mult = 2*f['pCv_fold_err'][k_ind] # 2sigma upper limit ps_mult = 2*f['pCv_err'][k_ind] # unfolded case # Plot p.figure(figsize=(8,10)) p.subplot(211) # plot before/after for # eigenmodes down-weighted p.plot(loop, n.array(PS_i) + n.array(PS_i_up), color='red', linestyle='--', linewidth=2, label='Pre-signal loss estimation') p.plot(loop, n.array(PS_f_up), 'r-', linewidth=2, label='Post-signal loss estimation') p.xlim(loop[0], loop[-1]) # plot unweighted #p.axhline(f['pIv_old'][k_ind]+2*f['pIv_err_old'][k_ind],color='b',linestyle='-',linewidth=2) #p.axhline(2*f['pIv_fold_err'][k_ind],color='b',linestyle='-',linewidth=2) p.axhline(2*f['pIv_err'][k_ind],color='b',linestyle='-',linewidth=2) # plot inverse variance p.axhline(ps_mult,color='k',linestyle='-',linewidth=2) # plot analytic p.axhline(sense,color='g',linestyle='-',linewidth=2) p.xlabel(xlabel,fontsize=14) p.ylabel('$P(k)$ [mK$^{2}$($h^{-1}$ Mpc)$^{3}$]',fontsize=16) p.ylim(1e5,1e11) p.legend(prop={'size':12}, loc=2, numpoints=1) p.tick_params(axis='both', which='major', labelsize=12) p.yscale('log') p.grid() p.title('k = ' +str(round(k,3)) + ' $h$ Mpc$^{-1}$') p.subplot(212) # plot before/after for added identity p.plot(loop_add, n.array(PS_i_add) + n.array(PS_i_up_add), color='red', linewidth=2, linestyle='--', label='Pre-signal loss estimation') p.plot(loop_add, n.array(PS_f_up_add), color='r', linewidth=2, linestyle='-', label='Post-signal loss estimation') p.xlim(loop_add[0], loop_add[-1]) p.gca().invert_xaxis() # plot unweighted #p.axhline(f['pIv_old'][k_ind]+2*f['pIv_err_old'][k_ind],color='b',linestyle='-',linewidth=2,label='Uniform weighting') #p.axhline(2*f['pIv_fold_err'][k_ind],color='b',linestyle='-',linewidth=2,label='Uniform weighting') p.axhline(2*f['pIv_err'][k_ind],color='b',linestyle='-',linewidth=2,label='Uniform weighting') # plot inverse variance p.axhline(ps_mult,color='k',linestyle='-',linewidth=2,label='$\hat{C} = \hat{C} \circ I$') # plot analytic p.axhline(sense,color='g',linestyle='-',linewidth=2,label='Analytical $2\sigma$ Error') # plot theory #p.plot(n.arange(fixmode,nmodes,1), err_theory_firstterm, 'b--', label='Theory from Switzer et al., only frequency modes') #p.plot(n.arange(fixmode,nmodes,1), err_theory_fit, 'b-', label='Theory from Switzer et al., both frequency and time modes') p.xlabel(xlabel_add,fontsize=14) p.ylabel('$P(k)$ [mK$^{2}$($h^{-1}$ Mpc)$^{3}$]',fontsize=16) p.legend(prop={'size':12}, loc=2, numpoints=1, ncol=2) p.tick_params(axis='both', which='major', labelsize=12) 
p.yscale('log') p.xscale('log') p.ylim(1e5,1e11) p.grid() p.subplots_adjust(hspace=0.3) #p.tight_layout() p.show()
7c851f6cf3c45e4effa984c2a42fc8551f5c800e
a40950330ea44c2721f35aeeab8f3a0a11846b68
/INTERACTIONS_V1/INTERACTION2/AppSBC/UI/UI.py
d3fdd88cbfb7142e29190f9222894fe2a9977d87
[]
no_license
huang443765159/kai
7726bcad4e204629edb453aeabcc97242af7132b
0d66ae4da5a6973e24e1e512fd0df32335e710c5
refs/heads/master
2023-03-06T23:13:59.600011
2023-03-04T06:14:12
2023-03-04T06:14:12
233,500,005
3
1
null
null
null
null
UTF-8
Python
false
false
35,377
py
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'UI.ui' # # Created by: PyQt5 UI code generator 5.15.0 # # WARNING: Any manual changes made to this file will be lost when pyuic5 is # run again. Do not edit this file unless you know what you are doing. from PyQt5 import QtCore, QtGui, QtWidgets class Ui_SBC(object): def setupUi(self, SBC): SBC.setObjectName("SBC") SBC.resize(395, 602) self.SBC_2 = QtWidgets.QWidget(SBC) self.SBC_2.setObjectName("SBC_2") self.tab_device = QtWidgets.QTabWidget(self.SBC_2) self.tab_device.setGeometry(QtCore.QRect(10, 20, 371, 91)) self.tab_device.setTabPosition(QtWidgets.QTabWidget.West) self.tab_device.setTabShape(QtWidgets.QTabWidget.Triangular) self.tab_device.setElideMode(QtCore.Qt.ElideLeft) self.tab_device.setObjectName("tab_device") self.device = QtWidgets.QWidget() self.device.setObjectName("device") self.label_pump_station = QtWidgets.QLabel(self.device) self.label_pump_station.setGeometry(QtCore.QRect(0, 20, 91, 14)) self.label_pump_station.setMinimumSize(QtCore.QSize(0, 14)) self.label_pump_station.setMaximumSize(QtCore.QSize(16777215, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_pump_station.setFont(font) self.label_pump_station.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.label_pump_station.setObjectName("label_pump_station") self.ip_local = QtWidgets.QLabel(self.device) self.ip_local.setGeometry(QtCore.QRect(180, 20, 150, 14)) self.ip_local.setMinimumSize(QtCore.QSize(75, 14)) self.ip_local.setMaximumSize(QtCore.QSize(150, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.ip_local.setFont(font) self.ip_local.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.ip_local.setObjectName("ip_local") self.ip_nuc = QtWidgets.QLabel(self.device) self.ip_nuc.setGeometry(QtCore.QRect(180, 50, 160, 14)) self.ip_nuc.setMinimumSize(QtCore.QSize(160, 14)) self.ip_nuc.setMaximumSize(QtCore.QSize(170, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.ip_nuc.setFont(font) self.ip_nuc.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.ip_nuc.setObjectName("ip_nuc") self.led_pump_station = QtWidgets.QToolButton(self.device) self.led_pump_station.setGeometry(QtCore.QRect(100, 20, 50, 14)) self.led_pump_station.setMinimumSize(QtCore.QSize(50, 0)) self.led_pump_station.setMaximumSize(QtCore.QSize(50, 14)) font = QtGui.QFont() font.setPointSize(8) self.led_pump_station.setFont(font) self.led_pump_station.setToolTip("") self.led_pump_station.setToolTipDuration(-1) self.led_pump_station.setObjectName("led_pump_station") self.label_guides = QtWidgets.QLabel(self.device) self.label_guides.setGeometry(QtCore.QRect(0, 50, 91, 14)) self.label_guides.setMinimumSize(QtCore.QSize(0, 14)) self.label_guides.setMaximumSize(QtCore.QSize(16777215, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_guides.setFont(font) self.label_guides.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.label_guides.setObjectName("label_guides") self.led_guides = QtWidgets.QToolButton(self.device) self.led_guides.setGeometry(QtCore.QRect(100, 50, 50, 14)) self.led_guides.setMinimumSize(QtCore.QSize(50, 0)) self.led_guides.setMaximumSize(QtCore.QSize(50, 14)) font = QtGui.QFont() font.setPointSize(8) self.led_guides.setFont(font) 
self.led_guides.setToolTip("") self.led_guides.setToolTipDuration(-1) self.led_guides.setObjectName("led_guides") self.tab_device.addTab(self.device, "") self.tab_device_2 = QtWidgets.QTabWidget(self.SBC_2) self.tab_device_2.setGeometry(QtCore.QRect(10, 120, 371, 111)) self.tab_device_2.setTabPosition(QtWidgets.QTabWidget.West) self.tab_device_2.setTabShape(QtWidgets.QTabWidget.Triangular) self.tab_device_2.setElideMode(QtCore.Qt.ElideLeft) self.tab_device_2.setObjectName("tab_device_2") self.device_2 = QtWidgets.QWidget() self.device_2.setObjectName("device_2") self.gridLayoutWidget_4 = QtWidgets.QWidget(self.device_2) self.gridLayoutWidget_4.setGeometry(QtCore.QRect(-10, 20, 361, 40)) self.gridLayoutWidget_4.setObjectName("gridLayoutWidget_4") self.gridLayout_4 = QtWidgets.QGridLayout(self.gridLayoutWidget_4) self.gridLayout_4.setContentsMargins(0, 0, 0, 0) self.gridLayout_4.setObjectName("gridLayout_4") self.ui_stage_show = QtWidgets.QLineEdit(self.gridLayoutWidget_4) self.ui_stage_show.setMaximumSize(QtCore.QSize(250, 14)) font = QtGui.QFont() font.setPointSize(9) self.ui_stage_show.setFont(font) self.ui_stage_show.setObjectName("ui_stage_show") self.gridLayout_4.addWidget(self.ui_stage_show, 0, 1, 1, 1) self.label_stage_show = QtWidgets.QLabel(self.gridLayoutWidget_4) self.label_stage_show.setMinimumSize(QtCore.QSize(0, 14)) self.label_stage_show.setMaximumSize(QtCore.QSize(70, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_stage_show.setFont(font) self.label_stage_show.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.label_stage_show.setObjectName("label_stage_show") self.gridLayout_4.addWidget(self.label_stage_show, 0, 0, 1, 1) self.label_stage_show_btn = QtWidgets.QLabel(self.gridLayoutWidget_4) self.label_stage_show_btn.setMinimumSize(QtCore.QSize(0, 14)) self.label_stage_show_btn.setMaximumSize(QtCore.QSize(70, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_stage_show_btn.setFont(font) self.label_stage_show_btn.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.label_stage_show_btn.setObjectName("label_stage_show_btn") self.gridLayout_4.addWidget(self.label_stage_show_btn, 1, 0, 1, 1) self.btn_welcome = QtWidgets.QPushButton(self.device_2) self.btn_welcome.setGeometry(QtCore.QRect(10, 60, 80, 20)) self.btn_welcome.setMaximumSize(QtCore.QSize(80, 25)) font = QtGui.QFont() font.setPointSize(9) self.btn_welcome.setFont(font) self.btn_welcome.setObjectName("btn_welcome") self.btn_forward = QtWidgets.QPushButton(self.device_2) self.btn_forward.setGeometry(QtCore.QRect(120, 60, 80, 20)) self.btn_forward.setMaximumSize(QtCore.QSize(80, 25)) font = QtGui.QFont() font.setPointSize(9) self.btn_forward.setFont(font) self.btn_forward.setObjectName("btn_forward") self.btn_stop_forward = QtWidgets.QPushButton(self.device_2) self.btn_stop_forward.setGeometry(QtCore.QRect(230, 60, 80, 20)) self.btn_stop_forward.setMaximumSize(QtCore.QSize(80, 25)) font = QtGui.QFont() font.setPointSize(9) self.btn_stop_forward.setFont(font) self.btn_stop_forward.setObjectName("btn_stop_forward") self.btn_back_driving = QtWidgets.QPushButton(self.device_2) self.btn_back_driving.setGeometry(QtCore.QRect(10, 80, 80, 20)) self.btn_back_driving.setMaximumSize(QtCore.QSize(80, 25)) font = QtGui.QFont() font.setPointSize(9) self.btn_back_driving.setFont(font) self.btn_back_driving.setObjectName("btn_back_driving") self.btn_washing = 
QtWidgets.QPushButton(self.device_2) self.btn_washing.setGeometry(QtCore.QRect(120, 80, 80, 20)) self.btn_washing.setMaximumSize(QtCore.QSize(80, 25)) font = QtGui.QFont() font.setPointSize(9) self.btn_washing.setFont(font) self.btn_washing.setObjectName("btn_washing") self.btn_washing_end = QtWidgets.QPushButton(self.device_2) self.btn_washing_end.setGeometry(QtCore.QRect(230, 80, 80, 20)) self.btn_washing_end.setMaximumSize(QtCore.QSize(80, 25)) font = QtGui.QFont() font.setPointSize(9) self.btn_washing_end.setFont(font) self.btn_washing_end.setObjectName("btn_washing_end") self.gridLayoutWidget_2 = QtWidgets.QWidget(self.device_2) self.gridLayoutWidget_2.setGeometry(QtCore.QRect(0, 0, 341, 17)) self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2") self.gridLayout_2 = QtWidgets.QGridLayout(self.gridLayoutWidget_2) self.gridLayout_2.setContentsMargins(0, 0, 0, 0) self.gridLayout_2.setObjectName("gridLayout_2") self.ui_guides_data1 = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.ui_guides_data1.setMaximumSize(QtCore.QSize(16777215, 15)) font = QtGui.QFont() font.setPointSize(9) self.ui_guides_data1.setFont(font) self.ui_guides_data1.setObjectName("ui_guides_data1") self.gridLayout_2.addWidget(self.ui_guides_data1, 0, 1, 1, 1) self.label_guides_2 = QtWidgets.QLabel(self.gridLayoutWidget_2) self.label_guides_2.setMinimumSize(QtCore.QSize(0, 14)) self.label_guides_2.setMaximumSize(QtCore.QSize(16777215, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_guides_2.setFont(font) self.label_guides_2.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.label_guides_2.setObjectName("label_guides_2") self.gridLayout_2.addWidget(self.label_guides_2, 0, 0, 1, 1) self.ui_guides_data2 = QtWidgets.QLineEdit(self.gridLayoutWidget_2) self.ui_guides_data2.setMaximumSize(QtCore.QSize(16777215, 15)) font = QtGui.QFont() font.setPointSize(9) self.ui_guides_data2.setFont(font) self.ui_guides_data2.setObjectName("ui_guides_data2") self.gridLayout_2.addWidget(self.ui_guides_data2, 0, 2, 1, 1) self.tab_device_2.addTab(self.device_2, "") self.tab_pumps_station = QtWidgets.QTabWidget(self.SBC_2) self.tab_pumps_station.setGeometry(QtCore.QRect(10, 370, 371, 221)) self.tab_pumps_station.setTabPosition(QtWidgets.QTabWidget.West) self.tab_pumps_station.setTabShape(QtWidgets.QTabWidget.Triangular) self.tab_pumps_station.setElideMode(QtCore.Qt.ElideLeft) self.tab_pumps_station.setObjectName("tab_pumps_station") self.device_3 = QtWidgets.QWidget() self.device_3.setObjectName("device_3") self.gridLayoutWidget = QtWidgets.QWidget(self.device_3) self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 10, 321, 17)) self.gridLayoutWidget.setObjectName("gridLayoutWidget") self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget) self.gridLayout.setContentsMargins(0, 0, 0, 0) self.gridLayout.setObjectName("gridLayout") self.ui_drain_data1 = QtWidgets.QLineEdit(self.gridLayoutWidget) self.ui_drain_data1.setMaximumSize(QtCore.QSize(16777215, 15)) font = QtGui.QFont() font.setPointSize(9) self.ui_drain_data1.setFont(font) self.ui_drain_data1.setObjectName("ui_drain_data1") self.gridLayout.addWidget(self.ui_drain_data1, 0, 1, 1, 1) self.DRAIN = QtWidgets.QLabel(self.gridLayoutWidget) self.DRAIN.setMinimumSize(QtCore.QSize(0, 14)) self.DRAIN.setMaximumSize(QtCore.QSize(16777215, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.DRAIN.setFont(font) 
self.DRAIN.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.DRAIN.setObjectName("DRAIN") self.gridLayout.addWidget(self.DRAIN, 0, 0, 1, 1) self.ui_drain_data2 = QtWidgets.QLineEdit(self.gridLayoutWidget) self.ui_drain_data2.setMaximumSize(QtCore.QSize(16777215, 15)) font = QtGui.QFont() font.setPointSize(9) self.ui_drain_data2.setFont(font) self.ui_drain_data2.setObjectName("ui_drain_data2") self.gridLayout.addWidget(self.ui_drain_data2, 0, 2, 1, 1) self.gridLayoutWidget_3 = QtWidgets.QWidget(self.device_3) self.gridLayoutWidget_3.setGeometry(QtCore.QRect(10, 40, 321, 173)) self.gridLayoutWidget_3.setObjectName("gridLayoutWidget_3") self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_3) self.gridLayout_3.setContentsMargins(0, 0, 0, 0) self.gridLayout_3.setObjectName("gridLayout_3") self.ui_wheel_data = QtWidgets.QLineEdit(self.gridLayoutWidget_3) self.ui_wheel_data.setMaximumSize(QtCore.QSize(35, 15)) font = QtGui.QFont() font.setPointSize(9) self.ui_wheel_data.setFont(font) self.ui_wheel_data.setObjectName("ui_wheel_data") self.gridLayout_3.addWidget(self.ui_wheel_data, 4, 1, 1, 1) self.DRAIN_6 = QtWidgets.QLabel(self.gridLayoutWidget_3) self.DRAIN_6.setMinimumSize(QtCore.QSize(0, 14)) self.DRAIN_6.setMaximumSize(QtCore.QSize(25, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.DRAIN_6.setFont(font) self.DRAIN_6.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.DRAIN_6.setObjectName("DRAIN_6") self.gridLayout_3.addWidget(self.DRAIN_6, 2, 2, 1, 1) self.DRAIN_10 = QtWidgets.QLabel(self.gridLayoutWidget_3) self.DRAIN_10.setMinimumSize(QtCore.QSize(0, 14)) self.DRAIN_10.setMaximumSize(QtCore.QSize(25, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.DRAIN_10.setFont(font) self.DRAIN_10.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.DRAIN_10.setObjectName("DRAIN_10") self.gridLayout_3.addWidget(self.DRAIN_10, 4, 2, 1, 1) self.ui_acid_data = QtWidgets.QLineEdit(self.gridLayoutWidget_3) self.ui_acid_data.setMaximumSize(QtCore.QSize(35, 15)) font = QtGui.QFont() font.setPointSize(9) self.ui_acid_data.setFont(font) self.ui_acid_data.setObjectName("ui_acid_data") self.gridLayout_3.addWidget(self.ui_acid_data, 3, 1, 1, 1) self.ui_alkali_data = QtWidgets.QLineEdit(self.gridLayoutWidget_3) self.ui_alkali_data.setMaximumSize(QtCore.QSize(35, 15)) font = QtGui.QFont() font.setPointSize(9) self.ui_alkali_data.setFont(font) self.ui_alkali_data.setObjectName("ui_alkali_data") self.gridLayout_3.addWidget(self.ui_alkali_data, 2, 1, 1, 1) self.DRAIN_4 = QtWidgets.QLabel(self.gridLayoutWidget_3) self.DRAIN_4.setMinimumSize(QtCore.QSize(0, 14)) self.DRAIN_4.setMaximumSize(QtCore.QSize(25, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.DRAIN_4.setFont(font) self.DRAIN_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.DRAIN_4.setObjectName("DRAIN_4") self.gridLayout_3.addWidget(self.DRAIN_4, 1, 2, 1, 1) self.DRAIN_8 = QtWidgets.QLabel(self.gridLayoutWidget_3) self.DRAIN_8.setMinimumSize(QtCore.QSize(0, 14)) self.DRAIN_8.setMaximumSize(QtCore.QSize(25, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.DRAIN_8.setFont(font) self.DRAIN_8.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.DRAIN_8.setObjectName("DRAIN_8") 
self.gridLayout_3.addWidget(self.DRAIN_8, 3, 2, 1, 1) self.label_chem = QtWidgets.QLabel(self.gridLayoutWidget_3) self.label_chem.setMinimumSize(QtCore.QSize(0, 14)) self.label_chem.setMaximumSize(QtCore.QSize(40, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_chem.setFont(font) self.label_chem.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.label_chem.setObjectName("label_chem") self.gridLayout_3.addWidget(self.label_chem, 0, 0, 1, 1) self.ui_wax_data = QtWidgets.QLineEdit(self.gridLayoutWidget_3) self.ui_wax_data.setMaximumSize(QtCore.QSize(35, 15)) font = QtGui.QFont() font.setPointSize(9) self.ui_wax_data.setFont(font) self.ui_wax_data.setObjectName("ui_wax_data") self.gridLayout_3.addWidget(self.ui_wax_data, 5, 1, 1, 1) self.label_wheel_data = QtWidgets.QLabel(self.gridLayoutWidget_3) self.label_wheel_data.setMinimumSize(QtCore.QSize(0, 14)) self.label_wheel_data.setMaximumSize(QtCore.QSize(40, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_wheel_data.setFont(font) self.label_wheel_data.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.label_wheel_data.setObjectName("label_wheel_data") self.gridLayout_3.addWidget(self.label_wheel_data, 4, 0, 1, 1) self.label_wax_data = QtWidgets.QLabel(self.gridLayoutWidget_3) self.label_wax_data.setMinimumSize(QtCore.QSize(0, 14)) self.label_wax_data.setMaximumSize(QtCore.QSize(40, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_wax_data.setFont(font) self.label_wax_data.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.label_wax_data.setObjectName("label_wax_data") self.gridLayout_3.addWidget(self.label_wax_data, 5, 0, 1, 1) self.label_acid_data = QtWidgets.QLabel(self.gridLayoutWidget_3) self.label_acid_data.setMinimumSize(QtCore.QSize(0, 14)) self.label_acid_data.setMaximumSize(QtCore.QSize(40, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_acid_data.setFont(font) self.label_acid_data.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.label_acid_data.setObjectName("label_acid_data") self.gridLayout_3.addWidget(self.label_acid_data, 3, 0, 1, 1) self.label_water_data = QtWidgets.QLabel(self.gridLayoutWidget_3) self.label_water_data.setMinimumSize(QtCore.QSize(0, 14)) self.label_water_data.setMaximumSize(QtCore.QSize(40, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_water_data.setFont(font) self.label_water_data.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.label_water_data.setObjectName("label_water_data") self.gridLayout_3.addWidget(self.label_water_data, 1, 0, 1, 1) self.label_alkali_data = QtWidgets.QLabel(self.gridLayoutWidget_3) self.label_alkali_data.setMinimumSize(QtCore.QSize(0, 14)) self.label_alkali_data.setMaximumSize(QtCore.QSize(40, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_alkali_data.setFont(font) self.label_alkali_data.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.label_alkali_data.setObjectName("label_alkali_data") self.gridLayout_3.addWidget(self.label_alkali_data, 2, 0, 1, 1) self.ui_water_data = QtWidgets.QLineEdit(self.gridLayoutWidget_3) self.ui_water_data.setMaximumSize(QtCore.QSize(35, 15)) font = 
QtGui.QFont() font.setPointSize(9) self.ui_water_data.setFont(font) self.ui_water_data.setObjectName("ui_water_data") self.gridLayout_3.addWidget(self.ui_water_data, 1, 1, 1, 1) self.DRAIN_12 = QtWidgets.QLabel(self.gridLayoutWidget_3) self.DRAIN_12.setMinimumSize(QtCore.QSize(0, 14)) self.DRAIN_12.setMaximumSize(QtCore.QSize(25, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.DRAIN_12.setFont(font) self.DRAIN_12.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.DRAIN_12.setObjectName("DRAIN_12") self.gridLayout_3.addWidget(self.DRAIN_12, 5, 2, 1, 1) self.led_water = QtWidgets.QToolButton(self.gridLayoutWidget_3) self.led_water.setMaximumSize(QtCore.QSize(150, 15)) font = QtGui.QFont() font.setPointSize(9) self.led_water.setFont(font) self.led_water.setObjectName("led_water") self.gridLayout_3.addWidget(self.led_water, 1, 3, 1, 1) self.led_alkali = QtWidgets.QToolButton(self.gridLayoutWidget_3) self.led_alkali.setMaximumSize(QtCore.QSize(150, 15)) font = QtGui.QFont() font.setPointSize(9) self.led_alkali.setFont(font) self.led_alkali.setObjectName("led_alkali") self.gridLayout_3.addWidget(self.led_alkali, 2, 3, 1, 1) self.led_acid = QtWidgets.QToolButton(self.gridLayoutWidget_3) self.led_acid.setMaximumSize(QtCore.QSize(150, 15)) font = QtGui.QFont() font.setPointSize(9) self.led_acid.setFont(font) self.led_acid.setObjectName("led_acid") self.gridLayout_3.addWidget(self.led_acid, 3, 3, 1, 1) self.led_wheel = QtWidgets.QToolButton(self.gridLayoutWidget_3) self.led_wheel.setMaximumSize(QtCore.QSize(150, 15)) font = QtGui.QFont() font.setPointSize(9) self.led_wheel.setFont(font) self.led_wheel.setObjectName("led_wheel") self.gridLayout_3.addWidget(self.led_wheel, 4, 3, 1, 1) self.led_wax = QtWidgets.QToolButton(self.gridLayoutWidget_3) self.led_wax.setMaximumSize(QtCore.QSize(150, 15)) font = QtGui.QFont() font.setPointSize(9) self.led_wax.setFont(font) self.led_wax.setObjectName("led_wax") self.gridLayout_3.addWidget(self.led_wax, 5, 3, 1, 1) self.tab_pumps_station.addTab(self.device_3, "") self.tab_device_3 = QtWidgets.QTabWidget(self.SBC_2) self.tab_device_3.setGeometry(QtCore.QRect(10, 230, 371, 141)) self.tab_device_3.setTabPosition(QtWidgets.QTabWidget.West) self.tab_device_3.setTabShape(QtWidgets.QTabWidget.Triangular) self.tab_device_3.setElideMode(QtCore.Qt.ElideLeft) self.tab_device_3.setObjectName("tab_device_3") self.pumpswitch = QtWidgets.QWidget() self.pumpswitch.setObjectName("pumpswitch") self.btn_all_stop = QtWidgets.QCheckBox(self.pumpswitch) self.btn_all_stop.setGeometry(QtCore.QRect(0, 60, 91, 16)) font = QtGui.QFont() font.setPointSize(10) self.btn_all_stop.setFont(font) self.btn_all_stop.setObjectName("btn_all_stop") self.btn_high_water = QtWidgets.QCheckBox(self.pumpswitch) self.btn_high_water.setGeometry(QtCore.QRect(70, 60, 91, 16)) font = QtGui.QFont() font.setPointSize(10) self.btn_high_water.setFont(font) self.btn_high_water.setObjectName("btn_high_water") self.btn_wheel = QtWidgets.QCheckBox(self.pumpswitch) self.btn_wheel.setGeometry(QtCore.QRect(170, 60, 71, 16)) font = QtGui.QFont() font.setPointSize(10) self.btn_wheel.setFont(font) self.btn_wheel.setObjectName("btn_wheel") self.btn_alkali = QtWidgets.QCheckBox(self.pumpswitch) self.btn_alkali.setGeometry(QtCore.QRect(240, 60, 71, 16)) font = QtGui.QFont() font.setPointSize(10) self.btn_alkali.setFont(font) self.btn_alkali.setObjectName("btn_alkali") self.btn_acid = QtWidgets.QCheckBox(self.pumpswitch) 
self.btn_acid.setGeometry(QtCore.QRect(0, 80, 71, 16)) font = QtGui.QFont() font.setPointSize(10) self.btn_acid.setFont(font) self.btn_acid.setObjectName("btn_acid") self.btn_water_wax = QtWidgets.QCheckBox(self.pumpswitch) self.btn_water_wax.setGeometry(QtCore.QRect(70, 80, 91, 16)) font = QtGui.QFont() font.setPointSize(10) self.btn_water_wax.setFont(font) self.btn_water_wax.setObjectName("btn_water_wax") self.btn_drain = QtWidgets.QCheckBox(self.pumpswitch) self.btn_drain.setGeometry(QtCore.QRect(170, 80, 91, 16)) font = QtGui.QFont() font.setPointSize(10) self.btn_drain.setFont(font) self.btn_drain.setObjectName("btn_drain") self.btn_water_inflow = QtWidgets.QCheckBox(self.pumpswitch) self.btn_water_inflow.setGeometry(QtCore.QRect(240, 80, 101, 16)) font = QtGui.QFont() font.setPointSize(10) self.btn_water_inflow.setFont(font) self.btn_water_inflow.setObjectName("btn_water_inflow") self.label_pump_1 = QtWidgets.QLabel(self.pumpswitch) self.label_pump_1.setGeometry(QtCore.QRect(0, 10, 51, 14)) self.label_pump_1.setMinimumSize(QtCore.QSize(0, 14)) self.label_pump_1.setMaximumSize(QtCore.QSize(16777215, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_pump_1.setFont(font) self.label_pump_1.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.label_pump_1.setObjectName("label_pump_1") self.ui_log_pump = QtWidgets.QLineEdit(self.pumpswitch) self.ui_log_pump.setGeometry(QtCore.QRect(40, 10, 251, 15)) self.ui_log_pump.setMaximumSize(QtCore.QSize(16777215, 15)) font = QtGui.QFont() font.setPointSize(9) self.ui_log_pump.setFont(font) self.ui_log_pump.setText("") self.ui_log_pump.setObjectName("ui_log_pump") self.led_high_water = QtWidgets.QToolButton(self.pumpswitch) self.led_high_water.setGeometry(QtCore.QRect(40, 30, 50, 14)) self.led_high_water.setMinimumSize(QtCore.QSize(50, 0)) self.led_high_water.setMaximumSize(QtCore.QSize(55, 14)) font = QtGui.QFont() font.setPointSize(8) self.led_high_water.setFont(font) self.led_high_water.setToolTip("") self.led_high_water.setToolTipDuration(-1) self.led_high_water.setObjectName("led_high_water") self.led_ch_alkali = QtWidgets.QToolButton(self.pumpswitch) self.led_ch_alkali.setGeometry(QtCore.QRect(90, 30, 50, 14)) self.led_ch_alkali.setMinimumSize(QtCore.QSize(50, 0)) self.led_ch_alkali.setMaximumSize(QtCore.QSize(55, 14)) font = QtGui.QFont() font.setPointSize(8) self.led_ch_alkali.setFont(font) self.led_ch_alkali.setToolTip("") self.led_ch_alkali.setToolTipDuration(-1) self.led_ch_alkali.setObjectName("led_ch_alkali") self.led_ch_acid = QtWidgets.QToolButton(self.pumpswitch) self.led_ch_acid.setGeometry(QtCore.QRect(140, 30, 50, 14)) self.led_ch_acid.setMinimumSize(QtCore.QSize(50, 0)) self.led_ch_acid.setMaximumSize(QtCore.QSize(55, 14)) font = QtGui.QFont() font.setPointSize(8) self.led_ch_acid.setFont(font) self.led_ch_acid.setToolTip("") self.led_ch_acid.setToolTipDuration(-1) self.led_ch_acid.setObjectName("led_ch_acid") self.led_ch1_wheel = QtWidgets.QToolButton(self.pumpswitch) self.led_ch1_wheel.setGeometry(QtCore.QRect(190, 30, 50, 14)) self.led_ch1_wheel.setMinimumSize(QtCore.QSize(50, 0)) self.led_ch1_wheel.setMaximumSize(QtCore.QSize(55, 14)) font = QtGui.QFont() font.setPointSize(8) self.led_ch1_wheel.setFont(font) self.led_ch1_wheel.setToolTip("") self.led_ch1_wheel.setToolTipDuration(-1) self.led_ch1_wheel.setObjectName("led_ch1_wheel") self.led_ch1_wax = QtWidgets.QToolButton(self.pumpswitch) self.led_ch1_wax.setGeometry(QtCore.QRect(240, 30, 50, 14)) 
self.led_ch1_wax.setMinimumSize(QtCore.QSize(50, 0)) self.led_ch1_wax.setMaximumSize(QtCore.QSize(55, 14)) font = QtGui.QFont() font.setPointSize(8) self.led_ch1_wax.setFont(font) self.led_ch1_wax.setToolTip("") self.led_ch1_wax.setToolTipDuration(-1) self.led_ch1_wax.setObjectName("led_ch1_wax") self.label_pump_2 = QtWidgets.QLabel(self.pumpswitch) self.label_pump_2.setGeometry(QtCore.QRect(10, 110, 51, 14)) self.label_pump_2.setMinimumSize(QtCore.QSize(0, 14)) self.label_pump_2.setMaximumSize(QtCore.QSize(16777215, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_pump_2.setFont(font) self.label_pump_2.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.label_pump_2.setObjectName("label_pump_2") self.ui_log_pump_countdown = QtWidgets.QLineEdit(self.pumpswitch) self.ui_log_pump_countdown.setGeometry(QtCore.QRect(50, 110, 121, 15)) self.ui_log_pump_countdown.setMaximumSize(QtCore.QSize(16777215, 15)) font = QtGui.QFont() font.setPointSize(9) self.ui_log_pump_countdown.setFont(font) self.ui_log_pump_countdown.setText("") self.ui_log_pump_countdown.setObjectName("ui_log_pump_countdown") self.label_pump_3 = QtWidgets.QLabel(self.pumpswitch) self.label_pump_3.setGeometry(QtCore.QRect(190, 110, 71, 14)) self.label_pump_3.setMinimumSize(QtCore.QSize(0, 14)) self.label_pump_3.setMaximumSize(QtCore.QSize(16777215, 14)) font = QtGui.QFont() font.setPointSize(10) font.setBold(False) font.setWeight(50) self.label_pump_3.setFont(font) self.label_pump_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter) self.label_pump_3.setObjectName("label_pump_3") self.pump_countdown_box = QtWidgets.QSpinBox(self.pumpswitch) self.pump_countdown_box.setGeometry(QtCore.QRect(260, 110, 48, 16)) font = QtGui.QFont() font.setPointSize(10) self.pump_countdown_box.setFont(font) self.pump_countdown_box.setObjectName("pump_countdown_box") self.tab_device_3.addTab(self.pumpswitch, "") SBC.setCentralWidget(self.SBC_2) self.retranslateUi(SBC) self.tab_device.setCurrentIndex(0) self.tab_device_2.setCurrentIndex(0) self.tab_pumps_station.setCurrentIndex(0) self.tab_device_3.setCurrentIndex(0) QtCore.QMetaObject.connectSlotsByName(SBC) def retranslateUi(self, SBC): _translate = QtCore.QCoreApplication.translate SBC.setWindowTitle(_translate("SBC", "SBC")) self.label_pump_station.setText(_translate("SBC", "PUMP STATION")) self.ip_local.setText(_translate("SBC", "LocalIP : 0.0.0.0")) self.ip_nuc.setText(_translate("SBC", "NucIP : 0.0.0.0")) self.led_pump_station.setText(_translate("SBC", "OFF")) self.label_guides.setText(_translate("SBC", "GUIDES")) self.led_guides.setText(_translate("SBC", "OFF")) self.tab_device.setTabText(self.tab_device.indexOf(self.device), _translate("SBC", "DEVICE")) self.label_stage_show.setText(_translate("SBC", "STAGE SHOW")) self.label_stage_show_btn.setText(_translate("SBC", "SHOW BTN")) self.btn_welcome.setText(_translate("SBC", "欢迎光临")) self.btn_forward.setText(_translate("SBC", "向前行驶")) self.btn_stop_forward.setText(_translate("SBC", "停止行驶")) self.btn_back_driving.setText(_translate("SBC", "向后行驶")) self.btn_washing.setText(_translate("SBC", "正在清洗")) self.btn_washing_end.setText(_translate("SBC", "清洗结束")) self.label_guides_2.setText(_translate("SBC", "GUIDES")) self.tab_device_2.setTabText(self.tab_device_2.indexOf(self.device_2), _translate("SBC", "GUIDES")) self.DRAIN.setText(_translate("SBC", "DRAIN")) self.DRAIN_6.setText(_translate("SBC", "mm")) self.DRAIN_10.setText(_translate("SBC", 
"mm")) self.DRAIN_4.setText(_translate("SBC", "mm")) self.DRAIN_8.setText(_translate("SBC", "mm")) self.label_chem.setText(_translate("SBC", "LIQUID")) self.label_wheel_data.setText(_translate("SBC", "WHEEL")) self.label_wax_data.setText(_translate("SBC", "WAX")) self.label_acid_data.setText(_translate("SBC", "ACID")) self.label_water_data.setText(_translate("SBC", "WATER")) self.label_alkali_data.setText(_translate("SBC", "ALKALI")) self.DRAIN_12.setText(_translate("SBC", "mm")) self.led_water.setText(_translate("SBC", "full")) self.led_alkali.setText(_translate("SBC", "full")) self.led_acid.setText(_translate("SBC", "full")) self.led_wheel.setText(_translate("SBC", "full")) self.led_wax.setText(_translate("SBC", "full")) self.tab_pumps_station.setTabText(self.tab_pumps_station.indexOf(self.device_3), _translate("SBC", "PUMPS STATION")) self.btn_all_stop.setText(_translate("SBC", "ALL STOP")) self.btn_high_water.setText(_translate("SBC", "HIGH WATER")) self.btn_wheel.setText(_translate("SBC", "WHEEL")) self.btn_alkali.setText(_translate("SBC", "ALKALI ")) self.btn_acid.setText(_translate("SBC", "ACID")) self.btn_water_wax.setText(_translate("SBC", "WATER WAX")) self.btn_drain.setText(_translate("SBC", "DRAIN")) self.btn_water_inflow.setText(_translate("SBC", "WATER INFLOW")) self.label_pump_1.setText(_translate("SBC", "PUMP")) self.led_high_water.setText(_translate("SBC", "P")) self.led_ch_alkali.setText(_translate("SBC", "C1")) self.led_ch_acid.setText(_translate("SBC", "C2")) self.led_ch1_wheel.setText(_translate("SBC", "WE")) self.led_ch1_wax.setText(_translate("SBC", "WX")) self.label_pump_2.setText(_translate("SBC", "PUMP")) self.label_pump_3.setText(_translate("SBC", "剩余延迟时间")) self.tab_device_3.setTabText(self.tab_device_3.indexOf(self.pumpswitch), _translate("SBC", "PUMPSWITCH"))
d4ae3c0ec0b6138ccbc71c51af7764f03636fedc
f2bd7e127de1a49407858bfa24e2dacdf8a2159a
/exercises/ex3_1.py
f83e013df042248baedc94da8f381edfa85a83ed
[]
no_license
eddyhuyhp/ThreeCat
795b808040540fb14773938ccb9d4aca2a1c5d0a
81d51938ea5080f286decf3011493487e2639713
refs/heads/master
2020-03-16T13:31:25.208675
2018-05-09T03:19:44
2018-05-09T03:19:44
132,692,766
0
0
null
null
null
null
UTF-8
Python
false
false
743
py
#!/usr/bin/env python3


def solve(input_data):
    '''Input: a positive integer.

    Output: the integer formed by the last digit 1 and everything to its
    right in the binary representation of the input number.

    Examples::

        input_data = 5   # (0101)
        output = 1

        input_data = 24  # (11000)
        output = 1000

        input_data = 9   # (1001)
        output = 1

    Built-in helper: bin(10) == '0b1010'
    Built-in helper that builds an integer from a string: 69 == int('69')
    '''
    result = None
    a = bin(input_data)
    # Locate the last '1' by scanning the reversed binary string, slice from
    # that position to the end, and convert the digits back to an integer.
    result = int(a[len(a) - a[::-1].find('1') - 1:len(a)])
    return result


def main():
    i = input('Enter an integer: ')
    print(solve(int(i)))


if __name__ == "__main__":
    main()
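
# A minimal alternative sketch (hypothetical helper name, not part of the
# exercise): n & -n isolates the lowest set bit, so the digits from the last
# '1' rightwards are simply bin(n & -n) without the '0b' prefix.
def solve_bitwise(input_data):
    return int(bin(input_data & -input_data)[2:])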
4e37a9db4d23fe3b02b8714633e4e1eb463a253b
a67f928aea79cfceca16cb40e62e51dd7e484dd4
/analysis/analysisLog.py
ad6ade7d698ca4e0aa19a76b6a049ee594142bb2
[]
no_license
daniyuu/LatticeLSTM
2b6293d35f8ed674854bda8611d91992fa2fbd59
03953e576db12c741e804b1c36aa461696d018b9
refs/heads/master
2020-03-22T19:38:56.944213
2018-09-14T07:09:47
2018-09-14T07:09:47
140,542,239
2
0
null
2018-09-14T07:09:49
2018-07-11T08:04:15
Python
UTF-8
Python
false
false
3,079
py
import os from datetime import date import pygal log_folder_path = "./log/" result_folder_path = "./result/" if not os.path.exists(result_folder_path): os.makedirs(result_folder_path) def analysis_overall(file_name): logFile = open(log_folder_path + '{0}.txt'.format(file_name), 'r') y_test_p = [] y_test_r = [] y_test_f = [] y_test_acc = [] for line in logFile.readlines(): if "*** Test: " in line: items = line.split('; ') f = float(items[-1].split(': ')[1]) r = float(items[-2].split(': ')[1]) p = float(items[-3].split(': ')[1]) acc = float(items[-4].split(': ')[1]) y_test_f.append(f) y_test_r.append(r) y_test_p.append(p) y_test_acc.append(acc) line_chart = pygal.Line() line_chart.title = "Overall performance" # line_chart.x_labels = x line_chart.add("acc", y_test_acc) line_chart.add("p", y_test_p) line_chart.add("r", y_test_r) line_chart.add("f", y_test_f) line_chart.render_to_file(result_folder_path + 'Overall_{0}.svg'.format(file_name)) return y_test_p, y_test_r, y_test_f, y_test_acc def analysis_acc(file_name): logFile = open(log_folder_path + '{0}.txt'.format(file_name), 'r') index = 0 x = [] y_acc = [] for line in logFile.readlines(): if "Instance" in line: index += 1 acc = line.split('=')[1].split('\n')[0] x.append(index) y_acc.append(float(acc)) line_chart = pygal.Line() line_chart.title = "Acc Performance" # line_chart.x_labels = x line_chart.add("Acc", y_acc) line_chart.render_to_file(result_folder_path + 'Acc_{0}.svg'.format(file_name)) return def compare_logs(*file_names): p_chart = pygal.Line() p_chart.title = "Precious compare" r_chart = pygal.Line() r_chart.title = "Recall compare" f_chart = pygal.Line() f_chart.title = "F1 Score compare" acc_chart = pygal.Line() acc_chart.title = "Acc Score compare" for file_name in file_names: p, r, f, acc = analysis_overall(file_name) acc_chart.add(file_name, acc) p_chart.add(file_name, p) r_chart.add(file_name, r) f_chart.add(file_name, f) acc_chart.render_to_file( result_folder_path + 'Compare_{0}_Acc_{1}.svg'.format(date.today().isoformat(), '_'.join(file_names))) p_chart.render_to_file( result_folder_path + 'Compare_{0}_P_{1}.svg'.format(date.today().isoformat(), '_'.join(file_names))) r_chart.render_to_file( result_folder_path + 'Compare_{0}_R_{1}.svg'.format(date.today().isoformat(), '_'.join(file_names))) f_chart.render_to_file( result_folder_path + 'Compare_{0}_F_{1}.svg'.format(date.today().isoformat(), '_'.join(file_names))) return def analysis(file_name): analysis_overall(file_name) analysis_acc(file_name) return # # analysis('2018-08-10') # analysis('2018-08-13') # analysis('2018-08-21') compare_logs('2018-08-10', '2018-08-24')
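A small illustration (my addition, not taken from an actual log file) of the line shape analysis_overall() relies on: any line containing "*** Test: " is split on '; ', and the last four fields are read as acc, p, r and f in that order. The leading fields in this sample are placeholders; only the relative positions of the last four matter to the parser.

sample = "*** Test: time: 1.2s, speed: 50st/s; acc: 0.91; p: 0.88; r: 0.86; f: 0.87"
items = sample.split('; ')
f = float(items[-1].split(': ')[1])    # 0.87
r = float(items[-2].split(': ')[1])    # 0.86
p = float(items[-3].split(': ')[1])    # 0.88
acc = float(items[-4].split(': ')[1])  # 0.91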
9b8aae38ac4636bc7486232355f8895685ede2c4
042f1fe8d0b89b0df7043af0d37f24ef5508784c
/websphere-traditional/virtual-host.py
d1099de42f1326bdeca2ecc44c1ec019fba1a7b0
[]
no_license
pdprof/icp4a-helloworld
2e6eeeb25e665f32d2dc86c03d1a2332501cb847
849b4bc07b70fd78c28539326c8df48421b671f2
refs/heads/master
2023-08-10T09:21:36.404230
2021-09-13T09:41:31
2021-09-13T09:41:31
319,898,688
0
0
null
null
null
null
UTF-8
Python
false
false
521
py
print "set default-host..."
AdminConfig.create('HostAlias', AdminConfig.getid('/Cell:DefaultCell01/VirtualHost:admin_host/'), '[[hostname "twas-admin-route-default.apps-crc.testing"] [port "80"]]')
print "delete *:80..."
AdminConfig.remove('(cells/DefaultCell01|virtualhosts.xml#HostAlias_2)')
print "set admin-host..."
AdminConfig.create('HostAlias', AdminConfig.getid('/Cell:DefaultCell01/VirtualHost:default_host/'), '[[hostname "twas-route-default.apps-crc.testing"] [port "80"]]')
print "save..."
AdminConfig.save()
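Scripts like this are normally fed to the wsadmin client (for example via wsadmin.sh -lang jython -f virtual-host.py). As a hedged sketch, the same AdminConfig calls used above can be wrapped in a small helper so the cell, virtual host, hostname and port are not hard-coded; the argument values below are placeholders, not values from this repo.

def add_host_alias(cell, vhost, hostname, port):
    # Look up the virtual host and attach a new HostAlias to it,
    # exactly as the inline statements above do.
    vh = AdminConfig.getid('/Cell:%s/VirtualHost:%s/' % (cell, vhost))
    AdminConfig.create('HostAlias', vh, '[[hostname "%s"] [port "%s"]]' % (hostname, port))

add_host_alias('DefaultCell01', 'default_host', 'twas-route-default.apps-crc.testing', '80')
AdminConfig.save()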
e5825d77166ea761d10b731b739736acb581c092
f061602595a78bdbdbf32e2dfdcfe623db5b8efd
/graph/models.py
e48f2ed8c0b90fc6e2e06d8ee764bb75c38b0d6f
[]
no_license
NorbertMichalski/utilities
b9e0643d4b8e0097e0c774d63adbeaa66d3da06b
da27a23add9c42d62ae21a5e74eef920bbd3d839
refs/heads/master
2020-05-14T19:04:23.262384
2014-01-27T13:45:28
2014-01-27T13:45:28
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,937
py
from django.db import models from django.template.loader import render_to_string from prices.models import Product, Result from scrapers import ClickyScraper, RankScraper, OrderScraper, CashScraper import datetime # Create your models here. class OverviewGraph(models.Model): brand = models.CharField(max_length=50, unique=True) class Meta: ordering = ['id'] def __unicode__(self): return self.brand def chart(self): graph_pk = self.pk % 5 if graph_pk == 0: graph_pk = 5 stats = OverviewStat.objects.filter(graph=graph_pk).order_by('date') title = self.__unicode__().capitalize() + ' Statistics' prices = [] ranks = [] all_sales = [] all_visits = [] dates = [] money = [] if 'weekly' in self.brand: current_week = stats[0].date.isocalendar()[1] weekly_money, weekly_price, weekly_rank, weekly_visits, weekly_sales = 0, 0, 0, 0, 0 counter = 1 for stat in stats: week = stat.date.isocalendar()[1] if week != current_week: if self.brand == 'All weekly': money.append(float('%.2f' %weekly_money)) prices.append(float('%.2f' %(weekly_price/counter, ))) ranks.append(float('%.2f' %(weekly_rank/counter, ))) dates.append(stat.get_date()) all_visits.append(weekly_visits) all_sales.append(weekly_sales) current_week = week counter = 1 weekly_money, weekly_price, weekly_rank, weekly_visits, weekly_sales = 0, 0, 0, 0, 0 continue if self.brand == 'All weekly': weekly_money += float('%.2f' %(stat.get_money()/10,)) weekly_price += float('%.2f' %(stat.get_price()/10,)) weekly_rank += stat.get_rank() weekly_visits += stat.get_visits() weekly_sales += stat.get_sales() counter += 1 else: for stat in stats: if stat.is_weekend(): continue if self.brand == 'All': money.append(float('%.2f' %(stat.get_money()/10,))) prices.append(float('%.2f' %(stat.get_price()/10,))) ranks.append(stat.get_rank()) dates.append(stat.get_date()) all_visits.append(stat.get_visits()) all_sales.append(stat.get_sales()) data = { 'title' : '"' + title + '"', 'dates' : dates, 'prices' : prices, 'ranks' : ranks, 'sales' : all_sales, 'visits' : all_visits, 'dates' : dates, 'money' : money, } return render_to_string('admin/graph/overviewgraph/chart.html', data ) chart.allow_tags = True def week_chart(self): stats = OverviewStat.objects.filter(graph=self.pk).order_by('date') title = self.__unicode__().capitalize() + ' Statistics' prices = [] ranks = [] all_sales = [] all_visits = [] dates = [] money = [] if 'weekly' in self.brand: current_week = stats[0].date.isocalendar()[1] weekly_money, weekly_price, weekly_rank, weekly_visits, weekly_sales = 0, 0, 0, 0, 0 for stat in stats: week = stat.date.isocalendar()[1] counter = 1 if week != current_week: if 'All' in self.brand: money.append(weekly_money) prices.append(float('%.2f' %(weekly_price/counter, ))) ranks.append(float('%.2f' %(weekly_rank/counter, ))) dates.append(stat.get_date()) all_visits.append(weekly_visits) all_sales.append(weekly_sales) current_week = week counter = 1 weekly_money, weekly_price, weekly_rank, weekly_visits, weekly_sales = 0, 0, 0, 0, 0 if 'All' in self.brand: weekly_money += float('%.2f' %(stat.get_money()/10,)) weekly_price = float('%.2f' %(stat.get_price()/10,)) weekly_rank += stat.get_rank() weekly_visits += stat.get_visits() weekly_sales = stat.get_sales() data = { 'title' : '"' + title + '"', 'dates' : dates, 'prices' : prices, 'ranks' : ranks, 'sales' : all_sales, 'visits' : all_visits, 'dates' : dates, 'money' : money, } return render_to_string('admin/graph/overviewgraph/chart.html', data ) week_chart.allow_tags = True class OverviewStat(models.Model): graph = 
models.ForeignKey(OverviewGraph) price = models.DecimalField(max_digits=6, decimal_places=2, default=0) rank = models.DecimalField(max_digits=5, decimal_places=2, default=0) visits = models.IntegerField(default=0) sales = models.IntegerField(default=0) money = models.DecimalField(max_digits=9, decimal_places=2, default=0) date = models.DateField('date last updated', default=datetime.date.today) class Meta: unique_together = ("graph", "date") def get_price(self): return float(self.price) def get_rank(self): return float(self.rank) def get_date(self): return self.date.strftime("%Y-%m-%d") def is_weekend(self): if self.date.weekday()==5 or self.date.weekday()==6: return True return False def get_sales(self): return int(self.sales) def get_visits(self): return int(self.visits) def get_money(self): return float(self.money) def __unicode__(self): return self.get_date() + ' ' + str(self.graph) def update_price(self): brand_name = self.graph.brand.lower() if brand_name == 'all': all_products = Product.objects.all().count() cheaper_results = Product.objects.all().filter(is_cheaper=True).count() else: all_products = Product.objects.filter(brand__name=brand_name).count() cheaper_results = Product.objects.filter(brand__name=brand_name, is_cheaper=True).count() ratio = 100 - float(cheaper_results)/float(all_products) * 100 print 'market share', brand_name, ratio self.price = '%.2f' %ratio def update_visits(self, date=datetime.date.today()): brand_name = self.graph.brand.lower() scraper = ClickyScraper() visits = scraper.brand_visits(brand_name, date) print 'visits', brand_name, visits self.visits = visits def update_rank(self, date=datetime.date.today()): brand_name = self.graph.brand.lower() scraper = RankScraper() rank = scraper.get_rank(brand_name) print 'rank', brand_name, rank if rank: self.rank = float(rank) def update_sales(self, date=datetime.date.today()): brand_name = self.graph.brand.lower() scraper = OrderScraper() sales = scraper.get_sales(brand_name, date) print 'sales', brand_name, sales self.sales = sales def update_money(self, date=datetime.date.today()): brand_name = self.graph.brand.lower() scraper = CashScraper() sales = scraper.get_money(date) print 'sales', brand_name, sales self.money = sales
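The update_* methods above each refresh one metric on an OverviewStat row. A hedged sketch (my addition, not code from this repo) of how they might be driven once per day; the model, field and method names come from the source, while the loop itself is an assumption about how the author schedules updates.

import datetime

def refresh_overview_stats(day=None):
    day = day or datetime.date.today()
    for graph in OverviewGraph.objects.all():
        # unique_together ("graph", "date") makes get_or_create safe to re-run
        stat, _ = OverviewStat.objects.get_or_create(graph=graph, date=day)
        stat.update_price()
        stat.update_visits(day)
        stat.update_rank(day)
        stat.update_sales(day)
        stat.update_money(day)
        stat.save()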