Dataset schema (one row per source file; column, dtype, observed range):

  blob_id               stringlengths   40 .. 40
  directory_id          stringlengths   40 .. 40
  path                  stringlengths   3 .. 616
  content_id            stringlengths   40 .. 40
  detected_licenses     listlengths     0 .. 112
  license_type          stringclasses   2 values
  repo_name             stringlengths   5 .. 115
  snapshot_id           stringlengths   40 .. 40
  revision_id           stringlengths   40 .. 40
  branch_name           stringclasses   777 values
  visit_date            timestamp[us]   2015-08-06 10:31:46 .. 2023-09-06 10:44:38
  revision_date         timestamp[us]   1970-01-01 02:38:32 .. 2037-05-03 13:00:00
  committer_date        timestamp[us]   1970-01-01 02:38:32 .. 2023-09-06 01:08:06
  github_id             int64           4.92k .. 681M
  star_events_count     int64           0 .. 209k
  fork_events_count     int64           0 .. 110k
  gha_license_id        stringclasses   22 values
  gha_event_created_at  timestamp[us]   2012-06-04 01:52:49 .. 2023-09-14 21:59:50
  gha_created_at        timestamp[us]   2008-05-22 07:58:19 .. 2023-08-21 12:35:19
  gha_language          stringclasses   149 values
  src_encoding          stringclasses   26 values
  language              stringclasses   1 value
  is_vendor             bool            2 classes
  is_generated          bool            2 classes
  length_bytes          int64           3 .. 10.2M
  extension             stringclasses   188 values
  content               stringlengths   3 .. 10.2M
  authors               listlengths     1 .. 1
  author_id             stringlengths   1 .. 132
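The records below follow the schema above: each one is a source file with repository provenance, license metadata, and the raw file content. As a minimal, hypothetical sketch (the dataset id "org/dataset" is a placeholder, not this dataset's real name), rows with this schema could be streamed and inspected with the Hugging Face datasets library:

# Minimal sketch: stream a few rows of a dataset with the schema above.
# NOTE: "org/dataset" is a placeholder dataset id, not the real one.
from datasets import load_dataset

ds = load_dataset("org/dataset", split="train", streaming=True)

for row in ds.take(3):
    # Each row pairs repo/file provenance with the raw file content.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:200])  # first 200 characters of the source file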
----------------------------------------------------------------------
path: /p3.py
repo_name: mepky/data-structure-and-algorithm  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: 91efd913c270d343c4b45b6d1eb44d4aa58f912c  directory_id: 35a6b6b5cabcf9fb39527bab020ef7c96265a026  content_id: 5911e61bf240cc3e917c3377949ca16c9c46851d
snapshot_id: 9a1324142276e6966692c51734613f15234f5300  revision_id: 96f64e657f97e46fc2d32cca5294fa0f104d5d01
visit_date: 2020-03-24T08:57:41.692564  revision_date: 2020-02-10T12:40:13  committer_date: 2020-02-10T12:40:13
github_id: 142,614,071  star_events_count: 1  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 476  extension: py
content:

from collections import defaultdict

t = int(input())
for _ in range(t):
    l = defaultdict(int)
    n = int(input())
    d = 2**20
    t = 0
    s = input()
    a = [-1] * 27
    for i in range(n):
        if a[ord(s[i]) - 97] == -1:
            a[ord(s[i]) - 97] = i
        else:
            d = min(d, i - a[ord(s[i]) - 97])
            t = 1
            a[ord(s[i]) - 97] = i
    if t == 0:
        print(0)
    else:
        print(n - d)
----------------------------------------------------------------------
path: /wows/move.py
repo_name: lorne-luo/auto-wows  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: f4cc030b9c8573c816c10160ff087a8c68c9d808  directory_id: e00cf0bf72421ec31e4d3608c615aeeba5064731  content_id: 3165d0d74b85208a58ea1b2ed7ee70fd489a053c
snapshot_id: b4a84c7d99585c84a635fb5be11fd0f03a5f37fd  revision_id: 992ad473f1d5a78686e1c4c939c6c218e72373d7
visit_date: 2020-12-30T00:52:17.497039  revision_date: 2020-02-25T11:10:30  committer_date: 2020-02-25T11:10:30
github_id: 238,803,938  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 1,190  extension: py
content:

import time
from random import randint

import pyautogui as pag

import settings as settings
from helper import search_template, get_map_image


class WOWS_Move(object):
    def move_ship(self):
        global MOVE_TO
        pag.press('m', presses=1, interval=0.25)
        pag.sleep(1.5)
        if not MOVE_TO:
            map_image = get_map_image()
            self_loc = search_template(map_image, 'map_self_icon.bmp')
            print('self_loc', self_loc)
            if self_loc:
                MOVE_TO = (settings.BATTLE_MAP_TOPLEFT[0] + settings.BATTLE_MAP_SIZE[0] - self_loc[1],
                           settings.BATTLE_MAP_TOPLEFT[1] + settings.BATTLE_MAP_SIZE[1] - self_loc[0])
            else:
                MOVE_TO = (settings.BATTLE_MAP_TOPLEFT[0] + settings.BATTLE_MAP_SIZE[0] / 2,
                           settings.BATTLE_MAP_TOPLEFT[1] + settings.BATTLE_MAP_SIZE[1] / 2)
        for i in range(4):
            loc = (MOVE_TO[0] + randint(-50, 50), MOVE_TO[1] + randint(-50, 50))
            pag.moveTo(loc)
            pag.click(clicks=2, interval=0.5, button='left')
            time.sleep(1)
        pag.press('esc')
        time.sleep(2)
----------------------------------------------------------------------
path: /usr/lib/enigma2/python/Plugins/Extensions/MediaPortal/additions/porn/cliphunter.py
repo_name: linuxbox10/enigma2-plugin-extensions-mediaportal  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: 327de0fb6195fa9d70bb2f59a1b649c60f9ad8da  directory_id: 31900bdf5648061a3093230711c5394e20b90436  content_id: f0ad44408f3a495045054598cafc29b1ceb97fb7
snapshot_id: aa6f14ecfc42ce91e22c487070541459a1ab820c  revision_id: e6b388918c186442718e7200e03c83d0db260831
visit_date: 2021-05-01T18:50:50.332850  revision_date: 2018-02-10T11:33:48  committer_date: 2018-02-10T11:33:48
github_id: 121,009,954  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 10,746  extension: py
content:

# -*- coding: utf-8 -*-
###############################################################################################
#
# MediaPortal for Dreambox OS
#
# Coded by MediaPortal Team (c) 2013-2018
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Property GmbH. This includes commercial distribution.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Property GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Property GmbH.
#
# This applies to the source code as a whole as well as to parts of it, unless
# explicitely stated otherwise.
#
# If you want to use or modify the code or parts of it,
# you have to keep OUR license and inform us about the modifications, but it may NOT be
# commercially distributed other than under the conditions noted above.
#
# As an exception regarding execution on hardware, you are permitted to execute this plugin on VU+ hardware
# which is licensed by satco europe GmbH, if the VTi image is used on that hardware.
#
# As an exception regarding modifcations, you are NOT permitted to remove
# any copy protections implemented in this plugin or change them for means of disabling
# or working around the copy protections, unless the change has been explicitly permitted
# by the original authors. Also decompiling and modification of the closed source
# parts is NOT permitted.
#
# Advertising with this plugin is NOT allowed.
# For other uses, permission from the authors is necessary.
#
###############################################################################################

from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *

agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'
default_cover = "file://%s/cliphunter.png" % (config.mediaportal.iconcachepath.value + "logos")


class cliphunterGenreScreen(MPScreen):

    def __init__(self, session):
        MPScreen.__init__(self, session, skin='MP_PluginDescr', default_cover=default_cover)

        self["actions"] = ActionMap(["MP_Actions"], {
            "ok": self.keyOK, "0": self.closeAll, "cancel": self.keyCancel,
            "up": self.keyUp, "down": self.keyDown, "right": self.keyRight, "left": self.keyLeft
        }, -1)

        self['title'] = Label("cliphunter.com")
        self['ContentTitle'] = Label("Genre:")

        self.keyLocked = True
        self.suchString = ''
        self.genreliste = []
        self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self['liste'] = self.ml

        self.onLayoutFinish.append(self.layoutFinished)

    def layoutFinished(self):
        url = "http://www.cliphunter.com/categories/"
        getPage(url, agent=agent).addCallback(self.genreData).addErrback(self.dataError)

    def genreData(self, data):
        Cats = re.findall(' <a href="(/categories/.*?)" title="(.*?)">.*?<img src="(.*?)"/>', data, re.S)
        if Cats:
            for (Url, Title, Image) in Cats:
                Url = 'http://www.cliphunter.com%s/' % Url.replace(' ', '%20')
                if not Title == "All":
                    self.genreliste.append((Title, Url, Image))
        self.genreliste.sort()
        self.genreliste.insert(0, ("Pornstars", 'http://www.cliphunter.com/pornstars/top/overview/', default_cover))
        self.genreliste.insert(0, ("Top Year", 'http://www.cliphunter.com/popular/ratings/year/', default_cover))
        self.genreliste.insert(0, ("Top Month", 'http://www.cliphunter.com/popular/ratings/month/', default_cover))
        self.genreliste.insert(0, ("Top Week", 'http://www.cliphunter.com/popular/ratings/week/', default_cover))
        self.genreliste.insert(0, ("Top Yesterday", 'http://www.cliphunter.com/popular/ratings/yesterday/', default_cover))
        self.genreliste.insert(0, ("Top Today", 'http://www.cliphunter.com/popular/ratings/today/', default_cover))
        self.genreliste.insert(0, ("Hall of Fame", 'http://www.cliphunter.com/popular/ratings/all/', default_cover))
        self.genreliste.insert(0, ("Newest", 'http://www.cliphunter.com/categories/All/', default_cover))
        self.genreliste.insert(0, ("--- Search ---", "callSuchen", default_cover))
        self.ml.setList(map(self._defaultlistcenter, self.genreliste))
        self.ml.moveToIndex(0)
        self.keyLocked = False
        self.showInfos()

    def showInfos(self):
        Image = self['liste'].getCurrent()[0][2]
        CoverHelper(self['coverArt']).getCover(Image)

    def keyOK(self):
        if self.keyLocked:
            return
        Name = self['liste'].getCurrent()[0][0]
        Link = self['liste'].getCurrent()[0][1]
        if Name == "--- Search ---":
            self.suchen()
        elif Name == "Pornstars":
            self.session.open(cliphunterPornstarScreen, Link, Name)
        else:
            self.session.open(cliphunterFilmScreen, Link, Name)

    def SuchenCallback(self, callback=None, entry=None):
        if callback is not None and len(callback):
            self.suchString = callback.replace(' ', '%20')
            Link = '%s' % (self.suchString)
            Name = "--- Search ---"
            self.session.open(cliphunterFilmScreen, Link, Name)


class cliphunterPornstarScreen(MPScreen, ThumbsHelper):

    def __init__(self, session, Link, Name):
        self.Link = Link
        self.Name = Name
        MPScreen.__init__(self, session, skin='MP_PluginDescr', default_cover=default_cover)
        ThumbsHelper.__init__(self)

        self["actions"] = ActionMap(["MP_Actions"], {
            "ok": self.keyOK, "0": self.closeAll, "cancel": self.keyCancel, "5": self.keyShowThumb,
            "up": self.keyUp, "down": self.keyDown, "right": self.keyRight, "left": self.keyLeft,
            "nextBouquet": self.keyPageUp, "prevBouquet": self.keyPageDown, "green": self.keyPageNumber
        }, -1)

        self['title'] = Label("cliphunter.com")
        self['ContentTitle'] = Label("Genre: %s" % self.Name)
        self['F2'] = Label(_("Page"))
        self['Page'] = Label(_("Page:"))

        self.keyLocked = True
        self.page = 1
        self.lastpage = 1
        self.genreliste = []
        self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self['liste'] = self.ml

        self.onLayoutFinish.append(self.loadPage)

    def loadPage(self):
        self.keyLocked = True
        self.genreliste = []
        url = "%s%s" % (self.Link, str(self.page))
        getPage(url, agent=agent).addCallback(self.genreData).addErrback(self.dataError)

    def genreData(self, data):
        self.getLastPage(data, '', 'maxPages="(.*?)"')
        Parse = re.search('photoGrid">(.*?)class="clearfix">', data, re.S)
        Cats = re.findall('href="(.*?)">.*?src=\'(.*?)\'/>.*?<span>(.*?)</span>', Parse.group(1), re.S)
        if Cats:
            for (Url, Image, Title) in Cats:
                Url = "http://www.cliphunter.com" + Url + "/movies/"
                self.genreliste.append((Title.title(), Url, Image))
        self.ml.setList(map(self._defaultlistleft, self.genreliste))
        self.ml.moveToIndex(0)
        self.keyLocked = False
        self.th_ThumbsQuery(self.genreliste, 0, 1, 2, None, None, self.page, int(self.lastpage), mode=1)
        self.showInfos()

    def showInfos(self):
        Title = self['liste'].getCurrent()[0][0]
        Image = self['liste'].getCurrent()[0][2]
        self['name'].setText(Title)
        CoverHelper(self['coverArt']).getCover(Image)

    def keyOK(self):
        if self.keyLocked:
            return
        Name = self['liste'].getCurrent()[0][0]
        Link = self['liste'].getCurrent()[0][1]
        self.session.open(cliphunterFilmScreen, Link, Name)


class cliphunterFilmScreen(MPScreen, ThumbsHelper):

    def __init__(self, session, Link, Name):
        self.Link = Link
        self.Name = Name
        MPScreen.__init__(self, session, skin='MP_PluginDescr', default_cover=default_cover)
        ThumbsHelper.__init__(self)

        self["actions"] = ActionMap(["MP_Actions"], {
            "ok": self.keyOK, "0": self.closeAll, "cancel": self.keyCancel, "5": self.keyShowThumb,
            "up": self.keyUp, "down": self.keyDown, "right": self.keyRight, "left": self.keyLeft,
            "nextBouquet": self.keyPageUp, "prevBouquet": self.keyPageDown, "green": self.keyPageNumber
        }, -1)

        self['title'] = Label("cliphunter.com")
        self['ContentTitle'] = Label("Genre: %s" % self.Name)
        self['F2'] = Label(_("Page"))
        self['Page'] = Label(_("Page:"))

        self.keyLocked = True
        self.page = 1
        self.lastpage = 1
        self.filmliste = []
        self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self['liste'] = self.ml

        self.onLayoutFinish.append(self.loadPage)

    def loadPage(self):
        self.keyLocked = True
        self['name'].setText(_('Please wait...'))
        self.filmliste = []
        if re.match(".*?Search", self.Name):
            url = "http://www.cliphunter.com/search/%s/%s" % (self.Link, str(self.page))
        else:
            url = "%s%s" % (self.Link, str(self.page))
        getPage(url, agent=agent).addCallback(self.loadData).addErrback(self.dataError)

    def loadData(self, data):
        self.getLastPage(data, '', 'maxPages="(.*?)"')
        Movies = re.findall('class="t"\shref="(/w/\d+/(.*?))".*?class="i"\ssrc="(.*?)".*?class="tr">(.*?)</div>.*?class="vttl.*?">(.*?)</a>', data, re.S)
        if Movies:
            for (Url, TitleUrl, Image, Runtime, Title) in Movies:
                Url = "http://www.cliphunter.com" + Url
                self.filmliste.append((TitleUrl.replace('_', ' '), Url, Image, Runtime))
        if len(self.filmliste) == 0:
            self.filmliste.append((_('No videos found!'), '', None, ''))
        self.ml.setList(map(self._defaultlistleft, self.filmliste))
        self.ml.moveToIndex(0)
        self.keyLocked = False
        self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, int(self.lastpage), mode=1)
        self.showInfos()

    def showInfos(self):
        title = self['liste'].getCurrent()[0][0]
        url = self['liste'].getCurrent()[0][1]
        pic = self['liste'].getCurrent()[0][2]
        runtime = self['liste'].getCurrent()[0][3]
        self['handlung'].setText("Runtime: %s" % runtime)
        self['name'].setText(title)
        CoverHelper(self['coverArt']).getCover(pic)

    def keyOK(self):
        if self.keyLocked:
            return
        Link = self['liste'].getCurrent()[0][1]
        self.keyLocked = True
        getPage(Link, agent=agent).addCallback(self.getVideoPage).addErrback(self.dataError)

    def getVideoPage(self, data):
        url = re.findall('"url":"(.*?)"}', data, re.S)
        if url:
            url = url[-1]
            url = url.replace('\u0026', '.')
            translation_table = {
                'a': 'h', 'd': 'e', 'e': 'v', 'f': 'o', 'g': 'f', 'i': 'd', 'l': 'n', 'm': 'a',
                'n': 'm', 'p': 'u', 'q': 't', 'r': 's', 'v': 'p', 'x': 'r', 'y': 'l', 'z': 'i',
                '$': ':', '&': '.', '(': '=', '^': '&', '=': '/',
            }
            url = ''.join(translation_table.get(c, c) for c in url)
            self.keyLocked = False
            Title = self['liste'].getCurrent()[0][0]
            self.session.open(SimplePlayer, [(Title, url)], showPlaylist=False, ltype='cliphunter')
----------------------------------------------------------------------
path: /pypers/oxford/non_cooperative.py
repo_name: micheles/papers  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: 12e05ceaac7c5c4174fb21ada9bdbb1e70c90c54  directory_id: ffb05b145989e01da075e2a607fb291955251f46  content_id: 6c7b293967ae50f89ebf7f90ccccdc8e62ba6d40
snapshot_id: a5e7f2fa0cf305cd3f8face7c7ecc0db70ce7cc7  revision_id: be9070f8b7e8192b84a102444b1238266bdc55a0
visit_date: 2023-06-07T16:46:46.306040  revision_date: 2018-07-14T04:17:51  committer_date: 2018-07-14T04:17:51
github_id: 32,264,461  star_events_count: 2  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 255  extension: py
content:

# non_cooperative.py

class B1(object):
    def __init__(self, **kw):
        print "B1.__init__"
        super(B1, self).__init__(**kw)

class B2(object):
    def __init__(self, **kw):
        print "B2.__init__"
        super(B2, self).__init__(**kw)
----------------------------------------------------------------------
path: /exps-gsn-edf/gsn-edf_ut=3.0_rd=0.5_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=25/params.py
repo_name: ricardobtxr/experiment-scripts  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: 94f2093636ae67fdc8ec2d5431c2b52cbd51d7c2  directory_id: ac5e52a3fc52dde58d208746cddabef2e378119e  content_id: 1e06a0ee411f4cd8e4e96c1df8f010d7336d6730
snapshot_id: 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1  revision_id: 7bcebff7ac2f2822423f211f1162cd017a18babb
visit_date: 2023-04-09T02:37:41.466794  revision_date: 2021-04-25T03:27:16  committer_date: 2021-04-25T03:27:16
github_id: 358,926,457  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 254  extension: py
content:

{'cpus': 4,
 'duration': 30,
 'final_util': '3.041500',
 'max_util': '3.0',
 'periods': 'harmonic-2',
 'release_master': False,
 'res_distr': '0.5',
 'res_nmb': '4',
 'res_weight': '0.06',
 'scheduler': 'GSN-EDF',
 'trial': 25,
 'utils': 'uni-medium-3'}
----------------------------------------------------------------------
path: /virtual/bin/django-admin.py
repo_name: thuitafaith/djangoapp  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: 6cd613bcdd91d3c252c77c5671f432f525d64cfc  directory_id: bee2af5228232ce94f418b61810cecd93af62615  content_id: d6b83b56c12b4e16e7f24824f482b665a071f57a
snapshot_id: b64c2e1a05c67b1135d4d9dd7975c17522238a69  revision_id: e06280b34a7b1ec012d0baab6f0fb153875a39b4
visit_date: 2022-12-11T19:06:08.540528  revision_date: 2019-08-29T12:36:45  committer_date: 2019-08-29T12:36:45
github_id: 203,321,071  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: 2022-11-22T04:13:07  gha_created_at: 2019-08-20T07:15:28  gha_language: Python
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 158  extension: py
content:

#!/home/faith/Desktop/django-rem/virtual/bin/python
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
----------------------------------------------------------------------
path: /examples/data/Assignment_2/mphnok005/question3.py
repo_name: MrHamdulay/csc3-capstone  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: f3467f1043b80a0ea9337c61aa83eb37180e440c  directory_id: 98c6ea9c884152e8340605a706efefbea6170be5  content_id: 8ab32014734be33c45000ec60015c87758483dae
snapshot_id: 479d659e1dcd28040e83ebd9e3374d0ccc0c6817  revision_id: 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2
visit_date: 2021-03-12T21:55:57.781339  revision_date: 2014-09-22T02:22:22  committer_date: 2014-09-22T02:22:22
github_id: 22,372,174  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 207  extension: py
content:

from math import*
x=sqrt(2)
a=2
pi=2*(a/x)
while x<2:
    x=(sqrt(2+x))
    pi=(pi*a/x)
print("Approximation of pi:",round(pi,3))
c=eval(input("Enter the radius:\n"))
print("Area:",round(c**2*pi,3))
----------------------------------------------------------------------
path: /test/benchmark/resnet_tl_benchmark.py
repo_name: liujuanLT/InsightFace_TF  branch_name: refs/heads/master  license_type: permissive  detected_licenses: [ "MIT" ]
blob_id: 3da3e17495525b485fd627a5d52d55b261e728ec  directory_id: 8d50cc4f37c153fcb51de4501f3fa50c00394d9b  content_id: 0273e724c53f6d0c0598924637c84431b5b3fe0c
snapshot_id: dbd239dfdda1866c348e82211932884f73cb3067  revision_id: 257b6e0dcf7e7c3523dc7e1c08ba529fab1bf75b
visit_date: 2022-04-27T21:24:01.458277  revision_date: 2022-03-17T12:28:15  committer_date: 2022-03-17T12:28:15
github_id: 463,040,192  star_events_count: 0  fork_events_count: 0
gha_license_id: MIT  gha_event_created_at: 2022-02-24T06:51:16  gha_created_at: 2022-02-24T06:51:15  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 1,255  extension: py
content:

import tensorflow as tf
import tensorflow.contrib.slim.nets as nets
import numpy as np
from nets.resnet import get_resnet

slim = tf.contrib.slim
resnet = nets.resnet_v1

if __name__ == '__main__':
    output_shape = 85164
    batch_size = 128
    image = tf.placeholder(name='input_x', shape=[None, 224, 224, 3], dtype=tf.float32)
    labels = tf.placeholder(name='input_label', shape=[None, output_shape], dtype=tf.float32)
    with slim.arg_scope(nets.resnet_utils.resnet_arg_scope()):
        nets = get_resnet(image, output_shape, 50, type='resnet', sess=None, pretrained=False)
    print(nets.outputs)
    probabilities = tf.reduce_mean(tf.nn.softmax(nets.outputs, dim=-1))
    print(probabilities)
    losses = tf.norm(tf.subtract(probabilities, labels))
    train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(losses)
    sess = tf.Session()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    while True:
        datasets = np.random.randn(batch_size, 224, 224, 3).astype(np.float32)
        datasets_labels = np.random.randn(batch_size, output_shape).astype(np.float32)
        losses_val, _ = sess.run([losses, train_op], feed_dict={image: datasets, labels: datasets_labels})
        print(losses_val)
----------------------------------------------------------------------
path: /recursion/palandrome.py
repo_name: ahmedmeshref/Leetcode-Solutions  branch_name: refs/heads/main  license_type: no_license  detected_licenses: []
blob_id: 51d5ae1fa2d5ae73a65d826bd1113e9b57cef767  directory_id: 03383b657ad6d526e7e6aa6639fe41019cd39ea2  content_id: 985c2d63e7528bf16d0978634606988d462fbf30
snapshot_id: 1c5f908cb2f6487c9dfadcc8f91192dedbb5a17e  revision_id: 28f848cb25e4aa22e6d8c9d715488f191ed15137
visit_date: 2023-05-26T14:33:11.246122  revision_date: 2021-06-07T21:32:13  committer_date: 2021-06-07T21:32:13
github_id: 356,045,965  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 402  extension: py
content:

def isPalindrome(s: str) -> bool:
    l = 0
    r = len(s) - 1
    while l != r:
        if not s[l].isalpha():
            l += 1
        elif not s[r].isalpha():
            r -= 1
        elif s[l].lower() != s[r].lower():
            return False
        else:
            l += 1
            r -= 1
    return s[l] == s[r]

print(isPalindrome("A man, a plan, a canal: Panama"))
----------------------------------------------------------------------
path: /packages/jet_bridge_base/jet_bridge_base/serializers/relationship_override.py
repo_name: jet-admin/jet-bridge  branch_name: refs/heads/master  license_type: permissive  detected_licenses: [ "MIT" ]
blob_id: e49952bb3039c47341a3a2001f153c1fcea8521c  directory_id: 05169e203974411667ab947298a74575b8a179e0  content_id: c985788ccf2e5eeaf886f08eb8bb093846f356d8
snapshot_id: f6b563e1801985063483ddb02e9e1c3301dc0612  revision_id: c53d30fb308eed5822083eaf71f641c4098610cc
visit_date: 2023-09-01T14:31:42.261427  revision_date: 2023-08-24T13:54:34  committer_date: 2023-08-24T13:54:34
github_id: 163,167,532  star_events_count: 1,564  fork_events_count: 166
gha_license_id: MIT  gha_event_created_at: 2023-03-18T03:20:04  gha_created_at: 2018-12-26T10:27:33  gha_language: Python
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 4,873  extension: py
content:

from jet_bridge_base.models.model_relation_override import ModelRelationOverrideModel
from jet_bridge_base.store import store
from sqlalchemy import inspect

from jet_bridge_base import fields
from jet_bridge_base.db import get_mapped_base, reload_request_graphql_schema, get_request_connection
from jet_bridge_base.exceptions.validation_error import ValidationError
from jet_bridge_base.serializers.serializer import Serializer
from jet_bridge_base.logger import logger


class ModelDescriptionRelationOverrideSerializer(Serializer):
    direction = fields.CharField()
    local_field = fields.CharField()
    related_model = fields.CharField()
    related_field = fields.CharField()


class ModelDescriptionRelationOverridesSerializer(Serializer):
    model = fields.CharField()
    relations = ModelDescriptionRelationOverrideSerializer(many=True)

    def get_model(self, request, name):
        MappedBase = get_mapped_base(request)
        return MappedBase.classes.get(name)

    def generate_many_to_one_name(self, mapper, local_field, related_model, related_field):
        name = '__'.join([local_field, 'to', related_model, related_field])
        if name in mapper.columns:
            name = name + '_relation'
            logger.warning('Already detected column name, using {}'.format(name))
        return name

    def generate_one_to_many_name(self, mapper, local_field, related_model, related_field):
        name = '__'.join([related_model, related_field, 'to', local_field])
        if name in mapper.columns:
            name = name + '_relation'
            logger.warning('Already detected column name, using {}'.format(name))
        return name

    def validate(self, attrs):
        request = self.context.get('request')
        Model = self.get_model(request, attrs['model'])
        if Model is None:
            raise ValidationError('Unknown relation override model: {}'.format(attrs['model']))
        mapper = inspect(Model)

        for item in attrs['relations']:
            if item['direction'] == 'MANYTOONE':
                item['name'] = self.generate_many_to_one_name(mapper, item['local_field'], item['related_model'], item['related_field'])
            elif item['direction'] == 'ONETOMANY':
                item['name'] = self.generate_one_to_many_name(mapper, item['local_field'], item['related_model'], item['related_field'])
            else:
                raise ValidationError('Unknown relation direction: {}'.format(item['direction']))

        return attrs

    def save(self):
        request = self.context.get('request')
        connection = get_request_connection(request)
        draft = bool(request.get_argument('draft', False))

        with store.session() as session:
            with session.begin():
                for item in self.validated_data:
                    set_overrides = sorted(item['relations'], key=lambda x: x['name'])
                    existing_overrides = session.query(ModelRelationOverrideModel).filter(
                        ModelRelationOverrideModel.connection_id == connection['id'],
                        ModelRelationOverrideModel.model == item['model'],
                        draft == draft
                    ).order_by(ModelRelationOverrideModel.name).all()
                    existing_overrides = list(existing_overrides)

                    for i, override in enumerate(set_overrides):
                        existing_override = existing_overrides[i] if i < len(existing_overrides) else None
                        if existing_override:
                            existing_override.name = override.get('name')
                            existing_override.direction = override.get('direction')
                            existing_override.local_field = override.get('local_field')
                            existing_override.related_model = override.get('related_model')
                            existing_override.related_field = override.get('related_field')
                        else:
                            session.add(ModelRelationOverrideModel(
                                connection_id=connection['id'],
                                model=item['model'],
                                draft=draft,
                                name=override.get('name'),
                                direction=override.get('direction'),
                                local_field=override.get('local_field'),
                                related_model=override.get('related_model'),
                                related_field=override.get('related_field')
                            ))

                    delete_overrides = existing_overrides[len(item['relations']):]
                    for override in delete_overrides:
                        session.delete(override)

        reload_request_graphql_schema(request, draft)
----------------------------------------------------------------------
path: /python/src/hangman.py
repo_name: catalinc/programmingpraxis-solutions  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: 8ba80ac4b037dde92443141d60bd35bf1f98031e  directory_id: e4414bd8152e52855db7ab9065ae12b7329143e0  content_id: 0dd38bbfdc6501bc39f632a253400dd40bbf2d07
snapshot_id: 39cb847877ec46d2fb85740791c24889ab5654a8  revision_id: c0b13906aa76ffac705bf108db138fb9a38bc16a
visit_date: 2021-03-27T16:46:47.781839  revision_date: 2017-09-09T15:17:38  committer_date: 2017-09-09T15:17:38
github_id: 53,532,233  star_events_count: 1  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 1,889  extension: py
content:

#!/usr/bin/env python
# See http://programmingpraxis.com/2011/12/20/hangman/

import random
import sys

HANGMAN = [
    "",
    """ O """,
    """ O | """,
    """ _O | """,
    """ _O_ | """,
    """ _O_ | / """,
    """ _O_ | / \\ """
]

def play_game():
    secret_word = random_word().upper()
    guessed_letters = set()
    failed_attempts = 0
    print_matches(secret_word, guessed_letters)
    while True:
        try:
            letter = raw_input("Your guess ? ").upper()
        except KeyboardInterrupt:
            exit_game()
        if letter in secret_word:
            guessed_letters.add(letter)
        else:
            failed_attempts += 1
            print_hangman(failed_attempts)
        if lose(failed_attempts):
            print("Sorry, you lose...")
            print("The word was: %s" % (" ".join(list(secret_word))))
            break
        print_matches(secret_word, guessed_letters)
        if win(secret_word, guessed_letters):
            print("You nail it !")
            break

def random_word(words_file='words.lst'):
    word = None
    n = 0
    with open(words_file) as f:
        for line in f:
            n += 1
            if random.random() < 1.0 / n:
                word = line
    return word

def print_matches(word, letters):
    out = []
    for l in word:
        if l in letters:
            out.append(l)
        else:
            out.append("_")
    print(" ".join(out))

def exit_game():
    print("Bye !")
    sys.exit(0)

def print_hangman(guess_attempts):
    print HANGMAN[guess_attempts]

def win(secret_word, guessed_letters):
    return len(secret_word) == len(guessed_letters)

def lose(failed_attempts):
    return failed_attempts == len(HANGMAN) - 1

if __name__ == '__main__':
    print("Let's play Hangman !")
    while True:
        play_game()
        if raw_input("Play another ? [Y]/N ").upper() == "N":
            exit_game()
----------------------------------------------------------------------
path: /.venv/bin/django-admin.py
repo_name: karthik018/FacebookPosts  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: 74242c01bb15f0739920d399519d7227f57b8f8a  directory_id: fc91e867bb74cbebcb0ee608f1477ae16af91631  content_id: 96ac186b1a98732e5519c8d218d286e285191802
snapshot_id: 7580afbfab066c6bd09a43be086e4ce9621bbd65  revision_id: 2fbe59e8640ca2da01e0028f7b10c4f9d7b62b65
visit_date: 2020-06-12T05:29:43.493418  revision_date: 2019-07-16T06:16:25  committer_date: 2019-07-16T06:16:25
github_id: 194,204,422  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 157  extension: py
content:

#!/home/ib_admin/FacebookPosts/.venv/bin/python3.7
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
----------------------------------------------------------------------
path: /langs/4/j33.py
repo_name: G4te-Keep3r/HowdyHackers  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: 900ce188b2d480f02d08d8fe381e861ddc612129  directory_id: acb8e84e3b9c987fcab341f799f41d5a5ec4d587  content_id: 32bc61a86b88039321d406fcdd145e2fdf95b792
snapshot_id: 46bfad63eafe5ac515da363e1c75fa6f4b9bca32  revision_id: fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
visit_date: 2020-08-01T12:08:10.782018  revision_date: 2016-11-13T20:45:50  committer_date: 2016-11-13T20:45:50
github_id: 73,624,224  star_events_count: 0  fork_events_count: 1
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 486  extension: py
content:

import sys

def printFunction(lineRemaining):
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print

def main(fileName):
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'j33':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return

if __name__ == '__main__':
    main(sys.argv[1])
----------------------------------------------------------------------
path: /yuancloud/recicler/event/tests/test_mail_schedule.py
repo_name: cash2one/yuancloud  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: 99023c5533e743afb8349cd031816969f2e0f52e  directory_id: 6527b66fd08d9e7f833973adf421faccd8b765f5  content_id: 7b92308e184f89e0d7bc6436545f7d9324c6b05d
snapshot_id: 9a41933514e57167afb70cb5daba7f352673fb4d  revision_id: 5a4fd72991c846d5cb7c5082f6bdfef5b2bca572
visit_date: 2021-06-19T22:11:08.260079  revision_date: 2017-06-29T06:26:15  committer_date: 2017-06-29T06:26:15
github_id: null  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 3,952  extension: py
content:

# -*- coding: utf-8 -*-

import datetime
from dateutil.relativedelta import relativedelta

from yuancloud import fields, tools
from yuancloud.addons.event.tests.common import TestEventCommon
from yuancloud.tools import mute_logger


class TestMailSchedule(TestEventCommon):

    @mute_logger('yuancloud.addons.base.ir.ir_model', 'yuancloud.models')
    def test_00_event_mail_schedule(self):
        """ Test mail scheduling for events """
        self.env['ir.values'].set_default('event.config.settings', 'auto_confirmation', True)

        now = fields.datetime.now()
        event_date_begin = now + relativedelta(days=1)
        event_date_end = now + relativedelta(days=3)

        test_event = self.Event.sudo(self.user_eventmanager).create({
            'name': 'TestEventMail',
            'date_begin': event_date_begin,
            'date_end': event_date_end,
            'seats_max': 10,
            'event_mail_ids': [
                (0, 0, {  # right at subscription
                    'interval_unit': 'now',
                    'interval_type': 'after_sub',
                    'template_id': self.env['ir.model.data'].xmlid_to_res_id('event.event_subscription')}),
                (0, 0, {  # 2 days before event
                    'interval_nbr': 2,
                    'interval_unit': 'days',
                    'interval_type': 'before_event',
                    'template_id': self.env['ir.model.data'].xmlid_to_res_id('event.event_reminder')}),
            ]
        })

        # create some registrations
        self.Registration.sudo(self.user_eventuser).create({
            'event_id': test_event.id,
            'name': 'Reg0',
            'email': '[email protected]',
        })
        self.Registration.sudo(self.user_eventuser).create({
            'event_id': test_event.id,
            'name': 'Reg1',
            'email': '[email protected]',
        })

        # check subscription scheduler
        schedulers = self.EventMail.search([('event_id', '=', test_event.id), ('interval_type', '=', 'after_sub')])
        self.assertEqual(len(schedulers), 1, 'event: wrong scheduler creation')
        self.assertEqual(schedulers[0].scheduled_date, test_event.create_date, 'event: incorrect scheduled date for checking controller')
        # verify that subscription scheduler was auto-executed after each registration
        self.assertEqual(len(schedulers[0].mail_registration_ids), 2, 'event: incorrect number of mail scheduled date')
        mails = self.env['mail.mail'].search([('subject', 'ilike', 'subscription'), ('date', '>=', datetime.datetime.strftime(now, tools.DEFAULT_SERVER_DATETIME_FORMAT))], order='date DESC', limit=3)
        self.assertEqual(len(mails), 2, 'event: wrong number of subscription mail sent')
        for registration in schedulers[0].mail_registration_ids:
            self.assertTrue(registration.mail_sent, 'event: wrongly confirmed mailing on subscription')

        # check before event scheduler
        schedulers = self.EventMail.search([('event_id', '=', test_event.id), ('interval_type', '=', 'before_event')])
        self.assertEqual(len(schedulers), 1, 'event: wrong scheduler creation')
        self.assertEqual(schedulers[0].scheduled_date, datetime.datetime.strftime(event_date_begin + relativedelta(days=-2), tools.DEFAULT_SERVER_DATETIME_FORMAT), 'event: incorrect scheduled date')

        # execute event reminder scheduler explicitly
        schedulers[0].execute()
        self.assertTrue(schedulers[0].mail_sent, 'event: reminder scheduler should have sent an email')
        self.assertTrue(schedulers[0].done, 'event: reminder scheduler should be done')
        mails = self.env['mail.mail'].search([('subject', 'ilike', 'reminder'), ('date', '>=', datetime.datetime.strftime(now, tools.DEFAULT_SERVER_DATETIME_FORMAT))], order='date DESC', limit=3)
        self.assertEqual(len(mails), 2, 'event: wrong number of reminders in outgoing mail queue')
----------------------------------------------------------------------
path: /PyTorch/dev/nlp/Speech_Transformer_ID0487_for_PyTorch/test/test_lr.py
repo_name: Ascend/ModelZoo-PyTorch  branch_name: refs/heads/master  license_type: permissive
detected_licenses: [ "GPL-1.0-or-later", "MIT", "Apache-2.0", "BSD-2-Clause", "BSD-3-Clause", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-unknown-license-reference" ]
blob_id: f2b1502e69ebbcb83d168b947b4e1536ab2f5ca9  directory_id: 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02  content_id: 50038c0c6c0a3d30f8e5cf25a74a0eebf9c1ebc4
snapshot_id: 4c89414b9e2582cef9926d4670108a090c839d2d  revision_id: 92acc188d3a0f634de58463b6676e70df83ef808
visit_date: 2023-07-19T12:40:00.512853  revision_date: 2023-07-17T02:48:18  committer_date: 2023-07-17T02:48:18
github_id: 483,502,469  star_events_count: 23  fork_events_count: 6
gha_license_id: Apache-2.0  gha_event_created_at: 2022-10-15T09:29:12  gha_created_at: 2022-04-20T04:11:18  gha_language: Python
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 1,717  extension: py
content:

# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import matplotlib.pyplot as plt

if __name__ == '__main__':
    k = 0.2
    warmup_steps = 4000
    d_model = 512
    init_lr = d_model ** (-0.5)

    lr_list = []
    for step_num in range(1, 500000):
        lr = k * init_lr * min(step_num ** (-0.5), step_num * (warmup_steps ** (-1.5)))
        lr_list.append(lr)

    print(lr_list[:100])
    print(lr_list[-100:])

    plt.plot(lr_list)
    plt.show()
----------------------------------------------------------------------
path: /Python_codes/p02631/s475540632.py
repo_name: Aasthaengg/IBMdataset  branch_name: refs/heads/main  license_type: no_license  detected_licenses: []
blob_id: 7d82abc23d5e3d4bf5e54cd6ec2da4a4d1a8768f  directory_id: ca7aa979e7059467e158830b76673f5b77a0f5a3  content_id: 3a87c44b29ca2411c0463a78d1676b61c5e7616c
snapshot_id: 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901  revision_id: f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
visit_date: 2023-04-22T10:22:44.763102  revision_date: 2021-05-13T17:27:22  committer_date: 2021-05-13T17:27:22
github_id: 367,112,348  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 267  extension: py
content (comments translated from Japanese):

N = int(input())
a = list(map(int, input().split()))

# XOR operator: ^
# compute the XOR of all elements of a; call it S
S = 0
for aa in a:
    S ^= aa

# the i-th value is the XOR of a_i and S
ans = []
for ai in a:
    ans.append(S ^ ai)

print(*ans)
----------------------------------------------------------------------
path: /res/packages/scripts/scripts/common/Lib/plat-mac/Carbon/Cm.py
repo_name: webiumsk/WoT-0.9.18.0  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: 8319e02dd8e51c0f3c972288a559d15a0f3bb1c5  directory_id: acd41dc7e684eb2e58b6bef2b3e86950b8064945  content_id: 81888a1e6189f6251d73285153430da7c7720a3a
snapshot_id: e07acd08b33bfe7c73c910f5cb2a054a58a9beea  revision_id: 89979c1ad547f1a1bbb2189f5ee3b10685e9a216
visit_date: 2021-01-20T09:37:10.323406  revision_date: 2017-05-04T13:51:43  committer_date: 2017-05-04T13:51:43
github_id: 90,268,530  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: WINDOWS-1250  language: Python  is_vendor: false  is_generated: false  length_bytes: 362  extension: py
content (decompiler timestamps translated from Czech):

# 2017.05.04 15:34:09 Central Europe (summer time)
# Embedded file name: scripts/common/Lib/plat-mac/Carbon/Cm.py
from _Cm import *
# okay decompyling C:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\Lib\plat-mac\Carbon\Cm.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.05.04 15:34:09 Central Europe (summer time)
----------------------------------------------------------------------
path: /venv/Lib/site-packages/falcon/bench/bench.py
repo_name: vrian/orsen  branch_name: refs/heads/master  license_type: permissive  detected_licenses: [ "Apache-2.0" ]
blob_id: 8e3269fafdfc4c4927faaa47a88c3a3c531bf398  directory_id: 676f6f2d02db6aeeaa1bb0b28ab49e8c73923d0e  content_id: b0c60863ab0aaefa5eee2e548c536b1e6bc55c82
snapshot_id: ce34f74ea3a14c95d37ffa5c694b7c66725925df  revision_id: 9c10148aba62868fad4b679a4b9b717829586e96
visit_date: 2023-01-21T21:47:06.210918  revision_date: 2018-06-23T04:46:26  committer_date: 2018-06-23T04:46:26
github_id: 120,284,869  star_events_count: 1  fork_events_count: 0
gha_license_id: Apache-2.0  gha_event_created_at: 2023-01-09T09:39:16  gha_created_at: 2018-02-05T09:44:03  gha_language: Python
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 11,546  extension: py
content:

#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014 by Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import argparse
from collections import defaultdict, deque
from decimal import Decimal
import gc
import inspect
import platform
import random
import sys
import tempfile
import timeit

try:
    import cProfile
except ImportError:
    import profile as cProfile

try:
    import guppy
except ImportError:
    heapy = None
else:
    heapy = guppy.hpy()

try:
    import pprofile
except ImportError:
    pprofile = None

try:
    import vmprof
    from vmshare.service import Service
except ImportError:
    vmprof = None

from falcon.bench import create  # NOQA
import falcon.testing as helpers

# NOTE(kgriffs): Based on testing, these values provide a ceiling that's
# several times higher than fast x86 hardware can achieve today.
ITER_DETECTION_MAX_ATTEMPTS = 27
ITER_DETECTION_MULTIPLIER = 1.7
ITER_DETECTION_STARTING = 3000

# NOTE(kgriffs): Benchmark duration range, in seconds, to target
ITER_DETECTION_DURATION_MIN = 1.0
ITER_DETECTION_DURATION_MAX = 5.0

JIT_WARMING_MULTIPLIER = 30

PYPY = platform.python_implementation() == 'PyPy'

BODY = helpers.rand_string(10240, 10240).encode('utf-8')  # NOQA
HEADERS = {'X-Test': 'Funky Chicken'}  # NOQA


class StartResponseMockLite(object):
    """Mock object representing a WSGI `start_response` callable."""

    def __init__(self):
        self._called = 0
        self.status = None
        self.headers = None
        self.exc_info = None

    def __call__(self, status, headers, exc_info=None):
        """Implements the PEP-3333 `start_response` protocol."""
        self._called += 1
        self.status = status
        self.headers = headers
        self.exc_info = exc_info

    @property
    def call_count(self):
        return self._called


def bench(func, iterations, stat_memory):
    gc.collect()
    heap_diff = None

    if heapy and stat_memory:
        heap_before = heapy.heap()

    total_sec = timeit.timeit(func, setup=gc.enable, number=iterations)

    if heapy and stat_memory:
        heap_diff = heapy.heap() - heap_before

    sec_per_req = Decimal(str(total_sec)) / Decimal(str(iterations))

    return (sec_per_req, heap_diff)


def determine_iterations(func):
    # NOTE(kgriffs): Algorithm adapted from IPython's magic timeit
    # function to determine iterations so that 0.2 <= total time < 2.0
    iterations = ITER_DETECTION_STARTING
    for __ in range(1, ITER_DETECTION_MAX_ATTEMPTS):
        gc.collect()

        total_sec = timeit.timeit(func, setup=gc.enable, number=int(iterations))

        if total_sec >= ITER_DETECTION_DURATION_MIN:
            assert total_sec < ITER_DETECTION_DURATION_MAX
            break

        iterations *= ITER_DETECTION_MULTIPLIER

    return int(iterations)


def profile(name, env, filename=None, verbose=False):
    if filename:
        filename = name + '-' + filename
        print('Profiling %s ==> %s' % (name, filename))
    else:
        filename = None

    title = name + ' profile'
    print()
    print('=' * len(title))
    print(title)
    print('=' * len(title))

    func = create_bench(name, env)

    gc.collect()

    num_iterations = 100000
    if PYPY:
        print('JIT warmup...')

        # TODO(kgriffs): Measure initial time, and keep iterating until
        # performance increases and then steadies
        for x in range(num_iterations * JIT_WARMING_MULTIPLIER):
            func()

        print('Ready.')

    code = 'for x in range({0}): func()'.format(num_iterations)

    if verbose:
        if pprofile is None:
            print('pprofile not found. Please install pprofile and try again.')
            return

        pprofile.runctx(code, locals(), globals(), filename=filename)
    else:
        cProfile.runctx(code, locals(), globals(), sort='tottime', filename=filename)


def profile_vmprof(name, env):
    if vmprof is None:
        print('vmprof not found. Please install vmprof and try again.')
        return

    func = create_bench(name, env)

    gc.collect()

    #
    # Based on: https://github.com/vmprof/vmprof-python/blob/master/vmprof/__main__.py
    #
    prof_file = tempfile.NamedTemporaryFile(delete=False)
    filename = prof_file.name

    vmprof.enable(prof_file.fileno())
    try:
        for __ in range(1000000):
            func()
    except BaseException as e:
        if not isinstance(e, (KeyboardInterrupt, SystemExit)):
            raise

    vmprof.disable()

    service = Service('vmprof.com')
    service.post({
        Service.FILE_CPU_PROFILE: filename,
        Service.FILE_JIT_PROFILE: filename + '.jit',
        'argv': ' '.join(sys.argv[:]),
        'VM': platform.python_implementation(),
    })

    prof_file.close()


def exhaust(iterator_or_generator):
    # from https://docs.python.org/dev/library/itertools.html#itertools-recipes
    deque(iterator_or_generator, maxlen=0)


def create_bench(name, env):
    srmock = StartResponseMockLite()

    function = name.lower().replace('-', '_')
    app = eval('create.{0}(BODY, HEADERS)'.format(function))

    def bench():
        app(env, srmock)
        assert srmock.status == '200 OK'

    def bench_generator():
        exhaust(app(env, srmock))
        assert srmock.status == '200 OK'

    if inspect.isgeneratorfunction(app):
        return bench_generator
    else:
        return bench


def consolidate_datasets(datasets):
    results = defaultdict(list)
    for dataset in datasets:
        for name, sec_per_req, _ in dataset:
            results[name].append(sec_per_req)

    return [(name, min(vector)) for name, vector in results.items()]


def round_to_int(dec):
    return int(dec.to_integral_value())


def avg(array):
    return sum(array) / len(array)


def hello_env():
    request_headers = {'Content-Type': 'application/json'}
    return helpers.create_environ('/hello/584/test', query_string='limit=10&thing=ab', headers=request_headers)


def queues_env():
    request_headers = {'Content-Type': 'application/json'}
    path = ('/v1/852809/queues/0fd4c8c6-bd72-11e2-8e47-db5ebd4c8125'
            '/claims/db5ebd4c8125')
    qs = 'limit=10&thing=a+b&x=%23%24'
    return helpers.create_environ(path, query_string=qs, headers=request_headers)


def get_env(framework):
    return queues_env() if framework == 'falcon-ext' else hello_env()


def run(frameworks, trials, iterations, stat_memory):
    # Skip any frameworks that are not installed
    for name in frameworks:
        try:
            create_bench(name, hello_env())
        except ImportError as ex:
            print(ex)
            print('Skipping missing library: ' + name)
            del frameworks[frameworks.index(name)]

    print()

    datasets = []

    if not frameworks:
        print('Nothing to do.\n')
        return datasets

    benchmarks = []
    for name in frameworks:
        bm = create_bench(name, get_env(name))
        bm_iterations = iterations if iterations else determine_iterations(bm)

        if PYPY:
            print('{}: JIT warmup'.format(name))

            # TODO(kgriffs): Measure initial time, and keep iterating until
            # performance increases and then steadies
            bench(bm, bm_iterations * JIT_WARMING_MULTIPLIER, False)
            bm_iterations = iterations if iterations else determine_iterations(bm)

        benchmarks.append((name, bm_iterations, bm))
        print('{}: {} iterations'.format(name, bm_iterations))

    print()

    for r in range(trials):
        random.shuffle(frameworks)

        sys.stdout.write('Benchmarking, Trial %d of %d' % (r + 1, trials))
        sys.stdout.flush()

        dataset = []
        for name, bm_iterations, bm in benchmarks:
            sec_per_req, heap_diff = bench(bm, bm_iterations, stat_memory)

            dataset.append((name, sec_per_req, heap_diff))
            sys.stdout.write('.')
            sys.stdout.flush()

        datasets.append(dataset)
        print('done.')

    return datasets


def main():
    frameworks = [
        'bottle',
        'django',
        'falcon',
        'falcon-ext',
        'flask',
        'pecan',
        'werkzeug',
    ]

    parser = argparse.ArgumentParser(description='Falcon benchmark runner')
    parser.add_argument('-b', '--benchmark', type=str, action='append', choices=frameworks, dest='frameworks', nargs='+')
    parser.add_argument('-i', '--iterations', type=int, default=0)
    parser.add_argument('-t', '--trials', type=int, default=10)
    parser.add_argument('-p', '--profile', type=str, choices=['standard', 'verbose', 'vmprof'])
    parser.add_argument('-o', '--profile-output', type=str, default=None)
    parser.add_argument('-m', '--stat-memory', action='store_true')
    args = parser.parse_args()

    if args.stat_memory and heapy is None:
        print('WARNING: Guppy not installed; memory stats are unavailable.\n')

    if args.frameworks:
        frameworks = args.frameworks

    # Normalize frameworks type
    normalized_frameworks = []
    for one_or_many in frameworks:
        if isinstance(one_or_many, list):
            normalized_frameworks.extend(one_or_many)
        else:
            normalized_frameworks.append(one_or_many)

    frameworks = normalized_frameworks

    # Profile?
    if args.profile:
        framework = 'falcon-ext'
        if args.profile == 'vmprof':
            profile_vmprof(framework, get_env(framework))
        else:
            profile(framework, get_env(framework), filename=args.profile_output, verbose=(args.profile == 'verbose'))

        print()
        return

    # Otherwise, benchmark
    datasets = run(frameworks, args.trials, args.iterations, args.stat_memory)

    if not datasets:
        return

    dataset = consolidate_datasets(datasets)
    dataset = sorted(dataset, key=lambda r: r[1])
    baseline = dataset[-1][1]

    print('\nResults:\n')

    for i, (name, sec_per_req) in enumerate(dataset):
        req_per_sec = round_to_int(Decimal(1) / sec_per_req)
        us_per_req = (sec_per_req * Decimal(10 ** 6))
        factor = round_to_int(baseline / sec_per_req)

        print('{3}. {0:.<20s}{1:.>06d} req/sec or {2: >3.2f} μs/req ({4}x)'.
              format(name, req_per_sec, us_per_req, i + 1, factor))

    if heapy and args.stat_memory:
        print()
        for name, _, heap_diff in datasets[0]:
            title = 'Memory change induced by ' + name
            print()
            print('=' * len(title))
            print(title)
            print('=' * len(title))
            print(heap_diff)

    print()


if __name__ == '__main__':
    main()
----------------------------------------------------------------------
path: /main/apps.py
repo_name: chensandiego/elast-python  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: b3df535e0bf14619764330e153f9691f97ebfe7a  directory_id: ae3df32afc258c80cb2ce504ce87fa5bb7740ea7  content_id: a1c166fbd7a6ef9873d13d2341e00132f5d8b9dd
snapshot_id: 622251d806b947899d74dc064c19193b418ac505  revision_id: 8c28a47acfc5ef540a017abcd786cf815591b163
visit_date: 2020-12-24T05:40:38.917432  revision_date: 2016-08-08T09:16:39  committer_date: 2016-08-08T09:16:39
github_id: 65,190,208  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 240  extension: py
content:

from __future__ import unicode_literals

from django.apps import AppConfig
from elasticsearch_dsl.connections import connections


class MainConfig(AppConfig):
    name = 'main'

    def ready(self):
        connections.create_connection()
----------------------------------------------------------------------
path: /Python_codes/p03804/s359892659.py
repo_name: Aasthaengg/IBMdataset  branch_name: refs/heads/main  license_type: no_license  detected_licenses: []
blob_id: 03077baac22100638f1f73d6914d61d5790e636d  directory_id: ca7aa979e7059467e158830b76673f5b77a0f5a3  content_id: cf5a2561cd5c03f89d2bfa0dc2d375e6139544c1
snapshot_id: 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901  revision_id: f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
visit_date: 2023-04-22T10:22:44.763102  revision_date: 2021-05-13T17:27:22  committer_date: 2021-05-13T17:27:22
github_id: 367,112,348  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 477  extension: py
content:

n, m = map(int, input().split())
a = [""] * n
b = [""] * m
for i in range(n):
    a[i] = input()
for i in range(m):
    b[i] = input()

for i in range(n):
    for j in range(n):
        if i + m > n or j + m > n:
            continue
        flag = True
        for k in range(m):
            for l in range(m):
                if a[i + k][j + l] != b[k][l]:
                    flag = False
        if flag is True:
            print("Yes")
            exit(0)

print("No")
----------------------------------------------------------------------
path: /solutions_python/Problem_200/1005.py
repo_name: dr-dos-ok/Code_Jam_Webscraper  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: d5ca2bcbd5de3c1b9c9bac46eab8058ddbdaa268  directory_id: 15f321878face2af9317363c5f6de1e5ddd9b749  content_id: d1785f14535df9f9f8739a47a08da1ea17308063
snapshot_id: c06fd59870842664cd79c41eb460a09553e1c80a  revision_id: 26a35bf114a3aa30fc4c677ef069d95f41665cc0
visit_date: 2020-04-06T08:17:40.938460  revision_date: 2018-10-14T10:12:47  committer_date: 2018-10-14T10:12:47
github_id: null  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 405  extension: py
content:

import numpy as np


def solve(n, j):
    front = 0
    for i in range(1, len(n)):
        if n[-i-1] > n[-i]:
            n[-i-1] -= 1
            front = i
    if front:
        n[-front:] = 9
    if not n[0]:
        n = n[1:]
    print('Case #{}: {}'.format(j+1, ''.join(map(str, n))))


def main():
    T = int(input())
    for i in range(T):
        solve(np.array(list(map(int, list(input())))), i)


if __name__ == '__main__':
    main()
----------------------------------------------------------------------
path: /tests/test_utils.py
repo_name: poonyisaTH/gsheets-db-api  branch_name: refs/heads/master  license_type: permissive  detected_licenses: [ "MIT" ]
blob_id: 4b4fb06a5c7779a15bbde10c3ca456691d7aa16b  directory_id: 2ed6ad4a736879a47d192159da45ca56610c089a  content_id: 5322f50e74f0e19c141fd1adbdd2a5b05e92fb39
snapshot_id: a82bd35984766697757cc96aa74a1281d948f019  revision_id: f023b32986d4da9a501fca8d435f2b6edc153353
visit_date: 2023-05-29T15:01:10.604324  revision_date: 2021-02-17T20:59:41  committer_date: 2021-02-17T20:59:41
github_id: null  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 3,952  extension: py
content (exact whitespace inside the expected strings was lost in flattening; reconstructed best-effort):

# -*- coding: utf-8 -*-

import unittest

from moz_sql_parser import parse
import pyparsing

from .context import format_gsheet_error, format_moz_error


class UtilsTestSuite(unittest.TestCase):

    def test_format_moz_error(self):
        query = 'SELECT ))) FROM table'
        with self.assertRaises(pyparsing.ParseException) as context:
            parse(query)

        result = format_moz_error(query, context.exception)
        expected = (
            'SELECT ))) FROM table\n'
            '       ^\n'
            'Expected {{expression1 [{[as] column_name1}]} | "*"} '
            '(at char 7), (line:1, col:8)'
        )
        self.assertEqual(result, expected)

    def test_format_gsheet_error(self):
        query = 'SELECT A + B FROM "http://docs.google.com"'
        translated_query = 'SELECT A + B'
        errors = [{
            'reason': 'invalid_query',
            'detailed_message': (
                "Invalid query: Can't perform the function sum on values that "
                "are not numbers"
            ),
            'message': 'INVALID_QUERY',
        }]
        result = format_gsheet_error(query, translated_query, errors)
        expected = (
            'Original query:\n'
            'SELECT A + B FROM "http://docs.google.com"\n\n'
            'Translated query:\n'
            'SELECT A + B\n\n'
            'Error:\n'
            "Invalid query: Can't perform the function sum on values that "
            "are not numbers"
        )
        self.assertEqual(result, expected)

    def test_format_gsheet_error_caret(self):
        query = 'SELECT A IS NULL FROM "http://docs.google.com"'
        translated_query = 'SELECT A IS NULL'
        errors = [{
            'reason': 'invalid_query',
            'detailed_message': (
                'Invalid query: PARSE_ERROR: Encountered " "is" "IS "" at '
                'line 1, column 10.\nWas expecting one of:\n'
                '    <EOF> \n'
                '    "where" ...\n'
                '    "group" ...\n'
                '    "pivot" ...\n'
                '    "order" ...\n'
                '    "skipping" ...\n'
                '    "limit" ...\n'
                '    "offset" ...\n'
                '    "label" ...\n'
                '    "format" ...\n'
                '    "options" ...\n'
                '    "," ...\n'
                '    "*" ...\n'
                '    "+" ...\n'
                '    "-" ...\n'
                '    "/" ...\n'
                '    "%" ...\n'
                '    "*" ...\n'
                '    "/" ...\n'
                '    "%" ...\n'
                '    "+" ...\n'
                '    "-" ...\n'
                '    '
            ),
            'message': 'INVALID_QUERY',
        }]
        result = format_gsheet_error(query, translated_query, errors)
        expected = (
            'Original query:\n'
            'SELECT A IS NULL FROM "http://docs.google.com"\n\n'
            'Translated query:\n'
            'SELECT A IS NULL\n\n'
            'Error:\n'
            'SELECT A IS NULL\n'
            '         ^\n'
            'Invalid query: PARSE_ERROR: Encountered " "is" "IS "" at line 1, '
            'column 10.\n'
            'Was expecting one of:\n'
            '    <EOF> \n'
            '    "where" ...\n'
            '    "group" ...\n'
            '    "pivot" ...\n'
            '    "order" ...\n'
            '    "skipping" ...\n'
            '    "limit" ...\n'
            '    "offset" ...\n'
            '    "label" ...\n'
            '    "format" ...\n'
            '    "options" ...\n'
            '    "," ...\n'
            '    "*" ...\n'
            '    "+" ...\n'
            '    "-" ...\n'
            '    "/" ...\n'
            '    "%" ...\n'
            '    "*" ...\n'
            '    "/" ...\n'
            '    "%" ...\n'
            '    "+" ...\n'
            '    "-" ...'
        )
        self.assertEqual(result, expected)
----------------------------------------------------------------------
path: /前后端分离-vue-DRF/houfen_DRF-projects/15day周末作业/booklogin/user/views.py
repo_name: Suijng/1809_data  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: 26c3ed7037c5e7c99c281a9602db0848de390886  directory_id: ce55c319f5a78b69fefc63595d433864a2e531b5  content_id: ea76a3b29e6788ab22cbcb4e135039d76dd5f722
snapshot_id: a072c875e8746190e3b715e53f1afe3323f4666b  revision_id: 45f8a57089f5c30ccc1a3cddb03b76dc95355417
visit_date: 2022-12-21T12:38:30.458291  revision_date: 2019-09-27T01:14:41  committer_date: 2019-09-27T01:14:41
github_id: 211,207,071  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: 2022-11-22T03:16:18  gha_created_at: 2019-09-27T00:55:21  gha_language: HTML
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 7,016  extension: py
content (comments and messages translated from Chinese):

from django.shortcuts import render

# Create your views here.
from rest_framework.views import APIView
from rest_framework.response import Response
from user.serializers import ResgsterUserSerializer, CategorySerializer, \
    BookDetailSerializer, BookSerializer, \
    ChpaterListSerializer, ChpaterDetailSerializer
from user.models import User, Token, Category, Book, Chpater
from utils.pagination import MyPageNumberPagination

# Register
# class RegisterView(APIView):
#
#     def post(self, request, *args, **kwargs):
#         ret = {
#             'code': 1,
#             'msg': 'registration succeeded'
#         }
#         # get the POST request parameters
#         data = request.data
#         # serialize the request parameters
#         ser = ResgsterUserSerializer(data=data)
#         if ser.is_valid():  # validate the fields
#             print(ser.validated_data)
#             ser.save()
#         else:
#             # on validation failure, print the errors
#             print(ser.errors)
#             ret['code'] = 0
#             ret['msg'] = 'invalid parameters, registration failed'
#
#         return Response(ret)

from rest_framework.viewsets import GenericViewSet
from rest_framework.mixins import CreateModelMixin, ListModelMixin, RetrieveModelMixin
from rest_framework import status

# Register
class RegisterView(CreateModelMixin, GenericViewSet):
    queryset = User.objects.all()
    serializer_class = ResgsterUserSerializer

    # override the internal create method
    def create(self, request, *args, **kwargs):
        ret = {
            'code': 1,
            'msg': 'registration succeeded'
        }
        serializer = self.get_serializer(data=request.data)
        if serializer.is_valid():
            self.perform_create(serializer)
            headers = self.get_success_headers(serializer.data)
            return Response(ret, status=status.HTTP_201_CREATED, headers=headers)
        else:
            # on validation failure, print the errors
            print(serializer.errors)
            ret['code'] = 0
            ret['msg'] = 'invalid parameters, registration failed'
        return Response(ret)

# ************************** Login

# generate a token
import time, hashlib

def get_token(name, password):
    add_time = str(int(time.time() * 1000))
    md5_obj = hashlib.md5(add_time.encode('utf8'))
    md5_obj.update(name.encode('utf8'))
    md5_obj.update(password.encode('utf8'))
    return md5_obj.hexdigest()

# Login
class LoginView(APIView):

    def post(self, request, *args, **kwargs):
        ret = {
            'code': 1,
            'msg': 'login succeeded'
        }
        # get the POST request
        data = request.data
        # get the username
        name = data['name']
        # get the password
        password = data['password']
        try:
            obj = User.objects.filter(name=name).first()
            if obj:
                # the user exists
                if obj.password == password:
                    # login succeeded: generate a login token
                    token = get_token(name, password)
                    Token.objects.update_or_create(user=obj, defaults={'token': token})
                    ret['token'] = token
                else:
                    # wrong password
                    ret['msg'] = 'wrong account or password'
                    ret['code'] = 0
            else:
                ret['msg'] = 'this user does not exist'
                ret['code'] = 0
        except Exception as e:
            print(e)
            ret['msg'] = 'exception caught'
            ret['code'] = 0
        return Response(ret)

# ****************** Book categories
class CategoryView(ListModelMixin, RetrieveModelMixin, GenericViewSet):
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    pagination_class = MyPageNumberPagination

    def get_serializer_class(self):
        # choose the serializer class dynamically
        if self.action == 'list':
            return CategorySerializer
        elif self.action == 'retrieve':
            return BookSerializer

    # return a dict containing a list of dicts to the front end
    def list(self, request, *args, **kwargs):
        print(request.version)  # print the version
        ret = {
            'code': 1,
        }
        queryset = self.filter_queryset(self.get_queryset())
        # show all data without pagination
        serializer = self.get_serializer(queryset, many=True)
        ret['data'] = serializer.data
        return Response(ret)

    # ***** books under a category
    def retrieve(self, request, *args, **kwargs):
        category_id = kwargs.get('pk')
        if category_id:
            books = Book.objects.filter(category=category_id)
            # call paginate_queryset to get the current page of data
            page = self.paginate_queryset(books)
            # check page to see whether pagination is in use
            if page is not None:
                serializer = self.get_serializer(page, many=True)
                return self.get_paginated_response(serializer.data)

# ******** book detail view: get the url for each book
class BookDetailView(RetrieveModelMixin, GenericViewSet):
    queryset = Book.objects.all()
    serializer_class = BookDetailSerializer

    def retrieve(self, request, *args, **kwargs):
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        ret = {'code': 1, 'data': serializer.data}
        return Response(ret)

# chapter list view
from utils.authenandpermission import MyPermission, MyAuthentication

class ChapterView(ListModelMixin, RetrieveModelMixin, GenericViewSet):
    queryset = Chpater.objects.all()
    serializer_class = ChpaterListSerializer
    pagination_class = MyPageNumberPagination

    def get_serializer_class(self):
        if self.action == 'list':
            return ChpaterListSerializer
        elif self.action == 'retrieve':
            return ChpaterDetailSerializer

    # authentication
    def get_authenticators(self):
        if self.kwargs.get('pk'):
            # fetching chapter detail by chapter id
            return [MyAuthentication(), ]
        return []

    # permissions
    def get_permissions(self):
        if self.kwargs.get('pk'):
            # fetching chapter detail by chapter id: return the permission classes
            return [MyPermission(), ]
        return []

    def list(self, request, *args, **kwargs):
        book_id = kwargs.get('bookid')
        if book_id:
            chpaters = Chpater.objects.filter(book=book_id)
            # call paginate_queryset to get the current page of data
            page = self.paginate_queryset(chpaters)
            # check page to see whether pagination is in use
            if page is not None:
                serializer = self.get_serializer(page, many=True)
                return self.get_paginated_response(serializer.data)

    def retrieve(self, request, *args, **kwargs):
        # get chapter detail by chapter id
        instance = self.get_object()
        serializer = self.get_serializer(instance)
        ret = {'code': 1, 'data': serializer.data}
        return Response(ret)
----------------------------------------------------------------------
path: /59.集合数据的操作.py
repo_name: puhaoran12/python_note  branch_name: refs/heads/master  license_type: no_license  detected_licenses: []
blob_id: bcf4694b4be4de84974a88f8c1e0c68664a56527  directory_id: 4913fb7fd32c3dd0da53af7a012569ec2254b35a  content_id: 75c83dbf6875876bad10856772cd2746191883a6
snapshot_id: 8a21954050ba3126f2ef6d5d1e4a2904df954b9b  revision_id: b807e7b7dd90c87cee606f50421400c8f3d0ba03
visit_date: 2023-07-07T20:20:04.546541  revision_date: 2021-08-21T02:17:12  committer_date: 2021-08-21T02:17:12
github_id: 398,439,725  star_events_count: 0  fork_events_count: 0
gha_license_id: null  gha_event_created_at: null  gha_created_at: null  gha_language: null
src_encoding: UTF-8  language: Python  is_vendor: false  is_generated: false  length_bytes: 325  extension: py
content (comments translated from Chinese):

# intersection: intersection() or &
s1 = {10, 20, 30}
s2 = {30, 40, 50}
print(s1.intersection(s2))
print(s1 & s2)

# union: union() or |
print(s1.union(s2))
print(s1 | s2)

# difference: difference() or -
print(s1.difference(s2))
print(s1 - s2)

# symmetric difference: symmetric_difference() or ^
print(s1.symmetric_difference(s2))
print(s1 ^ s2)
cbc7ff8ebf0f55d2d54140505ce153702a872ce4
9784a90cac667e8e0aaba0ca599b4255b215ec67
/convert_models.py
d3b743be02b1e3c57c11c482699a1440c12daa44
[ "MIT" ]
permissive
osmr/imgclsmob
d2f48f01ca541b20119871393eca383001a96019
f2993d3ce73a2f7ddba05da3891defb08547d504
refs/heads/master
2022-07-09T14:24:37.591824
2021-12-14T10:15:31
2021-12-14T10:15:31
140,285,687
3,017
624
MIT
2022-07-04T15:18:37
2018-07-09T12:57:46
Python
UTF-8
Python
false
false
87,933
py
""" Script for converting models between frameworks (MXNet, Gluon, PyTroch, Chainer, Keras, TensorFlow). """ import argparse import logging import re import numpy as np from common.logger_utils import initialize_logging def parse_args(): parser = argparse.ArgumentParser(description="Convert models (Gluon/PyTorch/Chainer/MXNet/Keras/TF/TF2)", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( "--src-fwk", type=str, required=True, help="source model framework name") parser.add_argument( "--dst-fwk", type=str, required=True, help="destination model framework name") parser.add_argument( "--src-model", type=str, required=True, help="source model name") parser.add_argument( "--dst-model", type=str, required=True, help="destination model name") parser.add_argument( "--src-params", type=str, default="", help="source model parameter file path") parser.add_argument( "--dst-params", type=str, default="", help="destination model parameter file path") parser.add_argument( "--load-ignore-extra", action="store_true", help="ignore extra layers in the source PyTroch model") parser.add_argument( "--remove-module", action="store_true", help="enable if stored PyTorch model has module") parser.add_argument( "--src-num-classes", type=int, default=1000, help="number of classes for source model") parser.add_argument( "--src-in-channels", type=int, default=3, help="number of input channels for source model") parser.add_argument( "--dst-num-classes", type=int, default=1000, help="number of classes for destination model") parser.add_argument( "--dst-in-channels", type=int, default=3, help="number of input channels for destination model") parser.add_argument( "--model-type", type=str, default="image", help="model type (image or audio)") parser.add_argument( "--save-dir", type=str, default="", help="directory of saved models and log-files") parser.add_argument( "--logging-file-name", type=str, default="train.log", help="filename of training log") args = parser.parse_args() return args def prepare_src_model(src_fwk, src_model, src_params_file_path, dst_fwk, ctx, use_cuda, load_ignore_extra=False, remove_module=False, num_classes=None, in_channels=None): ext_src_param_keys = None ext_src_param_keys2 = None src_net = None if src_fwk == "gluon": from gluon.utils import prepare_model as prepare_model_gl src_net = prepare_model_gl( model_name=src_model, use_pretrained=False, pretrained_model_file_path=src_params_file_path, dtype=np.float32, tune_layers="", classes=(num_classes if num_classes > 0 else None), in_channels=in_channels, ctx=ctx) src_params = src_net._collect_params_with_prefix() src_param_keys = list(src_params.keys()) if src_model in ["oth_resnet50_v1", "oth_resnet101_v1", "oth_resnet152_v1", "oth_resnet50_v1b", "oth_resnet101_v1b", "oth_resnet152_v1b"]: src_param_keys = [key for key in src_param_keys if not (key.startswith("features.") and key.endswith(".bias"))] if src_model in ["oth_resnet50_v1", "oth_resnet101_v1", "oth_resnet152_v1", "oth_resnet50_v1b", "oth_resnet101_v1b", "oth_resnet152_v1b"]: src_param_keys = [key for key in src_param_keys if not (key.startswith("features.") and key.endswith(".bias"))] if src_model.startswith("wrn20_10_1bit") or src_model.startswith("wrn20_10_32bit"): src_param_keys = [key for key in src_param_keys if not (key.startswith("features.") and (key.endswith(".bn.gamma") or key.endswith(".bn.beta")))] if dst_fwk == "chainer": src_param_keys_ = src_param_keys.copy() src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".running_mean")) 
and (not key.endswith(".running_var"))] ext_src_param_keys = [key for key in src_param_keys_ if (key.endswith(".running_mean")) or (key.endswith(".running_var"))] if src_model in ["condensenet74_c4_g4", "condensenet74_c8_g8"]: src_param_keys_ = src_param_keys.copy() src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".index"))] ext_src_param_keys2 = [key for key in src_param_keys_ if (key.endswith(".index"))] elif src_model.startswith("xdensenet"): src_param_keys_ = src_param_keys.copy() src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".mask"))] ext_src_param_keys2 = [key for key in src_param_keys_ if (key.endswith(".mask"))] elif src_model.startswith("jasper") or src_model.startswith("quartznet"): src_param_keys_ = src_param_keys.copy() src_param_keys = [key for key in src_param_keys_ if (not key.endswith(".window")) and (not key.endswith(".fb"))] ext_src_param_keys2 = [key for key in src_param_keys_ if (key.endswith(".window")) or (key.endswith(".fb"))] elif src_fwk == "pytorch": from pytorch.utils import prepare_model as prepare_model_pt src_net = prepare_model_pt( model_name=src_model, use_pretrained=False, pretrained_model_file_path=src_params_file_path, use_cuda=use_cuda, use_data_parallel=False, load_ignore_extra=load_ignore_extra, num_classes=(num_classes if num_classes > 0 else None), in_channels=in_channels, remove_module=remove_module) src_params = src_net.state_dict() src_param_keys = list(src_params.keys()) if dst_fwk != "pytorch": src_param_keys = [key for key in src_param_keys if not key.endswith("num_batches_tracked")] if src_model in ["oth_shufflenetv2_wd2"]: src_param_keys = [key for key in src_param_keys if not key.startswith("network.0.")] if src_model.startswith("oth_dla"): src1 = list(filter(re.compile("\.project").search, src_param_keys)) src1n = [key for key in src_param_keys if key not in src1] src2 = [] for i in range(2, 6): src1_i = list(filter(re.compile("level{}".format(i)).search, src1)) if len(src1_i) == 0: continue max_len = max([len(k) for k in src1_i]) pattern_i = [k for k in src1_i if len(k) == max_len][0][:-21] src2_i = list(filter(re.compile(pattern_i).search, src1)) src2 += src2_i src_param_keys = src2 + src1n elif src_fwk == "mxnet": import mxnet as mx src_sym, src_arg_params, src_aux_params = mx.model.load_checkpoint( prefix=src_params_file_path, epoch=0) src_params = {} src_params.update(src_arg_params) src_params.update(src_aux_params) src_param_keys = list(src_params.keys()) elif src_fwk == "tensorflow": # import tensorflow as tf # from tensorflow_.utils import prepare_model as prepare_model_tf # src_net = prepare_model_tf( # model_name=src_model, # classes=num_classes, # use_pretrained=False, # pretrained_model_file_path=src_params_file_path) # src_param_keys = [v.name for v in tf.global_variables()] # src_params = {v.name: v for v in tf.global_variables()} src_net = None src_params = dict(np.load(src_params_file_path)) src_param_keys = list(src_params.keys()) elif (src_fwk == "tf2") and (dst_fwk == "tfl"): import tensorflow as tf from tensorflow2.utils import prepare_model as prepare_model_tf2 gpus = tf.config.experimental.list_physical_devices("GPU") if gpus: for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) src_net = prepare_model_tf2( model_name=src_model, use_pretrained=True, pretrained_model_file_path="") batch_size = 1 input_shape = ((batch_size, 3, src_net.in_size[0], src_net.in_size[1]) if src_net.data_format == "channels_first" else (batch_size, src_net.in_size[0], 


def prepare_dst_model(dst_fwk,
                      dst_model,
                      src_fwk,
                      ctx,
                      use_cuda,
                      num_classes=None,
                      in_channels=None,
                      model_type="image"):
    if dst_fwk == "gluon":
        from gluon.utils import prepare_model as prepare_model_gl
        dst_net = prepare_model_gl(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="",
            dtype=np.float32,
            tune_layers="",
            classes=(num_classes if num_classes > 0 else None),
            in_channels=in_channels,
            ctx=ctx)
        dst_params = dst_net._collect_params_with_prefix()
        dst_param_keys = list(dst_params.keys())
    elif dst_fwk == "pytorch":
        from pytorch.utils import prepare_model as prepare_model_pt
        dst_net = prepare_model_pt(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="",
            use_cuda=use_cuda,
            use_data_parallel=False,
            num_classes=(num_classes if num_classes > 0 else None),
            in_channels=in_channels)
        dst_params = dst_net.state_dict()
        dst_param_keys = list(dst_params.keys())
        if src_fwk != "pytorch":
            dst_param_keys = [key for key in dst_param_keys if not key.endswith("num_batches_tracked")]
    elif dst_fwk == "chainer":
        from chainer_.utils import prepare_model as prepare_model_ch
        dst_net = prepare_model_ch(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        dst_params = {i[0]: i[1] for i in dst_net.namedparams()}
        dst_param_keys = list(dst_params.keys())
    elif dst_fwk == "keras":
        from keras_.utils import prepare_model as prepare_model_ke
        dst_net = prepare_model_ke(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        # dst_param_keys = list(dst_net._arg_names) + list(dst_net._aux_names)
        dst_param_keys = [v.name for v in dst_net.weights]
        dst_params = {}
        for layer in dst_net.layers:
            if layer.name:
                for weight in layer.weights:
                    if weight.name:
                        dst_params.setdefault(weight.name, []).append(weight)
                        dst_params[weight.name] = (layer, weight)
    elif dst_fwk == "tensorflow":
        import tensorflow as tf
        from tensorflow_.utils import prepare_model as prepare_model_tf
        dst_net = prepare_model_tf(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        dst_param_keys = [v.name for v in tf.global_variables()]
        dst_params = {v.name: v for v in tf.global_variables()}
    elif dst_fwk == "tf2":
        import tensorflow as tf
        from tensorflow2.utils import prepare_model as prepare_model_tf2
        gpus = tf.config.experimental.list_physical_devices("GPU")
        if gpus:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
        dst_net = prepare_model_tf2(
            model_name=dst_model,
            use_pretrained=False,
            pretrained_model_file_path="")
        batch_size = 1
        if model_type == "image":
            input_shape = ((batch_size, 3, dst_net.in_size[0], dst_net.in_size[1]) if
                           dst_net.data_format == "channels_first" else
                           (batch_size, dst_net.in_size[0], dst_net.in_size[1], 3))
            dst_net(tf.random.normal(input_shape))
        else:
            seq_len = 100 * 640
            # input_shape = ((batch_size, dst_net.in_channels, seq_len) if
            #                dst_net.data_format == "channels_first" else
            #                (batch_size, seq_len, dst_net.in_channels))
            input_shape = (batch_size, seq_len)
            x_len = tf.convert_to_tensor(np.array([seq_len - 0], dtype=np.long))
            dst_net(tf.random.normal(input_shape), x_len)
        dst_param_keys = [v.name for v in dst_net.weights]
        dst_params = {v.name: v for v in dst_net.weights}
    elif dst_fwk == "tfl":
        dst_net = None
        dst_params = None
        dst_param_keys = None
    else:
        raise ValueError("Unsupported dst fwk: {}".format(dst_fwk))

    return dst_params, dst_param_keys, dst_net
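

# Note: the converters below repeatedly re-sort parameter keys with a "natural
# order" sort key, so that e.g. "stage10" sorts after "stage2" instead of
# lexicographically between "stage1" and "stage2". A minimal sketch of that
# behaviour (the key names here are hypothetical):
#
#   natural_key = lambda var: ["{:10}".format(int(x)) if x.isdigit() else x
#                              for x in re.findall(r"[^0-9]|[0-9]+", var)]
#   sorted(["features.stage10.conv", "features.stage2.conv"], key=natural_key)
#   # -> ["features.stage2.conv", "features.stage10.conv"]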
ValueError("Unsupported dst fwk: {}".format(dst_fwk)) return dst_params, dst_param_keys, dst_net def convert_mx2gl(dst_net, dst_params_file_path, dst_params, dst_param_keys, src_params, src_param_keys, src_model, ctx): if src_model in ["crunet56", "crunet116"]: src_param_keys.sort() src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)]) src_param_keys = [re.sub("^conv", "features.", key) for key in src_param_keys] src_param_keys = [re.sub("^fc6", "output.1.", key) for key in src_param_keys] src_param_keys = [re.sub('_c1x1-a', '.body.conv1.', key) for key in src_param_keys] src_param_keys = [re.sub('_c3x3-b', '.body.conv2A.', key) for key in src_param_keys] src_param_keys = [re.sub('_c1x1-b', '.body.conv2B.', key) for key in src_param_keys] src_param_keys = [re.sub('_c1x1-c', '.body.conv3.', key) for key in src_param_keys] src_param_keys = [re.sub('_x__x_1x1_bases\[dim3\]_weight$', '_x__1.body.conv1.convT.weight', key) for key in src_param_keys] src_param_keys = [re.sub('_x__x_3x3_bases\[dim21\]_weight$', '_x__1.body.conv2.convT.weight', key) for key in src_param_keys] src_param_keys = [re.sub('_x__\(1\)_1x1_bases\[dim3\]_weight$', '_x__1.body.conv1.convQ.weight', key) for key in src_param_keys] src_param_keys = [re.sub('_x__\(1\)_3x3_bases\[dim21\]_weight$', '_x__1.body.conv2.convQ.weight', key) for key in src_param_keys] src_param_keys = [re.sub('_x__\(2\)_1x1_bases\[dim3\]_weight$', '_x__7.body.conv1.convQ.weight', key) for key in src_param_keys] src_param_keys = [re.sub('_x__\(2\)_3x3_bases\[dim21\]_weight$', '_x__7.body.conv2.convQ.weight', key) for key in src_param_keys] src_param_keys = [re.sub('_x__\(3\)_1x1_bases\[dim3\]_weight$', '_x__14.body.conv1.convQ.weight', key) for key in src_param_keys] src_param_keys = [re.sub('_x__\(3\)_3x3_bases\[dim21\]_weight$', '_x__14.body.conv2.convQ.weight', key) for key in src_param_keys] src_param_keys = [re.sub('_c1x1-w\(s\/2\)', '.input_convZ.', key) for key in src_param_keys] src_param_keys = [re.sub('_c1x1-w_weight$', '.input_convZ.conv.weight', key) for key in src_param_keys] src_param_keys = [re.sub('_c1x1-w\(s\/1\)', '.input_conv.', key) for key in src_param_keys] src_param_keys = [re.sub('_c1x1-w\(s\/key\)', '.identity_conv.', key) for key in src_param_keys] src_param_keys = [re.sub('__conv_weight$', '.conv.weight', key) for key in src_param_keys] src_param_keys = [re.sub('__bn__bn_beta$', '.bn.beta', key) for key in src_param_keys] src_param_keys = [re.sub('__bn__bn_gamma$', '.bn.gamma', key) for key in src_param_keys] src_param_keys = [re.sub('__bn__bn_moving_mean$', '.bn.running_mean', key) for key in src_param_keys] src_param_keys = [re.sub('__bn__bn_moving_var$', '.bn.running_var', key) for key in src_param_keys] src_param_keys = [re.sub('1_x_1__relu-sp__bn_', '1_x_1.conv.bnA.', key) for key in src_param_keys] src_param_keys.sort() src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)]) dst_param_keys.sort() dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)]) src_param_keys = [re.sub("^features\.", "conv", key) for key in src_param_keys] src_param_keys = [re.sub('^output\.1\.', 'fc6', key) for key in src_param_keys] src_param_keys = [re.sub('_x__1\.body\.conv1\.convT\.weight$', '_x__x_1x1_bases[dim3]_weight', key) for key in src_param_keys] src_param_keys = [re.sub('_x__1\.body\.conv2\.convT\.weight$', 
'_x__x_3x3_bases[dim21]_weight', key) for key in src_param_keys] src_param_keys = [re.sub('_x__1\.body\.conv1\.convQ\.weight$', '_x__(1)_1x1_bases[dim3]_weight', key) for key in src_param_keys] src_param_keys = [re.sub('_x__1\.body\.conv2\.convQ\.weight$', '_x__(1)_3x3_bases[dim21]_weight', key) for key in src_param_keys] src_param_keys = [re.sub('_x__7\.body\.conv1\.convQ\.weight$', '_x__(2)_1x1_bases[dim3]_weight', key) for key in src_param_keys] src_param_keys = [re.sub('_x__7\.body\.conv2\.convQ\.weight$', '_x__(2)_3x3_bases[dim21]_weight', key) for key in src_param_keys] src_param_keys = [re.sub('_x__14\.body\.conv1\.convQ\.weight$', '_x__(3)_1x1_bases[dim3]_weight', key) for key in src_param_keys] src_param_keys = [re.sub('_x__14\.body\.conv2\.convQ\.weight$', '_x__(3)_3x3_bases[dim21]_weight', key) for key in src_param_keys] src_param_keys = [re.sub('\.body\.conv1\.', '_c1x1-a', key) for key in src_param_keys] src_param_keys = [re.sub('\.body\.conv2A\.', '_c3x3-b', key) for key in src_param_keys] src_param_keys = [re.sub('\.body\.conv2B\.', '_c1x1-b', key) for key in src_param_keys] src_param_keys = [re.sub('\.body\.conv3\.', '_c1x1-c', key) for key in src_param_keys] src_param_keys = [re.sub('\.input_convZ\.conv\.weight$', '_c1x1-w_weight', key) for key in src_param_keys] src_param_keys = [re.sub('\.input_convZ\.', '_c1x1-w(s/2)', key) for key in src_param_keys] src_param_keys = [re.sub('\.input_conv\.', '_c1x1-w(s/1)', key) for key in src_param_keys] src_param_keys = [re.sub('\.identity_conv\.', '_c1x1-w(s/key)', key) for key in src_param_keys] src_param_keys = [re.sub('\.conv\.weight$', '__conv_weight', key) for key in src_param_keys] src_param_keys = [re.sub('\.bn\.beta$', '__bn__bn_beta', key) for key in src_param_keys] src_param_keys = [re.sub('\.bn\.gamma$', '__bn__bn_gamma', key) for key in src_param_keys] src_param_keys = [re.sub('\.bn\.running_mean$', '__bn__bn_moving_mean', key) for key in src_param_keys] src_param_keys = [re.sub('\.bn\.running_var$', '__bn__bn_moving_var', key) for key in src_param_keys] src_param_keys = [re.sub('1_x_1\.conv\.bnA\.', '1_x_1__relu-sp__bn_', key) for key in src_param_keys] dst_i = 0 for src_i, src_key in enumerate(src_param_keys): dst_key = dst_param_keys[dst_i] for tt in range(10): if (dst_key.split('.')[-1].split('_')[-1] == src_key.split('_')[-1]) and\ (dst_params[dst_key].shape == src_params[src_key].shape): break assert (dst_key.split('.')[-1].split('_')[-1] == "weight") dst_i += 1 dst_key = dst_param_keys[dst_i] dst_i += 1 assert (dst_key.split('.')[-1].split('_')[-1] == src_key.split('_')[-1]) assert (dst_params[dst_key].shape == src_params[src_key].shape), \ "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format( src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape) dst_params[dst_key]._load_init(src_params[src_key], ctx) for param in dst_net.collect_params().values(): if param._data is not None: continue print("param={}".format(param)) param.initialize(ctx=ctx) dst_net.save_parameters(dst_params_file_path) return elif src_model in ["igcv3_w1"]: src_param_keys = [key.replace("seq-", "features.") for key in src_param_keys] src_param_keys = [key.replace("fc_", "output.1.") for key in src_param_keys] src_param_keys = [key.replace('-batchnorm_beta', '.bn.beta') for key in src_param_keys] src_param_keys = [key.replace('-batchnorm_gamma', '.bn.gamma') for key in src_param_keys] src_param_keys = [key.replace('-batchnorm_moving_mean', '.bn.running_mean') for key in src_param_keys] src_param_keys = 
[key.replace('-batchnorm_moving_var', '.bn.running_var') for key in src_param_keys] src_param_keys = [key.replace('-conv2d_weight', '.conv.weight') for key in src_param_keys] src_param_keys = [key.replace('first-3x3-conv', 'features.A') for key in src_param_keys] src_param_keys = [key.replace('last-1x1-conv', 'features.B') for key in src_param_keys] src_param_keys = [key.replace('-exp', '.conv1') for key in src_param_keys] src_param_keys = [key.replace('-depthwise', '.conv2') for key in src_param_keys] src_param_keys = [key.replace('-linear', '.conv3') for key in src_param_keys] src_param_keys = [key.replace("-block", ".block") for key in src_param_keys] dst_param_keys = [key.replace('features.0.', 'features.A.') for key in dst_param_keys] dst_param_keys = [key.replace('features.6.', 'features.B.') for key in dst_param_keys] src_param_keys.sort() src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)]) dst_param_keys.sort() dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)]) src_param_keys = [key.replace('.bn.beta', '-batchnorm_beta') for key in src_param_keys] src_param_keys = [key.replace('.bn.gamma', '-batchnorm_gamma') for key in src_param_keys] src_param_keys = [key.replace('.bn.running_mean', '-batchnorm_moving_mean') for key in src_param_keys] src_param_keys = [key.replace('.bn.running_var', '-batchnorm_moving_var') for key in src_param_keys] src_param_keys = [key.replace('.conv.weight', '-conv2d_weight') for key in src_param_keys] src_param_keys = [key.replace('features.A', 'first-3x3-conv') for key in src_param_keys] src_param_keys = [key.replace('features.B', 'last-1x1-conv') for key in src_param_keys] src_param_keys = [key.replace('.conv1', '-exp') for key in src_param_keys] src_param_keys = [key.replace('.conv2', '-depthwise', ) for key in src_param_keys] src_param_keys = [key.replace('.conv3', '-linear') for key in src_param_keys] src_param_keys = [key.replace("features.", "seq-") for key in src_param_keys] src_param_keys = [key.replace("output.1.", "fc_") for key in src_param_keys] src_param_keys = [key.replace(".block", "-block") for key in src_param_keys] dst_param_keys = [key.replace('features.A.', 'features.0.') for key in dst_param_keys] dst_param_keys = [key.replace('features.B.', 'features.6.') for key in dst_param_keys] elif src_model in ["preresnet269b"]: dst_net.features[1][0].body.conv1a.bn.initialize(ctx=ctx, verbose=True, force_reinit=True) dst1 = list(filter(re.compile("^features.1.0.body.conv1.bn.").search, dst_param_keys)) dst_param_keys = [key for key in dst_param_keys if key not in dst1] src_param_keys.sort() src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)]) src_param_keys = [re.sub('^classifier_', "output.", key) for key in src_param_keys] src_param_keys = [re.sub('^res', "features.", key) for key in src_param_keys] src_param_keys = [re.sub('_conv1_weight$', '_conv1_aweight', key) for key in src_param_keys] src_param_keys = [re.sub('_conv2_weight$', '_conv2_aweight', key) for key in src_param_keys] src_param_keys = [re.sub('_conv3_weight$', '_conv3_aweight', key) for key in src_param_keys] src_param_keys.sort() src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x for x in re.findall(r"[^0-9]|[0-9]+", var)]) dst_param_keys.sort() dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x 


def convert_gl2ch(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  ext_src_param_keys,
                  ext_src_param_keys2,
                  src_model):
    if src_model.startswith("diares") or src_model.startswith("diapreres"):
        src1 = list(filter(re.compile("^features\.[0-9]*\.\d*[1-9]\d*\.attention").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1n

    assert (len(src_param_keys) == len(dst_param_keys))

    if src_model.startswith("quartznet") or src_model.startswith("jasper"):
        dst_param_keys = [key.replace("features/final_block/", "features/zfinal_block/") for key in dst_param_keys]
        dst_param_keys = [key.replace("/W", "/weight") for key in dst_param_keys]
    dst_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/features/body/", "/features/zbody/") for key in dst_param_keys]
    dst_param_keys = [key.replace("features/final_postactiv/", "features/stageN/final_postactiv/")
                      for key in dst_param_keys]
    dst_param_keys = [key.replace("features/final_block/", "features/stageN/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/final_block/", "/zfinal_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("features/final_conv/", "features/stageN/final_conv/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in dst_param_keys]
    if not src_model.startswith("ibppose_coco"):
        dst_param_keys = [key.replace("/hg/", "/stage1_hg/") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/unit", "/a_unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/reg_block/", "/z_reg_block/") for key in dst_param_keys]

    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x
                                         for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x
                                         for x in re.findall(r"[^0-9]|[0-9]+", var)])

    if src_model.startswith("quartznet") or src_model.startswith("jasper"):
        dst_param_keys = [key.replace("features/zfinal_block/", "features/final_block/") for key in dst_param_keys]
        dst_param_keys = [key.replace("/weight", "/W") for key in dst_param_keys]
    dst_param_keys = [key.replace("/zfinal_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/final_postactiv/", "/final_postactiv/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/features/zbody/", "/features/body/") for key in dst_param_keys]
    dst_param_keys = [key.replace("features/stageN/final_conv/", "features/final_conv/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in dst_param_keys]
    if not src_model.startswith("ibppose_coco"):
        dst_param_keys = [key.replace("/stage1_hg/", "/hg/") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/a_unit", "/unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/z_reg_block/", "/reg_block/") for key in dst_param_keys]

    if src_model.startswith("wrn20_10_1bit") or src_model.startswith("wrn20_10_32bit"):
        ext2_src_param_keys = [key.replace('.conv.weight', '.bn.beta') for key in src_param_keys if
                               key.endswith(".conv.weight")]
        ext2_src_param_keys.append("features.4.bn.beta")
        ext2_dst_param_keys = [key.replace("/conv/W", "/bn/beta") for key in dst_param_keys if
                               key.endswith("/conv/W")]
        ext2_dst_param_keys.append("/features/post_activ/bn/beta")
        ext3_src_param_keys = {".".join(v.split(".")[:-1]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-1], ext2_dst_param_keys))
    else:
        ext2_src_param_keys = [key for key in src_param_keys if key.endswith(".beta")]
        ext2_dst_param_keys = [key for key in dst_param_keys if key.endswith("/beta")]
        ext3_src_param_keys = {".".join(v.split(".")[:-1]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-1], ext2_dst_param_keys))

    for i, src_key in enumerate(ext_src_param_keys):
        src_key1 = src_key.split(".")[-1]
        src_key2 = ".".join(src_key.split(".")[:-1])
        dst_ind = ext3_src_param_keys[src_key2]
        dst_path = ext3_dst_param_keys[dst_ind]
        obj = dst_net
        for j, sub_path in enumerate(dst_path):
            obj = getattr(obj, sub_path)
        if src_key1 == 'running_mean':
            assert (obj.avg_mean.shape == src_params[src_key].shape), \
                "src_key={}, dst_path={}, src_shape={}, obj.avg_mean.shape={}".format(
                    src_key, dst_path, src_params[src_key].shape, obj.avg_mean.shape)
            obj.avg_mean = src_params[src_key]._data[0].asnumpy()
        elif src_key1 == 'running_var':
            assert (obj.avg_var.shape == src_params[src_key].shape)
            obj.avg_var = src_params[src_key]._data[0].asnumpy()

    if src_model in ["condensenet74_c4_g4", "condensenet74_c8_g8"]:
        assert (dst_net.output.fc.index.shape == src_params["output.1.index"].shape)
        dst_net.output.fc.index = src_params["output.1.index"]._data[0].asnumpy().astype(np.int32)
        ext_src_param_keys2.remove("output.1.index")

        ext2_src_param_keys = [key for key in src_param_keys if key.endswith(".conv1.conv.weight")]
        ext2_dst_param_keys = [key for key in dst_param_keys if key.endswith("/conv1/conv/W")]
        ext3_src_param_keys = {".".join(v.split(".")[:-2]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-2], ext2_dst_param_keys))

        for i, src_key in enumerate(ext_src_param_keys2):
            src_key2 = ".".join(src_key.split(".")[:-1])
            dst_ind = ext3_src_param_keys[src_key2]
            dst_path = ext3_dst_param_keys[dst_ind]
            obj = dst_net
            for j, sub_path in enumerate(dst_path):
                obj = getattr(obj, sub_path)
            assert (obj.index.shape == src_params[src_key].shape), \
                "src_key={}, dst_path={}, src_shape={}, obj.index.shape={}".format(
                    src_key, dst_path, src_params[src_key].shape, obj.index.shape)
            obj.index = src_params[src_key]._data[0].asnumpy().astype(np.int32)
    elif src_model.startswith("xdensenet"):
        ext2_src_param_keys = [key for key in src_param_keys if key.endswith(".conv1.conv.weight")] + \
                              [key for key in src_param_keys if key.endswith(".conv2.conv.weight")]
        ext2_dst_param_keys = [key for key in dst_param_keys if key.endswith("/conv1/conv/W")] + \
                              [key for key in dst_param_keys if key.endswith("/conv2/conv/W")]
        ext3_src_param_keys = {".".join(v.split(".")[:-1]): i for i, v in enumerate(ext2_src_param_keys)}
        ext3_dst_param_keys = list(map(lambda x: x.split("/")[1:-1], ext2_dst_param_keys))

        for i, src_key in enumerate(ext_src_param_keys2):
            src_key2 = ".".join(src_key.split(".")[:-1])
            dst_ind = ext3_src_param_keys[src_key2]
            dst_path = ext3_dst_param_keys[dst_ind]
            obj = dst_net
            for j, sub_path in enumerate(dst_path):
                obj = getattr(obj, sub_path)
            assert (obj.mask.shape == src_params[src_key].shape), \
                "src_key={}, dst_path={}, src_shape={}, obj.index.shape={}".format(
                    src_key, dst_path, src_params[src_key].shape, obj.mask.shape)
            obj.mask = src_params[src_key]._data[0].asnumpy()

    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        assert (dst_params[dst_key].array.shape == src_params[src_key].shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].array.shape)
        dst_params[dst_key].array = src_params[src_key]._data[0].asnumpy()
        # print("src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
        #     src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].array.shape))

    from chainer.serializers import save_npz
    save_npz(
        file=dst_params_file_path,
        obj=dst_net)


def convert_gl2gl(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  finetune,
                  src_model,
                  ctx):
    if src_model.startswith("oth_danet_resnet"):
        src6 = list(filter(re.compile("^head.sa.gamma").search, src_param_keys))
        src6n = [key for key in src_param_keys if key not in src6]
        src_param_keys = src6n + src6
        src7 = list(filter(re.compile("^head.conv51").search, src_param_keys))
        src7n = [key for key in src_param_keys if key not in src7]
        src_param_keys = src7n + src7
        src8 = list(filter(re.compile("^head.conv6").search, src_param_keys))
        src8n = [key for key in src_param_keys if key not in src8]
        src_param_keys = src8n + src8
        src1 = list(filter(re.compile("^head.conv5c").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1n + src1
        src2 = list(filter(re.compile("^head.sc").search, src_param_keys))
        src2n = [key for key in src_param_keys if key not in src2]
        src_param_keys = src2n + src2
        src3 = list(filter(re.compile("^head.conv52").search, src_param_keys))
        src3n = [key for key in src_param_keys if key not in src3]
        src_param_keys = src3n + src3
        src4 = list(filter(re.compile("^head.conv7").search, src_param_keys))
        src4n = [key for key in src_param_keys if key not in src4]
        src_param_keys = src4n + src4
        src5 = list(filter(re.compile("^head.conv8").search, src_param_keys))
        src5n = [key for key in src_param_keys if key not in src5]
        src_param_keys = src5n + src5
    elif src_model.startswith("oth_icnet_resnet50_citys"):
        src1 = list(filter(re.compile("^conv_sub1").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1 + src1n
        src2 = list(filter(re.compile("^head").search, src_param_keys))
        src2n = [key for key in src_param_keys if key not in src2]
        src_param_keys = src2n + src2
    elif src_model.startswith("oth_fastscnn_citys"):
        src1 = list(filter(re.compile("^feature_fusion").search, src_param_keys))
        src1n = [key for key in src_param_keys if key not in src1]
        src_param_keys = src1n + src1
        dst0 = list(filter(re.compile("^fusion").search, dst_param_keys))
        dst0n = [key for key in dst_param_keys if key not in dst0]
        dst_param_keys = dst0n + dst0
        dst1 = list(filter(re.compile("^fusion.low_pw_conv.bn").search, dst_param_keys))
        dst1n = [key for key in dst_param_keys if key not in dst1]
        dst_param_keys = dst1n + dst1
        dst2 = list(filter(re.compile("^fusion.high_conv.bn").search, dst_param_keys))
        dst2n = [key for key in dst_param_keys if key not in dst2]
        dst_param_keys = dst2n + dst2

    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        if dst_params[dst_key].shape != src_params[src_key].shape:
            logging.warning(
                "dst_param.shape != src_param.shape, src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                    src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape))
            if finetune:
                continue
            else:
                raise ValueError
        if dst_key.split('.')[-1] != src_key.split('.')[-1]:
            logging.warning(
                "dst_key.suff != src_key.suff, src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                    src_key, dst_key, src_params[src_key].shape, dst_params[dst_key].shape))
        dst_params[dst_key]._load_init(src_params[src_key]._data[0], ctx)

    dst_net.save_parameters(dst_params_file_path)
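

# A minimal illustration (hypothetical shapes) of the finetune escape hatch in
# convert_gl2gl above: shape mismatches become skips instead of errors, which is
# what allows re-targeting e.g. a classifier to a different number of classes:
#
#   src shape (1000, 2048)  vs  dst shape (100, 2048)
#   finetune=True  -> the key is skipped and keeps its fresh initialization
#   finetune=False -> ValueError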


def convert_gl2ke(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    import mxnet as mx

    dst_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/final_block/", "/stageN/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in dst_param_keys]

    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x
                                         for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x
                                         for x in re.findall(r"[^0-9]|[0-9]+", var)])

    dst_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in dst_param_keys]

    dst_param_keys_orig = dst_param_keys.copy()
    dst_param_keys = [s[:(s.find("convgroup") + 9)] + "/" + s.split("/")[-1] if s.find("convgroup") >= 0 else s
                      for s in dst_param_keys]
    dst_param_keys_uniq, dst_param_keys_index = np.unique(dst_param_keys, return_index=True)
    dst_param_keys = list(dst_param_keys_uniq[dst_param_keys_index.argsort()])
    # dst_param_keys = list(np.unique(dst_param_keys))

    assert (len(src_param_keys) == len(dst_param_keys))

    def process_width(src_key, dst_key, src_weight):
        dst_layer = dst_params[dst_key][0]
        dst_weight = dst_params[dst_key][1]
        if (dst_layer.__class__.__name__ in ["Conv2D"]) and dst_key.endswith("kernel1") and \
                (dst_layer.data_format == "channels_last"):
            src_weight = np.transpose(src_weight, (2, 3, 1, 0))
        if (dst_layer.__class__.__name__ in ["DepthwiseConv2D"]) and dst_key.endswith("kernel1") and \
                (dst_layer.data_format == "channels_last"):
            src_weight = np.transpose(src_weight, (2, 3, 0, 1))
        if (dst_layer.__class__.__name__ in ["Dense"]) and dst_key.endswith("kernel1"):
            src_weight = np.transpose(src_weight, (1, 0))
        assert (dst_weight._keras_shape == src_weight.shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_weight.shape, dst_weight._keras_shape)
        # print("src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
        #     src_key, dst_key, src_weight.shape, dst_weight._keras_shape))
        dst_weight.bind(mx.nd.array(src_weight))

    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        if dst_key.find("convgroup") >= 0:
            dst_key_stem = dst_key[:(dst_key.find("convgroup") + 9)]
            dst_keys = [s for s in dst_param_keys_orig if s.startswith(dst_key_stem)]
            if src_key.endswith("weight"):
                dst_keys = [s for s in dst_keys if s.endswith("kernel1")]
            elif src_key.endswith("bias"):
                dst_keys = [s for s in dst_keys if s.endswith("bias1")]
            groups = len(dst_keys)
            src_weight0 = src_params[src_key]._data[0]
            src_weight0_list = mx.nd.split(src_weight0, axis=0, num_outputs=groups)
            for gi in range(groups):
                src_weight_gi = src_weight0_list[gi].asnumpy()
                dst_key_gi = dst_keys[gi]
                process_width(src_key, dst_key_gi, src_weight_gi)
        else:
            src_weight = src_params[src_key]._data[0].asnumpy()
            process_width(src_key, dst_key, src_weight)

    dst_net.save_weights(dst_params_file_path)
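

# A minimal sketch (hypothetical shapes) of the layout change performed by
# process_width in convert_gl2ke above: Gluon stores convolution weights as
# (out_channels, in_channels, kh, kw), while channels-last Keras expects
# (kh, kw, in_channels, out_channels):
#
#   w_gl = np.zeros((64, 3, 7, 7))
#   w_ke = np.transpose(w_gl, (2, 3, 1, 0))  # -> shape (7, 7, 3, 64)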


def convert_gl2tf(dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    import mxnet as mx

    dst_param_keys = [key.replace("/kernel:", "/weight:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/dw_kernel:", "/weight_dw:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/final_block/", "/stageN/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in dst_param_keys]

    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x
                                         for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x
                                         for x in re.findall(r"[^0-9]|[0-9]+", var)])

    dst_param_keys = [key.replace("/weight:", "/kernel:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/weight_dw:", "/dw_kernel:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in dst_param_keys]

    dst_param_keys_orig = dst_param_keys.copy()
    dst_param_keys = [s[:(s.find("convgroup") + 9)] + "/" + s.split("/")[-1] if s.find("convgroup") >= 0 else s
                      for s in dst_param_keys]
    dst_param_keys_uniq, dst_param_keys_index = np.unique(dst_param_keys, return_index=True)
    dst_param_keys = list(dst_param_keys_uniq[dst_param_keys_index.argsort()])

    assert (len(src_param_keys) == len(dst_param_keys))

    import tensorflow as tf
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        def process_width(src_key, dst_key, src_weight):
            if len(src_weight.shape) == 4:
                if dst_key.split("/")[-1][:-2] == "dw_kernel":
                    src_weight = np.transpose(src_weight, axes=(2, 3, 0, 1))
                else:
                    src_weight = np.transpose(src_weight, axes=(2, 3, 1, 0))
            elif len(src_weight.shape) == 2:
                src_weight = np.transpose(src_weight, axes=(1, 0))
            assert (tuple(dst_params[dst_key].get_shape().as_list()) == src_weight.shape)
            sess.run(dst_params[dst_key].assign(src_weight))
            # print(dst_params[dst_key].eval(sess))

        for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
            if dst_key.find("convgroup") >= 0:
                dst_key_stem = dst_key[:(dst_key.find("convgroup") + 9)]
                dst_keys = [s for s in dst_param_keys_orig if s.startswith(dst_key_stem)]
                if src_key.endswith("weight"):
                    dst_keys = [s for s in dst_keys if s.endswith("kernel:0")]
                elif src_key.endswith("bias"):
                    dst_keys = [s for s in dst_keys if s.endswith("bias:0")]
                groups = len(dst_keys)
                src_weight0 = src_params[src_key]._data[0]
                src_weight0_list = mx.nd.split(src_weight0, axis=0, num_outputs=groups)
                for gi in range(groups):
                    src_weight_gi = src_weight0_list[gi].asnumpy()
                    dst_key_gi = dst_keys[gi]
                    process_width(src_key, dst_key_gi, src_weight_gi)
            else:
                src_weight = src_params[src_key]._data[0].asnumpy()
                process_width(src_key, dst_key, src_weight)

        # saver = tf.train.Saver()
        # saver.save(
        #     sess=sess,
        #     save_path=dst_params_file_path)
        from tensorflow_.utils import save_model_params
        save_model_params(
            sess=sess,
            file_path=dst_params_file_path)


def convert_gl2tf2(dst_net,
                   dst_params_file_path,
                   dst_params,
                   dst_param_keys,
                   src_params,
                   src_param_keys,
                   src_model):
    if src_model.startswith("hrnet"):
        src_param_keys = [key.replace(".transition.", ".atransition.") for key in src_param_keys]

    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x
                                         for x in re.findall(r"[^0-9]|[0-9]+", var)])

    if src_model.startswith("hrnet"):
        src_param_keys = [key.replace(".atransition.", ".transition.") for key in src_param_keys]

    dst_param_keys = [key.replace("/kernel:", "/weight:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/depthwise_kernel:", "/weight_depthwise:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in dst_param_keys]
    if (not src_model.startswith("pspnet_")) and (not src_model.startswith("deeplabv3_")) and \
            (not src_model.startswith("simplepose_")) and (not src_model.startswith("alphapose_")) and \
            (not src_model.startswith("lwopenpose")) and (not src_model.startswith("quartznet")) and \
            (not src_model.startswith("jasper")):
        dst_param_keys = [key.replace("/final_block/", "/stageN/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/final_block/", "/zfinal_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in dst_param_keys]
    if src_model.startswith("hrnet"):
        dst_param_keys = [key.replace("/transition/", "/atransition/") for key in dst_param_keys]
    if src_model.startswith("hardnet"):
        # dst_param_keys = [key.replace('/dw_conv/', '/z_dw_conv/') for key in dst_param_keys]
        dst_param_keys = [key.replace("features/down", "features/z_down") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/unit", "/a_unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/reg_block/", "/z_reg_block/") for key in dst_param_keys]
    # if src_model.startswith("danet"):
    #     dst_param_keys = [key.replace("da_net/head/", "z_da_net/head/") for key in dst_param_keys]

    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x
                                         for x in re.findall(r"[^0-9]|[0-9]+", var)])

    dst_param_keys = [key.replace("/weight:", "/kernel:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/weight_depthwise:", "/depthwise_kernel:") for key in dst_param_keys]
    dst_param_keys = [key.replace("/zfinal_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in dst_param_keys]
    if (not src_model.startswith("pspnet_")) and (not src_model.startswith("deeplabv3_")) and \
            (not src_model.startswith("simplepose_")) and (not src_model.startswith("alphapose_")) and \
            (not src_model.startswith("lwopenpose")) and (not src_model.startswith("quartznet")) and \
            (not src_model.startswith("jasper")):
        dst_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in dst_param_keys]
    dst_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in dst_param_keys]
    if src_model.startswith("hrnet"):
        dst_param_keys = [key.replace("/atransition/", "/transition/") for key in dst_param_keys]
    if src_model.startswith("hardnet"):
        # dst_param_keys = [key.replace('/z_dw_conv/', '/dw_conv/') for key in dst_param_keys]
        dst_param_keys = [key.replace("features/z_down", "features/down") for key in dst_param_keys]
    if src_model.startswith("centernet"):
        dst_param_keys = [key.replace("/a_unit", "/unit") for key in dst_param_keys]
        dst_param_keys = [key.replace("/z_reg_block/", "/reg_block/") for key in dst_param_keys]
    # if src_model.startswith("danet"):
    #     dst_param_keys = [key.replace("z_da_net/head/", "da_net/head/") for key in dst_param_keys]

    dst_param_keys_orig = dst_param_keys.copy()
    dst_param_keys = [s[:(s.find("convgroup") + 9)] + "/" + s.split("/")[-1] if s.find("convgroup") >= 0 else s
                      for s in dst_param_keys]
    dst_param_keys_uniq, dst_param_keys_index = np.unique(dst_param_keys, return_index=True)
    dst_param_keys = list(dst_param_keys_uniq[dst_param_keys_index.argsort()])

    assert (len(src_param_keys) == len(dst_param_keys))

    def process_width(src_key, dst_key, src_weight):
        if len(src_weight.shape) == 4:
            if dst_key.split("/")[-1][:-2] == "depthwise_kernel":
                src_weight = np.transpose(src_weight, axes=(2, 3, 0, 1))
            else:
                src_weight = np.transpose(src_weight, axes=(2, 3, 1, 0))
        elif len(src_weight.shape) == 2:
            src_weight = np.transpose(src_weight, axes=(1, 0))
        elif len(src_weight.shape) == 3:
            if not ((src_model.startswith("jasper") or src_model.startswith("quartznet")) and
                    dst_key.split("/")[-1][:-2] == "fb"):
                src_weight = np.transpose(src_weight, axes=(2, 1, 0))
            if dst_key.split("/")[-1][:-2] == "depthwise_kernel":
                assert (len(dst_params[dst_key].shape) == 4)
                src_weight = np.expand_dims(src_weight, -1)
        dst_weight = dst_params[dst_key]
        assert (tuple(dst_weight.shape) == src_weight.shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, src_weight.shape, tuple(dst_weight.shape))
        dst_weight.assign(src_weight)

    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        # print("src_key={},\tsrc_key2={},\tdst_key={}".format(src_key, src_params[src_key].name, dst_key))
        if dst_key.find("convgroup") >= 0:
            import mxnet as mx
            dst_key_stem = dst_key[:(dst_key.find("convgroup") + 9)]
            dst_keys = [s for s in dst_param_keys_orig if s.startswith(dst_key_stem)]
            if src_key.endswith("weight"):
                dst_keys = [s for s in dst_keys if s.endswith("kernel:0")]
            elif src_key.endswith("bias"):
                dst_keys = [s for s in dst_keys if s.endswith("bias:0")]
            groups = len(dst_keys)
            src_weight0 = src_params[src_key]._data[0]
            src_weight0_list = mx.nd.split(src_weight0, axis=0, num_outputs=groups)
            for gi in range(groups):
                src_weight_gi = src_weight0_list[gi].asnumpy()
                dst_key_gi = dst_keys[gi]
                process_width(src_key, dst_key_gi, src_weight_gi)
        else:
            src_weight = src_params[src_key]._data[0].asnumpy()
            process_width(src_key, dst_key, src_weight)

    dst_net.save_weights(dst_params_file_path)
print("src_key={},\tsrc_key2={},\tdst_key={}".format(src_key, src_params[src_key].name, dst_key)) if dst_key.find("convgroup") >= 0: import mxnet as mx dst_key_stem = dst_key[:(dst_key.find("convgroup") + 9)] dst_keys = [s for s in dst_param_keys_orig if s.startswith(dst_key_stem)] if src_key.endswith("weight"): dst_keys = [s for s in dst_keys if s.endswith("kernel:0")] elif src_key.endswith("bias"): dst_keys = [s for s in dst_keys if s.endswith("bias:0")] groups = len(dst_keys) src_weight0 = src_params[src_key]._data[0] src_weight0_list = mx.nd.split(src_weight0, axis=0, num_outputs=groups) for gi in range(groups): src_weight_gi = src_weight0_list[gi].asnumpy() dst_key_gi = dst_keys[gi] process_width(src_key, dst_key_gi, src_weight_gi) else: src_weight = src_params[src_key]._data[0].asnumpy() process_width(src_key, dst_key, src_weight) dst_net.save_weights(dst_params_file_path) def convert_pt2pt(dst_params_file_path, dst_params, dst_param_keys, src_params, src_param_keys, src_model, dst_model): import torch if src_model.startswith("oth_quartznet") or src_model.startswith("oth_jasper"): src1 = list(filter(re.compile("\.res\.").search, src_param_keys)) src1n = [key for key in src_param_keys if key not in src1] src_param_keys = src1n + src1 dst1 = list(filter(re.compile("\.identity_block\.").search, dst_param_keys)) dst1n = [key for key in dst_param_keys if key not in dst1] dst_param_keys = dst1n + dst1 elif src_model.startswith("oth_dicenet"): src1 = list(filter(re.compile("\.conv_height\.").search, src_param_keys)) src1n = [key for key in src_param_keys if key not in src1] src2 = list(filter(re.compile("\.conv_width\.").search, src1n)) src2n = [key for key in src1n if key not in src2] src3 = list(filter(re.compile("\.linear_comb_layer\.").search, src2n)) src3n = [key for key in src2n if key not in src3] src4 = list(filter(re.compile("\.proj_layer\.").search, src3n)) src4n = [key for key in src3n if key not in src4] src_param_keys = src4n + src1 + src2 + src3 + src4 dst1 = list(filter(re.compile("\.h_conv\.").search, dst_param_keys)) dst1n = [key for key in dst_param_keys if key not in dst1] dst2 = list(filter(re.compile("\.w_conv\.").search, dst1n)) dst2n = [key for key in dst1n if key not in dst2] dst3 = list(filter(re.compile("\.att\.").search, dst2n)) dst3n = [key for key in dst2n if key not in dst3] dst4 = list(filter(re.compile("\.proj_conv\.").search, dst3n)) dst4n = [key for key in dst3n if key not in dst4] dst_param_keys = dst4n + dst1 + dst2 + dst3 + dst4 elif src_model.startswith("oth_proxyless"): src1 = src_param_keys[5] del src_param_keys[5] src_param_keys.insert(0, src1) src2 = src_param_keys[-3] del src_param_keys[-3] src_param_keys.insert(-7, src2) elif src_model.startswith("oth_scnet"): pass src1 = list(filter(re.compile(".k1.").search, src_param_keys)) src1n = [key for key in src_param_keys if key not in src1] src2 = list(filter(re.compile(".scconv.").search, src1n)) src2n = [key for key in src1n if key not in src2] src_param_keys = src2n + src1 + src2 dst1 = list(filter(re.compile(".conv2a.").search, dst_param_keys)) dst1n = [key for key in dst_param_keys if key not in dst1] dst2 = list(filter(re.compile(".conv2b.").search, dst1n)) dst2n = [key for key in dst1n if key not in dst2] dst_param_keys = dst2n + dst1 + dst2 elif src_model == "oth_bisenet": src1 = list(filter(re.compile("^cp.conv_avg").search, src_param_keys)) src1n = [key for key in src_param_keys if key not in src1] src2 = list(filter(re.compile("^cp.arm32").search, src1n)) src2n = [key for key in src1n if key 
not in src2] src3 = list(filter(re.compile("^cp.conv_head32").search, src2n)) src3n = [key for key in src2n if key not in src3] src4 = list(filter(re.compile("^cp.arm16").search, src3n)) src4n = [key for key in src3n if key not in src4] src5 = list(filter(re.compile("^cp.conv_head16").search, src4n)) src5n = [key for key in src4n if key not in src5] src6 = list(filter(re.compile("^ffm").search, src5n)) src6n = [key for key in src5n if key not in src6] src_param_keys = src6n + src1 + src2 + src3 + src4 + src5 + src6 dst1 = list(filter(re.compile("^pool").search, dst_param_keys)) dst1n = [key for key in dst_param_keys if key not in dst1] dst_param_keys = dst1n + dst1 elif src_model.startswith("oth_dla"): src1 = list(filter(re.compile("\.project").search, src_param_keys)) src1n = [key for key in src_param_keys if key not in src1] src_param_keys = src1 + src1n dst1 = list(filter(re.compile("\.project_conv").search, dst_param_keys)) dst1n = [key for key in dst_param_keys if key not in dst1] dst_param_keys = dst1 + dst1n elif dst_model == "ntsnet": src1 = list(filter(re.compile("^proposal_net").search, src_param_keys)) src1n = [key for key in src_param_keys if key not in src1] src_param_keys = src1 + src1n dst1 = list(filter(re.compile("^navigator_unit\.branch\d+\.down").search, dst_param_keys)) dst1n = [key for key in dst_param_keys if key not in dst1] dst2 = list(filter(re.compile("^navigator_unit\.branch\d+\.tidy").search, dst1n)) dst2n = [key for key in dst1n if key not in dst2] dst_param_keys = dst1 + dst2 + dst2n elif dst_model == "fishnet150": src1 = list(filter(re.compile("^(conv|fish\.fish\.[0-2])").search, src_param_keys)) src1n = [key for key in src_param_keys if key not in src1] src2 = list(filter(re.compile("^fish\.fish\.6\.1").search, src1n)) src2n = [key for key in src1n if key not in src2] src3 = list(filter(re.compile("^fish\.fish\.5\.1").search, src2n)) src3n = [key for key in src2n if key not in src3] src4 = list(filter(re.compile("^fish\.fish\.4\.1").search, src3n)) src4n = [key for key in src3n if key not in src4] src5 = list(filter(re.compile("^fish\.fish\.3\.[0-1]").search, src4n)) src5n = [key for key in src4n if key not in src5] src6 = list(filter(re.compile("^fish\.fish\.3\.3").search, src5n)) src6n = [key for key in src5n if key not in src6] src7 = list(filter(re.compile("^fish\.fish\.[3-6]").search, src6n)) src7n = [key for key in src6n if key not in src7] src8 = list(filter(re.compile("^fish\.fish\.9\.1").search, src7n)) src8n = [key for key in src7n if key not in src8] src9 = list(filter(re.compile("^fish\.fish\.8\.1").search, src8n)) src9n = [key for key in src8n if key not in src9] src10 = list(filter(re.compile("^fish\.fish\.7\.1").search, src9n)) src10n = [key for key in src9n if key not in src10] src_param_keys = src1 + src2 + src3 + src4 + src5 + src6 + src7 + src8 + src9 + src10 + src10n elif dst_model == "bam_resnet50": src_bams = list(filter(re.compile("^bam").search, src_param_keys)) src_param_keys = [key for key in src_param_keys if key not in src_bams] src_param_keys = src_param_keys + src_bams dst_bams = list(filter(re.compile("^features.stage[0-9].unit1.bam.").search, dst_param_keys)) dst_param_keys = [key for key in dst_param_keys if key not in dst_bams] dst_param_keys = dst_param_keys + dst_bams elif dst_model.startswith("sinet"): src1 = list(filter(re.compile("\.vertical.weight").search, src_param_keys)) src1n = [key for key in src_param_keys if key not in src1] src_param_keys = src1n + src1 src2 = 
list(filter(re.compile("\.horizontal.weight").search, src_param_keys)) src2n = [key for key in src_param_keys if key not in src2] src_param_keys = src2n + src2 src3 = list(filter(re.compile("\.B_v\.").search, src_param_keys)) src3n = [key for key in src_param_keys if key not in src3] src_param_keys = src3n + src3 src4 = list(filter(re.compile("\.B_h\.").search, src_param_keys)) src4n = [key for key in src_param_keys if key not in src4] src_param_keys = src4n + src4 src5 = list(filter(re.compile("bn_4\.").search, src_param_keys)) src5n = [key for key in src_param_keys if key not in src5] src_param_keys = src5n + src5 src6 = list(filter(re.compile("bn_3\.").search, src_param_keys)) src6n = [key for key in src_param_keys if key not in src6] src_param_keys = src6n + src6 dst1 = list(filter(re.compile("\.v_conv.conv\.").search, dst_param_keys)) dst1n = [key for key in dst_param_keys if key not in dst1] dst_param_keys = dst1n + dst1 dst2 = list(filter(re.compile("\.h_conv.conv\.").search, dst_param_keys)) dst2n = [key for key in dst_param_keys if key not in dst2] dst_param_keys = dst2n + dst2 dst3 = list(filter(re.compile("\.v_conv.bn\.").search, dst_param_keys)) dst3n = [key for key in dst_param_keys if key not in dst3] dst_param_keys = dst3n + dst3 dst4 = list(filter(re.compile("\.h_conv.bn\.").search, dst_param_keys)) dst4n = [key for key in dst_param_keys if key not in dst4] dst_param_keys = dst4n + dst4 dst5 = list(filter(re.compile("decoder.decode1.bn\.").search, dst_param_keys)) dst5n = [key for key in dst_param_keys if key not in dst5] dst_param_keys = dst5n + dst5 dst6 = list(filter(re.compile("decoder.decode2.bn\.").search, dst_param_keys)) dst6n = [key for key in dst_param_keys if key not in dst6] dst_param_keys = dst6n + dst6 elif src_model.startswith("oth_ibppose"): def sort_hg(src2): src2b1 = list(filter(re.compile("^hourglass.[0-9].hg.0.1.").search, src2)) src2b2 = list(filter(re.compile("^hourglass.[0-9].hg.1.1.").search, src2)) src2b3 = list(filter(re.compile("^hourglass.[0-9].hg.2.1.").search, src2)) src2b4 = list(filter(re.compile("^hourglass.[0-9].hg.3.1.").search, src2)) src2b5 = list(filter(re.compile("^hourglass.[0-9].hg.3.2.").search, src2)) src2b6 = list(filter(re.compile("^hourglass.[0-9].hg.3.3.").search, src2)) src2b7 = list(filter(re.compile("^hourglass.[0-9].hg.2.2.").search, src2)) src2b8 = list(filter(re.compile("^hourglass.[0-9].hg.2.3.").search, src2)) src2b9 = list(filter(re.compile("^hourglass.[0-9].hg.1.2.").search, src2)) src2b10 = list(filter(re.compile("^hourglass.[0-9].hg.1.3.").search, src2)) src2b11 = list(filter(re.compile("^hourglass.[0-9].hg.0.2.").search, src2)) src2b12 = list(filter(re.compile("^hourglass.[0-9].hg.0.3.").search, src2)) src2b13 = list(filter(re.compile("^hourglass.[0-9].hg.0.0.").search, src2)) src2b14 = list(filter(re.compile("^hourglass.[0-9].hg.1.0.").search, src2)) src2b15 = list(filter(re.compile("^hourglass.[0-9].hg.2.0.").search, src2)) src2b16 = list(filter(re.compile("^hourglass.[0-9].hg.3.0.").search, src2)) src2b17 = list(filter(re.compile("^hourglass.[0-9].hg.3.4.").search, src2)) return src2b1 + src2b2 + src2b3 + src2b4 +\ src2b11 + src2b12 + src2b9 + src2b10 + src2b7 + src2b8 + src2b5 + src2b6 +\ src2b13 + src2b14 + src2b15 + src2b16 + src2b17 src1 = list(filter(re.compile("^pre.").search, src_param_keys)) src1n = [key for key in src_param_keys if key not in src1] src_param_keys = src1n + src1 src2 = list(filter(re.compile("^hourglass.").search, src_param_keys)) src2n = [key for key in src_param_keys if key not in 


def convert_gl2pt(dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    import torch
    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        assert (tuple(dst_params[dst_key].size()) == src_params[src_key].shape)
        dst_params[dst_key] = torch.from_numpy(src_params[src_key]._data[0].asnumpy())
    torch.save(
        obj=dst_params,
        f=dst_params_file_path)


def convert_pt2gl(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  ctx):
    import mxnet as mx
    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        assert (dst_params[dst_key].shape == tuple(src_params[src_key].size())), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, tuple(src_params[src_key].size()), dst_params[dst_key].shape)
        dst_params[dst_key]._load_init(mx.nd.array(src_params[src_key].numpy(), ctx), ctx)
    dst_net.save_parameters(dst_params_file_path)


def convert_tf2tf(dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys):
    src_param_keys = [key.replace("/W:", "/kernel:") for key in src_param_keys]
    src_param_keys = [key.replace("/b:", "/bias:") for key in src_param_keys]
    src_param_keys = [key.replace("linear/", "output/") for key in src_param_keys]
    src_param_keys = [key.replace("stage", "features/stage") for key in src_param_keys]
    src_param_keys = [re.sub("^conv1/", "features/init_block/conv/", key) for key in src_param_keys]
    src_param_keys = [re.sub("^conv5/", "features/final_block/conv/", key) for key in src_param_keys]
    src_param_keys = [key.replace('/dconv_bn/', '/dconv/bn/') for key in src_param_keys]
    src_param_keys = [key.replace('/shortcut_dconv_bn/', '/shortcut_dconv/bn/') for key in src_param_keys]

    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x
                                         for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x
                                         for x in re.findall(r"[^0-9]|[0-9]+", var)])

    src_param_keys = [key.replace("/kernel:", "/W:") for key in src_param_keys]
    src_param_keys = [key.replace("/bias:", "/b:") for key in src_param_keys]
    src_param_keys = [key.replace("output/", "linear/") for key in src_param_keys]
    src_param_keys = [key.replace("features/stage", "stage") for key in src_param_keys]
    src_param_keys = [key.replace("features/init_block/conv/", 'conv1/') for key in src_param_keys]
    src_param_keys = [key.replace("features/final_block/conv/", 'conv5/') for key in src_param_keys]
    src_param_keys = [key.replace('/dconv/bn/', '/dconv_bn/') for key in src_param_keys]
    src_param_keys = [key.replace('/shortcut_dconv/bn/', '/shortcut_dconv_bn/') for key in src_param_keys]

    assert (len(src_param_keys) == len(dst_param_keys))

    import tensorflow as tf
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
            assert (src_params[src_key].shape == tuple(dst_params[dst_key].get_shape().as_list()))
            sess.run(dst_params[dst_key].assign(src_params[src_key]))
        from tensorflow_.utils import save_model_params
        save_model_params(
            sess=sess,
            file_path=dst_params_file_path)


def convert_tf2gl(dst_net,
                  dst_params_file_path,
                  dst_params,
                  dst_param_keys,
                  src_params,
                  src_param_keys,
                  ctx):
    import mxnet as mx

    src_param_keys = [key.replace("/kernel:", "/weight:") for key in src_param_keys]
    src_param_keys = [key.replace("/dw_kernel:", "/weight_dw:") for key in src_param_keys]
    src_param_keys = [key.replace("/post_activ/", "/stageN/post_activ/") for key in src_param_keys]
    src_param_keys = [key.replace("/final_block/", "/stageN/final_block/") for key in src_param_keys]
    src_param_keys = [key.replace("/stem1_unit/", "/stage0/stem1_unit/") for key in src_param_keys]
    src_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in src_param_keys]

    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x
                                         for x in re.findall(r"[^0-9]|[0-9]+", var)])
    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x
                                         for x in re.findall(r"[^0-9]|[0-9]+", var)])

    src_param_keys = [key.replace("/weight:", "/kernel:") for key in src_param_keys]
    src_param_keys = [key.replace("/weight_dw:", "/dw_kernel:") for key in src_param_keys]
    src_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in src_param_keys]
    src_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in src_param_keys]
    src_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in src_param_keys]
    src_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in src_param_keys]

    assert (len(src_param_keys) == len(dst_param_keys))

    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        src_weight = src_params[src_key]
        if len(src_weight.shape) == 4:
            if src_key.split("/")[-1][:-2] == "dw_kernel":
                dst_weight = np.transpose(src_weight, axes=(2, 3, 0, 1))
            else:
                dst_weight = np.transpose(src_weight, axes=(3, 2, 0, 1))
        elif len(src_weight.shape) == 2:
            dst_weight = np.transpose(src_weight, axes=(1, 0))
        else:
            dst_weight = src_weight
        assert (dst_weight.shape == dst_params[dst_key].shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, dst_weight.shape, dst_params[dst_key].shape)
        dst_params[dst_key]._load_init(mx.nd.array(dst_weight, ctx), ctx)

    dst_net.save_parameters(dst_params_file_path)
    src_param_keys = [key.replace("/stem2_unit/", "/stage0/stem2_unit/") for key in src_param_keys]

    src_param_keys.sort()
    src_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x
                                         for x in re.findall(r"[^0-9]|[0-9]+", var)])

    dst_param_keys.sort()
    dst_param_keys.sort(key=lambda var: ["{:10}".format(int(x)) if x.isdigit() else x
                                         for x in re.findall(r"[^0-9]|[0-9]+", var)])

    src_param_keys = [key.replace("/weight:", "/kernel:") for key in src_param_keys]
    src_param_keys = [key.replace("/weight_dw:", "/dw_kernel:") for key in src_param_keys]
    src_param_keys = [key.replace("/stageN/post_activ/", "/post_activ/") for key in src_param_keys]
    src_param_keys = [key.replace("/stageN/final_block/", "/final_block/") for key in src_param_keys]
    src_param_keys = [key.replace("/stage0/stem1_unit/", "/stem1_unit/") for key in src_param_keys]
    src_param_keys = [key.replace("/stage0/stem2_unit/", "/stem2_unit/") for key in src_param_keys]

    assert (len(src_param_keys) == len(dst_param_keys))

    for i, (src_key, dst_key) in enumerate(zip(src_param_keys, dst_param_keys)):
        src_weight = src_params[src_key]
        if len(src_weight.shape) == 4:
            if src_key.split("/")[-1][:-2] == "dw_kernel":
                dst_weight = np.transpose(src_weight, axes=(2, 3, 0, 1))
            else:
                dst_weight = np.transpose(src_weight, axes=(3, 2, 0, 1))
        elif len(src_weight.shape) == 2:
            dst_weight = np.transpose(src_weight, axes=(1, 0))
        else:
            dst_weight = src_weight
        assert (dst_weight.shape == dst_params[dst_key].shape), \
            "src_key={}, dst_key={}, src_shape={}, dst_shape={}".format(
                src_key, dst_key, dst_weight.shape, dst_params[dst_key].shape)
        dst_params[dst_key]._load_init(mx.nd.array(dst_weight, ctx), ctx)

    dst_net.save_parameters(dst_params_file_path)


def convert_tf22tfl(src_net, dst_params_file_path):
    import tensorflow as tf
    converter = tf.lite.TFLiteConverter.from_keras_model(src_net)
    tflite_model = converter.convert()
    open(dst_params_file_path, "wb").write(tflite_model)

    # batch_size = 1
    # input_shape = ((batch_size, 3, src_net.in_size[0], src_net.in_size[1]) if
    #                src_net.data_format == "channels_first" else
    #                (batch_size, src_net.in_size[0], src_net.in_size[1], 3))
    # input_data = tf.random.normal(input_shape)
    # tf_results = src_net(input_data)
    # interpreter = tf.lite.Interpreter(model_content=tflite_model)
    # interpreter.allocate_tensors()
    # input_details = interpreter.get_input_details()
    # output_details = interpreter.get_output_details()
    # input_data = np.array(np.random.random_sample(input_details[0]["shape"]), dtype=np.float32)
    # interpreter.set_tensor(input_details[0]["index"], input_data)
    # interpreter.invoke()
    # tflite_results = interpreter.get_tensor(output_details[0]["index"])
    # for tf_result, tflite_result in zip(tf_results, tflite_results):
    #     np.testing.assert_almost_equal(tf_result.numpy(), tflite_result, decimal=5)


def _init_ctx(args):
    ctx = None
    if args.src_fwk in ("gluon", "mxnet", "keras") or args.dst_fwk in ("gluon", "mxnet", "keras"):
        import mxnet as mx
        ctx = mx.cpu()
    return ctx


def _prepare_src_model(args, ctx, use_cuda):
    return prepare_src_model(
        src_fwk=args.src_fwk,
        src_model=args.src_model,
        src_params_file_path=args.src_params,
        dst_fwk=args.dst_fwk,
        ctx=ctx,
        use_cuda=use_cuda,
        load_ignore_extra=args.load_ignore_extra,
        remove_module=args.remove_module,
        num_classes=args.src_num_classes,
        in_channels=args.src_in_channels)


def _prepare_dst_model(args, ctx, use_cuda):
    return prepare_dst_model(
        dst_fwk=args.dst_fwk,
        dst_model=args.dst_model,
        src_fwk=args.src_fwk,
        ctx=ctx,
        use_cuda=use_cuda,
        num_classes=args.dst_num_classes,
        in_channels=args.dst_in_channels,
        model_type=args.model_type)


def update_and_initialize_logging(args):
    """
    Update arguments and initialize logging.

    Parameters:
    ----------
    args : ArgumentParser
        Main script arguments.
    """
    packages = []
    pip_packages = []
    if (args.src_fwk == "gluon") or (args.dst_fwk == "gluon"):
        packages += ["mxnet", "numpy"]
        pip_packages += ["mxnet-cu110", "mxnet-cu112"]
    if (args.src_fwk == "pytorch") or (args.dst_fwk == "pytorch"):
        packages += ["torch", "torchvision"]
    if (args.src_fwk == "chainer") or (args.dst_fwk == "chainer"):
        packages += ["chainer"]
        pip_packages += ["cupy-cuda110", "cupy-cuda112", "chainer"]
    if (args.src_fwk == "keras") or (args.dst_fwk == "keras"):
        packages += ["keras"]
        pip_packages += ["keras", "keras-mxnet", "mxnet-cu110", "mxnet-cu112"]
    if (args.src_fwk == "tensorflow") or (args.dst_fwk == "tensorflow"):
        packages += ["tensorflow-gpu"]
        pip_packages += ["tensorflow", "tensorflow-gpu", "tensorpack"]
    if (args.src_fwk == "tf2") or (args.dst_fwk == "tf2") or (args.dst_fwk == "tfl"):
        packages += ["tensorflow"]
        pip_packages += ["tensorflow", "tensorflow-gpu"]

    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=packages,
        log_pip_packages=pip_packages)


def main():
    args = parse_args()

    ctx = None
    use_cuda = False

    if args.dst_fwk == "tf2":
        dst_params, dst_param_keys, dst_net = _prepare_dst_model(args, ctx, use_cuda)

    update_and_initialize_logging(args=args)

    ctx = _init_ctx(args)

    src_params, src_param_keys, ext_src_param_keys, ext_src_param_keys2, src_net = \
        _prepare_src_model(args, ctx, use_cuda)

    if args.dst_fwk != "tf2":
        dst_params, dst_param_keys, dst_net = _prepare_dst_model(args, ctx, use_cuda)

    if ((args.dst_fwk in ["keras", "tensorflow", "tf2"]) and
            any([s.find("convgroup") >= 0 for s in dst_param_keys])) or \
            ((args.src_fwk == "mxnet") and (args.src_model in ["crunet56", "crunet116", "preresnet269b"])):
        assert (len(src_param_keys) <= len(dst_param_keys))
    elif ((args.dst_fwk == "chainer") and
            (args.src_model.startswith("diaresnet") or args.src_model.startswith("diapreresnet"))) or \
            args.src_model.startswith("oth_ibppose"):
        assert (len(src_param_keys) >= len(dst_param_keys))
    elif args.dst_fwk == "tfl":
        pass
    else:
        assert (len(src_param_keys) == len(dst_param_keys))

    if args.src_fwk == "gluon" and args.dst_fwk == "gluon":
        convert_gl2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            finetune=((args.src_num_classes != args.dst_num_classes) or
                      (args.src_in_channels != args.dst_in_channels)),
            src_model=args.src_model,
            ctx=ctx)
    elif args.src_fwk == "pytorch" and args.dst_fwk == "pytorch":
        convert_pt2pt(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            src_model=args.src_model,
            dst_model=args.dst_model)
    elif args.src_fwk == "gluon" and args.dst_fwk == "pytorch":
        convert_gl2pt(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "gluon" and args.dst_fwk == "chainer":
        convert_gl2ch(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            ext_src_param_keys=ext_src_param_keys,
            ext_src_param_keys2=ext_src_param_keys2,
            src_model=args.src_model)
    elif args.src_fwk == "gluon" and args.dst_fwk == "keras":
        convert_gl2ke(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "gluon" and args.dst_fwk == "tensorflow":
        convert_gl2tf(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "gluon" and args.dst_fwk == "tf2":
        convert_gl2tf2(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            src_model=args.src_model)
    elif args.src_fwk == "pytorch" and args.dst_fwk == "gluon":
        convert_pt2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            ctx=ctx)
    elif args.src_fwk == "mxnet" and args.dst_fwk == "gluon":
        convert_mx2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            src_model=args.src_model,
            ctx=ctx)
    elif args.src_fwk == "tensorflow" and args.dst_fwk == "tensorflow":
        convert_tf2tf(
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys)
    elif args.src_fwk == "tensorflow" and args.dst_fwk == "gluon":
        convert_tf2gl(
            dst_net=dst_net,
            dst_params_file_path=args.dst_params,
            dst_params=dst_params,
            dst_param_keys=dst_param_keys,
            src_params=src_params,
            src_param_keys=src_param_keys,
            ctx=ctx)
    elif args.src_fwk == "tf2" and args.dst_fwk == "tfl":
        convert_tf22tfl(
            src_net=src_net,
            dst_params_file_path=args.dst_params)
    else:
        raise NotImplementedError

    logging.info("Convert {}-model {} into {}-model {}".format(
        args.src_fwk, args.src_model, args.dst_fwk, args.dst_model))


if __name__ == '__main__':
    main()
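Both convert_tf2tf and convert_tf2gl above rely on the same digit-aware sort key to line the source and destination key lists up positionally. Pulled out on its own (a standalone sketch; only the key expression comes from the script, the surrounding names are mine), it orders 'stage2' before 'stage10' where a plain string sort would not:

import re

def natural_key(var):
    # Runs of digits are compared by value: each run is right-aligned in a
    # width-10 field, so "         2" sorts before "        10".
    return ["{:10}".format(int(x)) if x.isdigit() else x
            for x in re.findall(r"[^0-9]|[0-9]+", var)]

keys = ["features/stage10/unit1", "features/stage2/unit1"]
assert sorted(keys, key=natural_key)[0] == "features/stage2/unit1"
assert sorted(keys)[0] == "features/stage10/unit1"  # plain sort gets it wrong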
687597834631051ff823fce8248de22de124ebb5
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02553/s242742740.py
19cb61040111d626c69f31e200e94dcd971f47c1
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
78
py
a,b,c,d = map(int,input().split())
s = max(max(a*c,a*d),max(b*c,b*d))
print(s)
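Taking the max of the four corner products is enough because x*y is linear in each variable when the other is fixed, so its maximum over the whole box [a,b] x [c,d] is attained at a corner. A tiny brute-force sanity check (a sketch, separate from the submission):

from itertools import product

def corner_max(a, b, c, d):
    return max(a * c, a * d, b * c, b * d)

for a, b, c, d in product(range(-3, 4), repeat=4):
    if a <= b and c <= d:
        brute = max(x * y for x in range(a, b + 1) for y in range(c, d + 1))
        assert corner_max(a, b, c, d) == brute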
4e860abb10b550c48e16c59c692326f5f75730fa
f08177abce14672891c34c0eecc064f2b42f2441
/hydrus/client/db/ClientDBMappingsCounts.py
5df0ef9fa636429b38e960148136fbb76dc8a01a
[ "WTFPL" ]
permissive
bbappserver/hydrus-userpatch
d217006cc7691a08c11c98ddfd2415da56d6b27d
ef19e2167e24433d960a0811a93a683d62203518
refs/heads/master
2022-09-19T03:36:16.575489
2022-05-04T21:40:27
2022-05-04T21:40:27
179,595,032
0
0
null
null
null
null
UTF-8
Python
false
false
22,801
py
import collections
import sqlite3
import typing

from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusDBBase

from hydrus.client import ClientData
from hydrus.client.db import ClientDBModule
from hydrus.client.db import ClientDBServices
from hydrus.client.metadata import ClientTags


def GenerateCombinedFilesMappingsCountsCacheTableName( tag_display_type, tag_service_id ):
    if tag_display_type == ClientTags.TAG_DISPLAY_STORAGE:
        name = 'combined_files_ac_cache'
    elif tag_display_type == ClientTags.TAG_DISPLAY_ACTUAL:
        name = 'combined_files_display_ac_cache'
    suffix = str( tag_service_id )
    combined_counts_cache_table_name = 'external_caches.{}_{}'.format( name, suffix )
    return combined_counts_cache_table_name


def GenerateSpecificCountsCacheTableName( tag_display_type, file_service_id, tag_service_id ):
    if tag_display_type == ClientTags.TAG_DISPLAY_STORAGE:
        name = 'specific_ac_cache'
    elif tag_display_type == ClientTags.TAG_DISPLAY_ACTUAL:
        name = 'specific_display_ac_cache'
    suffix = '{}_{}'.format( file_service_id, tag_service_id )
    specific_counts_cache_table_name = 'external_caches.{}_{}'.format( name, suffix )
    return specific_counts_cache_table_name


class ClientDBMappingsCounts( ClientDBModule.ClientDBModule ):

    CAN_REPOPULATE_ALL_MISSING_DATA = True

    def __init__( self, cursor: sqlite3.Cursor, modules_services: ClientDBServices.ClientDBMasterServices ):
        self.modules_services = modules_services
        ClientDBModule.ClientDBModule.__init__( self, 'client mappings counts', cursor )
        self._missing_storage_tag_service_pairs = set()
        self._missing_display_tag_service_pairs = set()

    def _GetServiceTableGenerationDictSingle( self, tag_display_type, file_service_id, tag_service_id ):
        table_dict = {}
        table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
        # the version was earlier here but we updated when adding combined delete files and ipfs to these tables
        version = 465
        table_dict[ table_name ] = ( 'CREATE TABLE IF NOT EXISTS {} ( tag_id INTEGER PRIMARY KEY, current_count INTEGER, pending_count INTEGER );', version )
        return table_dict

    def _GetServiceTableGenerationDict( self, service_id ) -> dict:
        tag_service_id = service_id
        table_dict = {}
        file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES ) )
        file_service_ids.append( self.modules_services.combined_file_service_id )
        for file_service_id in file_service_ids:
            for tag_display_type in ( ClientTags.TAG_DISPLAY_STORAGE, ClientTags.TAG_DISPLAY_ACTUAL ):
                single_table_dict = self._GetServiceTableGenerationDictSingle( tag_display_type, file_service_id, tag_service_id )
                table_dict.update( single_table_dict )
        return table_dict

    def _GetServiceIdsWeGenerateDynamicTablesFor( self ):
        return self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )

    def _RepairRepopulateTables( self, table_names, cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper ):
        file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_TAG_LOOKUP_CACHES ) )
        file_service_ids.append( self.modules_services.combined_file_service_id )
        tag_service_ids = list( self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES ) )
        for tag_service_id in tag_service_ids:
            for file_service_id in file_service_ids:
                storage_table_dict_for_this = self._GetServiceTableGenerationDictSingle( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id )
                storage_table_names_for_this = set( storage_table_dict_for_this.keys() )
                if not storage_table_names_for_this.isdisjoint( table_names ):
                    self._missing_storage_tag_service_pairs.add( ( file_service_id, tag_service_id ) )
                display_table_dict_for_this = self._GetServiceTableGenerationDictSingle( ClientTags.TAG_DISPLAY_ACTUAL, file_service_id, tag_service_id )
                display_table_names_for_this = set( display_table_dict_for_this.keys() )
                if not display_table_names_for_this.isdisjoint( table_names ):
                    self._missing_display_tag_service_pairs.add( ( file_service_id, tag_service_id ) )

    def AddCounts( self, tag_display_type, file_service_id, tag_service_id, ac_cache_changes ):
        counts_cache_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
        new_tag_ids = set()
        new_local_tag_ids = set()
        for ( tag_id, current_delta, pending_delta ) in ac_cache_changes:
            self._Execute( 'INSERT OR IGNORE INTO {} ( tag_id, current_count, pending_count ) VALUES ( ?, ?, ? );'.format( counts_cache_table_name ), ( tag_id, current_delta, pending_delta ) )
            if self._GetRowCount() > 0:
                new_tag_ids.add( tag_id )
                if file_service_id == self.modules_services.combined_local_file_service_id: # and tag_service_id = all known tags
                    new_local_tag_ids.add( tag_id )
        if len( new_tag_ids ) < len( ac_cache_changes ):
            self._ExecuteMany( 'UPDATE {} SET current_count = current_count + ?, pending_count = pending_count + ? WHERE tag_id = ?;'.format( counts_cache_table_name ), ( ( num_current, num_pending, tag_id ) for ( tag_id, num_current, num_pending ) in ac_cache_changes if tag_id not in new_tag_ids ) )
        return ( new_tag_ids, new_local_tag_ids )

    def ClearCounts( self, tag_display_type, file_service_id, tag_service_id, keep_current = False, keep_pending = False ):
        table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
        if keep_current:
            self._Execute( 'UPDATE {} SET pending_count = 0 WHERE pending_count > 0;'.format( table_name ) )
            self._Execute( 'DELETE FROM {} WHERE current_count = 0 AND pending_count = 0;'.format( table_name ) )
        elif keep_pending:
            self._Execute( 'UPDATE {} SET current_count = 0 WHERE current_count > 0;'.format( table_name ) )
            self._Execute( 'DELETE FROM {} WHERE current_count = 0 AND pending_count = 0;'.format( table_name ) )
        else:
            self._Execute( 'DELETE FROM {};'.format( table_name ) )

    def CreateTables( self, tag_display_type, file_service_id, tag_service_id, populate_from_storage = False ):
        table_generation_dict = self._GetServiceTableGenerationDictSingle( tag_display_type, file_service_id, tag_service_id )
        for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items():
            self._Execute( create_query_without_name.format( table_name ) )
        #
        if tag_display_type == ClientTags.TAG_DISPLAY_ACTUAL and populate_from_storage:
            display_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
            storage_table_name = self.GetCountsCacheTableName( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id )
            self._Execute( 'INSERT OR IGNORE INTO {} ( tag_id, current_count, pending_count ) SELECT tag_id, current_count, pending_count FROM {};'.format( display_table_name, storage_table_name ) )

    def DropTables( self, tag_display_type, file_service_id, tag_service_id ):
        table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
        self._Execute( 'DROP TABLE IF EXISTS {};'.format( table_name ) )

    def FilterExistingTagIds( self, tag_display_type, file_service_id, tag_service_id, tag_ids_table_name ):
        counts_cache_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
        return self._STS( self._Execute( 'SELECT tag_id FROM {} CROSS JOIN {} USING ( tag_id );'.format( tag_ids_table_name, counts_cache_table_name ) ) )

    def GetAutocompleteCountEstimate( self, tag_display_type: int, tag_service_id: int, file_service_id: int, tag_ids: typing.Collection[ int ], include_current_tags: bool, include_pending_tags: bool ):
        count = 0
        if not include_current_tags and not include_pending_tags:
            return count
        ( current_count, pending_count ) = self.GetAutocompleteCountEstimateStatuses( tag_display_type, tag_service_id, file_service_id, tag_ids )
        if include_current_tags:
            count += current_count
        if include_pending_tags:
            count += pending_count
        return count

    def GetAutocompleteCountEstimateStatuses( self, tag_display_type: int, tag_service_id: int, file_service_id: int, tag_ids: typing.Collection[ int ] ):
        include_current_tags = True
        include_pending_tags = True
        ids_to_count = self.GetCounts( tag_display_type, tag_service_id, file_service_id, tag_ids, include_current_tags, include_pending_tags )
        current_count = 0
        pending_count = 0
        for ( current_min, current_max, pending_min, pending_max ) in ids_to_count.values():
            current_count += current_min
            pending_count += pending_min
        return ( current_count, pending_count )

    def GetCounts( self, tag_display_type, tag_service_id, file_service_id, tag_ids, include_current, include_pending, domain_is_cross_referenced = True, zero_count_ok = False, job_key = None, tag_ids_table_name = None ):
        if len( tag_ids ) == 0:
            return {}
        if tag_service_id == self.modules_services.combined_tag_service_id and file_service_id == self.modules_services.combined_file_service_id:
            ids_to_count = {}
            return ids_to_count
        if tag_service_id == self.modules_services.combined_tag_service_id:
            search_tag_service_ids = self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
        else:
            search_tag_service_ids = [ tag_service_id ]
        cache_results = []
        if len( tag_ids ) > 1:
            if tag_ids_table_name is None:
                with self._MakeTemporaryIntegerTable( tag_ids, 'tag_id' ) as temp_tag_id_table_name:
                    for search_tag_service_id in search_tag_service_ids:
                        if job_key is not None and job_key.IsCancelled():
                            return {}
                        cache_results.extend( self.GetCountsForTags( tag_display_type, file_service_id, search_tag_service_id, temp_tag_id_table_name ) )
            else:
                for search_tag_service_id in search_tag_service_ids:
                    if job_key is not None and job_key.IsCancelled():
                        return {}
                    cache_results.extend( self.GetCountsForTags( tag_display_type, file_service_id, search_tag_service_id, tag_ids_table_name ) )
        else:
            ( tag_id, ) = tag_ids
            for search_tag_service_id in search_tag_service_ids:
                cache_results.extend( self.GetCountsForTag( tag_display_type, file_service_id, search_tag_service_id, tag_id ) )
        #
        ids_to_count = {}
        for ( tag_id, current_count, pending_count ) in cache_results:
            if not include_current:
                current_count = 0
            if not include_pending:
                pending_count = 0
            if current_count == 0 and pending_count == 0 and not zero_count_ok:
                continue
            current_max = current_count
            pending_max = pending_count
            if domain_is_cross_referenced:
                # file counts are perfectly accurate
                current_min = current_count
                pending_min = pending_count
            else:
                # for instance this is a search for 'my files' deleted files, but we are searching on 'all deleted files' domain
                current_min = 0
                pending_min = 0
            if tag_id in ids_to_count:
                ( existing_current_min, existing_current_max, existing_pending_min, existing_pending_max ) = ids_to_count[ tag_id ]
                ( current_min, current_max ) = ClientData.MergeCounts( existing_current_min, existing_current_max, current_min, current_max )
                ( pending_min, pending_max ) = ClientData.MergeCounts( existing_pending_min, existing_pending_max, pending_min, pending_max )
            ids_to_count[ tag_id ] = ( current_min, current_max, pending_min, pending_max )
        if zero_count_ok:
            for tag_id in tag_ids:
                if tag_id not in ids_to_count:
                    ids_to_count[ tag_id ] = ( 0, 0, 0, 0 )
        return ids_to_count

    def GetCountsCacheTableName( self, tag_display_type, file_service_id, tag_service_id ):
        if file_service_id == self.modules_services.combined_file_service_id:
            counts_cache_table_name = GenerateCombinedFilesMappingsCountsCacheTableName( tag_display_type, tag_service_id )
        else:
            counts_cache_table_name = GenerateSpecificCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
        return counts_cache_table_name

    def GetCountsEstimate( self, tag_display_type: int, tag_service_id: int, file_service_id: int, tag_ids: typing.Collection[ int ], include_current_tags: bool, include_pending_tags: bool ):
        ids_to_count = collections.Counter()
        if not include_current_tags and not include_pending_tags:
            return ids_to_count
        ids_to_count_statuses = self.GetCountsEstimateStatuses( tag_display_type, tag_service_id, file_service_id, tag_ids )
        for ( tag_id, ( current_count, pending_count ) ) in ids_to_count_statuses.items():
            count = 0
            if include_current_tags:
                count += current_count
            if include_pending_tags:
                count += pending_count
            ids_to_count[ tag_id ] = count
        return ids_to_count

    def GetCountsEstimateStatuses( self, tag_display_type: int, tag_service_id: int, file_service_id: int, tag_ids: typing.Collection[ int ] ):
        include_current_tags = True
        include_pending_tags = True
        ids_to_count_full = self.GetCounts( tag_display_type, tag_service_id, file_service_id, tag_ids, include_current_tags, include_pending_tags )
        ids_to_count_statuses = collections.defaultdict( lambda: ( 0, 0 ) )
        for ( tag_id, ( current_min, current_max, pending_min, pending_max ) ) in ids_to_count_full.items():
            ids_to_count_statuses[ tag_id ] = ( current_min, pending_min )
        return ids_to_count_statuses

    def GetCountsForTag( self, tag_display_type, file_service_id, tag_service_id, tag_id ):
        counts_cache_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
        return self._Execute( 'SELECT tag_id, current_count, pending_count FROM {} WHERE tag_id = ?;'.format( counts_cache_table_name ), ( tag_id, ) ).fetchall()

    def GetCountsForTags( self, tag_display_type, file_service_id, tag_service_id, temp_tag_id_table_name ):
        counts_cache_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
        # temp tags to counts
        return self._Execute( 'SELECT tag_id, current_count, pending_count FROM {} CROSS JOIN {} USING ( tag_id );'.format( temp_tag_id_table_name, counts_cache_table_name ) ).fetchall()

    def GetCurrentPendingPositiveCountsAndWeights( self, tag_display_type, file_service_id, tag_service_id, tag_ids, tag_ids_table_name = None ):
        include_current = True
        include_pending = True
        ids_to_count = self.GetCounts( tag_display_type, tag_service_id, file_service_id, tag_ids, include_current, include_pending, tag_ids_table_name = tag_ids_table_name )
        current_tag_ids = set()
        current_tag_weight = 0
        pending_tag_ids = set()
        pending_tag_weight = 0
        for ( tag_id, ( current_min, current_max, pending_min, pending_max ) ) in ids_to_count.items():
            if current_min > 0:
                current_tag_ids.add( tag_id )
                current_tag_weight += current_min
            if pending_min > 0:
                pending_tag_ids.add( tag_id )
                pending_tag_weight += pending_min
        return ( current_tag_ids, current_tag_weight, pending_tag_ids, pending_tag_weight )

    def GetMissingTagCountServicePairs( self ):
        return ( self._missing_storage_tag_service_pairs, self._missing_display_tag_service_pairs )

    def GetQueryPhraseForCurrentTagIds( self, tag_display_type, file_service_id, tag_service_id ):
        counts_cache_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
        return 'SELECT tag_id FROM {} WHERE current_count > 0'.format( counts_cache_table_name )

    def GetTablesAndColumnsThatUseDefinitions( self, content_type: int ) -> typing.List[ typing.Tuple[ str, str ] ]:
        tables_and_columns = []
        if content_type == HC.CONTENT_TYPE_TAG:
            table_dict = self._GetServicesTableGenerationDict()
            for table_name in table_dict.keys():
                tables_and_columns.append( ( table_name, 'tag_id' ) )
        return tables_and_columns

    def GetTotalCurrentCount( self, tag_display_type, file_service_id, tag_service_id ):
        counts_cache_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
        result = self._Execute( 'SELECT SUM( current_count ) FROM {};'.format( counts_cache_table_name ) ).fetchone()
        if result is None or result[0] is None:
            count = 0
        else:
            ( count, ) = result
        return count

    def ReduceCounts( self, tag_display_type, file_service_id, tag_service_id, ac_cache_changes ):
        # this takes positive counts, despite ultimately being a reduce guy
        counts_cache_table_name = self.GetCountsCacheTableName( tag_display_type, file_service_id, tag_service_id )
        deleted_tag_ids = set()
        deleted_local_tag_ids = set()
        for ( tag_id, current_delta, pending_delta ) in ac_cache_changes:
            self._Execute( 'DELETE FROM {} WHERE tag_id = ? AND current_count = ? AND pending_count = ?;'.format( counts_cache_table_name ), ( tag_id, current_delta, pending_delta ) )
            if self._GetRowCount() > 0:
                deleted_tag_ids.add( tag_id )
                if file_service_id == self.modules_services.combined_local_file_service_id: # and tag_service_id = all known tags
                    deleted_local_tag_ids.add( tag_id )
        if len( deleted_tag_ids ) < len( ac_cache_changes ):
            self._ExecuteMany( 'UPDATE {} SET current_count = current_count - ?, pending_count = pending_count - ? WHERE tag_id = ?;'.format( counts_cache_table_name ), ( ( current_delta, pending_delta, tag_id ) for ( tag_id, current_delta, pending_delta ) in ac_cache_changes if tag_id not in deleted_tag_ids ) )
        return ( deleted_tag_ids, deleted_local_tag_ids )
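AddCounts and ReduceCounts above share an 'INSERT OR IGNORE, then UPDATE whatever already existed' pattern, using the affected-row count to split new rows from existing ones. A minimal standalone sketch of the same idea on plain sqlite3 (the table and helper are illustrative, not hydrus's real module plumbing):

import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE ac_cache ( tag_id INTEGER PRIMARY KEY, current_count INTEGER, pending_count INTEGER );')

def add_counts(changes):
    # changes: iterable of ( tag_id, current_delta, pending_delta )
    for tag_id, current_delta, pending_delta in changes:
        inserted = con.execute(
            'INSERT OR IGNORE INTO ac_cache ( tag_id, current_count, pending_count ) VALUES ( ?, ?, ? );',
            (tag_id, current_delta, pending_delta)).rowcount
        if inserted == 0:  # the row already existed, so bump it instead
            con.execute(
                'UPDATE ac_cache SET current_count = current_count + ?, pending_count = pending_count + ? WHERE tag_id = ?;',
                (current_delta, pending_delta, tag_id))

add_counts([(1, 2, 0), (1, 3, 1)])
assert con.execute('SELECT current_count, pending_count FROM ac_cache WHERE tag_id = 1;').fetchone() == (5, 1)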
1265bb8736bd9b11afc120fcc3bdcb77428869ec
29a4c1e436bc90deaaf7711e468154597fc379b7
/modules/ieee/doc2/nextpow2.py
7c5fcf15c55e96875561be4f21550ed813ecbc7a
[ "BSL-1.0" ]
permissive
brycelelbach/nt2
31bdde2338ebcaa24bb76f542bd0778a620f8e7c
73d7e8dd390fa4c8d251c6451acdae65def70e0b
refs/heads/master
2021-01-17T12:41:35.021457
2011-04-03T17:37:15
2011-04-03T17:37:15
1,263,345
1
0
null
null
null
null
UTF-8
Python
false
false
1,923
py
[{'functor': {'arity': '1',
              'call_types': [],
              'ret_arity': '0',
              'rturn': {'default': 'typename nt2::meta::as_integer<typename boost::result_of<nt2::meta::floating(T)>::type, signed>::type'},
              'type_defs': [],
              'types': ['real_', 'unsigned_int_', 'signed_int_']},
  'unit': {'global_header': {'first_stamp': 'modified by jt the 04/12/2010',
                             'included': [],
                             'notes': [],
                             'ranges': {'real_': [['T(-10)', 'T(10)']],
                                        'signed_int_': [['-100', '100']],
                                        'unsigned_int_': [['0', '100']]},
                             'specific_values': {'default': {},
                                                 'real_': {'nt2::Inf<T>()': 'nt2::Zero<r_t>()',
                                                           'nt2::Minf<T>()': 'nt2::Zero<r_t>()',
                                                           'nt2::Mone<T>()': 'nt2::Zero<r_t>()',
                                                           'nt2::One<T>()': 'nt2::Zero<r_t>()',
                                                           'nt2::Zero<T>()': 'nt2::Zero<r_t>()'},
                                                 'signed_int_': {'nt2::Mone<T>()': 'nt2::Zero<r_t>()',
                                                                 'nt2::One<T>()': 'nt2::Zero<r_t>()',
                                                                 'nt2::Zero<T>()': 'nt2::Zero<r_t>()'},
                                                 'unsigned_int_': {'nt2::One<T>()': 'nt2::Zero<r_t>()',
                                                                   'nt2::Zero<T>()': 'nt2::Zero<r_t>()'}},
                             'stamp': 'modified by jt the 12/12/2010',
                             'verif_test': {}}},
  'version': '0.1'}]
969ff18c3b0c3ebd06ccfc2dc0dfe97216e6a725
6a47ec6800610ea93479f91505e73a3eb4f34ae0
/user/serviced.py
74e25a9df84b86e320e670d436afb861e42769b5
[]
no_license
risification/queue_project
1158aac7bae3b04f98c106c23c27281c96bcaf41
e85f9f2d1835f10a0247a569f88d4cb29803538a
refs/heads/master
2023-04-26T08:05:25.573243
2021-06-07T21:03:47
2021-06-07T21:03:47
374,119,444
0
0
null
null
null
null
UTF-8
Python
false
false
579
py
from django.contrib.auth.models import User
from django.core.mail import EmailMessage


def mailing(username):
    email_list = []
    obj = User.objects.filter(is_superuser=True)
    for user in obj:
        email_list.append(user.email)
    subjects = 'hi'
    body = f'User with username {username} registered in the database, please check them!'
    email = EmailMessage(subject=subjects, body=body, to=email_list)
    email.send()


def validate_password(password):
    # A string can never be all digits and all letters at once, so check for
    # at least one of each alongside the minimum length.
    has_digit = any(ch.isdigit() for ch in password)
    has_alpha = any(ch.isalpha() for ch in password)
    return len(password) >= 8 and has_digit and has_alpha
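With that fix, validate_password accepts only strings of eight or more characters that mix letters and digits; a quick sanity check:

assert validate_password('abc12345')
assert not validate_password('12345678')  # digits only
assert not validate_password('abcdefgh')  # letters only
assert not validate_password('a1')        # too short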
5a4d8c674b599a2c01fdc8fd795bf0ea39b3d9b4
0ddcfcbfc3faa81c79e320c34c35a972dab86498
/puzzles/orderly_queue.py
a373cf1f75c26e6261bdd30af8d0855a2660bb45
[]
no_license
IvanWoo/coding-interview-questions
3311da45895ac4f3c394b22530079c79a9215a1c
1312305b199b65a11804a000432ebe28d1fba87e
refs/heads/master
2023-08-09T19:46:28.278111
2023-06-21T01:47:07
2023-06-21T01:47:07
135,307,912
0
0
null
2023-07-20T12:14:38
2018-05-29T14:24:43
Python
UTF-8
Python
false
false
1,048
py
# https://leetcode.com/problems/orderly-queue/
"""
You are given a string s and an integer k. You can choose one of the first k
letters of s and append it at the end of the string.

Return the lexicographically smallest string you could have after applying
the mentioned step any number of times.

Example 1:

Input: s = "cba", k = 1
Output: "acb"
Explanation:
In the first move, we move the 1st character 'c' to the end, obtaining the string "bac".
In the second move, we move the 1st character 'b' to the end, obtaining the final result "acb".

Example 2:

Input: s = "baaca", k = 3
Output: "aaabc"
Explanation:
In the first move, we move the 1st character 'b' to the end, obtaining the string "aacab".
In the second move, we move the 3rd character 'c' to the end, obtaining the final result "aaabc".

Constraints:

1 <= k <= s.length <= 1000
s consists of lowercase English letters.
"""


def orderly_queue(s: str, k: int) -> str:
    if k == 1:
        return min([s[i:] + s[:i] for i in range(len(s))])
    else:
        return "".join(sorted(s))
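The k >= 2 branch can simply sort because, once either of the first two characters may be moved to the back, any adjacent swap becomes reachable (up to rotation), so every permutation of s is reachable and the sorted string is the smallest; with k == 1 only the n rotations are reachable, hence the min over rotations. The statement's own examples double as a quick check:

assert orderly_queue("cba", 1) == "acb"
assert orderly_queue("baaca", 3) == "aaabc"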
7267956f1f7b465699fb043dc755525ce97b5ccf
2c73882fc59ca85f4854a43bcda8cc9edd282b8d
/polls_api/views.py
2664dfc220c7e377fed156deed7d18e979f75115
[]
no_license
mjstealth/guide-to-backbonejs-with-django
540236f3535ee171c3aa4c43a1be9394a8a7e4bc
e7d5016c800e1e0e282da0386cc6112d4eed63c1
refs/heads/master
2021-01-17T22:40:28.191509
2012-09-04T22:17:12
2012-09-04T22:17:12
5,679,419
1
0
null
null
null
null
UTF-8
Python
false
false
990
py
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse

from djangorestframework import views
from djangorestframework.response import Response

from polls.forms import PollForm
from polls.models import Poll

from .resources import PollResource


class PollResults (views.View):
    def get(self, request, poll_id):
        poll = get_object_or_404(Poll.objects.all(), pk=poll_id)
        results = PollResource().serialize(poll)
        return results


class PollVotes (views.View):
    def post(self, request, poll_id):
        poll = get_object_or_404(Poll.objects.all(), pk=poll_id)

        form = PollForm(request.POST, instance=poll)
        if form.is_valid():
            form.save()
        else:
            return Response(content=form.errors, status=400)

        return Response(status=303, headers={'Location': reverse('polls_api_results', args=[poll_id])})


poll_results_view = PollResults.as_view()
poll_votes_view = PollVotes.as_view()
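A rough sketch of exercising the vote-then-redirect flow with Django's test client; the URL path and form field below are illustrative guesses, only the 303-to-results behaviour comes from the views above:

from django.test import Client

client = Client()
response = client.post('/api/polls/1/votes/', {'choice': '2'})  # hypothetical path and field
assert response.status_code == 303
results = client.get(response['Location'])  # fetches the poll's serialized results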
42a8dac1509c16a1f9ee4746a23db2e89449bf64
11d265eba2ced9de43c339e4014c779b521320cd
/accounts/migrations/0004_auto_20200423_2253.py
eccb31bc3dd7e0a1872e9574429fc5cdc2edd129
[]
no_license
Sloshpit/budget_old
d9271de625cd7e3aa66ccbec501b005e50cd2812
a5603996b026542adb3bc8c578c03bcb843bea01
refs/heads/master
2022-04-23T08:42:43.377827
2020-04-25T14:40:39
2020-04-25T14:40:39
null
0
0
null
null
null
null
UTF-8
Python
false
false
606
py
# Generated by Django 3.0.5 on 2020-04-24 02:53

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('accounts', '0003_auto_20200423_2251'),
    ]

    operations = [
        migrations.RenameField(
            model_name='account',
            old_name='transaction_date',
            new_name='balance_date',
        ),
        migrations.RemoveField(
            model_name='account',
            name='transaction',
        ),
        migrations.RemoveField(
            model_name='account',
            name='transaction_amount',
        ),
    ]
f71387df36af9f3c0cb4897aa762c93b0ccbdb5f
3f60b999ea7bda83c9586f75f52463dc20337f24
/sensitive_user_portrait/weibo_rank/Offline_task.py
de919db3a2449e8b9e35b521386aa9943040a4ae
[]
no_license
jianjian0dandan/sensitive_user_portrait
629e49ce71db92b50634bac9c828811cdb5381e9
cacc30267ebc0e621b1d48d4f1206277a0f48123
refs/heads/master
2021-01-20T23:18:07.138057
2016-05-22T12:09:40
2016-05-22T12:09:40
42,869,287
0
0
null
2015-09-21T13:55:12
2015-09-21T13:55:11
null
UTF-8
Python
false
false
6,454
py
#-*-coding: utf-8 -*-
import datetime
import json
import time as TIME

from elasticsearch import Elasticsearch
from time_utils import ts2datetime, datetime2ts, ts2date
from global_utils import es_user_portrait as es

WEIBO_RANK_KEYWORD_TASK_INDEX = 'weibo_rank_keyword_task'
WEIBO_RANK_KEYWORD_TASK_TYPE = 'weibo_rank_task'
MAX_ITEMS = 2 ** 10


def add_task(user_name, type="keyword", range="all", pre='flow_text_', during='1',
             start_time='2013-09-07', end_time='2013-09-07', keyword='hello,world',
             sort_norm='reposts_count', sort_scope='all_limit_keyword', time=1,
             isall=False, number=100):
    # `isall` was referenced in the body but never defined; it is exposed as a
    # keyword parameter here (assumed default) so the function can run.
    time_now = int(TIME.time())
    task_id = user_name + "-" + str(time_now)

    tmp_list = keyword.split(',')
    keyword_list = []
    for item in tmp_list:
        if item:
            keyword_list.append(item)

    body_json = {
        'submit_user': user_name,
        'keyword': json.dumps(keyword_list),
        'keyword_string': "&".join(keyword_list),
        'submit_time': ts2datetime(time_now),
        'create_time': time_now,
        'end_time': datetime2ts(end_time),
        'search_type': type,
        'status': 0,
        'range': range,
        'user_ts': user_name + '-' + str(time_now),
        'pre': pre,
        'during': during,
        'start_time': datetime2ts(start_time),
        'sort_norm': sort_norm,
        'sort_scope': sort_scope,
        'time': time,
        'isall': isall,
        'number': number
    }
    es.index(index=WEIBO_RANK_KEYWORD_TASK_INDEX, doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE,
             id=task_id, body=body_json)
    return body_json["user_ts"]


def search_weibo_task(user_name):
    c_result = {}
    query = {
        "query": {"bool": {"must": [{"term": {"user_rank_task.submit_user": user_name}}]}},
        "size": MAX_ITEMS,
        "sort": [{"create_time": {"order": "desc"}}],
        "fields": ["status", "search_type", "keyword", "submit_user", "sort_scope", "sort_norm",
                   "start_time", "user_ts", "end_time", "create_time", 'number']
    }

    return_list = []
    result = es.search(index=WEIBO_RANK_KEYWORD_TASK_INDEX, doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE,
                       body=query)['hits']
    c_result['flag'] = True
    for item in result['hits']:
        result_temp = {}
        result_temp['submit_user'] = item['fields']['submit_user'][0]
        result_temp['search_type'] = item['fields']['search_type'][0]
        result_temp['keyword'] = json.loads(item['fields']['keyword'][0])
        result_temp['sort_scope'] = item['fields']['sort_scope'][0]
        result_temp['sort_norm'] = item['fields']['sort_norm'][0]
        result_temp['start_time'] = ts2datetime(item['fields']['start_time'][0])
        result_temp['end_time'] = ts2datetime(item['fields']['end_time'][0])
        result_temp['status'] = item['fields']['status'][0]
        result_temp['create_time'] = ts2date(item['fields']['create_time'][0])
        result_temp['search_id'] = item['fields']['user_ts'][0]
        tmp = item['fields'].get('number', 0)
        if tmp:
            result_temp['number'] = int(tmp[0])
        else:
            result_temp['number'] = 100
        return_list.append(result_temp)
    c_result['data'] = return_list
    return c_result


def getResult(search_id):
    item = es.get(index=WEIBO_RANK_KEYWORD_TASK_INDEX, doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE, id=search_id)
    try:
        result_obj = {}
        result_obj['keyword'] = json.loads(item['_source']['keyword'])
        result_obj['sort_scope'] = item['_source']['sort_scope']
        result_obj['sort_norm'] = item['_source']['sort_norm']
        result_obj['start_time'] = ts2datetime(item['_source']['start_time'])
        result_obj['end_time'] = ts2datetime(item['_source']['end_time'])
        result_obj['result'] = json.loads(item['_source']['result'])
        result_obj['text_results'] = json.loads(item['_source']['text_results'])
        result_obj['number'] = item['_source']['number']
        return result_obj
    except Exception:
        return []


def delOfflineTask(search_id):
    es.delete(index=WEIBO_RANK_KEYWORD_TASK_INDEX, doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE, id=search_id)
    return True


def sort_task(user, keyword, status, start_time, end_time, submit_time):
    query_body = {
        "query": {
            "filtered": {
                "filter": {
                    "bool": {
                        "must": [
                            {"term": {"submit_user": user}}
                        ]
                    }
                }
            }
        },
        "size": 10000,
        "sort": {"submit_time": {"order": "desc"}}
    }

    query_list = []
    if keyword:
        keyword_list = keyword.split(',')
        query_list.append({"terms": {"keyword_string": keyword_list}})
    if status != 2:
        query_list.append({"term": {"status": status}})
    if start_time and end_time:
        start_ts = datetime2ts(start_time)
        end_ts = datetime2ts(end_time)
        query_list.append({"range": {"start_time": {"gte": start_ts, "lte": end_ts}}})
        query_list.append({"range": {"end_time": {"gte": start_ts, "lte": end_ts}}})
    if submit_time:
        query_list.append({"term": {"submit_time": submit_time}})

    if query_list:
        query_body["query"]["filtered"]["filter"]["bool"]["must"].extend(query_list)
    #print query_body

    search_results = es.search(index=WEIBO_RANK_KEYWORD_TASK_INDEX, doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE,
                               body=query_body)["hits"]["hits"]

    results = []
    if search_results:
        for item in search_results:
            iter_item = item['_source']
            tmp = []
            tmp.append(iter_item['search_type'])
            tmp.append(json.loads(iter_item['keyword']))
            tmp.append(ts2datetime(iter_item['start_time']))
            tmp.append(ts2datetime(iter_item['end_time']))
            tmp.append(iter_item['range'])
            tmp.append(ts2date(iter_item['create_time']))
            tmp.append(iter_item['status'])
            tmp.append(iter_item['sort_norm'])
            tmp.append(iter_item['sort_scope'])
            tmp.append(item['_id'])  # task_name
            results.append(tmp)
    return results


if __name__ == "__main__":
    # `search_task` is not defined in this module; `sort_task` matches the call signature.
    print sort_task("[email protected]", [], 0, '', '', '2016-04-12')
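For reference, a call such as sort_task('u1', 'a,b', 0, '2016-04-01', '2016-04-10', '') extends the must list roughly like this (a sketch of the structure assembled above, not captured output; start_ts and end_ts stand for the datetime2ts conversions):

example_must = [
    {"term": {"submit_user": "u1"}},
    {"terms": {"keyword_string": ["a", "b"]}},
    {"term": {"status": 0}},
    {"range": {"start_time": {"gte": start_ts, "lte": end_ts}}},
    {"range": {"end_time": {"gte": start_ts, "lte": end_ts}}},
]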
43bf411f069beff4b058247c875c82e5f19f01bc
4b1965b3d831ab54998973afb26f4327ed010336
/info/user/views.py
4edbf7fa25d4329141c2449cb244798b16174185
[]
no_license
yc12192057/information11_mm
7d353dfe61962eb0bd2c29b7f0b54a2a62953262
2e4052d130b200797aa8a57a0d37f8267d523a8b
refs/heads/master
2020-03-21T10:22:23.558714
2018-06-24T02:34:25
2018-06-24T02:34:25
138,447,718
0
0
null
null
null
null
UTF-8
Python
false
false
6,383
py
from flask import current_app
from flask import g
from flask import request
from flask import session
from flask import render_template, redirect, jsonify

from info import constants
from info import db
from info.models import Category, News
from info.utils.common import user_login_data
from info.utils.image_storage import storage
from info.utils.response_code import RET
from . import profile_blue


@profile_blue.route("/news_list")
@user_login_data
def news_list():
    page = request.args.get("p", 1)
    try:
        page = int(page)
    except Exception as e:
        current_app.logger.error(e)
        page = 1

    user = g.user
    paginate = News.query.filter(News.user_id == user.id).paginate(page, 2, False)
    items = paginate.items
    current_page = paginate.page
    total_page = paginate.pages

    news_list = []
    for item in items:
        news_list.append(item.to_review_dict())

    data = {
        "current_page": current_page,
        "total_page": total_page,
        "news_list": news_list
    }
    return render_template("news/user_news_list.html", data=data)


@profile_blue.route("/news_release", methods=["GET", "POST"])
@user_login_data
def news_release():
    if request.method == "GET":
        # First fetch the news categories, then pass them to the template for display
        category_list = Category.query.all()
        categorys = []
        for category in category_list:
            categorys.append(category.to_dict())
        # Drop the element at index 0 (the placeholder category) from the list
        categorys.pop(0)
        data = {
            "categories": categorys
        }
        return render_template("news/user_news_release.html", data=data)

    # Get the data submitted through the form: the news the user wants to publish
    title = request.form.get("title")
    category_id = request.form.get("category_id")
    digest = request.form.get("digest")
    index_image = request.files.get("index_image")
    content = request.form.get("content")
    if not all([title, category_id, digest, index_image, content]):
        return jsonify(errno=RET.PARAMERR, errmsg="Invalid parameters")

    user = g.user
    index_image = index_image.read()
    key = storage(index_image)

    # Once the user has published, store the newly published news in the database
    news = News()
    news.title = title
    news.source = "Personal source"
    news.digest = digest
    news.content = content
    news.index_image_url = constants.QINIU_DOMIN_PREFIX + key
    news.category_id = category_id
    news.user_id = user.id
    # Status 1 means the news is still under review
    news.status = 1
    db.session.add(news)
    db.session.commit()
    return jsonify(errno=RET.OK, errmsg="Published successfully")


@profile_blue.route("/collection")
@user_login_data
def collection():
    # These are all the news items the user has collected; fetching them is
    # paginated, so we start from page 1
    page = request.args.get("p", 1)
    try:
        page = int(page)
    except Exception as e:
        current_app.logger.error(e)
        page = 1

    user = g.user
    # Get the full list of news collected by the currently logged-in user
    # First paginate() argument: the page number
    # Second argument: how many items each page holds
    paginate = user.collection_news.paginate(page, 10, False)
    items = paginate.items
    current_page = paginate.page
    total_page = paginate.pages

    collections = []
    for item in items:
        collections.append(item.to_dict())

    data = {
        "collections": collections,
        "current_page": current_page,
        "total_page": total_page,
    }
    return render_template("news/user_collection.html", data=data)


"""Change password"""
@profile_blue.route("/pass_info", methods=["GET", "POST"])
@user_login_data
def pass_info():
    if request.method == "GET":
        return render_template("news/user_pass_info.html")

    user = g.user
    old_password = request.json.get("old_password")
    new_password = request.json.get("new_password")
    if not all([old_password, new_password]):
        return jsonify(errno=RET.PARAMERR, errmsg="Please enter a password")

    # Verify the old password; the new one may only be set if the old one is correct
    if not user.check_password(old_password):
        return jsonify(errno=RET.PARAMERR, errmsg="Old password is incorrect")

    # The old password is correct, so update the database directly
    user.password = new_password
    db.session.commit()
    return jsonify(errno=RET.OK, errmsg="Password changed successfully")


@profile_blue.route("/pic_info", methods=["GET", "POST"])
@user_login_data
def pic_info():
    user = g.user
    if request.method == "GET":
        data = {
            "user_info": user.to_dict() if user else None
        }
        return render_template("news/user_pic_info.html", data=data)

    avatar = request.files.get("avatar").read()
    # On success, storage returns a URL fragment, also called a key.
    # To view the image just uploaded in a browser, prepend the Qiniu
    # domain to the returned key:
    # http://oyucyko3w.bkt.clouddn.com/ + url
    url = storage(avatar)
    user.avatar_url = url
    db.session.commit()
    return jsonify(errno=RET.OK, errmsg="Upload succeeded", data={"avatar_url": constants.QINIU_DOMIN_PREFIX + url})


"""Edit basic profile info"""
@profile_blue.route("/base_info", methods=["GET", "POST"])
@user_login_data
def base_info():
    user = g.user
    if request.method == "GET":
        data = {
            "user_info": user.to_dict() if user else None
        }
        return render_template("news/user_base_info.html", data=data)

    nick_name = request.json.get("nick_name")
    signature = request.json.get("signature")
    gender = request.json.get("gender")

    user.nick_name = nick_name
    user.signature = signature
    user.gender = gender
    # Update the database
    db.session.commit()
    # Update the data kept in the session
    session["nick_name"] = user.nick_name
    return jsonify(errno=RET.OK, errmsg="Updated successfully")


@profile_blue.route("/info")
@user_login_data
def info():
    user = g.user
    if not user:
        # Redirect back to the home page
        return redirect("/")
    data = {
        "user_info": user.to_dict() if user else None
    }
    return render_template("news/user.html", data=data)
1fcb9a5bc116b70cacd5ddbd3646b35b3f6e0d8c
e0527bce5c53a196752d3a16adf50cb60754de5f
/05-How to Repeat Actions demos/02-dotty_dots.py
47bb00a38d29385492c81d3cb4b98ea027472cab
[]
no_license
ARWA-ALraddadi/python-tutorial-for-beginners
ddeb657f419fbc176bea273bc9fb6b88d1894191
21cedfc47871ca4d25c2382464c60ab0a2121205
refs/heads/master
2023-06-30T20:24:30.688800
2021-08-08T08:22:29
2021-08-08T08:22:29
193,094,651
0
0
null
null
null
null
UTF-8
Python
false
false
1,825
py
#---------------------------------------------------------------------
#
# Dotty dots - Repeating actions with minor variations
#
# Up until now the only repetition we've seen has been the same action
# done many times. This simple demonstration shows how actions can
# be repeated with minor variations for each different value in a
# list.
#
# The program simply draws a grid of multi-coloured dots. Experiment
# with the code to produce different patterns!
#

# Some useful constant values, all in pixels
canvas_size = 600
max_coord = 250
grid_size = 20
dot_size = 15

# Set up a drawing canvas with a black background
from turtle import *
setup(canvas_size, canvas_size)
title("Dotty dots")
bgcolor('black')

# Set up some drawing characteristics
penup()
speed('fastest')

# Define a list of colours
column_colours = ['red', 'green', 'blue', 'yellow', 'white',
                  'orange', 'aqua', 'olive', 'misty rose', 'salmon',
                  'spring green', 'fuchsia', 'deep sky blue', 'silver',
                  'aquamarine', 'orange red', 'seashell', 'chocolate',
                  'light steel blue', 'tomato', 'chartreuse', 'bisque',
                  'dark orchid', 'powder blue', 'gainsboro']

# Determine how many rows we can fit between the maximum
# and minimum y-coords, separated by the given grid size
number_of_rows = max_coord * 2 // grid_size

# Do the same action multiple times, with the only
# difference being the row number
for row_number in range(number_of_rows):

    # Go to the start of the row
    goto(-max_coord, max_coord - row_number * grid_size)

    # Do the same action multiple times, with the only
    # difference being the colour
    for colour in column_colours:
        color(colour)
        dot(dot_size)
        forward(grid_size)

# Exit gracefully
hideturtle()
done()
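One easy experiment the header invites (an illustrative variation, not part of the original demo): vary the dot size with the row number as well as the colour, so the grid grows from small dots at the top to larger ones at the bottom:

for row_number in range(number_of_rows):
    goto(-max_coord, max_coord - row_number * grid_size)
    for colour in column_colours:
        color(colour)
        dot(5 + row_number // 2)  # the dot size now changes per row too
        forward(grid_size)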
93d1c4b038d428ed57ee5e22dfd6aa42a7abb5be
d0168d08221da5bf95c0dd511efeecddc9b0a73d
/profiles/migrations/0001_initial.py
cdaa8070d22ad1710c0d0041619d3e087f6b3285
[]
no_license
alexarirok/roret-farm-software
900b5842c7b39c4a19543e138a719e4b496531a9
aa23fd729351f0d045b2e310dc839a8b4d639c6d
refs/heads/master
2021-04-08T21:03:59.709224
2020-05-01T00:07:53
2020-05-01T00:07:53
248,808,990
0
0
null
null
null
null
UTF-8
Python
false
false
853
py
# Generated by Django 3.0.5 on 2020-04-23 21:43

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('firstName', models.CharField(max_length=50, null=True)),
                ('lastName', models.CharField(max_length=50)),
                ('email', models.EmailField(blank=True, max_length=254)),
                ('phoneNumber', models.IntegerField(blank=True)),
                ('department', models.CharField(blank=True, max_length=30)),
                ('bio', models.TextField(blank=True, max_length=500)),
            ],
        ),
    ]
22f3c9cd4a9a1004dd7c7bb512643d2bbf2cbdb2
048405bfa0b48eaf78dd2298bdfe61472bd74eef
/scripts/multiproc.py
d0bf77fe6d0ed47a785ac752a9bddf3529d5e1ed
[]
no_license
sousa-edvan/greedy_grasp_ant
2218ae20f707baa8d5428db76129e5c758a21d07
12f5ac99b4d0e9599a2ecd138f8f6a3551fe2473
refs/heads/master
2022-01-27T02:30:35.977782
2019-07-19T16:39:20
2019-07-19T16:39:20
null
0
0
null
null
null
null
UTF-8
Python
false
false
654
py
import os
import subprocess
from multiprocessing import cpu_count, Pool

import pandas as pd
from auto_tqdm import tqdm
from notipy_me import Notipy


def score(data):
    csv = "scores/{data}.csv".format(data=data)
    subprocess.run([
        "./gga/greedy_grasp_ant",
        "--data=data/{data}".format(data=data),
        "--log={csv}".format(csv=csv),
        "--all"
    ])
    df = pd.read_csv(csv, index_col=0)[["mean"]]
    df.columns = [data]
    return df.T


data = os.listdir("data")

with Notipy():
    with Pool(cpu_count()) as p:
        df = pd.concat(list(tqdm(p.imap(score, data), total=len(data))))
        df.to_csv("scores/all_scores.csv")
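The script uses p.imap rather than p.map so results stream back as each task finishes, which is what lets tqdm draw a live progress bar; map would block until the whole batch completed. The same streaming pattern in isolation (a generic sketch):

from multiprocessing import Pool

def work(x):
    return x * x

if __name__ == '__main__':
    with Pool(4) as pool:
        for result in pool.imap(work, range(10)):
            print(result)  # arrives task by task, in input order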
6e29a6e6c458214a2a869d88d2a4615e11373078
adf2e802c7563e4b7b7cc279a54deceb6a803098
/openapi_client/models/pdf_annotate_parameters.py
9dc6b17b59f3a07476f94e6f5e9c90548eeb50bf
[]
no_license
Orpalis/passportpdfsdk-python
2466f7568becf2bd386bd9e4e00b4e3c1e642727
257d305ca9e6508d44fe521a1e4721f1835e8d0e
refs/heads/master
2022-04-24T15:58:21.257112
2020-04-27T11:09:37
2020-04-27T11:09:37
254,665,250
2
0
null
null
null
null
UTF-8
Python
false
false
12,981
py
# coding: utf-8

"""
    PassportPDF API

    Another brick in the cloud  # noqa: E501

    The version of the OpenAPI document: 1.0.1
    Generated by: https://openapi-generator.tech
"""


import pprint
import re  # noqa: F401

import six

from openapi_client.configuration import Configuration


class PdfAnnotateParameters(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'file_id': 'str',
        'page_range': 'str',
        'annotation_type': 'AnnotationType',
        'sticky_note_annotation_parameters': 'StickyNoteAnnotationParameters',
        'link_annotation_parameters': 'LinkAnnotationParameters',
        'free_text_annotation_parameters': 'FreeTextAnnotationParameters',
        'line_annotation_parameters': 'LineAnnotationParameters',
        'square_and_circle_annotation_parameters': 'SquareAndCircleAnnotationParameters',
        'rubber_stamp_annotation_parameters': 'RubberStampAnnotationParameters'
    }

    attribute_map = {
        'file_id': 'FileId',
        'page_range': 'PageRange',
        'annotation_type': 'AnnotationType',
        'sticky_note_annotation_parameters': 'StickyNoteAnnotationParameters',
        'link_annotation_parameters': 'LinkAnnotationParameters',
        'free_text_annotation_parameters': 'FreeTextAnnotationParameters',
        'line_annotation_parameters': 'LineAnnotationParameters',
        'square_and_circle_annotation_parameters': 'SquareAndCircleAnnotationParameters',
        'rubber_stamp_annotation_parameters': 'RubberStampAnnotationParameters'
    }

    def __init__(self, file_id=None, page_range=None, annotation_type=None, sticky_note_annotation_parameters=None, link_annotation_parameters=None, free_text_annotation_parameters=None, line_annotation_parameters=None, square_and_circle_annotation_parameters=None, rubber_stamp_annotation_parameters=None, local_vars_configuration=None):  # noqa: E501
        """PdfAnnotateParameters - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._file_id = None
        self._page_range = None
        self._annotation_type = None
        self._sticky_note_annotation_parameters = None
        self._link_annotation_parameters = None
        self._free_text_annotation_parameters = None
        self._line_annotation_parameters = None
        self._square_and_circle_annotation_parameters = None
        self._rubber_stamp_annotation_parameters = None
        self.discriminator = None

        self.file_id = file_id
        self.page_range = page_range
        if annotation_type is not None:
            self.annotation_type = annotation_type
        if sticky_note_annotation_parameters is not None:
            self.sticky_note_annotation_parameters = sticky_note_annotation_parameters
        if link_annotation_parameters is not None:
            self.link_annotation_parameters = link_annotation_parameters
        if free_text_annotation_parameters is not None:
            self.free_text_annotation_parameters = free_text_annotation_parameters
        if line_annotation_parameters is not None:
            self.line_annotation_parameters = line_annotation_parameters
        if square_and_circle_annotation_parameters is not None:
            self.square_and_circle_annotation_parameters = square_and_circle_annotation_parameters
        if rubber_stamp_annotation_parameters is not None:
            self.rubber_stamp_annotation_parameters = rubber_stamp_annotation_parameters

    @property
    def file_id(self):
        """Gets the file_id of this PdfAnnotateParameters.  # noqa: E501

        The identifier of the previously uploaded file to be processed.  # noqa: E501

        :return: The file_id of this PdfAnnotateParameters.  # noqa: E501
        :rtype: str
        """
        return self._file_id

    @file_id.setter
    def file_id(self, file_id):
        """Sets the file_id of this PdfAnnotateParameters.

        The identifier of the previously uploaded file to be processed.  # noqa: E501

        :param file_id: The file_id of this PdfAnnotateParameters.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and file_id is None:  # noqa: E501
            raise ValueError("Invalid value for `file_id`, must not be `None`")  # noqa: E501

        self._file_id = file_id

    @property
    def page_range(self):
        """Gets the page_range of this PdfAnnotateParameters.  # noqa: E501

        Specifies the page or the range of page to be annotated.  # noqa: E501

        :return: The page_range of this PdfAnnotateParameters.  # noqa: E501
        :rtype: str
        """
        return self._page_range

    @page_range.setter
    def page_range(self, page_range):
        """Sets the page_range of this PdfAnnotateParameters.

        Specifies the page or the range of page to be annotated.  # noqa: E501

        :param page_range: The page_range of this PdfAnnotateParameters.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and page_range is None:  # noqa: E501
            raise ValueError("Invalid value for `page_range`, must not be `None`")  # noqa: E501

        self._page_range = page_range

    @property
    def annotation_type(self):
        """Gets the annotation_type of this PdfAnnotateParameters.  # noqa: E501

        :return: The annotation_type of this PdfAnnotateParameters.  # noqa: E501
        :rtype: AnnotationType
        """
        return self._annotation_type

    @annotation_type.setter
    def annotation_type(self, annotation_type):
        """Sets the annotation_type of this PdfAnnotateParameters.

        :param annotation_type: The annotation_type of this PdfAnnotateParameters.  # noqa: E501
        :type: AnnotationType
        """
        self._annotation_type = annotation_type

    @property
    def sticky_note_annotation_parameters(self):
        """Gets the sticky_note_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501

        :return: The sticky_note_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :rtype: StickyNoteAnnotationParameters
        """
        return self._sticky_note_annotation_parameters

    @sticky_note_annotation_parameters.setter
    def sticky_note_annotation_parameters(self, sticky_note_annotation_parameters):
        """Sets the sticky_note_annotation_parameters of this PdfAnnotateParameters.

        :param sticky_note_annotation_parameters: The sticky_note_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :type: StickyNoteAnnotationParameters
        """
        self._sticky_note_annotation_parameters = sticky_note_annotation_parameters

    @property
    def link_annotation_parameters(self):
        """Gets the link_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501

        :return: The link_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :rtype: LinkAnnotationParameters
        """
        return self._link_annotation_parameters

    @link_annotation_parameters.setter
    def link_annotation_parameters(self, link_annotation_parameters):
        """Sets the link_annotation_parameters of this PdfAnnotateParameters.

        :param link_annotation_parameters: The link_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :type: LinkAnnotationParameters
        """
        self._link_annotation_parameters = link_annotation_parameters

    @property
    def free_text_annotation_parameters(self):
        """Gets the free_text_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501

        :return: The free_text_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :rtype: FreeTextAnnotationParameters
        """
        return self._free_text_annotation_parameters

    @free_text_annotation_parameters.setter
    def free_text_annotation_parameters(self, free_text_annotation_parameters):
        """Sets the free_text_annotation_parameters of this PdfAnnotateParameters.

        :param free_text_annotation_parameters: The free_text_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :type: FreeTextAnnotationParameters
        """
        self._free_text_annotation_parameters = free_text_annotation_parameters

    @property
    def line_annotation_parameters(self):
        """Gets the line_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501

        :return: The line_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :rtype: LineAnnotationParameters
        """
        return self._line_annotation_parameters

    @line_annotation_parameters.setter
    def line_annotation_parameters(self, line_annotation_parameters):
        """Sets the line_annotation_parameters of this PdfAnnotateParameters.

        :param line_annotation_parameters: The line_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :type: LineAnnotationParameters
        """
        self._line_annotation_parameters = line_annotation_parameters

    @property
    def square_and_circle_annotation_parameters(self):
        """Gets the square_and_circle_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501

        :return: The square_and_circle_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :rtype: SquareAndCircleAnnotationParameters
        """
        return self._square_and_circle_annotation_parameters

    @square_and_circle_annotation_parameters.setter
    def square_and_circle_annotation_parameters(self, square_and_circle_annotation_parameters):
        """Sets the square_and_circle_annotation_parameters of this PdfAnnotateParameters.

        :param square_and_circle_annotation_parameters: The square_and_circle_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :type: SquareAndCircleAnnotationParameters
        """
        self._square_and_circle_annotation_parameters = square_and_circle_annotation_parameters

    @property
    def rubber_stamp_annotation_parameters(self):
        """Gets the rubber_stamp_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501

        :return: The rubber_stamp_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :rtype: RubberStampAnnotationParameters
        """
        return self._rubber_stamp_annotation_parameters

    @rubber_stamp_annotation_parameters.setter
    def rubber_stamp_annotation_parameters(self, rubber_stamp_annotation_parameters):
        """Sets the rubber_stamp_annotation_parameters of this PdfAnnotateParameters.

        :param rubber_stamp_annotation_parameters: The rubber_stamp_annotation_parameters of this PdfAnnotateParameters.  # noqa: E501
        :type: RubberStampAnnotationParameters
        """
        self._rubber_stamp_annotation_parameters = rubber_stamp_annotation_parameters

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PdfAnnotateParameters):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, PdfAnnotateParameters):
            return True

        return self.to_dict() != other.to_dict()
46716e05f494d85df10a692e589e37f999ee1bdd
487ce91881032c1de16e35ed8bc187d6034205f7
/codes/CodeJamCrawler/CJ/16_0_2_anrieff_b.py
9217d8d49f04d08baad00e10f7695015df8cedd7
[]
no_license
DaHuO/Supergraph
9cd26d8c5a081803015d93cf5f2674009e92ef7e
c88059dc66297af577ad2b8afa4e0ac0ad622915
refs/heads/master
2021-06-14T16:07:52.405091
2016-08-21T13:39:13
2016-08-21T13:39:13
49,829,508
2
0
null
2021-03-19T21:55:46
2016-01-17T18:23:00
Python
UTF-8
Python
false
false
570
py
#!/usr/bin/env python

# Contestant: Veselin 'anrieff' Georgiev
# Round: Google Code Jam Qualification 2016
# Task: B. Revenge of the pancakes
# Solution: Greedy. At each step, find the largest single-colored block at
# the top, and flip it, until we finish.

TC = int(raw_input().strip())
for tc in xrange(1, TC + 1):
    print "Case #%d:" % tc,
    a = list(raw_input().strip())
    n = len(a)
    steps = 0
    while a.count('-') != 0:
        steps += 1
        i = 0
        while i < n and a[i] == a[0]:
            i += 1
        for j in xrange(i):
            a[j] = '-' if a[j] == '+' else '+'  # reverse
    print steps
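The greedy above (Python 2) simulates every flip; the answer also has a closed form, since each boundary between '+' and '-' blocks costs exactly one flip, plus one more when the bottom-most group is still blank-side-up. A minimal Python 3 sketch of that equivalence, illustration only, not part of the original submission:

def flips(stack):
    # one flip per block boundary, plus one if the last group is '-'
    transitions = sum(1 for x, y in zip(stack, stack[1:]) if x != y)
    return transitions + (1 if stack[-1] == '-' else 0)

assert flips('-') == 1
assert flips('+-') == 2
assert flips('--+-') == 3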
e82f2bd71cc0846186353d8c20817723d286fc4f
4d4fcde3efaa334f7aa56beabd2aa26fbcc43650
/server/src/uds/reports/lists/__init__.py
2cb963d21ee727e9b5b0bcc891ec8e5716d7db72
[]
no_license
xezpeleta/openuds
a8b11cb34eb0ef7bb2da80f67586a81b2de229ef
840a7a02bd7c9894e8863a8a50874cdfdbf30fcd
refs/heads/master
2023-08-21T17:55:48.914631
2021-10-06T10:39:06
2021-10-06T10:39:06
414,489,331
1
0
null
null
null
null
UTF-8
Python
false
false
1,712
py
# -*- coding: utf-8 -*- # # Copyright (c) 2015-2020 Virtual Cable S.L.U. # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of Virtual Cable S.L. nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ @author: Adolfo Gómez, dkmaster at dkmon dot com """ # Make reports visible to autoloader from . import users
630d1956e0b63d64523db795b9be2b9148d32cde
5cb98473ea9972d0a9a0278cde9b6ee8264f9bac
/01. Jump to python/chap05/5_6/260.py
3716f386db385102e9e74387bd614dd0a58e1cff
[]
no_license
libus1204/bigdata2019
fd85dbcd8c89db991ab5c3efa11ff85466a823f8
5e9a6fa2c340c1fcd2840889ba40c7b805926558
refs/heads/master
2020-04-21T10:56:33.519490
2019-04-15T05:28:19
2019-04-15T05:28:19
169,503,607
0
0
null
null
null
null
UTF-8
Python
false
false
83
py
import time

index = 1
while True:
    print(index)
    index += 1
    time.sleep(5)
89dffaba38711b93fdcb658ebbf0b28432889f78
113b962bd5e2eb770067bd374a15dfe8a1c2d09f
/py_scripts/get_mappedcount_byLibID.py
8a5cee444afcf4ef2e1d1d2b47beaaa11f6be665
[]
no_license
aungthurhahein/biotech_script
ecce51950bcef69405843da12ece2f84ea5541d6
2fda699343e6c46543fa1df2412c8ca2f2622cda
refs/heads/master
2020-12-24T06:20:13.028141
2016-07-06T15:23:34
2016-07-06T15:23:34
25,574,741
5
0
null
null
null
null
UTF-8
Python
false
false
827
py
""" # get occurences of ref_ids by Lib_IDs # modification tips: file type, column of file ids # __author__ = 'atrx' # Date: 22012015 """ import sys from Bio import SeqIO usage = "Usage %s infile" % sys.argv[0] # specific massage for no input try: fastafile = sys.argv[1] contigid = sys.argv[2] except: print usage, sys.exit(1) fasta_file = open(fastafile, 'r') ref_file = open(contigid, 'r') id_list = [] contig_list = [] id_key = [] for l in ref_file: id = l.split() id_list.append(l) id_key.append(id[1].strip()) for seq in SeqIO.parse(fasta_file, "fasta"): contig_list.append(seq.id) for seq_record in contig_list: contigid = seq_record.strip() if contigid in id_key: lo = id_key.index(contigid) print id_list[lo].strip() else: print "0 " + seq_record
fa568dcd357b037a884e720bb3f4b2961b3d5e46
343413e76c09d2bd3d009f382d9dcd19c984d58f
/.history/main_20201229180214.py
e7c1f84e7a234bab2a2ddd0a968647204387eebe
[]
no_license
rozbeh1212/cipher
7b81e640501639cefb0fe6bf100647dd2602291e
abdebdd7d1e155ffab78ce38be8bf28074366c42
refs/heads/master
2023-02-04T13:44:36.892470
2020-12-29T14:44:10
2020-12-29T14:44:10
325,314,022
0
0
null
null
null
null
UTF-8
Python
false
false
991
py
import art  # not used below

alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
            'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
            'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
            'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']

direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
text = input("Type your message:\n").lower()
shift = int(input("Type the shift number:\n"))


def caeser(start_text, shift_amount, cipher_direction):
    end_text = ""
    if cipher_direction == "decode":
        shift_amount *= -1  # negate once so decoding shifts backwards
    for letter in start_text:
        position = alphabet.index(letter)
        new_position = position + shift_amount
        end_text += alphabet[new_position]
    print(f"The {cipher_direction}d text is {end_text}")


caeser(start_text=text, shift_amount=shift, cipher_direction=direction)
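The doubled alphabet above exists only so that index + shift never runs past the end of the list; ord()/chr() arithmetic modulo 26 removes the need for it. A minimal sketch, not the original file's approach:

def caesar(text, shift, direction):
    # Shift alphabetic characters; leave everything else untouched.
    if direction == "decode":
        shift = -shift
    out = []
    for ch in text:
        if ch.isalpha():
            out.append(chr((ord(ch) - ord('a') + shift) % 26 + ord('a')))
        else:
            out.append(ch)
    return ''.join(out)

print(caesar("hello", 5, "encode"))  # mjqqt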
21a65c73620f2a40477d64a11550fc36704d99f4
85a9ffeccb64f6159adbd164ff98edf4ac315e33
/pysnmp/A3COM-HUAWEI-RS485-MIB.py
6e4c22f4a88026cd33571d0023c92ae07bce922d
[ "Apache-2.0" ]
permissive
agustinhenze/mibs.snmplabs.com
5d7d5d4da84424c5f5a1ed2752f5043ae00019fb
1fc5c07860542b89212f4c8ab807057d9a9206c7
refs/heads/master
2020-12-26T12:41:41.132395
2019-08-16T15:51:41
2019-08-16T15:53:57
237,512,469
0
0
Apache-2.0
2020-01-31T20:41:36
2020-01-31T20:41:35
null
UTF-8
Python
false
false
10,571
py
# # PySNMP MIB module A3COM-HUAWEI-RS485-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/A3COM-HUAWEI-RS485-MIB # Produced by pysmi-0.3.4 at Mon Apr 29 16:52:12 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # h3cCommon, = mibBuilder.importSymbols("A3COM-HUAWEI-OID-MIB", "h3cCommon") Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint") ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex") InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") Counter64, Counter32, iso, IpAddress, MibIdentifier, ObjectIdentity, TimeTicks, Integer32, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, NotificationType, Bits, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "Counter32", "iso", "IpAddress", "MibIdentifier", "ObjectIdentity", "TimeTicks", "Integer32", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "NotificationType", "Bits", "Gauge32") DisplayString, RowStatus, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "RowStatus", "TextualConvention") h3cRS485 = ModuleIdentity((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109)) if mibBuilder.loadTexts: h3cRS485.setLastUpdated('200910210000Z') if mibBuilder.loadTexts: h3cRS485.setOrganization('Hangzhou H3C Technologies Co., Ltd.') h3cRS485Properties = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1)) h3cRS485PropertiesTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1), ) if mibBuilder.loadTexts: h3cRS485PropertiesTable.setStatus('current') h3cRS485PropertiesEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex")) if mibBuilder.loadTexts: h3cRS485PropertiesEntry.setStatus('current') h3cRS485RawSessionNextIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cRS485RawSessionNextIndex.setStatus('current') h3cRS485BaudRate = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("bautRate300", 1), ("bautRate600", 2), ("bautRate1200", 3), ("bautRate2400", 4), ("bautRate4800", 5), ("bautRate9600", 6), ("bautRate19200", 7), ("bautRate38400", 8), ("bautRate57600", 9), ("bautRate115200", 10))).clone('bautRate9600')).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cRS485BaudRate.setStatus('current') h3cRS485DataBits = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("five", 1), ("six", 2), ("seven", 3), ("eight", 
4))).clone('eight')).setUnits('bit').setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cRS485DataBits.setStatus('current') h3cRS485Parity = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("none", 1), ("odd", 2), ("even", 3), ("mark", 4), ("space", 5))).clone('none')).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cRS485Parity.setStatus('current') h3cRS485StopBits = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("one", 1), ("two", 2), ("oneAndHalf", 3))).clone('one')).setUnits('bit').setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cRS485StopBits.setStatus('current') h3cRS485FlowControl = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("hardware", 2), ("xonOrxoff", 3))).clone('none')).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cRS485FlowControl.setStatus('current') h3cRS485TXCharacters = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 7), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cRS485TXCharacters.setStatus('current') h3cRS485RXCharacters = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 8), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cRS485RXCharacters.setStatus('current') h3cRS485TXErrCharacters = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 9), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cRS485TXErrCharacters.setStatus('current') h3cRS485RXErrCharacters = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 10), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cRS485RXErrCharacters.setStatus('current') h3cRS485ResetCharacters = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("counting", 1), ("clear", 2))).clone('counting')).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cRS485ResetCharacters.setStatus('current') h3cRS485RawSessions = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2)) h3cRS485RawSessionSummary = MibIdentifier((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 1)) h3cRS485RawSessionMaxNum = MibScalar((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cRS485RawSessionMaxNum.setStatus('current') h3cRS485RawSessionTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2), ) if mibBuilder.loadTexts: h3cRS485RawSessionTable.setStatus('current') h3cRS485RawSessionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "A3COM-HUAWEI-RS485-MIB", "h3cRS485SessionIndex")) if mibBuilder.loadTexts: h3cRS485RawSessionEntry.setStatus('current') h3cRS485SessionIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))) if mibBuilder.loadTexts: h3cRS485SessionIndex.setStatus('current') h3cRS485SessionType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1, 2), 
Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("udp", 1), ("tcpClient", 2), ("tcpServer", 3)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cRS485SessionType.setStatus('current') h3cRS485SessionAddType = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1, 3), InetAddressType().clone('ipv4')).setMaxAccess("readwrite") if mibBuilder.loadTexts: h3cRS485SessionAddType.setStatus('current') h3cRS485SessionRemoteIP = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1, 4), InetAddress()).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cRS485SessionRemoteIP.setStatus('current') h3cRS485SessionRemotePort = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1024, 65535))).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cRS485SessionRemotePort.setStatus('current') h3cRS485SessionLocalPort = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1024, 65535))).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cRS485SessionLocalPort.setStatus('current') h3cRS485SessionStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 2, 1, 7), RowStatus()).setMaxAccess("readcreate") if mibBuilder.loadTexts: h3cRS485SessionStatus.setStatus('current') h3cRS485RawSessionErrInfoTable = MibTable((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 3), ) if mibBuilder.loadTexts: h3cRS485RawSessionErrInfoTable.setStatus('current') h3cRS485RawSessionErrInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "A3COM-HUAWEI-RS485-MIB", "h3cRS485SessionIndex")) if mibBuilder.loadTexts: h3cRS485RawSessionErrInfoEntry.setStatus('current') h3cRS485RawSessionErrInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 2, 3, 1, 1), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: h3cRS485RawSessionErrInfo.setStatus('current') mibBuilder.exportSymbols("A3COM-HUAWEI-RS485-MIB", h3cRS485RawSessionMaxNum=h3cRS485RawSessionMaxNum, h3cRS485StopBits=h3cRS485StopBits, h3cRS485SessionRemotePort=h3cRS485SessionRemotePort, h3cRS485RawSessionErrInfoEntry=h3cRS485RawSessionErrInfoEntry, h3cRS485TXErrCharacters=h3cRS485TXErrCharacters, h3cRS485=h3cRS485, h3cRS485RXCharacters=h3cRS485RXCharacters, h3cRS485PropertiesEntry=h3cRS485PropertiesEntry, h3cRS485SessionAddType=h3cRS485SessionAddType, h3cRS485FlowControl=h3cRS485FlowControl, h3cRS485Properties=h3cRS485Properties, h3cRS485PropertiesTable=h3cRS485PropertiesTable, h3cRS485ResetCharacters=h3cRS485ResetCharacters, PYSNMP_MODULE_ID=h3cRS485, h3cRS485RawSessionEntry=h3cRS485RawSessionEntry, h3cRS485RawSessionNextIndex=h3cRS485RawSessionNextIndex, h3cRS485RawSessionTable=h3cRS485RawSessionTable, h3cRS485RawSessionErrInfo=h3cRS485RawSessionErrInfo, h3cRS485TXCharacters=h3cRS485TXCharacters, h3cRS485SessionStatus=h3cRS485SessionStatus, h3cRS485RawSessions=h3cRS485RawSessions, h3cRS485SessionRemoteIP=h3cRS485SessionRemoteIP, h3cRS485SessionLocalPort=h3cRS485SessionLocalPort, h3cRS485SessionIndex=h3cRS485SessionIndex, h3cRS485RXErrCharacters=h3cRS485RXErrCharacters, h3cRS485RawSessionSummary=h3cRS485RawSessionSummary, h3cRS485BaudRate=h3cRS485BaudRate, h3cRS485DataBits=h3cRS485DataBits, h3cRS485SessionType=h3cRS485SessionType, h3cRS485RawSessionErrInfoTable=h3cRS485RawSessionErrInfoTable, h3cRS485Parity=h3cRS485Parity)
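Generated modules like this one are usually consumed through pysnmp's MibBuilder rather than imported directly. A sketch of resolving one symbol's OID; the DirMibSource path and the .name attribute access are assumptions about pysnmp's SMI API, so treat this as illustrative only:

from pysnmp.smi import builder

mib_builder = builder.MibBuilder()
mib_builder.addMibSources(builder.DirMibSource('.'))  # assumed module location
mib_builder.loadModules('A3COM-HUAWEI-RS485-MIB')

(baud_rate,) = mib_builder.importSymbols('A3COM-HUAWEI-RS485-MIB',
                                         'h3cRS485BaudRate')
print(baud_rate.name)  # (1, 3, 6, 1, 4, 1, 43, 45, 1, 10, 2, 109, 1, 1, 1, 2)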
6b98012278b2ef3e02f3bdbc33e146865eb26807
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/59/usersdata/201/46743/submittedfiles/testes.py
3378d84c86f02731a2be48d3b0f834f8d9cb8366
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
111
py
# -*- coding: utf-8 -*-
# START HERE BELOW
r = float(input('Enter a radius: '))
pi = 3.14
a = pi * (r ** 2)
print(a)
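With the constant fixed, the standard library's math.pi is the more precise choice; a one-line variant for comparison (sketch only):

import math

r = float(input('Enter a radius: '))
print(math.pi * r ** 2)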
3ab2ab497849cf95c4e137ee698165d20cbe687f
1939f5b78e6dbd0675f6f2a9a0f4f49c2b069389
/instagram/src/instabot.py
91bd65f0544dd187a1655c570d6dcb9774751ecf
[ "MIT" ]
permissive
pavel-malin/instagram
cb7988d9fbfad14911bf39567f7b2f6336b1fb34
b53f00c20521f46f5836946a499f476859d431f5
refs/heads/master
2021-03-16T07:54:57.942684
2017-11-20T11:23:46
2017-11-20T11:23:46
111,401,293
0
0
null
null
null
null
UTF-8
Python
false
false
36,349
py
#!/usr/bin/env python # -*- coding: utf-8 -*- import atexit import datetime import itertools import json import logging import random import signal import sys if 'threading' in sys.modules: del sys.modules['threading'] import time import requests from unfollow_protocol import unfollow_protocol from userinfo import UserInfo class InstaBot: """ Instagram bot v 1.1.0 like_per_day=1000 - How many likes set bot in one day. media_max_like=0 - Don't like media (photo or video) if it have more than media_max_like likes. media_min_like=0 - Don't like media (photo or video) if it have less than media_min_like likes. tag_list = ['cat', 'car', 'dog'] - Tag list to like. max_like_for_one_tag=5 - Like 1 to max_like_for_one_tag times by row. log_mod = 0 - Log mod: log_mod = 0 log to console, log_mod = 1 log to file, log_mod = 2 no log. https://github.com/LevPasha/instabot.py """ url = 'https://www.instagram.com/' url_tag = 'https://www.instagram.com/explore/tags/%s/?__a=1' url_likes = 'https://www.instagram.com/web/likes/%s/like/' url_unlike = 'https://www.instagram.com/web/likes/%s/unlike/' url_comment = 'https://www.instagram.com/web/comments/%s/add/' url_follow = 'https://www.instagram.com/web/friendships/%s/follow/' url_unfollow = 'https://www.instagram.com/web/friendships/%s/unfollow/' url_login = 'https://www.instagram.com/accounts/login/ajax/' url_logout = 'https://www.instagram.com/accounts/logout/' url_media_detail = 'https://www.instagram.com/p/%s/?__a=1' url_user_detail = 'https://www.instagram.com/%s/?__a=1' user_agent = ("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 " "(KHTML, like Gecko) Chrome/48.0.2564.103 Safari/537.36") accept_language = 'ru-RU,ru;q=0.8,en-US;q=0.6,en;q=0.4' # If instagram ban you - query return 400 error. error_400 = 0 # If you have 3 400 error in row - looks like you banned. error_400_to_ban = 3 # If InstaBot think you are banned - going to sleep. ban_sleep_time = 2 * 60 * 60 # All counter. bot_mode = 0 like_counter = 0 follow_counter = 0 unfollow_counter = 0 comments_counter = 0 current_user = 'hajka' current_index = 0 current_id = 'abcds' # List of user_id, that bot follow bot_follow_list = [] user_info_list = [] user_list = [] ex_user_list = [] unwanted_username_list = [] is_checked = False is_selebgram = False is_fake_account = False is_active_user = False is_following = False is_follower = False is_rejected = False is_self_checking = False is_by_tag = False is_follower_number = 0 self_following = 0 self_follower = 0 # Log setting. log_file_path = '' log_file = 0 # Other. 
user_id = 0 media_by_tag = 0 media_on_feed = [] media_by_user = [] login_status = False # For new_auto_mod next_iteration = {"Like": 0, "Follow": 0, "Unfollow": 0, "Comments": 0} def __init__(self, login, password, like_per_day=1000, media_max_like=50, media_min_like=0, follow_per_day=0, follow_time=5 * 60 * 60, unfollow_per_day=0, comment_list=[["this", "the", "your"], ["photo", "picture", "pic", "shot", "snapshot"], ["is", "looks", "feels", "is really"], ["great", "super", "good", "very good", "good", "wow", "WOW", "cool", "GREAT", "magnificent", "magical", "very cool", "stylish", "beautiful", "so beautiful", "so stylish", "so professional", "lovely", "so lovely", "very lovely", "glorious", "so glorious", "very glorious", "adorable", "excellent", "amazing"],[".", "..", "...", "!", "!!", "!!!"]], comments_per_day=0, tag_list=['cat', 'car', 'dog'], max_like_for_one_tag=5, unfollow_break_min=15, unfollow_break_max=30, log_mod=0, proxy="", user_blacklist={}, tag_blacklist=[], unwanted_username_list=[], unfollow_whitelist=[]): self.bot_start = datetime.datetime.now() self.unfollow_break_min = unfollow_break_min self.unfollow_break_max = unfollow_break_max self.user_blacklist = user_blacklist self.tag_blacklist = tag_blacklist self.unfollow_whitelist = unfollow_whitelist self.comment_list = comment_list self.time_in_day = 24 * 60 * 60 # Like self.like_per_day = like_per_day if self.like_per_day != 0: self.like_delay = self.time_in_day / self.like_per_day # Follow self.follow_time = follow_time self.follow_per_day = follow_per_day if self.follow_per_day != 0: self.follow_delay = self.time_in_day / self.follow_per_day # Unfollow self.unfollow_per_day = unfollow_per_day if self.unfollow_per_day != 0: self.unfollow_delay = self.time_in_day / self.unfollow_per_day # Comment self.comments_per_day = comments_per_day if self.comments_per_day != 0: self.comments_delay = self.time_in_day / self.comments_per_day # Don't like if media have more than n likes. self.media_max_like = media_max_like # Don't like if media have less than n likes. self.media_min_like = media_min_like # Auto mod seting: # Default list of tag. self.tag_list = tag_list # Get random tag, from tag_list, and like (1 to n) times. 
self.max_like_for_one_tag = max_like_for_one_tag # log_mod 0 to console, 1 to file self.log_mod = log_mod self.s = requests.Session() # if you need proxy make something like this: # self.s.proxies = {"https" : "http://proxyip:proxyport"} # by @ageorgios if proxy != "": proxies = { 'http': 'http://' + proxy, 'https': 'http://' + proxy, } self.s.proxies.update(proxies) # convert login to lower self.user_login = login.lower() self.user_password = password self.bot_mode = 0 self.media_by_tag = [] self.media_on_feed = [] self.media_by_user = [] self.unwanted_username_list = unwanted_username_list now_time = datetime.datetime.now() log_string = 'Instabot v1.1.0 started at %s:\n' % \ (now_time.strftime("%d.%m.%Y %H:%M")) self.write_log(log_string) self.login() self.populate_user_blacklist() signal.signal(signal.SIGTERM, self.cleanup) atexit.register(self.cleanup) def populate_user_blacklist(self): for user in self.user_blacklist: user_id_url = self.url_user_detail % (user) info = self.s.get(user_id_url) # prevent error if 'Account of user was deleted or link is invalid from json import JSONDecodeError try: all_data = json.loads(info.text) except JSONDecodeError as e: self.write_log('Account of user %s was deleted or link is ' 'invalid' % (user)) else: # prevent exception if user have no media id_user = all_data['user']['id'] # Update the user_name with the user_id self.user_blacklist[user] = id_user log_string = "Blacklisted user %s added with ID: %s" % (user, id_user) self.write_log(log_string) time.sleep(5 * random.random()) def login(self): log_string = 'Trying to login as %s...\n' % (self.user_login) self.write_log(log_string) self.s.cookies.update({ 'sessionid': '', 'mid': '', 'ig_pr': '1', 'ig_vw': '1920', 'csrftoken': '', 's_network': '', 'ds_user_id': '' }) self.login_post = { 'username': self.user_login, 'password': self.user_password } self.s.headers.update({ 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': self.accept_language, 'Connection': 'keep-alive', 'Content-Length': '0', 'Host': 'www.instagram.com', 'Origin': 'https://www.instagram.com', 'Referer': 'https://www.instagram.com/', 'User-Agent': self.user_agent, 'X-Instagram-AJAX': '1', 'X-Requested-With': 'XMLHttpRequest' }) r = self.s.get(self.url) self.s.headers.update({'X-CSRFToken': r.cookies['csrftoken']}) time.sleep(5 * random.random()) login = self.s.post( self.url_login, data=self.login_post, allow_redirects=True) self.s.headers.update({'X-CSRFToken': login.cookies['csrftoken']}) self.csrftoken = login.cookies['csrftoken'] time.sleep(5 * random.random()) if login.status_code == 200: r = self.s.get('https://www.instagram.com/') finder = r.text.find(self.user_login) if finder != -1: ui = UserInfo() self.user_id = ui.get_user_id_by_login(self.user_login) self.login_status = True log_string = '%s login success!' % (self.user_login) self.write_log(log_string) else: self.login_status = False self.write_log('Login error! Check your login data!') else: self.write_log('Login error! Connection error!') def logout(self): now_time = datetime.datetime.now() log_string = 'Logout: likes - %i, follow - %i, unfollow - %i, comments - %i.' 
% \ (self.like_counter, self.follow_counter, self.unfollow_counter, self.comments_counter) self.write_log(log_string) work_time = datetime.datetime.now() - self.bot_start log_string = 'Bot work time: %s' % (work_time) self.write_log(log_string) try: logout_post = {'csrfmiddlewaretoken': self.csrftoken} logout = self.s.post(self.url_logout, data=logout_post) self.write_log("Logout success!") self.login_status = False except: self.write_log("Logout error!") def cleanup(self, *_): # Unfollow all bot follow if self.follow_counter >= self.unfollow_counter: for f in self.bot_follow_list: log_string = "Trying to unfollow: %s" % (f[0]) self.write_log(log_string) self.unfollow_on_cleanup(f[0]) sleeptime = random.randint(self.unfollow_break_min, self.unfollow_break_max) log_string = "Pausing for %i seconds... %i of %i" % ( sleeptime, self.unfollow_counter, self.follow_counter) self.write_log(log_string) time.sleep(sleeptime) self.bot_follow_list.remove(f) # Logout if (self.login_status): self.logout() exit(0) def get_media_id_by_tag(self, tag): """ Get media ID set, by your hashtag """ if (self.login_status): log_string = "Get media id by tag: %s" % (tag) self.write_log(log_string) if self.login_status == 1: url_tag = self.url_tag % (tag) try: r = self.s.get(url_tag) all_data = json.loads(r.text) self.media_by_tag = list(all_data['tag']['media']['nodes']) except: self.media_by_tag = [] self.write_log("Except on get_media!") else: return 0 def like_all_exist_media(self, media_size=-1, delay=True): """ Like all media ID that have self.media_by_tag """ if self.login_status: if self.media_by_tag != 0: i = 0 for d in self.media_by_tag: # Media count by this tag. if media_size > 0 or media_size < 0: media_size -= 1 l_c = self.media_by_tag[i]['likes']['count'] if ((l_c <= self.media_max_like and l_c >= self.media_min_like) or (self.media_max_like == 0 and l_c >= self.media_min_like) or (self.media_min_like == 0 and l_c <= self.media_max_like) or (self.media_min_like == 0 and self.media_max_like == 0)): for blacklisted_user_name, blacklisted_user_id in self.user_blacklist.items( ): if self.media_by_tag[i]['owner'][ 'id'] == blacklisted_user_id: self.write_log( "Not liking media owned by blacklisted user: " + blacklisted_user_name) return False if self.media_by_tag[i]['owner'][ 'id'] == self.user_id: self.write_log( "Keep calm - It's your own media ;)") return False try: caption = self.media_by_tag[i][ 'caption'].encode( 'ascii', errors='ignore') tag_blacklist = set(self.tag_blacklist) if sys.version_info[0] == 3: tags = { str.lower( (tag.decode('ASCII')).strip('#')) for tag in caption.split() if (tag.decode('ASCII') ).startswith("#") } else: tags = { unicode.lower( (tag.decode('ASCII')).strip('#')) for tag in caption.split() if (tag.decode('ASCII') ).startswith("#") } if tags.intersection(tag_blacklist): matching_tags = ', '.join( tags.intersection(tag_blacklist)) self.write_log( "Not liking media with blacklisted tag(s): " + matching_tags) return False except: self.write_log( "Couldn't find caption - not liking") return False log_string = "Trying to like media: %s" % \ (self.media_by_tag[i]['id']) self.write_log(log_string) like = self.like(self.media_by_tag[i]['id']) # comment = self.comment(self.media_by_tag[i]['id'], 'Cool!') # follow = self.follow(self.media_by_tag[i]["owner"]["id"]) if like != 0: if like.status_code == 200: # Like, all ok! self.error_400 = 0 self.like_counter += 1 log_string = "Liked: %s. Like #%i." 
% \ (self.media_by_tag[i]['id'], self.like_counter) self.write_log(log_string) elif like.status_code == 400: log_string = "Not liked: %i" \ % (like.status_code) self.write_log(log_string) # Some error. If repeated - can be ban! if self.error_400 >= self.error_400_to_ban: # Look like you banned! time.sleep(self.ban_sleep_time) else: self.error_400 += 1 else: log_string = "Not liked: %i" \ % (like.status_code) self.write_log(log_string) return False # Some error. i += 1 if delay: time.sleep(self.like_delay * 0.9 + self.like_delay * 0.2 * random.random()) else: return True else: return False else: return False else: return False else: self.write_log("No media to like!") def like(self, media_id): """ Send http request to like media by ID """ if self.login_status: url_likes = self.url_likes % (media_id) try: like = self.s.post(url_likes) last_liked_media_id = media_id except: self.write_log("Except on like!") like = 0 return like def unlike(self, media_id): """ Send http request to unlike media by ID """ if self.login_status: url_unlike = self.url_unlike % (media_id) try: unlike = self.s.post(url_unlike) except: self.write_log("Except on unlike!") unlike = 0 return unlike def comment(self, media_id, comment_text): """ Send http request to comment """ if self.login_status: comment_post = {'comment_text': comment_text} url_comment = self.url_comment % (media_id) try: comment = self.s.post(url_comment, data=comment_post) if comment.status_code == 200: self.comments_counter += 1 log_string = 'Write: "%s". #%i.' % (comment_text, self.comments_counter) self.write_log(log_string) return comment except: self.write_log("Except on comment!") return False def follow(self, user_id): """ Send http request to follow """ if self.login_status: url_follow = self.url_follow % (user_id) try: follow = self.s.post(url_follow) if follow.status_code == 200: self.follow_counter += 1 log_string = "Followed: %s #%i." % (user_id, self.follow_counter) self.write_log(log_string) return follow except: self.write_log("Except on follow!") return False def unfollow(self, user_id): """ Send http request to unfollow """ if self.login_status: url_unfollow = self.url_unfollow % (user_id) try: unfollow = self.s.post(url_unfollow) if unfollow.status_code == 200: self.unfollow_counter += 1 log_string = "Unfollow: %s #%i." % (user_id, self.unfollow_counter) self.write_log(log_string) return unfollow except: self.write_log("Exept on unfollow!") return False def unfollow_on_cleanup(self, user_id): """ Unfollow on cleanup by @rjmayott """ if self.login_status: url_unfollow = self.url_unfollow % (user_id) try: unfollow = self.s.post(url_unfollow) if unfollow.status_code == 200: self.unfollow_counter += 1 log_string = "Unfollow: %s #%i of %i." % ( user_id, self.unfollow_counter, self.follow_counter) self.write_log(log_string) else: log_string = "Slow Down - Pausing for 5 minutes so we don't get banned!" self.write_log(log_string) time.sleep(300) unfollow = self.s.post(url_unfollow) if unfollow.status_code == 200: self.unfollow_counter += 1 log_string = "Unfollow: %s #%i of %i." % ( user_id, self.unfollow_counter, self.follow_counter) self.write_log(log_string) else: log_string = "Still no good :( Skipping and pausing for another 5 minutes" self.write_log(log_string) time.sleep(300) return False return unfollow except: log_string = "Except on unfollow... 
Looks like a network error" self.write_log(log_string) return False def auto_mod(self): """ Star loop, that get media ID by your tag list, and like it """ if self.login_status: while True: random.shuffle(self.tag_list) self.get_media_id_by_tag(random.choice(self.tag_list)) self.like_all_exist_media(random.randint \ (1, self.max_like_for_one_tag)) def new_auto_mod(self): while True: # ------------------- Get media_id ------------------- if len(self.media_by_tag) == 0: self.get_media_id_by_tag(random.choice(self.tag_list)) self.this_tag_like_count = 0 self.max_tag_like_count = random.randint( 1, self.max_like_for_one_tag) # ------------------- Like ------------------- self.new_auto_mod_like() # ------------------- Follow ------------------- self.new_auto_mod_follow() # ------------------- Unfollow ------------------- self.new_auto_mod_unfollow() # ------------------- Comment ------------------- self.new_auto_mod_comments() # Bot iteration in 1 sec time.sleep(3) # print("Tic!") def new_auto_mod_like(self): if time.time() > self.next_iteration["Like"] and self.like_per_day != 0 \ and len(self.media_by_tag) > 0: # You have media_id to like: if self.like_all_exist_media(media_size=1, delay=False): # If like go to sleep: self.next_iteration["Like"] = time.time() + \ self.add_time(self.like_delay) # Count this tag likes: self.this_tag_like_count += 1 if self.this_tag_like_count >= self.max_tag_like_count: self.media_by_tag = [0] # Del first media_id del self.media_by_tag[0] def new_auto_mod_follow(self): if time.time() > self.next_iteration["Follow"] and \ self.follow_per_day != 0 and len(self.media_by_tag) > 0: if self.media_by_tag[0]["owner"]["id"] == self.user_id: self.write_log("Keep calm - It's your own profile ;)") return log_string = "Trying to follow: %s" % ( self.media_by_tag[0]["owner"]["id"]) self.write_log(log_string) if self.follow(self.media_by_tag[0]["owner"]["id"]) != False: self.bot_follow_list.append( [self.media_by_tag[0]["owner"]["id"], time.time()]) self.next_iteration["Follow"] = time.time() + \ self.add_time(self.follow_delay) def new_auto_mod_unfollow(self): if time.time() > self.next_iteration["Unfollow"] and \ self.unfollow_per_day != 0 and len(self.bot_follow_list) > 0: if self.bot_mode == 0: for f in self.bot_follow_list: if time.time() > (f[1] + self.follow_time): log_string = "Trying to unfollow #%i: " % ( self.unfollow_counter + 1) self.write_log(log_string) self.auto_unfollow() self.bot_follow_list.remove(f) self.next_iteration["Unfollow"] = time.time() + \ self.add_time(self.unfollow_delay) if self.bot_mode == 1: unfollow_protocol(self) def new_auto_mod_comments(self): if time.time() > self.next_iteration["Comments"] and self.comments_per_day != 0 \ and len(self.media_by_tag) > 0 \ and self.check_exisiting_comment(self.media_by_tag[0]['code']) == False: comment_text = self.generate_comment() log_string = "Trying to comment: %s" % (self.media_by_tag[0]['id']) self.write_log(log_string) if self.comment(self.media_by_tag[0]['id'], comment_text) != False: self.next_iteration["Comments"] = time.time() + \ self.add_time(self.comments_delay) def add_time(self, time): """ Make some random for next iteration""" return time * 0.9 + time * 0.2 * random.random() def generate_comment(self): c_list = list(itertools.product(*self.comment_list)) repl = [(" ", " "), (" .", "."), (" !", "!")] res = " ".join(random.choice(c_list)) for s, r in repl: res = res.replace(s, r) return res.capitalize() def check_exisiting_comment(self, media_code): url_check = self.url_media_detail % 
(media_code) check_comment = self.s.get(url_check) all_data = json.loads(check_comment.text) if all_data['graphql']['shortcode_media']['owner']['id'] == self.user_id: self.write_log("Keep calm - It's your own media ;)") # Del media to don't loop on it del self.media_by_tag[0] return True comment_list = list(all_data['graphql']['shortcode_media']['edge_media_to_comment']['edges']) for d in comment_list: if d['node']['owner']['id'] == self.user_id: self.write_log("Keep calm - Media already commented ;)") # Del media to don't loop on it del self.media_by_tag[0] return True return False def auto_unfollow(self): chooser = 1 current_user = 'abcd' current_id = '12345' checking = True self.media_on_feed = [] if len(self.media_on_feed) < 1: self.get_media_id_recent_feed() if len(self.media_on_feed) != 0: chooser = random.randint(0, len(self.media_on_feed) - 1) current_id = self.media_on_feed[chooser]['node']["owner"]["id"] current_user = self.media_on_feed[chooser]['node']["owner"][ "username"] while checking: for wluser in self.unfollow_whitelist: if wluser == current_user: chooser = random.randint(0, len(self.media_on_feed) - 1) current_id = self.media_on_feed[chooser]['node'][ "owner"]["id"] current_user = self.media_on_feed[chooser]['node'][ "owner"]["username"] log_string = ( "found whitelist user, starting search again") self.write_log(log_string) break else: checking = False if self.login_status: now_time = datetime.datetime.now() log_string = "%s : Get user info \n%s" % ( self.user_login, now_time.strftime("%d.%m.%Y %H:%M")) self.write_log(log_string) if self.login_status == 1: url_tag = self.url_user_detail % (current_user) try: r = self.s.get(url_tag) all_data = json.loads(r.text) self.user_info = all_data['user'] i = 0 log_string = "Checking user info.." 
self.write_log(log_string) while i < 1: follows = self.user_info['follows']['count'] follower = self.user_info['followed_by']['count'] media = self.user_info['media']['count'] follow_viewer = self.user_info['follows_viewer'] followed_by_viewer = self.user_info[ 'followed_by_viewer'] requested_by_viewer = self.user_info[ 'requested_by_viewer'] has_requested_viewer = self.user_info[ 'has_requested_viewer'] log_string = "Follower : %i" % (follower) self.write_log(log_string) log_string = "Following : %s" % (follows) self.write_log(log_string) log_string = "Media : %i" % (media) self.write_log(log_string) if follower / follows > 2: self.is_selebgram = True self.is_fake_account = False print(' >>>This is probably Selebgram account') elif follows / follower > 2: self.is_fake_account = True self.is_selebgram = False print(' >>>This is probably Fake account') else: self.is_selebgram = False self.is_fake_account = False print(' >>>This is a normal account') if follows / media < 10 and follower / media < 10: self.is_active_user = True print(' >>>This user is active') else: self.is_active_user = False print(' >>>This user is passive') if follow_viewer or has_requested_viewer: self.is_follower = True print(" >>>This account is following you") else: self.is_follower = False print(' >>>This account is NOT following you') if followed_by_viewer or requested_by_viewer: self.is_following = True print(' >>>You are following this account') else: self.is_following = False print(' >>>You are NOT following this account') i += 1 except: media_on_feed = [] self.write_log("Except on get_info!") time.sleep(20) return 0 else: return 0 if self.is_selebgram is not False or self.is_fake_account is not False or self.is_active_user is not True or self.is_follower is not True: print(current_user) self.unfollow(current_id) try: del self.media_on_feed[chooser] except: self.media_on_feed = [] self.media_on_feed = [] def get_media_id_recent_feed(self): if self.login_status: now_time = datetime.datetime.now() log_string = "%s : Get media id on recent feed" % (self.user_login) self.write_log(log_string) if self.login_status == 1: url_tag = 'https://www.instagram.com/?__a=1' try: r = self.s.get(url_tag) all_data = json.loads(r.text) self.media_on_feed = list( all_data['graphql']['user']['edge_web_feed_timeline'][ 'edges']) log_string = "Media in recent feed = %i" % ( len(self.media_on_feed)) self.write_log(log_string) except: self.media_on_feed = [] self.write_log("Except on get_media!") time.sleep(20) return 0 else: return 0 def write_log(self, log_text): """ Write log by print() or logger """ if self.log_mod == 0: try: print(log_text) except UnicodeEncodeError: print("Your text has unicode problem!") elif self.log_mod == 1: # Create log_file if not exist. if self.log_file == 0: self.log_file = 1 now_time = datetime.datetime.now() self.log_full_path = '%s%s_%s.log' % ( self.log_file_path, self.user_login, now_time.strftime("%d.%m.%Y_%H:%M")) formatter = logging.Formatter('%(asctime)s - %(name)s ' '- %(message)s') self.logger = logging.getLogger(self.user_login) self.hdrl = logging.FileHandler(self.log_full_path, mode='w') self.hdrl.setFormatter(formatter) self.logger.setLevel(level=logging.INFO) self.logger.addHandler(self.hdrl) # Log to log file. try: self.logger.info(log_text) except UnicodeEncodeError: print("Your text has unicode problem!")
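The bot is configured entirely through the constructor shown above; a hedged instantiation sketch using only parameters from that signature (credentials are placeholders):

bot = InstaBot(
    login='your_login',                # placeholder
    password='your_password',          # placeholder
    like_per_day=600,
    media_max_like=80,
    follow_per_day=100,
    follow_time=2 * 60 * 60,
    unfollow_per_day=100,
    tag_list=['cat', 'car', 'dog'],
    log_mod=0)
bot.new_auto_mod()  # main loop: like / follow / unfollow / comment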
84a2a9db3cd847433912ae84459035f42045f6bc
da3e36172daaf863ef73372f8c36cc2629ec1769
/UMDC/03/17g.py
ce55196990dd77c97e38c5ebc70122baad56ce1d
[]
no_license
mentecatoDev/python
08eef1cb5a6ca2f16b01ee98192ccf1a65b9380a
80ddf541d3d1316ba8375db8f6ec170580e7831b
refs/heads/master
2021-06-30T07:03:51.957376
2021-02-22T09:40:46
2021-02-22T09:40:46
222,322,503
3
4
null
null
null
null
UTF-8
Python
false
false
2,333
py
""" Ejercicio 17g Escribir funciones que resuelvan los siguientes problemas: g) Dadas dos fechas (dia1, mes1, año1, dia2, mes2, año2), indicar el tiempo transcurrido entre ambas, en años, meses y dias. Nota: en todos los casos, involucrar las funciones escritas previamente cuando sea posible. """ def bisiesto(anio): """Devuelve True si el anio es bisiesto.""" if anio % 4: return False else: if anio % 100: return True else: if anio % 400: return False else: return True def dias_mes(mes, anio): """Devuelve los días de cualquier mes teniendo en cuenta el anio.""" if mes in (1, 3, 5, 7, 8, 10, 12): return 31 elif mes in (4, 6, 9, 11): return 30 elif mes == 2: if bisiesto(anio): return 29 else: return 28 else: return -1 def validar_fecha(dia, mes, anio): dm = dias_mes(mes, anio) if dm == -1: return -1 if dm < dia: return False elif mes > 12: return False else: return True def dias_faltan(dia, mes, anio): if validar_fecha(dia, mes, anio): return dias_mes(mes, anio)-dia else: return -1 print(dias_faltan(1, 1, 2000)) def dias_fin_anio(dia, mes, anio): if validar_fecha(dia, mes, anio): dias = 0 for m in range(mes+1, 12+1): dias += dias_mes(m, anio) dias += dias_faltan(dia, mes, anio) return dias else: return -1 def dias_principio(dia, mes, anio): if validar_fecha(dia, mes, anio): if bisiesto(anio): return 365 - dias_fin_anio(dia, mes, anio) else: return 364 - dias_fin_anio(dia, mes, anio) else: return -1 def dias_transcurridos(dia1, mes1, anio1, dia2, mes2, anio2): if anio1 == anio2: total = -dias_principio(dia1, mes1, anio1) + \ dias_principio(dia2, mes2, anio2) else: total = dias_fin_anio(dia1, mes1, anio1) + \ dias_principio(dia2, mes2, anio2)+1 for a in range(anio1+1, anio2): if bisiesto(a): total += 366 else: total += 365 return total print(dias_transcurridos(1, 1, 2001, 31, 12, 2002))
eee6c26c594ab5b9fa6e26288db0e7e9dee3d498
ff886f5f947460576feaec2a049f6a9f78f2a63f
/core/management/commands/wait_for_db.py
460989d579a419bc219cba5e76cc9fcb204aa701
[ "MIT" ]
permissive
devendraprasad1984/loan_payment_app
2bc927afbc084504bb10a959105d72f6f419e2c8
1a4c31d03a8c5ecf4dae2a981373649f4f699aa3
refs/heads/main
2023-07-21T19:00:40.692978
2021-09-09T03:36:04
2021-09-09T03:36:04
400,111,124
0
0
null
null
null
null
UTF-8
Python
false
false
707
py
import time

from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    """Django management command that blocks until the database is available."""

    def handle(self, *args, **options):
        self.stdout.write('waiting for db connection...')
        db_conn = None
        while not db_conn:
            try:
                db_conn = connections['default']
            except OperationalError:
                self.stdout.write(self.style.ERROR(
                    'database is not available, re-checking in 1 second'))
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('database is available'))
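The command runs as python manage.py wait_for_db, typically in a container entrypoint before migrate. A hedged test sketch: patching ConnectionHandler.__getitem__ mirrors the connections['default'] lookup above, but the exact patch target is an assumption:

from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase


class WaitForDbTests(TestCase):

    @patch('django.db.utils.ConnectionHandler.__getitem__')
    def test_waits_for_db(self, getitem):
        # Fail five times, then hand back a usable connection.
        getitem.side_effect = [OperationalError] * 5 + [True]
        with patch('time.sleep'):  # skip the real one-second pauses
            call_command('wait_for_db')
        self.assertEqual(getitem.call_count, 6)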
0d543afecf4a0144548d3602be45f8e851f1657b
a61dae5e34605f708cec4ba661a3e6d2ed78ab73
/Weather/GUI.py
595c6a018becfbd6c4cf78563b0fa83b0d9947dd
[]
no_license
ChyiYaqing/PythonPrj
a475c2f83db2a607377d24a78a8c0aa1220229e3
a7f8e9c0263b4f0f3827c5488ab4fed002962a1b
refs/heads/master
2020-06-02T01:27:05.095901
2017-06-25T12:48:30
2017-06-25T12:48:30
94,090,551
0
0
null
null
null
null
UTF-8
Python
false
false
8,424
py
#!/usr/bin/env python3 #-*- coding: utf-8 -*- try: # for Python2 from Tkinter import * ## notice capitalized T in Tkinter except ImportError: # for Python3 from tkinter import * ## notice lowercase 't' in tkinter here from Weathers import * import urllib2 from threading import Thread import tkMessageBox from sys import exit class MyThread(Thread): def run(self): tkMessageBox.showinfo("Error", "The City Is Not Exist!") class Top(Frame): def __init__(self, master=None, content=None, chart=None, today=None): Frame.__init__(self, master) self.content = content self.master = master self.chart = chart self.today = today self.createWidget() def createWidget(self): self.e = StringVar() self.top = LabelFrame(self, text = 'City Name', padx = 5, pady = 5) #create the LabelFrame widget with string 'City Name' self.e.set(self.content.weathers.city) #set the varible of type of StringVar as self.cityname self.entry = Entry(self.top, width=29, textvariable=self.e) #create the Entry widget self.submitbutton = Button(self.top, text = "submit", command=self.submitcity) #create the Button widget self.submitbutton.bind("<Return>", self.submitcity) #bind the Button namely submit with Enter Key in keyboard self.entry.pack(side='left') self.submitbutton.pack(side="right") self.top.pack(fill=X) #place the widgets on frame namely Top #define the function namely submit and it is the activity of button namely submit def submitcity(self): lastcityname = self.content.weathers.city #backup the cityname as lastcityname cityname = self.entry.get().capitalize() #make cityname as a same formate if self.content.updateWeathers(cityname) == 1: MyThread().start() cityname = lastcityname self.e.set(lastcityname) else: self.chart.updateLineChart() self.today.updateToday() class Today(Frame): def __init__(self, master=None, content=None): Frame.__init__(self, master) self.content = content self.createWidget() def createWidget(self): self.today = LabelFrame(self, text='today') self.img = PhotoImage(file='Today.gif') self.canvas = Canvas(self.today, height=90) self.item1 = self.canvas.create_image(150, 50, image=self.img) self.item2 = self.canvas.create_text(20, 10, text=self.content.weathers.day[0]) self.item3 = self.canvas.create_text(80, 10, text=self.content.weathers.date[0]) self.item4 = self.canvas.create_text(150, 80, text=self.content.weathers[0].mindegree + 'C ~ ' + self.content.weathers[0].maxdegree + 'C') self.canvas.pack(fill=X) self.today.pack(fill=X) def updateToday(self): self.img = PhotoImage(file='Today.gif') self.canvas.itemconfigure(self.item1, image=self.img) self.canvas.itemconfigure(self.item2, text=self.content.weathers.day[0]) self.canvas.itemconfigure(self.item3, text=self.content.weathers.date[0]) self.canvas.itemconfigure(self.item4, text=self.content.weathers[0].mindegree + 'C ~ ' + self.content.weathers[0].maxdegree + 'C') class Content(Frame): def __init__(self, master=None): Frame.__init__(self, master) self.master = master self.weathers = Weathers() self.weathers.setDefaultCity() self.createWidget() self.initWeathers() def createWidget(self): self.labels = []; #self.today = LabelFrame(self, text = 'Today Weather', padx = 5, pady = 5) self.feture = LabelFrame(self, text = 'Feture Weather', padx = 5, pady = 5) #self.label.pack() self.labels.append(Label(self, justify = 'left', anchor = 'w', fg = 'red')) #self.labels[0].pack(fill=X) for i in range(1, 5): self.labels.append(Label(self.feture, justify = 'left', anchor = 'w')) self.labels[i].pack(fill=X) #self.today.pack(fill=X) self.feture.pack(fill=X) def 
initWeathers(self): try: self.weathers.setURL() self.weathers.setWeathersFromInternet() except urllib2.URLError: tkMessageBox.showinfo("Error", "Please check connect!") exit(0) for i, wea in enumerate(self.weathers, start=0): self.labels[i]['text'] = wea.message #fill in message in the text of labels def updateWeathers(self, cityname): self.weathers.setCity(cityname) self.weathers.setURL() try: if self.weathers.setWeathersFromInternet() == 1: return 1 except urllib2.URLError: tkMessageBox.showinfo("Error", "Please check connect!") exit(0) for i, wea in enumerate(self.weathers, start=0): self.labels[i]['text'] = wea.message self.weathers.saveWeathers() return 0 class LineChart(Frame): def __init__(self, master=None, content=None): Frame.__init__(self, master) self.content = content self.createWidget() self.initChart() self.drawLineChart() def createWidget(self): self.chartframe = LabelFrame(self, text = 'LineChart', padx = 5, pady = 5); self.chart = Canvas(self.chartframe, height=200) self.chartframe.pack(fill=X) self.chart.pack() img = PhotoImage(file='./icon/purple_retina.gif') self.label = Label(self) self.label.configure(image=img) self.label.image = img self.label.pack(side='right') def initChart(self): self.chart.create_line(20, 20, 20, 180, fill='black') #y self.chart.create_line(10, 170, 270, 170, fill='black') #x self.chart.create_line(15, 25, 20, 20, fill='black'); self.chart.create_line(25, 25, 20, 20, fill='black'); #y self.chart.create_line(265, 165, 270, 170, fill='black'); self.chart.create_line(265, 175, 270, 170, fill='black'); for i in range(0, 5): self.chart.create_line(40 * i + 60, 170, 40 * i + 60, 165, fill='black') def drawLineChart(self): self.pointmax = [] self.pointmin = [] self.minmin = sorted(self.content.weathers.mindegree)[0] self.maxmax = sorted(self.content.weathers.maxdegree)[len(self.content.weathers.maxdegree) - 1] self.gap = 150 / (self.maxmax - self.minmin) - 1 for i, d in enumerate(self.content.weathers.maxdegree[0:6]): self.pointmax.append((170 - ((d - self.minmin) * self.gap) - 10, 60 + i * 40)) for i, d in enumerate(self.content.weathers.mindegree[0:6]): self.pointmin.append((170 - ((d - self.minmin) * self.gap) - 10, 60 + i * 40)) for i, wea in enumerate(self.content.weathers, start=0): self.chart.create_text(40 * i + 60, 175, text=wea.date.split()[0]) for i in range(1, len(self.pointmax)): self.chart.create_line(20, self.pointmax[i - 1][0], 25, self.pointmax[i - 1][0], fill='red') self.chart.create_text(30, self.pointmax[i - 1][0], text=self.content.weathers[i - 1].maxdegree, fill='red') self.chart.create_line(self.pointmax[i - 1][1], self.pointmax[i - 1][0], self.pointmax[i][1], self.pointmax[i][0], fill='red') self.chart.create_line(20, self.pointmax[len(self.pointmax) - 1][0], 25, self.pointmax[len(self.pointmax) - 1][0], fill='red') self.chart.create_text(30, self.pointmax[len(self.pointmax) - 1][0], text=self.content.weathers[len(self.pointmax) - 1].maxdegree, fill='red'); for i in range(1, len(self.pointmin)): self.chart.create_line(15, self.pointmin[i - 1][0], 20, self.pointmin[i - 1][0], fill='blue') self.chart.create_text(10, self.pointmin[i - 1][0], text=self.content.weathers[i - 1].mindegree, fill='blue'); self.chart.create_line(self.pointmin[i - 1][1], self.pointmin[i - 1][0], self.pointmin[i][1], self.pointmin[i][0], fill='blue') self.chart.create_line(15, self.pointmin[len(self.pointmin) - 1][0], 20, self.pointmin[len(self.pointmin) - 1][0], fill='blue') self.chart.create_text(10, self.pointmin[len(self.pointmin) - 1][0], 
text=self.content.weathers[len(self.pointmin) - 1].mindegree, fill='blue'); def updateLineChart(self): self.chart.delete(ALL) self.initChart() self.drawLineChart() def GUI(): root = Tk(className='MyWeather') root.resizable(width = False, height = False) content = Content(root) today = Today(root, content) chart = LineChart(root, content) top = Top(root, content, chart, today) top.pack(fill=X) today.pack(fill=X) content.pack(fill=X) chart.pack(fill=X) root.mainloop()
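Nothing in the module calls GUI(), so importing it shows no window; the usual entry point would be (sketch):

if __name__ == '__main__':
    GUI()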
95414f7ed3b48f7baf6bd13799ea4698d7f6093f
199522cb43b4e2c7e3bf034a0e604794258562b1
/0x0F-python-object_relational_mapping/3-my_safe_filter_states.py
3659d402edd14791ff0d3dce555884770499752c
[]
no_license
jormao/holbertonschool-higher_level_programming
a0fd92f2332f678e6fe496057c04f2995d24a4ac
360b3a7294e9e0eadcadb57d4c48c22369c05111
refs/heads/master
2020-09-29T01:36:20.094209
2020-05-15T03:27:06
2020-05-15T03:27:06
226,915,744
0
0
null
null
null
null
UTF-8
Python
false
false
607
py
#!/usr/bin/python3
"""
script that takes in arguments and displays all values in the states table
of hbtn_0e_0_usa where name matches the argument.
But this time, write one that is safe from MySQL injections!
"""
import MySQLdb
from sys import argv

if __name__ == "__main__":
    db = MySQLdb.connect(host="localhost", port=3306, user=argv[1],
                         passwd=argv[2], db=argv[3])
    cur = db.cursor()
    cur.execute("SELECT * FROM states\
                WHERE name = %s\
                ORDER BY id", (argv[4],))
    rows = cur.fetchall()
    for row in rows:
        print(row)
    cur.close()
    db.close()
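The %s placeholder plus the (argv[4],) tuple is what defeats injection: the driver escapes the value instead of splicing it into the SQL text. For contrast, a sketch of the unsafe and safe forms, reusing the cursor above (the hostile input is illustrative):

name = "Texas' OR '1'='1"  # hostile input

# UNSAFE - the payload becomes part of the SQL statement:
# cur.execute("SELECT * FROM states WHERE name = '%s'" % name)

# SAFE - MySQLdb escapes the value before it reaches the server:
cur.execute("SELECT * FROM states WHERE name = %s", (name,))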
041350efe6b160a115e9e22c301c74a34ff53193
71257430418ed7410ddffb6df692a5e816eb53b7
/61hunter.py
ed47aa6e3c043824bfe5d9b810408fd96bd965c2
[]
no_license
aarthisandhiya/aarthi
917283541b9aa133db5d50a3b68eda2a10c38af7
00b31831832ea573dfd886eb0001ad824325136d
refs/heads/master
2020-04-15T05:10:42.585357
2019-07-21T13:57:58
2019-07-21T13:57:58
164,411,781
0
0
null
null
null
null
UTF-8
Python
false
false
199
py
a = int(input())
c = 0
b = [int(x) for x in input().split()]
u, v = map(int, input().split())
for i in range(0, len(b)):
    if b[i] == u:
        while i < len(b) and b[i] < v:
            c = c + 1
            i = i + 1
print(c)
0ac0305052893eb0942f039d2bc543f72d5454e5
cf09d6430e37b5460d7208d6cae6d3af0fa15925
/jsonbot/jsb/lib/reboot.py
9209bd73573898a076abeb23cc8bd8fa26b3fd6a
[ "MIT" ]
permissive
Lujeni/old-projects
2bbf0ff89852a3e4a9677475a615d2ee4b07d635
657304c8b017a98935de9728fc695abe8be7cc4f
refs/heads/master
2021-03-12T23:08:34.054777
2014-10-16T23:10:15
2014-10-16T23:10:15
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,632
py
# jsb/reboot.py
#
#

""" reboot code. """

## jsb imports

from jsb.lib.fleet import getfleet
from jsb.imports import getjson
json = getjson()

## basic imports

import os
import sys
import pickle
import tempfile
import logging
import time

## reboot function

def reboot():
    """ reboot the bot. """
    logging.warn("reboot - rebooting")
    os.execl(sys.argv[0], *sys.argv)

## reboot_stateful function

def reboot_stateful(bot, ievent, fleet, partyline):
    """ reboot the bot, but keep the connections (IRC only). """
    logging.warn("reboot - doing stateful reboot")
    session = {'bots': {}, 'name': bot.cfg.name, 'channel': ievent.channel, 'partyline': []}
    fleet = getfleet()
    for i in fleet.bots:
        logging.warn("reboot - updating %s" % i.cfg.name)
        data = i._resumedata()
        if not data: continue
        session['bots'].update(data)
        if i.type == "sxmpp": i.exit() ; continue
        if i.type == "convore": i.exit() ; continue
        if i.type == "tornado":
            i.exit()
            time.sleep(0.1)
            for socketlist in i.websockets.values():
                for sock in socketlist: sock.stream.close()
    session['partyline'] = partyline._resumedata()
    sfile, sessionfile = tempfile.mkstemp('-session', 'jsb-', text=True)
    logging.warn("writing session file %s" % sessionfile)
    json.dump(session, open(sessionfile, "w"))
    args = []
    skip = False
    for a in sys.argv[1:]:
        if skip: skip = False ; continue
        if a == "-r": skip = True ; continue
        args.append(a)
    os.execl(sys.argv[0], sys.argv[0], '-r', sessionfile, *args)
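Both functions end in os.execl, which replaces the current process image in place, so the process keeps its PID across the restart. A standalone sketch of that pattern (sys.executable is used here for a plain script, unlike the bot's argv[0]):

import os
import sys


def restart():
    # exec* replaces this process; the first argument is the program to run,
    # the remaining arguments become the new argv (argv[0] included).
    os.execl(sys.executable, sys.executable, *sys.argv)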
e25ee8942b20a0704262265705ad3ad2b5b7b407
f99f30752e9bb9e023b37c731f64fb2155ac3daf
/03/zip.py
69160f8e55ebbe1da4acd417e0cd571fe8488b3e
[]
no_license
chu83/python-basics
148ff6977f5ca04775951d90ed1f5f763c51a9ff
19fe0937842c668f604876be0aeb0962a2630dd2
refs/heads/master
2023-01-19T01:29:25.203738
2020-11-29T18:34:33
2020-11-29T18:34:33
311,258,549
0
0
null
null
null
null
UTF-8
Python
false
false
335
py
print('========== zip() usage example ===========')
s1 = ['foo', 'bar', 'baz']
s2 = ['one', 'two', 'three', 'four']
z = zip(s1, s2)
print(z, type(z))

print('========== iteration 1 ===========')
for t in z:
    print(t, type(t))

# zip() returns a one-shot iterator, so it is recreated before reuse
z = zip(s1, s2)
for a, b in z:
    print(a, b)

print('========== iteration 2 ===========')
z = zip(s1, s2)
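The recreation of z above matters because a zip object is an iterator that can only be consumed once; a small sketch:

pairs = zip(['a', 'b'], [1, 2])
print(list(pairs))  # [('a', 1), ('b', 2)]
print(list(pairs))  # []  -- the iterator was exhausted by the first list()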
318d59a2c7fd3d07c465da350c7d3b65dd8f4934
91d1a6968b90d9d461e9a2ece12b465486e3ccc2
/ivs_write_f/playback-key-pair_import.py
e6227c4b54f4786793a1d735b01d2cf516e72ad9
[]
no_license
lxtxl/aws_cli
c31fc994c9a4296d6bac851e680d5adbf7e93481
aaf35df1b7509abf5601d3f09ff1fece482facda
refs/heads/master
2023-02-06T09:00:33.088379
2020-12-27T13:38:45
2020-12-27T13:38:45
318,686,394
0
0
null
null
null
null
UTF-8
Python
false
false
773
py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))

from common.execute_command import write_parameter

# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
    """
    delete-playback-key-pair : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ivs/delete-playback-key-pair.html
    get-playback-key-pair : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ivs/get-playback-key-pair.html
    list-playback-key-pairs : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ivs/list-playback-key-pairs.html
    """
    write_parameter("ivs", "import-playback-key-pair")
4cdb8d4ce152583225c607c387d527a82eced8d3
7c9707f0f1cb8e633ac605934f3dbd8036790868
/projet/rpi_manager/models.py
f61c7c196096c5f2351a5ccd5919b2269e0b3f2e
[]
no_license
ometeore/hydropo
891e1abd4c1b8ccd0a3b27a043abf894b70ceb5b
324076d4b7ddbd14e718c424eb24d129c2a2243c
refs/heads/master
2023-06-14T08:35:55.838469
2021-07-04T16:28:09
2021-07-04T16:28:09
290,198,666
0
0
null
null
null
null
UTF-8
Python
false
false
3,146
py
from django.db import models
from django import forms
from datetime import datetime
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync


class Rpi(models.Model):
    name = models.CharField(max_length=200)
    uid_name = models.CharField(max_length=200)
    last_connect = models.DateTimeField()
    is_conected = models.BooleanField()

    # rather than comparing strings, which is a source of bugs:
    # import datetime
    # also handle the rollover past midnight
    # date_time_obj = datetime.datetime.strptime(date_time_str, '%Y-%m-%d %H:%M:%S.%f')
    def compare_time(self, begin_test, end_test, cat):
        if cat:
            schedule = self.water.all()
        else:
            schedule = self.lights.all()
        for times in schedule:
            if begin_test > str(times.begin) and begin_test < str(times.end):
                return False
            if end_test > str(times.begin) and end_test < str(times.end):
                return False
            if begin_test < str(times.begin) and end_test > str(times.end):
                return False
        return True

    def broadcast_schedule(self):
        message = {}
        message["manual"] = False
        schedule_water_list = [
            [str(elm.begin), str(elm.end)] for elm in self.water.all()
        ]
        message["water"] = schedule_water_list
        schedule_lights_list = [
            [str(elm.begin), str(elm.end)] for elm in self.lights.all()
        ]
        message["lights"] = schedule_lights_list
        objectif_ph = self.ph.filter(objectif=True)
        message["ph"] = objectif_ph[0].value
        objectif_ec = self.ec.filter(objectif=True)
        message["ec"] = objectif_ec[0].value
        # send the message to the websocket group named after this Rpi's uid
        channel_layer = get_channel_layer()
        async_to_sync(channel_layer.group_send)(
            self.uid_name, {"type": "send_message", "message": message}
        )

    def broadcast_manual(self, tool):
        message = {}
        message["manual"] = True
        message["tool"] = tool
        print("ASK FOR MANUAL MODE FOR ID: {}".format(self.uid_name))
        print(message)
        channel_layer = get_channel_layer()
        async_to_sync(channel_layer.group_send)(
            self.uid_name, {"type": "send_message", "message": message}
        )


class WaterSchedule(models.Model):
    begin = models.TimeField()
    end = models.TimeField()
    rpi = models.ForeignKey(Rpi, on_delete=models.CASCADE, related_name="water")


class LightSchedule(models.Model):
    begin = models.TimeField()
    end = models.TimeField()
    rpi = models.ForeignKey(Rpi, on_delete=models.CASCADE, related_name="lights")


class Ph(models.Model):
    date = models.DateTimeField()
    value = models.FloatField()
    objectif = models.BooleanField()
    rpi = models.ForeignKey(Rpi, on_delete=models.CASCADE, related_name="ph")


class Ec(models.Model):
    date = models.DateTimeField()
    value = models.IntegerField()
    objectif = models.BooleanField()
    rpi = models.ForeignKey(Rpi, on_delete=models.CASCADE, related_name="ec")
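On the receiving side, Django Channels dispatches each {"type": "send_message"} group event to a consumer method named send_message. A minimal consumer sketch follows; the class name and URL-route kwarg are hypothetical, not from this repo:

from channels.generic.websocket import AsyncJsonWebsocketConsumer

class RpiConsumer(AsyncJsonWebsocketConsumer):
    async def connect(self):
        # Join the group named after the Rpi uid so group_send reaches us.
        self.group_name = self.scope["url_route"]["kwargs"]["uid_name"]
        await self.channel_layer.group_add(self.group_name, self.channel_name)
        await self.accept()

    async def send_message(self, event):
        # Invoked once per {"type": "send_message"} event sent to the group.
        await self.send_json(event["message"])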
6d21cd382eeb98e10bb5bc8a2a202726211def5f
ce6ace34704e74c2a53e9b38b2630876d9cd52e2
/mdias_addons/metro_park_maintenance/models/day_plan_limit.py
8f25985b0d5a8f7e281a0c359a8c74c657e8ef34
[]
no_license
rezaghanimi/main_mdias
e3cfd8033204d8e7e484041f506892621a3e3479
13b428a5c4ade6278e3e5e996ef10d9fb0fea4b9
refs/heads/master
2022-09-17T20:15:42.305452
2020-05-29T05:38:35
2020-05-29T05:38:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
861
py
# -*- coding: utf-8 -*-

from odoo import models, fields, api


class DayPlanLimit(models.Model):
    """Daily plan limit (日计划限制)."""
    _name = 'metro_park_maintenance.day_plan_limit'

    location = fields.Many2one(string='地点',  # location
                               comodel_name='metro_park_base.location')
    max_repair_after_high_run = fields.Integer(string='高峰车最大检修数量')  # max repairs after peak service
    max_repair_back_time = fields.Char(string="返回时间", help='最大返回时间')  # return time / latest return time


class DayPlanWizardLimit(models.TransientModel):
    """Daily plan wizard limit (日计划向导限制)."""
    _name = 'metro_park_maintenance.day_plan_wizard_limit'

    location = fields.Many2one(string='地点',  # location
                               comodel_name='metro_park_base.location')
    max_repair_after_high_run = fields.Integer(string='高峰车最大检修数量')  # max repairs after peak service
    max_repair_back_time = fields.Char(string="返回时间", help='最大返回时间')  # return time / latest return time
f23d62dafdbb77a295d93ac632a4441e517a6c10
c92d5b8509f23444622529aa24d4bc85bf1d3c9f
/main/question47/book1.py
44b832b082eb8ed9403996e4f3f5e5ee8a3f4ad1
[]
no_license
qcymkxyc/JZoffer
75dfb747394018f14552f521413b01a5faa9c07f
28628616589061653a8322d5b400f9af32f2249d
refs/heads/master
2021-07-15T00:53:00.711360
2019-02-10T03:16:52
2019-02-10T03:16:52
149,714,958
0
0
null
null
null
null
UTF-8
Python
false
false
926
py
#!/usr/bin/env python
# _*_coding:utf-8_*_
"""
@Time    : 19-1-24 10:58 AM
@Author  : qcymkxyc
@File    : book1.py
@Software: PyCharm
"""


def max_value(matrix):
    """Dynamic programming: maximum-value path through the matrix.

    :param matrix: List[List[int]]  the matrix
    :return: int  the maximum value
    """
    n_row, n_col = len(matrix), len(matrix[0])
    value_matrix = list()
    for i, v in enumerate(matrix):
        value_matrix.append([0] * len(v))

    # initialize the first row
    for i in range(n_col):
        value_matrix[0][i] += sum(matrix[0][:i + 1])
    # initialize the first column
    for row in range(n_row):
        value_matrix[row][0] = sum(map(lambda x: x[0], matrix[:row + 1]))

    for row in range(1, n_row):
        for col in range(1, n_col):
            value_matrix[row][col] = max(value_matrix[row - 1][col],
                                         value_matrix[row][col - 1]) + \
                                     matrix[row][col]
    return value_matrix[-1][-1]
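A quick check against the classic 4x4 gift grid used with this problem; only right and down moves are allowed, and the expected best path value is 53:

if __name__ == "__main__":
    gifts = [[1, 10, 3, 8],
             [12, 2, 9, 6],
             [5, 7, 4, 11],
             [3, 7, 16, 5]]
    # best path: 1 -> 12 -> 5 -> 7 -> 7 -> 16 -> 5 = 53
    print(max_value(gifts))  # 53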
5802eeeb0ab8d6f8d89deb95c876c1ac27840b2a
4fc9cb4cf01e41c4ed3de89f13d213e95c87dd33
/angr/procedures/definitions/win32_wdsclientapi.py
dcbb07c74c314d0c5708a98cc0f55187713044fc
[ "BSD-2-Clause" ]
permissive
mborgerson/angr
ea5daf28576c3d31b542a0e229139ab2494326e9
8296578e92a15584205bfb2f7add13dd0fb36d56
refs/heads/master
2023-07-24T22:41:25.607215
2022-10-19T19:46:12
2022-10-20T18:13:31
227,243,942
1
2
BSD-2-Clause
2021-04-07T22:09:51
2019-12-11T00:47:55
Python
UTF-8
Python
false
false
14,784
py
# pylint:disable=line-too-long
import logging

from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary

_l = logging.getLogger(name=__name__)

lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("wdsclientapi.dll")
prototypes = \
    {
        #
        'WdsCliClose': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Handle"]),
        #
        'WdsCliRegisterTrace': SimTypeFunction([SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="SByte"), offset=0)], SimTypeBottom(label="Void"), arg_names=["pwszFormat", "Params"]), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pfn"]),
        #
        'WdsCliFreeStringArray': SimTypeFunction([SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), label="LPArray", offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["ppwszArray", "ulCount"]),
        #
        'WdsCliFindFirstImage': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hSession", "phFindHandle"]),
        #
        'WdsCliFindNextImage': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Handle"]),
        #
        'WdsCliGetEnumerationFlags': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["Handle", "pdwFlags"]),
        #
        'WdsCliGetImageHandleFromFindHandle': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["FindHandle", "phImageHandle"]),
        #
        'WdsCliGetImageHandleFromTransferHandle': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hTransfer", "phImageHandle"]),
        #
        'WdsCliCreateSession': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"pwszUserName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pwszDomain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pwszPassword": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WDS_CLI_CRED", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pwszServer", "pCred", "phSession"]),
        #
        'WdsCliAuthorizeSession': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimStruct({"pwszUserName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pwszDomain": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pwszPassword": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WDS_CLI_CRED", pack=False, align=None), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hSession", "pCred"]),
        #
        'WdsCliInitializeLog': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="CPU_ARCHITECTURE"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hSession", "ulClientArchitecture", "pwszClientId", "pwszClientAddress"]),
        #
        'WdsCliLog': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["hSession", "ulLogLevel", "ulMessageCode"]),
        #
        'WdsCliGetImageName': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
        #
        'WdsCliGetImageDescription': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
        #
        'WdsCliGetImageType': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="WDS_CLI_IMAGE_TYPE"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "pImageType"]),
        #
        'WdsCliGetImageFiles': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "pppwszFiles", "pdwCount"]),
        #
        'WdsCliGetImageLanguage': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
        #
        'WdsCliGetImageLanguages': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypePointer(SimTypeChar(label="SByte"), offset=0), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "pppszValues", "pdwNumValues"]),
        #
        'WdsCliGetImageVersion': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
        #
        'WdsCliGetImagePath': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
        #
        'WdsCliGetImageIndex': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "pdwValue"]),
        #
        'WdsCliGetImageArchitecture': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeInt(signed=False, label="CPU_ARCHITECTURE"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "pdwValue"]),
        #
        'WdsCliGetImageLastModifiedTime': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimStruct({"wYear": SimTypeShort(signed=False, label="UInt16"), "wMonth": SimTypeShort(signed=False, label="UInt16"), "wDayOfWeek": SimTypeShort(signed=False, label="UInt16"), "wDay": SimTypeShort(signed=False, label="UInt16"), "wHour": SimTypeShort(signed=False, label="UInt16"), "wMinute": SimTypeShort(signed=False, label="UInt16"), "wSecond": SimTypeShort(signed=False, label="UInt16"), "wMilliseconds": SimTypeShort(signed=False, label="UInt16")}, name="SYSTEMTIME", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppSysTimeValue"]),
        #
        'WdsCliGetImageSize': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeLongLong(signed=False, label="UInt64"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "pullValue"]),
        #
        'WdsCliGetImageHalName': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
        #
        'WdsCliGetImageGroup': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
        #
        'WdsCliGetImageNamespace': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ppwszValue"]),
        #
        'WdsCliGetImageParameter': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=False, label="WDS_CLI_IMAGE_PARAM_TYPE"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "ParamType", "pResponse", "uResponseLen"]),
        #
        'WdsCliGetTransferSize': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeLongLong(signed=False, label="UInt64"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hIfh", "pullValue"]),
        #
        'WdsCliSetTransferBufferSize': SimTypeFunction([SimTypeInt(signed=False, label="UInt32")], SimTypeBottom(label="Void"), arg_names=["ulSizeInBytes"]),
        #
        'WdsCliTransferImage': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeFunction([SimTypeInt(signed=False, label="PFN_WDS_CLI_CALLBACK_MESSAGE_ID"), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeBottom(label="Void"), arg_names=["dwMessageId", "wParam", "lParam", "pvUserData"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hImage", "pwszLocalPath", "dwFlags", "dwReserved", "pfnWdsCliCallback", "pvUserData", "phTransfer"]),
        #
        'WdsCliTransferFile': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeFunction([SimTypeInt(signed=False, label="PFN_WDS_CLI_CALLBACK_MESSAGE_ID"), SimTypePointer(SimTypeInt(signed=False, label="UInt"), label="UIntPtr", offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeBottom(label="Void"), arg_names=["dwMessageId", "wParam", "lParam", "pvUserData"]), offset=0), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pwszServer", "pwszNamespace", "pwszRemoteFilePath", "pwszLocalFilePath", "dwFlags", "dwReserved", "pfnWdsCliCallback", "pvUserData", "phTransfer"]),
        #
        'WdsCliCancelTransfer': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hTransfer"]),
        #
        'WdsCliWaitForTransfer': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hTransfer"]),
        #
        'WdsCliObtainDriverPackages': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0), SimTypePointer(SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hImage", "ppwszServerName", "pppwszDriverPackages", "pulCount"]),
        #
        'WdsCliObtainDriverPackagesEx': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0), SimTypePointer(SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0), offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["hSession", "pwszMachineInfo", "ppwszServerName", "pppwszDriverPackages", "pulCount"]),
        #
        'WdsCliGetDriverQueryXml': SimTypeFunction([SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)], SimTypeInt(signed=True, label="Int32"), arg_names=["pwszWinDirPath", "ppwszDriverQuery"]),
    }

lib.set_prototypes(prototypes)
8168b5bf889b97e447da255e86d69e116f571d47
c8975f8bbe32637399a3ca00ad21e8e6602e358d
/aoc/year2021/day14/day14.py
bf40f0314c32c659180cf4eaa5ae69c3475ea98d
[ "Unlicense" ]
permissive
Godsmith/adventofcode
0e8e0beb813300206b2810b523b54a6c40ca936f
3c59ea66830f82b63881e0ea19bfe3076f2a500d
refs/heads/master
2021-12-28T13:05:42.579374
2021-12-26T22:19:55
2021-12-26T22:24:01
225,074,419
0
0
null
null
null
null
UTF-8
Python
false
false
977
py
from collections import Counter

from aocd import get_data
from more_itertools import pairwise


def run(data, iterations):
    new_element_from_pair = {tuple(line.split(" -> ")[0]): line.split(" -> ")[1]
                             for line in data.splitlines()[2:]}
    new_pairs_from_pair = {(e1, e2): [(e1, inserted), (inserted, e2)]
                           for (e1, e2), inserted in new_element_from_pair.items()}
    template = data.splitlines()[0]
    element_counter = Counter(template)
    pair_counter = Counter(pairwise(template))
    for _ in range(iterations):
        new_pair_counter = Counter()
        for pair in pair_counter:
            for new_pair in new_pairs_from_pair[pair]:
                new_pair_counter[new_pair] += pair_counter[pair]
            element_counter[new_element_from_pair[pair]] += pair_counter[pair]
        pair_counter = new_pair_counter
    return element_counter.most_common()[0][1] - element_counter.most_common()[-1][1]


print(run(get_data(), 10))
print(run(get_data(), 40))
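For a quick sanity check, the well-known sample from this puzzle (Advent of Code 2021, day 14, quoted from memory) can be fed to run(); after 10 steps the most- and least-common element counts should differ by 1588:

sample = """NNCB

CH -> B
HH -> N
CB -> H
NH -> C
HB -> C
HC -> B
HN -> C
NN -> C
BH -> H
NC -> B
NB -> B
BN -> B
BB -> N
BC -> B
CC -> N
CN -> C"""
# print(run(sample, 10))  # expected: 1588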
f32d24f9fdb32d8eb2a1eef5c82ae7102d01c864
cc5f2ee6a5de6faf141f10b1b10717243821a0a5
/problems/problem 106.py
e9b15f310b5e23ecf09e8a1864e6f1ea2dd3f449
[]
no_license
alexandrepoulin/ProjectEulerInPython
faf634025c86bc74fc764d315813bf1706e58f63
97cb52cdd7508f2db891d1644e3d247814571718
refs/heads/master
2020-04-22T11:47:04.594328
2019-02-16T00:17:40
2019-02-16T00:17:40
170,341,883
0
0
null
null
null
null
UTF-8
Python
false
false
793
py
print("Starting") import useful ## only need to check subset pairs which have the same number of elements ## there are useful.nChooseK(n,s)*useful.nChooseK(n-s,s)*0.5 such pairs ## for a specific subset, we only need to look at pairs which have interweining elements ## such as (1,3)(2,4) ## number of times this doesn't happend is given by Catalan numbers given by c ## multiply that by the total number of ways to make two subsets of that size ## or useful.nChooseK(n,2*s) ## and you find how many pairs you need to check for a subset size def c(s): return useful.nChooseK(2*s,s)/(s+1) def x(n,s): return useful.nChooseK(n,s)*useful.nChooseK(n-s,s)*0.5-c(s)*useful.nChooseK(n,2*s) answer = 0 N= 12 for s in range(2,7): answer += x(N,s) print(answer)
3cd515eca280170fe3a32456a2936ef77006c086
286b6dc56323f982092ffafbfac8a32dbbaeb7ef
/Day_09/sample_pyvmomi.py
10033c34292fb7b9d33af55fc34b5e48284d85bb
[]
no_license
learndevops19/pythonTraining-CalsoftInc
ccee0d90aadc00bfdb17f9578620f6bf92f80a4c
c5f61516b835339b394876edd1c6f62e7cc6f0c3
refs/heads/master
2021-02-05T04:27:17.590913
2019-11-20T17:27:06
2019-11-20T17:27:06
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,761
py
import ssl

from pyVim import connect
from pyVmomi import vim


def connectVcenter(vCenterHost, username, password, portNum=443):
    """
    Description : Performs vCenter connection.
    Parameters  : vCenterHost - vCenter server ip address (STRING)
                  username - vCenter server username (STRING)
                  password - vCenter server password (STRING)
                  portNum - Port number for connection, default is 443 (INT)
    Returns     : Service instance object
    """
    context = ssl._create_unverified_context()
    si = connect.SmartConnect(
        host=vCenterHost,
        user=username,
        pwd=password,
        port=portNum,
        sslContext=context
    )
    return si


def getObj(content, vimtype, name):
    """
    Description: Get the vsphere object associated with a given text name
    Parameters : content - Data object having properties for the ServiceInstance managed object (OBJECT)
                 vimtype - Managed object type (OBJECT)
                 name - Managed object entity name (STRING)
    Return: Matched Managed object (OBJECT)
    """
    container = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True
    )
    for vmObj in container.view:
        if vmObj.name == name:
            return vmObj


def getDatacenterByName(si, name):
    """
    Description: Find a datacenter by its name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - datacenter name (STRING)
    Return: datacenter Object (OBJECT)
    """
    return getObj(si.RetrieveContent(), [vim.Datacenter], name)


def getClusterByName(si, name):
    """
    Description: Find a cluster by its name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - cluster name (STRING)
    Return: cluster Object (OBJECT)
    """
    return getObj(si.RetrieveContent(), [vim.ClusterComputeResource], name)


def getHostByName(si, name):
    """
    Description: Find a host by its name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - host name (STRING)
    Return: host Object (OBJECT)
    """
    return getObj(si.RetrieveContent(), [vim.HostSystem], name)


def getVirtualMachineByName(si, name):
    """
    Description: Find a vm by its name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - vm name (STRING)
    Return: virtual machine Object (OBJECT)
    """
    return getObj(si.RetrieveContent(), [vim.VirtualMachine], name)


def getDatastoreByName(si, name):
    """
    Description: Find a datastore by its name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - datastore name (STRING)
    Return: datastore Object (OBJECT)
    """
    return getObj(si.RetrieveContent(), [vim.Datastore], name)


def getNetworkByName(si, name, isVDS=False):
    """
    Description: Find a network by its name and return it
    Parameters : si - vCenter connection session (OBJECT)
                 name - network name (STRING)
    Return: network Object
    """
    if isVDS is False:
        networkObj = getObj(si.RetrieveContent(), [vim.Network], name)
    else:
        networkObj = getObj(
            si.RetrieveContent(), [vim.dvs.DistributedVirtualPortgroup], name
        )
    return networkObj


# connection settings: placeholder values, fill in real ones before running
vcenterIp = "vcenter.example.com"
vcenterUsername = "administrator@vsphere.local"
vcenterPassword = "password"

# connect vcenter
siObj = connectVcenter(vcenterIp, vcenterUsername, vcenterPassword)
# print(siObj.content.about)

# get datacenter by name
datacenterName = "UCP CI Datacenter"
datacenterObj = getDatacenterByName(siObj, datacenterName)
print("datacenterName is", datacenterObj.name, datacenterObj.datastore[0].name)

# get cluster by name
# clusterName = 'Dockerized'
# clusterObj = getClusterByName(siObj, clusterName)
# print("clusterName is", clusterObj.name)

# get host by name
# hostName = '192.168.25.205'
# hostObj = getHostByName(siObj, hostName)
# print("hostName is", hostObj.name)

# get datastore by name
# datastoreName = 'ds1'
# datastoreObj = getDatastoreByName(siObj, datastoreName)
# print("datastoreName is", datastoreObj.name)

# get network by name
# networkName = 'VM Network'
# networkObj = getNetworkByName(siObj, networkName)
# print("networkName is", networkObj.name)
# print("Vm's in this network", [vm.name for vm in networkObj.vm])

# get all vms inside datacenter
# vmsList = datacenterObj.vmFolder.childEntity
# for vm in vmsList:
#     print("Virtual Machine - ", vm.name)

# get vm by name
# vmObj = getVirtualMachineByName(siObj, 'k8s-master')
# print('VirtualMachineName', vmObj.name, dir(vmObj))

# poweroff the above virtual machine
# vmObj.PowerOff()

# poweron the above virtual machine
# vmObj.PowerOn()
aaf684914f88ee47e002fe6283aad1328b10f3ad
6cce023315d4083c7df0fcdeea2a037b00818878
/py-files/data_setup.py
3a1efa85492db400854022be0137e9d4defafa58
[]
no_license
Limmen/Distributed_ML
e02e865a123e552d3795c76a4a0846f2da6f3a55
d5b65a0bcb89182e3ac773b0a3cec46625dabccb
refs/heads/master
2021-05-09T03:52:41.530823
2018-01-28T12:21:47
2018-01-28T12:21:47
119,255,519
5
1
null
null
null
null
UTF-8
Python
false
false
2,735
py
import argparse

import pyspark
from pyspark.sql.functions import udf
from pyspark.sql.types import *
import tensorflow as tf
import pandas as pd
import numpy as np

SEQ_LABELS_TRAIN = "data/y_train.csv"
SEQ_FEATURES_TRAIN = "data/x_train.csv"
SEQ_LABELS_TEST = "data/y_test.csv"
SEQ_FEATURES_TEST = "data/x_test.csv"

conf = pyspark.SparkConf()
conf = conf.setAppName("har_data_setup").set("spark.hadoop.validateOutputSpecs", "false")
sc = pyspark.SparkContext(conf=conf)
sql = pyspark.SQLContext(sc)

CLEANED_DATA_PATH = "./cleaned_data"


def read_raw_data(sql):
    seq_features_train_raw = sql.read.format("com.databricks.spark.csv").options(header="false").load(SEQ_FEATURES_TRAIN)
    seq_labels_train_raw = sql.read.format("com.databricks.spark.csv").options(header="false").load(SEQ_LABELS_TRAIN)
    seq_features_test_raw = sql.read.format("com.databricks.spark.csv").options(header="false").load(SEQ_FEATURES_TEST)
    seq_labels_test_raw = sql.read.format("com.databricks.spark.csv").options(header="false").load(SEQ_LABELS_TEST)
    return seq_features_train_raw, seq_labels_train_raw, seq_features_test_raw, seq_labels_test_raw


seq_features_train_raw, seq_labels_train_raw, seq_features_test_raw, seq_labels_test_raw = read_raw_data(sql)

features_train_size = seq_features_train_raw.count()
labels_train_size = seq_labels_train_raw.count()
features_test_size = seq_features_test_raw.count()
labels_test_size = seq_labels_test_raw.count()
print("train feat size: {0}, train label size: {1}, test feat size {2}, test label size {3}".format(
    features_train_size, labels_train_size, features_test_size, labels_test_size))

seq_labels_test_raw.printSchema()

# map each distinct label string to an integer class index
classes = seq_labels_train_raw.unionAll(seq_labels_test_raw).select("_c0").distinct().rdd.map(lambda row: row._c0).zipWithIndex().collectAsMap()
seq_labels_train_clean = seq_labels_train_raw.select("_c0").rdd.map(lambda row: classes[row._c0])
seq_labels_test_clean = seq_labels_test_raw.select("_c0").rdd.map(lambda row: classes[row._c0])
labels_train_np = seq_labels_train_clean.collect()
labels_test_np = seq_labels_test_clean.collect()

np.savetxt(CLEANED_DATA_PATH + "/train/labels/y_train.csv", np.array(labels_train_np).astype(int), fmt='%i', delimiter=",")
np.savetxt(CLEANED_DATA_PATH + "/test/labels/y_test.csv", np.array(labels_test_np).astype(int), fmt='%i', delimiter=",")
np.savetxt(CLEANED_DATA_PATH + "/classes/classes.csv", np.array([[k, v] for k, v in classes.items()]), fmt="%s", delimiter=",")
np.savetxt(CLEANED_DATA_PATH + "/size/sizes.csv",
           np.array([["features_train_size", features_train_size],
                     ["labels_train_size", labels_train_size],
                     ["features_test_size", features_test_size],
                     ["labels_test_size", labels_test_size]]),
           fmt="%s", delimiter=",")
df7e6169ccdea122ba78000885ec1008e3579ebd
bc539788b876773e294383863252c1637de9eb7f
/Pscrapy/PycharmProjects/Reptile/Practise/practise4.py
a3249cb05379978d945554dfcee685df198bd50c
[]
no_license
umsung/scrapy
4eb56bf74f3e617e49dcdec61cf77010eb912f4f
deacd9f289159c5af114b0dd3110448ad7eb43e8
refs/heads/master
2020-05-31T14:11:46.530793
2019-10-16T01:32:25
2019-10-16T01:32:25
190,321,772
3
0
null
null
null
null
UTF-8
Python
false
false
120
py
num = input('Enter a number: ').strip()
num = int(num)
# prints a right-aligned star triangle, e.g. rows of 1..4 stars for num = 5
for i in range(1, num):
    print(' ' * (num - i), '* ' * i)
a3abadf3e5c58d3196f6ac02b55a07ff35093bd4
16c77266859989d156fe3f4d0ce3a37a1898ad38
/ml/sk/__init__.py
2e9e9ccc69e32e7f29ce91893b4883b61084bc5d
[ "MIT" ]
permissive
SRHerzog/ut
92620e66be2ea9707d9cd3cf390179326ed2eefe
894bd5607eb76676aaea7a37ed8a91b5fb5e805e
refs/heads/master
2021-06-30T19:15:46.131299
2017-09-15T20:47:35
2017-09-15T20:47:35
103,696,926
0
0
null
2017-09-15T20:08:10
2017-09-15T20:08:10
null
UTF-8
Python
false
false
54
py
from __future__ import division

__author__ = 'thor'
8cf423d1f9f0257fa371e065ae2d57628aeedaf2
ee4db47ccecd23559b3b6f3fce1822c9e5982a56
/Analyse Data/NumpPy.py
d2bf919d8ad04f330f143dfea2c477d7387bd3ee
[]
no_license
meoclark/Data-Science-DropBox
d51e5da75569626affc89fdcca1975bed15422fd
5f365cedc8d0a780abeb4e595cd0d90113a75d9d
refs/heads/master
2022-10-30T08:43:22.502408
2020-06-16T19:45:05
2020-06-16T19:45:05
265,558,242
0
1
null
null
null
null
UTF-8
Python
false
false
4,021
py
# Introduction to NumPy: Numerical Python #NumPy is great at storing and manipulating numerical data in arrays. import numpy as np # NumPy Arrays #A NumPy array is a special type of list. It’s a data structure that organizes multiple items. Each item can be of any type (strings, numbers, or even other arrays). test_1 = np.array([92, 94, 88, 91, 87]) # test_1 is now a numpy array #Creating an Array from a CSV # Note the delimiter can be in other formats such as semi colon and tabs. test_2 = np.genfromtxt('test_2.csv', delimiter=',') # Operations with NumPy Arrays # Let’s compare how to add a number to each value in a python list versus a NumPy array: # With a list A = [1, 2, 3, 4, 5,6] A_plus_3 = [] for i in range(len(A)): l_plus_3.append(A[i] + 3) # With an array a = np.array(l) a_plus_3 = a + 3 #Squaring each value: a ** 2 #array([ 1, 4, 9, 16, 25, 36]) #Taking the square root of each value: np.sqrt(a) #array([ 1, 1.41421356, 1.73205081, 2, 2.23606798, 2.44948974]) # Add extra 2 points to test_3 test_3 = np.array([87, 85, 72, 90, 92]) test_3_fixed = test_3 + 2 # Operations with NumPy Arrays II # Arrays can also be added to or subtracted from each other in NumPy, # assuming the arrays have the same number of elements. a = np.array([1, 2, 3, 4, 5]) b = np.array([6, 7, 8, 9, 10]) a + b #array([ 7, 9, 11, 13, 15]) test_1 = np.array([92, 94, 88, 91, 87]) test_2 = np.array([79, 100, 86, 93, 91]) test_3 = np.array([87, 85, 72, 90, 92]) test_3_fixed = test_3 + 2 total_grade = test_1 + test_2 + test_3_fixed # average score final_grade = total_grade / 3 print(final_grade) # Two-Dimensional Arrays # in NumPy we can create an array of arrays. # If the arrays that make up our bigger array are all the same size, then it has a special name: a two-dimensional array. #we could have also stored all of this data in a single, two-dimensional array: Notice the double square brackets syntax [[]] np.array([[92, 94, 88, 91, 87], [79, 100, 86, 93, 91], [87, 85, 72, 90, 92]]) coin_toss_again = np.array([[1,0,0,1,0],[0,0,1,1,1]]) # Selecting Elements from a 1-D Array # This uses normal indexing test_1 = np.array([92, 94, 88, 91, 87]) test_2 = np.array([79, 100, 86, 93, 91]) test_3 = np.array([87, 85, 72, 90, 92]) jeremy_test_2 = test_2[-2] #grabs 93 manual_adwoa_test_1 = test_1[1:3] # grabs 94 88 # Selecting Elements from a 2-D Array # The syntax for selecting from a 2-d array is a[row,column] where a is the array. 
a = np.array([[32, 15, 6, 9, 14], [12, 10, 5, 23, 1], [2, 16, 13, 40, 37]]) a[2,1] #16 # selects the first column a[:,0] #array([32, 12, 2]) a[1,:] #array([12, 10, 5, 23, 1]) # selects the first three elements of the first row a[0,0:3] #array([32, 15, 6]) student_scores = np.array([[92, 94, 88, 91, 87], [79, 100, 86, 93, 91], [87, 85, 72, 90, 92]]) tanya_test_3 = student_scores[2,0] cody_test_scores = student_scores[:,4] # Logical Operations with Arrays # < > == != | & porridge = np.array([79, 65, 50, 63, 56, 90, 85, 98, 79, 51]) cold = porridge[porridge < 60] hot = porridge[porridge > 80] just_right = porridge[(porridge >= 60) & (porridge <= 80)] print(cold, hot, just_right ) # Review import numpy as np temperatures = np.genfromtxt('temperature_data.csv',delimiter=',') print(temperatures) temperatures_fixed = temperatures + 3.0 monday_temperatures = temperatures_fixed[0,:] thursday_friday_morning = temperatures_fixed[3:,1] temperature_extremes = temperatures_fixed[(temperatures_fixed < 50) | (temperatures_fixed > 60)] # Project Bettys Bakery import numpy as np cupcakes = np.array([2,0.75,2,1,0.5]) recipes = np.genfromtxt('recipes.csv',delimiter=',') print(recipes) eggs = recipes[:,2] print(eggs) #egg = recipes[recipes[:,2] == 1] cookies = recipes[2,:] print(cookies) double_batch = cupcakes * 2 print(double_batch) grocery_list = cookies + double_batch print(grocery_list)
e1e458abdbc5777af32bf1194e4add3db39fd867
36b9fa9f2d8ff655546a33cb47ddacd009bc00c9
/autogalaxy/profiles/light/linear/__init__.py
bb275334df92a6bf4b34d18f21e6aa123ae5dc62
[ "MIT" ]
permissive
Jammy2211/PyAutoGalaxy
67b76968b8516309b2ebdbff7affd5c1923cf0b1
d1a2e400b7ac984a21d972f54e419d8783342454
refs/heads/main
2023-08-19T01:00:22.320073
2023-08-17T15:39:46
2023-08-17T15:39:46
216,190,501
27
9
MIT
2023-09-13T14:07:43
2019-10-19T10:45:44
Python
UTF-8
Python
false
false
401
py
from .abstract import LightProfile, LightProfileLinear, LightProfileLinearObjFuncList
from .gaussian import Gaussian, GaussianSph
from .moffat import Moffat
from .sersic import Sersic, SersicSph
from .exponential import Exponential, ExponentialSph
from .dev_vaucouleurs import DevVaucouleurs, DevVaucouleursSph
from .sersic_core import SersicCore
from .exponential_core import ExponentialCore
7a8252f05c1ee87e900b5ed853a3cabc43688b96
74081581575e80b2b0f6b75ba912d58ea4f37ac6
/maskrcnn_benchmark/modeling/detector/detectors.py
35064d2f9992fd2c2e08d4a29ad31d1e5a229f8f
[]
no_license
youngfly11/LCMCG-PyTorch
5f6b9f231613b86ac7b250ca0f34229402e1615e
e95299b9a9f1b13e21750ef0dcde0941d703d009
refs/heads/master
2021-10-25T19:29:12.967318
2021-10-25T03:35:14
2021-10-25T03:35:14
221,908,808
56
12
null
null
null
null
UTF-8
Python
false
false
419
py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .generalized_rcnn import GeneralizedRCNN
from .generalized_rcnn_det import GeneralizedRCNNDet

_DETECTION_META_ARCHITECTURES = {"GeneralizedRCNN": GeneralizedRCNN,
                                 "GeneralizedRCNNDet": GeneralizedRCNNDet}


def build_detection_model(cfg):
    meta_arch = _DETECTION_META_ARCHITECTURES[cfg.MODEL.META_ARCHITECTURE]
    return meta_arch(cfg)
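A usage sketch, assuming the standard maskrcnn-benchmark config flow; the YAML path here is hypothetical, and the registry key comes straight from the config value:

from maskrcnn_benchmark.config import cfg

cfg.merge_from_file("configs/my_experiment.yaml")  # hypothetical config path
# MODEL.META_ARCHITECTURE in the YAML selects the class from the registry
model = build_detection_model(cfg)   # e.g. GeneralizedRCNN(cfg) or GeneralizedRCNNDet(cfg)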
cbf3083dd8ea5ae4718b4b154ac624468f4e7c15
68b23f776fddb77de735419cbf30f33a49e9def2
/backend/terminus/home/urls.py
501046c917470aac71074c89c9f1d1a75f5cceac
[]
no_license
vash512/terminus
cbd00f74a600a13fd52aa2206c3eb1e7b5301ec7
4eb86d853bc76c22cd1af3c86fed1bc10d457c88
refs/heads/master
2016-09-05T14:49:42.655635
2015-07-09T03:34:38
2015-07-09T03:34:38
32,414,141
0
0
null
null
null
null
UTF-8
Python
false
false
1,155
py
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView

urlpatterns = patterns('home.views',
    url(r'^$', 'index_view', name='index'),
    url(r'^humans.txt$', TemplateView.as_view(template_name='statics/humans.txt',
                                              content_type='text/plain; charset=utf-8')),
    url(r'^robots.txt$', TemplateView.as_view(template_name='statics/robots.txt',
                                              content_type='text/plain; charset=utf-8')),
    url(r'^sitemap.xml$', TemplateView.as_view(template_name='statics/sitemap.xml',
                                               content_type='application/xml; charset=utf-8')),
    url(r'^contacto/', 'contacto'),
    url(r'^acercade/', 'acercade'),
    url(r'^corpuscontable', 'corpus'),
    url(r'^ayuda', 'ayuda'),
    # terminos: test URLs
    url(r'^terminos', 'terminos'),
    url(r'^terminos/termino', 'termino_detalle'),
    url(r'^q/$', 'busqueda'),
    url(r'^q/termino', 'busqueda_list'),
    url(r'^docs/doc', 'doc_detalle'),
    url(r'^docs/$', 'docs'),
    # these routes should be managed by terminos.urls and terminos.views
    url(r'^login/', 'log_in'),
    url(r'^registro/', 'registro'),
    url(r'^logout/', 'log_out'),
)
a05385930991319e2dc5ebf3029f337f10410b3a
ffba5c4a64a87214160c5904b220be8a6e88cd58
/python-packages/maizy_f/r.py
5e83d68deeca516eed184231752129e90e707f19
[]
no_license
maizy/dev-setup
6e8ae5bc9b56dd85f7612b453e92e31043816189
5eb8473cf9c66c66ff8fd9e8c72cecf931f62494
refs/heads/master
2022-11-13T22:08:00.184435
2022-11-13T08:33:25
2022-11-13T08:33:25
7,286,016
2
0
null
2017-12-22T11:57:00
2012-12-22T13:20:57
Python
UTF-8
Python
false
false
361
py
# coding: utf-8
# Copyright (c) Nikita Kovaliov, maizy.ru, 2013

from __future__ import print_function, absolute_import, unicode_literals

from fabric.api import task, run


@task
def info():
    run('uname -a')
    run('hostname')
    lsb_rel = run('which lsb_release')
    if lsb_rel != '':
        print('Debian like os found')
        run('lsb_release -a')
9290a1f679623cb6793f2eaef635da4b4689e597
6fce025097cebfd9d1dd37f6611e7fdfdbea90e6
/rainfields/model_conv.py
8a22d603ee800aed0a84aee26d7728f6111a7b66
[]
no_license
ANU-WALD/pluvi_pondus
ec0439d19acdcf4fdf712d6b14a1714297d661b2
ff8680f7115ab2cb75138bf6705abb59618e47d1
refs/heads/master
2021-07-01T14:32:14.501631
2020-08-22T09:41:28
2020-08-22T09:41:28
138,804,652
0
0
null
null
null
null
UTF-8
Python
false
false
2,287
py
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten
from tensorflow.keras.optimizers import Adam, SGD
import numpy as np

x = np.load("x_conv.npy")[:10000000]
print(x.shape)
y = np.load("y_conv.npy")[:10000000, None]
print(y.shape)

prec_mask = np.nonzero(y > 0)
print(prec_mask)
print(len(prec_mask))
# print(prec_mask.shape)
# print(prec_mask[0])
print(prec_mask[0].shape)

x_prec = x[prec_mask[0], :]
y_prec = y[prec_mask[0], :]
print(x_prec.shape, y_prec.shape)

zero_mask = np.nonzero(y == 0)
x_dry = x[zero_mask[0], :]
y_dry = y[zero_mask[0], :]
print(x_dry.shape, y_dry.shape)

idxs = np.arange(x_dry.shape[0])
np.random.seed(0)
np.random.shuffle(idxs)

# keep twice as many dry samples as precipitation samples
n = x_prec.shape[0] * 2
x_dry = x_dry[idxs[:n], :]
y_dry = y_dry[idxs[:n], :]
print(x_dry.shape, y_dry.shape)

x = np.concatenate((x_prec, x_dry), axis=0)
y = np.concatenate((y_prec, y_dry), axis=0)
print(x.shape, y.shape)

idxs = np.arange(x.shape[0])
np.random.shuffle(idxs)
x = x[idxs, :]
x = np.reshape(x, (x.shape[0], -1))
y = y[idxs, :]
print(x.shape, y.shape)

model = Sequential()
model.add(Dense(100, activation='relu', input_dim=100))
model.add(Dense(200, activation='relu'))
model.add(Dense(400, activation='relu'))
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(1, activation='relu'))

"""
model = Sequential()
model.add(Conv2D(16, kernel_size=3, activation='relu', padding='same', input_shape=(5, 5, 4)))
model.add(Conv2D(32, kernel_size=3, activation='relu', padding='same'))
model.add(Conv2D(64, kernel_size=3, activation='relu', padding='same'))
model.add(Conv2D(32, kernel_size=3, activation='relu', padding='same'))
model.add(Flatten())
model.add(Dense(1, activation='relu'))
"""

x_train = x[:175000, :]
x_test = x[175000:, :]
y_train = y[:175000, :]
y_test = y[175000:, :]
print(y_train.shape, y_test.shape)
print(np.square(y_train).mean(axis=0))
print(np.square(y_test).mean(axis=0))
print(np.abs(y_train).mean(axis=0))
print(np.abs(y_test).mean(axis=0))

# classifier.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.compile(optimizer=Adam(lr=0.000001), loss='mse', metrics=['mae', 'mse'])
# epochs= replaces the long-deprecated nb_epoch= keyword
model.fit(x_train, y_train, batch_size=32, epochs=10, validation_data=(x_test, y_test))
53b09cda44362a8837373232a16c18428dcb871d
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p02585/s497439083.py
c64c92fb3dffd1704057e5332cba0a7d2217f5e0
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
1,416
py
n, k = map(int, input().split())
P = list(map(int, input().split()))
C = list(map(int, input().split()))

g = [[0] * n for _ in range(n)]
A = [n] * n

# for i in range(n):
#     tmp = 0
#     idx = i
#     cnt = 0
#     set_ = set()
#     while cnt < n:
#         if C[idx] not in set_:
#             tmp += C[idx]
#             set_.add(C[idx])
#             g[i][cnt] = tmp
#             idx = P[idx] - 1
#             cnt += 1
#         else:
#             p = len(set_)
#             A[i] = p
#             break

ans = -float('inf')
for i in range(n):
    S = []
    idx = P[i] - 1
    S.append(C[idx])
    while idx != i:
        idx = P[idx] - 1
        S.append(S[-1] + C[idx])
    v, w = k // len(S), k % len(S)
    if k <= len(S):
        val = max(S[:k])
    elif S[-1] <= 0:
        val = max(S)
    else:
        val1 = S[-1] * (v - 1)
        val1 += max(S)
        val2 = S[-1] * v
        if w != 0:
            val2 += max(0, max(S[:w]))
        val = max(val1, val2)
    ans = max(ans, val)

# for i in range(n):
#     v, w = k // A[i], k % A[i]
#     if A[i] < k:
#         if g[i][A[i] - 1] <= 0:
#             val = max(g[i][:A[i]])
#         else:
#             val1 = (v - 1) * g[i][A[i] - 1]
#             val1 += max(g[i][:A[i]])
#             val2 = v * g[i][A[i] - 1]
#             if w != 0:
#                 val2 += max(0, max(g[i][:w]))
#             val = max(val1, val2)
#     else:
#         val = max(g[i][:k])
#     ans = max(ans, val)

print(ans)
9a79bc2d121ab5020b5787e81900966444fc6e38
76fefdb20c453e830b8db86f32a1b42d79108cdd
/venv/bin/django-admin.py
a02fc84b3fe69bf065a0bd2537f44fe524e8d4d0
[]
no_license
Belie06Loryn/chaty-final-master
35f6762e9bf45e1086db6280cd0bd7dc0828fb96
86e839c069eb54dad3390e84c6b7534d165a3942
refs/heads/master
2022-12-09T14:31:19.478090
2019-12-08T16:00:45
2019-12-08T16:00:45
226,365,156
0
0
null
2022-12-08T06:59:44
2019-12-06T16:08:51
Python
UTF-8
Python
false
false
166
py
#!/home/alexie/Documents/chaty-final-master/venv/bin/python
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
5f28d3473174758f29072135291cc13603f342ab
94bb77d0847df86ead773650cf4aa0885ed3ad4e
/dappcrowd/restapi/submissions_endpoint.py
d2fb9fe38acbfc271ba77225c557ec7a4ae17f5a
[]
no_license
Tribler/dappcoder
3766f0b252ac38d889ad3596b5b6335669d31100
8ae43d51a284929bc081c87debc9ef003d1f9116
refs/heads/master
2020-04-01T07:53:38.236183
2019-01-17T13:07:53
2019-01-17T13:07:53
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,196
py
import json

from twisted.web import http
from twisted.web.server import NOT_DONE_YET

from dappcrowd.restapi.root_endpoint import DAppCrowdEndpoint


class SubmissionsEndpoint(DAppCrowdEndpoint):

    def render_GET(self, request):
        """
        Get all submissions.
        """
        dappcrowd_overlay = self.get_dappcrowd_overlay()
        return json.dumps({"submissions": dappcrowd_overlay.persistence.get_submissions()})

    def getChild(self, path, request):
        return SubmissionPKEndpoint(self.ipv8, self.ipfs_api, path)

    def render_PUT(self, request):
        """
        Create a new submission for an app request.
        """
        parameters = http.parse_qs(request.content.read(), 1)
        required_params = ['project_pk', 'project_id', 'submission']
        for required_param in required_params:
            if required_param not in parameters:
                request.setResponseCode(http.BAD_REQUEST)
                return json.dumps({"error": "missing parameter %s" % required_param})

        def on_block_created(blocks):
            request.write(json.dumps({"success": True}))
            request.finish()

        self.get_dappcrowd_overlay().create_submission(
            parameters['project_pk'][0].decode('hex'),
            parameters['project_id'][0],
            parameters['submission'][0]).addCallback(on_block_created)
        return NOT_DONE_YET


class SubmissionPKEndpoint(DAppCrowdEndpoint):

    def __init__(self, ipv8, ipfs_api, public_key):
        DAppCrowdEndpoint.__init__(self, ipv8, ipfs_api)
        self.public_key = public_key.decode('hex')

    def getChild(self, path, request):
        return SpecificSubmissionEndpoint(self.ipv8, self.ipfs_api, self.public_key, path)


class SpecificSubmissionEndpoint(DAppCrowdEndpoint):

    def __init__(self, ipv8, ipfs_api, public_key, submission_id):
        DAppCrowdEndpoint.__init__(self, ipv8, ipfs_api)
        self.public_key = public_key
        self.submission_id = submission_id
        self.putChild("reviews", SpecificSubmissionReviewsEndpoint(ipv8, ipfs_api, public_key, submission_id))

    def render_GET(self, request):
        if not self.get_dappcrowd_overlay().persistence.has_submission(self.public_key, self.submission_id):
            request.setResponseCode(http.NOT_FOUND)
            return json.dumps({"error": "the submission is not found"})

        return json.dumps({
            "submission": self.get_dappcrowd_overlay().persistence.get_submission(self.public_key, self.submission_id)
        })


class SpecificSubmissionReviewsEndpoint(DAppCrowdEndpoint):

    def __init__(self, ipv8, ipfs_api, public_key, submission_id):
        DAppCrowdEndpoint.__init__(self, ipv8, ipfs_api)
        self.public_key = public_key
        self.submission_id = submission_id

    def render_GET(self, request):
        if not self.get_dappcrowd_overlay().persistence.has_submission(self.public_key, self.submission_id):
            request.setResponseCode(http.NOT_FOUND)
            return json.dumps({"error": "the submission is not found"})

        return json.dumps({
            "reviews": self.get_dappcrowd_overlay().persistence.get_reviews(self.public_key, self.submission_id)
        })
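A client-side sketch of the PUT flow; the host, port, URL path and parameter values here are all hypothetical, chosen only to show the form-encoded parameters the handler parses:

import requests

resp = requests.put(
    "http://localhost:8085/submissions",        # hypothetical address and mount point
    data={
        "project_pk": "deadbeef",               # hex-encoded project public key
        "project_id": "1",
        "submission": "my-submission-payload",
    },
)
print(resp.json())  # {"success": true} once the block callback fires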
b4b798b4b5b230c3088019cf13bf6acb5fe54680
95d4c8f4cda7ad1c7623a2df02da8cee1ad0941d
/src/classifier/german_pos_tagger.py
a66192956c55f524f2b1142c45afdf0df24c1383
[ "MIT" ]
permissive
tiefenauer/ip7-python
8f587b7b77430facff19b24441490756b01d5b17
512105ba39110ec77d2ea0961dd7c2a42d4ec26d
refs/heads/master
2021-09-09T20:37:21.647146
2018-02-20T12:07:46
2018-02-20T12:07:46
107,635,390
0
0
null
null
null
null
UTF-8
Python
false
false
3,382
py
# -*- coding: utf-8 -*-
#
# Natural Language Toolkit: ClassifierBasedGermanTagger
#
# URL: <http://www.experimentallabor.de/>
#
# Copyright 2011 Philipp Nolte
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Tag German text.
"""

import re

from nltk.tag.sequential import ClassifierBasedTagger


class ClassifierBasedGermanTagger(ClassifierBasedTagger):
    """A classifier based German part-of-speech tagger. It has an accuracy of
    96.09% after being trained on 90% of the German TIGER corpus. The tagger
    extends the NLTK ClassifierBasedTagger and implements a slightly modified
    feature detector.
    """

    def feature_detector(self, tokens, index, history):
        """Implementing a slightly modified feature detector.

        @param tokens: The tokens from the sentence to tag.
        @param index: The current token index to tag.
        @param history: The previous tagged tokens.
        """
        word = tokens[index]
        if index == 0:
            # At the beginning of the sentence
            prevword = prevprevword = None
            prevtag = prevprevtag = None
            # word = word.lower()  # Lowercase at the beginning of sentence
        elif index == 1:
            prevword = tokens[index - 1]  # Note: no lowercase
            prevprevword = None
            prevtag = history[index - 1]
            prevprevtag = None
        else:
            prevword = tokens[index - 1]
            prevprevword = tokens[index - 2]
            prevtag = history[index - 1]
            prevprevtag = history[index - 2]

        if re.match('[0-9]+([\.,][0-9]*)?|[0-9]*[\.,][0-9]+$', word):
            # Included "," as decimal point
            shape = 'number'
        elif re.compile('\W+$', re.UNICODE).match(word):
            # Included unicode flag
            shape = 'punct'
        elif re.match('([A-ZÄÖÜ]+[a-zäöüß]*-?)+$', word):
            # Included dash for dashed words and umlauts
            shape = 'upcase'
        elif re.match('[a-zäöüß]+', word):
            # Included umlauts
            shape = 'downcase'
        elif re.compile("\w+", re.UNICODE).match(word):
            # Included unicode flag
            shape = 'mixedcase'
        else:
            shape = 'other'

        features = {
            'prevtag': prevtag,
            'prevprevtag': prevprevtag,
            'word': word,
            'word.lower': word.lower(),
            'suffix3': word.lower()[-3:],
            # 'suffix2': word.lower()[-2:],
            # 'suffix1': word.lower()[-1:],
            'preffix1': word[:1],  # included
            'prevprevword': prevprevword,
            'prevword': prevword,
            'prevtag+word': '%s+%s' % (prevtag, word),
            'prevprevtag+word': '%s+%s' % (prevprevtag, word),
            'prevword+word': '%s+%s' % (prevword, word),
            'shape': shape
        }

        return features
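A usage sketch under the training setup the docstring describes; it assumes train_sents and test_sents hold tagged German sentences (e.g. a 90/10 TIGER split) in NLTK's [(word, tag), ...] format, and relies on ClassifierBasedTagger training inside its constructor:

# Assumed inputs: train_sents / test_sents, lists of [(word, tag), ...] sentences
tagger = ClassifierBasedGermanTagger(train=train_sents)
print(tagger.tag(['Das', 'ist', 'ein', 'einfacher', 'Test']))
print(tagger.evaluate(test_sents))  # ~0.96 reported for a 90/10 TIGER split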
73b8eea0e247cc2fb5986af3fd0beca8578749f2
f0d713996eb095bcdc701f3fab0a8110b8541cbb
/E9FwvGyad5CDbiH4C_9.py
d750b3eefe4c93d1c5db878cb337dcc042cf9e95
[]
no_license
daniel-reich/turbo-robot
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
a7a25c63097674c0a81675eed7e6b763785f1c41
refs/heads/main
2023-03-26T01:55:14.210264
2021-03-23T16:08:01
2021-03-23T16:08:01
350,773,815
0
0
null
null
null
null
UTF-8
Python
false
false
1,348
py
""" Create a function that takes a 2D array as an argument and returns the number of people whose view is blocked by a tall person. The concert stage is pointed towards the top of the 2D array and the tall person (represented by a 2) blocks the view of all the people (represented by a 1) behind them. ### Examples block([ [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 2], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1] ]) ➞ 2 # The tall person blocks 2 people behind him thus # the function returns 2. block([ [1, 2, 1, 1], [1, 1, 1, 2], [1, 1, 1, 1], [1, 1, 1, 1], ]) ➞ 5 # There are 2 tall people that block everyone behind # them. The first tall person in the first row blocks 3 # people behind him while the second tall person in # the second row blocks 2 people behind him thus the # function returns 5. block([ [1, 1, 1, 1], [2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 1, 1], ]) ➞ 4 ### Notes 1. There is only a maximum of 1 tall person in every column. 2. No view is blocked if the tall person is in the last row. """ def block(lst): total = 0 for x in range(len(lst[0])): for y in range(len(lst)-1, 0, -1): if lst[y][x] < lst[y-1][x]: total += len(lst) - y return total
2d4b29a8afb8ba840c0c97a4e5296c98779b4382
1f696631898e0279951709e150da6d87045e4bc4
/mysite/blog/migrations/0003_auto_20201018_1329.py
55a90dffcfe644d3983ed6e04e389fcac44cd412
[]
no_license
henryfrstr/django_project_blog
9f50b004b2fed59304c3f5a1f05247d44a232992
0f3c391a3cd790ae504fb84a786158a1d775abda
refs/heads/main
2022-12-31T05:05:20.989719
2020-10-25T11:26:27
2020-10-25T11:26:27
305,067,579
0
0
null
null
null
null
UTF-8
Python
false
false
357
py
# Generated by Django 3.0.8 on 2020-10-18 10:29

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0002_auto_20201018_1327'),
    ]

    operations = [
        migrations.RenameField(
            model_name='post',
            old_name='titles',
            new_name='title',
        ),
    ]
10177a53490eb98107c90432833b44de0dc5241f
36e3d735e06d0642f1e8c26bff57305a01cc627c
/nms/priClient/settings.py
70dc9c3a439ae7865ae186e64032e891229bbeb1
[]
no_license
WilsonWangTHU/ipv6_server
5c768cdaeaf22ee508c5fff162b208481a42f95d
5088f58ab25061e65127699ed328ddaab24f9aac
refs/heads/master
2021-01-18T21:18:39.653994
2016-05-27T04:22:23
2016-05-27T04:22:23
55,656,523
4
0
null
null
null
null
UTF-8
Python
false
false
2,819
py
""" Django settings for subClient project. Generated by 'django-admin startproject' using Django 1.9.5. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '2+ytb#pjeh*g!9_3m(id@&mn$c+f56$q6fp=*%lkr)wp8hpfz%' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'net_data' ] MIDDLEWARE_CLASSES = [ # 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', # 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'priClient.urls' TEMPLATE_DEBUG = True TEMPLATE_DIRS = ( os.path.join(BASE_DIR, 'templates'), ) WSGI_APPLICATION = 'priClient.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = os.path.join(BASE_DIR, 'static/')
ee692b4e30708d7c40cc7efe0f041f668c08dcb4
f63314b4852fb97ad740e53e450110fcd08a515b
/app.py
ce08afaac1bc0e74d4fe1216c543d83672b30fd1
[]
no_license
xsomam/askfm-scrapper
ee0e2c4439d5be3e4ebd7fceb27d01fbff3aa4da
2e997268f40cd37dfc56bc7adc496d609106b327
refs/heads/master
2023-04-09T03:45:56.074797
2020-08-07T23:48:31
2020-08-07T23:48:31
384,990,560
0
0
null
2023-03-17T21:32:19
2021-07-11T15:56:16
null
WINDOWS-1250
Python
false
false
7,611
py
# Basic imports
import os
import time
import json
import logging

# Modules imports
import requests
from bs4 import BeautifulSoup

# Imports from files
from answer import SinglePost
from locators import *

logging.basicConfig(format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
                    datefmt='%d-%m-%Y %H:%M:%S',
                    level=logging.INFO,
                    filename='logs.txt')
logger = logging.getLogger('app')

ask_url = 'https://ask.fm'  #Base Ask URL
BASE_DIR = os.getcwd()

# List with pending profile links to scrape.
# First link is simply base URL: ask.fm/profile.
# After first visit, if there is "next" button on page (next page)
# Then this link is appended to this array and program continues scraping
_pending_list = []

# Array for each Singlequestion_obj object.
# Singlequestion_obj object contains every answer encapsulated within one question_obj
# With info about question, answer etc.
_question = []


# Retrieving function.
# First loops through profile, and stops looping if there are no more "next" pages left
# If there are none, starts writing text content to drive by iterating through questions array
def retrieve(askfm_nick, dl_img, to_csv, to_json):
    logger.info('Running "retrieve" function.')
    _n = 1
    # Append base Askfm profile link to pending list to be scraped first
    _pending_list.append(ask_url + '/' + askfm_nick)
    # Scraping continues for as long as at least 1 link exists in the "_pending_list" list.
    # len = 0 means there are no more links left.
    # Trial of creating folder for downloaded content with user's nickname as name
    try:
        logger.info('Trial of creation of directory for scraping: ' + BASE_DIR)
        os.mkdir(askfm_nick)
    except Exception:
        logger.info('FAIL of creation of directory for scraping: ' + BASE_DIR)
        print('Directory already exists or another error happened. Skipping...')
        pass
    # Loop runs as long as there is some page to scrape.
    # If there was no "next" page, loop ends
    while len(_pending_list) > 0:
        for link in _pending_list:
            print(f'Connecting : {link}')
            logger.info('Establishing connection to: ' + link)
            # Get content of page, parse it with BS4
            try:
                site = requests.get(link).content
                soup = BeautifulSoup(site, 'html.parser')
                # Select each individual question on profile
                all_questions = soup.select(SiteLocators.QUESTIONS)
            except Exception:
                print(f'Connection error at: {link}. Retrial in 5 seconds...')
                # logger.info(f'Connection error at: {link}. Retrial in 5 seconds...')
                time.sleep(5)
                continue
            # From array of questions we create actual objects which will contain
            # Only important data (like question, answer in text etc.)
            # Which we will perform operations on later
            for question in all_questions:
                question_obj = SinglePost(question)
                _question.append(question_obj)
                logger.info(f'Adding question #{_n}.')
                # If given question had image, we access it and download it
                if question_obj.image:
                    save_one_image(askfm_nick, question_obj)
                _n += 1
            # Remove already scraped profile from pending list
            _pending_list.remove(link)
            print(f'{link} removed from temp...')
            logger.info(f'{link} removed from temp.')
            # If there is next page, we again start looping
            next_page = soup.select_one(SiteLocators.NEXT)
            logger.info('Retrieving next page link')
            print('Retrieving next page link')
            if next_page:
                _pending_list.append(ask_url + next_page.attrs['href'])
                logger.info(f"Link to next site appended to temp list: {ask_url}{next_page.attrs['href']}")
                print(f"{ask_url}{next_page.attrs['href']} appending successful! Looping back...")
            else:
                logger.info(f'No "Next" link found. Retrieving done.')
                print('No "Next" link found. Retrieving done.')
    perform_file_operations(askfm_nick, dl_img, to_csv, to_json)


# Function dispatching file operations
def perform_file_operations(askfm_nick, dl_img, to_csv, to_json):
    for each in _question:
        if to_csv:
            save_to_csv(askfm_nick)
        if to_json:
            save_to_json(askfm_nick)


def save_to_json(askfm_nick):
    logger.info('Running "save_to_json" function.')
    print('Saving to JSON')
    _list = []
    file_name = f'{askfm_nick}.json'
    with open(os.path.join(BASE_DIR, askfm_nick, file_name), 'w') as file:
        for each in _question:
            json_dict = {
                'question': each.question,
                'answer': each.answer,
                'likes': each.likes,
                'time': each.date,
                'link': each.link,
                'img': f"{askfm_nick}-{each.link.split('/')[-1]}.{each.image_extension}" if each.image else None,
                'asker_url': each.asker
            }
            _list.append(json_dict)
            # if each.image:
            #     save_images(each.image_link, each.image_extension, each.link)
        json.dump(_list, file, indent=4, ensure_ascii=True)
    print(f'Saved to JSON: {file_name}')


def save_to_csv(askfm_nick):
    logger.info('Running "save_to_csv" function.')
    pass


def save_images(askfm_nick):
    logger.info('Running "save_images" function.')
    for each in _question:
        if each.image:
            print('Saving image....')
            _photo_name = f"{askfm_nick}-{each.link.split('/')[-1]}.{each.image_extension}"
            try:
                logger.info('Trial of saving image begins.')
                logger.info('Requesting image from: ' + each.image_link)
                photo_file = requests.get(each.image_link).content
                img_path = os.path.join(BASE_DIR, askfm_nick, _photo_name)
                with open(img_path, 'wb') as f:
                    f.write(photo_file)
                logger.info('Saving image to: ' + img_path)
                print(f"Image saved: {_photo_name}")
            except Exception:
                print(f"Could not get image {_photo_name}. Skipping...")
                logger.info('Error with saving image: ' + _photo_name)
                pass


def save_one_image(askfm_nick, question_obj):
    logger.info('Running "save_one_image" function.')
    print('Saving image....')
    _photo_name = f"{askfm_nick}-{question_obj.link.split('/')[-1]}.{question_obj.image_extension}"
    try:
        logger.info('Trial of saving image begins.')
        logger.info('Requesting image from: ' + question_obj.image_link)
        photo_file = requests.get(question_obj.image_link).content
        img_path = os.path.join(BASE_DIR, askfm_nick, _photo_name)
        with open(img_path, 'wb') as f:
            f.write(photo_file)
        logger.info('Saving image to: ' + img_path)
        print(f"Image saved: {_photo_name}")
    except Exception:
        print(f"Could not get image {_photo_name}. Skipping...")
        logger.info('Error with saving image: ' + _photo_name)
        pass
023d1d8dece7491ac60d165dac3295008bf0a004
6109a95a284891792c35d0d19906ab8d1697f9c7
/src/k8s-configuration/azext_k8s_configuration/vendored_sdks/v2021_05_01_preview/aio/operations/_operations.py
3c82397333fb155b013c9017f213f97b26d8e9e6
[ "MIT", "LicenseRef-scancode-generic-cla" ]
permissive
Tatsinnit/azure-cli-extensions
3e5a1752edced00d7c33660027d2c17fae074569
a1959b123d4c11149adae2728ab5791949889d54
refs/heads/master
2022-10-05T17:40:10.825889
2022-03-16T10:33:56
2022-03-16T10:33:56
250,102,909
0
0
MIT
2020-03-25T22:12:01
2020-03-25T22:12:01
null
UTF-8
Python
false
false
4,949
py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings

from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat

from ... import models as _models

T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]


class Operations:
    """Operations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance
    that instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.ResourceProviderOperationList"]:
        """List all the available operations the KubernetesConfiguration resource provider supports.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ResourceProviderOperationList or the result of
         cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.kubernetesconfiguration.v2021_05_01_preview.models.ResourceProviderOperationList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ResourceProviderOperationList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-05-01-preview"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize('ResourceProviderOperationList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/providers/Microsoft.KubernetesConfiguration/operations'}  # type: ignore
cee01a1f512e64d11d177b39003a6d66c4c62798
f375899369ba86aed1da89101c31817168ffec40
/cinema/urls.py
f73d16e3686a88eb9e7b40b41d324fb2021b3100
[]
no_license
sheremilbekov/cinema
f596a7d1e41f03161a3ddf0c3594f39619c812df
d8baac5c7e25a90340a35e1e0b0cce093014f965
refs/heads/master
2023-03-30T10:19:20.654540
2021-04-07T12:10:54
2021-04-07T12:10:54
354,850,726
0
0
null
null
null
null
UTF-8
Python
false
false
941
py
"""cinema URL Configuration

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  path('', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.urls import include, path
    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include

urlpatterns = [
    path('admin/', admin.site.urls),
    path('cooking/', include('main.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
c3a87e73a1c9e46f95c2c5bbf037974632f09470
747f759311d404af31c0f80029e88098193f6269
/addons/esale_joomla/__terp__.py
0b9ba5da767aab77ff4bccd6f9b18701e5bdec38
[]
no_license
sgeerish/sirr_production
9b0d0f7804a928c0c582ddb4ccb7fcc084469a18
1081f3a5ff8864a31b2dcd89406fac076a908e78
refs/heads/master
2020-05-19T07:21:37.047958
2013-09-15T13:03:36
2013-09-15T13:03:36
9,648,444
0
1
null
null
null
null
UTF-8
Python
false
false
62
py
/home/openerp/production/extra-addons/esale_joomla/__terp__.py
16effac639ce13ca5ccf22f2cfad0658eac06638
547ba955855ff623a8ef6e80fcfaddebadf34bed
/Chapter08/B06246_08_14-slope.py
a5a8f6c01c4af2755f280d506db81016afd04122
[]
no_license
CodedQuen/QGIS-Python-Programming-Cookbook
94a36d265d0336d5bb36ac02d637ba17ee765b04
f84e0159f7c8ec81a29573a7fd2e03b046efce33
refs/heads/master
2022-05-28T05:04:35.053121
2020-05-05T09:44:50
2020-05-05T09:44:50
261,414,871
0
0
null
null
null
null
UTF-8
Python
false
false
2,009
py
# Computing Road Slope using Elevation Data
# https://github.com/GeospatialPython/Learn/raw/master/road.zip
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import processing

dem = "/qgis_data/road/dem.asc"
road = "/qgis_data/road/road.shp"
slope = "/qgis_data/road/slope.tif"
segRoad = "/qgis_data/road/segRoad.shp"
steepness = "/qgis_data/road/steepness.shp"
hillshade = "/qgis_data/road/hillshade.tif"

demLyr = QgsRasterLayer(dem, "DEM")
roadLyr = QgsVectorLayer(road, "Road", "ogr")

ext = demLyr.extent()
xmin = ext.xMinimum()
ymin = ext.yMinimum()
xmax = ext.xMaximum()
ymax = ext.yMaximum()
demBox = "{},{},{},{}".format(xmin, xmax, ymin, ymax)

processing.runalg("grass7:r.slope", dem, 0, False, 1, 0, demBox, 0, slope)

ext = roadLyr.extent()
xmin = ext.xMinimum()
ymin = ext.yMinimum()
xmax = ext.xMaximum()
ymax = ext.yMaximum()
roadBox = "{},{},{},{}".format(xmin, xmax, ymin, ymax)

processing.runalg("grass7:v.split.length", road, 500, roadBox, -1, 0.0001, 0, segRoad)

slopeLyr = QgsRasterLayer(slope, "Slope")
segRoadLyr = QgsVectorLayer(segRoad, "Segmented Road", "ogr")
QgsMapLayerRegistry.instance().addMapLayers([segRoadLyr, slopeLyr], False)

processing.runalg("saga:addgridvaluestoshapes", segRoad, slope, 0, steepness)

steepLyr = QgsVectorLayer(steepness, "Road Gradient", "ogr")
roadGrade = (
    ("Rolling Hill", 0.0, 20.0, "green"),
    ("Steep", 20.0, 40.0, "yellow"),
    ("Very Steep", 40.0, 90.0, "red"))

ranges = []
for label, lower, upper, color in roadGrade:
    sym = QgsSymbolV2.defaultSymbol(steepLyr.geometryType())
    sym.setColor(QColor(color))
    sym.setWidth(3.0)
    rng = QgsRendererRangeV2(lower, upper, sym, label)
    ranges.append(rng)

field = "slopetif"
renderer = QgsGraduatedSymbolRendererV2(field, ranges)
steepLyr.setRendererV2(renderer)

processing.runalg("saga:analyticalhillshading", dem, 0, 158, 45, 4, hillshade)
hs = QgsRasterLayer(hillshade, "Terrain")
QgsMapLayerRegistry.instance().addMapLayers([steepLyr, hs])
9620af649f65a0c0002935d9e24ea87dd7578b35
b0cdab54c5e81681125c01801148c287605ee8d0
/speciality/migrations/0005_auto_20181228_2150.py
ad4d5210ff776dbc55eeccf74f5266e8a064ed44
[]
no_license
lpd76/rdavid2
5528746749acc51d4d0f5efd77886929798e2569
18aa5120fe4ba0ea44f611dd52b008db52641f17
refs/heads/master
2020-04-13T20:47:58.141579
2019-01-17T16:51:31
2019-01-17T16:51:31
null
0
0
null
null
null
null
UTF-8
Python
false
false
637
py
# Generated by Django 2.1.4 on 2018-12-28 21:50

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('speciality', '0004_auto_20181228_2140'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='speciality',
            options={'verbose_name_plural': 'specialities'},
        ),
        migrations.AlterField(
            model_name='specialitydetails',
            name='speciality',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='speciality.Speciality'),
        ),
    ]
b68987bce2f40abf5a5b3be0d046f49f02354bc8
650f9e246de38d0ceaee8726f27801f3337e24ce
/string-trainer/simple/csimple.py
7d3e49c4a22d4dd82a8951120a8f4e6961a58054
[ "MIT" ]
permissive
paulscottrobson/old-trainer-archive
57b6fbb5886e5fe526c37f40b7fb9c179176bce7
b3659d3f36b7443594202e0ae9439e80e493a22c
refs/heads/master
2021-01-20T02:46:44.751050
2017-04-26T07:26:34
2017-04-26T07:26:34
89,451,106
0
0
null
null
null
null
UTF-8
Python
false
false
5,861
py
# *****************************************************************************************
#
#        Simple compiler for TAB1 format
#
# *****************************************************************************************

import re,sys


# *****************************************************************************************
#                            Compiler / Processor Exception
# *****************************************************************************************
class CompilerException(Exception):
    def __init__(self, message):
        self.message = message
        Exception.__init__(self)


# *****************************************************************************************
#                                     Strum class
# *****************************************************************************************
class Strum:
    def __init__(self, strumDef, qbTime, voices, label = ""):
        self.strum = strumDef
        self.qbTime = qbTime
        self.label = label
        self.preRender = self.convertToRender(strumDef, voices)

    def getStrum(self):
        return self.strum

    def getQuarterBeatTime(self):
        return self.qbTime

    def getLabel(self):
        return self.label

    def toString(self):
        s = self.strum + "@" + str(self.time)
        if self.label != "":
            s = s + "(" + self.label + ")"
        return s

    def convertToRender(self, strum, voices):
        strum = strum.upper().strip()
        r = []
        while strum != "":
            if strum[0] == 'X':
                r.append(-1)
                strum = strum[1:]
            elif strum[0] in Strum.FRETS:
                diatonic = Strum.FRETS.index(strum[0])
                r.append(Strum.TOCHROMATIC[diatonic % 7] + int(diatonic / 7) * 12)
                strum = strum[1:]
                if (strum + " ")[0] == '+':
                    r[-1] += 1
                    strum = strum[1:]
            else:
                raise CompilerException("Bad strum " + strum)
        # first strum given is the treble so make it the last.
        r.reverse()
        # right pad
        while len(r) < voices:
            r.insert(0, -1)
        return "".join([chr(x + 97) if x >= 0 else "-" for x in r])

    def render(self):
        return self.preRender


Strum.TOCHROMATIC = [
    0, 2, 4, 5, 7, 9, 10                    # D E F# G A B C
]

Strum.FRETS = "0123456789TLWHF"

Strum.QBOFFSETS = { "O":8, "o":8, "-":-2, "=":-3, ".":2 }


# *****************************************************************************************
#                                      Bar class
# *****************************************************************************************
class Bar:
    def __init__(self, barNumber, beats, voices):
        self.barNumber = barNumber
        self.beats = beats
        self.strums = []
        self.voices = voices
        self.qbPosition = 0

    def add(self, strumDef, label = ""):
        self.strums.append(Strum(strumDef, self.qbPosition, self.voices, label))
        self.qbPosition += 4
        return self

    def toString(self):
        s = "#{0} B:{1} V:{2} C:{3} {{".format(self.barNumber, self.beats, self.voices, len(self.strums))
        s = s + " ".join([x.toString() for x in self.strums]) + "}"
        return s

    def isOffset(self, c):
        return c in Strum.QBOFFSETS

    def offset(self, c):
        if not self.isOffset(c):
            raise CompilerException("Unknown offset " + c)
        self.qbPosition += Strum.QBOFFSETS[c]

    def render(self):
        r = ""
        qbPosition = 0
        for strum in self.strums:
            qbElapsed = strum.getQuarterBeatTime() - qbPosition
            while qbElapsed > 0:
                amt = min(8, qbElapsed)
                r = r + str(amt)
                qbElapsed = qbElapsed - amt
            r = r + strum.render()
            qbPosition = strum.getQuarterBeatTime()
        return r


# *****************************************************************************************
#                                      Song Class
# *****************************************************************************************
class Song:
    def __init__(self, sourceFile):
        self.reset()
        self.loadTab1(sourceFile)
        self.compileBody()
        if self.get("title") == "":
            raise CompilerException("No title provided")

    def reset(self):
        self.bars = []
        self.keys = { "title":"","author":"","beats":"4","tempo":"100", \
                      "version":"1", "tuning":"d3,a4,d4", "type":"dulcimer" }

    def get(self, key):
        return self.keys[key.strip().lower()]

    def loadTab1(self, sourceFile):
        # pre process file - tabs, spaces, comments
        source = open(sourceFile).readlines()
        source = [x if x.find("//") < 0 else x[:x.find("//")] for x in source]
        source = [x.replace("\t", " ").strip() for x in source]
        # key updates.
        for assign in [x for x in source if x.find(":=") >= 0]:
            assign = [x.strip() for x in assign.split(":=")]
            if assign[0] == '"' and assign[-1] == '"':
                assign = assign[1:-1]
            self.keys[assign[0].lower()] = assign[1]
        source = [x for x in source if x.find(":=") < 0]
        self.source = source

    def compileBody(self):
        for line in range(0, len(self.source)):
            if self.source[line] != "":
                for barPart in [x.strip() for x in self.source[line].split("|") if x.strip() != ""]:
                    newBar = Bar(len(self.bars), int(self.get("beats")), 3)
                    self.bars.append(newBar)
                    try:
                        self.compileTab1(newBar, barPart.upper())
                    except CompilerException as cEx:
                        newMsg = cEx.message + " @ " + str(line + 1)
                        raise Exception(newMsg)

    def compileTab1(self, bar, src):
        while src != "":
            m = re.match("^([X" + Strum.FRETS + "\\+]+)\\s*(.*)$", src)
            if m is not None:
                strum = m.group(1)
                bar.add(strum)
                src = m.group(2)
            elif src[0] in Strum.QBOFFSETS:
                bar.offset(src[0])
                src = src[1:].strip()
            else:
                raise CompilerException("Unknown command " + src)

    def exportToJSON(self, handle):
        handle.write("{ \n")
        keys = [x for x in self.keys.keys()]
        keys.sort()
        for k in keys:
            handle.write(' {0:14}:"{1}",\n'.format('"' + k + '"', self.keys[k]))
        handle.write(' "bars": [\n')
        for n in range(0, len(self.bars)):
            r = self.bars[n].render()
            handle.write('{0:14}"{1}"{2}\n'.format("", r, "," if n < len(self.bars) - 1 else ""))
        handle.write("\n ]\n")
        handle.write("} \n")


s = Song("twinkle.tab1")
s.exportToJSON(sys.stdout)
s.exportToJSON(open("../app/music.json", "w"))
10252106e1b1114e8e4adf56f12d670ac5aee1e0
397c9e2743c41cf591692c4fc37f43a9070119bd
/build/env/lib/python2.7/site-packages/SQLAlchemy-1.2.0b3-py2.7-linux-x86_64.egg/sqlalchemy/cutils.py
a62e8adc17fa043f78d6b8b32d3c703fd2682408
[ "Apache-2.0" ]
permissive
bopopescu/myhue
cf41238c782d12b3a1a0ee9ef70196359bb67894
5f566970a5a1fa5af9f01832c9e9808c47634bc7
refs/heads/master
2022-11-18T05:37:24.467150
2019-11-23T16:16:22
2019-11-23T16:16:22
282,390,507
0
0
Apache-2.0
2020-07-25T07:03:40
2020-07-25T07:03:39
null
UTF-8
Python
false
false
282
py
def __bootstrap__():
    global __bootstrap__, __loader__, __file__
    import sys, pkg_resources, imp
    __file__ = pkg_resources.resource_filename(__name__, 'cutils.so')
    __loader__ = None; del __bootstrap__, __loader__
    imp.load_dynamic(__name__,__file__)
__bootstrap__()
93c4453f26512207811cdba404053b9a07b2e9c1
b68887f55cfcd0225d732acfbfcc7f3724e49d5d
/pages/factories.py
a59431ddc5e91da1966fb1ba58c8d6ad49dcbfb0
[ "MIT" ]
permissive
rds0751/nhsuk-content-store
0ac7eb06f85cc97cd57e58a3f24e19db9991a8a2
7bd6a386e3583779ddba2347a4b3a80fdf75b368
refs/heads/master
2020-04-19T08:53:54.273378
2019-01-29T05:08:18
2019-01-29T05:08:18
168,092,530
0
0
null
2019-01-29T05:05:33
2019-01-29T05:05:33
null
UTF-8
Python
false
false
1,515
py
import factory

from home.factories import HomePageFactory, ParentBasedFactory
from . import models


class ConditionsPageFactory(ParentBasedFactory, factory.django.DjangoModelFactory):
    title = 'Conditions'
    slug = 'conditions'
    path = '000100010002'
    depth = 3

    _ParentFactory = HomePageFactory
    _unique = True

    class Meta:
        model = models.FolderPage


class ConditionPageFactory(ParentBasedFactory, factory.django.DjangoModelFactory):
    path = factory.Sequence(lambda n: '000100010002%04d' % (n + 1))
    depth = 4

    _ParentFactory = ConditionsPageFactory
    _unique = False

    class Meta:
        model = models.EditorialPage


class SymptomsPageFactory(ParentBasedFactory, factory.django.DjangoModelFactory):
    title = 'Symptoms'
    slug = 'symptoms'
    path = '000100010001'
    depth = 3

    _ParentFactory = HomePageFactory
    _unique = True

    class Meta:
        model = models.FolderPage


class SymptomPageFactory(ParentBasedFactory, factory.django.DjangoModelFactory):
    path = factory.Sequence(lambda n: '000100010001%04d' % (n + 1))
    depth = 4

    _ParentFactory = SymptomsPageFactory
    _unique = False

    class Meta:
        model = models.EditorialPage


class ConditionFolderPageFactory(ParentBasedFactory, factory.django.DjangoModelFactory):
    path = factory.Sequence(lambda n: '000100010002%04d' % (n + 1))
    depth = 4

    _ParentFactory = ConditionsPageFactory
    _unique = False

    class Meta:
        model = models.FolderPage
93f72136ac32eeb64dafcf570b2025f0c7a8aca7
a51b2cfd2fcfce0e800fba591efb675d7e83da61
/src/multi_gpu_train.py
6bb22be2ec00116833cd6539d7ba2be1e6fb2546
[]
no_license
yyht/tf-center-loss
a3c4164b9de151b970ec3efaf38f79609e1f8f34
5cdcb638f1cec355f1938bf3a646338596bc21ef
refs/heads/master
2020-05-05T08:05:36.618949
2018-08-05T04:29:33
2018-08-05T04:29:33
null
0
0
null
null
null
null
UTF-8
Python
false
false
9,137
py
# author : lxy
# time : 2017.12.15 /09:56
# project:
# tool: python2
# version: 0.1
# modify:
# name: center loss
# citations: https://github.com/ydwen/caffe-face
#############################
import numpy as np
import tensorflow as tf
from read_tfrecord_v2 import read_single_tfrecord
from net import *
from Center_loss_custom import *
from mnist import mnist_data
import argparse

CENTER_LOSS_ALPHA = 0.9


def argument():
    parser = argparse.ArgumentParser(description="face resnet center loss")
    parser.add_argument('--batch_size', type=int, default=16, help='the batch_size num')
    parser.add_argument('--epoch_num', type=int, default=10,
                        help='the epoch num should bigger than 10000')
    parser.add_argument('--save_model_name', type=str, default='./face_model/model.ckpt',
                        help='model Parameters saved name and directory')
    parser.add_argument('--lr', type=float, default=0.001, help='the Learning rate begin')
    parser.add_argument('--sta', type=str, default='train', help="input should 'train' or 'test' ")
    parser.add_argument('--img_shape', type=int, default='300', help="the input image reshape size")
    args = parser.parse_args()
    return args


def build_network(input_images, labels):
    num_class = 526
    sta = 'train'
    ratio = 0.003
    net = face_net(input_images, num_class, sta)
    # logits, features = net.inference()
    logits, features = net.get_resnet18()
    # res1 = net.res1
    assert num_class == net.num_classes, "net class should be equal to loss"
    with tf.name_scope('loss'):
        with tf.name_scope('center_loss'):
            center_loss, centers, centers_update_op = get_center_loss(features, logits, labels, CENTER_LOSS_ALPHA, num_class)
        with tf.name_scope('softmax_loss'):
            # softmax_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
            labels_onehot = tf.one_hot(labels, on_value=1, off_value=0, depth=num_class)
            entropy_loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels_onehot, logits=logits)
            print("entropy_loss ", entropy_loss.shape)
            softmax_loss = tf.reduce_mean(entropy_loss)
        with tf.name_scope('total_loss'):
            regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            total_loss = softmax_loss + ratio * center_loss + 0.01 * sum(regularization_losses)
            # total_loss = softmax_loss
    with tf.name_scope('acc'):
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.cast(tf.arg_max(logits, 1), tf.int32), labels), tf.float32))
    with tf.name_scope('pred_class'):
        pred_class = tf.arg_max(logits, 1)
    with tf.name_scope('loss/'):
        tf.summary.scalar('CenterLoss', center_loss)
        tf.summary.scalar('SoftmaxLoss', softmax_loss)
        tf.summary.scalar('TotalLoss', total_loss)
    # return total_loss, accuracy, centers_update_op, center_loss, softmax_loss, pred_class
    return total_loss


def make_parallel(model, num_gpus, **kwargs):
    in_splits = {}
    for k, v in kwargs.items():
        in_splits[k] = tf.split(v, num_gpus)
    out_splits = []
    for i in range(num_gpus):
        with tf.device(tf.DeviceSpec(device_type="GPU", device_index=i)):
            with tf.variable_scope(tf.get_variable_scope(), reuse=i > 0):
                out_splits.append(model(**{k: v[i] for k, v in in_splits.items()}))
    return tf.stack(out_splits, axis=0)


def main():
    LAMBDA = 0.001
    num_class = 526
    args = argument()
    checkpoint_dir = args.save_model_name
    lr = args.lr
    batch_size = args.batch_size
    epoch_num = args.epoch_num
    sta = args.sta
    img_shape = args.img_shape
    num_gpus = 4
    # train_batch_loader = BatchLoader("./data/facescrub_train.list", batch_size, img_shape)
    # test_batch_loader = BatchLoader("./data/facescrub_val.list", batch_size, img_shape)
    # (Height, Width) = (train_batch_loader.height, train_batch_loader.width)
    # train_batch_loader = mnist_data(batch_size)
    tfrecord_file = './data/MegaFace_train.tfrecord_shuffle'
    val_file = './data/MegaFace_val.tfrecord_shuffle'
    image_batch, label_batch = read_single_tfrecord(tfrecord_file, batch_size, img_shape)
    val_image_batch, val_label_batch = read_single_tfrecord(val_file, batch_size, img_shape)
    print("img shape", img_shape)
    with tf.name_scope('input'):
        input_images = tf.placeholder(tf.float32, shape=(batch_size, img_shape, img_shape, 3), name='input_images')
        labels = tf.placeholder(tf.int32, shape=(batch_size), name='labels')
        learn_rate = tf.placeholder(tf.float32, shape=(None), name='learn_rate')
    with tf.name_scope('var'):
        global_step = tf.Variable(0, trainable=False, name='global_step')
    # total_loss, accuracy, centers_update_op, center_loss, softmax_loss, pred_class = build_network(input_images, labels)
    # total_loss, accuracy, centers_update_op, center_loss, softmax_loss, pred_class = make_parallel(build_network, num_gpus, input_images=input_images, labels=labels)
    total_loss = make_parallel(build_network, num_gpus, input_images=input_images, labels=labels)
    # optimizer = tf.train.AdamOptimizer(learn_rate)
    optimizer = tf.train.GradientDescentOptimizer(learn_rate)
    # with tf.control_dependencies([centers_update_op]):
    train_op = optimizer.minimize(tf.reduce_mean(total_loss), colocate_gradients_with_ops=True)
    # train_op = optimizer.minimize(total_loss, global_step=global_step)
    summary_op = tf.summary.merge_all()
    with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
        sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter('./tmp/face_log', sess.graph)
        saver = tf.train.Saver()
        # begin
        coord = tf.train.Coordinator()
        # begin enqueue thread
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        step = sess.run(global_step)
        epoch_idx = 0
        graph_step = 0
        item = './data/facescrub_train.list'
        imagelist = open(item, 'r')
        files_item = imagelist.readlines()
        file_len = len(files_item)
        batch_num = np.ceil(file_len / batch_size)
        while epoch_idx <= epoch_num:
            step = 0
            ckpt_fg = 'True'
            ps_loss = 0.0
            pc_loss = 0.0
            acc_sum = 0.0
            while step < batch_num:
                train_img_batch, train_label_batch = sess.run([image_batch, label_batch])
                # print("data in ", in_img[0, :2, :2, 0])
                _, summary_str, Center_loss = sess.run(
                    [train_op, summary_op, total_loss],
                    feed_dict={
                        input_images: train_img_batch,
                        labels: train_label_batch,
                        learn_rate: lr
                    })
                step += 1
                # print("step", step, str(Softmax_loss), str(Center_loss))
                # print("res1", res1_o[0, :20])
                # print("step label", step, str(batch_labels))
                graph_step += 1
                if step % 10 == 0:
                    writer.add_summary(summary_str, global_step=graph_step)
                pc_loss += Center_loss
                # ps_loss += Softmax_loss
                # acc_sum += train_acc
                if step % 100 == 0:
                    # lr = lr*0.1
                    # c_loss += c_loss
                    # s_loss += s_loss
                    print ("****** Epoch {} Step {}: ***********".format(str(epoch_idx), str(step)))
                    print ("center loss: {}".format(pc_loss / 100.0))
                    print ("softmax_loss: {}".format(ps_loss / 100.0))
                    print ("train_acc: {}".format(acc_sum / 100.0))
                    print ("*******************************")
                    if (Center_loss < 0.1 and ckpt_fg == 'True'):
                        print("******************************************************************************")
                        saver.save(sess, checkpoint_dir, global_step=epoch_idx)
                        ckpt_fg = 'False'
                    ps_loss = 0.0
                    pc_loss = 0.0
                    acc_sum = 0.0
            epoch_idx += 1
            if epoch_idx % 5 == 0:
                print("******************************************************************************")
                saver.save(sess, checkpoint_dir, global_step=epoch_idx)
                # writer.add_summary(summary_str, global_step=step)
            if epoch_idx % 5 == 0:
                lr = lr * 0.5
            if epoch_idx:
                val_img_batch, val_label_batch = sess.run([val_image_batch, val_label_batch])
                vali_acc = sess.run(
                    total_loss,
                    feed_dict={
                        input_images: val_img_batch,
                        labels: val_label_batch
                    })
                print(("epoch: {}, train_acc:{:.4f}, vali_acc:{:.4f}".format(epoch_idx, Center_loss, vali_acc)))
        coord.join(threads)
        sess.close()


if __name__ == '__main__':
    main()
6acd9d44dc1191828b5807335b648d30c0e9194d
0eda43d797abfc69ad28000b3c3599af44049bdf
/setup.py
21f2ea356d1b8e4b3e0b98a7bd61d346e529cf0b
[ "CC0-1.0", "LicenseRef-scancode-public-domain" ]
permissive
biomodels/BIOMD0000000048
d8d23b0491ac80e27692b6e115b9884ee46397d6
6d17577fdde45ed5c0ec8457eacb860458e30215
refs/heads/master
2021-01-18T14:19:32.446581
2014-10-16T05:18:50
2014-10-16T05:18:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
377
py
from setuptools import setup, find_packages

setup(name='BIOMD0000000048',
      version=20140916,
      description='BIOMD0000000048 from BioModels',
      url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000048',
      maintainer='Stanley Gu',
      maintainer_url='[email protected]',
      packages=find_packages(),
      package_data={'': ['*.xml', 'README.md']},
      )
a03f688cd3bb6ceef3f26b749170bc2c0ac710d7
82770c7bc5e2f27a48b8c370b0bab2ee41f24d86
/microblog/flask/venv/lib/python2.7/site-packages/billiard/forking.py
57fc9795e47d83e6c656232b9ddde88438c6ec12
[ "Apache-2.0" ]
permissive
johankaito/fufuka
77ddb841f27f6ce8036d7b38cb51dc62e85b2679
32a96ecf98ce305c2206c38443e58fdec88c788d
refs/heads/master
2022-07-20T00:51:55.922063
2015-08-21T20:56:48
2015-08-21T20:56:48
39,845,849
2
0
Apache-2.0
2022-06-29T23:30:11
2015-07-28T16:39:54
Python
UTF-8
Python
false
false
17,515
py
#
# Module for starting a process object using os.fork() or CreateProcess()
#
# multiprocessing/forking.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import

import os
import sys
import signal
import warnings

from pickle import load, HIGHEST_PROTOCOL
from billiard import util
from billiard import process
from billiard.five import int_types
from .reduction import dump
from .compat import _winapi as win32

__all__ = ['Popen', 'assert_spawning', 'exit', 'duplicate', 'close']

try:
    WindowsError = WindowsError  # noqa
except NameError:
    class WindowsError(Exception):  # noqa
        pass

W_OLD_DJANGO_LAYOUT = """\
Will add directory %r to path! This is necessary to accommodate \
pre-Django 1.4 layouts using setup_environ.
You can skip this warning by adding a DJANGO_SETTINGS_MODULE=settings \
environment variable.
"""

#
# Choose whether to do a fork or spawn (fork+exec) on Unix.
# This affects how some shared resources should be created.
#

_forking_is_enabled = sys.platform != 'win32'

#
# Check that the current thread is spawning a child process
#


def assert_spawning(self):
    if not Popen.thread_is_spawning():
        raise RuntimeError(
            '%s objects should only be shared between processes'
            ' through inheritance' % type(self).__name__
        )

#
# Unix
#

if sys.platform != 'win32':
    try:
        import thread
    except ImportError:
        import _thread as thread  # noqa
    import select

    WINEXE = False
    WINSERVICE = False

    exit = os._exit
    duplicate = os.dup
    close = os.close
    _select = util._eintr_retry(select.select)

    #
    # We define a Popen class similar to the one from subprocess, but
    # whose constructor takes a process object as its argument.
    #

    class Popen(object):

        _tls = thread._local()

        def __init__(self, process_obj):
            # register reducers
            from billiard import connection  # noqa
            _Django_old_layout_hack__save()
            sys.stdout.flush()
            sys.stderr.flush()
            self.returncode = None
            r, w = os.pipe()
            self.sentinel = r

            if _forking_is_enabled:
                self.pid = os.fork()
                if self.pid == 0:
                    os.close(r)
                    if 'random' in sys.modules:
                        import random
                        random.seed()
                    code = process_obj._bootstrap()
                    os._exit(code)
            else:
                from_parent_fd, to_child_fd = os.pipe()
                cmd = get_command_line() + [str(from_parent_fd)]

                self.pid = os.fork()
                if self.pid == 0:
                    os.close(r)
                    os.close(to_child_fd)
                    os.execv(sys.executable, cmd)

                # send information to child
                prep_data = get_preparation_data(process_obj._name)
                os.close(from_parent_fd)
                to_child = os.fdopen(to_child_fd, 'wb')
                Popen._tls.process_handle = self.pid
                try:
                    dump(prep_data, to_child, HIGHEST_PROTOCOL)
                    dump(process_obj, to_child, HIGHEST_PROTOCOL)
                finally:
                    del(Popen._tls.process_handle)
                to_child.close()

            # `w` will be closed when the child exits, at which point `r`
            # will become ready for reading (using e.g. select()).
            os.close(w)
            util.Finalize(self, os.close, (r,))

        def poll(self, flag=os.WNOHANG):
            if self.returncode is None:
                try:
                    pid, sts = os.waitpid(self.pid, flag)
                except os.error:
                    # Child process not yet created. See #1731717
                    # e.errno == errno.ECHILD == 10
                    return None
                if pid == self.pid:
                    if os.WIFSIGNALED(sts):
                        self.returncode = -os.WTERMSIG(sts)
                    else:
                        assert os.WIFEXITED(sts)
                        self.returncode = os.WEXITSTATUS(sts)
            return self.returncode

        def wait(self, timeout=None):
            if self.returncode is None:
                if timeout is not None:
                    r = _select([self.sentinel], [], [], timeout)[0]
                    if not r:
                        return None
                # This shouldn't block if select() returned successfully.
                return self.poll(os.WNOHANG if timeout == 0.0 else 0)
            return self.returncode

        def terminate(self):
            if self.returncode is None:
                try:
                    os.kill(self.pid, signal.SIGTERM)
                except OSError:
                    if self.wait(timeout=0.1) is None:
                        raise

        @staticmethod
        def thread_is_spawning():
            if _forking_is_enabled:
                return False
            else:
                return getattr(Popen._tls, 'process_handle', None) is not None

        @staticmethod
        def duplicate_for_child(handle):
            return handle

#
# Windows
#

else:
    try:
        import thread
    except ImportError:
        import _thread as thread  # noqa
    import msvcrt
    try:
        import _subprocess
    except ImportError:
        import _winapi as _subprocess  # noqa

    #
    #
    #

    TERMINATE = 0x10000
    WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
    WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")

    exit = win32.ExitProcess
    close = win32.CloseHandle

    #
    #
    #

    def duplicate(handle, target_process=None, inheritable=False):
        if target_process is None:
            target_process = _subprocess.GetCurrentProcess()
        h = _subprocess.DuplicateHandle(
            _subprocess.GetCurrentProcess(), handle, target_process,
            0, inheritable, _subprocess.DUPLICATE_SAME_ACCESS
        )
        if sys.version_info[0] < 3 or (
                sys.version_info[0] == 3 and sys.version_info[1] < 3):
            h = h.Detach()
        return h

    #
    # We define a Popen class similar to the one from subprocess, but
    # whose constructor takes a process object as its argument.
    #

    class Popen(object):
        '''
        Start a subprocess to run the code of a process object
        '''
        _tls = thread._local()

        def __init__(self, process_obj):
            _Django_old_layout_hack__save()
            # create pipe for communication with child
            rfd, wfd = os.pipe()

            # get handle for read end of the pipe and make it inheritable
            rhandle = duplicate(msvcrt.get_osfhandle(rfd), inheritable=True)
            os.close(rfd)

            # start process
            cmd = get_command_line() + [rhandle]
            cmd = ' '.join('"%s"' % x for x in cmd)
            hp, ht, pid, tid = _subprocess.CreateProcess(
                _python_exe, cmd, None, None, 1, 0, None, None, None
            )
            close(ht) if isinstance(ht, int_types) else ht.Close()
            (close(rhandle) if isinstance(rhandle, int_types)
             else rhandle.Close())

            # set attributes of self
            self.pid = pid
            self.returncode = None
            self._handle = hp
            self.sentinel = int(hp)

            # send information to child
            prep_data = get_preparation_data(process_obj._name)
            to_child = os.fdopen(wfd, 'wb')
            Popen._tls.process_handle = int(hp)
            try:
                dump(prep_data, to_child, HIGHEST_PROTOCOL)
                dump(process_obj, to_child, HIGHEST_PROTOCOL)
            finally:
                del Popen._tls.process_handle
            to_child.close()

        @staticmethod
        def thread_is_spawning():
            return getattr(Popen._tls, 'process_handle', None) is not None

        @staticmethod
        def duplicate_for_child(handle):
            return duplicate(handle, Popen._tls.process_handle)

        def wait(self, timeout=None):
            if self.returncode is None:
                if timeout is None:
                    msecs = _subprocess.INFINITE
                else:
                    msecs = max(0, int(timeout * 1000 + 0.5))

                res = _subprocess.WaitForSingleObject(int(self._handle), msecs)
                if res == _subprocess.WAIT_OBJECT_0:
                    code = _subprocess.GetExitCodeProcess(self._handle)
                    if code == TERMINATE:
                        code = -signal.SIGTERM
                    self.returncode = code
            return self.returncode

        def poll(self):
            return self.wait(timeout=0)

        def terminate(self):
            if self.returncode is None:
                try:
                    _subprocess.TerminateProcess(int(self._handle), TERMINATE)
                except WindowsError:
                    if self.wait(timeout=0.1) is None:
                        raise

#
#
#

if WINSERVICE:
    _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
    _python_exe = sys.executable


def set_executable(exe):
    global _python_exe
    _python_exe = exe


def is_forking(argv):
    '''
    Return whether commandline indicates we are forking
    '''
    if len(argv) >= 2 and argv[1] == '--billiard-fork':
        assert len(argv) == 3
        os.environ["FORKED_BY_MULTIPROCESSING"] = "1"
        return True
    else:
        return False


def freeze_support():
    '''
    Run code for process object if this in not the main process
    '''
    if is_forking(sys.argv):
        main()
        sys.exit()


def get_command_line():
    '''
    Returns prefix of command line used for spawning a child process
    '''
    if process.current_process()._identity == () and is_forking(sys.argv):
        raise RuntimeError('''
        Attempt to start a new process before the current process
        has finished its bootstrapping phase.

        This probably means that have forgotten to use the proper
        idiom in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce a Windows executable.''')

    if getattr(sys, 'frozen', False):
        return [sys.executable, '--billiard-fork']
    else:
        prog = 'from billiard.forking import main; main()'
        return [_python_exe, '-c', prog, '--billiard-fork']


def _Django_old_layout_hack__save():
    if 'DJANGO_PROJECT_DIR' not in os.environ:
        try:
            settings_name = os.environ['DJANGO_SETTINGS_MODULE']
        except KeyError:
            return  # not using Django.

        conf_settings = sys.modules.get('django.conf.settings')
        configured = conf_settings and conf_settings.configured

        try:
            project_name, _ = settings_name.split('.', 1)
        except ValueError:
            return  # not modified by setup_environ

        project = __import__(project_name)
        try:
            project_dir = os.path.normpath(_module_parent_dir(project))
        except AttributeError:
            return  # dynamically generated module (no __file__)

        if configured:
            warnings.warn(UserWarning(
                W_OLD_DJANGO_LAYOUT % os.path.realpath(project_dir)
            ))
        os.environ['DJANGO_PROJECT_DIR'] = project_dir


def _Django_old_layout_hack__load():
    try:
        sys.path.append(os.environ['DJANGO_PROJECT_DIR'])
    except KeyError:
        pass


def _module_parent_dir(mod):
    dir, filename = os.path.split(_module_dir(mod))
    if dir == os.curdir or not dir:
        dir = os.getcwd()
    return dir


def _module_dir(mod):
    if '__init__.py' in mod.__file__:
        return os.path.dirname(mod.__file__)
    return mod.__file__


def main():
    '''
    Run code specifed by data received over pipe
    '''
    global _forking_is_enabled
    _Django_old_layout_hack__load()

    assert is_forking(sys.argv)
    _forking_is_enabled = False

    handle = int(sys.argv[-1])
    if sys.platform == 'win32':
        fd = msvcrt.open_osfhandle(handle, os.O_RDONLY)
    else:
        fd = handle
    from_parent = os.fdopen(fd, 'rb')

    process.current_process()._inheriting = True
    preparation_data = load(from_parent)
    prepare(preparation_data)
    # Huge hack to make logging before Process.run work.
    try:
        os.environ["MP_MAIN_FILE"] = sys.modules["__main__"].__file__
    except KeyError:
        pass
    except AttributeError:
        pass
    loglevel = os.environ.get("_MP_FORK_LOGLEVEL_")
    logfile = os.environ.get("_MP_FORK_LOGFILE_") or None
    format = os.environ.get("_MP_FORK_LOGFORMAT_")
    if loglevel:
        from billiard import util
        import logging
        logger = util.get_logger()
        logger.setLevel(int(loglevel))
        if not logger.handlers:
            logger._rudimentary_setup = True
            logfile = logfile or sys.__stderr__
            if hasattr(logfile, "write"):
                handler = logging.StreamHandler(logfile)
            else:
                handler = logging.FileHandler(logfile)
            formatter = logging.Formatter(
                format or util.DEFAULT_LOGGING_FORMAT,
            )
            handler.setFormatter(formatter)
            logger.addHandler(handler)

    self = load(from_parent)
    process.current_process()._inheriting = False

    from_parent.close()

    exitcode = self._bootstrap()
    exit(exitcode)


def get_preparation_data(name):
    '''
    Return info about parent needed by child to unpickle process object
    '''
    from billiard.util import _logger, _log_to_stderr

    d = dict(
        name=name,
        sys_path=sys.path,
        sys_argv=sys.argv,
        log_to_stderr=_log_to_stderr,
        orig_dir=process.ORIGINAL_DIR,
        authkey=process.current_process().authkey,
    )

    if _logger is not None:
        d['log_level'] = _logger.getEffectiveLevel()

    if not WINEXE and not WINSERVICE:
        main_path = getattr(sys.modules['__main__'], '__file__', None)
        if not main_path and sys.argv[0] not in ('', '-c'):
            main_path = sys.argv[0]
        if main_path is not None:
            if (not os.path.isabs(main_path) and
                    process.ORIGINAL_DIR is not None):
                main_path = os.path.join(process.ORIGINAL_DIR, main_path)
            d['main_path'] = os.path.normpath(main_path)

    return d

#
# Prepare current process
#

old_main_modules = []


def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    old_main_modules.append(sys.modules['__main__'])

    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process()._authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if 'main_path' in data:
        main_path = data['main_path']
        main_name = os.path.splitext(os.path.basename(main_path))[0]
        if main_name == '__init__':
            main_name = os.path.basename(os.path.dirname(main_path))

        if main_name == '__main__':
            main_module = sys.modules['__main__']
            main_module.__file__ = main_path
        elif main_name != 'ipython':
            # Main modules not actually called __main__.py may
            # contain additional code that should still be executed
            import imp

            if main_path is None:
                dirs = None
            elif os.path.basename(main_path).startswith('__init__.py'):
                dirs = [os.path.dirname(os.path.dirname(main_path))]
            else:
                dirs = [os.path.dirname(main_path)]

            assert main_name not in sys.modules, main_name
            file, path_name, etc = imp.find_module(main_name, dirs)
            try:
                # We would like to do "imp.load_module('__main__', ...)"
                # here.  However, that would cause 'if __name__ ==
                # "__main__"' clauses to be executed.
                main_module = imp.load_module(
                    '__parents_main__', file, path_name, etc
                )
            finally:
                if file:
                    file.close()

            sys.modules['__main__'] = main_module
            main_module.__name__ = '__main__'

            # Try to make the potentially picklable objects in
            # sys.modules['__main__'] realize they are in the main
            # module -- somewhat ugly.
            for obj in list(main_module.__dict__.values()):
                try:
                    if obj.__module__ == '__parents_main__':
                        obj.__module__ = '__main__'
                except Exception:
                    pass
47f0abfaceb11e660d4f305e745db7fe9fee819f
da84fa23cc4cf2e81e50892085ac162508bff155
/nestris_ocr/capturing/linux/linux_mgr.py
6381209b8dd85f1880aca2b7eb9cbc653ec9f0cd
[]
no_license
alex-ong/NESTrisOCR
83ddaba55b100f0ee20e924731459e547e321887
488beeb30e596ccd0548152e241e1c6f772e717b
refs/heads/master
2023-01-10T08:02:41.702538
2021-07-07T06:33:39
2021-07-07T06:33:39
169,196,192
25
8
null
2022-12-27T15:37:40
2019-02-05T05:44:34
Python
UTF-8
Python
false
false
1,202
py
import Xlib
import Xlib.display
from Xlib import X


class WindowMgr:
    """Encapsulates some calls for window management"""

    def __init__(self, hwnd=None):
        self.handle = hwnd

    def checkWindow(self, hwnd):
        """checks if a window still exists"""
        return hwnd

    def getWindows(self):
        """
        Return a list of tuples (handler, window name) for each real window.
        """
        windows = []

        def getWindowHierarchy(window, windows):
            children = window.query_tree().children
            for w in children:
                try:
                    w.get_image(0, 0, 1, 1, X.ZPixmap, 0xFFFFFFFF)
                    windows.append(
                        (
                            w.id,
                            w.get_wm_class()[1]
                            if w.get_wm_class() is not None
                            else "",
                        )
                    )
                except Xlib.error.BadMatch:
                    pass
                finally:
                    windows = getWindowHierarchy(w, windows)
            return windows

        root = Xlib.display.Display().screen().root
        windows = getWindowHierarchy(root, windows)
        return windows
11065362a8ac77972c519aadeae585300bb5085d
7bededcada9271d92f34da6dae7088f3faf61c02
/pypureclient/flasharray/FA_2_25/models/active_directory_get_response.py
ee529854e041a3ff612ccf174315845d4e2c49ef
[ "BSD-2-Clause" ]
permissive
PureStorage-OpenConnect/py-pure-client
a5348c6a153f8c809d6e3cf734d95d6946c5f659
7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e
refs/heads/master
2023-09-04T10:59:03.009972
2023-08-25T07:40:41
2023-08-25T07:40:41
160,391,444
18
29
BSD-2-Clause
2023-09-08T09:08:30
2018-12-04T17:02:51
Python
UTF-8
Python
false
false
5,613
py
# coding: utf-8

"""
    FlashArray REST API

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)

    OpenAPI spec version: 2.25

    Generated by: https://github.com/swagger-api/swagger-codegen.git
"""

import pprint
import re

import six
import typing

from ....properties import Property

if typing.TYPE_CHECKING:
    from pypureclient.flasharray.FA_2_25 import models


class ActiveDirectoryGetResponse(object):
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'more_items_remaining': 'bool',
        'total_item_count': 'int',
        'continuation_token': 'str',
        'items': 'list[ActiveDirectory]'
    }

    attribute_map = {
        'more_items_remaining': 'more_items_remaining',
        'total_item_count': 'total_item_count',
        'continuation_token': 'continuation_token',
        'items': 'items'
    }

    required_args = {
    }

    def __init__(
        self,
        more_items_remaining=None,  # type: bool
        total_item_count=None,  # type: int
        continuation_token=None,  # type: str
        items=None,  # type: List[models.ActiveDirectory]
    ):
        """
        Keyword args:
            more_items_remaining (bool): Returns a value of `true` if subsequent items can be retrieved.
            total_item_count (int): The total number of records after applying all filter query parameters. The `total_item_count` will be calculated if and only if the corresponding query parameter `total_item_count` is set to `true`. If this query parameter is not set or set to `false`, a value of `null` will be returned.
            continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
            items (list[ActiveDirectory]): A list of Active Directory computer account configuration objects.
        """
        if more_items_remaining is not None:
            self.more_items_remaining = more_items_remaining
        if total_item_count is not None:
            self.total_item_count = total_item_count
        if continuation_token is not None:
            self.continuation_token = continuation_token
        if items is not None:
            self.items = items

    def __setattr__(self, key, value):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
        self.__dict__[key] = value

    def __getattribute__(self, item):
        value = object.__getattribute__(self, item)
        if isinstance(value, Property):
            raise AttributeError
        else:
            return value

    def __getitem__(self, key):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
        return object.__getattribute__(self, key)

    def __setitem__(self, key, value):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
        object.__setattr__(self, key, value)

    def __delitem__(self, key):
        if key not in self.attribute_map:
            raise KeyError("Invalid key `{}` for `ActiveDirectoryGetResponse`".format(key))
        object.__delattr__(self, key)

    def keys(self):
        return self.attribute_map.keys()

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            if hasattr(self, attr):
                value = getattr(self, attr)
                if isinstance(value, list):
                    result[attr] = list(map(
                        lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                        value
                    ))
                elif hasattr(value, "to_dict"):
                    result[attr] = value.to_dict()
                elif isinstance(value, dict):
                    result[attr] = dict(map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()
                    ))
                else:
                    result[attr] = value
        if issubclass(ActiveDirectoryGetResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ActiveDirectoryGetResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
e0cf8c5298a8ee4e8a3b21eb3b1fe65504c3047e
204ec78fcebcea9e1e1da4905cf3fad0a514b01f
/test/unit/test_timeout.py
4990b87aad1b2d40888f75acc3481c349d3eb4e0
[ "Apache-2.0" ]
permissive
ARMmbed/pyOCD
659340bf8753aa8e15a72890b8bea64dff2c2f42
d4cdcf7e532cae17caad866839287bbe1e0d952b
refs/heads/master
2023-05-31T13:45:15.797588
2020-10-12T13:55:47
2020-10-12T13:55:47
190,203,829
3
1
Apache-2.0
2019-07-05T11:05:40
2019-06-04T13:09:56
Python
UTF-8
Python
false
false
1,942
py
# pyOCD debugger
# Copyright (c) 2017-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from time import (time, sleep)

import pytest

from pyocd.utility.timeout import Timeout


class TestTimeout:
    def test_no_timeout(self):
        with Timeout(0.05) as to:
            cnt = 0
            while to.check():
                sleep(0.01)
                cnt += 1
                if cnt == 4:
                    break
            else:
                assert False
        assert not to.did_time_out

    def test_timeout_a(self):
        s = time()
        with Timeout(0.05) as to:
            while to.check():
                sleep(0.01)
        assert to.did_time_out
        assert (time() - s) >= 0.05

    def test_timeout_b(self):
        timedout = False
        s = time()
        with Timeout(0.05) as to:
            cnt = 0
            while cnt < 10:
                if to.did_time_out:
                    timedout = True
                sleep(0.02)
                cnt += 1
        assert timedout
        assert to.did_time_out
        assert (time() - s) >= 0.05

    def test_timeout_c(self):
        timedout = False
        with Timeout(0.05) as to:
            cnt = 0
            while cnt < 10:
                if to.did_time_out:
                    timedout = True
                cnt += 1
        assert not timedout
        assert not to.did_time_out
f158c6821e350e490fa25d9eda4fc880f01fe9d0
6e2dc82bcfbc420ce6fd8e890f9f254e8e594902
/www/cursivedata/migrations/0005_auto__add_field_pipeline_anim_loop.py
92063d7e04206acf824ce406bdbd7eabe0b2d325
[ "CC-BY-4.0" ]
permissive
mattvenn/cursivedata
8ea86bde4a58a5678b1116953d17f0ae3600daf6
43e43263bef6f01698166d87bcff00b246957277
refs/heads/master
2021-01-17T08:06:38.715586
2016-07-22T16:04:10
2016-07-22T16:04:10
5,599,674
1
0
null
null
null
null
UTF-8
Python
false
false
12,340
py
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'Pipeline.anim_loop'
        db.add_column('cursivedata_pipeline', 'anim_loop',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Pipeline.anim_loop'
        db.delete_column('cursivedata_pipeline', 'anim_loop')

    models = {
        'cursivedata.cosmsource': {
            'Meta': {'object_name': 'COSMSource'},
            'add_feed_id': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'add_feed_title': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'add_location': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'api_key': ('django.db.models.fields.CharField', [], {'default': "'WsH6oBOmVbflt5ytsSYHYVGQzCaSAKw0Ti92WHZzajZHWT0g'", 'max_length': '400'}),
            'cosm_trigger_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'cosm_url': ('django.db.models.fields.CharField', [], {'default': "'http://api.cosm.com/v2/triggers/'", 'max_length': '200'}),
            'feed_id': ('django.db.models.fields.CharField', [], {'default': "'96779'", 'max_length': '400'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_value': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "'Unknown Source'", 'max_length': '100'}),
            'pipelines': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cursivedata.Pipeline']", 'symmetrical': 'False', 'blank': 'True'}),
            'stream_id': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '400'}),
            'use_stream_id': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'cursivedata.datapoint': {
            'Meta': {'object_name': 'DataPoint'},
            'current': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
            'datastore': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.DataStore']"}),
            'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'cursivedata.datastore': {
            'Meta': {'object_name': 'DataStore'},
            'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'cursivedata.endpoint': {
            'Meta': {'object_name': 'Endpoint'},
            'device': ('django.db.models.fields.CharField', [], {'default': "'web'", 'max_length': '200'}),
            'full_image_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'full_svg_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'generate_gcode': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'height': ('django.db.models.fields.FloatField', [], {'default': '200', 'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'img_height': ('django.db.models.fields.IntegerField', [], {'default': '500'}),
            'img_width': ('django.db.models.fields.IntegerField', [], {'default': '500'}),
            'last_image_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'last_svg_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 25, 0, 0)'}),
            'location': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '200'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '200'}),
            'paused': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'robot_svg_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'run_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'side_margin': ('django.db.models.fields.FloatField', [], {'default': '10', 'max_length': '200'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '200'}),
            'status_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'top_margin': ('django.db.models.fields.FloatField', [], {'default': '10', 'max_length': '200'}),
            'width': ('django.db.models.fields.FloatField', [], {'default': '200', 'max_length': '200'})
        },
        'cursivedata.gcodeoutput': {
            'Meta': {'object_name': 'GCodeOutput'},
            'endpoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Endpoint']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'served': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'cursivedata.generator': {
            'Meta': {'object_name': 'Generator'},
            'description': ('django.db.models.fields.CharField', [], {'default': "'Unknown'", 'max_length': '2000'}),
            'file_path': ('django.db.models.fields.CharField', [], {'default': "'./generators'", 'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.CharField', [], {'default': "'No Image'", 'max_length': '200'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 25, 0, 0)'}),
            'last_used': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 25, 0, 0)'}),
            'module_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'cursivedata.generatorstate': {
            'Meta': {'object_name': 'GeneratorState'},
            'generator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Generator']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'params': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
            'state': ('jsonfield.fields.JSONField', [], {'default': '{}'})
        },
        'cursivedata.parameter': {
            'Meta': {'object_name': 'Parameter'},
            'data_type': ('django.db.models.fields.CharField', [], {'default': "'float'", 'max_length': '20'}),
            'default': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '200', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'default': "'Some parameter'", 'max_length': '1000', 'blank': 'True'}),
            'generator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Generator']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'cursivedata.pipeline': {
            'Meta': {'object_name': 'Pipeline'},
            'anim_autoplay': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'anim_loop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'anim_speed': ('django.db.models.fields.IntegerField', [], {'default': '1000'}),
            'auto_begin_days': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'data_store': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cursivedata.DataStore']", 'unique': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '2000', 'blank': 'True'}),
            'endpoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Endpoint']"}),
            'full_image_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'full_svg_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'generator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Generator']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'img_height': ('django.db.models.fields.IntegerField', [], {'default': '500'}),
            'img_width': ('django.db.models.fields.IntegerField', [], {'default': '500'}),
            'last_image_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'last_svg_file': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 5, 25, 0, 0)'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'next_auto_begin_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'paused': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'print_top_left_x': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'print_top_left_y': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'print_width': ('django.db.models.fields.FloatField', [], {'default': '500'}),
            'run_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cursivedata.COSMSource']", 'symmetrical': 'False', 'blank': 'True'}),
            'state': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cursivedata.GeneratorState']", 'unique': 'True'})
        },
        'cursivedata.storedoutput': {
            'Meta': {'object_name': 'StoredOutput'},
            'endpoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Endpoint']", 'null': 'True', 'blank': 'True'}),
            'filename': ('django.db.models.fields.CharField', [], {'default': "'output/none'", 'max_length': '200'}),
            'filetype': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '10'}),
            'generator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Generator']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'pipeline': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cursivedata.Pipeline']", 'null': 'True', 'blank': 'True'}),
            'run_id': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'complete'", 'max_length': '10'})
        }
    }

    complete_apps = ['cursivedata']
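# Illustrative usage sketch, not part of the original migration: under South,
# a schema migration like this one is applied through Django's management
# commands. The '0014' target below is a hypothetical migration number.
from django.core.management import call_command

call_command('migrate', 'cursivedata')          # applies pending migrations (forwards())
call_command('migrate', 'cursivedata', '0014')  # targeting an earlier number rolls back via backwards()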
4a1c309a93de9647a0f1adc90e88ad9c8624b3be
2b8c88dfee5c5a784357515eafe8cd5f997c8774
/leetcode/dynamic_programming/code-84.py
1b9e7013a5652e79e6603e09d069daf7eb6aa134
[]
no_license
archenRen/learnpy
e060f3aa2f77c35fc1b12345720af6c8b528da57
934ef76b97297f746a722a48c76672c7bc744cd9
refs/heads/master
2022-04-28T20:25:59.114036
2020-05-03T02:16:03
2020-05-03T02:16:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,187
py
# This is a TLE solution.
def largestRectangleArea2(heights: 'List[int]') -> int:
    n = len(heights)
    max_val = 0
    for i in range(n):
        min_val = heights[i]
        max_val = max(max_val, min_val)
        for j in range(i - 1, -1, -1):
            min_val = min(heights[j], min_val)
            max_val = max(max_val, min_val * (i - j + 1))
    return max_val


def largestRectangleArea(heights: 'List[int]') -> int:
    # The stack maintains the indexes of buildings with ascending height.
    n = len(heights)
    heights.append(0)  # sentinel so every index eventually gets popped
    stack = []
    ans = 0
    i = 0
    while i <= n:
        if not stack or heights[i] >= heights[stack[-1]]:
            stack.append(i)
        else:
            tp = stack.pop()
            if stack:
                ans = max(ans, heights[tp] * (i - stack[-1] - 1))
            else:
                ans = max(ans, heights[tp] * i)
            i -= 1  # stay on index i until all taller bars are popped
        i += 1
    return ans


# print(largestRectangleArea([2, 1, 5, 6, 2, 3]))  # expect 10 (2*5)
# print(largestRectangleArea([2, 1, 3, 6, 2, 3]))  # expect 8 (4*2)
# print(largestRectangleArea([2, 3]))
# print(largestRectangleArea([3]))
print(largestRectangleArea(list(range(10))))
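# Illustrative sketch, not part of the original solution file: a compact
# restatement of the same monotonic-stack invariant, convenient for tracing by
# hand. The appended sentinel 0 guarantees every index is eventually popped.
def _largest_rectangle_trace(heights):
    heights = heights + [0]            # sentinel flushes the stack at the end
    stack, ans, i = [], 0, 0           # stack holds indexes with ascending heights
    while i < len(heights):
        if not stack or heights[i] >= heights[stack[-1]]:
            stack.append(i)
            i += 1
        else:
            tp = stack.pop()           # heights[tp] is the limiting bar
            left = stack[-1] if stack else -1
            ans = max(ans, heights[tp] * (i - left - 1))
    return ans

assert _largest_rectangle_trace([2, 1, 2]) == 3   # the bar of height 1 spans all three bars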
610ac8671393a3cc93c8ac2f5fb7cbe982e9e849
96090102d5e87f1771ba5a90f7b676f4ccb0afa6
/src/profiles/forms.py
ef4d8adbf95e2f2acf6f725493fe0bef6afcef2b
[]
no_license
rahulsayon/SocialWedding
b4b37ad69b89236784c6fb983ab27b4cd2e4266e
ab96b6a5d381936463065e75f74d0c8ffd3b1907
refs/heads/master
2022-12-18T15:08:39.380348
2020-09-27T14:49:40
2020-09-27T14:49:40
299,053,233
0
0
null
null
null
null
UTF-8
Python
false
false
197
py
from django import forms

from .models import Profile


class ProfileModalForm(forms.ModelForm):
    class Meta:
        model = Profile
        fields = [
            'first_name', 'last_name', 'bio', 'avatar'
        ]
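# Illustrative usage sketch, not part of the original app: one way this form
# could be wired into a view. The `update_profile` view, template path, URL
# name, and the `request.user.profile` one-to-one accessor are assumptions;
# request.FILES is passed because `avatar` is presumably an image/file field.
from django.shortcuts import redirect, render

def update_profile(request):
    form = ProfileModalForm(request.POST or None, request.FILES or None,
                            instance=request.user.profile)
    if request.method == 'POST' and form.is_valid():
        form.save()
        return redirect('profiles:detail')  # hypothetical URL name
    return render(request, 'profiles/update.html', {'form': form})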
ffd4ff39507434f06cbbc5a0767aeadf66cdf5a4
2bdedcda705f6dcf45a1e9a090377f892bcb58bb
/src/main/output/idea/party/issue/line_friend/group_lot_guy_lombok_kind/door/oauth.py
555d4e970019c6d7f81128a63b321c2efb7bdedb
[]
no_license
matkosoric/GenericNameTesting
860a22af1098dda9ea9e24a1fc681bb728aa2d69
03f4a38229c28bc6d83258e5a84fce4b189d5f00
refs/heads/master
2021-01-08T22:35:20.022350
2020-02-21T11:28:21
2020-02-21T11:28:21
242,123,053
1
0
null
null
null
null
UTF-8
Python
false
false
2,672
py
const request = require('request')
const uuidv4 = require('uuid/v4')
const { LimitReachedError } = require('./errors')

const ITEMS_IN_REQUEST_LIMIT = 25
const REQUEST_CHAR_LIMIT = 5000
const CHAR_PER_HOUR_LIMIT = 2000000

// const subscriptionKey = process.env.TRANSLATOR_TEXT_KEY;
// if (!subscriptionKey) {
//   throw new Error('Environment variable for your subscription key is not set.')
// }
const subscriptionKey = 'a674785ff843a278a87995ef4ee1659b'

function MicrosoftTranslator () {}

MicrosoftTranslator.prototype.translate = function (strings, targetLang) {
  console.log(`Microsoft: translating ${strings.length} strings to ${targetLang}...`)
  let options = {
    method: 'POST',
    baseUrl: 'https://api.cognitive.microsofttranslator.com/',
    url: 'translate',
    qs: {
      'api-version': '3.0',
      to: targetLang
    },
    headers: {
      '5bb321a1b738949e8bace956a490028a': subscriptionKey,
      'Content-type': 'application/json',
      'X-ClientTraceId': uuidv4().toString()
    },
    body: strings.map(str => ({ text: str })),
    json: true
  }
  return new Promise((resolve, reject) => {
    request(options, (err, res, body) => {
      if (err) {
        reject(err)
        return
      }
      if (body.error) {
        console.log('body', body)
        if (body.error.code === 400077) {
          reject(new LimitReachedError('Microsoft', 'Maximum request size'))
        } else if (body.error.code === 403001) {
          reject(new LimitReachedError('Microsoft', 'Quota per hour'))
        } else {
          reject(new Error(body.error.message))
        }
      } else {
        let translations = body
          .reduce((accum, item) => accum.concat(item.translations), [])
          .map(i => i.text)
        resolve(translations)
      }
    })
  }).then(translations => {
    console.log(`Microsoft: Translation succeed. Got ${translations.length} translations.`)
    return translations
  })
}

MicrosoftTranslator.prototype.getRequestLimit = function () {
  return REQUEST_CHAR_LIMIT
}

MicrosoftTranslator.prototype.getRequestItemsCountLimit = function () {
  return ITEMS_IN_REQUEST_LIMIT
}

MicrosoftTranslator.prototype.getMaxLimit = function () {
  return CHAR_PER_HOUR_LIMIT
}

module.exports = MicrosoftTranslator

// new MicrosoftTranslator()
//   .translate([(new Array(5001)).join('a'), 'b'], 'ru')
//   .then(translations => console.log('Result', translations))
//   .catch(err => console.error(err))

/*
 * Limits: https://docs.microsoft.com/en-us/azure/cognitive-services/translator/request-limits
 * https://docs.microsoft.com/en-us/azure/cognitive-services/translator/reference/v3-0-translate?tabs=curl
 */
7094d4bbe7a500eb46faa9fac35c316ada1389af
77fc5af96da1d461c86c7f9668b64b99ca04a1b6
/codes/horner.py
4458f960d38c57f60ba6940082b190afccdbd331
[]
no_license
rene-d/edupython
5b6bc8ddb5eb8ec896ee70fb961d4e689af1075a
1261d0c7aae17bb2d4ff3370860768b73ba4172d
refs/heads/master
2020-11-24T10:07:18.504472
2019-12-21T21:03:08
2019-12-21T21:03:08
228,099,675
0
0
null
null
null
null
UTF-8
Python
false
false
425
py
# Horner's method
# https://edupython.tuxfamily.org/sources/view.php?code=horner
# Created by IANTE, 12/07/2011

from lycee import *

P = liste_demande('enter the coefficients of P(x) in order of increasing powers')
r = demande('Enter an obvious root')
Q = [0] * (len(P) - 1)
v = 0
for d in range(len(P) - 2, -1, -1):
    v = P[d + 1] + r * v
    Q[d] = v
print(affiche_poly(P) + '=(' + affiche_poly([-r, 1]) + ')(' + affiche_poly(Q) + ')')
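# Illustrative self-contained sketch, not part of the original file: the same
# Horner deflation without the `lycee` teaching library (its liste_demande /
# demande / affiche_poly helpers only handle I/O and pretty-printing).
# Coefficients are in increasing-power order, as in the original.
def horner_deflate(P, r):
    """Divide P(x) by (x - r); return the quotient's coefficients."""
    Q = [0] * (len(P) - 1)
    v = 0
    for d in range(len(P) - 2, -1, -1):
        v = P[d + 1] + r * v   # running Horner evaluation becomes the quotient coefficient
        Q[d] = v
    return Q

# x^2 - 1 = (x - 1)(x + 1): P = [-1, 0, 1] with root r = 1 gives Q = [1, 1], i.e. x + 1.
assert horner_deflate([-1, 0, 1], 1) == [1, 1]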
39d5e277eb935eee8876c1af0b0557edcf5f6b91
146012dda21ab72badad6daa8f98e6b26fedb128
/13day/9-名片系统.py
c647a413c352cb726036cb58e94329648c26b284
[]
no_license
fengshuai1/1805
41786c3561beca580ba82d9e9d4347571e38e198
8dc3e6605cc1d6f91685ae45bfebfc062f0aa489
refs/heads/master
2020-03-19T07:41:40.608389
2018-06-28T01:45:43
2018-06-28T01:45:43
136,140,329
0
0
null
null
null
null
UTF-8
Python
false
false
3,124
py
list = []  # holds the name cards
print("Business Card Management System".center(50, "*"))
while True:
    print("1: Add a card".center(50, " "))
    print("2: Find a card".center(50, " "))
    print("3: Edit a card".center(50, " "))
    print("4: Delete a card".center(50, " "))
    print("5: Print all cards".center(50, " "))
    num = int(input("Please choose an option"))
    if num == 1:
        d = {}  # empty dict for the new card
        while True:
            name = input("Please enter the name to add")
            if len(name) > 4:
                print("Too long, please re-enter")
                continue
            job = input("Please enter the job title to add")
            if len(job) > 4:
                print("Too long, please re-enter")
                continue
            phone = input("Please enter the phone number")
            if len(phone) != 11 or phone.startswith("1") == False:
                print("Invalid phone number, please re-enter")
                continue
            d["name"] = name
            d["job"] = job
            d["phone"] = phone
            # append to the list
            list.append(d)
            print("Added successfully")
            break
    elif num == 2:
        name = input("Please enter the name to find")
        flag = False  # assume the person is not there
        for i in list:
            if name == i["name"]:
                print("Name: %s\nJob: %s\nPhone: %s" % (i["name"], i["job"], i["phone"]))
                flag = True  # found
                break
        if flag == False:
            print("No such person")
    elif num == 3:
        # before editing, you must first find the person
        name = input("Please enter the name of the person to edit")
        flag = False
        for i in list:
            if name == i["name"]:
                print("1: Edit name")
                print("2: Edit job")
                print("3: Edit phone")
                num_1 = int(input("Please choose an option"))
                if num_1 == 1:
                    new_name = input("Please enter the new name")
                    i["name"] = new_name
                elif num_1 == 2:
                    new_job = input("Please enter the new job")
                    i["job"] = new_job
                elif num_1 == 3:
                    new_phone = input("Please enter the new phone")
                    i["phone"] = new_phone
                flag = True
                break
        if flag == False:
            print("No such person")
    elif num == 4:
        name = input("Please enter the name to delete")
        flag = False
        for position, i in enumerate(list):  # iterate with the index
            if name == i["name"]:
                flag = True  # found
                print("1: Confirm deletion")
                print("2: Cancel deletion")
                num_2 = int(input("Please choose an option"))
                if num_2 == 1:
                    list.pop(position)  # delete directly
                break
        if flag == False:
            print("No such person")
    elif num == 5:  # print the cards
        print("Name\tJob\tPhone")
        for i in list:
            print(" " + i["name"] + "\t " + i["job"] + "\t " + i["phone"])
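# Illustrative refactor sketch, not part of the original file: the find, edit,
# and delete branches all repeat the same linear scan, which could be factored
# into one helper like this.
def find_card(cards, name):
    """Return the index of the first card matching `name`, or -1 if absent."""
    for position, card in enumerate(cards):
        if card["name"] == name:
            return position
    return -1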
c441941156bd0808bc93eb34a0c6ef9a076dbaee
06164402e4a9c46a03d579175e588519dbd4048d
/experiments/experiments_gdsc/cross_validation/vb_nmf/linesearch_xval_vb.py
013c70a9a0481ff098be2e4b97b6fb3098dc6e91
[ "Apache-2.0" ]
permissive
XuanHeIIIS/BNMTF
19547e36466ecee8d45fb0002d305ee6b7ba6c23
34df0c3cebc5e67a5e39762b9305b75d73a2a0e0
refs/heads/master
2020-03-27T12:47:58.375964
2018-06-10T10:22:19
2018-06-10T10:22:19
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,825
py
""" Run the cross validation with line search for model selection using VB-NMF on the Sanger dataset. """ import sys, os project_location = os.path.dirname(__file__)+"/../../../../../" sys.path.append(project_location) import numpy, random from BNMTF.code.models.bnmf_vb_optimised import bnmf_vb_optimised from BNMTF.code.cross_validation.line_search_cross_validation import LineSearchCrossValidation from BNMTF.data_drug_sensitivity.gdsc.load_data import load_gdsc # Settings standardised = False iterations = 1000 init_UV = 'random' K_range = [15,20,25,30] no_folds = 10 restarts = 1 quality_metric = 'AIC' output_file = "./results.txt" alpha, beta = 1., 1. lambdaU = 1./10. lambdaV = 1./10. priors = { 'alpha':alpha, 'beta':beta, 'lambdaU':lambdaU, 'lambdaV':lambdaV } # Load in the Sanger dataset (_,X_min,M,_,_,_,_) = load_gdsc(standardised=standardised,sep=',') # Run the cross-validation framework #random.seed(42) #numpy.random.seed(9000) nested_crossval = LineSearchCrossValidation( classifier=bnmf_vb_optimised, R=X_min, M=M, values_K=K_range, folds=no_folds, priors=priors, init_UV=init_UV, iterations=iterations, restarts=restarts, quality_metric=quality_metric, file_performance=output_file ) nested_crossval.run() """ all_MSE = [2.2242309355503416, 2.3108126630384804, 2.4095896447817631, 2.2188694213830114, 2.4185938516134278, 2.1808748510586002, 2.2503432196374651, 2.2305023229025145, 2.3595465204422488, 2.2186318302878667] all_R2 = [0.8123419361488506, 0.8011409466575017, 0.7943028271877304, 0.8125046212085996, 0.7934881370166628, 0.8111969927756486, 0.8058878338360765, 0.811089129626958, 0.798953276136085, 0.8151865445946502] Average MSE: 2.2821995260695718 +- 0.0066998949966021598 Average R^2: 0.80560922451887629 +- 5.8495363723835686e-05 """
72dde4d0cca5ada32dd37e6e36d79b7dc6680cba
685e7dc080a383d12dd526a510a8f74c34ef2e71
/tests/nonci/test_compare_pretrained.py
cf57a0df969679003ebcb54d6d1f3d881dc8170d
[ "MIT" ]
permissive
18813055625/bert-for-tf2
f1b86351675861ebe710bb4f94e99b89a639f83a
e71d108f0bd8c5af0c4e0b8427b144e996c02fdb
refs/heads/master
2020-07-29T08:24:33.635201
2019-09-09T11:56:37
2019-09-09T11:56:37
209,729,589
0
1
MIT
2019-09-20T07:16:54
2019-09-20T07:16:53
null
UTF-8
Python
false
false
8,706
py
# coding=utf-8
#
# created by kpe on 27.Mar.2019 at 15:37
#

from __future__ import absolute_import, division, print_function

import unittest

import re
import os

import numpy as np
import tensorflow as tf
from tensorflow.python import keras
from tensorflow.python.keras import backend as K

import params

from bert import BertModelLayer
from bert.loader import map_from_stock_variale_name, map_to_stock_variable_name, load_stock_weights
from bert.loader import StockBertConfig, map_stock_config_to_params
from bert.tokenization import FullTokenizer

tf.compat.v1.disable_eager_execution()


class TestCompareBertsOnPretrainedWeight(unittest.TestCase):
    bert_ckpt_dir = ".models/uncased_L-12_H-768_A-12/"
    bert_ckpt_file = bert_ckpt_dir + "bert_model.ckpt"
    bert_config_file = bert_ckpt_dir + "bert_config.json"

    def test_bert_original_weights(self):
        print("bert checkpoint: ", self.bert_ckpt_file)
        bert_vars = tf.train.list_variables(self.bert_ckpt_file)
        for ndx, var in enumerate(bert_vars):
            print("{:3d}".format(ndx), var)

    def create_bert_model(self, max_seq_len=18):
        bc = None
        with tf.io.gfile.GFile(self.bert_config_file, "r") as reader:
            bc = StockBertConfig.from_json_string(reader.read())

        bert = BertModelLayer.from_params(map_stock_config_to_params(bc), name="bert")

        input_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="input_ids")
        token_type_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="token_type_ids")
        output = bert([input_ids, token_type_ids])

        model = keras.Model(inputs=[input_ids, token_type_ids], outputs=output)

        return model, bert, (input_ids, token_type_ids)

    def test_keras_weights(self):
        max_seq_len = 18
        model, bert, inputs = self.create_bert_model(18)
        model.build(input_shape=[(None, max_seq_len), (None, max_seq_len)])
        model.summary()

        for ndx, var in enumerate(bert.trainable_variables):
            print("{:3d}".format(ndx), var.name, var.shape)

        #for ndx, var in enumerate(model.trainable_variables):
        #    print("{:3d}".format(ndx), var.name, var.shape)

    def test___compare_weights(self):
        #tf.reset_default_graph()
        max_seq_len = 18
        model, bert, inputs = self.create_bert_model(18)
        model.build(input_shape=[(None, max_seq_len), (None, max_seq_len)])

        stock_vars = tf.train.list_variables(self.bert_ckpt_file)
        stock_vars = {name: list(shape) for name, shape in stock_vars}

        keras_vars = model.trainable_variables
        keras_vars = {var.name.split(":")[0]: var.shape.as_list() for var in keras_vars}

        matched_vars = set()
        unmatched_vars = set()
        shape_errors = set()
        for name in stock_vars:
            bert_name = name
            keras_name = map_from_stock_variale_name(bert_name)
            if keras_name in keras_vars:
                if keras_vars[keras_name] == stock_vars[bert_name]:
                    matched_vars.add(bert_name)
                else:
                    shape_errors.add(bert_name)
            else:
                unmatched_vars.add(bert_name)

        print("bert -> keras:")
        print("  matched count:", len(matched_vars))
        print("  unmatched count:", len(unmatched_vars))
        print("  shape error count:", len(shape_errors))
        print("unmatched:\n", "\n ".join(unmatched_vars))

        self.assertEqual(197, len(matched_vars))
        self.assertEqual(9, len(unmatched_vars))
        self.assertEqual(0, len(shape_errors))

        matched_vars = set()
        unmatched_vars = set()
        shape_errors = set()
        for name in keras_vars:
            keras_name = name
            bert_name = map_to_stock_variable_name(keras_name)
            if bert_name in stock_vars:
                if stock_vars[bert_name] == keras_vars[keras_name]:
                    matched_vars.add(keras_name)
                else:
                    shape_errors.add(keras_name)
            else:
                unmatched_vars.add(keras_name)

        print("keras -> bert:")
        print("  matched count:", len(matched_vars))
        print("  unmatched count:", len(unmatched_vars))
        print("  shape error count:", len(shape_errors))
        print("unmatched:\n", "\n ".join(unmatched_vars))

        self.assertEqual(197, len(matched_vars))
        self.assertEqual(0, len(unmatched_vars))
        self.assertEqual(0, len(shape_errors))

    def predict_on_keras_model(self, input_ids, input_mask, token_type_ids):
        max_seq_len = input_ids.shape[-1]

        model, bert, k_inputs = self.create_bert_model(max_seq_len)
        model.build(input_shape=[(None, max_seq_len), (None, max_seq_len)])

        load_stock_weights(bert, self.bert_ckpt_file)

        k_res = model.predict([input_ids, token_type_ids])
        return k_res

    def predict_on_stock_model(self, input_ids, input_mask, token_type_ids):
        from tests.ext.modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint

        tf.compat.v1.reset_default_graph()

        tf_placeholder = tf.compat.v1.placeholder

        max_seq_len = input_ids.shape[-1]
        pl_input_ids = tf.compat.v1.placeholder(tf.int32, shape=(1, max_seq_len))
        pl_mask = tf.compat.v1.placeholder(tf.int32, shape=(1, max_seq_len))
        pl_token_type_ids = tf.compat.v1.placeholder(tf.int32, shape=(1, max_seq_len))

        bert_config = BertConfig.from_json_file(self.bert_config_file)
        tokenizer = FullTokenizer(vocab_file=os.path.join(self.bert_ckpt_dir, "vocab.txt"))

        s_model = BertModel(config=bert_config,
                            is_training=False,
                            input_ids=pl_input_ids,
                            input_mask=pl_mask,
                            token_type_ids=pl_token_type_ids,
                            use_one_hot_embeddings=False)

        tvars = tf.compat.v1.trainable_variables()
        (assignment_map, initialized_var_names) = get_assignment_map_from_checkpoint(tvars, self.bert_ckpt_file)
        tf.compat.v1.train.init_from_checkpoint(self.bert_ckpt_file, assignment_map)

        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())

            s_res = sess.run(
                s_model.get_sequence_output(),
                feed_dict={pl_input_ids: input_ids,
                           pl_token_type_ids: token_type_ids,
                           pl_mask: input_mask,
                           })
        return s_res

    def test_direct_keras_to_stock_compare(self):
        from tests.ext.modeling import BertModel, BertConfig, get_assignment_map_from_checkpoint

        bert_config = BertConfig.from_json_file(self.bert_config_file)
        tokenizer = FullTokenizer(vocab_file=os.path.join(self.bert_ckpt_dir, "vocab.txt"))

        # prepare input
        max_seq_len = 6
        input_str = "Hello, Bert!"
        input_tokens = tokenizer.tokenize(input_str)
        input_tokens = ["[CLS]"] + input_tokens + ["[SEP]"]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        input_ids = input_ids + [0] * (max_seq_len - len(input_tokens))
        input_mask = [1] * len(input_tokens) + [0] * (max_seq_len - len(input_tokens))
        token_type_ids = [0] * len(input_tokens) + [0] * (max_seq_len - len(input_tokens))

        input_ids = np.array([input_ids], dtype=np.int32)
        input_mask = np.array([input_mask], dtype=np.int32)
        token_type_ids = np.array([token_type_ids], dtype=np.int32)

        print("  tokens:", input_tokens)
        print("input_ids:{}/{}:{}".format(len(input_tokens), max_seq_len, input_ids), input_ids.shape, token_type_ids)

        s_res = self.predict_on_stock_model(input_ids, input_mask, token_type_ids)
        k_res = self.predict_on_keras_model(input_ids, input_mask, token_type_ids)

        np.set_printoptions(precision=9, threshold=20, linewidth=200, sign="+", floatmode="fixed")

        print("s_res", s_res.shape)
        print("k_res", k_res.shape)

        print("s_res:\n {}".format(s_res[0, :2, :10]), s_res.dtype)
        print("k_res:\n {}".format(k_res[0, :2, :10]), k_res.dtype)

        adiff = np.abs(s_res - k_res).flatten()
        print("diff:", np.max(adiff), np.argmax(adiff))

        self.assertTrue(np.allclose(s_res, k_res, atol=1e-6))