blob_id: stringlengths, 40 to 40
directory_id: stringlengths, 40 to 40
path: stringlengths, 3 to 288
content_id: stringlengths, 40 to 40
detected_licenses: listlengths, 0 to 112
license_type: stringclasses, 2 values
repo_name: stringlengths, 5 to 115
snapshot_id: stringlengths, 40 to 40
revision_id: stringlengths, 40 to 40
branch_name: stringclasses, 684 values
visit_date: timestamp[us]date, 2015-08-06 10:31:46 to 2023-09-06 10:44:38
revision_date: timestamp[us]date, 1970-01-01 02:38:32 to 2037-05-03 13:00:00
committer_date: timestamp[us]date, 1970-01-01 02:38:32 to 2023-09-06 01:08:06
github_id: int64, 4.92k to 681M
star_events_count: int64, 0 to 209k
fork_events_count: int64, 0 to 110k
gha_license_id: stringclasses, 22 values
gha_event_created_at: timestamp[us]date, 2012-06-04 01:52:49 to 2023-09-14 21:59:50
gha_created_at: timestamp[us]date, 2008-05-22 07:58:19 to 2023-08-21 12:35:19
gha_language: stringclasses, 147 values
src_encoding: stringclasses, 25 values
language: stringclasses, 1 value
is_vendor: bool, 2 classes
is_generated: bool, 2 classes
length_bytes: int64, 128 to 12.7k
extension: stringclasses, 142 values
content: stringlengths, 128 to 8.19k
authors: listlengths, 1 to 1
author_id: stringlengths, 1 to 132
0ae53c0486a272162874ca46f41ecb302e6654f0
a9d65a3fb6e990c5bb250cfde44220182f6cd430
/indra/tools/analyze_ontology.py
2f8bda343d488ebd0f277e9aedab4e858bfe2742
[ "BSD-2-Clause", "BSD-2-Clause-Views" ]
permissive
dianakolusheva/indra
66e6c69b762922d4f79757e388b693f76b3fcd56
205a719c5b1ff2333e415476b4136e8c57c22949
refs/heads/master
2022-03-14T23:10:57.718762
2022-02-11T14:58:12
2022-02-11T14:58:12
170,338,649
0
0
BSD-2-Clause
2019-02-12T15:09:36
2019-02-12T15:09:30
Python
UTF-8
Python
false
false
1,402
py
from collections import Counter, defaultdict

import networkx

from indra.ontology.bio import bio_ontology


def plot_problem(problem):
    import matplotlib.pyplot as plt
    plt.ion()
    plt.figure()
    G = bio_ontology.subgraph(problem)
    pos = networkx.spring_layout(G)
    networkx.draw_networkx(G, pos, node_color='pink')
    edge_labels = networkx.get_edge_attributes(G, 'source')
    networkx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
    plt.show()


if __name__ == '__main__':
    bio_ontology.initialize()
    xrefs = [(e[0], e[1]) for e in bio_ontology.edges(data=True)
             if e[2]['type'] == 'xref']
    xrefg = bio_ontology.edge_subgraph(xrefs)
    comps = networkx.algorithms.strongly_connected_components(xrefg)
    problems = []
    for comp in comps:
        namespaces = [bio_ontology.get_ns(node) for node in comp]
        cnt = Counter(namespaces)
        if any(v > 1 for k, v in cnt.items()):
            problems.append(comp)
    print('Found %d problems in total' % len(problems))
    problems_by_ns = defaultdict(list)
    for problem in problems:
        nscnt = Counter([bio_ontology.get_ns(n) for n in problem])
        namespaces = [ns for ns, cnt in nscnt.items() if cnt > 1]
        for ns in namespaces:
            problems_by_ns[ns].append(problem)
    for ns, problems_ns in problems_by_ns.items():
        print(ns, len(problems_ns))
2e92fd124092bb606c4af2b949bf986b8f0f39e0
33c6b5d9f1852ac878aada3eb646ac2eabc6b705
/junk/users2/signals.py
490388c9433b9bb4b0c0de06090ea7a91ecbb2b2
[]
no_license
detalikota/trading2
3c0e4308f45a620fe4c034403143cf68dd16db9c
1aaafb6bf7d304be1896efb5ea0963fc40893b23
refs/heads/master
2023-02-04T14:03:10.860410
2020-12-17T14:28:12
2020-12-17T14:28:12
313,858,784
0
0
null
null
null
null
UTF-8
Python
false
false
486
py
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver

from .models import Profile2


@receiver(post_save, sender=User)  # when the user is saved - send this signal
def create_profile(sender, instance, created, **kwargs):
    if created:
        Profile2.objects.create(user=instance)


@receiver(post_save, sender=User)  # save the profile
def save_profile(sender, instance, **kwargs):
    instance.profile.save()
0b34b5732a18165fbcd70164b4c2648ea5eaeeb0
661b6c3d0d2344f86ed126d9b4f6f10c0d9c630b
/track/admin.py
1d2bcc9fdd717c1e4f90db9346add4da60c66ec6
[]
no_license
Sababa123/activity_tracker
69eae58dbbf7523dcc144d3f05f6952cc4e4225b
7c5e2c83e5fc76f8c9a2c5f58569ed92c9eb4421
refs/heads/master
2020-06-25T17:43:14.321638
2019-08-11T21:45:16
2019-08-11T21:45:16
199,380,581
0
0
null
2019-07-29T04:53:25
2019-07-29T04:53:24
null
UTF-8
Python
false
false
175
py
from django.contrib import admin

from .models import Activity, ActivityTracker

# Register your models here.
admin.site.register(Activity)
admin.site.register(ActivityTracker)
7b15c3a5d2060f2149607d1ee7ea040fb35c2eb7
913fb9ec1e709a5140676ba7b2371b1976afca72
/seqPeaks/mirrorPeaks.py
db673bb1b55f332087c53b7b17d7dc5e614e6de3
[]
no_license
cgreer/ResearchScripts
171cfe9555ea06fdeb91084c12d07d1b45a2335c
1107803bb1459d6b6e1dfb1a89679d2b6fd49062
refs/heads/master
2016-09-05T10:43:19.090247
2012-04-12T21:38:11
2012-04-12T21:38:11
1,673,080
0
3
null
null
null
null
UTF-8
Python
false
false
840
py
import cgPeaks
import compareData as compare
import math
import bioLibCG as cg

knowns = compare.tccFileToList('mouseKnownMirs.tcc', 0)
eLevels = []
for known in knowns:
    chrom, strand, start, end = cg.tccSplit(known, True)  # text...
    if strand == '1':
        strand = '-1'
    else:
        strand = '1'
    oppTcc = cg.makeTcc(chrom, strand, start, end)

    knownStretch = cgPeaks.stretch(known)
    knownStretch.createPeaks(1, 20)
    kPos = knownStretch.getHighestPeak()
    if kPos:
        eLevels.append(knownStretch.profile[kPos])

    oppStretch = cgPeaks.stretch(oppTcc)
    oppStretch.createPeaks(1, 20)
    oPos = oppStretch.getHighestPeak()
    if oPos and kPos:
        # determine if they are close enough to be considered mirrored...
        if math.fabs(int(kPos) - int(oPos)) < 12:
            print known, oPos, kPos, oppStretch.profile[oPos], knownStretch.profile[kPos]
print eLevels
113f2aeb9ba582a085e977d64df0240587c81645
5c5e7b03c3373e6217665842f542ca89491290ff
/2016/day18.py
b5a92f12f811a0ccb9b0c88bae32c9802f1ce21c
[]
no_license
incnone/AdventOfCode
9c35214e338e176b6252e52a25a0141a01e290c8
29eac5d42403141fccef3c3ddbb986e01c89a593
refs/heads/master
2022-12-21T21:54:02.058024
2022-12-15T17:33:58
2022-12-15T17:33:58
229,338,789
0
0
null
null
null
null
UTF-8
Python
false
false
970
py
from getinput import get_input


def parse_input(s):
    return [True if c == '^' else False for c in s]


def next_trap_row(s):
    next_row = [s[1]]
    next_row += [(s1 and not s2) or (not s1 and s2) for s1, s2 in zip(s, s[2:])]
    next_row += [s[-2]]
    return next_row


def generate_traps(init_row, num_rows):
    traps = [init_row]
    for _ in range(num_rows - 1):
        traps.append(next_trap_row(traps[-1]))
    return traps


def trap_str(traps):
    return '\n'.join(''.join('^' if c else '.' for c in line) for line in traps)


def part_1(trap_row):
    traps = generate_traps(trap_row, 40)
    return sum(sum(1 for x in line if not x) for line in traps)


def part_2(trap_row):
    traps = generate_traps(trap_row, 400000)
    return sum(sum(1 for x in line if not x) for line in traps)


if __name__ == "__main__":
    the_trap_list = parse_input(get_input(18))
    print('Part 1:', part_1(the_trap_list))
    print('Part 2:', part_2(the_trap_list))
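A small check of next_trap_row against the short example rows that appear in the 2016 day 18 puzzle statement (the assertion is added here for illustration and only uses functions defined above):

row = parse_input('..^^.')
assert trap_str([next_trap_row(row)]) == '.^^^^'  # left edge copies s[1], interior is XOR of neighbours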
f1da8fb43bb78b4f502b576a1f67c671e6e1abed
1a4bc1a11fdb3f714f22f5e0e826b47aa0569de2
/lab/lab04/tests/q1_3.py
79768f8de727ed291ba49dffc59b456e772584a8
[]
no_license
taylorgibson/ma4110-fa21
201af7a044fd7d99140c68c48817306c18479610
a306e1b6e7516def7de968781f6c8c21deebeaf5
refs/heads/main
2023-09-05T21:31:44.259079
2021-11-18T17:42:15
2021-11-18T17:42:15
395,439,687
0
1
null
null
null
null
UTF-8
Python
false
false
227
py
test = {
    'name': 'q1_3',
    'points': None,
    'suites': [{'cases': [{'code': ">>> disemvowel('datasceince')\n'dtscnc'",
                           'hidden': False,
                           'locked': False}],
                'scored': True,
                'setup': '',
                'teardown': '',
                'type': 'doctest'}]}
44c7ea4c09cf76c997f45dc2c463741f3ba3af03
5f6425e9d83b57b864e48f227e1dc58356a555c0
/utils/palettes/personalized/piotr_kozlowski.py
8f12ab59b92e28ce4bf7bd066c55b145ec16a2f9
[ "MIT" ]
permissive
jan-warchol/selenized
b374fa7822f281b16aa8b52e34bd1e585db75904
df1c7f1f94f22e2c717f8224158f6f4097c5ecbe
refs/heads/master
2023-06-22T09:37:02.962677
2022-09-12T20:24:40
2022-09-12T20:24:40
45,570,283
663
58
MIT
2023-04-18T09:33:22
2015-11-04T22:00:52
Emacs Lisp
UTF-8
Python
false
false
217
py
import selenized_base

name = 'Piotr Kozlowski'

palette = selenized_base.generate_palette(
    background=(97, 0, 8),
    foreground=(25, -6, -6),
    saturation=1.4,
    accent_offset=5,
    accent_l_spread=30,
)
45297b843b717fd571b9a542906a15e1a9b43bb3
8b3ca44ee3d990233e74655b7131d616094f70c2
/experiments/sparsity/methylation_gm/gaussian_truncatednormal_hierarchical.py
427b535bd7e7c6d03bb77c738acc2c5ee7ee563c
[]
no_license
zshwuhan/BMF_Priors
8b8c54271285a72d2085a56a9475c0756f375e67
6a600da1c41f1ccde2f2ba99298b40e68fb9910a
refs/heads/master
2021-05-13T19:10:07.203215
2017-12-01T13:30:21
2017-12-01T13:30:21
116,883,181
1
0
null
2018-01-09T23:36:13
2018-01-09T23:36:13
null
UTF-8
Python
false
false
1,480
py
'''
Measure sparsity experiment on the methylation GM dataset, with the
Gaussian + Truncated Normal + hierarchical model.
'''

project_location = "/Users/thomasbrouwer/Documents/Projects/libraries/"
import sys
sys.path.append(project_location)

from BMF_Priors.code.models.bmf_gaussian_truncatednormal_hierarchical import BMF_Gaussian_TruncatedNormal_Hierarchical
from BMF_Priors.data.methylation.load_data import load_gene_body_methylation_integer
from BMF_Priors.experiments.sparsity.sparsity_experiment import sparsity_experiment

import matplotlib.pyplot as plt


''' Run the experiment. '''
R, M = load_gene_body_methylation_integer()
model_class = BMF_Gaussian_TruncatedNormal_Hierarchical
n_repeats = 10
stratify_rows = False
fractions_unknown = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99]
settings = {
    'R': R,
    'M': M,
    'K': 5,
    'hyperparameters': {'alpha': 1., 'beta': 1., 'mu_mu': 0., 'tau_mu': 0.1, 'a': 1., 'b': 1.},
    'init': 'random',
    'iterations': 250,
    'burn_in': 200,
    'thinning': 2,
}
fout = './results/performances_gaussian_truncatednormal_hierarchical.txt'
average_performances, all_performances = sparsity_experiment(
    n_repeats=n_repeats, fractions_unknown=fractions_unknown,
    stratify_rows=stratify_rows, model_class=model_class,
    settings=settings, fout=fout)


''' Plot the performance. '''
plt.figure()
plt.title("Sparsity performances")
plt.plot(fractions_unknown, average_performances['MSE'])
plt.ylim(0, 10)
77eae7de5545c636d596feec9e0fe110b7b5700a
bc441bb06b8948288f110af63feda4e798f30225
/architecture_view_sdk/model/flowable_service/bpmn_end_event_pb2.py
3c2b2fa877b300c5b615c5a2704c5007ff77e7ce
[ "Apache-2.0" ]
permissive
easyopsapis/easyops-api-python
23204f8846a332c30f5f3ff627bf220940137b6b
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
refs/heads/master
2020-06-26T23:38:27.308803
2020-06-16T07:25:41
2020-06-16T07:25:41
199,773,131
5
0
null
null
null
null
UTF-8
Python
false
true
3,097
py
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: bpmn_end_event.proto

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from architecture_view_sdk.model.flowable_service import bpmn_links_pb2 as architecture__view__sdk_dot_model_dot_flowable__service_dot_bpmn__links__pb2


DESCRIPTOR = _descriptor.FileDescriptor(
  name='bpmn_end_event.proto',
  package='flowable_service',
  syntax='proto3',
  serialized_options=_b('ZJgo.easyops.local/contracts/protorepo-models/easyops/model/flowable_service'),
  serialized_pb=_b('\n\x14\x62pmn_end_event.proto\x12\x10\x66lowable_service\x1a=architecture_view_sdk/model/flowable_service/bpmn_links.proto\"F\n\x0c\x42PMNEndEvent\x12\n\n\x02id\x18\x01 \x01(\t\x12*\n\x05links\x18\x02 \x01(\x0b\x32\x1b.flowable_service.BPMNLinksBLZJgo.easyops.local/contracts/protorepo-models/easyops/model/flowable_serviceb\x06proto3')
  ,
  dependencies=[architecture__view__sdk_dot_model_dot_flowable__service_dot_bpmn__links__pb2.DESCRIPTOR,])


_BPMNENDEVENT = _descriptor.Descriptor(
  name='BPMNEndEvent',
  full_name='flowable_service.BPMNEndEvent',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='id', full_name='flowable_service.BPMNEndEvent.id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='links', full_name='flowable_service.BPMNEndEvent.links', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=105,
  serialized_end=175,
)

_BPMNENDEVENT.fields_by_name['links'].message_type = architecture__view__sdk_dot_model_dot_flowable__service_dot_bpmn__links__pb2._BPMNLINKS
DESCRIPTOR.message_types_by_name['BPMNEndEvent'] = _BPMNENDEVENT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

BPMNEndEvent = _reflection.GeneratedProtocolMessageType('BPMNEndEvent', (_message.Message,), {
  'DESCRIPTOR' : _BPMNENDEVENT,
  '__module__' : 'bpmn_end_event_pb2'
  # @@protoc_insertion_point(class_scope:flowable_service.BPMNEndEvent)
  })
_sym_db.RegisterMessage(BPMNEndEvent)


DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
03bca080a7ade2f5c8e31f19c07701b55b95c6aa
8f7c595f2b9d075a89417760b7fbf9abb1fecb72
/tele_twitter.py
8b5573134e70b914312a9c197b1313e688781062
[ "MIT" ]
permissive
MainakMaitra/trading-utils
555ed240a20b26d4876f1490fc8a2d9273231fc5
3e73091b4d3432e74c385a9677b7f7ca4192c67f
refs/heads/main
2023-07-04T09:19:40.122188
2021-08-08T09:01:37
2021-08-08T09:01:37
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,825
py
""" Twitter -> Telegram """ import logging import os import time from argparse import ArgumentParser from peewee import * from common import uuid_gen from common.environment import GROUP_CHAT_ID from common.logger import init_logging from common.tele_notifier import send_message_to_telegram from common.twitter_api import get_twitter_home_timeline home_dir = os.getenv("HOME") db = SqliteDatabase(home_dir + "/tele_twitter.db") class TweetData(Model): id = UUIDField(primary_key=True) twitter_handle = CharField() timestamp = BigIntegerField() tweet_id = CharField() tweet = CharField() posted_at = DateTimeField(null=True) class Meta: database = db @staticmethod def save_from(twitter_handle, tweet, tweet_id, posted_at): entity = dict( id=uuid_gen(), timestamp=time.time(), twitter_handle=twitter_handle, tweet_id=tweet_id, tweet=tweet, posted_at=posted_at, ) TweetData.insert(entity).execute() TweetData.create_table() def save_data(tweet_data): TweetData.save_from(**tweet_data) def tweet_already_processed(current_tweet_id): selected_tweet = TweetData.get_or_none(TweetData.tweet_id == current_tweet_id) return selected_tweet is not None def extract_tweet_id(new_tweet): return new_tweet.id def extract_tweet_time(recent_tweet): return recent_tweet.created_at def main(poll_freq_in_secs): home_timeline = get_twitter_home_timeline() logging.info("==> Found tweets {}".format(len(home_timeline))) for tweet in home_timeline: tweet_author_name = tweet.author.name tweet_author_screen_name = tweet.author.screen_name tweet_id = tweet.id tweet_posted_date = tweet.created_at formatted_posted_dt = tweet_posted_date.strftime("%H:%M(%d %B)") tweet_text = tweet.text if tweet_already_processed(tweet_id): logging.warning( "Old Tweet from {} at {} -> {} - already processed".format( tweet_author_screen_name, tweet_posted_date, tweet_id ) ) continue else: entity = dict( twitter_handle=tweet_author_screen_name, tweet=tweet_text, tweet_id=tweet_id, posted_at=tweet_posted_date, ) save_data(entity) if tweet_text.startswith("RT"): continue try: header = f"""👀 {tweet_author_name} at [{formatted_posted_dt}](https://twitter.com/{tweet_author_screen_name}/status/{tweet_id})""" send_message_to_telegram( header, disable_web_preview=False, override_chat_id=GROUP_CHAT_ID ) except: send_message_to_telegram( "🚨 Something went wrong trying to process {}".format(tweet) ) logging.info(f"⏱ Sleeping for {poll_freq_in_secs}") time.sleep(poll_freq_in_secs) def parse_args(): parser = ArgumentParser(description=__doc__) parser.add_argument( "-w", "--wait-in-seconds", type=int, help="Wait between sending tweets in seconds", default=30, ) parser.add_argument( "-r", "--run-once", action="store_true", default=False, help="Run once" ) return parser.parse_args() if __name__ == "__main__": args = parse_args() init_logging() poll_freq_in_secs = args.wait_in_seconds run_once = args.run_once while True: try: main(poll_freq_in_secs) if run_once: logging.info("Running once => Exit") break except Exception: logging.exception("🚨🚨🚨 Something is wrong")
3bcd4da0f4a0652b9ceae41db83ea03b41ab9201
99bfa15723593ea351191d82fac80e36ab25aab1
/LeetCode/merge_interval.py
602b47cb4dc68d07416d23bb801d695654ec3578
[]
no_license
Kartavya-verma/Python-Projects
f23739ef29eab67a8e25569e3f7bf110e42576cb
02ffe926a7ed82bc783e4c4034a2fa53d4d1a870
refs/heads/master
2023-06-22T07:59:39.595084
2021-07-18T15:51:55
2021-07-18T15:51:55
387,139,965
0
0
null
null
null
null
UTF-8
Python
false
false
504
py
# interval = [[1,3],[2,6],[8,10],[15,18]]
interval = [[1,4],[4,5]]
n = interval.copy()
print(n)
res = []
v = []
for i in range(len(n)-1):
    for j in range(len(n)):
        print(n[i][1], n[i+1][0])
        if n[i][1] > n[i+1][0]:
            # print(n[i][1], n[i+1][0])
            n[i].pop()
            n[i].append(n[i+1][1])
            v = n[i+1]
            print(n[i], v)
            n.remove(v)
print(n)

# l = list()
# for i in interval:
#     for j in range(i[0], i[1]+1):
#         l.append(j)
# print(l)
4df3ceca1f9d06815d43914cad8c76bf3d206085
cd78d84441e69c1fc40b6a6e9e235e7cf6882454
/python/48.rotate_image.py
75ea51649a271ec6a48d11d85c9fa7c4a00e2bc0
[]
no_license
buy/leetcode
53a12d4e0298284a5a2034c88353d0dc195aa66c
da0e834e3f2e3016396fffc96ef943ab9ec58ea4
refs/heads/master
2021-01-13T01:48:01.176632
2015-06-14T06:17:17
2015-06-14T06:17:17
31,863,627
1
1
null
null
null
null
UTF-8
Python
false
false
821
py
# You are given an n x n 2D matrix representing an image.
# Rotate the image by 90 degrees (clockwise).
# Follow up:
# Could you do this in-place?

# /*
#  * clockwise rotate
#  * first reverse up to down, then swap the symmetry
#  * 1 2 3     7 8 9     7 4 1
#  * 4 5 6  => 4 5 6  => 8 5 2
#  * 7 8 9     1 2 3     9 6 3
# */
class Solution:
    # @param matrix, a list of lists of integers
    # @return a list of lists of integers
    def rotate(self, matrix):
        matrix.reverse()
        for i in range(len(matrix)):
            for j in range(i):
                matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]

# /*
#  * anticlockwise rotate
#  * first reverse left to right, then swap the symmetry
#  * 1 2 3     3 2 1     3 6 9
#  * 4 5 6  => 6 5 4  => 2 5 8
#  * 7 8 9     9 8 7     1 4 7
# */
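A minimal check of the in-place clockwise rotation on the 3x3 matrix from the comment diagram (assertion added for illustration):

m = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
Solution().rotate(m)
assert m == [[7, 4, 1], [8, 5, 2], [9, 6, 3]]  # matches the diagram above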
2a7ecd17534e9ce6ebfd36b4b2168cfe3d21c7a2
03d1982e2d594f13567afb37f2a5cea2f0d631b6
/setup.py
cde1cd7244e468fc53e94c6fb1355245c8ab6099
[ "Apache-2.0" ]
permissive
maartendraijer/django-fluent-dashboard
e26f29d434528d3b11360549c6452812176e4ecb
8a00fa810f001d1a778eada88b8a390f495f9994
refs/heads/master
2020-04-03T04:22:38.353890
2012-09-26T19:55:18
2012-09-26T19:58:54
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,478
py
#!/usr/bin/env python
from setuptools import setup, find_packages
from os.path import dirname, join
import sys, os

# When creating the sdist, make sure the django.mo file also exists:
try:
    os.chdir('fluent_dashboard')
    from django.core.management.commands.compilemessages import compile_messages
    compile_messages(sys.stderr)
finally:
    os.chdir('..')


setup(
    name='django-fluent-dashboard',
    version='0.4.0dev',
    license='Apache License, Version 2.0',
    install_requires=[
        'django-admin-tools>=0.4.1',  # 0.4.1 is the first release with Django 1.3 support.
    ],
    extras_require = {
        'cachestatus': ['dashboardmods>=0.2.2'],
    },
    description='An improved django-admin-tools dashboard for Django projects',
    long_description=open(join(dirname(__file__), 'README.rst')).read(),
    author='Diederik van der Boor',
    author_email='[email protected]',
    url='https://github.com/edoburu/django-fluent-dashboard',
    download_url='https://github.com/edoburu/django-fluent-dashboard/zipball/master',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
    ]
)
65bb65eef08655b1bc9f00fecef269efb447b5c5
8e115fc8273fd7123438fa8cb85cd7b7992246f5
/App_Login/migrations/0003_follow.py
1e2b7e845c904aeaa10db6c63664e1517b698f1c
[]
no_license
tasim313/Social_Media_django_project
35160f83fa278acd616f9f952ac5acd3ec6430e6
78cf24305a32dfe937d7fcb031ed2f78649a4775
refs/heads/main
2023-06-16T00:45:39.025388
2021-07-11T06:38:38
2021-07-11T06:38:38
384,453,963
1
0
null
null
null
null
UTF-8
Python
false
false
962
py
# Generated by Django 2.2.5 on 2021-07-07 13:52

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('App_Login', '0002_auto_20210706_1237'),
    ]

    operations = [
        migrations.CreateModel(
            name='Follow',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('follower', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='follower', to=settings.AUTH_USER_MODEL)),
                ('following', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='following', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
c321533388cef074c4a7501847d5ddca0b9ae10e
fcc88521f63a3c22c81a9242ae3b203f2ea888fd
/Python3/0838-Push-Dominoes/soln-1.py
b7c9ed30d6e1a5c6ea1654056f23001653264cab
[ "MIT" ]
permissive
wyaadarsh/LeetCode-Solutions
b5963e3427aa547d485d3a2cb24e6cedc72804fd
3719f5cb059eefd66b83eb8ae990652f4b7fd124
refs/heads/master
2022-12-06T15:50:37.930987
2020-08-30T15:49:27
2020-08-30T15:49:27
291,811,790
0
1
MIT
2020-08-31T19:57:35
2020-08-31T19:57:34
null
UTF-8
Python
false
false
1,479
py
class Solution:
    def pushDominoes(self, dominoes: str) -> str:
        leftRs = []
        left_R = None
        n = len(dominoes)
        ans = [None] * n
        for i, d in enumerate(dominoes):
            if d == 'R':
                left_R = 0
                ans[i] = 'R'
            elif d == 'L':
                left_R = None
                ans[i] = 'L'
            else:
                if left_R is not None:
                    left_R += 1
                    ans[i] = 'R'
                else:
                    ans[i] = '.'
            leftRs.append(left_R)
        right_L = None
        for i in reversed(range(n)):
            d = dominoes[i]
            if d == 'L':
                right_L = 0
                ans[i] = 'L'
            elif d == 'R':
                right_L = None
                ans[i] = 'R'
            else:
                if right_L is not None:
                    right_L += 1
                    if leftRs[i] is None:
                        ans[i] = 'L'
                    else:
                        if leftRs[i] < right_L:
                            ans[i] = 'R'
                        elif leftRs[i] == right_L:
                            ans[i] = '.'
                        else:
                            ans[i] = 'L'
                else:
                    if leftRs[i] is not None:
                        ans[i] = 'R'
                    else:
                        ans[i] = '.'
        return ''.join(ans)
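Quick sanity checks against the two examples commonly quoted with this problem (assertions added for illustration):

assert Solution().pushDominoes('RR.L') == 'RR.L'
assert Solution().pushDominoes('.L.R...LR..L..') == 'LL.RR.LLRRLL..'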
76c8f94b2f1120d880d69b6121372442259a80bc
a08409f712dc0b1045f695fd2ffee2bb6cc7835b
/math/0x02-calculus/17-integrate.py~
07a15858a238dab1c6d0b0de4984d551527088f4
[]
no_license
mohsenabedelaal/holbertonschool-machine_learning
d3f2137761e10d620472ca6e5f3288c45898381d
2765a09ba3064168b024952d18b1a2471952c8a2
refs/heads/main
2023-06-02T16:11:55.600921
2021-06-10T19:08:13
2021-06-10T19:08:13
318,244,087
0
1
null
null
null
null
UTF-8
Python
false
false
754
#!/usr/bin/env python3
"""derivative poly"""


def poly_integral(poly, C=0):
    """Module for integral"""
    if not isinstance(poly, list):
        return None
    if len(poly) == 0:
        return None
    if len(poly) == 1:
        return [0]
    if len(poly) == 2:
        return [poly[1]]  # was poly(1), which would raise TypeError on a list
    else:
        integral = []
        for i in range(0, len(poly)):
            if isinstance(poly[i], (int, float)):
                if i == 0:
                    integral.append(0)
                if poly[i] % (i + 1) == 0:
                    result = int((1 / (i + 1)) * poly[i])
                else:
                    result = (1 / (i + 1)) * poly[i]
                integral.append(result)
            else:
                return None
        return integral  # was `return der`, an undefined name
6562ceefb580fe6394f1e927b79291c2063a56c7
5692e8a3357f7afe6284b43c4a9770d81957a511
/student/migrations/0015_auto_20201119_1605.py
8fc7c2324f4ec87bd9f70cdb6eabaa98d7202789
[]
no_license
OmarFateh/student-management-system
49bcfbdf15a631cf7f64ff200d530a44a44409ac
2c53f81a55fe631406b642365a68de19501c0f17
refs/heads/master
2023-07-16T00:02:54.796428
2021-08-25T01:54:02
2021-08-25T01:54:02
355,033,774
0
0
null
null
null
null
UTF-8
Python
false
false
440
py
# Generated by Django 3.1.2 on 2020-11-19 14:05

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('student', '0014_sessionyear_date_range'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='student',
            options={'ordering': ['user__full_name'], 'verbose_name': 'Student', 'verbose_name_plural': 'Students'},
        ),
    ]
91a80f36411eaa0287c81fe0a4414a82d2b3022a
a7104434e0ddb4575ef0a6cd467bac6620570de8
/hunter108.py
ff44819a2ca4401163bea362d9ae1cf41d6bc5c3
[]
no_license
GauthamAjayKannan/GUVI-1
7b276eef3195bec9671eec8bb6bcc588cb5c970e
fafabab93df55abcc399f6e2664286ed511fd683
refs/heads/master
2020-06-25T07:38:08.465414
2019-05-17T11:24:53
2019-05-17T11:24:53
null
0
0
null
null
null
null
UTF-8
Python
false
false
243
py
x = list(input())
list1 = []
out = 0
if len(x) == 1:
    out = int(x[0]) * int(x[0])
else:
    for i in x:
        list1.append(int(i))
    for i in range(len(list1) - 1):
        out += (list1[i] ** list1[i+1])
    out += (list1[len(list1)-1] ** list1[0])
print(out)
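Worked example (added for illustration): for the input "23" the program computes 2**3 + 3**2 = 8 + 9 = 17, since each digit is raised to the power of the next one and the last digit wraps around to the first.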
def3dce5cc56dc5116d525765b8c6bc66cb2e7fa
a44cfbdacdb9d695533f425ee72da86f904232c1
/bin/summarize-days
3a7583591a34ce37d84e1baec23fe453d926fdf1
[ "LicenseRef-scancode-warranty-disclaimer", "Apache-2.0" ]
permissive
sofie-lu/quac
434d1481949ad0a229e10b7ccc725f54740c2d44
03e3bd9691dddd819f629aba628e9fe6d45c2d3b
refs/heads/master
2020-04-08T09:33:54.217874
2014-05-15T20:32:00
2014-05-15T20:32:00
null
0
0
null
null
null
null
UTF-8
Python
false
false
744
#!/usr/bin/env python

'''Parse the given metadata pickle file and print, TSV format, a summary of
each day's metadata on stdout. Column order matches the metadata field
documentation.'''

# Copyright (c) 2012-2013 Los Alamos National Security, LLC, and others.

import argparse
import sys

import quacpath
import pickle_glue
import tsv_glue
import u

ap = argparse.ArgumentParser()
ap.add_argument('file', metavar='METADATA_FILE')
args = u.parse_args(ap)

tsv = tsv_glue.Writer(sys.stdout.fileno())
for (day, md) in sorted(pickle_glue.File(args.file).data['days'].items()):
    tsv.writerow([str(day), md['count'] or 0, md['count_geotag'],
                  md['min_id'], md['max_id']])
25933a755301dda6561a58f195d7462cdc9f384c
a9e3f3ad54ade49c19973707d2beb49f64490efd
/Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/api_admin/api/filters.py
63d4bf6ec7132ca2326fad9c709142a6713249fd
[ "MIT", "AGPL-3.0-only", "AGPL-3.0-or-later" ]
permissive
luque/better-ways-of-thinking-about-software
8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d
5809eaca7079a15ee56b0b7fcfea425337046c97
refs/heads/master
2021-11-24T15:10:09.785252
2021-11-22T12:14:34
2021-11-22T12:14:34
163,850,454
3
1
MIT
2021-11-22T12:12:31
2019-01-02T14:21:30
JavaScript
UTF-8
Python
false
false
432
py
""" Filters for api_admin api """ from rest_framework import filters class IsOwnerOrStaffFilterBackend(filters.BaseFilterBackend): """ Filter that only allows users to see their own objects or all objects if it is staff user. """ def filter_queryset(self, request, queryset, view): if request.user.is_staff: return queryset else: return queryset.filter(user=request.user)
76ea21e0cd0bb9f8f9684fc16048be3713d1df62
1cc54d31a4a443230668ca063bcd27179ff096c2
/store/urls.py
fbf6972755a0e56ef5d0ca947869dfff8b9f903d
[]
no_license
akhmadakhmedov/e-commerce
8d84f0ae7acd4dc80c8afbe3ab55ed13873ef631
6708aa62dec08be9b18fae15125eeef266d869e3
refs/heads/main
2023-05-29T21:36:40.354231
2021-06-14T13:23:22
2021-06-14T13:23:22
370,982,449
0
0
null
null
null
null
UTF-8
Python
false
false
350
py
from django.urls import path
from . import views

urlpatterns = [
    path('', views.store, name='store'),
    path('category/<slug:category_slug>/', views.store, name='products_by_category'),
    path('category/<slug:category_slug>/<product_slug>/', views.product_detail, name='product_detail'),
    path('search/', views.search, name='search'),
]
53aa1b2409b3fe45fb8cacb3d6c9abc63b5229eb
f6f3ade5a59fcb904a147fa3cf1933a1b225338f
/src/gate_timer.py
536a3cef3cb573db60d205d844c69d50ccab9872
[]
no_license
HajimeKawahara/autobop
3b559011f9dceba68b02e47cd95fdef4fa9ef41e
2c99625895206d24587db90a2ac03d1e536eb9ca
refs/heads/master
2021-01-16T01:03:01.351588
2018-03-18T09:15:53
2018-03-18T09:15:53
107,845,791
1
0
null
null
null
null
UTF-8
Python
false
false
607
py
#!/usr/bin/python
import sys
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pylab
import argparse
#import chord_probability as cp
import rest


def gate_stop(mnow, counter, finger=1, width=5.0, c=65.0):
    if c == np.inf:
        counter = counter + 1
        return mnow, counter
    numrest = rest.get_numrest()
    stopp = 1.0/(1+np.exp(-(1.0/width)*(float(counter)-c)))  # stop probability (sigmoid type)
    j = np.random.random()
    if j < stopp:
        mnow = numrest*np.ones(finger, dtype=int)
        counter = -1
    else:
        counter = counter + 1
    return mnow, counter
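One property of the sigmoid above (added observation): at counter == c the stop probability is exactly 0.5, and width controls how sharply it rises around that point:

import numpy as np
width, c = 5.0, 65.0
stopp = 1.0 / (1 + np.exp(-(1.0 / width) * (65.0 - c)))  # counter == c
assert stopp == 0.5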
55e4a9778ff59c0161d9877f8b727552e30befcb
6b2a8dd202fdce77c971c412717e305e1caaac51
/solutions_5738606668808192_0/Python/Nihilant/p3.py
9a79afbff433b6e056e4bf1c99769fccfd98c045
[]
no_license
alexandraback/datacollection
0bc67a9ace00abbc843f4912562f3a064992e0e9
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
refs/heads/master
2021-01-24T18:27:24.417992
2017-05-23T09:23:38
2017-05-23T09:23:38
84,313,442
2
4
null
null
null
null
UTF-8
Python
false
false
1,096
py
import sys, functools, math


def jc(n, j):
    coin_n = 2**(n-1) + 1
    for i in range(j):
        test = True
        while test:
            coin = bin(coin_n)[2:]
            sol = []
            for base in range(2, 11):
                num = int(coin, base=base)
                k = -1
                limit = int(math.sqrt(num))
                for div in range(2, limit):
                    if num % div == 0:
                        k = div
                        break
                if k == -1:
                    coin_n = coin_n + 2
                    break
                else:
                    sol.append(k)
            if len(sol) == 9:
                coin_n = coin_n + 2
                print(coin, ' '.join(map(str, sol)))
                test = False


if __name__ == "__main__":
    f = sys.stdin
    if len(sys.argv) >= 2:
        fn = sys.argv[1]
        if fn != '-':
            f = open(fn)
    T = int(f.readline())
    for i in range(T):
        N, J = f.readline().strip('\n').split(" ")
        print("Case #{0}:".format(i + 1))
        jc(int(N), int(J))
3f1ce17c7e56aa343e288281207e4e0013191cf9
ec53949dafa4b6ad675d679b05ed7c83fef2c69a
/DataStructuresAndAlgo/DynamicProgramming/FibonacciTabulation.py
d691cd1c6b7a1de451aa33b49df8d84df1b3b17e
[]
no_license
tpotjj/Python
9a5a20a53cd7a6ec14386c1db8ce155e0fc9ab8a
ca73c116ada4d05c0c565508163557744c86fc76
refs/heads/master
2023-07-11T16:37:10.039522
2021-08-14T11:17:55
2021-08-14T11:17:55
null
0
0
null
null
null
null
UTF-8
Python
false
false
130
py
def fibTab(n):
    tb = [0, 1]
    for i in range(2, n):
        tb.append(tb[i-1] + tb[i-2])
    return tb[n-1]


print(fibTab(6))
5bcb0760c6c64e527ed4a662ff790c3cb71afad6
b1ff576cdde5adf698b98446538e0b56d18f070f
/grading/apps.py
b507c75018b33f6f0904ff9ce425d1006d934d9a
[]
no_license
DUMBALINYOLO/gbc_oms
e3cfba17a12f3600b6503fc70cc9f3dcab5cc0e2
cdea6fd81333088b2db9911140681fec9577132a
refs/heads/main
2023-08-20T11:48:36.418990
2021-10-11T23:25:35
2021-10-11T23:25:35
322,593,446
0
0
null
null
null
null
UTF-8
Python
false
false
152
py
from django.apps import AppConfig


class GradingConfig(AppConfig):
    name = 'grading'

    # def ready(self):
    #     import grading.signals
3cf0e063b91a5be11fd48040ca02637fab5c720d
cb1d0dd68b1136b8a371f7d2b423e45171e98ab7
/src/xsd_trips/urls.py
4d153c6902a452d9f38693e3b2a855184692fcd9
[]
no_license
ScubaJimmE/xSACdb
86640ab791327392f88eb4993c858aa6d340c758
1996ab286ee0446b0a0e38882104bbf8904d8bdc
refs/heads/develop
2021-07-15T04:47:11.279138
2016-05-25T01:44:05
2016-05-25T01:44:05
62,212,226
0
0
null
2021-03-20T00:40:24
2016-06-29T09:09:50
Python
UTF-8
Python
false
false
256
py
from django.conf.urls import patterns, include, url
from django.conf import settings

from views import *

urlpatterns = patterns('',
    url(r'^$', TripList.as_view(), name='TripList'),
    url(r'^new/$', TripCreate.as_view(), name='TripCreate'),
)
da9e00f2af1599c983cb133c32b539da17ece7fe
155fa6aaa4ef31cc0dbb54b7cf528f36743b1663
/Static and Class Methods/Gym/subscription.py
c93a716815e0c338d34e9dadac30833811a61828
[]
no_license
GBoshnakov/SoftUni-OOP
efe77b5e1fd7d3def19338cc7819f187233ecab0
0145abb760b7633ca326d06a08564fad3151e1c5
refs/heads/main
2023-07-13T18:54:39.761133
2021-08-27T08:31:07
2021-08-27T08:31:07
381,711,275
1
0
null
null
null
null
UTF-8
Python
false
false
467
py
class Subscription:
    _id = 0

    def __init__(self, date, customer_id, trainer_id, exercise_id):
        Subscription._id += 1
        self.date = date
        self.customer_id = customer_id
        self.trainer_id = trainer_id
        self.exercise_id = exercise_id
        self.id = Subscription._id

    @staticmethod
    def get_next_id():
        return Subscription._id + 1

    def __repr__(self):
        return f"Subscription <{self.id}> on {self.date}"
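A short illustrative run (the dates are made up) showing the class-level counter and the static helper:

s1 = Subscription('14.05.2021', 1, 2, 3)  # takes id 1
print(Subscription.get_next_id())         # 2
s2 = Subscription('15.05.2021', 4, 5, 6)  # takes id 2
print(repr(s2))                           # Subscription <2> on 15.05.2021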
5690b8a65b35121276c3493f5273eae7f9e1b7fb
609ee4aad38036c29456581f821a9bad4d6b729a
/tests/test_pay.py
37b2629d535646bc20848fbf05772211f9a8c3b2
[]
no_license
sdkwe/pywe-pay
32f14d218b0f8c029fb08a54df99ba70b90374b4
daf1699c7dafd0960359b0c3f570f32cc906dc5f
refs/heads/master
2020-05-29T15:13:05.371833
2020-04-27T07:32:54
2020-04-27T07:32:54
62,115,428
5
4
null
null
null
null
UTF-8
Python
false
false
677
py
# -*- coding: utf-8 -*-

import time

from pywe_pay import WeChatPay

from local_config_example import WechatPayConfig


class TestPayCommands(object):
    def test_native_unifiedorder(self):
        native = WechatPayConfig.get('JSAPI', {})
        wxpay = WeChatPay(native.get('appID'), native.get('apiKey'), native.get('mchID'))
        # body=u'支付测试' means "payment test"
        result = wxpay.order.create(body=u'支付测试', notify_url='https://a.com',
                                    out_trade_no=int(time.time() * 1000), total_fee=1,
                                    trade_type='NATIVE')
        assert isinstance(result, dict)
        assert result.get('return_code') == 'SUCCESS'
        assert result.get('result_code') == 'SUCCESS'
        assert result.get('code_url')
42a62da8e1d51a7a3b3e573cdd0c1b6b3f423315
80afa26ba73b53f38e3fc21bf395030762fe8981
/576. Out of Boundary Paths.py
5481266d25818462836a2c72949c9f604ad39dc5
[]
no_license
iamshivamgoswami/Random-DSA-Questions
45b402063dbd2e31da2eee7590b6991aa624637d
e36250d08cf0de59cd0a59b4f3293e55793b1a6f
refs/heads/main
2023-07-15T15:48:36.363321
2021-08-26T03:40:47
2021-08-26T03:40:47
392,702,686
0
0
null
null
null
null
UTF-8
Python
false
false
563
py
class Solution:
    def findPaths(self, m: int, n: int, maxMove: int, i: int, j: int) -> int:
        d = {}

        def dfs(N, i, j):
            if (i, j, N) in d:
                return d[(i, j, N)]
            if i == m or i < 0 or j == n or j < 0:
                return 1
            if N == 0:
                return 0
            s = 0
            for x, y in [(i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)]:
                s += (dfs(N - 1, x, y))
            d[(i, j, N)] = s
            return s

        return dfs(maxMove, i, j) % (10 ** 9 + 7)
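A quick check against the example usually given with this problem (assertion added for illustration): a 2x2 grid with two moves, starting at (0, 0), has 6 out-of-bounds paths.

assert Solution().findPaths(2, 2, 2, 0, 0) == 6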
0e65583a2f3733544f9d2a193e93f68be851c9df
4b2c5fe21ffcc35837bba06d2c3b43c5116f74bd
/Blackjack.py
8d5c04a77bb98a1ca5b4be7994bed8812a47cdf5
[]
no_license
joydas65/Codeforces-Problems
8870cbbf1db9fa12b961cee7aaef60960af714ae
eb0f5877d0fede95af18694278029add7385973d
refs/heads/master
2023-06-23T07:16:49.151676
2023-06-17T07:28:24
2023-06-17T07:28:24
184,123,514
5
1
null
2020-11-28T07:28:03
2019-04-29T18:33:23
Python
UTF-8
Python
false
false
150
py
n = int(input())
if n <= 10 or n > 21:
    print(0)
elif (n >= 11 and n <= 19) or n == 21:
    print(4)
else:
    print(15)
4a3bb92e0a9b95c1fc10eb9db2fd34e8f5cdcb8d
1669bf106be7e4e88ad957aa1f0a708a49f9ef87
/first_website/setup.py
d87bfe260f13a957af9d07c566ab6284fad70c61
[]
no_license
genzj/python-fundamentals-course
280166037bb6ff25e2400fa3b281de153824c622
31218a42c609d923b3ae0c7d785b9dc02c0d9a6e
refs/heads/master
2023-01-09T22:13:04.040355
2021-03-20T02:33:18
2021-03-20T02:33:18
218,776,587
2
2
null
2022-12-26T20:59:32
2019-10-31T13:47:43
Python
UTF-8
Python
false
false
1,512
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""The setup script."""

from setuptools import setup, find_packages

with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    history = history_file.read()

requirements = []

setup_requirements = []

test_requirements = []

setup(
    author="Jie ZHU",
    author_email='[email protected]',
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    description="Python Boilerplate contains all the boilerplate you need to create a Python package.",
    install_requires=requirements,
    long_description=readme + '\n\n' + history,
    include_package_data=True,
    keywords='first_website',
    name='first_website',
    packages=find_packages(include=['first_website', 'first_website.*']),
    setup_requires=setup_requirements,
    test_suite='tests',
    tests_require=test_requirements,
    url='https://github.com/genzj/first_website',
    version='0.1.0',
    zip_safe=False,
)
93af8f67f99cacadec773970d6e4593f6c1b339e
dd098f8a93f787e38676283679bb39a290ba28b4
/samples/openapi3/client/3_0_3_unit_test/python-experimental/test/test_paths/test_response_body_post_maxlength_validation_response_body_for_content_types/test_post.py
b5ee61f9396dd84fdab727dd1f5cee04e4b0aa0f
[ "Apache-2.0" ]
permissive
InfoSec812/openapi-generator
727c0235d3bad9b85ac12068808f844287af6003
e0c72702c3d5dae2a627a2926f0cddeedca61e32
refs/heads/master
2022-10-22T00:31:33.318867
2022-08-20T14:10:31
2022-08-20T14:10:31
152,479,633
1
0
Apache-2.0
2023-09-04T23:34:09
2018-10-10T19:38:43
Java
UTF-8
Python
false
false
7,073
py
# coding: utf-8

"""
    Generated by: https://openapi-generator.tech
"""

import unittest
from unittest.mock import patch

import urllib3

import unit_test_api
from unit_test_api.paths.response_body_post_maxlength_validation_response_body_for_content_types import post  # noqa: E501
from unit_test_api import configuration, schemas, api_client

from .. import ApiTestMixin


class TestResponseBodyPostMaxlengthValidationResponseBodyForContentTypes(ApiTestMixin, unittest.TestCase):
    """
    ResponseBodyPostMaxlengthValidationResponseBodyForContentTypes unit test stubs
    """
    _configuration = configuration.Configuration()

    def setUp(self):
        used_api_client = api_client.ApiClient(configuration=self._configuration)
        self.api = post.ApiForpost(api_client=used_api_client)  # noqa: E501

    def tearDown(self):
        pass

    response_status = 200

    def test_too_long_is_invalid_fails(self):
        # too long is invalid
        accept_content_type = 'application/json'
        with patch.object(urllib3.PoolManager, 'request') as mock_request:
            payload = (
                "foo"
            )
            mock_request.return_value = self.response(
                self.json_bytes(payload),
                status=self.response_status
            )
            with self.assertRaises((unit_test_api.ApiValueError, unit_test_api.ApiTypeError)):
                self.api.post(
                    accept_content_types=(accept_content_type,)
                )

            self.assert_pool_manager_request_called_with(
                mock_request,
                self._configuration.host + '/responseBody/postMaxlengthValidationResponseBodyForContentTypes',
                method='post'.upper(),
                content_type=None,
                accept_content_type=accept_content_type,
            )

    def test_ignores_non_strings_passes(self):
        # ignores non-strings
        accept_content_type = 'application/json'
        with patch.object(urllib3.PoolManager, 'request') as mock_request:
            payload = (
                100
            )
            mock_request.return_value = self.response(
                self.json_bytes(payload),
                status=self.response_status
            )
            api_response = self.api.post(
                accept_content_types=(accept_content_type,)
            )
            self.assert_pool_manager_request_called_with(
                mock_request,
                self._configuration.host + '/responseBody/postMaxlengthValidationResponseBodyForContentTypes',
                method='post'.upper(),
                accept_content_type=accept_content_type,
            )
            assert isinstance(api_response.response, urllib3.HTTPResponse)
            assert isinstance(api_response.body, post.SchemaFor200ResponseBodyApplicationJson)
            deserialized_response_body = post.SchemaFor200ResponseBodyApplicationJson._from_openapi_data(
                payload,
                _configuration=self._configuration
            )
            assert api_response.body == deserialized_response_body

    def test_shorter_is_valid_passes(self):
        # shorter is valid
        accept_content_type = 'application/json'
        with patch.object(urllib3.PoolManager, 'request') as mock_request:
            payload = (
                "f"
            )
            mock_request.return_value = self.response(
                self.json_bytes(payload),
                status=self.response_status
            )
            api_response = self.api.post(
                accept_content_types=(accept_content_type,)
            )
            self.assert_pool_manager_request_called_with(
                mock_request,
                self._configuration.host + '/responseBody/postMaxlengthValidationResponseBodyForContentTypes',
                method='post'.upper(),
                accept_content_type=accept_content_type,
            )
            assert isinstance(api_response.response, urllib3.HTTPResponse)
            assert isinstance(api_response.body, post.SchemaFor200ResponseBodyApplicationJson)
            deserialized_response_body = post.SchemaFor200ResponseBodyApplicationJson._from_openapi_data(
                payload,
                _configuration=self._configuration
            )
            assert api_response.body == deserialized_response_body

    def test_two_supplementary_unicode_code_points_is_long_enough_passes(self):
        # two supplementary Unicode code points is long enough
        accept_content_type = 'application/json'
        with patch.object(urllib3.PoolManager, 'request') as mock_request:
            payload = (
                "💩💩"
            )
            mock_request.return_value = self.response(
                self.json_bytes(payload),
                status=self.response_status
            )
            api_response = self.api.post(
                accept_content_types=(accept_content_type,)
            )
            self.assert_pool_manager_request_called_with(
                mock_request,
                self._configuration.host + '/responseBody/postMaxlengthValidationResponseBodyForContentTypes',
                method='post'.upper(),
                accept_content_type=accept_content_type,
            )
            assert isinstance(api_response.response, urllib3.HTTPResponse)
            assert isinstance(api_response.body, post.SchemaFor200ResponseBodyApplicationJson)
            deserialized_response_body = post.SchemaFor200ResponseBodyApplicationJson._from_openapi_data(
                payload,
                _configuration=self._configuration
            )
            assert api_response.body == deserialized_response_body

    def test_exact_length_is_valid_passes(self):
        # exact length is valid
        accept_content_type = 'application/json'
        with patch.object(urllib3.PoolManager, 'request') as mock_request:
            payload = (
                "fo"
            )
            mock_request.return_value = self.response(
                self.json_bytes(payload),
                status=self.response_status
            )
            api_response = self.api.post(
                accept_content_types=(accept_content_type,)
            )
            self.assert_pool_manager_request_called_with(
                mock_request,
                self._configuration.host + '/responseBody/postMaxlengthValidationResponseBodyForContentTypes',
                method='post'.upper(),
                accept_content_type=accept_content_type,
            )
            assert isinstance(api_response.response, urllib3.HTTPResponse)
            assert isinstance(api_response.body, post.SchemaFor200ResponseBodyApplicationJson)
            deserialized_response_body = post.SchemaFor200ResponseBodyApplicationJson._from_openapi_data(
                payload,
                _configuration=self._configuration
            )
            assert api_response.body == deserialized_response_body


if __name__ == '__main__':
    unittest.main()
38801dbfe2808511d05323af89e49be9254d06bd
40b5c4a77be465b47fe6fd7ff408db957261cc7f
/python-spake2-0.7/setup.py
c6365b7a7fcc9b95f11fa6dfb09513fabbc2ab8f
[ "MIT" ]
permissive
warner/spake2-interop-server
7c1f0502a93615d2e2b5b7a323731a7e20040f86
b3f2ae42971e4217d9f503bb672b2d9288225acc
refs/heads/master
2021-01-25T11:27:45.696023
2017-06-10T22:15:15
2017-06-10T22:15:15
93,924,508
0
0
null
null
null
null
UTF-8
Python
false
false
913
py
"A server that performs SPAKE2 operations, for interoperability testing." # Install this, then run "twist spake2_interop" and hit http://HOST:8705/ from setuptools import setup import versioneer setup( name="spake2-interop-python-spake2-0.7", version=versioneer.get_version(), author="Brian Warner", author_email="[email protected]", package_dir={"": "src"}, # this must be installed into its own virtualenv (e.g. spake2-0.7 can't # share a venv with spake2-0.3), so we don't need a version-specific # package name, and keeping it neutral will minimize the diff packages=["spake2_interop_python"], license="MIT", cmdclass=versioneer.get_cmdclass(), install_requires=[ "spake2==0.7", ], entry_points={ "console_scripts": [ "spake2_interop_python_0_7 = spake2_interop_python:run", ] }, )
9fa28da8427b89b3d954bdd756fd2ebcba4686a1
83048ab1abb6941ed0b19fb5e5ff4a9d14b48e8c
/CODEFORCES/two_teams.py
7d256d350d953f576fe903bf8811b7a18f57716a
[]
no_license
harshitalpha/Algorithms
ebad07cc77516ab5c35ae414462d10a38d5ef97e
2f7dcf4c3bb4390267231c7c96f7e76399c0166e
refs/heads/master
2021-07-14T17:34:02.546583
2020-06-25T06:38:39
2020-06-25T06:38:39
178,813,562
2
0
null
null
null
null
UTF-8
Python
false
false
693
py
t = int(input())
while t:
    t = t - 1
    n = int(input())
    a = [int(s) for s in input().split()]
    d = {}
    for i in a:
        try:
            d[i] += 1
        except KeyError:
            d[i] = 1
    max_size = d[a[0]]
    ele_in_sec_arr = a[0]
    for i in d.keys():
        if d[i] > max_size:
            max_size = d[i]
            ele_in_sec_arr = i
    count = 0
    for i in d.keys():
        if i != ele_in_sec_arr:  # was `is not`, which compares identity, not value
            count = count + 1
    if count == max_size:
        print(max_size)
    elif count == max_size - 1:
        print(count)
    elif count <= max_size - 2:
        print(count + 1)
    elif count > max_size:
        print(max_size)
b67ec65da5b89ee26ecfac71462afdedf4ad07d3
a72f39b82966cd6e2a3673851433ce7db550429a
/configs/_base_/models/lxmert/lxmert_vqa_config.py
781219951d7cd25d348c58e16f382837a1dcbeaf
[ "Apache-2.0" ]
permissive
linxi1158/iMIX
85841d6b95e1d99ed421a1ac3667658e49cae6fc
af87a17275f02c94932bb2e29f132a84db812002
refs/heads/master
2023-06-09T23:37:46.534031
2021-06-30T12:09:42
2021-06-30T12:09:42
381,608,650
0
0
Apache-2.0
2021-06-30T07:08:40
2021-06-30T07:08:39
null
UTF-8
Python
false
false
1,753
py
# model settings
model = dict(
    type='LXMERT',
    params=dict(
        random_initialize=False,
        num_labels=3129,
        # BertConfig
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        # mode='lxr',
        l_layers=9,  # 12
        x_layers=5,  # 5
        r_layers=5,  # 0
        visual_feat_dim=2048,
        visual_pos_dim=4,
        freeze_base=False,
        max_seq_length=20,
        model='bert',
        training_head_type='vqa2',
        bert_model_name='bert-base-uncased',
        pretrained_path='/home/datasets/mix_data/iMIX/data/models/model_LXRT.pth',
        label2ans_path='/home/datasets/mix_data/lxmert/vqa/trainval_label2ans.json',
    ))

loss = dict(type='LogitBinaryCrossEntropy')

optimizer = dict(
    type='BertAdam',
    lr=5e-5,
    weight_decay=0.01,
    eps=1e-6,
    betas=[0.9, 0.999],
    max_grad_norm=-1,
    training_encoder_lr_multiply=1,
)
optimizer_config = dict(grad_clip=dict(max_norm=5))

'''
fp16 = dict(
    init_scale=2.**16,
    growth_factor=2.0,
    backoff_factor=0.5,
    growth_interval=2000,
)
'''

lr_config = dict(
    warmup=0.1,
    warmup_method='warmup_linear',
    # max_iters=55472,  # ceil(total 443753 / batch size 32) * epoch size; datasets: train
    max_iters=79012,  # floor(total 632117 / batch size 32) * epoch size; datasets: train, nominival
    policy='BertWarmupLinearLR')

total_epochs = 4
seed = 9595
8626edcebc5d57619798aec921223388d499ef0b
f77327128a8da9702ae3443e2171bc7485ceb915
/cadence/items.py
08b1b50b16175e30c34717833ce6af94ae712ed4
[]
no_license
SimeonYS/cadence
0eeba6a54c03ffb2d55466f9d8de6f1b1662002f
cdaef13c85a03e031a0050c89c17249cd7d83125
refs/heads/main
2023-03-31T08:24:41.408507
2021-03-31T10:14:01
2021-03-31T10:14:01
353,312,955
0
0
null
null
null
null
UTF-8
Python
false
false
156
py
import scrapy


class CadenceItem(scrapy.Item):
    title = scrapy.Field()
    content = scrapy.Field()
    date = scrapy.Field()
    link = scrapy.Field()
17602e35cc61bc0f7fc211873d8f4e8f3498781a
4ca44b7bdb470fcbbd60c2868706dbd42b1984c9
/20.12.14/BOJ_20056.py
d156e0cef3cfd5583ae7fcf2a95e7de4fd8f8efa
[]
no_license
titiman1013/Algorithm
3b3d14b3e2f0cbc4859029eb73ad959ec8778629
8a67e36931c42422779a4c90859b665ee468255b
refs/heads/master
2023-06-29T17:04:40.015311
2021-07-06T01:37:29
2021-07-06T01:37:29
242,510,483
2
0
null
null
null
null
UTF-8
Python
false
false
3,827
py
import sys; sys.stdin = open('text1.txt', 'r')

# solve test1
# dx = [-1, -1, 0, 1, 1, 1, 0, -1]
# dy = [0, 1, 1, 1, 0, -1, -1, -1]

# def move_fireball(x, y, m, s, d):
#     for _ in range(s):
#         nx = x + dx[d]
#         ny = y + dy[d]
#         if 0 <= nx < N and 0 <= ny < N:
#             x, y = nx, ny
#         else:
#             if nx < 0 or nx >= N:
#                 if nx < 0:
#                     x = N + nx
#                 else:
#                     x = nx - N
#             if ny < 0 or nx >= N:
#                 if ny < 0:
#                     y = N + ny
#                 else:
#                     y = ny - N
#     if bool(arr[x][y]):
#         arr[x][y].append([x, y, m, s, d])
#     else:
#         arr[x][y] = [[x, y, m, s, d]]
#     return

# def sum_fireball(sum_list):
#     list_cnt = len(sum_list)
#     m = 0
#     s = 0
#     d = []
#     for idx in range(list_cnt):
#         m += sum_list[idx][2]
#         s += sum_list[idx][3]
#         if d % 2:
#             d.append(1)
#         else:
#             d.append(0)
#     m = m // 5
#     if m == 0:
#         return [0]
#     s = s // list_cnt
#     d_check = True
#     temp_d = d[0]
#     for i in range(1, len(d)):
#         if d[i] != temp_d:
#             d_check = False
#             break
#     if d_check == True:
#         d = [0, 2, 4, 6]
#     else:
#         d = [1, 3, 5, 7]
#     temp_list = []
#     for i in range(4):
#         temp_list.append([sum_list[0], sum_list[1], m, s, d[i]])
#     return temp_list

# # directions
# # adjacent cells, clockwise starting from 12 o'clock
# # 7 0 1
# # 6   2
# # 5 4 3

# for tc in range(1, int(input()) + 1):
#     N, M, K = map(int, input().split())
#     # [r, c, m, s, d]
#     items = [list(map(int, input().split())) for _ in range(M)]
#     arr = [[0] * N for _ in range(N)]
#     if K > 0:
#         # first pass
#         for item in items:
#             move_fireball(item[0] - 1, item[1] - 1, item[2], item[3], item[4])
#     print(arr)
#     move_cnt = 1
#     while move_cnt <= K:
#         # move
#         for i in range(N):
#             for j in range(N):
#                 if bool(arr[i][j]):
#                     if len(arr[i][j]) >= 2:
#                         temp_list = arr[i][j][0]
#                         arr[i][j] = 0
#                         for k in range(len(temp_list)):
#                             move_fireball(temp_list[k][0], temp_list[k][1], temp_list[k][2], temp_list[k][3], temp_list[k][4])
#                     else:
#                         temp_list = arr[i][j][0]
#                         arr[i][j] = 0
#                         print(arr)
#                         move_fireball(temp_list[0], temp_list[1], temp_list[2], temp_list[3], temp_list[4])
#         # merge
#         for i in range(N):
#             for j in range(N):
#                 if len(arr[i][j]) >= 2:
#                     arr[i][j] = sum_fireball(arr[i][j])
#         move_cnt += 1
#     res = 0
#     for i in range(N):
#         for j in range(N):
#             if bool(arr[i][j]):
#                 if len(arr[i][j]) >= 2:
#                     for k in range(len(arr[i][j])):
#                         res += arr[i][j][k][2]
#                 else:
#                     res += arr[i][j][0][2]
#     print(f'#{tc} {res}')

# solve test2
from collections import deque

for tc in range(1, int(input()) + 1):
    N, M, K = map(int, input().split())
    # [r, c, m, s, d]
    items = [list(map(int, input().split())) for _ in range(M)]
    arr = [[0] * N for _ in range(N)]
    q = deque()
    for item in items:
        q.append(item)
    for _ in range(K):
        while q:
16f4f84a799fbad2d4951affd28a3893ee356839
a667b52cb8d2ec857c55d33f04fc0e81d36dc681
/options/data/mc/pipipi0_DecProdCut_PHSP_2012_MC_2012_Beam4000GeV-2012-MagUp-Nu2.5-Pythia8_Sim08e_Digi13_Trig0x409f0045_Reco14a_Stripping20r0p2NoPrescalingFlagged_27163403_ALLSTREAMS.DST.py
befddb5e34f37022361b1b2ddd67efe8ea3fa6bd
[]
no_license
wenyanyin/CP_violation_simulation
639d73333a3795654275cb43cc7dad7c742d1be1
7b93b2fe1050fb30d0b809b758cd5a3b2824b875
refs/heads/master
2022-04-29T14:19:23.744004
2022-04-01T13:05:18
2022-04-01T13:05:18
168,570,282
0
0
null
null
null
null
UTF-8
Python
false
false
3,745
py
# lb-run LHCbDirac/prod dirac-bookkeeping-get-files -B /MC/2012/Beam4000GeV-2012-MagUp-Nu2.5-Pythia8/Sim08e/Digi13/Trig0x409f0045/Reco14a/Stripping20r0p2NoPrescalingFlagged/27163403/ALLSTREAMS.DST

from Gaudi.Configuration import *
from GaudiConf import IOHelper

IOHelper('ROOT').inputFiles([
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000001_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000003_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000004_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000005_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000006_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000008_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000009_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000011_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000012_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000013_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000014_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000015_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000016_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000017_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000018_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000019_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000020_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000021_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000022_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000024_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000025_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000027_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000028_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000029_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000030_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000033_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000034_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000036_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000037_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000039_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000040_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000042_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000044_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000046_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000048_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000049_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000050_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000052_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000053_2.AllStreams.dst',
    'LFN:/lhcb/MC/2012/ALLSTREAMS.DST/00034127/0000/00034127_00000054_2.AllStreams.dst',
], clear=True)
f6aff156beb68f479d76392ed5097e84546ed4e6
764a157c1ef369664144a112f390165809c37861
/apps/app/views.py
75a6c7124d899b9dae8673fed292fa32dbe61aff
[]
no_license
Maliaotw/gogoweb
b044678b0a34c2748267c8f8ac1f6af91d42bcd0
aad84f11163e62716a239972436eb92e7cc601d0
refs/heads/main
2023-07-07T19:46:37.470811
2021-08-31T15:01:58
2021-08-31T15:01:58
341,254,107
0
0
null
null
null
null
UTF-8
Python
false
false
667
py
from django.shortcuts import render

from apps.app import models
from apps.app import serializers
from rest_framework import viewsets
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.views import APIView, Response
from rest_framework import status

# Create your views here.


class TaskModelViewSet(viewsets.ModelViewSet):
    queryset = models.Task.objects.all()
    serializer_class = serializers.TaskSerializer
    pagination_class = LimitOffsetPagination

    def destroy(self, request, *args, **kwargs):
        instance = self.get_object()
        self.perform_destroy(instance)
        return Response(status=status.HTTP_200_OK)
50482a45f14d167f9dd6e9fc7d00d93c3fcaad60
5b93930ce8280b3cbc7d6b955df0bfc5504ee99c
/nodes/Geron17Hands/B_PartI/H_Chapter8/C_PCA/D_UsingScikitLearn/index.py
f3808ea9cd7e1c90a935491935c7d8dd01be2ef0
[]
no_license
nimra/module_gen
8749c8d29beb700cac57132232861eba4eb82331
2e0a4452548af4fefd4cb30ab9d08d7662122cf4
refs/heads/master
2022-03-04T09:35:12.443651
2019-10-26T04:40:49
2019-10-26T04:40:49
213,980,247
0
1
null
null
null
null
UTF-8
Python
false
false
3,414
py
# Lawrence McAfee

# ~~~~~~~~ import ~~~~~~~~
from modules.node.HierNode import HierNode
from modules.node.LeafNode import LeafNode
from modules.node.Stage import Stage
from modules.node.block.CodeBlock import CodeBlock as cbk
from modules.node.block.HierBlock import HierBlock as hbk
from modules.node.block.ImageBlock import ImageBlock as ibk
from modules.node.block.ListBlock import ListBlock as lbk
from modules.node.block.MarkdownBlock import MarkdownBlock as mbk

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
blocks = [
    # Download from finelybook www.finelybook.com
    # ing the first d principal components (i.e., the matrix composed of the first d columns
    # of VT), as shown in Equation 8-2.
    #
    # Equation 8-2. Projecting the training set down to d dimensions
    #     X_d-proj = X . W_d
    #
    # The following Python code projects the training set onto the plane defined by the first
    # two principal components:
    #     W2 = V.T[:, :2]
    #     X2D = X_centered.dot(W2)
    # There you have it! You now know how to reduce the dimensionality of any dataset
    # down to any number of dimensions, while preserving as much variance as possible.
    #
    # Using Scikit-Learn
    # Scikit-Learn's PCA class implements PCA using SVD decomposition just like we did
    # before. The following code applies PCA to reduce the dimensionality of the dataset
    # down to two dimensions (note that it automatically takes care of centering the data):
    #     from sklearn.decomposition import PCA
    #
    #     pca = PCA(n_components = 2)
    #     X2D = pca.fit_transform(X)
    #
    # After fitting the PCA transformer to the dataset, you can access the principal
    # components using the components_ variable (note that it contains the PCs as horizontal
    # vectors, so, for example, the first principal component is equal to
    # pca.components_.T[:, 0]).
    #
    # Explained Variance Ratio
    # Another very useful piece of information is the explained variance ratio of each
    # principal component, available via the explained_variance_ratio_ variable. It indicates
    # the proportion of the dataset's variance that lies along the axis of each principal
    # component. For example, let's look at the explained variance ratios of the first two
    # components of the 3D dataset represented in Figure 8-2:
    #     >>> print(pca.explained_variance_ratio_)
    #     array([ 0.84248607,  0.14631839])
    # This tells you that 84.2% of the dataset's variance lies along the first axis, and 14.6%
    # lies along the second axis. This leaves less than 1.2% for the third axis, so it is
    # reasonable to assume that it probably carries little information.
    #
    # 214 | Chapter 8: Dimensionality Reduction
]

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Content(LeafNode):
    def __init__(self):
        super().__init__(
            "Using Scikit-Learn",
            # Stage.REMOVE_EXTRANEOUS,
            # Stage.ORIG_BLOCKS,
            # Stage.CUSTOM_BLOCKS,
            # Stage.ORIG_FIGURES,
            # Stage.CUSTOM_FIGURES,
            # Stage.CUSTOM_EXERCISES,
        )
        [self.add(a) for a in blocks]

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class UsingScikitLearn(HierNode):
    def __init__(self):
        super().__init__("Using Scikit-Learn")
        self.add(Content(), "content")

# eof
cb50337db2d8006a698aab101b52e25241b61b67
292437b85108504a7ca91571f26a639a313501b6
/venv2/lib/python2.7/site-packages/keystoneclient/auth/identity/generic/token.py
6a5d15b281e8931b3199251c3a6ea2c8f77eef3e
[]
no_license
heekof/monitoring-agent
c86bebcf77091490df7a6b8c881b85fdb2b9e4eb
b1c079efdf2dabe854f2aa3d96f36d2ec7021070
refs/heads/master
2021-01-15T15:39:01.512801
2016-08-31T20:53:38
2016-08-31T20:53:38
58,620,098
1
0
null
null
null
null
UTF-8
Python
false
false
1,656
py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from oslo_config import cfg

from keystoneclient import _discover
from keystoneclient.auth.identity.generic import base
from keystoneclient.auth.identity import v2
from keystoneclient.auth.identity import v3

LOG = logging.getLogger(__name__)


def get_options():
    return [
        cfg.StrOpt('token', secret=True, help='Token to authenticate with'),
    ]


class Token(base.BaseGenericPlugin):
    """Generic token auth plugin.

    :param string token: Token for authentication.
    """

    def __init__(self, auth_url, token=None, **kwargs):
        super(Token, self).__init__(auth_url, **kwargs)
        self._token = token

    def create_plugin(self, session, version, url, raw_status=None):
        if _discover.version_match((2,), version):
            return v2.Token(url, self._token, **self._v2_params)

        elif _discover.version_match((3,), version):
            return v3.Token(url, self._token, **self._v3_params)

    @classmethod
    def get_options(cls):
        options = super(Token, cls).get_options()
        options.extend(get_options())
        return options
64a694d6c95f4ea237880b1e4abbce5a36e03343
a8b37bd399dd0bad27d3abd386ace85a6b70ef28
/airbyte-integrations/connectors/destination-weaviate/destination_weaviate/client.py
3ba83b2a4a53a92af1f8413bd85c69ca41b056c9
[ "MIT", "LicenseRef-scancode-free-unknown", "Elastic-2.0" ]
permissive
thomas-vl/airbyte
5da2ba9d189ba0b202feb952cadfb550c5050871
258a8eb683634a9f9b7821c9a92d1b70c5389a10
refs/heads/master
2023-09-01T17:49:23.761569
2023-08-25T13:13:11
2023-08-25T13:13:11
327,604,451
1
0
MIT
2021-01-07T12:24:20
2021-01-07T12:24:19
null
UTF-8
Python
false
false
6,094
py
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#

import json
import logging
import time
import uuid
from dataclasses import dataclass
from typing import Any, List, Mapping, MutableMapping

import weaviate

from .utils import generate_id, parse_id_schema, parse_vectors, stream_to_class_name


@dataclass
class BufferedObject:
    id: str
    properties: Mapping[str, Any]
    vector: List[Any]
    class_name: str


class WeaviatePartialBatchError(Exception):
    pass


class Client:
    def __init__(self, config: Mapping[str, Any], schema: Mapping[str, str]):
        self.client = self.get_weaviate_client(config)
        self.config = config
        self.batch_size = int(config.get("batch_size", 100))
        self.schema = schema
        self.vectors = parse_vectors(config.get("vectors"))
        self.id_schema = parse_id_schema(config.get("id_schema"))
        self.buffered_objects: MutableMapping[str, BufferedObject] = {}
        self.objects_with_error: MutableMapping[str, BufferedObject] = {}

    def buffered_write_operation(self, stream_name: str, record: MutableMapping):
        if self.id_schema.get(stream_name, "") in record:
            id_field_name = self.id_schema.get(stream_name, "")
            record_id = generate_id(record.get(id_field_name))
            del record[id_field_name]
        else:
            if "id" in record:
                record_id = generate_id(record.get("id"))
                del record["id"]
            # Weaviate will throw an error if you try to store a field with name _id
            elif "_id" in record:
                record_id = generate_id(record.get("_id"))
                del record["_id"]
            else:
                record_id = uuid.uuid4()
        record_id = str(record_id)

        # TODO support nested objects instead of converting to json string when weaviate supports this
        for k, v in record.items():
            if self.schema[stream_name].get(k, "") == "jsonify":
                record[k] = json.dumps(v)
            # Handling of empty list that's not part of defined schema otherwise Weaviate throws invalid string property
            if isinstance(v, list) and len(v) == 0 and k not in self.schema[stream_name]:
                record[k] = ""

        missing_properties = set(self.schema[stream_name].keys()).difference(record.keys()).discard("id")
        for prop in missing_properties or []:
            record[prop] = None

        additional_props = set(record.keys()).difference(self.schema[stream_name].keys())
        for prop in additional_props or []:
            if isinstance(record[prop], dict):
                record[prop] = json.dumps(record[prop])
            if isinstance(record[prop], list) and len(record[prop]) > 0 and isinstance(record[prop][0], dict):
                record[prop] = json.dumps(record[prop])

        # Property names in Weaviate have to start with lowercase letter
        record = {k[0].lower() + k[1:]: v for k, v in record.items()}
        vector = None
        if stream_name in self.vectors:
            vector_column_name = self.vectors.get(stream_name)
            vector = record.get(vector_column_name)
            del record[vector_column_name]
        class_name = stream_to_class_name(stream_name)
        self.client.batch.add_data_object(record, class_name, record_id, vector=vector)
        self.buffered_objects[record_id] = BufferedObject(record_id, record, vector, class_name)
        if self.client.batch.num_objects() >= self.batch_size:
            self.flush()

    def flush(self, retries: int = 3):
        if len(self.objects_with_error) > 0 and retries == 0:
            error_msg = f"Objects had errors and retries failed as well. Object IDs: {self.objects_with_error.keys()}"
            raise WeaviatePartialBatchError(error_msg)

        results = self.client.batch.create_objects()
        self.objects_with_error.clear()
        for result in results:
            errors = result.get("result", {}).get("errors", [])
            if errors:
                obj_id = result.get("id")
                self.objects_with_error[obj_id] = self.buffered_objects.get(obj_id)
                logging.info(f"Object {obj_id} had errors: {errors}. Going to retry.")

        for buffered_object in self.objects_with_error.values():
            self.client.batch.add_data_object(
                buffered_object.properties, buffered_object.class_name, buffered_object.id, buffered_object.vector
            )

        if len(self.objects_with_error) > 0 and retries > 0:
            logging.info("sleeping 2 seconds before retrying batch again")
            time.sleep(2)
            self.flush(retries - 1)

        self.buffered_objects.clear()

    def delete_stream_entries(self, stream_name: str):
        class_name = stream_to_class_name(stream_name)
        try:
            original_schema = self.client.schema.get(class_name=class_name)
            self.client.schema.delete_class(class_name=class_name)
            logging.info(f"Deleted class {class_name}")
            self.client.schema.create_class(original_schema)
            logging.info(f"Recreated class {class_name}")
        except weaviate.exceptions.UnexpectedStatusCodeException as e:
            if e.message.startswith("Get schema! Unexpected status code: 404"):
                logging.info(f"Class {class_name} did not exist.")
            else:
                raise e

    @staticmethod
    def get_weaviate_client(config: Mapping[str, Any]) -> weaviate.Client:
        url, username, password = config.get("url"), config.get("username"), config.get("password")

        if username and not password:
            raise Exception("Password is required when username is set")
        if password and not username:
            raise Exception("Username is required when password is set")

        if username and password:
            credentials = weaviate.auth.AuthClientPassword(username, password)
            return weaviate.Client(url=url, auth_client_secret=credentials)
        return weaviate.Client(url=url, timeout_config=(2, 2))
3e95f067fba14f5bd1ebdb04147f9f4ed532c262
781e2692049e87a4256320c76e82a19be257a05d
/all_data/exercism_data/python/bob/60d6f99657e4479ab9beda33d53f774e.py
3a8da1104c746329450661a00dc2b7bf64a87b09
[]
no_license
itsolutionscorp/AutoStyle-Clustering
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
refs/heads/master
2020-12-11T07:27:19.291038
2016-03-16T03:18:00
2016-03-16T03:18:42
59,454,921
4
0
null
2016-05-23T05:40:56
2016-05-23T05:40:56
null
UTF-8
Python
false
false
227
py
#test
def hey(string):
    if string.isupper():
        return 'Whoa, chill out!'
    elif len(string) > 0 and string[-1] == '?':
        return 'Sure.'
    elif len(string.strip()) == 0:
        return 'Fine. Be that way!'
    else:
        return 'Whatever.'
bf64c9862aa6fd295ce0cc911835562fde0bac8f
55fc41d645e2f2cb4e94eaeb01c21a8f36b522e3
/data_processing/split_dataset.py
b6b410c63593ed0c4727101c19b45e3069e4d2bb
[]
no_license
andreiqv/pytorch_scale_classifier
6c4515127ee9ad182242cc429326ed99984c2398
9448690ab0a2c5e9ec4c235ff85360be22572949
refs/heads/master
2020-04-04T17:34:27.169290
2018-11-08T09:24:35
2018-11-08T09:24:35
156,126,083
0
0
null
null
null
null
UTF-8
Python
false
false
2,137
py
import os
import sys
import random

if os.path.exists('.local'):
    src_dir = '/w/WORK/ineru/06_scales/_dataset/copy/'
    dst_dir = '/w/WORK/ineru/06_scales/_dataset/splited/'
else:
    src_dir = '/home/andrei/Data/Datasets/Scales/classifier_dataset_181018/'
    dst_dir = '/home/andrei/Data/Datasets/Scales/splited/'

parts = ['train', 'valid', 'test']


def copy_files_to_subdirs(src_dir, dst_dir, parts, ratio=[1,1,1]):
    src_dir = src_dir.rstrip('/')
    dst_dir = dst_dir.rstrip('/')
    os.system('mkdir -p {}'.format(dst_dir))
    for p in parts:
        os.system('mkdir -p {}'.format(dst_dir + '/' + p))

    subdirs = os.listdir(src_dir)

    for class_name in subdirs:
        subdir = src_dir + '/' + class_name
        if not os.path.isdir(subdir):
            continue
        file_names = os.listdir(subdir)
        if len(file_names) == 0:
            print('{0} - empty subdir'.format(class_name))
            continue

        # calculate train, valid and test sizes
        num_files = len(file_names)
        num_valid = num_files * ratio[1] // sum(ratio)
        num_test = num_files * ratio[2] // sum(ratio)
        num_train = num_files - num_valid - num_test

        min_num_train = 0  # if 0, then do nothing
        if min_num_train > 0:
            if num_train < min_num_train:
                (num_train, num_valid, num_test) = (num_files, 0, 0)

        # SHUFFLE OR SORT
        random.shuffle(file_names)
        #file_names.sort()

        files = dict()
        files['train'] = file_names[ : num_train]
        files['valid'] = file_names[num_train : num_train + num_valid]
        files['test'] = file_names[num_train + num_valid : ]

        print('[{}] - {} - {}:{}:{}'.\
            format(class_name, num_files, num_train, num_valid, num_test))
        #print('train:valid:test = ', len(files['train']),\
        #    len(files['valid']), len(files['test']))

        for part in parts:
            cmd = 'mkdir -p {}'.format(dst_dir + '/' + part + '/' + class_name)
            os.system(cmd)
            #print(cmd)

            for file_name in files[part]:
                src_path = subdir + '/' + file_name
                dst_path = dst_dir + '/' + part + '/' + class_name + '/' + file_name
                cmd = 'cp {} {}'.format(src_path, dst_path)
                os.system(cmd)
                #print(cmd)


if __name__ == '__main__':
    copy_files_to_subdirs(src_dir, dst_dir, parts, ratio=[16,3,1])
455d14cf9f53cdf563bf65094e78b103076f2743
7922714a4fd81acd2dac3875d2dd75a2bf24ef5e
/handlers/inlines/search.py
57caa66a47de26c22dcbb842b488ae9e5bcde09f
[ "MIT" ]
permissive
hexatester/ut-telegram-bot
32bf9a20ffaf82a5b6f1420d6bb041249ff93d6c
20f6f063726913cb6d21e42538103e3498b929a7
refs/heads/master
2023-01-20T06:50:30.941786
2020-11-18T08:31:03
2020-11-18T08:31:03
290,542,370
0
0
MIT
2020-09-16T03:09:47
2020-08-26T16:02:02
Python
UTF-8
Python
false
false
1,141
py
from telegram import Update, InlineQuery, InlineQueryResult
from telegram.ext import CallbackContext
from typing import List

from core.utils.inline_query import article
from libs.rss.rss import Rss
from libs.search.search import Search

RSS = Rss()
SEARCH = Search()
EMPTY = article(
    title="❌ Tidak ada hasil",
    description="",
    message_text="Pm @UniversitasTerbukaBot untuk mengakses layanan UT. 😁",
)


def search(update: Update, context: CallbackContext):
    inline_query: InlineQuery = update.inline_query
    query = inline_query.query
    results_list: List[InlineQueryResult] = []
    if len(query) > 0:
        results_list.extend(SEARCH(query))
        results_list.extend(RSS(query))
    if not results_list:
        if RSS.inline_results:
            results_list.extend(RSS.inline_results)
        else:
            results_list.append(EMPTY)
            inline_query.answer(
                results_list, switch_pm_text="Bantuan", switch_pm_parameter="inline-help"
            )
            return -1
    inline_query.answer(
        results_list, switch_pm_text="Bantuan", switch_pm_parameter="inline-help"
    )
    return -1
8f8c577a98fec3fb5d6a1d25c2d0f8350c64abb4
163bbb4e0920dedd5941e3edfb2d8706ba75627d
/Code/CodeRecords/2311/60829/306892.py
9a645be64e8410c702a8a8169df2d59fed3ed6d4
[]
no_license
AdamZhouSE/pythonHomework
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
ffc5606817a666aa6241cfab27364326f5c066ff
refs/heads/master
2022-11-24T08:05:22.122011
2020-07-28T16:21:24
2020-07-28T16:21:24
259,576,640
2
1
null
null
null
null
UTF-8
Python
false
false
358
py
def dl(x):
    res=""
    for i in range(len(x)):
        if not x[len(x)-1-i]==" ":
            break
    res=x[0:i+1]
    return res


a=[]
b=[int(x) for x in dl(input()).split(" ")]
c=[int(x) for x in dl(input()).split(" ")]
a.append(b)
a.append(c)
aa=[[[10], [8]]]
bb=["0 4 0 20 0 12 0 "]
for i in range(len(aa)):
    if aa[i]==a:
        a=bb[i]
print(a)
c7f523807f996cae2f07692c4918cebcb18a824f
b37fdefb01d7b93a4f56a7c7cc60f9f78549de4c
/DI_Bootcamp/Week_9/Day_1/Exercise_XP/film_project_root/account_app/views.py
15d82374b6660e2d3071afe8839fff8d9102006d
[]
no_license
AchimGoral/DI_Bootcamp
e7b13d7397ab5c9e5ad8041430c8bfbafec13c88
9345731503e2bb298bd3a579ffad590350f13df5
refs/heads/main
2023-04-18T20:06:45.631067
2021-05-01T08:08:45
2021-05-01T08:08:45
328,769,128
0
1
null
2021-01-27T14:30:09
2021-01-11T19:24:48
HTML
UTF-8
Python
false
false
2,040
py
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from django.contrib import messages

from .models import *
from .forms import *


def sign_up(request):
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            # Stay logged in after signing up
            user = authenticate(username=form.cleaned_data['username'],
                                password=form.cleaned_data['password1'],)
            login(request, user)
            return redirect('homepage')
    else:
        form = RegistrationForm()
    return render(request, 'sign_up.html', {'form': form})


def login_view(request):
    if request.method == "GET":
        my_form = LoginForm()
        return render(request, 'login.html', {'my_form': my_form})
    if request.method == "POST":
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            return redirect('homepage')
        else:
            messages.error(request, 'Username and/or password incorrect. Please try again')
            return redirect('login')


def logout_view(request):
    logout(request)
    return redirect ('homepage')


def profile(request, pk):
    my_profile = User.objects.get(id=pk)
    return render(request, 'profile.html', {'my_profile': my_profile})


def profile_edit(request):
    if request.method == "GET":
        user_form = UserChange()
        return render(request, 'edit_user.html', {'user_form': user_form})
    if request.method == "POST":
        user_form = UserChange(request.POST, instance = request.user)
        if user_form.is_valid():
            user_form.save()
            return redirect('homepage')
        else:
            user_form = UserChange()
            return render(request, 'edit_user.html', {'user_form': user_form})
4f445597c5ac30039c0f3c3333dae8b68184c0c5
9c862bb7f9ac093a9bcf17d9060389dbbb8b655b
/examples/instrumentation/19_show_window_tree.py
3f1c33ed09182131494b8863549ee7626b2aad1c
[]
no_license
fabioz/winappdbg
24917ce29a90a08e890e8cd7d44feaad22daf0c4
1603870dc3fa3d2984ef23b6d77e400fb0a21b99
refs/heads/master
2023-08-02T07:40:10.496090
2020-04-22T15:18:42
2020-04-22T15:18:42
23,669,656
2
0
null
null
null
null
UTF-8
Python
false
false
2,702
py
#!~/.wine/drive_c/Python25/python.exe
# -*- coding: utf-8 -*-

# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#     * Redistributions of source code must retain the above copyright notice,
#       this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice,this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the copyright holder nor the names of its
#       contributors may be used to endorse or promote products derived from
#       this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

# $Id$

from winappdbg import System, HexDump


def show_window_tree( window, indent = 0 ):

    # Show this window's handle and caption.
    # Use some ASCII art to show the layout. :)
    handle  = HexDump.integer( window.get_handle() )
    caption = window.get_text()
    line = ""
    if indent > 0:
        print "| " * indent
        line  = "| " * (indent - 1) + "|---"
    else:
        print "|"
    if caption is not None:
        line += handle + ": " + caption
    else:
        line += handle
    print line

    # Recursively show the child windows.
    for child in window.get_children():
        show_window_tree( child, indent + 1 )


def main():

    # Create a system snaphot.
    system = System()

    # Get the Desktop window.
    root = system.get_desktop_window()

    # Now show the window tree.
    show_window_tree(root)

    # You can also ge the tree as a Python dictionary:
    # tree = root.get_tree()
    # print tree


if __name__ == '__main__':
    main()
dfd2e20af52b997ca2c30f3e5abe74095b8ad76b
e5d5fa28999bcc6c642bb42dda93afd38e272b81
/UVA/531 - Compromise/generate.py
6dee0d625c29b89113c0412c8f3c2aec4602f471
[]
no_license
chiahsun/problem_solving
cd3105969983d16d3d5d416d4a0d5797d4b58e91
559fafa92dd5516058bdcea82a438eadf5aa1ede
refs/heads/master
2023-02-05T06:11:27.536617
2023-01-26T10:51:23
2023-01-26T10:51:23
30,732,382
3
0
null
null
null
null
UTF-8
Python
false
false
221
py
import random;
import string;

random.seed(0);


def generate_seq(n_char = 100):
    for _ in range(n_char):
        print(random.choice(string.ascii_lowercase), end=' ');
    print('\n#');


generate_seq();
generate_seq();
d1eac20d6ecb9450cba8f91e1a7e1d4e1e5741a0
a8933adda6b90ca158096009165bf27b74a2733d
/auroracallback/index.py
8e612d31c3268553e12c1b19be4ad251306e88d6
[]
no_license
knighton/aurora-callback
6c40db9c271b782ca8c14119b8937e3656980a36
26efc9069fcd5d48ae55bca3b06e3adf3927164e
refs/heads/master
2020-12-18T11:53:16.516590
2020-01-21T15:05:41
2020-01-21T15:05:41
235,369,780
0
0
null
null
null
null
UTF-8
Python
false
false
465
py
_INDEX = """ <!DOCTYPE HTML> <head> <style type="text/css"> html, body, #image { width: 100%; height: 100%; } body { background: radial-gradient( circle at center, #000 0%, #002 50%, #004 65%, #408 75%, #824 85%, #f40 90%, #fb0 95%, white 100% ); } </style> </head> <body> <img id="image" src="/aurora.png"></img> </body> </html> """ def get_index(): return _INDEX
06240216f9210c8e6d145968274d7682c2efaa25
5364927a0f594958ef226cd8b42120e96a970beb
/detectors/countauditor.py
2ba6d830b4429d9f01dfd0aa9dab54dc2415fc0b
[]
no_license
psf/bpo-tracker-cpython
883dd13f557179ee2f16e38d4f38e53c7f257a4a
1a94f0977ca025d2baf45ef712ef87f394a59b25
refs/heads/master
2023-06-11T23:59:46.300683
2023-04-25T12:18:00
2023-04-25T12:18:00
276,213,165
24
10
null
2023-04-11T14:16:30
2020-06-30T21:32:40
Python
UTF-8
Python
false
false
507
py
def count_nosy_msg(db, cl, nodeid, newvalues):
    ''' Update the counts of messages and nosy users on issue edit'''
    if 'nosy' in newvalues:
        newvalues['nosy_count'] = len(set(newvalues['nosy']))
    if 'messages' in newvalues:
        newvalues['message_count'] = len(set(newvalues['messages']))


def init(db):
    # Should run after the creator and auto-assignee are added
    db.issue.audit('create', count_nosy_msg, priority=120)
    db.issue.audit('set', count_nosy_msg, priority=120)
[ "devnull@localhost" ]
devnull@localhost
00678ab8ff79facecf814370e31c6cd5fe27add6
55c250525bd7198ac905b1f2f86d16a44f73e03a
/Python/Flask/Book_evaluator/venv/Lib/encodings/latin_1.py
dc74012c5ec50ada8637c3b65596d11567dc8a16
[]
no_license
NateWeiler/Resources
213d18ba86f7cc9d845741b8571b9e2c2c6be916
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
refs/heads/master
2023-09-03T17:50:31.937137
2023-08-28T23:50:57
2023-08-28T23:50:57
267,368,545
2
1
null
2022-09-08T15:20:18
2020-05-27T16:18:17
null
UTF-8
Python
false
false
129
py
version https://git-lfs.github.com/spec/v1
oid sha256:b75503e532a27c636477396c855209ff5f3036536d2a4bede0a576c89382b60c
size 1264
d9ed45e757f36c4737c4f53b459548e973a94c38
042b3e6553dbd61b204bdbad25e05aaeba79dde8
/tests/ope/test_fqe.py
2a871634bfc6235fdfc70ba63e851fba1934a267
[ "MIT" ]
permissive
jkbjh/d3rlpy
822e51e1c5b4ef37795aa2be089ff5a7ff18af07
43f0ba7e420aba077d85c897a38207f0b3ca6d17
refs/heads/master
2023-03-20T06:36:55.424681
2021-03-17T14:17:40
2021-03-17T14:17:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,859
py
import pytest
import numpy as np

from unittest.mock import Mock

from d3rlpy.ope.fqe import FQE, DiscreteFQE
from d3rlpy.algos import DDPG, DQN
from tests.base_test import base_tester
from tests.algos.algo_test import algo_update_tester
from tests.algos.algo_test import DummyImpl


def ope_tester(ope, observation_shape, action_size=2):
    # dummy impl object
    impl = DummyImpl(observation_shape, action_size)

    base_tester(ope, impl, observation_shape, action_size)

    ope._algo.impl = impl
    ope.impl = impl

    # check save policy
    impl.save_policy = Mock()
    ope.save_policy("policy.pt", False)
    impl.save_policy.assert_called_with("policy.pt", False)

    # check predict
    x = np.random.random((2, 3)).tolist()
    ref_y = np.random.random((2, action_size)).tolist()
    impl.predict_best_action = Mock(return_value=ref_y)
    y = ope.predict(x)
    assert y == ref_y
    impl.predict_best_action.assert_called_with(x)

    # check predict_value
    action = np.random.random((2, action_size)).tolist()
    ref_value = np.random.random((2, 3)).tolist()
    impl.predict_value = Mock(return_value=ref_value)
    value = ope.predict_value(x, action)
    assert value == ref_value
    impl.predict_value.assert_called_with(x, action, False)

    # check sample_action
    impl.sample_action = Mock(return_value=ref_y)
    try:
        y = ope.sample_action(x)
        assert y == ref_y
        impl.sample_action.assert_called_with(x)
    except NotImplementedError:
        pass

    ope.impl = None
    ope._algo.impl = None


@pytest.mark.parametrize("observation_shape", [(100,), (4, 84, 84)])
@pytest.mark.parametrize("action_size", [2])
@pytest.mark.parametrize("q_func_factory", ["mean", "qr", "iqn", "fqf"])
@pytest.mark.parametrize("scaler", [None, "min_max"])
@pytest.mark.parametrize("action_scaler", [None, "min_max"])
def test_fqe(
    observation_shape, action_size, q_func_factory, scaler, action_scaler
):
    algo = DDPG()
    fqe = FQE(
        algo=algo,
        scaler=scaler,
        action_scaler=action_scaler,
        q_func_factory=q_func_factory,
    )
    ope_tester(fqe, observation_shape)
    algo.create_impl(observation_shape, action_size)
    algo_update_tester(fqe, observation_shape, action_size, discrete=False)


@pytest.mark.parametrize("observation_shape", [(100,), (4, 84, 84)])
@pytest.mark.parametrize("action_size", [2])
@pytest.mark.parametrize("q_func_factory", ["mean", "qr", "iqn", "fqf"])
@pytest.mark.parametrize("scaler", [None, "standard"])
def test_discrete_fqe(observation_shape, action_size, q_func_factory, scaler):
    algo = DQN()
    fqe = DiscreteFQE(algo=algo, scaler=scaler, q_func_factory=q_func_factory)
    ope_tester(fqe, observation_shape)
    algo.create_impl(observation_shape, action_size)
    algo_update_tester(fqe, observation_shape, action_size, discrete=True)
1f45f423f9b9c7a6771aa411b46fc92b4c8473ea
c4520d8327124e78a892ef5a75a38669f8cd7d92
/venv/bin/pip3.6
5de7730cde95e1872365e26e4f9afc03673e919d
[]
no_license
arsh9806/GW2019PA1
81d62d3d33cfe3bd9e23aff909dd529b91c17035
c3d12aed77d2810117ce741c48208edc2b6a1f34
refs/heads/master
2020-05-31T09:18:13.112929
2019-06-04T06:51:12
2019-06-04T06:51:12
190,209,074
2
0
null
2019-06-04T13:38:46
2019-06-04T13:38:46
null
UTF-8
Python
false
false
412
6
#!/Users/ishantkumar/PycharmProjects/GW2019PA1/venv/bin/python

# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.6'
__requires__ = 'pip==9.0.1'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.6')()
    )
05a8f71fc5e7b421ee098845806cc55f6460df06
9e204a5b1c5ff4ea3b115ff0559b5af803ab4d15
/086 Scramble String.py
24fb5940b4e91bad75604cd71f6ca376a0c51d99
[ "MIT" ]
permissive
Aminaba123/LeetCode
178ed1be0733cc7390f30e676eb47cc7f900c5b2
cbbd4a67ab342ada2421e13f82d660b1d47d4d20
refs/heads/master
2020-04-20T10:40:00.424279
2019-01-31T08:13:58
2019-01-31T08:13:58
168,795,374
1
0
MIT
2019-02-02T04:50:31
2019-02-02T04:50:30
null
UTF-8
Python
false
false
2,347
py
""" Given a string s1, we may represent it as a binary tree by partitioning it to two non-empty substrings recursively. Below is one possible representation of s1 = "great": great / \ gr eat / \ / \ g r e at / \ a t To scramble the string, we may choose any non-leaf node and swap its two children. For example, if we choose the node "gr" and swap its two children, it produces a scrambled string "rgeat". rgeat / \ rg eat / \ / \ r g e at / \ a t We say that "rgeat" is a scrambled string of "great". Similarly, if we continue to swap the children of nodes "eat" and "at", it produces a scrambled string "rgtae". rgtae / \ rg tae / \ / \ r g ta e / \ t a We say that "rgtae" is a scrambled string of "great". Given two strings s1 and s2 of the same length, determine if s2 is a scrambled string of s1. """ __author__ = 'Danyang' class Solution: def isScramble(self, s1, s2): """ dfs partition and compare Compare two trees constructed from the two strings respectively. Two trees are scramble of the other iff A's left/right subtree is the scramble of B's left/right subtree or A's left/right subtree is the scramble of B's right/left subtree. .....|... vs. .....|... or ...|..... vs. .....|... :param s1: :param s2: :return: boolean """ if len(s1)!=len(s2): return False chars = [0 for _ in xrange(26)] for char in s1: chars[ord(char)-ord('a')] += 1 for char in s2: chars[ord(char)-ord('a')] -= 1 # if filter(lambda x: x!=0, chars): # return False for val in chars: if val!=0: return False if len(s1)==1: return True for i in xrange(1, len(s1)): if self.isScramble(s1[:i], s2[:i]) and self.isScramble(s1[i:], s2[i:]) or \ self.isScramble(s1[:i], s2[-i:]) and self.isScramble(s1[i:], s2[:len(s2)-i]): return True return False if __name__=="__main__": assert Solution().isScramble("abc", "bca")==True
c37b5e7c091393c55c01af84e23f3f883de3ea13
7ae9081aff882476ad0caa687ca41796e2035f85
/planout/apps/accounts/migrations/0005_auto_20150301_1811.py
176b7b2a6ed4e66694227f5ede41151cde8c9ee6
[]
no_license
siolag161/planout
eb6b8720dfe0334d379c1040d607bb459a8e695a
f967db9636618906345132d006c2f9a597025a0f
refs/heads/master
2020-04-14T13:17:26.011810
2015-03-21T11:53:48
2015-03-21T11:53:48
32,376,449
1
0
null
null
null
null
UTF-8
Python
false
false
842
py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import django.utils.timezone
import core.fields


class Migration(migrations.Migration):

    dependencies = [
        ('accounts', '0004_basicuser_description'),
    ]

    operations = [
        migrations.AddField(
            model_name='basicuser',
            name='modified',
            field=core.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='basicuser',
            name='date_joined',
            field=core.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='date joined', editable=False),
            preserve_default=True,
        ),
    ]
deec298358d4449942d8f95f300d77c1da85a33b
1a3d6caf89e5b51a33627458ae7c0bbb00efdc1d
/src/gluonts/torch/model/deep_npts/__init__.py
e664774be4903e7274f0dcb979a150dd03d6169c
[ "Apache-2.0" ]
permissive
zoolhasson/gluon-ts
e9ff8e4ead4d040d9f8fa8e9db5f07473cb396ed
3dfc0af66b68e3971032a6bd0f75cd216988acd6
refs/heads/master
2023-01-25T01:52:57.126499
2023-01-13T17:50:38
2023-01-13T17:50:38
241,743,126
0
1
Apache-2.0
2020-08-06T16:53:11
2020-02-19T22:45:54
Python
UTF-8
Python
false
false
911
py
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

from ._estimator import DeepNPTSEstimator
from ._network import (
    DeepNPTSNetwork,
    DeepNPTSMultiStepPredictor,
    DeepNPTSNetworkDiscrete,
    DeepNPTSNetworkSmooth,
)

__all__ = [
    "DeepNPTSEstimator",
    "DeepNPTSNetwork",
    "DeepNPTSMultiStepPredictor",
    "DeepNPTSNetworkDiscrete",
    "DeepNPTSNetworkSmooth",
]
9f827b5cd072b3c5a7b8abb08cbeb1c57976822f
b3ac12dfbb8fa74500b406a0907337011d4aac72
/goldcoin/cmds/units.py
f39f52b9ed6ece8e4515e68efda51a35c69354ac
[ "Apache-2.0" ]
permissive
chia-os/goldcoin-blockchain
ab62add5396b7734c11d3c37c41776994489d5e7
5c294688dbbe995ae1d4422803f6fcf3e1cc6077
refs/heads/main
2023-08-11T23:58:53.617051
2021-09-12T15:33:26
2021-09-12T15:33:26
null
0
0
null
null
null
null
UTF-8
Python
false
false
330
py
from typing import Dict

# The rest of the codebase uses mojos everywhere.
# Only use these units for user facing interfaces.
units: Dict[str, int] = {
    "goldcoin": 10 ** 12,  # 1 goldcoin (ozt) is 1,000,000,000,000 mojo (1 trillion)
    "mojo:": 1,
    "colouredcoin": 10 ** 3,  # 1 coloured coin is 1000 colouredcoin mojos
}
605166acc000057f4f8e1a72739b30cd9d77d644
17fe4529fd2772b7d046f039bde140768634d028
/misc/samples/unittest_sample_fixture.py
ec183aa51203926248509bf02996e096d24dc86e
[]
no_license
namesuqi/tapir
b9c21f30bf781eec314f0ae4f57c232f167e4734
a5d4e9bb45d8cbf7e41d42d9006b43b753f3ecf1
refs/heads/master
2020-03-07T04:16:45.213561
2018-03-29T08:34:46
2018-03-29T08:34:46
127,261,810
0
0
null
null
null
null
UTF-8
Python
false
false
1,112
py
# coding=utf-8
# author: zengyuetian

import unittest


def setUpModule():
    print("setUpModule >>>")


def tearDownModule():
    print("tearDownModule >>>")


class Test1(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        print("setUpClass for Test1 >>")

    @classmethod
    def tearDownClass(cls):
        print("tearDownClass for Test1 >>")

    def setUp(self):
        print("setUp for Test1 >")

    def tearDown(self):
        print("tearDown for Test1 >")

    def testCase1(self):
        print("testCase1 for Test1")

    def testCase2(self):
        print("testCase2 for Test1")


class Test2(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        print("setUpClass for Test2 >>")

    @classmethod
    def tearDownClass(cls):
        print("tearDownClass for Test2 >>")

    def setUp(self):
        print("setUp for Test2 >")

    def tearDown(self):
        print("tearDown for Test2 >")

    def testCase1(self):
        print("testCase1 for Test2")

    def testCase2(self):
        print("testCase2 for Test2")


if __name__ == "__main__":
    unittest.main()
79d06d973f4350530acd4a498fc14d7d9edb3e00
124b35ccbae76ba33b9044071a056b9109752283
/Understanding_Concepts/viz/IntegratedGradientsTF/integrated_gradients_tf.py
d6198201ac70bf6560adfe7d8e5fd6aa4984b345
[]
no_license
anilmaddu/Daily-Neural-Network-Practice-2
94bc78fe4a5a429f5ba911bae5f231f3d8246f61
748de55c1a17eae9f65d7ea08d6b2b3fc156b212
refs/heads/master
2023-03-08T22:04:45.535964
2019-03-15T23:10:35
2019-03-15T23:10:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,654
py
#################################################################
# Implementation of Integrated Gradients function in Tensorflow #
# Naozumi Hiranuma ([email protected])                          #
#################################################################

import tensorflow as tf
import numpy as np


# INPUT: tensor of samples to explain
# OUTPUT: interpolated: linearly interpolated samples between input samples and references.
#         stepsize: stepsizes between samples and references
#         reference: a placeholder tensor for optionally specifying reference values.
def linear_inpterpolation(sample, num_steps=50):
    # Constrtuct reference values if not available.
    reference = tf.placeholder_with_default(tf.zeros_like(sample), shape=sample.get_shape())

    # Expand sample and reference
    sample_ = tf.stack([sample for _ in range(num_steps)])
    reference_ = tf.stack([reference for _ in range(num_steps)])

    # Get difference between sample and reference
    dif = sample_ - reference_
    stepsize = tf.divide(dif, num_steps)

    # Get multipliers
    multiplier = tf.divide(tf.stack([tf.ones_like(sample)*i for i in range(num_steps)]), num_steps)
    interploated_dif = tf.multiply(dif, multiplier)

    # Get parameters for reshaping
    _shape = [-1] + [int(s) for s in sample.get_shape()[1:]]
    perm = [1, 0]+[i for i in range(2,len(sample_.get_shape()))]

    # Reshape
    interploated = tf.reshape(reference_ + interploated_dif, shape=_shape)
    stepsize = tf.reshape(stepsize, shape=_shape)

    return interploated, stepsize, reference


# INPUT: samples: linearly interpolated samples between input samples and references. output of linear_interpolation()
#        stepsizse: output of linear_interpolation()
#        _output: output tensor to be explained. It needs to be connected to samples.
# OUTPUT: explanations: A list of tensors with explanation values.
def build_ig(samples, stepsizes, _output, num_steps=50):
    grads = tf.gradients(ys=_output, xs=samples)

    flag = False
    if not isinstance(samples, list):
        samples = [samples]
        stepsizes = [stepsizes]
        flag=True

    # Estimate riemann sum
    output = []
    for i in range(len(samples)):
        s = stepsizes[i]
        g = grads[i]
        riemann = tf.multiply(s, g)
        riemann = tf.reshape(riemann, shape=[num_steps,-1]+[int(s) for s in s.get_shape()[1:]])
        explanation = tf.reduce_sum(riemann, axis=0)
        output.append(explanation)

    # Return the values.
    if flag:
        return output[0]
    else:
        return output

# -- end code --
e5f8dd86564f6f2ac9a03aeef761b298c102eb92
f0d713996eb095bcdc701f3fab0a8110b8541cbb
/gH3QMvF3czMDjENkk_9.py
19552a338cba87d2d304d1d2bbfb9850243e1af0
[]
no_license
daniel-reich/turbo-robot
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
a7a25c63097674c0a81675eed7e6b763785f1c41
refs/heads/main
2023-03-26T01:55:14.210264
2021-03-23T16:08:01
2021-03-23T16:08:01
350,773,815
0
0
null
null
null
null
UTF-8
Python
false
false
838
py
""" Create a function that takes a list and string. The function should remove the letters in the string from the list, and return the list. ### Examples remove_letters(["s", "t", "r", "i", "n", "g", "w"], "string") ➞ ["w"] remove_letters(["b", "b", "l", "l", "g", "n", "o", "a", "w"], "balloon") ➞ ["b", "g", "w"] remove_letters(["d", "b", "t", "e", "a", "i"], "edabit") ➞ [] ### Notes * If number of times a letter appears in the list is greater than the number of times the letter appears in the string, the extra letters should be left behind (see example #2). * If all the letters in the list are used in the string, the function should return an empty list (see example #3). """ def remove_letters(letters, word): l = letters for i in word: if i in l: l.remove(i) return l
26f18c303e12dd1ea296568f3185d5b1df7582fe
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
/cases/pa3/sample/op_cmp_int-106.py
89011902bfe43fdb6bd7bee90efed2d33564d626
[]
no_license
Virtlink/ccbench-chocopy
c3f7f6af6349aff6503196f727ef89f210a1eac8
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
refs/heads/main
2023-04-07T15:07:12.464038
2022-02-03T15:42:39
2022-02-03T15:42:39
451,969,776
0
0
null
null
null
null
UTF-8
Python
false
false
186
py
x:int = 42
y:int = 7
print(x == y)
print(x != y)
print(x < y)
print(x <= y)
print(x > y)
print(x >= y)
$Var(x == x)
print(x != x)
print(x < x)
print(x <= x)
print(x > x)
print(x >= x)
cc9411b7251704073d70f510559e49b20473e415
4e30d990963870478ed248567e432795f519e1cc
/tests/models/validators/v3_1_patch_1/jsd_df4fb303a3e5661ba12058f18b225af.py
f31472450dc722d87e16a1a2c2c919e92e4c5463
[ "MIT" ]
permissive
CiscoISE/ciscoisesdk
84074a57bf1042a735e3fc6eb7876555150d2b51
f468c54998ec1ad85435ea28988922f0573bfee8
refs/heads/main
2023-09-04T23:56:32.232035
2023-08-25T17:31:49
2023-08-25T17:31:49
365,359,531
48
9
MIT
2023-08-25T17:31:51
2021-05-07T21:43:52
Python
UTF-8
Python
false
false
8,158
py
# -*- coding: utf-8 -*-
"""Identity Services Engine getNetworkAccessConditions data model.

Copyright (c) 2021 Cisco and/or its affiliates.

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""

from __future__ import absolute_import, division, print_function, unicode_literals

import json
from builtins import *

import fastjsonschema
from ciscoisesdk.exceptions import MalformedRequest


class JSONSchemaValidatorDf4Fb303A3E5661Ba12058F18B225Af(object):
    """getNetworkAccessConditions request schema definition."""
    def __init__(self):
        super(JSONSchemaValidatorDf4Fb303A3E5661Ba12058F18B225Af, self).__init__()
        self._validator = fastjsonschema.compile(json.loads(
            '''{
                "$schema": "http://json-schema.org/draft-04/schema#",
                "properties": {
                  "response": {
                    "items": {
                      "properties": {
                        "attributeName": {
                          "type": "string"
                        },
                        "attributeValue": {
                          "type": "string"
                        },
                        "children": {
                          "items": {
                            "properties": {
                              "conditionType": {
                                "enum": ["ConditionAndBlock", "ConditionAttributes", "ConditionOrBlock", "ConditionReference", "LibraryConditionAndBlock", "LibraryConditionAttributes", "LibraryConditionOrBlock", "TimeAndDateCondition"],
                                "type": "string"
                              },
                              "isNegate": {
                                "type": "boolean"
                              },
                              "link": {
                                "properties": {
                                  "href": {
                                    "type": "string"
                                  },
                                  "rel": {
                                    "enum": ["next", "previous", "self", "status"],
                                    "type": "string"
                                  },
                                  "type": {
                                    "type": "string"
                                  }
                                },
                                "type": "object"
                              }
                            },
                            "type": "object"
                          },
                          "type": "array"
                        },
                        "conditionType": {
                          "enum": ["ConditionAndBlock", "ConditionAttributes", "ConditionOrBlock", "ConditionReference", "LibraryConditionAndBlock", "LibraryConditionAttributes", "LibraryConditionOrBlock", "TimeAndDateCondition"],
                          "type": "string"
                        },
                        "datesRange": {
                          "properties": {
                            "endDate": {
                              "type": "string"
                            },
                            "startDate": {
                              "type": "string"
                            }
                          },
                          "type": "object"
                        },
                        "datesRangeException": {
                          "properties": {
                            "endDate": {
                              "type": "string"
                            },
                            "startDate": {
                              "type": "string"
                            }
                          },
                          "type": "object"
                        },
                        "description": {
                          "type": "string"
                        },
                        "dictionaryName": {
                          "type": "string"
                        },
                        "dictionaryValue": {
                          "type": "string"
                        },
                        "hoursRange": {
                          "properties": {
                            "endTime": {
                              "type": "string"
                            },
                            "startTime": {
                              "type": "string"
                            }
                          },
                          "type": "object"
                        },
                        "hoursRangeException": {
                          "properties": {
                            "endTime": {
                              "type": "string"
                            },
                            "startTime": {
                              "type": "string"
                            }
                          },
                          "type": "object"
                        },
                        "id": {
                          "type": "string"
                        },
                        "isNegate": {
                          "type": "boolean"
                        },
                        "link": {
                          "properties": {
                            "href": {
                              "type": "string"
                            },
                            "rel": {
                              "enum": ["next", "previous", "self", "status"],
                              "type": "string"
                            },
                            "type": {
                              "type": "string"
                            }
                          },
                          "type": "object"
                        },
                        "name": {
                          "type": "string"
                        },
                        "operator": {
                          "enum": ["contains", "endsWith", "equals", "greaterOrEquals", "greaterThan", "in", "ipEquals", "ipGreaterThan", "ipLessThan", "ipNotEquals", "lessOrEquals", "lessThan", "matches", "notContains", "notEndsWith", "notEquals", "notIn", "notStartsWith", "startsWith"],
                          "type": "string"
                        },
                        "weekDays": {
                          "items": {
                            "enum": ["Friday", "Monday", "Saturday", "Sunday", "Thursday", "Tuesday", "Wednesday"],
                            "type": "string"
                          },
                          "type": "array"
                        },
                        "weekDaysException": {
                          "items": {
                            "enum": ["Friday", "Monday", "Saturday", "Sunday", "Thursday", "Tuesday", "Wednesday"],
                            "type": "string"
                          },
                          "type": "array"
                        }
                      },
                      "type": "object"
                    },
                    "type": "array"
                  },
                  "version": {
                    "type": "string"
                  }
                },
                "required": [
                  "response",
                  "version"
                ],
                "type": "object"
                }'''.replace("\n" + ' ' * 16, '')
        ))

    def validate(self, request):
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
6076c919a7fc64e1832cdfff14fd936313f6f605
3fd7adb56bf78d2a5c71a216d0ac8bc53485b034
/tensorflow_data/position_ctrl_action5r3_rel/conf.py
968527a0c3070d794fdb27d5931531bfada19c90
[]
no_license
anair13/lsdc
6d1675e493f183f467cab0bfe9b79a4f70231e4e
7760636bea24ca0231b4f99e3b5e8290c89b9ff5
refs/heads/master
2021-01-19T08:02:15.613362
2017-05-12T17:13:54
2017-05-12T17:13:54
87,596,344
0
0
null
2017-04-08T00:18:55
2017-04-08T00:18:55
null
UTF-8
Python
false
false
1,872
py
import os

current_dir = os.path.dirname(os.path.realpath(__file__))

# tf record data location:
DATA_DIR = '/'.join(str.split(current_dir, '/')[:-2]) + '/pushing_data/position_control_a5r3rel/train'

# local output directory
OUT_DIR = current_dir + '/modeldata'

from video_prediction.prediction_model_downsized_lesslayer import construct_model

configuration = {
'experiment_name': 'position_rel',
'data_dir': DATA_DIR,           # 'directory containing data.' ,
'output_dir': OUT_DIR,          #'directory for model checkpoints.' ,
'current_dir': current_dir,     #'directory for writing summary.' ,
'num_iterations': 50000,        #'number of training iterations.' ,
'pretrained_model': '',         # 'filepath of a pretrained model to resume training from.' ,
'sequence_length': 15,          # 'sequence length, including context frames.' ,
'skip_frame': 1,                # 'use ever i-th frame to increase prediction horizon' ,
'context_frames': 2,            # of frames before predictions.' ,
'use_state': 1,                 #'Whether or not to give the state+action to the model' ,
'model': 'DNA',                 #'model architecture to use - CDNA, DNA, or STP' ,
'num_masks': 1,                 # 'number of masks, usually 1 for DNA, 10 for CDNA, STN.' ,
'schedsamp_k': 900.0,           # 'The k hyperparameter for scheduled sampling -1 for no scheduled sampling.' ,
'train_val_split': 0.95,        #'The percentage of files to use for the training set vs. the validation set.' ,
'batch_size': 32,               #'batch size for training' ,
'learning_rate': 0.001,         #'the base learning rate of the generator' ,
'visualize': '',                #'load model from which to generate visualizations
'downsize': construct_model,    #'create downsized model'
'file_visual': '',              # datafile used for making visualizations
'penal_last_only': False        # penalize only the last state, to get sharper predictions
}
75ccc26a4c4472390ed15c91ff1250d21f8742ba
9bb521d515a2401b69df797efed11b04e04401a7
/tests/runtests-herd.py
6b8ab527d581912293ea513b8d1152d11ea11811
[ "BSD-3-Clause" ]
permissive
risent/django-redis
be512f1bf6c51b8e238e2fa8b1eec5073c03916e
46bfd076c197846035e3f31348748d464ace74d0
refs/heads/master
2021-01-14T14:10:59.664982
2015-06-11T20:15:28
2015-06-11T20:15:28
37,450,021
0
0
null
2015-06-15T07:25:20
2015-06-15T07:25:20
null
UTF-8
Python
false
false
311
py
# -*- coding: utf-8 -*-

import os, sys
sys.path.insert(0, '..')

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_sqlite_herd")

if __name__ == "__main__":
    from django.core.management import execute_from_command_line
    args = sys.argv
    args.insert(1, "test")
    execute_from_command_line(args)
98a5ba2fce68657fdaed702892ee3ed449bf727e
3e862ce90e7f17c1f1c586aad20bda6c4fc6cbd4
/home/management/commands/load_initial_data.py
19443015cfb4110723cc564ebfbfb35c06d46937
[]
no_license
crowdbotics-users/kailashacrowdboticscom-kai-638
621fc891f449a843e0334f4443462f78d1a1d5b6
e3753824bbd240c64eeadde9671438cc77a8dc0b
refs/heads/master
2020-04-09T19:42:14.674638
2018-12-05T17:00:39
2018-12-05T17:00:39
160,551,011
0
0
null
null
null
null
UTF-8
Python
false
false
757
py
from django.core.management import BaseCommand
from home.models import CustomText, HomePage


def load_initial_data():
    homepage_body = """
<h1 class="display-4 text-center">image-to-text-converter-211</h1>
<p class="lead">
This is the sample application created and deployed from the crowdbotics slack app.
You can view list of packages selected for this application below
</p>"""
    customtext_title = 'image-to-text-converter-211'

    CustomText.objects.create(title=customtext_title)
    HomePage.objects.create(body=homepage_body)


class Command(BaseCommand):
    can_import_settings = True
    help = 'Load initial data to db'

    def handle(self, *args, **options):
        load_initial_data()
bc898e40424cb1cafb5b4b23ba444477869ae983
5c1531b47fb4dc4d7e5998d44f7200bf1786b12b
/__UNSORTED/130_surrounded_regions/surrounded_regions_TLE.py
3c923ea0a7709fbedcb124df62b7253ab7f96642
[]
no_license
Web-Dev-Collaborative/Leetcode-JS-PY-MD
d1f560051aad1896a80eccdd4b4fbb389e7033e3
675b94fa5da8d40f0ea79efe6d3ef1393221425f
refs/heads/master
2023-09-01T22:30:32.313793
2021-10-26T02:17:03
2021-10-26T02:17:03
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,019
py
class Solution:
    # @param {character[][]} board
    # @return {void} Do not return anything, modify board in-place instead.
    def solve(self, board):
        if not board:
            return
        lx = len(board)
        ly = len(board[0])
        for x in range(lx):
            for y in range(ly):
                if board[x][y] == "O":
                    self.area = []
                    if self.explore(board, x, y):
                        for xx, yy in self.area:
                            board[xx][yy] = "X"

    def explore(self, board, x, y):
        if board[x][y] != "O":
            return True
        if x == 0 or x == len(board) - 1 or y == 0 or y == len(board[0]) - 1:
            return False
        if (x, y) in self.area:
            return True
        self.area.append((x, y))
        return (
            self.explore(board, x, y + 1)
            and self.explore(board, x + 1, y)
            and self.explore(board, x - 1, y)
            and self.explore(board, x, y - 1)
        )
8e204756e205394482650c812c5b994b021ff48c
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
/pardus/tags/2007.2/programming/libs/geoip/actions.py
8ea47d4a7aaa461b4099e2a3b64026df8fc2b019
[]
no_license
aligulle1/kuller
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
7f98de19be27d7a517fe19a37c814748f7e18ba6
refs/heads/master
2021-01-20T02:22:09.451356
2013-07-23T17:57:58
2013-07-23T17:57:58
null
0
0
null
null
null
null
UTF-8
Python
false
false
634
py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2006,2007 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.

from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get

WorkDir = "GeoIP-%s" % get.srcVERSION()

def setup():
    autotools.configure("--enable-shared \
                         --disable-static")

def build():
    autotools.make()

def install():
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())

    pisitools.dodoc("AUTHORS", "ChangeLog", "COPYING", "NEWS", "README", "TODO")
564d007f30314f626af2a6f9ebbfe6aa75131c69
1c4a19c0d1953280f79193f30ad8c4759e3aff58
/ansys/dpf/core/operators/math/conjugate.py
ddeae28fea91fca0f6c68e3c561790131c01251f
[ "MIT" ]
permissive
hoangxuyenle/DPF-Core
d02c843b678560f12715ea90dc8c9764b3bffc99
a404dd290c7b3ee75463b2487cafb8bf48468691
refs/heads/master
2023-06-15T15:27:02.597938
2021-06-22T15:19:04
2021-06-22T15:19:04
381,611,135
0
0
MIT
2021-06-30T07:18:30
2021-06-30T07:18:30
null
UTF-8
Python
false
false
4,859
py
""" conjugate ========= """ from ansys.dpf.core.dpf_operator import Operator from ansys.dpf.core.inputs import Input, _Inputs from ansys.dpf.core.outputs import Output, _Outputs, _modify_output_spec_with_one_type from ansys.dpf.core.operators.specification import PinSpecification, Specification """Operators from Ans.Dpf.Native plugin, from "math" category """ class conjugate(Operator): """Computes element-wise conjugate of field containers containing complex fields. available inputs: - fields_container (FieldsContainer) available outputs: - fields_container (FieldsContainer) Examples -------- >>> from ansys.dpf import core as dpf >>> # Instantiate operator >>> op = dpf.operators.math.conjugate() >>> # Make input connections >>> my_fields_container = dpf.FieldsContainer() >>> op.inputs.fields_container.connect(my_fields_container) >>> # Instantiate operator and connect inputs in one line >>> op = dpf.operators.math.conjugate(fields_container=my_fields_container) >>> # Get output data >>> result_fields_container = op.outputs.fields_container()""" def __init__(self, fields_container=None, config=None, server=None): super().__init__(name="conjugate", config = config, server = server) self._inputs = InputsConjugate(self) self._outputs = OutputsConjugate(self) if fields_container !=None: self.inputs.fields_container.connect(fields_container) @staticmethod def _spec(): spec = Specification(description="""Computes element-wise conjugate of field containers containing complex fields.""", map_input_pin_spec={ 0 : PinSpecification(name = "fields_container", type_names=["fields_container"], optional=False, document="""""")}, map_output_pin_spec={ 0 : PinSpecification(name = "fields_container", type_names=["fields_container"], optional=False, document="""""")}) return spec @staticmethod def default_config(): return Operator.default_config(name = "conjugate") @property def inputs(self): """Enables to connect inputs to the operator Returns -------- inputs : InputsConjugate """ return super().inputs @property def outputs(self): """Enables to get outputs of the operator by evaluationg it Returns -------- outputs : OutputsConjugate """ return super().outputs #internal name: conjugate #scripting name: conjugate class InputsConjugate(_Inputs): """Intermediate class used to connect user inputs to conjugate operator Examples -------- >>> from ansys.dpf import core as dpf >>> op = dpf.operators.math.conjugate() >>> my_fields_container = dpf.FieldsContainer() >>> op.inputs.fields_container.connect(my_fields_container) """ def __init__(self, op: Operator): super().__init__(conjugate._spec().inputs, op) self._fields_container = Input(conjugate._spec().input_pin(0), 0, op, -1) self._inputs.append(self._fields_container) @property def fields_container(self): """Allows to connect fields_container input to the operator Parameters ---------- my_fields_container : FieldsContainer, Examples -------- >>> from ansys.dpf import core as dpf >>> op = dpf.operators.math.conjugate() >>> op.inputs.fields_container.connect(my_fields_container) >>> #or >>> op.inputs.fields_container(my_fields_container) """ return self._fields_container class OutputsConjugate(_Outputs): """Intermediate class used to get outputs from conjugate operator Examples -------- >>> from ansys.dpf import core as dpf >>> op = dpf.operators.math.conjugate() >>> # Connect inputs : op.inputs. ... 
>>> result_fields_container = op.outputs.fields_container() """ def __init__(self, op: Operator): super().__init__(conjugate._spec().outputs, op) self._fields_container = Output(conjugate._spec().output_pin(0), 0, op) self._outputs.append(self._fields_container) @property def fields_container(self): """Allows to get fields_container output of the operator Returns ---------- my_fields_container : FieldsContainer, Examples -------- >>> from ansys.dpf import core as dpf >>> op = dpf.operators.math.conjugate() >>> # Connect inputs : op.inputs. ... >>> result_fields_container = op.outputs.fields_container() """ return self._fields_container
76223c165e5e9ac07147392a1c676096c926a704
9f1039075cc611198a988034429afed6ec6d7408
/tensorflow-stubs/python/debug/cli/ui_factory.pyi
b43ca70100629b7c956effd95df1bc66726070c7
[]
no_license
matangover/tensorflow-stubs
9422fbb1cb3a3638958d621461291c315f9c6ec2
664bd995ef24f05ba2b3867d979d23ee845cb652
refs/heads/master
2020-05-23T12:03:40.996675
2019-05-15T06:21:43
2019-05-15T06:21:43
186,748,093
0
0
null
null
null
null
UTF-8
Python
false
false
352
pyi
# Stubs for tensorflow.python.debug.cli.ui_factory (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.

from typing import Any as Any, Optional as Optional

SUPPORTED_UI_TYPES: Any

def get_ui(ui_type: Any, on_ui_exit: Optional[Any] = ..., available_ui_types: Optional[Any] = ..., config: Optional[Any] = ...): ...
3f45a0ead1cf4666a92a4e6dc6450c3ac923cd4a
e5efada3529d94875455c4230c8dabe27fb72a89
/apps/api/migrations/0015_auto_20230210_1801.py
aa364ba5d0abda9b62dcdf65fd85023772bbf6fc
[]
no_license
alexmon1989/uma
d8c321fb0ec9b1a9039b1c83aeaaff774f657416
5dea579d634eeb1c8103c21157299b33ca5590f0
refs/heads/master
2023-08-03T04:31:13.598577
2023-07-22T18:17:13
2023-07-22T18:17:13
154,835,498
0
0
null
2023-03-02T11:20:54
2018-10-26T13:02:12
Nunjucks
UTF-8
Python
false
false
411
py
# Generated by Django 3.2.12 on 2023-02-10 18:01

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('api', '0014_auto_20211213_1900'),
    ]

    operations = [
        migrations.AddField(
            model_name='opendata',
            name='files_path',
            field=models.CharField(blank=True, max_length=500, null=True),
        ),
    ]
a220aea2b5c78023a22076d9c19a6dd6523da5d2
30d1902232eb9ddb84fdf5404a3a1dfd6232406a
/wxpython/project/panels/WorkExperience.py
17bf63ac737356239469250d54b41bd0999928ea
[]
no_license
sxnys/mypython
c3a768b054077ed97ff1e2fac31cb93f0765deb3
de48cd883ad2de3320cb0c6b46b451ebb2311ac7
refs/heads/master
2022-11-07T15:11:48.936412
2019-04-14T12:04:30
2019-04-14T12:04:30
119,686,106
0
1
null
2022-10-31T05:13:00
2018-01-31T12:46:06
Python
UTF-8
Python
false
false
791
py
# -*- coding: utf-8
__author__ = 'Sxn'
__date__ = '2017/5/22 19:09'

from . import StudyExperience
from extra import JsonIO


class TabPanel(StudyExperience.TabPanel):
    def __init__(self, parent):
        StudyExperience.TabPanel.__init__(self, parent, tabName=u'工作经历', instructText=u'含学术兼职情况',
                                          numLimit=10, editInfo=[u'起止年月', u'工作单位', u'职务/职称'],
                                          colSize=[250, 250, 250], childOrder=2)

    def addToJsonDict(self):
        JsonIO.working_exp = []
        for i in xrange(self.gridRow):
            tmp = {}
            tmp['start_end_date'] = self.infoGrid.GetCellValue(i, 0)
            tmp['working_dep'] = self.infoGrid.GetCellValue(i, 1)
            tmp['job'] = self.infoGrid.GetCellValue(i, 2)
            JsonIO.working_exp.append(tmp)
5e65254844f16b658ad6828501d1c3536c170e7f
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/63/usersdata/230/28042/submittedfiles/swamee.py
88094a850a0912518cfb951ac45f0e9faca901c7
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
493
py
# -*- coding: utf-8 -*-
import math
# START YOUR CODE HERE
f = float(input('Digite o valor de f: '))
L = float(input('Digite o valor de L: '))
Q = float(input('Digite o valor de Q: '))
DeltaH = float(input('Digite o valor de DeltaH: '))
v = float(input('Digite o valor de v: '))
g = 9.81
E = 0.000002
D = ((8*f*L*(Q**2))/((math.pi**2)*g*DeltaH))**0.2
Rey = (4*Q)/(math.pi*D*v)
k = (0.25)/(math.log10((E/3.7*D))+(5.74/(Rey**0.9)))**2
print ('%.4f' % D)
print ('%.4f' % Rey)
print ('%.4f' % k)
2d1ff66d90a2adb3e0779f18b5a50d2212b45545
13f5984be7be77852e4de29ab98d5494a7fc6767
/LeetCode/binary_serach_tree.py
ac894fb9c1a5994df4054cf4407beae85859a72b
[]
no_license
YuanXianguo/Python-Interview-Master
4252514763fc3f563d9b94e751aa873de1719f91
2f73786e8c51dbd248341559de171e18f67f9bf2
refs/heads/master
2020-11-26T18:14:50.190812
2019-12-20T02:18:03
2019-12-20T02:18:03
229,169,825
0
0
null
null
null
null
UTF-8
Python
false
false
3,513
py
from tree import Tree


class Node(object):
    """Node"""
    def __init__(self, val=None):
        self.val = val
        self.left = None
        self.right = None


class BinarySearchTree(Tree):
    """Binary search tree"""
    def __init__(self, node=None):
        super().__init__(node)

    def insert(self, val):
        """Binary search tree insertion"""
        node = Node(val)
        if not self.root:
            self.root = node
        else:
            if val < self.root.val:
                self.root.left = BinarySearchTree(
                    self.root.left).insert(val)
            elif val > self.root.val:
                self.root.right = BinarySearchTree(
                    self.root.right).insert(val)
        return self.root

    def find(self, val):
        """Recursively search for a value"""
        if not self.root:
            return "查找失败"
        if val < self.root.val:
            return BinarySearchTree(self.root.left).find(val)
        elif val > self.root.val:
            return BinarySearchTree(self.root.right).find(val)
        else:  # found it
            return self.root

    def find2(self, val):
        """Iterative (non-recursive) search"""
        root = self.root
        while root:
            if val < root.val:
                root = root.left
            elif val > root.val:
                root = root.right
            else:
                return root
        return "查找失败"

    def find_min(self):
        """Recursively find the minimum; it is always at the end of the leftmost branch"""
        if not self.root:
            return "查找失败"
        if not self.root.left:
            return self.root  # the minimum has no left subtree
        else:
            return BinarySearchTree(self.root.left).find_min()

    def find_max(self):
        """Iteratively find the maximum; it is always at the end of the rightmost branch"""
        root = self.root
        if not root:
            return "查找失败"
        while root.right:
            root = root.right
        return root

    def delete(self, val):
        """Each recursive delete returns the subtree left after deletion"""
        if not self.root:
            return "删除失败"
        elif val < self.root.val:
            self.root.left = BinarySearchTree(
                self.root.left).delete(val)
        elif val > self.root.val:
            self.root.right = BinarySearchTree(
                self.root.right).delete(val)
        else:  # this node is the one to delete
            # if neither subtree is empty
            if self.root.left and self.root.right:
                # find the minimum of the right subtree (or the maximum of the left subtree)
                right_min = BinarySearchTree(self.root.right).find_min()
                # fill the node to delete with the right subtree's minimum
                self.root.val = right_min.val
                # delete the right subtree's minimum
                self.root.right = BinarySearchTree(
                    self.root.right).delete(right_min)
            else:  # the node to delete has one subtree or none
                if not self.root.left:
                    self.root = self.root.right
                elif not self.root.right:
                    self.root = self.root.left
        return self.root


if __name__ == '__main__':
    bt = BinarySearchTree()
    for i in range(10):
        bt.insert(i)
    print(bt.find_min().val)
    print(bt.find_max().val)
    print(bt.find(10))
    bt.postorder()
    print("")
    bt.delete(9)
    print(bt.find_max().val)
    bt.inorder()
05f2f300257d5ca6375765b26379c1ae5bcd4984
3ec9d3aa7e59475683dba30a87ca68242a7ec181
/cn/edgedetection/03Sample.py
843592f93fc567466fb142220d9454d1c28724ac
[ "Apache-2.0" ]
permissive
Jasonandy/Python-X
58bf36499572cdfb7d7bf80c6a3cd0c818f62c1e
2f02b9a17bd5495dd1f8746b191f11ec2d7bccbe
refs/heads/master
2021-06-16T17:07:29.277404
2021-03-07T14:17:05
2021-03-07T14:17:05
175,353,402
0
2
null
null
null
null
UTF-8
Python
false
false
2,419
py
""" 边缘检测示列 https://blog.csdn.net/HuangZhang_123/article/details/80511270 """ import cv2 import numpy as np def show_image(image_path): """ show_image 展示 :param image_path: :return: """ img = cv2.pyrDown(cv2.imread(image_path, cv2.IMREAD_UNCHANGED)) # threshold 函数对图像进行二化值处理,由于处理后图像对原图像有所变化,因此img.copy()生成新的图像,cv2.THRESH_BINARY是二化值 ret, thresh = cv2.threshold(cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY), 127, 255, cv2.THRESH_BINARY) # findContours函数查找图像里的图形轮廓 # 函数参数thresh是图像对象 # 层次类型,参数cv2.RETR_EXTERNAL是获取最外层轮廓,cv2.RETR_TREE是获取轮廓的整体结构 # 轮廓逼近方法 # 输出的返回值,image是原图像、contours是图像的轮廓、hier是层次类型 contours, hier = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) for c in contours: # 轮廓绘制方法一 # boundingRect函数计算边框值,x,y是坐标值,w,h是矩形的宽和高 x, y, w, h = cv2.boundingRect(c) # 在img图像画出矩形,(x, y), (x + w, y + h)是矩形坐标,(0, 255, 0)设置通道颜色,2是设置线条粗度 cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2) # 轮廓绘制方法二 # 查找最小区域 rect = cv2.minAreaRect(c) # 计算最小面积矩形的坐标 box = cv2.boxPoints(rect) # 将坐标规范化为整数 box = np.int0(box) # 绘制矩形 cv2.drawContours(img, [box], 0, (0, 0, 255), 3) # 轮廓绘制方法三 # 圆心坐标和半径的计算 (x, y), radius = cv2.minEnclosingCircle(c) # 规范化为整数 center = (int(x), int(y)) radius = int(radius) # 勾画圆形区域 img = cv2.circle(img, center, radius, (0, 255, 0), 2) # # 轮廓绘制方法四 # 围绕图形勾画蓝色线条 cv2.drawContours(img, contours, -1, (255, 0, 0), 2) # 显示图像 cv2.imshow("contours", img) cv2.waitKey() cv2.destroyAllWindows() def run(): # image_path = "media/13.jpg" # image_path = "media/lena/lena.jpg" image_path = "media/sample/sample.png" show_image(image_path) if __name__ == '__main__': run()
13e9b3fafe2f0f5e0947fec71bd1d9c4f1fd6730
2a171178942a19afe9891c2425dce208ae04348b
/kubernetes/client/models/v1_job_list.py
4c27f86f781c5f929aaeab8ca1386fdec70302fc
[ "Apache-2.0" ]
permissive
ouccema/client-python
ac3f1dee1c5ad8d82f15aeecb87a2f5f219ca4f4
d7f33ec53e302e66674df581904a3c5b1fcf3945
refs/heads/master
2021-01-12T03:17:54.274888
2017-01-03T22:13:14
2017-01-03T22:13:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,395
py
# coding: utf-8

"""
    Kubernetes

    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)

    OpenAPI spec version: v1.5.0-snapshot
    Generated by: https://github.com/swagger-api/swagger-codegen.git

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
"""

from pprint import pformat
from six import iteritems
import re


class V1JobList(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, items=None, metadata=None):
        """
        V1JobList - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        self.swagger_types = {
            'items': 'list[V1Job]',
            'metadata': 'UnversionedListMeta'
        }

        self.attribute_map = {
            'items': 'items',
            'metadata': 'metadata'
        }

        self._items = items
        self._metadata = metadata

    @property
    def items(self):
        """
        Gets the items of this V1JobList.
        Items is the list of Job.

        :return: The items of this V1JobList.
        :rtype: list[V1Job]
        """
        return self._items

    @items.setter
    def items(self, items):
        """
        Sets the items of this V1JobList.
        Items is the list of Job.

        :param items: The items of this V1JobList.
        :type: list[V1Job]
        """
        if items is None:
            raise ValueError("Invalid value for `items`, must not be `None`")

        self._items = items

    @property
    def metadata(self):
        """
        Gets the metadata of this V1JobList.
        Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata

        :return: The metadata of this V1JobList.
        :rtype: UnversionedListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this V1JobList.
        Standard list metadata More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata

        :param metadata: The metadata of this V1JobList.
        :type: UnversionedListMeta
        """

        self._metadata = metadata

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
51e410a7583b82d254106376c125b43aa5f99007
ed7e61c8eef7fb2213adeb67557d605470c17fb3
/ML/confusion-matrix/split_two.py
b3bc65d93e39225a414ead9d46ec4d8d6b6fd697
[]
no_license
MartinThoma/algorithms
535840224323822f2ea6b7dd6f82a0fdd22a0ff9
a251e9599b685dbf89c891f02d20fefd8538ead5
refs/heads/master
2023-02-23T17:58:10.913634
2023-02-21T05:58:59
2023-02-21T05:58:59
4,939,076
241
126
null
2023-02-16T05:16:23
2012-07-07T16:07:23
Python
UTF-8
Python
false
false
6,693
py
#!/usr/bin/env python

"""Split the classes into two equal-sized groups to maximize accuracy."""

import json
import os
import random

import numpy as np

random.seed(0)
import logging
import sys

from visualize import apply_permutation, plot_cm, read_symbols, swap, swap_1d

logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                    level=logging.DEBUG,
                    stream=sys.stdout)


def calculate_split_accuracy(cm):
    """
    Calculate the accuracy of the adjusted classifier.

    The adjusted classifier is built by joining the first n/2 classes into
    one group and the rest into another group.
    """
    n = len(cm)
    first = int(n / 2)
    cm_small = np.zeros((2, 2))
    for i in range(n):
        class_i = int(i < first)
        for j in range(n):
            class_j = int(j < first)
            cm_small[class_i][class_j] += cm[i][j]
    return (float(cm_small[0][0] + cm_small[1][1]) / cm_small.sum())


def calculate_split_error(cm):
    """Calculate the error of 2 group split."""
    return 1.0 - calculate_split_accuracy(cm)


def simulated_annealing(current_cm,
                        current_perm=None,
                        score=calculate_split_error,
                        steps=2 * 10**5,
                        temp=100.0,
                        cooling_factor=0.99,
                        deterministic=False):
    """
    Optimize current_cm by randomly swapping elements.

    Parameters
    ----------
    current_cm : numpy array
    current_perm : None or iterable, optional (default: None)
    steps : int, optional (default: 2 * 10**4)
    temp : float > 0.0, optional (default: 100.0)
        Temperature
    cooling_factor: float in (0, 1), optional (default: 0.99)
    """
    assert temp > 0
    assert cooling_factor > 0
    assert cooling_factor < 1
    n = len(current_cm)
    if current_perm is None:
        current_perm = list(range(n))
    current_perm = np.array(current_perm)

    # Debugging code
    perm_exp = np.zeros((n, n), dtype=np.int)
    for i in range(n):
        for j in range(n):
            perm_exp[i][j] = j

    current_cm = apply_permutation(current_cm, current_perm)
    perm_exp_current = apply_permutation(perm_exp, current_perm)
    logging.debug(perm_exp_current[0])
    print("apply permutation %s" % str(current_perm))
    current_score = score(current_cm)
    best_perm = current_perm
    best_cm = current_cm
    best_score = current_score
    print("## Starting Score: {:0.2f}%".format(current_score * 100))
    for step in range(steps):
        tmp = np.array(current_cm, copy=True)
        split_part = int(n / 2) - 1
        i = random.randint(0, split_part)
        j = random.randint(split_part + 1, n - 1)
        perm = swap_1d(current_perm.copy(), i, j)
        tmp = swap(tmp, i, j)
        # tmp = apply_permutation(tmp, perm)
        tmp_score = score(tmp)
        if deterministic:
            chance = 1.0
        else:
            chance = random.random()
            temp *= 0.99
        hot_prob = min(1, np.exp(-(tmp_score - current_score) / temp))
        if chance <= hot_prob:
            if best_score > tmp_score:  # Minimize the score
                best_perm = perm
                best_cm = tmp
                best_score = tmp_score
            current_score = tmp_score
            perm_exp_current = swap(perm_exp_current, i, j)
            print(list(perm_exp_current[0]))
            current_cm = tmp
            logging.info(("Current: %0.2f%% (best: %0.2f%%, hot_prob=%0.2f%%, "
                          "step=%i)"),
                         (current_score * 100),
                         (best_score * 100),
                         (hot_prob * 100),
                         step)
    return {'cm': best_cm, 'perm': list(perm_exp_current[0])}


def main(cm_file, perm_file, steps, labels_file):
    """Orchestrate."""
    # Load confusion matrix
    with open(cm_file) as f:
        cm = json.load(f)
        cm = np.array(cm)

    # Load permutation
    if os.path.isfile(perm_file):
        print("loaded %s" % perm_file)
        with open(perm_file) as data_file:
            perm = json.load(data_file)
    else:
        # random.shuffle shuffles in place and returns None, so build the
        # list first and shuffle it afterwards
        perm = list(range(len(cm)))
        random.shuffle(perm)
    print("Score without perm: {:0.2f}%".format(calculate_split_error(cm) * 100))
    result = simulated_annealing(cm, perm,
                                 score=calculate_split_error,
                                 deterministic=True,
                                 steps=steps)

    # First recursive step
    # split_i = int(len(cm) / 2)
    # cm = result['cm'][:split_i, :split_i]
    # perm = list(range(split_i))
    # result = simulated_annealing(cm, perm,
    #                              score=calculate_split_error,
    #                              deterministic=True,
    #                              steps=steps)
    print("Score: {}".format(calculate_split_error(result['cm'])))
    print("Perm: {}".format(list(result['perm'])))

    # Load labels
    if os.path.isfile(labels_file):
        with open(labels_file) as f:
            symbols = json.load(f)
    else:
        symbols = read_symbols()
    print("Symbols: {}".format([symbols[i] for i in result['perm']]))
    plot_cm(result['cm'], zero_diagonal=True)


def get_parser():
    """Get parser object for script xy.py."""
    from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
    parser = ArgumentParser(description=__doc__,
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("--cm",
                        dest="cm_file",
                        help=("path of a json file with a confusion matrix"),
                        metavar="cm.json",
                        default='confusion-matrix.json')
    parser.add_argument("--perm",
                        dest="perm_file",
                        help=("path of a json file with a permutation to "
                              "start with"),
                        metavar="perm.json",
                        default="")
    parser.add_argument("--labels",
                        dest="labels_file",
                        help=("path of a json file with a list of label "
                              "names"),
                        metavar="labels.json",
                        default="")
    parser.add_argument("-n",
                        dest="n",
                        default=4 * 10**5,
                        type=int,
                        help="number of steps to iterate")
    return parser


if __name__ == "__main__":
    args = get_parser().parse_args()
    main(args.cm_file, args.perm_file, args.n, args.labels_file)
f5435f602b8973519150389a75dd7328fe65e570
c0f808504dd3d7fd27c39f1503fbc14c1d37bf9f
/sources/scipy-scipy-414c1ab/scipy/sparse/linalg/dsolve/umfpack/tests/try_umfpack.py
6f7cd7acdb421fa1497d93d5b68da26ef2943b61
[]
no_license
georgiee/lip-sync-lpc
7662102d4715e4985c693b316a02d11026ffb117
e931cc14fe4e741edabd12471713bf84d53a4250
refs/heads/master
2018-09-16T08:47:26.368491
2018-06-05T17:01:08
2018-06-05T17:01:08
5,779,592
17
4
null
null
null
null
UTF-8
Python
false
false
6,310
py
#!/usr/bin/env python
# Created by: Robert Cimrman, 05.12.2005

"""Benchmarks for umfpack module"""

from optparse import OptionParser
import time
import urllib
import gzip

import numpy as np
import scipy.sparse as sp
import scipy.io as io  # needed by read_triplet2; not imported in the original file
import scipy.sparse.linalg.dsolve.umfpack as um
import scipy.linalg as nla

defaultURL = 'http://www.cise.ufl.edu/research/sparse/HBformat/'

usage = """%%prog [options] <matrix file name> [<matrix file name>, ...]

<matrix file name> can be a local or distant (gzipped) file

default url is: %s

supported formats are:
    triplet .. [nRow, nCol, nItem] followed by 'nItem' * [ir, ic, value]
    hb .. Harwell-Boeing format N/A
""" % defaultURL

##
# 05.12.2005, c
def read_triplet( fd ):
    nRow, nCol = map( int, fd.readline().split() )
    nItem = int( fd.readline() )

    ij = np.zeros( (nItem,2), np.int32 )
    val = np.zeros( (nItem,), np.float64 )
    for ii, row in enumerate( fd.readlines() ):
        aux = row.split()
        ij[ii] = int( aux[0] ), int( aux[1] )
        val[ii] = float( aux[2] )

    mtx = sp.csc_matrix( (val, ij), dims = (nRow, nCol), nzmax = nItem )

    return mtx

##
# 06.12.2005, c
def read_triplet2( fd ):
    nRow, nCol = map( int, fd.readline().split() )
    nItem = int( fd.readline() )

    ij, val = io.read_array( fd,
                             columns = [(0,1), (2,)],
                             atype = (np.int32, np.float64),
                             rowsize = nItem )

    mtx = sp.csc_matrix( (val, ij), dims = (nRow, nCol), nzmax = nItem )

    return mtx


formatMap = {'triplet' : read_triplet}

##
# 05.12.2005, c
def readMatrix( matrixName, options ):

    if options.default_url:
        matrixName = defaultURL + matrixName

    print 'url:', matrixName

    if matrixName[:7] == 'http://':
        fileName, status = urllib.urlretrieve( matrixName )
##        print status
    else:
        fileName = matrixName

    print 'file:', fileName

    try:
        readMatrix = formatMap[options.format]
    except:
        raise ValueError('unsupported format: %s' % options.format)

    print 'format:', options.format

    print 'reading...'
    if fileName.endswith('.gz'):
        fd = gzip.open( fileName )
    else:
        fd = open( fileName )

    mtx = readMatrix( fd )

    fd.close()

    print 'ok'

    return mtx

##
# 05.12.2005, c
def main():
    parser = OptionParser( usage = usage )
    parser.add_option( "-c", "--compare",
                       action = "store_true", dest = "compare",
                       default = False,
                       help = "compare with default scipy.sparse solver [default: %default]" )
    parser.add_option( "-p", "--plot",
                       action = "store_true", dest = "plot",
                       default = False,
                       help = "plot time statistics [default: %default]" )
    parser.add_option( "-d", "--default-url",
                       action = "store_true", dest = "default_url",
                       default = False,
                       help = "use default url [default: %default]" )
    parser.add_option( "-f", "--format", type = type( '' ),
                       dest = "format", default = 'triplet',
                       help = "matrix format [default: %default]" )
    (options, args) = parser.parse_args()

    if (len( args ) >= 1):
        matrixNames = args;
    else:
        parser.print_help(),
        return

    sizes, nnzs, times, errors = [], [], [], []
    legends = ['umfpack', 'sparse.solve']
    for ii, matrixName in enumerate( matrixNames ):
        print '*' * 50
        mtx = readMatrix( matrixName, options )

        sizes.append( mtx.shape )
        nnzs.append( mtx.nnz )
        tts = np.zeros( (2,), dtype = np.double )
        times.append( tts )
        err = np.zeros( (2,2), dtype = np.double )
        errors.append( err )

        print 'size : %s (%d nnz)' % (mtx.shape, mtx.nnz)

        sol0 = np.ones( (mtx.shape[0],), dtype = np.double )
        rhs = mtx * sol0

        umfpack = um.UmfpackContext()

        tt = time.clock()
        sol = umfpack( um.UMFPACK_A, mtx, rhs, autoTranspose = True )
        tts[0] = time.clock() - tt
        print "umfpack : %.2f s" % tts[0]

        error = mtx * sol - rhs
        err[0,0] = nla.norm( error )
        print '||Ax-b|| :', err[0,0]

        error = sol0 - sol
        err[0,1] = nla.norm( error )
        print '||x - x_{exact}|| :', err[0,1]

        if options.compare:
            tt = time.clock()
            sol = sp.solve( mtx, rhs )
            tts[1] = time.clock() - tt
            print "sparse.solve : %.2f s" % tts[1]

            error = mtx * sol - rhs
            err[1,0] = nla.norm( error )
            print '||Ax-b|| :', err[1,0]

            error = sol0 - sol
            err[1,1] = nla.norm( error )
            print '||x - x_{exact}|| :', err[1,1]

    if options.plot:
        try:
            import pylab
        except ImportError:
            raise ImportError("could not import pylab")
        times = np.array( times )
        print times
        pylab.plot( times[:,0], 'b-o' )
        if options.compare:
            pylab.plot( times[:,1], 'r-s' )
        else:
            del legends[1]

        print legends

        ax = pylab.axis()
        y2 = 0.5 * (ax[3] - ax[2])
        xrng = range( len( nnzs ) )
        for ii in xrng:
            yy = y2 + 0.4 * (ax[3] - ax[2])\
                 * np.sin( ii * 2 * np.pi / (len( xrng ) - 1) )
            if options.compare:
                pylab.text( ii+0.02, yy,
                            '%s\n%.2e err_umf\n%.2e err_sp'
                            % (sizes[ii], np.sum( errors[ii][0,:] ),
                               np.sum( errors[ii][1,:] )) )
            else:
                pylab.text( ii+0.02, yy,
                            '%s\n%.2e err_umf'
                            % (sizes[ii], np.sum( errors[ii][0,:] )) )
            pylab.plot( [ii, ii], [ax[2], ax[3]], 'k:' )

        pylab.xticks( xrng, ['%d' % (nnzs[ii] ) for ii in xrng] )
        pylab.xlabel( 'nnz' )
        pylab.ylabel( 'time [s]' )
        pylab.legend( legends )
        pylab.axis( [ax[0] - 0.05, ax[1] + 1, ax[2], ax[3]] )
        pylab.show()

if __name__ == '__main__':
    main()
fc25356354bc680cf49d82450ed1864df13bc7cb
18ccaa1160f49f0d91f1d9dc376f860aed8a9c2a
/tracpro/groups/tests/test_middleware.py
170e207ec77587705841395a880fa174d72a1d05
[ "BSD-3-Clause" ]
permissive
caktus/tracpro
bb6033b170b7a77cf9ac76b1be2779b71afa80e0
368f43e666d3c718843dffe934ba35ca859ebaf7
refs/heads/develop
2020-12-24T22:06:21.341755
2016-01-22T13:16:29
2016-01-22T13:16:29
50,186,576
0
0
null
2016-01-22T14:38:29
2016-01-22T14:38:28
null
UTF-8
Python
false
false
8,083
py
from django.contrib.auth.models import AnonymousUser
from django.test import RequestFactory

from tracpro.test import factories
from tracpro.test.cases import TracProTest

from ..middleware import UserRegionsMiddleware
from ..models import Region


class TestUserRegionsMiddleware(TracProTest):

    def setUp(self):
        super(TestUserRegionsMiddleware, self).setUp()
        self.middleware = UserRegionsMiddleware()
        self.org = factories.Org()
        self.user = factories.User()

    def get_request(self, **kwargs):
        request_kwargs = {'HTTP_HOST': "{}.testserver".format(self.org.subdomain)}
        request = RequestFactory().get("/", **request_kwargs)
        for key, value in kwargs.items():
            setattr(request, key, value)
        return request

    def make_regions(self):
        """Create a collection of nested regions."""
        self.region_uganda = factories.Region(
            org=self.org, name="Uganda")
        self.region_kampala = factories.Region(
            org=self.org, name="Kampala", parent=self.region_uganda)
        self.region_makerere = factories.Region(
            org=self.org, name="Makerere", parent=self.region_kampala)
        self.region_entebbe = factories.Region(
            org=self.org, name="Entebbe", parent=self.region_uganda)
        self.region_kenya = factories.Region(
            org=self.org, name="Kenya")
        self.region_nairobi = factories.Region(
            org=self.org, name="Nairobi", parent=self.region_kenya)
        self.region_mombasa = factories.Region(
            org=self.org, name="Mombasa", parent=self.region_kenya)
        self.region_inactive = factories.Region(
            org=self.org, name="Inactive", parent=self.region_nairobi,
            is_active=False)
        return Region.get_all(self.org)

    def test_variables_set(self):
        """Middleware should set several commonly-used region variables."""
        request = self.get_request(user=self.user, org=self.org, session={})
        self.middleware.process_request(request)
        self.assertTrue(hasattr(request, 'region'))
        self.assertTrue(hasattr(request, 'include_subregions'))
        self.assertTrue(hasattr(request, 'user_regions'))
        self.assertTrue(hasattr(request, 'data_regions'))

    def test_user_regions__unauthenticated(self):
        """User regions should be set to null for unauthenticated users."""
        request = self.get_request(user=AnonymousUser(), org=self.org)
        self.middleware.set_user_regions(request)
        self.assertIsNone(request.user_regions)

    def test_user_regions__no_org(self):
        """User regions should be set to null for non-org views."""
        request = self.get_request(user=self.user, org=None)
        self.middleware.set_user_regions(request)
        self.assertIsNone(request.user_regions)

    def test_user_regions(self):
        """User regions should be set to the value of get_all_regions."""
        self.make_regions()
        self.region_kenya.users.add(self.user)
        request = self.get_request(user=self.user, org=self.org)
        self.middleware.set_user_regions(request)
        self.assertEqual(
            set(request.user_regions),
            set([self.region_kenya, self.region_nairobi, self.region_mombasa]))

    def test_include_subregions__default(self):
        """If key is not in the session, should default to True."""
        request = self.get_request(session={})
        self.middleware.set_include_subregions(request)
        self.assertTrue(request.include_subregions)

    def test_include_subregions__yes(self):
        """include_subregions should be retrieved from the session."""
        request = self.get_request(session={'include_subregions': True})
        self.middleware.set_include_subregions(request)
        self.assertTrue(request.include_subregions)

    def test_include_subregions__no(self):
        """include_subregions should be retrieved from the session."""
        request = self.get_request(session={'include_subregions': False})
        self.middleware.set_include_subregions(request)
        self.assertFalse(request.include_subregions)

    def test_data_regions__no_region(self):
        """If there is no current region, data_regions should be None."""
        request = self.get_request(user=self.user, region=None)
        self.middleware.set_data_regions(request)
        self.assertIsNone(request.data_regions)

    def test_data_regions__include_subregions(self):
        """Include all subregions user has access to if include_subregions is True."""
        self.make_regions()
        user_regions = Region.objects.filter(pk__in=(
            self.region_uganda.pk, self.region_kenya.pk, self.region_nairobi.pk))
        request = self.get_request(
            user=self.user, region=self.region_kenya, include_subregions=True,
            user_regions=user_regions)
        self.middleware.set_data_regions(request)
        self.assertEqual(
            set(request.data_regions),
            set([self.region_kenya, self.region_nairobi]))

    def test_data_regions__exclude_subregions(self):
        """Include only the current region if include_subregions is False."""
        self.make_regions()
        user_regions = Region.objects.filter(pk__in=(
            self.region_uganda.pk, self.region_kenya.pk, self.region_nairobi.pk))
        request = self.get_request(
            user=self.user, region=self.region_kenya, include_subregions=False,
            user_regions=user_regions)
        self.middleware.set_data_regions(request)
        self.assertEqual(
            set(request.data_regions),
            set([self.region_kenya]))

    def test_region__unauthenticated(self):
        """Current region should be None for an unauthenticated user."""
        request = self.get_request(user=AnonymousUser(), org=self.org)
        self.middleware.set_region(request)
        self.assertIsNone(request.region)

    def test_region__no_org(self):
        """Current region should be None if there is no current org."""
        request = self.get_request(user=self.user, org=None)
        self.middleware.set_region(request)
        self.assertIsNone(request.region)

    def test_region__not_set__admin(self):
        """If region_id is not in the session, admin will see All Regions."""
        self.make_regions()
        self.org.administrators.add(self.user)
        user_regions = Region.objects.filter(pk__in=(
            self.region_uganda.pk, self.region_kenya.pk, self.region_nairobi.pk))
        request = self.get_request(
            user=self.user, org=self.org, session={},
            user_regions=user_regions)
        self.middleware.set_region(request)
        self.assertIsNone(request.region)

    def test_region__not_set(self):
        """If region_id is not in the session, user will see first of their regions."""
        self.make_regions()
        user_regions = Region.objects.filter(pk=self.region_kenya.pk)
        request = self.get_request(
            user=self.user, org=self.org, session={},
            user_regions=user_regions)
        self.middleware.set_region(request)
        self.assertEqual(request.region, self.region_kenya)

    def test_region__not_in_user_regions(self):
        """If region is not in user regions, return the first of the user's regions."""
        self.make_regions()
        user_regions = Region.objects.filter(pk=self.region_kenya.pk)
        request = self.get_request(
            user=self.user, org=self.org,
            session={'region_id': self.region_nairobi.pk},
            user_regions=user_regions)
        self.middleware.set_region(request)
        self.assertEqual(request.region, self.region_kenya)

    def test_region(self):
        self.make_regions()
        user_regions = Region.objects.filter(pk=self.region_kenya.pk)
        request = self.get_request(
            user=self.user, org=self.org,
            session={'region_id': self.region_kenya.pk},
            user_regions=user_regions)
        self.middleware.set_region(request)
        self.assertEqual(request.region, self.region_kenya)
7e60a4df9930178e0ae0a8e732141a2219d3acd4
a0cbbc57dd1b583ab66ce37ad8c6970e74a600ba
/raylab/policy/modules/model/stochastic/single.py
0aa460ac8354ed4246fca21f0ef0ac8245a399ee
[ "MIT" ]
permissive
GapData/raylab
ccf6c39ea20d5568561207d92a4b9097657fb909
c5e862334dc1f29a09b42286ddcc40e72c6eb3a2
refs/heads/master
2022-12-19T07:09:45.799180
2020-09-29T17:09:54
2020-09-29T17:09:54
null
0
0
null
null
null
null
UTF-8
Python
false
false
8,157
py
"""NN modules for stochastic dynamics estimation.""" from dataclasses import dataclass from typing import List from typing import Tuple import torch import torch.nn as nn from gym.spaces import Box from torch import Tensor import raylab.torch.nn as nnx import raylab.torch.nn.distributions as ptd from raylab.policy.modules.networks.mlp import StateActionMLP from raylab.utils.types import TensorDict SampleLogp = Tuple[Tensor, Tensor] class StochasticModel(nn.Module): """Represents a stochastic model as a conditional distribution module.""" def __init__( self, params_module: nn.Module, dist_module: ptd.ConditionalDistribution ): super().__init__() self.params = params_module self.dist = dist_module def forward(self, obs, action) -> TensorDict: # pylint:disable=arguments-differ return self.params(obs, action) @torch.jit.export def sample(self, params: TensorDict, sample_shape: List[int] = ()) -> SampleLogp: """ Generates a sample_shape shaped sample or sample_shape shaped batch of samples if the distribution parameters are batched. Returns a (sample, log_prob) pair. """ return self.dist.sample(params, sample_shape) @torch.jit.export def rsample(self, params: TensorDict, sample_shape: List[int] = ()) -> SampleLogp: """ Generates a sample_shape shaped reparameterized sample or sample_shape shaped batch of reparameterized samples if the distribution parameters are batched. Returns a (rsample, log_prob) pair. """ return self.dist.rsample(params, sample_shape) @torch.jit.export def log_prob(self, next_obs: Tensor, params: TensorDict) -> Tensor: """ Returns the log probability density/mass function evaluated at `next_obs`. """ return self.dist.log_prob(next_obs, params) @torch.jit.export def cdf(self, next_obs: Tensor, params: TensorDict) -> Tensor: """Returns the cumulative density/mass function evaluated at `next_obs`.""" return self.dist.cdf(next_obs, params) @torch.jit.export def icdf(self, prob, params: TensorDict) -> Tensor: """Returns the inverse cumulative density/mass function evaluated at `prob`.""" return self.dist.icdf(prob, params) @torch.jit.export def entropy(self, params: TensorDict) -> Tensor: """Returns entropy of distribution.""" return self.dist.entropy(params) @torch.jit.export def perplexity(self, params: TensorDict) -> Tensor: """Returns perplexity of distribution.""" return self.dist.perplexity(params) @torch.jit.export def reproduce(self, next_obs, params: TensorDict) -> SampleLogp: """Produce a reparametrized sample with the same value as `next_obs`.""" return self.dist.reproduce(next_obs, params) @torch.jit.export def deterministic(self, params: TensorDict) -> SampleLogp: """ Generates a deterministic sample or batch of samples if the distribution parameters are batched. Returns a (rsample, log_prob) pair. 
""" return self.dist.deterministic(params) class ResidualMixin: """Overrides StochasticModel interface to model state transition residuals.""" # pylint:disable=missing-function-docstring,not-callable def forward(self, obs: Tensor, action: Tensor) -> TensorDict: params = self.params(obs, action) params["obs"] = obs return params @torch.jit.export def sample(self, params: TensorDict, sample_shape: List[int] = ()) -> SampleLogp: res, log_prob = self.dist.sample(params, sample_shape) return params["obs"] + res, log_prob @torch.jit.export def rsample(self, params: TensorDict, sample_shape: List[int] = ()) -> SampleLogp: res, log_prob = self.dist.rsample(params, sample_shape) return params["obs"] + res, log_prob @torch.jit.export def log_prob(self, next_obs: Tensor, params: TensorDict) -> Tensor: return self.dist.log_prob(next_obs - params["obs"], params) @torch.jit.export def cdf(self, next_obs: Tensor, params: TensorDict) -> Tensor: return self.dist.cdf(next_obs - params["obs"], params) @torch.jit.export def icdf(self, prob, params: TensorDict) -> Tensor: residual = self.dist.icdf(prob, params) return params["obs"] + residual @torch.jit.export def reproduce(self, next_obs, params: TensorDict) -> SampleLogp: sample_, log_prob_ = self.dist.reproduce(next_obs - params["obs"], params) return params["obs"] + sample_, log_prob_ @torch.jit.export def deterministic(self, params: TensorDict) -> SampleLogp: sample, log_prob = self.dist.deterministic(params) return params["obs"] + sample, log_prob class DynamicsParams(nn.Module): """Neural network mapping state-action pairs to distribution parameters. Args: encoder: Module mapping state-action pairs to 1D features params: Module mapping 1D features to distribution parameters """ def __init__(self, encoder: nn.Module, params: nn.Module): super().__init__() self.encoder = encoder self.params = params def forward(self, obs, actions): # pylint:disable=arguments-differ return self.params(self.encoder(obs, actions)) @dataclass class MLPModelSpec(StateActionMLP.spec_cls): """Specifications for stochastic mlp model network. Inherits parameters from `StateActionMLP.spec_cls`. Args: units: Number of units in each hidden layer activation: Nonlinearity following each linear layer delay_action: Whether to apply an initial preprocessing layer on the observation before concatenating the action to the input. standard_scaler: Whether to transform the inputs of the NN using a standard scaling procedure (subtract mean and divide by stddev). The transformation mean and stddev should be fitted during training and used for both training and evaluation. fix_logvar_bounds: Whether to use fixed or dynamically adjusted bounds for the log-scale outputs of the network. input_dependent_scale: Whether to parameterize the Gaussian standard deviation as a function of the state and action """ fix_logvar_bounds: bool = True input_dependent_scale: bool = True class MLPModel(StochasticModel): """Stochastic model with multilayer perceptron state-action encoder. 
Attributes: params: NN module mapping obs-act pairs to obs dist params dist: NN module implementing the distribution API encoder: NN module used in `params` to map obs-act pairs to vector embeddings """ spec_cls = MLPModelSpec def __init__(self, obs_space: Box, action_space: Box, spec: MLPModelSpec): encoder = StateActionMLP(obs_space, action_space, spec) params = nnx.NormalParams( encoder.out_features, obs_space.shape[0], input_dependent_scale=spec.input_dependent_scale, bound_parameters=not spec.fix_logvar_bounds, ) if spec.fix_logvar_bounds: params.max_logvar.fill_(2) params.min_logvar.fill_(-20) params = DynamicsParams(encoder, params) dist = ptd.Independent(ptd.Normal(), reinterpreted_batch_ndims=1) super().__init__(params, dist) # Can only assign modules and parameters after calling nn.Module.__init__ self.encoder = encoder def initialize_parameters(self, initializer_spec: dict): """Initialize all encoder parameters. Args: initializer_spec: Dictionary with mandatory `name` key corresponding to the initializer function name in `torch.nn.init` and optional keyword arguments. """ self.encoder.initialize_parameters(initializer_spec) class ResidualMLPModel(ResidualMixin, MLPModel): """Residual stochastic multilayer perceptron model."""
59128387488db0592ddb5fef863061a8952d1da3
929cdbe211fbf254e1ec8122f9b48fa32520232c
/analysisflow.py
0f35da16f85c1ab0b53e1d567b3def8ec7103f46
[]
no_license
arizzi/nail
c8edec306628cecd269ad9d4241100afdbf6a7fc
a5ba9aed1bcc266cd9d9a36167ce66e51d851e8f
refs/heads/master
2023-05-11T15:55:34.038861
2023-05-05T12:56:42
2023-05-05T12:56:42
162,547,201
3
1
null
2023-02-22T15:40:31
2018-12-20T08:09:10
Python
UTF-8
Python
false
false
3,501
py
from .nail import *

flow.SetAlias("n(.*)", "\\1.size()", defaultPersitency=True)
flow.SetAlias(
    "(.*)_p4",
    "{TLorentzVector ret; ret.SetPtEtaPhiM(\\1_pt,\\1_eta,\\1_phi,\\1_mass); return ret;}",
    defaultPersistency=False)
# SubCollection action
flow.SetAlias("SelectedMuon_(.*)([\.*\])", "Muon_\1[SelectedMuon[\2]]")

flow = SampleProcessing("")
# cut values should not be hardcoded below but rather declared here so that scans and optimizations are possible
flow.DefaultConfig(muIsoCut=0.13, muIdCut=3, muPtCut=25)

# Higgs to mumu reconstruction
# Maps to plain RDF VecOps
flow.DefineCollAttr("Muon_id", "Muon_tightId*3+Muon_looseId")
# this should generate some kind of wrapper/ref that can be used as the parent collection
flow.SubCollection("SelectedMuon", "Muon",
                   sel="Muon_iso < muIsoCut && Muon_id > muIdCut && Muon_pt > muPtCut")
flow.Filter("twoOppositeSignMuons",
            "nSelectedMuon==2 && SelectedMuon_charge[0]*SelectedMuon_charge[1] < 0")
# p4 should be handled somehow ... any syntax is ok such as p4(SelectedMuon[0]) or _p4 or .p4 etc..
flow.Define("Higgs", "p4at(SelectedMuon,0)+p4at(SelectedMuon,1)",
            requires=["twoOppositeSignMuons"])
# the following could work
# define p4at(x,y) ROOT::Math::PtEtaPhiMVector(x##_pt[y] , x##_eta[y], x##_phi[y], x##_mass[y])
# define p4(x) ROOT::Math::PtEtaPhiMVector(x##_pt , x##_eta, x##_phi, x##_mass)

# VBF Jets kinematics
flow.DefaultConfig(jetPtCut=25)
flow.SubCollection("SelectedJet", "Jet",
                   "Jet_pt > jetPtCut && (Jet_muonIdx1 == -1 || Muon_iso[Jet_muonIdx1] > muIsoCut || Muon_id[Jet_muonIdx1] > 0")
flow.Filter("twoJets", "nSelectedJet>=2")
flow.Define("Qjet1", "SelectedJet[0].p4()", requires=["twoJets"])
flow.Define("Qjet2", "SelectedJet[1].p4()", requires=["twoJets"])
flow.Define("qq", "Qjet1+Qjet2")
flow.Define("Mqq", "qq.M()")
flow.Define("qq_pt", "qq.Pt()")
flow.Define("qqDeltaEta", "TMath::Abs(Qjet1.Eta()-Qjet2.Eta())")
flow.Define("qqDeltaPhi", "TMath::Abs(Qjet1.DeltaPhi(Qjet2))")

# QQ vs ll kinematic
flow.Define(
    "ll_ystar", "Higgs.Rapidity() - (Qjet1.Rapidity() + Qjet2.Rapidity())")
flow.Define(
    "ll_zstar", " TMath::Abs( ll_ystar/ (Qjet1.Rapidity()-Qjet2.Rapidity() )) ")
flow.Define("DeltaEtaQQSum", "TMath::Abs(Qjet1.Eta()) + TMath::Abs(Qjet2.Eta())")
flow.Define("PhiZQ1", "TMath::Abs(Higgs.DeltaPhi(Qjet1))")
flow.Define("PhiZQ2", "TMath::Abs(Higgs.DeltaPhi(Qjet2))")
flow.Define("EtaHQ1", "TMath::Abs(Higgs.Eta() - Qjet1.Eta())")
flow.Define("EtaHQ2", "TMath::Abs(Higgs.Eta() - Qjet2.Eta())")
flow.Define("DeltaRelQQ", "(Qjet1+Qjet2).Pt()/( Qjet1.Pt()+Qjet2.Pt())")
flow.Define(
    "Rpt", "(Qjet1+Qjet2+ Higgs).Pt()/( Qjet1.Pt()+Qjet2.Pt() + Higgs.Pt())")

flow.DefaultConfig(higgsMassWindowWidth=15, mQQcut=400, nominalHMass=125.03)
flow.Filter("MassWindow", "abs(Higgs_m-nominalHMass)<higgsMassWindowWidth")
flow.Filter("SideBand", "! MassWindow")
flow.Filter("VBFRegion", "Mqq > mQQcut")
flow.Filter("SignalRegion", "VBFRegion && MassWindow")

# flow.Trainable("SBClassifier","evalMVA",["Higgs_pt","Higgs_m","Mqq","Rpt","DeltaRelQQ"],splitMode="TripleMVA",requires="VBFRegion")
print((flow.NeededInputs()))

# flow.AddSystematic("MuScaleUp","Muon_pt","Muon_pt*1.01") #name, target, replacement
# flow.AddSystematic("HMassUncertainityUp","nominalHMass","125.1") #name, target, replacement
# flow.OptimizationScan("MuPtCutScan","muPtCut","30") #name, target, replacement

#from samples import background,signal,data
c43f3fc077de3a17d699c5cb4c8416a0f23c88d5
315450354c6ddeda9269ffa4c96750783963d629
/CMSSW_7_0_4/src/TotemDQMLite/GUI/scripts/.svn/text-base/reco_template_T1_cfg.py.svn-base
f60ffa0be9c54729e058dea21046f4747c66c5f4
[]
no_license
elizamelo/CMSTOTEMSim
e5928d49edb32cbfeae0aedfcf7bd3131211627e
b415e0ff0dad101be5e5de1def59c5894d7ca3e8
refs/heads/master
2021-05-01T01:31:38.139992
2017-09-12T17:07:12
2017-09-12T17:07:12
76,041,270
0
2
null
null
null
null
UTF-8
Python
false
false
2,233
import FWCore.ParameterSet.Config as cms

process = cms.Process("recoT1")

# Specify the maximum events to simulate
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(100)
)

# Configure if you want to detail or simple log information.
# LoggerMax -- detail log info output including: errors.log, warnings.log, infos.log, debugs.log
# LoggerMin -- simple log info output to the standard output (e.g. screen)
process.load("Configuration.TotemCommon.LoggerMin_cfi")

# RawDataSource
process.load('TotemRawData.Readers.RawDataSource_cfi')
#process.source.fileNames.append('/project/gruppo1/totem/IP5_2015/Data/run_EVB-wn10_9261.000.vmeb')
process.source.fileNames.append('$input_file')

# Raw to digi conversion
process.load('TotemCondFormats.DAQInformation.DAQMappingSourceXML_cfi')
process.DAQMappingSourceXML.mappingFileNames.append('TotemCondFormats/DAQInformation/data/t1_all_run2.xml')
process.DAQMappingSourceXML.maskFileNames.append('TotemCondFormats/DAQInformation/test/T1DeadChannelsList_9255_onlyStrips.xml')

# Random number generator service
process.load("Configuration.TotemCommon.RandomNumbers_cfi")

################## STEP 1 process.Raw2DigiProducer*process.TriggerBits
process.load('TotemRawData.RawToDigi.Raw2DigiProducer_cfi')

process.load("RecoTotemT1T2.T1MakeCluster.T1MakeCluster_cfi")
process.t1cluster.T1DigiVfatCollectionLabel = cms.InputTag("Raw2DigiProducer", "t1DataOutput")
process.t1cluster.ActivateDeadChannels = cms.bool(True)

process.load("RecoTotemT1T2.T1RecHit.T1RecHit_cfi")
process.t1rechit.T1DigiWireCollectionLabel = cms.InputTag("Raw2DigiProducer", "t1DataOutput")

process.load("RecoTotemT1T2.T1RoadProducer.T1RoadProducer_cfi")
process.t1roads.Alignment = cms.bool(True)

process.load("RecoTotemT1T2.T1TrackProducer2.T1TrackProducer2_cfi")

# Configure the output module (save the result in a file)
process.output = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('$output_file'),
    outputCommands = cms.untracked.vstring('keep *')
)

process.path = cms.Path(
    process.Raw2DigiProducer
    *process.t1cluster
    *process.t1rechit
#    *process.t1roads
#    *process.t1tracks2
)

process.outpath = cms.EndPath(process.output)
b4e990d93bfd4a2916201a75c53557884579150a
f62fd455e593a7ad203a5c268e23129473d968b6
/python-watcherclient-1.0.0/watcherclient/osc/plugin.py
5885de3b4873319fa9e70d64baded41315c15e6c
[ "Apache-2.0" ]
permissive
MinbinGong/OpenStack-Ocata
5d17bcd47a46d48ff9e71e2055f667836174242f
8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3
refs/heads/master
2021-06-23T05:24:37.799927
2017-08-14T04:33:05
2017-08-14T04:33:05
99,709,985
0
2
null
2020-07-22T22:06:22
2017-08-08T15:48:44
Python
UTF-8
Python
false
false
1,854
py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from osc_lib import utils

LOG = logging.getLogger(__name__)

DEFAULT_API_VERSION = '1'
API_VERSION_OPTION = 'os_infra_optim_api_version'
API_NAME = 'infra-optim'
API_VERSIONS = {
    '1': 'watcherclient.v1.client.Client',
}


def make_client(instance):
    """Returns an infra-optim service client."""
    infraoptim_client_class = utils.get_client_class(
        API_NAME,
        instance._api_version[API_NAME],
        API_VERSIONS)
    LOG.debug('Instantiating infraoptim client: %s', infraoptim_client_class)

    client = infraoptim_client_class(
        os_watcher_api_version=instance._api_version[API_NAME],
        session=instance.session,
        region_name=instance._region_name,
    )

    return client


def build_option_parser(parser):
    """Hook to add global options."""
    parser.add_argument('--os-infra-optim-api-version',
                        metavar='<infra-optim-api-version>',
                        default=utils.env(
                            'OS_INFRA_OPTIM_API_VERSION',
                            default=DEFAULT_API_VERSION),
                        help=('Watcher API version, default=' +
                              DEFAULT_API_VERSION +
                              ' (Env: OS_INFRA_OPTIM_API_VERSION)'))
    return parser
b13aec275da4151009697accac9711e4949a705d
76f549c062600a0c713315a9a7361ebb111178f8
/Taller/Preguntas/PrimeraPregunta.py
329f447c04d5a3317beecceb811b7df4bb35473d
[]
no_license
jorszs/AI
f612f26537fc3563dd2837c8f67801f091f7e3a0
05a839e6e115e7c6c9378e84d5ac7f50afe2870d
refs/heads/master
2020-03-11T17:57:32.555978
2018-06-07T01:48:36
2018-06-07T01:48:36
130,162,558
0
1
null
null
null
null
UTF-8
Python
false
false
1,335
py
import urllib3


def getNameNodes():
    i = 0
    res = {}
    archivo = open('links.csv', 'rt')
    for linea in archivo:
        k = linea.replace(' ', '')
        k = k.replace('\n', '')
        if i > 0:
            j = k.split('.')
            if j[0] in res:
                res[j[0]].append(k)
            else:
                res[j[0]] = [k]
        i += 1
    archivo.close()
    return res


def getDataWeb(url):
    http = urllib3.PoolManager()
    r = http.request('GET', url)
    r.status
    return r.data


def makeArchivos(archivos):
    base = 'elib.zib.de/pub/mp-testdata/tsp/tsplib/tsp/'
    for k, v in archivos.items():
        for e in v:
            data = str(getDataWeb(base + e))
            a = data.replace('\\n', ',')
            #b = a.replace('\\', '')
            j = a.split(',')
            if len(e.split('.')) > 2:
                # capture the optimum
                f = open('archivos/' + k + '.opt' + '.txt', 'w')
                for elem in j:
                    f.write(elem + '\n')
                f.close()
            else:
                f = open('archivos/' + k + '.txt', 'w')
                for elem in j:
                    f.write(elem + '\n')
                f.close()


if __name__ == "__main__":
    archivos = getNameNodes()
    #print(archivos)
    makeArchivos(archivos)
f514f0c972565ebfc8022902b1abcc0fa242ca14
9d07335de5a17453bf8ae290d70993d7b20dddcd
/.history/dice_20210223203524.py
a9e39059915547dad83aa8fbce4c3cc0fedfd011
[]
no_license
wh-debug/Matplotlib
8d12291cd4135b3b42c185e6700f22c627ddb046
b4f5bf63d977620f799d953c67b262c75344a1cb
refs/heads/master
2023-03-14T10:09:33.602492
2021-02-23T13:51:21
2021-02-23T13:51:21
340,374,612
0
0
null
null
null
null
UTF-8
Python
false
false
672
py
'''
Author: your name
Date: 2021-02-23 20:07:30
LastEditTime: 2021-02-23 20:35:24
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \Matplotlib\dice.py
'''
from make_plotly import Die
import matplotlib.pyplot as plt

x_values = [1, 2, 3, 4, 5, 6]
y_values = []
die = Die()

# TODO: create an empty list and store the roll results in it
results = []
for roll_num in range(1000):
    result = die.roll()
    results.append(result)

frequencies = []
for value in range(1, die.num_sides+1):
    # count() tallies how many times each face value appears in the results list
    frequency = results.count(value)
    frequencies.append(frequency)

print(frequencies)
3b9283807f9a633e9ca03ea36b3db90607bb9388
5063587053951fc1dc558c657d06e0b99187baf5
/electrumx/server/controller.py
6c449b6927f07497c4a7bd4b643a57b2177d5729
[ "MIT" ]
permissive
Japangeek/electrumx
04cbd7f793afe9fa2dff8adad8e7900f4a80b279
a4ea34c6fb9bc887afb19779bde107d97006d8b7
refs/heads/master
2020-05-15T11:46:52.339001
2019-04-18T13:29:37
2019-04-18T13:29:37
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,092
py
# Copyright (c) 2016-2018, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.

from asyncio import Event

from aiorpcx import _version as aiorpcx_version, TaskGroup

import electrumx
from electrumx.lib.server_base import ServerBase
from electrumx.lib.util import version_string
from electrumx.server.db import DB
from electrumx.server.mempool import MemPool, MemPoolAPI
from electrumx.server.session import SessionManager


class Notifications(object):
    # hashX notifications come from two sources: new blocks and
    # mempool refreshes.
    #
    # A user with a pending transaction is notified after the block it
    # gets in is processed.  Block processing can take an extended
    # time, and the prefetcher might poll the daemon after the mempool
    # code in any case.  In such cases the transaction will not be in
    # the mempool after the mempool refresh.  We want to avoid
    # notifying clients twice - for the mempool refresh and when the
    # block is done.  This object handles that logic by deferring
    # notifications appropriately.

    def __init__(self):
        self._touched_mp = {}
        self._touched_bp = {}
        self._highest_block = -1

    async def _maybe_notify(self):
        tmp, tbp = self._touched_mp, self._touched_bp
        common = set(tmp).intersection(tbp)
        if common:
            height = max(common)
        elif tmp and max(tmp) == self._highest_block:
            height = self._highest_block
        else:
            # Either we are processing a block and waiting for it to
            # come in, or we have not yet had a mempool update for the
            # new block height
            return
        touched = tmp.pop(height)
        for old in [h for h in tmp if h <= height]:
            del tmp[old]
        for old in [h for h in tbp if h <= height]:
            touched.update(tbp.pop(old))
        await self.notify(height, touched)

    async def notify(self, height, touched):
        pass

    async def start(self, height, notify_func):
        self._highest_block = height
        self.notify = notify_func
        await self.notify(height, set())

    async def on_mempool(self, touched, height):
        self._touched_mp[height] = touched
        await self._maybe_notify()

    async def on_block(self, touched, height):
        self._touched_bp[height] = touched
        self._highest_block = height
        await self._maybe_notify()


class Controller(ServerBase):
    '''Manages server initialisation and shutdown.

    Servers are started once the mempool is synced after the block
    processor first catches up with the daemon.
    '''

    async def serve(self, shutdown_event):
        '''Start the RPC server and wait for the mempool to synchronize.  Then
        start serving external clients.
        '''
        if not (0, 15, 0) <= aiorpcx_version < (0, 16):
            raise RuntimeError('aiorpcX version 0.15.x is required')

        env = self.env
        min_str, max_str = env.coin.SESSIONCLS.protocol_min_max_strings()
        self.logger.info(f'software version: {electrumx.version}')
        self.logger.info(f'aiorpcX version: {version_string(aiorpcx_version)}')
        self.logger.info(f'supported protocol versions: {min_str}-{max_str}')
        self.logger.info(f'event loop policy: {env.loop_policy}')
        self.logger.info(f'reorg limit is {env.reorg_limit:,d} blocks')

        notifications = Notifications()
        Daemon = env.coin.DAEMON
        BlockProcessor = env.coin.BLOCK_PROCESSOR

        daemon = Daemon(env.coin, env.daemon_url)
        db = DB(env)
        bp = BlockProcessor(env, db, daemon, notifications)

        # Set notifications up to implement the MemPoolAPI
        def get_db_height():
            return db.db_height
        notifications.height = daemon.height
        notifications.db_height = get_db_height
        notifications.cached_height = daemon.cached_height
        notifications.mempool_hashes = daemon.mempool_hashes
        notifications.raw_transactions = daemon.getrawtransactions
        notifications.lookup_utxos = db.lookup_utxos
        MemPoolAPI.register(Notifications)
        mempool = MemPool(env.coin, notifications)

        session_mgr = SessionManager(env, db, bp, daemon, mempool,
                                     shutdown_event)

        # Test daemon authentication, and also ensure it has a cached
        # height.  Do this before entering the task group.
        await daemon.height()

        caught_up_event = Event()
        mempool_event = Event()

        async def wait_for_catchup():
            await caught_up_event.wait()
            await group.spawn(db.populate_header_merkle_cache())
            await group.spawn(mempool.keep_synchronized(mempool_event))

        async with TaskGroup() as group:
            await group.spawn(session_mgr.serve(notifications, mempool_event))
            await group.spawn(bp.fetch_and_process_blocks(caught_up_event))
            await group.spawn(wait_for_catchup())
6c201191527104f2d328b58b2ba84caec9c846d3
a5ea93395d8d762caefd129648b2e954754afb00
/examples/6_p_scale_test_Yokoo_Pt.py
fc618a74a4e14089082439c5476fe6df9f86e0e2
[ "Apache-2.0" ]
permissive
SHDShim/pytheos
4295e233dd089d0c9c66218a127d3f099f1d36df
bb86e0ff345efcffb04f08182c09b06b3c54930e
refs/heads/master
2023-03-16T23:23:56.840071
2023-03-11T03:13:23
2023-03-11T03:13:23
93,273,486
7
6
Apache-2.0
2019-11-18T13:11:46
2017-06-03T20:54:46
Python
UTF-8
Python
false
false
1,369
py
# coding: utf-8

# In[1]:

get_ipython().run_line_magic('cat', '0Source_Citation.txt')

# In[2]:

get_ipython().run_line_magic('matplotlib', 'inline')
# %matplotlib notebook # for interactive

# For high dpi displays.

# In[3]:

get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")

# # 0. General note

# This example compares pressure calculated from `pytheos` and original publication for the platinum scale by Yokoo 2009.

# # 1. Global setup

# In[4]:

import matplotlib.pyplot as plt
import numpy as np
from uncertainties import unumpy as unp
import pytheos as eos

# # 3. Compare

# In[5]:

eta = np.linspace(1., 0.60, 21)
print(eta)

# In[6]:

yokoo_pt = eos.platinum.Yokoo2009()

# In[7]:

yokoo_pt.print_equations()

# In[8]:

yokoo_pt.print_equations()

# In[9]:

yokoo_pt.print_parameters()

# In[10]:

v0 = 60.37930856339099

# In[11]:

yokoo_pt.three_r

# In[12]:

v = v0 * (eta)
temp = 3000.

# In[13]:

p = yokoo_pt.cal_p(v, temp * np.ones_like(v))

# <img src='./tables/Yokoo_Pt.png'>

# In[14]:

print('for T = ', temp)
for eta_i, p_i in zip(eta, p):
    print("{0: .3f} {1: .2f}".format(eta_i, p_i))

# It is alarming that even the 300 K isotherm does not match the table value.  The difference is 1%.

# In[15]:

v = yokoo_pt.cal_v(p, temp * np.ones_like(p), min_strain=0.6)
print(1.-(v/v0))
8aad654f743a97284e6607a741abc184b41bf200
25ebc03b92df764ff0a6c70c14c2848a49fe1b0b
/daily/20181014/example_pycomment/pycomment.py
4604d5e1df1248b710f77c3ea0471b933a54d818
[]
no_license
podhmo/individual-sandbox
18db414fafd061568d0d5e993b8f8069867dfcfb
cafee43b4cf51a321f4e2c3f9949ac53eece4b15
refs/heads/master
2023-07-23T07:06:57.944539
2023-07-09T11:45:53
2023-07-09T11:45:53
61,940,197
6
0
null
2022-10-19T05:01:17
2016-06-25T11:27:04
Python
UTF-8
Python
false
false
6,165
py
import sys
import contextlib
from io import StringIO
from lib2to3 import pytree
from lib2to3 import pygram
from lib2to3.pgen2 import driver
from lib2to3.pgen2 import token
from lib2to3.pgen2.parse import ParseError
from lib2to3.fixer_util import Assign, Name, Newline

# utf8 's PUA(https://en.wikipedia.org/wiki/Private_Use_Areas)
SEP = "\U000F0000"
SEP_MARKER = "ZZ{}ZZ".format(SEP)
COMMENT_MARKER = "# =>"
STDOUT_HEADER_MARKER = "# -- stdout --------------------"

default_driver = driver.Driver(pygram.python_grammar_no_print_statement, convert=pytree.convert)


def parse_string(code, parser_driver=default_driver, *, debug=True):
    return parser_driver.parse_string(code, debug=debug)


def parse_file(filename, parser_driver=default_driver, *, debug=True):
    try:
        return parser_driver.parse_file(filename, debug=debug)
    except ParseError as e:
        if "bad input:" not in repr(e):  # work around
            raise
        with open(filename) as rf:
            body = rf.read()
        return parse_string(body + "\n", parser_driver=parser_driver, debug=debug)


def node_name(node):
    # Nodes with values < 256 are tokens. Values >= 256 are grammar symbols.
    if node.type < 256:
        return token.tok_name[node.type]
    else:
        return pygram.python_grammar.number2symbol[node.type]


type_repr = pytree.type_repr


class PyTreeVisitor:
    def visit(self, node):
        method = 'visit_{0}'.format(node_name(node))
        if hasattr(self, method):
            # Found a specific visitor for this node
            if getattr(self, method)(node):
                return
        elif hasattr(node, "value"):  # Leaf
            self.default_leaf_visit(node)
        else:
            self.default_node_visit(node)

    def default_node_visit(self, node):
        for child in node.children:
            self.visit(child)

    def default_leaf_visit(self, leaf):
        pass


def transform_string(source: str):
    t = parse_string(source)
    return transform(t)


def transform_file(fname: str):
    with open(fname) as rf:
        return transform_string(rf.read())


def transform(node):
    t = Transformer()
    t.transform(node)
    return node


class Transformer(PyTreeVisitor):
    marker = COMMENT_MARKER

    def visit_NEWLINE(self, node):
        if node.prefix.lstrip().startswith(self.marker):
            # MEMO: <expr> -> _ = <expr>
            target = node
            while True:
                parent = target.parent
                if parent is None:
                    return
                if type_repr(target.parent.type) == "simple_stmt":
                    break
                target = parent
            eol = target  # target is Leaf("\n")
            target = eol.prev_sibling

            cloned = target.clone()
            cloned.parent = None
            assigned = Assign(Name("_"), cloned)
            assigned.prefix = target.prefix
            target.replace(assigned)

            # MEMO: adding print(SEP_MARKER, _, SEP_MARKER, sep="\n")
            this_stmt = eol.parent
            print_stmt = this_stmt.clone()
            print_stmt.children = []
            print_stmt.append_child(
                Name(
                    "print({ms!r}, repr(_), {me!r}, sep='')".format(
                        ms="{}{}:".format(SEP_MARKER, node.get_lineno()), me=SEP_MARKER
                    )
                )
            )
            print_stmt.prefix = assigned.prefix

            # xxx: for first line
            if not print_stmt.prefix:
                prev_line = assigned.parent.prev_sibling
                if prev_line.type == token.INDENT:
                    print_stmt.prefix = prev_line.value
            print_stmt.append_child(Newline())
            for i, stmt in enumerate(this_stmt.parent.children):
                if stmt == this_stmt:
                    this_stmt.parent.insert_child(i + 1, print_stmt)
                    break

    transform = PyTreeVisitor.visit


def run(sourcefile, out=sys.stdout):
    o = StringIO()
    with contextlib.redirect_stdout(o):
        exec(str(transform_file(sourcefile)))

    result_map = {}
    stdout_outputs = []
    for line in o.getvalue().splitlines():
        if line.startswith(SEP_MARKER) and line.endswith(SEP_MARKER):
            line = line.strip(SEP_MARKER)
            # split only on the first colon; the repr itself may contain colons
            lineno, line = line.split(":", 1)
            result_map[lineno] = line
        else:
            stdout_outputs.append(line)

    i = 0
    with open(sourcefile) as rf:
        import re
        rx = re.compile(COMMENT_MARKER + ".*$")
        for lineno, line in enumerate(rf, 1):
            if line.rstrip() == STDOUT_HEADER_MARKER:
                break
            m = rx.search(line)
            k = str(lineno)
            if m is None or k not in result_map:
                print(line, end="", file=out)
            else:
                print(line[:m.start()] + COMMENT_MARKER, result_map[k], file=out)
                i += 1
    if stdout_outputs:
        print(STDOUT_HEADER_MARKER, file=out)
        for line in stdout_outputs:
            print("# >>", line, file=out)


def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("sourcefile")
    parser.add_argument("--inplace", action="store_true")
    parser.add_argument("--show-only", action="store_true")
    args = parser.parse_args()
    if args.show_only:
        print(str(transform_file(args.sourcefile)))
        from prestring.python.parse import dump_tree
        dump_tree(transform_file(args.sourcefile))
    elif not args.inplace:
        run(args.sourcefile)
    else:
        import tempfile
        import os
        import shutil
        name = None
        try:
            with tempfile.NamedTemporaryFile("w", delete=False) as wf:
                name = wf.name
                run(args.sourcefile, out=wf)
            print("replace: {} -> {}".format(name, args.sourcefile), file=sys.stderr)
            shutil.move(name, args.sourcefile)
        except Exception:
            if os.path.exists(name):
                os.unlink(name)
            raise


if __name__ == "__main__":
    main()
221ab1ad77324845627959d14968b0eed0e8e187
f66016b962e105898ea14982e229bd44f66f32a2
/settings.py
c142f11377dc1d70c39b3451e46f9a4f2ab30a36
[ "MIT" ]
permissive
DerThorsten/pc
d3ceace388dd3460c0133e97b7fba0fde8d1e811
41d7474ceff8de7b95be5d4fbc42a40e89799e34
refs/heads/master
2021-01-12T10:41:47.797694
2016-11-10T21:59:35
2016-11-10T21:59:35
72,621,794
0
0
null
null
null
null
UTF-8
Python
false
false
4,216
py
from collections import OrderedDict

import h5py

from features import registerdFeatureOperators


class Settings(object):
    def __init__(self, settingsDict, predictionSettingsDict=None):
        self.settingsDict = settingsDict
        self.featureBlockShape = tuple(self.settingsDict["setup"]["blockShape"])
        if predictionSettingsDict is not None:
            self.featureBlockShape = tuple(predictionSettingsDict['setup']["blockShape"])
        self.numberOfClasses = self.settingsDict["setup"]["nClasses"]
        self.predictionSettingsDict = predictionSettingsDict
        print(self.settingsDict['setup'])
        self.useBlockF = self.settingsDict['setup'].get("useBlock", None)
        #self.useBlockF = self.settingsDict['setup']['useBlock']
        assert self.useBlockF is not None

    def useTrainingBlock(self, blockIndex, blockBegin, blockEnd):
        if self.useBlockF is not None:
            return self.useBlockF(blockIndex=blockIndex, blockBegin=blockBegin,
                                  blockEnd=blockEnd)
        else:
            return True

    def trainingInstancesNames(self):
        setup = self.settingsDict["setup"]
        return setup['trainingDataNames']

    def predictionInstancesNames(self):
        return self.predictionSettingsDict['predictionInput'].keys()

    def trainignInstancesDataDicts(self):
        setup = self.settingsDict["setup"]
        trainingDataNames = setup['trainingDataNames']
        trainingInstancesSettings = []
        for trainingDataName in trainingDataNames:
            s = self.settingsDict["trainingData"][trainingDataName]
            s['name'] = trainingDataName
            trainingInstancesSettings.append(s)
        return trainingInstancesSettings

    def predictionInstancesDataDicts(self):
        assert self.predictionSettingsDict is not None
        d = self.predictionSettingsDict['predictionInput']
        dicts = []
        for key in d.keys():
            ddict = d[key]
            ddict['name'] = key
            dicts.append(ddict)
        return dicts

    def featureSetttingsList(self):
        return self.settingsDict["setup"]["featureSettings"]

    def getLabelsH5Path(self, instanceName):
        trainingInstanceDataDict = self.settingsDict["trainingData"][instanceName]
        f, d = trainingInstanceDataDict['labels']
        return f, d

    def getDataH5Dsets(self, instanceDataDict, openH5Files):
        dataH5Dsets = OrderedDict()
        for featureSettings in self.featureSetttingsList():
            inputFileName = featureSettings['name']
            print(" ", "inputFile:", inputFileName)

            # get the h5 filename
            dataDict = instanceDataDict['data']
            f, d = dataDict[inputFileName]['file']
            h5File = h5py.File(f, 'r')
            dset = h5File[d]

            # dsets
            dataH5Dsets[inputFileName] = dset

            # remember all files opened
            openH5Files.append(h5File)

        return dataH5Dsets, openH5Files

    def getFeatureOperators(self):
        dataH5Dsets = OrderedDict()
        outerList = []
        maxHaloList = []
        #print("fs0",self.featureSetttingsList()[0])
        #print("fs1",self.featureSetttingsList()[0])
        for featureSettings in self.featureSetttingsList():
            inputFileName = featureSettings['name']
            #print("features for",inputFileName)
            featureOperatorsSettingsList = featureSettings["features"]
            innerList = []
            maxHalo = (0, 0, 0)
            for featureOperatorSettings in featureOperatorsSettingsList:
                #print(featureOperatorSettings)
                fOpName = featureOperatorSettings['type']
                fOpKwargs = featureOperatorSettings['kwargs']
                fOpCls = registerdFeatureOperators[fOpName]
                fOp = fOpCls(**fOpKwargs)
                halo = fOp.halo()
                # materialize the elementwise maximum; a bare map object
                # could not be consumed again on the next iteration
                maxHalo = tuple(map(lambda aa, bb: max(aa, bb), halo, maxHalo))
                innerList.append(fOp)
            outerList.append(innerList)
            maxHaloList.append(maxHalo)
        return outerList, maxHaloList
c31a0e73d6e975d7fadf3b697bac94aa6dd6b066
3c06dc187183b5f78dbe24d38f7a3556b7cc9975
/Python/LC51_NQueens.py
24f5ce90b79b47c78b3abc2259af3dc85e7f029c
[]
no_license
wondershow/CodingTraining
071812ffd34850ce0417b95a91ac39a983fca92d
0250c3764b6e68dfe339afe8ee047e16c45db4e0
refs/heads/master
2021-07-02T22:18:46.774286
2021-06-30T14:08:54
2021-06-30T14:08:54
77,458,117
0
1
null
null
null
null
UTF-8
Python
false
false
1,110
py
from typing import List


class Solution:
    def solveNQueens(self, n: int) -> List[List[str]]:
        def generate_board(pos, n):
            res = []
            for i in range(n):
                line = ["."] * n
                line[pos[i]] = "Q"
                res.append("".join(line))
            return res

        cols, diagonal, anti_diagonal = set(), set(), set()

        def dfs(res, row, n, cur):
            nonlocal cols, diagonal, anti_diagonal
            if row == n:
                res.append(generate_board(cur, n))
                return
            for col in range(n):
                if col in cols or (row + col) in anti_diagonal or (row - col) in diagonal:
                    continue
                cols.add(col)
                anti_diagonal.add(row + col)
                diagonal.add(row - col)
                cur.append(col)
                dfs(res, row + 1, n, cur)
                cur.pop()
                cols.remove(col)
                anti_diagonal.remove(row + col)
                diagonal.remove(row - col)

        res = []
        dfs(res, 0, n, [])
        return res
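A small driver for the solver above (hypothetical, outside the LeetCode harness); n=4 is the smallest interesting case and has exactly two solutions:

if __name__ == "__main__":
    for board in Solution().solveNQueens(4):
        print("\n".join(board))
        print()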
87f86ee5f18ff897da50586e96692fc1a9d89d64
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03371/s901597766.py
123ae25fb816ba52b35bc7528454231feb8593bf
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
532
py
A, B, C, X, Y = map(int, input().split())
value = 0
if 2*C <= A+B:
    # since buying with C is cheaper,
    # we end up buying (max(X, Y)-abs(X-Y))*2 of them
    Cmaisu = (max(X, Y)-abs(X-Y))*2
    value += C * Cmaisu
    if (2*C <= A and X > Y) or (2*C <= B and X < Y):
        # for the remaining abs(X-Y) items too, buy with C if 2C is cheaper
        value += C * abs(X-Y) * 2
    else:
        if X > Y:
            value += A*(X-Y)
        else:
            value += B*(Y-X)
else:
    value += A*X + B*Y
print(value)
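A quick sanity check of the pricing logic above: with input "10 10 1 2 3" (A=10, B=10, C=1, X=2, Y=3), four C-items at cost 4 cover the paired demand and two more C-items at cost 2 beat one B at 10, so the script prints 6.

$ echo "10 10 1 2 3" | python s901597766.py
6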
f6b658b1ddac70cd71d916a2ed089c862e530a4e
f8666599b83d34c861651861cc7db5b3c434fc87
/plotly/validators/scatterpolargl/marker/colorbar/tickformatstop/_templateitemname.py
3aca0890a4cb67b0ff4f477575d146dfbe41dbf8
[ "MIT" ]
permissive
mode/plotly.py
8b66806e88c9f1820d478bab726f0bea81884432
c5a9ac386a40df2816e6c13264dadf14299401e4
refs/heads/master
2022-08-26T00:07:35.376636
2018-09-26T19:08:54
2018-09-26T19:19:31
60,372,968
1
1
MIT
2019-11-13T23:03:22
2016-06-03T19:34:55
Python
UTF-8
Python
false
false
545
py
import _plotly_utils.basevalidators


class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):

    def __init__(
        self,
        plotly_name='templateitemname',
        parent_name='scatterpolargl.marker.colorbar.tickformatstop',
        **kwargs
    ):
        super(TemplateitemnameValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop('edit_type', 'calc'),
            role=kwargs.pop('role', 'info'),
            **kwargs
        )
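A hedged sketch of exercising the validator; validate_coerce is the entry point plotly's generated validators inherit from basevalidators, and the example value is arbitrary:

v = TemplateitemnameValidator()
print(v.validate_coerce('watermark-1'))  # a plain string passes through unchanged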
bf825d15878d7b99d77904e32eb9daf305bfa790
4eaa1b9b08914e0a2cc9276363e489ccef19d3a2
/ch3/guest_list.py
3a09e97b39d325d4f92b191d3cabd2777d74e4f8
[]
no_license
melihcanyardi/Python-Crash-Course-2e-Part-I
69b3b5b3f63cdbd7be6fabd6d4f2ddfd9a3434a3
0c9b250f512985c04b2c0397f3afaa8bf3a57f17
refs/heads/main
2023-03-12T21:43:14.012537
2021-03-03T19:23:41
2021-03-03T19:23:41
344,236,741
0
0
null
null
null
null
UTF-8
Python
false
false
416
py
guest_list = ['Ali', 'Ayşe', 'Mehmet', 'Ahmet']

message = f"Hey {guest_list[0]}, would you like to join me for dinner?"
print(message)

message = f"Hey {guest_list[1]}, would you like to join me for dinner?"
print(message)

message = f"Hey {guest_list[2]}, would you like to join me for dinner?"
print(message)

message = f"Hey {guest_list[3]}, would you like to join me for dinner?"
print(message)
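The same output can come from one loop over the list; an equivalent sketch (the exercise above spells it out message by message, likely on purpose):

for guest in guest_list:
    message = f"Hey {guest}, would you like to join me for dinner?"
    print(message)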
4696d4803fcbd9b7f1fa002caeed6d15ed478d7e
4d0f3e2d7455f80caea978e4e70621d50c6c7561
/Threading/Lock.py
efce0601f22fa1de07581c3637eba0dc6384a431
[]
no_license
mhdr/PythonSamples
66940ee2353872d2947c459e3865be42140329c6
1a9dccc05962033ea02b081a39cd67c1e7b29d0c
refs/heads/master
2020-04-14T01:10:13.033940
2016-05-28T15:33:52
2016-05-28T15:33:52
30,691,539
1
0
null
null
null
null
UTF-8
Python
false
false
164
py
from threading import Lock

# A Lock only synchronizes threads that share the same instance, so it is
# created once at module level instead of inside the function.
lock = Lock()


def print_multi():
    with lock:  # acquired on entry, released on exit, even on error
        print("Hello World")


print_multi()
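The lock only does useful work once several threads contend for it; a sketch under that assumption:

from threading import Thread

# Three threads funnel through the shared module-level lock one at a time.
threads = [Thread(target=print_multi) for _ in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()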
4733bae1eb944dc330c20c4483dd7b1171de45b2
99833651e4a6a0bc1221d577d9fc43b8568abedd
/nltk_contrib/hadoop/tf_idf/tf_map.py
25f49052f7b5472b8a00478c177aac8e3dd514cd
[ "LicenseRef-scancode-unknown-license-reference", "Apache-2.0" ]
permissive
nltk/nltk_contrib
689e2683aa01b120c7473b9a4fc50bc49f014390
95d1806e2f4e89e960b76a685b1fba2eaa7d5142
refs/heads/master
2023-07-31T13:32:47.358897
2022-11-21T18:49:33
2022-11-21T18:49:33
2,530,774
145
127
NOASSERTION
2022-11-21T18:49:34
2011-10-07T05:59:13
Python
UTF-8
Python
false
false
692
py
from hadooplib.mapper import MapperBase


class TFMapper(MapperBase):
    """
    get the filename (one filename per line),
    open the file and count the term frequency
    """

    def map(self, key, value):
        """
        output (word filename, 1) for every word in files

        @param key: None
        @param value: filename
        """
        filename = value.strip()
        if len(filename) == 0:
            return
        # with-statement closes the file and avoids shadowing the builtin
        with open(filename, 'r') as infile:
            for line in infile:
                words = line.strip().split()
                for word in words:
                    self.outputcollector.collect(word + " " + filename, 1)


if __name__ == "__main__":
    TFMapper().call_map()
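If hadooplib follows the usual Hadoop-streaming convention (the mapper reads records from stdin and emits key/value pairs on stdout), a local smoke test could look like the line below; doc1.txt is a hypothetical file in the working directory:

$ echo "doc1.txt" | python tf_map.py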
d68dd9aee38f272a57637402ae90918c73bc1986
641df38bb75077cd8da28b69e38b84af293b5db7
/docassemble_base/setup.py
73a6d2bf9b44a96183a19d6f23282978224b061d
[ "MIT" ]
permissive
bgordo3/docassemble
f19e01f2daf41eb05e2c19b5d4278bdc0d6d3ea5
3ce22e22e818598badc2242038f4e4abc4ee9fde
refs/heads/master
2020-12-26T01:03:14.840009
2016-05-15T13:50:35
2016-05-15T13:50:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,809
py
#!/usr/bin/env python

import os
import sys
from setuptools import setup, find_packages
from fnmatch import fnmatchcase
from distutils.util import convert_path

standard_exclude = ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build', './dist', 'EGG-INFO', '*.egg-info')


def find_package_data(where='.', package='', exclude=standard_exclude,
                      exclude_directories=standard_exclude_directories):
    out = {}
    stack = [(convert_path(where), '', package)]
    while stack:
        where, prefix, package = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if os.path.isdir(fn):
                bad_name = False
                for pattern in exclude_directories:
                    if (fnmatchcase(name, pattern)
                            or fn.lower() == pattern.lower()):
                        bad_name = True
                        break
                if bad_name:
                    continue
                if os.path.isfile(os.path.join(fn, '__init__.py')):
                    if not package:
                        new_package = name
                    else:
                        new_package = package + '.' + name
                    stack.append((fn, '', new_package))
                else:
                    stack.append((fn, prefix + name + '/', package))
            else:
                bad_name = False
                for pattern in exclude:
                    if (fnmatchcase(name, pattern)
                            or fn.lower() == pattern.lower()):
                        bad_name = True
                        break
                if bad_name:
                    continue
                out.setdefault(package, []).append(prefix + name)
    return out


setup(name='docassemble.base',
      version='0.1',
      description=('A python module for assembling documents from templates while automatically querying a user for necessary information.'),
      author='Jonathan Pyle',
      author_email='[email protected]',
      license='MIT',
      url='http://docassemble.org',
      namespace_packages=['docassemble'],
      install_requires=['docassemble', '3to2', 'babel', 'bcrypt', 'blinker', 'cffi', 'fdfgen', 'guess-language-spirit', 'html2text', 'httplib2', 'itsdangerous', 'jellyfish', 'jinja2', 'lxml', 'mako', 'markdown', 'markupsafe', 'mdx-smartypants', 'namedentities==1.5.2', 'passlib', 'pdfminer', 'pillow', 'pip', 'pycparser', 'pycrypto', 'geopy', 'pygments', 'pyjwt', 'pypdf', 'PyPDF2', 'pyrtf-ng', 'python-dateutil', 'pytz', 'pyyaml', 'qrcode', 'six', 'titlecase', 'us', 'wheel'],
      packages=find_packages(),
      zip_safe=False,
      package_data=find_package_data(where='docassemble/base/', package='docassemble.base'),
      )
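A sketch of what find_package_data returns, run from a hypothetical checkout of the repository (the where/package arguments mirror the setup() call above):

# Hedged: prints each subpackage and how many non-Python data files it carries.
data = find_package_data(where='docassemble/base/', package='docassemble.base')
for pkg, files in data.items():
    print("%s: %d data files" % (pkg, len(files)))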
47821cfbba0dbe4c3efe3982af6bf0e12bc36614
8e7a2b9efbc0d25111f01f4cddb781961032685a
/python-1025/python/a_socket/3_ssh/cli.py
a8eb9d81fb94a0cd363d5f6691ffd91955caf960
[]
no_license
Dituohgasirre/python
e044aa2e1fb2233b6ccd59701b834ab01e4e24c2
05f036d2723f75cd89e4412aaed7ee0ba5d3a502
refs/heads/master
2023-06-03T13:50:18.641433
2021-06-17T10:23:40
2021-06-17T10:23:40
366,942,423
0
0
null
null
null
null
UTF-8
Python
false
false
834
py
#!/usr/bin/env python3
import socket

from pargs import parse
from net import Packet

if __name__ == "__main__":
    def main():
        args, opt = parse(['s|srv|1', 'p|port|1'])
        srvIp = opt['srv'] if 'srv' in opt else "3.3.3.3"
        port = int(opt['port']) if 'port' in opt else 9000

        sd = socket.socket(type=socket.SOCK_DGRAM)
        addr = (srvIp, port)
        packet = Packet(sd)

        while True:
            cmd = input("<my network SHELL>: ")
            packet.send(cmd, addr, Packet.DATA)
            if cmd == "exit":
                break
            out = ""
            while True:
                data, addr = packet.recv()
                if data['type'] == Packet.QUIT:
                    break
                out += data['data']
            print(out)

        sd.close()

    main()
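A hedged invocation sketch; I am assuming pargs.parse maps the 's|srv|1' and 'p|port|1' specs to -s and -p style options, and a matching server is listening:

$ ./cli.py -s 3.3.3.3 -p 9000
<my network SHELL>: exit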
aa886a213c5f135a412aba43490ad62764c40613
33836016ea99776d31f7ad8f2140c39f7b43b5fe
/fip_collab/2017_02_24_HCF_pearson/get_linkage_alt.py
81760c80d5b2307eb97b7f5b1d063001511737ce
[]
no_license
earthexploration/MKS-Experimentation
92a2aea83e041bfe741048d662d28ff593077551
9b9ff3b468767b235e7c4884b0ed56c127328a5f
refs/heads/master
2023-03-17T23:11:11.313693
2017-04-24T19:24:35
2017-04-24T19:24:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
8,035
py
import numpy as np
import functions as rr
import reg_functions as rf
from constants import const
from scipy.stats import pearsonr
import h5py
import time
from sklearn.preprocessing import PolynomialFeatures


def analysis(X, response_tot, groups, iscal):
    RpredCV = rf.cv(X[iscal, :], response_tot[iscal], groups[iscal])
    coef = rf.regression(X[iscal, :], response_tot[iscal])
    Rpred = rf.prediction(X, coef)
    return coef, RpredCV, Rpred


def pearson_eval(X, y):
    Nfeat = X.shape[1]
    pvec = np.zeros((Nfeat,))

    for ii in xrange(Nfeat):
        """pearsonr returns tuples with the pearson correlation and the
        P-value (chance of observing the data if the null hypothesis is
        true). I'm going to throw away the p-value"""
        if np.all(X[:, ii] == 1):
            pvec[ii] = 1
        else:
            pvec[ii] = pearsonr(X[:, ii], y)[0]

    return pvec


def optimal_set(X, y, names):
    C = const()
    Nfeat = X.shape[1]

    """obtain pearson correlation scores against the response variable"""
    pvec = pearson_eval(X, y)
    indxv_ = np.argsort(np.abs(pvec))[::-1]

    pvec_f = np.zeros((C['fmax'],))
    names_f = np.zeros((C['fmax'],), dtype="S20")
    support = np.zeros((Nfeat,), dtype='bool')
    indxv = np.zeros((C['fmax']), dtype='int32')

    """start by adding zero vector"""
    pvec_f[0] = pvec[indxv_[0]]
    names_f[0] = names[indxv_[0]]
    indxv[0] = indxv_[0]

    """add vector with highest pearson correlation besides the zero vector"""
    pvec_f[1] = pvec[indxv_[1]]
    names_f[1] = str(names[indxv_[1]])
    support[indxv_[1]] = True
    indxv[1] = indxv_[1]

    c = 1
    for ii in xrange(2, C['fmax']):
        pvecA = np.ones((ii,))
        while True:
            c += 1
            pvecT = pearson_eval(X[:, support], X[:, indxv_[c]])
            pvecA = np.abs(pvecT)
            # if pvecA.max() < 0.6:
            if pvecA.max() < 1.6:
                break
        pvec_f[ii] = pvec[indxv_[c]]
        names_f[ii] = names[indxv_[c]]
        support[indxv_[c]] = True
        indxv[ii] = indxv_[c]

    """we add support for the vector of ones at the end
    to not screw up the calculation"""
    support[indxv_[0]] = True

    return pvec_f, names_f, support, indxv


def preanalysis(loc_tot, cov_tot):
    npc = loc_tot.shape[1]
    ns = loc_tot.shape[0]

    """extract names from mean loc info"""
    mean_only_names = []
    for ii in xrange(npc):
        mean_only_names += ['m%s' % str(ii+1)]

    """extract variance info from covariance matrix"""
    var_only = np.zeros((ns, npc))
    var_only_names = []
    for ii in xrange(npc):
        var_only[:, ii] = cov_tot[:, ii, ii]
        var_only_names += ['c%s_%s' % (str(ii+1), str(ii+1))]

    """extract unique, off-diagonal co-variance info from covariance matrix"""
    nc = (npc**2-npc)/2
    cov_only = np.zeros((ns, nc))
    cov_only_names = []
    c = 0
    for ii in xrange(npc):
        for jj in xrange(ii+1, npc):
            cov_only[:, c] = cov_tot[:, ii, jj]
            cov_only_names += ['c%s_%s' % (str(ii+1), str(jj+1))]
            c += 1

    return loc_tot, var_only, cov_only, \
        mean_only_names, var_only_names, cov_only_names


def get_poly(X_pre, names_pre):
    C = const()

    """get the polynomial features"""
    poly = PolynomialFeatures(C['deg_max'])
    poly.fit(X_pre)
    X = poly.transform(X_pre)

    """get the names of the polynomial features"""
    names = poly.get_feature_names(names_pre)

    return X, names


def prepare(par):
    np.random.seed(0)

    C = const()
    p = C['n_sc']

    f_link = h5py.File("sample_L%s.hdf5" % C['H'], 'r')

    """gather the calibration data"""
    n_tot = len(C['sid'])
    ns_tot = n_tot*p

    groups = np.zeros(ns_tot, dtype='int16')
    response_tot = np.zeros(ns_tot, dtype='float64')
    loc_tot = np.zeros((ns_tot, C['n_pc_max']), dtype='float64')
    cov_tot = np.zeros((ns_tot, C['n_pc_max'], C['n_pc_max']), dtype='float64')
    iscal = np.zeros((ns_tot,), dtype='bool')

    c = 0
    for ii in xrange(n_tot):
        c_ = c + p
        sid = C['sid'][ii]

        """flag elements of the calibration set"""
        if sid in C['sid_cal']:
            iscal[c:c_] = True

        groups[c:c_] = 2*ii+np.round(np.random.random((p,)))

        dset_name = "%s_%s" % (par, sid)
        response_tot[c:c_] = f_link.get(dset_name)[...]

        tmp = f_link.get('samp_%s' % sid)[:, :, :C['n_pc_max']]
        loc_tot[c:c_, :] = np.mean(tmp, 1)
        for jj in xrange(p):
            cov_tot[c+jj, ...] = np.cov(tmp[jj, ...], rowvar=False)

        c = c_

    f_link.close()

    return groups, response_tot, loc_tot, cov_tot, iscal


def linkage(par):
    st = time.time()

    C = const()
    p = C['n_sc']
    n_tot = len(C['sid'])
    ns_tot = n_tot*p

    """create arrays required for linkage creation"""
    precursors = prepare(par)
    groups = precursors[0]
    response_tot = precursors[1]
    loc_tot = precursors[2]
    cov_tot = precursors[3]
    iscal = precursors[4]

    f_reg = h5py.File("regression_results_L%s.hdf5" % C['H'], 'a')
    f_reg.create_dataset('Rsim_%s' % par, data=response_tot)
    f_reg.create_dataset('iscal_%s' % par, data=iscal)
    coef_set = f_reg.create_dataset('coef_%s' % par,
                                    (C['fmax'], C['fmax']),
                                    dtype='float64')
    Rpred_set = f_reg.create_dataset('Rpred_%s' % par,
                                     (C['fmax'], ns_tot),
                                     dtype='float64')
    RpredCV_set = f_reg.create_dataset('RpredCV_%s' % par,
                                       (C['fmax'], p*len(C['sid_cal'])),
                                       dtype='float64')

    """get the polynomial features"""
    tmp = preanalysis(loc_tot, cov_tot)
    var_only = tmp[1]
    cov_only = tmp[2]
    mean_only_names = tmp[3]
    var_only_names = tmp[4]
    cov_only_names = tmp[5]

    # X_pre = np.concatenate((loc_tot, var_only, cov_only), axis=1)
    # names_pre = mean_only_names + var_only_names + cov_only_names
    # X_pre = np.concatenate((loc_tot, var_only), axis=1)
    # names_pre = mean_only_names + var_only_names
    X_pre = loc_tot
    names_pre = mean_only_names

    f_reg.create_dataset('featurenames_%s' % par, data=names_pre)

    X, names = get_poly(X_pre, names_pre)
    print "# deg1 features: " + str(len(names_pre))
    print "# higher deg features: " + str(len(names))

    """perform the pearson correlation"""
    pvec, names_f, support, indxv = optimal_set(X[iscal, :],
                                                response_tot[iscal],
                                                names)

    f_reg.create_dataset('scores_%s' % par, data=pvec)
    f_reg.create_dataset('indxsel_%s' % par, data=indxv)

    """select the most highly correlated features"""
    Xp = X[:, indxv]

    msg = "\ntop 20 scoring features"
    rr.WP(msg, C['wrt_file'])
    for ii in xrange(20):
        msg = "%s: %s" % (names_f[ii], pvec[ii])
        rr.WP(msg, C['wrt_file'])

    """create and evaluate the final linkages"""
    meanc = np.abs(response_tot[iscal]).mean()
    for ii in xrange(C['fmax']):
        coef, RpredCV, Rpred = analysis(Xp[:, :(ii+1)], response_tot,
                                        groups, iscal)
        coef_set[ii, :] = 0
        coef_set[ii, :(ii+1)] = coef
        RpredCV_set[ii, :] = RpredCV
        Rpred_set[ii, :] = Rpred

        err = np.mean(np.abs(RpredCV - response_tot[iscal]))/meanc
        msg = "%s features: cv.mean(): %s" % (str(ii+1), str(err))
        rr.WP(msg, C['wrt_file'])

    f_reg.close()

    timeE = np.round(time.time()-st, 1)
    msg = "regressions and cross-validations completed: %s s" % timeE
    rr.WP(msg, C['wrt_file'])


if __name__ == '__main__':
    par = 'mu'
    linkage(par)
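A hedged run sketch for the script above; it assumes sample_L{H}.hdf5 and the constants/functions/reg_functions modules named in the imports are importable from the working directory:

# 'mu' matches what the __main__ block already runs; results are written
# into regression_results_L{H}.hdf5 alongside the input file.
from get_linkage_alt import linkage
linkage('mu')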
cfcf4c4c948d02a6254d41f9f56773077bb97583
de24f83a5e3768a2638ebcf13cbe717e75740168
/moodledata/vpl_data/59/usersdata/201/47100/submittedfiles/testes.py
ffc9bae8827fb40a5106a1cf09f62f630d98ad23
[]
no_license
rafaelperazzo/programacao-web
95643423a35c44613b0f64bed05bd34780fe2436
170dd5440afb9ee68a973f3de13a99aa4c735d79
refs/heads/master
2021-01-12T14:06:25.773146
2017-12-22T16:05:45
2017-12-22T16:05:45
69,566,344
0
0
null
null
null
null
UTF-8
Python
false
false
288
py
# -*- coding: utf-8 -*-
# START HERE BELOW

r = float(input('Hourly wage: '))
h = float(input('Hours worked in the month: '))
t = r*h
print('%.2f' % t)
inss = 0.08
i = inss*t
print('INSS deduction: %.2f' % i)
sind = 0.05
j = sind*t
print('Union deduction: %.2f' % j)
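Worked numbers for the payroll script above: at 10.00 per hour for 160 hours, gross pay is 1600.00, INSS takes 8% (128.00) and the union 5% (80.00), so a run looks like:

Hourly wage: 10
Hours worked in the month: 160
1600.00
INSS deduction: 128.00
Union deduction: 80.00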
c2db9fed8d3d7953430514b58aa47a57e52a59f2
6acea1c5206052393beb5cba132f40e55b637c11
/doc_curation/scraping/misc_sites/iitk.py
6df37d4e6779f4afcd0c5073d46022a249314662
[ "MIT" ]
permissive
sanskrit-coders/doc_curation
a44afacf68d1711bcebd02c97b30a42b6d82bccc
db330393d3df052c008811f4b442421900e5fa84
refs/heads/master
2023-08-27T09:41:22.784001
2023-08-11T05:40:46
2023-08-11T05:40:46
157,027,340
8
4
MIT
2022-12-08T08:26:49
2018-11-10T22:32:26
Python
UTF-8
Python
false
false
880
py
import logging

import regex

from doc_curation.scraping.html_scraper import souper
from indic_transliteration import sanscript


def dump_item(item_url, outfile_path, title_maker):
    logging.info(item_url)

    def html_fixer(soup):
        souper.tag_replacer(soup=soup, css_selector="table", tag_name="div")
        souper.element_remover(soup=soup, css_selector="div.view-filters")

    def md_fixer(md):
        md = md.replace("।।", " ॥ ")
        # md = md.replace(".", " - ")
        md = md.replace(":", "ः")
        md = md.replace("\n \n", "\n\n")
        # no space inside the quantifier: "{3, 13}" would be matched literally
        md = regex.sub("\n{3,13}", "\n\n", md)
        md = sanscript.transliterate(md, sanscript.IAST, sanscript.DEVANAGARI)
        return md

    souper.dump_text_from_element(url=item_url, outfile_path=outfile_path,
                                  text_css_selector="div.content",
                                  title_maker=title_maker, title_prefix="",
                                  html_fixer=html_fixer, md_fixer=md_fixer,
                                  dry_run=False)
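A hedged call sketch; dump_item's signature comes from the code above, but the URL, output path, and the shape of the title_maker callback are placeholders:

# Hypothetical invocation: scrape one page into a markdown file.
dump_item(
    item_url="https://example.iitk.ac.in/text?ch=1",   # placeholder URL
    outfile_path="/tmp/ch1.md",
    title_maker=lambda *args, **kwargs: "01",          # assumed callback shape
)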