Dataset schema: blob_id string(40) | directory_id string(40) | path string(5–283) | content_id string(40) | detected_licenses sequence(0–41) | license_type string(2 classes) | repo_name string(7–96) | snapshot_id string(40) | revision_id string(40) | branch_name string(58 classes) | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64(12.7k–662M, ⌀ = null) | star_events_count int64(0–35.5k) | fork_events_count int64(0–20.6k) | gha_license_id string(11 classes) | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language string(43 classes) | src_encoding string(9 classes) | language string(1 class) | is_vendor bool(2 classes) | is_generated bool(2 classes) | length_bytes int64(7–5.88M) | extension string(30 classes) | content string(7–5.88M) | authors sequence(1) | author string(0–73)
blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
52b02305f0de1254e5208c5e6fa83a19b37f8baa | 4716a4b59d8ca385d93542b07130fd7e05262552 | /import_tweets_v3.py | 7ae8b15d976ed887341b2fe80234f255bf4784bb | [] | no_license | harshbarclays/Surveillance | 0377b7fc15ef6a43d126b5f5a8c38624c60b1d76 | e77c5168f9c9cc782f2ec9aa75e824601358bcc7 | refs/heads/master | 2021-05-16T04:27:07.853088 | 2019-08-21T00:49:02 | 2019-08-21T00:49:02 | 72,831,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,831 | py |
import tweepy
import pandas as pd
import datetime
def get_tweets_currency(currency_string):
    # Fill the X's with the credentials obtained by
    # following the above mentioned procedure.
    # twitter API info has been deleted to maintain security
    consumer_key = ""
    consumer_secret = ""
    access_key = ""
    access_secret = ""
    # Authorization to consumer key and consumer secret
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    # Access to user's access key and access secret
    auth.set_access_token(access_key, access_secret)
    # Calling api
    api = tweepy.API(auth)
    #searchQuery = 'USDJPY OR USD/JPY'  # Keyword
    searchQuery = currency_string
    print(currency_string)
    tweets_all = tweepy.Cursor(api.search,
                               q=searchQuery,
                               include_entities=True,
                               show_user=True,
                               rpp=400,
                               since="2019-05-13",
                               until="2019-05-18",
                               lang="en").items(400)
    message, favorite_count, retweet_count, created_at, user_name = [], [], [], [], []
    for tweet in tweets_all:
        message.append(tweet.text)
        favorite_count.append(tweet.favorite_count)
        retweet_count.append(tweet.retweet_count)
        created_at.append(tweet.created_at)
        user_name.append(tweet.user.name)
    df = pd.DataFrame({'Message': message,
                       'Favorite Count': favorite_count,
                       'Retweet Count': retweet_count,
                       'Created At': created_at,
                       'Source': user_name})
    print(len(df))
    #df = df[(df['Created At'] >= pd.to_datetime(datetime.date(2019,5,3)))]
    print(len(df))
    return df
| ["[email protected]"] | |
ef7dcf27560b561e80bb4f4a68f159d63bf00127 | bbf1ae079309eca11270422d3f0d259d1515d430 | /numerical-tours/python/nt_solutions/ml_3_classification/exo5.py | 3c0f3f3cda5068fd794b1a41a27c032ac538f66e | [
"BSD-2-Clause"
] | permissive | ZichaoDi/Di_MATLABTool | 5e6a67b613c4bcf4d904ddc47c2744b4bcea4885 | c071291c63685c236f507b2cb893c0316ab6415c | refs/heads/master | 2021-08-11T07:28:34.286526 | 2021-08-04T18:26:46 | 2021-08-04T18:26:46 | 149,222,333 | 9 | 5 | null | null | null | null | UTF-8 | Python | false | false | 743 | py |
sigma_list = np.array( [.1, .5, 1, 4] )
niter = 4000
plt.clf()  # was `plt.clf` (missing parentheses), which is a no-op
for io in np.arange(0, np.size(sigma_list)):
    sigma = sigma_list[io]
    # grad descent
    K = kappa(X, X, sigma)
    tau = .5
    if io == 4:  # note: sigma_list has 4 entries (io = 0..3), so this branch never triggers
        tau = .05
    h = np.zeros((n, 1))
    for i in np.arange(0, niter):
        h = h - tau * nablaF(h, K, y)
    # evaluate on a grid
    K1 = kappa(G, X, sigma)
    Theta = theta(K1.dot(h))
    Theta = Theta.reshape((q, q))
    # Display the classification probability.
    plt.subplot(2, 2, io + 1)
    plt.imshow(Theta.transpose(), origin="lower", extent=[-tmax, tmax, -tmax, tmax])
    plt.plot(X[I, 0], X[I, 1], '.')
    plt.plot(X[J, 0], X[J, 1], '.')
    plt.axis('equal')
    plt.axis('off')
    plt.title('$\sigma=' + str(sigma) + '$')
| ["[email protected]"] | |
47e266d665db77c973d48ba03cb937966bfcbd41 | c733e6b433914a8faba256c7853f5cf2cd39c62a | /Python/Leetcode Daily Practice/unclassified/647. Palindromic Substrings.py | a75c67380d9fa7090153a83f1116d883ea245643 | [] | no_license | YaqianQi/Algorithm-and-Data-Structure | 3016bebcc1f1356b6e5f3c3e588f3d46c276a805 | 2e1751263f484709102f7f2caf18776a004c8230 | refs/heads/master | 2021-10-27T16:29:18.409235 | 2021-10-14T13:57:36 | 2021-10-14T13:57:36 | 178,946,803 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py |
"""
Given a string, your task is to count how many palindromic substrings in this string.
Substrings with different start or end indexes are counted as different
substrings even if they consist of the same characters.
Input: "abc"
Output: 3
Explanation: Three palindromic strings: "a", "b", "c".
"""
class Solution(object):
    def countSubstrings_dp(self, s):
        n = len(s)
        dp = [[False for i in range(n)] for i in range(n)]
        # dp[i][j]: s[i..j] is a palindrome iff its ends match and the
        # inside is trivially short (j - i <= 2) or itself a palindrome.
        res = 0
        for j in range(n):
            for i in range(j + 1):
                if s[i] == s[j] and (j - i <= 2 or dp[i + 1][j - 1]):
                    dp[i][j] = True
                    res += 1
        return res

s = "aaa"
print(Solution().countSubstrings_dp(s))  # was countSubstrings_dfs, which is not defined
| ["[email protected]"] | |
1a81bb986a4589822e0a672a92632b48ceeb9136 | 94f2f89b1f0efc7df4da4945d6a909f7380d63f7 | /SubgroupDiscovery/boolean_expressions.py | 007dfcaf38c183d13dfb92357d25964f6fd2c229 | [] | no_license | MKhairtdinova/CustomerApp | e5978d3a9bfc199b28335ff1c7ae80887f4eac95 | 3e7df7d5d2a8ec09b45bba8e15f0dd425c5df883 | refs/heads/master | 2022-10-31T07:40:04.664648 | 2020-06-17T15:55:47 | 2020-06-17T15:55:47 | 270,341,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,946 | py |
from abc import ABC, abstractmethod
from functools import total_ordering
import copy
import numpy as np
from SubgroupDiscovery import subgroup as sg
class BooleanExpressionBase(ABC):
    def __or__(self, other):
        tmp = copy.copy(self)
        tmp.append_or(other)
        return tmp

    def __and__(self, other):
        tmp = self.__copy__()
        tmp.append_and(other)
        return tmp

    @abstractmethod
    def append_and(self, to_append):
        pass

    @abstractmethod
    def append_or(self, to_append):
        pass

    @abstractmethod
    def __copy__(self):
        pass


@total_ordering
class Conjunction(BooleanExpressionBase):
    def __init__(self, selectors):
        try:
            it = iter(selectors)
            self._selectors = list(it)
        except TypeError:
            self._selectors = [selectors]

    def covers(self, instance):
        # empty description ==> return a list of all '1's
        if not self._selectors:
            return np.full(len(instance), True, dtype=bool)
        # non-empty description
        return np.all([sel.covers(instance) for sel in self._selectors], axis=0)

    def __len__(self):
        return len(self._selectors)

    def __str__(self, open_brackets="", closing_brackets="", and_term=" AND "):
        if not self._selectors:
            return "Dataset"
        attrs = sorted(str(sel) for sel in self._selectors)
        return "".join((open_brackets, and_term.join(attrs), closing_brackets))

    def __repr__(self):
        if hasattr(self, "_repr"):
            return self._repr
        else:
            self._repr = self._compute_repr()
            return self._repr

    def __eq__(self, other):
        return repr(self) == repr(other)

    def __lt__(self, other):
        return repr(self) < repr(other)

    def __hash__(self):
        if hasattr(self, "_hash"):
            return self._hash
        else:
            self._hash = self._compute_hash()
            return self._hash

    def _compute_representations(self):
        self._repr = self._compute_repr()
        self._hash = self._compute_hash()

    def _compute_repr(self):
        if not self._selectors:
            return "True"
        reprs = sorted(repr(sel) for sel in self._selectors)
        return "".join(("(", " and ".join(reprs), ")"))

    def _compute_hash(self):
        return hash(repr(self))

    def _invalidate_representations(self):
        if hasattr(self, '_repr'):
            delattr(self, '_repr')
        if hasattr(self, '_hash'):
            delattr(self, '_hash')

    def append_and(self, to_append):
        if isinstance(to_append, sg.SelectorBase):
            self._selectors.append(to_append)
        elif isinstance(to_append, Conjunction):
            self._selectors.extend(to_append._selectors)
        else:
            try:
                self._selectors.extend(to_append)
            except TypeError:
                self._selectors.append(to_append)
        self._invalidate_representations()

    def append_or(self, to_append):
        raise RuntimeError("Or operations are not supported by a pure Conjunction. Consider using DNF.")

    def pop_and(self):
        return self._selectors.pop()

    def pop_or(self):
        raise RuntimeError("Or operations are not supported by a pure Conjunction. Consider using DNF.")

    def __copy__(self):
        cls = self.__class__
        result = cls.__new__(cls)
        result.__dict__.update(self.__dict__)
        result._selectors = list(self._selectors)
        return result

    @property
    def depth(self):
        return len(self._selectors)


@total_ordering
class Disjunction(BooleanExpressionBase):
    def __init__(self, selectors):
        if isinstance(selectors, (list, tuple)):
            self._selectors = selectors
        else:
            self._selectors = [selectors]

    def covers(self, instance):
        # empty description ==> return a list of all '0's
        if not self._selectors:
            return np.full(len(instance), False, dtype=bool)
        # non-empty description
        return np.any([sel.covers(instance) for sel in self._selectors], axis=0)

    def __len__(self):
        return len(self._selectors)

    def __str__(self, open_brackets="", closing_brackets="", or_term=" OR "):
        if not self._selectors:
            return "Dataset"
        attrs = sorted(str(sel) for sel in self._selectors)
        return "".join((open_brackets, or_term.join(attrs), closing_brackets))

    def __repr__(self):
        if not self._selectors:
            return "True"
        reprs = sorted(repr(sel) for sel in self._selectors)
        return "".join(("(", " or ".join(reprs), ")"))

    def __eq__(self, other):
        return repr(self) == repr(other)

    def __lt__(self, other):
        return repr(self) < repr(other)

    def __hash__(self):
        return hash(repr(self))

    def append_and(self, to_append):
        raise RuntimeError("And operations are not supported by a pure Disjunction. Consider using DNF.")

    def append_or(self, to_append):
        try:
            self._selectors.extend(to_append)
        except TypeError:
            self._selectors.append(to_append)

    def __copy__(self):
        cls = self.__class__
        result = cls.__new__(cls)
        result.__dict__.update(self.__dict__)
        result._selectors = copy.copy(self._selectors)
        return result


class DNF(Disjunction):
    def __init__(self, selectors=None):
        if selectors is None:
            selectors = []
        super().__init__([])
        self.append_or(selectors)

    @staticmethod
    def _ensure_pure_conjunction(to_append):
        if isinstance(to_append, Conjunction):
            return to_append
        elif isinstance(to_append, sg.SelectorBase):  # was bare SelectorBase (NameError)
            return Conjunction(to_append)
        else:
            it = iter(to_append)
            if all(isinstance(sel, sg.SelectorBase) for sel in to_append):
                return Conjunction(it)
            else:
                raise ValueError("DNFs only accept an iterable of pure Selectors")

    def append_or(self, to_append):
        try:
            it = iter(to_append)
            conjunctions = [DNF._ensure_pure_conjunction(part) for part in it]
        except TypeError:
            conjunctions = DNF._ensure_pure_conjunction(to_append)
        super().append_or(conjunctions)

    def append_and(self, to_append):
        conj = DNF._ensure_pure_conjunction(to_append)
        if len(self._selectors) > 0:
            for conjunction in self._selectors:
                conjunction.append_and(conj)
        else:
            self._selectors.append(conj)

    def pop_and(self):
        out_list = [s.pop_and() for s in self._selectors]
        return_val = out_list[0]
        if all(x == return_val for x in out_list):
            return return_val
        else:
            raise RuntimeError("pop_and failed as the result was inconsistent")
| ["[email protected]"] | |
c3aa6f720efe8768439860b38a575f134ac2de29 | 03aa3f1577ed502b2a40a1b9481990b75a541e13 | /web_project/hello/views.py | 8f685c750777d175f0e3fb7471e9380daf5730fc | [] | no_license | nhj6858/my-first-blog | 7963f305c7030cf988a8a2a7dc41d4b125a409ce | 9f99f182e71f520b62e90531f85783ecdad4535b | refs/heads/master | 2022-12-02T12:19:27.000947 | 2020-08-06T08:27:25 | 2020-08-06T08:27:25 | 283,923,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py |
import re
from django.http import HttpResponse
from datetime import datetime
from django.shortcuts import render
from django.shortcuts import redirect
from hello.forms import LogMessageForm
from hello.models import LogMessage
from django.views.generic import ListView
def home(request):
    return render(request, "hello/home.html")

def about(request):
    return render(request, "hello/about.html")

def contact(request):
    return render(request, "hello/contact.html")

def hello_there(request, name):
    return render(
        request, 'hello/hello_there.html',
        {
            'name': name,
            'date': datetime.now()
        }
    )

def log_message(request):
    form = LogMessageForm(request.POST or None)
    if request.method == "POST":
        if form.is_valid():
            message = form.save(commit=False)
            message.log_date = datetime.now()
            message.save()
            return redirect("home")
    else:
        return render(request, "hello/log_message.html", {"form": form})

class HomeListView(ListView):
    """Renders the home page, with a list of all messages."""
    model = LogMessage

    def get_context_data(self, **kwargs):
        context = super(HomeListView, self).get_context_data(**kwargs)
        return context
| ["[email protected]"] | |
82a92a36985c8f9dee7533edec87f1a40f828d1f | c907efd159c655147c11619ef18106eea5abab6a | /user/migrations/0007_auto_20201025_2226.py | 992a87e4b70fe5ef91a81776a22464f078c5dcb3 | [] | no_license | Drstrange007/MessagePosting | 860b065dfb3badbd51ebf2fa51235f6b79ec47d7 | fc69f831929a1883b38899ccb139be163358ea0d | refs/heads/master | 2023-01-04T12:37:21.432152 | 2020-10-27T10:07:43 | 2020-10-27T10:07:43 | 307,179,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py |
# Generated by Django 3.1.1 on 2020-10-25 16:56
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('user', '0006_auto_20201025_2224'),
    ]

    operations = [
        migrations.AlterField(
            model_name='tweet',
            name='parent_tweet_id',
            field=models.IntegerField(),
        ),
    ]
| ["[email protected]"] | |
0f30fd048fe46f8f7d9d4744b2d53594f10f3705 | 03f2263a7872bd7b4bbc790f6bd40fe4bc40546d | /env_facial_recognition_v1/lib/python3.6/tempfile.py | 7b4a93fdcd6666b0335f5e80ff46ad0127244b99 | [] | no_license | tanlull/facial_recognition_v1 | b17715d506b22f6807fecad5d423506f876efcf4 | 214684ea7df95a8d8b3b15e9873220335b7d8520 | refs/heads/master | 2020-07-19T19:11:09.640314 | 2019-09-21T16:16:08 | 2019-09-21T16:16:08 | 206,497,312 | 0 | 0 | null | 2019-09-05T07:03:24 | 2019-09-05T07:03:24 | null | UTF-8 | Python | false | false | 50 | py |
/Users/Anthony/anaconda3/lib/python3.6/tempfile.py
| ["[email protected]"] | |
be89c6f36127b2d02e42f9e96b78a4fe1bc54e8c | 3fcb8097c72ba32e1468016c477e63dfa9a2fa74 | /Point.py | 4c51d32ab03f16b9342cb4952bf5199f57d47616 | [] | no_license | prizraksarvar/ml_test | 7504b5631839afeb56ac9234b9b6f7546694c537 | 57da0fc7483cfc1d42f60f33bbc2d60abd893e20 | refs/heads/master | 2023-04-29T19:42:08.246725 | 2021-03-17T18:37:50 | 2021-03-17T18:37:50 | 347,922,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py |
class Point(object):
"""Точка"""
def __init__(self, y_in=0, x_in=0, color_in=0.5):
self.color = color_in # Цвет точки
self.y = y_in
self.x = x_in
# Функция Установки точки в произвольных координатах
def set_point_position(self, y_in, x_in):
self.y = y_in
self.x = x_in
# Функция получения координат точки
def get_point_position(self):
return self.y, self.x, self.color
# Функция движения точки вниз
def move_point_down(self):
self.y += 1 | [
"sarkhas@test"
] | sarkhas@test |
fdca816fbc46080860edd1a7f1e49542fc3742e8 | 10cdeddc3711e9bb375f9eebb905ffdf89098cf8 | /wsgi_html_get_new.py | fc7746d74c133124629298a84109cbeb6accfbec | [] | no_license | zaeval/sw-project | 4d89a0eb332f50dc0f7a709b58bf23b919ee7df5 | 31aa088b1cf41539da9ac23b053c28a05c1d42c0 | refs/heads/master | 2021-03-19T10:52:48.586537 | 2017-06-13T06:35:59 | 2017-06-13T06:35:59 | 91,997,537 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 453 | py |
from wsgiref.simple_server import make_server
def wsgi_application(environ, start_response):
    print("wsgi handler")
    response = ""
    for key, val in sorted(environ.items()):
        response += "%s : %s\n" % (key, val)
    print(response)
    start_response("200 OK", [('Content-Type', 'text/html; charset=utf-8')])
    return [response.encode()]

print("Start Program")
httpd = make_server("0.0.0.0", 8000, wsgi_application)
httpd.handle_request()
| ["[email protected]"] | |
b3f2770e845535eabd152fab071e294e8f4ec740 | 62494569ad62bae44c13499dc222ffa0301541cb | /SentimentAnalysis/Google_Trends/pytrends_extract_threaded.py | 1b065bbd8ac51fec28487b70e2cb521957496a3e | [] | no_license | rendorHaevyn/Project_WinLife | b1f5c71a338b366cfe094e7aef296bc982e52a2f | ef409e7e56c57a17ccb69f2183ddca42cae0039a | refs/heads/master | 2021-07-09T15:20:03.979369 | 2018-12-09T22:08:28 | 2018-12-09T22:08:28 | 132,147,130 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 4,768 | py |
# -*- coding: utf-8 -*-
"""
Created on Fri May 04 21:41:47 2018
@author: rendorHaevyn
URL: https://github.com/GeneralMills/pytrends
"""
## TODO re-cut interest by 8 minute intervals into interest by 15 minute intervals
## IMPORTS
from __future__ import print_function
from pytrends.request import TrendReq
import pandas as pd
import os
import sys
from dateutil import rrule
from datetime import datetime, timedelta
from threading import Thread
import time
sys.path.insert(0, "C:\\Users\\Admin\\Documents\\GitHub\\Project_WinLife\\SentimentAnalysis\\Google_Trends\\")
import LogThread
## LOAD DATA
OUTDIR = 'C:\\Users\\Admin\\Documents\\GitHub\\Project_WinLife\\SentimentAnalysis\\Google_Trends\\pt_data'
INDIR = 'C:\\Users\\Admin\\Documents\\GitHub\\Project_WinLife\\SentimentAnalysis\\Google_Trends\\pt_inputs'
os.chdir(OUTDIR)
kw_df = pd.read_csv(INDIR + os.sep + 'kw.csv',delimiter='|')
## HACK FOR GOOGLE 429 ERRORS
GGL_HACK = 1
## CONSTANTS
CATEGORY = 107 # INVESTING
PROPERTY = 'news' # SET TO EMPTY, ELSE images, news, youtube or froogle
GEOLOC = '' # SET TO EMPTY, ELSE 2 LETTER COUNTRY ABBREVIATION
NOW = datetime.utcnow() #datetime(2018, 5, 6, 10, 58, 14, 198000) - locking this to current to baseline all coins
WKS_BACK = 12
YR_BACK = NOW - timedelta(weeks=WKS_BACK)
DAY_CNT = WKS_BACK * 7
COIN_CNT = len(kw_df)
# Login to Google. Only need to run this once, the rest of requests will use the same session.
pytrend = TrendReq()
# Populate list of days in prior year
day_lst = list(rrule.rrule(rrule.DAILY, dtstart=YR_BACK, until=NOW))
# Func to use in threading
def get_trend(i,results,coin):
s_tf = day_lst[i].strftime("%Y-%m-%dT00") + ' ' + day_lst[i+1].strftime("%Y-%m-%dT00")
#print('Fetching: coin - {}, day - {}'.format(coin,s_tf))
sys.stdout.write('Fetching: coin - {}, day - {}\r'.format(coin,s_tf))
sys.stdout.flush()
pytrend.build_payload(kw_list = kw
,cat = CATEGORY
,geo = GEOLOC
,gprop = PROPERTY
,timeframe = s_tf
)
iot_df = pytrend.interest_over_time()
iot_df = iot_df.drop(['isPartial'],axis=1)
results[i] = iot_df
# Create empty lists for threads and data frame results
threads = [None] * DAY_CNT
results = [None] * DAY_CNT
coin_trends = [None] * COIN_CNT
df_consolidated = pd.DataFrame()
# Iterate keyword list by coin of interest
for indx,vals in kw_df.iterrows():
# if indx >= 2:
# break
if indx > 0 and GGL_HACK == 1: # Restriction to usurp getting google 429 error - too-many-requests
for i in range(10):
sys.stdout.write('Sleeping for next coin - {} of 30\r'.format(i))
time.sleep(1)
sys.stdout.flush()
if indx >= 0:
kw = vals['kw_lst'].split(',')
# Iterate days in period
coin_trends[indx] = pd.DataFrame()
for i in range(DAY_CNT):
if GGL_HACK == 1: # Restriction to usurp getting google 429 error - too-many-requests
time.sleep(2)
threads[i] = Thread(target=get_trend, args=(i,results,vals['coin']))
threads[i].start()
for i in range(DAY_CNT):
threads[i].join()
coin_trends[indx] = coin_trends[indx].append(results)
coin_trends[indx] = coin_trends[indx].sort_index()
# Export file
coin_trends[indx].to_csv(OUTDIR
+ os.sep
+ 'coin_kw_trends_{}_{}_{}_at-{}.csv'.format(vals['coin']
,NOW.strftime("%Y-%m-%d-%H%M")
,YR_BACK.strftime("%Y-%m-%d")
,NOW.strftime("%H%M")))
# Mergetime series pytrend data frames
df_consolidated = pd.concat(coin_trends, axis=1)
# Export file
df_consolidated.to_csv(OUTDIR
+ os.sep
+ 'coin_kw_trends_complete_{}_{}_at-{}.csv'
.format(
NOW.strftime("%Y-%m-%d-%H%M")
,YR_BACK.strftime("%Y-%m-%d")
,NOW.strftime("%H%M")))
# Interest by Region
"""
ibr_df = pytrend.interest_by_region(resolution='CITY') #COUNTRY/CITY/DMA/REGION - 'REGION' seems to fail
print(ibr_df.head())
ibr_gt0 = ibr_df[(ibr_df['IOTA'] > 0) |
(ibr_df['iOTA'] > 0) |
(ibr_df['iota'] > 0) |
(ibr_df['Iota'] > 0)
]
"""
import string
import random
def random_word(length):
"""Return a random word of 'length' letters."""
return ''.join(random.choice(string.ascii_letters) for i in range(length))
| [
"[email protected]"
] | |
59817d4f4915dfc4c470c6d51b0592362187ec0b | 350d6b7246d6ef8161bdfccfb565b8671cc4d701 | /Binary Tree Vertical Order Traversal.py | da22a1ddbb5aca8b4d6f3dbd14fa43d4a483c554 | [] | no_license | YihaoGuo2018/leetcode_python_2 | 145d5fbe7711c51752b2ab47a057b37071d2fbf7 | 2065355198fd882ab90bac6041c1d92d1aff5c65 | refs/heads/main | 2023-02-14T14:25:58.457991 | 2021-01-14T15:57:10 | 2021-01-14T15:57:10 | 329,661,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py |
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution(object):
    dic = {}

    def verticalOrder(self, root):
        self.dic = {}  # reset per call; as a shared class attribute it would leak state between calls
        self.help(root, 1)
        save = []
        keys = sorted(self.dic.keys())
        for k in keys:
            save.append(self.dic[k])
        return save

    def help(self, root, depth):
        if root == None:
            return
        if depth not in self.dic.keys():
            self.dic[depth] = []
        self.dic[depth].append(root.val)
        self.help(root.left, depth - 1)
        self.help(root.right, depth + 1)
        return
| ["[email protected]"] | |
4084a64ffe7d52b14cb8b756e1efe29d46730493 | 8784a3a9d4054d1aca752ec742902abb51a9de80 | /python_stack/python_OOP/arithmetic_module/main.py | 785338b9b6b6a9481506f9e74ad051b34a087637 | [] | no_license | MichaelKirkaldyV/mean-deploy-2 | 25eaf7cc430ac095f5327c04be84b9212314c7f2 | f30b8ea14ccbaecfe62929948f2a84191d316c22 | refs/heads/master | 2023-01-13T07:20:28.984728 | 2019-05-23T16:42:15 | 2019-05-23T16:42:15 | 151,123,880 | 0 | 0 | null | 2022-12-30T09:47:11 | 2018-10-01T16:54:09 | TypeScript | UTF-8 | Python | false | false | 313 | py |
# Imports the arithmetic module from the same folder.
# Calls its functions through the module name (dot notation),
# passing arguments to each one,
# and prints the result each function returns.
import arithmetic
print arithmetic.add(5, 8)
print arithmetic.subtract(10, 5)
print arithmetic.multiply(12, 6)
| ["[email protected]"] | |
bf6b20299d2e28a5fcec21f2b34363fe2579f3a1 | 643d519b690028bee74be5c9deaa16d2ae23ece8 | /API_s/services/estoque_service.py | 89983d21b8b10c9a34b917c8dcc31598bd5f6861 | [] | no_license | vinicius-machado00/OPE_Gabicelll | b585b5c94b6c49a8a53393c4b0bc5055a47db141 | 70a37b4c2ebb627cec1121e0847f841a57355321 | refs/heads/master | 2022-04-01T21:07:42.893725 | 2019-09-16T22:27:28 | 2019-09-16T22:27:28 | 208,908,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py |
# from Model.estoque import Estoque
from infra.estoque_dao import listar as listar_dao, novo as novo_dao,\
    deletar as deletar_dao, atualizar as atualizar_dao,\
    listarEstoque as listarEstoque_dao, buscarUser as buscarUser_dao, buscarProduto as buscarProduto_dao

# API methods
def listarEstoque():
    return listarEstoque_dao()

def buscarUser(id):
    return buscarUser_dao(id)

def buscarProduto(nome):
    return buscarProduto_dao(nome)  # was buscarProduto_dao(id), which ignored the argument

# API methods
def listar():
    return listar_dao()

def localiza(id):
    for p in listar_dao():
        if p.id == id:
            return p
    return None

# def novo(aluno_data):
#     # p = Aluno.cria(aluno_data)
#     # novo_dao(p.__dict__())
#     return listar_dao()

def remover(id):
    deletar_dao(id)
    return None

def atualiza(id):
    # NOTE: `Aluno` and `aluno_data` are not defined in this module; this line is
    # leftover scaffolding and would raise a NameError if reached.
    p = Aluno.cria(aluno_data)
    # atualizar_dao(id, p.__dict__())
    return atualizar_dao(id)
| ["[email protected]"] | |
c7522273e2e8d856f65fdc707ee671c9669ecaa6 | 063a39b246989a3cebb73ae67b5ce6c718aa44af | /tests/test_cases/add_interaction.py | 8c70911bdea248c68894afa6461df6ad8c0eb10c | [] | no_license | tspecht/python-api-client | f41a0213d523aeb00b0815a5357a86cc7ed621e3 | c0442e9ad62bf6d0d8a6ae0aa852321f12e2a07a | refs/heads/master | 2021-07-19T23:13:35.461910 | 2017-10-26T07:31:37 | 2017-10-26T07:31:37 | 108,376,966 | 0 | 0 | null | 2017-10-26T07:31:13 | 2017-10-26T07:31:12 | null | UTF-8 | Python | false | false | 2,106 | py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is auto-generated, do not edit
#

from tests.test_cases.recombee_test import RecombeeTest, InteractionsTest
from recombee_api_client.exceptions import ResponseException
from recombee_api_client.api_requests import *

class AddInteractionTest(RecombeeTest):

    def create_request(self, user_id, item_id, timestamp=None, cascade_create=None):
        pass

    def test_add_interaction(self):

        # it 'does not fail with cascadeCreate'
        req = self.create_request('u_id', 'i_id', cascade_create=True)
        resp = self.client.send(req)

        # it 'does not fail with existing item and user'
        req = self.create_request('entity_id', 'entity_id')
        resp = self.client.send(req)

        # it 'does not fail with valid timestamp'
        req = self.create_request('entity_id', 'entity_id', timestamp='2013-10-29T09:38:41.341Z')
        resp = self.client.send(req)

        # it 'fails with nonexisting item id'
        req = self.create_request('entity_id', 'nonex_id')
        try:
            self.client.send(req)
            self.fail()  # was self.assertFail(), which is not a unittest method
        except ResponseException as ex:
            self.assertEqual(ex.status_code, 404)

        # it 'fails with nonexisting user id'
        req = self.create_request('nonex_id', 'entity_id')
        try:
            self.client.send(req)
            self.fail()  # was self.assertFail()
        except ResponseException as ex:
            self.assertEqual(ex.status_code, 404)

        # it 'fails with invalid time'
        req = self.create_request('entity_id', 'entity_id', timestamp=-15)
        try:
            self.client.send(req)
            self.fail()  # was self.assertFail()
        except ResponseException as ex:
            self.assertEqual(ex.status_code, 400)

        # it 'really stores interaction to the system'
        req = self.create_request('u_id2', 'i_id2', cascade_create=True, timestamp=5)
        resp = self.client.send(req)
        try:
            self.client.send(req)
            self.fail()  # was self.assertFail()
        except ResponseException as ex:
            self.assertEqual(ex.status_code, 409)
| ["[email protected]"] | |
64db72079dc2438f42dcc5f4e3ecafa46502073d | b306aab9dcea2dd83dda700bc9f7b9f1a32cff3a | /CAIL2020/ydljy/data.py | be956ba1641be5f4507b7e05e32f92376548b540 | [
"Apache-2.0"
] | permissive | Tulpen/CAIL | d6ca9981c7ea2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 13,078 | py |
"""Data processor for SMP-CAIL2020-Argmine.

Author: Tsinghuaboy ([email protected])

In data file, each line contains 1 sc sentence and 5 bc sentences.
The data processor converts each line into 5 samples,
each sample with 1 sc sentence and 1 bc sentence.

Usage:
1. Tokenizer (used for RNN model):
    from data import Tokenizer
    vocab_file = 'vocab.txt'
    sentence = '我饿了,想吃东西了。'
    tokenizer = Tokenizer(vocab_file)
    tokens = tokenizer.tokenize(sentence)
    # ['我', '饿', '了', ',', '想', '吃', '东西', '了', '。']
    ids = tokenizer.convert_tokens_to_ids(tokens)
2. Data:
    from data import Data
    # For training, load train and valid set
    # For BERT model
    data = Data('model/bert/vocab.txt', model_type='bert')
    datasets = data.load_train_and_valid_files(
        'SMP-CAIL2020-train.csv', 'SMP-CAIL2020-valid.csv')
    train_set, valid_set_train, valid_set_valid = datasets
    # For RNN model
    data = Data('model/rnn/vocab.txt', model_type='rnn')
    datasets = data.load_train_and_valid_files(
        'SMP-CAIL2020-train.csv', 'SMP-CAIL2020-valid.csv')
    train_set, valid_set_train, valid_set_valid = datasets
    # For testing, load test set
    data = Data('model/bert/vocab.txt', model_type='bert')
    test_set = data.load_file('SMP-CAIL2020-test.csv', train=False)
"""
from typing import List

import jieba
import torch
import pandas as pd
from torch.utils.data import TensorDataset
from transformers import BertTokenizer
# from pytorch_pretrained_bert import BertTokenizer
from tqdm import tqdm


class Tokenizer:
    """Tokenizer for Chinese given vocab.txt.

    Attributes:
        dictionary: Dict[str, int], {<word>: <index>}
    """
    def __init__(self, vocab_file='vocab.txt'):
        """Initialize and build dictionary.

        Args:
            vocab_file: one word each line
        """
        self.dictionary = {'[PAD]': 0, '[UNK]': 1}
        count = 2
        with open(vocab_file, encoding='utf-8') as fin:
            for line in fin:
                word = line.strip()
                self.dictionary[word] = count
                count += 1

    def __len__(self):
        return len(self.dictionary)

    @staticmethod
    def tokenize(sentence: str) -> List[str]:
        """Cut words for a sentence.

        Args:
            sentence: sentence

        Returns:
            words list
        """
        return jieba.lcut(sentence)

    def convert_tokens_to_ids(
            self, tokens_list: List[str]) -> List[int]:
        """Convert tokens to ids.

        Args:
            tokens_list: word list

        Returns:
            index list
        """
        return [self.dictionary.get(w, 1) for w in tokens_list]


class Data:
    """Data processor for BERT and RNN model for SMP-CAIL2020-Argmine.

    Attributes:
        model_type: 'bert' or 'rnn'
        max_seq_len: int, default: 512
        tokenizer: BertTokenizer for bert
                   Tokenizer for rnn
    """
    def __init__(self,
                 vocab_file='',
                 max_seq_len: int = 512,
                 model_type: str = 'bert', config=None):
        """Initialize data processor for SMP-CAIL2020-Argmine.

        Args:
            vocab_file: one word each line
            max_seq_len: max sequence length, default: 512
            model_type: 'bert' or 'rnn'
                If model_type == 'bert', use BertTokenizer as tokenizer
                Otherwise, use Tokenizer as tokenizer
        """
        self.model_type = model_type
        if self.model_type == 'bert':
            self.tokenizer = BertTokenizer.from_pretrained(config.bert_model_path)  # BertTokenizer(vocab_file)
        else:  # rnn
            self.tokenizer = Tokenizer(vocab_file)
        self.max_seq_len = max_seq_len

    def load_file(self,
                  file_path='SMP-CAIL2020-train.csv',
                  train=True) -> TensorDataset:
        """Load SMP-CAIL2020-Argmine train file and construct TensorDataset.

        Args:
            file_path: train file with last column as label
            train:
                If True, train file with last column as label
                Otherwise, test file without last column as label

        Returns:
            BERT model:
            Train:
                torch.utils.data.TensorDataset
                    each record: (input_ids, input_mask, segment_ids, label)
            Test:
                torch.utils.data.TensorDataset
                    each record: (input_ids, input_mask, segment_ids)
            RNN model:
            Train:
                torch.utils.data.TensorDataset
                    each record: (s1_ids, s2_ids, s1_length, s2_length, label)
            Test:
                torch.utils.data.TensorDataset
                    each record: (s1_ids, s2_ids, s1_length, s2_length)
        """
        sc_list, bc_list, label_list = self._load_file(file_path, train)
        if self.model_type == 'bert':
            dataset = self._convert_sentence_pair_to_bert_dataset(
                sc_list, bc_list, label_list)
        else:  # rnn
            dataset = self._convert_sentence_pair_to_rnn_dataset(
                sc_list, bc_list, label_list)
        return dataset

    def load_train_and_valid_files(self, train_file, valid_file):
        """Load all files for SMP-CAIL2020-Argmine.

        Args:
            train_file, valid_file: files for SMP-CAIL2020-Argmine

        Returns:
            train_set, valid_set_train, valid_set_valid
            all are torch.utils.data.TensorDataset
        """
        print('Loading train records for train...')
        train_set = self.load_file(train_file, True)
        print(len(train_set), 'training records loaded.')
        print('Loading train records for valid...')
        valid_set_train = self.load_file(train_file, False)
        print(len(valid_set_train), 'train records loaded.')
        print('Loading valid records...')
        valid_set_valid = self.load_file(valid_file, False)
        print(len(valid_set_valid), 'valid records loaded.')
        return train_set, valid_set_train, valid_set_valid

    def _load_file(self, filename, train: bool = True):
        """Load SMP-CAIL2020-Argmine train/test file.

        For train file,
        the ratio between positive samples and negative samples is 1:4;
        copy positive 3 times so that positive:negative = 1:1.

        Args:
            filename: SMP-CAIL2020-Argmine file
            train:
                If True, train file with last column as label
                Otherwise, test file without last column as label

        Returns:
            sc_list, bc_list, label_list with the same length
            sc_list, bc_list: List[List[str]], list of word tokens list
            label_list: List[int], list of labels
        """
        data_frame = pd.read_csv(filename)
        sc_list, bc_list, label_list = [], [], []
        for row in data_frame.itertuples(index=False):
            # candidates = row[0:2]
            answer = bool(row[-1]) if train else None
            sc_tokens = self.tokenizer.tokenize(row[0])
            bc_tokens = self.tokenizer.tokenize(row[1])
            label = 1 if answer else 0
            sc_list.append(sc_tokens)
            bc_list.append(bc_tokens)
            if train:
                label_list.append(label)
            # for i, _ in enumerate(candidates):
            #     bc_tokens = self.tokenizer.tokenize(candidates[i])
            #     if train:
            #         if i + 1 == answer:
            #             # Copy positive sample 4 times
            #             for _ in range(len(candidates) - 1):
            #                 sc_list.append(sc_tokens)
            #                 bc_list.append(bc_tokens)
            #                 label_list.append(1)
            #         else:
            #             sc_list.append(sc_tokens)
            #             bc_list.append(bc_tokens)
            #             label_list.append(0)
            #     else:  # test
            #         sc_list.append(sc_tokens)
            #         bc_list.append(bc_tokens)
        return sc_list, bc_list, label_list

    def _convert_sentence_pair_to_bert_dataset(
            self, s1_list, s2_list, label_list=None):
        """Convert sentence pairs to dataset for BERT model.

        Args:
            sc_list, bc_list: List[List[str]], list of word tokens list
            label_list: train: List[int], list of labels
                        test: []

        Returns:
            Train:
                torch.utils.data.TensorDataset
                    each record: (input_ids, input_mask, segment_ids, label)
            Test:
                torch.utils.data.TensorDataset
                    each record: (input_ids, input_mask, segment_ids)
        """
        all_input_ids, all_input_mask, all_segment_ids = [], [], []
        for i, _ in tqdm(enumerate(s1_list), ncols=80):
            tokens = ['[CLS]'] + s1_list[i] + ['[SEP]']
            segment_ids = [0] * len(tokens)
            tokens += s2_list[i] + ['[SEP]']
            segment_ids += [1] * (len(s2_list[i]) + 1)
            if len(tokens) > self.max_seq_len:
                tokens = tokens[:self.max_seq_len]
                segment_ids = segment_ids[:self.max_seq_len]
            input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1] * len(input_ids)
            tokens_len = len(input_ids)
            input_ids += [0] * (self.max_seq_len - tokens_len)
            segment_ids += [0] * (self.max_seq_len - tokens_len)
            input_mask += [0] * (self.max_seq_len - tokens_len)
            all_input_ids.append(input_ids)
            all_input_mask.append(input_mask)
            all_segment_ids.append(segment_ids)
        all_input_ids = torch.tensor(all_input_ids, dtype=torch.long)
        all_input_mask = torch.tensor(all_input_mask, dtype=torch.long)
        all_segment_ids = torch.tensor(all_segment_ids, dtype=torch.long)
        if label_list:  # train
            all_label_ids = torch.tensor(label_list, dtype=torch.long)
            return TensorDataset(
                all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        # test
        return TensorDataset(
            all_input_ids, all_input_mask, all_segment_ids)

    def _convert_sentence_pair_to_rnn_dataset(
            self, s1_list, s2_list, label_list=None):
        """Convert sentences pairs to dataset for RNN model.

        Args:
            sc_list, bc_list: List[List[str]], list of word tokens list
            label_list: train: List[int], list of labels
                        test: []

        Returns:
            Train:
                torch.utils.data.TensorDataset
                    each record: (s1_ids, s2_ids, s1_length, s2_length, label)
            Test:
                torch.utils.data.TensorDataset
                    each record: (s1_ids, s2_ids, s1_length, s2_length, label)
        """
        all_s1_ids, all_s2_ids = [], []
        all_s1_lengths, all_s2_lengths = [], []
        for i in tqdm(range(len(s1_list)), ncols=80):
            tokens_s1, tokens_s2 = s1_list[i], s2_list[i]
            all_s1_lengths.append(min(len(tokens_s1), self.max_seq_len))
            all_s2_lengths.append(min(len(tokens_s2), self.max_seq_len))
            if len(tokens_s1) > self.max_seq_len:
                tokens_s1 = tokens_s1[:self.max_seq_len]
            if len(tokens_s2) > self.max_seq_len:
                tokens_s2 = tokens_s2[:self.max_seq_len]
            s1_ids = self.tokenizer.convert_tokens_to_ids(tokens_s1)
            s2_ids = self.tokenizer.convert_tokens_to_ids(tokens_s2)
            if len(s1_ids) < self.max_seq_len:
                s1_ids += [0] * (self.max_seq_len - len(s1_ids))
            if len(s2_ids) < self.max_seq_len:
                s2_ids += [0] * (self.max_seq_len - len(s2_ids))
            all_s1_ids.append(s1_ids)
            all_s2_ids.append(s2_ids)
        all_s1_ids = torch.tensor(all_s1_ids, dtype=torch.long)
        all_s2_ids = torch.tensor(all_s2_ids, dtype=torch.long)
        all_s1_lengths = torch.tensor(all_s1_lengths, dtype=torch.long)
        all_s2_lengths = torch.tensor(all_s2_lengths, dtype=torch.long)
        if label_list:  # train
            all_label_ids = torch.tensor(label_list, dtype=torch.long)
            return TensorDataset(
                all_s1_ids, all_s2_ids, all_s1_lengths, all_s2_lengths,
                all_label_ids)
        # test
        return TensorDataset(
            all_s1_ids, all_s2_ids, all_s1_lengths, all_s2_lengths)


def test_data():
    """Test for data module."""
    # For BERT model
    data = Data('model/bert/vocab.txt', model_type='bert')
    _, _, _ = data.load_train_and_valid_files(
        'SMP-CAIL2020-train.csv',
        'SMP-CAIL2020-test1.csv')
    # For RNN model
    data = Data('model/rnn/vocab.txt', model_type='rnn')
    _, _, _ = data.load_train_and_valid_files(
        'SMP-CAIL2020-train.csv',
        'SMP-CAIL2020-test1.csv')


if __name__ == '__main__':
    test_data()
| ["[email protected]"] | |
95ed3d25daa6622c36d744536708a097a674cfd6 | 790b35cf27579bc8fde7e71077a83feab8d29471 | /steamworks/interfaces/microtxn.py | f9b62cd2a3b2d3efdb55f2e06f179b5eb12670bd | [
"MIT"
] | permissive | philippj/SteamworksPy | f094742966054ce2106dc03876fff074319abbfb | 9496d308cff71a1bed9e21940245424a244432ca | refs/heads/master | 2023-05-31T12:58:41.135249 | 2023-02-25T21:05:06 | 2023-02-25T21:05:06 | 39,316,769 | 123 | 25 | MIT | 2023-05-25T14:25:49 | 2015-07-19T00:11:28 | Python | UTF-8 | Python | false | false | 958 | py |
from ctypes import *
from enum import Enum

import steamworks.util as util
from steamworks.enums import *
from steamworks.structs import *
from steamworks.exceptions import *


class SteamMicroTxn(object):
    _MicroTxnAuthorizationResponse_t = CFUNCTYPE(None, MicroTxnAuthorizationResponse_t)
    _MicroTxnAuthorizationResponse = None

    def __init__(self, steam: object):
        self.steam = steam
        if not self.steam.loaded():
            raise SteamNotLoadedException('STEAMWORKS not yet loaded')

    def SetAuthorizationResponseCallback(self, callback: object) -> bool:
        """Set callback for when Steam informs about the consent flow result

        :param callback: callable
        :return: bool
        """
        self._MicroTxnAuthorizationResponse = SteamMicroTxn._MicroTxnAuthorizationResponse_t(callback)
        self.steam.MicroTxn_SetAuthorizationResponseCallback(self._MicroTxnAuthorizationResponse)
        return True
| ["[email protected]"] | |
b10babdea49ce37fcf0d72cf4b365b842918a6e1 | 14845cd391da36d4c7fdc2c4d85ec63abf043a0b | /venv/bin/django-admin | fc01ca872a4c497ab028183b0e7c8c43f2c88100 | [] | no_license | cavalryjim/mysite | 6946adb56d2973c555f68b58decb8bd7e4c18ca6 | 0ebd910eb61969ec9c8708be84a44fa2127b0312 | refs/heads/master | 2020-03-11T08:44:01.971220 | 2018-05-01T23:21:31 | 2018-05-01T23:21:31 | 129,891,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 |
#!/Users/james/Projects/mysite/venv/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
| ["[email protected]"] | |
0780bc486c4355eaef2a4df385fc503799cbf3eb | 79e19819aec49b500825f82a7de149eb6a0ba81d | /leetcode/1018.py | 632dc46703f709c5e2bf6b31ac1d966e91cbfa8c | [] | no_license | seoyeonhwng/algorithm | 635e5dc4a2e9e1c50dc0c75d9a2a334110bb8e26 | 90406ee75de69996e666ea505ff5d9045c2ad941 | refs/heads/master | 2023-05-03T16:51:48.454619 | 2021-05-26T00:54:40 | 2021-05-26T00:54:40 | 297,548,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py |
class Solution:
    def prefixesDivBy5(self, A: List[int]) -> List[bool]:
        answer = [False] * len(A)
        answer[0], prev = (A[0] == 0), A[0]
        for i in range(1, len(A)):
            answer[i] = ((prev * 2 + A[i]) % 5 == 0)
            prev = prev * 2 + A[i]
        return answer

"""
- shifting left by one bit = multiplying by 2
"""
| ["[email protected]"] | |
c7984ce328339a910916d87e8b897f8b81df8ac6 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02659/s866773123.py | 485fb9b9a423580f938055c9b85b90aa424797da | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py |
AB = input().split()
A = int(AB[0])
B = AB[1].split('.')
B = int(B[0]) * 100 + int(B[1])
print(A * B // 100)
| ["[email protected]"] | |
83d88a5ed0bdcad629a6e3815dd75d21cc5a72e0 | e61e664d95af3b93150cda5b92695be6551d2a7c | /vega/networks/pytorch/customs/modnas/arch_space/construct/torch/model_init.py | 63a2eea20c488ff5f7c5cdf7026be84854afb40b | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | huawei-noah/vega | 44aaf8bb28b45f707ed6cd4e871ba70fc0c04846 | 12e37a1991eb6771a2999fe0a46ddda920c47948 | refs/heads/master | 2023-09-01T20:16:28.746745 | 2023-02-15T09:36:59 | 2023-02-15T09:36:59 | 273,667,533 | 850 | 184 | NOASSERTION | 2023-02-15T09:37:01 | 2020-06-20T08:20:06 | Python | UTF-8 | Python | false | false | 5,623 | py |
# -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model weight initializer."""
import copy
import math
import torch.nn.init as init
from modnas.registry.construct import register
def _t_init_he_normal_fout(t, gain, fan_in, fan_out):
    stdv = gain / math.sqrt(fan_out)
    init.normal_(t, 0, stdv)

def _t_init_he_normal_fin(t, gain, fan_in, fan_out):
    stdv = gain / math.sqrt(fan_in)
    init.normal_(t, 0, stdv)

def _t_init_he_uniform_fout(t, gain, fan_in, fan_out):
    b = math.sqrt(3.) * gain / math.sqrt(fan_out)
    init.uniform_(t, -b, b)

def _t_init_he_uniform_fin(t, gain, fan_in, fan_out):
    b = math.sqrt(3.) * gain / math.sqrt(fan_in)
    init.uniform_(t, -b, b)

def _t_init_xavier_uniform(t, gain, fan_in, fan_out):
    b = math.sqrt(6.) * gain / math.sqrt(fan_in + fan_out)
    init.uniform_(t, -b, b)

def _t_init_xavier_normal(t, gain, fan_in, fan_out):
    stdv = math.sqrt(2.) * gain / math.sqrt(fan_in + fan_out)
    init.normal_(t, 0, stdv)

def _t_init_uniform_fin(t, gain, fan_in, fan_out):
    b = 1.0 / math.sqrt(fan_in)
    init.uniform_(t, -b, b)

def _t_init_uniform_fout(t, gain, fan_in, fan_out):
    b = 1.0 / math.sqrt(fan_out)
    init.uniform_(t, -b, b)

def _t_init_uniform(t, gain, fan_in, fan_out):
    init.uniform_(t)

def _t_init_normal(t, gain, fan_in, fan_out):
    init.normal_(t)

def _t_init_zeros(t, gain, fan_in, fan_out):
    init.zeros_(t)

def _t_init_ones(t, gain, fan_in, fan_out):
    init.ones_(t)

def _init_tensor(init_type, t, gain, fan_in, fan_out):
    init_fn = _tensor_init_fn.get(init_type)
    if init_fn is None or t is None:
        return
    init_fn(t, gain, fan_in, fan_out)

def _m_init_conv(m, config):
    init_type = config['conv']['type']
    bias_init_type = config['bias']['type']
    gain = config['gain']
    if init_type is None:
        return
    rec_size = m.kernel_size[0] * m.kernel_size[1]
    fan_in = rec_size * m.in_channels
    fan_out = rec_size * m.out_channels
    if config['conv'].get('div_groups', True):
        fan_in /= m.groups
        fan_out /= m.groups
    _init_tensor(init_type, m.weight, gain, fan_in, fan_out)
    if m.bias is not None:
        _init_tensor(bias_init_type, m.bias, gain, fan_in, fan_out)

def _m_init_norm(m, config):
    init_type = config['norm']['type']
    bias_init_type = config['bias']['type']
    momentum = config['norm'].get('momentum')
    eps = config['norm'].get('eps')
    gain = config['gain']
    m.reset_running_stats()
    if momentum is not None:
        m.momentum = momentum
    if eps is not None:
        m.eps = eps
    if not m.affine:
        return
    fan_in = fan_out = m.num_features
    _init_tensor(init_type, m.weight, gain, fan_in, fan_out)
    _init_tensor(bias_init_type, m.bias, gain, fan_in, fan_out)

def _m_init_fc(m, config):
    init_type = config['fc']['type']
    bias_init_type = config['bias']['type']
    gain = config['gain']
    if init_type is None:
        return
    fan_in, fan_out = m.in_features, m.out_features
    _init_tensor(init_type, m.weight, gain, fan_in, fan_out)
    if m.bias is None:
        return
    _init_tensor(bias_init_type, m.bias, gain, fan_in, fan_out)

_tensor_init_fn = {k[8:]: v for (k, v) in globals().items() if k.startswith('_t_init_')}
_module_init_fn = {k[8:]: v for (k, v) in globals().items() if k.startswith('_m_init_')}

_default_init_config = {
    'conv': {
        'type': None,
        'div_groups': True,
    },
    'norm': {
        'type': None,
    },
    'fc': {
        'type': None,
    },
    'bias': {
        'type': None,
    },
}

_default_module_map = {
    'Conv2d': 'conv',
    'BatchNorm2d': 'norm',
    'GroupNorm': 'norm',
    'Linear': 'fc',
}

@register
class DefaultModelInitializer():
    """Model weight initializer class."""

    def __init__(self,
                 init_config=None,
                 module_init_map=None,
                 default_init_type=None,
                 neg_slope=math.sqrt(5),
                 nonlinear='leaky_relu'):
        self.init_config = copy.deepcopy(_default_init_config)
        self.init_config['gain'] = init.calculate_gain(nonlinear, neg_slope)
        self.init_config.update(init_config or {})
        self.module_init_map = _default_module_map.copy()
        self.module_init_map.update(module_init_map or {})
        self.default_init_type = default_init_type

    def __call__(self, model):
        """Return initialized model."""
        for m in model.modules():
            m_init_type = self.module_init_map.get(type(m).__name__)
            if m_init_type is not None:
                _module_init_fn[m_init_type](m, self.init_config)
            elif len(list(m.children())) == 0:
                for p in m.parameters():
                    sz = p.shape
                    fan_out = sz[0] if len(sz) else 1
                    fan_in = sz[min(1, len(sz) - 1)] if len(sz) else 1
                    _init_tensor(self.default_init_type, p, self.init_config['gain'], fan_in, fan_out)
        return model
| ["[email protected]"] | |
c081c11e09a0c8926549feb6429c50cbf08a8126 | cf7f38d1c34ca088ebcc12bdcd25c7f0f0b538de | /homework2/14301134/MyWSGI/Application.py | 40209bed1e9bb375f9eebb905ffdf89098cf8a12 | [] | no_license | JeromeWGQ/JavaEEHomework | f9263369439793398afc56a9b4ba7e9cef241972 | 23d35dc5ba1b59da4bcc9397f891a91128afe6c1 | refs/heads/master | 2020-04-10T21:00:51.138919 | 2017-01-05T14:06:16 | 2017-01-05T14:06:16 | 68,166,757 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 313 | py |
def application(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/html')])
    if environ['PATH_INFO'][1:]:
        body = '<h1>Received text: %s</h1>' % environ['PATH_INFO'][1:]
    else:
        body = '<h1>This is the default static page.</h1>'
    return [body.encode('utf-8')]
| ["[email protected]"] | |
1d629c3f80bdea998e1edcc704dadcb450ca56ed | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/nlp/BERT-ITPT-FiT_ID0340_for_PyTorch/baseline_main.py | c8f1c9fc7e3f924283508a3653a8b1f69083fc34 | [
"BSD-3-Clause",
"MIT",
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 8,638 | py |
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
"""
Script for training, testing, and saving baseline, binary classification models for the IMDB
dataset.
"""
import logging
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
# !pip install pytorch_transformers
from pytorch_transformers import AdamW # Adam's optimization w/ fixed weight decay
from models.baseline_models import SimpleRNN, SimpleRNNWithBERTEmbeddings
from bert_utils.data_utils import IMDBDataset
from bert_utils.model_utils import train, test
# Disable unwanted warning messages from pytorch_transformers
# NOTE: Run once without the line below to check if anything is wrong, here we target to eliminate
# the message "Token indices sequence length is longer than the specified maximum sequence length"
# since we already take care of it within the tokenize() function through fixing sequence length
logging.getLogger('pytorch_transformers').setLevel(logging.CRITICAL)
DEVICE = torch.device('npu' if torch.npu.is_available() else 'cpu')
print("DEVICE FOUND: %s" % DEVICE)
# Set seeds for reproducibility
SEED = 42
torch.manual_seed(seed=SEED)
torch.backends.cudnn.deterministic = True
# Define hyperparameters
USE_BERT_EMBEDDING_PARAMETERS = True
PRETRAINED_MODEL_NAME = 'bert-base-cased'
NUM_EPOCHS = 50
BATCH_SIZE = 32
MAX_VOCABULARY_SIZE = 25000
MAX_TOKENIZATION_LENGTH = 512
EMBEDDING_DIM = 100
NUM_CLASSES = 2
NUM_RECURRENT_LAYERS = 1
HIDDEN_SIZE = 128
USE_BIDIRECTIONAL = True
DROPOUT_RATE = 0.20
# Initialize model
if USE_BERT_EMBEDDING_PARAMETERS:
    model = SimpleRNNWithBERTEmbeddings(pretrained_model_name_for_embeddings=PRETRAINED_MODEL_NAME,
                                        max_tokenization_length=MAX_TOKENIZATION_LENGTH,
                                        num_classes=NUM_CLASSES,
                                        num_recurrent_layers=NUM_RECURRENT_LAYERS,
                                        use_bidirectional=USE_BIDIRECTIONAL,
                                        hidden_size=HIDDEN_SIZE,
                                        dropout_rate=DROPOUT_RATE,
                                        use_gpu=True if torch.npu.is_available() else False)
# IMPORTANT NOTE: Maximum vocabulary size should be set to be equal or larger than the maximum
# encoded (embedded) index used for any token, else the embedding matrix will not capture that token
else:
    model = SimpleRNN(pretrained_model_name_for_tokenizer=PRETRAINED_MODEL_NAME,
                      max_vocabulary_size=MAX_VOCABULARY_SIZE*4,
                      max_tokenization_length=MAX_TOKENIZATION_LENGTH,
                      embedding_dim=EMBEDDING_DIM,
                      num_classes=NUM_CLASSES,
                      num_recurrent_layers=NUM_RECURRENT_LAYERS,
                      hidden_size=HIDDEN_SIZE,
                      use_bidirectional=USE_BIDIRECTIONAL,
                      dropout_rate=DROPOUT_RATE,
                      use_gpu=True if torch.npu.is_available() else False)

# Initialize train & test datasets
train_dataset = IMDBDataset(input_directory='aclImdb/train',
                            tokenizer=model.get_tokenizer(),
                            apply_cleaning=False,
                            max_tokenization_length=MAX_TOKENIZATION_LENGTH,
                            truncation_method='head-only',
                            device=DEVICE)
test_dataset = IMDBDataset(input_directory='aclImdb/test',
                           tokenizer=model.get_tokenizer(),
                           apply_cleaning=False,
                           max_tokenization_length=MAX_TOKENIZATION_LENGTH,
                           truncation_method='head-only',
                           device=DEVICE)

# Acquire iterators through data loaders
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=BATCH_SIZE,
                          shuffle=True)
test_loader = DataLoader(dataset=test_dataset,
                         batch_size=BATCH_SIZE,
                         shuffle=False)

# Define loss function
criterion = nn.CrossEntropyLoss()

# Define identifiers & group model parameters accordingly (check README.md for the intuition)
if USE_BERT_EMBEDDING_PARAMETERS:
    bert_learning_rate = 3e-5
    custom_learning_rate = 1e-3
    bert_identifiers = ['embeddings']
    no_weight_decay_identifiers = ['bias', 'LayerNorm.weight']
    grouped_model_parameters = [
        {'params': [param for name, param in model.named_parameters()
                    if any(identifier in name for identifier in bert_identifiers) and
                    not any(identifier_ in name for identifier_ in no_weight_decay_identifiers)],
         'lr': bert_learning_rate,
         'betas': (0.9, 0.999),
         'weight_decay': 0.01,
         'eps': 1e-8},
        {'params': [param for name, param in model.named_parameters()
                    if any(identifier in name for identifier in bert_identifiers) and
                    any(identifier_ in name for identifier_ in no_weight_decay_identifiers)],
         'lr': bert_learning_rate,
         'betas': (0.9, 0.999),
         'weight_decay': 0.0,
         'eps': 1e-8},
        {'params': [param for name, param in model.named_parameters()
                    if not any(identifier in name for identifier in bert_identifiers)],
         'lr': custom_learning_rate,
         'betas': (0.9, 0.999),
         'weight_decay': 0.0,
         'eps': 1e-8}
    ]
    # Define optimizer
    optimizer = AdamW(grouped_model_parameters)
else:
    # Define optimizer
    optimizer = optim.Adam(params=model.parameters(),
                           lr=1e-3,
                           betas=(0.9, 0.999),
                           eps=1e-8)

# Place model & loss function on GPU
model, criterion = model.to(DEVICE), criterion.to(DEVICE)

# Start actual training, check test loss after each epoch
best_test_loss = float('inf')
for epoch in range(NUM_EPOCHS):
    print("EPOCH NO: %d" % (epoch + 1))

    train_loss, train_acc = train(model=model,
                                  iterator=train_loader,
                                  criterion=criterion,
                                  optimizer=optimizer,
                                  device=DEVICE,
                                  include_bert_masks=True)
    test_loss, test_acc = test(model=model,
                               iterator=test_loader,
                               criterion=criterion,
                               device=DEVICE,
                               include_bert_masks=True)

    if test_loss < best_test_loss:
        best_test_loss = test_loss
        torch.save(model.state_dict(), 'saved_models/simple-lstm-model.pt')

    print(f'\tTrain Loss: {train_loss:.3f} | Train Accuracy: {train_acc * 100:.2f}%')
    print(f'\tTest Loss: {test_loss:.3f} | Test Accuracy: {test_acc * 100:.2f}%')
| ["[email protected]"] | |
1f7b48c5171e997960e77712eaa1a5cbe00f751f | cafe1b855b9e027d42ab6db4826ee6d100c39e83 | /src/classes/Fetcher.py | fc7746d74c133124629298a84109cbeb6accfbec | [
"MIT"
] | permissive | nit1994oct/Screeni-py | 206977eb89e32f8ae6c983dcc111cdafda809ad4 | 2606667a6295136b2f5c3668df2a764fb13f93c3 | refs/heads/main | 2023-04-17T05:23:47.675293 | 2021-05-05T09:10:49 | 2021-05-05T09:10:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,876 | py |
'''
* Project : Screenipy
* Author : Pranjal Joshi
* Created : 28/04/2021
* Description : Class for handling networking for fetching stock codes and data
'''
import sys
import urllib
import requests
import random
import yfinance as yf
import pandas as pd
import classes.ConfigManager as ConfigManager
from nsetools import Nse
from classes.ColorText import colorText
listStockCodes = []
screenCounter = 1
nse = Nse()
# This Class Handles Fetching of Stock Data over the internet
class tools:
# Fetch all stock codes from NSE
def fetchStockCodes(executeOption):
global listStockCodes
if executeOption == 0:
stockCode = None
while stockCode == None or stockCode == "":
stockCode = str(input(colorText.BOLD + colorText.BLUE + "[+] Enter Stock Code(s) for screening (Multiple codes should be seperated by ,): ")).upper()
stockCode = stockCode.replace(" ","")
listStockCodes = stockCode.split(',')
else:
print(colorText.BOLD + "[+] Getting Stock Codes From NSE... ", end='')
listStockCodes = list(nse.get_stock_codes(cached=False))[1:]
if len(listStockCodes) > 10:
print(colorText.GREEN + ("=> Done! Fetched %d stock codes." % len(listStockCodes)) + colorText.END)
if ConfigManager.shuffleEnabled:
random.shuffle(listStockCodes)
print(colorText.WARN + "[+] Stock shuffling is active." + colorText.END)
else:
print(colorText.WARN + "[+] Stock shuffling is inactive." + colorText.END)
else:
input(colorText.FAIL + "=> Error getting stock codes from NSE! Press any key to exit!" + colorText.END)
sys.exit("Exiting script..")
# Fetch stock price data from Yahoo finance
def fetchStockData(stockCode, period, duration, proxyServer, screenResults):
global screenCounter
data = yf.download(
tickers = stockCode+".NS",
period = period,
duration = duration,
proxy = proxyServer,
progress=False
)
sys.stdout.write("\r\033[K")
try:
print(colorText.BOLD + colorText.GREEN + ("[%d%%] Screened %d, Found %d. Fetching data & Analyzing %s..." % (int(screenCounter/len(listStockCodes)*100), screenCounter, len(screenResults), stockCode)) + colorText.END, end='')
except ZeroDivisionError:
pass
if len(data) == 0:
print(colorText.BOLD + colorText.FAIL + "=> Failed to fetch!" + colorText.END, end='\r', flush=True)
return None
print(colorText.BOLD + colorText.GREEN + "=> Done!" + colorText.END, end='\r', flush=True)
screenCounter += 1
return data
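# --- Editor's sketch: minimal usage (hypothetical, not part of the original module) ---
# from classes.Fetcher import tools
# tools.fetchStockCodes(executeOption=1)  # fetch the full NSE stock list
# data = tools.fetchStockData("SBIN", period="1y", duration="1d", proxyServer=None, screenResults=[])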
| [
"[email protected]"
] | |
f3fee1bb1a62840950a2e1cd12f1fed5e15ecedd | f2912a48b33e634fa2d1d983fb4d4a06c2ebc6fa | /req_jd_clildwatch.py | fc2681f03dc9a412f1f3d91e98409af974b8970d | [] | no_license | wwsr06/Requests-Study | 6eb6ef14a01f05bffd8655821cd0120c698ba33a | 3de604e1cdb3cf2de89606e18fc3298db4d28529 | refs/heads/master | 2021-05-20T15:11:42.143274 | 2020-04-03T07:17:07 | 2020-04-03T07:17:07 | 252,344,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,022 | py | #writen by WSJ
#coding=utf-8
import sys
import requests
import json
import re
import time
from bs4 import BeautifulSoup
reload(sys)
sys.setdefaultencoding('utf8')
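# NOTE: reload(sys)/setdefaultencoding and the print statements below are Python 2 idioms; run this script with Python 2.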
def crow_page1_firsthalf():
url='https://search.jd.com/Search?keyword=%E5%84%BF%E7%AB%A5%E6%89%8B%E8%A1%A8&enc=utf-8&pvid=255e823a6d0a475a971486d8cc0ae7db'
headers = {
'authority': 'search.jd.com',
'method': 'GET',
'path': '/Search?keyword=%E5%84%BF%E7%AB%A5%E6%89%8B%E8%A1%A8&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&wq=%E5%84%BF%E7%AB%A5%E6%89%8B%E8%A1%A8&page=3&s=61&click=0',
'scheme': 'https',
'referer': '',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
'x-requested-with': 'XMLHttpRequest',
'Cookie':''
}
    # send the GET request for the search page
r = requests.get(url,headers=headers)
r.encoding='utf-8'
print r.status_code
#f = open('html.txt','w')
#print >> f , r.text
#f.close()
#f = open('html.txt','r')
#r = f.read()
#f.close()
paser_productitem(r.text)
def crow_page_firsthalf(n):
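    # JD serves each logical results page in two AJAX halves; this first half
    # uses page=2n-1 with item offset s=1+60*(n-1)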
url='https://search.jd.com/s_new.php?keyword=%E5%84%BF%E7%AB%A5%E6%89%8B%E8%A1%A8&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&page=' #7&s=181&click=0'
url += str(2*n-1)
url += '&s='
url += str(1+60*(n-1))
url += '&click=0'
#print url
#raw_input()
headers = {
'authority': 'search.jd.com',
'method': 'GET',
'path': '/s_new.php?keyword=%E5%84%BF%E7%AB%A5%E6%89%8B%E8%A1%A8&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&page=5&s=121&click=0',
'scheme': 'https',
'accept': '*/*',
#'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9',
'referer': 'https://search.jd.com/Search?keyword=%E5%84%BF%E7%AB%A5%E6%89%8B%E8%A1%A8&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&page=5&s=121&click=0',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'x-requested-with': 'XMLHttpRequest',
'Cookie':'shshshfpa=e827ba58-975f-9aa7-a2b0-eeacfad16b2c-1585407708; __jdv=122270672|qdan.me|-|-|not set|1585407708824; __jdu=1231924428; areaId=2; ipLoc-djd=2-2825-0-0; PCSYCityID=CN_310000_310100_310115; shshshfpb=i6%203cd7q3w6U5YGmLIJrWMA%3D%3D; xtest=4572.cf6b6759; qrsc=3; rkv=V0800; __jdc=122270672; shshshfp=fd21732d1a95680c1173748647be0007; 3AB9D23F7A4B3C9B=R4PCDSC7UIPDWAHWS2Z2LKZETH6FXFH6ITPNCKGRUFX2ZWKGKSS3WQN74Q7UWHZUHISJ7WIFUYXAQY4CHHRF557UA4; __jda=122270672.1231924428.1584597159.1585876243.1585880613.8'
}
    # send the GET request for the first-half page
r = requests.get(url,headers=headers)
r.encoding='utf-8'
print r.status_code
#tmp_fname = str(n)+'.txt'
#f = open(tmp_fname,'w')
#print >> f , r.text
#f.close()
#f = open('2.txt','r')
#r = f.read()
#f.close()
#paser_productitem(r)
paser_productitem(r.text)
def crow_page_lasthalf(n):
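    # second half of logical page n: page=2n, offset s=31+60*(n-1), plus a log_id timestamp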
url='https://search.jd.com/s_new.php?keyword=%E5%84%BF%E7%AB%A5%E6%89%8B%E8%A1%A8&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&page=' #=4&s=91&scrolling=y&log_id=1585893878.93568'
url += str(2*n)
url += '&s='
url += str(31+60*(n-1))
url += '&scrolling=y&log_id='
a=time.time()
b='%.5f'%a
url += str(b)
#print url
#raw_input()
#return
headers = {
'authority': 'search.jd.com',
'method': 'GET',
'path': '/s_new.php?keyword=%E5%84%BF%E7%AB%A5%E6%89%8B%E8%A1%A8&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&page=5&s=121&click=0',
'scheme': 'https',
'accept': '*/*',
#'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.9',
'referer': 'https://search.jd.com/Search?keyword=%E5%84%BF%E7%AB%A5%E6%89%8B%E8%A1%A8&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&page=5&s=121&click=0',
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'x-requested-with': 'XMLHttpRequest',
'Cookie':'shshshfpa=e827ba58-975f-9aa7-a2b0-eeacfad16b2c-1585407708; __jdv=122270672|qdan.me|-|-|not set|1585407708824; __jdu=1231924428; areaId=2; ipLoc-djd=2-2825-0-0; PCSYCityID=CN_310000_310100_310115; shshshfpb=i6%203cd7q3w6U5YGmLIJrWMA%3D%3D; xtest=4572.cf6b6759; qrsc=3; rkv=V0800; __jdc=122270672; shshshfp=fd21732d1a95680c1173748647be0007; 3AB9D23F7A4B3C9B=R4PCDSC7UIPDWAHWS2Z2LKZETH6FXFH6ITPNCKGRUFX2ZWKGKSS3WQN74Q7UWHZUHISJ7WIFUYXAQY4CHHRF557UA4; __jda=122270672.1231924428.1584597159.1585876243.1585880613.8'
}
    # send the GET request for the second-half page
r = requests.get(url,headers=headers)
r.encoding='utf-8'
print r.status_code
#tmp_fname = str(n)+'_last.txt'
#f = open(tmp_fname,'w')
#print >> f , r.text
#f.close()
#f = open('2.txt','r')
#r = f.read()
#f.close()
#paser_productitem(r)
paser_productitem(r.text)
def paser_productitem(rsptext):
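    # parse each product card: name from the <em> block, price from <i> or the data-price attribute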
soup = BeautifulSoup(rsptext,"html.parser")
search_div = soup.findAll(attrs={"class":"gl-i-wrap"})
for ss in search_div:
ss_1 = ss.find(attrs={"class":"p-name p-name-type-2"})
ss_11 = ss_1.a
#f = open('ss_11.txt','w')
#print >> f , ss_11
#f.close()
#raw_input()
#search product name
ss_111 = re.search( r'<em>(.*?)</em>',str(ss_11), re.S) #get string from "xxx" to "xxx"
ss_1111 = ss_111.group()[4:-5]
p_name = re.sub(r'<.*?>','',ss_1111)
#print p_name.decode('utf-8')
#search product price
ss_1 = ss.find(attrs={"class":"p-price"})
ss_11 = re.findall(r"<i>(.+?)</i>",str(ss_1))
if ss_11 == []:
ss_11 = re.findall(r'data-price=\"(.+?)\"><em>',str(ss_1))
p_price = ss_11[0]
#print p_price
print >> fo , p_name + ',' + p_price
#-----------------------main flow---------------------------------------------------------
fo = open('product.txt','w')
crow_page1_firsthalf()
time.sleep(2)
crow_page_lasthalf(1)
for i in range(2,101):
print 'processing page ' + str(i)
crow_page_firsthalf(i)
time.sleep(1)
crow_page_lasthalf(i)
time.sleep(1)
fo.close()
print 'DONE'
| [
"[email protected]"
] | |
b22a6ad2ebb62fc0c9123a855db2ce24a05b50b7 | a66a7b3a048ecdce24fb43dde19db29ee341f756 | /checkout/migrations/0004_auto_20210310_0940.py | 494fe265cff71d30b18caf5bd4fdbac0eeda6041 | [] | no_license | patricksingleton86/boutique_ado_v1 | 2b3e2ecd7577fb187525c6fa0d895145fbc08152 | ac9e991ff2c64a98ce0b8f7f506a10837956dafb | refs/heads/master | 2023-03-17T01:34:40.658991 | 2021-03-16T14:02:47 | 2021-03-16T14:02:47 | 340,324,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | # Generated by Django 3.1.7 on 2021-03-10 09:40
from django.db import migrations
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('checkout', '0003_auto_20210309_1552'),
]
operations = [
migrations.AlterField(
model_name='order',
name='country',
field=django_countries.fields.CountryField(max_length=2),
),
]
| [
"[email protected]"
] | |
a8cb759a47d6d63e2d69156574096e7ced94e9bc | 59911f060202e0329a305a4ffb8329fef46079df | /src/mappers/person_mapper.py | 8ad047e7733204b1679d325a9b52d8a988ee4a1a | [
"MIT"
] | permissive | GDGPetropolis/backend-event-checkin | 1e3280b55ae2838c1cd00f3560ee1cc8a79ca3e7 | 694089ad19ee6ce8173b8fb3e38fcc1741eb4723 | refs/heads/master | 2020-04-24T12:29:41.837473 | 2019-03-28T16:34:09 | 2019-03-28T16:34:09 | 171,957,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | from src.repositories.entities.person import Person as DataPerson
from src.domain.entities.person import Person as DomainPerson
from src.application.models.person_model import PersonModel
class PersonMapper(object):
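    # Converts between the API model (PersonModel), the domain entity (DomainPerson)
    # and the persistence entity (DataPerson); each method returns None for falsy input.
    # Note: domain_to_model actually builds the persistence entity (DataPerson).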
@classmethod
def model_to_domain(cls, model: PersonModel):
if model:
return DomainPerson(id=model.id, nick=model.nick, photo=model.photo, email=None, name=None, events=list())
@classmethod
def data_to_domain(cls, data: DataPerson):
if data:
return DomainPerson(id=data.id, nick=data.nick, photo=data.photo, email=data.email, name=data.name, events=list())
@classmethod
def domain_to_model(cls, domain: DomainPerson):
if domain:
return DataPerson(id=domain.id, nick=domain.nick, photo=domain.photo, email=domain.email, name=domain.name)
| [
"[email protected]"
] | |
b778f0bcd27786a4be937fba9a023d8f4c35c68c | 9923e30eb99716bfc179ba2bb789dcddc28f45e6 | /openapi-generator/python/test/test_hos_logs_summary_response_drivers.py | 5733eeb8fc3d9ded047880b3b5940e1ba43f6fd4 | [] | no_license | silverspace/samsara-sdks | cefcd61458ed3c3753ac5e6bf767229dd8df9485 | c054b91e488ab4266f3b3874e9b8e1c9e2d4d5fa | refs/heads/master | 2020-04-25T13:16:59.137551 | 2019-03-01T05:49:05 | 2019-03-01T05:49:05 | 172,804,041 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,564 | py | # coding: utf-8
"""
Samsara API
# Introduction Samsara provides API endpoints for interacting with Samsara Cloud, so that you can build powerful applications and custom solutions with sensor data. Samsara has endpoints available to track and analyze sensors, vehicles, and entire fleets. The Samsara Cloud API is a [RESTful API](https://en.wikipedia.org/wiki/Representational_state_transfer) accessed by an [HTTP](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol) client such as wget or curl, or HTTP libraries of most modern programming languages including python, ruby, java. We use built-in HTTP features, like HTTP authentication and HTTP verbs, which are understood by off-the-shelf HTTP clients. We allow you to interact securely with our API from a client-side web application (though you should never expose your secret API key). [JSON](http://www.json.org/) is returned by all API responses, including errors. If you’re familiar with what you can build with a REST API, the following API reference guide will be your go-to resource. API access to the Samsara cloud is available to all Samsara administrators. To start developing with Samsara APIs you will need to [obtain your API keys](#section/Authentication) to authenticate your API requests. If you have any questions you can reach out to us on [[email protected]](mailto:[email protected]) # Endpoints All our APIs can be accessed through HTTP requests to URLs like: ```curl https://api.samsara.com/<version>/<endpoint> ``` All our APIs are [versioned](#section/Versioning). If we intend to make breaking changes to an API which either changes the response format or request parameter, we will increment the version. # Authentication To authenticate your API request you will need to include your secret token. You can manage your API tokens in the [Dashboard](https://cloud.samsara.com). They are visible under `Settings->Organization->API Tokens`. Your API tokens carry many privileges, so be sure to keep them secure. Do not share your secret API tokens in publicly accessible areas such as GitHub, client-side code, and so on. Authentication to the API is performed via [HTTP Basic Auth](https://en.wikipedia.org/wiki/Basic_access_authentication). Provide your API token as the basic access_token value in the URL. You do not need to provide a password. ```curl https://api.samsara.com/<version>/<endpoint>?access_token={access_token} ``` All API requests must be made over [HTTPS](https://en.wikipedia.org/wiki/HTTPS). Calls made over plain HTTP or without authentication will fail. # Request Methods Our API endpoints use [HTTP request methods](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_methods) to specify the desired operation to be performed. The documentation below specified request method supported by each endpoint and the resulting action. ## GET GET requests are typically used for fetching data (like data for a particular driver). ## POST POST requests are typically used for creating or updating a record (like adding new tags to the system). With that being said, a few of our POST requests can be used for fetching data (like current location data of your fleet). ## PUT PUT requests are typically used for updating an existing record (like updating all devices associated with a particular tag). ## PATCH PATCH requests are typically used for modifying an existing record (like modifying a few devices associated with a particular tag). ## DELETE DELETE requests are used for deleting a record (like deleting a tag from the system). 
# Response Codes All API requests will respond with appropriate [HTTP status code](https://en.wikipedia.org/wiki/List_of_HTTP_status_codes). Your API client should handle each response class differently. ## 2XX These are successful responses and indicate that the API request returned the expected response. ## 4XX These indicate that there was a problem with the request like a missing parameter or invalid values. Check the response for specific [error details](#section/Error-Responses). Requests that respond with a 4XX status code, should be modified before retrying. ## 5XX These indicate server errors when the server is unreachable or is misconfigured. In this case, you should retry the API request after some delay. # Error Responses In case of a 4XX status code, the body of the response will contain information to briefly explain the error reported. To help debugging the error, you can refer to the following table for understanding the error message. | Status Code | Message | Description | |-------------|----------------|-------------------------------------------------------------------| | 401 | Invalid token | The API token is invalid and could not be authenticated. Please refer to the [authentication section](#section/Authentication). | | 404 | Page not found | The API endpoint being accessed is invalid. | | 400 | Bad request | Default response for an invalid request. Please check the request to make sure it follows the format specified in the documentation. | # Versioning All our APIs are versioned. Our current API version is `v1` and we are continuously working on improving it further and provide additional endpoints. If we intend to make breaking changes to an API which either changes the response format or request parameter, we will increment the version. Thus, you can use our current API version worry free. # FAQs Check out our [responses to FAQs here](https://kb.samsara.com/hc/en-us/sections/360000538054-APIs). Don’t see an answer to your question? Reach out to us on [[email protected]](mailto:[email protected]). # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openapi_client
from openapi_client.models.hos_logs_summary_response_drivers import HosLogsSummaryResponseDrivers # noqa: E501
from openapi_client.rest import ApiException
class TestHosLogsSummaryResponseDrivers(unittest.TestCase):
"""HosLogsSummaryResponseDrivers unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testHosLogsSummaryResponseDrivers(self):
"""Test HosLogsSummaryResponseDrivers"""
# FIXME: construct object with mandatory attributes with example values
# model = openapi_client.models.hos_logs_summary_response_drivers.HosLogsSummaryResponseDrivers() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
33895a3141abd63586e6a69b3fa2532c28faf4de | eda9ab41ad5e02076df4c26d886bf6bbe944f888 | /web_flask/8-cities_by_states.py | e14df9502765c4a1b4457deb1f6f2be2cfeee3dc | [] | no_license | dondropo/AirBnB_clone_v2 | 6a8790abda87aa2a393f194005703be981f1bbd3 | d64097efddd06e660739124496a1ff1b0afa6ac2 | refs/heads/master | 2022-12-13T07:32:31.354201 | 2020-09-03T02:07:26 | 2020-09-03T02:07:26 | 287,393,695 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,970 | py | #!/usr/bin/python3
"""starts a Flask web application"""
from flask import Flask, render_template
from models import storage
from models.state import State
app = Flask(__name__)
@app.route('/', strict_slashes=False)
def hbnb_greetings():
""" Hello HBNB """
return "Hello HBNB!"
@app.route('/hbnb', strict_slashes=False)
def hbnb():
""" hbnb """
return "HBNB"
@app.route('/c/<text>', strict_slashes=False)
def c_text(text):
""" C Text Replacing """
return 'C ' + text.replace("_", " ")
@app.route('/python/', strict_slashes=False)
@app.route('/python/<text>', strict_slashes=False)
def python_text(text="is cool"):
""" display defined txt """
return 'Python ' + text.replace("_", " ")
@app.route('/number/<int:n>', strict_slashes=False)
def number(n):
"""Display n is a number if type(n) == int"""
if type(n) == int:
return "{} is a number".format(n)
@app.route('/number_template/<int:n>', strict_slashes=False)
def numbersandtemplates(n):
"""display a HTML page only if n is an integer"""
return render_template('5-number.html', n=n)
@app.route('/number_odd_or_even/<int:n>', strict_slashes=False)
def even_odd(n):
""" finds if even or odd """
if n % 2 == 0:
its = 'even'
else:
its = 'odd'
return render_template('6-number_odd_or_even.html', n=n,
its=its)
@app.route('/states_list', strict_slashes=False)
def states_list():
"""states list"""
states = storage.all(State).values()
return render_template('7-states_list.html', states=states)
@app.teardown_appcontext
def t1clozer(self):
""" string to be returned """
storage.close()
@app.route('/cities_by_states', strict_slashes=False)
def cities_states():
""" cities with states """
states = storage.all(State).values()
return render_template('8-cities_by_states.html', states=states)
if __name__ == "__main__":
app.run(host="0.0.0.0", port="5000")
| [
"[email protected]"
] | |
4c76ba6e16aa935db62df6e2583816694dcdcf7c | f156583c45d7507be1133655961840c2bec8493a | /Lab2/task2.py | 73262b59459cb0919cc3c598ee6e55c526b9086f | [] | no_license | Abilash7/BigDataProgramming | 749464ad705501fb3873406a65ada3260ffd1986 | f3066a06463816dd541bc5bbdccb69606eb296f8 | refs/heads/master | 2020-05-31T04:30:46.154127 | 2019-07-23T21:09:36 | 2019-07-23T21:09:36 | 190,099,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py | import os
from pyspark import *
from pyspark.sql import SQLContext,Row
from pyspark.sql.types import *
from pyspark.sql.types import StructType,StructField,LongType,StringType,TimestampType,DateType
os.environ["SPARK_HOME"] = '/home/apandit7'
sc = SparkContext(appName="dataframe")
sq=SQLContext(sc)
#matchesdf =sq.read.format("csv").option("header", "true").load("WorldCupMatches.csv")
#matchesdf.show(truncate=False)
#matchesdf.printSchema()
mySchema =StructType([
StructField("Year",LongType(), True),
StructField("DateTime",StringType(), True),
StructField("Stage",StringType(), True),
StructField("Stadium",StringType(),True),
StructField("City",StringType(), True),
StructField("Home Team Name",StringType(), True),
StructField("Home Team Goals",IntegerType(), True),
StructField("Away Team Goals",IntegerType(), True),
StructField("Away Team Name",StringType(), True),
StructField("Win conditions",StringType(), True),
StructField("Attendance",IntegerType(), True),
StructField("Half-time Home Goals",IntegerType(), True),
StructField("Half-time Away Goals",IntegerType(), True),
StructField("Referee",StringType(), True),
StructField("Assistant1",StringType(), True),
StructField("Assistant2",StringType(), True),
StructField("RoundID",IntegerType(), True),
StructField("MatchID",LongType(), True),
StructField("Home Team Initials",StringType(), True),
StructField("Away Team Initials",StringType(), True)
])
Structdf=sq.read.format("csv").schema(mySchema).option("header","true").load("WorldCupMatches.csv")
Structdf.show(10)
Structdf.printSchema()
#Structdf.show(10)
Structdf.registerTempTable('Structdf')
df= sq.sql("SELECT * FROM Structdf")
df.show(5)
#df.dropDuplicates(df.columns).show()
#df.show(5)
df1=df.limit(30)
df2=df.limit(50)
Uniondf=df1.unionAll(df2)
Uniondf.show()
Uniondf.registerTempTable('Uniondf')
#Uniondf.printSchema()
#dfquery3=sq.sql("SELECT Year,DateTime,Stage,Stadium from Structdf")
#dfquery3.show(10)
dfquery3=sq.sql("SELECT Year,City,`Home Team Goals` as Home_Team_Goals from Structdf where City like '%Zurich%'")
dfquery3.show(50)
dfquery4=sq.sql("select City,Attendance from Structdf where Attendance>9000 order by Attendance desc limit 20")
#,Referee,RoundID,MatchID,`Home Team Initials` as Home_Team_Initials from Structdf where Attendance>4444 group by City")
dfquery4.show()
dfquery5=sq.sql("select * from Structdf left join Uniondf on Structdf.City=Uniondf.City")
dfquery5.show()
dfquery6=Structdf.filter(Structdf['Half-time Home Goals']>2).show(10)
dfquery7=Structdf.groupBy("Home Team Goals").count().show(30)
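# NOTE: .show() prints and returns None, so dfquery6 and dfquery7 above hold None rather than DataFrames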
dfquery8=sq.sql("select * from Structdf where `Home Team Name` like '%Argentina%' and Year like '%1930%'")
dfquery8.show()
dfquery9=Structdf.drop("Home Team Name", "City")
dfquery9.show(10)
#dfquery10=Structdf.select("Stage", "Attendance").write.save("StageAndAttendance.parquet")
dfquery10=dfquery9.withColumnRenamed("Home Team Goals", "Home_Goals").withColumnRenamed("Away Team Goals", "Away_Goals").withColumnRenamed("Away Team Name", "Away_Name").withColumnRenamed("Win conditions", "Wc").withColumnRenamed("Half-time Home Goals", "HalfHome").withColumnRenamed("Half-time Away Goals","HalfAway").withColumnRenamed("Home Team Initials","HomeIn").withColumnRenamed("Away Team Initials","AwayIn")
dfquery11=dfquery10.write.parquet("Attendance_part1",mode="overwrite",partitionBy="Year",compression='gzip')
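# NOTE: DataFrameWriter.parquet() also returns None; the files land under Attendance_part1/, partitioned by Year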
#dfquery10.rdd.take(2)
| [
"[email protected]"
] | |
b284f9b10b8c572c65a64f1f9b88cde920a8b781 | d0cb58e1658d4b5b88bdc07e497dc8092707ae02 | /2021/01january/24specify_data.py | 6381a461e0645467957c5e23c467055af3ce9fb7 | [] | no_license | June-fu/python365 | 27f9b753d38ade549d59aa8f2d8bda0fb8b1e20c | 242033a4b644a7566fbfa4dba9b60f60aa31fe91 | refs/heads/master | 2021-07-02T21:42:28.454091 | 2021-05-04T15:08:44 | 2021-05-04T15:08:44 | 233,629,713 | 0 | 0 | null | 2020-01-13T15:52:58 | 2020-01-13T15:36:53 | null | UTF-8 | Python | false | false | 466 | py | #!/usr/bin/python
'''
# @ Author: june-fu
# @ Create Time: 2021-02-22 23:59:17
# @ Modified by: june-fu
# @ Modified time: 2021-02-22 23:59:19
# @ Description:arguments parse_dates
'''
import pandas as pd
from io import StringIO
data =('date,A,B,C\n'
'20090101,a,1,2\n'
'20090102,b,3,4\n'
'20090103,c,4,5')
# arguments parse_dates
df = pd.read_csv(StringIO(data), index_col=0, parse_dates=True)
print(df)
# These are Python datetime objects
print(df.index) | [
"[email protected]"
] | |
bb94b6c1e0c9f982738b056dbb616b77648347a3 | dad01fede3f4802e98d5e6ef1d7a3d2c1c448257 | /백준/[백준_10798] 세로읽기.py | acf14f2f104ba19607027f68b86b204b69d0a95a | [] | no_license | alyssa1996/CodingExercise | 55ae8b1833552f1106a08005d651289d2dd5fd60 | d31c3822a39ae8a301d48e1926fa787f556cff87 | refs/heads/master | 2021-10-17T04:11:22.155442 | 2021-10-08T13:45:08 | 2021-10-08T13:45:08 | 218,627,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | words = []
max_length = 0
for _ in range(5):
current_line = input()
words.append(current_line)
if max_length < len(current_line):
max_length = len(current_line)
answer = ''
for j in range(max_length):
for i in range(5):
if len(words[i]) < j+1:
continue
answer += words[i][j]
print(answer)
| [
"[email protected]"
] | |
df764e47c2d329a93fb18dde5d51c979d42f66a9 | c17de119c15572dd857ff7eda8bd107fb2d759f7 | /practice/Nastya/Nastya_4hw/four/art/art/art/urls.py | ab23f525aaf40ec8b887e8e3cbc9a09a7b6600a0 | [] | no_license | zdimon/course-3 | 356e5b45c7d13cd2ff1c7bd631a341ec6cb7687a | 64a6db6ab832b106d957203fd6c85117fcf6e273 | refs/heads/master | 2020-04-06T11:14:18.943711 | 2019-01-19T08:51:13 | 2019-01-19T08:51:13 | 157,409,064 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | """art URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from main.views import home, detail
urlpatterns = [
url(r'^$',home,name='home'),
url(r'^grappelli/', include('grappelli.urls')),
url(r'^detail/(?P<id>[0-9]+)$',detail),
url(r'^admin/', admin.site.urls),
]
#url(r'^grappelli/', include('grappelli.urls')),
| [
"[email protected]"
] | |
b8c7d45a7d9f06babb8a3b0f66e4053d7a8987c1 | 70dbf5fbb3c8597f4d3db9f2b4f76e3627124440 | /Forex/wsgi.py | 089ce26e8bcadee8a253354db99e400c4b0913ca | [] | no_license | JuanPabloArbelaez/forex | bcf6a746181645ced6c4f4545c16f57b09dc70ac | 5d8eea46679cd08a3572abb46a18d84aa6f8b9b9 | refs/heads/main | 2023-01-12T00:08:05.652151 | 2020-11-01T18:18:45 | 2020-11-01T18:18:45 | 308,888,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
WSGI config for Forex project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Forex.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
03ed535cd8d7970bd8d2efdc797ed61e8b8f896d | 4ba706744ab27ff97c2f02655f90c3242b350aa1 | /python/Mukesh_solution.py | bd9006641b179e50544f63c84822d753d805296f | [] | no_license | PengInGitHub/Kaggle-Titanic | 3d6cd6e66dd72e6425c9c4de5c903d1322243704 | 795d4a6b464a0c0e23ea89b420f3b9d18dca9e98 | refs/heads/master | 2020-03-26T20:34:46.680789 | 2018-08-26T13:56:48 | 2018-08-26T13:56:48 | 145,333,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,359 | py | #https://www.kaggle.com/chapagain/titanic-solution-a-beginner-s-guide
#strategy from Mukesh Chapagain
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from func import load_data
#outline
#1.EDA: Exploratory Data Analysis with Visualization
#2.Feature Extraction
#3.Data Modeling
#4.Model Evaluation
#1.EDA: Exploratory Data Analysis with Visualization
#1.1 load data
train, test, combine = load_data()
#1.2 data structure
print train.shape#891*12
train.describe()#statistics on numerical variables
train.describe(include=['O'])#categorical data
train.info()#check data tyoe and missing value
train.isnull().sum()
train['Embarked'].value_counts(normalize=True)
#1.3 relationship btw features and target variable
#target var distribution
survived = train['Survived'][train['Survived']==1]
not_survived = train['Survived'][train['Survived']==0]
print "Survived: %i (%.1f%%)"%(len(survived),float(len(survived))/len(train)*100.0)
print "Not Survived: %i (%.1f%%)"%(len(not_survived), float(len(not_survived))/len(train)*100.0)
#features and target var
#Passenger class
train.Pclass.value_counts()
#average survive rate is 38%
train.groupby('Pclass').Survived.value_counts(normalize=True)
#most 1st class survived (63%)
#47% 2nd class survived
#only 24% class 3 survived
train[['Pclass','Survived']].groupby('Pclass', as_index=False).mean()
sns.barplot(x='Pclass', y='Survived', data=train)
#Sex
train.groupby('Sex').Survived.value_counts(normalize=True)
#female is much more likely to survive(74%) in comparison to male(19%)
train[['Sex','Survived']].groupby('Sex', as_index=False).mean()
sns.barplot(x='Sex', y='Survived', data=train)
#sex and passenger class
tab = pd.crosstab(train['Pclass'],train['Sex'])
tab.div(tab.sum(1).astype(float), axis=0).plot(kind='bar', stacked=True)
plt.xlable('Pclass')
plt.ylable('Survived')
sns.factorplot('Sex','Survived', hue='Pclass', size=2, aspect=3, data=train)
#women in the 1st and 2nd class were almost all survived (close to 100%)
#men from the 2nd and 3rd class were almost all died(90%)
#Embarked
train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean()
sns.barplot(x='Embarked', y='Survived', data=train)
#Parch
train[['Parch', 'Survived']].groupby(['Parch'], as_index=False).mean()
sns.barplot(x='Parch', y='Survived', data=train)
#SibSp
train[['SibSp','Survived']].groupby(['SibSp'], as_index=False).mean()
sns.barplot(x='SibSp', y='Survived', data=train)
#Age
#violin plot
fig = plt.figure(figsize=(15,5))
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
sns.violinplot(x='Embarked', y='Age', data=train, hue='Survived', split=True, ax=ax1)
sns.violinplot(x='Pclass', y='Age', hue='Survived', data=train, split=True, ax=ax2)
sns.violinplot(x='Sex', y='Age', hue='Survived', data=train, split=True, ax=ax3)
#1st class have many more old ppl but less children
#almost all children in the 2nd class survived
#most children in the 3rd class survived
#younger ppl in the first class survived in comparison to the old
#check correlation
#Heatmap of correlation btw diff features
#focus on features have strong pos or neg correlation with target var
plt.figure(figsize=(25,10))
corr = train.drop('PassengerId', axis=1).corr()
sns.heatmap(corr, vmax=0.6, square=True, annot=True)
#Pclass and Fare have relative strong corr
#Feature Extraction
#generate Title
#use pd.Series.str.extract(' ([A-Za-z]+)\.')
train_test_data = [train, test]#return a list
for data in train_test_data:
data['Title'] = data.Name.str.extract(' ([A-Za-z]+)\.')
train.head()
#distribution
train.groupby('Title').Survived.value_counts()
#re-group
#use pd.Series.replace(to_be_replaced, new_value)
to_be_replaced = ['Lady', 'Countess','Capt', 'Col', \
'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona']
for data in train_test_data:
data['Title'] = data['Title'].replace(to_be_replaced, 'Other')
data['Title'] = data['Title'].replace('Mme', 'Mrs')
data['Title'] = data['Title'].replace('Ms', 'Miss')
data['Title'] = data['Title'].replace('Mlle', 'Miss')
train['Title'].head()
#see new distribution
train[['Title','Survived']].groupby('Title', as_index=False).mean()
#convert categorical into ordinal
#use Series.map(a_map_object)
title_mapping = {'Mr':1, 'Miss':2, 'Mrs':3, 'Master':4, 'Other':5}
for data in train_test_data:
data['Title'] = data['Title'].map(title_mapping)
data['Title'] = data['Title'].fillna(0)
for data in train_test_data:
data['Sex'] = data['Sex'].map({'male':0, 'female':1}).astype(int)
#missing Embarked
for data in train_test_data:
data['Embarked'] = data['Embarked'].fillna('S')
data['Embarked'] = data['Embarked'].map({'S':0, 'C':1, 'Q':2}).astype(int)
#Age
#fill na by a random num in (age_mean-std, age_mean+std)
for data in train_test_data:
avg = data['Age'].mean()
std = data['Age'].std()
null_count = data['Age'].isnull().sum()
random_list = np.random.randint(avg-std, avg+std, size=null_count)
#impute nan
data['Age'][np.isnan(data['Age'])] = random_list
data['Age'] = data['Age'].astype(int)
for data in train_test_data:
data['AgeBand'] = pd.cut(data['Age'], 5)
#map category to int
#use df.ioc[condition, a_col] = new_value
for data in train_test_data:
data.loc[data['Age']<=16, 'Age'] = 0
data.loc[(data['Age']>16)&(data['Age']<=32), 'Age'] = 1
data.loc[(data['Age']>32)&(data['Age']<=48), 'Age'] = 2
data.loc[(data['Age']>48)&(data['Age']<=64), 'Age'] = 3
data.loc[data['Age']>64, 'Age'] = 4
#Fare
#fill na
for data in train_test_data:
data['Fare'] = data['Fare'].fillna(data['Fare'].median())
#cut
for data in train_test_data:
data['Fareband'] = pd.qcut(data['Fare'],4)
#map to category
for data in train_test_data:
data.loc[data['Fare']<=7.91, 'Fare'] = 0
data.loc[(data['Fare']>7.91)&(data['Fare']<=14.454), 'Fare'] = 1
data.loc[(data['Fare']>14.454)&(data['Fare']<=31.0), 'Fare'] = 2
data.loc[data['Fare']>31, 'Fare'] = 3
data['Fare'] = data['Fare'].astype(int)
#SibSp and Parch
#FamilySize
for data in train_test_data:
data['FamilySize'] = data['SibSp'] + data['Parch'] + 1
train[['FamilySize', 'Survived']].groupby('FamilySize', as_index=False).mean()
#Travel alone
for data in train_test_data:
data['IsAlone'] = 0
data.loc[data['FamilySize'] == 1 , 'IsAlone'] = 1
#Feature Selection
drops = ['FamilySize', 'Name', 'Parch', 'SibSp', 'Ticket', 'Cabin']
train = train.drop(drops, axis=1)#drop column
test = test.drop(drops, axis=1)#drop column
train = train.drop(['PassengerId', 'AgeBand', 'Fareband'], axis=1)
test = test.drop(['AgeBand', 'Fareband'], axis=1)
train.head()
#Classification and Accuracy
#Random Forest
#prepare data
x_train = train.drop('Survived', axis=1)
y_train = train['Survived']
x_test = test.drop("PassengerId", axis=1).copy()
#classify
forest = RandomForestClassifier(max_depth=5, min_samples_split=2,
n_estimators=100, random_state=1)
forest = forest.fit(x_train, y_train)
my_prediction = forest.predict(x_test)
#submit
PassengerId = np.array(test['PassengerId']).astype(int)
submit = pd.DataFrame(my_prediction, PassengerId, columns=['Survived'] )
submit.to_csv('submit.csv', index_label=['PassengerId'])
| [
"[email protected]"
] | |
8e4439a5213755463643b9a98d6b098eb3614207 | 92e26b93057723148ecb8ca88cd6ad755f2e70f1 | /cov_exp/plain30_orth/plt.py | 15145016643831e2908e2041dc913dd1c9a66851 | [] | no_license | lyuyanyii/CIFAR | 5906ad9fbe1377edf5b055098709528e06b5ace2 | d798834942d6a9d4e3295cda77488083c1763962 | refs/heads/master | 2021-08-30T20:09:52.819883 | 2017-12-19T08:37:37 | 2017-12-19T08:37:37 | 112,701,370 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | import matplotlib.pyplot as plt
import pickle
import numpy as np
import scipy.signal as signal
with open("hisloss.data", "rb") as f:
his = pickle.load(f)
his = np.array(his)
hisloss = his[:,1]
hisloss = signal.medfilt(hisloss, 9)
#print(np.max(hisloss[10000:]))
plt.plot(range(len(hisloss)), hisloss)
plt.show()
| [
"[email protected]"
] | |
3f7b44bbb4eec93fdac8dcc96a87aaa350bf30b9 | fe62351e9d71d1147b56d4bab13498551e876074 | /Smartphone/controller_smartphone.py | 6d833a1ec5ca403517e5e2da8e115530bc8311a6 | [] | no_license | denisousa/BabyMonitor-RabbitMQ | 245f80d4e5570302e76f0ae7a373593207cff605 | 1e4be1131fd55e43025fa8c58cfa030098f43902 | refs/heads/master | 2021-02-05T08:38:46.785808 | 2020-06-11T23:54:09 | 2020-06-11T23:54:09 | 243,760,598 | 2 | 1 | null | 2020-05-15T19:07:09 | 2020-02-28T12:45:34 | Python | UTF-8 | Python | false | false | 1,642 | py | from .conection_smartphone import SmartphoneConnection
import threading
import pika
class SmartphoneController():
def __init__(self):
        self.database = SmartphoneConnection()
def run(self):
if self.button_is_pressed:
print(" [*] Smartphone waiting for messages. To exit press CTRL+C")
def callback_smartphone(ch, method, properties, body):
if "NOTIFICATION" in str(body):
self.is_notification = True
self.message = str(body)
else:
self.is_notification = False
self.message = str(body).replace('b"STATUS: ', "")
self.message = self.message.replace('"', "")
self.channel.basic_consume(
queue=queue_smartphone,
on_message_callback=callback_smartphone,
auto_ack=True,
)
self.channel.start_consuming()
self.connection.close()
class SmartphoneProducer(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.connection = pika.BlockingConnection(
pika.ConnectionParameters(host="localhost")
)
self.channel = self.connection.channel()
self.channel.exchange_declare(
exchange=exchange_baby_monitor, exchange_type="direct"
)
self.button_is_pressed = False
def run(self):
message = "CONFIRMATION: Notification received!"
self.channel.basic_publish(
exchange=exchange_baby_monitor,
routing_key=routing_key_baby_monitor,
body=message,
)
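# NOTE: queue_smartphone, exchange_baby_monitor and routing_key_baby_monitor are
# assumed to be module-level settings defined elsewhere in this package.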
| [
"[email protected]"
] | |
6b41fa9282806ca80f38421921c2bf0ba0074b8a | 67a92a91696b6d475aa2791a439fa8a109dfbf93 | /Naive Bayes classification example/Naive Bayes classification example.py | 9e22b2998671aae83ca81d3f00601b6161ad689a | [] | no_license | mcoric96/Natural-language-processing | 92eafc3540653506678d12a28d7de2f3b66d18f0 | c45dc585f17b8f868a72b1150f91e510e9f0bea6 | refs/heads/master | 2020-04-17T08:49:38.090606 | 2019-06-16T13:58:55 | 2019-06-16T13:58:55 | 166,428,627 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,009 | py | import re
import math
from test_v06 import *
classes = ['AFRIKA','AUSTRALIJA','AZIJA','EUROPA','JUZNA_AMERIKA','SJEVERNA_AMERIKA']#croatian names for continents
def read_file(filename):
return open(filename, 'r', encoding='utf-8').read()
#first we read file,create unigram model(dictionary) of that document,countries in text are separated with " "
def file_to_doc_model(filename):
d = {} #dictionary-unigram model of a given text
l = [w.lower() for w in read_file(filename).strip().split()]
#iterating on list of countries
for country in l:
if country in d:
d[country.lower()]+=1
else:
d[country.lower()]=1
return d
#build learning model , input: path - path of folder which contains training examples. Each folder is 1 class(continent) and contains
#documents with names of countries. Output : trained model - dictionary with classes(continents - Africa,Europe,...) as keys and list of documents
#from that particular class turned into unigram model
def build_train_model(path):
import os
train_model = {}
for folder in os.listdir(path):
train_model[folder] = []
for filename in os.listdir(path + folder + '/'):
train_model[folder].append(file_to_doc_model(path + folder + '/' + filename))
return train_model
TRAIN_PATH = 'train/'
TEST_PATH = 'test/'
train_model = build_train_model(TRAIN_PATH)#dictionary,key is the name of the continent,value is a list of unigrams
#input: trained model from function build_train_model
#output: prior is a dictionary with classes as keys and their prior probabilities as values.For each class c: P(c) = Nc / N where Nc is
#number of documents of class c and N is number of all documents from training set
def get_prior(train_model):
d = {} #prior probabilities
n = 0 #number of all documents
for continent in classes:
n += len(train_model[continent])
for continent in classes:
d[continent] = len(train_model[continent])/n
return d
#input: trained model from function build_train_model
#output: megadoc_model is dictionary with classes as keys and mega-document(concatenation of all documents of particular class c) are values
def get_megadoc_model(train_model):
    d = {} #returned value, dictionary
for continent in train_model.keys():
c_dict = {} #dictionary for 1 continent
for d1 in train_model[continent]: #for each unigram(dictionary) of 1 continent
for k in d1.keys():
if k in c_dict:
c_dict[k] += d1[k]
else:
c_dict[k] = d1[k]
d[continent] = c_dict
return d
#input: megadoc_model from function get_megadoc_model,output: set of all words from all megadocuments
def get_vocabulary(megadoc_model):
l = []
for continent in megadoc_model.keys():
for country in megadoc_model[continent]:
l.append(country)
return set(l)
#input: megadoc_model and test_model represented as unigram
#output:conditional probability P(w|c)(with add-1 smoothing) for all words from test model per each class
# P(w|c) = (count(w,c)+1) / (total words in megadoc of class c + |V|), where V is the complete vocabulary (add-one smoothing)
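# e.g. a word seen 3 times in a 100-word class megadocument with |V|=50 gets (3+1)/(100+50) ≈ 0.0267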
def get_likelihood(megadoc_model, test_model):
#for dictionary megadoc_model, keys are classes(continents) and values are dictionaries with countries as keys,each with number of showing up
#in that unigram as values test model is unigram with countries as keys and number of showing up as values
continents = megadoc_model.keys()
n = len(get_vocabulary(megadoc_model)) #length of vocabulary
d = {} # unigram - dictionary
for continent in continents:
num_classes = 0
for country in megadoc_model[continent]:
num_classes += megadoc_model[continent][country]
for country in test_model.keys():
            #for each continent and each country, we compute the probability that country belongs to that continent
            #we sum up all appearances of that country in the unigram (dictionary) of that continent
#we sum up all words in megadoc of that particular class(continent)
num = 0
            if country in megadoc_model[continent].keys():#if the country appeared at least once in this continent
num = megadoc_model[continent][country]
p = (num+1)/(num_classes+n) #P(w|c) likelihood
d[(country,continent)] = p
return d
#input: likelihood of conditional probabilities P(w|c), test model as unigram, prior probabilities of classes
#output: conditional probabilities P(c|d) that document d is in the class c. It is represented with a dictionary, such that classes are keys and P(c|d)
#probabilities are values. P(c|d) = P(c)*P(c|w1)*...*P(c|wn) for all words w1,...,wn from vocabulary V.
#For this problem we use log-space because probabilities are very small numbers. logP(c|d) = logP(c)+logP(c|w1)+...+logP(c|wn)
def get_posterior(prior, likelihood, test_model):
d = {} #dictionary
for continent in classes:
        posterior = math.log(prior[continent])#probability of class (continent)
for key in likelihood.keys():
if key[1] == continent:
posterior += math.log(likelihood[key])
d[continent] = round(posterior,5)
return d
#input:trained model and test model from files , output: the most probable continent(class)
def classify(train_model,test_model):
megadoc = get_megadoc_model(train_model) #megadoc model from training set
prior = get_prior(train_model)#dictionary of prior probabilities for classes
likelihood = get_likelihood(megadoc,test_model)#conditional probabilities for test model
posterior = get_posterior(prior,likelihood,test_model)#posterior probabilities for test model
#from posterior probabilities ,we choose class with the biggest posterior probability
c = classes[0]
for i in range(1,len(classes)):
if posterior[classes[i]] > posterior[c]:
c=classes[i]
return c
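# --- Editor's sketch: classifying one test document (hypothetical file name) ---
# test_model = file_to_doc_model(TEST_PATH + 'some_test_file.txt')
# print(classify(train_model, test_model))  # e.g. 'EUROPA'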
| [
"[email protected]"
] | |
6e793b2cc17c297f591a41cdd1a13abe0a306ba9 | 02a7f56baa550100780fcd01d5d030e8ecd1c42a | /utils/rest.py | 9af6ef2458c501f5ad084c83a02d7a56cbca2ba0 | [] | no_license | srecinto/coupon-code-redemption | de7891f3f3dd8ade5bbbad7422d52cc9c02ed4e1 | 19a12650efacdc942f5671fbb4c63263bca16fb3 | refs/heads/master | 2023-05-10T22:01:38.967442 | 2020-02-04T23:27:08 | 2020-02-04T23:27:08 | 153,345,545 | 0 | 0 | null | 2023-05-02T17:42:39 | 2018-10-16T19:52:29 | Python | UTF-8 | Python | false | false | 9,324 | py | import os
import requests
import base64
import json
# from requests.packages.urllib3.exceptions import InsecurePlatformWarning
# from requests.packages.urllib3.exceptions import SNIMissingWarning
class OktaUtil:
# TODO: This should be configuration driven
REST_HOST = None
REST_TOKEN = None
OKTA_SESSION_ID_KEY = "okta_session_id"
OKTA_SESSION_TOKEN_KEY = "okta_session_id"
DEVICE_TOKEN = None
OKTA_HEADERS = {}
OKTA_OAUTH_HEADERS = {}
OIDC_CLIENT_ID = None
OIDC_CLIENT_SECRET = None
AUTH_SERVER_ID = None
def __init__(self, headers):
# This is to supress the warnings for the older version
# requests.packages.urllib3.disable_warnings((InsecurePlatformWarning, SNIMissingWarning))
self.REST_HOST = os.environ["OKTA_ORG_URL"]
self.REST_TOKEN = os.environ["OKTA_API_TOKEN"]
self.OIDC_CLIENT_ID = os.environ["OKTA_APP_CLIENT_ID"]
self.OIDC_CLIENT_SECRET = os.environ["OKTA_APP_CLIENT_SECRET"]
self.OIDC_REDIRECT_URL = os.environ["OKTA_OIDC_REDIRECT_URL"]
if "OKTA_AUTHSERVER_ID" in os.environ:
self.AUTH_SERVER_ID = os.environ["OKTA_AUTHSERVER_ID"]
print("HAS AUTH SERVER: {0}".format(self.AUTH_SERVER_ID))
user_agent = ""
if "User-Agent" in headers:
user_agent = headers["User-Agent"]
self.OKTA_HEADERS = {
"Accept": "application/json",
"Content-Type": "application/json",
"Authorization": "SSWS {api_token}".format(api_token=self.REST_TOKEN),
"User-Agent": user_agent
}
if "X-Forwarded-For" in headers:
self.OKTA_HEADERS["X-Forwarded-For"] = headers["X-Forwarded-For"]
if "X-Forwarded-Port" in headers:
self.OKTA_HEADERS["X-Forwarded-Port"] = headers["X-Forwarded-Port"]
if "X-Forwarded-Proto" in headers:
self.OKTA_HEADERS["X-Forwarded-Proto"] = headers["X-Forwarded-Proto"]
self.OKTA_OAUTH_HEADERS = {
"Accept": "application/json",
"Content-Type": "application/x-www-form-urlencoded",
"Authorization": "Basic {encoded_auth}".format(
encoded_auth=self.get_encoded_auth(
client_id=self.OIDC_CLIENT_ID,
client_secret=self.OIDC_CLIENT_SECRET))
}
print("OKTA_OAUTH_HEADERS: {0}".format(json.dumps(self.OKTA_OAUTH_HEADERS, indent=4, sort_keys=True)))
def get_user(self, user_id):
print("get_user()")
url = "{host}/api/v1/users/{user_id}".format(host=self.REST_HOST, user_id=user_id)
body = {}
return self.execute_get(url, body)
def update_user(self, user):
print("update_user()")
url = "{host}/api/v1/users/{user_id}".format(host=self.REST_HOST, user_id=user["id"])
return self.execute_post(url, user)
def userinfo_oauth(self, oauth_token):
print("userinfo_oauth()")
auth_server = ""
if self.AUTH_SERVER_ID:
auth_server = "/{0}".format(self.AUTH_SERVER_ID)
url = "{host}/oauth2{auth_server}/v1/userinfo".format(
host=self.REST_HOST,
auth_server=auth_server)
body = {}
# print("oauth_token: ", oauth_token)
headers = {
"Accept": "application/json",
"Authorization": "Bearer {0}".format(oauth_token)
}
return self.execute_get(url, body, headers)
def introspect_oauth_token(self, oauth_token):
print("introspect_oauth_token()")
auth_server = ""
if self.AUTH_SERVER_ID:
auth_server = "/{0}".format(self.AUTH_SERVER_ID)
url = "{host}/oauth2{auth_server}/v1/introspect?token={token}".format(
host=self.REST_HOST,
auth_server=auth_server,
token=oauth_token)
body = {}
return self.execute_post(url, body, self.OKTA_OAUTH_HEADERS)
def get_user_application_profile(self, app_id, user_id):
print("get_user_application_profile()")
url = "{host}/api/v1/apps/{app_id}/users/{user_id}".format(host=self.REST_HOST, app_id=app_id, user_id=user_id)
body = {}
return self.execute_get(url, body)
def update_user_application_profile(self, app_id, user_id, user_app_profile):
print("update_user_application_profile()")
url = "{host}/api/v1/apps/{app_id}/users/{user_id}".format(host=self.REST_HOST, app_id=app_id, user_id=user_id)
body = user_app_profile
return self.execute_post(url, body)
def send_mail(self, template_id, recipients, substitution=None):
print("send_mail()")
url = "{0}/transmissions".format(os.environ["SPARKPOST_API_URL"])
headers = {
"Authorization": os.environ["SPARKPOST_API_KEY"],
"Content-Type": "application/json"
}
body = {
"options": {
"sandbox": False
},
"content": {
"template_id": template_id,
"use_draft_template": False
},
"recipients": recipients
}
if substitution:
body["substitution_data"] = substitution
return self.execute_post(url, body, headers=headers)
def execute_post(self, url, body, headers=None):
print("execute_post(): ", url)
print(body)
headers = self.reconcile_headers(headers)
rest_response = requests.post(url, headers=headers, json=body)
response_json = rest_response.json()
# print json.dumps(response_json, indent=4, sort_keys=True)
return response_json
def execute_put(self, url, body, headers=None):
print("execute_put(): ", url)
print(body)
headers = self.reconcile_headers(headers)
rest_response = requests.put(url, headers=headers, json=body)
response_json = rest_response.json()
# print json.dumps(response_json, indent=4, sort_keys=True)
return response_json
def execute_delete(self, url, body, headers=None):
print("execute_delete(): ", url)
print(body)
headers = self.reconcile_headers(headers)
rest_response = requests.delete(url, headers=headers, json=body)
try:
response_json = rest_response.json()
except:
response_json = {"status": "none"}
# print json.dumps(response_json, indent=4, sort_keys=True)
return response_json
def execute_get(self, url, body, headers=None):
print("execute_get(): ", url)
print(body)
headers = self.reconcile_headers(headers)
rest_response = requests.get(url, headers=headers, json=body)
response_json = rest_response.json()
# print json.dumps(response_json, indent=4, sort_keys=True)
return response_json
def reconcile_headers(self, headers):
if headers is None:
headers = self.OKTA_HEADERS
return headers
def get_encoded_auth(self, client_id, client_secret):
print("get_encoded_auth()")
auth_raw = "{client_id}:{client_secret}".format(
client_id=client_id,
client_secret=client_secret
)
print("auth_raw: {0}".format(auth_raw))
encoded_auth = base64.b64encode(bytes(auth_raw, 'UTF-8')).decode("UTF-8")
print("encoded_auth: {0}".format(encoded_auth))
return encoded_auth
def create_oidc_auth_code_url(self, state, nonce, session_token=None):
print("create_oidc_auth_code_url()")
print("session_token: {0}".format(session_token))
session_option = ""
auth_server = ""
if (session_token):
session_option = "&sessionToken={session_token}".format(session_token=session_token)
if self.AUTH_SERVER_ID:
auth_server = "/{0}".format(self.AUTH_SERVER_ID)
url = (
"{host}/oauth2{auth_server}/v1/authorize?"
"response_type=code&"
"client_id={clint_id}&"
"redirect_uri={redirect_uri}&"
"state={state}&"
"nonce={nonce}&"
"response_mode=form_post&"
"scope=openid"
"{session_option}"
).format(
host=self.REST_HOST,
auth_server=auth_server,
clint_id=self.OIDC_CLIENT_ID,
redirect_uri=self.OIDC_REDIRECT_URL,
state=state,
nonce=nonce,
session_option=session_option
)
return url
def get_oauth_token(self, oauth_code):
print("get_oauth_token()")
print("oauth_code: {0}".format(oauth_code))
auth_server = ""
if self.AUTH_SERVER_ID:
auth_server = "/{0}".format(self.AUTH_SERVER_ID)
url = (
"{host}/oauth2{auth_server}/v1/token?"
"grant_type=authorization_code&"
"code={code}&"
"redirect_uri={redirect_uri}"
).format(
host=self.REST_HOST,
auth_server=auth_server,
code=oauth_code,
redirect_uri=self.OIDC_REDIRECT_URL
)
body = {
"authorization_code": oauth_code
}
return self.execute_post(url, body, self.OKTA_OAUTH_HEADERS)
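# --- Editor's sketch: typical authorization-code flow with this helper (hypothetical values) ---
# okta = OktaUtil(request_headers)  # headers dict taken from the incoming request
# login_url = okta.create_oidc_auth_code_url(state="state1", nonce="nonce1")
# tokens = okta.get_oauth_token(oauth_code)  # after the redirect comes back with a code
# info = okta.introspect_oauth_token(tokens["access_token"])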
| [
"[email protected]"
] | |
fcc5ddc73b281b313b617e5c843aecd709f936d2 | ec20f1b1b85fae194d2704ca048fd5f845a427e0 | /August LeetCoding Challenge/Week 1/Detect Capital.py | e89b4baed4e78fa09717a477f4f67d4822d01926 | [] | no_license | Shivakumar98/LeetCode | 3536a1fe97cba023cbb2585d916b4cb13e6d82db | 2ad4f6eef36eeca8999c37aed61c4479e4ceb1c6 | refs/heads/master | 2022-12-14T07:12:43.187679 | 2020-09-06T06:39:12 | 2020-09-06T06:39:12 | 262,058,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 888 | py | Given a word, you need to judge whether the usage of capitals in it is right or not.
We define the usage of capitals in a word to be right when one of the following cases holds:
All letters in this word are capitals, like "USA".
All letters in this word are not capitals, like "leetcode".
Only the first letter in this word is capital, like "Google".
Otherwise, we define that this word doesn't use capitals in a right way.
Example 1:
Input: "USA"
Output: True
Example 2:
Input: "FlaG"
Output: False
Note: The input will be a non-empty word consisting of uppercase and lowercase latin letters.
class Solution:
def detectCapitalUse(self, word: str) -> bool:
if word[0].isupper() and word[1:].islower():
return True
elif word.isupper() or word.islower():
return True
else:
return False
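        # equivalent one-liner (editor's note): return word.isupper() or word.islower() or word.istitle()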
| [
"[email protected]"
] | |
c61b23d686afe38a56b1d534466a67d5e436bf6e | ef83d87e639830d6482daccb77baff7d68b2bf1c | /Learning Record/2.18 学习记录.py | 428042f6bc01eb8d0b59389616891b7fde535794 | [] | no_license | Hu-sky/Advance | bf28fc7ea0711bf2cf4a4437eb0c4008eadb34c2 | 185276d1e57705b4b93f1812aaa199d263798db1 | refs/heads/master | 2021-01-16T05:26:56.374265 | 2020-04-03T12:04:02 | 2020-04-03T12:04:02 | 242,990,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | # _*_ coding:utf-8 _*_
# 1. Slicing: list[0:3] takes indices 0, 1, 2, excluding 3
# The first index 0 can be omitted: list[:3]
# Slicing from the end is supported: list[-2:-1], excluding -1 (the last element)
# First 10 elements, every 2nd one: list[:10:2]
# All elements, every 5th one: list[::5]
# list[:] copies a list as-is
# 2. Tuples support slicing: (1,2,3,4,5)[:3] >>> (1,2,3)
# Strings support slicing: 'ABCDEFG'[::2] >>> 'ACEG'
# 3. Iteration: use for...in to traverse list/tuple/str, etc.
# dict iterates over keys by default
# To iterate over values, use: for value in d.values()
# To iterate over keys and values together, use: for k,v in d.items()
# Strings are iterable too: for ch in 'ABC' >>> A B C
# isinstance(123,Iterable) >>> integers are not iterable
# Turn a list into index-element pairs: for i,value in enumerate(['a','b','c'])
# A for loop can bind two variables at once: for x,y in [(1,1),(2,4),(3,9)]
# 4. List comprehensions: [x*x for x in range(1,11)] >>> [1,4,9,16...]
# Adding a condition: [x*x for x in range(100) if x%2==0] >>> [4,16,36...]
# Two-level full permutation: [m+n for m in 'ABC' for n in 'XYZ'] >>> ['AX','AY'...]
# Building a list from two variables: [k+'='+v for k,v in d.items()]
# 5. Lower-casing a list of strings: [s.lower() for s in L]
# 6. An if...else before the for is an expression; an if after the for is a filter and cannot take an else
# 7. Generators: produce values on the fly while iterating
# There are two ways to create one, e.g. g=(x*x for x in range(10))
# The second way: a yield statement turns a function into a generator
# 8. L=[1]; L1=[0]+L >>> [0,1]
# 9. An object that next() can be called on to keep returning the next value is an Iterator
# list, dict and str are not Iterators (an Iterator's length is not known in advance)
# isinstance(iter([]),Iterator); iter() turns them into Iterators
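# A runnable illustration of notes 7 and 9 (editor's addition, kept in comment form):
# def fib(n):                      # yield turns this into a generator function
#     a, b = 0, 1
#     for _ in range(n):
#         yield a
#         a, b = b, a + b
# g = fib(5)                       # a generator is an Iterator: next(g) -> 0, 1, 1, 2, 3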
| [
"[email protected]"
] | |
90619c1f6a028e3a697b9c37d87410d71dce8baf | 66f080fa42b270c048ee08c5853cb938d5db51f1 | /정수 삼각형.py | 9f57c0945eb87b89f1f5a7faa65348d14da9ee1d | [] | no_license | hunseok329/programmers | 9a87883b69ef5fbab8867b3d5b077cec4abd221d | 1788d27af6da62e2148f230998e0f5c58b5d1037 | refs/heads/master | 2023-09-01T20:02:41.670645 | 2023-08-31T13:41:22 | 2023-08-31T13:41:22 | 272,699,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | def solution(triangle):
for i in range(len(triangle)-1, 0, -1):
for j in range(len(triangle[i])-1):
if triangle[i][j] > triangle[i][j+1]:
triangle[i-1][j] += triangle[i][j]
else:
triangle[i-1][j] += triangle[i][j+1]
return triangle[0][0]
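# A second, equivalent bottom-up implementation follows; since it reuses the name, it overrides solution() above.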
def solution(triangle):
for high in range(len(triangle)-1, 0, -1):
for row in range(len(triangle[high])-1):
if triangle[high][row] < triangle[high][row+1]:
triangle[high-1][row] += triangle[high][row+1]
else:
triangle[high-1][row] += triangle[high][row]
return triangle[0][0] | [
"[email protected]"
] | |
7a331bbe5897c6a6da42a725c81f67a212dd0dd6 | f33efeeabef0ab9533ffce0408545ba012da65fa | /setup.py | 8df1560c6a7d9639ae05daf161af91107460ef2a | [
"MIT"
] | permissive | flowpl/env_config | 092c52ef6967dae218a5b54f84fb90c58967a5db | 0c02f4066c1f35f6e6a7416c5461e3820efa111d | refs/heads/master | 2021-09-18T01:05:22.046887 | 2018-07-07T21:09:24 | 2018-07-07T21:09:49 | 108,753,619 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | from setuptools import setup, find_packages
packages = find_packages('.', exclude=['src.snapshots'])
setup(
packages=packages,
pbr=True,
setup_requires=['pbr>=3.1.1'],
)
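# pbr reads the package metadata from setup.cfg, which is why setup() itself stays minimal.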
| [
"[email protected]"
] | |
cf7d7dec598b4f5e7b24321b01625fcdeb2e45df | ab562c32209aa9e1f6572477bfbb75f2223e5c41 | /cybervision-project/env.py | c0db2390a6c80b37b2af1544b78c3723adaf04aa | [] | no_license | Charlesmutua/CyberVision-Security | 64ac255beab8ba3b685da3db6b37a4eec92086ad | d32cd01c45bad5ef06c9c72c6beec18714c52e51 | refs/heads/main | 2023-05-29T01:04:40.919690 | 2021-06-08T14:11:51 | 2021-06-08T14:11:51 | 375,032,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,854 | py | """
Copyright (c) 2020 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
UMBRELLA = {
"en_url": "https://s-platform.api.opendns.com/1.0/",
"inv_url": "https://investigate.api.umbrella.com",
"inv_token": "a3a5258b-c898-403d-8d01-6352a6360cd0",
"en_key": "<insert enforcement_key>"
}
# IMPORTANT!
# Please use 169.254.0.42:4443 if you are running the script in the remote CV center,
# and 10.49.213.88 if you are running the script in your local machine
CYBERVISION = {
"base_url" : "https://169.254.0.42:4443/api/3.0/",
"x-token-id": "ics-878b728e16c2c5c9b50dcd1d25fb3e7e7609f7de-2c06567246b01600ca6f2d6fbdf3817e06886b51"
}
WHOISXML = {
"apiKey": "at_YVPLPe7gZhdAln6QFGX3jfCDjp9sd",
"base_url": "https://domain-reputation.whoisxmlapi.com/api/v1"
}
IPQUALITYSCORE = {
"apiKey": "Xgl8DPyHcIIheAGFD5croASmcPTCSCsR",
"base_url": "https://ipqualityscore.com/api/json/url"
}
#ONLY CHECK FLOWS SEEN IN THE LAST X DAYS. eg, ( PERIOD = {'period':7})
# TYPE IN X AS AN INTEGER VALUE TO THE KEY 'period'
#LEAVE THIS BLANK IF YOU'RE RUNNING THE SCRIPT FOR THE FIRST TIME OR IF YOU WANT TO RETRIEVE ALL DOMAINS SEEN IN CV
PERIOD = {'period':170}
# Time (in days) that needs to pass to push into CV a domain that has already been queried
time_between_queries = 7 | [
"[email protected]"
] | |
301833774d1ed065f1c33f3976acc96e1735d9aa | ecde18c421d01b1fec6792d3733a42c28da0d933 | /flask/venv2/bin/cors | 72cf52f51d2ad36f3b55281c7139f7764de1abf0 | [] | no_license | koufide/bbg_reports_production | d455e6fa18537656ddece9788b7146f806ca86f5 | 85db302d89533f08d4f0a2fb27a8599b3d21ae83 | refs/heads/master | 2023-05-06T20:07:20.185558 | 2021-05-10T10:35:48 | 2021-05-10T10:35:48 | 366,005,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/var/www/html/bbg-reports/flask/venv2/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from CORScanner.cors_scan import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
cc6fd9d2d1e1189cd0cf54388ff2f5212a84b77f | d7797ec584d722d5d15cb7355a63ecbdc5200123 | /EducationSystem/course_system/apps.py | fa1a9fd70ac8a24a15454501040abc0d654bf4de | [] | no_license | Rositsazz/course_system | f9790890802450b82663b4b01640c3ccdf87d721 | 5f67336a2298452efbea4cd0f7d3bfd992fd1d34 | refs/heads/master | 2021-01-22T07:23:09.956371 | 2017-06-28T10:46:56 | 2017-06-28T10:46:56 | 81,813,140 | 0 | 0 | null | 2017-06-28T10:37:04 | 2017-02-13T10:31:03 | Python | UTF-8 | Python | false | false | 100 | py | from django.apps import AppConfig
class CourseSystemConfig(AppConfig):
name = 'course_system'
| [
"[email protected]"
] | |
7f35f21521a54dd44ad6f3bc467623d65a8eb763 | aaf569bb58f306bf52eceb1b696f0642b8d629dd | /stream-tweet.py | 03ca7a2ce82bf7786b07da47c9ca28036ec1f7d4 | [] | no_license | EhsanArabnezhad/social-media | e3b8a340a56e6b6b7a00c4243868e56f9797bdcf | fa890d2d2fc34f2c522344782a4379abd49de687 | refs/heads/master | 2023-01-09T09:11:30.937921 | 2020-11-07T14:13:40 | 2020-11-07T14:13:40 | 264,376,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,675 | py | import sys
import tweepy
import csv
import time
import constants
consumer_key = constants.API_KEY
consumer_secret = constants.API_SECRET
access_token = constants.ACCESS_TOKEN
access_token_secret = constants.ACCESS_TOKEN_SECRET
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
def twitter_stream_listener(file_name,
filter_track,
follow=None,
locations=None,
languages=None,
time_limit=20):
class CustomStreamListener(tweepy.StreamListener):
def __init__(self, time_limit):
self.start_time = time.time()
self.limit = time_limit
# self.saveFile = open('abcd.json', 'a')
super(CustomStreamListener, self).__init__()
def on_status(self, status):
if (time.time() - self.start_time) < self.limit:
# print(".", end=" ")
# Writing status data
with open(file_name, 'a') as f:
writer = csv.writer(f)
writer.writerow([
status.author.screen_name, status.created_at,
status.text
])
else:
print("\n\n[INFO] Closing file and ending streaming")
return False
def on_error(self, status_code):
if status_code == 420:
print('Encountered error code 420. Disconnecting the stream')
# returning False in on_data disconnects the stream
return False
else:
print('Encountered error with status code: {}'.format(
status_code))
return True # Don't kill the stream
def on_timeout(self):
print('Timeout...')
return True # Don't kill the stream
# Writing csv titles
print(
'\n[INFO] Open file: [{}] and starting {} seconds of streaming for {}\n'
.format(file_name, time_limit, filter_track))
with open(file_name, 'w') as f:
writer = csv.writer(f)
writer.writerow(['author', 'date', 'text'])
streamingAPI = tweepy.streaming.Stream(
auth, CustomStreamListener(time_limit=time_limit))
streamingAPI.filter(
track=filter_track,
follow=follow,
locations=locations,
languages=languages,
)
filter_track = ['Iranprotest']
file_name = 'Iran_protest.csv'
twitter_stream_listener(file_name, filter_track, time_limit=60)
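# A location-based variant (sketch; the bounding box below is illustrative,
# roughly the San Francisco Bay Area):
# twitter_stream_listener('geo_tweets.csv', filter_track=None,
#                         locations=[-122.75, 36.8, -121.75, 37.8],
#                         time_limit=60)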
| [
"[email protected]"
] | |
b4d3d35e1e73ac91d6ca8d83320e36a7280c4a36 | bb9b0a40c5577bb41f233e25987b4352acb11a4d | /Django/Time_Display_Assignment/apps/time_display/views.py | 0b0f2125572bcc549cc9fa6e6e8ae7f501ff3ddb | [] | no_license | Farashi25/Python | 6fc605045493f2cf69e7dd0230e0019153747254 | 4dc71ba290658feadc8e9a6f294dec0c999c509b | refs/heads/master | 2021-08-16T01:23:44.923051 | 2017-11-18T18:53:58 | 2017-11-18T18:53:58 | 111,230,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from time import localtime, strftime
def index(request):
    context = {
        # '%-I' (hour without zero padding) is a glibc extension; on Windows
        # the equivalent flag is '%#I'
        "date": strftime("%b %d, %Y", localtime()),
        "time": strftime("%-I:%M %p", localtime()),
    }
return render(request, 'time_display/index.html', context)
# Create your views here.
| [
"[email protected]"
] | |
4e28cb0efbe2d04866921745a06e6125dbe3563f | f80b6116d0c315446b7a802fe4de00d45bff1381 | /plots/plot_task.py | 86975db5e743f2917eb32caccb1bff24f5b1a415 | [] | no_license | Wonder9988/GWO-1 | cf484079189c863d86839853579cd7812278e6f8 | bc4ef95a82dcc750a50583b2496a7b4aaf14c20b | refs/heads/main | 2023-09-04T08:55:18.581174 | 2021-10-29T14:59:10 | 2021-10-29T14:59:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,592 | py | # -*- coding: UTF-8 -*-
import numpy as np
import matplotlib.pyplot as plt
LABEL = {
0 : "GREEDY",
1 : "DP",
4 : "GREEDY-N",
6 : "BAT",
}
def graf_recompensa():
dados_Greedy = [1.3, 2.3, 3.8]
dados_GreedyN = [1.8, 3.5, 5.3]
dados_DP = [1.2, 2.1, 3.6]
dados_BAT = [2.2, 4.3, 10]
erro_margem_Greedy = [0.3, 0.3, 0.4]
erro_margem_GreedyN = [0.4, 0.2, 0.3]
erro_margem_DP = [0.2, 0.2, 0.3]
erro_margem_BAT = [0.2, 0.3, 0.5]
barWidth = 0.20
plt.figure(figsize=(310,25))
r1 = np.arange(len(dados_Greedy))
r2 = [x + barWidth for x in r1]
r3 = [x + barWidth for x in r2]
r4 = [x + barWidth for x in r3]
plt.bar(r1, dados_Greedy, color="aquamarine", width=barWidth, label="Greedy", yerr=erro_margem_Greedy)
plt.bar(r2, dados_GreedyN, color="turquoise", width=barWidth, label="Greedy-N",yerr=erro_margem_GreedyN)
plt.bar(r3, dados_DP, color="slateblue", width=barWidth, label="DP",yerr=erro_margem_DP)
plt.bar(r4, dados_BAT, color="blueviolet", width=barWidth, label="ARCANE",yerr=erro_margem_BAT)
plt.legend(loc='best', fontsize=14)
plt.xlabel("Recursos (#)")
    # three labels to match the three data points per series (the original
    # listed four, which newer matplotlib rejects as a tick/label mismatch)
    plt.xticks([r + barWidth for r in range(len(dados_Greedy))], ["1", "2", "3"])
plt.ylabel("Recompensa (#)")
plt.show()
def graf_atendimento():
dados_Greedy = [3.6, 6.3, 9.8]
dados_GreedyN = [5.3, 10.0, 14]
dados_DP = [3.3, 5.9, 8.9]
dados_BAT = [6.4, 12.2, 18.2]
erro_margem_Greedy = [0.3, 0.4, 0.4]
erro_margem_GreedyN = [0.3, 0.4, 0.8]
erro_margem_DP = [0.3, 0.4, 0.4]
erro_margem_BAT = [0.4, 0.8, 1.0]
barWidth = 0.20
plt.figure(figsize=(310,25))
r1 = np.arange(len(dados_Greedy))
r2 = [x + barWidth for x in r1]
r3 = [x + barWidth for x in r2]
r4 = [x + barWidth for x in r3]
plt.bar(r1, dados_Greedy, color="aquamarine", width=barWidth, label="Greedy", yerr=erro_margem_Greedy)
plt.bar(r2, dados_GreedyN, color="turquoise", width=barWidth, label="Greedy-N",yerr=erro_margem_GreedyN)
plt.bar(r3, dados_DP, color="slateblue", width=barWidth, label="DP",yerr=erro_margem_DP)
plt.bar(r4, dados_BAT, color="blueviolet", width=barWidth, label="ARCANE",yerr=erro_margem_BAT)
plt.legend(loc='best', fontsize=14)
plt.xlabel("Recursos (#)")
    plt.xticks([r + barWidth for r in range(len(dados_Greedy))], ["1", "2", "3"])  # labels match the three data points
plt.ylabel("Atendimento (%)")
plt.show()
def graf_utilizacao():
dados_Greedy = [16.6, 13.8, 16.6]
dados_GreedyN = [41.6, 40.2, 42.7]
dados_DP = [16.6, 15.2, 17.4]
dados_BAT = [41.6, 40.2, 43]
erro_margem_Greedy = [0.6, 0.8, 0.6]
erro_margem_GreedyN = [0.6, 0.8, 0.8]
erro_margem_DP = [0.6, 0.4, 0.8]
erro_margem_BAT = [0.6, 0.8, 0.7]
barWidth = 0.15
plt.figure(figsize=(310,25))
r1 = np.arange(len(dados_Greedy))
r2 = [x + barWidth for x in r1]
r3 = [x + barWidth for x in r2]
r4 = [x + barWidth for x in r3]
plt.bar(r1, dados_Greedy, color="aquamarine", width=barWidth, label="Greedy", yerr=erro_margem_Greedy)
plt.bar(r2, dados_GreedyN, color="turquoise", width=barWidth, label="Greedy-N",yerr=erro_margem_GreedyN)
plt.bar(r3, dados_DP, color="slateblue", width=barWidth, label="DP",yerr=erro_margem_DP)
plt.bar(r4, dados_BAT, color="blueviolet", width=barWidth, label="ARCANE",yerr=erro_margem_BAT)
#plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), mode="expand", loc="3", ncol="4", borderaxespad="0.")
plt.legend(loc='best', fontsize=7.5)
plt.xlabel("Recursos (#)")
    plt.xticks([r + barWidth for r in range(len(dados_Greedy))], ["1", "2", "3"])  # labels match the three data points
plt.ylabel("Utilizacao (%)")
plt.show()
def graf_cpu():
dados_Greedy = [2.1, 2.2, 2.1]
dados_GreedyN = [3.3, 3.7, 4.0]
dados_DP = [5.3, 5.9, 7.1]
dados_BAT = [0.5, 0.7, 0.6]
erro_margem_Greedy = [0.1, 0.2, 0.1]
erro_margem_GreedyN = [0.3, 0.4, 0.4]
erro_margem_DP = [0.3, 0.4, 0.4]
erro_margem_BAT = [0.1, 0.2, 0.15]
barWidth = 0.20
plt.figure(figsize=(310,25))
r1 = np.arange(len(dados_Greedy))
r2 = [x + barWidth for x in r1]
r3 = [x + barWidth for x in r2]
r4 = [x + barWidth for x in r3]
plt.bar(r1, dados_Greedy, color="aquamarine", width=barWidth, label="Greedy", yerr=erro_margem_Greedy)
plt.bar(r2, dados_GreedyN, color="turquoise", width=barWidth, label="Greedy-N",yerr=erro_margem_GreedyN)
plt.bar(r3, dados_DP, color="slateblue", width=barWidth, label="DP",yerr=erro_margem_DP)
plt.bar(r4, dados_BAT, color="blueviolet", width=barWidth, label="ARCANE",yerr=erro_margem_BAT)
plt.legend(loc='best', fontsize=14)
plt.xlabel("Recursos (#)")
    plt.xticks([r + barWidth for r in range(len(dados_Greedy))], ["1", "2", "3"])  # labels match the three data points
plt.ylabel("Tempo de CPU (s)")
plt.show()
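# The four functions above repeat the same layout; a shared helper along these
# lines (a sketch, the names are not from the original) would remove the
# duplication while producing the same grouped bar charts:
def graf_barras(dados, erros, ylabel,
                labels=("Greedy", "Greedy-N", "DP", "ARCANE"),
                cores=("aquamarine", "turquoise", "slateblue", "blueviolet"),
                bar_width=0.20):
    posicoes = np.arange(len(dados[0]))
    for k, (d, e) in enumerate(zip(dados, erros)):
        # one bar series per algorithm, shifted right by k bar widths
        plt.bar(posicoes + k * bar_width, d, color=cores[k], width=bar_width,
                label=labels[k], yerr=e)
    plt.legend(loc='best', fontsize=14)
    plt.xlabel("Recursos (#)")
    plt.xticks(posicoes + bar_width, [str(i + 1) for i in posicoes])
    plt.ylabel(ylabel)
    plt.show()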
graf_recompensa()
graf_atendimento()
graf_utilizacao()
graf_cpu() | [
"[email protected]"
] | |
d023eb89afce4942485850bf7147fde746c38bec | 6fcbc311fabccd7fff15f308c74ae7982214a1d5 | /Utils/LVX_Reader.py | afef231b32b32d73ec33f1d845b60bf6d3a0e94c | [] | no_license | shohame/Livox_Detection | f7113b7485f3158b049e0a89b2749526b03a0d21 | 24d893be8d5596929b1f6249c3cc84c58c86201a | refs/heads/main | 2023-03-31T14:18:47.422927 | 2021-04-03T07:02:31 | 2021-04-03T07:02:31 | 340,581,796 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,535 | py |
from LVX_File_Struct import *
class LVX_Reader:
def __init__(self):
self.public_header = LvxFilePublicHeader()
self.private_header = LvxFilePrivateHeader()
self.device_info_list = [] # of LvxDeviceInfo()
self.frame_header = FrameHeader()
self.base_pack_detail = BasePackDetail()
def ReadStruct(self, struct):
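        # Read exactly len(struct) bytes from the file and unpack them into
        # the given struct instance in place.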
raw = self.f.read(len(struct))
struct.unpack(raw)
def open(self, lvx_fn):
self.f = open(lvx_fn,'rb')
self.ReadStruct(self.public_header)
print ('After public_header file position:', self.f.tell())
self.ReadStruct(self.private_header)
print('After private_header file position:', self.f.tell())
for i in range(self.private_header.device_count):
device_info = LvxDeviceInfo()
self.ReadStruct(device_info)
self.device_info_list.append(device_info)
print('After device_info file position:', self.f.tell())
def ReadFrame(self):
self.ReadStruct(self.frame_header)
self.ReadStruct(self.base_pack_detail)
return self.base_pack_detail.raw_point
def close(self):
self.f.close()
# After public_header file position: 24
# After private_header file position: 29
# After device_info file position: 88
if __name__ == "__main__":
lvx_file = LVX_Reader()
lvx_file.open('../lvx/2021-04-01 19-26-44.lvx')
while(True):
frm = lvx_file.ReadFrame()
if not frm:
break
lvx_file.close()
a = 4 | [
"[email protected]"
] | |
10775720c0d36b946ee7361a76ed25a2663c18f6 | 86d5e4a4236f8797a8449bff451af991b79d6c67 | /gloria/wsgi.py | e38c5e143525099fdc6678e6277ed108112fe4cc | [] | no_license | LizethVelasco/gloria | 08a5c060caad5685f0789cfa412b6878c19d756d | bfa79794111b1af61c3073f52012d0eb271cf1fe | refs/heads/master | 2023-07-13T20:22:51.146350 | 2021-09-04T20:28:51 | 2021-09-04T20:28:51 | 403,150,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for gloria project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gloria.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
d64ca201d337cfc52a4d5353b2fc1d74ba476f7c | efbb1d6abdde55976b9f0fd55bcdeff609b19bc6 | /positioningservice/tests/test_models.py | 578a2e4f659a92cb34ededddc312988fab3a355c | [] | no_license | drager/toerh | 1350e71d8831f14db083979e603500ff8ce0ee82 | 48826b70da4363197f7a9e8c6fefdcc708527a99 | refs/heads/master | 2021-01-02T22:38:34.972741 | 2015-08-01T12:31:47 | 2015-08-01T12:31:47 | 40,045,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | from django.contrib.auth.models import User
from django.test import TestCase
from ..models import Position, Tag
class PositionModelTest(TestCase):
def test_string_representation(self):
position = Position.objects.create(
longitude=56,
latitude=19
)
self.assertEqual(position.__str__(), "[%i, %i]" %
(position.longitude, position.latitude))
class TagModelTest(TestCase):
def test_string_representation(self):
self.user_obj = User.objects.create_user('test', "[email protected]", 'password')
tag = Tag.objects.create(
name='#guldveckan',
user=self.user_obj
)
self.assertEqual(tag.__str__(), tag.name)
def test_get_absolute_url(self):
self.user_obj = User.objects.create_user('test', "[email protected]", 'password')
tag = Tag.objects.create(
name='#guldveckan',
user=self.user_obj
)
self.assertIsNotNone(tag.get_absolute_url())
| [
"[email protected]"
] | |
400bf02fc0019db268807c9e226d4c4c7710b285 | 80e1051f0f8255db7831a755fa00529eda4988ec | /py3_shebang.py | f88f953c73803f6f447f631c0fad60614f3dfb94 | [] | no_license | ericpongpugdee/mycode | abda946576b84abb2e0fa4ee1d2452dabaa0fd9b | eb380fc103ecc15cc94df40a2500058d325a4f9a | refs/heads/master | 2022-12-10T13:05:34.896207 | 2019-08-02T16:19:52 | 2019-08-02T16:19:52 | 199,458,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | #! /usr/bin/env python3
print("ello!")
print("Did you say, hello?")
print("No, I said ello, but that\'s close enough")
| [
"[email protected]"
] | |
2b30b5866a2434a75fca7122aacfe0427dceea2a | f99fb6955df8d9bd5812c38a103c214b3e0f7c57 | /python_challenge/grait/__init__.py | 5f54f59f4ba92ff0711905c20799ed0bd05ab942 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | lvm/it-challenges | 7c9ff87ccc28ad01d5bc845d88833da7089a0de4 | 688fe1dc68426c685bacc8d54f1f7a982a11c8b1 | refs/heads/master | 2023-08-26T07:27:19.245321 | 2021-10-19T00:28:32 | 2021-10-19T00:28:32 | 339,818,701 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | from .utils import IPv4
from .utils import IPobject
from .utils import RDAPResponse
from .utils import RDAPService
from .geoip import GeoIP
from .rdap import RDAP
from .grabber import IPGrabber
__version__ = '0.0.1'
| [
"[email protected]"
] | |
9f0f1f26ad66edbbdcddf2850d6211604e0f7b04 | a3aabd71ab289ffbe62e4559c47ff9ef74cc7f95 | /app/decorators.py | 0b06854a9fea369a25b9ef868925f496fc88dc47 | [] | no_license | hhwithgroups/ieltspracticegroup | ac5dbed9f461fc0d6e30b32112912d6ca1094c20 | d7dddd85dc1c236d7ff52f34130d362497f2012e | refs/heads/master | 2021-01-12T08:38:33.545381 | 2016-12-16T09:31:00 | 2016-12-16T09:31:00 | 76,639,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | # -*- coding: utf-8 -*-
from functools import wraps
from flask import request, redirect, current_app, abort
from .models import Permission
from flask_login import current_user  # 'flask.ext.*' imports were removed in Flask 1.0
def ssl_required(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
if current_app.config.get("SSL"):
if request.is_secure:
return fn(*args, **kwargs)
else:
return redirect(request.url.replace("http://", "https://"))
return fn(*args, **kwargs)
return decorated_view
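# Illustrative use of the decorator above (a sketch; the route name is an
# assumption, not taken from this project):
# @app.route('/checkout')
# @ssl_required
# def checkout():
#     ...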
def permission_required(permission):
def decorator(f):
@wraps(f)
        def decorated_function(*args, **kwargs):
            if not current_user.can(permission):
                abort(403)
            return f(*args, **kwargs)
return decorated_function
return decorator
def admin_required(f):
return permission_required(Permission.ADMINISTER)(f) | [
"[email protected]"
] | |
b77cd80c0c1fbc74c1487f9da2d71f3e83e1b0ec | 54de64c1bd866c2cd1ef7f23dff20019a87ae408 | /src/bio2bel_drugbank/patent_utils.py | ea41eb5ec3822be8c47b5a766041d5e8125fa9e7 | [
"MIT"
] | permissive | AspirinCode/drugbank-1 | 83fc8bfb3b275df085423ac53c698bc0a8bc9c27 | 1b842ed7a9de7904e8a11fd19ad35164ffb781bf | refs/heads/master | 2020-04-07T20:29:55.925875 | 2018-11-20T18:26:38 | 2018-11-20T18:26:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,704 | py | # -*- coding: utf-8 -*-
"""Utilities for downloading patents from Google.
Code modified from original work by Alexander Esser.
"""
import os
import re
from typing import Optional, Set
import requests
from bs4 import BeautifulSoup
LINK_PATTERN = "https?:\/\/patentimages\.storage\.googleapis\.com\/.+\/([A-z0-9]+\.pdf)"
LINK_RE = re.compile(LINK_PATTERN, re.IGNORECASE)
prefix_map = {
'United States': 'US',
'Canada': 'CA',
}
def download_google_patents(url: str, directory: str) -> Set[str]:
"""Crawls a list of URLs at patent.google.com and downloads the attached PDF documents
:param url: The url (e.g., https://patents.google.com/patent/US5972916)
:param directory: The output directory
"""
rv = set()
try:
r = requests.get(url)
data = r.text
soup = BeautifulSoup(data, "html.parser")
for link in soup.find_all("a"):
target = link.get("href")
link = _process_link(target, directory)
if link:
rv.add(link)
except Exception as e:
print("Could not download patent from {}: {}".format(url, str(e)))
return rv
def _process_link(target, directory: str) -> Optional[str]:
"""Download the link if it fits the description and return it if it works."""
m = LINK_RE.search(target)
if not m:
return
outfile = os.path.join(directory, m.group(1))
if os.path.exists(outfile):
return target
print(f"Downloading {target} to {outfile}")
r2 = requests.get(target, stream=True)
if r2.status_code != 200:
return
with open(outfile, 'wb') as f:
for chunk in r2:
f.write(chunk)
return target
| [
"[email protected]"
] | |
a5b59ba4111f9aa1a1667e14c78b04c6a91f0bd2 | 2e3b52f56369a5a32f427759592b21edcfbcd805 | /basicsforms/wsgi.py | 7777e5e283f597dc9f2b2fe0de2575e9114040ad | [] | no_license | Prapti123-git/django_basicproject | 925078ca08645d1296d3ed0c4d87893d0a69aee9 | 9284a0e63d6c686e4161f24e841e6b16e7db0c9c | refs/heads/master | 2023-01-07T16:04:02.333341 | 2020-11-06T14:44:34 | 2020-11-06T14:44:34 | 310,624,396 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for basicsforms project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'basicsforms.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
9f994aed9612c79c469b24068d640c314f235565 | 8196bfbb6f12eaf0f6527bac6d5566f164c18bb5 | /recwork.py | fe8715383498b880355b27e06a068ac7efc67ac7 | [] | no_license | darkdefender228/Digit-Recogniser | 6aeabb53df95c2adf5d24fc1c9deb75e10fd9b94 | 112765560ed21fbb8a032553e8ec79719484a4d2 | refs/heads/master | 2020-03-23T12:54:04.766789 | 2018-12-01T15:15:02 | 2018-12-01T15:15:02 | 141,588,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 24 18:02:01 2018
@author: romanilechko
"""
import numpy as np
def make_lines(coords, close, far):
lines = []
    opening = True  # True while scanning for the start of the next line
    for index in range(len(coords[:-1])):
        if opening:
            # a neighbour closer than `close` marks the start of a dense run
            if coords[index] + close >= coords[index + 1]:
                lines.append(coords[index])
                opening = False
        else:
            # close the final run at the end of the sequence
            if index == len(coords) - 2 and coords[index] + close >= coords[index + 1]:
                lines.append(coords[index])
                opening = True
            # a gap wider than `far` marks the end of the current run
            elif coords[index] + far <= coords[index + 1]:
                lines.append(coords[index])
                opening = True
return np.asarray(lines)
def cropping(y_lines, x_lines, im):
"""
y - horizontal line for cropping
    x - vertical line for cropping
return - np.ndarray coordinates. bottom y, x, top y, x.
"""
coords = []
for i, y in enumerate(y_lines[:-1]):
if i % 2 == 0:
single_coord = []
for j, x in enumerate(x_lines):
if j % 2 == 1:
single_coord.append(y_lines[i+1])
single_coord.append(x + 5)
coords.append(np.asarray(single_coord))
single_coord = []
else:
single_coord.append(y)
single_coord.append(x - 5)
return np.asarray(coords) | [
"[email protected]"
] | |
177b7126af592da8cd5ef4635c538faa27d1c2e8 | 46d525625c406a2f18c1145a4fed046d86e23507 | /scrapingName/scrapingDataMultiVersion.py | 86192334332ac7a84f0466c2d8dd57124efa24f2 | [] | no_license | anxu5829/iptv-gsvd | c0664cd96c7a73076dee9cd88d11efcdc0bef17b | 7b25207b1e49141bfbef4f20752aedf9b05d3391 | refs/heads/master | 2021-05-09T18:59:51.890628 | 2018-01-27T16:00:25 | 2018-01-27T16:00:25 | 119,178,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,255 | py | import pandas as pd
import os
import re
import requests
from bs4 import BeautifulSoup
import time
import numpy as np
import gc
from sltools import save_pickle,load_pickle
import multiprocessing
def scraping(data):
typeList = dict()
processName = multiprocessing.current_process().name
counter = 0
for name in data:
counter += 1
try:
r = requests.get("http://www.baidu.com/s?wd="+name)
if r.status_code == 200:
beautiful = BeautifulSoup(r.content.decode('utf-8'), "lxml")
typeOfit = beautiful.find(attrs={'mu':re.compile(r'baike')})
try:
if type(typeOfit.p.text) == str :
typeList[name] = (typeOfit.p.text)
print('process name is ',processName,' name :',name, ' type: ', typeOfit.p.text)
else:
typeList[name] = "no type"
print('process name is ',processName,name, ' ', "no type")
#time.sleep(np.random.random()*4)
except:
typeList[name] = "no type"
print('process name is ', processName, name, ' ', "no type")
save_pickle(typeList, str(processName) + 'typeList.data')
save_pickle(counter, str(processName) + 'counterRecord.data')
            else:
                # typeList is a dict; the original called .append() here,
                # which would raise AttributeError on any non-200 response
                typeList[name] = "no type"
                print(name, ' ', "no type")
if counter%2000 == 0:
print('process',str(processName),'is now having a ',"counter of",counter)
save_pickle(typeList, str(processName) +'typeList.data')
save_pickle(counter, str(processName) + 'counterRecord.data')
except:
save_pickle(typeList, str(processName) + 'typeList.data')
save_pickle(counter , str(processName) + 'counterRecord.data')
print(typeList)
save_pickle(typeList,str(processName)+'typeList.data')
if __name__ == "__main__":
os.chdir("C:\\Users\\22560\\Documents\\iptv")
name = pd.read_csv("name.csv")
print("name is loaded!!")
gc.collect()
nameList = name.iloc[:,0].unique()
data = np.split(nameList,[int(.2*len(nameList)),
int(.4*len(nameList)),
int(.6*len(nameList)),
int(.8*len(nameList))
])
print(len(nameList))
del name
processList = []
for i in range(5):
process = multiprocessing.Process(target=scraping,name = str(i),args=(data[i],) )
processList.append(process)
for i in range(5):
processList[i].start()
for i in range(5):
processList[i].join()
nameList = []
typeList = []
for i in range(5):
        type_dict = load_pickle(str(i)+'typeList.data')  # avoid shadowing the built-in 'type'
        nameList.extend(list(type_dict.keys()))
        typeList.extend(list(type_dict.values()))
name_type = pd.DataFrame({'name':nameList,'type':typeList})
    print(name_type.shape)
    # the next two expressions were bare statements with no effect outside a REPL
    print(name_type.groupby('type').count().sort_values(by='name', ascending=False)[:10])
    print(name_type[name_type.type == 'no type'])
name_type.to_csv('name_type.csv') | [
"[email protected]"
] | |
3063710b0ac8aaa385538f205892710690f32773 | 252047784ec6126424a209b562219e75c48b4404 | /virtual orchestration layer/src/vm_create2.py | ab744a4dfec7193fb7054cc921ad8ba1300458cf | [] | no_license | pankhuri31/Projects | 0ad7fdb355be74657e6389486e66e01b79d2d4e7 | 2d1457ae7c28da0bad6f1b97b628aac35d1117e6 | refs/heads/master | 2016-09-06T10:25:39.327150 | 2013-10-07T13:04:21 | 2013-10-07T13:04:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,593 | py | #!/usr/bin/python
import libvirt
import allocate1
import json
from flask import jsonify
from random import random
import os
import subprocess
from uuid import uuid4
def create_xml(vm_name,instance_type,uuid,image_path,cpu,mem,emu_path,domain,arch):
xml=r"<domain type='" + instance_type + r"'> \
<name>" + vm_name + r"</name> \
<memory>" + str((mem*100000)/1024) + r"</memory> \
<uuid>" + uuid + r"</uuid> \
<vcpu>" + str(cpu) + r"</vcpu> \
<os> \
<type arch='" + arch + r"' machine='pc'>hvm</type> \
<boot dev='hd'/> \
</os> \
<features> \
<acpi/> \
<apic/> \
<pae/> \
</features> \
<on_poweroff>destroy</on_poweroff> \
<on_reboot>restart</on_reboot> \
<on_crash>restart</on_crash> \
<devices> \
<emulator>" + emu_path + r"</emulator> \
<disk type='file' device='disk'> \
<driver name=" + domain + r" type='raw'/> \
<source file='" + image_path + r"'/> \
<target dev='hda' bus='ide'/> \
<address type='drive' controller='0' bus='0' unit='0'/> \
</disk> \
</devices> \
</domain>"
return xml
def get_instance_type(tid):
print allocate1.vmtype
type_list=allocate1.vmtype['types']
t=[]
for d in type_list:
        if int(d['tid']) == tid:  # '==' instead of 'is': identity checks on ints are unreliable
t.append(int(d['cpu']))
t.append(int(d['ram']))
# print t
return t
def pm_number(typelist):
machine_num = len(allocate1.machinelist)
while(machine_num > 0):
pmid = allocate1.get_pmid()
pmachine = allocate1.machinelist[pmid-1]
user = pmachine[0]
ip = pmachine[1]
# try:
# ssh_cmd0 = "ssh " + user+'@'+ip + ' ' + 'cat /proc/cpuinfo | grep lm'
# res = subprocess.check_output(ssh_cmd0,shell='True')
# result = 64
# except:
# result = 32
ssh_cmd1 = "ssh" +' '+ user+'@'+ip +' '+'nproc'
try:
            cpu_count = subprocess.check_output(ssh_cmd1, shell=True)  # shell expects a bool; the string 'True' only worked by being truthy
cpu_count = int(cpu_count[:-1])
except:
print 'error'
ssh_cmd2 = "ssh" +' '+ user+'@'+ip +' '+ 'free -m'
try:
            free_mem = subprocess.check_output(ssh_cmd2, shell=True)
free_mem=free_mem.split('\n')
free_mem=free_mem[1]
free_mem=free_mem.split()
free_mem=free_mem[3]
except:
print "error"
# print free_mem,cpu_count
# print typelist[1],typelist[0]
if int(free_mem) >= int(typelist[1]) and int(cpu_count) >= int(typelist[0]):
return pmid
machine_num = machine_num -1
return -1
def create(attr_vm):
try:
name = attr_vm['name']
instance_type = int(attr_vm['instance_type'])
image_id = int(attr_vm['image_id'])
machine_from = allocate1.imagelist[image_id-1][0]
iso_file_path = allocate1.imagelist[image_id-1][1]
# cpu_type = iso_file_path
# cpu_type=cpu_type.split(".")
# cpu_type=str(cpu_type[0]).split("_")
typelist=get_instance_type(instance_type)
pmid = pm_number(typelist)
        if int(pmid) == -1:  # '==' instead of 'is': int identity is an implementation detail
return 0
pmachine = allocate1.machinelist[pmid-1]
user = pmachine[0]
ip = pmachine[1]
uuid = pmachine[2]
pmid = pmachine[3]
string_uuid = str(uuid4())
try:
conn = libvirt.open(allocate1.path(user,ip))
except:
print 'connection not found'
sys_info = conn.getCapabilities()
emulator_path = sys_info.split("emulator>")
emulator_path = emulator_path[1].split("<")[0] #location of xen/qemu
domain = sys_info.split("<domain type=")
domain = domain[1].split(">")[0] #type of emulator present on given machine xen/qemu
arch_type = sys_info.split("<arch>")
        arch_type = arch_type[1].split("<")[0]     #architecture of the machine
        # os.path.exists() does not expand '~', so the original check was
        # always False and the image was re-copied on every call
        local_copy = os.path.expanduser('~/' + 'image' + str(image_id) + '.img')
        if not os.path.exists(local_copy):
scp_command = "scp" + ' ' + machine_from+':'+iso_file_path + ' ' + local_copy + ' 2> /dev/null'
os.system(scp_command)
new_iso_path = '/home/'+ user + '/' + 'image'+str(image_id)+'.img'
scp_command = "scp" + ' ' + local_copy + ' ' + user + '@' + ip + ':' + new_iso_path + ' 2> /dev/null'
os.system(scp_command)
try:
req = conn.defineXML(create_xml(name, conn.getType().lower(),string_uuid,new_iso_path,int(typelist[0]),int(typelist[1]),emulator_path,domain,arch_type))
vmid = allocate1.get_vmid()
#print req
except:
print 'Error: Virtual Machine not defined'
try:
req.create()
print 'Request Completed'
except:
print 'Error: Virtual Machine not created'
# return jsonify({"vmid":0})
allocate1.vmlist[vmid] = [name, instance_type, image_id, pmid]
return vmid
except:
print 'Error: Virtual Machine not allocated!'
return 0
| [
"pankhuri@pankhuri.(none)"
] | pankhuri@pankhuri.(none) |
e86af748470270a3bd18fbbcd3dc8e992712cb17 | 8cf0cf9b71b7c5fbaa150e9893bf461ef661045e | /ownblock/ownblock/apps/accounts/views.py | 77aad64c0c1af2bb8b440208af2f015e13b0a50a | [
"MIT"
] | permissive | danjac/ownblock | 676b27a5aa0d4ce2ac2cd924a632489cd6fc21ee | ac662fb7efb2f04567e2f85638c1250286452611 | refs/heads/master | 2016-08-02T21:51:56.055598 | 2015-05-02T12:54:47 | 2015-05-02T12:54:47 | 34,940,828 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,442 | py | from django.db.models import Q
from rest_framework import status, viewsets, permissions
from rest_framework.response import Response
from rest_framework.views import APIView
from ..storage.models import Item
from ..parking.models import Vehicle
from ..messaging.models import Message
from ..notices.models import Notice
from ..amenities.models import Booking
from .models import User
from .serializers import (
UserSerializer,
RestrictedUserSerializer,
AuthUserSerializer,
)
class UserViewSet(viewsets.ModelViewSet):
model = User
def get_serializer_class(self):
if self.request.user.role == 'manager':
return RestrictedUserSerializer
return UserSerializer
def retrieve(self, request, *args, **kwargs):
self.object = self.get_object()
data = self.get_serializer(self.object).data
data['gravatar'] = self.object.get_gravatar_url(size=40)
notices = Notice.objects.filter(author=self.object)
data['notices'] = []
for notice in notices.iterator():
data['notices'].append({
'id': notice.id,
'title': notice.title,
'details': notice.details,
'created': notice.created,
})
if self.object != self.request.user:
messages = Message.objects.filter(
Q(sender=self.object) | Q(recipient=self.object)).filter(
Q(sender=self.request.user) | Q(recipient=self.request.user)
).order_by('-created')
data['sent_messages'] = []
data['received_messages'] = []
for message in messages.iterator():
message_data = {
'id': message.id,
'header': message.header,
'details': message.details,
'created': message.created,
}
if message.sender_id == self.object.id:
data['sent_messages'].append(message_data)
else:
data['received_messages'].append(message_data)
if self.object.role == 'resident':
items = Item.objects.filter(
resident=self.object
).select_related('place')
data['items'] = []
for item in items.iterator():
data['items'].append({
'id': item.id,
'place_id': item.place_id,
'place_name': item.place.name,
'description': item.description,
})
vehicles = Vehicle.objects.filter(
resident=self.object
)
data['vehicles'] = []
for vehicle in vehicles.iterator():
data['vehicles'].append({
'id': vehicle.id,
'description': vehicle.description,
'registration_number': vehicle.registration_number,
})
bookings = Booking.objects.filter(
resident=self.object
).select_related('amenity')
data['bookings'] = []
for booking in bookings:
data['bookings'].append({
'id': booking.id,
'amenity': {
'id': booking.amenity.id,
'name': booking.amenity.name,
},
'reserved_from': booking.reserved_from,
'reserved_to': booking.reserved_to,
})
return Response(data)
def get_queryset(self, *args, **kwargs):
qs = super().get_queryset(*args, **kwargs).select_related(
'apartment'
).filter(is_active=True).order_by('last_name', 'first_name')
if self.request.GET.get('residents'):
return qs.filter(apartment__building=self.request.building)
elif self.request.GET.get('managers'):
return qs.filter(role='manager', site=self.request.building.site)
return qs.filter(
Q(
Q(apartment__building=self.request.building) |
Q(site=self.request.building.site)
),
)
class AuthView(APIView):
permission_classes = (permissions.IsAuthenticated, )
def get_user_response(self, request):
return Response(AuthUserSerializer(
request.user, context={'request': request}).data)
def get(self, request, *args, **kwargs):
return self.get_user_response(request)
def put(self, request, *args, **kwargs):
serializer = AuthUserSerializer(request.user, data=request.DATA)
if not serializer.is_valid():
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
serializer.save(force_update=True)
return Response(serializer.data)
def patch(self, request, *args, **kwargs):
password = request.DATA.get('password')
if not password:
return Response('Password is missing',
status=status.HTTP_400_BAD_REQUEST)
request.user.set_password(request.DATA['password'])
request.user.save()
return Response()
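# Sketch of how these views could be wired up (the router and URL names are
# assumptions, not taken from the project's actual urls.py):
# router = routers.DefaultRouter()
# router.register(r'users', UserViewSet)
# urlpatterns = [url(r'^auth/$', AuthView.as_view())] + router.urls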
| [
"[email protected]"
] | |
bfca6c0531a704417241810a33f46ee4c038afad | 2b167e29ba07e9f577c20c54cb943861d0ccfa69 | /numerical_analysis_backup/small-scale-multiobj/pod50_milp/throughput/runsimu11_throughput.py | 89588a3c2132dc6081ea0222defc8c77da4d7d2d | [] | no_license | LiYan1988/kthOld_OFC | 17aeeed21e195d1a9a3262ec2e67d6b1d3f9ff0f | b1237577ea68ad735a65981bf29584ebd889132b | refs/heads/master | 2021-01-11T17:27:25.574431 | 2017-01-23T05:32:35 | 2017-01-23T05:32:35 | 79,773,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,440 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Aug 4 15:15:10 2016
@author: li
optimize throughput
"""
#import sys
#sys.path.insert(0, '/home/li/Dropbox/KTH/numerical_analysis/ILPs')
import csv
from gurobipy import *
import numpy as np
from arch4_decomposition import Arch4_decompose
from arch1 import ModelSDM_arch1
from arch2_decomposition import Arch2_decompose
from arch5_decomposition import Arch5_decompose
np.random.seed(2010)
num_cores=3
num_slots=60
n_sim = 1 # number of simulations
n_start = 11 # index of start
n_end = n_start+n_sim # index of end
time_limit_routing = 1000 # 1000
time_limit_sa = 18000
alpha = 0
beta = 0.01
result = np.zeros((n_sim, 15))
total_cnk = []
for i in range(n_start, n_end):
filename = 'traffic_matrix__matrix_'+str(i)+'.csv'
# print filename
tm = []
with open(filename) as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
if idx>11:
row.pop()
row = [int(u) for u in row]
tm.append(row)
tm = np.array(tm)*25
total_cnk.append(tm.flatten().astype(bool).sum())
result[i-n_start, 14] = tm.flatten().astype(bool).sum()
print "\n"
print total_cnk
print "\n"
#%% arch4
print "Architecture 4"
m = Arch4_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=alpha,beta=beta)
m.create_model_routing(mipfocus=1,timelimit=time_limit_routing,mipgap=0.01)
m.create_model_sa(mipfocus=1,timelimit=time_limit_sa)
result[i-n_start, 0] = m.connections_lb
result[i-n_start, 1] = m.connections_ub
result[i-n_start, 2] = m.throughput_lb
result[i-n_start, 3] = m.throughput_ub
#%% arch1
print "Architecutre 1"
m = ModelSDM_arch1(tm, num_slots=num_slots, num_cores=num_cores,alpha=alpha,beta=beta)
m.create_model(mipfocus=1, timelimit=time_limit_routing,mipgap=0.01)
result[i-n_start, 4] = m.connections
result[i-n_start, 5] = m.throughput
#%% arch2
print "Architecture 2"
m = Arch2_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=alpha,beta=beta)
m.create_model_routing(mipfocus=1,timelimit=time_limit_routing,mipgap=0.01)
m.create_model_sa(mipfocus=1,timelimit=time_limit_sa)
result[i-n_start, 6] = m.connections_lb
result[i-n_start, 7] = m.connections_ub
result[i-n_start, 8] = m.throughput_lb
result[i-n_start, 9] = m.throughput_ub
#%% arch5
print "Architecture 5"
m = Arch5_decompose(tm, num_slots=num_slots, num_cores=num_cores,alpha=alpha,beta=beta)
m.create_model_routing(mipfocus=1, timelimit=time_limit_routing, mipgap=0.01)
m.create_model_sa(mipfocus=1, timelimit=time_limit_sa)
result[i-n_start, 10] = m.connections_lb
result[i-n_start, 11] = m.connections_ub
result[i-n_start, 12] = m.throughput_lb
result[i-n_start, 13] = m.throughput_ub
file_name = "result_throughput_{}to{}.csv".format(n_start, n_end)
with open(file_name, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['arch4_connections_lb', 'arch4_connections_ub',
'arch4_throughput_lb', 'arch4_throughput_ub',
'arch1_connections', 'arch1_throughput',
'arch2_connections_lb', 'arch2_connections_ub',
'arch2_throughput_lb', 'arch2_throughput_ub',
'arch5_connections_lb', 'arch5_connections_ub',
'arch5_throughput_lb', 'arch5_throughput_ub',
'total_cnk'])
writer.writerows(result) | [
"[email protected]"
] | |
5206f5335349b508bf02a2020ac043a694043479 | 7537fde06047ef5d137c1d51e0642c73b2c6d561 | /backend/user/serializers.py | 6885913c24c3d24b95d2dfdb83036a838e52dc04 | [] | no_license | fnavarro1612/pomodoro-tracker | ddb482ff7552fb7a6c8a88859ae4e63e7c1545cf | d926cad870ad516475a5b95a2cd6216d57bd1ea2 | refs/heads/master | 2021-06-25T11:30:14.880292 | 2020-05-18T03:48:06 | 2020-05-18T03:48:06 | 216,279,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | from django.contrib.auth import get_user_model, authenticate
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ['id', 'email', 'name',
'password', 'is_superuser', 'is_staff']
class RegisterUserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ['id', 'email', 'name', 'password']
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
user = get_user_model().objects.create_user(**validated_data)
return user
class LoginUserSerializer(serializers.Serializer):
email = serializers.EmailField()
password = serializers.CharField()
def validate(self, data):
user = authenticate(**data)
if user and user.is_active:
return user
raise serializers.ValidationError("Incorrect Login Credentials")
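# Illustrative use in a login view (a sketch; since validate() above returns
# the user, validated_data holds the authenticated User instance):
# serializer = LoginUserSerializer(data=request.data)
# serializer.is_valid(raise_exception=True)
# user = serializer.validated_data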
class RegisterSuperUserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ['id', 'email', 'name', 'password', 'is_superuser']
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
user = get_user_model().objects.create_super_user(**validated_data)
return user
| [
"[email protected]"
] | |
39b7a8e063f66335d7c8deeff4486a2146ecdb2e | 7dc1b1d75829b58bb2abe0e91b0bfcde89e1ea9a | /venv/lib/python3.8/site-packages/yaml/representer.py | 2174887b92d75fca887fc613d82ba35324f88ef0 | [] | no_license | fredhu0514/Py2Mat | cc617a6d65e617d09cea50b516e1a0f00d5197fd | 06e8e7c7ddb20431489482be8a8b026c19dde3a7 | refs/heads/master | 2023-03-30T08:53:56.650967 | 2021-03-21T03:55:17 | 2021-03-21T03:55:17 | 347,419,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,183 | py |
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError']
from .error import *
from .nodes import *
import datetime, copyreg, types, base64, collections
class RepresenterError(YAMLError):
pass
class BaseRepresenter:
yaml_representers = {}
yaml_multi_representers = {}
def __init__(self, default_style=None, default_flow_style=False, sort_keys=True):
self.default_style = default_style
self.sort_keys = sort_keys
self.default_flow_style = default_flow_style
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def represent(self, data):
node = self.represent_data(data)
self.serialize(node)
self.represented_objects = {}
self.object_keeper = []
self.alias_key = None
def represent_data(self, data):
if self.ignore_aliases(data):
self.alias_key = None
else:
self.alias_key = id(data)
if self.alias_key is not None:
if self.alias_key in self.represented_objects:
node = self.represented_objects[self.alias_key]
#if node is None:
# raise RepresenterError("recursive objects are not allowed: %r" % data)
return node
#self.represented_objects[alias_key] = None
self.object_keeper.append(data)
data_types = type(data).__mro__
if data_types[0] in self.yaml_representers:
node = self.yaml_representers[data_types[0]](self, data)
else:
for data_type in data_types:
if data_type in self.yaml_multi_representers:
node = self.yaml_multi_representers[data_type](self, data)
break
else:
if None in self.yaml_multi_representers:
node = self.yaml_multi_representers[None](self, data)
elif None in self.yaml_representers:
node = self.yaml_representers[None](self, data)
else:
node = ScalarNode(None, str(data))
#if alias_key is not None:
# self.represented_objects[alias_key] = node
return node
@classmethod
def add_representer(cls, data_type, representer):
if not 'yaml_representers' in cls.__dict__:
cls.yaml_representers = cls.yaml_representers.copy()
cls.yaml_representers[data_type] = representer
@classmethod
def add_multi_representer(cls, data_type, representer):
if not 'yaml_multi_representers' in cls.__dict__:
cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
cls.yaml_multi_representers[data_type] = representer
def represent_scalar(self, tag, value, style=None):
if style is None:
style = self.default_style
node = ScalarNode(tag, value, style=style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
def represent_sequence(self, tag, sequence, flow_style=None):
value = []
node = SequenceNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
for item in sequence:
node_item = self.represent_data(item)
if not (isinstance(node_item, ScalarNode) and not node_item.style):
best_style = False
value.append(node_item)
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def represent_mapping(self, tag, mapping, flow_style=None):
value = []
node = MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = True
if hasattr(mapping, 'items'):
mapping = list(mapping.items())
if self.sort_keys:
try:
mapping = sorted(mapping)
except TypeError:
pass
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def ignore_aliases(self, data):
return False
class SafeRepresenter(BaseRepresenter):
def ignore_aliases(self, data):
if data is None:
return True
if isinstance(data, tuple) and data == ():
return True
if isinstance(data, (str, bytes, bool, int, float)):
return True
def represent_none(self, data):
return self.represent_scalar('tag:yaml.org,2002:null', 'null')
def represent_str(self, data):
return self.represent_scalar('tag:yaml.org,2002:str', data)
def represent_binary(self, data):
if hasattr(base64, 'encodebytes'):
data = base64.encodebytes(data).decode('ascii')
else:
data = base64.encodestring(data).decode('ascii')
return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
def represent_bool(self, data):
if data:
value = 'true'
else:
value = 'false'
return self.represent_scalar('tag:yaml.org,2002:bool', value)
def represent_int(self, data):
return self.represent_scalar('tag:yaml.org,2002:int', str(data))
inf_value = 1e300
while repr(inf_value) != repr(inf_value*inf_value):
inf_value *= inf_value
def represent_float(self, data):
if data != data or (data == 0.0 and data == 1.0):
value = '.nan'
elif data == self.inf_value:
value = '.inf'
elif data == -self.inf_value:
value = '-.inf'
else:
value = repr(data).lower()
# Note that in some cases `repr(data)` represents a float number
# without the decimal parts. For instance:
# >>> repr(1e17)
# '1e17'
# Unfortunately, this is not a valid float representation according
# to the definition of the `!!float` tag. We fix this by adding
# '.0' before the 'e' symbol.
if '.' not in value and 'e' in value:
value = value.replace('e', '.0e', 1)
return self.represent_scalar('tag:yaml.org,2002:float', value)
def represent_list(self, data):
#pairs = (len(data) > 0 and isinstance(data, list))
#if pairs:
# for item in data:
# if not isinstance(item, tuple) or len(item) != 2:
# pairs = False
# break
#if not pairs:
return self.represent_sequence('tag:yaml.org,2002:seq', data)
#value = []
#for item_key, item_value in data:
# value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
# [(item_key, item_value)]))
#return SequenceNode(u'tag:yaml.org,2002:pairs', value)
def represent_dict(self, data):
return self.represent_mapping('tag:yaml.org,2002:map', data)
def represent_set(self, data):
value = {}
for key in data:
value[key] = None
return self.represent_mapping('tag:yaml.org,2002:set', value)
def represent_date(self, data):
value = data.isoformat()
return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
def represent_datetime(self, data):
value = data.isoformat(' ')
return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
def represent_yaml_object(self, tag, data, cls, flow_style=None):
if hasattr(data, '__getstate__'):
state = data.__getstate__()
else:
state = data.__dict__.copy()
return self.represent_mapping(tag, state, flow_style=flow_style)
def represent_undefined(self, data):
raise RepresenterError("cannot represent an object", data)
SafeRepresenter.add_representer(type(None),
SafeRepresenter.represent_none)
SafeRepresenter.add_representer(str,
SafeRepresenter.represent_str)
SafeRepresenter.add_representer(bytes,
SafeRepresenter.represent_binary)
SafeRepresenter.add_representer(bool,
SafeRepresenter.represent_bool)
SafeRepresenter.add_representer(int,
SafeRepresenter.represent_int)
SafeRepresenter.add_representer(float,
SafeRepresenter.represent_float)
SafeRepresenter.add_representer(list,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(tuple,
SafeRepresenter.represent_list)
SafeRepresenter.add_representer(dict,
SafeRepresenter.represent_dict)
SafeRepresenter.add_representer(set,
SafeRepresenter.represent_set)
SafeRepresenter.add_representer(datetime.date,
SafeRepresenter.represent_date)
SafeRepresenter.add_representer(datetime.datetime,
SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None,
SafeRepresenter.represent_undefined)
class Representer(SafeRepresenter):
def represent_complex(self, data):
if data.imag == 0.0:
data = '%r' % data.real
elif data.real == 0.0:
data = '%rj' % data.imag
elif data.imag > 0:
data = '%r+%rj' % (data.real, data.imag)
else:
data = '%r%rj' % (data.real, data.imag)
return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
def represent_tuple(self, data):
return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
def represent_name(self, data):
name = '%s.%s' % (data.__module__, data.__name__)
return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
def represent_module(self, data):
return self.represent_scalar(
'tag:yaml.org,2002:python/module:'+data.__name__, '')
def represent_object(self, data):
# We use __reduce__ API to save the data. data.__reduce__ returns
# a tuple of length 2-5:
# (function, args, state, listitems, dictitems)
# For reconstructing, we calls function(*args), then set its state,
# listitems, and dictitems if they are not None.
# A special case is when function.__name__ == '__newobj__'. In this
# case we create the object with args[0].__new__(*args).
# Another special case is when __reduce__ returns a string - we don't
# support it.
# We produce a !!python/object, !!python/object/new or
# !!python/object/apply node.
cls = type(data)
if cls in copyreg.dispatch_table:
reduce = copyreg.dispatch_table[cls](data)
elif hasattr(data, '__reduce_ex__'):
reduce = data.__reduce_ex__(2)
elif hasattr(data, '__reduce__'):
reduce = data.__reduce__()
else:
raise RepresenterError("cannot represent an object", data)
reduce = (list(reduce)+[None]*5)[:5]
function, args, state, listitems, dictitems = reduce
args = list(args)
if state is None:
state = {}
if listitems is not None:
listitems = list(listitems)
if dictitems is not None:
dictitems = dict(dictitems)
if function.__name__ == '__newobj__':
function = args[0]
args = args[1:]
tag = 'tag:yaml.org,2002:python/object/new:'
newobj = True
else:
tag = 'tag:yaml.org,2002:python/object/apply:'
newobj = False
function_name = '%s.%s' % (function.__module__, function.__name__)
if not args and not listitems and not dictitems \
and isinstance(state, dict) and newobj:
return self.represent_mapping(
'tag:yaml.org,2002:python/object:'+function_name, state)
if not listitems and not dictitems \
and isinstance(state, dict) and not state:
return self.represent_sequence(tag+function_name, args)
value = {}
if args:
value['args'] = args
if state or not isinstance(state, dict):
value['state'] = state
if listitems:
value['listitems'] = listitems
if dictitems:
value['dictitems'] = dictitems
return self.represent_mapping(tag+function_name, value)
def represent_ordered_dict(self, data):
# Provide uniform representation across different Python versions.
data_type = type(data)
tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \
% (data_type.__module__, data_type.__name__)
items = [[key, value] for key, value in data.items()]
return self.represent_sequence(tag, [items])
Representer.add_representer(complex,
Representer.represent_complex)
Representer.add_representer(tuple,
Representer.represent_tuple)
Representer.add_representer(type,
Representer.represent_name)
Representer.add_representer(collections.OrderedDict,
Representer.represent_ordered_dict)
Representer.add_representer(types.FunctionType,
Representer.represent_name)
Representer.add_representer(types.BuiltinFunctionType,
Representer.represent_name)
Representer.add_representer(types.ModuleType,
Representer.represent_module)
Representer.add_multi_representer(object,
Representer.represent_object)
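# Example of registering a custom representer through the hooks above
# (MyType and the '!mytype' tag are illustrative, not part of this module):
# class MyType: ...
# def represent_mytype(dumper, data):
#     return dumper.represent_scalar('!mytype', str(data))
# Representer.add_representer(MyType, represent_mytype)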
| [
"[email protected]"
] | |
1bcc044f73c4078b86a750699379205dc233d54a | e7a05f0072bb1a92efce3c0b7c7384e60162e6f5 | /records.py | 4bb559b72a805b61f9146a799fa931e305f793ca | [] | no_license | contextfreecode/records | d2b19146cbd85398038a784247ea932d68e3b9df | 606529b1a22b352caf541b17ffbd3b48c67e8219 | refs/heads/main | 2023-01-23T15:10:27.802549 | 2020-12-07T15:22:24 | 2020-12-07T15:22:24 | 314,120,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | from dataclasses import dataclass, replace
from datetime import date
from typing import NamedTuple, TypedDict
def main():
# alice = Employee("Alice", date(2000, 1, 1))
alice = Employee(name="Alice", hire_date=date(2000, 1, 1))
alice2 = replace(alice, hire_date=date(2010, 1, 1))
alice3 = replace(alice2, hire_date=alice.hire_date)
# alice2 = alice._replace(hire_date=date(2010, 1, 1))
# alice3 = alice2._replace(hire_date=alice.hire_date)
# alice2 = {**alice, "hire_date": date(2010, 1, 1)}
# alice3 = alice2 | {"hire_date": alice["hire_date"]}
# alice3 = dict(alice2, hire_date=alice["hire_date"])
print(alice)
print(alice2)
print(alice3)
print(hash(alice))
print(hash(alice2))
print(hash(alice3))
print(alice == alice2)
print(alice == alice3)
print(alice < alice2)
@dataclass(frozen=True, order=True)
class Employee:
name: str
hire_date: date
def __post_init__(self):
assert self.name is not None
assert self.hire_date is not None
def years_employed(self):
return date.today().year - self.hire_date.year
@dataclass(frozen=True, order=True)
class DetailEmployee(Employee):
color: str
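# DetailEmployee is declared but never exercised in main(); a quick
# illustrative use (the values are made up):
# bob = DetailEmployee(name="Bob", hire_date=date(2015, 6, 1), color="blue")
# print(bob.years_employed())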
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
8c37577beb948a84c1017887ad0ff113575583c4 | 87b7d7948aa51fdb4a27540240579788896369ea | /code/runs_sacred/model_4_classes/_sources/main_0d7ea3a13b62ec2b4e0ed10b9b965fe4.py | 721ea09321b607fc28b8b2985a463f302725e990 | [] | no_license | Samuel-Levesque/Projet_GLO7030 | 6f13accd63b52107ec3e3a0b9b5f52edccda7c8d | 557bce3235f09723900f65c6e3b44a0ed9d2b519 | refs/heads/master | 2022-01-16T12:49:22.884798 | 2019-05-05T18:38:35 | 2019-05-05T18:38:35 | 177,038,991 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,978 | py | from sacred import Experiment
from sacred.observers import FileStorageObserver
from data_set_file import create_huge_data_set,create_encoding_deconding_dict
from model_creation import create_model
from trainning import train_model,load_model_weights,create_scheduler
from test_metrics import calcul_metric_concours
import torch
import torch.optim as optim
import torch.nn as nn
import numpy as np
import random
from torch.utils.data import DataLoader
#Trucs sacred
experiment_sacred=Experiment("Doodle_Boys")
experiment_sacred.observers.append(FileStorageObserver.create('runs_sacred/model_4_classes'))
#Configs
@experiment_sacred.config
def configuration():
path_data = 'D:/User/William/Documents/Devoir/Projet Deep/data/mini_train/'
path_save_model="saves_model/model_4_classes.tar"
path_load_existing_model=None
# path_load_existing_model = "saves_model/model_4_classes.tar"
path_model_weights_test = "saves_model/model_4_classes.tar"
use_gpu = True
do_training=True
do_testing=True
nb_row_per_classe=300
nb_epoch = 3
batch_size = 32
learning_rate = 0.1
type_schedule="constant"
    seed=123  # doesn't work ("marche pas" in the original), hence the manual seeding below
torch.manual_seed(123)
np.random.seed(123)
random.seed(123)
#Main
@experiment_sacred.automain
def main_program(path_data,path_save_model,path_load_existing_model,path_model_weights_test,
use_gpu,do_training,do_testing,
nb_row_per_classe,
nb_epoch,batch_size,
learning_rate,type_schedule,
seed
):
#Seed
# torch.manual_seed(123)
# np.random.seed(123)
# random.seed(123)
# Label encoding and decoding dicts
enc_dict, dec_dict = create_encoding_deconding_dict(path_data)
#Data_set
size_image_train = 224
data_train=create_huge_data_set(path_data,nb_rows=nb_row_per_classe,size_image=size_image_train,encoding_dict=enc_dict)
data_valid=create_huge_data_set(path_data,nb_rows=100,size_image=size_image_train,skip_rows=range(1,nb_row_per_classe),encoding_dict=enc_dict)
# Model
model = create_model(use_gpu)
if use_gpu:
model.cuda()
#Loss
criterion = nn.CrossEntropyLoss()
#Optimiser
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
# Scheduler LR
scheduler = create_scheduler(start_lr=learning_rate,type=type_schedule,optimizer=optimizer)
#Data loader
train_loader=DataLoader(data_train,batch_size=batch_size,shuffle=True)
valid_loader=DataLoader(data_valid,batch_size=batch_size,shuffle=True)
#Train
if do_training:
train_model(model,train_loader,valid_loader,nb_epoch,
scheduler,optimizer,criterion,use_gpu,
path_save=path_save_model,path_start_from_existing_model=path_load_existing_model)
#Test
if do_testing:
data_test = create_huge_data_set(path_data, nb_rows=100, size_image=size_image_train,
skip_rows=range(1, nb_row_per_classe + 100), encoding_dict=enc_dict)
test_loader = DataLoader(data_test, batch_size=batch_size)
model_final,history=load_model_weights(model,path_model_weights_test,type="best",use_gpu=use_gpu,get_history=True)
# history.display()
acc,loss,score_top3,conf_mat,acc_per_class=calcul_metric_concours(model_final,test_loader,use_gpu=use_gpu,show_acc_per_class=True)
print("Accuracy test: {}".format(acc))
print("Score top 3 concours: {}".format(score_top3))
print(acc_per_class)
#Log experiment
experiment_sacred.log_scalar("Test accuracy",acc)
experiment_sacred.log_scalar("Test loss", loss)
experiment_sacred.log_scalar("Test score top3", score_top3)
experiment_sacred.log_scalar("Test confusion matrix", conf_mat)
experiment_sacred.log_scalar("Test accuracy per class", acc_per_class)
| [
"[email protected]"
] | |
ebaf280808546ea7fec78fd24df53f0f63526ca3 | 7c096ed19a6e3585b0713a9e9b55f2105c27e582 | /domain/state.py | e1a9021025d559f5a5fe6d1af6bc75e4e414974a | [] | no_license | gizmoy/PySecurityCameras | c666963426628a072798fdfe91ecc73b910f2a47 | 6a07af7de52ac02b14ae2be87c70539ed09146b4 | refs/heads/master | 2020-12-30T11:40:03.443464 | 2018-04-07T19:03:13 | 2018-04-07T19:03:13 | 91,270,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,859 | py | import random
import copy
from camera import Camera
class State:
def __init__(self, problem, cameras=None):
self.problem = problem
self.cameras = cameras if cameras else self.generate_cameras()
self.num_unobserved = None
def generate_cameras(self):
# generated cameras
cameras = []
# random init number of cameras
num_cameras = random.randint(1, int(self.problem.max_cameras))
# select one box num_cameras times
picked_boxes = [random.choice(self.problem.boxes) for _ in range(num_cameras)]
for box in picked_boxes:
camera = Camera(self.problem, box)
cameras.append(camera)
# return generated cameras
return cameras
def generate_neighbour(self):
# clone cameras
cameras = [copy.copy(c) for c in self.cameras]
num_cameras = len(cameras)
# choose mutation type
if num_cameras == self.problem.max_cameras:
mutation_type = random.choice(['remove', 'modify'])
elif num_cameras == 1:
mutation_type = random.choice(['insert', 'modify'])
else:
mutation_type = random.choice(['insert', 'remove', 'modify'])
# perform mutation depending on selected type
if mutation_type == 'insert':
# insert new camera
box = random.choice(self.problem.boxes)
new_camera = Camera(self.problem, box)
cameras.append(new_camera)
elif mutation_type == 'remove':
# remove random camera
camera = random.choice(cameras)
cameras.remove(camera)
else:
# modify position of random camera
camera = random.choice(cameras)
camera.modify_position()
# return new state
return State(self.problem, cameras)
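# --- Illustrative only (added sketch; `problem` and `cost` are hypothetical) ---
# A local-search loop would typically drive these mutations like:
#   state = State(problem)
#   for _ in range(1000):
#       candidate = state.generate_neighbour()
#       if cost(candidate) < cost(state):
#           state = candidate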
| [
"[email protected]"
] | |
14a928013222509d04f26b06e8abf20fb3940fbe | f1051f4e35cd1cb2137b480154e28c81d630b8fe | /zima_tables/wsgi.py | 96a02899d9df0af612254514e495c87231e1c1c4 | [] | no_license | ZIMA-Engineering/ZIMA-Tables.org | e011e428fe28681b9a00dfeafc09dd17aab6e5ca | b9a58d2319a63052035fc764a7705c8efc8f13ab | refs/heads/master | 2021-06-03T05:32:04.072318 | 2020-11-30T09:30:03 | 2020-11-30T09:30:03 | 57,451,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for zima_tables project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zima_tables.local_settings")
application = get_wsgi_application()
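# Illustrative (added): any WSGI server can serve this callable, e.g.
#   gunicorn zima_tables.wsgi:application
# (assuming the module is importable as `zima_tables.wsgi`)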
| [
"[email protected]"
] | |
80673d37a36b6001f6089f3cd5b2832d36c94e01 | f85cc0e9a2a3726e40f6c3e9fc8dfcd0b9c5ed71 | /Chapter 8/Q1.py | 1de49f7f679c3cb3c1d27db18ed9bb0258e8e814 | [] | no_license | joshdavham/Starting-Out-with-Python-Unofficial-Solutions | 5edbef384a030c936c69829b3fae4a7b2b118560 | 6ba40fe01f4c0e11ad4ab91f6214fda44a8aefe2 | refs/heads/master | 2022-10-09T01:54:49.797361 | 2020-06-09T13:20:31 | 2020-06-09T13:20:31 | 270,884,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | #Question 1
def main():
sales = get_input()
total = get_total(sales)
print("\nTotal weekly sales: $", total, sep = "")
def get_input():
sales = [0] * 7
for day in range(7):
        sales[day] = float(input("What were the sales for day " + str(day+1) + "? "))
return sales
def get_total(sales):
total = 0
for day in range(len(sales)):
total += sales[day]
return total
main()
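# Example run (illustrative): entering 100 for each of the 7 days prints
#   Total weekly sales: $700.0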
| [
"[email protected]"
] | |
6d403a923833ed1c94a3fbcb54bde1b1d9559d31 | 2a3c4d9f7c94147aadf9ca674c5e1577c368e752 | /api_recorder/tests/scenario.py | 72f29ef77f8c9dd08b4e92bfa5dd7f13d8ae04ce | [
"Apache-2.0"
] | permissive | timitee/api_recorder | 1d3a291f546deb6d6633dabe981caac81450d701 | dda1df8535a65f46c360ed9803b52ebdee2396a0 | refs/heads/master | 2021-08-11T10:33:55.432639 | 2017-11-13T15:26:45 | 2017-11-13T15:26:45 | 110,565,305 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,223 | py | # -*- encoding: utf-8 -*-
from api_recorder.api_recorder import api_recorder, api_class_recorder
scenario_val = 'ISeeYouGhost!'
def api_response(module_, class_, method_, vals_):
return {'mod': module_, 'cls': class_, 'mtd': method_, 'val': vals_}
class ApiMarshall(object):
@api_recorder
def decorated_m(self, val):
return api_response(self.__module__,'ApiMarshall', 'decorated_m', val)
def undecorated_m(self, val):
return api_response(self.__module__,'ApiMarshall', 'undecorated_m', val)
class BpiMarshall(object):
@api_recorder
def decorated_m(self, val):
return api_response(self.__module__,'BpiMarshall', 'decorated_m', val)
def undecorated_m(self, val):
return api_response(self.__module__,'BpiMarshall', 'undecorated_m', val)
#@api_class_recorder(api_recorder)
class ApiSuperClassDecorated(object):
def decorated_super(self, val):
return api_response(self.__module__,'ApiSuperClassDecorated', 'decorated_super', val)
#@api_class_recorder(api_recorder)
class ApiSubClassDecorated(ApiSuperClassDecorated):
def decorated_sub(self, val):
return api_response(self.__module__,'ApiSubClassDecorated', 'decorated_sub', val)
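# --- Illustrative only (added) ---
# Each method returns the plain dict built by api_response, e.g.:
#   ApiMarshall().decorated_m('x')
#   -> {'mod': ..., 'cls': 'ApiMarshall', 'mtd': 'decorated_m', 'val': 'x'}
# (assuming the api_recorder decorator passes the call through unchanged)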
| [
"[email protected]"
] | |
e39839ef6ccd8fe2a32d4df89c7f7d433225eb15 | 7fb1f981bd37a2d02b262f58c75c4d6b11ac9ee4 | /mnist_exp/mnist_learning.py | b9d0009f5a25ba4cac0769071c34de0ec49cd168 | [] | no_license | 66RING/deeplearing | 3c96fef6df6d3b7b17ee54bd50df4efc678ee884 | da66a2d3d858d32f989e1bd36f33fef875ee7a70 | refs/heads/master | 2021-04-01T22:32:17.000036 | 2020-03-18T12:01:12 | 2020-03-18T12:01:12 | 248,219,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,508 | py | import tensorflow as tf
from tensorflow.keras import datasets
#tf.enable_eager_execution()
# (x, y), (test_x, test_y) = datasets.mnist.load_data()
(x, y), (test_x, test_y) = datasets.fashion_mnist.load_data()
print('datasets:', x.shape, y.shape, test_x.shape, test_y.shape)
test_x = tf.convert_to_tensor(test_x, dtype=tf.float32) / 255
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255
lr = 0.001
db = tf.data.Dataset.from_tensor_slices((x, y)).batch(128)
test_db = tf.data.Dataset.from_tensor_slices((test_x, test_y)).batch(128)
db_iter = iter(db)
sample = next(db_iter)
print('batch:', sample[0].shape, sample[1].shape)
w1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1))
b1 = tf.Variable(tf.zeros([256]))
w2 = tf.Variable(tf.random.truncated_normal([256, 125], stddev=0.1))
b2 = tf.Variable(tf.zeros([125]))
w3 = tf.Variable(tf.random.truncated_normal([125, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]))
acc = 0
while acc < 0.95:
for step, (x, y) in enumerate(db):
x = tf.reshape(x, [-1, 28 * 28])
with tf.GradientTape() as tape:
h1 = x @ w1 + b1
h1 = tf.nn.relu(h1)
h2 = h1 @ w2 + b2
h2 = tf.nn.relu(h2)
out = h2 @ w3 + b3
y_onehot = tf.one_hot(y, depth=10)
loss = tf.square(y_onehot - out)
loss = tf.reduce_mean(loss)
grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])
w1.assign_sub(lr * grads[0])
b1.assign_sub(lr * grads[1])
w2.assign_sub(lr * grads[2])
b2.assign_sub(lr * grads[3])
w3.assign_sub(lr * grads[4])
b3.assign_sub(lr * grads[5])
if step % 100 == 0:
print("step", step / 100, "loss:", loss)
correct_sum, total = 0, 0
for step, (test_x, test_y) in enumerate(test_db):
test_x = tf.reshape(test_x, [-1, 28 * 28])
h1 = test_x @ w1 + b1
h1 = tf.nn.relu(h1)
h2 = h1 @ w2 + b2
h2 = tf.nn.relu(h2)
out = h2 @ w3 + b3
res = tf.nn.softmax(out, axis=1)
res = tf.argmax(res, axis=1)
res = tf.cast(res, dtype=tf.int32)
y = tf.cast(test_y, dtype=tf.int32)
correct = tf.cast(tf.equal(res, y), dtype=tf.int32)
correct = tf.reduce_sum(correct)
correct_sum += correct
total += test_x.shape[0]
acc = correct_sum / total
acc = float(acc)
print("accuracy:", acc)
print("w1:", w1, "w2:", w2, "w3:", w3)
print("b1:", b1, "b2:", b2, "b3:", b3)
| [
"[email protected]"
] | |
8fcc9b9fcb2d3773828fcb001c5e5282e5601c8e | 22cec5da2b1fb83dcc9cf7c888f1e2078b05b62e | /flora/wallet/sign_coin_solutions.py | e1848b04de272fc1cbdb5bc12e37e82971b93c6b | [
"Apache-2.0"
] | permissive | JuEnPeHa/flora-blockchain | 649d351e096e73222ab79759c71e191e42da5d34 | 656b5346752d43edb89d7f58aaf35b1cacc9a366 | refs/heads/main | 2023-07-18T08:52:51.353754 | 2021-09-07T08:13:35 | 2021-09-07T08:13:35 | 399,297,784 | 0 | 0 | Apache-2.0 | 2021-08-24T01:30:45 | 2021-08-24T01:30:44 | null | UTF-8 | Python | false | false | 2,037 | py | import inspect
from typing import List, Any
import blspy
from blspy import AugSchemeMPL
from flora.types.coin_solution import CoinSolution
from flora.types.spend_bundle import SpendBundle
from flora.util.condition_tools import conditions_dict_for_solution, pkm_pairs_for_conditions_dict
async def sign_coin_solutions(
coin_solutions: List[CoinSolution],
secret_key_for_public_key_f: Any, # Potentially awaitable function from G1Element => Optional[PrivateKey]
additional_data: bytes,
max_cost: int,
) -> SpendBundle:
signatures: List[blspy.G2Element] = []
pk_list: List[blspy.G1Element] = []
msg_list: List[bytes] = []
for coin_solution in coin_solutions:
# Get AGG_SIG conditions
err, conditions_dict, cost = conditions_dict_for_solution(
coin_solution.puzzle_reveal, coin_solution.solution, max_cost
)
if err or conditions_dict is None:
error_msg = f"Sign transaction failed, con:{conditions_dict}, error: {err}"
raise ValueError(error_msg)
# Create signature
for pk, msg in pkm_pairs_for_conditions_dict(
conditions_dict, bytes(coin_solution.coin.name()), additional_data
):
pk_list.append(pk)
msg_list.append(msg)
if inspect.iscoroutinefunction(secret_key_for_public_key_f):
secret_key = await secret_key_for_public_key_f(pk)
else:
secret_key = secret_key_for_public_key_f(pk)
if secret_key is None:
e_msg = f"no secret key for {pk}"
raise ValueError(e_msg)
assert bytes(secret_key.get_g1()) == bytes(pk)
signature = AugSchemeMPL.sign(secret_key, msg)
assert AugSchemeMPL.verify(pk, msg, signature)
signatures.append(signature)
# Aggregate signatures
aggsig = AugSchemeMPL.aggregate(signatures)
assert AugSchemeMPL.aggregate_verify(pk_list, msg_list, aggsig)
return SpendBundle(coin_solutions, aggsig)
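# --- Illustrative only (added; `known_keys` and `genesis_challenge` are hypothetical) ---
# `secret_key_for_public_key_f` may be a plain function or a coroutine, e.g.:
#   def secret_key_for_pk(pk: blspy.G1Element):
#       return known_keys.get(bytes(pk))  # -> Optional[PrivateKey]
#   spend_bundle = await sign_coin_solutions(coin_solutions, secret_key_for_pk,
#                                            genesis_challenge, max_cost)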
| [
"[email protected]"
] | |
11ab1959489a7411be404afb634f1d17f425117f | e7c3576783aad4472806e827032424a062a97fdb | /Science/Parse.py | a5a6a737ac8c2147523f198770285da5d49676ec | [] | no_license | huskyroboticsteam/2016-17 | d428cc11022783f0b5c41c439a62341e874f4042 | f11ecc1715fbe1c73e778ce65ae38f8d56e9edc5 | refs/heads/master | 2020-09-26T22:48:26.453894 | 2017-06-01T14:19:07 | 2017-06-01T14:19:07 | 65,952,771 | 10 | 10 | null | 2017-02-18T20:14:27 | 2016-08-18T00:53:19 | Python | UTF-8 | Python | false | false | 3,032 | py | import sys
import struct
import Error
import Util
from Packet import PacketType
from threading import Thread
IMG_REQ_CONST = 6370218008217978682469763330258393040577855L
msgQueue = []
# [ LAST TIMESTAMP, CMD_VAL_ID1, CMD_VAL_ID2, ... ]
aux_ctrl = []
sys_ctrl = []
"""
*** WHEN PICTURE IS CAPTURED, THE RESPECTIVE
CAMERA ID, SHOULD BE SET TO 'False' IN
THIS HANDLER ARRAY.
"""
# [ LAST TIMESTAMP, CAM_NUM0_BOOL, CAM_NUM1_BOOL, ... ]
cam_ctrl = []
reset = False
"""
Queue a message to the handler
"""
def queueMessage(msg):
global msgQueue
msgQueue += [msg]
"""
Get Message from Queue
"""
def nextMsg():
global msgQueue
temp = msgQueue[0]
del msgQueue[0]
return temp
"""
Parse message into timestamp and id
"""
def parse(msg):
global msgQueue
global reset
if len(msgQueue) == 0 and reset:
reset = False # Set reset back to default value
if msg.ID == PacketType.AuxControl:
parse_aux(msg)
elif msg.ID == PacketType.SysControl:
parse_sysctrl(msg)
elif msg.ID == PacketType.ImageRequest:
parse_imgreq(msg)
else:
# Throw Failed to Parse incoming Packet
Error.throw(0x0504)
"""
Parse Auxiliary Ctrl Packet
"""
def parse_aux(msg):
global aux_ctrl
cmd_id = 0
# Set Timestamp
aux_ctrl[0] = Util.bytesToInt(msg.data, 0, 4)
# Get Command ID at byte pos 5
cmd_id = msg.data[5]
# Get Command Value
cmd_value = Util.bytesToInt(msg.data, 6, 10)
aux_ctrl[cmd_id + 1] = cmd_value
"""
Parse System Ctrl Packet
"""
def parse_sysctrl(msg):
global cam_ctrl
# Set Timestamp
sys_ctrl[0] = Util.bytesToInt(msg.data, 0, 4)
# Find Command ID at byte pos 5
cmd_id = msg.data[5]
    # Find the command value in the 4 bytes after the command ID
cmd_value = Util.bytesToInt(msg.data, 6, 10)
# Set Controller to specified value at specified location
sys_ctrl[cmd_id + 1] = cmd_value
"""
Parse Img Request
"""
def parse_imgreq(msg):
global cam_ctrl
# Set Timestamp
cam_ctrl[0] = Util.bytesToInt(msg.data, 0, 4)
# Get CMD Value
cmd_value = Util.bytesToInt(msg.data, 5, 28)
# Throw error if value incorrect
if cmd_value != IMG_REQ_CONST:
# Throw invalid request error
Error.throw(0x0505)
# Set the camera number
cmd_camera = msg.data[28]
cam_ctrl[cmd_camera] = True
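# Illustrative byte layout (added; derived from the parsers above):
#   AuxControl / SysControl: [0:4]=timestamp  [5]=command id  [6:10]=value
#   ImageRequest:            [0:4]=timestamp  [5:28]=request constant  [28]=camera number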
"""
Parsing Handler
"""
def parse_all():
while len(msgQueue) > 0:
parse(nextMsg())
"""
Threading method, call to setup thread
"""
def thread_parsing():
    global msgQueue
    global cam_ctrl
    global reset
while True:
if reset:
msgQueue = []
parse_all()
"""
Call after every camera capture to clear the per-camera request flags
"""
def resetCam():
for i in range(1, len(cam_ctrl)):
cam_ctrl[i] = False
"""
Setup Parsing with all zero arrays
"""
def setupParsing():
global aux_ctrl
global sys_ctrl
global cam_ctrl
aux_ctrl = [0] * 32
sys_ctrl = [0] * 32
cam_ctrl = [0] + [False] * 31
runThread = Thread(target=thread_parsing)
runThread.start()
| [
"[email protected]"
] | |
81a54439253dce29241c49928fd05e2c8db9e060 | ac7f2369cf136cef946ee6eb89c5be1edda27769 | /hare_turtle_algorithm/scratch_4.py | d209eb062e4425d63c53283db7cf6454fa6fc968 | [] | no_license | iluvjava/Silly_Python_Stuff | d244a94a6c8236713123815ccd1f1f6c27b1cb98 | eb12a67c060de783e6b00d6030668f8d32630dad | refs/heads/master | 2021-04-22T23:14:14.215801 | 2021-04-05T05:25:42 | 2021-04-05T05:25:42 | 249,879,410 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | import numpy as np
y = x = np.array([np.arange(0, 10)]).T
print(x.T.shape)
print(y.shape)
print([email protected])
print(np.linspace(0, 100, 400)[np.newaxis, :].T)
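# Added note: np.array([np.arange(0, 10)]) has shape (1, 10), so x and y are
# (10, 1) after .T; x.T.shape is (1, 10) and x @ x.T is the (10, 10) outer product.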
| [
"[email protected]"
] | |
347bc0c82a866bbe432d6d496139810783afbdee | a8575e06f36b96e502803c0e564f906959ee4896 | /alteSrcHZRR010/hzrr200_pythonSim/hzrr200_regSim01.py | bf4c2f2f5a18f4ba5abf0268ccd35261e98a6720 | [] | no_license | peetsi/hzrr200_RegSim | 9c3b849979c13fd9dcd1be625d5ad8d1297bde9d | 1ec9d959da78d068d67c2614478e47bac2b85667 | refs/heads/master | 2022-11-16T11:01:41.698848 | 2020-07-16T05:39:49 | 2020-07-16T05:39:49 | 278,379,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,837 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# hzrr200 return-flow (Ruecklauf) controller simulation
# Peter Loster, May/June 2020
#
import time
import numpy as np
import matplotlib.pyplot as plt
# *** status variables are changed during operation
# NOTE: values have to be initialized for formal reasons
# they are set in a separate init function
class St: # Status variables
firstLoop =1 # 1 if first loop is in progress
tOld =0 # sec; previous timestamp initial value
dt =0 # sec; timeslice, initial value
tempVlLP =0 # Temperature Vorlauf low-pass
tempRlLP =0 # Temperature Rücklauf, low-pass
tempRlLP2 =0 # Temp.Rücklauf, low-pass 2.order
mRL =0 # degC/sec; RL temperature slope
motPauseEnd =0 # sec; end-time of valve-motor inactive
# *** parameters are stored in EEPROM; may be changed via network
class Par: # Parameter variables, in EEPROM
    fMeas = 0.2        # measurements per second
tauTVL= 10.0*60.0 # sec; tau to reach 1/e amplitude of VL with low-pass filter
tauTRL= 30.0*60.0 # sec; tau to reach 1/e amplitude of RL with low-pass filter
    mUp = 7.0          # degC/sec; slope threshold for a rising edge
    mDn = -8.0         # degC/sec; slope threshold for a falling edge
offtime = 45.0*60.0 # sec; to stop motor after strong slope
motDelay =10*60 # sec; pause motor after steep slope
def init_var_status():
St.firstLoop = 1;
St.tOld = -9999.0
St.dt = 1.0 / Par.fMeas
print("St.dt=",St.dt)
St.tempVlLP = 60.0 # degC; start value -> measured later
St.tempRlLP = 44.0 # degC; start value -> measured later
St.mRL = 0.0 # degC/sec; start value
motPauseEnd = time.time() # sec; End of pause set to now
def regler( tn,tempVl,tempRl ):
# tn sec; actual time
# tempVl degC; Vorlauf temperature
# tempRl degC; Rücklauf temperature
# Rücklauf; fast lowpass filter
if St.firstLoop > 0:
St.tempVlLP = tempVl # init low-pass filter Vorlauf
St.tempRlLP = tempRl # init low-pass filter Ruecklauf
        St.mRL = 0.0        # init slope Ruecklauf
m2high = 0.0 # init slope Ruecklauf too high
m2low = 0.0 # init slope Ruecklauf too low
mPause = tn-1 # endtime for slope-pause
St.tempRLOld = tempRl
else:
#print(".")
        # *** calculate filter factor for low-pass of VL and RL
faktVL = 1.0/(Par.tauTVL*Par.fMeas) # 1; filter factor
faktRL = 1.0/(Par.tauTRL*Par.fMeas) # 1; filter factor
St.tempVlLP = tempVl * faktVL + St.tempVlLP * (1.0 - faktVL)
St.tempRlLP = tempRl * faktRL + St.tempRlLP * (1.0 - faktRL)
St.tempRlLP2= St.tempRlLP * faktRL + St.tempRlLP2 * (1.0 - faktRL)
# *** RL temperature slope evaluation
# find slope of input tempRl
dTempRL = (tempRl - St.tempRLOld)*1000 # mK; temp. change
St.mRL = dTempRL / St.dt; # mK/sec; slope over time
#print("dTempRL,St.dt,St.mRL=",dTempRL,St.dt,St.mRL)
St.tempRLOld = tempRl;
# find too high slopes
# if found: stop regulator for moving valve-motor to avoid
# resonance effects
#print(St.mRL,Par.mUp)
if St.mRL > Par.mUp :
m2high = 0.9 # m too high
# start valve-motor delay time
St.motPauseEnd = tn + Par.motDelay
else:
m2high = 0
if St.mRL < Par.mDn :
m2low = -0.9 # m too low
# start valve-motor delay time
St.motPauseEnd = tn + Par.motDelay
else:
m2low = 0
# for simulation only:
#print("tn=%f; motPauseEnd=%f"%(tn,St.motPauseEnd))
if tn < St.motPauseEnd:
mPause=1
else:
mPause=0.0
# *** activate motor if too long disabled
# TODO parameter in Par for max. idle time of motor
# TODO switch motor on only for a limited time <-- other parameter
# *** main regulator
# always use mean temperature of RL (not current temp.)
# do nothing if motor delay is active
pass
# *** return values for plotting
St.firstLoop=0
return (St.tempRlLP, St.tempRlLP2, St.mRL, m2high, m2low, mPause)
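# Added note: the filters above are exponential low-passes,
#   y[n] = fakt * x[n] + (1 - fakt) * y[n-1],  with  fakt = 1 / (tau * fMeas);
# e.g. tauTRL = 30*60 s at fMeas = 0.2 /s gives fakt = 1/360 per sample.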
if __name__ == "__main__":
# **** ATTENTION +++ ATTENTION +++ ATTENTION +++ ATTENTION +++
''' FIRST read all measured values THEN initialize variables '''
# **** ATTENTION +++ ATTENTION +++ ATTENTION +++ ATTENTION +++
init_var_status()
# *** generate a temperature curve
# TODO alternatively read it from file
def temp_verlauf( tvon, tbis, dt, T, a0 ):
        # tvon, tbis in minutes
t = np.arange( tvon, tbis, dt )
tr = 2.0*np.pi * t / T
rl = a0 * np.sin(tr)
return (t,rl)
# generate changing temperature values over time
vonSeconds = 0
bisSeconds = 240*60
dt = 1.0/Par.fMeas # sec; sample rate
    TMinutes = 60*60   # sec; period of one oscillation
    a0 = 5.0           # degC; amplitude in degrees Celsius
(t,tempRL)=temp_verlauf(vonSeconds,bisSeconds,dt,TMinutes,a0)
tm=t/60.0
rlLP=[] # degC; temperature Rücklauf after low-pass
rlLP2=[] # degC; temperature Rücklauf after 2. low-pass
mRL =[] # K/min; temperature slope Rücklauf
mHi =[] # too high positive RL slope detected
mLo =[] # too low negative RL slope detected
motPause=[] # 1 if valve-motor shall not work
St.firstLoop=1 # indicate first loop to all iterations
for i in range (len(t)):
#for i in range (5):
(tempRlLP,tempRlLP2,m,a,b,mp) = regler(t[i],60.0,tempRL[i])
        St.firstLoop = 0
# store results for later plotting
rlLP.append(tempRlLP)
rlLP2.append(tempRlLP2)
mRL.append(m)
mHi.append(a) # only used for sim-plot
mLo.append(b) # only used for sim-plot
motPause.append(mp) # motor pause active
# plot results
plt.plot(tm,tempRL,label="tempRL")
plt.plot(tm,rlLP,label="rlLP")
plt.plot(tm,rlLP2,label="rlLP2")
plt.plot(tm,mRL,":",label="mRL")
plt.plot(tm,mHi,label="mHi")
plt.plot(tm,mLo,label="mLo")
plt.plot(tm,motPause,":",label="motPause")
plt.grid()
plt.xlabel("Minutes")
plt.legend()
plt.show()
'''
m = [rl[i]-rl[i-1] for i in range(1,len(t)) ]
m.insert(0,0)
print(m)
mUp = [ 1 if m[i] > Par.mUp else 0 for i in range(len(m)) ]
mDn = [-1 if m[i] < Par.mDn else 0 for i in range(len(m)) ]
delayUp = []
cnt=0
for x in mUp:
if x != 0:
delayUp.append(.9)
cnt = Par.offtime
else:
if cnt > 0:
cnt-=1
delayUp.append(.9)
else:
delayUp.append(0)
delayDn = []
cnt=0
for x in mDn:
if x != 0:
delayDn.append(-.9)
cnt = Par.offtime
else:
if cnt > 0:
cnt-=1
delayDn.append(-.9)
else:
delayDn.append(0)
print(max(m),min(m))
print(mUp)
rlMean = np.zeros(len(m),float)
rlMean[0] = rl[0]
rlMean = [rlMean[i-1]*(1-Par.fakt) + rl[i]*Par.fakt for i in range(1,len(rl))]
rlMean.insert(0,rl[0])
rlMeanA = np.array(rlMean )
print(rlMeanA)
ma = np.array(m)
plt.plot(t,rl)
plt.plot(t,ma)
plt.plot(t,mUp)
plt.plot(t,mDn)
plt.plot(t,rlMeanA)
plt.plot(t,delayUp,":")
plt.plot(t,delayDn,":")
plt.show()
'''
| [
"[email protected]"
] | |
f4b3f17b7b0a85a8901d1509c2176c8fd48e49c9 | 48792dc23fafc5c18b709e74586fd5054724158f | /core/migrations/0004_auto_20201209_0650.py | cfd4292fb60c8406928f05fdaafaa1ef0823a528 | [] | no_license | guilleijo/vendomatic | 4126ebe0fcc4eee5f91a3b8f174e65a292a2ff65 | ea6689d6986094c17b72af6ff4eb42e68643c9b7 | refs/heads/master | 2023-01-31T22:35:49.755979 | 2020-12-12T15:11:31 | 2020-12-12T15:13:12 | 320,855,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | # Generated by Django 3.1.4 on 2020-12-09 06:50
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20201209_0324'),
]
operations = [
migrations.AlterField(
model_name='inventory',
name='quantity',
field=models.PositiveIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)]),
),
]
| [
"[email protected]"
] | |
1ac29a1d452db19f9e2a28a4a6f437d4178cb338 | 3438e8c139a5833836a91140af412311aebf9e86 | /third_party/WebKit/Tools/Scripts/webkitpy/tool/commands/rebaseline_cl.py | 74cdd52504e672cac908cfea45de9dd8d5cf062b | [
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | permissive | Exstream-OpenSource/Chromium | 345b4336b2fbc1d5609ac5a67dbf361812b84f54 | 718ca933938a85c6d5548c5fad97ea7ca1128751 | refs/heads/master | 2022-12-21T20:07:40.786370 | 2016-10-18T04:53:43 | 2016-10-18T04:53:43 | 71,210,435 | 0 | 2 | BSD-3-Clause | 2022-12-18T12:14:22 | 2016-10-18T04:58:13 | null | UTF-8 | Python | false | false | 9,230 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A command to fetch new baselines from try jobs for a Rietveld issue.
This command interacts with the Rietveld API to get information about try jobs
with layout test results.
"""
import logging
import optparse
from webkitpy.common.net.rietveld import Rietveld
from webkitpy.common.net.web import Web
from webkitpy.common.net.git_cl import GitCL
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.layout_tests.models.test_expectations import BASELINE_SUFFIX_LIST
from webkitpy.tool.commands.rebaseline import AbstractParallelRebaselineCommand
_log = logging.getLogger(__name__)
class RebaselineCL(AbstractParallelRebaselineCommand):
name = "rebaseline-cl"
help_text = "Fetches new baselines for a CL from test runs on try bots."
long_help = ("By default, this command will check the latest try job results "
"for all platforms, and start try jobs for platforms with no "
"try jobs. Then, new baselines are downloaded for any tests "
"that are being rebaselined. After downloading, the baselines "
"for different platforms will be optimized (consolidated).")
show_in_main_help = True
def __init__(self):
super(RebaselineCL, self).__init__(options=[
optparse.make_option(
'--issue', type='int', default=None,
help='Rietveld issue number; if none given, this will be obtained via `git cl issue`.'),
optparse.make_option(
'--dry-run', action='store_true', default=False,
help='Dry run mode; list actions that would be performed but do not do anything.'),
optparse.make_option(
'--only-changed-tests', action='store_true', default=False,
help='Only download new baselines for tests that are changed in the CL.'),
optparse.make_option(
'--no-trigger-jobs', dest='trigger_jobs', action='store_false', default=True,
help='Do not trigger any try jobs.'),
self.no_optimize_option,
self.results_directory_option,
])
self.rietveld = Rietveld(Web())
def execute(self, options, args, tool):
self._tool = tool
issue_number = self._get_issue_number(options)
if not issue_number:
return
builds = self.rietveld.latest_try_job_results(issue_number, self._try_bots())
if options.trigger_jobs:
if self.trigger_jobs_for_missing_builds(builds):
_log.info('Please re-run webkit-patch rebaseline-cl once all pending try jobs have finished.')
return
if not builds:
# TODO(qyearsley): Also check that there are *finished* builds.
# The current behavior would still proceed if there are queued
# or started builds.
_log.info('No builds to download baselines from.')
if args:
test_prefix_list = {}
for test in args:
test_prefix_list[test] = {b: BASELINE_SUFFIX_LIST for b in builds}
else:
test_prefix_list = self._test_prefix_list(
issue_number, only_changed_tests=options.only_changed_tests)
# TODO(qyearsley): Fix places where non-existing tests may be added:
# 1. Make sure that the tests obtained when passing --only-changed-tests include only existing tests.
test_prefix_list = self._filter_existing(test_prefix_list)
self._log_test_prefix_list(test_prefix_list)
if options.dry_run:
return
self.rebaseline(options, test_prefix_list)
def _filter_existing(self, test_prefix_list):
"""Filters out entries in |test_prefix_list| for tests that don't exist."""
new_test_prefix_list = {}
port = self._tool.port_factory.get()
for test in test_prefix_list:
path = port.abspath_for_test(test)
if self._tool.filesystem.exists(path):
new_test_prefix_list[test] = test_prefix_list[test]
else:
_log.warning('%s not found, removing from list.', path)
return new_test_prefix_list
def _get_issue_number(self, options):
"""Gets the Rietveld CL number from either |options| or from the current local branch."""
if options.issue:
return options.issue
issue_number = self.git_cl().get_issue_number()
_log.debug('Issue number for current branch: %s', issue_number)
if not issue_number.isdigit():
_log.error('No issue number given and no issue for current branch. This tool requires a CL\n'
'to operate on; please run `git cl upload` on this branch first, or use the --issue\n'
'option to download baselines for another existing CL.')
return None
return int(issue_number)
def git_cl(self):
"""Returns a GitCL instance; can be overridden for tests."""
return GitCL(self._tool)
def trigger_jobs_for_missing_builds(self, builds):
"""Returns True if jobs were triggered; False otherwise."""
builders_with_builds = {b.builder_name for b in builds}
builders_without_builds = set(self._try_bots()) - builders_with_builds
if not builders_without_builds:
return False
_log.info('Triggering try jobs for:')
for builder in sorted(builders_without_builds):
_log.info(' %s', builder)
# If the builders may be under different masters, then they cannot
# all be started in one invocation of git cl try without providing
# master names. Doing separate invocations is slower, but always works
# even when there are builders under different master names.
for builder in sorted(builders_without_builds):
self.git_cl().run(['try', '-b', builder])
return True
def _test_prefix_list(self, issue_number, only_changed_tests):
"""Returns a collection of test, builder and file extensions to get new baselines for.
Args:
issue_number: The CL number of the change which needs new baselines.
only_changed_tests: Whether to only include baselines for tests that
are changed in this CL. If False, all new baselines for failing
tests will be downloaded, even for tests that were not modified.
Returns:
A dict containing information about which new baselines to download.
"""
builds_to_tests = self._builds_to_tests(issue_number)
if only_changed_tests:
files_in_cl = self.rietveld.changed_files(issue_number)
finder = WebKitFinder(self._tool.filesystem)
tests_in_cl = [finder.layout_test_name(f) for f in files_in_cl]
result = {}
for build, tests in builds_to_tests.iteritems():
for test in tests:
if only_changed_tests and test not in tests_in_cl:
continue
if test not in result:
result[test] = {}
result[test][build] = BASELINE_SUFFIX_LIST
return result
def _builds_to_tests(self, issue_number):
"""Fetches a list of try bots, and for each, fetches tests with new baselines."""
_log.debug('Getting results for Rietveld issue %d.', issue_number)
builds = self.rietveld.latest_try_job_results(issue_number, self._try_bots())
if not builds:
_log.debug('No try job results for builders in: %r.', self._try_bots())
return {build: self._tests_to_rebaseline(build) for build in builds}
def _try_bots(self):
"""Returns a collection of try bot builders to fetch results for."""
return self._tool.builders.all_try_builder_names()
def _tests_to_rebaseline(self, build):
"""Fetches a list of LayoutTestResult objects for unexpected results with new baselines."""
buildbot = self._tool.buildbot
results_url = buildbot.results_url(build.builder_name, build.build_number)
layout_test_results = buildbot.fetch_layout_test_results(results_url)
if layout_test_results is None:
_log.warning('Failed to request layout test results from "%s".', results_url)
return []
failure_results = layout_test_results.unexpected_mismatch_results()
missing_results = layout_test_results.missing_results()
return sorted(r.test_name() for r in failure_results + missing_results)
@staticmethod
def _log_test_prefix_list(test_prefix_list):
"""Logs the tests to download new baselines for."""
if not test_prefix_list:
_log.info('No tests to rebaseline; exiting.')
return
_log.info('Tests to rebaseline:')
for test, builds in test_prefix_list.iteritems():
builds_str = ', '.join(sorted('%s (%s)' % (b.builder_name, b.build_number) for b in builds))
_log.info(' %s: %s', test, builds_str)
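    # --- Illustrative invocations (added; based on the options defined above) ---
    #   webkit-patch rebaseline-cl                          # use the current branch's CL
    #   webkit-patch rebaseline-cl --issue 12345 --dry-run  # issue number is a placeholder
    #   webkit-patch rebaseline-cl --only-changed-tests --no-trigger-jobs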
| [
"[email protected]"
] | |
970e032873598b577c478df4bda72a6d70df2593 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_328/ch45_2020_04_11_19_23_00_469501.py | 654fc6cd7bedadc387007d4d63a90c312e9a584c | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | lista= []
while True:
x= int(input('Digite algum número: '))
if x>0:
lista.append(x)
elif x <= 0:
lista.reverse()
print(lista) | [
"[email protected]"
] | |
810697777c5034e7487f2544f56c437f9efecb71 | fbb659d7d9bcd3add877fe2a7b9ee360948d0dec | /MovieTicketBook.py | 51dd23e17e40ec3f36426c4864706d4c6e91289b | [] | no_license | viralpatel12345/movie-ticket-booking-using-python-OOP-concept- | d1c9efeb7b5aad50d62f428d3c65e0145aca27d2 | 5f569835f1fb414e18a1208486a2a1e8671a3274 | refs/heads/master | 2020-06-19T20:11:09.817784 | 2019-07-14T15:49:49 | 2019-07-14T15:49:49 | 196,854,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,512 | py | '''
"FairyLand Multiplex" wants to automate ticket booking and seat allocation process.
Method description:
check_seat_availability(movie_index,number_of_tickets): Checks seat availability for the given movie. Refer the code given in starter code
calculate_ticket_price(movie_index,number_of_tickets): Calculates total ticket price for the given movie. Refer the code given in starter code
generate_seat_number(movie_index,number_of_tickets): Allocate required number of seats for the given movie.
Seat numbers should be auto-generated as mentioned below:
Seat numbers should be generated starting from 1, prefixed by "M1-" for movie-1 and "M2-" for movie 2
Examples movie-1: M1-1, M1-2, M1-3 etc, movie-2: M2-1,M2-2 etc
Update total number of tickets available for the given movie in list_total_tickets
Update last seat number allocated for the given movie in list_last_seat_number
Return the list of generated seat numbers
book_ticket(movie_name,number_of_tickets): Book tickets for the given movie.
Return 0, if movie name is invalid
Return -1, if enough tickets are not available for the given movie
Else,
Generate seat numbers
Initialize attribute, seat_numbers with the list of generated seat numbers
Calculate total ticket price
Perform case sensitive string comparison.
'''
class Multiplex:
__list_movie_name=["movie1","movie2"]
__list_total_tickets=[100,60]
__list_last_seat_number=[None,None]
__list_ticket_price=[150,200]
def __init__(self):
self.__seat_numbers=None
self.__total_price=None
def calculate_ticket_price(self,movie_index,number_of_tickets):
self.__total_price= Multiplex.__list_ticket_price[movie_index]*number_of_tickets
def check_seat_availability(self,movie_index,number_of_tickets):
if(Multiplex.__list_total_tickets[movie_index]<number_of_tickets):
return False
else:
return True
def get_total_price(self):
return self.__total_price
def get_seat_numbers(self):
return self.__seat_numbers
def book_ticket(self, movie_name, number_of_tickets):
if movie_name not in Multiplex.__list_movie_name :
return 0
elif not(self.check_seat_availability(Multiplex.__list_movie_name.index(movie_name),number_of_tickets)) :
return -1
else :
self.__seat_numbers=self.generate_seat_number(Multiplex.__list_movie_name.index(movie_name),number_of_tickets)
self.calculate_ticket_price(Multiplex.__list_movie_name.index(movie_name),number_of_tickets)
def generate_seat_number(self,movie_index, number_of_tickets):
l=[]
if Multiplex.__list_last_seat_number[movie_index]==None :
Multiplex.__list_last_seat_number[movie_index]="M"+str(movie_index+1)+"-"+str(0)
x=int(Multiplex.__list_last_seat_number[movie_index].split('-')[1])
for k in range(0,len(Multiplex.__list_movie_name)):
if movie_index==k :
for i in range(x+1,x+number_of_tickets+1):
l+=["M"+str(k+1)+"-"+str(i)]
Multiplex.__list_total_tickets[movie_index]=Multiplex.__list_total_tickets[movie_index]-number_of_tickets
Multiplex.__list_last_seat_number[movie_index]=l[len(l)-1]
return l
booking1=Multiplex()
status=booking1.book_ticket("movie1",10)
'''
booking5=Multiplex()
print(booking1.generate_seat_number(1,10))
print(Multiplex._Multiplex__list_total_tickets)
print(Multiplex._Multiplex__list_last_seat_number)
print(booking5.generate_seat_number(0,10))
print(Multiplex._Multiplex__list_total_tickets)
print(Multiplex._Multiplex__list_last_seat_number)
'''
if(status==0):
print("invalid movie name")
elif(status==-1):
print("Tickets not available for movie-1")
else:
print("Booking successful")
print("Seat Numbers :", booking1.get_seat_numbers())
print("Total amount to be paid:", booking1.get_total_price())
print("-----------------------------------------------------------------------------")
booking2=Multiplex()
status=booking2.book_ticket("movie2",6)
if(status==0):
print("invalid movie name")
elif(status==-1):
print("Tickets not available for movie-2")
else:
print("Booking successful")
print("Seat Numbers :", booking2.get_seat_numbers())
print("Total amount to be paid:", booking2.get_total_price())
| [
"[email protected]"
] | |
ef76fce18c4d75abc69a31441786b2d3465aaad6 | 5ac40dd0907f6b5a7adff338465c7c41fffc4348 | /src/jukeboxcore/gui/widgets/guerilla/shotcreator_ui.py | a94b8806cff4c0c262fcc729863f846a82ed3722 | [] | permissive | JukeboxPipeline/jukebox-core | 8effaf675c8a3b39d043bb69e40b75e591bb4a21 | bac2280ca49940355270e4b69400ce9976ab2e6f | refs/heads/master | 2021-07-22T13:50:58.168148 | 2015-06-01T16:20:56 | 2015-06-01T16:20:56 | 24,540,320 | 2 | 0 | BSD-3-Clause | 2021-06-10T19:34:28 | 2014-09-27T19:06:31 | Python | UTF-8 | Python | false | false | 2,282 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'h:\projects\jukebox-core\src\jukeboxcore\gui\widgets\guerilla\shotcreator.ui'
#
# Created: Tue Jan 13 18:54:57 2015
# by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_shotcreator_dialog(object):
def setupUi(self, shotcreator_dialog):
shotcreator_dialog.setObjectName("shotcreator_dialog")
shotcreator_dialog.resize(694, 398)
self.gridLayout = QtGui.QGridLayout(shotcreator_dialog)
self.gridLayout.setObjectName("gridLayout")
self.name_lb = QtGui.QLabel(shotcreator_dialog)
self.name_lb.setObjectName("name_lb")
self.gridLayout.addWidget(self.name_lb, 0, 0, 1, 1)
self.name_le = QtGui.QLineEdit(shotcreator_dialog)
self.name_le.setObjectName("name_le")
self.gridLayout.addWidget(self.name_le, 0, 1, 1, 1)
self.desc_lb = QtGui.QLabel(shotcreator_dialog)
self.desc_lb.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.desc_lb.setObjectName("desc_lb")
self.gridLayout.addWidget(self.desc_lb, 1, 0, 1, 1)
self.desc_pte = QtGui.QPlainTextEdit(shotcreator_dialog)
self.desc_pte.setObjectName("desc_pte")
self.gridLayout.addWidget(self.desc_pte, 1, 1, 1, 1)
self.create_pb = QtGui.QPushButton(shotcreator_dialog)
self.create_pb.setObjectName("create_pb")
self.gridLayout.addWidget(self.create_pb, 2, 1, 1, 1)
self.retranslateUi(shotcreator_dialog)
QtCore.QMetaObject.connectSlotsByName(shotcreator_dialog)
def retranslateUi(self, shotcreator_dialog):
shotcreator_dialog.setWindowTitle(QtGui.QApplication.translate("shotcreator_dialog", "Create Shot", None, QtGui.QApplication.UnicodeUTF8))
self.name_lb.setText(QtGui.QApplication.translate("shotcreator_dialog", "Name", None, QtGui.QApplication.UnicodeUTF8))
self.desc_lb.setText(QtGui.QApplication.translate("shotcreator_dialog", "Description", None, QtGui.QApplication.UnicodeUTF8))
self.create_pb.setText(QtGui.QApplication.translate("shotcreator_dialog", "Create", None, QtGui.QApplication.UnicodeUTF8))
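# --- Illustrative usage (added; the standard pyside-uic pattern) ---
#   dialog = QtGui.QDialog()
#   ui = Ui_shotcreator_dialog()
#   ui.setupUi(dialog)
#   dialog.show()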
| [
"[email protected]"
] | |
9386a5a1444912dff57160dc9c6749ea52dd4376 | 6810201a040c7a77f1e5e9a93882f34717aaa4a4 | /singing contest.py | d2c933266a593c5b9c7bcaf4f72ee490e7e81df3 | [] | no_license | phyllis-jia/array-practice | b848bfa52a82d1508b11ad946ba168a58e668030 | 3c1974866489384992d3e76307d90e5127edaf6f | refs/heads/master | 2020-11-29T06:15:29.915254 | 2020-01-11T07:04:57 | 2020-01-11T07:04:57 | 230,043,151 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,054 | py | ##singing contest: to design a program which remove the highest score and the lowest score from scores, then take average of the remain scores as final score
### optimized one
def singing_score(values):
small_pos = 0
for i in range(1, len(values)):
if values[i] < values[small_pos]:
small_pos = i
high_pos = 0
for i in range(1, len(values)):
if values[i] > values[high_pos]:
high_pos = i
    # read both values before mutating the list; removing one entry first
    # shifts the indices, so values[high_pos] could name the wrong element
    smallest = values[small_pos]
    highest = values[high_pos]
    values.remove(smallest)
    values.remove(highest)
rst = sum(values)/len(values)
return rst
###optimized two
def singing_score2(values):
maxx = values[0]
minn = values[0]
summ = values[0]
for i in range(1,len(values)):
if values[i]<minn:
minn = values[i]
if values[i]>maxx:
maxx = values[i]
summ += values[i]
rst = (summ-maxx-minn)/(len(values)-2)
return rst
### original one
def score(a):
h = max(a)
l = min(a)
x = (sum(a)-h-l)/(len(a)-2)
return x
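# Worked example (added): for [9, 7, 8, 10, 6] the 10 and the 6 are dropped,
# so every variant returns (9 + 7 + 8) / 3 = 8.0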
| [
"[email protected]"
] | |
ef2ce2f42c587b9dcfe630d08cfc405c4fdbbb6b | 28975460d4333accaf861dbbc295d6b640f72da8 | /TokenProj/token_app/migrations/0001_initial.py | b4302f448434e9c835d210f9300266845a1a1d75 | [] | no_license | chaitrabhoomesh55/Django | 5f3685e479470c4019dacfea6910d006d44162a2 | d2a7ccd299c4247e8c7cbb6f8caeb5f051d4a251 | refs/heads/main | 2023-07-17T16:55:44.087883 | 2021-09-04T02:52:37 | 2021-09-04T02:52:37 | 402,954,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,083 | py | # Generated by Django 3.2 on 2021-08-21 00:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Question',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question_text', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
],
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('votes', models.IntegerField(default=0)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='token_app.question')),
],
),
]
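# Added note (illustrative): this initial migration is applied with the
# standard Django command, e.g. `python manage.py migrate token_app`.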
| [
"[email protected]"
] | |
254af0d9384a30250010f298351c0b7d6cf418a2 | f601878be7f5b632bc89a1dc82135c703c72550c | /santiblog/polls/urls.py | 9ddf27a20e2bd99eec35f70af1ecdc16316033bc | [] | no_license | SantiagoVergara/santiagoblog | 34f2344ba4eb54b588cfe4916a1e4e93fcb0dc35 | 03950661be262acbec0542156ec68874e8db9684 | refs/heads/master | 2020-05-27T22:33:13.910585 | 2015-08-10T02:52:27 | 2015-08-10T02:52:27 | 40,459,962 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,270 | py | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from . import views
urlpatterns = [
url(r'^$', 'polls.views.verhome', name='index' ),
url(r'^Curriculum/$', 'polls.views.vercv', name='Curriculum' ),
url(r'^Botonmagico/$', 'polls.views.verbtnmg', name='Boton' ),
url(r'^Calculadora/$', 'polls.views.vercal', name='Calculadora' ),
url(r'^Contactos/$', 'polls.views.vercon', name='Contacto' ),
url(r'^Conversor/$', 'polls.views.verconv', name='Conversor' ),
url(r'^Cronometro/$', 'polls.views.vercro', name='Cronometro' ),
url(r'^Barra/$', 'polls.views.verimagen', name='Barra' ),
url(r'^Noticias/$', 'polls.views.vernoticias', name='Noticias' ),
url(r'^ver_post/(?P<id_post>[0-9]+)$', 'polls.views.ver_post', name='ver_post'),
url(r'^save_message/$', 'polls.views.save_message', name='save_message'),
url(r'^contact/$', 'polls.views.contact', name='contact'),
url(r'^categoria/(?P<categoria>[0-9]+)$', 'polls.views.categoria', name='categoria'),
]
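# Added note (illustrative): with these patterns a request for '/ver_post/5'
# dispatches to polls.views.ver_post(request, id_post='5'), and named routes
# can be reversed, e.g. reverse('categoria', args=[3]) -> '/categoria/3'
# (assuming this urlconf is mounted at the site root).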
| [
"[email protected]"
] | |
4d6d0d807153f2c306934aa28a01ff9aaf4d6556 | a6c470e16e964b305e756591317197d90a2f8a76 | /RP-Report/files/01_GetUniqueIPs.py | 245cd358555e24c75c722202bfe3f4ae85aae671 | [] | no_license | ramakrishna1994/RP | 3ae116fea0004ef3ce8d9723fc6c1ac7ee746cbd | 05b058ee8d9302638a97c47e745d126879612a14 | refs/heads/master | 2021-01-24T11:27:01.202433 | 2018-04-23T07:50:10 | 2018-04-23T07:50:10 | 123,081,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,633 | py | import json
import requests
import psycopg2
from Globals import *
uniqueIPs = set()
dbName = "cowrie"
try:
conn = psycopg2.connect("dbname='"+str(dbName)+"' user='postgres' host='localhost' password='postgres'")
except Exception as e:
print e
cur = conn.cursor()
def createTables():
cur.execute("CREATE TABLE IF NOT EXISTS STATS("
"IP VARCHAR(20) PRIMARY KEY NOT NULL,"
"LOGINATTEMPTS INT,"
"COUNTOFCOMMANDS INT,"
"DOSCLUSTER INT,"
"SENTIMENT INT,"
"COMMANDS TEXT)")
cur.execute("CREATE TABLE IF NOT EXISTS DOS_ATTACKS("
"IP VARCHAR(20) PRIMARY KEY NOT NULL,"
"LOGINATTEMPTS INT,"
"COUNTOFCOMMANDS INT,"
"COMMANDS TEXT)")
conn.commit()
cur.execute("TRUNCATE TABLE STATS")
conn.commit()
createTables()
url = ELASTIC_URL + str("?size=1")
response = requests.get(url)
countOfData = json.loads(response.content)["hits"]["total"]
print countOfData
size = 10000
for i in range(0,countOfData+1,size):
url = ELASTIC_URL + str("?_source=geoip.ip&size=")+str(size)+"&from="+str(i)
response = requests.get(url)
IPs = json.loads(response.content)["hits"]["hits"]
print len(IPs)
for IP in IPs:
if IP["_source"] != {}:
ip = IP["_source"]["geoip"]["ip"]
uniqueIPs.add(ip)
print "Completed : from = "+str(i)+" and end = "+str(i+size)
for ip in uniqueIPs:
cur.execute("INSERT INTO STATS(IP) VALUES('"+ip+"')")
conn.commit()
print "Total Number of Unique IP's = " + str(len(uniqueIPs))
| [
"[email protected]"
] | |
154dbb46bd17e2448c7da76ab6996748123ecb63 | 3ddd5f99ae589e3eb06fab8bd8102d734461d674 | /bin/Debug/InjuryProps.py | 9118d09ec67a7090be8fdf6687c5e52163fa7892 | [] | no_license | MartinChristiaan/RimworldReflect | d2a81163974d492bee9a58e6ab4682b7fdd456e7 | 94417f783cdfc732ad0fb77c7ddb7caa17ed2923 | refs/heads/master | 2022-06-06T10:53:31.561545 | 2020-05-02T01:16:54 | 2020-05-02T01:16:54 | 260,590,859 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | class InjuryProps
def __init__(self):
self.painPerSeverity_float = ''
self.averagePainPerSeverityOld_float = ''
self.bleedRate_float = ''
self.canMerge_bool = ''
self.destroyedLabel_str = ''
self.destroyedOutLabel_str = ''
self.useRemovedLabel_bool = ''
| [
"[email protected]"
] | |
39dc5f3d31638142c0eedc032b1e1c254d88f39c | d0efbc5c44823e0abe144fe05e86b621198d5d79 | /module01/ex04/generator.py | 8192852bb590d65e2e627c3187622e585dfbf22f | [] | no_license | yanis-fourel/42ai-bootcamp-python | a7b1e4ac9fcc3b1b3fff327bbc31f0396c4118e0 | 1e0eec6089c4540f90d2a6917afb01ac5543dbd1 | refs/heads/main | 2023-08-04T18:35:35.381487 | 2021-08-12T10:14:09 | 2021-08-12T10:14:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 568 | py | import random
def generator(text:str, sep: str=" ", option=None):
if not text:
return
words = text.split(sep)
if option == "shuffle":
tmp = words
words = []
while tmp:
idx = random.randint(0, len(tmp) - 1)
words.append(tmp.pop(idx))
elif option == "unique":
words = list(dict.fromkeys(words))
elif option == "ordered":
words = sorted(words)
elif option is not None:
raise Exception("Invalid option: '%s'" % option)
for w in words:
yield w
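# Illustrative usage (added):
#   list(generator("b a b", option="unique"))   -> ['b', 'a']
#   list(generator("b a b", option="ordered"))  -> ['a', 'b', 'b']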
| [
"[email protected]"
] | |
1714522d57f5a6b08f0f8b9a6ee5d2674b03adda | bd1f952a38cfed36243c12b74dfa851de5f0cc9b | /cGAN_v2.py | c6405cbc4ed29a360ebaec6914820e38d5ac6b80 | [] | no_license | RufinaMay/cGAN | 1dd818303392303d10d7e29e4ea86698d6db745a | 95a7538a9f1e9fbb0f706e3eb804320cc7222813 | refs/heads/master | 2022-01-25T17:55:44.999177 | 2019-06-30T08:01:57 | 2019-06-30T08:01:57 | 181,176,755 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,934 | py | import numpy as np
from keras.optimizers import Adam
from keras.layers import Input, LeakyReLU, BatchNormalization, Reshape
from keras.models import Model, Sequential
from matplotlib import pyplot as plt
import cv2
from keras.layers import Conv2D, Conv2DTranspose, ReLU, Dropout
import pickle
class GAN():
def __init__(self):
self.IM_SIZE = 256
self.CHANNELS = 3
self.IM_SHAPE = (self.IM_SIZE, self.IM_SIZE, self.CHANNELS)
self.IMAGE_TO_TEST = cv2.imread('data/Image/00000850/day/20151101_165511.jpg')
self.IMAGE_TO_TEST = cv2.resize(self.IMAGE_TO_TEST, (self.IM_SIZE,self.IM_SIZE))
self.DATA_FOLDER = r'data/Image/'
"""
self.DATA_FOLDER = '/content/gdrive/My Drive/Colab Notebooks/THESIS/cGAN/data/Image/'
self.IMAGE_TO_TEST = cv2.imread(f'{self.DATA_FOLDER}00000850/day/20151101_165511.jpg')
self.IMAGE_TO_TEST = cv2.resize(self.IMAGE_TO_TEST, (self.IM_SIZE, self.IM_SIZE))
"""
with open('day_paths.pickle', 'rb') as f:
self.DAY_PATH = pickle.load(f)
with open('night_paths.pickle', 'rb') as f:
self.NIGHT_PATH = pickle.load(f)
optimizer = Adam(0.0002, 0.5)
# Build and compile the discriminator
self.discriminator = self.Discriminator()
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer, metrics=['accuracy'])
# Build and compile the generator
self.generator = self.Generator()
self.generator.compile(loss='binary_crossentropy', optimizer=optimizer)
day = Input(shape=self.IM_SHAPE)
night = self.generator(day)
self.discriminator.trainable = False
valid = self.discriminator(night)
self.combined = Model(day, valid)
self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)
def Generator(self):
G = Sequential()
#ENCODER PART
G.add( Conv2D(filters=64, kernel_size=2, strides=(2,2), input_shape=(256,256,3)))
G.add(LeakyReLU(0.2))
G.add( Conv2D(filters=128, kernel_size=2, strides=(2,2)))
G.add(BatchNormalization())
G.add(LeakyReLU(0.2))
G.add( Conv2D(filters=256, kernel_size=2, strides=(2,2)))
G.add(BatchNormalization())
G.add(LeakyReLU(0.2))
G.add( Conv2D(filters=512, kernel_size=2, strides=(2,2)))
G.add(BatchNormalization())
G.add(LeakyReLU(0.2))
G.add( Conv2D(filters=512, kernel_size=2, strides=(2,2)))
G.add(BatchNormalization())
G.add(LeakyReLU(0.2))
G.add( Conv2D(filters=512, kernel_size=2, strides=(2,2)))
G.add(BatchNormalization())
G.add(LeakyReLU(0.2))
G.add( Conv2D(filters=512, kernel_size=2, strides=(2,2)))
G.add(BatchNormalization())
G.add(LeakyReLU(0.2))
G.add( Conv2D(filters=512, kernel_size=2, strides=(2,2)))
G.add(BatchNormalization())
G.add(LeakyReLU(0.2))
#DECODER PART
G.add(Conv2DTranspose(filters=512, kernel_size=2, strides=(2,2)))
G.add(BatchNormalization())
G.add(ReLU())
G.add(Dropout(0.5))
G.add(Conv2DTranspose(filters=512, kernel_size=2, strides=(2,2)))
G.add(BatchNormalization())
G.add(ReLU())
G.add(Dropout(0.5))
G.add(Conv2DTranspose(filters=512, kernel_size=2, strides=(2,2)))
G.add(BatchNormalization())
G.add(ReLU())
G.add(Dropout(0.5))
G.add(Conv2DTranspose(filters=512, kernel_size=2, strides=(2,2)))
G.add(BatchNormalization())
G.add(ReLU())
G.add(Dropout(0.5))
G.add(Conv2DTranspose(filters=256, kernel_size=2, strides=(2,2)))
G.add(BatchNormalization())
G.add(ReLU())
G.add(Dropout(0.5))
G.add(Conv2DTranspose(filters=128, kernel_size=2, strides=(2,2)))
G.add(BatchNormalization())
G.add(ReLU())
G.add(Dropout(0.5))
G.add(Conv2DTranspose(filters=64, kernel_size=2, strides=(2,2)))
G.add(BatchNormalization())
G.add(ReLU())
G.add(Dropout(0.5))
G.add(Conv2DTranspose(filters=3, kernel_size=2, strides=(2,2), activation='tanh'))
G.summary()
day = Input(shape=self.IM_SHAPE)
night = G(day)
return Model(day, night)
def Discriminator(self):
D = Sequential()
#ENCODER PART
D.add( Conv2D(filters=64, kernel_size=4, strides=(2,2), input_shape=(self.IM_SIZE,self.IM_SIZE,3)))
D.add(LeakyReLU(0.2))
D.add( Conv2D(filters=128, kernel_size=4, strides=(2,2)))
D.add(LeakyReLU(0.2))
D.add(BatchNormalization())
D.add( Conv2D(filters=256, kernel_size=4, strides=(2,2)))
D.add(LeakyReLU(0.2))
D.add(BatchNormalization())
D.add( Conv2D(filters=512, kernel_size=4, strides=(2,2)))
D.add(LeakyReLU(0.2))
D.add(BatchNormalization())
D.add( Conv2D(filters=512, kernel_size=4, strides=(2,2)))
D.add(LeakyReLU(0.2))
D.add(BatchNormalization())
D.add( Conv2D(filters=512, kernel_size=4, strides=(2,2)))
D.add(LeakyReLU(0.2))
D.add(BatchNormalization())
D.add( Conv2D(filters=1, kernel_size=2, strides=(2,2), activation='sigmoid'))
D.add(Reshape((-1,)))
D.summary()
image = Input(shape=self.IM_SHAPE)
validity = D(image)
return Model(image, validity)
def image_normalization_mapping(self, image, from_min, from_max, to_min, to_max):
"""
Map data from any interval [from_min, from_max] --> [to_min, to_max]
Used to normalize and denormalize images
"""
from_range = from_max - from_min
to_range = to_max - to_min
scaled = np.array((image - from_min) / float(from_range), dtype=float)
return to_min + (scaled * to_range)
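    # Added worked example: mapping [0, 255] -> [-1, 1] sends 0 to -1.0, 255 to
    # 1.0 and 128 to ~0.004; calling it again with the ranges swapped restores
    # the original pixel range for display.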
def Batch(self, day_paths, night_paths):
N = len(day_paths)
for i in range(N):
day = cv2.imread(f'{self.DATA_FOLDER}{day_paths[i]}')
night = cv2.imread(f'{self.DATA_FOLDER}{night_paths[i]}')
day = cv2.resize(day, (self.IM_SIZE,self.IM_SIZE))
night = cv2.resize(night,(self.IM_SIZE,self.IM_SIZE))
yield day, night
def train(self, epochs):
N = len(self.DAY_PATH)
for epoch in range(epochs):
D_LOSS, G_LOSS = 0.,0.
#SGD
for d, n in self.Batch(self.DAY_PATH, self.NIGHT_PATH):
#day, night = d/255, n/255
day = self.image_normalization_mapping(d, 0, 255, -1, 1)
night = self.image_normalization_mapping(n, 0, 255, -1, 1)
gen_imgs = self.generator.predict(day[np.newaxis,:])
d_loss_real = self.discriminator.train_on_batch(night[np.newaxis,:], np.ones((1,1)))
d_loss_fake = self.discriminator.train_on_batch(gen_imgs, np.zeros((1,1)))
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
g_loss = self.combined.train_on_batch(day[np.newaxis,:], np.ones((1, 1)))
D_LOSS += d_loss
G_LOSS += g_loss
print(f'epoch: {epoch}, D_LOSS: {D_LOSS/N}, G_LOSS: {G_LOSS} ')
img = self.image_normalization_mapping(self.IMAGE_TO_TEST, 0, 255, -1, 1)
img = self.generator.predict(img[np.newaxis,:])
img = self.image_normalization_mapping(img[0], -1, 1, 0, 255).astype('uint8')
            plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))  # cv2 loads BGR; convert for matplotlib
plt.show()
if __name__ == '__main__':
gan = GAN()
gan.train(epochs=500)
| [
"[email protected]"
] | |
005e5a725ed1c03028b59a4a059da8c3eebb2635 | 67e493e7f8806254a6b88351c2660bd6695c7de1 | /Trap/Tensorflow/test.py | 7f6e47b42da4f378b4d3093fc540815df8e7f9f5 | [] | no_license | onthejeep/MixtureModel | 6c312dd6a726a2611264e04c6ca924c6f9606c98 | 0565dde3ec0fe5cf0043e864feffcff6628b6db0 | refs/heads/master | 2021-04-15T17:07:53.406981 | 2018-03-26T19:31:21 | 2018-03-26T19:31:21 | 126,879,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,037 | py | Traveltime = [3.82371016,3.69024781,3.55131291,5.16093999,4.11502671
,4.89527333,3.46792928,4.95218788,5.69964306,5.45113731
,2.78296871,2.28874201,3.72048284,4.80621287,3.94261533
,3.24939139,5.33275141,3.8936405, 4.71077051,3.543074
,3.92405279,4.2632726, 3.55034841,3.92414084,2.68468112
,4.04336137,5.5612573, 4.82418221,3.29918703,3.60219205
,3.83303382,5.13677806,4.17604853,4.0257285, 4.89923438
,4.81335735,3.94777934,2.90616947,5.42168891,4.83822501
,4.91449306,3.22818989,3.76151302,5.32343843,3.95242393
,3.90905958,2.91557951,2.85535903,2.54985753,4.65974557
,2.70136958,3.8761431, 4.97513553,3.29110147,5.18687886
,4.38610715,4.10099073,3.81343185,4.04904104,2.73833875
,4.12961311,3.54921046,2.72957599,3.76362689,4.11653306
,4.03783611,4.84984873,3.8677683, 4.51268389,3.83219738
,5.20384815,4.64924372,4.14137661,5.88620122,3.87063324
,2.55043617,4.03533835,2.94464121,5.12708846,5.21541307
,3.85943135,5.08493243,3.19897814,3.20287191,6.38183901
,4.70341787,3.42941614,3.47124326,4.62179715,4.46194685
,3.19131053,2.20919558,3.83114715,4.34409484,4.33654264
,6.87033422,3.26621023,5.20122803,4.68844525,3.92141537
,4.97974672,4.19172539,4.31151147,4.64089418,2.82790249
,2.7481964, 3.81547895,5.93951329,4.25506516,3.87865117
,3.93040764,4.545346,5.81437241,2.70640118,3.82604712
,6.66537656,3.61110149,1.69158671,4.79237448,1.44131388
,3.16116185,3.59926929,2.93549916,4.33001274,3.84879647
,2.67977231,3.8572318, 4.27564335,3.2679062, 6.23886559
,4.9944987, 1.60880839,4.07013541,3.91741526,4.76351838
,5.40053503,3.5334187, 4.76292122,4.17375876,3.97258759
,3.78601532,3.94130634,2.50630286,7.15312829,3.30924682
,3.96479378,4.88042931,2.93709304,2.79260988,2.28642639
,5.12151922,3.23050664,4.90411262,3.75437165,2.57155484
,4.53916511,4.22706495,5.606811,2.64489191,4.5601503
,2.32630092,3.97981975,3.65943852,3.26093784,3.89233022
,4.26122548,3.08970908,4.24526432,3.13605187,3.71948156
,5.02157729,2.13665444,4.86794389,3.82463838,4.63096234
,5.60799131,4.16720818,4.07623141,4.50501515,3.68312093
,3.48272381,5.72193214,6.043501,3.34528944,3.71998038
,3.67469326,4.23746495,4.17065789,4.86472749,3.91627863
,2.82368796,4.20336347,4.07446614,5.03932996,3.57816241
,5.59735056,4.08817315,3.58362647,2.47827345,3.15037416
,4.13531666,1.38396794,5.36280207,3.01057009,4.13327812
,5.00221507,3.30217613,3.51662816,4.29273334,3.43266449
,4.6957561, 5.91390711,4.56575443,4.52007555,4.74217298
,2.64353243,3.20757086,4.41525302,2.41623492,3.41951762
,2.79507462,2.88515582,4.10950251,3.53168506,5.95191514
,3.18854676,3.14115452,4.19365923,2.11869939,3.97597212
,5.31631499,3.78800999,3.79247549,2.95875804,2.93631715
,3.79935826,3.55984431,4.18263542,4.38691857,4.79438799
,3.70599116,3.77730196,2.4158005, 1.24353946,2.93931328
,4.06583713,3.70024178,4.07150718,4.69058903,3.71489169
,4.37803885,4.30114308,3.57405983,2.61981627,3.83843845
,2.7219222, 4.78501216,5.70700607,4.18289124,3.53940228
,2.70132673,4.41354299,4.88362223,4.16103001,3.35954081
,4.83682386,3.69394391,3.24375426,4.80857385,4.06052424
,2.77271759,6.59304252,3.03171747,4.97606049,4.26844047
,4.51506249,3.43593317,3.41731759,3.38559083,3.45012167
,4.21112248,3.09518576,5.25169523,3.60374861,4.94123543
,4.47246377,5.92388537,5.4662164, 3.03108555,5.17696657
,5.25244305,2.18417556,3.58407704,4.01390698,3.68171863
,4.28096744,5.26940373,4.30863426,3.17347421,3.92981394
,3.74844967,3.90676976,3.69540628,1.89846559,4.48016163
,5.39660671,2.8107389, 4.00049964,4.36892152,3.40361642
,4.51560384,2.81035996,5.67636046,4.32514972,2.98989124
,4.3914684, 2.74227093,3.93873901,5.31033883,3.18656214
,5.01024788,1.8511542, 3.33966735,4.39252651,2.56002423
,4.29659128,3.79965961,4.55160529,2.06916945,4.61339133
,3.14878383,3.75017925,5.50877144,3.05242176,4.72515649
,5.07192201,3.52387396,4.46051854,2.45693068,4.14972318
,6.00675636,3.13928113,4.77784699,5.00884804,3.49535158
,2.39806565,4.0120877, 4.34927557,4.59691371,4.54542291
,5.97602601,2.52767008,5.77826123,5.57068538,3.67136351
,3.49631897,3.29008038,5.06684779,5.65994466,4.23506521
,3.01176872,4.27600171,3.78867639,4.34055387,3.41884208
,3.36457046,4.01926608,3.75358287,4.11662359,3.3631673
,5.80351515,4.24942792,2.9631048, 4.34701168,3.28798544
,5.11149043,4.24484008,3.90053124,5.35107263,2.15114835
,4.54531432,4.548927,5.02987023,3.73321618,2.98067318
,3.86142895,3.66210433,3.10458001,5.41651704,3.98671911
,3.20404522,4.36603785,4.19258931,2.50510782,2.42746053
,3.31154073,4.96919698,4.58226412,3.51287164,3.17707583
,4.45825584,2.7345212, 3.18256143,2.5995791, 4.36114526
,5.20142577,3.27013647,3.36053605,4.09783182,4.77902912
,4.03368451,2.75124634,1.50769743,4.35436193,4.95097831
,3.60967025,4.42524786,3.65513847,5.2693159, 3.41849793
,3.78004387,2.09032396,5.20212763,2.84264962,4.69867536
,4.48614909,4.57262696,2.63707437,4.64150696,3.70727712
,4.17888991,3.51194932,4.89340959,3.67780304,2.81036106
,6.12134155,2.40988586,2.99609231,4.1602831, 2.70969991
,4.88414913,1.94166604,2.29284339,4.41671771,4.35720116
,3.30967658,4.18069955,4.13617923,4.47774652,3.97660494
,3.78166186,5.08454149,5.36141372,4.5154576, 2.91240654
,3.75863755,3.50453414,3.07898095,2.98578988,3.30262947
,3.98405024,2.91376001,4.19142484,3.37225874,2.32793054
,4.66317143,4.74524999,4.62442852,4.27163504,5.05499498
,3.48106325,4.77536806,2.94794637,5.26306818,3.86978423
,4.84116132,5.0351833, 6.24641742,3.33420973,5.25185225
,3.78638659,4.69768311,3.23771542,3.76456555,4.51271559
,3.85698553,2.96486377,3.39002326,3.44032411,3.79917436
,5.02818549,4.30707647,4.07237742,5.22230669,5.02064482
,5.51527773,6.50425526,2.77569287,2.19340121,4.00386356
,3.98506494,3.5667977, 5.16732686,4.78571015,4.02292399
,5.22172598,3.75232234,3.77505277,5.09084184,4.62399366
,3.16782862,2.3487991, 4.60464158,4.45139632,3.10102287
,5.83545995,4.44542241,4.02214737,4.51650067,4.785272
,3.84563897,4.51772881,4.62590065,4.25481894,4.81527038
,2.79130178,4.39286214,3.49771614,3.54037344,4.70106948
,5.04940606,3.15401618,5.52518747,3.65021053,2.12851498
,5.41829551,4.69050303,4.87365542,4.44124202,3.52394396
,3.64662501,3.61728352,5.10102603,5.21541615,3.71801212
,4.88527448,2.90163088,4.96808262,2.98852388,7.164922
,4.87883728,3.82879968,2.69527696,2.61564739,4.52334225
,4.14073716,4.37731293,4.10464255,3.22216321,4.06087505
,3.79547438,5.40334492,5.2455048, 2.38943368,3.21378837
,4.43983813,2.85772096,5.0909155, 4.35474384,4.70596857
,1.82817017,5.41604838,3.56539847,4.3897794, 2.35300974
,4.40133514,3.82414879,4.36231429,2.9151823, 4.35381692
,5.43489867,4.02554905,3.40366916,5.17078241,5.04287095
,4.09477632,4.20707095,3.16788109,2.31284216,3.62375431
,2.86952665,3.32705655,4.8037043, 2.4712326, 3.54228835
,2.47656367,4.2401261, 4.12654625,3.70711133,5.81619121
,5.90131124,6.5215455, 5.89008055,9.13543455,8.42273503
,5.89701114,4.88558431,9.7989407, 6.19569189,7.76097723
,6.44358461,8.3776732, 2.57255118,7.39302296,9.1153393
,9.29833362,7.30812711,6.00313752,7.52776211,8.38903005
,8.07864179,7.82632378,8.10368811,10.94105107,6.99157184
,5.51170034,8.20085094,5.68107787,6.0470912, 7.47381262
,7.57721946,3.97141421,5.09799982,9.63711516,5.29378071
,5.85816785,2.28365599,8.28626535,7.23297867,6.36608927
,7.10995176,10.7074932, 8.71945758,7.38840445,10.0884164
,7.72466521,11.36042616,6.18523373,8.17558559,10.77238358
,7.18963291,7.52448456,11.46175425,7.0097434, 6.1592406
,9.55488772,10.63793735,7.96201934,7.95393759,8.4530337
,6.95669609,7.76285698,8.05225913,6.37938291,7.32801872
,7.85964348,5.34236521,3.11520468,6.45264296,7.64042341
,5.29813138,3.59555169,5.71964702,8.01411823,7.06283676
,8.41287964,7.0820878, 5.80671381,8.31980823,9.01409657
,4.2092875, 5.86041164,4.87363978,9.15841424,9.38079914
,3.5141527, 4.277895,5.89756322,3.73774322,6.99245445
,6.76235213,8.79834756,7.24215247,5.81574533,6.66605497
,4.79178625,7.53085208,8.72216491,4.97837516,7.83493674
,7.64317961,3.55200997,8.08401166,5.93229744,3.88776325
,8.70686846,8.07593332,6.89271278,5.71927554,7.66933473
,5.96156434,9.32385131,5.04556167,5.08874245,6.65389223
,9.52609868,5.30082757,8.23987642,5.45297606,8.10352583
,5.50530714,7.57234258,6.58122191,6.63878481,4.70213993
,5.55930788,6.15751316,3.5690273, 7.1011172, 7.15544095
,8.26791403,10.43085523,9.41700898,4.69959816,11.80273834
,10.57365582,6.01365754,5.96376535,6.43189691,6.51185468
,9.34497587,5.69742588,5.55756863,6.31794754,6.88219916
,13.76594949,10.29912821,6.94009549,4.36325834,9.71286838
,8.35752869,4.96592054,9.61651464,6.63231596,11.17806379
,7.53245572,5.09563576,3.61854691,9.22665373,3.47371006
,7.15129275,5.8104415, 3.24840236,8.94624515,4.2919363
,3.17603217,6.06288231,7.886799,5.73287155,7.80124139
,6.33146056,5.27818641,6.00936197,7.81147801,8.52284746
,2.394477,9.84705732,10.47972661,6.64041337,4.41489483
,8.88442926,9.62105512,5.03983289,7.13672146,9.75691756
,5.2882603, 4.33156344,5.66249761,8.49503618,7.57834725
,6.56641854,8.85392858,8.35260516,5.16614972,8.11748666
,4.75396864,4.09003343,7.88603206,5.42015341,6.41999114
,5.52521289,7.09778559,7.87335761,5.35953437,7.36275252
,5.63552366,6.60773181,6.29832802,10.05386669,7.54438631
,4.25065619,8.64506025,8.90903953,8.74363616,5.61430617
,7.40831149,8.79118184,6.08657835,9.22829838,7.99912415
,8.52306025,4.44179961,7.93724845,10.34971867,8.3861515
,5.4106296, 7.23992242,7.67310718,12.364368, 10.84137732
,11.96747224,9.05244308,8.70293144,9.62668677,7.26188888
,8.28676961,9.89041114,4.61952935,3.97183184,8.55213728
,7.94659031,7.28242102,7.86509754,6.10600049,12.54312173
,8.42504975,7.46521561,5.53000953,11.46233086,3.43141288
,11.22116274,9.65312721,9.06888279,9.79173071,8.00367109
,7.76701765,6.71607977,5.94569807,6.1801244, 8.15042241
,8.26717556,5.91007794,7.70129947,6.24086952,6.48741487
,5.8599868, 7.0207611, 5.4595458, 6.4608045, 7.37717378
,9.96925964,6.02657769,4.89740793,8.52361211,6.85457753
,8.92468425,7.61675411,4.58194815,6.52406485,8.47328388
,5.73946783,5.13366616,9.8959598, 8.30952329,6.16097291
,8.32906879,6.69771849,8.68169095,7.70316371,7.56033071
,6.52564096,7.47278185,7.08279377,9.3568184, 5.19290215
,5.58896995,8.80402912,7.02075673,9.39308892,3.19730501
,11.41228523,12.86209289,10.99177461,9.13628098,10.97618631
,8.32619269,13.53687657,11.50136843,14.64860571,13.41162668
,8.56413629,5.76423305,8.16113743,11.00106931,5.64735137
,11.71739314,3.40790619,13.74066081,6.14912847,0.11938045
,12.51210883,10.71867981,2.85440611,9.8712914, 8.9594293
,6.75276335,9.2432247,11.29182022,7.8711561, 7.8456051
,10.28750801,8.84106973,10.41768259,6.88046234,8.56289691
,6.27968727,9.55057681,7.74237562,11.12049693,7.20259053
,4.69853681,4.59482899,9.07851554,10.1072139,13.32901428
,11.36847274,12.65540187,8.22441799,11.73008618,12.60613321
,11.9772535,14.61927194,17.73237078,10.11432488,8.05944134
,11.51680692,8.35511658,8.58928893,7.30169812,10.453069
,4.55763614,8.74007817,4.79432583,11.23989459,11.88505908
,9.12427894,16.49449673,11.29976156,14.76108454,3.80898584
,7.18696577,6.77431759,18.03383087,14.42932615,13.82337282
,10.75411121,16.80291204,8.77901387,7.77681892,6.59228494
,10.34289694,11.5464465, 8.34770161,13.17985584,8.94191086
,13.53506829,10.0552611,11.94592142,7.44282088,7.34729258
,9.89807391,10.75221858,11.34927719,9.76196552,13.45099198
,12.38905113,11.91468699,7.87038135,4.21993857,8.71811882];
import Deeplearning
import SaveData
import DistributionStatistics
import matplotlib.pyplot as plt
import numpy as np
import MixtureModel

# Fit a lognormal (mixture) model to the travel-time sample by maximum likelihood.
Traveltime = np.array(Traveltime)
Optimizer = Deeplearning.MLE()
Optimizer._Distribution = 'lognormal'
Optimizer._DataSize = len(Traveltime)
Optimizer.DefineFlow()
print('length of data = ', Optimizer._DataSize)
Mu, Sigma, Weight, Loglikelihood = Optimizer.Training(data=Traveltime)
Mu, Sigma, Weight, Loglikelihood = np.array(Mu), np.array(Sigma), np.array(Weight), np.array(Loglikelihood)

# Per-component moments of the fitted distribution.
Mean = MixtureModel.SingleDistribution_Mean(Optimizer._Distribution, Mu, Sigma)
Std = MixtureModel.SingleDistribution_Std(Optimizer._Distribution, Mu, Sigma)
Skewness = MixtureModel.SingleDistribution_Skewness(Optimizer._Distribution, Sigma)

# Empirical statistics of the sample, for comparison with the fit.
SampleMean = np.mean(Traveltime)
SampleVar = np.var(Traveltime)
SampleSkew = DistributionStatistics.Empirical_Skewness(Traveltime)

# Mixture-level statistics derived from the component moments and weights.
MM_Mean = DistributionStatistics.MixtureModel_Mean(Mean, Weight)
MM_Var = DistributionStatistics.MixtureModel_Var(Mean, Std, Weight)
MM_Skew = DistributionStatistics.MixtureModel_Skewness(Mean, Std, Skewness, Weight)

print(Mean, Std)
print(SampleMean, SampleVar, SampleSkew)
print(MM_Mean, MM_Var, MM_Skew)
print(Loglikelihood[-1])

# Training diagnostic: log-likelihood trace over the iterations.
plt.plot(Loglikelihood)
plt.show()

# Overlay the fitted mixture density on the travel-time histogram.
plt.hist(Traveltime, bins=40, density=True)
X, Y = MixtureModel.MixturePDF(Traveltime, Optimizer._Distribution, Optimizer._NumComponent, Mu, Sigma, Weight)
plt.plot(X, Y, 'r')
plt.show() | [
"[email protected]"
] | |
9065fe02899f0cd1910bf736bcbcb42209adb190 | ca2572200776aaa1587948e1cba66801293a57c3 | /app.py | 66027de554d23b86240957216d5fe17ba9f0f7f4 | [] | no_license | rajshekharsingh09/Spam-Ham-Classifier | ca50b13e7975dc1e6682127730c25991408435f2 | 3ed6da1a31824e2954efdf4b5973f2353901943b | refs/heads/main | 2023-07-14T13:24:32.661555 | 2021-08-25T08:31:06 | 2021-08-25T08:31:06 | 399,743,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | from flask import Flask,render_template,url_for,request
import joblib
# CountVectorizer and MultinomialNB must be importable so that the
# pickled vectorizer and classifier can be deserialized by joblib.
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB

# Load the trained spam/ham classifier and the fitted vectorizer from disk.
spam_detect_model = open('pickle1.pkl', 'rb')
clf = joblib.load(spam_detect_model)
cv_model = open('transform.pkl', 'rb')
cv = joblib.load(cv_model)

app = Flask(__name__)

@app.route('/')
def home():
    return render_template('home.html')

@app.route('/predict', methods=['POST'])
def predict():
    if request.method == 'POST':
        message = request.form['message']
        data = [message]
        # Vectorize the message with the same CountVectorizer used at training time.
        vect = cv.transform(data).toarray()
        my_prediction = clf.predict(vect)
    return render_template('result.html', prediction=my_prediction)

if __name__ == '__main__':
    app.run(debug=True) | [
"rajshekharsingh09@gmail"
] | rajshekharsingh09@gmail |
66d97cf320481774462c902df971c3063f758013 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/25aug_corrMC/up/mc/SUSYBBHToTauTau_M-100_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0_1377467523/HTT_24Jul_newTES_manzoni_Up_Jobs/base_cfg.py | 0293621aeda87a45bbf66fd504f3590c52ec1173 | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60,473 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("H2TAUTAU")
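# Input: CMG tuples for this SUSYBBHToTauTau sample; events are read unsorted
# and duplicate checking is switched off.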
process.source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
inputCommands = cms.untracked.vstring('keep *',
'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/group/cmgtools/CMG/SUSYBBHToTauTau_M-100_8TeV-pythia6-tauola/Summer12_DR53X-PU_S10_START53_V7A-v1/AODSIM/PAT_CMG_V5_16_0/cmgTuple_9_1_kFk.root')
)
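# SVFit mass producers (version 2): recompute the di-tau system mass for the
# tau-tau, tau-e and tau-mu channels from the recoil-corrected candidates.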
process.cmgDiTauCorSVFitPreSel = cms.EDProducer("TauTauWithSVFitProducer",
diTauSrc = cms.InputTag("recoilCorMETDiTau"),
SVFitVersion = cms.int32(2),
verbose = cms.untracked.bool(False)
)
process.cmgTauEleCorSVFitPreSel = cms.EDProducer("TauEleWithSVFitProducer",
diTauSrc = cms.InputTag("recoilCorMETTauEle"),
SVFitVersion = cms.int32(2),
verbose = cms.untracked.bool(False)
)
process.cmgTauMuCorSVFitPreSel = cms.EDProducer("TauMuWithSVFitProducer",
diTauSrc = cms.InputTag("recoilCorMETTauMu"),
SVFitVersion = cms.int32(2),
verbose = cms.untracked.bool(False)
)
process.diTauSVFit = cms.EDProducer("TauTauWithSVFitProducer",
diTauSrc = cms.InputTag("cmgDiTauCorPreSel"),
SVFitVersion = cms.int32(2),
verbose = cms.untracked.bool(False)
)
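# Keep only status-3 W/Z/gamma/Higgs particles from the pruned generator
# collection; they serve as the gen-level boson for the MET recoil correction.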
process.genWorZ = cms.EDProducer("GenParticlePruner",
src = cms.InputTag("genParticlesPruned"),
select = cms.vstring('keep status()==3 & pdgId = {W+}',
'keep status()==3 & pdgId = {W-}',
'keep status()==3 & pdgId = {Z0}',
'keep status()==3 & pdgId = {gamma}',
'keep status()==3 & pdgId = {h0}',
'keep status()==3 & pdgId = 35',
'keep status()==3 & pdgId = 36')
)
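# MVA (regression) MET producers, one per channel, built from the various MET
# flavours with GBR forest weights trained on 53X Dec2012 samples.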
process.mvaMETDiTau = cms.EDProducer("MVAMETProducerDiTau",
pucmetSrc = cms.InputTag("pcMet"),
enable = cms.bool(True),
tkmetSrc = cms.InputTag("tkMet"),
verbose = cms.untracked.bool(False),
nopumetSrc = cms.InputTag("nopuMet"),
rhoSrc = cms.InputTag("kt6PFJets","rho"),
pfmetSrc = cms.InputTag("pfMetForRegression"),
nJetsPtGt1Src = cms.InputTag("nJetsPtGt1"),
pumetSrc = cms.InputTag("puMet"),
weights_gbrmetu1cov = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/mvaMET/gbru1cov_53_Dec2012.root'),
weights_gbrmetu2cov = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/mvaMET/gbru2cov_53_Dec2012.root'),
vertexSrc = cms.InputTag("goodPVFilter"),
jetSrc = cms.InputTag("cmgPFJetSel"),
leadJetSrc = cms.InputTag("cmgPFBaseJetLead"),
recBosonSrc = cms.InputTag("cmgDiTauPreSel"),
weights_gbrmetphi = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/mvaMET/gbrmetphi_53_Dec2012.root'),
weights_gbrmet = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/mvaMET/gbrmet_53_Dec2012.root'),
puJetIdLabel = cms.string('met53x')
)
process.mvaMETTauEle = cms.EDProducer("MVAMETProducerTauEle",
pucmetSrc = cms.InputTag("pcMet"),
enable = cms.bool(True),
tkmetSrc = cms.InputTag("tkMet"),
verbose = cms.untracked.bool(False),
nopumetSrc = cms.InputTag("nopuMet"),
rhoSrc = cms.InputTag("kt6PFJets","rho"),
pfmetSrc = cms.InputTag("pfMetForRegression"),
nJetsPtGt1Src = cms.InputTag("nJetsPtGt1"),
pumetSrc = cms.InputTag("puMet"),
weights_gbrmetu1cov = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/mvaMET/gbru1cov_53_Dec2012.root'),
weights_gbrmetu2cov = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/mvaMET/gbru2cov_53_Dec2012.root'),
vertexSrc = cms.InputTag("goodPVFilter"),
jetSrc = cms.InputTag("cmgPFJetSel"),
leadJetSrc = cms.InputTag("cmgPFBaseJetLead"),
recBosonSrc = cms.InputTag("cmgTauElePreSel"),
weights_gbrmetphi = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/mvaMET/gbrmetphi_53_Dec2012.root'),
weights_gbrmet = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/mvaMET/gbrmet_53_Dec2012.root'),
puJetIdLabel = cms.string('met53x')
)
process.mvaMETTauMu = cms.EDProducer("MVAMETProducerTauMu",
pucmetSrc = cms.InputTag("pcMet"),
enable = cms.bool(True),
tkmetSrc = cms.InputTag("tkMet"),
verbose = cms.untracked.bool(False),
nopumetSrc = cms.InputTag("nopuMet"),
rhoSrc = cms.InputTag("kt6PFJets","rho"),
pfmetSrc = cms.InputTag("pfMetForRegression"),
nJetsPtGt1Src = cms.InputTag("nJetsPtGt1"),
pumetSrc = cms.InputTag("puMet"),
weights_gbrmetu1cov = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/mvaMET/gbru1cov_53_Dec2012.root'),
weights_gbrmetu2cov = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/mvaMET/gbru2cov_53_Dec2012.root'),
vertexSrc = cms.InputTag("goodPVFilter"),
jetSrc = cms.InputTag("cmgPFJetSel"),
leadJetSrc = cms.InputTag("cmgPFBaseJetLead"),
recBosonSrc = cms.InputTag("cmgTauMuPreSel"),
weights_gbrmetphi = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/mvaMET/gbrmetphi_53_Dec2012.root'),
weights_gbrmet = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/mvaMET/gbrmet_53_Dec2012.root'),
puJetIdLabel = cms.string('met53x')
)
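# Recoil-corrected MET producers (correctionType = 1), correcting the MVA MET
# to the htt53X recoil fits after the lepton-pt selections.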
process.recoilCorMETDiTau = cms.EDProducer("RecoilCorrectedMETProducerDiTau",
leptonLeg = cms.int32(0),
force = cms.bool(False),
verbose = cms.untracked.bool(False),
genBosonSrc = cms.InputTag("genWorZ"),
fileZmmMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_zmm53XRR_2012_njet.root'),
fileZmmData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_datamm53XRR_2012_njet.root'),
fileCorrectTo = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection//recoilfit_htt53X_20pv_njet.root'),
enable = cms.bool(True),
correctionType = cms.int32(1),
jetSrc = cms.InputTag("cmgPFJetForRecoil"),
recBosonSrc = cms.InputTag("cmgDiTauPtSel")
)
process.recoilCorMETTauEle = cms.EDProducer("RecoilCorrectedMETProducerTauEle",
leptonLeg = cms.int32(0),
force = cms.bool(False),
verbose = cms.untracked.bool(False),
genBosonSrc = cms.InputTag("genWorZ"),
fileZmmMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_zmm53XRR_2012_njet.root'),
fileZmmData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_datamm53XRR_2012_njet.root'),
fileCorrectTo = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection//recoilfit_htt53X_20pv_njet.root'),
enable = cms.bool(True),
correctionType = cms.int32(1),
jetSrc = cms.InputTag("cmgPFJetForRecoil"),
recBosonSrc = cms.InputTag("cmgTauEleTauPtSel")
)
process.recoilCorMETTauMu = cms.EDProducer("RecoilCorrectedMETProducerTauMu",
leptonLeg = cms.int32(0),
force = cms.bool(False),
verbose = cms.untracked.bool(False),
genBosonSrc = cms.InputTag("genWorZ"),
fileZmmMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_zmm53XRR_2012_njet.root'),
fileZmmData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_datamm53XRR_2012_njet.root'),
fileCorrectTo = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection//recoilfit_htt53X_20pv_njet.root'),
enable = cms.bool(True),
correctionType = cms.int32(1),
jetSrc = cms.InputTag("cmgPFJetForRecoil"),
recBosonSrc = cms.InputTag("cmgTauMuTauPtSel")
)
process.recoilCorrectedMETDiTau = cms.EDProducer("RecoilCorrectedMETProducerDiTau",
leptonLeg = cms.int32(0),
force = cms.bool(False),
verbose = cms.untracked.bool(False),
genBosonSrc = cms.InputTag("genWorZ"),
fileZmmMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_zmm53XRR_2012_njet.root'),
fileZmmData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_datamm53XRR_2012_njet.root'),
fileCorrectTo = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_ztt53X_20pv_njet.root'),
enable = cms.bool(True),
correctionType = cms.int32(2),
jetSrc = cms.InputTag("cmgPFJetForRecoil"),
recBosonSrc = cms.InputTag("cmgDiTauSel")
)
process.recoilCorrectedMETMuEle = cms.EDProducer("RecoilCorrectedMETProducerMuEle",
leptonLeg = cms.int32(2),
force = cms.bool(False),
verbose = cms.untracked.bool(False),
genBosonSrc = cms.InputTag("genWorZ"),
fileZmmMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_zmm53X_20pv_njet.root'),
fileZmmData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_datamm53X_20pv_njet.root'),
metSrc = cms.InputTag("cmgPFMET"),
enable = cms.bool(True),
correctionType = cms.int32(2),
jetSrc = cms.InputTag("cmgPFJetForRecoil"),
recBosonSrc = cms.InputTag("cmgMuEleSel"),
fileCorrectTo = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_wjets53X_20pv_njet.root')
)
process.recoilCorrectedMETTauEle = cms.EDProducer("RecoilCorrectedMETProducerTauEle",
leptonLeg = cms.int32(2),
force = cms.bool(False),
verbose = cms.untracked.bool(False),
genBosonSrc = cms.InputTag("genWorZ"),
fileZmmMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_zmm53X_20pv_njet.root'),
fileZmmData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_datamm53X_20pv_njet.root'),
fileCorrectTo = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_wjets53X_20pv_njet.root'),
enable = cms.bool(True),
correctionType = cms.int32(2),
jetSrc = cms.InputTag("cmgPFJetForRecoil"),
recBosonSrc = cms.InputTag("cmgTauEleSel")
)
process.recoilCorrectedMETTauMu = cms.EDProducer("RecoilCorrectedMETProducerTauMu",
leptonLeg = cms.int32(2),
force = cms.bool(False),
verbose = cms.untracked.bool(False),
genBosonSrc = cms.InputTag("genWorZ"),
fileZmmMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_zmm53X_20pv_njet.root'),
fileZmmData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_datamm53X_20pv_njet.root'),
fileCorrectTo = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/Utilities/data/metRecoilCorrection/recoilfit_wjets53X_20pv_njet.root'),
enable = cms.bool(True),
correctionType = cms.int32(2),
jetSrc = cms.InputTag("cmgPFJetForRecoil"),
recBosonSrc = cms.InputTag("cmgTauMuSel")
)
process.tauEleSVFit = cms.EDProducer("TauEleWithSVFitProducer",
diTauSrc = cms.InputTag("cmgTauEleCorPreSel"),
SVFitVersion = cms.int32(2),
verbose = cms.untracked.bool(False)
)
process.tauMuSVFit = cms.EDProducer("TauMuWithSVFitProducer",
diTauSrc = cms.InputTag("cmgTauMuCorPreSel"),
SVFitVersion = cms.int32(2),
verbose = cms.untracked.bool(False)
)
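# Pile-up reweighting producers: one weight per data-taking period and MC
# scenario (Summer11, Fall11, Summer12 52X/53X), in 1D and 3D variants.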
process.vertexWeight05AugReReco = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_170249-172619_7TeV_ReReco5Aug_Collisions11_JSON_v2.pileup_v2.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Summer11MC.root')
)
process.vertexWeight2011AB = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_160404-180252_4.6invfb.pileup.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Summer11MC.root')
)
process.vertexWeight2011B = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_2011B.pileup.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Summer11MC.root')
)
process.vertexWeight2invfb = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_160404-173692_2.1invfb.pileup.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Summer11MC.root')
)
process.vertexWeight3D05AugReReco = cms.EDProducer("PileUpWeight3DProducer",
verbose = cms.untracked.bool(False),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_170249-172619_7TeV_ReReco5Aug_Collisions11_JSON_v2.pileupTruth_v2_finebin.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_Summer11MC.root')
)
process.vertexWeight3D2011AB = cms.EDProducer("PileUpWeight3DProducer",
verbose = cms.untracked.bool(False),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_160404-180252_4.6invfb.pileup.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_Summer11MC.root')
)
process.vertexWeight3D2011B = cms.EDProducer("PileUpWeight3DProducer",
verbose = cms.untracked.bool(False),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_2011B.pileup.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_Summer11MC.root')
)
process.vertexWeight3D2invfb = cms.EDProducer("PileUpWeight3DProducer",
verbose = cms.untracked.bool(False),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_160404-173692_2.1invfb.pileup.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_Summer11MC.root')
)
process.vertexWeight3DFall1105AugReReco = cms.EDProducer("PileUpWeight3DProducer",
verbose = cms.untracked.bool(False),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_170249-172619_7TeV_ReReco5Aug_Collisions11_JSON_v2.pileupTruth_v2_finebin.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_Fall11MC.root')
)
process.vertexWeight3DFall112011AB = cms.EDProducer("PileUpWeight3DProducer",
verbose = cms.untracked.bool(False),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_160404-180252_4.6invfb.pileup.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_Fall11MC.root')
)
process.vertexWeight3DFall112011B = cms.EDProducer("PileUpWeight3DProducer",
verbose = cms.untracked.bool(False),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_2011B.pileup.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_Fall11MC.root')
)
process.vertexWeight3DFall112invfb = cms.EDProducer("PileUpWeight3DProducer",
verbose = cms.untracked.bool(False),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_160404-173692_2.1invfb.pileup.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_Fall11MC.root')
)
process.vertexWeight3DFall11May10ReReco = cms.EDProducer("PileUpWeight3DProducer",
verbose = cms.untracked.bool(False),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_160404-163869_7TeV_May10ReReco_Collisions11_JSON_v3.pileupTruth_v2_finebin.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_Fall11MC.root')
)
process.vertexWeight3DFall11PromptRecov4 = cms.EDProducer("PileUpWeight3DProducer",
verbose = cms.untracked.bool(False),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_165088-167913_7TeV_PromptReco_JSON.pileupTruth_v2_finebin.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_Fall11MC.root')
)
process.vertexWeight3DFall11PromptRecov6 = cms.EDProducer("PileUpWeight3DProducer",
verbose = cms.untracked.bool(False),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_172620-173692_PromptReco_JSON.pileupTruth_v2_finebin.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_Fall11MC.root')
)
process.vertexWeight3DMay10ReReco = cms.EDProducer("PileUpWeight3DProducer",
verbose = cms.untracked.bool(False),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_160404-163869_7TeV_May10ReReco_Collisions11_JSON_v3.pileupTruth_v2_finebin.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_Summer11MC.root')
)
process.vertexWeight3DPromptRecov4 = cms.EDProducer("PileUpWeight3DProducer",
verbose = cms.untracked.bool(False),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_165088-167913_7TeV_PromptReco_JSON.pileupTruth_v2_finebin.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_Summer11MC.root')
)
process.vertexWeight3DPromptRecov6 = cms.EDProducer("PileUpWeight3DProducer",
verbose = cms.untracked.bool(False),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_172620-173692_PromptReco_JSON.pileupTruth_v2_finebin.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup3D_Summer11MC.root')
)
process.vertexWeightEPSJul8 = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Pileup_2011_EPS_8_jul.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Summer11MC.root')
)
process.vertexWeightFall1105AugReReco = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_170249-172619_7TeV_ReReco5Aug_Collisions11_JSON_v2.pileup_v2.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Fall11MC.root')
)
process.vertexWeightFall112011AB = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_160404-180252_4.6invfb.pileup.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Fall11MC.root')
)
process.vertexWeightFall112011B = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_2011B.pileup.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Fall11MC.root')
)
process.vertexWeightFall112invfb = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_160404-173692_2.1invfb.pileup.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Fall11MC.root')
)
process.vertexWeightFall11EPSJul8 = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Pileup_2011_EPS_8_jul.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Fall11MC.root')
)
process.vertexWeightFall11LeptonPhoton = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Pileup_2011_to_172802_LP_LumiScale.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Fall11MC.root')
)
process.vertexWeightFall11May10ReReco = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_160404-163869_7TeV_May10ReReco_Collisions11_JSON_v3.pileup_v2.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Fall11MC.root')
)
process.vertexWeightFall11PromptRecov4 = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_165088-167913_7TeV_PromptReco_JSON.pileup_v2.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Fall11MC.root')
)
process.vertexWeightFall11PromptRecov6 = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_172620-173692_PromptReco_JSON.pileup_v2.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Fall11MC.root')
)
process.vertexWeightLeptonPhoton = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Pileup_2011_to_172802_LP_LumiScale.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Summer11MC.root')
)
process.vertexWeightMay10ReReco = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_160404-163869_7TeV_May10ReReco_Collisions11_JSON_v3.pileup_v2.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Summer11MC.root')
)
process.vertexWeightPromptRecov4 = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_165088-167913_7TeV_PromptReco_JSON.pileup_v2.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Summer11MC.root')
)
process.vertexWeightPromptRecov6 = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(1),
inputHistData = cms.string('/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions11/7TeV/PileUp/Cert_172620-173692_PromptReco_JSON.pileup_v2.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Summer11MC.root')
)
process.vertexWeightSummer12MC53X2012ABCDData = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(2),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_2012ABCD.true.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Summer12MC53X.true.root')
)
process.vertexWeightSummer12MC53X2012BCDData = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(2),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_2012BCD.true.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Summer12MC53X.true.root')
)
process.vertexWeightSummer12MC53X2012D6fbData = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(2),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_2012D6fb_203894_207898.true.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Summer12MC53X.true.root')
)
process.vertexWeightSummer12MC53XHCPData = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(2),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_2012HCP_190456_203002.true.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Summer12MC53X.true.root')
)
process.vertexWeightSummer12MC53XICHEPData = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(2),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_2012ICHEP_start_196509.true.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Summer12MC53X.true.root')
)
process.vertexWeightSummer12MCICHEPData = cms.EDProducer("PileUpWeightProducer",
src = cms.InputTag("addPileupInfo"),
verbose = cms.untracked.bool(False),
type = cms.int32(2),
inputHistData = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_2012ICHEP_start_196509.true.root'),
inputHistMC = cms.string('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/RootTools/data/vertexWeight/Pileup_Summer12MC52X.true.root')
)
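# Candidate producers and selectors (EDFilters) for the tau-tau, tau-e,
# tau-mu and mu-e channels, plus the jet and MET selections.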
process.cmgBaseMETFromPFMET = cms.EDFilter("PFMETPOProducer",
cfg = cms.PSet(
ptThreshold = cms.double(-1.0),
inputCollection = cms.InputTag("pfMet")
),
cuts = cms.PSet(
)
)
process.cmgDiTau = cms.EDFilter("DiTauPOProducer",
cfg = cms.PSet(
leg2Collection = cms.InputTag("cmgTauSel"),
leg1Collection = cms.InputTag("cmgTauSel"),
metsigCollection = cms.InputTag(""),
metCollection = cms.InputTag("cmgPFMET")
),
cuts = cms.PSet(
baseline = cms.PSet(
tau1Leg = cms.PSet(
iso = cms.string('leg1().tauID("byCombinedIsolationDeltaBetaCorrRaw3Hits") < 10.0'),
kinematics = cms.PSet(
eta = cms.string('abs(leg1().eta())<2.1'),
pt = cms.string('leg1().pt()>35.')
),
id = cms.PSet(
decay = cms.string('leg1().tauID("decayModeFinding")')
)
),
mass = cms.string('mass()>10'),
tau2Leg = cms.PSet(
iso = cms.string('leg2().tauID("byCombinedIsolationDeltaBetaCorrRaw3Hits") < 10.0'),
kinematics = cms.PSet(
eta = cms.string('abs(leg2().eta())<2.1'),
pt = cms.string('leg2().pt()>35.')
),
id = cms.PSet(
decay = cms.string('leg2().tauID("decayModeFinding")')
)
)
)
)
)
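# Tau energy scale update: taus and the MET are shifted by +1 sigma
# (nSigma = 1, shiftTaus/shiftMet = True), i.e. the TES 'Up' variation
# this job is configured for.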
process.cmgDiTauCor = cms.EDFilter("DiTauUpdatePOProducer",
cfg = cms.PSet(
shift1Prong1Pi0 = cms.double(0.012),
diObjectCollection = cms.InputTag("mvaMETDiTau"),
leg1Collection = cms.InputTag(""),
shiftMet = cms.bool(True),
shiftTaus = cms.bool(True),
uncertainty = cms.double(0.03),
nSigma = cms.double(1),
shift3Prong = cms.double(0.012),
shift1ProngNoPi0 = cms.double(0.0),
leg2Collection = cms.InputTag(""),
ptDependence1Pi0 = cms.double(0.0),
ptDependence3Prong = cms.double(0.0)
),
cuts = cms.PSet(
)
)
process.cmgDiTauCorSVFitFullSel = cms.EDFilter("CmgDiTauSelector",
src = cms.InputTag("cmgDiTauCorSVFitPreSel"),
cut = cms.string('')
)
process.cmgDiTauCount = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("cmgDiTauSel"),
minNumber = cms.uint32(1)
)
process.cmgDiTauPreSel = cms.EDFilter("CmgDiTauSelector",
src = cms.InputTag("cmgDiTau"),
cut = cms.string('leg1().pt()>38. && leg2().pt()>38. && leg1().tauID("byCombinedIsolationDeltaBetaCorrRaw3Hits") < 10. && leg2().tauID("byCombinedIsolationDeltaBetaCorrRaw3Hits") < 10.')
)
process.cmgDiTauPtSel = cms.EDFilter("CmgDiTauSelector",
src = cms.InputTag("cmgDiTauCor"),
cut = cms.string('leg1().pt()>45. && leg2().pt()>45.')
)
process.cmgDiTauSel = cms.EDFilter("CmgDiTauSelector",
src = cms.InputTag("cmgDiTau"),
cut = cms.string(' pt()>0 ')
)
process.cmgMuEle = cms.EDFilter("MuElePOProducer",
cfg = cms.PSet(
leg2Collection = cms.InputTag("cmgElectronSel"),
leg1Collection = cms.InputTag("cmgMuonSel"),
metCollection = cms.InputTag("")
),
cuts = cms.PSet(
)
)
process.cmgMuEleCount = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("cmgMuEleSel"),
minNumber = cms.uint32(1)
)
process.cmgMuEleSel = cms.EDFilter("CmgMuEleSelector",
src = cms.InputTag("cmgMuEle"),
cut = cms.string('pt()>0')
)
process.cmgPFJetForRecoil = cms.EDFilter("CMGJetPUIDSelector",
src = cms.InputTag("cmgPFJetForRecoilPresel"),
cut = cms.string(''),
puJetIDParams = cms.VPSet(cms.PSet(
minDiscs = cms.vdouble(-0.95, -0.96, -0.94, -0.95),
maxPt = cms.double(20.0),
minPt = cms.double(0.0),
maxEtas = cms.vdouble(2.5, 2.75, 3.0, 5.0)
),
cms.PSet(
minDiscs = cms.vdouble(-0.63, -0.6, -0.55, -0.45),
maxPt = cms.double(99999.0),
minPt = cms.double(20.0),
maxEtas = cms.vdouble(2.5, 2.75, 3.0, 5.0)
)),
puIDName = cms.string('full53x')
)
process.cmgPFJetForRecoilPresel = cms.EDFilter("CmgPFJetSelector",
src = cms.InputTag("cmgPFJetSel"),
cut = cms.string('pt()>30 && abs(eta)<4.7 && getSelection("cuts_looseJetId")')
)
process.cmgPFJetPUIDSel = cms.EDFilter("CMGJetPUIDSelector",
src = cms.InputTag("cmgPFJetSel"),
cut = cms.string(''),
puJetIDParams = cms.VPSet(cms.PSet(
minDiscs = cms.vdouble(-0.95, -0.96, -0.94, -0.95),
maxPt = cms.double(20.0),
minPt = cms.double(0.0),
maxEtas = cms.vdouble(2.5, 2.75, 3.0, 5.0)
),
cms.PSet(
minDiscs = cms.vdouble(-0.63, -0.6, -0.55, -0.45),
maxPt = cms.double(99999.0),
minPt = cms.double(20.0),
maxEtas = cms.vdouble(2.5, 2.75, 3.0, 5.0)
)),
puIDName = cms.string('full53x')
)
process.cmgPFJetSel = cms.EDFilter("CmgPFJetSelector",
src = cms.InputTag("cmgPFJet"),
cut = cms.string('pt()>0')
)
process.cmgTauEle = cms.EDFilter("TauElePOProducer",
cfg = cms.PSet(
leg2Collection = cms.InputTag("cmgElectronSel"),
leg1Collection = cms.InputTag("cmgTauSel"),
metsigCollection = cms.InputTag(""),
metCollection = cms.InputTag("cmgPFMET")
),
cuts = cms.PSet(
baseline = cms.PSet(
tauLeg = cms.PSet(
iso = cms.string('leg1().tauID("byCombinedIsolationDeltaBetaCorrRaw3Hits") < 10.0'),
kinematics = cms.PSet(
eta = cms.string('abs(leg1().eta())<2.3'),
pt = cms.string('leg1().pt()>15.0')
),
id = cms.PSet(
decay = cms.string('leg1().tauID("decayModeFinding")')
)
),
eleLeg = cms.PSet(
kinematics = cms.PSet(
eta = cms.string('abs(leg2().eta())<2.1'),
pt = cms.string('leg2().pt()>20.0')
),
ID = cms.PSet(
hitsnum = cms.string('leg2().numberOfHits==0'),
mvaID = cms.string('(abs(leg2().sourcePtr().superCluster().eta())<0.8 && leg2().mvaNonTrigV0() > 0.925) || (abs(leg2().sourcePtr().superCluster().eta())>0.8 && abs(leg2().sourcePtr().superCluster().eta())<1.479 && leg2().mvaNonTrigV0() > 0.975) || (abs(leg2().sourcePtr().superCluster().eta())>1.479 && leg2().mvaNonTrigV0() > 0.985)'),
convVeto = cms.string('leg2().passConversionVeto()!=0')
)
)
)
)
)
process.cmgTauEleCor = cms.EDFilter("TauEleUpdatePOProducer",
cfg = cms.PSet(
shift1Prong1Pi0 = cms.double(0.0),
diObjectCollection = cms.InputTag("mvaMETTauEle"),
leg1Collection = cms.InputTag(""),
metCollection = cms.InputTag("recoilCorrectedMET"),
uncertainty = cms.double(0.03),
nSigma = cms.double(0),
shift3Prong = cms.double(0.0),
shift1ProngNoPi0 = cms.double(0.0),
leg2Collection = cms.InputTag(""),
ptDependence1Pi0 = cms.double(0.0),
ptDependence3Prong = cms.double(0.0)
),
cuts = cms.PSet(
)
)
process.cmgTauEleCorSVFitFullSel = cms.EDFilter("CmgTauEleSelector",
src = cms.InputTag("cmgTauEleCorSVFitPreSel"),
cut = cms.string('')
)
process.cmgTauEleCount = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("cmgTauEleSel"),
minNumber = cms.uint32(1)
)
process.cmgTauEleMVAPreSel = cms.EDFilter("TauEleUpdatePOProducer",
cfg = cms.PSet(
shift1Prong1Pi0 = cms.double(0.0),
diObjectCollection = cms.InputTag("cmgTauElePreSel"),
leg1Collection = cms.InputTag(""),
metCollection = cms.InputTag("recoilCorrectedMET"),
uncertainty = cms.double(0.03),
nSigma = cms.double(0),
shift3Prong = cms.double(0.0),
shift1ProngNoPi0 = cms.double(0.0),
leg2Collection = cms.InputTag(""),
ptDependence1Pi0 = cms.double(0.0),
ptDependence3Prong = cms.double(0.0)
),
cuts = cms.PSet(
)
)
process.cmgTauElePreSel = cms.EDFilter("CmgTauEleSelector",
src = cms.InputTag("cmgTauEle"),
cut = cms.string('getSelection("cuts_baseline")')
)
process.cmgTauEleSel = cms.EDFilter("CmgTauEleSelector",
src = cms.InputTag("cmgTauEle"),
cut = cms.string('pt()>0')
)
process.cmgTauEleTauPtSel = cms.EDFilter("CmgTauEleSelector",
src = cms.InputTag("cmgTauEleCor"),
cut = cms.string('leg1().pt()>18.')
)
process.cmgTauMu = cms.EDFilter("TauMuPOProducer",
cfg = cms.PSet(
leg2Collection = cms.InputTag("cmgMuonSel"),
leg1Collection = cms.InputTag("cmgTauSel"),
metsigCollection = cms.InputTag(""),
metCollection = cms.InputTag("cmgPFMET")
),
cuts = cms.PSet(
caloMuVeto = cms.string('leg1().eOverP()>0.2'),
baseline = cms.PSet(
tauLeg = cms.PSet(
iso = cms.string('leg1().tauID("byCombinedIsolationDeltaBetaCorrRaw3Hits") < 10.0'),
kinematics = cms.PSet(
eta = cms.string('abs(leg1().eta())<2.3'),
pt = cms.string('leg1().pt()>15.0')
),
id = cms.PSet(
muRejection = cms.string('leg1().tauID("againstMuonTight") > 0.5'),
decay = cms.string('leg1().tauID("decayModeFinding")')
)
),
muLeg = cms.PSet(
kinematics = cms.PSet(
eta = cms.string('abs(leg2().eta())<2.1'),
pt = cms.string('leg2().pt()>17.0')
)
),
mass = cms.string('mass()>10')
)
)
)
process.cmgTauMuCor = cms.EDFilter("TauMuUpdatePOProducer",
cfg = cms.PSet(
shift1Prong1Pi0 = cms.double(0.0),
diObjectCollection = cms.InputTag("mvaMETTauMu"),
leg1Collection = cms.InputTag(""),
metCollection = cms.InputTag("recoilCorrectedMET"),
uncertainty = cms.double(0.03),
nSigma = cms.double(0),
shift3Prong = cms.double(0.0),
shift1ProngNoPi0 = cms.double(0.0),
leg2Collection = cms.InputTag(""),
ptDependence1Pi0 = cms.double(0.0),
ptDependence3Prong = cms.double(0.0)
),
cuts = cms.PSet(
)
)
process.cmgTauMuCorSVFitFullSel = cms.EDFilter("CmgTauMuSelector",
src = cms.InputTag("cmgTauMuCorSVFitPreSel"),
cut = cms.string('')
)
process.cmgTauMuCount = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("cmgTauMuSel"),
minNumber = cms.uint32(1)
)
process.cmgTauMuMVAPreSel = cms.EDFilter("TauMuUpdatePOProducer",
cfg = cms.PSet(
shift1Prong1Pi0 = cms.double(0.0),
diObjectCollection = cms.InputTag("cmgTauMuPreSel"),
leg1Collection = cms.InputTag(""),
metCollection = cms.InputTag("recoilCorrectedMET"),
uncertainty = cms.double(0.03),
nSigma = cms.double(0),
shift3Prong = cms.double(0.0),
shift1ProngNoPi0 = cms.double(0.0),
leg2Collection = cms.InputTag(""),
ptDependence1Pi0 = cms.double(0.0),
ptDependence3Prong = cms.double(0.0)
),
cuts = cms.PSet(
)
)
process.cmgTauMuPreSel = cms.EDFilter("CmgTauMuSelector",
src = cms.InputTag("cmgTauMu"),
cut = cms.string('getSelection("cuts_baseline")')
)
process.cmgTauMuSel = cms.EDFilter("CmgTauMuSelector",
src = cms.InputTag("cmgTauMu"),
cut = cms.string('pt()>0')
)
process.cmgTauMuTauPtSel = cms.EDFilter("CmgTauMuSelector",
src = cms.InputTag("cmgTauMuCor"),
cut = cms.string('leg1().pt()>18.')
)
process.diTauFullSelCount = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("cmgDiTauCorSVFitFullSel"),
minNumber = cms.uint32(1)
)
process.diTauPreSelCount = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("cmgDiTauCorSVFitPreSel"),
minNumber = cms.uint32(1)
)
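# Good primary vertex selection: not fake, ndof > 4, |z| <= 24 cm, rho <= 2 cm.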
process.goodPVFilter = cms.EDFilter("VertexSelector",
filter = cms.bool(True),
src = cms.InputTag("offlinePrimaryVertices"),
cut = cms.string('!isFake && ndof > 4 && abs(z) <= 24 && position.Rho <= 2')
)
process.muEleFullSelCount = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("cmgMuEleCorSVFitFullSel"),
minNumber = cms.uint32(1)
)
process.muElePreSelCount = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("cmgMuEleCorSVFitPreSel"),
minNumber = cms.uint32(1)
)
process.tauEleFullSelCount = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("cmgTauEleCorSVFitFullSel"),
minNumber = cms.uint32(1)
)
process.tauElePreSelCount = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("cmgTauEleCorSVFitPreSel"),
minNumber = cms.uint32(1)
)
process.tauMuFullSelCount = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("cmgTauMuCorSVFitFullSel"),
minNumber = cms.uint32(1)
)
process.tauMuPreSelCount = cms.EDFilter("CandViewCountFilter",
src = cms.InputTag("cmgTauMuCorSVFitPreSel"),
minNumber = cms.uint32(1)
)
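# Output module: keeps the listed event content, but only for events
# accepted by diTauPath (see SelectEvents).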
process.diTau_fullsel_tree_CMG = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring('drop *',
'drop *',
'keep *_source_*_*',
'keep *_generator_*_*',
'keep *_TriggerResults__*',
'keep *_addPileupInfo__HLT',
'keep *_genJetSel__PAT',
'keep *_tauGenJetsSelectorAllHadrons__PAT',
'keep *_genParticlesPruned__PAT',
'keep *_vertexWeight*__*',
'keep *_ak5CaloJets_rho_RECO',
'keep *_ak5PFJets_rho_RECO',
'keep *_ak5TrackJets_rho_RECO',
'keep *_ak7BasicJets_rho_RECO',
'keep *_ak7CaloJets_rho_RECO',
'keep *_ak7PFJets_rho_RECO',
'keep *_kt4CaloJets_rho_RECO',
'keep *_kt4PFJets_rho_RECO',
'keep *_kt6CaloJets_rho_RECO',
'keep *_kt6CaloJetsCentral_rho_RECO',
'keep *_kt6PFJets_rho_RECO',
'keep *_kt6PFJetsCentralChargedPileUp_rho_RECO',
'keep *_kt6PFJetsCentralNeutral_rho_RECO',
'keep *_kt6PFJetsCentralNeutralTight_rho_RECO',
'keep *_TriggerResults__RECO',
'keep *_offlinePrimaryVertices__RECO',
'keep *_pfMetSignificance__PAT',
'keep *_ak5PFJetsCHS_rho_PAT',
'keep *_ak5PFJetsCHSpruned_rho_PAT',
'keep *_kt6PFJetsCHSForIso_rho_PAT',
'keep *_kt6PFJetsForIso_rho_PAT',
'keep *_kt6PFJetsForRhoComputationVoronoi_rho_PAT',
'keep *_TriggerResults__PAT',
'keep *_nJetsPtGt1__PAT',
'keep *_cmgPFBaseJetLead__PAT',
'keep *_cmgPFBaseJetLeadCHS__PAT',
'keep *_cmgPFMET__PAT',
'keep *_cmgPFMETRaw__PAT',
'keep *_cmgDiElectronSel__PAT',
'keep *_cmgDiMuonSel__PAT',
'keep *_cmgElectronSel__PAT',
'keep *_cmgMuonSel__PAT',
'keep *_cmgPFJetLooseJetIdFailed__PAT',
'keep *_cmgPFJetMediumJetIdFailed__PAT',
'keep *_cmgPFJetSel__PAT',
'keep *_cmgPFJetSelCHS__PAT',
'keep *_cmgPFJetTightJetIdFailed__PAT',
'keep *_cmgPFJetVeryLooseJetId95Failed__PAT',
'keep *_cmgPFJetVeryLooseJetId95gammaFailed__PAT',
'keep *_cmgPFJetVeryLooseJetId95h0Failed__PAT',
'keep *_cmgPFJetVeryLooseJetId99Failed__PAT',
'keep *_cmgPhotonSel__PAT',
'keep *_cmgStructuredPFJetSel__PAT',
'keep *_cmgTriggerObjectListSel__PAT',
'keep *_cmgTriggerObjectSel__PAT',
'keep *_patElectronsWithTrigger__PAT',
'keep *_patMuonsWithTrigger__PAT',
'keep *_nopuMet__PAT',
'keep *_pcMet__PAT',
'keep *_pfMetForRegression__PAT',
'keep *_puMet__PAT',
'keep *_tkMet__PAT',
'keep *_TriggerResults__H2TAUTAU',
'keep *_cmgDiTauCorSVFitFullSel__H2TAUTAU',
'keep *_mvaMETdiTau__H2TAUTAU',
'keep *_goodPVFilter__H2TAUTAU',
'keep *_genParticles_*_*'),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('diTauPath')
),
fileName = cms.untracked.string('diTau_fullsel_tree_CMG.root')
)
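# Sequences assembling the per-channel workflow:
# MVA MET -> tau/MET corrections -> SVFit -> full selection,
# together with the generator-level and pile-up weight sequences.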
process.diTauPreSelSkimSequence = cms.Sequence(process.diTauPreSelCount)
process.muEleFullSelSkimSequence = cms.Sequence(process.muEleFullSelCount)
process.tauEleMvaMETRecoilSequence = cms.Sequence(process.goodPVFilter+process.mvaMETTauEle+process.cmgTauEleCor+process.cmgTauEleTauPtSel+process.recoilCorMETTauEle)
process.tauEleFullSelSkimSequence = cms.Sequence(process.tauEleFullSelCount)
process.mvaMETSequence = cms.Sequence(process.goodPVFilter+process.mvaMETDiTau+process.cmgDiTauCor+process.cmgDiTauPtSel+process.recoilCorMETDiTau)
process.tauMuStdSequence = cms.Sequence(process.cmgTauMu+process.cmgTauMuPreSel)
process.tauEleStdSequence = cms.Sequence(process.cmgTauEle+process.cmgTauElePreSel)
process.tauMuMvaMETrecoilSequence = cms.Sequence(process.goodPVFilter+process.mvaMETTauMu+process.cmgTauMuCor+process.cmgTauMuTauPtSel+process.recoilCorMETTauMu)
process.diTauFullSelSkimSequence = cms.Sequence(process.diTauFullSelCount)
process.metRecoilCorrectionInputSequence = cms.Sequence(process.cmgPFJetForRecoilPresel+process.cmgPFJetForRecoil+process.genWorZ)
process.metRecoilCorrectionSequence = cms.Sequence(process.metRecoilCorrectionInputSequence+process.recoilCorrectedMETTauMu+process.recoilCorrectedMETTauEle+process.recoilCorrectedMETMuEle)
process.diTauCorSVFitSequence = cms.Sequence(process.mvaMETSequence+process.cmgDiTauCorSVFitPreSel+process.cmgDiTauCorSVFitFullSel)
process.tauElePreSelSkimSequence = cms.Sequence(process.tauElePreSelCount)
process.muElePreSelSkimSequence = cms.Sequence(process.muElePreSelCount)
process.tauEleCorSVFitSequence = cms.Sequence(process.tauEleMvaMETRecoilSequence+process.cmgTauEleCorSVFitPreSel+process.cmgTauEleCorSVFitFullSel)
process.vertexWeightSequence = cms.Sequence(process.vertexWeightEPSJul8+process.vertexWeightLeptonPhoton+process.vertexWeightMay10ReReco+process.vertexWeightPromptRecov4+process.vertexWeight05AugReReco+process.vertexWeightPromptRecov6+process.vertexWeight2invfb+process.vertexWeight2011B+process.vertexWeight2011AB+process.vertexWeightFall11EPSJul8+process.vertexWeightFall11LeptonPhoton+process.vertexWeightFall11May10ReReco+process.vertexWeightFall11PromptRecov4+process.vertexWeightFall1105AugReReco+process.vertexWeightFall11PromptRecov6+process.vertexWeightFall112invfb+process.vertexWeightFall112011B+process.vertexWeightFall112011AB+process.vertexWeight3DMay10ReReco+process.vertexWeight3DPromptRecov4+process.vertexWeight3D05AugReReco+process.vertexWeight3DPromptRecov6+process.vertexWeight3D2invfb+process.vertexWeight3D2011B+process.vertexWeight3D2011AB+process.vertexWeight3DFall11May10ReReco+process.vertexWeight3DFall11PromptRecov4+process.vertexWeight3DFall1105AugReReco+process.vertexWeight3DFall11PromptRecov6+process.vertexWeight3DFall112invfb+process.vertexWeight3DFall112011B+process.vertexWeight3DFall112011AB+process.vertexWeightSummer12MCICHEPData+process.vertexWeightSummer12MC53XICHEPData+process.vertexWeightSummer12MC53XHCPData+process.vertexWeightSummer12MC53X2012D6fbData+process.vertexWeightSummer12MC53X2012ABCDData+process.vertexWeightSummer12MC53X2012BCDData)
process.diTauStdSequence = cms.Sequence(process.cmgDiTau+process.cmgDiTauPreSel)
process.tauMuPreSelSkimSequence = cms.Sequence(process.tauMuPreSelCount)
process.tauMuFullSelSkimSequence = cms.Sequence(process.tauMuFullSelCount)
process.genSequence = cms.Sequence(process.metRecoilCorrectionInputSequence+process.vertexWeightSequence)
process.tauEleSequence = cms.Sequence(process.tauEleStdSequence+process.tauEleCorSVFitSequence)
process.tauMuCorSVFitSequence = cms.Sequence(process.tauMuMvaMETrecoilSequence+process.cmgTauMuCorSVFitPreSel+process.cmgTauMuCorSVFitFullSel)
process.tauMuSequence = cms.Sequence(process.tauMuStdSequence+process.tauMuCorSVFitSequence)
process.diTauSequence = cms.Sequence(process.diTauStdSequence+process.diTauCorSVFitSequence)
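# One path per channel; only diTauPath feeds the output module.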
process.diTauPath = cms.Path(process.genSequence+process.diTauSequence+process.diTauFullSelSkimSequence)
process.tauElePath = cms.Path(process.genSequence+process.tauEleSequence+process.tauEleFullSelSkimSequence)
process.tauMuPath = cms.Path(process.genSequence+process.tauMuSequence+process.tauMuFullSelSkimSequence)
process.outpath = cms.EndPath(process.diTau_fullsel_tree_CMG)
process.MessageLogger = cms.Service("MessageLogger",
suppressInfo = cms.untracked.vstring(),
debugs = cms.untracked.PSet(
placeholder = cms.untracked.bool(True)
),
suppressDebug = cms.untracked.vstring(),
cout = cms.untracked.PSet(
placeholder = cms.untracked.bool(True)
),
cerr_stats = cms.untracked.PSet(
optionalPSet = cms.untracked.bool(True),
output = cms.untracked.string('cerr'),
threshold = cms.untracked.string('WARNING')
),
warnings = cms.untracked.PSet(
placeholder = cms.untracked.bool(True)
),
default = cms.untracked.PSet(
),
errors = cms.untracked.PSet(
placeholder = cms.untracked.bool(True)
),
cerr = cms.untracked.PSet(
INFO = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
noTimeStamps = cms.untracked.bool(False),
FwkReport = cms.untracked.PSet(
optionalPSet = cms.untracked.bool(True),
reportEvery = cms.untracked.int32(5000),
limit = cms.untracked.int32(10000000)
),
default = cms.untracked.PSet(
limit = cms.untracked.int32(10000000)
),
Root_NoDictionary = cms.untracked.PSet(
optionalPSet = cms.untracked.bool(True),
limit = cms.untracked.int32(0)
),
optionalPSet = cms.untracked.bool(True),
FwkJob = cms.untracked.PSet(
optionalPSet = cms.untracked.bool(True),
limit = cms.untracked.int32(0)
),
FwkSummary = cms.untracked.PSet(
optionalPSet = cms.untracked.bool(True),
reportEvery = cms.untracked.int32(1),
limit = cms.untracked.int32(10000000)
),
threshold = cms.untracked.string('INFO')
),
FrameworkJobReport = cms.untracked.PSet(
optionalPSet = cms.untracked.bool(True),
default = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
FwkJob = cms.untracked.PSet(
optionalPSet = cms.untracked.bool(True),
limit = cms.untracked.int32(10000000)
)
),
suppressWarning = cms.untracked.vstring(),
statistics = cms.untracked.vstring('cerr_stats'),
infos = cms.untracked.PSet(
optionalPSet = cms.untracked.bool(True),
Root_NoDictionary = cms.untracked.PSet(
optionalPSet = cms.untracked.bool(True),
limit = cms.untracked.int32(0)
),
placeholder = cms.untracked.bool(True)
),
destinations = cms.untracked.vstring('warnings',
'errors',
'infos',
'debugs',
'cout',
'cerr'),
debugModules = cms.untracked.vstring(),
categories = cms.untracked.vstring('FwkJob',
'FwkReport',
'FwkSummary',
'Root_NoDictionary'),
fwkJobReports = cms.untracked.vstring('FrameworkJobReport')
)
process.HepPDTESSource = cms.ESSource("HepPDTESSource",
pdtFileName = cms.FileInPath('SimGeneral/HepPDTESSource/data/pythiaparticle.tbl')
)
process.diObjectFactory = cms.PSet(
leg2Collection = cms.InputTag("dummy"),
leg1Collection = cms.InputTag("dummy"),
metCollection = cms.InputTag("")
)
process.diTauCuts = cms.PSet(
baseline = cms.PSet(
tau1Leg = cms.PSet(
iso = cms.string('leg1().tauID("byCombinedIsolationDeltaBetaCorrRaw3Hits") < 10.0'),
kinematics = cms.PSet(
eta = cms.string('abs(leg1().eta())<2.3'),
pt = cms.string('leg1().pt()>15.0')
),
id = cms.PSet(
decay = cms.string('leg1().tauID("decayModeFinding")')
)
),
mass = cms.string('mass()>10'),
tau2Leg = cms.PSet(
iso = cms.string('leg2().tauID("byCombinedIsolationDeltaBetaCorrRaw3Hits") < 10.0'),
kinematics = cms.PSet(
eta = cms.string('abs(leg2().eta())<2.3'),
pt = cms.string('leg2().pt()>15.0')
),
id = cms.PSet(
decay = cms.string('leg2().tauID("decayModeFinding")')
)
)
)
)
process.ditauFactory = cms.PSet(
leg2Collection = cms.InputTag("cmgTauSel"),
leg1Collection = cms.InputTag("cmgTauSel"),
metsigCollection = cms.InputTag(""),
metCollection = cms.InputTag("cmgPFMET")
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.maxLuminosityBlocks = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.muEleFactory = cms.PSet(
leg2Collection = cms.InputTag("cmgElectronSel"),
leg1Collection = cms.InputTag("cmgMuonSel"),
metCollection = cms.InputTag("")
)
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(False)
)
process.puJetIdAlgo = cms.PSet(
tmvaVariables = cms.vstring('nvtx',
'jetPt',
'jetEta',
'jetPhi',
'dZ',
'beta',
'betaStar',
'nCharged',
'nNeutrals',
'dR2Mean',
'ptD',
'frac01',
'frac02',
'frac03',
'frac04',
'frac05'),
tmvaMethod = cms.string('JetIDMVAMET'),
cutBased = cms.bool(False),
tmvaWeights = cms.string('CMGTools/External/data/TMVAClassificationCategory_JetID_MET_53X_Dec2012.weights.xml'),
tmvaSpectators = cms.vstring(),
label = cms.string('met53x'),
version = cms.int32(-1),
JetIdParams = cms.PSet(
Pt2030_Tight = cms.vdouble(-2, -2, -2, -2, -2),
Pt2030_Loose = cms.vdouble(-2, -2, -2, -2, -2),
Pt3050_Medium = cms.vdouble(-2, -2, -2, -2, -2),
Pt1020_MET = cms.vdouble(-0.2, -0.2, -0.5, -0.3),
Pt2030_Medium = cms.vdouble(-2, -2, -2, -2, -2),
Pt010_Tight = cms.vdouble(-2, -2, -2, -2, -2),
Pt1020_Tight = cms.vdouble(-2, -2, -2, -2, -2),
Pt3050_MET = cms.vdouble(-0.2, -0.2, 0.0, 0.2),
Pt010_MET = cms.vdouble(-0.2, -0.3, -0.5, -0.5),
Pt1020_Loose = cms.vdouble(-2, -2, -2, -2, -2),
Pt010_Medium = cms.vdouble(-2, -2, -2, -2, -2),
Pt1020_Medium = cms.vdouble(-2, -2, -2, -2, -2),
Pt2030_MET = cms.vdouble(-0.2, -0.2, -0.2, 0.1),
Pt010_Loose = cms.vdouble(-2, -2, -2, -2, -2),
Pt3050_Loose = cms.vdouble(-2, -2, -2, -2, -2),
Pt3050_Tight = cms.vdouble(-2, -2, -2, -2, -2)
),
impactParTkThreshold = cms.double(1.0)
)
process.tauEFactory = cms.PSet(
leg2Collection = cms.InputTag("cmgElectronSel"),
leg1Collection = cms.InputTag("cmgTauSel"),
metCollection = cms.InputTag("cmgPFMET")
)
process.tauEleCuts = cms.PSet(
baseline = cms.PSet(
tauLeg = cms.PSet(
iso = cms.string('leg1().tauID("byCombinedIsolationDeltaBetaCorrRaw3Hits") < 10.0'),
kinematics = cms.PSet(
eta = cms.string('abs(leg1().eta())<2.3'),
pt = cms.string('leg1().pt()>15.0')
),
id = cms.PSet(
decay = cms.string('leg1().tauID("decayModeFinding")')
)
),
eleLeg = cms.PSet(
kinematics = cms.PSet(
eta = cms.string('abs(leg2().eta())<2.1'),
pt = cms.string('leg2().pt()>20.0')
),
ID = cms.PSet(
hitsnum = cms.string('leg2().numberOfHits==0'),
mvaID = cms.string('(abs(leg2().sourcePtr().superCluster().eta())<0.8 && leg2().mvaNonTrigV0() > 0.925) || (abs(leg2().sourcePtr().superCluster().eta())>0.8 && abs(leg2().sourcePtr().superCluster().eta())<1.479 && leg2().mvaNonTrigV0() > 0.975) || (abs(leg2().sourcePtr().superCluster().eta())>1.479 && leg2().mvaNonTrigV0() > 0.985)'),
convVeto = cms.string('leg2().passConversionVeto()!=0')
)
)
)
)
process.tauMuCuts = cms.PSet(
caloMuVeto = cms.string('leg1().eOverP()>0.2'),
baseline = cms.PSet(
tauLeg = cms.PSet(
iso = cms.string('leg1().tauID("byCombinedIsolationDeltaBetaCorrRaw3Hits") < 10.0'),
kinematics = cms.PSet(
eta = cms.string('abs(leg1().eta())<2.3'),
pt = cms.string('leg1().pt()>15.0')
),
id = cms.PSet(
muRejection = cms.string('leg1().tauID("againstMuonTight") > 0.5'),
decay = cms.string('leg1().tauID("decayModeFinding")')
)
),
muLeg = cms.PSet(
kinematics = cms.PSet(
eta = cms.string('abs(leg2().eta())<2.1'),
pt = cms.string('leg2().pt()>17.0')
)
),
mass = cms.string('mass()>10')
)
)
process.tauMuFactory = cms.PSet(
leg2Collection = cms.InputTag("cmgMuonSel"),
leg1Collection = cms.InputTag("cmgTauSel"),
metCollection = cms.InputTag("cmgPFMET")
)
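# Note: only the di-tau path plus the output EndPath are scheduled below;
# tauElePath and tauMuPath are defined above but not included in the schedule.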
process.schedule = cms.Schedule(*[ process.diTauPath, process.outpath ])
# ---- file: /beautifulSoup/scraping_bs4.py (repo: Gulliverinv/webscraping_asyncio_2016) ----
from Scraping import Scraping
if __name__ == "__main__":
url = 'http://python.ie/pycon-2016/schedule/'
scraping = Scraping()
#scraping.scrapingImagesPdf(url)
    scraping.scrapingBeautifulSoup(url)
# ---- file: /18.2/39_find_greater_numbers/find_greater_numbers.py (repo: SNSTRUTHERS/springboard) ----
#!/usr/bin/env python
def find_greater_numbers(nums):
"""Return # of times a number is followed by a greater number.
For example, for [1, 2, 3], the answer is 3:
- the 1 is followed by the 2 *and* the 3
- the 2 is followed by the 3
Examples:
>>> find_greater_numbers([1, 2, 3])
3
>>> find_greater_numbers([6, 1, 2, 7])
4
>>> find_greater_numbers([5, 4, 3, 2, 1])
0
>>> find_greater_numbers([])
0
"""
count = 0
for i in range(len(nums)):
for j in range(i, len(nums)):
if nums[j] > nums[i]:
count += 1
return count
# ---- file: /products/migrations/0004_auto_20210420_2359.py (repo: tgmike/DjangoTut) ----
# Generated by Django 2.0.7 on 2021-04-20 23:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0003_auto_20210420_2357'),
]
operations = [
migrations.AlterField(
model_name='product',
name='summary',
field=models.TextField(),
),
]
# ---- file: /src/phosgrapher.py (repo: fishnsotong/hexagonal-silica) ----
# @author: Wayne Yeo <fishnsotong>
# @date: 2017-08-10T12:53:16+08:00
# @email: [email protected]
# @Last modified by: fishnsotong
# @Last modified time: 2017-12-23T21:32:33+08:00
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
# importing data
volume_total, pHdata = np.loadtxt("../data/titration1.csv", unpack=True,
delimiter=",", skiprows=1, dtype=float)
# declaring constants
Ka1 = 7.6e-3
Ka2 = 6.2e-8
Ka3 = 2.1e-13
pHeqv = 7.02
# defining functions
pH = np.linspace(0, 14, 1000)
conc_hydronium = 10 ** -pH
conc_hydronium_data = 10 ** -pHdata
def H_value(conc_hydronium, Ka1, Ka2, Ka3):
H = conc_hydronium ** 3 + conc_hydronium ** 2 * Ka1 + conc_hydronium * Ka1 * Ka2 + Ka1 * Ka2 * Ka3
return H
H = H_value(conc_hydronium, Ka1, Ka2, Ka3)
H_data = H_value(conc_hydronium_data, Ka1, Ka2, Ka3)
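# H is the common denominator of the triprotic-acid distribution ("alpha")
# fractions computed below:
#   f(H3PO4)   = [H+]^3 / H         f(H2PO4-) = Ka1*[H+]^2 / H
#   f(HPO4^2-) = Ka1*Ka2*[H+] / H   f(PO4^3-) = Ka1*Ka2*Ka3 / H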
f_h3po4 = conc_hydronium ** 3 / H
f_h2po4 = Ka1 * conc_hydronium ** 2 / H
f_hpo4 = Ka1 * Ka2 * conc_hydronium / H
f_po4 = Ka1 * Ka2 * Ka3 / H
f_h3po4_data = conc_hydronium_data ** 3 / H_data
f_h2po4_data = Ka1 * conc_hydronium_data ** 2 / H_data
f_hpo4_data = Ka1 * Ka2 * conc_hydronium_data / H_data
f_po4_data = Ka1 * Ka2 * Ka3 / H_data
# plotting
plt.figure(figsize=(15, 9.5))
plt.subplot(211)
plt.plot(pH, f_h3po4, "r-", label=r"$\mathregular{H_3PO_4}$")
plt.plot(pH, f_h2po4, "b-", label=r"$\mathregular{H_2PO_4^{-}}$")
plt.plot(pH, f_hpo4, "g-", label=r"$\mathregular{HPO_4^{2-}}$")
plt.plot(pH, f_po4, "y-", label=r"$\mathregular{PO_4^{3-}}$")
plt.subplot(212)
plt.plot(pHdata, f_h3po4_data, "ro", label=r"$\mathregular{H_3PO_4}$")
plt.plot(pHdata, f_h2po4_data, "bo", label=r"$\mathregular{H_2PO_4^{-}}$")
plt.plot(pHdata, f_hpo4_data, "go", label=r"$\mathregular{HPO_4^{2-}}$")
plt.plot(pHdata, f_po4_data, "yo", label=r"$\mathregular{PO_4^{3-}}$")
# labels
plt.subplot(211)
plt.ylim(0, 1)
plt.xlim(0, 14)
plt.legend(loc='best', prop={'size': 10})
plt.xlabel("pH")
plt.ylabel("Fraction of species")
plt.title("Fractional composition of phosphoric acid ions")
plt.xticks([0, 7, 14])
plt.yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
plt.subplot(212)
plt.ylim(0, 1)
plt.xlim(0, 14)
plt.legend(loc='best', prop={'size': 10})
plt.xlabel("pH")
plt.ylabel("Fraction of species")
plt.title("Fractional composition of phosphoric acid ions")
plt.xticks([0, 7, 14])
plt.yticks([0.0, 0.2, 0.4, 0.6, 0.8, 1.0])
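# The hard-coded annotations below match the computed species fractions at the
# equivalence pH (pHeqv = 7.02 above).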
plt.text(5, .65, r'$\mathregular{H_2PO_4^{-}=0.6063}$')
plt.text(5, .35, r'$\mathregular{HPO_4^{2-}=0.3936}$')
# save plot to file
now = str(dt.datetime.today())
plt.savefig("../figs/phosfraction"+ now +".png", dpi = 500)
# display plot on screen
plt.show()
# ---- file: /alien_invasion/alien_invasion.py (repo: postavtezachet/hghg) ----
import pygame
from pygame.sprite import Group
from settings import Settings
from ship import Ship
from game_stats import GameStats
from scoreboard import Scoreboard
from button import Button
import game_functions as gf
def run_game():
pygame.init()
ai_settings = Settings()
screen = pygame.display.set_mode((ai_settings.screen_width,
ai_settings.screen_height))
pygame.display.set_caption("Alien invasion")
    bg_color = ai_settings.bg_color  # note: bg_color is not referenced again in this file
ship = Ship(ai_settings, screen)
stats = GameStats(ai_settings)
sb = Scoreboard(ai_settings, screen, stats)
play_button = Button(ai_settings, screen, "Play")
bullets = Group()
aliens = Group()
gf.create_fleet(ai_settings, screen, ship, aliens)
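    # Main loop: poll input every frame; advance the simulation only while a
    # game is active, but always redraw the screen.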
while True:
gf.check_events(ai_settings, screen, stats, sb, play_button, ship,
aliens, bullets)
if stats.game_active:
ship.update()
gf.update_bullets(ai_settings, screen, stats, sb,
ship, aliens, bullets)
gf.update_aliens(ai_settings, screen, stats, sb, ship, aliens,
bullets)
        gf.update_screen(ai_settings, screen, stats, sb, ship, aliens,
bullets, play_button)
run_game()
# ---- file: /matrix/clock_matrix_end.py (repo: mrharris/lessons) ----
import pymel.core as pm
import math
pm.newFile(force=True)
hand = pm.polyCube()[0]
frame = 1  # unused: the for-loop below rebinds frame each iteration
# we're going to create 4 matrices
# S, T1, R, T2
# pymel has a special "Matrix" datatype for working with matrices
# So far we only used built in datatypes (strings, ints, floats...)
# We define a matrix by passing 16 floats!
for frame in range(200):
pm.currentTime(frame)
# scale it down skinny in both sx and sz
S = pm.dt.Matrix(
0.1, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.1, 0.0,
0.0, 0.0, 0.0, 1.0
)
# raise it up ty so it sits on the origin
T1 = pm.dt.Matrix(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.5, 0.0, 1.0
)
# rotate it around the origin in rz
# work out how far a second hand moves in 1/24th of a second
a = math.radians(360/60/24.0*frame) * -1
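    # 360 deg / 60 s / 24 fps = 0.25 degrees per frame; the -1 makes the hand
    # sweep clockwise.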
R = pm.dt.Matrix(
math.cos(a), math.sin(a), 0.0, 0.0,
-math.sin(a), math.cos(a), 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0
)
# move it into position in the scene
# worldspace (say, (9, 11, -6))
T2 = pm.dt.Matrix(
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
9.0, 11.0, -6.0, 1.0
)
# maya uses row vectors rather than column vectors
# which means we need to pre-multiply rather
# than post-multiply the matrices. In most
# maths books the order of multiplication
# would be reversed (column major)
hand.setMatrix(S*T1*R*T2)
pm.setKeyframe(hand)
# ---- file: /sdk/storage/azure-storage-blob/tests/test_blob_tags.py ----
# ---- (repo: TommyZihao/azure-sdk-for-python; licenses: LicenseRef-scancode-generic-cla, MIT, LGPL-2.1-or-later) ----
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
from datetime import datetime, timedelta
from enum import Enum
from time import sleep
import pytest
from devtools_testutils import StorageAccountPreparer
try:
from urllib.parse import quote
except ImportError:
from urllib2 import quote
from _shared.testcase import StorageTestCase, GlobalStorageAccountPreparer, GlobalResourceGroupPreparer
from azure.core.exceptions import (
ResourceExistsError)
from azure.storage.blob import (
BlobServiceClient,
BlobBlock, generate_account_sas, ResourceTypes, AccountSasPermissions, generate_container_sas,
ContainerSasPermissions, BlobClient, generate_blob_sas, BlobSasPermissions)
#------------------------------------------------------------------------------
TEST_CONTAINER_PREFIX = 'container'
TEST_BLOB_PREFIX = 'blob'
#------------------------------------------------------------------------------
class StorageBlobTagsTest(StorageTestCase):
def _setup(self, storage_account, key):
self.bsc = BlobServiceClient(self.account_url(storage_account, "blob"), credential=key)
self.container_name = self.get_resource_name("container")
if self.is_live:
container = self.bsc.get_container_client(self.container_name)
try:
container.create_container(timeout=5)
except ResourceExistsError:
pass
self.byte_data = self.get_random_bytes(1024)
def _teardown(self, FILE_PATH):
if os.path.isfile(FILE_PATH):
try:
os.remove(FILE_PATH)
except:
pass
#--Helpers-----------------------------------------------------------------
def _get_blob_reference(self):
return self.get_resource_name(TEST_BLOB_PREFIX)
def _create_block_blob(self, tags=None, container_name=None, blob_name=None):
blob_name = blob_name or self._get_blob_reference()
blob_client = self.bsc.get_blob_client(container_name or self.container_name, blob_name)
resp = blob_client.upload_blob(self.byte_data, length=len(self.byte_data), overwrite=True, tags=tags)
return blob_client, resp
def _create_empty_block_blob(self):
blob_name = self._get_blob_reference()
blob_client = self.bsc.get_blob_client(self.container_name, blob_name)
resp = blob_client.upload_blob(b'', length=0, overwrite=True)
return blob_client, resp
def _create_append_blob(self, tags=None):
blob_name = self._get_blob_reference()
blob_client = self.bsc.get_blob_client(self.container_name, blob_name)
resp = blob_client.create_append_blob(tags=tags)
return blob_client, resp
def _create_page_blob(self, tags=None):
blob_name = self._get_blob_reference()
blob_client = self.bsc.get_blob_client(self.container_name, blob_name)
resp = blob_client.create_page_blob(tags=tags, size=512)
return blob_client, resp
def _create_container(self, prefix="container"):
container_name = self.get_resource_name(prefix)
try:
self.bsc.create_container(container_name)
except:
pass
return container_name
#-- test cases for blob tags ----------------------------------------------
@GlobalResourceGroupPreparer()
@StorageAccountPreparer(random_name_enabled=True, location="canadacentral", name_prefix='pytagstorage')
def test_set_blob_tags(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
blob_client, _ = self._create_block_blob()
# Act
blob_tags = {"tag1": "firsttag", "tag2": "secondtag", "tag3": "thirdtag"}
resp = blob_client.set_blob_tags(blob_tags)
# Assert
self.assertIsNotNone(resp)
@pytest.mark.playback_test_only
@GlobalStorageAccountPreparer()
def test_set_blob_tags_for_a_version(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
# use this version to set tag
blob_client, resp = self._create_block_blob()
self._create_block_blob()
# TODO: enable versionid for this account and test set tag for a version
# Act
tags = {"tag1": "firsttag", "tag2": "secondtag", "tag3": "thirdtag"}
resp = blob_client.set_blob_tags(tags, version_id=resp['version_id'])
# Assert
self.assertIsNotNone(resp)
@GlobalResourceGroupPreparer()
@StorageAccountPreparer(random_name_enabled=True, location="canadacentral", name_prefix='pytagstorage')
def test_get_blob_tags(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
blob_client, resp = self._create_block_blob()
# Act
tags = {"tag1": "firsttag", "tag2": "secondtag", "tag3": "thirdtag"}
blob_client.set_blob_tags(tags)
resp = blob_client.get_blob_tags()
# Assert
self.assertIsNotNone(resp)
self.assertEqual(len(resp), 3)
for key, value in resp.items():
self.assertEqual(tags[key], value)
@GlobalResourceGroupPreparer()
@StorageAccountPreparer(random_name_enabled=True, location="canadacentral", name_prefix='pytagstorage')
def test_get_blob_tags_for_a_snapshot(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
tags = {"+-./:=_ ": "firsttag", "tag2": "+-./:=_", "+-./:=_1": "+-./:=_"}
blob_client, resp = self._create_block_blob(tags=tags)
snapshot = blob_client.create_snapshot()
snapshot_client = self.bsc.get_blob_client(self.container_name, blob_client.blob_name, snapshot=snapshot)
resp = snapshot_client.get_blob_tags()
# Assert
self.assertIsNotNone(resp)
self.assertEqual(len(resp), 3)
for key, value in resp.items():
self.assertEqual(tags[key], value)
@GlobalResourceGroupPreparer()
@StorageAccountPreparer(random_name_enabled=True, location="canadacentral", name_prefix='pytagstorage')
def test_upload_block_blob_with_tags(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
tags = {"tag1": "firsttag", "tag2": "secondtag", "tag3": "thirdtag"}
blob_client, resp = self._create_block_blob(tags=tags)
resp = blob_client.get_blob_tags()
# Assert
self.assertIsNotNone(resp)
self.assertEqual(len(resp), 3)
@GlobalResourceGroupPreparer()
@StorageAccountPreparer(random_name_enabled=True, location="canadacentral", name_prefix='pytagstorage')
def test_get_blob_properties_returns_tags_num(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
tags = {"tag1": "firsttag", "tag2": "secondtag", "tag3": "thirdtag"}
blob_client, resp = self._create_block_blob(tags=tags)
resp = blob_client.get_blob_properties()
downloaded = blob_client.download_blob()
# Assert
self.assertIsNotNone(resp)
self.assertEqual(resp.tag_count, len(tags))
self.assertEqual(downloaded.properties.tag_count, len(tags))
@GlobalResourceGroupPreparer()
@StorageAccountPreparer(random_name_enabled=True, location="canadacentral", name_prefix='pytagstorage')
def test_create_append_blob_with_tags(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
tags = {"+-./:=_ ": "firsttag", "tag2": "+-./:=_", "+-./:=_1": "+-./:=_"}
blob_client, resp = self._create_append_blob(tags=tags)
resp = blob_client.get_blob_tags()
# Assert
self.assertIsNotNone(resp)
self.assertEqual(len(resp), 3)
@GlobalResourceGroupPreparer()
@StorageAccountPreparer(random_name_enabled=True, location="canadacentral", name_prefix='pytagstorage')
def test_create_page_blob_with_tags(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
tags = {"tag1": "firsttag", "tag2": "secondtag", "tag3": "thirdtag"}
blob_client, resp = self._create_page_blob(tags=tags)
resp = blob_client.get_blob_tags()
# Assert
self.assertIsNotNone(resp)
self.assertEqual(len(resp), 3)
@GlobalResourceGroupPreparer()
@StorageAccountPreparer(random_name_enabled=True, location="canadacentral", name_prefix='pytagstorage')
def test_commit_block_list_with_tags(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
tags = {"tag1": "firsttag", "tag2": "secondtag", "tag3": "thirdtag"}
blob_client, resp = self._create_empty_block_blob()
blob_client.stage_block('1', b'AAA')
blob_client.stage_block('2', b'BBB')
blob_client.stage_block('3', b'CCC')
# Act
block_list = [BlobBlock(block_id='1'), BlobBlock(block_id='2'), BlobBlock(block_id='3')]
blob_client.commit_block_list(block_list, tags=tags)
resp = blob_client.get_blob_tags()
# Assert
self.assertIsNotNone(resp)
self.assertEqual(len(resp), len(tags))
@GlobalResourceGroupPreparer()
@StorageAccountPreparer(random_name_enabled=True, location="canadacentral", name_prefix='pytagstorage')
def test_start_copy_from_url_with_tags(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
tags = {"tag1": "firsttag", "tag2": "secondtag", "tag3": "thirdtag"}
blob_client, resp = self._create_block_blob()
# Act
sourceblob = '{0}/{1}/{2}'.format(
self.account_url(storage_account, "blob"), self.container_name, blob_client.blob_name)
copyblob = self.bsc.get_blob_client(self.container_name, 'blob1copy')
copy = copyblob.start_copy_from_url(sourceblob, tags=tags)
# Assert
self.assertIsNotNone(copy)
self.assertEqual(copy['copy_status'], 'success')
self.assertFalse(isinstance(copy['copy_status'], Enum))
self.assertIsNotNone(copy['copy_id'])
copy_content = copyblob.download_blob().readall()
self.assertEqual(copy_content, self.byte_data)
resp = copyblob.get_blob_tags()
# Assert
self.assertIsNotNone(resp)
self.assertEqual(len(resp), len(tags))
@GlobalResourceGroupPreparer()
@StorageAccountPreparer(random_name_enabled=True, location="canadacentral", name_prefix='pytagstorage')
def test_list_blobs_returns_tags(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
tags = {"tag1": "firsttag", "tag2": "secondtag", "tag3": "thirdtag"}
self._create_block_blob(tags=tags)
container = self.bsc.get_container_client(self.container_name)
blob_list = container.list_blobs(include="tags")
#Assert
for blob in blob_list:
self.assertEqual(blob.tag_count, len(tags))
for key, value in blob.tags.items():
self.assertEqual(tags[key], value)
@GlobalResourceGroupPreparer()
@StorageAccountPreparer(random_name_enabled=True, location="canadacentral", name_prefix='pytagstorage')
def test_filter_blobs(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
container_name1 = self._create_container(prefix="container1")
container_name2 = self._create_container(prefix="container2")
container_name3 = self._create_container(prefix="container3")
tags = {"tag1": "firsttag", "tag2": "secondtag", "tag3": "thirdtag"}
self._create_block_blob(tags=tags, blob_name="blob1")
self._create_block_blob(tags=tags, blob_name="blob2", container_name=container_name1)
self._create_block_blob(tags=tags, blob_name="blob3", container_name=container_name2)
self._create_block_blob(tags=tags, blob_name="blob4", container_name=container_name3)
if self.is_live:
sleep(10)
where = "tag1='firsttag'"
blob_list = self.bsc.find_blobs_by_tags(filter_expression=where, results_per_page=2).by_page()
first_page = next(blob_list)
items_on_page1 = list(first_page)
second_page = next(blob_list)
items_on_page2 = list(second_page)
self.assertEqual(2, len(items_on_page1))
self.assertEqual(2, len(items_on_page2))
@pytest.mark.live_test_only
@GlobalResourceGroupPreparer()
@StorageAccountPreparer(random_name_enabled=True, location="canadacentral", name_prefix='pytagstorage')
def test_filter_blobs_using_account_sas(self, resource_group, location, storage_account, storage_account_key):
token = generate_account_sas(
storage_account.name,
storage_account_key,
ResourceTypes(service=True, container=True, object=True),
AccountSasPermissions(write=True, list=True, read=True, delete_previous_version=True, tag=True,
filter_by_tags=True),
datetime.utcnow() + timedelta(hours=1),
)
self._setup(storage_account, token)
tags = {"year": '1000', "tag2": "secondtag", "tag3": "thirdtag", "habitat_type": 'Shallow Lowland Billabongs'}
blob_client, _ = self._create_block_blob(tags=tags, container_name=self.container_name)
blob_client.set_blob_tags(tags=tags)
tags_on_blob = blob_client.get_blob_tags()
self.assertEqual(len(tags_on_blob), len(tags))
if self.is_live:
sleep(10)
# To filter in a specific container use:
# where = "@container='{}' and tag1='1000' and tag2 = 'secondtag'".format(container_name1)
where = "\"year\"='1000' and tag2 = 'secondtag' and tag3='thirdtag'"
blob_list = self.bsc.find_blobs_by_tags(filter_expression=where, results_per_page=2).by_page()
first_page = next(blob_list)
items_on_page1 = list(first_page)
self.assertEqual(1, len(items_on_page1))
@pytest.mark.live_test_only
@GlobalResourceGroupPreparer()
@StorageAccountPreparer(random_name_enabled=True, location="canadacentral", name_prefix='pytagstorage')
def test_set_blob_tags_using_blob_sas(self, resource_group, location, storage_account, storage_account_key):
token = generate_account_sas(
storage_account.name,
storage_account_key,
ResourceTypes(service=True, container=True, object=True),
AccountSasPermissions(write=True, list=True, read=True, delete_previous_version=True, tag=True,
filter_by_tags=True),
datetime.utcnow() + timedelta(hours=1),
)
self._setup(storage_account, token)
tags = {"year": '1000', "tag2": "secondtag", "tag3": "thirdtag", "habitat_type": 'Shallow Lowland Billabongs'}
blob_client, _ = self._create_block_blob(tags=tags, container_name=self.container_name)
token1 = generate_blob_sas(
storage_account.name,
self.container_name,
blob_client.blob_name,
account_key=storage_account_key,
permission=BlobSasPermissions(delete_previous_version=True, tag=True),
expiry=datetime.utcnow() + timedelta(hours=1),
)
blob_client=BlobClient.from_blob_url(blob_client.url, token1)
blob_client.set_blob_tags(tags=tags)
tags_on_blob = blob_client.get_blob_tags()
self.assertEqual(len(tags_on_blob), len(tags))
if self.is_live:
sleep(10)
# To filter in a specific container use:
# where = "@container='{}' and tag1='1000' and tag2 = 'secondtag'".format(container_name1)
where = "\"year\"='1000' and tag2 = 'secondtag' and tag3='thirdtag'"
blob_list = self.bsc.find_blobs_by_tags(filter_expression=where, results_per_page=2).by_page()
first_page = next(blob_list)
items_on_page1 = list(first_page)
self.assertEqual(1, len(items_on_page1))
#------------------------------------------------------------------------------
# ---- file: /python/logging/logging_snippets.py (repo: apollokit/snippets) ----
# see here for logging formatters:
# https://docs.python.org/2/library/logging.html#logrecord-attributes
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
form = "%(asctime)s %(levelname)-8s %(funcName)-15s %(message)s"
logging.basicConfig(format=form,
datefmt="%H:%M:%S")
logger.debug("KeystrokeExec: typing keys")
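# With this format the call above prints something like:
#   14:05:09 DEBUG    <module>        KeystrokeExec: typing keys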
# ---- file: /andr_omeda/andr_update/views/game/serializers.py (repo: IhebTrabelsi/andr_omeda) ----
# automatically created
from rest_framework import serializers
from andr_omeda.andr_update.models import Game
from andr_omeda.andr_update.views.animation.serializers import AnimationSerializer
from andr_omeda.andr_update.views.messageentity.serializers import MessageEntitySerializer
from andr_omeda.andr_update.views.photosize.serializers import PhotoSizeSerializer
class GameSerializer(serializers.ModelSerializer):
animation = AnimationSerializer()
text_entities = MessageEntitySerializer(many=True)
photo = PhotoSizeSerializer(many=True)
class Meta:
model = Game
fields = '__all__'
def create(self, validated_data):
animation_data = validated_data.pop('animation', None)
text_entities_data = validated_data.pop('text_entities', None)
photo_data = validated_data.pop('photo', None)
        if animation_data:
            animation = AnimationSerializer(data=animation_data)
            animation.is_valid(raise_exception=True)
            validated_data['animation'] = animation.save()
        if text_entities_data:
            # The field is declared with many=True above, so the nested
            # serializer needs many=True here as well.
            text_entities = MessageEntitySerializer(data=text_entities_data, many=True)
            text_entities.is_valid(raise_exception=True)
            validated_data['text_entities'] = text_entities.save()
        if photo_data:
            photo = PhotoSizeSerializer(data=photo_data, many=True)
            photo.is_valid(raise_exception=True)
            validated_data['photo'] = photo.save()
        # Model.save() returns None in Django, so save first and return the
        # instance. (If the many=True fields are ManyToMany relations, they
        # would have to be assigned after this first save instead.)
        game = Game(**validated_data)
        game.save()
        return game
# ---- file: /wsd/evaluation/evaluator.py (repo: paulsavoie/wikiwsd; license: MIT) ----
import logging
import Queue
from wsd.database import MySQLDatabase
from wsd.algorithm import MeaningFinder
from wsd.algorithm import RelatednessCalculator
from wsd.algorithm import Decider
from wsd.algorithm import LinkDetector
from wsd.wikipedia import WikipediaReader
from wsd.wikipedia import WikipediaPreProcessor
from wsd.wikipedia import LinkExtractor
from wsd.wikipedia import NGramExtractor
from workview import EvaluationWorkView
from outputter import EvaluationOutputter
class Evaluator():
'''The Evaluator class manages the evaluation process
'''
'''constructor
@param input_file the xml samples file to read from
@param work_view the database work view to use
'''
def __init__(self, input_file, work_view):
self._input_path = input_file
self._orig_work_view = work_view
def evaluate_link_detection(self):
'''evaluates the detection of links
'''
# read sample
article_queue = Queue.Queue()
reader = WikipediaReader(self._input_path, article_queue)
preprocessor = WikipediaPreProcessor()
linkextractor = LinkExtractor(self._orig_work_view)
ngramextractor = NGramExtractor()
reader.start()
reader.join()
articles = []
while article_queue.empty() == False:
article = article_queue.get()
preprocessor.process(article)
linkextractor.process(article)
# store ngrams additionally
article['ngrams'] = ngramextractor.process(article)
# remove links from text and save links in original ones
article['orig_links'] = article['links']
article['links'] = []
article['text'] = article['text'].replace('[[', '')
article['text'] = article['text'].replace(']]', '')
articles.append(article)
# do actual evaluation
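        # Per-sample precision = correct / links found, recall = correct /
        # links in the reference article; totals are aggregated over all samples.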
num_correct = 0
num_found = 0
num_reference = 0
for article in articles:
# wrap work view in evaluation
work_view = EvaluationWorkView(self._orig_work_view, article)
logging.info('starting to evaluate sample %s' % article['title'])
# start for each sample from the beginning
work_view.reset_cache()
logging.info('detecting links...')
linkdetector = LinkDetector(work_view)
linkdetector.detect_links(article)
outputter = EvaluationOutputter()
results = outputter.output_detected_links(article)
num_correct += results['correct']
num_found += results['total_found']
num_reference += results['total_reference']
precision_rate = 0.0
recall_rate = 0.0
if results['total_found'] != 0:
precision_rate = float(results['correct']) / float(results['total_found'])
if results['total_reference'] != 0:
recall_rate = float(results['correct']) / float(results['total_reference'])
logging.info('evaluated sample %s: precision: %d%%, recall. %d%%', article['title'], round(precision_rate*100), round(recall_rate*100))
logging.info('done evaluating %d samples' % len(articles))
overall_precision = 0.0
overall_recall = 0.0
if num_found != 0:
overall_precision = float(num_correct) / float(num_found)
if num_reference != 0:
overall_recall = float(num_correct) / float(num_reference)
return { 'precision': overall_precision, 'recall': overall_recall }
def evaluate_disambiguations(self):
'''evaluates the finding of correct target link for a given link
'''
# read sample
article_queue = Queue.Queue()
reader = WikipediaReader(self._input_path, article_queue)
preprocessor = WikipediaPreProcessor()
linkextractor = LinkExtractor(self._orig_work_view)
reader.start()
reader.join()
articles = []
while article_queue.empty() == False:
article = article_queue.get()
preprocessor.process(article)
linkextractor.process(article)
articles.append(article)
# do actual evaluation
num_links = 0.0
num_correct = 0.0
num_resolved = 0.0
for article in articles:
# wrap work view in evaluation
work_view = EvaluationWorkView(self._orig_work_view, article)
logging.info('starting to evaluate sample %s' % article['title'])
# start for each sample from the beginning
work_view.reset_cache()
meaningFinder = MeaningFinder(work_view)
meaningFinder.find_meanings(article)
relatednessCalculator = RelatednessCalculator(work_view)
decider = Decider(relatednessCalculator)
decider.decide(article)
outputter = EvaluationOutputter()
results = outputter.output_disambiguations(article)
num_links += results['total']
num_correct += results['correct']
num_resolved += results['resolved']
precision_rate = 0.0
recall_rate = 0.0
if results['resolved'] != 0:
precision_rate = float(results['correct']) / float(results['resolved'])
if results['total'] != 0:
recall_rate = float(results['correct']) / float(results['total'])
logging.info('evaluated sample %s: precision: %d%%, recall. %d%%', article['title'], round(precision_rate*100), round(recall_rate*100))
logging.info('done evaluating %d samples' % len(articles))
overall_precision = 0.0
overall_recall = 0.0
if num_resolved != 0:
overall_precision = float(num_correct) / float(num_resolved)
if num_links != 0:
overall_recall = float(num_correct) / float(num_links)
        return { 'precision': overall_precision, 'recall': overall_recall }
# ---- file: /Python/apr_calculator.py (repo: derekforesman/CMPSC-131) ----
#!/usr/bin/env python3
deposit = float(input("What is the amount you will be depositing?\n")) # get the amount to be deposited
apr = float(input("Please enter the APR for the account. For example (2) will be read as 2% or 0.02\n")) # get the percent APR
years = int(input("How many years will it gain interest?\n")) # get the number of years
interest = (apr / 100) + 1 # yearly growth factor; always 1.XX unless the rate exceeds 100%
total = (interest ** years) * deposit # calculate the final amount in the account
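# e.g. depositing $1000 at 2% APR for 3 years: 1000 * 1.02**3 = 1061.208...,
# which rounds to $1061.21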
final = round(total, 2)
print("Your final amount will be: $",final) # print the amount to the console