blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4b67e7ff399bcd6df368357f00b2f7d69f2ca6ad | dbf9ca9f46020e255246eaa45c5a094a0ec5ac43 | /django/django_full_stack/amadon/apps/poorly_coded_store/urls.py | 31dc103b0e8e0d26c09fe923d12b435d56b51d1d | [] | no_license | Tandd2015/python_stack | 677ef9772aa54c727d239024c17812516dabe293 | d63af9f8442ce7d631a4fec7fe494ea307b02709 | refs/heads/master | 2022-02-05T21:51:32.545489 | 2020-07-16T18:15:07 | 2020-07-16T18:15:07 | 206,409,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index),
url(r'^checkout$', views.checkout_index),
url(r'^amadon/buy$', views.amadon_buying),
] | [
"[email protected]"
] | |
963e864510c02d03f393dda68b55ceb7ee18fe59 | 8ab9ecba0bb8e474803b587e5b56978264ba0060 | /Residual network/util.py | 402570bcde540705eebdf2faa943fd8862858dc2 | [] | no_license | ewu001/Deep-learning-with-convolutional-network | 646ff6673f1e517a1ace89cec888c2f313423af1 | 4d63d9fee2607d00ce8caff066f5a23942359db6 | refs/heads/master | 2020-12-09T10:07:45.204460 | 2020-01-11T19:14:50 | 2020-01-11T19:14:50 | 233,272,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,388 | py | import os
import numpy as np
import tensorflow as tf
import h5py
import math
def forward_propagation_for_predict(X, parameters):
"""
3 layer model:
LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SOFTMAX
Arguments:
X -- input dataset placeholder, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3"
the shapes are given in initialize_parameters
Returns:
Z3 -- the output of the last LINEAR unit
"""
W1 = parameters['W1']
b1 = parameters['b1']
W2 = parameters['W2']
b2 = parameters['b2']
W3 = parameters['W3']
b3 = parameters['b3']
Z1 = tf.add(tf.matmul(W1, X), b1)
A1 = tf.nn.relu(Z1)
Z2 = tf.add(tf.matmul(W2, A1), b2)
A2 = tf.nn.relu(Z2)
Z3 = tf.add(tf.matmul(W3, A2), b3)
return Z3
def predict(X, parameters):
W1 = tf.convert_to_tensor(parameters["W1"])
b1 = tf.convert_to_tensor(parameters["b1"])
W2 = tf.convert_to_tensor(parameters["W2"])
b2 = tf.convert_to_tensor(parameters["b2"])
W3 = tf.convert_to_tensor(parameters["W3"])
b3 = tf.convert_to_tensor(parameters["b3"])
params = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2,
"W3": W3,
"b3": b3}
x = tf.placeholder("float", [12288, 1])
z3 = forward_propagation_for_predict(x, params)
p = tf.argmax(z3)
sess = tf.Session()
prediction = sess.run(p, feed_dict = {x: X})
return prediction
def load_dataset():
train_dataset = h5py.File('data/train_signs.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # train set labels
test_dataset = h5py.File('data/test_signs.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:])
test_set_y_orig = np.array(test_dataset["test_set_y"][:])
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
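if __name__ == '__main__':
    # Hedged smoke test of predict(): random float32 placeholder weights with the
    # shapes the 3-layer model above implies (12288 -> 25 -> 12 -> 6 classes).
    # This is only a sketch; real parameters would come from training. Requires
    # TensorFlow 1.x (tf.placeholder / tf.Session).
    rng = np.random.RandomState(0)
    demo_params = {
        'W1': (rng.randn(25, 12288) * 0.01).astype(np.float32), 'b1': np.zeros((25, 1), np.float32),
        'W2': (rng.randn(12, 25) * 0.01).astype(np.float32),    'b2': np.zeros((12, 1), np.float32),
        'W3': (rng.randn(6, 12) * 0.01).astype(np.float32),     'b3': np.zeros((6, 1), np.float32),
    }
    x_demo = rng.rand(12288, 1).astype(np.float32)
    print('predicted class:', predict(x_demo, demo_params))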
| [
"[email protected]"
] | |
78892b1fe9d2c4ea6c02dc6d8137c39b0dab819d | a7fc9dff8c5ca42cc4435f8f57ca64e187688650 | /classifyMusic.py | 4dbad4d88684b9edb0c5b7779d3de51a7d49fe0a | [
"Apache-2.0"
] | permissive | IArrationality/voiceai | 5d07ba4943193a639571a9df714b01b9a1db2026 | 47ee48e642e1e141101adfd697dd03913dc2f614 | refs/heads/master | 2023-03-17T00:58:50.865410 | 2017-04-14T17:53:25 | 2017-04-14T17:53:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | from sklearn import cluster
import json
import pickle
features = []  # renamed from `list`, which shadowed the builtin
data = []
nameList = []
with open('music_metadata.json') as data_file:
data = json.load(data_file)
for song in data:
nameList.append(str(song["name"]))
    features.append((float(song["energy"]), float(song["tempo"]), float(song["centroid"]), float(song["vocals"])))#, float(song["chroma"])))
"""
k_means = cluster.KMeans(n_clusters=11)
k_means.fit(list)
songTuple = [(nameList[i], k_means.labels_[i]) for i in range(len(nameList))]
"""
birch = cluster.Birch(n_clusters=7)  # note: this is Birch, not MeanShift, despite the old variable name
birch.fit(features)
songTuple = [(nameList[i], birch.labels_[i]) for i in range(len(nameList))]
songTuple.sort(key=lambda tup:tup[1])
for i in range(len(nameList)):
print(songTuple[i][0] + " : " + str(songTuple[i][1]))
with open('music_cluster.dat', 'wb') as pickleFile:
    pickle.dump(birch, pickleFile)
"""
print(nameList)
print(k_means.labels_)
""" | [
"[email protected]"
] | |
83ee413fda04f449f6526de6a49368ced91020e1 | 477a3a53c90355434982a88979e6845ab3c279e6 | /collective/externalizelink/__init__.py | 6210661e894ded712a521bba498d1bc4710a84be | [] | no_license | collective/collective.externalizelink | dac2738d77b0db51be4ef8a0ac3909454d2584d6 | 54fa332edab64061f7656cdb77ce240629f48682 | refs/heads/master | 2023-08-18T10:39:49.519346 | 2014-05-02T13:16:37 | 2014-05-02T13:16:37 | 19,216,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | # -*- extra stuff goes here -*-
import logging
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('collective.externalizelink')
logger = logging.getLogger('collective.externalizelink')
def initialize(context):
"""Initializer called when used as a Zope 2 product."""
| [
"[email protected]"
] | |
72256797911fd1d2f4086199d9208a6f445d9b2d | caa6dced49f2d03d4fed4c0e9c6f09d0d374e7ce | /leetcode/211_design_add_and_search_words_data_structure.py | 7694a60a8c77adcd524ee56cdb1942e524202e68 | [] | no_license | hooong/baekjoon | 72e70cea5769467609150416be6f075f9d4c136a | 541f67859de0871ecfa134f8e5c6a8f399d17a1e | refs/heads/master | 2022-04-06T09:21:26.761766 | 2022-03-02T12:57:21 | 2022-03-02T12:57:21 | 180,535,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | class WordDictionary:
def __init__(self):
self.trie = {}
def addWord(self, word: str) -> None:
cur = self.trie
for ch in word:
if ch not in cur.keys():
cur[ch] = {}
cur = cur[ch]
cur['*'] = {}
def search(self, word: str) -> bool:
check = False
def dfs(level, cur):
nonlocal check
if level == len(word):
if '*' in cur.keys():
check = True
return
ch = word[level]
if ch == '.':
for candidate in cur.keys():
dfs(level + 1, cur[candidate])
else:
if ch not in cur.keys():
return
dfs(level + 1, cur[ch])
dfs(0, self.trie)
return check
def test():
wd = WordDictionary()
wd.addWord("a")
wd.addWord("ab")
assert wd.search("a") is True
assert wd.search("a.") is True
assert wd.search("ab") is True
assert wd.search(".a") is False
assert wd.search(".b") is True
assert wd.search("ab.") is False
assert wd.search(".") is True
assert wd.search("..") is True
| [
"[email protected]"
] | |
a4cf42f2de2ac6b15740ea0a339f19e5160ade80 | 105aeb11792ef666693ba8c2fa84b53eef8e4778 | /quick_scraper.py | 23c144adb08a384fdcc7706096ec47bbc44a1757 | [
"MIT"
] | permissive | CodeFanatic23/newsBehavior | 6a26ecf52a1d990ccf0cf31f361cd17347641c18 | 8b10ca339dc9bc4cbc033acee5707ff777a4e85c | refs/heads/master | 2021-01-11T14:14:49.765909 | 2017-04-17T18:04:50 | 2017-04-17T18:04:50 | 81,243,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,846 | py | import scrapy
from bs4 import BeautifulSoup
import sys
import os
import _pickle as pickle
import pandas as pd
from .scrape_with_bs4 import *
import datetime
class ContentSpider(scrapy.Spider):
name = "yolo"
handle_httpstatus_list = [i for i in range(100,999) if i!=200]
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
date_=None
file=None
url=None
total_urls=0
counter=0
##these variables store the content scraped
b={}
date={}
contents={}
total_links={}
NEWS={'reuters.com':sc_reuters,'thehindu.com':sc_thehindu,'economictimes.indiatimes':sc_econt,
'moneycontrol.com':moneyControl,'ndtv.com':ndtv,'hindubusinessline.com':hindu_bl}
#initialises the data types with the respective keys and empty list/dictionary
for key in NEWS:
date[key]=[]
b[key]={}
contents[key]=[]
total_links[key]=[]
#generates all the links to be scraped
def start_requests(self):
print("\n\nEnter company name to scrape content for")
cos=[i.split('_')[1] for i in list_files('links/finallinks')]
print('\n'+str(cos))
self.dest_file=input()
for file_name in list_files('links/finallinks'):
if(self.dest_file.lower() in file_name.lower()):
tracker(file_name)
print("SCRAPING DATA FOR "+file_name)
links = [line.rstrip('\n') for line in open('links/finallinks/'+file_name)]
self.total_urls=len(links)
self.file=file_name
for l in links:
self.date_,self.url=l.split('::')
request=scrapy.Request(self.url,self.parse,dont_filter=True)
request.meta['date']=self.date_
yield request
# gets called at the end when all the data has been scraped .
# It maintains the same folder format for data storage as before.
def writeTo(self):
company=self.dest_file
for webp in self.date:
make_directory(company,webp)
with open('content/'+company+'/'+webp+'/raw_'+self.file.split('.data')[0]+'_'+webp+'.pkl', 'wb') as fp:
pickle.dump(self.b[webp], fp)
temp = {'date':self.date[webp],
'data':self.contents[webp],
'url':self.total_links[webp]
}
df = pd.DataFrame(temp)
df.set_index('date',inplace=True)
df.to_pickle('content/'+company+'/'+webp+'/'+self.file.split('.data')[0]+'_'+webp+'_content.pkl')
df.to_csv('content/'+company+'/'+webp+'/'+self.file.split('.data')[0]+'_'+webp+'_content.csv')
def parse(self, response):
if(response.status in self.handle_httpstatus_list):
self.counter+=1
else:
self.counter+=1
for key in self.NEWS:
if key in response.url:
bs=BeautifulSoup(response.text,'html.parser')
content=self.NEWS[key](bs)
str1=''
tokens=[]
for text in content:
tokens.extend(text)
for tk in tokens:
str1+=''.join(tk)
c = datetime.datetime.strptime(response.meta['date'], '%d-%b-%Y')
#yield self.logger.info("date -"+str(c)+" #"*15)
self.date[key].append(c)
self.contents[key].append(str1)
self.total_links[key].append(response.url)
temp_={c:str1}
self.b[key].update(temp_)
yield self.logger.info("COUNTER -"+str(self.counter)+" #"*15)
yield self.logger.info("TOTAL URLS -"+str(self.total_urls)+" #"*12)
if(self.counter==self.total_urls):
self.writeTo()
| [
"[email protected]"
] | |
25a2374839cd2c7f930233b6db890caca568dfb0 | 13696a9691b173d75b11b4aee22b79d4ea6b7c0b | /test/test_catalog_info_response_limits.py | 827202ca652ed76a76bc7410801e23ae23589d58 | [
"Apache-2.0"
] | permissive | square/connect-python-sdk | 410613bc4b04f0f70176275591a16c9e49e25ede | e00e2889b2dd2c55048219cbe64db79962a68633 | refs/heads/master | 2023-06-15T09:24:17.190416 | 2019-08-15T17:44:41 | 2019-08-15T17:44:41 | 64,772,029 | 53 | 45 | Apache-2.0 | 2020-12-20T18:41:31 | 2016-08-02T16:07:17 | Python | UTF-8 | Python | false | false | 1,284 | py | # coding: utf-8
"""
Copyright 2017 Square, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import squareconnect
from squareconnect.rest import ApiException
from squareconnect.models.catalog_info_response_limits import CatalogInfoResponseLimits
class TestCatalogInfoResponseLimits(unittest.TestCase):
""" CatalogInfoResponseLimits unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testCatalogInfoResponseLimits(self):
"""
Test CatalogInfoResponseLimits
"""
model = squareconnect.models.catalog_info_response_limits.CatalogInfoResponseLimits()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
a5e2b6e8d569206a8301e3712f4e280d509449a7 | 98c5d00f821c5fe516ef8e5e2bcee8eb68eda3f6 | /dirbot/pipelines.py | a7afe458de55d73acd8f2af194c99cd43ba9daa9 | [
"MIT"
] | permissive | java1001/crawler_links_website | 36107c3056cd553bda39a4a4634a27699f6e37cc | 220368184bb0c98bdd9f90ddea4cce9ceff89e9f | refs/heads/master | 2016-09-06T06:56:10.504018 | 2015-07-22T14:27:47 | 2015-07-22T14:27:47 | 39,508,825 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,314 | py | from datetime import datetime
from hashlib import md5
from scrapy import log
from scrapy.exceptions import DropItem
from twisted.enterprise import adbapi
class FilterWordsPipeline(object):
"""A pipeline for filtering out items which contain certain words in their
description"""
# put all words in lowercase
words_to_filter = ['politics', 'religion']
def process_item(self, item, spider):
for word in self.words_to_filter:
desc = item.get('description') or ''
if word in desc.lower():
raise DropItem("Contains forbidden word: %s" % word)
else:
return item
class RequiredFieldsPipeline(object):
"""A pipeline to ensure the item have the required fields."""
required_fields = ()
def process_item(self, item, spider):
for field in self.required_fields:
if not item.get(field):
raise DropItem("Field '%s' missing: %r" % (field, item))
return item
class MySQLStorePipeline(object):
"""A pipeline to store the item in a MySQL database.
This implementation uses Twisted's asynchronous database API.
"""
def __init__(self, dbpool):
self.dbpool = dbpool
@classmethod
def from_settings(cls, settings):
dbargs = dict(
host=settings['MYSQL_HOST'],
db=settings['MYSQL_DBNAME'],
user=settings['MYSQL_USER'],
passwd=settings['MYSQL_PASSWD'],
charset='utf8',
use_unicode=True,
)
dbpool = adbapi.ConnectionPool('MySQLdb', **dbargs)
return cls(dbpool)
def process_item(self, item, spider):
# run db query in the thread pool
d = self.dbpool.runInteraction(self._do_upsert, item, spider)
d.addErrback(self._handle_error, item, spider)
# at the end return the item in case of success or failure
d.addBoth(lambda _: item)
# return the deferred instead the item. This makes the engine to
# process next item (according to CONCURRENT_ITEMS setting) after this
# operation (deferred) has finished.
return d
def _do_upsert(self, conn, item, spider):
"""Perform an insert or update."""
guid = self._get_guid(item)
now = datetime.utcnow().replace(microsecond=0).isoformat(' ')
conn.execute("""SELECT count(*) FROM link WHERE guid = %s
""", [guid])
ret = conn.fetchone()[0]
if ret > 0:
conn.execute("""
UPDATE link
SET link=%s,updated=%s
WHERE guid=%s
""", (item['link'], now, guid))
spider.log("Item updated in db: %s %r" % (guid, item))
else:
conn.execute("""
INSERT INTO link (guid, link, updated)
VALUES (%s, %s, %s)
""", (guid, item['link'],now))
spider.log("Item stored in db: %s %r" % (guid, item))
def _handle_error(self, failure, item, spider):
"""Handle occurred on db interaction."""
# do nothing, just log
log.err(failure)
def _get_guid(self, item):
"""Generates an unique identifier for a given item."""
# hash based solely in the link field
        return md5(item['link'].encode('utf-8')).hexdigest()  # encode so non-ASCII links hash cleanly
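# Settings sketch (hedged, values are placeholders) matching the
# from_settings() lookups above:
#
#   ITEM_PIPELINES = {'dirbot.pipelines.MySQLStorePipeline': 800}
#   MYSQL_HOST = 'localhost'
#   MYSQL_DBNAME = 'dirbot'
#   MYSQL_USER = 'root'
#   MYSQL_PASSWD = 'secret'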
| [
"[email protected]"
] | |
1e263b3cac8762c460e2841631a5e16b52522fb5 | 144b707fc34a9247d1031ceab341cbed783659c4 | /AzureDataLake/asgi.py | 80468fd6f0c33a1f7e54b6f5796d109b1b494095 | [] | no_license | Hari-prasad-129/datacomparetool | 20f306422f48d99cc770674cb88271dae218a8b3 | c114b8117c56e8c23fb811ed085e2728413fe96c | refs/heads/master | 2023-06-14T06:54:49.395926 | 2021-07-13T14:35:30 | 2021-07-13T14:35:30 | 385,633,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
ASGI config for AzureDataLake project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AzureDataLake.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
4a68c7e592a01f77ff122680a68193458b1fabfd | 8bc4e3f82a1bfd1587630b9405e7b883626ef2e1 | /BBB/PatchCPSInstallerCMFInstaller.py | 7a789cadd95c70f9f4be3d614cfa785b03a8ea55 | [] | no_license | nuxeo-cps/products--CPSLuceneCatalog | a7bec49b730bb871ab5ba2ba49589eb53c14f2ac | e57772c36eca51b4c9d08565cc9238866552500d | refs/heads/main | 2023-01-23T11:03:25.833385 | 2010-01-01T16:44:32 | 2010-01-01T16:44:32 | 317,961,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,795 | py | # (C) Copyright 2006 Nuxeo SAS <http://nuxeo.com>
# Author: Julien Anguenot <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#
# $Id: catalog.py 31175 2006-03-10 14:57:43Z janguenot $
"""Patching CPSInstaller.CMFInstaller.
BBB for CPS 3.4.0
"""
import logging
from Products.CPSInstaller.CMFInstaller import CMFInstaller
def reindexCatalog(self):
pass
CMFInstaller.reindexCatalog = reindexCatalog
def addZCTextIndexLexicon(self, id, title=''):
pass
CMFInstaller.addZCTextIndexLexicon = addZCTextIndexLexicon
def addPortalCatalogIndex(self, id, type, extra=None, destructive=False):
pass
CMFInstaller.addPortalCatalogIndex = addPortalCatalogIndex
def addPortalCatalogMetadata(self, id, default_value=None):
pass
CMFInstaller.addPortalCatalogMetadata = addPortalCatalogMetadata
def flagCatalogForReindex(self, indexid=None):
pass
CMFInstaller.flagCatalogForReindex = flagCatalogForReindex
def flagCatalogForReindexMetadata(self, metadataid=None):
pass
CMFInstaller.flagCatalogForReindexMetadata = flagCatalogForReindexMetadata
logger = logging.getLogger('CPSLuceneCatalog')
logger.info('Patching CPSInstaller.CMFInstaller for BBB for CPS <= 3.4.0')
| [
"devnull@localhost"
] | devnull@localhost |
10df3e09b4623c5572d881fcb803c48ba838be08 | a49f40455986f138b419c0eea917a369f84e6e0e | /utilss/discordUtils.py | 82bab4de64531cb0530771bcbf8bb8bf5a9c15a8 | [] | no_license | LiteralGenie/ZeroBot | aa367e6f33b352ddd1544a35d6b0ad6e7c74b450 | 4eaff29678dadd7eb8d164d0c028e5824b91f80c | refs/heads/master | 2021-09-27T06:52:26.775641 | 2021-09-18T21:42:31 | 2021-09-18T21:42:31 | 238,117,400 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,146 | py | from utilss import utils, globals
from utilss.commands import *
import utilss.sheetUtils.sheetUtils as sheetUtils
import discord, json, traceback
import utilss.sheetUtils.parseMsgs as parse
import utilss.sheetUtils.createSheet as sheet
CONFIG = utils.loadJson(globals.CONFIG_FILE)
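# Expected config shape (hedged sketch, inferred from the CONFIG['SUBMISSION_CHANNEL']
# lookup in scanMessages and the ADMINS check in on_message):
#   {"ADMINS": ["<discord user id>"], "SUBMISSION_CHANNEL": "<channel id>"}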
class Client(discord.Client):
def load(self, prefix="$"):
self.prefix = prefix
async def on_ready(self):
print('Logged in as', self.user.name, self.user.id)
act = discord.Activity(name=f"{self.prefix}help for commands", type=discord.ActivityType.playing)
await self.change_presence(activity=act)
async def on_message(self, message):
if message.author.id == self.user.id: return
m = message.content.lower().replace(self.prefix, "", 1)
args = [message, self]
isAdmin= str(message.author.id) in utils.loadJson(globals.CONFIG_FILE)['ADMINS']
if isAdmin:
print(message.author.name, message.content)
if m.startswith("help"):
await help(*args)
elif m.startswith("sett"):
await settings(*args)
elif m.startswith("setde"):
await setDelay(*args)
elif m.startswith("setdis"):
await setDiscordDelay(*args)
elif m.startswith("addc"):
await modChannel(*args, typ="add")
elif m.startswith("removec"):
await modChannel(*args, typ="remove")
elif m.startswith("addu"):
await modUser(*args, typ="add")
elif m.startswith("removeu"):
await modUser(*args, typ="remove")
elif m.startswith("addr"):
await modMention(*args, typ="add")
elif m.startswith("remover"):
await modMention(*args, typ="remove")
elif m.startswith("ser"):
await listSeries(*args)
elif m.startswith("listr"):
await listRoles(*args)
# todo: hardcode
if isAdmin or message.guild.id == 425423910759170049:
if m.startswith("stat"):
print(message.author.name, message.content)
await scanMessages(self, last=True)
await stats(*args)
elif m.startswith("update"):
await updateSheet(self, message)
async def scanMessages(client, last=False):
channel = client.get_channel(int(CONFIG['SUBMISSION_CHANNEL']))
log= None
if last: log= utils.loadJson(globals.MSG_FILE)
if not log: log= {"log": [], "members": {}, "last_parsed": ""}
try:
if last and log["last_parsed"]: last= await channel.fetch_message(log['last_parsed'])
else: last= None
except:
last=None
print("resetting log")
log= {"log": [], "members": {}, "last_parsed": ""}
async for msg in channel.history(limit=None, oldest_first=True, after=last):
print("Scanning", msg.content)
log['log'].append(sheetUtils.msgToDict(msg))
log['members'][msg.author.id] = msg.author.name
log['last_parsed'] = msg.id
with open("data/msg_log.json", "w+") as file:
json.dump(log, file, indent=2)
parse.parse()
return len(log['log'])
async def updateSheet(client, message):
try:
async with message.channel.typing():
await message.channel.send("Scanning messages...")
print("Scanning")
numMsgs= await scanMessages(client)
async with message.channel.typing():
await message.channel.send(f"Parsing {numMsgs} messages...")
print(f"Parsing {numMsgs} messages...")
parse.parse()
# async with message.channel.typing():
# await message.channel.send("Uploading...")
# print("Uploading")
sheet.make()
# print("Done")
# await message.channel.send("Done: <https://docs.google.com/spreadsheets/d/blah>")
await message.channel.send("Done")
except Exception as e:
traceback.print_exc()
await message.channel.send(str(e) + "\n<@113843036965830657>")
| [
"[email protected]"
] | |
8b2324f62bfd9afa1616350ddc22100c00c0de19 | f84c7c209d21a03ad88cec1efd2795edb8d97a8b | /Skyfit Menu.py | 2906d687b5f051c4bf4988e6e8aa94e48d20e0a4 | [] | no_license | Chapinzo/python | 3d036c35e995ff00d51fd212660098cf1b55baa2 | 88fdc8e8b928a32ca61978ba2c6547f3eb20c99a | refs/heads/master | 2022-12-04T12:41:34.834680 | 2020-08-24T23:57:24 | 2020-08-24T23:57:24 | 277,551,608 | 0 | 0 | null | 2020-08-24T23:57:25 | 2020-07-06T13:37:57 | Python | UTF-8 | Python | false | false | 2,035 | py | #This is a Skyfit Menu
print("This is a Skyfit Gym Menu")
#Display the welcome message
print("\t")
print("Welcome to Sky fit Gym Menu")
#Welcome the user
#Request for details
print("\t")
first_name = input("Please what is your name:\r")
Lastname = input("What is your surname:\r")
fullname = (first_name +" "+ Lastname)
#Display Options
print("\t")
options = int(input("What would you like to do? \n1. Register \n2. Enquiry \n3. Complaint \n4. Talk to a customer representative \n"))
if(options == 1):
print(fullname,"you want to register? Okay.")
print("\t")
plan_choice = int(input("What plan would you like to subscribe to: \n1. Annually (375,000)\n2. 6 months (275,000)\n3. 3 months (150,000)\n4. Monthly (70,000)\n5. 10 days (31,000)\n6. Weekly (20,500)\n"))
if(plan_choice == 1):
print("Okay",first_name,"You'll pay 375,000")
elif(plan_choice == 2):
print("Okay",first_name,"You'll pay 275,000")
elif(plan_choice == 3):
print("Okay",first_name,"You'll pay 150,000")
elif(plan_choice == 4):
print("Okay",first_name,,"You'll pay 70,000")
elif(plan_choice == 5):
print("Okay",first_name,"You'll pay 31,500")
elif(plan_choice == 6):
print("Okay",first_name,"You'll pay 20,500")
else:
print("Invalid Option, Kindly restart menu")
elif(options == 2):
enquiry = int(input("What would you like to find out: \n1. \n2. \n3. \n.4 \n"))
if(enquiry == 1):
print()
elif(enquiry == 2):
print()
elif(enquiry == 3):
print()
elif(enquiry == 4):
print()
else:
print("Invalid Option, Kindly restart menu")
elif(options == 3):
complaint = input("Kindly let us know what you are displeased about:\n")
print(first_name,"your complaint has been noted and we'll look into it as soon as possible")
elif(options == 4):
print("We are routing you to the customer care rep")
else:
print("Invalid Choice, Kindly restart Menu")
| [
"[email protected]"
] | |
566aec85a27a89e4b6081ffa2e1548f26e034677 | 334fcae13a0927b879f3cda454bd4d8855a05971 | /pyCharm/pandas.dataframe.merge.py | 59aaea1aff362887912c05b99417c466601440ec | [] | no_license | cleartime/lean-py-100days | 439724b395f15694c9756382adad98ff8b543853 | b63f9e12b3f0e30009963ed7db1fcf80e0b10b9b | refs/heads/master | 2020-04-22T10:10:27.797514 | 2019-02-27T07:49:45 | 2019-02-27T07:49:45 | 170,295,895 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | import numpy as np
import pandas as pd
from pandas import Series, DataFrame
d1 = DataFrame({'key':['x','y','z'], 'data_set1':[1,2,3]})
d2 = DataFrame({'key':['a','b','c'], 'data_set2':[4,5,6]})
d3 = pd.merge(d1,d2)                # inner join on 'key' by default -> empty here: d1 and d2 share no keys
d4 = pd.merge(d1,d2, how='left')    # keep every key from d1, fill missing d2 columns with NaN
d5 = pd.merge(d1,d2, how='right')   # keep every key from d2
d6 = pd.merge(d1,d2, how='outer')   # union of keys from both frames
d7 = pd.merge(d1,d2, on='key')      # inner join with the join column named explicitly
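# Quick inspection of the joins above (a sketch; exact console formatting
# depends on the pandas version).
for name, frame in [('inner', d3), ('left', d4), ('right', d5),
                    ('outer', d6), ('on key', d7)]:
    print(name, frame, sep='\n', end='\n\n')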
"[email protected]"
] | |
6084438e54162ab818dd2e2004f8e3122056a822 | 3119eb1944f96d5c32221f7e68d7227bc1350109 | /mysite/settings.py | 74272c00826b4c2db69c3254919da5cd90ab29bf | [] | no_license | anil1pma/RemindMeLater | 35f10030919d83dbe1cbf9ce43e35d5f13bf588a | 37e3b529751a3d94d3b0c6d8c12d55e51ce37e95 | refs/heads/master | 2021-01-20T20:56:38.460531 | 2016-07-24T21:15:18 | 2016-07-24T21:15:18 | 64,084,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,061 | py | """
Django settings for mysite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
#Send Mail Info
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.zoho.com'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = '7417531930'
EMAIL_PORT = 587
SERVER_EMAIL = '[email protected]'
#SMS Info
SMS_VENDOR_ID = "2000145902"
SMS_VENDOR_PASSWORD = "raOkNpg8u"
SMS_VENDOR_URL = "http://enterprise.smsgupshup.com/GatewayAPI/rest"
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# BASE_DIR = os.path.dirname(os.path.dirname(__file__))
BASE_DIR = os.path.dirname(__file__)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_2_3t4e*3+2c^oac1j7@#5)k2$o5g6ir=w_*$pjdjj!9z2cri!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
# # Other settings...
# TEMPLATE_DIRS = (
# os.path.join(PROJECT_ROOT, "templates"),
# )
TEMPLATE_DIRS = (
BASE_DIR + '/templates/',
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'celery',
#Local Apps
'polls',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sendMessage',
'USER': 'root',
'PASSWORD': 'asdf123',
'HOST': 'localhost', # Or an IP Address that your DB is hosted on
'PORT': '3306',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
eb18d262501a3f0f5037223192af0cf5469af69d | 13cfec97e43d55d8e940a20972c9f167c72d9327 | /tests/path.py | 6cda0cfa5eb64909e4177bfa1cfba678437ae4eb | [] | no_license | hofa/py_game_ddz_asyncio_protobuf | bf22a9adec4b68459595722c93a50c8cc943fe06 | b3d13cc40875be1c283e163cfb2bef097cb315da | refs/heads/master | 2020-04-09T22:21:02.282086 | 2018-12-06T06:13:33 | 2018-12-06T06:13:33 | 160,625,959 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | import sys, os
sys.path.append(os.path.abspath(".."))
print(os.path.abspath(".."))
print(os.path.dirname(os.path.realpath(__file__)) + "/../")
print(os.path.dirname(os.getcwd())) | [
"[email protected]"
] | |
2fc60233814185b1c1d01f8ff2198f079d658b48 | bd40df660eda9536089aaf9b75c600f246154c6b | /Day08/Part01/main.py | ba8536bf39f358d28bea5b8e1225fa6eb0eac27c | [] | no_license | superjodash/advent2019 | 0543ac5ed842ce478f4ce44e5f343ab8812d9e15 | 9675cb4038a08aed4835c78713326929b83374e6 | refs/heads/master | 2020-12-14T02:56:10.225248 | 2020-02-12T05:54:39 | 2020-02-12T05:54:39 | 234,613,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 989 | py | width = 25
height = 6
dim = width * height
def main():
img = load_file()
layerCount = int(len(img) / dim)
fewestZeros = None
layerData = {}
for layerIndex in range(0, layerCount):
layer = layerData.get(layerIndex)
if(layer == None):
layer = [0, 0, 0]
layerData[layerIndex] = layer
for h in range(0, height):
for w in range(0, width):
pindex = (layerIndex * dim) + (h * width) + w
pixel = img[pindex]
print(f"Index: {pindex}, Value: {pixel}")
layer[pixel] += 1
if(fewestZeros == None):
fewestZeros = layer
else:
if(fewestZeros[0] > layer[0]):
fewestZeros = layer
print(f"{fewestZeros} with value of {fewestZeros[1] * fewestZeros[2]}")
def load_file():
    with open('Day08/puzzle.img', 'r') as f:
        # strip() guards against a trailing newline in the puzzle input
        return [int(x) for x in f.read().strip()]
main()
| [
"[email protected]"
] | |
91ad3df12a0b300e02ed46114d6beaa8e3a658ce | 3eb068cc71bddbec131defe21c0ae9a430efb1d5 | /scripts/export_condcomments.py | c233e20062381449e029649ea62059e5b26107fc | [
"Unlicense"
] | permissive | hivdb/hivfacts | b2830acb513a1688bc780c6680e3dd35c4e60371 | 58ac768d5099f14c9c99cfaf18a016b1e79e5fe3 | refs/heads/main | 2023-08-07T13:54:18.492995 | 2023-07-20T12:44:25 | 2023-07-21T02:01:13 | 158,280,510 | 3 | 2 | Unlicense | 2023-07-21T02:01:15 | 2018-11-19T19:43:15 | Java | UTF-8 | Python | false | false | 1,945 | py | #! /usr/bin/env python3
import os
import json
import yaml
import click
from yaml import Loader
from sqlalchemy import create_engine
BASEDIR = os.path.dirname(
os.path.dirname(os.path.abspath(__file__))
)
VERSIONS_PATH = os.path.join(
BASEDIR, 'data', 'algorithms', 'versions.yml')
DATABASE_URI = os.environ.get(
'DATABASE_URI_HIVDBRULES',
'mysql+pymysql://rshafer:[email protected]/HIVDB_Results'
)
QUERY_TBL_CMTS_HIV1 = (
'SELECT CommentName, DrugClass, ConditionType, ConditionValue, Comment '
'FROM tblConditionalCommentsWithVersions WHERE Version=%s '
'ORDER BY CommentName'
)
QUERY_TBL_CMTS_HIV2 = (
'SELECT CommentName, DrugClass, ConditionType, ConditionValue, Comment '
"FROM tblConditionalCommentsWithVersions WHERE Version='V9_0a3' "
'ORDER BY CommentName'
)
def get_latest_db_version():
with open(VERSIONS_PATH) as fp:
data = yaml.load(fp, Loader=Loader)
ver, *_ = data['HIVDB'][-1]
return 'V' + ver.split('-', 1)[0].replace('.', '_')
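# e.g. a versions.yml entry whose version string starts with "9.0" yields
# "V9_0" (hedged: the exact entry format is inferred from the parsing above).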
@click.command()
@click.argument('species', type=click.Choice(['HIV1', 'HIV2']))
@click.argument('output_json', type=click.File('w'))
def main(species, output_json):
engine = create_engine(DATABASE_URI)
engine.connect()
dbver = get_latest_db_version()
if species == 'HIV1':
results = engine.execute(QUERY_TBL_CMTS_HIV1, dbver)
else: # species == 'HIV2'
results = engine.execute(QUERY_TBL_CMTS_HIV2)
cmtlist = []
for row in results:
condval = json.loads(row['ConditionValue'])
cmtlist.append({
'strain': condval.get('strain', 'HIV1'),
'commentName': row['CommentName'],
'drugClass': row['DrugClass'],
'conditionType': row['ConditionType'],
'conditionValue': condval,
'comment': row['Comment']
})
json.dump(cmtlist, output_json, indent=2)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
e24cfca7650647d0fd68d5b65a3f02ec842aa1ae | 01f44a6a3298566b8d03dc6c32431a1b9e236809 | /p2.py | b9811ad8552198f0f21008fc12a05035a9261d54 | [] | no_license | nmwalsh/project-euler-solutions | 3ca49123e55c2da04e2a4316b07f06fe06b3a868 | 3413fac909e31bc84f14564a98ffaf8a0ec4dfda | refs/heads/master | 2021-05-14T14:35:52.330261 | 2018-01-10T01:41:37 | 2018-01-10T01:41:37 | 115,973,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | """
Problem 2
Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.
"""
# Concepts used:
# fibonacci, lambda, filter
seed_low = 1
seed_high = 2
max_val = 4000000
def find_even_fibs(seed_low, seed_high, max_val):
fib_array = [seed_low, seed_high]
while fib_array[-1] < max_val:
seed_low = fib_array[-2]
seed_high = fib_array[-1]
fib_array.append(seed_low + seed_high)
#remove last term, since it will exceed max_val
fib_array.pop()
even_fibs = list(filter(lambda x: x/2 == x//2, fib_array))
even_fibs_sum = sum(even_fibs)
print(even_fibs_sum)
return even_fibs_sum
find_even_fibs(seed_low, seed_high, max_val) | [
"[email protected]"
] | |
8c04564df7c3a89af5d2e4f21836754bffffc055 | 0395525e4cf99944b496e4bf6de183ae5456b0e2 | /array/taskX.py | 8ae6254d35203f5c4b9e4c1c60c4e191f70d60cc | [] | no_license | Dismissall/info_msk_task | a98610d854c7adfb2b829e5593d9eebe5013b972 | 0f29d18ccfb6834af318418e6f6006681d295e4c | refs/heads/master | 2020-06-27T09:30:03.289481 | 2019-09-01T15:31:30 | 2019-09-01T15:31:30 | 199,649,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | sizeL, steps = map(int, input().split())
L = ["I"] * sizeL
for i in range(steps):
begin, end = map(int, input().split())
for element in range(begin - 1, end):
L[element] = '.'
print("".join(L))
| [
"[email protected]"
] | |
70ecad88c9ac84c198f61883c5e1885f3efc6486 | 1b45990df208915d9456412e5d195d4771ead1ab | /cd71A.py | 8e230df2d37a0d8e9bd59f150c09a2cbb6fe2ed9 | [] | no_license | AhasanulIslam/python_code | 0b906697f1ca8d3e4078c245951d3a74aedfb35c | f3e8047d883760ddca1827665fa6e901e7232a35 | refs/heads/main | 2023-02-05T21:33:31.374831 | 2020-12-29T00:56:31 | 2020-12-29T00:56:31 | 325,209,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | int_input = int(input())
for new in range(0, int_input):
sting_input = str(input())
length = len(sting_input)
if (length > 10):
print(f'{sting_input[0]}{length-2}{sting_input[length-1]}')
else:
print(sting_input)
| [
"[email protected]"
] | |
f8f82570e0f4443ac61f4cefdf713224bdfd4d23 | b2609138cde5297601025f6cf2a0ce4c247559e3 | /mysite/urls.py | 142222e4bc408dae921b00d2e73494098b5dc4d9 | [] | no_license | miltonArango/DjangoTutorial | 7593f007e367da5ed0c2fbd60af5cc62225a9b73 | aa21322d4d191a6bf485bbc237df42e01039f43e | refs/heads/master | 2021-01-10T06:39:39.294595 | 2015-11-01T02:10:33 | 2015-11-01T02:10:33 | 45,325,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^$', 'newsletter.views.home', name='home'),
url(r'^contact/$', 'newsletter.views.contact', name='contact'),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
f760c53b3760bd59a2fee205e7f36427d7253daa | d59c2c70aa76331da774a73f29aac6817722dece | /commonfile.py | 5187725697bcd8a3c63fd9420d7e9adc6ed7c0b7 | [] | no_license | prashant799778/c | 88966bac643fa9e7d6d004c42f8a4fcf87265924 | 6bc8c4287e9cee2c18e634c3b3cffe1b0387c6ab | refs/heads/main | 2023-01-29T17:58:59.642222 | 2020-12-08T09:49:26 | 2020-12-08T09:49:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,917 | py |
from datetime import datetime,timedelta
import pytz
import json
import config
import hashlib
import random
import uuid
#current datetime time zone india
def CurrentDatetime():
ist = pytz.timezone('Asia/Kolkata')
ist_time = datetime.now(tz=ist)
ist_f_time = str(ist_time.strftime("%Y-%m-%d %H:%M:%S"))
return ist_f_time
def AddDaysInCurrentDate(noofdays):
ist = pytz.timezone('Asia/Kolkata')
ist_time = datetime.now(tz=ist)
ist_time = ist_time + timedelta(days=noofdays)
ist_f_time = str(ist_time.strftime("%Y-%m-%d %H:%M:%S"))
return ist_f_time
def Errormessage():
data = {"status":"false","message":"Somthing went wrong please contact system admin","result":""}
return data
def NoRecordFoundmessage():
data = {"status":"false","message":"No Record Found","result":[]}
return data
def IncorrectUserPasswordMsg():
data = {"status":"false","message":"Enter Correct UserID Password","result":""}
return data
def EmailAlreadyExistMsg():
data = {"status":"false","message":"Email Already Exists","result":""}
return data
def likeAlreadyExistMsg():
data = {"status":"false","message":"You have Already liked","result":""}
return data
def EventInterstAlreadyExistMsg():
data = {"status":"false","message":"You have Already Interested Event","result":""}
return data
def CountryAlreadyExistMsg():
data = {"status":"false","message":"Country Already Exists","result":""}
return data
def QualificationAlreadyExistMsg():
data = {"status":"false","message":"Qualification Already Exists","result":""}
return data
def UniversityAlreadyExistMsg():
data = {"status":"false","message":"UniversityAlready Exists","result":""}
return data
def EmailMobileAlreadyExistMsg():
data = {"status":"false","message":"Email Or MobileNo Already Exists","result":""}
return data
def postTitlepostDescriptionAlreadyExistMsg():
data = {"status":"false","message":"postTitle Or postDescription Already Exists","result":""}
return data
def aboutUsDescriptionAlreadyExistMsg():
data = {"status":"false","message":"aboutUs Description Already Exists","result":""}
return data
def MobileNoNotFound():
data = {"status":"false","message":"MobileNo Not Exists","result":""}
return data
def truemessage():
data = {"status":"true","message":"","result":""}
return data
def RecordExistMsg():
data = {"status":"false","message":"Record Already Exists","result":""}
return data
def InputKeyNotFoundMsg():
data = {"status":"false","message":"Input Keys Not Found","result":""}
return data
def ResponceWithInputresult(result):
data = {"status":"true","message":"","result":result}
return data
def ResponceWithInputmessage(msg,status):
data = {"status":status,"message":msg,"result":""}
return data
#success message for crud operation
def Successmessage(type):
if type == "insert":
output ="Record Inserted Successfully"
elif type == "update":
output ="Record Updated Successfully"
elif type == "delete":
output ="Record Deleted Successfully"
return output
def DecodeInputdata(data):
data = json.loads(data.decode("utf-8"))
return data
def CreateHashKey(FirstKey,SecoundKey):
    # The sha256 variant below is disabled; a time-based uuid1 is returned
    # instead, so the two inputs are currently unused.
    # hash = hashlib.sha256()
    # hash.update(('%s%s' % (FirstKey,SecoundKey)).encode('utf-8'))
Hashkey = uuid.uuid1()
return Hashkey
def createShareurl(FirstKey,SecoundKey):
inputtext = (FirstKey + SecoundKey)
hash = hashlib.sha256()
hash.update(inputtext.encode('utf-8'))
url = hash.hexdigest()
return url
def GetRandomNo():
RandomNo = str(random.randint(100000,999999))
return RandomNo
def MandatoryKeyMessage(KeyName):
data = {"status":"false","message": KeyName + " Not Found","result":""}
return data
def CheckKeyNameBlankValue(Keyvalue,inputdata):
for keyname in Keyvalue:
if keyname not in inputdata:
msg = MandatoryKeyMessage(keyname)
return msg
else:
if inputdata[keyname] == "":
msg = MandatoryKeyMessage(keyname)
return msg
return "1"
def CheckIfAnyOneExists(Keyvalue,inputdata):
for keyname in Keyvalue:
if keyname in inputdata and inputdata[keyname] != "":
return "1"
data = {"status":"false","message": "Enter Any One value in " + str(Keyvalue),"result":""}
return data
def Saveimage(file):
# filename = file.filename or ''
# filename = filename.replace("'","")
# #folder path to save campaign image
# FolderPath = ConstantData.GetCampaignImagePath(filename)
# filepath = '/CampImages/' + filename
# file.save(FolderPath)
# CampImagePath = filepath
return "1"
def EscapeSpecialChar(string):
newstr = string.translate(str.maketrans({"-":r"\-","]":r"\]","\\":r"\\","^":r"\^","$":r"\$","*":r"\*",".":r"\.","'":r"\'"}))
return newstr
def writeLog(apiName,data,flag):
try:
ist = pytz.timezone('Asia/Kolkata')
ist_time = datetime.now(tz=ist)
ist_f_time = ist_time.strftime("%Y-%m-%d %H:%M:%S")
data["time"] = str(ist_f_time)
data["api"] = str(apiName)
        if flag == 0:
            log = open("/var/www/medParliament/backend/med_parliament/request.log", "a")
        elif flag == 1:
            log = open("/var/www/medParliament/backend/med_parliament/response.log", "a")
        else:
            return 0  # unknown flag; avoid an UnboundLocalError on `log` below
log.write(str(data) + "\n")
log.close()
return 1
except Exception as e:
print("Error--->" + str(e))
return 0
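if __name__ == '__main__':
    # Hedged smoke test of the validators above (assumes the sibling `config`
    # module imported at the top is available).
    body = {'email': '[email protected]', 'password': ''}
    print(CheckKeyNameBlankValue(['email', 'password'], body))  # flags the blank password
    print(CheckIfAnyOneExists(['email', 'mobileNo'], body))     # "1": email is present
    print(CurrentDatetime(), GetRandomNo())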
| [
"[email protected]"
] | |
775e0a21e4da3bce7f0a14f7e13c3fe2481f27eb | d8c388015385f0f4a59f2873f5299c0038897e3f | /server/node_modules/dtrace-provider/build/config.gypi | b539aa8443265b0e44496d018964ffe94c0fe282 | [
"BSD-2-Clause"
] | permissive | MrJangoBox/PremoApp | ebf92557993785bed0dc424f2f5f69ee831ab634 | e879a3d188c1b35352d0c59b9dfb85844ead78b4 | refs/heads/master | 2020-04-06T06:58:10.361437 | 2016-09-04T06:28:41 | 2016-09-04T06:28:41 | 64,789,011 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,939 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"host_arch": "x64",
"icu_data_file": "icudt57l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt57l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "57",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_random_seed": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/MeMac/.node-gyp/6.2.2",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"dry_run": "",
"legacy_bundling": "",
"save_dev": "",
"browser": "",
"only": "",
"viewer": "man",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"maxsockets": "50",
"shell": "/bin/bash",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"if_present": "",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"npat": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"global_style": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"access": "",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/MeMac/.npm-init.js",
"userconfig": "/Users/MeMac/.npmrc",
"node_version": "6.2.2",
"user": "501",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"progress": "true",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"tag_version_prefix": "v",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/MeMac/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/3.9.5 node/v6.2.2 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "0022",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/tmp",
"unsafe_perm": "",
"link": "",
"prefix": "/usr/local"
}
}
| [
"[email protected]"
] | |
f27bd70b1b95e95d8ddd07beb64a9a5fe2bb5ccf | 92f434e7dd0ead31ffda8a3388b3876c5bc6b1c4 | /combination_sum.py | d7c37a7156257643d1161db000bbff7192e494a8 | [] | no_license | annaymj/LeetCode | 8da53859816cd537dff40885f2c86aa4266be311 | 7694d0798fe55c69f350013b9329a5844c8c5e35 | refs/heads/master | 2021-10-01T20:31:34.627983 | 2021-09-20T03:22:03 | 2021-09-20T03:22:03 | 195,608,033 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,648 | py | """
Given an array of distinct integers candidates and a target integer target, return a list of all unique combinations of candidates where the chosen numbers sum to target. You may return the combinations in any order.
The same number may be chosen from candidates an unlimited number of times. Two combinations are unique if the frequency of at least one of the chosen numbers is different.
It is guaranteed that the number of unique combinations that sum up to target is less than 150 combinations for the given input.
Example 1:
Input: candidates = [2,3,6,7], target = 7
Output: [[2,2,3],[7]]
Explanation:
2 and 3 are candidates, and 2 + 2 + 3 = 7. Note that 2 can be used multiple times.
7 is a candidate, and 7 = 7.
These are the only two combinations.
"""
from typing import List
class Solution:
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
results = []
self.dfs(target, [], 0, results, candidates)
return results
def dfs(self,remain, comb, start, results, candidates):
if remain == 0:
# make a deep copy of the current combination
results.append(list(comb))
return
elif remain < 0:
# exceed the scope, stop exploration.
return
for i in range(start, len(candidates)):
# add the number into the combination
comb.append(candidates[i])
# give the current number another chance, rather than moving on
self.dfs(remain - candidates[i], comb, i, results, candidates)
# backtrack, remove the number from the combination
comb.pop()
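if __name__ == '__main__':
    # Quick check against the example in the docstring above.
    print(Solution().combinationSum([2, 3, 6, 7], 7))  # expect [[2, 2, 3], [7]]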
| [
"[email protected]"
] | |
1bcc6bd718a97a342700ea58fa2056cd7891915a | 732835ff027f10e326022108157ce7a371a6b5fe | /data/interp/interp_data_preprocessor.py | 2dc824ef91a1d77c208e925d59c788b17096568e | [] | no_license | NeedsMorePie/interpolator | a476fcac02667013037bf5389e17f8136e373144 | 494d503c729ba018614fc742f1aee1e48d37127e | refs/heads/master | 2020-03-16T20:18:33.615418 | 2018-09-04T21:43:44 | 2018-09-04T21:43:44 | 132,954,308 | 2 | 3 | null | 2018-09-01T19:01:31 | 2018-05-10T21:05:29 | Python | UTF-8 | Python | false | false | 8,493 | py | import multiprocessing
import os.path
import numpy as np
from common.utils.data import *
from data.interp.interp_data import InterpDataSet
from joblib import Parallel, delayed
class InterpDataPreprocessor:
def __init__(self, tf_record_directory, inbetween_locations, shard_size=1, validation_size=0, max_shot_len=10,
verbose=False):
"""
:param tf_record_directory: Str.
:param inbetween_locations: A list of lists. Each element specifies where inbetweens will be placed,
and each configuration will appear with uniform probability.
For example, let a single element in the list be [0, 1, 0].
With this, dataset elements will be sequences of 3 ordered frames,
where the middle (inbetween) frame is 2 frames away from the first and last frames.
The number of 1s must be the same for each list in this argument.
:param shard_size: Int.
:param validation_size: Int.
:param max_shot_len: Int.
:param verbose: Bool.
"""
self.tf_record_directory = tf_record_directory
self.inbetween_locations = inbetween_locations
self.shard_size = shard_size
self.validation_size = validation_size
self.max_shot_len = max_shot_len
self.verbose = verbose
def get_tf_record_dir(self):
return self.tf_record_directory
def preprocess_raw(self, raw_directory):
"""
Processes the data in raw_directory to the tf_record_directory.
:param raw_directory: The directory to the images to process.
:param validation_size: The TfRecords will be partitioned such that, if possible,
this number of validation sequences can be used for validation.
:param max_shot_len: Video shots larger than this value will be broken up.
"""
if self.verbose:
print('Checking directory for data.')
image_paths = self.get_data_paths(raw_directory)
self._convert_to_tf_record(image_paths, self.shard_size, self.validation_size, self.max_shot_len)
def get_data_paths(self, raw_directory):
"""
:param raw_directory: The directory to the images to process.
:return: List of list of image names, where image_paths[0][0] is the first image in the first video shot.
"""
raise NotImplementedError
def process_image(self, filename):
"""
Reads from and processes the file.
:param filename: String. Full path to the image file.
:return: bytes: The bytes that will be saved to the TFRecords.
Must be readable with tf.image.decode_image.
height: Height of the processed image.
width: Width of the processed image.
"""
raise NotImplementedError
def _convert_to_tf_record(self, image_paths, shard_size, validation_size, max_shot_len):
"""
:param image_paths: List of list of image names,
where image_paths[0][0] is the first image in the first video shot.
:return: Nothing.
"""
if not os.path.exists(self.tf_record_directory):
os.mkdir(self.tf_record_directory)
def _write(filename, iter_range, image_paths):
if self.verbose:
print('Writing', len(iter_range), 'data examples to the', filename, 'dataset.')
sharded_iter_ranges = create_shard_ranges(iter_range, shard_size)
Parallel(n_jobs=multiprocessing.cpu_count(), backend="threading")(
delayed(_write_shard)(shard_id, shard_range, image_paths, filename,
self.tf_record_directory, self.process_image, self.verbose)
for shard_id, shard_range in enumerate(sharded_iter_ranges)
)
image_paths = self._enforce_maximum_shot_len(image_paths, max_shot_len)
val_paths, train_paths = self._split_for_validation(image_paths, validation_size)
image_paths = val_paths + train_paths
train_start_idx = len(val_paths)
_write(InterpDataSet.VALIDATION_TF_RECORD_NAME, range(0, train_start_idx), image_paths)
_write(InterpDataSet.TRAIN_TF_RECORD_NAME, range(train_start_idx, len(image_paths)), image_paths)
def _enforce_maximum_shot_len(self, image_paths, max_shot_len):
"""
:param image_paths: List of list of image names,
where image_paths[0][0] is the first image in the first video shot.
:return: List in the same format as image_paths,
where len(return_value)[i] for all i <= max_shot_len.
"""
cur_len = len(image_paths)
i = 0
while i < cur_len:
if len(image_paths[i]) > max_shot_len:
part_1 = image_paths[i][:max_shot_len]
part_2 = image_paths[i][max_shot_len:]
image_paths = image_paths[:i] + [part_1] + [part_2] + image_paths[i+1:]
cur_len += 1
i += 1
return image_paths
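    # Worked example (a sketch): with max_shot_len=2, [['a','b','c','d','e']]
    # is broken up into [['a','b'], ['c','d'], ['e']].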
def _split_for_validation(self, image_paths, validation_size):
"""
:param image_paths: List of list of image names,
where image_paths[0][0] is the first image in the first video shot.
:param validation_size: The split will guarantee that at there will be at least this many validation elements.
:return: (validation_image_paths, train_image_paths), where both have the same structure as image_paths.
"""
if validation_size == 0:
return [], image_paths
# Count the number of sequences that exist for a certain shot length.
max_len = 0
for spec in self.inbetween_locations:
max_len = max(2 + len(spec), max_len)
a = np.zeros(max_len + 1)
for spec in self.inbetween_locations:
a[2 + len(spec)] += 1
for i in range(1, len(a)):
a[i] += a[i-1]
# Find the split indices.
cur_samples = 0
split_indices = (len(image_paths)-1, len(image_paths[-1])-1)
for i in range(len(image_paths)):
for j in range(len(image_paths[i])):
cur_samples += a[min(j + 1, len(a) - 1)]
if cur_samples >= validation_size:
split_indices = (i, j)
break
if cur_samples >= validation_size:
break
i, j = split_indices
val_split = []
val_split += image_paths[:i]
if len(image_paths[i][:j+1]) > 0:
val_split.append(image_paths[i][:j+1])
train_split = []
if len(image_paths[i][j+1:]) > 0:
train_split.append(image_paths[i][j+1:])
train_split += image_paths[i+1:]
return val_split, train_split
def _write_shard(shard_id, shard_range, image_paths, filename, directory, processor_fn, verbose):
"""
:param shard_id: Index of the shard.
:param shard_range: Iteration range of the shard.
:param image_paths: List of list of image names.
:param filename: Base name of the output shard.
:param directory: Output directory.
:param processor_fn: Function to read and process from filename with before saving to TFRecords.
:return: Nothing.
"""
if verbose and len(shard_range) > 0:
print('Writing to shard', shard_id, 'data points', shard_range[0], 'to', shard_range[-1])
path = os.path.join(directory, str(shard_id) + '_' + filename)
writer = tf.python_io.TFRecordWriter(path)
for i in shard_range:
if len(image_paths[i]) <= 0:
continue
shot_raw = []
h = None
w = None
for image_path in image_paths[i]:
bytes, h, w = processor_fn(image_path)
shot_raw.append(bytes)
# Write to tf record.
example = tf.train.Example(
features=tf.train.Features(
feature={
InterpDataSet.SHOT_LEN: tf_int64_feature(len(shot_raw)),
InterpDataSet.SHOT: tf_bytes_list_feature(shot_raw),
InterpDataSet.HEIGHT: tf_int64_feature(h),
InterpDataSet.WIDTH: tf_int64_feature(w)
}))
writer.write(example.SerializeToString())
writer.close()
| [
"[email protected]"
] | |
e6eb6911400fa80166135a89111fe9226a54f964 | f24335fe2f38935e45017b40f5d032010a61f246 | /from_scratch/logistic_regression/init.py | f2b64f2aa7d6ff6052a5bf285df530d36c83add5 | [] | no_license | guruprasaad123/ml_for_life | 85a8bab14a7e3f8329e66ed0dba02eb11669205b | 04a47792d6b288e0c2deb28740827089ce7e97dd | refs/heads/master | 2022-11-29T11:49:41.461117 | 2021-03-23T17:27:22 | 2021-03-23T17:27:22 | 176,289,176 | 4 | 0 | null | 2022-11-21T21:30:32 | 2019-03-18T13:13:31 | Jupyter Notebook | UTF-8 | Python | false | false | 5,537 | py | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import gradient_descent as gd
import time
from collections import OrderedDict
def create_data():
data = OrderedDict(
amount_spent = [50, 10, 20, 5, 95, 70, 100, 200, 0],
send_discount = [0, 1, 1, 1, 0, 0, 0, 0, 1]
)
df = pd.DataFrame.from_dict(data) # creating a dataframe
    arr = df.values  # convert the DataFrame into a numpy array (as_matrix was removed in newer pandas)
    return arr  # returning the numpy array
def get_data(path='data.csv', cols = ['dist_cycled','calories'], n_rows = 1000):
df = pd.read_csv(path) #Reads in the CSV file specified
df = df[cols] #Gets only the specified columns
df.fillna(0, inplace = True) #Replaces missing values with 0.
print('Loaded df of size %d'%(len(df)))
    arr = df.values  # returns the dataframe as a numpy array (see note above on as_matrix)
print(arr[:2,:])
return arr
points = create_data()
def compute_total_error(m,b): #Computes total mean squared error
totalError = 0
for i in range(len(points)):
#x = points[i,0]
#y = points[i,1]
[x,y] = points[i]
totalError += (y - (m * x + b)) ** 2 #Error is calculated as y' = mx + b(Assuming linear regression) so E = (y-y')^2, summed over all points
return totalError/float(len(points)) #Returning the mean squared error.
def total_error(point_pair): #driver function for compute_total_error
return compute_total_error(point_pair[0], point_pair[1])
def compute_jacobian(point_pair, h = 1e-5): #computes the jacobian of the function total_error
n = len(point_pair)
jacobian = np.zeros(n) #initialize the jacobian matrix
for i in range(n):
x_i = np.zeros(n)
x_i[i] += h #add the limit value, any small value > 0 should do
        jacobian[i] = (total_error(point_pair+x_i) - total_error(point_pair))/h #calculate derivative using first principle method f'(x) = lim(h->0) (f(x+h) - f(x))/h
print('Jacobian => ',jacobian[i])
return jacobian #return the jacobian for the pair of points
def compute_hessian(point_pair, h = 1e-5): #computes the hessian of the function total_error, it is found as the derivative of the jacobian
n = len(point_pair)
hessian = np.zeros((n,n)) #initialize the hessian matrix
for i in range(n):
x_i = np.zeros(n)
x_i[i] += h #add the limit value, any small value > 0 should do
        hessian[i] = (compute_jacobian(point_pair+x_i) - compute_jacobian(point_pair))/h #calculate derivative using first principle method f'(x) = lim(h->0) (f(x+h) - f(x))/h
print('hessian =>',hessian[i])
return hessian #return the jacobian for the pair of points
def compute_newton(init_points, max_iter = 10000, e = 1e-5): #calculate roots of the equation, i.e. find x if f(x) = 0. In our case we want to find the minima point, so we find f'(x) = 0
    point_pair_arr = np.zeros((max_iter + 1, len(init_points))) #initialize m,b values; one extra row so the final i+1 update fits
point_pair_arr[0] = init_points #start points
opt_val = None #optimal_value to return
for i in range(max_iter):
jacobian = compute_jacobian(point_pair_arr[i]) #calculate the jacobian at current m,b
hessian = compute_hessian(point_pair_arr[i]) #calculate the hessian at current m,b
point_pair_arr[i+1] = point_pair_arr[i] - np.dot(np.linalg.pinv(hessian), jacobian) #calulate the new m, new b using newton's equation x(t+1) = x(t) - f(x(t))/f'(x(t)) but we want to find root of f'(x) so we would do x(t+1) = x(t) - f'(x(t))/f''(x(t))
#pinv is pseudo inverse, it prevents values like 1/0 and replaces it with a very high value.
print('New m is %.2f and new b is %.2f'%(point_pair_arr[i,0], point_pair_arr[i,1]))
opt_val = point_pair_arr[i+1]
if np.abs(total_error(point_pair_arr[i+1]) - total_error(point_pair_arr[i])) < e: #used for early stopping, stops when there is no real improvement.
print('Optimal m is %.2f and Optimal b is %.2f'%(point_pair_arr[i+1,0], point_pair_arr[i+1,1]))
break
return opt_val
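# A minimal, self-contained sketch (hypothetical 1-D objective, separate from
# the `points` fit above) of why the Newton update converges so quickly: on a
# quadratic, x - f'(x)/f''(x) reaches the minimizer in a single step.
def newton_sketch():
    x = 0.0
    f_prime = 2 * (x - 3) # derivative of f(x) = (x - 3)^2
    f_double_prime = 2.0 # its (constant) second derivative
    x_new = x - f_prime / f_double_prime
    assert abs(x_new - 3.0) < 1e-12 # lands exactly on the minimum x = 3
    return x_new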
def plot_line_data(m, b): #Plots the calculated line from m and b
X = points[:,0]
Y = points[:,1]
plt.plot(X, Y, 'bo') #First plots the data points
plt.plot(X, m * X + b) #Plot the line.
plt.axis([0,1.5* max(X), 0, 1.3 * max(Y)]) #Set the axes range.
plt.title("Best line.")
plt.text(10, 130, "m="+str(round(m,4))+" b="+str(round(b,4)) + " error="+str(compute_total_error(m,b)))
plt.show() #shows the graph.
return
def main(): #main driver function
init_points = np.array([0.0,1.0]) #intial points
print("2nd order optimization starts at "+ str(time.asctime())) #start time
time_t = time.time() #start time
newton_points = compute_newton(init_points, max_iter = 100) #find the solution
print(newton_points)
print("b = {0}, m = {1}, error = {2}".format(newton_points[1], newton_points[0], compute_total_error(newton_points[0], newton_points[1])))
time_t = time.time() - time_t #end time
print("2nd order optimization ends at %s and has taken %dms"%(str(time.asctime()), time_t))
plot_line_data(newton_points[0], newton_points[1]) #plot the line generated
print("1st order optimization starts at "+ str(time.asctime())) #start time
time_t = time.time()
m,b = gd.run()
time_t = time.time() - time_t #end time
print("1st order optimization ends at %s and has taken %dms"%(str(time.asctime()), time_t))
plot_line_data(m, b) #plot the generated line
return
if __name__=='__main__':
main()
| [
"[email protected]"
] | |
ddd00e589ec924d8b2bce0b19db3a2e20f929e9d | 2e89cc44dae7b8425a3b969094729fb174bc1d3a | /rlpyt/ul/models/rl/sac_rl_models.py | d36350e379ba5b98097d15fb88e0f1bc8c3defec | [
"MIT"
] | permissive | ZiwenZhuang/rlpyt | 161d9d0f6d99f7a6883ff33dafeb4d3f245821ce | 95a05a86f576190cf6217fd9aad7b5f615ee97d1 | refs/heads/master | 2021-08-10T12:17:11.325561 | 2021-07-01T05:04:39 | 2021-07-01T05:04:39 | 215,345,715 | 1 | 0 | MIT | 2019-10-15T16:27:50 | 2019-10-15T16:27:49 | null | UTF-8 | Python | false | false | 6,426 | py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlpyt.models.conv2d import Conv2dModel
from rlpyt.models.mlp import MlpModel
from rlpyt.utils.tensor import infer_leading_dims, restore_leading_dims
def weight_init(m):
"""Custom weight init for Conv2D and Linear layers."""
if isinstance(m, nn.Linear):
nn.init.orthogonal_(m.weight.data)
m.bias.data.fill_(0.0)
elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
# delta-orthogonal init from https://arxiv.org/pdf/1806.05393.pdf
assert m.weight.size(2) == m.weight.size(3)
m.weight.data.fill_(0.0)
m.bias.data.fill_(0.0)
mid = m.weight.size(2) // 2
gain = nn.init.calculate_gain('relu')
nn.init.orthogonal_(m.weight.data[:, :, mid, mid], gain)
class SacModel(nn.Module):
"""To keep the standard agent.model interface for shared params, etc."""
def __init__(self, conv, pi_fc1, pi_mlp):
super().__init__()
self.conv = conv
self.pi_fc1 = pi_fc1
self.pi_mlp = pi_mlp
def forward(self, observation, prev_action, prev_reward):
"""Just to keep the standard obs, prev_action, prev_rew interface."""
conv = self.conv(observation)
latent = self.pi_fc1(conv)
mu, log_std = self.pi_mlp(latent, prev_action, prev_reward)
return mu, log_std, latent, conv
class SacConvModel(nn.Module):
def __init__(
self,
image_shape,
channels=None,
kernel_sizes=None,
strides=None,
paddings=None,
final_nonlinearity=True,
):
super().__init__()
c, h, w = image_shape
self.conv = Conv2dModel(
in_channels=c,
channels=channels or [32, 32, 32, 32],
kernel_sizes=kernel_sizes or [3, 3, 3, 3],
strides=strides or [2, 1, 1, 1],
paddings=paddings,
final_nonlinearity=final_nonlinearity,
)
self._output_shape = self.conv.conv_out_shape(h=h, w=w, c=c)
self._output_size = self.conv.conv_out_size(h=h, w=w, c=c)
def forward(self, observation):
if observation.dtype == torch.uint8:
img = observation.type(torch.float)
img = img.mul_(1. / 255)
else:
img = observation
lead_dim, T, B, img_shape = infer_leading_dims(img, 3)
conv = self.conv(img.view(T * B, *img_shape))
conv = restore_leading_dims(conv, lead_dim, T, B)
return conv
@property
def output_shape(self):
return self._output_shape
@property
def output_size(self):
return self._output_size
class SacFc1Model(nn.Module):
def __init__(
self,
input_size,
latent_size,
layer_norm=True,
):
super().__init__()
self.linear = nn.Linear(input_size, latent_size)
self.layer_norm = nn.LayerNorm(latent_size) if layer_norm else None
self._output_size = latent_size
def forward(self, conv_out):
if conv_out.dtype == torch.uint8: # Testing NoConv model
conv_out = conv_out.type(torch.float)
conv_out = conv_out.mul_(1. / 255)
lead_dim, T, B, _ = infer_leading_dims(conv_out, 3)
        conv_out = F.relu(conv_out.view(T * B, -1)) # because conv_out might be pre-activation
latent = self.linear(conv_out)
if self.layer_norm is not None:
latent = self.layer_norm(latent)
latent = restore_leading_dims(latent, lead_dim, T, B)
return latent
@property
def output_size(self):
return self._output_size
class SacActorModel(nn.Module):
def __init__(
self,
input_size,
action_size,
hidden_sizes,
min_log_std=-10.,
max_log_std=2.,
):
super().__init__()
self.mlp = MlpModel(
input_size=input_size,
hidden_sizes=hidden_sizes,
output_size=action_size * 2,
)
self.apply(weight_init)
self.min_log_std = min_log_std
self.max_log_std = max_log_std
def forward(self, latent, prev_action=None, prev_reward=None):
lead_dim, T, B, _ = infer_leading_dims(latent, 1) # latent is vector
out = self.mlp(latent.view(T * B, -1))
mu, log_std = out.chunk(chunks=2, dim=-1)
# Squash log_std into range.
log_std = torch.tanh(log_std)
log_std = self.min_log_std + 0.5 * (
self.max_log_std - self.min_log_std) * (1 + log_std)
mu, log_std = restore_leading_dims((mu, log_std), lead_dim, T, B)
return mu, log_std
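# Quick numeric check (illustrative values) of the log-std squashing above:
# tanh maps the raw head output into (-1, 1) and the affine rescale maps that
# onto (min_log_std, max_log_std). With the defaults (-10, 2):
#   raw = 0.0   -> tanh = 0.0 -> log_std = -10 + 0.5 * 12 * 1.0 = -4.0
#   raw -> +inf -> tanh -> 1.0 -> log_std -> 2.0 (the upper bound)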
class SacCriticModel(nn.Module):
def __init__(
self,
input_size,
action_size,
hidden_sizes,
):
super().__init__()
self.mlp1 = MlpModel(
input_size=input_size + action_size,
hidden_sizes=hidden_sizes,
output_size=1,
)
self.mlp2 = MlpModel(
input_size=input_size + action_size,
hidden_sizes=hidden_sizes,
output_size=1,
)
self.apply(weight_init)
def forward(self, latent, action, prev_action=None, prev_reward=None):
lead_dim, T, B, _ = infer_leading_dims(latent, 1) # latent is vector
q_input = torch.cat([
latent.view(T * B, -1),
action.view(T * B, -1),
], dim=1)
q1 = self.mlp1(q_input).squeeze(-1)
q2 = self.mlp2(q_input).squeeze(-1)
q1, q2 = restore_leading_dims((q1, q2), lead_dim, T, B)
return q1, q2
class SacNoConvModel(nn.Module):
"""To keep the standard agent.model interface for shared params, etc.
RESULT: yeah this didn't work in most envs, except a bit in walker.
"""
def __init__(self, pi_fc1, pi_mlp):
super().__init__()
# self.conv = conv
self.pi_fc1 = pi_fc1
self.pi_mlp = pi_mlp
def forward(self, observation, prev_action, prev_reward):
"""Just to keep the standard obs, prev_action, prev_rew interface."""
# conv = self.conv(observation)
conv = observation
latent = self.pi_fc1(conv)
mu, log_std = self.pi_mlp(latent, prev_action, prev_reward)
return mu, log_std, latent
| [
"[email protected]"
] | |
d834bca54d6d57c07bac682035f245028c3acca3 | 6e0dc5501f7295690432125605a3db6dee3220fd | /hc_django/project/models.py | 2a59b564e6ac4af4f612b385f99eca01f494e901 | [
"Apache-2.0"
] | permissive | HuangeHei/hcwy | efd49296c4c87e2c85b76d5a7ca44ca1d55c2407 | 21b78f102a7c4dbcd7e4522e5074073429f86b93 | refs/heads/master | 2020-03-06T22:07:32.020788 | 2018-04-03T09:24:19 | 2018-04-03T09:24:19 | 127,095,540 | 0 | 0 | null | 2018-03-28T07:15:01 | 2018-03-28T06:39:11 | Python | UTF-8 | Python | false | false | 501 | py | from django.db import models
# Create your models here.
class ProjectType(models.Model):
type_name = models.CharField(max_length=1024) # Type Name
    type_desc = models.CharField(max_length=1024) # Type description
def __str__(self):
return self.type_name
class Projects(models.Model):
    project_name = models.CharField(max_length=1024) # project name
    project_type = models.ForeignKey(ProjectType) # project type
def __str__(self):
return self.project_name
| [
"[email protected]"
] | |
bc3047e4e9af54346d95aa12cf6ce060c4f27b6d | 0ee967f58a52b473366d200748b526d10b946cd6 | /sopy/migrations/a7cdfb24f1_wiki_redirect.py | fed9fe0fae0d0587126ea188941d60deddfebb03 | [
"BSD-3-Clause"
] | permissive | AlexFrazer/sopython-site | 22374ebcb8f802877bbee058baf4ac4252166645 | 4ede64cf6d04def596be13feeaa4d84ce8503ef3 | refs/heads/master | 2021-01-17T19:13:09.786186 | 2016-08-18T17:03:37 | 2016-08-18T17:03:37 | 22,970,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | """wiki redirect
Revision ID: a7cdfb24f1
Revises: 54a3343a03b
Create Date: 2015-05-16 09:43:45.455535
"""
from alembic import op
import sqlalchemy as sa
revision = 'a7cdfb24f1'
down_revision = '54a3343a03b'
branch_labels = ()
depends_on = None
def upgrade():
op.add_column('wiki_page', sa.Column('redirect_id', sa.Integer(), nullable=True))
op.create_unique_constraint(op.f('uq_wiki_page_title'), 'wiki_page', ['title'])
op.create_foreign_key(op.f('fk_wiki_page_redirect_id_wiki_page'), 'wiki_page', 'wiki_page', ['redirect_id'], ['id'])
def downgrade():
op.drop_constraint(op.f('fk_wiki_page_redirect_id_wiki_page'), 'wiki_page', type_='foreignkey')
op.drop_constraint(op.f('uq_wiki_page_title'), 'wiki_page', type_='unique')
op.drop_column('wiki_page', 'redirect_id')
| [
"[email protected]"
] | |
09f6cbedee6846b56a8cde264b31ac9a2339f362 | a0187b956995a3089905154e657fc72b9c26253e | /manage.py | 66ff5b32e8e6a215a663a5cba60c41b76d45b7cf | [
"MIT"
] | permissive | elleryq/site_framework_demo | ac21d29195dc4ddbf2a593de9963cea6d99920a2 | cb58860b01c34a2d020a8c8fa416d30feae3d277 | refs/heads/master | 2022-12-03T01:12:30.749440 | 2020-08-19T03:49:23 | 2020-08-19T03:49:23 | 288,624,070 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'site_framework_demo.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
4532f799a2f525f142defee87eccd2d03128772e | acffeddbf87a0599352953646315ebdf970df927 | /posts/migrations/0008_auto_20200715_1347.py | f9ac2c17b81736bf87602dbe62819a51cafbceae | [] | no_license | lelematos/django-simple-commerce-model | a69d345b978714ad0c1a168ead227350b7262467 | b018e79692c80ace0c3cb99f9c5efe3e540e3853 | refs/heads/master | 2022-12-13T02:34:10.904803 | 2020-09-09T13:34:33 | 2020-09-09T13:34:33 | 279,939,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | # Generated by Django 3.0.8 on 2020-07-15 16:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0007_auto_20200714_1720'),
]
operations = [
migrations.AlterField(
model_name='item',
name='tamanhos',
field=models.CharField(blank=True, max_length=20, null=True),
),
]
| [
"[email protected]"
] | |
d84086205a421a8b6e2dd784d8e6b03134fc95b8 | 60ab7236a66115475e32e630bdafe07174b07306 | /tests/__mocks__/pika.py | c2fa89a7cf8b758d79b78784e5b66591a16c356b | [] | no_license | KJBose/Tools-RmqPy | b8d69fefc78a4eb837a9ab9159bb5de32ad071ba | 3444a44d6509bc262f79cc40aca5789ab16e681f | refs/heads/master | 2023-04-09T06:53:22.509060 | 2021-04-19T17:42:23 | 2021-04-19T17:42:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | class Channel:
def __init__(self): pass
def exchange_declare(self): pass
def queue_declare(self, queue, durable): pass
def queue_bind(self, queue, exchange): pass
def basic_qos(self, prefetch_count): pass
def basic_consume(self, queue, on_message_callback): pass
def start_consuming(self): pass
def basic_publish(self): pass
    def basic_ack(self): pass
class Connection:
def __init__(self): pass
def channel(self): return Channel()
| [
"[email protected]"
] | |
dfd76324264cbc26e6c3808485bbb31e31d13615 | 5360372275da84dad50f4743eaed4d5cba2a3b4c | /bag/views.py | 736053d67e59a4941fd17c9847edb252317ae3d4 | [] | no_license | JihanePett/jbonjovi | ebcc6c6bb1837fb9d294a0ab35e71e8008f2b5a9 | bb70e42f0b166eee8bb6afbe2502f66ca81dca24 | refs/heads/master | 2023-02-28T00:42:39.838778 | 2021-02-05T11:12:09 | 2021-02-05T11:12:09 | 331,971,738 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,279 | py | from django.shortcuts import render, redirect,\
reverse, HttpResponse, get_object_or_404
from django.contrib import messages
from products.models import Product
# Create your views here.
def view_bag(request):
""" A view that renders the shopping bag contents page """
return render(request, 'bag/bag.html')
def add_to_bag(request, item_id):
""" Add a quantity of the specified product to the shopping bag """
product = Product.objects.get(pk=item_id)
quantity = int(request.POST.get('quantity'))
redirect_url = request.POST.get('redirect_url')
size = None
if 'product_size' in request.POST:
size = request.POST['product_size']
bag = request.session.get('bag', {})
if size:
if item_id in list(bag.keys()):
if size in bag[item_id]['items_by_size'].keys():
bag[item_id]['items_by_size'][size] += quantity
messages.success(request, f'Updated size {size.upper()}\
{product.name} quantity to\
{bag[item_id]["items_by_size"][size]}')
else:
bag[item_id]['items_by_size'][size] = quantity
messages.success(request, f'Added size {size.upper()}\
{product.name} to your bag')
else:
bag[item_id] = {'items_by_size': {size: quantity}}
            messages.success(request, f'Added size {size.upper()}\
                {product.name} to your bag')
else:
if item_id in list(bag.keys()):
bag[item_id] += quantity
            messages.success(request, f'Updated {product.name}\
                quantity to {bag[item_id]}')
else:
bag[item_id] = quantity
messages.success(request, f'You have added {product.name}\
to your bag')
request.session['bag'] = bag
return redirect(redirect_url)
def adjust_bag(request, item_id):
"""adjusts the quantity and amount of products when adjusted"""
product = Product.objects.get(pk=item_id)
quantity = int(request.POST.get('quantity'))
size = None
if 'product_size' in request.POST:
size = request.POST['product_size']
bag = request.session.get('bag', {})
if size:
if quantity > 0:
bag[item_id]['items_by_size'][size] = quantity
messages.success(request,
f'Updated size {size.upper()} {product.name}\
quantity to\
{bag[item_id]["items_by_size"][size]}')
else:
del bag[item_id]['items_by_size'][size]
if not bag[item_id]['items_by_size']:
bag.pop(item_id)
messages.success(request,
f'Removed size {size.upper()}\
{product.name} from your bag')
else:
if quantity > 0:
bag[item_id] = quantity
messages.success(request,
f'Updated {product.name}\
quantity to {bag[item_id]}')
else:
bag.pop(item_id)
messages.success(request, f'Removed {product.name} from your bag')
request.session['bag'] = bag
return redirect(reverse('view_bag'))
def remove_from_bag(request, item_id):
"""Remove an item from the bag"""
try:
product = get_object_or_404(Product, pk=item_id)
size = None
if 'product_size' in request.POST:
size = request.POST['product_size']
bag = request.session.get('bag', {})
if size:
del bag[item_id]['items_by_size'][size]
if not bag[item_id]['items_by_size']:
bag.pop(item_id)
messages.success(request,
f'Removed size {size.upper()} {product.name} from your bag')
else:
bag.pop(item_id)
messages.success(request, f'Removed {product.name} from your bag')
request.session['bag'] = bag
return HttpResponse(status=200)
except Exception as e:
messages.error(request, f'Error removing item: {e}')
return HttpResponse(status=500)
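# Shape of the session 'bag' maintained by the views above (illustrative ids):
#   {
#       '3': 2, # plain product: item_id -> quantity
#       '7': {'items_by_size': {'M': 1, 'L': 2}}, # sized product: per-size quantities
#   }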
| [
"[email protected]"
] | |
d332bb4c6d7967b7712450c34ae4edf3bc54394d | 80854c2ae9665e41d432a411788e10fd25c62e42 | /advent4.py | bc7242ace08a0d26f0a23663aa7eacb99bb00407 | [] | no_license | Lammmas/aoc2019 | f61589da634b0efb60e2ea45297880b0648e3e64 | d18789b95eff92b00c143ec498a2da57461f6146 | refs/heads/master | 2020-11-24T23:38:15.117779 | 2019-12-16T17:46:16 | 2019-12-16T17:46:16 | 228,391,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | start = 153517
end = 630395
count = 0
for c in range(start, end + 1):
code = [ int(e) for e in str(c) ]
decr = False
same = False
prev = -1
for n in code:
if n == prev:
same = True
elif n < prev:
decr = True
break
prev = n
hasPair = False
if same:
numbers = {}
for n in code:
if str(n) in numbers:
numbers[str(n)] += 1
else:
numbers[str(n)] = 1
for n, v in numbers.items():
if v == 2:
hasPair = True
if not decr and same and hasPair:
count += 1
print(count)
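# Recap of the rules enforced above: digits must never decrease left to right,
# and some digit must appear exactly twice (the `hasPair` pass). For example,
# 112233 counts, but 123444 does not, because the run of 4s is a triple rather
# than a standalone pair.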
| [
"[email protected]"
] | |
f9f6cfbe20450566ba49576cc6d511634b8e84b8 | 559f9baf0143e5c0aec8cc4050371c1276b3347e | /code/annt.py | 7213b77a636e46638457d6425222c96d871ce18f | [] | no_license | jkfurtney/dotfiles | 1b8e6f494eb48e95ed3cb3add43998df1c0efecb | 3e81d7291684c019f65ac0b80214afc0578ddf3e | refs/heads/master | 2023-05-25T08:25:56.024770 | 2023-05-22T17:47:00 | 2023-05-22T17:47:00 | 68,296,776 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | import time
import pyttsx
engine = pyttsx.init()
def say(data):
engine.say(data)
engine.runAndWait()
numbers=['zero','one','two','three','four', 'five','six','seven','eight','nine','ten','eleven','twelve']
def say_time():
s = time.localtime()
h, m = s.tm_hour, s.tm_min
    if m == 0: # on the hour say "o clock", at half past say "thirty"
suffix = " o clock "
else:
suffix = " thirty "
announcement = " It is {hour} {suffix}".format(hour=str(h), suffix=suffix)
say(announcement)
import schedule
schedule.every().hour.at(":00").do(say_time)
schedule.every().hour.at(":30").do(say_time)
while True:
schedule.run_pending()
time.sleep(5)
| [
"[email protected]"
] | |
ca7e3822837e1022c25e71a04a08ef872eb2c937 | 5a0260b3792d479722cfe396347babbf45157cf9 | /assert4.py | 5d5b659650264b1f5851ad0b8709fbc6f700d3d8 | [] | no_license | paddumelanahalli/agile-programming | 9167807afd8c83653d4eb79c9931fcaac859403b | afb7d6b0441c24dff30523096b995c2046fa550f | refs/heads/master | 2023-03-09T19:54:36.372513 | 2023-03-03T04:52:28 | 2023-03-03T04:52:28 | 169,643,104 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 304 | py | # initializing number
a = 4
b = 0
# using assert to check for 0
print("The value of a / b is : ")
assert b != 0, "Divide by 0 error" # fails as written (b == 0), so nothing below this line runs
print(a / b)
# Test case 2
x = "hello"
#if condition returns False, AssertionError is raised:
assert x == "goodbye", "x should be 'hello'"
# Test case 3
x = "hello"
| [
"[email protected]"
] | |
b5a7b3986df24c714e294140a514a6b2c6d588d8 | fb8415ac14b1a75089ca145e4367f74e8dd7977e | /ODE.py | c270423adb35e5e24e6333425b284a6798f49bb1 | [] | no_license | yuuch/ode-homework | c816dd52addfde613a17b1f685ea30833e69c6b2 | 56e9febc2c2a3d4d2cf512bdcd58a93c8dd8734e | refs/heads/master | 2020-03-25T00:10:39.346584 | 2018-11-02T08:37:46 | 2018-11-02T08:37:46 | 143,172,706 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,358 | py | import numpy as np
import matplotlib
import matplotlib.pyplot as plt
def explicit_euler_method(u_0=1 ,v_0=1,n=10000):
# initial conditions
t_interval=[0,100]
mat_of_coe=[[98,198],[-99,-199]]# matrix of coefficients
u_value_list=[u_0]
v_value_list=[v_0]
h = (t_interval[1]-t_interval[0])*1.0/n
for i in range(n):
#u_n+1 = (98*u_n+198*v_n) *h +u_n
u_new = u_value_list[i] + (mat_of_coe[0][0]*u_value_list[i] + mat_of_coe[0][1] * \
v_value_list[i] )*h
u_value_list.append(u_new)
v_new = v_value_list[i] + (mat_of_coe[1][0]*u_value_list[i] + mat_of_coe[1][1] * \
v_value_list[i] )*h
v_value_list.append(v_new)
return u_value_list,v_value_list
def plot_lines(y1, y2, fig_name='myfig.svg',n=10000):
#matplotlib.use('agg')
x=[100.0/n*i for i in range(n+1)]
assert len(y1)==len(x)
plt.plot(x,y1,label='exact solution')
plt.plot(x,y2,label='numerical solution')
plt.title(fig_name[:-4])
plt.legend()
plt.savefig(fig_name,format='svg')
def exact_values(n=10000):
t = np.array([100.0/n*i for i in range(n+1)])
u_t = -3*np.exp(-100*t) + 4*np.exp(-1*t)
v_t = 3*np.exp(-100*t) - 2*np.exp(-1*t)
return u_t,v_t
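# Where the closed form above comes from (standard eigen-analysis): the system
# u' = 98u + 198v, v' = -99u - 199v has eigenvalues -1 and -100, and the
# initial condition u(0) = v(0) = 1 fixes the weights, giving
#   u(t) = 4e^(-t) - 3e^(-100t), v(t) = -2e^(-t) + 3e^(-100t).
# The widely separated decay rates make the problem stiff, which is why
# explicit Euler needs h <= 2/100 = 0.02 here to stay stable.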
def implicit_euler_method(u_0=1,v_0=1,n=10000):
t_interval=[0,100]
mat_of_coe=[[98,198],[-99,-199]]# matrix of coefficients
u_value_list=[u_0]
v_value_list=[v_0]
h = (t_interval[1]-t_interval[0])*1.0/n
for i in range(n):
u_new = u_value_list[i]*(1+199*h)+198*h*v_value_list[i]
u_new = u_new*(1.0/((1-98*h)*(1+199*h)+198*h*99*h))
u_value_list.append(u_new)
v_new = v_value_list[i]
v_new = 99*h*u_value_list[i]-(1-98*h)*v_value_list[i]
v_new = v_new*(1.0/((-198*h*99*h)-(1-98*h)*(1+199*h)))
v_value_list.append(v_new)
return u_value_list,v_value_list
def implicit_midpoint_method(u_0=1,v_0=1,n=10000):
t_interval=[0,100]
mat_of_coe=[[98,198],[-99,-199]]# matrix of coefficients
u_value_list=[u_0]
v_value_list=[v_0]
h = (t_interval[1]-t_interval[0])*1.0/n
    dace = (1-98*h/2)*(1+199*h/2)-(-99*h/2)*(198*h/2) # determinant of (I - h/2*A); constant, computed once per run
for i in range(n):
u_new = ((1+199.0*h/2)*(1+98.0*h/2)+198.0*h/2*(-99*h/2))*u_value_list[i]
u_new = u_new + 198*v_value_list[i]
u_new = u_new/dace
u_value_list.append(u_new)
v_new = (-99*h)*u_value_list[i]+((-99*h/2)*(99*h)+(1-49*h)*(1-199*h/2))*v_value_list[i]
v_new = v_new/dace
v_value_list.append(v_new)
return u_value_list,v_value_list
if __name__=="__main__":
n=10000
v_list,u_list = explicit_euler_method(n=n)
real_u ,real_v = exact_values(n=n)
imp_eur_u,imp_eur_v =implicit_euler_method(n=n)
imp_mid_u,imp_mid_v = implicit_midpoint_method(n=n)
#plot_lines(real_v,v_list,fig_name='u_lines.svg',n=n)
#plot_lines(real_u,u_list,fig_name='v_lines.svg')
plot_lines(real_v,imp_mid_v,fig_name='implicit_midpoint_v_lines.svg')
'''
class ODEs(object):
def __init__(self,matrix,u_0,v_0,n):
self.matrix = matrix
self.exact_resolution = {'u':[u_0],'v':[v_0]}
self.t_interval=[0,100]
self.n = n
self.h = (self.t_interval[1]-self.t_interval[0])*1.0/n
self.t = np.arange(n+1)*self.h
'''
| [
"[email protected]"
] | |
62ab7f2cf3236ecacf9734a1e433a4e4c6fc26ba | 20a0bd0a9675f52d4cbd100ee52f0f639fb552ef | /transit_odp/timetables/transxchange.py | 5131e1145b84322f07c74c7182aed99a52639682 | [] | no_license | yx20och/bods | 2f7d70057ee9f21565df106ef28dc2c4687dfdc9 | 4e147829500a85dd1822e94a375f24e304f67a98 | refs/heads/main | 2023-08-02T21:23:06.066134 | 2021-10-06T16:49:43 | 2021-10-06T16:49:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,067 | py | import logging
import zipfile
from collections.abc import Iterator
from pathlib import Path
from typing import List, Optional
from lxml import etree
from pydantic import BaseModel
from transit_odp.common.xmlelements import XMLElement
from transit_odp.common.xmlelements.exceptions import NoElement
from transit_odp.validate import ZippedValidator
from .constants import (
TRANSXCAHNGE_NAMESPACE,
TRANSXCHANGE_NAMESPACE_PREFIX,
TXC_21,
TXC_24,
)
from .utils import get_transxchange_schema
logger = logging.getLogger(__name__)
GRID_LOCATION = "Grid"
WSG84_LOCATION = "WGS84"
PRINCIPAL_TIMING_POINTS = ["PTP", "principalTimingPoint"]
class TXCSchemaViolation(BaseModel):
filename: str
line: int
details: str
@classmethod
def from_error(cls, error):
filename = Path(error.filename).name
return cls(filename=filename, line=error.line, details=error.message)
class TransXChangeElement(XMLElement):
"""A wrapper class to easily work lxml elements for TransXChange XML.
This adds the TransXChange namespaces to the XMLElement class.
The TransXChangeDocument tree is traversed using the following general
principle. Child elements are accessed via properties, e.g.
Service elements are document.services.
If you expect a bultin type to be returned this will generally
be a getter method e.g. documents.get_scheduled_stop_points_ids()
since this returns a list of strings.
Args:
root (etree._Element): the root of an lxml _Element.
Example:
# Traverse the tree
>>> tree = etree.parse(netexfile)
>>> trans = TransXChangeDocument(tree.getroot())
>>> trans.get_element("PublicationTimestamp")
PublicationTimestamp(text='2119-06-22T13:51:43.044Z')
>>> trans.get_elements(["dataObjects", "CompositeFrame"])
[CompositeFrame(...), CompositeFrame(...)]
>>> trans.get_elements(["dataObjects", "CompositeFrame", "Name"])
[Name(...), Name(...)
# Element attributes are accessed like dict values
>>> trans["version"]
'1.1'
"""
namespaces = {TRANSXCHANGE_NAMESPACE_PREFIX: TRANSXCAHNGE_NAMESPACE}
def _make_xpath(self, xpath):
if isinstance(xpath, (list, tuple)):
xpath = [TRANSXCHANGE_NAMESPACE_PREFIX + ":" + path for path in xpath]
else:
xpath = TRANSXCHANGE_NAMESPACE_PREFIX + ":" + xpath
return super()._make_xpath(xpath)
class TransXChangeDocument:
""" A class for handling and validating TransXChange XML Documents."""
def __init__(self, source):
"""Initialise class.
Args:
source (path|file|url): Something that can parsed by `lxml.etree.parse`.
"""
if hasattr(source, "seek"):
source.seek(0)
self.source = source
self.name = getattr(source, "name", source)
self._tree = etree.parse(self.source)
self._root = TransXChangeElement(self._tree.getroot())
def __repr__(self):
class_name = self.__class__.__name__
return f"{class_name}(source={self.name!r})"
def __getattr__(self, attr):
try:
return getattr(self._root, attr)
except AttributeError:
msg = f"{self.__class__.__name__!r} has no attribute {attr!r}"
raise AttributeError(msg)
def get_transxchange_version(self):
"""Get the TransXChangeDocuments schema version."""
return self._root["SchemaVersion"]
def get_location_system(self):
"""Gets the location system used by the TxC file.
Returns:
str or None: If LocationSystem exists return text, else return None.
"""
element = self._root.get_element_or_none("LocationSystem")
if element:
return element.text
if self.has_latitude():
return WSG84_LOCATION
return GRID_LOCATION
def get_creation_date_time(self):
"""Gets the CreationDateTime attribute from TxC file.
Returns:
str or None: If CreationDateTime exists return str, else return None.
"""
return self._root["CreationDateTime"]
def get_modifitication_date_time(self):
"""Gets the ModificationDateTime attribute from TxC file.
Returns:
str or None: If ModificationDateTime exists return str, else return None.
"""
return self._root["ModificationDateTime"]
def get_revision_number(self) -> str:
"""Gets the RevisionNumber attribute from a TxC file.
Returns:
str: Returns the value in RevisionNumber.
"""
return self._root["RevisionNumber"]
def get_file_name(self) -> str:
"""
Gets the FileName attribute from a TxC file.
Returns:
str: Returns the value in FileName.
"""
return self._root.get("FileName", "")
def get_modification(self) -> str:
"""
Gets the Modification attribute from a TxC file.
Returns:
str: Returns the value in Modification.
"""
return self._root["Modification"]
def get_services(self):
"""Get all the Service elements in the TransXChangeDocument.
Returns:
List[TransXChangeElement]: A list of TransXChangeElement Service elements.
"""
xpath = ["Services", "Service"]
return self.find_anywhere(xpath)
def get_service_codes(self):
xpath = ["Services", "Service", "ServiceCode"]
return self.find_anywhere(xpath)
def get_all_line_names(self):
"""Get the text of all the LineName elements in the TransXChangeDocument.
Returns:
List[str]: A list of the line names.
"""
xpath = ["Services", "Service", "Lines", "Line", "LineName"]
return [name.text for name in self.find_anywhere(xpath)]
def get_annotated_stop_point_refs(self):
"""Get all the AnnotatedStopPointRef elements in the TransXChangeDocument.
Returns:
List[TransXChangeElement]: A list of TransXChangeElement
AnnotatedStopPointRef elements.
"""
xpath = ["StopPoints", "AnnotatedStopPointRef"]
return self.find_anywhere(xpath)
def get_stop_points(self):
"""Get all the StopPoint elements in the TransXChangeDocument.
Returns:
List[TransXChangeElement]: A list of TransXChangeElement StopPoint elements.
"""
xpath = ["StopPoints", "StopPoint"]
return self.find_anywhere(xpath)
def has_latitude(self):
"""Check if the first stop point contains a latitude element.
Returns:
bool: If StopPoint < Place < Location has a Latitude element return True
else False.
"""
xpath = ["StopPoints", "StopPoint", "Place", "Location"]
locations = self.find_anywhere(xpath)
if len(locations) == 0:
return False
try:
locations[0].get_elements("Latitude")
return True
except NoElement:
return False
def get_journey_pattern_sections(self):
"""Get all the JourneyPatternSection elements in the TransXChangeDocument.
Returns:
List[TransXChangeElement]: A list of TransXChangeElement
JourneyPatternSection elements.
"""
xpath = ["JourneyPatternSections", "JourneyPatternSection"]
return self._root.get_elements(xpath)
def get_operators(self):
xpath = ["Operators", "Operator"]
return self.find_anywhere(xpath)
def get_licensed_operators(self):
xpath = ["Operators", "LicensedOperator"]
return self.find_anywhere(xpath)
def get_nocs(self) -> List[str]:
xpath = "NationalOperatorCode"
return [noc.text for noc in self.find_anywhere(xpath)]
def get_principal_timing_points(self) -> List[TransXChangeElement]:
xpath = "TimingStatus"
return [
s for s in self.find_anywhere(xpath) if s.text in PRINCIPAL_TIMING_POINTS
]
    def get_operating_period_start_date(self) -> List[TransXChangeElement]:
        xpath = ["Services", "Service", "OperatingPeriod", "StartDate"]
        return self.find_anywhere(xpath)
    def get_operating_period_end_date(self) -> List[TransXChangeElement]:
        xpath = ["Services", "Service", "OperatingPeriod", "EndDate"]
        return self.find_anywhere(xpath)
    def get_public_use(self) -> List[TransXChangeElement]:
        xpath = ["Services", "Service", "PublicUse"]
        return self.find_anywhere(xpath)
class TransXChangeZip(ZippedValidator):
"""A class for working with a zip file containing transxchange files."""
def __init__(self, source):
if not hasattr(source, "seek"):
f_ = open(source, "rb")
else:
f_ = source
super().__init__(f_)
self._schema_21 = None
self._schema_24 = None
self.docs = []
def _get_schema(self, version):
"""Get an lxml schema for a specific TxC version.
Args:
version (str): TxC version string, either '2.1' or '2.4'.
Returns:
TxC schema as an lxml schema object
"""
if TXC_21 == version:
if self._schema_21 is None:
self._schema_21 = get_transxchange_schema(TXC_21)
return self._schema_21
else:
if self._schema_24 is None:
self._schema_24 = get_transxchange_schema(TXC_24)
return self._schema_24
    def get_transxchange_docs(self, validate=False):
        """Get all the TransXChangeDocuments in a zip file.
        Args:
            validate (bool): Kept for API compatibility; schema validation is
                handled separately by validate_contents.
        Return:
            List[TransXChangeDocument]: A list of TransXChangeDocuments
        """
        filenames = self.get_files()
        docs = []
        for name in filenames:
            doc = self.get_doc_from_name(name)
            docs.append(doc)
        return docs
def iter_doc(self):
"""Returns an Iterator of TransXChangeDocuments in a zip file.
Args:
validate (bool): Validate the document against a TxC schema.
Return Iterator[TransXChangeDocuments]: An iterator of TransXChangeDocuments.
"""
filenames = self.get_files()
return (self.get_doc_from_name(n) for n in filenames)
def get_doc_from_name(self, name):
"""Get a TransXChangeDocument from a zip file by name.
Args:
name (str): Name of file to retrieve
Return:
TransXChangeDocument: The TransXChangeDocument with name
"""
with self.open(name) as f_:
doc = TransXChangeDocument(f_)
return doc
def validate_contents(self):
"""Validates the contents of the zip file.
Returns:
None: None is return if the contents are all valid TxC files.
Raises:
XMLValidationException: if a DocumentInvalid exception is raised.
"""
filenames = self.get_files()
count = len(filenames)
logger.info(f"[TransXChange] Validating {count} files.")
for ind, name in enumerate(filenames, start=1):
logger.info(f"[TransXChange] => Validating {name} file {ind} of {count}.")
self.get_doc_from_name(name)
def validate(self):
"""Validate a zip file and then validate it's contents.
Returns:
None: If Zip and TransXChangeDocuments are all valid.
Raises:
NestedZipForbidden: if zip file contains another zip file.
ZipTooLarge: if zip file or sum of uncompressed files are
greater than max_file_size.
NoDataFound: if zip file contains no files with data_file_ext extension.
XMLValidationException: if a DocumentInvalid exception is raised.
"""
super().validate()
self.validate_contents()
class TransXChangeDatasetParser:
"""Class for iterating over transxchange file/s."""
def __init__(self, source):
self._source = source
def is_zipfile(self) -> bool:
return zipfile.is_zipfile(self._source)
def _iter_docs(self):
if self.is_zipfile():
with TransXChangeZip(self._source) as zip_:
for doc in zip_.iter_doc():
yield doc
else:
yield TransXChangeDocument(self._source)
def get_documents(self) -> Iterator[TransXChangeDocument]:
if self.is_zipfile():
with TransXChangeZip(self._source) as zip_:
for doc in zip_.iter_doc():
yield doc
else:
yield TransXChangeDocument(self._source)
def get_transxchange_versions(self) -> List[TransXChangeElement]:
return [doc.get_transxchange_version() for doc in self.get_documents()]
def get_stop_points(self):
all_stops = []
for doc in self.get_documents():
all_stops += doc.get_stop_points()
return all_stops
def get_annotated_stop_point_refs(self) -> List[TransXChangeElement]:
all_stops = []
for doc in self.get_documents():
all_stops += doc.get_annotated_stop_point_refs()
return all_stops
def get_principal_timing_points(self) -> List[TransXChangeElement]:
timing_points = []
for doc in self.get_documents():
timing_points += doc.get_principal_timing_points()
return timing_points
def get_nocs(self) -> List[str]:
nocs = []
for doc in self.get_documents():
nocs += doc.get_nocs()
return nocs
def get_line_names(self):
line_names = []
for doc in self.get_documents():
line_names += doc.get_all_line_names()
return line_names
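# Minimal usage sketch (the archive name is hypothetical): the parser accepts
# either a single TransXChange XML file or a zip of them, and the accessors
# above aggregate across every document found.
#   parser = TransXChangeDatasetParser('timetables.zip')
#   parser.get_nocs() # National Operator Codes across all documents
#   parser.get_line_names() # every LineName string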
| [
"[email protected]"
] | |
1ca416d2fa6c49c7791aad99940c8a5f39e04731 | ab8dc4b855906799c869b26c41767f08d2c05128 | /settings.py | 7d84a8dd8c5e9b9f03097fbdb252f2cf9ecf7b06 | [] | no_license | span1ard/rss_update_notifier | 3b4bcb0b79edacbb29242b94df7647206a1ab377 | 847d55dc8c42cc343259f5f3f209d4e3cded6311 | refs/heads/master | 2020-04-05T17:31:05.757370 | 2018-11-11T09:43:05 | 2018-11-11T09:43:05 | 157,064,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | # settings
rss_list_file = 'rss_list.txt' # file with list your rss channels
timeout = 120 # time between queries, presumably in seconds
# available modes, pick one or more
subprocess_mode = False
file_mode = False
shell_mode = False # do not set True if stdout_mode True
init_ent_count = 5 # the number of entries from the channel shown at startup, set 0 to disable
stdout_mode = True # do not set True if shell_mode True;
# command: python rss.py | while read -r line;do firefox $line; done; | [
"[email protected]"
] | |
96e6f090af7124edf646f6756f1619345d2b5808 | 9b82291d67fb442124f6d34154e330a1c5079e6a | /sneakers/migrations/0002_auto_20191021_1829.py | dd665833d3ad680741692650ee95d52b78db2f95 | [] | no_license | katrek/sneakermarket | bae80dbaec386f19eeacb546d087db03fbe65374 | 58b42cd83a0e4fb527efadbdbcf2c978edb807f2 | refs/heads/master | 2020-08-18T06:39:17.602292 | 2019-10-22T18:34:51 | 2019-10-22T18:34:51 | 215,759,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | # Generated by Django 2.2.6 on 2019-10-21 18:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('sneakers', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='sneakers',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='sneakers',
name='model_brand',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sneakers.Brand', verbose_name='Brand'),
),
]
| [
"[email protected]"
] | |
5ad36e086471a28c386754e13e41fc6808c08bc9 | d203e90dda31b0ff1a49f0a553c51d37ce587dbe | /landlab/grid/structured_quad/structured.py | eec849124775e98996c377fe136cceb7b4ff8846 | [
"MIT"
] | permissive | gregtucker/landlab | 2077f3d4e41fca06b9b08feddab395d343e97079 | 975104e1a1af34c8cc64381081e8d1d39b28e8dc | refs/heads/master | 2021-01-18T12:19:05.275300 | 2015-09-01T22:03:01 | 2015-09-01T22:03:01 | 39,078,020 | 0 | 0 | null | 2015-07-14T13:59:58 | 2015-07-14T13:59:58 | null | UTF-8 | Python | false | false | 4,694 | py | #! /usr/bin/env python
"""
Examples
--------
>>> import numpy as np
>>> from landlab.grid.structured_quad.structured import StructuredQuadGrid
>>> (y, x) = np.meshgrid(np.arange(4.), np.arange(5.), indexing='ij')
>>> grid = StructuredQuadGrid((y, x))
>>> grid.number_of_nodes
20
>>> grid.number_of_node_rows == 4
True
>>> grid.number_of_node_columns == 5
True
>>> grid.corner_nodes
array([ 0, 4, 15, 19])
>>> grid.number_of_cells
6
"""
import numpy as np
from ..base import FIXED_VALUE_BOUNDARY
from ..unstructured.status import StatusGrid
from ..unstructured.base import BaseGrid
from .links import (StructuredQuadLinkGrid, node_id_at_link_start,
node_id_at_link_end)
from .cells import StructuredQuadCellGrid
from . import cells as quad_cells
from . import links as quad_links
from . import faces as quad_faces
from . import nodes
class StructuredQuadGrid(BaseGrid):
def __init__(self, node_coord, shape=None, axis_name=None, axis_units=None,
links=True, cells=True, node_status=None):
"""
Parameters
----------
node_coord : tuple
Coordinates of all grid nodes.
shape : tuple, optional
Shape of the grid of nodes.
"""
if len(node_coord) != 2:
raise ValueError('only 2d grids are supported')
self._shape = shape or node_coord[0].shape
if node_status is not None:
if node_status.size != nodes.number_of_nodes(self.shape):
raise ValueError('incorrect size for node_status array')
if node_status is None:
self._status = nodes.status_with_perimeter_as_boundary(
self.shape, node_status=FIXED_VALUE_BOUNDARY)
else:
self._status = node_status
        link_grid = None
        cell_grid = None
        if links:
            #links = (node_id_at_link_start(self.shape),
            #         node_id_at_link_end(self.shape))
            link_grid = StructuredQuadLinkGrid(self.shape)
        if cells:
            cell_grid = StructuredQuadCellGrid(self.shape)
#super(StructuredQuadGrid, self).__init__(node_status=node_status)
BaseGrid.__init__(self, (node_coord[0].flatten(),
node_coord[1].flatten()),
links=link_grid,
cells=cell_grid)
self._num_nodes = nodes.number_of_nodes(self.shape)
self._num_cells = quad_cells.number_of_cells(self.shape)
self._num_links = quad_links.number_of_links(self.shape)
self._num_faces = quad_faces.number_of_faces(self.shape)
self._num_core_nodes = nodes.number_of_core_nodes(self.shape)
self._num_core_cells = self._num_cells
self._node_x, self._node_y = (
np.ravel(node_coord[0]),
np.ravel(node_coord[1]),
)
self._node_id_at_cells = quad_cells.node_id_at_cells(self.shape)
self._cell_id_at_nodes = quad_cells.cell_ids(self.shape)
self._cell_node = quad_cells.node_id_at_cells(self.shape)
self._in_link_id_at_nodes = quad_links.node_in_link_ids(self.shape)
self._out_link_id_at_nodes = quad_links.node_out_link_ids(self.shape)
self._node_id_at_link_start = quad_links.node_id_at_link_start(self.shape)
self._node_id_at_link_end = quad_links.node_id_at_link_end(self.shape)
self._active_link_ids = quad_links.active_link_ids(self.shape, self._status)
@property
def shape(self):
"""Shape of the grid as rows, columns.
"""
return self._shape
#@property
#def number_of_core_nodes(self):
# """Number of core nodes.
# """
# return self._num_core_nodes
@property
def number_of_node_columns(self):
"""Number of node columns.
Returns the number of columns, including boundaries.
"""
return self.shape[1]
@property
def number_of_node_rows(self):
"""Number of node rows.
Returns the number of rows, including boundaries.
"""
return self.shape[0]
@property
def corner_nodes(self):
"""Nodes in grid corners.
Return the IDs to the corner nodes of the grid, sorted by ID.
Returns
-------
(4, ) ndarray
Array of corner node IDs.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.structured_quad.structured import StructuredQuadGrid
>>> (x, y) = np.meshgrid(np.arange(4.), np.arange(5.), indexing='ij')
>>> grid = StructuredQuadGrid((x, y))
>>> grid.corner_nodes
array([ 0, 4, 15, 19])
"""
return nodes.corners(self.shape)
| [
"[email protected]"
] | |
c7a5c42d257dc66dc4ccfd96746a2683f6614f50 | fc93668dd9d324e8c7de1f8f7ead0d15232e9cb8 | /SSD/train.py | 79e50f566d37bbbd51c680e065f46d7de2f48cbc | [
"MIT"
] | permissive | tormey97/MasterProject | 551aaed2299ee3ec494691324bbbb644fe1edf2d | d4f5d6a4d6e0df83ee44c726b16878595cd5fa44 | refs/heads/main | 2023-06-06T22:37:32.592523 | 2021-06-27T07:35:58 | 2021-06-27T07:35:58 | 344,808,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,271 | py | import argparse
import logging
import os
import torch
import torch.distributed as dist
from SSD.ssd.engine.inference import do_evaluation
from SSD.ssd.config import cfg
from SSD.ssd.data.build import make_data_loader
from SSD.ssd.engine.trainer import do_train
from SSD.ssd.modeling.detector import build_detection_model
from SSD.ssd.solver.build import make_optimizer, make_lr_scheduler
from SSD.ssd.utils import dist_util, mkdir
from SSD.ssd.utils.checkpoint import CheckPointer
from SSD.ssd.utils.dist_util import synchronize
from SSD.ssd.utils.logger import setup_logger
from SSD.ssd.utils.misc import str2bool
def train(cfg, args):
logger = logging.getLogger('SSD.trainer')
model = build_detection_model(cfg)
device = torch.device(cfg.MODEL.DEVICE)
print("has_cuda: ", torch.cuda.is_available())
model.to(device)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)
lr = cfg.SOLVER.LR * args.num_gpus # scale by num gpus
optimizer = make_optimizer(cfg, model, lr)
milestones = [step // args.num_gpus for step in cfg.SOLVER.LR_STEPS]
scheduler = make_lr_scheduler(cfg, optimizer, milestones)
arguments = {"iteration": 0}
save_to_disk = dist_util.get_rank() == 0
checkpointer = CheckPointer(model, optimizer, scheduler, cfg.OUTPUT_DIR, save_to_disk, logger)
extra_checkpoint_data = checkpointer.load()
arguments.update(extra_checkpoint_data)
max_iter = cfg.SOLVER.MAX_ITER // args.num_gpus
train_loader = make_data_loader(cfg, is_train=True, distributed=args.distributed, max_iter=max_iter, start_iter=arguments['iteration'])
model = do_train(cfg, model, train_loader, optimizer, scheduler, checkpointer, device, arguments, args)
return model
def main():
parser = argparse.ArgumentParser(description='Single Shot MultiBox Detector Training With PyTorch')
parser.add_argument(
"--config-file",
default="",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument('--log_step', default=10, type=int, help='Print logs every log_step')
parser.add_argument('--save_step', default=2500, type=int, help='Save checkpoint every save_step')
parser.add_argument('--eval_step', default=2500, type=int, help='Evaluate dataset every eval_step, disabled when eval_step < 0')
parser.add_argument('--use_tensorboard', default=True, type=str2bool)
parser.add_argument(
"--skip-test",
dest="skip_test",
help="Do not test the final model",
action="store_true",
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
args.distributed = num_gpus > 1
args.num_gpus = num_gpus
if torch.cuda.is_available():
# This flag allows you to enable the inbuilt cudnn auto-tuner to
# find the best algorithm to use for your hardware.
torch.backends.cudnn.benchmark = True
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://")
synchronize()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
if cfg.OUTPUT_DIR:
mkdir(cfg.OUTPUT_DIR)
logger = setup_logger("SSD", dist_util.get_rank(), cfg.OUTPUT_DIR)
logger.info("Using {} GPUs".format(num_gpus))
logger.info(args)
logger.info("Loaded configuration file {}".format(args.config_file))
with open(args.config_file, "r") as cf:
config_str = "\n" + cf.read()
logger.info(config_str)
logger.info("Running with config:\n{}".format(cfg))
model = train(cfg, args)
if not args.skip_test:
logger.info('Start evaluating...')
torch.cuda.empty_cache() # speed up evaluating after training finished
do_evaluation(cfg, model, distributed=args.distributed)
if __name__ == '__main__':
main()
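# Example invocation (the config path is hypothetical; trailing KEY VALUE
# pairs are merged into the config via cfg.merge_from_list):
#   python train.py --config-file configs/ssd300_voc.yaml --log_step 10 \
#       --save_step 2500 SOLVER.MAX_ITER 120000
# Multi-GPU runs go through torch.distributed.launch, which sets WORLD_SIZE
# and passes --local_rank for the distributed branch above.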
| [
"[email protected]"
] | |
ed6425edc893df682b3844d0b2db82e4b26db86a | 60df08d44172e169aef7f2e4e60ec65bcf15b494 | /monitor_downloads.py | 8858c72b7752ea14c26375120978ebc4fd01b7f9 | [
"MIT"
] | permissive | apchavan/File-autoplacer | 6f21a0e5803459a41d1a39de60842c15357b7aa4 | f71f39f6f3125e0c311504fcb4acae6760e69c3d | refs/heads/master | 2020-07-27T02:14:41.357723 | 2020-02-16T11:01:25 | 2020-02-16T11:01:25 | 208,833,624 | 3 | 0 | MIT | 2019-10-24T08:13:58 | 2019-09-16T15:25:55 | Python | UTF-8 | Python | false | false | 7,665 | py | """
Functions to monitor 'Downloads' directory.
"""
import distutils.errors as distutils_errors
import os
import random
import time
from distutils.dir_util import copy_tree, remove_tree
from distutils.file_util import copy_file
from platform import system
import app_data
import audio_formats
import directory_names
import document_formats
import excluded_formats
import image_formats
import video_formats
def _get_download_dir_details():
''' Return path of 'Downloads' directory depending on OS and whether OS is 'Windows' (True) or not (False). '''
user_os = system().lower()
current_username = os.getlogin()
if user_os == "windows":
return ("C:\\Users\\" + current_username + "\\Downloads\\"), True
else:
return ("~/Downloads/"), False
def _is_hidden(file_folder: str="", _is_windows=False):
''' Check for "file_folder" is hidden or not, return bool value 'True' if hidden else return 'False' for regular ones. '''
if not file_folder.strip():
return
if _is_windows:
import win32api, win32con
ff_attribute = win32api.GetFileAttributes(file_folder)
return ff_attribute & (win32con.FILE_ATTRIBUTE_HIDDEN | win32con.FILE_ATTRIBUTE_SYSTEM)
else:
return file_folder.strip().startswith(".")
def _move_document(doc_name: str, download_path: str):
''' Move downloaded document to document directory specified below. '''
if not doc_name.strip() or not download_path.strip():
return
doc_dir = download_path + directory_names.document_directory()
if not os.path.isdir(doc_dir):
os.mkdir(doc_dir)
source_path = os.path.join(download_path, doc_name)
destination_path = os.path.join(doc_dir, doc_name)
try:
copy_file(src=source_path, dst=destination_path)
os.remove(path=source_path)
except (PermissionError, distutils_errors.DistutilsFileError) as Error:
return
def _move_image(image_name: str, download_path: str):
''' Move downloaded image to image directory specified below. '''
if not image_name.strip() or not download_path.strip():
return
img_dir = download_path + directory_names.image_directory()
if not os.path.isdir(img_dir):
os.mkdir(img_dir)
source_path = os.path.join(download_path, image_name)
destination_path = os.path.join(img_dir, image_name)
try:
copy_file(src=source_path, dst=destination_path)
os.remove(path=source_path)
except (PermissionError, distutils_errors.DistutilsFileError) as Error:
return
def _move_audio(audio_name: str="", download_path: str=""):
''' Move downloaded audio to audio directory specified below. '''
if not audio_name.strip() or not download_path.strip():
return
aud_dir = download_path + directory_names.audio_directory()
if not os.path.isdir(aud_dir):
os.mkdir(aud_dir)
source_path = os.path.join(download_path, audio_name)
destination_path = os.path.join(aud_dir, audio_name)
try:
copy_file(src=source_path, dst=destination_path)
os.remove(path=source_path)
except (PermissionError, distutils_errors.DistutilsFileError) as Error:
return
def _move_video(video_name: str="", download_path: str=""):
''' Move downloaded video to video directory specified below. '''
if not video_name.strip() or not download_path.strip():
return
vid_dir = download_path + directory_names.video_directory()
if not os.path.isdir(vid_dir):
os.mkdir(vid_dir)
source_path = os.path.join(download_path, video_name)
destination_path = os.path.join(vid_dir, video_name)
try:
copy_file(src=source_path, dst=destination_path)
os.remove(path=source_path)
except (PermissionError, distutils_errors.DistutilsFileError) as Error:
return
def _move_other(file_dir_name: str="", download_path: str=""):
''' Move downloaded other file/folder to other directory specified below. '''
if not file_dir_name.strip() or not download_path.strip():
return
other_dir = download_path + directory_names.other_directory()
if not os.path.isdir(other_dir):
os.mkdir(other_dir)
source_path = os.path.join(download_path, file_dir_name)
destination_path = os.path.join(other_dir, file_dir_name)
if os.path.isdir(source_path):
try:
copy_tree(src=source_path, dst=destination_path)
remove_tree(directory=source_path)
except (PermissionError, distutils_errors.DistutilsFileError) as Error:
return
elif os.path.isfile(source_path):
try:
copy_file(src=source_path, dst=destination_path)
os.remove(path=source_path)
except (PermissionError, distutils_errors.DistutilsFileError) as Error:
return
def _is_autoplacer_dirs(dir_name: str=""):
''' Return whether 'dir_name' is created by autoplacer application or not. '''
if not dir_name.strip():
return False
autoplacer_dirs = directory_names.autoplacer_directories()
if dir_name.strip() in autoplacer_dirs:
return True
return False
def _is_dir_downloading(dir_path: str=""):
''' Return whether directory contents still being downloaded (True) or not (False). '''
if not dir_path.strip():
return False
excluded_file_formats = excluded_formats.excluded_formats()
for root, directories, files in os.walk(dir_path):
for file in files:
file_extension = file[file.rfind(".") :]
if file_extension.upper() in excluded_file_formats:
return True
return False
def monitor_downloads_directory():
''' Monitors 'Downloads' directory on system. '''
_download_path, _is_windows = _get_download_dir_details()
excluded_file_formats = excluded_formats.excluded_formats()
while os.path.isfile(app_data.lockfile_name()):
time.sleep(random.randint(5, 10))
with os.scandir(path=_download_path) as scanner:
for entry in scanner:
if not os.path.isfile(app_data.lockfile_name()):
return
elif _is_hidden(file_folder=(_download_path + entry.name), _is_windows=_is_windows)\
or _is_autoplacer_dirs(dir_name=entry.name):
continue
elif entry.is_file():
file_extension = entry.name[entry.name.rfind(".") :]
if file_extension.upper() in excluded_file_formats:
continue
elif file_extension.lower() in video_formats.video_file_formats():
_move_video(video_name=entry.name, download_path=_download_path)
elif file_extension.lower() in audio_formats.audio_file_formats():
_move_audio(audio_name=entry.name, download_path=_download_path)
elif file_extension.lower() in document_formats.document_file_formats():
_move_document(doc_name=entry.name, download_path=_download_path)
elif file_extension.lower() in image_formats.image_file_formats():
_move_image(image_name=entry.name, download_path=_download_path)
else:
_move_other(file_dir_name=entry.name, download_path=_download_path)
elif entry.is_dir() and \
not _is_dir_downloading(dir_path=(_download_path + entry.name)) and \
entry.name not in excluded_formats.excluded_formats():
_move_other(file_dir_name=entry.name, download_path=_download_path)
| [
"[email protected]"
] | |
61d7063179f93d20eb1cfb1ff03940d9692e7a55 | dfb5dd691a2e91570fe8439e21b7b474303ec306 | /python网络数据采集/my_爬虫_进阶_之路/scrapy框架/my_spiders/电商项目集合/my_flask_server/mogujie_parse.py | def219d3c02d0d5e1e36e96f09b4ac61d275df54 | [] | no_license | 17637462979/python-2 | 92556a917ed3d91c605b8da81457a8574af292cb | bfc0b34e2c31cda7618d0cbed6890a0513a55022 | refs/heads/master | 2020-07-28T01:30:08.381607 | 2019-09-18T03:29:25 | 2019-09-18T03:29:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,899 | py | # coding:utf-8
'''
@author = super_fazai
@File : mogujie_parse.py
@connect : [email protected]
'''
"""
蘑菇街页面解析
"""
import sys
sys.path.append('..')
from decimal import Decimal
from settings import IP_POOL_TYPE
from sql_str_controller import (
mg_insert_str_2,
mg_update_str_3,
mg_update_str_4,)
from multiplex_code import (
_get_right_model_data,
contraband_name_check,
)
from fzutils.spider.async_always import *
class MoGuJieParse(Crawler):
def __init__(self):
super(MoGuJieParse, self).__init__(
ip_pool_type=IP_POOL_TYPE,
)
self._set_headers()
self.result_data = {}
def _set_headers(self):
self.headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Host': 'm.mogujie.com',
'User-Agent': get_random_pc_ua(), # 随机一个请求头
}
def get_goods_data(self, goods_id):
'''
模拟构造得到data的url
:param goods_id: 常规商品goods_id
:return:
'''
"""
方案1: 原先采用调用api的方法, 无奈分析js源码未找到sign是如何md5加密,从而暂时无法实现通过api调用参数 (pass)
"""
# """ # 这些是构造参数
# mw-appkey:100028
# mw-t:1517037701053
# mw-uuid:956bf265-90a4-45b0-bfa8-31040782f99e
# mw-ttid:NMMain@mgj_h5_1.0
# mw-sign:ef29b1801c79d63907f3589c68e4cd4c
# data:{"iid":"1lnrc42","template":"1-2-detail_normal-1.0.0","appPlat":"m","noPintuan":false}
# callback:mwpCb2
# _:1517037701056
# """
# print('------>>>| 对应的手机端地址为: ', 'https://h5.mogujie.com/detail-normal/index.html?itemId=' + goods_id)
#
# appkey = '100028'
# t = str(time.time().__round__()) + str(randint(100, 999)) # time.time().__round__() 表示保留到个位
#
# uuid = '956bf265-90a4-45b0-bfa8-31040782f99e'
# ttid = 'NMMain@mgj_h5_1.0'
# sign = ''
#
# '''
# 下面是构造params
# '''
# params_data_2 = {
# 'iid': goods_id,
# 'template': '1-2-detail_normal-1.0.0',
# 'appPlat': 'm',
# 'noPintuan': 'false',
# }
#
# params = {
# 'data': json.dumps(params_data_2),
# }
#
# tmp_url = 'https://api.mogujie.com/h5/http.detail.api/1/?mw-appkey={}&mw-t={}&mw-uuid={}&mw-ttid={}&mw-sign={}&callback=mwpCb2'.format(
# appkey, t, uuid, ttid, sign
# )
#
# # 设置代理ip
# ip_object = MyIpPools()
# self.proxies = ip_object.get_proxy_ip_from_ip_pool() # {'http': ['xx', 'yy', ...]}
# self.proxy = self.proxies['http'][randint(0, len(self.proxies) - 1)]
#
# tmp_proxies = {
# 'http': self.proxy,
# }
# # print('------>>>| 正在使用代理ip: {} 进行爬取... |<<<------'.format(self.proxy))
#
# try:
# response = requests.get(tmp_url, headers=self.headers, params=params, proxies=tmp_proxies, timeout=13) # 在requests里面传数据,在构造头时,注意在url外头的&xxx=也得先构造
# last_url = re.compile(r'\+').sub('', response.url) # 转换后得到正确的url请求地址
# # print(last_url)
# response = requests.get(last_url, headers=self.headers, proxies=tmp_proxies, timeout=13) # 在requests里面传数据,在构造头时,注意在url外头的&xxx=也得先构造
# data = response.content.decode('utf-8')
# print(data)
# data = re.compile(r'mwpCb2\((.*)\)').findall(data) # 贪婪匹配匹配所有
# # print(data)
# except Exception:
# print('requests.get()请求超时....')
# print('data为空!')
# return self._data_error_init()
"""
方案2: 通过页面源码来获取
"""
if goods_id == '':
return self._data_error_init()
tmp_url = 'https://shop.mogujie.com/detail/' + str(goods_id)
print('------>>>| 原pc地址为: ', tmp_url)
data = {}
body = Requests.get_url_body(url=tmp_url, headers=self.headers, had_referer=True, ip_pool_type=self.ip_pool_type)
# print(body)
if body == '':
print('获取到的body为空str!')
return self._data_error_init()
try:
goods_info = re.compile(r'var detailInfo = (.*?);</script>').findall(body)[0]
# print(goods_info)
item_info = re.compile(r'itemInfo:(.*?),priceRuleImg').findall(goods_info)[0]
# print(item_info)
sku_info = re.compile(r'skuInfo:(.*?),pinTuanInfo').findall(goods_info)[0]
# print(sku_info)
shop_info = re.compile(r'shopInfo:(.*?),skuInfo').findall(goods_info)[0]
# print(shop_info)
item_info = json_2_dict(json_str=item_info)
sku_info = json_2_dict(json_str=sku_info)
shop_info = json_2_dict(json_str=shop_info)
# pprint(item_info)
# pprint(sku_info)
# pprint(shop_info)
data['title'] = self._get_title(item_info)
data['sub_title'] = ''
data['shop_name'] = self._get_shop_name(shop_info)
data['all_img_url'] = self._get_all_img_url(item_info=item_info)
data['p_info'], tmp_p_info_body = self._get_p_info(goods_id=goods_id)
data['div_desc'] = self._get_div_desc(tmp_p_info_body=tmp_p_info_body)
data['detail_name_list'] = self._get_detail_name_list(sku_info=sku_info)
'''
获取每个规格对应价格跟规格以及其库存
'''
price_info_list = self.get_price_info_list(sku_info=sku_info)
if price_info_list == '':
raise Exception
else:
# pprint(price_info_list)
data['price_info_list'] = price_info_list
# 商品价格和淘宝价
try:
tmp_price_list = sorted([round(float(item.get('detail_price', '')), 2) for item in data['price_info_list']])
price = Decimal(tmp_price_list[-1]).__round__(2) # 商品价格
taobao_price = Decimal(tmp_price_list[0]).__round__(2) # 淘宝价
# print('商品的最高价: ', price, ' 最低价: ', taobao_price)
except IndexError:
print('获取price和taobao_price时出错! 请检查')
raise Exception
data['price'] = price
data['taobao_price'] = taobao_price
except Exception as e:
print('遇到错误: ', e)
return self._data_error_init()
if data != {}:
# pprint(data)
self.result_data = data
return data
else:
print('data为空!')
return self._data_error_init()
def deal_with_data(self):
'''
处理得到规范的data数据
:return: result 类型 dict
'''
data = self.result_data
if data != {}:
shop_name = data['shop_name']
account = ''
title = data['title']
sub_title = data['sub_title']
price = data['price'] # 商品价格
taobao_price = data['taobao_price'] # 淘宝价
detail_name_list = data['detail_name_list']
price_info_list = data['price_info_list']
all_img_url = data['all_img_url']
p_info = data['p_info']
div_desc = data['div_desc']
is_delete = 0
if contraband_name_check(target_name=title):
print('违禁物品下架...')
is_delete = 1
else:
pass
result = {
# 'goods_url': data['goods_url'], # goods_url
'shop_name': shop_name, # 店铺名称
'account': account, # 掌柜
'title': title, # 商品名称
'sub_title': sub_title, # 子标题
'price': price, # 商品价格
'taobao_price': taobao_price, # 淘宝价
# 'goods_stock': goods_stock, # 商品库存
'detail_name_list': detail_name_list, # 商品标签属性名称
# 'detail_value_list': detail_value_list,# 商品标签属性对应的值
'price_info_list': price_info_list, # 要存储的每个标签对应规格的价格及其库存
'all_img_url': all_img_url, # 所有示例图片地址
'p_info': p_info, # 详细信息标签名对应属性
'div_desc': div_desc, # div_desc
'is_delete': is_delete # 用于判断商品是否已经下架
}
# pprint(result)
# print(result)
# wait_to_send_data = {
# 'reason': 'success',
# 'data': result,
# 'code': 1
# }
# json_data = json.dumps(wait_to_send_data, ensure_ascii=False)
# print(json_data)
return result
else:
print('待处理的data为空的dict, 该商品可能已经转移或者下架')
return {}
def _get_detail_name_list(self, sku_info):
# pprint(sku_info)
detail_name_list = self.get_goods_detail_name_list(sku_info=sku_info)
# print(detail_name_list)
assert detail_name_list != '', '获取detail_name_list出错, 请检查!'
return detail_name_list
def _get_div_desc(self, tmp_p_info_body):
div_desc = self.get_goods_div_desc(tmp_p_info_body=tmp_p_info_body)
assert div_desc != '', '获取到的div_desc为空str, 请检查!'
return div_desc
def _get_p_info(self, goods_id):
p_info_api_url = 'https://shop.mogujie.com/ajax/mgj.pc.detailinfo/v1?_ajax=1&itemId=' + str(goods_id)
tmp_p_info_body = Requests.get_url_body(url=p_info_api_url, headers=self.headers, had_referer=True, ip_pool_type=self.ip_pool_type)
# print(tmp_p_info_body)
assert tmp_p_info_body != '', '获取到的tmp_p_info_body为空值, 请检查!'
p_info = self.get_goods_p_info(tmp_p_info_body=tmp_p_info_body)
return p_info, tmp_p_info_body
def _get_title(self, item_info):
title = item_info.get('title', '')
assert title != '', 'title为空!'
return title
def _get_shop_name(self, shop_info):
return shop_info.get('name', '')
def _get_all_img_url(self, item_info):
return [{
'img_url': item,
} for item in item_info.get('topImages', [])]
def _data_error_init(self):
self.result_data = {} # 重置下,避免存入时影响下面爬取的赋值
return {}
def insert_into_mogujie_pintuan_table(self, data, pipeline) -> bool:
try:
tmp = _get_right_model_data(data=data, site_id=23)
except:
print('此处抓到的可能是蜜芽拼团券所以跳过')
return False
print('------>>>| 待存储的数据信息为: |', tmp.get('goods_id'))
params = self._get_db_insert_pintuan_params(item=tmp)
_r = pipeline._insert_into_table(sql_str=mg_insert_str_2, params=params)
return _r
def update_mogujie_pintuan_table(self, data, pipeline):
try:
tmp = _get_right_model_data(data=data, site_id=23)
except:
print('此处抓到的可能是蜜芽拼团券所以跳过')
return None
# print('------>>> | 待存储的数据信息为: |', tmp)
print('------>>>| 待存储的数据信息为: |', tmp.get('goods_id'))
params = self._get_db_update_pintuan_params(item=tmp)
pipeline._update_table(sql_str=mg_update_str_3, params=params)
def update_mogujie_pintuan_table_2(self, data, pipeline):
try:
tmp = _get_right_model_data(data=data, site_id=23)
except:
print('此处抓到的可能是蜜芽拼团券所以跳过')
return None
# print('------>>> | 待存储的数据信息为: |', tmp)
print('------>>>| 待存储的数据信息为: |', tmp.get('goods_id'))
params = self._get_db_update_pintuan_params_2(item=tmp)
pipeline._update_table(sql_str=mg_update_str_4, params=params)
def _get_db_insert_pintuan_params(self, item):
params = (
item['goods_id'],
item['goods_url'],
item['create_time'],
item['modify_time'],
item['shop_name'],
item['title'],
item['sub_title'],
item['price'],
item['taobao_price'],
dumps(item['detail_name_list'], ensure_ascii=False), # 把list转换为json才能正常插入数据(并设置ensure_ascii=False)
dumps(item['price_info_list'], ensure_ascii=False),
dumps(item['all_img_url'], ensure_ascii=False),
dumps(item['p_info'], ensure_ascii=False), # 存入到PropertyInfo
item['div_desc'], # 存入到DetailInfo
dumps(item['pintuan_time'], ensure_ascii=False),
item['pintuan_begin_time'],
item['pintuan_end_time'],
item['all_sell_count'],
item['fcid'],
item['page'],
item['sort'],
item['site_id'],
item['is_delete'],
)
return params
def _get_db_update_pintuan_params(self, item):
params = (
item['modify_time'],
item['shop_name'],
item['title'],
item['sub_title'],
item['price'],
item['taobao_price'],
dumps(item['detail_name_list'], ensure_ascii=False),
dumps(item['price_info_list'], ensure_ascii=False),
dumps(item['all_img_url'], ensure_ascii=False),
dumps(item['p_info'], ensure_ascii=False),
item['div_desc'],
item['is_delete'],
dumps(item['pintuan_time'], ensure_ascii=False),
item['pintuan_begin_time'],
item['pintuan_end_time'],
item['all_sell_count'],
item['goods_id'],
)
return params
def _get_db_update_pintuan_params_2(self, item):
params = (
item['modify_time'],
item['shop_name'],
item['title'],
item['sub_title'],
item['price'],
item['taobao_price'],
dumps(item['detail_name_list'], ensure_ascii=False),
dumps(item['price_info_list'], ensure_ascii=False),
dumps(item['all_img_url'], ensure_ascii=False),
dumps(item['p_info'], ensure_ascii=False),
item['div_desc'],
item['is_delete'],
item['goods_id'],
)
return params
def get_price_info_list(self, sku_info):
'''
得到商品每个规格的价格库存及对应img_url
:param sku_info:
:return: '' 表示出错 or [] 表示规格为空 or [{}, ...] 正常
'''
try:
skus = sku_info.get('skus', [])
# pprint(skus)
if skus == []:
print('skus为空! 每个规格的价格为空!')
return []
price_info_list = []
for item in skus:
tmp = {}
size = item.get('size', '')
style = item.get('style', '')
if size == '':
spec_value = style
elif style == '':
spec_value = size
else:
spec_value = style + '|' + size
normal_price = Decimal(item.get('price', 0) / 100).__round__(2).__str__()
detail_price = Decimal(item.get('nowprice', 0) / 100).__round__(2).__str__()
img_url = item.get('img', '')
rest_number = item.get('stock', 0)
if rest_number == 0:
pass
else:
tmp['spec_value'] = spec_value
tmp['normal_price'] = normal_price
tmp['detail_price'] = detail_price
tmp['img_url'] = img_url
tmp['rest_number'] = rest_number
price_info_list.append(tmp)
except Exception as e:
print('获取price_info_list时遇到错误: ', e)
return ''
return price_info_list
def get_goods_detail_name_list(self, sku_info):
'''
得到sku_info
:param sku_info:
:return: '' or [] or [{}, {}, ...]
'''
detail_name_list = []
try:
props = sku_info.get('props', [])
# pprint(props)
if props == []:
print('### detail_name_list为空值 ###')
return []
skus = sku_info.get('skus', [])
img_here = 0
try:
img = skus[0].get('img', '')
if img != '':
img_here = 1
except IndexError:
pass
for item in props:
label = item.get('label', '').replace(':', '')
if label != '':
if img_here == 1:
try:
if item.get('list', [])[0].get('type', '') == 'style':
detail_name_list.append({
'spec_name': label,
'img_here': 1,
})
except IndexError:
detail_name_list.append({
'spec_name': label,
'img_here': 0,
})
img_here = 0 # 记录后置0
else:
detail_name_list.append({
'spec_name': label,
'img_here': 0,
})
else:
pass
except Exception as e:
print('遇到错误: ', e)
return ''
return detail_name_list
def get_goods_p_info(self, tmp_p_info_body):
'''
得到p_info
:param tmp_p_info_body:
:return: [] or [{}, {}, ....]
'''
tmp_p_info_data = json_2_dict(json_str=tmp_p_info_body)
if tmp_p_info_data == {}:
return []
p_info = [{
'p_name': item.get('key', ''),
'p_value': item.get('value', ''),
} for item in tmp_p_info_data.get('data', {}).get('itemParams', {}).get('info', {}).get('set', [])]
return p_info
def get_goods_div_desc(self, tmp_p_info_body):
'''
得到div_desc
:param body:
:return: '' or str
'''
def _get_div_images_list(target):
div_images_list = []
for item in target:
if re.compile('http').findall(item) == []:
item = 'http:' + item
div_images_list.append(item)
return div_images_list
tmp_p_info_data = json_2_dict(json_str=tmp_p_info_body)
if tmp_p_info_data == {}:
return ''
div_images_list = _get_div_images_list(target=tmp_p_info_data.get('data', {}).get('detailInfos', {}).get('detailImage', [])[0].get('list', []))
if div_images_list == []:
# print('div_images_list为空list, 出错请检查!')
# 可能在[1] 这个里面再进行处理
div_images_list = _get_div_images_list(target=tmp_p_info_data.get('data', {}).get('detailInfos', {}).get('detailImage', [])[1].get('list', []))
if div_images_list == []:
print('div_images_list为空list, 出错请检查!')
return ''
else:
tmp_div_desc = ''
for item in div_images_list:
tmp = r'<img src="{}" style="height:auto;width:100%;"/>'.format(item)
tmp_div_desc += tmp
div_desc = '<div>' + tmp_div_desc + '</div>'
else:
tmp_div_desc = ''
for item in div_images_list:
tmp = r'<img src="{}" style="height:auto;width:100%;"/>'.format(item)
tmp_div_desc += tmp
div_desc = '<div>' + tmp_div_desc + '</div>'
return div_desc
def get_goods_id_from_url(self, mogujie_url) -> str:
mogujie_url = re.compile(r'http://').sub('https://', mogujie_url)
is_mogujie_url = re.compile(r'https://shop.mogujie.com/detail/.*?').findall(mogujie_url)
if is_mogujie_url != []:
# 常规商品的地址处理
if re.compile(r'https://shop.mogujie.com/detail/(.*?)\?.*?').findall(mogujie_url) != []:
tmp_mogujie_url = re.compile('https://shop.mogujie.com/detail/(.*?)\?.*?').findall(mogujie_url)[0]
if tmp_mogujie_url != '':
goods_id = tmp_mogujie_url
else:
mogujie_url = re.compile(r';').sub('', mogujie_url)
goods_id = re.compile(r'https://shop.mogujie.com/detail/(.*?)\?.*').findall(mogujie_url)[0]
else: # 直接跟goods_id的地址(往往是自己构造的)
mogujie_url = re.compile(r';').sub('', mogujie_url)
goods_id = re.compile('https://shop.mogujie.com/detail/(.*)').findall(mogujie_url)[0]
print('------>>>| 得到的蘑菇街商品id为:', goods_id)
return goods_id
else:
print('蘑菇街商品url错误, 非正规的url, 请参照格式(https://shop.mogujie.com/detail/)开头的...')
return ''
def __del__(self):
collect()
if __name__ == '__main__':
mogujie = MoGuJieParse()
while True:
mogujie_url = input('请输入待爬取的蘑菇街商品地址: ')
mogujie_url.strip('\n').strip(';')
goods_id = mogujie.get_goods_id_from_url(mogujie_url)
mogujie.get_goods_data(goods_id=goods_id)
data = mogujie.deal_with_data()
pprint(data)
| [
"[email protected]"
] | |
cff9ca4a4258f8e409289cf41a801d52dbb646fa | 550d6012c5f14e87803dbb29abc9edf8a34a1ca9 | /Lab01.py | f5998fbfc2cd2b216a473fc4aa8fc77331f06c5c | [
"Apache-2.0"
] | permissive | gksmfzz1/JavaProject | d89eb5efb4f265a2fe747e0c11da63049d5695cf | 6b22badf8052a66928ab3ef2f60e83bfa54466c1 | refs/heads/master | 2021-05-06T00:49:18.766105 | 2018-01-26T09:01:35 | 2018-01-26T09:01:35 | 114,331,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,667 | py | #1 print() 를 이용 다음 내용을 출력
print(' /\_/\ ----- ')
print('( \' \' ) / Hello \ ')
print('( - )< Junior |')
print(' | | | \ Coder!/ ')
print('(__|__) ----- ')
print('* * ** **** **** * *')
print('* * * * * * * * * *')
print('***** * * **** **** * * ')
print('* * ****** * * * * * ')
print('* * * * * * * * * ')
type(100)
type(99.99)
#2 name, weight, age를 변수로 선언하고 값으로 초기화
name = "하늘"
weight = 60
age = 27
print(name)
print(weight)
print(age)
#3 수학식을 파이썬 표현식으로 바꾸기
x = 1
y = 1
z = 1
print(3*x)
print((3*x)+y)
print((x+y)/7)
print((3*x)+y/(z+2))
#4 문장의 실행결과
x,y = 4,8
x*=y
print('x *= y :', x)
x,y - 4, 8
x -= y
print('x -= y :', x)
#5 x에 대입할 값을 수정
# x = ?
x = 3
print(x + 7 == 10)
#6 수식을 파이썬 프로그램으로 작성하고 계산 결과를 출력
print((-32+95)*12/3)
print((3*4-((-27+67)/4))**8)
print(((512+1968-432)/2**4)+128)
print(256 == 2**8)
print(50+50 <= 10*10)
print(99 != 10**2-1)
#7 표현식의 실행결과 서술하기
x = 2.5
y = -1.5
m = 18
n = 4
print(x+n*y-(x+n)*y)
print(m/n+m%n)
print(5*x-n/5)
print(1-(1-(1-(1-n))))
#8 생활 속 문제를 파이썬으로 풀기
a = 2.5 * 3 / 27
b = 4 * 2 / 30
print(a > b)
print(a)
print(b)
#9 각 표현식에 대한 결과 값
A, Z, D, M = 1, 2, 3, 4
print(3 + 4.5 * 2 +27 /8)
print(True or False and 3 < 4 or not (5 == 7))
print(True or (3 < 5 and 6 >= 2))
print(3 != A)
print(7 % 4 + 3 - 2 / 6 * Z)
print(D + 1 + M % 2 / 3)
print(5.0 / 3 + 3 / 3)
print(53 % 21 < 45 / 18)
print((4<6) or True and False or False and ( 2 > 3))
print(7 - ( 3 + 8 * 6 + 3) - (2 + 5 * 2))
#10 이윤율 계산
# 문제에 대한 배경지식이 필요 - 이윤율 공식
# 이윤율 = 잉여가치액 / (불변자본 + 가변자본)
print('이윤율 :', 45/(30+15))
#11 외국 쇼핑몰에서 노트북 구매
#1070.10-달러환율
#1308.14-유로환율
print('달러환율 =', 780*1070.10)
print('유로환율 =', 650*1308.14)
dolar = 780*1070.10
eur = 650*1308.14
print(dolar > eur)
#12 육상시합 트랙
a = 100*3.14
b = 90*3.14
c = a-b
print(a, b)
print('더 달려야 하는 거리 :', c)
#13 문장의 참 여부
print("Check out this line ")
print("//hello there " + '9' + str(7))
print('H'+'I'+ "is" + str(1) + "more example")
print('H' + str(6.5) + 'I' + "is" + str(1) + 'more example')
print("Print both of us", "Me too")
print("Reverse" + 'I' + 'T')
print("Nonot Here is" + str(1) + "more example")
print("Here is" + str(10*10))
print( not True)
print()
print
print("How about this one" + '?' + 'Huh?')
#14 bool 표현식의 값 계산
print(True and False and True or True)
print(True or True and True and False)
print((True and False)or(True and not False)\
or(False and not False))
print((2 < 3) or (5 > 2) and not (4 == 4) or 9 != 4)
print(6 == 9 or 5 < 6 and 8 < 4 or 4 > 3)
#15 유효한 표현식의 데이터 유형찾기
a = 27/13 +4
b = 27/13 +4.0
c = 42.7 % 3 + 18
d = (3 < 4)and 5/8
e = 23/5+23/5.0
h = 'a'+'b'
j = 'a' and not 'b'
print(a ,b,c,d,e,h,j)
type(a)
type(b)
type(c)
type(d)
type(e)
type(h)
type(j)
#16 증감 연산자가 파이썬에도 있나?
# 파이썬에서는 기본적으로 ++, --는 지원X
n = 3
#print(++n) n+=1
#print("n == " + n)
#print(--n) n-=1
#print("n == " + n)
#17
print('*** 사칙연산 프로그램 ***')
a = int(input('첫번째 정수를 입력하세요'))
b = int(input('두번째 정수를 입력하세요'))
print('%d + %d = %d' % (a, b,a+b))
print('%d - %d = %d' % (a, b,a-b))
print('%d * %d = %d' % (a, b,a*b))
print('%d / %d = %d' % (a, b,a/b))
| [
"[email protected]"
] | |
a74d39a7ed47f3466aa8e74441bccf8c60d82327 | aa84ffe9453175f50195c420064533e94308eaf7 | /hw/8/hw7.py | 9793949ed263094d4c38445a36951e1714f9db2b | [] | no_license | AryaAbhishek/ASE_aarya_sbhadau | 5caa3f0b76c93ba4e2da0e861c8b01c67819b0dd | 4291737404fa932992b4e2903970881fe5439cd3 | refs/heads/master | 2020-07-13T04:55:55.779298 | 2019-11-28T23:35:17 | 2019-11-28T23:35:17 | 204,995,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,488 | py | import random, math
from hw4 import Num, Table, cells, rows, file, fromString
seed = random.seed
import csv
# Build a distance function that reports the distance between two rows:
def distance(i, j, cols):
d = n = 0
p = 2
for col in cols:
n += 1
d0 = col.dist(i.cells[col.pos], j.cells[col.pos])
d += (d0 ** p)
# normalize distance
return d ** (1 / p) / n ** (1 / p)
# Divide the data
def cosine_distance(x, y, z, cols, dist):
return (distance(x, z, cols) ** 2 + dist ** 2 - distance(y, z, cols) ** 2) / (2 * dist)
class random_projection_tree:
def __init__(self):
self.leaves = []
self.children = []
self.table = None
self.level = 0
self.split_count = 0
self.is_root = False
def print_tree(root):
temp = ""
if not root.is_root:
for i in range(root.level):
temp += "|. "
print(temp + str(root.split_count))
temp = ""
if len(root.children) == 0:
for j in range(root.level - 1):
temp += "|. "
for col in root.leaves:
temp += col.col_name + " = "
if isinstance(col, Num):
temp += "{0} {1}".format(col.mu, col.sd)
else:
temp += "{0} {1}".format(col.mode, col.sym_ent())
print(temp)
else:
for each in root.children:
print_tree(each)
temp = ""
if root.is_root:
for col in root.leaves:
temp += col.col_name + " = "
if isinstance(col, Num):
temp += "{0} {1}".format(col.mu, col.sd)
else:
temp += "{0} {1}".format(col.mode, col.sym_ent())
print(temp)
class HW7:
def __init__(self, lines):
seed(1)
self.table = Table()
self.leaf_nodes = []
self.lines = lines
self.parse_lines()
self.tree = self.split_point(self.table, 0)
# print_tree(self.tree)
def parse_lines(self):
for i, row in enumerate(self.lines):
row = [x for x in row if x != ""]
self.table.read_lines(i, row)
def split_point(self, table, level):
node = random_projection_tree()
if len(table.rows) < 2 * pow(len(self.table.rows), 1 / 2):
for each in table.goals:
node.leaves.append(table.cols[each-1])
node.table = table
node.split_count = len(table.rows)
node.level = level
self.leaf_nodes.append(node)
return node
else:
_, best_points = self.best_pivot_points(table)
left_table, right_table = Table(), Table()
left_table.read_lines(0, [col.col_name for col in table.cols])
right_table.read_lines(0, [col.col_name for col in table.cols])
for i, each in enumerate(table.rows):
if i in best_points:
right_table.read_lines(i+1, each.cells)
else:
left_table.read_lines(i+1, each.cells)
split_count = len(left_table.rows) + len(right_table.rows)
node.children.append(self.split_point(left_table, level + 1))
node.children.append(self.split_point(right_table, level + 1))
node.split_count = split_count
node.level = level
return node
def fast_map(self, table):
cols = [table.cols[col] for col in table.xs]
random_point = random.randint(0, len(table.rows)-1)
pivot1, pivot2 = [], []
for row in range(0, len(table.rows)):
dist = distance(table.rows[random_point], table.rows[row], cols)
pivot1.append((row, dist))
pivot1.sort(key=lambda x: x[1])
pivot1_index = pivot1[math.floor(len(pivot1) * 0.9)][0]
for row in range(0, len(table.rows)):
dist = distance(table.rows[pivot1_index], table.rows[row], cols)
pivot2.append((row, dist))
pivot2.sort(key=lambda x: x[1])
dist = pivot2[math.floor(len(pivot2) * 0.9)][1]
pivot2_Index = pivot2[math.floor(len(pivot2) * 0.9)][0]
return pivot1_index, pivot2_Index, dist
def best_pivot_points(self, table):
count = 10
start = len(table.rows)
# left_split, right_split = 0, 0
best_tuple, best_point = None, None
while count > 0:
final_list = []
count -= 1
pivot_tuple = self.fast_map(table)
cols = [table.cols[col] for col in table.xs]
for row in range(0, len(table.rows)):
dist = cosine_distance(table.rows[pivot_tuple[0]], table.rows[pivot_tuple[1]], table.rows[row], cols, pivot_tuple[2])
final_list.append((row, dist))
final_list.sort(key=lambda x: x[1])
list_length = len(final_list)
index = (list_length - 1) // 2
if list_length % 2 !=0:
mid_dist = (final_list[index + 1][1] + final_list[index][1] ) / 2.0
else:
mid_dist = final_list[index][1]
point1 = set()
for point in final_list:
if mid_dist < point[1]:
point1.add(point[0])
right = abs((list_length - len(point1))- len(point1))
if start > right:
start = right
best_tuple = pivot_tuple
best_point = point1
return best_tuple, best_point
| [
"[email protected]"
] | |
258d9b057d63b655387f61435807603242c543ba | fe9e49ad74317e0bbdf3bb306f76adc618b376a9 | /p1-tests-martin/test_bool_arithmetic.py | 3522680fb840a8f59830442d765716c783f7fc71 | [] | no_license | blishko/usi-cc-tests | 56556983cd24c94d45a47491be5850bc2245940c | 364fd8123ca9ede4858dfd2da1beb740819fa07e | refs/heads/master | 2020-05-25T21:35:08.228115 | 2019-05-31T19:37:24 | 2019-05-31T19:37:24 | 188,001,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | print True + True
print True + False
print False + True
print False + False
print True - True
print True - False
print False - True
print False - False
print True * True
print True * False
print False * True
print False * False
print -True
print -False
print +True
print +False
| [
"[email protected]"
] | |
cdf43734eec6423bf3ea7671cf418e1083ca3f4c | ba6921a268198bc0af433622c021533905f5d462 | /tests/system/providers/google/cloud/dataproc/example_dataproc_spark_async.py | edb704f6bcc95e746cf65f861e54337477d0fd78 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | potiuk/airflow | b6447765b1a7b586a3d6c8d7ba9262f6bf68fbfd | ca2f3013bcb123c4b3973a5b85de77094bf2c459 | refs/heads/main | 2023-08-30T13:05:50.698888 | 2023-05-21T21:08:14 | 2023-05-21T21:26:14 | 173,467,275 | 8 | 7 | Apache-2.0 | 2023-05-21T21:58:40 | 2019-03-02T15:50:53 | Python | UTF-8 | Python | false | false | 3,837 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG for DataprocSubmitJobOperator with async spark job.
"""
from __future__ import annotations
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.operators.dataproc import (
DataprocCreateClusterOperator,
DataprocDeleteClusterOperator,
DataprocSubmitJobOperator,
)
from airflow.providers.google.cloud.sensors.dataproc import DataprocJobSensor
from airflow.utils.trigger_rule import TriggerRule
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
DAG_ID = "dataproc_spark_async"
PROJECT_ID = os.environ.get("SYSTEM_TESTS_GCP_PROJECT")
CLUSTER_NAME = f"dataproc-spark-async-{ENV_ID}"
REGION = "europe-west1"
ZONE = "europe-west1-b"
# Cluster definition
CLUSTER_CONFIG = {
"master_config": {
"num_instances": 1,
"machine_type_uri": "n1-standard-4",
"disk_config": {"boot_disk_type": "pd-standard", "boot_disk_size_gb": 1024},
},
"worker_config": {
"num_instances": 2,
"machine_type_uri": "n1-standard-4",
"disk_config": {"boot_disk_type": "pd-standard", "boot_disk_size_gb": 1024},
},
}
TIMEOUT = {"seconds": 1 * 24 * 60 * 60}
# Jobs definitions
SPARK_JOB = {
"reference": {"project_id": PROJECT_ID},
"placement": {"cluster_name": CLUSTER_NAME},
"spark_job": {
"jar_file_uris": ["file:///usr/lib/spark/examples/jars/spark-examples.jar"],
"main_class": "org.apache.spark.examples.SparkPi",
},
}
with models.DAG(
DAG_ID,
schedule="@once",
start_date=datetime(2021, 1, 1),
catchup=False,
tags=["example", "dataproc"],
) as dag:
create_cluster = DataprocCreateClusterOperator(
task_id="create_cluster",
project_id=PROJECT_ID,
cluster_config=CLUSTER_CONFIG,
region=REGION,
cluster_name=CLUSTER_NAME,
)
# [START cloud_dataproc_async_submit_sensor]
spark_task_async = DataprocSubmitJobOperator(
task_id="spark_task_async", job=SPARK_JOB, region=REGION, project_id=PROJECT_ID, asynchronous=True
)
spark_task_async_sensor = DataprocJobSensor(
task_id="spark_task_async_sensor_task",
region=REGION,
project_id=PROJECT_ID,
dataproc_job_id=spark_task_async.output,
poke_interval=10,
)
# [END cloud_dataproc_async_submit_sensor]
delete_cluster = DataprocDeleteClusterOperator(
task_id="delete_cluster",
project_id=PROJECT_ID,
cluster_name=CLUSTER_NAME,
region=REGION,
trigger_rule=TriggerRule.ALL_DONE,
)
create_cluster >> spark_task_async >> spark_task_async_sensor >> delete_cluster
from tests.system.utils.watcher import watcher
# This test needs watcher in order to properly mark success/failure
# when "teardown" task with trigger rule is part of the DAG
list(dag.tasks) >> watcher()
from tests.system.utils import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
| [
"[email protected]"
] | |
146bf7d3e84c6e45a66688596a17cbc95264edf7 | 0421849de8e06a60e2e6522d591666c771f6f23d | /setup.py | 76df59cb560f26f2e7488ab774426e74ef766093 | [] | no_license | irskep/gw0rp | 68f4c5a905dc590325dc6d9354da26f2974ba0c9 | f985c919141e19ee099766c18b7fe416f921836c | refs/heads/master | 2021-01-22T04:01:07.174867 | 2017-05-26T06:04:44 | 2017-05-26T06:04:44 | 92,418,799 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | """
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
OPTIONS = dict(
argv_emulation=True,
frameworks=['libavbin.dylib','pymunk/libchipmunk.dylib'],
plist = dict(CFBundleIconFile='gw0rp.icns')#, PyRuntimeLocations=['/Library/Frameworks/Python.framework/Versions/Current/Python', '/System/Library/Frameworks/Python.framework/Versions/Current/Python'])
)
setup(
app=['gw0rp.py'],
data_files=['Data','gamelib','lepton',
'pymunk','psyco','gw0rp.icns', 'yaml', 'pyglet'],
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
| [
"[email protected]"
] | |
7f460fc00756afe2813233fae211f7d73cbdd5ae | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/synapse/azure-synapse-nspkg/setup.py | f43112c0a11de1a4dd5c84fa945d4b6109ee771e | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 1,353 | py | #!/usr/bin/env python
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from setuptools import setup
setup(
name='azure-synapse-nspkg',
version='1.0.0',
description='Microsoft Azure Synapse Namespace Package [Internal]',
long_description=open('README.md', 'r').read(),
license='MIT License',
author='Microsoft Corporation',
author_email='[email protected]',
url='https://github.com/Azure/azure-sdk-for-python/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=[
'azure.synapse',
],
install_requires=[
'azure-nspkg>=2.0.0',
]
)
| [
"[email protected]"
] | |
2cd5dc24279b938bfffb5ee23d7cc5374d2009d4 | 103ec492a2e30af0634f4a9cef30a6b2ddc63cdd | /models/encoders/core/pyramidal_blstm.py | 386cb989c141cd78bc88b5317a0acb3d99707688 | [
"MIT"
] | permissive | superhg2012/tensorflow_end2end_speech_recognition | 800c9880e6cf6ec4036d09fe3248cfbd4d594618 | 6abf9631c0b6d608c249284cba85aee0a391c98e | refs/heads/master | 2021-07-07T12:24:23.058110 | 2017-10-04T08:20:59 | 2017-10-04T08:20:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,968 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Pyramidal bidirectional LSTM Encoder class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class PyramidalBLSTMEncoder(EncoderBase):
"""Pyramidal Bidirectional LSTM Encoder.
Args:
num_units (int): the number of units in each layer
num_layers (int): the number of layers
num_classes (int): the number of classes of target labels
(except for a blank label). if 0, return hidden states before
passing through the softmax layer
lstm_impl (string, optional):
BasicLSTMCell or LSTMCell or LSTMBlockCell or
LSTMBlockFusedCell or CudnnLSTM.
Choose the background implementation of tensorflow.
Default is LSTMBlockCell (the fastest implementation).
use_peephole (bool, optional): if True, use peephole
parameter_init (float, optional): the range of uniform distribution to
initialize weight parameters (>= 0)
clip_activation (float, optional): the range of activation clipping (> 0)
# num_proj (int, optional): the number of nodes in the projection layer
# bottleneck_dim (int, optional): the dimensions of the bottleneck layer
name (string, optional): the name of encoder
"""
def __init__(self,
num_units,
num_layers,
num_classes,
lstm_impl='LSTMBlockCell',
use_peephole=True,
parameter_init=0.1,
clip_activation=5.0,
num_proj=None,
bottleneck_dim=None,
concat=False,
name='pblstm_encoder'):
if num_units % 2 != 0:
raise ValueError('num_unit should be even number.')
self.num_units = num_units
self.num_layers = num_layers
self.num_classes = num_classes
self.lstm_impl = lstm_impl
self.use_peephole = use_peephole
self.parameter_init = parameter_init
self.clip_activation = clip_activation
self.num_proj = None
self.bottleneck_dim = None
self.name = name
self.return_hidden_states = True if num_classes == 0 else False
def _build(self, inputs, inputs_seq_len,
keep_prob_input, keep_prob_hidden, keep_prob_output):
"""Construct Pyramidal Bidirectional LSTM encoder.
Args:
inputs (placeholder): A tensor of size`[B, T, input_size]`
inputs_seq_len (placeholder): A tensor of size` [B]`
keep_prob_input (placeholder, float): A probability to keep nodes
in the input-hidden connection
keep_prob_hidden (placeholder, float): A probability to keep nodes
in the hidden-hidden connection
keep_prob_output (placeholder, float): A probability to keep nodes
in the hidden-output connection
Returns:
logits: A tensor of size `[T, B, num_classes]`
final_state: A final hidden state of the encoder
"""
# inputs: `[B, T, input_size]`
batch_size = tf.shape(inputs)[0]
# Dropout for the input-hidden connection
outputs = tf.nn.dropout(
inputs, keep_prob_input, name='dropout_input')
initializer = tf.random_uniform_initializer(
minval=-self.parameter_init, maxval=self.parameter_init)
# Hidden layers
for i_layer in range(1, self.num_layers + 1, 1):
with tf.variable_scope('pblstm_hidden' + str(i_layer),
initializer=initializer) as scope:
lstm_fw = tf.contrib.rnn.LSTMCell(
self.num_units,
use_peepholes=self.use_peephole,
cell_clip=self.clip_activation,
initializer=initializer,
num_proj=None,
forget_bias=1.0,
state_is_tuple=True)
lstm_bw = tf.contrib.rnn.LSTMCell(
self.num_units,
use_peepholes=self.use_peephole,
cell_clip=self.clip_activation,
initializer=initializer,
num_proj=self.num_proj,
forget_bias=1.0,
state_is_tuple=True)
# Dropout for the hidden-hidden connections
lstm_fw = tf.contrib.rnn.DropoutWrapper(
lstm_fw, output_keep_prob=keep_prob_hidden)
lstm_bw = tf.contrib.rnn.DropoutWrapper(
lstm_bw, output_keep_prob=keep_prob_hidden)
if i_layer > 0:
# Convert to time-major: `[T, B, input_size]`
outputs = tf.transpose(outputs, (1, 0, 2))
max_time = tf.shape(outputs)[0]
max_time_half = tf.floor(max_time / 2) + 1
# Apply concat_fn to each tensor in outputs along
# dimension 0 (times-axis)
i_time = tf.constant(0)
final_time, outputs, tensor_list = tf.while_loop(
cond=lambda t, hidden, tensor_list: t < max_time,
body=lambda t, hidden, tensor_list: self._concat_fn(
t, hidden, tensor_list),
loop_vars=[i_time, outputs, tf.Variable([])],
shape_invariants=[i_time.get_shape(),
outputs.get_shape(),
tf.TensorShape([None])])
outputs = tf.stack(tensor_list, axis=0)
inputs_seq_len = tf.cast(tf.floor(
tf.cast(inputs_seq_len, tf.float32) / 2),
tf.int32)
# Transpose to `[batch_size, time, input_size]`
outputs = tf.transpose(outputs, (1, 0, 2))
(outputs_fw, outputs_bw), final_state = tf.nn.bidirectional_dynamic_rnn(
cell_fw=lstm_fw,
cell_bw=lstm_bw,
inputs=outputs,
sequence_length=inputs_seq_len,
dtype=tf.float32,
scope=scope)
# NOTE: initial states are zero states by default
# Concatenate each direction
outputs = tf.concat(axis=2, values=[outputs_fw, outputs_bw])
if self.return_hidden_states:
return outputs, final_state
with tf.variable_scope('output') as scope:
logits_2d = tf.contrib.layers.fully_connected(
outputs, self.num_classes,
activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(
stddev=self.parameter_init),
biases_initializer=tf.zeros_initializer(),
scope=scope)
# Reshape back to the original shape
logits = tf.reshape(
logits_2d, shape=[batch_size, -1, self.num_classes])
# Convert to time-major: `[T, B, num_classes]'
logits = tf.transpose(logits, (1, 0, 2))
# Dropout for the hidden-output connections
logits = tf.nn.dropout(
logits, keep_prob_output, name='dropout_output')
# NOTE: This may lead to bad results
return logits, final_state
def _concat_fn(self, current_time, x, tensor_list):
"""Concatenate each 2 time steps to reduce time resolution.
Args:
current_time: The current timestep
x: A tensor of size `[max_time, batch_size, feature_dim]`
result: A tensor of size `[t, batch_size, feature_dim * 2]`
Returns:
current_time: current_time + 2
x: A tensor of size `[max_time, batch_size, feature_dim]`
result: A tensor of size `[t + 1, batch_size, feature_dim * 2]`
"""
print(tensor_list)
print(current_time)
print('-----')
batch_size = tf.shape(x)[1]
feature_dim = x.get_shape().as_list()[2]
# Concat features in 2 timesteps
concat_x = tf.concat(
axis=0,
values=[tf.reshape(x[current_time],
shape=[1, batch_size, feature_dim]),
tf.reshape(x[current_time + 1],
shape=[1, batch_size, feature_dim])])
# Reshape to `[1, batch_size, feature_dim * 2]`
concat_x = tf.reshape(concat_x,
shape=[1, batch_size, feature_dim * 2])
tensor_list = tf.concat(axis=0, values=[tensor_list, [concat_x]])
# Skip 2 timesteps
current_time += 2
return current_time, x, tensor_list
| [
"[email protected]"
] | |
17a0169e232c08825edaf79508f39be114dec58a | 4b6d80db4fe03e213d0f55ddbfcf86a01b4ee5a6 | /generate_sample.py | 6675107399ac2851038f1bb5fd7f803a8f1b106d | [] | no_license | natalie-woerle/Weather-Visualizer | d70efa71d6a58b4014994e62d02c8daa78541c4a | cef8754d8ce3e7b4adda562954fac4b6812b953b | refs/heads/main | 2022-12-27T21:04:04.532144 | 2020-10-12T19:29:18 | 2020-10-12T19:29:18 | 303,480,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | from parse_web import WeatherParser
import os
import random
from datetime import datetime,date
parser = WeatherParser()
def generate_sample():
os.chdir(parser.csv_directory)
start_dt = date.today().replace(day=1, month=1).toordinal()
end_dt = date.today().toordinal()
random_day = date.fromordinal(random.randint(start_dt, end_dt))
filename = datetime.strftime(random_day,"%d.%m.%Y.csv")
with open(filename, "a+", encoding="UTF-8") as file:
for rel_time in parser.relevant_times:
for station in parser.relevant_stations:
temp = round(random.uniform(10, 25),1)
file.write(f"{rel_time};{station};{temp}\n") | [
"[email protected]"
] | |
a580f1dad684d130a74dbaf6cc30bf9f4051adfa | 7df49bf9a5fa6dfd53a1a888913e30926e1cdf34 | /even-getallen.py | 8541d4a40bf2a4685021c5a60aed0c4517bddbdc | [] | no_license | Rouamu/forever-young | 74349cdc0727c99a0d4adaf8aa4e848b4653ac5e | c6088f7cee887567ee4bf4588500ea26e5474b3e | refs/heads/main | 2023-08-21T18:54:29.515770 | 2021-10-11T08:08:33 | 2021-10-11T08:08:33 | 409,884,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | for i in range(20,50,2):
print(i)
| [
"[email protected]"
] | |
4782d50ed3d190192ad7c23f3fec7e4f81caff01 | 0d9252d483372ba70cc9319008ce874439f3d21f | /student_app/migrations/0002_auto_20200225_1245.py | fb3b7c1a56b5fd14770629858688dd357b4408c1 | [] | no_license | AjayJangid17/Student_Form | 0c2df4f104772d49239c3d47e8527e7633cfcf46 | b5f563d7519a86f2c8eaf2ccf170fd94d614643d | refs/heads/master | 2022-10-03T05:33:45.626322 | 2020-02-27T13:55:46 | 2020-02-27T13:55:46 | 243,525,500 | 0 | 0 | null | 2022-09-23T22:35:26 | 2020-02-27T13:26:13 | Python | UTF-8 | Python | false | false | 1,065 | py | # Generated by Django 3.0.3 on 2020-02-25 12:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student_app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='studentform',
name='address',
field=models.CharField(default=None, max_length=100, null=True),
),
migrations.AddField(
model_name='studentform',
name='city',
field=models.CharField(default=None, max_length=250, null=True),
),
migrations.AddField(
model_name='studentform',
name='email',
field=models.CharField(default=None, max_length=100, unique=True),
),
migrations.AddField(
model_name='studentform',
name='name',
field=models.CharField(default=None, max_length=100, null=True),
),
migrations.AlterModelTable(
name='studentform',
table='Student Form',
),
]
| [
"[email protected]"
] | |
89258605a3e246f4fc077795bad4a92a251671fe | 4546a122fc7c01aae9772c88f2844c705bdf5242 | /config/settings/production.py | d69314582447e73b8db6d1a42d849a39749f8f63 | [
"MIT"
] | permissive | damildrizzy/devfolio | c7d351cb365277afa4c5f436a63e6b2c66105557 | 21b709b34f71d7cf0ea2131c5dfe9c82aa9cd7fd | refs/heads/master | 2022-04-15T18:09:58.836305 | 2020-04-11T15:26:43 | 2020-04-11T15:26:43 | 254,665,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,086 | py | import logging
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["example.com"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
# Mimicing memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
"CacheControl": f"max-age={_AWS_EXPIRY}, s-maxage={_AWS_EXPIRY}, must-revalidate"
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_DEFAULT_ACL = None
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_REGION_NAME = env("DJANGO_AWS_S3_REGION_NAME", default=None)
# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
DEFAULT_FILE_STORAGE = "devfolio.utils.storages.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="devfolio <[email protected]>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
"DJANGO_EMAIL_SUBJECT_PREFIX", default="[devfolio]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
# https://anymail.readthedocs.io/en/stable/esps/sendgrid/
EMAIL_BACKEND = "anymail.backends.sendgrid.EmailBackend"
ANYMAIL = {
"SENDGRID_API_KEY": env("SENDGRID_API_KEY"),
"SENDGRID_GENERATE_MESSAGE_ID": env("SENDGRID_GENERATE_MESSAGE_ID"),
"SENDGRID_MERGE_FIELD_FORMAT": env("SENDGRID_MERGE_FIELD_FORMAT"),
"SENDGRID_API_URL": env("SENDGRID_API_URL", default="https://api.sendgrid.com/v3/"),
}
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
# Errors logged by the SDK itself
"sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
},
}
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env("SENTRY_DSN")
SENTRY_LOG_LEVEL = env.int("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)
sentry_logging = LoggingIntegration(
level=SENTRY_LOG_LEVEL, # Capture info and above as breadcrumbs
event_level=logging.ERROR, # Send errors as events
)
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=[sentry_logging, DjangoIntegration(), CeleryIntegration()],
)
# Your stuff...
# ------------------------------------------------------------------------------
| [
"[email protected]"
] | |
3132075cd1af439907a9361da793fe4c8dc6e977 | de2a6c2d179cb6d0c583b54e09d1bf29c0892af1 | /bank_pay.py | ad997e810c401a685e7034e2469ded1660a38ea5 | [] | no_license | yyww322/RailWay | af6bf4a960e191811d63aed894285cebd2ba61cb | 00af82a3bf62341203956098ccac37972b9ab50f | refs/heads/master | 2021-01-10T01:40:31.582879 | 2015-11-16T09:13:27 | 2015-11-16T09:13:27 | 46,093,179 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | balance = 4213
annualInterestRate = 0.2
monthlyPaymentRate = 0.04
minipay=0
totalpaid=0
for mon in range(1,13) :
minipay=balance*monthlyPaymentRate
min_int=int(minipay*100+0.5)
min_ans=float(min_int)/100
totalpaid+=minipay
balance=(balance-minipay)
balance=balance+annualInterestRate*balance/12
bal_int=int(balance*100+0.5)
bal_ans=float(bal_int)/100
print("Month: "+str(mon))
print("Minimum monthly payment: "+str(min_ans))
print("Remaining balance: "+str(bal_ans))
total_int=int(totalpaid*100+0.5)
total_ans=float(total_int)/100
print("Total paid: : "+str(total_ans))
print("Remaining balance: "+str(bal_ans))
| [
"[email protected]"
] | |
70ebf42b0284e8968265696f81ac123a57e60175 | de70353efdddc68a4612accdf3b5ce1848ebf153 | /STA-EXAM 1-AnmolSureshkumarPanchal.py | b9916aaf425f99e8981653356df6a958e1a8d8d1 | [] | no_license | AnmolPanchal/Statistical-Computing | cec43a5e462cb541e436eeb238e060f990ef4ba7 | a678cc82b1adc2cc7f91502b6207303908744cc7 | refs/heads/master | 2020-04-11T01:58:25.744611 | 2018-12-12T04:15:41 | 2018-12-12T04:15:41 | 161,431,687 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,779 | py |
# coding: utf-8
# # Question 1 - Bootstrap, Jackknife, CI
# In[201]:
import pandas as pd
import numpy as np
import math
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
import matplotlib as mp
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
import scipy.stats
# In[202]:
data = pd.read_csv(r"C:\Users\anmol\Downloads\mtcars.csv")
## r before your normal string helps it to convert normal string to raw string
# In[207]:
# Summary statistics for the dataframe
data.describe()
# In[209]:
"""
Covariance
Covariance is one of the fundamental techniques for understanding the relation between two variables. A positive covariance between two variables means that they are positively related, while a negative covariance means the variables are inversely related. The key drawback of covariance is that it does not tell us the degree of the positive or negative relation between the variables.
"""
data.cov()
# In[210]:
"""
Correlation
Correlation is another commonly used technique to determine the relationship between two variables. Correlation tells us whether the variables are positively or inversely related, and the number tells us the degree to which the variables tend to move together.
When we say that two items are correlated, we mean that a change in one item effects a change in the other. Correlation always lies in the range between -1 and 1. For example, if two items have a correlation of .6 (60%), a change in one item results in a positive 60% change in the other.
"""
data.corr()
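# In[ ]:
# Quick numeric check of the relation described above: the correlation of two
# columns equals their covariance divided by the product of their standard
# deviations. A minimal sketch using the mpg and hp columns (assumed to be
# present in this mtcars file).
corr_manual = data['mpg'].cov(data['hp']) / (data['mpg'].std() * data['hp'].std())
print(corr_manual, data['mpg'].corr(data['hp']))  # the two values should agree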
# In[211]:
print ("DataFrame Index: ", data.index)
# In[212]:
print(data.values)
# In[213]:
# Sort your dataframe
data.sort_values(by =['mpg','Cars'], ascending=[True,True])
# In[214]:
# Resampling from our dataset
from sklearn.utils import resample
# With replace=False and n_samples equal to the number of rows, this draw is a
# permutation of the mpg column rather than a bootstrap resample; a bootstrap
# resample would sample with replacement (replace=True).
boot = resample(data.iloc[:,1:2], replace=False, n_samples=32, random_state=1)
# In[215]:
boot
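# In[ ]:
# A minimal sketch of a single bootstrap resample of the same column, drawn
# with replacement so that individual rows can appear more than once:
boot_wr = resample(data.iloc[:, 1:2], replace=True, n_samples=32, random_state=1)
boot_wr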
# In[255]:
import math
import numpy
import numpy.random
def __array_mean_indices(a, indices, func_axis=None, dtype=None):
    if func_axis is None:
        return (numpy.mean(a.flat[indices], dtype=dtype), )
    else:
        return tuple(numpy.mean(numpy.reshape(numpy.take(a, [j,], axis=func_axis), -1)[indices]) for j in range(a.shape[func_axis]))
def __number_measurements(a, func_axis=None):
    """ Calculates the number of measurements of an array from the array and the function axis.
    """
    if func_axis is None:
        return a.size
    else:
        # Integer division so the count of measurements stays an int in Python 3
        return a.size // a.shape[func_axis]
def identity(x):
    """
    Identity function used as default function in the resampling methods.
    """
    return x
def bootstrap(a, iterations, func=identity, func_axis=None, dtype=None):
    # Calculate the number of measurements
    n = __number_measurements(a, func_axis)
    # Evaluate the function on the bootstrap means: each replicate draws n
    # indices uniformly with replacement and averages the resampled values
    bootstrap_values = [func(*(__array_mean_indices(a, numpy.random.randint(0, high=n, size=n), func_axis=func_axis, dtype=dtype))) for i in range(iterations)]
    # Return the average value and the error of this averaged value
    return numpy.mean(bootstrap_values), math.sqrt(float(iterations)/float(iterations - 1))*numpy.std(bootstrap_values)
# In[256]:
__array_mean_indices(boot.values,[0,31], func_axis=None, dtype=None)
# In[257]:
__number_measurements(boot.values, func_axis=None)
# In[258]:
identity(boot.values)  # identity simply returns its argument unchanged
# In[259]:
bootstrap(boot.values, 100, func=identity, func_axis=None, dtype=None)
# In[266]:
z = np.mean(boot.values)
v = np.std(boot.values)
print("The sample mean and std deviation is:->",z,v)
# In[289]:
CV = np.sqrt(np.var(boot))/np.mean(boot)
print(CV)
# Another way to obtain the coefficient of variation is shown below:
b_cov = scipy.stats.variation(boot)
print(b_cov)
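# In[ ]:
# A minimal sketch of a percentile bootstrap for the coefficient of variation:
# resample the 32 observations with replacement, recompute the CV on each
# resample, and read off the 5th and 95th percentiles of the bootstrap
# distribution (1000 replicates is an arbitrary choice here).
vals = boot.values.ravel()
boot_cvs = []
for _ in range(1000):
    s = np.random.choice(vals, size=len(vals), replace=True)
    boot_cvs.append(np.std(s) / np.mean(s))
print(np.percentile(boot_cvs, [5, 95]))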
# In[264]:
a= np.mean(boot)
N=32
bias =(a - CV)/N
print(bias)
# In[61]:
n = 32
# The standard error of the mean is the sample std divided by sqrt(n), not by n
se = np.std(boot) / np.sqrt(n)
print("Std error of this sample is:", se)
# In[287]:
mean_a, error_a = bootstrap(boot.values, 100)
print(mean_a,error_a)
# error_a is the bootstrap estimate of the standard error (se_hat); se above is the plug-in estimate
# In[281]:
(mean_a > 34, mean_a < 10)
# In[282]:
(error_a > 2.0/math.sqrt(1000 - 1) - 0.01, error_a < 2.0/math.sqrt(1000 - 1) + 0.01)
# In[346]:
# from scipy.special import erfinv
# import numpy as np
# from astropy.stats import jackknife_resampling
# from astropy.stats import jackknife_stats
# In[347]:
test_statistic = np.mean
# In[348]:
test_statistic
# In[349]:
d = boot.values
# In[351]:
import numpy as np
from scipy.special import erfinv  # needed below for the jackknife confidence interval
from astropy.stats import jackknife_resampling
# from astropy.stats import jackknife_stats
resamples = jackknife_resampling(d)
resamples
# In[352]:
x = scipy.stats.variation
# In[353]:
def jackknife_resampling(data):
n = data.shape[0]
assert n > 0, "data must contain at least one measurement"
resamples = np.empty([n, n-1])
for i in range(n):
resamples[i] = np.delete(data, i)
return resamples
def jackknife_stats(data, statistic, conf_lvl=0.95):
    # Derive n and the leave-one-out resamples from the data argument itself,
    # instead of relying on variables left over in the surrounding scope
    n = data.shape[0]
    resamples = jackknife_resampling(data)
    stat_data = statistic(data)
    jack_stat = np.apply_along_axis(statistic, 1, resamples)
    mean_jack_stat = np.mean(jack_stat, axis=0)
    # jackknife bias
    bias = (n-1)*(mean_jack_stat - stat_data)
# jackknife standard error
std_err = np.sqrt((n-1)*np.mean((jack_stat - mean_jack_stat)*(jack_stat -
mean_jack_stat), axis=0))
# bias-corrected "jackknifed estimate"
estimate = stat_data - bias
# jackknife confidence interval
assert (conf_lvl > 0 and conf_lvl < 1), "confidence level must be in (0,1)."
z_score = np.sqrt(2.0)*erfinv(conf_lvl)
conf_interval = estimate + z_score*np.array((-std_err, std_err))
return estimate, bias, std_err, conf_interval
# In[354]:
# Jackknife of the standard deviation of the original sample
jackknife_stats(d, np.std, conf_lvl=0.95)
# In[356]:
# Jackknife of the coefficient of variation (x = scipy.stats.variation)
jackknife_stats(d, x, conf_lvl=0.95)
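# In[ ]:
# A minimal cross-check: the jackknife standard error of the mean should be
# close to the bootstrap standard error (error_a) computed earlier.
jk_est, jk_bias, jk_se, jk_ci = jackknife_stats(d, np.mean, conf_lvl=0.95)
print(jk_se, error_a)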
# In[360]:
plt.hist(d, 25, histtype='step');
# In[361]:
def mean_confidence_interval(sample, confidence=0.95):
    # Use the sample that was passed in, not the global d
    a = 1.0 * np.array(sample).ravel()
    n = len(a)
    m, se = np.mean(a), scipy.stats.sem(a)
    h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
    return m, m-h, m+h
# In[362]:
mean_confidence_interval(resamples, confidence=0.95)
# In[466]:
np.percentile(resamples, 95)  # np.percentile expects percentages in [0, 100]
# In[467]:
scipy.stats.mstats.mquantiles(resamples, 0.95)
# In[468]:
scipy.stats.mstats.mquantiles(resamples, 0.05)
# # Question 2 - LSSVD
# In[366]:
import pandas as pd
import numpy as np
# In[477]:
data = pd.read_csv(r"C:\Users\anmol\Downloads\charlie1.csv")
X = data[['z1','z2']]
y = data['Data']
y_out = np.array(y[20:])
x_out = np.array(X[20:])
y = y[0:20]
X = X[0:20]
X = np.array(X)
y = np.array(y)
# In[478]:
def Kernel(x, y, sigma):
return np.exp(-np.linalg.norm(x-y)**2 / ( (sigma ** 2)))
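# The kernel above is the Gaussian/RBF kernel exp(-||x - y||**2 / sigma**2); note that the
# bandwidth enters as sigma**2 without the conventional factor of 2 in the denominator.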
def Gram_Matrix(x):
K = np.zeros((len(x),len(x)))
for i in range(0, len(x)):
for j in range(0, len(x)):
K[i, j] = Kernel(x[i], x[j], sigma)
return K
def H(x):
    mat = np.zeros((len(x), len(x)))
    # regularized Gram matrix K + I/(2C); the parentheses matter, since "/2*C"
    # would divide by 2 and then multiply by C
    mat[0:len(x), 0:len(x)] = Gram_Matrix(x) + np.eye(len(x)) / (2 * C)
    return mat
def alpha():
# a = 0.5*np.dot(np.linalg.inv(H_mat),(k + np.dot((2-np.dot(np.dot(e.T, np.linalg.inv(H_mat)), k))/(np.dot(np.dot(e.T, np.linalg.inv(H_mat)), e)),e)))
p1 = np.dot(np.dot(np.linalg.inv(H_mat), e.T),k)
p2 = np.dot(np.dot(np.linalg.inv(H_mat), e.T), e)
p3 = (2-p1)/p2
p3 = k + np.dot(p3, e)
a = 0.5*np.dot(np.linalg.inv(H_mat),p3)
return a
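# alpha() solves the LS-SVDD linear system a = 0.5 * H^-1 (k + b*e), where the scalar
# b = (2 - e'H^-1 k) / (e'H^-1 e) is chosen exactly so that the constraint
# e'a = sum(a) = 1 holds.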
# In[513]:
e = np.ones(len(X))
k = np.zeros((len(X)))
sigma = 0.125
C = 1
# In[514]:
for j in range(0, len(X)):
k[j] = Kernel(X[j], X[j], sigma)
# In[515]:
H_mat = H(X)
al = alpha()
# In[516]:
def R_square():
    total = 0
    for s in range(0, len(X)):
        # reset the partial sums for every training point; accumulating them across the
        # outer loop would mix the sums of different points into one running total
        p1 = 0
        p2 = 0
        k = Kernel(X[s], X[s], sigma)
        for j in range(0, len(X)):
            p1 = p1 + al[j]*Kernel(X[s], X[j], sigma)
            for l in range(0, len(X)):
                p2 = p2 + al[j]*al[l]*Kernel(X[j], X[l], sigma)
        total = total + (k - 2 * p1 + p2)
    final = total/len(X)
    return final
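# R_square() is the mean squared feature-space distance of the training points to the centre
# a = sum_j alpha_j * phi(X_j):  ||phi(x_s) - a||^2 = K(x_s, x_s) - 2*sum_j a_j K(x_s, X_j)
# + sum_{j,l} a_j a_l K(X_j, X_l).  It serves below as the squared-radius decision threshold.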
final = R_square()
# In[517]:
final
# In[518]:
def classification(x):
    t_out = []
    t_in = []
    for z in range(0, len(x)):
        # reset the partial sums for every test point, and kernel each test point x[z]
        # individually (passing the whole array x would collapse it into one Frobenius norm)
        p = 0
        p1 = 0
        k = Kernel(x[z], x[z], sigma)
        for j in range(0, len(X)):
            p = p + al[j]*Kernel(x[z], X[j], sigma)
            for l in range(0, len(X)):
                p1 = p1 + al[j]*al[l]*Kernel(X[j], X[l], sigma)
        d = k - 2*p + p1
        if d <= final:
            t_in.append(x[z])
        else:
            t_out.append(x[z])
    return t_out, t_in
t_out, t_in = classification(x_out)
# In[505]:
t_out
# In[506]:
t_in
# In[507]:
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
clf = svm.OneClassSVM(kernel = 'rbf', gamma = 'auto')
clf.fit(t_out)  # OneClassSVM is unsupervised: fit takes only the samples, labels are not used
# In[526]:
clf.predict(t_out)
# In[524]:
y_pred = clf.predict(t_out)
n_error_outliers = y_pred[y_pred == 1].size
print("Number of errors = ",n_error_outliers,"/",y_out.size)
#classification rate
rate = n_error_outliers/y_out.size
print("Classification rate = ",100*(1-rate),"%")
# In[525]:
df = pd.DataFrame(t_out)
# In[511]:
import seaborn as sns
sns.pairplot(df)
# In[512]:
l = df.iloc[0:,1:2]
x = np.linspace(0, 10, 10)
y = l
plt.plot(t_out, y_out, 'o', color='black');
print("This shows that all t_out i.e outliers and y_out = New points are detected as anomaly and shown below at -1,0 ")
print("Rest all points are not shown as they appear to be inside the circle of radius = final =0.47 and are not counted as anomaly i.e why we have t_in as empty set for any -1 value.")
# # Question 3 - Acceptance Rejection Sampling
# In[493]:
import numpy as np
import scipy.stats as st
import seaborn as sns
import matplotlib.pyplot as plt
import math
# In[494]:
i = 0
k = 0
n = 1000
z = np.random.uniform(0,1,n)
while i<n:
    u = np.random.uniform(0,1,1)
    y = np.random.exponential(scale=0.001,size = 1)
    k = k+1
    if u >= np.sqrt(2/math.pi)*np.exp(-y**2/2):  # -y**2/2, not -y*2/2: the target is the half-normal density
        i = i
    else:
        z[i] = y*(u < np.sqrt(2/math.pi)*np.exp(-y**2/2))
        i += 1
print(i, k)
# In[495]:
# P= P(Y accepted) =1/c
P=i/k
c = 1/P
print("Bounding Constant is c:", c)
# In[496]:
sns.distplot(z, hist = True, kde = True)
plt.show()
# In[497]:
"""
Answers:
a) Calculate the optimal constant C for acceptance rejection as a function of λ.
"""
print("The expected number of iterations of the algorithm required until an X is successfully generated is exactly the bounding constant C. In particular, we assume that the ratio f(x)/g(x) is bounded by a constant c > 0. And in practice we would want c as close to 1 as possible.")
print("C =", c)
"""
b) What is the best parameterλ∈(0,∞) you could use for the proposals.
"""
print("λ = scaling parameter i.e scale =0.001 , I have observed that smaller the scale value goes more optimal exponential distribution is generated. So in this case out of all scale values I would consider scale = 0.001 as best parameter for our λ.")
print("scale = 0.001")
"""
c) Using the optimal λ, how many of the generated exponentially dis-tributed proposals do you expect to
accept (as a percentage)?
"""
print("The percentage of accepted distributed proposals")
print(100-( (k-i)/k)*100)
"""
d)Write Python codes to generate positive normals using the Accept-Reject Algorithm.
"""
print("The positive normal distribution values are plotted as follow: ")
sns.distplot(z, hist = True, kde = True)
plt.show()
# In[498]:
"""
Acceptance-Rejection method
Denote the density of X by f . This method requires a function g that majorizes f ,
g(x) ≥ f (x)
for all x. Now g will not be a density, since
c = {-∞, ∞}g(x)dx ≥ 1.
Assume that c < ∞. Then h(x) = g(x)/c is a density. Algorithm:
1. Generate Y having density h;
2. Generate U from U(0, 1), independent of Y ;
3. If U ≤ f (Y )/g(Y ), then set X = Y ; else go back to step 1.
The random variable X generated by this algorithm has density f .
Validity of the Acceptance-Rejection method
Note
P(X ≤ x) = P(Y ≤ x|Y accepted).
Now,
P(Y ≤ x, Y accepted) ={x,−∞}f (y)/g(y)*h(y)dy =1/c*{x,−∞}f (y)dy,
and thus, letting x → ∞ gives
P(Y accepted) =1/c.
Hence,
P(X ≤ x) =P(Y ≤ x, Y accepted)/P(Y accepted)={x,−∞}f (y)dy.
Source="https://www.win.tue.nl/~marko/2WB05/lecture8.pdf"
c=sqrt(2e/π)≈1.32.
Source ="https://www.scss.tcd.ie/Brett.Houlding/Domain.sites2/sslides5.pdf"
"""
| [
"[email protected]"
] | |
3411eab83cab3ceb57f6cd5eda2384ad592f80c4 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02753/s317320814.py | 1d01cebc3d9e84f7bab93a183ff353992a529d51 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | S = len(set(input()))
if S == 2:
print('Yes')
else:
print('No') | [
"[email protected]"
] | |
f0588455a5d8e043143afee399191423e28f8734 | 84e986f28e006d7d3d7f8227b0fbe2d5334612bb | /aws_xray_sdk/ext/django/middleware.py | c10fec7c6d9e2ffaec3e6ac23246b25b8875e4ce | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | azatoth/aws-xray-sdk-python | 40fe3cf250977fa4706880bcf316c49a38b90b08 | dc5086373c47e9a5e32882cfb20796d9f988a3a4 | refs/heads/master | 2020-03-18T07:32:46.703269 | 2018-05-21T20:38:17 | 2018-05-21T20:38:17 | 134,459,375 | 0 | 1 | Apache-2.0 | 2018-05-24T08:32:27 | 2018-05-22T18:34:27 | Python | UTF-8 | Python | false | false | 2,993 | py | import logging
import traceback
from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.core.models import http
from aws_xray_sdk.ext.util import calculate_sampling_decision, \
calculate_segment_name, construct_xray_header, prepare_response_header
log = logging.getLogger(__name__)
# Django will rewrite some http request headers.
USER_AGENT_KEY = 'HTTP_USER_AGENT'
X_FORWARDED_KEY = 'HTTP_X_FORWARDED_FOR'
REMOTE_ADDR_KEY = 'REMOTE_ADDR'
HOST_KEY = 'HTTP_HOST'
CONTENT_LENGTH_KEY = 'content-length'
class XRayMiddleware(object):
"""
Middleware that wraps each incoming request to a segment.
"""
def __init__(self, get_response):
self.get_response = get_response
# hooks for django version >= 1.10
def __call__(self, request):
sampling_decision = None
meta = request.META
xray_header = construct_xray_header(meta)
# a segment name is required
name = calculate_segment_name(meta.get(HOST_KEY), xray_recorder)
sampling_decision = calculate_sampling_decision(
trace_header=xray_header,
recorder=xray_recorder,
service_name=meta.get(HOST_KEY),
method=request.method,
path=request.path,
)
segment = xray_recorder.begin_segment(
name=name,
traceid=xray_header.root,
parent_id=xray_header.parent,
sampling=sampling_decision,
)
segment.save_origin_trace_header(xray_header)
segment.put_http_meta(http.URL, request.build_absolute_uri())
segment.put_http_meta(http.METHOD, request.method)
if meta.get(USER_AGENT_KEY):
segment.put_http_meta(http.USER_AGENT, meta.get(USER_AGENT_KEY))
if meta.get(X_FORWARDED_KEY):
# X_FORWARDED_FOR may come from untrusted source so we
# need to set the flag to true as additional information
segment.put_http_meta(http.CLIENT_IP, meta.get(X_FORWARDED_KEY))
segment.put_http_meta(http.X_FORWARDED_FOR, True)
elif meta.get(REMOTE_ADDR_KEY):
segment.put_http_meta(http.CLIENT_IP, meta.get(REMOTE_ADDR_KEY))
response = self.get_response(request)
segment.put_http_meta(http.STATUS, response.status_code)
if response.has_header(CONTENT_LENGTH_KEY):
length = int(response[CONTENT_LENGTH_KEY])
segment.put_http_meta(http.CONTENT_LENGTH, length)
response[http.XRAY_HEADER] = prepare_response_header(xray_header, segment)
xray_recorder.end_segment()
return response
def process_exception(self, request, exception):
"""
Add exception information and fault flag to the
current segment.
"""
segment = xray_recorder.current_segment()
segment.put_http_meta(http.STATUS, 500)
stack = traceback.extract_stack(limit=xray_recorder._max_trace_back)
segment.add_exception(exception, stack)
| [
"[email protected]"
] | |
b97f74a1f0621e7869d7ba6dbce18fdd009b8709 | 2e1c3b6a76cb17383421105298f9c5f9aa048ee0 | /Newbie-Python/newbie-python-59.py | 3a7663e1148559fffb1926392c7cada603400f85 | [] | no_license | viewless/Skillreceiving | e31a0c1e4177eefea3d2288317026fb1777ae4bc | 3a5a5860c4595ca108d531d5b3b1248e60e0f0ae | refs/heads/master | 2020-06-11T14:16:52.386960 | 2019-06-27T13:57:01 | 2019-06-27T13:57:01 | 193,995,531 | 0 | 0 | null | 2019-06-27T00:29:29 | 2019-06-27T00:29:28 | null | UTF-8 | Python | false | false | 641 | py |
principal = float(input())
months = int(input())
simple_total = principal      # simple interest: 3% of the principal each month
compound_total = principal    # compound interest: 2.7% of the running balance each month
for i in range(months):
    simple_total += principal * 0.03
    compound_total += compound_total * 0.027
print("Simple interest rate: ", end = "")
print(str("%.2f" % simple_total) + " lv.")
print("Complex interest rate: ", end = "")
print(str("%.2f" % compound_total) + " lv.")
if simple_total >= compound_total:
    win = simple_total - compound_total
    win = "%.2f" % win
    print("Choose a simple interest rate. You will win " + str(win) + " lv.")
else:
    win = compound_total - simple_total
    win = "%.2f" % win
    print("Choose a complex interest rate. You will win " + str(win) + " lv.")
| [
"[email protected]"
] | |
d3d0752d658dc6192d9911d8f0c9d9b421b40a88 | 8ea9d7e5db9756915a35b52be5e3d12a66254f24 | /64.最小路径和.py | 4ba73a3e290210ee1e4bdacc636970011aa292ad | [] | no_license | Sander-houqi/leetcode-py | 236a7733343fc92b739f8283f953fd2996643737 | 315c37cf88cd21cff0fb96eccbf52589252e24ed | refs/heads/main | 2023-05-12T07:29:02.719586 | 2021-05-17T09:09:24 | 2021-05-17T09:09:24 | 341,113,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 708 | py | #
# @lc app=leetcode.cn id=64 lang=python3
#
# [64] Minimum Path Sum
#
# @lc code=start
class Solution:
def minPathSum(self, grid: List[List[int]]) -> int:
if not grid or not grid[0]:
return 0
rows,cols = len(grid), len(grid[0])
dp = [ [0]*cols for _ in range(rows)]
        # initialize the top-left corner
dp[0][0] = grid[0][0]
for i in range(1,rows):
dp[i][0] = dp[i-1][0] + grid[i][0]
for j in range(1,cols):
dp[0][j] = dp[0][j-1] + grid[0][j]
for i in range(1,rows):
for j in range(1,cols):
dp[i][j] = min(dp[i-1][j],dp[i][j-1])+ grid[i][j]
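        # only right/down moves are allowed, so every interior cell's cheapest path arrives
        # from above or from the left: dp[i][j] = grid[i][j] + min(dp[i-1][j], dp[i][j-1])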
return dp[-1][-1]
# @lc code=end
| [
"[email protected]"
] | |
b26849f52600f81305a87609fd1e3a4afcf64817 | 7aceea648a4075f8151ae9aa7e03a577bb3034c0 | /test_docker.py | 81bd1e592d16de17e3c52b7056ae0c32f95447c8 | [] | no_license | bas079/DockerRedisPython | c79b01c084806eaf86f6ae4a5eba64729ecf67da | 9e14c3dbc724a3f7340ca62bf217873a3cfbeca8 | refs/heads/master | 2020-04-12T23:30:52.491822 | 2018-12-30T16:49:29 | 2018-12-30T16:49:29 | 162,821,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | from selenium import webdriver
import time
import re
CHROME_DRIVER = "C://Users//Anna//Downloads//chromedriver_win32//chromedriver.exe"
SITE_NAME = "http://192.168.99.100:5000/"
#Open chrome driver
driver = webdriver.Chrome(executable_path=CHROME_DRIVER)
# Opening chrome browser on a desired page
driver.get(SITE_NAME)
# Maximize window
driver.maximize_window()
# Waiting for 2 seconds
time.sleep(2)
# Find string
element = driver.find_element_by_css_selector("body")
#Remove the word “World” from printing
string = re.sub('World', '', element.text)
# Print string
print("String is:", string)
# Closing current tab
driver.close()
# Closing driver session
driver.quit()
| [
"[email protected]"
] | |
5f035ea9290e53b5924dac886e9088c275dc4110 | 4a5548330b88e7d210995be983f1a6c09e1f6d12 | /Seed/views.py | b24253e2f6421d170e84ff67a38e8de9c3031221 | [] | no_license | kimwoojoo/Seedgermination | 0c3377dc747b1705b2fec96a38b9ba65b70a220c | 550977885b25b34e8aaf7e46a3d943ecfc1e67cf | refs/heads/master | 2020-03-27T06:02:17.152112 | 2019-03-07T09:47:43 | 2019-03-07T09:47:43 | 146,073,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,865 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse, JsonResponse
from .forms import ImageExampleForm
from .retrain import run_inference_on_image
from mysite.settings import MEDIA_ROOT
import json
import time
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# add form
class GetJson:
def __init__(self, DataJason):
self.__DataJson = DataJason
@property
def DataJson(self):
return self.__DataJson
@DataJson.setter
def DataJson(self, DATAJSON):
self.__DataJson = DATAJSON
tempJson = GetJson({})
def GetJsonData(request):
temp = request.POST.get('id',None)
return HttpResponse(json.dumps(tempJson.DataJson), content_type="application/json")
def Seedimg(request):
filename = "test.jpg"
ImagePath = os.path.join(BASE_DIR, filename)
return HttpResponse(ImagePath);
def handle_upload_file(f):
tempPath = "C:/django/Seed/static/test"
if f.name.split('.')[-1].upper() not in ['JPG','JPEG']:
return 'ERROR'
with open('{}.jpg'.format(tempPath), 'wb+') as w:
for chunk in f.chunks():
w.write(chunk)
return '{}.jpg'.format(tempPath)
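# handle_upload_file streams the upload to disk in chunks (f.chunks()) so a large image is
# never held fully in memory; only .jpg/.jpeg uploads are accepted, anything else returns 'ERROR'.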
def ImageUpload(request):
form = ImageExampleForm(request.POST, request.FILES)
if request.method == 'POST':
if form.is_valid():
handle_upload_file(request.FILES['image'])
filename = "test.jpg"
ImagePath = os.path.join("C:/django/Seed/static", filename)
temp = run_inference_on_image(ImagePath)
tempJson.DataJson = temp
return render(request, 'Seed/ImageView.html', {'ImagePath' : ImagePath })
return render(request, "Seed/ImageUpload.html", {'form' : form})
#def Image_Open(request):
# if request.method == "POST":
# Create your views here.
| [
"[email protected]"
] | |
827386bee3c894244614bc9be21b0dcc1ff5dfc6 | 39d7f92a87490893faf70958ac75a18d7f4010ed | /tests/test_blog.py | b3278ab7bead96ffd75376a7756c8ad87f66964c | [] | no_license | rahuls321/Python-Web-App-with-Flask- | 94ebab6189cef19e74d8dabef854f0fc092d0071 | 43d1def8bc1b18f5812a75b9db1ca1c0721fadbb | refs/heads/master | 2022-12-13T03:58:03.094409 | 2020-09-10T11:48:24 | 2020-09-10T11:48:24 | 294,392,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,608 | py | import pytest
from flaskr.db import get_db
def test_index(client, auth):
response = client.get('/')
assert b"Log In" in response.data
assert b"Register" in response.data
auth.login()
response = client.get('/')
assert b'Log Out' in response.data
assert b'test title' in response.data
assert b'by test on 2018-01-01' in response.data
assert b'test\nbody' in response.data
assert b'href="/1/update"' in response.data
@pytest.mark.parametrize('path', (
'/create',
'/1/update',
'/1/delete',
))
def test_login_required(client, path):
response = client.post(path)
assert response.headers['Location'] == 'http://localhost/auth/login'
def test_author_required(app, client, auth):
# change the post author to another user
with app.app_context():
db = get_db()
db.execute('UPDATE post SET author_id = 2 WHERE id = 1')
db.commit()
auth.login()
# current user can't modify other user's post
assert client.post('/1/update').status_code == 403
assert client.post('/1/delete').status_code == 403
# current user doesn't see edit link
assert b'href="/1/update"' not in client.get('/').data
@pytest.mark.parametrize('path', (
'/2/update',
'/2/delete',
))
def test_exists_required(client, auth, path):
auth.login()
assert client.post(path).status_code == 404
def test_create(client, auth, app):
auth.login()
assert client.get('/create').status_code == 200
client.post('/create', data={'title': 'created', 'body': ''})
with app.app_context():
db = get_db()
count = db.execute('SELECT COUNT(id) FROM post').fetchone()[0]
assert count == 2
def test_update(client, auth, app):
auth.login()
assert client.get('/1/update').status_code == 200
client.post('/1/update', data={'title': 'updated', 'body': ''})
with app.app_context():
db = get_db()
post = db.execute('SELECT * FROM post WHERE id = 1').fetchone()
assert post['title'] == 'updated'
@pytest.mark.parametrize('path', (
'/create',
'/1/update',
))
def test_create_update_validate(client, auth, path):
auth.login()
response = client.post(path, data={'title': '', 'body': ''})
assert b'Title is required.' in response.data
def test_delete(client, auth, app):
auth.login()
response = client.post('/1/delete')
assert response.headers['Location'] == 'http://localhost/'
with app.app_context():
db = get_db()
post = db.execute('SELECT * FROM post WHERE id = 1').fetchone()
assert post is None
| [
"[email protected]"
] | |
675f616d7429e42b9bc576d686ae7a8e6c317952 | 5ce40c9a72d066d60a623790eb936fa4232b05ea | /Token_test/views.py | 4264305185ee99106ee04315b8a629a508d27229 | [] | no_license | Prashant9931/GUI_project | 0d3835a29e678c4d3dc53e7dbc5fc269cb63ae82 | 5e42d43d2150a66938fdbe9cb27912b01eaf57e7 | refs/heads/master | 2020-06-20T00:04:42.735120 | 2019-07-15T07:25:15 | 2019-07-15T07:25:15 | 196,921,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,370 | py | from django.shortcuts import render,redirect
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_protect, csrf_exempt
from django.contrib.auth.models import User,auth
from rest_framework.decorators import api_view
# from Token_test.models import User
from rest_framework.views import APIView
class Log(APIView):
    def get(self, request):
content = {'message': 'Hello, World!'}
return HttpResponse(content)
def home(request):
return render(request,'layout.html');
@csrf_exempt
def login(request):
if request.method=="POST":
username=request.POST['username']
password=request.POST['password']
        # create_user() stores a hashed password, so comparing raw strings against
        # User.password would always fail; authenticate() performs the hash check
        user = auth.authenticate(username=username, password=password)
        if user is not None:
            auth.login(request, user)
            return redirect('/')
return render(request,'login.html')
@csrf_exempt
def register(request):
if request.method=="POST":
# name=request.POST['name']
username=request.POST['username']
password=request.POST['password']
# confirm=request.POST['confirm']
user=User.objects.create_user(username=username,password=password)
user.save();
print('created')
return redirect('/')
pass
return render(request,'register.html')
# Create your views here.
| [
"[email protected]"
] | |
189c9ba3749f61709f665e9f312e3474846fd1e7 | 75adf2c86e13612bb584fe28bf7e6272dadaee30 | /env/bin/symilar | 0fc460f87e111154fb24921042335bd53a23ec70 | [] | no_license | yushghimire/python-flask-try | 84af540cae7319e8685738f2bb6bd5c442ef3612 | e1020a577c09ce8ba4de5a720d9eb3f5c8ac4223 | refs/heads/master | 2020-03-27T15:55:24.267992 | 2018-08-30T12:42:49 | 2018-08-30T12:42:49 | 146,747,790 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | #!/Users/yushghimire/mywork/python-flask-web/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run_symilar())
| [
"[email protected]"
] | ||
61e2b14749e868fd2a135d4e5ed2ec7b33bdd3b7 | 24abbefb90b95e8c63416cc028202d0623accf0d | /proc_image_usage.py | 510ef8695f20569ff48c324a17ac5c142435afb9 | [] | no_license | hehao98/pkudean-CAPTCHA-identification | 250a6abd146b9fad8459ed7c21398cea96969ace | e313fb0bd753187198d332cda1803782391672f9 | refs/heads/master | 2021-05-15T06:41:55.741060 | 2017-12-13T04:46:19 | 2017-12-13T04:46:19 | 114,070,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py |
import proc_image
# Process 100 images in the data folder, split it to 4 character images
# and store the result in train folder
proc_image.proc_image('data', 'train', 100)
| [
"[email protected]"
] | |
66784abd494206206b62b19a1ff3e51a284cfcca | 1a04d15acc0d5375f79e65c059662462890a4226 | /test2.py | b34bc857a9a9a32a6eb31cd715e757d846e70c7b | [] | no_license | Zero-Qzy/Connect6 | 6fd682cbcc59a3d87d524a807a2d4dfb5cd41f34 | 2f16b4835554d63d65de24f0327f0a8a96c5a915 | refs/heads/master | 2020-07-29T22:45:12.255598 | 2019-09-27T06:45:38 | 2019-09-27T06:45:51 | 209,989,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,686 | py | import tensorflow as tf
from captcha.image import ImageCaptcha
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import random
number=['0','1','2','3','4','5','6','7','8','9']
#alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
#ALPHABET = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
def random_captcha_text(char_set=number,captcha_size=4):
captcha_text=[]
for i in range(captcha_size):
c=random.choice(char_set)
captcha_text.append(c)
return captcha_text
def gen_captcha_text_image():
image=ImageCaptcha()
captcha_text=random_captcha_text()
captcha_text=''.join(captcha_text)
captcha=image.generate(captcha_text)
captcha_image=Image.open(captcha)
captcha_image=np.array(captcha_image)
return captcha_text,captcha_image
def convert2gray(img):
if len(img.shape)>2:
r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
else:
return img
def text2vec(text):
text_len = len(text)
if text_len > max_captcha:
        raise ValueError('CAPTCHA text may be at most 4 characters')
vector = np.zeros(max_captcha * char_set_len)
def char2pos(c):
if c == '_':
k = 62
return k
k = ord(c) - 48
if k > 9:
k = ord(c) - 55
if k > 35:
k = ord(c) - 61
if k > 61:
raise ValueError('No Map')
return k
for i, c in enumerate(text):
idx = i * char_set_len + char2pos(c)
vector[idx] = 1
return vector
def get_next_batch(batch_size=128):
batch_x=np.zeros([batch_size,image_height*image_width])
batch_y=np.zeros([batch_size,max_captcha*char_set_len])
def wrap_gen_captcha_text_and_image():
while True:
text, image = gen_captcha_text_image()
if image.shape == (60, 160, 3):
return text, image
for i in range(batch_size):
text, image = wrap_gen_captcha_text_and_image()
image = convert2gray(image)
batch_x[i, :] = image.flatten() / 255
batch_y[i, :] = text2vec(text)
return batch_x, batch_y
def cnn_structure(w_alpha=0.01, b_alpha=0.1):
x = tf.reshape(X, shape=[-1, image_height, image_width, 1])
wc1=tf.get_variable(name='wc1',shape=[3,3,1,32],dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer())
bc1 = tf.Variable(b_alpha * tf.random_normal([32]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, wc1, strides=[1, 1, 1, 1], padding='SAME'), bc1))
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv1 = tf.nn.dropout(conv1, keep_prob)
wc2=tf.get_variable(name='wc2',shape=[3,3,32,64],dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer())
# wc2 = tf.Variable(w_alpha * tf.random_normal([3, 3, 32, 64]))
bc2 = tf.Variable(b_alpha * tf.random_normal([64]))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, wc2, strides=[1, 1, 1, 1], padding='SAME'), bc2))
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv2 = tf.nn.dropout(conv2, keep_prob)
wc3=tf.get_variable(name='wc3',shape=[3,3,64,128],dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer())
#wc3 = tf.Variable(w_alpha * tf.random_normal([3, 3, 64, 128]))
bc3 = tf.Variable(b_alpha * tf.random_normal([128]))
conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, wc3, strides=[1, 1, 1, 1], padding='SAME'), bc3))
conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv3 = tf.nn.dropout(conv3, keep_prob)
wd1=tf.get_variable(name='wd1',shape=[8*20*128,1024],dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer())
#wd1 = tf.Variable(w_alpha * tf.random_normal([7*20*128,1024]))
bd1 = tf.Variable(b_alpha * tf.random_normal([1024]))
dense = tf.reshape(conv3, [-1, wd1.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, wd1), bd1))
dense = tf.nn.dropout(dense, keep_prob)
wout=tf.get_variable('name',shape=[1024,max_captcha * char_set_len],dtype=tf.float32,initializer=tf.contrib.layers.xavier_initializer())
#wout = tf.Variable(w_alpha * tf.random_normal([1024, max_captcha * char_set_len]))
bout = tf.Variable(b_alpha * tf.random_normal([max_captcha * char_set_len]))
out = tf.add(tf.matmul(dense, wout), bout)
return out
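# Architecture note: three 3x3-conv + 2x2-max-pool blocks shrink the 60x160 grey image to
# 8x20x128 (SAME padding rounds 60/2/2/2 up to 8), which is flattened into a 1024-unit dense
# layer and projected to max_captcha * char_set_len logits — one output group per character.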
def train_cnn():
output=cnn_structure()
cost=tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=output,labels=Y))
optimizer=tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
predict=tf.reshape(output,[-1,max_captcha,char_set_len])
max_idx_p = tf.argmax(predict, 2)
max_idx_l = tf.argmax(tf.reshape(Y, [-1, max_captcha, char_set_len]), 2)
correct_pred = tf.equal(max_idx_p, max_idx_l)
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
saver=tf.train.Saver()
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
step = 0
while True:
batch_x, batch_y = get_next_batch(100)
_, cost_= sess.run([optimizer, cost], feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.75})
print(step, cost_)
if step % 10 == 0:
batch_x_test, batch_y_test = get_next_batch(100)
acc = sess.run(accuracy, feed_dict={X: batch_x_test, Y: batch_y_test, keep_prob: 1.})
print(step, acc)
if acc > 0.99:
saver.save(sess, "./model/crack_capcha.model", global_step=step)
break
step += 1
def crack_captcha(captcha_image):
output = cnn_structure()
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, "./model/crack_capcha.model-1200")
predict = tf.argmax(tf.reshape(output, [-1, max_captcha, char_set_len]), 2)
text_list = sess.run(predict, feed_dict={X: [captcha_image], keep_prob: 1.})
text = text_list[0].tolist()
return text
if __name__=='__main__':
train=1
if train==0:
text,image=gen_captcha_text_image()
print("验证码大小:",image.shape)#(60,160,3)
image_height=60
image_width=160
max_captcha=len(text)
print("验证码文本最长字符数",max_captcha)
char_set=number
char_set_len=len(char_set)
X = tf.placeholder(tf.float32, [None, image_height * image_width])
Y = tf.placeholder(tf.float32, [None, max_captcha * char_set_len])
keep_prob = tf.placeholder(tf.float32)
train_cnn()
if train == 1:
image_height = 60
image_width = 160
char_set = number
char_set_len = len(char_set)
text, image = gen_captcha_text_image()
f = plt.figure()
ax = f.add_subplot(111)
ax.text(0.1, 0.9, text, ha='center', va='center', transform=ax.transAxes)
plt.imshow(image)
# plt.show()
max_captcha = len(text)
image = convert2gray(image)
image = image.flatten() / 255
X = tf.placeholder(tf.float32, [None, image_height * image_width])
Y = tf.placeholder(tf.float32, [None, max_captcha * char_set_len])
keep_prob = tf.placeholder(tf.float32)
predict_text = crack_captcha(image)
print("正确: {} 预测: {}".format(text, predict_text))
plt.show()
| [
"[email protected]"
] | |
ea7414ccd524b613d35517979602e7d31e295b12 | 057782f7ac628468e5b21b90ad21781e91bb5857 | /keras_ocr/__init__.py | 238e1e43cbe5a56d803d0fa6bd2ee870a3db8552 | [
"MIT"
] | permissive | shitoubiao/keras-ocr | b5b88189f2e248657280ea3000b734c3785a4646 | 4cbe4fa07bc19cbaf3a4e65106a0e7e8b58674f7 | refs/heads/master | 2020-11-29T19:55:50.654788 | 2019-12-24T03:29:02 | 2019-12-24T03:29:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | from . import detection, recognition, tools
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| [
"[email protected]"
] | |
875d11808705856a784ae3ebcbcd68faf06e2c35 | f82e45a4fac9eb14ccd5688e4146a006e0dc5f6f | /euler3.py | 3199d573eca8614ce53518ffb71eae3794a4546a | [] | no_license | jinalex/Project-Euler | e16dbc02487a12bb14b5bc7df17324c4eeed0c58 | 44bb4dc5616ccef52e6b7a8c56f4fdeaa7b65420 | refs/heads/master | 2020-05-19T14:16:27.146406 | 2015-05-07T01:26:28 | 2015-05-07T01:26:28 | 27,042,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | import random
import math
def primeCheck (num,k):
    # Fermat test: use 3-argument pow for modular exponentiation; computing a**(num-1)
    # in full and only then reducing mod num would be astronomically slow for 12-digit num
    for i in range(k):
        a = random.randint(2, max(2, int(num**0.5)))
        if pow(a, num - 1, num) != 1:
            return False  # a is a Fermat witness: num is definitely composite
    return True
theThing = 600851475143
bigPrime = 0
for i in range(3,int(math.sqrt(theThing)),2):
if theThing%i == 0:
if primeCheck(i,1) and i > bigPrime:
bigPrime = i
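            # scanning odd divisors up to sqrt(n) suffices here because 600851475143
            # factors as 71 * 839 * 1471 * 6857, all of which lie below its square root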
print "The answer: ",bigPrime
| [
"[email protected]"
] | |
0938f07833a08a453f022b3767ab79d424d1d1de | 29d123b3372d6b18655eee80128fd01fc053dccd | /dbgate.cn.allupdate.py | a8c28829d893145f7fa6e7cfe96edfa91c7415a8 | [] | no_license | zzlyzq/dbgate | 28fd5f1807140745e5a52caf202b20254b60dd24 | 5ec17bef00ec5ffab996664fd3d54845c6cbf4c9 | refs/heads/master | 2020-09-17T14:50:53.650283 | 2016-09-09T09:56:39 | 2016-09-09T09:56:39 | 67,774,318 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,048 | py | #!/usr/bin/python
# -*- coding:utf-8 -*-
import urllib2
import os, sys, re
import shutil
import MySQLdb
def haproxyConfigGenerate():
conn = MySQLdb.connect(host = 'db.bops.live',port=3306,user = 'ims_wr', passwd = 'xxx', db = 'ims')
command = '''select db_ip,db_port,db_name,db_username,db_password,dbgate_port,request_userName,request_userEmail,request_userTel from DBgate_Configer order by dbgate_port;''';
print command
cur = conn.cursor()
cur.execute(command)
results = cur.fetchall()
if os.path.isfile("common.cfg"):
print "配置文件存在"
shutil.move("common.cfg","common.cfg.bak")
# 打开文件准备写入
fp = open("common.cfg","a")
for result in results:
db_ip = result[0]
db_port = result[1]
db_name = result[2]
db_username = result[3]
db_password = result[4]
dbgate_port = result[5]
request_userName = result[6]
request_userEmail = result[7]
request_userTel = result[8]
content = '''
# %s %s %s %s %s %s
listen %s
bind 0.0.0.0:%s
mode tcp
option tcplog
maxconn 4086
server server %s:%s
'''%(request_userName,request_userEmail,request_userTel,db_ip,db_port,db_name,dbgate_port,dbgate_port,db_ip,db_port)
fp.write(content)
cur.close()
conn.close()
fp.close()
conn = MySQLdb.connect(host="db.bops.live",user = "ims_wr", passwd = "xxx", db = "ims", charset = "utf8")
cur = conn.cursor()
command = ''' select db_ip, db_port, db_username, db_password, db_name from DBgate_Configer where dbgate_port != "-";'''
#print command
cur.execute(command)
results = cur.fetchall()
#print results
for result in results:
db_ip = result[0]
db_port = result[1]
db_username = result[2]
db_password = result[3]
db_name = result[4]
print result
haproxyConfigGenerate()
#print "mysql -h %s -P %s -u %s -p%s "%(db_ip,db_port,db_username,db_password)
#if ( db_accesscheck(db_ip,db_port,db_username,db_password,db_name) ):
# print "yanzheng tongguo "
# updateDB(db_ip,db_port,db_username,db_password,db_name)
#else:
# print "Some thing error!"
#print result
| [
"root@i-tr9h1f8i.(none)"
] | root@i-tr9h1f8i.(none) |
d3071e66efa5f6c7ae9ff183d8dcc585789b2b35 | 9ffe22376b494ca27f51b3fe534ae1dbf3013f33 | /apps/users/adminx.py | f9f268a8b5403c4351799917ab727c437028de67 | [] | no_license | boyl/mxshop | 42a01209dc65fd4753eaf9ce827f87e9d774ee74 | 66a1a85badd315703297f5f4adc864a4d40db760 | refs/heads/master | 2022-12-09T21:50:00.191631 | 2019-02-03T12:03:12 | 2019-02-03T12:03:12 | 166,653,438 | 0 | 0 | null | 2022-12-08T01:34:49 | 2019-01-20T11:27:48 | JavaScript | UTF-8 | Python | false | false | 662 | py | # coding=utf-8
import xadmin
from xadmin import views
from .models import VerifyCode
class BaseSetting(object):
    # enable the xadmin theme-switching feature
enable_themes = True
use_bootswatch = True
class GlobalSettings(object):
    # global settings: admin site title and page footer
site_title = "仙剑奇侠传"
site_footer = "http://www.cnblogs.com/derek1184405959/"
    # collapsible (accordion) menu
menu_style = "accordion"
class VerifyCodeAdmin(object):
list_display = ['code', 'mobile', "add_time"]
xadmin.site.register(VerifyCode, VerifyCodeAdmin)
xadmin.site.register(views.BaseAdminView, BaseSetting)
xadmin.site.register(views.CommAdminView, GlobalSettings)
| [
"[email protected]"
] | |
9fdab581dc161102b34bdb24ddee40c25225b337 | 82bc1647b5db6b0cf6fe4a1a7c0d988a41d01d65 | /动态规划/583.两个字符串的删除操作.py | ab5b3e0066f2286d4073a5c33664d1211b125c32 | [] | no_license | QinHaoChen97/VS_C | 8a6bc3f50be17109839bf4474a817244ff5be3e8 | 527c851298a351fe2e417ec014e8ed555bb0fab3 | refs/heads/master | 2023-05-25T15:00:09.594020 | 2023-05-03T15:27:56 | 2023-05-03T15:27:56 | 211,770,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | # @before-stub-for-debug-begin
from python3problem583 import *
from typing import *
# @before-stub-for-debug-end
#
# @lc app=leetcode.cn id=583 lang=python3
#
# [583] Delete Operation for Two Strings
#
# @lc code=start
class Solution:
def minDistance(self, word1: str, word2: str) -> int:
len1=len(word1)
len2=len(word2)
dp=[[0 for j in range(len2+1)]for i in range(len1+1)]
        # initialization: turning a prefix into the empty string deletes all its characters
for j in range(len2+1):
dp[0][j]=j
for i in range(len1+1):
dp[i][0]=i
for i in range(1,len1+1):
for j in range(1,len2+1):
                if word1[i-1]==word2[j-1]:  # dp indices are offset by one from the string indices
dp[i][j]=dp[i-1][j-1]
else:
dp[i][j]=min(dp[i-1][j],dp[i][j-1])+1
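        # dp[i][j] is the minimum number of deletions needed to make word1[:i] and word2[:j]
        # equal — equivalently len1 + len2 - 2*LCS(word1, word2)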
return dp[len1][len2]
# @lc code=end
| [
"[email protected]"
] | |
a1c915823dfea8d60070e5700d3478369646fc9e | 48207941ad9a15d8a482d4bd5b58e70670357612 | /algorithm/Python/algorithm/swexport/d4/contact.py | f5f109c6b3c9d3b9fccc79e964f2a515273ac984 | [] | no_license | chulsea/TIL | 57d7b77217ebbd592771ee20a6f370c4c0893f23 | b62e1edab9dd0fe475e6417c7af710702052660b | refs/heads/master | 2020-04-11T21:13:33.140353 | 2019-04-18T03:16:35 | 2019-04-18T03:16:35 | 162,099,009 | 9 | 4 | null | 2019-01-08T00:15:20 | 2018-12-17T08:32:29 | Jupyter Notebook | UTF-8 | Python | false | false | 962 | py | import sys
sys.stdin = open('inputs/contact_input.txt')
def solution(adj_list, start):
answer = start
queue = [start]
visited = [0 for _ in range(101)]
visited[start] = 1
while queue:
t = queue.pop(0)
if visited[answer] < visited[t]:
answer = t
elif visited[answer] == visited[t]:
answer = max(answer, t)
for k in adj_list[t]:
if not visited[k]:
visited[k] = visited[t] + 1
queue.append(k)
return answer
def main():
for test_case in range(10):
n, start = map(int, input().split())
adj_list = [[] for _ in range(101)]
temp = list(map(int, input().split()))
for i in range(0, n, 2):
s, f = map(int, temp[i:i + 2])
if f not in adj_list:
adj_list[s].append(f)
print(f'#{test_case+1} {solution(adj_list, start)}')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
df2658790a5cb574cb071b04555da9bfb0b9ffbb | e69940d0d8bbe1afeb06acce4332e6daf358dc3b | /open.py | eb68049060bb62e57e0fb68b74b03bb8a9731cfd | [] | no_license | fengjixuchui/loonix_container_escape | 1cf988a7dd036dbe118d55e185b21e01bc244b59 | fad4c7745d66a10da19047e8ab4f72fbba00e758 | refs/heads/master | 2023-06-07T16:46:46.806446 | 2021-06-17T01:52:36 | 2021-06-17T01:52:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | import os
os.system("whoami")
open("/proc/escape")
# os.system("whoami")
# os.system("/bin/bash")
| [
"[email protected]"
] | |
23ada30d75c6cae41e886163fa61e4745621ac4e | 43b5a19656139c6a1a47a3ad91297bf553a23949 | /engine/object.py | fa4f7f4e22feb8518759859a6f39f69a0d041454 | [] | no_license | sphoid/mm-demo | 07d735028e4b44a5504e8312c078aaac49e267d8 | bcac93204ed616d72a13ce987840f50afada1797 | refs/heads/master | 2023-01-03T15:56:03.830554 | 2020-11-02T00:33:54 | 2020-11-02T00:33:54 | 307,745,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | from pygame.sprite import Rect
from pygame.math import Vector2
class GameObject:
def __init__(self, rect, name=None, attributes=dict()):
self.name = name
self.rect = rect
self.flagged = False
self.attributes = attributes
def is_flagged(self):
return self.flagged
def flag(self):
self.flagged = True
def unflag(self):
		self.flagged = False
def get_name(self):
return self.name
def get_rect(self):
return Rect((self.get_left(), self.get_top()), (self.get_width(), self.get_height()))
def get_position(self):
return Vector2(self.get_left(), self.get_top())
def get_bottom(self):
return self.rect.bottom
def get_top(self):
return self.rect.top
def get_left(self):
return self.rect.left
def get_right(self):
return self.rect.right
def get_width(self):
return self.rect.width
def get_height(self):
return self.rect.height
def get_size(self):
return self.get_width(), self.get_height()
def collides_with(self, rect):
return self.get_rect().colliderect(rect)
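	# note: pygame's Rect.colliderect reports a collision only when the rectangles share
	# actual area — rectangles that merely touch along an edge do not count as colliding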
| [
"[email protected]"
] | |
a644b62de928526c32cfb981e33d5612cd4b30b3 | 52a5df7c1cb83088d49a31b8a59c80e89394c8a3 | /sources/app/books/migrations/0002_bookreview.py | 04a599ce309d00ca0ce8944126e1e6748ff6877a | [] | no_license | lizardmon/Booken-Backend | 02cab32979400ca109d2a42ab1e9d7e2b2fbec3a | 32bcb0da29dd92a1d7711053e43a5225d4895fef | refs/heads/master | 2022-12-12T19:21:09.116158 | 2020-02-12T10:18:13 | 2020-02-12T10:18:13 | 223,547,726 | 0 | 1 | null | 2022-12-08T03:19:17 | 2019-11-23T07:15:53 | Python | UTF-8 | Python | false | false | 1,046 | py | # Generated by Django 2.2.8 on 2020-01-25 07:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('books', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BookReview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.IntegerField(verbose_name='평점')),
('content', models.TextField(verbose_name='한줄평')),
('nickname', models.CharField(max_length=20, verbose_name='닉네임')),
('created_at', models.DateField(verbose_name='리뷰일')),
('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='books.Book', verbose_name='책')),
],
options={
'verbose_name': '리뷰',
'verbose_name_plural': '리뷰들',
},
),
]
| [
"[email protected]"
] | |
93f8f7671d059d757c8e9bd1718fbb7734d4ee2b | 0114ef4f93611b91e73f22e262b79ed34e2f9af3 | /dymos/transcriptions/explicit_shooting/ode_evaluation_group.py | 6ecf7df7cf58cf5bae0659920144db8d5e222768 | [
"Apache-2.0"
] | permissive | JustinSGray/dymos | c092796119b526e221fbf6a2d7fa156d673b3e15 | 620362b8429d03d23820517c9c61b5a2f0430410 | refs/heads/master | 2021-11-23T12:13:26.154594 | 2021-10-28T21:49:41 | 2021-10-28T21:49:41 | 162,737,356 | 0 | 1 | Apache-2.0 | 2021-01-07T16:04:18 | 2018-12-21T16:48:08 | Python | UTF-8 | Python | false | false | 17,390 | py | import numpy as np
import openmdao.api as om
from .vandermonde_control_interp_comp import VandermondeControlInterpComp
from .state_rate_collector_comp import StateRateCollectorComp
from .tau_comp import TauComp
from ...utils.introspection import get_targets, configure_controls_introspection,\
configure_time_introspection, configure_parameters_introspection, \
configure_states_discovery, configure_states_introspection
from ...utils.misc import get_rate_units
class ODEEvaluationGroup(om.Group):
"""
A group whose purpose is to evaluate the ODE and output the computed state rates.
Parameters
----------
ode_class : class
The class of the OpenMDAO system to be used to evaluate the ODE in this Group.
time_options : OptionsDictionary
OptionsDictionary of time options.
state_options : dict of {str: OptionsDictionary}
For each state variable, a dictionary of its options, keyed by name.
parameter_options : dict of {str: OptionsDictionary}
For each parameter, a dictionary of its options, keyed by name.
control_options : dict of {str: OptionsDictionary}
For each control variable, a dictionary of its options, keyed by name.
polynomial_control_options : dict of {str: OptionsDictionary}
For each polynomial variable, a dictionary of its options, keyed by name.
ode_init_kwargs : dict
A dictionary of keyword arguments to be passed to the instantiation of the ODE.
grid_data : GridData
The GridData instance pertaining to the phase to which this ODEEvaluationGroup belongs.
**kwargs : dict
Additional keyword arguments passed to Group.
"""
def __init__(self, ode_class, time_options, state_options, parameter_options, control_options,
polynomial_control_options, ode_init_kwargs=None,
grid_data=None, **kwargs):
super().__init__(**kwargs)
# Get the state vector. This isn't necessarily ordered
# so just pick the default ordering and go with it.
self.state_options = state_options
self.parameter_options = parameter_options
self.time_options = time_options
self.control_options = control_options
self.polynomial_control_options = polynomial_control_options
self.control_interpolants = {}
self.polynomial_control_interpolants = {}
self.ode_class = ode_class
self.grid_data = grid_data
self.ode_init_kwargs = {} if ode_init_kwargs is None else ode_init_kwargs
def set_segment_index(self, seg_idx):
"""
Set the segment_index option on those subsystems which require it.
Parameters
----------
seg_idx : int
The index of the current segment.
"""
self._get_subsystem('tau_comp').options['segment_index'] = seg_idx
control_interp_comp = self._get_subsystem('control_interp')
if control_interp_comp:
control_interp_comp.options['segment_index'] = seg_idx
def setup(self):
"""
Define the structure of the ODEEvaluationGroup.
"""
gd = self.grid_data
        # All states, controls, parameters, and polynomial controls need to exist
# in the ODE evaluation group regardless of whether or not they have targets in the ODE.
# This makes taking the derivatives more consistent without Exceptions.
self._ivc = self.add_subsystem('ivc', om.IndepVarComp(), promotes_outputs=['*'])
# Add a component to compute the current non-dimensional phase time.
self.add_subsystem('tau_comp', TauComp(grid_data=self.grid_data,
time_units=self.time_options['units']),
promotes_inputs=['time', 't_initial', 't_duration'],
promotes_outputs=['stau', 'ptau', 'dstau_dt', 'time_phase'])
if self.control_options or self.polynomial_control_options:
c_options = self.control_options
pc_options = self.polynomial_control_options
# Add control interpolant
self._control_comp = self.add_subsystem('control_interp',
VandermondeControlInterpComp(grid_data=gd,
control_options=c_options,
polynomial_control_options=pc_options,
time_units=self.time_options['units']),
promotes_inputs=['ptau', 'stau', 't_duration', 'dstau_dt'])
self.add_subsystem('ode', self.ode_class(num_nodes=1, **self.ode_init_kwargs))
self.add_subsystem('state_rate_collector',
StateRateCollectorComp(state_options=self.state_options,
time_units=self.time_options['units']))
def configure(self):
"""
Perform I/O creation for this group's underlying members.
In dymos, this system sits within a subproblem and therefore isn't in the standard
configuration chain. We need to perform all of the introspection of the ODE here.
"""
ode = self._get_subsystem('ode')
configure_time_introspection(self.time_options, ode)
self._configure_time()
configure_parameters_introspection(self.parameter_options, ode)
self._configure_params()
configure_controls_introspection(self.control_options, ode,
time_units=self.time_options['units'])
self._configure_controls()
configure_controls_introspection(self.polynomial_control_options, ode,
time_units=self.time_options['units'])
self._configure_polynomial_controls()
if self.control_options or self.polynomial_control_options:
self._get_subsystem('control_interp').configure_io()
configure_states_discovery(self.state_options, ode)
configure_states_introspection(self.state_options, self.time_options, self.control_options,
self.parameter_options,
self.polynomial_control_options, ode)
self._configure_states()
self.state_rate_collector.configure_io()
def _configure_time(self):
targets = self.time_options['targets']
time_phase_targets = self.time_options['time_phase_targets']
t_initial_targets = self.time_options['t_initial_targets']
t_duration_targets = self.time_options['t_duration_targets']
units = self.time_options['units']
for tgts, var in [(targets, 'time'), (time_phase_targets, 'time_phase'),
(t_initial_targets, 't_initial'), (t_duration_targets, 't_duration')]:
if var != 'time_phase':
self._ivc.add_output(var, shape=(1,), units=units)
for t in tgts:
self.promotes('ode', inputs=[(t, var)])
if tgts:
self.set_input_defaults(name=var,
val=np.ones((1,)),
units=units)
def _configure_states(self):
for name, options in self.state_options.items():
shape = options['shape']
units = options['units']
targets = options['targets'] if options['targets'] is not None else []
rate_path, rate_io = self._get_rate_source_path(name)
var_name = f'states:{name}'
self._ivc.add_output(var_name, shape=shape, units=units)
self.add_design_var(var_name)
# Promote targets from the ODE
for tgt in targets:
self.promotes('ode', inputs=[(tgt, var_name)])
if targets:
self.set_input_defaults(name=var_name,
val=np.ones(shape),
units=options['units'])
# If the state rate source is an output, connect it, otherwise
# promote it to the appropriate name
if rate_io == 'output':
self.connect(rate_path, f'state_rate_collector.state_rates_in:{name}_rate')
else:
self.promotes('state_rate_collector',
inputs=[(f'state_rates_in:{name}_rate', rate_path)])
self.add_constraint(f'state_rate_collector.state_rates:{name}_rate')
def _configure_params(self):
for name, options in self.parameter_options.items():
shape = options['shape']
targets = get_targets(ode=self.ode, name=name, user_targets=options['targets'])
units = options['units']
var_name = f'parameters:{name}'
self._ivc.add_output(var_name, shape=shape, units=units)
self.add_design_var(var_name)
# Promote targets from the ODE
for tgt in targets:
self.promotes('ode', inputs=[(tgt, var_name)])
if targets:
self.set_input_defaults(name=var_name,
val=np.ones(shape),
units=options['units'])
def _configure_controls(self):
configure_controls_introspection(self.control_options, self.ode)
time_units = self.time_options['units']
if self.control_options:
gd = self.grid_data
if gd is None:
raise ValueError('ODEEvaluationGroup was provided with control options but '
'a GridData object was not provided.')
num_control_input_nodes = gd.subset_num_nodes['control_input']
for name, options in self.control_options.items():
shape = options['shape']
units = options['units']
rate_units = get_rate_units(units, time_units, deriv=1)
rate2_units = get_rate_units(units, time_units, deriv=2)
targets = options['targets']
rate_targets = options['rate_targets']
rate2_targets = options['rate2_targets']
uhat_name = f'controls:{name}'
u_name = f'control_values:{name}'
u_rate_name = f'control_rates:{name}_rate'
u_rate2_name = f'control_rates:{name}_rate2'
self._ivc.add_output(uhat_name, shape=(num_control_input_nodes,) + shape, units=units)
self.add_design_var(uhat_name)
self.add_constraint(u_name)
self.add_constraint(u_rate_name)
self.add_constraint(u_rate2_name)
self.promotes('control_interp', inputs=[uhat_name],
outputs=[u_name, u_rate_name, u_rate2_name])
# Promote targets from the ODE
for tgt in targets:
self.promotes('ode', inputs=[(tgt, u_name)])
if targets:
self.set_input_defaults(name=u_name,
val=np.ones(shape),
units=options['units'])
# Promote rate targets from the ODE
for tgt in rate_targets:
self.promotes('ode', inputs=[(tgt, u_rate_name)])
if rate_targets:
self.set_input_defaults(name=u_rate_name,
val=np.ones(shape),
units=rate_units)
# Promote rate2 targets from the ODE
for tgt in rate2_targets:
self.promotes('ode', inputs=[(tgt, u_rate2_name)])
if rate2_targets:
self.set_input_defaults(name=u_rate2_name,
val=np.ones(shape),
units=rate2_units)
def _configure_polynomial_controls(self):
configure_controls_introspection(self.polynomial_control_options, self.ode)
if self.polynomial_control_options:
time_units = self.time_options['units']
gd = self.grid_data
if gd is None:
raise ValueError('ODEEvaluationGroup was provided with control options but '
'a GridData object was not provided.')
for name, options in self.polynomial_control_options.items():
shape = options['shape']
units = options['units']
rate_units = get_rate_units(units, time_units, deriv=1)
rate2_units = get_rate_units(units, time_units, deriv=2)
targets = options['targets']
rate_targets = options['rate_targets']
rate2_targets = options['rate2_targets']
num_control_input_nodes = options['order'] + 1
uhat_name = f'polynomial_controls:{name}'
u_name = f'polynomial_control_values:{name}'
u_rate_name = f'polynomial_control_rates:{name}_rate'
u_rate2_name = f'polynomial_control_rates:{name}_rate2'
self._ivc.add_output(uhat_name, shape=(num_control_input_nodes,) + shape, units=units)
self.add_design_var(uhat_name)
self.add_constraint(u_name)
self.add_constraint(u_rate_name)
self.add_constraint(u_rate2_name)
self.promotes('control_interp', inputs=[uhat_name],
outputs=[u_name, u_rate_name, u_rate2_name])
# Promote targets from the ODE
for tgt in targets:
self.promotes('ode', inputs=[(tgt, u_name)])
if targets:
self.set_input_defaults(name=u_name,
val=np.ones(shape),
units=options['units'])
# Promote rate targets from the ODE
for tgt in rate_targets:
self.promotes('ode', inputs=[(tgt, u_rate_name)])
if rate_targets:
self.set_input_defaults(name=u_rate_name,
val=np.ones(shape),
units=rate_units)
# Promote rate2 targets from the ODE
for tgt in rate2_targets:
self.promotes('ode', inputs=[(tgt, u_rate2_name)])
if rate2_targets:
self.set_input_defaults(name=u_rate2_name,
val=np.ones(shape),
units=rate2_units)
def _get_rate_source_path(self, state_var):
"""
Get path of the rate source variable so that we can connect it to the
outputs when we're done.
Parameters
----------
state_var : str
The name of the state variable whose path is desired.
Returns
-------
path : str
The path to the rate source of the state variable.
io : str
A string indicating whether the variable in the path is an 'input'
or an 'output'.
"""
var = self.state_options[state_var]['rate_source']
if var == 'time':
rate_path = 'time'
io = 'input'
elif var == 'time_phase':
rate_path = 'time_phase'
io = 'input'
elif self.state_options is not None and var in self.state_options:
rate_path = f'states:{var}'
io = 'input'
elif self.control_options is not None and var in self.control_options:
rate_path = f'controls:{var}'
io = 'output'
elif self.polynomial_control_options is not None and var in self.polynomial_control_options:
rate_path = f'polynomial_controls:{var}'
io = 'output'
elif self.parameter_options is not None and var in self.parameter_options:
rate_path = f'parameters:{var}'
io = 'input'
elif var.endswith('_rate') and self.control_options is not None and \
var[:-5] in self.control_options:
rate_path = f'control_rates:{var}'
io = 'output'
elif var.endswith('_rate2') and self.control_options is not None and \
var[:-6] in self.control_options:
rate_path = f'control_rates:{var}'
io = 'output'
elif var.endswith('_rate') and self.polynomial_control_options is not None and \
var[:-5] in self.polynomial_control_options:
rate_path = f'polynomial_control_rates:{var}'
io = 'output'
elif var.endswith('_rate2') and self.polynomial_control_options is not None and \
var[:-6] in self.polynomial_control_options:
rate_path = f'polynomial_control_rates:{var}'
io = 'output'
else:
rate_path = f'ode.{var}'
io = 'output'
return rate_path, io
| [
"[email protected]"
] | |
9f294704ff416d1e9ad8eb07d8dc80ff0ef69ce1 | 9dc25eb9735b62aa1159dae3ae06a0e3eb32f6c3 | /api/urls.py | 255af256d868fb750cf0797d08104eacb5090b12 | [] | no_license | Luweiwei1/restful | 7900783aa74ded5d0746f9d663faa449566ee8bf | e2b32ba8268f1e4f922d070df882cc823fbe2561 | refs/heads/master | 2022-07-10T11:32:23.854187 | 2020-05-12T06:38:19 | 2020-05-12T06:38:19 | 263,515,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | from django.urls import path
from . import views
urlpatterns = [
path('test', views.welcome),
path('', views.test),
path('books', views.get_all_books),
path('authors', views.get_all_authors),
]
| [
"[email protected]"
] | |
728a5efca9a3bcbbb05ee129f4290af38ef6d78d | 35c9b0d0a0b51cad606630c920075096b1f77ebb | /venv/Scripts/easy_install-3.7-script.py | 466658959f0bbfa0f68d1c16a51af43769f7b7bf | [] | no_license | raj9226/session11 | 1e1793585e4d2a72df65355273dd7919433ca81d | 79f571103a6bf30558ebe1a77cae38ef00fafad8 | refs/heads/master | 2020-06-05T05:58:46.908576 | 2019-06-17T12:03:01 | 2019-06-17T12:03:01 | 192,338,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | #!"C:\Users\Rajeev yadav\PycharmProjects\session11\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
| [
"[email protected]"
] | |
2e6d911d2c703d11696a5e0a8922485817b9af65 | b0d06d89b28a575c961d02d117c7e5ded467e400 | /MQTT_publisher_subscriber.py | 0a39d2443dd438a588a06c8472ba6adc50c1b42b | [] | no_license | KnightFoxii/Internet-of-Things- | 47e902f474816056a4e59083f85b8621e6094319 | b11062b727ad86454c0cf9c355b8a693f3da57b3 | refs/heads/master | 2022-11-19T16:23:52.731728 | 2020-07-17T14:22:59 | 2020-07-17T14:22:59 | 280,442,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | #!/usr/bin/env python3
import time
import paho.mqtt.client as paho
import Adafruit_DHT as dht
broker="broker.hivemq.com"
#broker="172.16.180.240"
#broker="iot.eclipse.org"
def on_connect(client2, userdata, flags, rc):
print("Publisher Connected with result code "+str(rc))
time.sleep(2)
#define DHT11 reading
def DHT11_data():
# Sensor data of temperature and humidity
humi, temp = dht.read_retry(11,4)
return humi, temp
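# dht.read_retry blocks and retries until the DHT11 answers; its arguments are
# (sensor_type=11, BCM pin 4)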
client2 = paho.Client("client-002")
client2.on_connect = on_connect  # register the callback before connecting so the CONNACK is not missed
print("Connecting to broker... ", broker)
client2.connect(broker)
client2.loop_start()
try:
while True:
humi,temp = DHT11_data()
print('Temperature={0:0.1f}*C Humidity={1:0.1f}%'.format(temp, humi))
print("publishing... ")
client2.publish("mit/temperature",str(temp))
time.sleep(10)
except KeyboardInterrupt:
client2.loop_stop()
client2.disconnect()
| [
"[email protected]"
] | |
ed59cde894e4bfe61ad662c1a4e4cc2a9a1df1f0 | ae66ad38a7b19c01f1099d671dd127716a5d4c34 | /accounts/migrations/0042_remove_userprofile_specialty.py | 7afae14e93dfea27e81f8ca0344fd5b72bd71263 | [] | no_license | selbieh/django-freelacer-website | 6fd1eb009e9b30738bfa59fa78f530144b273231 | 0971a7fc3dc7e63a1909bb6adf3a84d7d9083324 | refs/heads/master | 2022-11-22T19:07:48.470928 | 2019-11-24T12:24:26 | 2019-11-24T12:24:26 | 172,359,908 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | # Generated by Django 2.0.4 on 2018-09-12 11:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0041_auto_20180912_1305'),
]
operations = [
migrations.RemoveField(
model_name='userprofile',
name='specialty',
),
]
| [
"[email protected]"
] | |
d6679e7a3e5fab67fb72d4aac10e15205c544297 | 1a32e7722c1d690c52f39153064f26597211825f | /library/views.py | 0b9786d1a40db8e0694b82c6f0df0d95d314d112 | [] | no_license | joyonto51/user_login_system_in_django | 7f07f9a5372b91a421129eb690bd2e2a73a6175e | 06894cbe18de2f51dba07f5020a0a306b562868a | refs/heads/master | 2021-09-26T09:22:33.964661 | 2021-02-22T14:31:31 | 2021-02-22T14:31:31 | 235,355,698 | 0 | 0 | null | 2021-09-22T18:31:51 | 2020-01-21T14:01:36 | CSS | UTF-8 | Python | false | false | 1,509 | py | from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from rest_framework.response import Response
from rest_framework.views import APIView
from basic_auth.views import BaseView
from library.forms import BookForm
from library.models import Book, Author, Publisher
class BooksListView(BaseView):
template_name = 'book_list.html'
def get(self, request, *args, **kwargs):
context = {
'books': Book.objects.all().order_by('-publish_date'),
}
return render(request, self.template_name, context)
class BooksAddView(BaseView):
template_name = 'books_add.html'
def get(self, request, *args, **kwargs):
context = {
'authors': Author.objects.all(),
'publishers': Publisher.objects.all(),
}
return render(request, self.template_name, context)
    def post(self, request, *args, **kwargs):
        author_id = request.POST.get('author_id')
        publisher_id = request.POST.get('publisher_id')
        form = BookForm(request.POST, author_id=author_id, publisher_id=publisher_id)
        if form.is_valid():
            Book.objects.create(**form.cleaned_data)
            return HttpResponseRedirect(reverse('books_list'))
        # Invalid form: re-render the add page so validation errors are not lost.
        context = {
            'authors': Author.objects.all(),
            'publishers': Publisher.objects.all(),
            'form': form,
        }
        return render(request, self.template_name, context)
class GetBookListAPIVIew(APIView):
def get(self, request):
context = {
'books': "Congratulations"
}
return Response(context)
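# A possible URLconf wiring for these views (hypothetical; this project's real
# urls.py is not shown here, but reverse('books_list') above implies a URL with
# that name):
#
#   from django.urls import path
#   from library import views
#
#   urlpatterns = [
#       path('books/', views.BooksListView.as_view(), name='books_list'),
#       path('books/add/', views.BooksAddView.as_view(), name='books_add'),
#       path('api/books/', views.GetBookListAPIVIew.as_view(), name='api_books'),
#   ]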

# ==== File: /app/Pizza/Pizza/urls.py (repo: MelekhSV/WebSite, no license) ====
"""Pizza URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import include
from .views import redirect_blog
urlpatterns = [
path('', redirect_blog),
path('admin/', admin.site.urls),
path('pizza/', include('pizza1.urls'))
]
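# The redirect_blog view imported above is defined in this project's views
# module; a plausible minimal implementation (assumption, not shown here):
#
#   from django.shortcuts import redirect
#
#   def redirect_blog(request):
#       return redirect('/pizza/')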

# ==== File: /mpy-cross/mpy_cross/__main__.py (repo: jbgagnon/micropython, licenses: MIT, GPL-1.0-or-later) ====
#!/usr/bin/env python3
#
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2022 Andrew Leech
# Copyright (c) 2022 Jim Mussared
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function
import argparse
import sys
from . import run, CrossCompileError
try:
run(sys.argv[1:])
except CrossCompileError as er:
print(er.args[0], file=sys.stderr)
raise SystemExit(1)

# ==== File: /sql/1.py (repo: Zuoxiaoxian/Excel_Oracle_conf_log, license: MIT) ====
# -*- coding: utf-8 -*-
# Author: xiaoxianzuo.zuo
# QQ: 1980179070
# Filename: 1.py
# Created: 2018/5/31 21:36
from excel_01 import excel_01
print("121212")
zong_lists = excel_01.excel_01()
for zong_list in zong_lists:
for zong in zong_list:
print("zong: ", zong)

# ==== File: /Python/TimeDomainPropagator/Scripts/born_tests.py (repo: rsarkar-github/Godzilla, no license) ====
from ..Propagator.BornScattering import *
import numpy as np
import matplotlib.pyplot as plt
# Set parameters
nz = 400
nx = 500
nt_ = 700
dx_ = 15
dz_ = 15
dt_ = 0.004
fmax_ = 10.0
pad_cells_x = 100
pad_cells_z = 100
# Create velocity
vel2d_ = np.zeros((nz, nx), dtype=np.float32) + 2000
# Create vel pert
vel_pert2d_ = np.zeros((nt_, nz, nx), dtype=np.float32)
vel_pert2d_[:, int(nz / 2), pad_cells_x:(nx - pad_cells_x)] = 1
# Create source and target wavefields
source = np.zeros((nt_, nz, nx), dtype=np.float32)
born_wavefield = np.zeros((nt_, nz, nx), dtype=np.float32)
_, vals = ricker_time(freq_peak=fmax_, nt=nt_, dt=dt_, delay=0.15)
vals = vals / np.max(np.abs(vals))
source[:, pad_cells_z + 1, int(nx / 2)] = vals
# Forward Born
born_time_dependent_pert_propagator(
vel2d=vel2d_,
dx=dx_, dz=dz_, dt=dt_, fmax=fmax_,
vel_pert2d=vel_pert2d_,
source_wavefield=source,
born_scattered_wavefield=born_wavefield,
ncells_pad_z=pad_cells_z,
ncells_pad_x=pad_cells_x,
adjoint_mode=False
)
# Receiver selection mask
receiver_restriction_mask = np.zeros((nz, nx), dtype=np.float32)
receiver_restriction_mask[pad_cells_z + 1, pad_cells_x:(nx - pad_cells_x)] = 1.0
born_wavefield *= np.reshape(receiver_restriction_mask, newshape=(1, nz, nx))
# recorded_data = born_wavefield[:, pad_cells_z, pad_cells_x:(nx - pad_cells_x)]
# np.reshape(recorded_data, newshape=(nt_, nx - 2 * pad_cells_x))
# plt.imshow(recorded_data, cmap='Greys')
# plt.colorbar()
# plt.axes().set_aspect("equal")
# plt.show()
# Adjoint Born
born_time_dependent_pert_propagator(
vel2d=vel2d_,
dx=dx_, dz=dz_, dt=dt_, fmax=fmax_,
vel_pert2d=vel_pert2d_,
source_wavefield=source,
born_scattered_wavefield=born_wavefield,
ncells_pad_z=pad_cells_z,
ncells_pad_x=pad_cells_x,
adjoint_mode=True
)
# born_image = np.sum(vel_pert2d_, axis=0)
# plt.imshow(born_image, cmap='Greys')
# plt.colorbar()
# plt.axes().set_aspect("equal")
# plt.show()
# Show movie
for ii in range(0, nt_, 20):
plt.imshow(vel_pert2d_[ii, :, :], cmap='Greys', vmin=-1e-6, vmax=1e-6)
plt.colorbar()
plt.axes().set_aspect("equal")
plt.pause(0.05)
plt.gcf().clear()
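# A standard check for a forward/adjoint pair like the two calls above is the
# dot-product test: <F m, d> should match <m, F' d> to floating-point accuracy.
# Sketch (hypothetical, assuming adjoint_mode=True writes the image into
# vel_pert2d, as the movie loop above suggests):
#
#   m = np.random.rand(nt_, nz, nx).astype(np.float32)
#   d = np.random.rand(nt_, nz, nx).astype(np.float32)
#   Fm = np.zeros((nt_, nz, nx), dtype=np.float32)
#   born_time_dependent_pert_propagator(
#       vel2d=vel2d_, dx=dx_, dz=dz_, dt=dt_, fmax=fmax_,
#       vel_pert2d=m, source_wavefield=source, born_scattered_wavefield=Fm,
#       ncells_pad_z=pad_cells_z, ncells_pad_x=pad_cells_x, adjoint_mode=False)
#   Ftd = np.zeros((nt_, nz, nx), dtype=np.float32)
#   born_time_dependent_pert_propagator(
#       vel2d=vel2d_, dx=dx_, dz=dz_, dt=dt_, fmax=fmax_,
#       vel_pert2d=Ftd, source_wavefield=source, born_scattered_wavefield=d,
#       ncells_pad_z=pad_cells_z, ncells_pad_x=pad_cells_x, adjoint_mode=True)
#   print(np.sum(Fm * d), np.sum(m * Ftd))  # the two inner products should agree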

# ==== File: /golden_kamui/bin/pip3 (repo: patchgi/golden_kamui, no license) ====
#!/Users/patchgi/workspace/golden_kamui/golden_kamui/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())

# ==== File: /delivery/delivery/factorytest.py (repo: adel121/FSTDelivery, no license) ====
import factory
from factory.django import DjangoModelFactory
from models import Client, Delivery_In, Delivery_Out, Manager
def dt():
    # These are lazy factory_boy declarations; they are not evaluated until
    # they are used inside a Factory class.
    Company = factory.Faker("name")
    Phone = factory.Faker("phone_number")
    Location = factory.Faker("location")
    print(Company)
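# For reference, factory_boy normally consumes declarations like these inside
# a Factory subclass, e.g. (hypothetical field names, assuming the Client
# model has matching columns):
#
#   class ClientFactory(DjangoModelFactory):
#       class Meta:
#           model = Client
#
#       Company = factory.Faker("company")
#       Phone = factory.Faker("phone_number")
#
#   client = ClientFactory()  # creates and saves a Client with fake data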
#if __name__ == "__main__":
#    print("run from internal")

# ==== File: /Week_06/G20190343020041/LeetCode_127_0041.py (repo: algorithm005-class02/algorithm005-class02, no license) ====
# Given two words (beginWord and endWord) and a dictionary's word list, find
# the length of the shortest transformation sequence from beginWord to
# endWord, following these rules:
#
#   Only one letter can be changed at a time.
#   Each transformed word must exist in the word list.
#
#
# Notes:
#
#   If there is no such transformation sequence, return 0.
#   All words have the same length.
#   All words consist of lowercase letters only.
#   There are no duplicate words in the word list.
#   You may assume beginWord and endWord are non-empty and not the same.
#
#
# Example 1:
#
# Input:
# beginWord = "hit",
# endWord = "cog",
# wordList = ["hot","dot","dog","lot","log","cog"]
#
# Output: 5
#
# Explanation: one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog",
# so its length 5 is returned.
#
#
# Example 2:
#
# Input:
# beginWord = "hit"
# endWord = "cog"
# wordList = ["hot","dot","dog","lot","log"]
#
# Output: 0
#
# Explanation: endWord "cog" is not in the word list, so no transformation is possible.
# Related Topics: Breadth-first Search
# leetcode submit region begin(Prohibit modification and deletion)
from collections import deque
from typing import List
class Solution:
def ladderLengthBFS(self, beginWord: str, endWord: str, wordList: List[str]) -> int:
if endWord not in wordList: return 0
d = self.construct_dict(set(wordList) | set([beginWord, endWord]))
queue, visited = deque([(beginWord, 1)]), set()
while queue:
current_word, level = queue.popleft()
if current_word == endWord:
return level
visited.add(current_word)
for i in range(len(beginWord)):
s = current_word[:i] + "_" + current_word[i + 1:]
neigh_words = d.get(s, [])
for nw in neigh_words:
if nw not in visited:
queue.append((nw, level + 1))
return 0
def ladderLength(self, beginWord: str, endWord: str, __wordList: List[str]) -> int:
if endWord not in __wordList: return 0
wordList = set(__wordList)
d = self.construct_dict(wordList | set([beginWord, endWord]))
queue_begin, queue_end, visited = set([beginWord]), set([endWord]), set([beginWord])
level = 1
while queue_begin:
level += 1
next_queue = set()
for current_word in queue_begin:
for i in range(len(beginWord)):
s = current_word[:i] + "_" + current_word[i + 1:]
neigh_words = d.get(s, [])
for nw in neigh_words:
if nw in queue_end:
return level
if nw not in visited:
next_queue.add(nw)
visited.add(nw)
queue_begin = next_queue
if len(queue_begin) > len(queue_end):
queue_begin, queue_end = queue_end, queue_begin
return 0
def construct_dict(self, word_list):
d = {}
for word in word_list:
for i in range(len(word)):
s = word[:i] + "_" + word[i + 1:]
d[s] = d.get(s, []) + [word]
return d
# leetcode submit region end(Prohibit modification and deletion)
print(Solution().ladderLength("hot", "dog", ["hot", "dog"]))
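# Example 1 from the problem statement (expected output: 5):
print(Solution().ladderLength("hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]))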

# ==== File: /app.py (repo: mattsunner/file-namer, no license) ====
"""
File Renamer
Author: Matthew Sunner
"""
# imports
import os
# Renamer
def main():
# Update this to change the base name of all files
nameBase = "Presentation"
fileType = ".pptx" # Update to reflect needed file type extension
    for count, filename in enumerate(os.listdir("xyz")):
        # Use any extension needed here
        dst = nameBase + str(count) + fileType
        src = os.path.join('xyz', filename)  # join directory and filename with a separator
        dst = os.path.join('xyz', dst)
        os.rename(src, dst)
# Calling Function
if __name__ == '__main__':
main()

# ==== File: /backup/user_040/ch84_2020_04_13_01_39_55_701842.py (repo: gabriellaec/desoft-analise-exercicios, no license) ====
def inverte_dicionario(x):
dicionario = {}
for nome,idade in x.items():
if idade not in dicionario:
dicionario[idade] = [nome]
else:
dicionario[idade].append(nome)
    return dicionario

# ==== File: /model_seq2tree.py (repo: satos---jp/neural_decompiler, no license) ====
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
from chainer import training
from chainer.training import extensions
from chainer import serializers
from chainer import cuda
from cuda_setting import isgpu
if isgpu:
xp = cuda.cupy
else:
xp = np
def sequence_embed(embed, xs):
    # Embed a list of variable-length sequences with one batched EmbedID call,
    # then split the result back into per-sequence chunks.
    x_len = [len(x) for x in xs]
    x_section = np.cumsum(x_len[:-1])
    ex = embed(F.concat(xs, axis=0))
    exs = F.split_axis(ex, x_section, 0)
    return exs
from model_attention import GlobalGeneralAttention
import random
class Seq2Tree(chainer.Chain):
def __init__(self, n_layers, n_source_vocab, trans_data, n_units,v_eos_src,n_maxsize):
super(Seq2Tree, self).__init__()
# for each nodetype, for each move, the result array.
self.trans_data = trans_data
self.embed_idx = []
ns = 0
def inc():
nonlocal ns
ns += 1
return ns-1
self.embed_idx = [[[inc() for v in vs] for vs in moves] for moves in self.trans_data]
self.embed_root_idx = ns
self.embed_y_size = ns+1
self.choicerange = []
self.is_trivial = []
self.choice_idx = []
s = 0
for d in self.trans_data:
ist = len(d)<=1
self.is_trivial.append(ist)
if ist:
self.choicerange.append(None)
self.choice_idx.append([0])
continue
b = s
s += len(d)
self.choicerange.append((b,s))
self.choice_idx.append(list(range(b,s)))
#self.choice_num_sum = sum(list(map(lambda d: len(d),self.trans_data)))
self.n_choicables = s
self.type_size = len(self.embed_idx)
with self.init_scope():
self.embed_x = L.EmbedID(n_source_vocab, n_units)
self.embed_y = L.EmbedID(self.embed_y_size, n_units) # maybe mergable
self.encoder = L.NStepBiLSTM(n_layers, n_units, n_units, 0.1)
self.decoder = L.NStepLSTM(n_layers, n_units, n_units*2, 0.1)
self.Wc = L.Linear(n_units*4, n_units)
self.Ws = L.Linear(n_units, self.n_choicables)
#self.att = Attention(n_units)
self.att = GlobalGeneralAttention(n_units)
self.n_layers = n_layers
self.n_units = n_units
self.v_eos_src = v_eos_src
self.n_maxsize = n_maxsize
self.rootidx = len(trans_data)-1
def forward(self, xs, ys):
batch = len(xs)
xs = [xp.array(x[::-1]) for x in xs]
exs = sequence_embed(self.embed_x, xs)
def sample_path(y):
my = y
eidx = self.embed_root_idx
ist = False
res = []
while True:
ty,ch,cs = y
ci = self.choice_idx[ty][ch]
res.append((eidx,ci,self.is_trivial[ty]))
lcs = len(cs)
if lcs == 0:
break
i = random.randint(0,lcs-1)
eidx = self.embed_idx[ty][ch][i]
y = cs[i]
#print(res)
return res
ys = [sample_path(y) for y in ys]
ys_out = [xp.array(list(map(lambda a: a[1],d))) for d in ys]
#print('ys out')
#print(ys_out)
ys_conds = [xp.array(list(map(lambda a: a[2],d)),dtype=xp.bool) for d in ys]
#print(self.embed_y_size,self.n_all_choice)
eys = sequence_embed(self.embed_y, [xp.array(list(map(lambda a: a[0],d))) for d in ys])
hx, cx, xs_states = self.encoder(None, None, exs)
hx = F.transpose(F.reshape(F.transpose(hx,(1,0,2)),(batch,self.n_layers,self.n_units*2)),(1,0,2))
cx = F.transpose(F.reshape(F.transpose(cx,(1,0,2)),(batch,self.n_layers,self.n_units*2)),(1,0,2))
_, _, os = self.decoder(hx, cx, eys)
#print('decode')
ctxs = [self.att(xh,yh) for (xh,yh) in zip(xs_states,os)]
#print('attentioned')
att_os = [F.tanh(self.Wc(F.concat([ch,yh],axis=1))) for (ch,yh) in zip(ctxs,os)]
concat_os = F.concat(att_os, axis=0)
concat_ys_out = F.concat(ys_out, axis=0)
concat_cond = F.concat(ys_conds, axis=0)
#print(concat_ys_out,concat_cond)
sxe = F.softmax_cross_entropy(self.Ws(concat_os), concat_ys_out, reduce='no')
sxec = F.where(concat_cond,xp.zeros(sxe.shape,dtype=xp.float32),sxe)
#print(sxec)
loss = F.sum(sxec) / batch
#print('lossed')
chainer.report({'loss': loss}, self)
#exit()
return loss
def translate(self, xs):
batch = len(xs)
#beam_with = 3
with chainer.no_backprop_mode(), chainer.using_config('train', False):
xs = [xp.array(x[::-1]) for x in xs]
exs = sequence_embed(self.embed_x, xs)
hx, cx, xs_outputs = self.encoder(None, None, exs)
#print(hx.shape,cx.shape,(1,xs_states[0].shape))
#sprint(xs_states)
hx = F.transpose(F.reshape(F.transpose(hx,(1,0,2)),(batch,self.n_layers,self.n_units*2)),(1,0,2))
cx = F.transpose(F.reshape(F.transpose(cx,(1,0,2)),(batch,self.n_layers,self.n_units*2)),(1,0,2))
hx = F.transpose(hx,axes=(1,0,2))
cx = F.transpose(cx,axes=(1,0,2))
ivs = sequence_embed(self.embed_y,list(map(lambda i: xp.array([i]),range(self.embed_y_size))))
v = ivs[self.embed_root_idx]
result = []
nsize = None
for i in range(len(xs_outputs)):
def expand_tree(ntype,eidx,nhxncx):
nonlocal nsize
(nhx,ncx) = nhxncx
if nsize > self.n_maxsize:
return (ntype,-1,[])
nsize += 1
#eidx = self.embed_idx[ntype][ppos]
ev = ivs[eidx]
thx,tcx,ys = self.decoder(nhx,ncx,[ev])
yh = ys[0]
ctx = self.att(xs_outputs[i],yh)
att_yh = F.tanh(self.Wc(F.concat([ctx,yh],axis=1)))
if self.is_trivial[ntype]:
nchoice = 0
else:
choice_from,choice_to = self.choicerange[ntype]
cl = choice_to - choice_from
wy = self.Ws(att_yh).data[0][choice_from:choice_to]
#print(wy.shape,wy)
wy = F.reshape(F.log_softmax(F.reshape(wy,(1,cl))),(cl,))
#wy = F.reshape(F.log_softmax(F.reshape(wy,(1,self.vs_target_vocab[ntype])),axis=1),(self.vs_target_vocab[ntype],)).data
nchoice = F.argmax(wy.data).data.astype(np.int32).item()
#print(ntype,nchoice)
#print(c_cfg.idx2nodetype(ntype))
ctypes = self.trans_data[ntype][nchoice]
resv = []
for j,ct in enumerate(ctypes):
teidx = self.embed_idx[ntype][nchoice][j]
resv.append(expand_tree(ct,teidx,(thx,tcx)))
return (ntype,nchoice,resv)
nhx,ncx = hx[i],cx[i]
ncx = F.reshape(ncx,(ncx.shape[0],1,ncx.shape[1]))
nhx = F.reshape(nhx,(nhx.shape[0],1,nhx.shape[1]))
# TODO(satos) What is the beam search for tree!?
# now, beam search is ommited.
nsize = 0
tree = expand_tree(self.type_size-1,self.embed_root_idx,(nhx,ncx))
result.append(tree)
return result
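# A minimal instantiation sketch (hypothetical sizes; trans_data[node_type][production]
# must list the child node types, matching the layout consumed above, with the
# root node type last):
#
#   trans_data = [[[]], [[0, 0], []]]  # type 0: one childless production;
#                                      # type 1 (root): two productions
#   model = Seq2Tree(n_layers=2, n_source_vocab=100, trans_data=trans_data,
#                    n_units=64, v_eos_src=1, n_maxsize=500)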

# ==== File: /formEntry/migrations/0002_auto_20171105_2055.py (repo: justingschumacher/ProjectStatusDashboard, no license) ====
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-06 04:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('formEntry', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='formAdmin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.AddField(
model_name='project',
name='createdDate',
field=models.DateTimeField(blank=True, null=True),
),
]

# ==== File: /CanvassVirtual/Canvass/my_auth/urls.py (repo: Canvass-PEC/Canvass, no license) ====
"""Canvass URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from .views import signup,login,logout
app_name="my_auth"
urlpatterns = [
path('signup', signup, name='signup'),
path('logout', logout, name='logout'),
path('login', login, name='login'),
]

# ==== File: /.svn/pristine/1f/1ffa3daf5190c3c64ab474fdc5d1a5428a9de59a.svn-base (repo: 1185973378/zhou, no license) ====
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    return HttpResponse("This is a home page; it could host a short video introducing our company (like the QQ installer does!)")

# ==== File: /venv/Lib/site-packages/pip/_vendor/urllib3/request.py (repo: TheBugKing/RapidNews, no license) ====
from __future__ import absolute_import
from .filepost import encode_multipart_formdata
from .packages.six.moves.urllib.parse import urlencode
__all__ = ['RequestMethods']
class RequestMethods(object):
"""
Convenience mixin for classes who implement a :meth:`urlopen` method, such
as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
:class:`~urllib3.poolmanager.PoolManager`.
Provides behavior for making common types of HTTP request methods and
decides which type of request field encoding to use.
Specifically,
:meth:`.request_encode_url` is for sending requests whose fields are
encoded in the URL (such as GET, HEAD, DELETE).
:meth:`.request_encode_body` is for sending requests whose fields are
encoded in the *body* of the request using multipart or www-form-urlencoded
(such as for POST, PUT, PATCH).
:meth:`.request` is for making any kind of request, it will look up the
appropriate encoding format and use one of the above two methods to make
the request.
Initializer parameters:
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
"""
_encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
def __init__(self, headers=None):
self.headers = headers or {}
def urlopen(self, method, url, body=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**kw): # Abstract
raise NotImplementedError("Classes extending RequestMethods must implement "
"their own ``urlopen`` method.")
def request(self, method, url, fields=None, headers=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the appropriate encoding of
``fields`` based on the ``method`` used.
This is a convenience method that requires the least amount of manual
effort. It can be used in most situations, while still having the
option to drop down to more specific methods when necessary, such as
:meth:`request_encode_url`, :meth:`request_encode_body`,
or even the lowest level :meth:`urlopen`.
"""
method = method.upper()
urlopen_kw['request_url'] = url
if method in self._encode_url_methods:
return self.request_encode_url(method, url, fields=fields,
headers=headers,
**urlopen_kw)
else:
return self.request_encode_body(method, url, fields=fields,
headers=headers,
**urlopen_kw)
def request_encode_url(self, method, url, fields=None, headers=None,
**urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the url. This is useful for request methods like GET, HEAD, DELETE, etc.
"""
if headers is None:
headers = self.headers
extra_kw = {'headers': headers}
extra_kw.update(urlopen_kw)
if fields:
url += '?' + urlencode(fields)
return self.urlopen(method, url, **extra_kw)
def request_encode_body(self, method, url, fields=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
the payload with the appropriate content type. Otherwise
:meth:`urllib.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
Multipart encoding must be used when posting files, and it's reasonably
safe to use it in other times too. However, it may break request
signing, such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
tuple) is optional but recommended to best mimic behavior of browsers.
Note that if ``headers`` are supplied, the 'Content-Type' header will
be overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
"""
if headers is None:
headers = self.headers
extra_kw = {'headers': {}}
if fields:
if 'body' in urlopen_kw:
raise TypeError(
"request got values for both 'fields' and 'body', can only specify one.")
if encode_multipart:
body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
else:
body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
extra_kw['body'] = body
extra_kw['headers'] = {'Content-Type': content_type}
extra_kw['headers'].update(headers)
extra_kw.update(urlopen_kw)
return self.urlopen(method, url, **extra_kw)
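# Typical use is via a subclass such as PoolManager, e.g. with the standalone
# urllib3 package:
#
#   import urllib3
#   http = urllib3.PoolManager()
#   r = http.request('GET', 'http://example.com/', fields={'q': 'test'})
#   r2 = http.request('POST', 'http://example.com/upload',
#                     fields={'file': ('report.txt', b'data', 'text/plain')})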

# ==== File: /learningStuff/tensorflow_learning.py (repo: bibongbong/pythonCookBook, no license) ====
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
# Access Fashion MNIST directly from TensorFlow: import and load the data
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels),(test_images, test_labels) = fashion_mnist.load_data()
'''
The Fashion MNIST dataset contains 70,000 grayscale images of individual
clothing items across 10 categories, at a fairly low resolution (28x28
pixels). Each image maps to a single label; since the dataset does not
include the class names, we store them in class_names:
0 T-shirt/top
1 Trouser
2 Pullover
3 Dress
4 Coat
5 Sandal
6 Shirt
7 Sneaker
8 Bag
9 Ankle boot
'''
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
print(train_images.shape)  # (60000, 28, 28): 60000 training images of 28x28 pixels each
print(len(train_labels))  # 60000: one label per training image
print(train_labels)  # [9 0 0 ... 3 0 5]
# Each image's pixel values fall in the range 0-255
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()  # show the color scale bar
#plt.grid(False)
#plt.show()  # display the image
# Preprocess the data before training the network:
# scale the pixel values to the 0-1 range before feeding them to the model
train_images = train_images / 255.0
test_images = test_images / 255.0
# Display the first 25 images from the training set with their class names
plt.figure(figsize=(10,10))  # figure size 10x10
for i in range(25):
    plt.subplot(5, 5, i+1)  # lay the 25 images out in a 5x5 grid (rows x columns)
    plt.xticks([])
    plt.yticks([])
    plt.grid(True)
    plt.imshow(train_images[i], cmap=plt.cm.binary)
    plt.xlabel(class_names[train_labels[i]])
#plt.show()
'''
Build the model
1. Set up the layers
The first layer in the network, tf.keras.layers.Flatten, converts the image
format from a 2D array (28x28 pixels) to a 1D array (28 * 28 = 784 pixels).
After the pixels are flattened, the network consists of a sequence of two
tf.keras.layers.Dense layers. These are densely connected (fully connected)
neural layers. The first Dense layer has 128 nodes (neurons). The second (and
last) layer is a 10-node softmax layer that returns an array of 10 probability
scores summing to 1; each node's score is the probability that the current
image belongs to one of the 10 classes.
'''
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
'''
2. Compile the model
a. Loss function: measures how accurate the model is during training. We want
   to minimize this function to "steer" the model in the right direction.
b. Optimizer: how the model is updated based on the data it sees and its loss
   function.
c. Metrics: used to monitor the training and testing steps. The example below
   uses accuracy, the fraction of images that are correctly classified.
'''
model.compile(optimizer=tf.train.AdamOptimizer(), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
'''
3. Train the model
a. Feed the training data to the model; in this example, the train_images and
   train_labels arrays.
b. The model learns to associate images with labels.
c. We ask the model to make predictions on a test set (the test_images array)
   and verify the predictions against the labels in the test_labels array.
epochs is the number of passes over the training data.
While the model trains, the loss and accuracy metrics are displayed; this
model reaches an accuracy of about 0.89 (89%) on the training data.
'''
model.fit(train_images, train_labels, epochs=5)
'''
4. Evaluate accuracy
Test accuracy: 0.8735
The accuracy on the test dataset is slightly lower than on the training
dataset. This gap between training accuracy and test accuracy indicates
overfitting: a machine learning model is overfitting when it performs worse
on new data than on its training data.
'''
test_loss, test_acc = model.evaluate(test_images, test_labels)
print("Test accuracy:", test_acc) | [
"[email protected]"
] | |
f12335bce20645d638681a0e894420cab9a0c193 | a4c7f5cd089efbf92e5ba904d378e23da1dc8130 | /test/wavy_surface.py | cb432bf0b8cb83734e4856584972f4103ce8b145 | [] | no_license | socketteer/hallucinator | 65ca6842d1343e6547e5612ffb8b63c2268f2944 | 87705406004fc88850b760e1322e6aedef927fa7 | refs/heads/master | 2023-06-26T17:56:41.239228 | 2021-07-30T02:59:07 | 2021-07-30T02:59:07 | 189,884,880 | 16 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | import hallucinator as hl
from typing import TypedDict, NamedTuple, Tuple
import math
def wavy_surface(amplitude: float = 1,
frequency: float = 1,
direction: float = 0,
phase: float = 0,
rotate_x: float = 0,
rotate_y: float = 0,
rotate_z: float = 0,
location: Tuple[int, int, int] = (0, 0, 20)):
surface_obj = hl.ParaObject3(hl.gen_plane_wave(amplitude, frequency, hl.unit_vector(direction), phase),
region_type='2d',
region_params={'surface_range': ((-5, 5), (-5, 5))},
species='surface')
surface_obj = surface_obj.rotate(theta=rotate_x, axis=(1, 0, 0))
surface_obj = surface_obj.rotate(theta=rotate_y, axis=(0, 1, 0))
surface_obj = surface_obj.rotate(theta=rotate_z, axis=(0, 0, 1))
surface_obj = surface_obj.translate(location)
return surface_obj
def wavy_scene(t, **kwargs):
scene = hl.MonochromeScene()
scene.add_object(wavy_surface(amplitude=1,
frequency=t,
direction=0,
phase=0,
rotate_x=-1,
rotate_y=4,
rotate_z=1,
location=(0, 0, 40)),
"surface")
camscene = scene.render_scene(camera_position=(0, 0, -15),
projection_type=hl.Projections.WEAK,
styles=hl.Styles.UNIFORM,
x_range=(-7, 7),
y_range=(-7, 7),
resolution=75,
densities=(6, 30))
return camscene
hl.render_from_array(wavy_scene(t=0))
params = dict(
frame_function=lambda d: wavy_scene(**d),
frame_arguments=hl.unroll_dict(dict(
t=hl.np.linspace(0, 37, num=1500),
)),
filename=f"../videos/lasagna3",
fps=15,
preview=True,
parallel_frames=False,
)
#hl.video(**params)
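# Note: render_from_array above displays only the single t=0 frame;
# uncommenting hl.video(**params) would render the full 1500-frame animation
# described by frame_arguments.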

# ==== File: /tkinterify.py (repo: rbricheno/tkinterify, license: MIT) ====
import tkinter
import click
import sys
from io import StringIO
def tkinterify(cli_group, app_name="Tkinterified App"):
# Create and configure root
root = tkinter.Tk()
root.wm_title(app_name)
tkinter.Grid.rowconfigure(root, 0, weight=1)
tkinter.Grid.columnconfigure(root, 0, weight=1)
# Create and configure frame
frame = tkinter.Frame(root)
frame.grid(row=0, column=0, sticky="nsew")
frame.columnconfigure(0, weight=1)
frame.columnconfigure(1, weight=1)
frame.columnconfigure(2, weight=1)
frame.rowconfigure(0, weight=1)
frame.rowconfigure(1, weight=1)
initial_output = "Valid commands:\n"
initial_command_name_list = list(cli_group.commands.keys())
for available_command_name in initial_command_name_list:
initial_output = initial_output + " " + available_command_name + "\n"
initial_output = initial_output + "Ready for input."
# Some GUI widgets
run_string = tkinter.StringVar()
entry_run = tkinter.Entry(root, textvariable=run_string, width=50)
scrollbar_widget = tkinter.Scrollbar(root)
text_widget = tkinter.Text(root)
def clear_callback():
# Because the text widget is usually disabled, we have to explicitly enable it before we can write to it.
text_widget.config(state='normal')
text_widget.delete(1.0, tkinter.END)
text_widget.insert(tkinter.END, initial_output)
text_widget.config(state='disabled')
def run_callback():
command_args = []
try:
command_parts = run_string.get().split()
command_name = command_parts[0]
except IndexError:
return
if len(command_parts) > 1:
command_args = command_parts[1:]
if command_name:
try:
# Redirect stdout so we can read the output into a string for display within out GUI
real_stdout = sys.stdout
fake_stdout = StringIO()
sys.stdout.flush()
sys.stdout = fake_stdout
# Obtain list of available commands
available_commands = cli_group.commands
command_name_list = list(cli_group.commands.keys())
if command_name in command_name_list:
try:
# Make a fake context in which to run the command
context = available_commands[command_name].make_context("tkinter", command_args)
# Invoke the command within the fake context
available_commands[command_name].invoke(context)
except click.exceptions.UsageError as e:
print(e)
print(initial_output)
else:
print("Command not found.\n")
print(initial_output)
# Put stdout back
sys.stdout.flush()
sys.stdout = real_stdout
sys.stdout.flush()
output_string = fake_stdout.getvalue()
fake_stdout.close()
# Update the text output widget
text_widget.config(state='normal')
text_widget.delete(1.0, tkinter.END)
text_widget.insert(tkinter.END, output_string)
text_widget.config(state='disabled')
except IndexError:
pass
# More GUI widgets
button_run = tkinter.Button(root, text="Run", command=run_callback)
button_clear = tkinter.Button(root, text="Clear", command=clear_callback)
text_widget.delete(1.0, tkinter.END)
text_widget.insert(tkinter.END, initial_output)
entry_run.grid(row=0, column=0, sticky="new")
button_run.grid(row=0, column=1, sticky="n")
button_clear.grid(row=0, column=2, sticky="n")
text_widget.grid(row=1, column=0, columnspan=2, sticky="nsew")
scrollbar_widget.grid(row=1, column=2, sticky="ns")
scrollbar_widget.config(command=text_widget.yview)
text_widget.config(yscrollcommand=scrollbar_widget.set)
text_widget.config(state='disabled')
root.mainloop()
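# Example usage with a click group (hypothetical commands):
#
#   import click
#
#   @click.group()
#   def cli():
#       pass
#
#   @cli.command()
#   def hello():
#       click.echo("Hello from the GUI!")
#
#   if __name__ == '__main__':
#       tkinterify(cli, app_name="Demo App")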